summaryrefslogtreecommitdiff
path: root/tests/nnapi/specs/skip/V1_2
diff options
context:
space:
mode:
authorChunseok Lee <chunseok.lee@samsung.com>2020-12-14 14:43:04 +0900
committerChunseok Lee <chunseok.lee@samsung.com>2020-12-14 14:43:04 +0900
commit12d88feea8573f8490629cf62fc342b152e57d65 (patch)
tree3c734cc4d629834d2d523f4575ef84cd64684e57 /tests/nnapi/specs/skip/V1_2
parentd6b371e095d737922187a518b8faba1ef6f3a2b1 (diff)
downloadnnfw-12d88feea8573f8490629cf62fc342b152e57d65.tar.gz
nnfw-12d88feea8573f8490629cf62fc342b152e57d65.tar.bz2
nnfw-12d88feea8573f8490629cf62fc342b152e57d65.zip
Imported Upstream version 1.11.0upstream/1.11.0
Diffstat (limited to 'tests/nnapi/specs/skip/V1_2')
-rw-r--r--tests/nnapi/specs/skip/V1_2/add_v1_2.mod.py99
-rw-r--r--tests/nnapi/specs/skip/V1_2/argmin_1.mod.py31
-rw-r--r--tests/nnapi/specs/skip/V1_2/argmin_2.mod.py31
-rw-r--r--tests/nnapi/specs/skip/V1_2/argmin_3.mod.py33
-rw-r--r--tests/nnapi/specs/skip/V1_2/avg_pool_v1_2.mod.py214
-rw-r--r--tests/nnapi/specs/skip/V1_2/axis_aligned_bbox_transform.mod.py121
-rw-r--r--tests/nnapi/specs/skip/V1_2/batch_to_space_v1_2.mod.py52
-rw-r--r--tests/nnapi/specs/skip/V1_2/bbox_graph.mod.py93
-rw-r--r--tests/nnapi/specs/skip/V1_2/bidirectional_sequence_lstm.mod.py461
-rw-r--r--tests/nnapi/specs/skip/V1_2/bidirectional_sequence_lstm_aux_input.mod.py482
-rw-r--r--tests/nnapi/specs/skip/V1_2/bidirectional_sequence_lstm_cifg_peephole.mod.py447
-rw-r--r--tests/nnapi/specs/skip/V1_2/bidirectional_sequence_lstm_float16_batch_major.mod.py461
-rw-r--r--tests/nnapi/specs/skip/V1_2/bidirectional_sequence_lstm_float16_batch_major_aux_input.mod.py483
-rw-r--r--tests/nnapi/specs/skip/V1_2/bidirectional_sequence_lstm_float16_batch_major_merge_outputs.mod.py454
-rw-r--r--tests/nnapi/specs/skip/V1_2/bidirectional_sequence_lstm_merge_outputs.mod.py453
-rw-r--r--tests/nnapi/specs/skip/V1_2/bidirectional_sequence_lstm_norm_fw_output.mod.py482
-rw-r--r--tests/nnapi/specs/skip/V1_2/bidirectional_sequence_rnn.mod.py528
-rw-r--r--tests/nnapi/specs/skip/V1_2/box_with_nms_limit_gaussian.mod.py198
-rw-r--r--tests/nnapi/specs/skip/V1_2/box_with_nms_limit_hard.mod.py186
-rw-r--r--tests/nnapi/specs/skip/V1_2/box_with_nms_limit_linear.mod.py201
-rw-r--r--tests/nnapi/specs/skip/V1_2/channel_shuffle.mod.py42
-rw-r--r--tests/nnapi/specs/skip/V1_2/concat_float16_1.mod.py31
-rw-r--r--tests/nnapi/specs/skip/V1_2/concat_float16_2.mod.py41
-rw-r--r--tests/nnapi/specs/skip/V1_2/concat_float16_3.mod.py47
-rw-r--r--tests/nnapi/specs/skip/V1_2/concat_mixed_quant.mod.py56
-rw-r--r--tests/nnapi/specs/skip/V1_2/concat_zero_sized.mod.py97
-rw-r--r--tests/nnapi/specs/skip/V1_2/conv2d_dilation.mod.py146
-rw-r--r--tests/nnapi/specs/skip/V1_2/conv2d_per_channel.mod.py74
-rw-r--r--tests/nnapi/specs/skip/V1_2/conv2d_v1_2.mod.py297
-rw-r--r--tests/nnapi/specs/skip/V1_2/depth_to_space_v1_2.mod.py76
-rw-r--r--tests/nnapi/specs/skip/V1_2/depthwise_conv2d_dilation.mod.py154
-rw-r--r--tests/nnapi/specs/skip/V1_2/depthwise_conv2d_per_channel.mod.py65
-rw-r--r--tests/nnapi/specs/skip/V1_2/depthwise_conv2d_v1_2.mod.py167
-rw-r--r--tests/nnapi/specs/skip/V1_2/detection_postprocess.mod.py219
-rw-r--r--tests/nnapi/specs/skip/V1_2/div_v1_2.mod.py88
-rw-r--r--tests/nnapi/specs/skip/V1_2/floor_float16.mod.py17
-rw-r--r--tests/nnapi/specs/skip/V1_2/fully_connected_v1_2.mod.py86
-rw-r--r--tests/nnapi/specs/skip/V1_2/generate_proposals.mod.py211
-rw-r--r--tests/nnapi/specs/skip/V1_2/greater.mod.py99
-rw-r--r--tests/nnapi/specs/skip/V1_2/grouped_conv2d.mod.py135
-rw-r--r--tests/nnapi/specs/skip/V1_2/heatmap_max_keypoint.mod.py193
-rw-r--r--tests/nnapi/specs/skip/V1_2/instance_normalization.mod.py56
-rw-r--r--tests/nnapi/specs/skip/V1_2/l2_normalization_axis.mod.py47
-rw-r--r--tests/nnapi/specs/skip/V1_2/l2_normalization_v1_2.mod.py50
-rw-r--r--tests/nnapi/specs/skip/V1_2/l2_pool_v1_2.mod.py110
-rw-r--r--tests/nnapi/specs/skip/V1_2/layer_norm_lstm.mod.py357
-rw-r--r--tests/nnapi/specs/skip/V1_2/less_equal.mod.py99
-rw-r--r--tests/nnapi/specs/skip/V1_2/local_response_normalization_v1_2.mod.py44
-rw-r--r--tests/nnapi/specs/skip/V1_2/log.mod.py27
-rw-r--r--tests/nnapi/specs/skip/V1_2/log_softmax.mod.py72
-rw-r--r--tests/nnapi/specs/skip/V1_2/logistic_v1_2.mod.py94
-rw-r--r--tests/nnapi/specs/skip/V1_2/lsh_projection_3_relaxed.mod.py42
-rw-r--r--tests/nnapi/specs/skip/V1_2/lsh_projection_4_relaxed.mod.py42
-rw-r--r--tests/nnapi/specs/skip/V1_2/lsh_projection_deprecated.mod.py42
-rw-r--r--tests/nnapi/specs/skip/V1_2/lsh_projection_float16.mod.py39
-rw-r--r--tests/nnapi/specs/skip/V1_2/lstm2_float16.mod.py142
-rw-r--r--tests/nnapi/specs/skip/V1_2/lstm2_state2_float16.mod.py142
-rw-r--r--tests/nnapi/specs/skip/V1_2/lstm2_state_float16.mod.py141
-rw-r--r--tests/nnapi/specs/skip/V1_2/lstm3_float16.mod.py662
-rw-r--r--tests/nnapi/specs/skip/V1_2/lstm3_state2_float16.mod.py683
-rw-r--r--tests/nnapi/specs/skip/V1_2/lstm3_state3_float16.mod.py663
-rw-r--r--tests/nnapi/specs/skip/V1_2/lstm3_state_float16.mod.py683
-rw-r--r--tests/nnapi/specs/skip/V1_2/lstm_float16.mod.py148
-rw-r--r--tests/nnapi/specs/skip/V1_2/lstm_state2_float16.mod.py148
-rw-r--r--tests/nnapi/specs/skip/V1_2/lstm_state_float16.mod.py148
-rw-r--r--tests/nnapi/specs/skip/V1_2/max_pool_v1_2.mod.py186
-rw-r--r--tests/nnapi/specs/skip/V1_2/mean_float16.mod.py19
-rw-r--r--tests/nnapi/specs/skip/V1_2/mul_v1_2.mod.py99
-rw-r--r--tests/nnapi/specs/skip/V1_2/pad_all_dims.mod.py46
-rw-r--r--tests/nnapi/specs/skip/V1_2/pad_float16.mod.py20
-rw-r--r--tests/nnapi/specs/skip/V1_2/pad_low_rank.mod.py29
-rw-r--r--tests/nnapi/specs/skip/V1_2/pad_low_rank_quant8.mod.py26
-rw-r--r--tests/nnapi/specs/skip/V1_2/pad_quant8.mod.py33
-rw-r--r--tests/nnapi/specs/skip/V1_2/pad_quant8_nonzero.mod.py36
-rw-r--r--tests/nnapi/specs/skip/V1_2/pow.mod.py41
-rw-r--r--tests/nnapi/specs/skip/V1_2/quantized_lstm.mod.py199
-rw-r--r--tests/nnapi/specs/skip/V1_2/random_multinomial.mod.py285
-rw-r--r--tests/nnapi/specs/skip/V1_2/random_multinomial_float16.mod.py285
-rw-r--r--tests/nnapi/specs/skip/V1_2/relu1_v1_2.mod.py88
-rw-r--r--tests/nnapi/specs/skip/V1_2/relu6_v1_2.mod.py88
-rw-r--r--tests/nnapi/specs/skip/V1_2/relu_v1_2.mod.py88
-rw-r--r--tests/nnapi/specs/skip/V1_2/reshape_float16.mod.py18
-rw-r--r--tests/nnapi/specs/skip/V1_2/resize_bilinear_v1_2.mod.py166
-rw-r--r--tests/nnapi/specs/skip/V1_2/rnn_float16.mod.py201
-rw-r--r--tests/nnapi/specs/skip/V1_2/roi_align.mod.py265
-rw-r--r--tests/nnapi/specs/skip/V1_2/roi_pooling.mod.py152
-rw-r--r--tests/nnapi/specs/skip/V1_2/sin.mod.py27
-rw-r--r--tests/nnapi/specs/skip/V1_2/softmax_v1_2.mod.py99
-rw-r--r--tests/nnapi/specs/skip/V1_2/space_to_batch_quant8_nonzero.mod.py37
-rw-r--r--tests/nnapi/specs/skip/V1_2/space_to_batch_v1_2.mod.py94
-rw-r--r--tests/nnapi/specs/skip/V1_2/space_to_depth_v1_2.mod.py76
-rw-r--r--tests/nnapi/specs/skip/V1_2/squeeze_float16.mod.py16
-rw-r--r--tests/nnapi/specs/skip/V1_2/strided_slice_float16.mod.py23
-rw-r--r--tests/nnapi/specs/skip/V1_2/sub_quantized_different_scales.mod.py60
-rw-r--r--tests/nnapi/specs/skip/V1_2/svdf_bias_present_float16.mod.py138
-rw-r--r--tests/nnapi/specs/skip/V1_2/svdf_float16.mod.py138
-rw-r--r--tests/nnapi/specs/skip/V1_2/svdf_state_float16.mod.py114
-rw-r--r--tests/nnapi/specs/skip/V1_2/transpose_conv2d.mod.py284
-rw-r--r--tests/nnapi/specs/skip/V1_2/transpose_conv2d_large.mod.py48
-rw-r--r--tests/nnapi/specs/skip/V1_2/transpose_float16.mod.py18
-rw-r--r--tests/nnapi/specs/skip/V1_2/unidirectional_sequence_lstm_cifg_peephole.mod.py168
-rw-r--r--tests/nnapi/specs/skip/V1_2/unidirectional_sequence_lstm_f16_batch_major.mod.py177
-rw-r--r--tests/nnapi/specs/skip/V1_2/unidirectional_sequence_lstm_f16_norm_peephole_projection.mod.py168
-rw-r--r--tests/nnapi/specs/skip/V1_2/unidirectional_sequence_rnn.mod.py183
104 files changed, 16770 insertions, 0 deletions
diff --git a/tests/nnapi/specs/skip/V1_2/add_v1_2.mod.py b/tests/nnapi/specs/skip/V1_2/add_v1_2.mod.py
new file mode 100644
index 000000000..8af47a57f
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/add_v1_2.mod.py
@@ -0,0 +1,99 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# TEST 1: ADD float16
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT16", "{3}") # a vector of 3 float16s
+i2 = Input("op2", "TENSOR_FLOAT16", "{3}") # another vector of 3 float16s
+act = Int32Scalar("act", 0) # an int32_t scalar activation
+i3 = Output("op3", "TENSOR_FLOAT16", "{3}")
+model = model.Operation("ADD", i1, i2, act).To(i3)
+model = model.RelaxedExecution(False)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1.0009765625, 1.0, 2.5],
+ i2: # input 1
+ [2E-23, 0.0001, 3.5]}
+
+output0 = {i3: # output 0
+ [1.0009765625, 1.0, 6.0]}
+
+# Instantiate an example
+Example((input0, output0))
+
+
+# TEST 2: ADD broadcast float16
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT16", "{1, 2}")
+i2 = Input("op2", "TENSOR_FLOAT16", "{2, 2}")
+act = Int32Scalar("act", 0)
+i3 = Output("op3", "TENSOR_FLOAT16", "{2, 2}")
+model = model.Operation("ADD", i1, i2, act).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1, 2],
+ i2: # input 1
+ [1, 2, 3, 4]}
+
+output0 = {i3: # output 0
+ [2, 4, 4, 6]}
+
+# Instantiate an example
+Example((input0, output0))
+
+
+# TEST 3: ADD, zero-sized input
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box cooridnates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+layout = BoolScalar("layout", False) # NHWC
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 2}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 2}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# ADD op with numBatches = 0.
+i2 = Parameter("op", "TENSOR_FLOAT32", "{1, 2, 2, 1}", [1, 2, 3, 4]) # weights
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 2, 2, 2}") # out
+model = model.Operation("ADD", zero_sized, i2, 0).To(o3)
+
+quant8 = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ zero_sized: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ i2: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ o3: ("TENSOR_QUANT8_ASYMM", 0.1, 128)
+})
+
+# Create test case with dummy values.
+Example({
+ i1: [1, 2],
+ o1: [0],
+ o2: [0],
+ o3: [0],
+}).AddVariations("relaxed", quant8, "float16")
diff --git a/tests/nnapi/specs/skip/V1_2/argmin_1.mod.py b/tests/nnapi/specs/skip/V1_2/argmin_1.mod.py
new file mode 100644
index 000000000..e89ceead9
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/argmin_1.mod.py
@@ -0,0 +1,31 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+input0 = Input("input0", "TENSOR_FLOAT32", "{2, 2}")
+axis = Int32Scalar("axis", 1)
+output0 = Output("output", "TENSOR_INT32", "{2}")
+
+model = Model().Operation("ARGMIN", input0, axis).To(output0)
+
+quant8 = DataTypeConverter().Identify({
+ input0: ["TENSOR_QUANT8_ASYMM", 1.0, 0],
+})
+
+Example({
+ input0: [1.0, 2.0,
+ 4.0, 3.0],
+ output0: [0, 1],
+}).AddVariations("relaxed", "float16", "int32", quant8)
diff --git a/tests/nnapi/specs/skip/V1_2/argmin_2.mod.py b/tests/nnapi/specs/skip/V1_2/argmin_2.mod.py
new file mode 100644
index 000000000..e54cff784
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/argmin_2.mod.py
@@ -0,0 +1,31 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+input0 = Input("input0", "TENSOR_FLOAT32", "{2, 2}")
+axis = Int32Scalar("axis", 0)
+output0 = Output("output", "TENSOR_INT32", "{2}")
+
+model = Model().Operation("ARGMIN", input0, axis).To(output0)
+
+quant8 = DataTypeConverter().Identify({
+ input0: ["TENSOR_QUANT8_ASYMM", 1.0, 0],
+})
+
+Example({
+ input0: [1.0, 2.0,
+ 4.0, 3.0],
+ output0: [0, 0],
+}).AddVariations("relaxed", "float16", "int32", quant8)
diff --git a/tests/nnapi/specs/skip/V1_2/argmin_3.mod.py b/tests/nnapi/specs/skip/V1_2/argmin_3.mod.py
new file mode 100644
index 000000000..d3cbd76ed
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/argmin_3.mod.py
@@ -0,0 +1,33 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Negative axis support test.
+
+input0 = Input("input0", "TENSOR_FLOAT32", "{2, 2}")
+axis = Int32Scalar("axis", -1)
+output0 = Output("output", "TENSOR_INT32", "{2}")
+
+model = Model().Operation("ARGMIN", input0, axis).To(output0)
+
+quant8 = DataTypeConverter().Identify({
+ input0: ["TENSOR_QUANT8_ASYMM", 1.0, 0],
+})
+
+Example({
+ input0: [1.0, 2.0,
+ 4.0, 3.0],
+ output0: [0, 1],
+}).AddVariations("relaxed", "float16", "int32", quant8)
diff --git a/tests/nnapi/specs/skip/V1_2/avg_pool_v1_2.mod.py b/tests/nnapi/specs/skip/V1_2/avg_pool_v1_2.mod.py
new file mode 100644
index 000000000..43083f91d
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/avg_pool_v1_2.mod.py
@@ -0,0 +1,214 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+layout = BoolScalar("layout", False) # NHWC
+
+# TEST 1: AVERAGE_POOL_2D_NCHW_1, pad = 0, stride = 1, filter = 1, act = none
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+Model().Operation("AVERAGE_POOL_2D", i1, 0, 0, 0, 0, 1, 1, 1, 1, 0, layout).To(o1)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.5, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i1: [1.0, 2.0, 3.0, 4.0],
+ o1: [1.0, 2.0, 3.0, 4.0]
+}).AddNchw(i1, o1, layout).AddVariations("relaxed", "float16", quant8)
+
+
+# TEST 2: AVERAGE_POOL_2D_NCHW_2, act = none
+bat = 5
+row = 52
+col = 60
+chn = 3
+std = 5
+flt = 100
+pad = 50
+output_row = (row + 2 * pad - flt + std) // std
+output_col = (col + 2 * pad - flt + std) // std
+
+i2 = Input("op1", ("TENSOR_FLOAT32", [bat, row, col, chn]))
+o2 = Output("op4", ("TENSOR_FLOAT32", [bat, output_row, output_col, chn]))
+Model().Operation("AVERAGE_POOL_2D", i2, pad, pad, pad, pad, std, std, flt, flt, 0, layout).To(o2)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
+ o2: ("TENSOR_QUANT8_ASYMM", 0.5, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i2: [1. for _ in range(bat * row * col * chn)],
+ o2: [1. for _ in range(bat * output_row * output_col * chn)]
+}).AddNchw(i2, o2, layout).AddVariations("relaxed", "float16", quant8)
+
+
+# TEST 3: AVERAGE_POOL_2D_NCHW_3, act = none
+bat = 1
+row = 200
+col = 180
+chn = 1
+std = 2
+flt = 10
+pad = 0
+output_row = (row + 2 * pad - flt + std) // std
+output_col = (col + 2 * pad - flt + std) // std
+
+i3 = Input("op1", ("TENSOR_FLOAT32", [bat, row, col, chn]))
+o3 = Output("op4", ("TENSOR_FLOAT32", [bat, output_row, output_col, chn]))
+Model().Operation("AVERAGE_POOL_2D", i3, pad, pad, pad, pad, std, std, flt, flt, 0, layout).To(o3)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i3: ("TENSOR_QUANT8_ASYMM", 0.25, 0),
+ o3: ("TENSOR_QUANT8_ASYMM", 0.25, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i3: [x % 2 for x in range(bat * row * col * chn)],
+ o3: [.5 for _ in range(bat * output_row * output_col * chn)]
+}).AddNchw(i3, o3, layout).AddVariations("relaxed", "float16", quant8)
+
+
+# TEST 4: AVERAGE_POOL_2D_NCHW_4, act = relu6
+bat = 5
+row = 52
+col = 60
+chn = 3
+std = 5
+flt = 100
+pad = 50
+output_row = (row + 2 * pad - flt + std) // std
+output_col = (col + 2 * pad - flt + std) // std
+
+i4 = Input("op1", ("TENSOR_FLOAT32", [bat, row, col, chn]))
+o4 = Output("op4", ("TENSOR_FLOAT32", [bat, output_row, output_col, chn]))
+Model().Operation("AVERAGE_POOL_2D", i4, pad, pad, pad, pad, std, std, flt, flt, 3, layout).To(o4)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i4: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
+ o4: ("TENSOR_QUANT8_ASYMM", 0.5, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i4: [10 for _ in range(bat * row * col * chn)],
+ o4: [6 for _ in range(bat * output_row * output_col * chn)]
+}).AddNchw(i4, o4, layout).AddVariations("relaxed", "float16", quant8)
+
+
+# TEST 5: AVERAGE_POOL_2D_NCHW_5, pad = same, stride = 2, filter = 2, act = none
+i5 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 4, 1}")
+o5 = Output("op4", "TENSOR_FLOAT32", "{1, 1, 2, 1}")
+Model().Operation("AVERAGE_POOL_2D", i5, 1, 2, 2, 2, 2, 0, layout).To(o5)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i5: ("TENSOR_QUANT8_ASYMM", 0.25, 0),
+ o5: ("TENSOR_QUANT8_ASYMM", 0.25, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i5: [0, 6, 2, 4, 3, 2, 10, 7],
+ o5: [2.75, 5.75]
+}).AddNchw(i5, o5, layout).AddVariations("relaxed", "float16", quant8)
+
+
+# TEST 6: zero-sized input, explicit padding
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box cooridnates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# AVERAGE_POOL_2D op with numBatches = 0.
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 1, 1, 1}") # out
+model = model.Operation("AVERAGE_POOL_2D", zero_sized, 0, 0, 0, 0, 1, 1, 2, 2, 0, layout).To(o3)
+
+quant8 = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ zero_sized: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ o3: ("TENSOR_QUANT8_ASYMM", 0.1, 128)
+})
+
+# Create test case with dummy values.
+Example({
+ i1: [1],
+ o1: [0],
+ o2: [0],
+ o3: [0],
+}).AddNchw(i1, zero_sized, o3, layout).AddVariations("relaxed", quant8, "float16")
+
+
+# TEST 7: zero-sized input, implicit padding
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box cooridnates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# AVERAGE_POOL_2D op with numBatches = 0.
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 2, 2, 1}") # out
+model = model.Operation("AVERAGE_POOL_2D", zero_sized, 1, 1, 1, 2, 2, 0, layout).To(o3)
+
+quant8 = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ zero_sized: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ o3: ("TENSOR_QUANT8_ASYMM", 0.1, 128)
+})
+
+# Create test case with dummy values.
+Example({
+ i1: [1],
+ o1: [0],
+ o2: [0],
+ o3: [0],
+}).AddNchw(i1, zero_sized, o3, layout).AddVariations("relaxed", quant8, "float16")
diff --git a/tests/nnapi/specs/skip/V1_2/axis_aligned_bbox_transform.mod.py b/tests/nnapi/specs/skip/V1_2/axis_aligned_bbox_transform.mod.py
new file mode 100644
index 000000000..ad2eb0ad3
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/axis_aligned_bbox_transform.mod.py
@@ -0,0 +1,121 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# TEST 1: AXIS_ALIGNED_BBOX_TRANSFORM
+r1 = Input("roi", "TENSOR_FLOAT32", "{5, 4}")
+d1 = Input("bboxDeltas", "TENSOR_FLOAT32", "{5, 8}")
+b1 = Input("batchSplit", "TENSOR_INT32", "{5}")
+i1 = Input("imageInfo", "TENSOR_FLOAT32", "{4, 2}")
+o1 = Output("out", "TENSOR_FLOAT32", "{5, 8}")
+model1 = Model().Operation("AXIS_ALIGNED_BBOX_TRANSFORM", r1, d1, b1, i1).To(o1)
+
+quant8 = DataTypeConverter().Identify({
+ r1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ d1: ("TENSOR_QUANT8_ASYMM", 0.05, 128),
+ i1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT16_ASYMM", 0.125, 0)
+})
+
+inputs = {
+ r1: [100, 150, 400, 430,
+ 120, 60, 122, 61,
+ 10, 20, 20, 50,
+ 50, 120, 150, 250,
+ 400, 100, 1000, 2000],
+ d1: [0.2, 0.2, 0.1, 0.1,
+ 0.3, -0.1, -0.2, 0.1,
+ -0.5, 0.2, 0.2, -0.5,
+ -0.1, -0.1, 2.5, 3,
+ -0.5, -0.5, 1, 1,
+ 0.5, 0.5, -1.5, -1.2,
+ 0.2, 0.2, -3, -4,
+ 1, -0.5, 0.3, 0.5,
+ 0.3, -0.2, 1.1, -0.8,
+ 0.1, 0.05, -0.5, -0.5],
+ b1: [0, 1, 2, 2, 3],
+ i1: [512, 512,
+ 128, 256,
+ 256, 256,
+ 1024, 512]
+}
+
+Example((inputs, {
+ o1: [144.224350, 191.276062, 475.775635, 500.723938,
+ 217.190384, 107.276062, 462.809631, 416.723938,
+ 118.778594, 60.396736, 121.221406, 61.003266,
+ 108.617508, 50.357232, 132.982498, 70.442772,
+ 0.000000, 0.000000, 23.59140714, 60.77422571,
+ 18.88435 , 45.48208571, 21.11565 , 54.51791429,
+ 117.51063714, 209.80948286, 122.48935143, 212.19050857,
+ 132.50705143, 12.83312286, 255.99999571, 227.16685714,
+ 0. , 243.1374815, 512. , 1024. ,
+ 512. , 568.7958375, 512. , 1024. ]
+}), model=model1).AddVariations("relaxed", "float16", quant8)
+
+
+# TEST 2: AXIS_ALIGNED_BBOX_TRANSFORM_ZERO_BATCH
+r2 = Input("roi", "TENSOR_FLOAT32", "{5, 4}")
+d2 = Input("bboxDeltas", "TENSOR_FLOAT32", "{5, 8}")
+b2 = Input("batchSplit", "TENSOR_INT32", "{5}")
+i2 = Input("imageInfo", "TENSOR_FLOAT32", "{7, 2}")
+o2 = Output("out", "TENSOR_FLOAT32", "{5, 8}")
+model2 = Model().Operation("AXIS_ALIGNED_BBOX_TRANSFORM", r2, d2, b2, i2).To(o2)
+
+quant8 = DataTypeConverter().Identify({
+ r2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ d2: ("TENSOR_QUANT8_ASYMM", 0.05, 128),
+ i2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o2: ("TENSOR_QUANT16_ASYMM", 0.125, 0)
+})
+
+inputs = {
+ r2: [100, 150, 400, 430,
+ 120, 60, 122, 61,
+ 10, 20, 20, 50,
+ 50, 120, 150, 250,
+ 400, 100, 1000, 2000],
+ d2: [0.2, 0.2, 0.1, 0.1,
+ 0.3, -0.1, -0.2, 0.1,
+ -0.5, 0.2, 0.2, -0.5,
+ -0.1, -0.1, 2.5, 3,
+ -0.5, -0.5, 1, 1,
+ 0.5, 0.5, -1.5, -1.2,
+ 0.2, 0.2, -3, -4,
+ 1, -0.5, 0.3, 0.5,
+ 0.3, -0.2, 1.1, -0.8,
+ 0.1, 0.05, -0.5, -0.5],
+ b2: [0, 2, 5, 5, 6],
+ i2: [512, 512,
+ 32, 32,
+ 128, 256,
+ 32, 32,
+ 32, 32,
+ 256, 256,
+ 1024, 512]
+}
+
+Example((inputs, {
+ o2: [144.224350, 191.276062, 475.775635, 500.723938,
+ 217.190384, 107.276062, 462.809631, 416.723938,
+ 118.778594, 60.396736, 121.221406, 61.003266,
+ 108.617508, 50.357232, 132.982498, 70.442772,
+ 0.000000, 0.000000, 23.59140714, 60.77422571,
+ 18.88435 , 45.48208571, 21.11565 , 54.51791429,
+ 117.51063714, 209.80948286, 122.48935143, 212.19050857,
+ 132.50705143, 12.83312286, 255.99999571, 227.16685714,
+ 0. , 243.1374815, 512. , 1024. ,
+ 512. , 568.7958375, 512. , 1024. ]
+}), model=model2).AddVariations("relaxed", "float16", quant8)
diff --git a/tests/nnapi/specs/skip/V1_2/batch_to_space_v1_2.mod.py b/tests/nnapi/specs/skip/V1_2/batch_to_space_v1_2.mod.py
new file mode 100644
index 000000000..e7247bf59
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/batch_to_space_v1_2.mod.py
@@ -0,0 +1,52 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+layout = BoolScalar("layout", False) # NHWC
+
+# TEST 1: BATCH_TO_SPACE_NCHW_1, block_size = [2, 2]
+i1 = Input("op1", "TENSOR_FLOAT32", "{4, 1, 1, 2}")
+o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 2}")
+Model().Operation("BATCH_TO_SPACE_ND", i1, [2, 2], layout).To(o1)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.1, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.1, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i1: [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1],
+ o1: [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1]
+}).AddNchw(i1, o1, layout).AddVariations("relaxed", "float16", quant8)
+
+
+# TEST 2: BATCH_TO_SPACE_NCHW_2, block_size = [2, 2]
+i2 = Input("op1", "TENSOR_FLOAT32", "{4, 2, 2, 1}")
+o2 = Output("op4", "TENSOR_FLOAT32", "{1, 4, 4, 1}")
+Model().Operation("BATCH_TO_SPACE_ND", i2, [2, 2], layout).To(o2)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM", 0.5, 128),
+ o2: ("TENSOR_QUANT8_ASYMM", 0.5, 128)
+})
+
+# Instantiate an example
+example = Example({
+ i2: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
+ o2: [1, 5, 2, 6, 9, 13, 10, 14, 3, 7, 4, 8, 11, 15, 12, 16]
+}).AddNchw(i2, o2, layout).AddVariations("relaxed", "float16", quant8)
diff --git a/tests/nnapi/specs/skip/V1_2/bbox_graph.mod.py b/tests/nnapi/specs/skip/V1_2/bbox_graph.mod.py
new file mode 100644
index 000000000..a7a94af28
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/bbox_graph.mod.py
@@ -0,0 +1,93 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+layout = BoolScalar("layout", False) # NHWC
+
+# Operation 1, GENERATE_PROPOSALS
+scores = Input("scores", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+deltas = Input("deltas", "TENSOR_FLOAT32", "{1, 1, 1, 4}")
+anchors = Input("anchors", "TENSOR_FLOAT32", "{1, 4}")
+image = Input("imageInfo", "TENSOR_FLOAT32", "{1, 2}")
+scoresOut_1 = Output("scores", "TENSOR_FLOAT32", "{0}")
+roiOut_1 = Internal("roi", "TENSOR_FLOAT32", "{0, 4}")
+batchOut_1 = Internal("batches", "TENSOR_INT32", "{0}")
+model = Model("zero_sized").Operation("GENERATE_PROPOSALS", scores, deltas, anchors, image, 1.0, 1.0, -1, -1, 0.3, 10.0, layout).To(scoresOut_1, roiOut_1, batchOut_1)
+
+# Operation 2, ROI_ALIGN
+feature = Input("featureMap", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+featureOut_2 = Internal("scores", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", feature, roiOut_1, batchOut_1, 2, 2, 1.0, 1.0, 4, 4, layout).To(featureOut_2)
+
+# Operation 3, FULLY_CONNECTED
+weights_3 = Parameter("weights", "TENSOR_FLOAT32", "{8, 4}", [1] * 32)
+bias_3 = Parameter("bias", "TENSOR_FLOAT32", "{8}", [1] * 8)
+deltaOut_3 = Internal("delta", "TENSOR_FLOAT32", "{0, 8}")
+model = model.Operation("FULLY_CONNECTED", featureOut_2, weights_3, bias_3, 0).To(deltaOut_3)
+
+# Operation 4, FULLY_CONNECTED
+weights_4 = Parameter("weights", "TENSOR_FLOAT32", "{2, 4}", [1] * 8)
+bias_4 = Parameter("bias", "TENSOR_FLOAT32", "{2}", [1] * 2)
+scoresOut_4 = Internal("scores", "TENSOR_FLOAT32", "{0, 2}")
+model = model.Operation("FULLY_CONNECTED", featureOut_2, weights_4, bias_4, 0).To(scoresOut_4)
+
+# Operation 5, AXIS_ALIGNED_BBOX_TRANSFORM
+roiOut_5 = Internal("roi", "TENSOR_FLOAT32", "{0, 8}")
+model = model.Operation("AXIS_ALIGNED_BBOX_TRANSFORM", roiOut_1, deltaOut_3, batchOut_1, image).To(roiOut_5)
+
+# Operation 6, BOX_WITH_NMS_LIMIT
+scoresOut_6 = Output("scores", "TENSOR_FLOAT32", "{0}")
+roiOut_6 = Output("roi", "TENSOR_FLOAT32", "{0, 4}")
+classOut_6 = Output("classes", "TENSOR_INT32", "{0}")
+batchOut_6 = Output("batches", "TENSOR_INT32", "{0}")
+model = model.Operation("BOX_WITH_NMS_LIMIT", scoresOut_4, roiOut_5, batchOut_1, 0.1, -1, 0, 0.3, 1.0, 0.1).To(scoresOut_6, roiOut_6, classOut_6, batchOut_6)
+
+quant8 = DataTypeConverter().Identify({
+ scores: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ deltas: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ anchors: ("TENSOR_QUANT16_SYMM", 0.125, 0),
+ image: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ scoresOut_1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ roiOut_1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ feature: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ featureOut_2: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ weights_3: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ bias_3: ("TENSOR_INT32", 0.01, 0),
+ deltaOut_3: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ weights_4: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ bias_4: ("TENSOR_INT32", 0.01, 0),
+ scoresOut_4: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ roiOut_5: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ scoresOut_6: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ roiOut_6: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+})
+
+Example({
+
+ # Inputs that will lead to zero-sized output of GENERATE_PROPOSALS
+ scores: [0.5],
+ deltas: [0, 0, -10, -10],
+ anchors: [0, 0, 10, 10],
+ image: [32, 32],
+ feature: [1],
+
+ # Dummy outputs
+ scoresOut_1: [0],
+ scoresOut_6: [0],
+ roiOut_6: [0],
+ classOut_6: [0],
+ batchOut_6: [0],
+
+}).AddVariations("relaxed", "float16", quant8)
diff --git a/tests/nnapi/specs/skip/V1_2/bidirectional_sequence_lstm.mod.py b/tests/nnapi/specs/skip/V1_2/bidirectional_sequence_lstm.mod.py
new file mode 100644
index 000000000..40ebff903
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/bidirectional_sequence_lstm.mod.py
@@ -0,0 +1,461 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Bidirectional Sequence LSTM Test:
+# FLOAT32, No Layer Normalization, No Cifg, No Peephole, No Projection, and No Clipping.
+
+n_batch = 1
+n_input = 2
+n_cell = 4
+n_output = 4
+max_time = 3
+
+input = Input("input", "TENSOR_FLOAT32", "{{{}, {}, {}}}".format(max_time, n_batch, n_input))
+
+fw_input_to_input_weights = Input(
+ "fw_input_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+fw_input_to_forget_weights = Input(
+ "fw_input_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+fw_input_to_cell_weights = Input(
+ "fw_input_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+fw_input_to_output_weights = Input(
+ "fw_input_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+
+fw_recurrent_to_input_weights = Input(
+ "fw_recurrent_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+fw_recurrent_to_forget_weights = Input(
+ "fw_recurrent_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+fw_recurrent_to_cell_weights = Input(
+ "fw_recurrent_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+fw_recurrent_to_output_weights = Input(
+ "fw_recurrent_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+
+fw_cell_to_input_weights = Input(
+ "fw_cell_to_input_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+fw_cell_to_forget_weights = Input(
+ "fw_cell_to_forget_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+fw_cell_to_output_weights = Input(
+ "fw_cell_to_output_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+
+fw_input_gate_bias = Input(
+ "fw_input_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+fw_forget_gate_bias = Input(
+ "fw_forget_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+fw_cell_bias = Input(
+ "fw_cell_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+fw_output_gate_bias = Input(
+ "fw_output_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+
+fw_projection_weights = Input(
+ "fw_projection_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_output, n_cell))
+fw_projection_bias = Input(
+ "fw_projection_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_output))
+
+bw_input_to_input_weights = Input(
+ "bw_input_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+bw_input_to_forget_weights = Input(
+ "bw_input_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+bw_input_to_cell_weights = Input(
+ "bw_input_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+bw_input_to_output_weights = Input(
+ "bw_input_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+
+bw_recurrent_to_input_weights = Input(
+ "bw_recurrent_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+bw_recurrent_to_forget_weights = Input(
+ "bw_recurrent_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+bw_recurrent_to_cell_weights = Input(
+ "bw_recurrent_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+bw_recurrent_to_output_weights = Input(
+ "bw_recurrent_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+
+bw_cell_to_input_weights = Input(
+ "bw_cell_to_input_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+bw_cell_to_forget_weights = Input(
+ "bw_cell_to_forget_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+bw_cell_to_output_weights = Input(
+ "bw_cell_to_output_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+
+bw_input_gate_bias = Input(
+ "bw_input_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+bw_forget_gate_bias = Input(
+ "bw_forget_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+bw_cell_bias = Input(
+ "bw_cell_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+bw_output_gate_bias = Input(
+ "bw_output_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+
+bw_projection_weights = Input(
+ "bw_projection_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_output, n_cell))
+bw_projection_bias = Input(
+ "bw_projection_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_output))
+
+fw_activation_state = Input(
+ "fw_activatiom_state", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_batch, n_output))
+fw_cell_state = Input(
+ "fw_cell_state", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_batch, n_cell))
+
+bw_activation_state = Input(
+ "bw_activatiom_state", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_batch, n_output))
+bw_cell_state = Input(
+ "bw_cell_state", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_batch, n_cell))
+
+aux_input = Input("input", "TENSOR_FLOAT32", "{{{}, {}, {}}}".format(max_time, n_batch, n_input))
+
+fw_aux_input_to_input_weights = Input(
+ "fw_aux_input_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+fw_aux_input_to_forget_weights = Input(
+ "fw_input_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+fw_aux_input_to_cell_weights = Input(
+ "fw_aux_input_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+fw_aux_input_to_output_weights = Input(
+ "fw_aux_input_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+
+bw_aux_input_to_input_weights = Input(
+ "bw_aux_input_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+bw_aux_input_to_forget_weights = Input(
+ "bw_input_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+bw_aux_input_to_cell_weights = Input(
+ "bw_aux_input_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+bw_aux_input_to_output_weights = Input(
+ "bw_aux_input_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+
+fw_input_layer_norm_weights = Input("input_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+fw_forget_layer_norm_weights = Input("forget_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+fw_cell_layer_norm_weights = Input("cell_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+fw_output_layer_norm_weights = Input("output_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+
+bw_input_layer_norm_weights = Input("input_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+bw_forget_layer_norm_weights = Input("forget_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+bw_cell_layer_norm_weights = Input("cell_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+bw_output_layer_norm_weights = Input("output_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+
+fw_output=Output("fw_output", "TENSOR_FLOAT32", "{{{}, {}, {}}}".format(max_time, n_batch, n_output))
+bw_output=Output("bw_output", "TENSOR_FLOAT32", "{{{}, {}, {}}}".format(max_time, n_batch, n_output))
+
+def test(
+ name,
+ input_data=[],
+ fw_input_to_input_weights_data=[],
+ fw_input_to_forget_weights_data=[],
+ fw_input_to_cell_weights_data=[],
+ fw_input_to_output_weights_data=[],
+ fw_recurrent_to_input_weights_data=[],
+ fw_recurrent_to_forget_weights_data=[],
+ fw_recurrent_to_cell_weights_data=[],
+ fw_recurrent_to_output_weights_data=[],
+ fw_cell_to_input_weights_data=[],
+ fw_cell_to_forget_weights_data=[],
+ fw_cell_to_output_weights_data=[],
+ fw_input_gate_bias_data=[],
+ fw_forget_gate_bias_data=[],
+ fw_cell_bias_data=[],
+ fw_output_gate_bias_data=[],
+ fw_projection_weights_data=[],
+ fw_projection_bias_data=[],
+ bw_input_to_input_weights_data=[],
+ bw_input_to_forget_weights_data=[],
+ bw_input_to_cell_weights_data=[],
+ bw_input_to_output_weights_data=[],
+ bw_recurrent_to_input_weights_data=[],
+ bw_recurrent_to_forget_weights_data=[],
+ bw_recurrent_to_cell_weights_data=[],
+ bw_recurrent_to_output_weights_data=[],
+ bw_cell_to_input_weights_data=[],
+ bw_cell_to_forget_weights_data=[],
+ bw_cell_to_output_weights_data=[],
+ bw_input_gate_bias_data=[],
+ bw_forget_gate_bias_data=[],
+ bw_cell_bias_data=[],
+ bw_output_gate_bias_data=[],
+ bw_projection_weights_data=[],
+ bw_projection_bias_data=[],
+ fw_activation_state_data=[],
+ fw_cell_state_data=[],
+ bw_activation_state_data=[],
+ bw_cell_state_data=[],
+ aux_input_data=[],
+ fw_aux_input_to_input_weights_data=[],
+ fw_aux_input_to_forget_weights_data=[],
+ fw_aux_input_to_cell_weights_data=[],
+ fw_aux_input_to_output_weights_data=[],
+ bw_aux_input_to_input_weights_data=[],
+ bw_aux_input_to_forget_weights_data=[],
+ bw_aux_input_to_cell_weights_data=[],
+ bw_aux_input_to_output_weights_data=[],
+ fw_input_layer_norm_weights_data=[],
+ fw_forget_layer_norm_weights_data=[],
+ fw_cell_layer_norm_weights_data=[],
+ fw_output_layer_norm_weights_data=[],
+ bw_input_layer_norm_weights_data=[],
+ bw_forget_layer_norm_weights_data=[],
+ bw_cell_layer_norm_weights_data=[],
+ bw_output_layer_norm_weights_data=[],
+ fw_output_data=[],
+ bw_output_data=[],):
+
+ activation = Int32Scalar("activation", 4)
+ cell_clip = Float32Scalar("cell_clip", 0.0)
+ proj_clip = Float32Scalar("proj_clip", 0.0)
+ merge_outputs = BoolScalar("merge_outputs", False)
+ time_major = BoolScalar("time_major", True)
+
+ model = Model().Operation(
+ "BIDIRECTIONAL_SEQUENCE_LSTM",
+ input,
+ fw_input_to_input_weights,
+ fw_input_to_forget_weights,
+ fw_input_to_cell_weights,
+ fw_input_to_output_weights,
+ fw_recurrent_to_input_weights,
+ fw_recurrent_to_forget_weights,
+ fw_recurrent_to_cell_weights,
+ fw_recurrent_to_output_weights,
+ fw_cell_to_input_weights,
+ fw_cell_to_forget_weights,
+ fw_cell_to_output_weights,
+ fw_input_gate_bias,
+ fw_forget_gate_bias,
+ fw_cell_bias,
+ fw_output_gate_bias,
+ fw_projection_weights,
+ fw_projection_bias,
+ bw_input_to_input_weights,
+ bw_input_to_forget_weights,
+ bw_input_to_cell_weights,
+ bw_input_to_output_weights,
+ bw_recurrent_to_input_weights,
+ bw_recurrent_to_forget_weights,
+ bw_recurrent_to_cell_weights,
+ bw_recurrent_to_output_weights,
+ bw_cell_to_input_weights,
+ bw_cell_to_forget_weights,
+ bw_cell_to_output_weights,
+ bw_input_gate_bias,
+ bw_forget_gate_bias,
+ bw_cell_bias,
+ bw_output_gate_bias,
+ bw_projection_weights,
+ bw_projection_bias,
+ fw_activation_state,
+ fw_cell_state,
+ bw_activation_state,
+ bw_cell_state,
+ aux_input,
+ fw_aux_input_to_input_weights,
+ fw_aux_input_to_forget_weights,
+ fw_aux_input_to_cell_weights,
+ fw_aux_input_to_output_weights,
+ bw_aux_input_to_input_weights,
+ bw_aux_input_to_forget_weights,
+ bw_aux_input_to_cell_weights,
+ bw_aux_input_to_output_weights,
+ activation, cell_clip, proj_clip, merge_outputs, time_major,
+ fw_input_layer_norm_weights,
+ fw_forget_layer_norm_weights,
+ fw_cell_layer_norm_weights,
+ fw_output_layer_norm_weights,
+ bw_input_layer_norm_weights,
+ bw_forget_layer_norm_weights,
+ bw_cell_layer_norm_weights,
+ bw_output_layer_norm_weights,).To(fw_output, bw_output)
+
+ example = Example(
+ {
+ input: input_data,
+ fw_input_to_input_weights: fw_input_to_input_weights_data,
+ fw_input_to_forget_weights: fw_input_to_forget_weights_data,
+ fw_input_to_cell_weights: fw_input_to_cell_weights_data,
+ fw_input_to_output_weights: fw_input_to_output_weights_data,
+ fw_recurrent_to_input_weights: fw_recurrent_to_input_weights_data,
+ fw_recurrent_to_forget_weights: fw_recurrent_to_forget_weights_data,
+ fw_recurrent_to_cell_weights: fw_recurrent_to_cell_weights_data,
+ fw_recurrent_to_output_weights: fw_recurrent_to_output_weights_data,
+ fw_cell_to_input_weights: fw_cell_to_input_weights_data,
+ fw_cell_to_forget_weights: fw_cell_to_forget_weights_data,
+ fw_cell_to_output_weights: fw_cell_to_output_weights_data,
+ fw_input_gate_bias: fw_input_gate_bias_data,
+ fw_forget_gate_bias: fw_forget_gate_bias_data,
+ fw_cell_bias: fw_cell_bias_data,
+ fw_output_gate_bias: fw_output_gate_bias_data,
+ fw_projection_weights: fw_projection_weights_data,
+ fw_projection_bias: fw_projection_bias_data,
+ bw_input_to_input_weights: bw_input_to_input_weights_data,
+ bw_input_to_forget_weights: bw_input_to_forget_weights_data,
+ bw_input_to_cell_weights: bw_input_to_cell_weights_data,
+ bw_input_to_output_weights: bw_input_to_output_weights_data,
+ bw_recurrent_to_input_weights: bw_recurrent_to_input_weights_data,
+ bw_recurrent_to_forget_weights: bw_recurrent_to_forget_weights_data,
+ bw_recurrent_to_cell_weights: bw_recurrent_to_cell_weights_data,
+ bw_recurrent_to_output_weights: bw_recurrent_to_output_weights_data,
+ bw_cell_to_input_weights: bw_cell_to_input_weights_data,
+ bw_cell_to_forget_weights: bw_cell_to_forget_weights_data,
+ bw_cell_to_output_weights: bw_cell_to_output_weights_data,
+ bw_input_gate_bias: bw_input_gate_bias_data,
+ bw_forget_gate_bias: bw_forget_gate_bias_data,
+ bw_cell_bias: bw_cell_bias_data,
+ bw_output_gate_bias: bw_output_gate_bias_data,
+ bw_projection_weights: bw_projection_weights_data,
+ bw_projection_bias: bw_projection_bias_data,
+ fw_activation_state: fw_activation_state_data,
+ fw_cell_state: fw_cell_state_data,
+ bw_activation_state: bw_activation_state_data,
+ bw_cell_state: bw_cell_state_data,
+ aux_input: aux_input_data,
+ fw_aux_input_to_input_weights: fw_aux_input_to_input_weights_data,
+ fw_aux_input_to_forget_weights: fw_aux_input_to_forget_weights_data,
+ fw_aux_input_to_cell_weights: fw_aux_input_to_cell_weights_data,
+ fw_aux_input_to_output_weights: fw_aux_input_to_output_weights_data,
+ bw_aux_input_to_input_weights: bw_aux_input_to_input_weights_data,
+ bw_aux_input_to_forget_weights: bw_aux_input_to_forget_weights_data,
+ bw_aux_input_to_cell_weights: bw_aux_input_to_cell_weights_data,
+ bw_aux_input_to_output_weights: bw_aux_input_to_output_weights_data,
+ fw_input_layer_norm_weights: fw_input_layer_norm_weights_data,
+ fw_forget_layer_norm_weights: fw_forget_layer_norm_weights_data,
+ fw_cell_layer_norm_weights: fw_cell_layer_norm_weights_data,
+ fw_output_layer_norm_weights: fw_output_layer_norm_weights_data,
+ bw_input_layer_norm_weights: bw_input_layer_norm_weights_data,
+ bw_forget_layer_norm_weights: bw_forget_layer_norm_weights_data,
+ bw_cell_layer_norm_weights: bw_cell_layer_norm_weights_data,
+ bw_output_layer_norm_weights: bw_output_layer_norm_weights_data,
+ fw_output: fw_output_data,
+ bw_output: bw_output_data,
+ },
+ model=model, name=name)
+
+
+fw_input_to_input_weights_data = [
+ -0.45018822, -0.02338299, -0.0870589,
+ -0.34550029, 0.04266912, -0.15680569,
+ -0.34856534, 0.43890524
+]
+bw_input_to_input_weights_data = fw_input_to_input_weights_data
+
+fw_input_to_forget_weights_data = [
+ 0.09701663, 0.20334584, -0.50592935,
+ -0.31343272, -0.40032279, 0.44781327,
+ 0.01387155, -0.35593212
+]
+bw_input_to_forget_weights_data = fw_input_to_forget_weights_data
+
+fw_input_to_cell_weights_data = [
+ -0.50013041, 0.1370284, 0.11810488, 0.2013163,
+ -0.20583314, 0.44344562, 0.22077113,
+ -0.29909778
+]
+bw_input_to_cell_weights_data = fw_input_to_cell_weights_data
+
+fw_input_to_output_weights_data = [
+ -0.25065863, -0.28290087, 0.04613829,
+ 0.40525138, 0.44272184, 0.03897077, -0.1556896,
+ 0.19487578
+]
+bw_input_to_output_weights_data = fw_input_to_output_weights_data
+
+fw_recurrent_to_input_weights_data = [
+ -0.0063535, -0.2042388, 0.31454784, -0.35746509, 0.28902304, 0.08183324,
+ -0.16555229, 0.02286911, -0.13566875, 0.03034258, 0.48091322,
+ -0.12528998, 0.24077177, -0.51332325, -0.33502164, 0.10629296
+]
+bw_recurrent_to_input_weights_data = fw_recurrent_to_input_weights_data
+
+fw_recurrent_to_forget_weights_data = [
+ -0.48684245, -0.06655136, 0.42224967, 0.2112639, 0.27654213, 0.20864892,
+ -0.07646349, 0.45877004, 0.00141793, -0.14609534, 0.36447752, 0.09196436,
+ 0.28053468, 0.01560611, -0.20127171, -0.01140004
+]
+bw_recurrent_to_forget_weights_data = fw_recurrent_to_forget_weights_data
+
+fw_recurrent_to_cell_weights_data = [
+ -0.3407414, 0.24443203, -0.2078532, 0.26320225, 0.05695659, -0.00123841,
+ -0.4744786, -0.35869038, -0.06418842, -0.13502428, -0.501764, 0.22830659,
+ -0.46367589, 0.26016325, -0.03894562, -0.16368064
+]
+bw_recurrent_to_cell_weights_data = fw_recurrent_to_cell_weights_data
+
+fw_recurrent_to_output_weights_data = [
+ 0.43385774, -0.17194885, 0.2718237, 0.09215671, 0.24107647, -0.39835793,
+ 0.18212086, 0.01301402, 0.48572797, -0.50656658, 0.20047462, -0.20607421,
+ -0.51818722, -0.15390486, 0.0468148, 0.39922136
+]
+bw_recurrent_to_output_weights_data = fw_recurrent_to_output_weights_data
+
+fw_input_gate_bias_data = [0.0, 0.0, 0.0, 0.0]
+bw_input_gate_bias_data = [0.0, 0.0, 0.0, 0.0]
+
+fw_forget_gate_bias_data = [1.0, 1.0, 1.0, 1.0]
+bw_forget_gate_bias_data = [1.0, 1.0, 1.0, 1.0]
+
+fw_cell_bias_data = [0.0, 0.0, 0.0, 0.0]
+bw_cell_bias_data = [0.0, 0.0, 0.0, 0.0]
+
+fw_output_gate_bias_data = [0.0, 0.0, 0.0, 0.0]
+bw_output_gate_bias_data = [0.0, 0.0, 0.0, 0.0]
+
+input_data = [2.0, 3.0, 3.0, 4.0, 1.0, 1.0]
+
+fw_activation_state_data = [0 for _ in range(n_batch * n_output)]
+bw_activation_state_data = [0 for _ in range(n_batch * n_output)]
+
+fw_cell_state_data = [0 for _ in range(n_batch * n_cell)]
+bw_cell_state_data = [0 for _ in range(n_batch * n_cell)]
+
+fw_golden_output_data = [
+ -0.02973187, 0.1229473, 0.20885126, -0.15358765,
+ -0.03716109, 0.12507336, 0.41193449, -0.20860538,
+ -0.15053082, 0.09120187, 0.24278517, -0.12222792
+]
+bw_golden_output_data = [
+ -0.0806187, 0.139077, 0.400476, -0.197842,
+ -0.0332076, 0.123838, 0.309777, -0.17621,
+ -0.0490733, 0.0739237, 0.067706, -0.0208124
+]
+
+
+test(
+ name="blackbox",
+ input_data=input_data,
+ fw_input_to_input_weights_data=fw_input_to_input_weights_data,
+ fw_input_to_forget_weights_data=fw_input_to_forget_weights_data,
+ fw_input_to_cell_weights_data=fw_input_to_cell_weights_data,
+ fw_input_to_output_weights_data=fw_input_to_output_weights_data,
+ fw_recurrent_to_input_weights_data=fw_recurrent_to_input_weights_data,
+ fw_recurrent_to_forget_weights_data=fw_recurrent_to_forget_weights_data,
+ fw_recurrent_to_cell_weights_data=fw_recurrent_to_cell_weights_data,
+ fw_recurrent_to_output_weights_data=fw_recurrent_to_output_weights_data,
+ fw_input_gate_bias_data=fw_input_gate_bias_data,
+ fw_forget_gate_bias_data=fw_forget_gate_bias_data,
+ fw_cell_bias_data=fw_cell_bias_data,
+ fw_output_gate_bias_data=fw_output_gate_bias_data,
+ bw_input_to_input_weights_data=bw_input_to_input_weights_data,
+ bw_input_to_forget_weights_data=bw_input_to_forget_weights_data,
+ bw_input_to_cell_weights_data=bw_input_to_cell_weights_data,
+ bw_input_to_output_weights_data=bw_input_to_output_weights_data,
+ bw_recurrent_to_input_weights_data=bw_recurrent_to_input_weights_data,
+ bw_recurrent_to_forget_weights_data=bw_recurrent_to_forget_weights_data,
+ bw_recurrent_to_cell_weights_data=bw_recurrent_to_cell_weights_data,
+ bw_recurrent_to_output_weights_data=bw_recurrent_to_output_weights_data,
+ bw_input_gate_bias_data=bw_input_gate_bias_data,
+ bw_forget_gate_bias_data=bw_forget_gate_bias_data,
+ bw_cell_bias_data=bw_cell_bias_data,
+ bw_output_gate_bias_data=bw_output_gate_bias_data,
+ fw_activation_state_data = fw_activation_state_data,
+ bw_activation_state_data = bw_activation_state_data,
+ fw_cell_state_data = fw_cell_state_data,
+ bw_cell_state_data = bw_cell_state_data,
+ fw_output_data=fw_golden_output_data,
+ bw_output_data=bw_golden_output_data
+)
diff --git a/tests/nnapi/specs/skip/V1_2/bidirectional_sequence_lstm_aux_input.mod.py b/tests/nnapi/specs/skip/V1_2/bidirectional_sequence_lstm_aux_input.mod.py
new file mode 100644
index 000000000..22c0e2459
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/bidirectional_sequence_lstm_aux_input.mod.py
@@ -0,0 +1,482 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Bidirectional Sequence LSTM Test:
+# FLOAT32, Aux Input, No Layer Normalization, No Cifg, No Peephole, No Projection, and No Clipping.
+#
+# Adapted from TFLite's LSTMOpTest.BlackBoxTestWithAuxInput.
+
+n_batch = 1
+n_input = 2
+n_cell = 4
+n_output = 4
+max_time = 3
+
+input = Input("input", "TENSOR_FLOAT32", "{{{}, {}, {}}}".format(max_time, n_batch, n_input))
+
+fw_input_to_input_weights = Input(
+ "fw_input_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+fw_input_to_forget_weights = Input(
+ "fw_input_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+fw_input_to_cell_weights = Input(
+ "fw_input_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+fw_input_to_output_weights = Input(
+ "fw_input_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+
+fw_recurrent_to_input_weights = Input(
+ "fw_recurrent_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+fw_recurrent_to_forget_weights = Input(
+ "fw_recurrent_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+fw_recurrent_to_cell_weights = Input(
+ "fw_recurrent_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+fw_recurrent_to_output_weights = Input(
+ "fw_recurrent_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+
+fw_cell_to_input_weights = Input(
+ "fw_cell_to_input_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+fw_cell_to_forget_weights = Input(
+ "fw_cell_to_forget_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+fw_cell_to_output_weights = Input(
+ "fw_cell_to_output_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+
+fw_input_gate_bias = Input(
+ "fw_input_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+fw_forget_gate_bias = Input(
+ "fw_forget_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+fw_cell_bias = Input(
+ "fw_cell_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+fw_output_gate_bias = Input(
+ "fw_output_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+
+fw_projection_weights = Input(
+ "fw_projection_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_output, n_cell))
+fw_projection_bias = Input(
+ "fw_projection_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_output))
+
+bw_input_to_input_weights = Input(
+ "bw_input_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+bw_input_to_forget_weights = Input(
+ "bw_input_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+bw_input_to_cell_weights = Input(
+ "bw_input_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+bw_input_to_output_weights = Input(
+ "bw_input_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+
+bw_recurrent_to_input_weights = Input(
+ "bw_recurrent_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+bw_recurrent_to_forget_weights = Input(
+ "bw_recurrent_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+bw_recurrent_to_cell_weights = Input(
+ "bw_recurrent_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+bw_recurrent_to_output_weights = Input(
+ "bw_recurrent_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+
+bw_cell_to_input_weights = Input(
+ "bw_cell_to_input_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+bw_cell_to_forget_weights = Input(
+ "bw_cell_to_forget_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+bw_cell_to_output_weights = Input(
+ "bw_cell_to_output_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+
+bw_input_gate_bias = Input(
+ "bw_input_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+bw_forget_gate_bias = Input(
+ "bw_forget_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+bw_cell_bias = Input(
+ "bw_cell_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+bw_output_gate_bias = Input(
+ "bw_output_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+
+bw_projection_weights = Input(
+ "bw_projection_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_output, n_cell))
+bw_projection_bias = Input(
+ "bw_projection_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_output))
+
+fw_activation_state = Input(
+ "fw_activatiom_state", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_batch, n_output))
+fw_cell_state = Input(
+ "fw_cell_state", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_batch, n_cell))
+
+bw_activation_state = Input(
+ "bw_activatiom_state", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_batch, n_output))
+bw_cell_state = Input(
+ "bw_cell_state", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_batch, n_cell))
+
+aux_input = Input("input", "TENSOR_FLOAT32", "{{{}, {}, {}}}".format(max_time, n_batch, n_input))
+
+fw_aux_input_to_input_weights = Input(
+ "fw_aux_input_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+fw_aux_input_to_forget_weights = Input(
+ "fw_input_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+fw_aux_input_to_cell_weights = Input(
+ "fw_aux_input_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+fw_aux_input_to_output_weights = Input(
+ "fw_aux_input_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+
+bw_aux_input_to_input_weights = Input(
+ "bw_aux_input_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+bw_aux_input_to_forget_weights = Input(
+ "bw_input_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+bw_aux_input_to_cell_weights = Input(
+ "bw_aux_input_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+bw_aux_input_to_output_weights = Input(
+ "bw_aux_input_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+
+fw_input_layer_norm_weights = Input("input_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+fw_forget_layer_norm_weights = Input("forget_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+fw_cell_layer_norm_weights = Input("cell_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+fw_output_layer_norm_weights = Input("output_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+
+bw_input_layer_norm_weights = Input("input_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+bw_forget_layer_norm_weights = Input("forget_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+bw_cell_layer_norm_weights = Input("cell_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+bw_output_layer_norm_weights = Input("output_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+
+fw_output=Output("fw_output", "TENSOR_FLOAT32", "{{{}, {}, {}}}".format(max_time, n_batch, n_output))
+bw_output=Output("bw_output", "TENSOR_FLOAT32", "{{{}, {}, {}}}".format(max_time, n_batch, n_output))
+
+def test(
+ name,
+ input_data=[],
+ fw_input_to_input_weights_data=[],
+ fw_input_to_forget_weights_data=[],
+ fw_input_to_cell_weights_data=[],
+ fw_input_to_output_weights_data=[],
+ fw_recurrent_to_input_weights_data=[],
+ fw_recurrent_to_forget_weights_data=[],
+ fw_recurrent_to_cell_weights_data=[],
+ fw_recurrent_to_output_weights_data=[],
+ fw_cell_to_input_weights_data=[],
+ fw_cell_to_forget_weights_data=[],
+ fw_cell_to_output_weights_data=[],
+ fw_input_gate_bias_data=[],
+ fw_forget_gate_bias_data=[],
+ fw_cell_bias_data=[],
+ fw_output_gate_bias_data=[],
+ fw_projection_weights_data=[],
+ fw_projection_bias_data=[],
+ bw_input_to_input_weights_data=[],
+ bw_input_to_forget_weights_data=[],
+ bw_input_to_cell_weights_data=[],
+ bw_input_to_output_weights_data=[],
+ bw_recurrent_to_input_weights_data=[],
+ bw_recurrent_to_forget_weights_data=[],
+ bw_recurrent_to_cell_weights_data=[],
+ bw_recurrent_to_output_weights_data=[],
+ bw_cell_to_input_weights_data=[],
+ bw_cell_to_forget_weights_data=[],
+ bw_cell_to_output_weights_data=[],
+ bw_input_gate_bias_data=[],
+ bw_forget_gate_bias_data=[],
+ bw_cell_bias_data=[],
+ bw_output_gate_bias_data=[],
+ bw_projection_weights_data=[],
+ bw_projection_bias_data=[],
+ fw_activation_state_data=[],
+ fw_cell_state_data=[],
+ bw_activation_state_data=[],
+ bw_cell_state_data=[],
+ aux_input_data=[],
+ fw_aux_input_to_input_weights_data=[],
+ fw_aux_input_to_forget_weights_data=[],
+ fw_aux_input_to_cell_weights_data=[],
+ fw_aux_input_to_output_weights_data=[],
+ bw_aux_input_to_input_weights_data=[],
+ bw_aux_input_to_forget_weights_data=[],
+ bw_aux_input_to_cell_weights_data=[],
+ bw_aux_input_to_output_weights_data=[],
+ fw_input_layer_norm_weights_data=[],
+ fw_forget_layer_norm_weights_data=[],
+ fw_cell_layer_norm_weights_data=[],
+ fw_output_layer_norm_weights_data=[],
+ bw_input_layer_norm_weights_data=[],
+ bw_forget_layer_norm_weights_data=[],
+ bw_cell_layer_norm_weights_data=[],
+ bw_output_layer_norm_weights_data=[],
+ fw_output_data=[],
+ bw_output_data=[],):
+
+ activation = Int32Scalar("activation", 4) # Tanh
+ cell_clip = Float32Scalar("cell_clip", 0.0)
+ proj_clip = Float32Scalar("proj_clip", 0.0)
+ merge_outputs = BoolScalar("merge_outputs", False)
+ time_major = BoolScalar("time_major", True)
+
+ model = Model().Operation(
+ "BIDIRECTIONAL_SEQUENCE_LSTM",
+ input,
+ fw_input_to_input_weights,
+ fw_input_to_forget_weights,
+ fw_input_to_cell_weights,
+ fw_input_to_output_weights,
+ fw_recurrent_to_input_weights,
+ fw_recurrent_to_forget_weights,
+ fw_recurrent_to_cell_weights,
+ fw_recurrent_to_output_weights,
+ fw_cell_to_input_weights,
+ fw_cell_to_forget_weights,
+ fw_cell_to_output_weights,
+ fw_input_gate_bias,
+ fw_forget_gate_bias,
+ fw_cell_bias,
+ fw_output_gate_bias,
+ fw_projection_weights,
+ fw_projection_bias,
+ bw_input_to_input_weights,
+ bw_input_to_forget_weights,
+ bw_input_to_cell_weights,
+ bw_input_to_output_weights,
+ bw_recurrent_to_input_weights,
+ bw_recurrent_to_forget_weights,
+ bw_recurrent_to_cell_weights,
+ bw_recurrent_to_output_weights,
+ bw_cell_to_input_weights,
+ bw_cell_to_forget_weights,
+ bw_cell_to_output_weights,
+ bw_input_gate_bias,
+ bw_forget_gate_bias,
+ bw_cell_bias,
+ bw_output_gate_bias,
+ bw_projection_weights,
+ bw_projection_bias,
+ fw_activation_state,
+ fw_cell_state,
+ bw_activation_state,
+ bw_cell_state,
+ aux_input,
+ fw_aux_input_to_input_weights,
+ fw_aux_input_to_forget_weights,
+ fw_aux_input_to_cell_weights,
+ fw_aux_input_to_output_weights,
+ bw_aux_input_to_input_weights,
+ bw_aux_input_to_forget_weights,
+ bw_aux_input_to_cell_weights,
+ bw_aux_input_to_output_weights,
+ activation, cell_clip, proj_clip, merge_outputs, time_major,
+ fw_input_layer_norm_weights,
+ fw_forget_layer_norm_weights,
+ fw_cell_layer_norm_weights,
+ fw_output_layer_norm_weights,
+ bw_input_layer_norm_weights,
+ bw_forget_layer_norm_weights,
+ bw_cell_layer_norm_weights,
+ bw_output_layer_norm_weights,).To(fw_output, bw_output)
+
+ example = Example(
+ {
+ input: input_data,
+ fw_input_to_input_weights: fw_input_to_input_weights_data,
+ fw_input_to_forget_weights: fw_input_to_forget_weights_data,
+ fw_input_to_cell_weights: fw_input_to_cell_weights_data,
+ fw_input_to_output_weights: fw_input_to_output_weights_data,
+ fw_recurrent_to_input_weights: fw_recurrent_to_input_weights_data,
+ fw_recurrent_to_forget_weights: fw_recurrent_to_forget_weights_data,
+ fw_recurrent_to_cell_weights: fw_recurrent_to_cell_weights_data,
+ fw_recurrent_to_output_weights: fw_recurrent_to_output_weights_data,
+ fw_cell_to_input_weights: fw_cell_to_input_weights_data,
+ fw_cell_to_forget_weights: fw_cell_to_forget_weights_data,
+ fw_cell_to_output_weights: fw_cell_to_output_weights_data,
+ fw_input_gate_bias: fw_input_gate_bias_data,
+ fw_forget_gate_bias: fw_forget_gate_bias_data,
+ fw_cell_bias: fw_cell_bias_data,
+ fw_output_gate_bias: fw_output_gate_bias_data,
+ fw_projection_weights: fw_projection_weights_data,
+ fw_projection_bias: fw_projection_bias_data,
+ bw_input_to_input_weights: bw_input_to_input_weights_data,
+ bw_input_to_forget_weights: bw_input_to_forget_weights_data,
+ bw_input_to_cell_weights: bw_input_to_cell_weights_data,
+ bw_input_to_output_weights: bw_input_to_output_weights_data,
+ bw_recurrent_to_input_weights: bw_recurrent_to_input_weights_data,
+ bw_recurrent_to_forget_weights: bw_recurrent_to_forget_weights_data,
+ bw_recurrent_to_cell_weights: bw_recurrent_to_cell_weights_data,
+ bw_recurrent_to_output_weights: bw_recurrent_to_output_weights_data,
+ bw_cell_to_input_weights: bw_cell_to_input_weights_data,
+ bw_cell_to_forget_weights: bw_cell_to_forget_weights_data,
+ bw_cell_to_output_weights: bw_cell_to_output_weights_data,
+ bw_input_gate_bias: bw_input_gate_bias_data,
+ bw_forget_gate_bias: bw_forget_gate_bias_data,
+ bw_cell_bias: bw_cell_bias_data,
+ bw_output_gate_bias: bw_output_gate_bias_data,
+ bw_projection_weights: bw_projection_weights_data,
+ bw_projection_bias: bw_projection_bias_data,
+ fw_activation_state: fw_activation_state_data,
+ fw_cell_state: fw_cell_state_data,
+ bw_activation_state: bw_activation_state_data,
+ bw_cell_state: bw_cell_state_data,
+ aux_input: aux_input_data,
+ fw_aux_input_to_input_weights: fw_aux_input_to_input_weights_data,
+ fw_aux_input_to_forget_weights: fw_aux_input_to_forget_weights_data,
+ fw_aux_input_to_cell_weights: fw_aux_input_to_cell_weights_data,
+ fw_aux_input_to_output_weights: fw_aux_input_to_output_weights_data,
+ bw_aux_input_to_input_weights: bw_aux_input_to_input_weights_data,
+ bw_aux_input_to_forget_weights: bw_aux_input_to_forget_weights_data,
+ bw_aux_input_to_cell_weights: bw_aux_input_to_cell_weights_data,
+ bw_aux_input_to_output_weights: bw_aux_input_to_output_weights_data,
+ fw_input_layer_norm_weights: fw_input_layer_norm_weights_data,
+ fw_forget_layer_norm_weights: fw_forget_layer_norm_weights_data,
+ fw_cell_layer_norm_weights: fw_cell_layer_norm_weights_data,
+ fw_output_layer_norm_weights: fw_output_layer_norm_weights_data,
+ bw_input_layer_norm_weights: bw_input_layer_norm_weights_data,
+ bw_forget_layer_norm_weights: bw_forget_layer_norm_weights_data,
+ bw_cell_layer_norm_weights: bw_cell_layer_norm_weights_data,
+ bw_output_layer_norm_weights: bw_output_layer_norm_weights_data,
+ fw_output: fw_output_data,
+ bw_output: bw_output_data,
+ },
+ model=model, name=name)
+
+
+fw_input_to_input_weights_data = [
+ -0.45018822, -0.02338299, -0.0870589,
+ -0.34550029, 0.04266912, -0.15680569,
+ -0.34856534, 0.43890524
+]
+bw_input_to_input_weights_data = fw_input_to_input_weights_data
+
+fw_input_to_forget_weights_data = [
+ 0.09701663, 0.20334584, -0.50592935,
+ -0.31343272, -0.40032279, 0.44781327,
+ 0.01387155, -0.35593212
+]
+bw_input_to_forget_weights_data = fw_input_to_forget_weights_data
+
+fw_input_to_cell_weights_data = [
+ -0.50013041, 0.1370284, 0.11810488, 0.2013163,
+ -0.20583314, 0.44344562, 0.22077113,
+ -0.29909778
+]
+bw_input_to_cell_weights_data = fw_input_to_cell_weights_data
+
+fw_input_to_output_weights_data = [
+ -0.25065863, -0.28290087, 0.04613829,
+ 0.40525138, 0.44272184, 0.03897077, -0.1556896,
+ 0.19487578
+]
+bw_input_to_output_weights_data = fw_input_to_output_weights_data
+
+fw_recurrent_to_input_weights_data = [
+ -0.0063535, -0.2042388, 0.31454784, -0.35746509, 0.28902304, 0.08183324,
+ -0.16555229, 0.02286911, -0.13566875, 0.03034258, 0.48091322,
+ -0.12528998, 0.24077177, -0.51332325, -0.33502164, 0.10629296
+]
+bw_recurrent_to_input_weights_data = fw_recurrent_to_input_weights_data
+
+fw_recurrent_to_forget_weights_data = [
+ -0.48684245, -0.06655136, 0.42224967, 0.2112639, 0.27654213, 0.20864892,
+ -0.07646349, 0.45877004, 0.00141793, -0.14609534, 0.36447752, 0.09196436,
+ 0.28053468, 0.01560611, -0.20127171, -0.01140004
+]
+bw_recurrent_to_forget_weights_data = fw_recurrent_to_forget_weights_data
+
+fw_recurrent_to_cell_weights_data = [
+ -0.3407414, 0.24443203, -0.2078532, 0.26320225, 0.05695659, -0.00123841,
+ -0.4744786, -0.35869038, -0.06418842, -0.13502428, -0.501764, 0.22830659,
+ -0.46367589, 0.26016325, -0.03894562, -0.16368064
+]
+bw_recurrent_to_cell_weights_data = fw_recurrent_to_cell_weights_data
+
+fw_recurrent_to_output_weights_data = [
+ 0.43385774, -0.17194885, 0.2718237, 0.09215671, 0.24107647, -0.39835793,
+ 0.18212086, 0.01301402, 0.48572797, -0.50656658, 0.20047462, -0.20607421,
+ -0.51818722, -0.15390486, 0.0468148, 0.39922136
+]
+bw_recurrent_to_output_weights_data = fw_recurrent_to_output_weights_data
+
+fw_input_gate_bias_data = [0.0, 0.0, 0.0, 0.0]
+bw_input_gate_bias_data = fw_input_gate_bias_data
+
+fw_forget_gate_bias_data = [1.0, 1.0, 1.0, 1.0]
+bw_forget_gate_bias_data = fw_forget_gate_bias_data
+
+fw_cell_bias_data = [0.0, 0.0, 0.0, 0.0]
+bw_cell_bias_data = fw_cell_bias_data
+
+fw_output_gate_bias_data = [0.0, 0.0, 0.0, 0.0]
+bw_output_gate_bias_data = fw_output_gate_bias_data
+
+input_data = [2.0, 3.0, 3.0, 4.0, 1.0, 1.0]
+aux_input_data = input_data
+
+fw_aux_input_to_input_weights_data = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]
+bw_aux_input_to_input_weights_data = fw_aux_input_to_input_weights_data
+fw_aux_input_to_forget_weights_data = [0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 1.0]
+bw_aux_input_to_forget_weights_data = fw_aux_input_to_forget_weights_data
+fw_aux_input_to_cell_weights_data = [0.5, 0.6, 0.7, 0.8, 0.5, 0.6, 0.7, 0.8]
+bw_aux_input_to_cell_weights_data = fw_aux_input_to_cell_weights_data
+fw_aux_input_to_output_weights_data = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]
+bw_aux_input_to_output_weights_data = fw_aux_input_to_output_weights_data
+
+fw_activation_state_data = [0 for _ in range(n_batch * n_output)]
+bw_activation_state_data = [0 for _ in range(n_batch * n_output)]
+
+fw_cell_state_data = [0 for _ in range(n_batch * n_cell)]
+bw_cell_state_data = [0 for _ in range(n_batch * n_cell)]
+
+fw_golden_output_data = [
+ 0.153335, 0.542754, 0.708602, 0.742855,
+ 0.247581, 0.835739, 0.947797, 0.958177,
+ 0.410892, 0.672268, 0.761909, 0.829133
+]
+bw_golden_output_data = [
+ 0.342275, 0.883431, 0.955930, 0.975621,
+ 0.204939, 0.806858, 0.914849, 0.934871,
+ 0.123236, 0.373087, 0.465377, 0.517630
+]
+
+
+test(
+ name="blackbox",
+ input_data=input_data,
+ fw_input_to_input_weights_data=fw_input_to_input_weights_data,
+ fw_input_to_forget_weights_data=fw_input_to_forget_weights_data,
+ fw_input_to_cell_weights_data=fw_input_to_cell_weights_data,
+ fw_input_to_output_weights_data=fw_input_to_output_weights_data,
+ fw_recurrent_to_input_weights_data=fw_recurrent_to_input_weights_data,
+ fw_recurrent_to_forget_weights_data=fw_recurrent_to_forget_weights_data,
+ fw_recurrent_to_cell_weights_data=fw_recurrent_to_cell_weights_data,
+ fw_recurrent_to_output_weights_data=fw_recurrent_to_output_weights_data,
+ fw_input_gate_bias_data=fw_input_gate_bias_data,
+ fw_forget_gate_bias_data=fw_forget_gate_bias_data,
+ fw_cell_bias_data=fw_cell_bias_data,
+ fw_output_gate_bias_data=fw_output_gate_bias_data,
+ bw_input_to_input_weights_data=bw_input_to_input_weights_data,
+ bw_input_to_forget_weights_data=bw_input_to_forget_weights_data,
+ bw_input_to_cell_weights_data=bw_input_to_cell_weights_data,
+ bw_input_to_output_weights_data=bw_input_to_output_weights_data,
+ bw_recurrent_to_input_weights_data=bw_recurrent_to_input_weights_data,
+ bw_recurrent_to_forget_weights_data=bw_recurrent_to_forget_weights_data,
+ bw_recurrent_to_cell_weights_data=bw_recurrent_to_cell_weights_data,
+ bw_recurrent_to_output_weights_data=bw_recurrent_to_output_weights_data,
+ bw_input_gate_bias_data=bw_input_gate_bias_data,
+ bw_forget_gate_bias_data=bw_forget_gate_bias_data,
+ bw_cell_bias_data=bw_cell_bias_data,
+ bw_output_gate_bias_data=bw_output_gate_bias_data,
+ fw_activation_state_data = fw_activation_state_data,
+ bw_activation_state_data = bw_activation_state_data,
+ fw_cell_state_data = fw_cell_state_data,
+ bw_cell_state_data = bw_cell_state_data,
+ aux_input_data = aux_input_data,
+ fw_aux_input_to_input_weights_data=fw_aux_input_to_input_weights_data,
+ fw_aux_input_to_forget_weights_data=fw_aux_input_to_forget_weights_data,
+ fw_aux_input_to_cell_weights_data=fw_aux_input_to_cell_weights_data,
+ fw_aux_input_to_output_weights_data=fw_aux_input_to_output_weights_data,
+ bw_aux_input_to_input_weights_data=bw_aux_input_to_input_weights_data,
+ bw_aux_input_to_forget_weights_data=bw_aux_input_to_forget_weights_data,
+ bw_aux_input_to_cell_weights_data=bw_aux_input_to_cell_weights_data,
+ bw_aux_input_to_output_weights_data=bw_aux_input_to_output_weights_data,
+ fw_output_data=fw_golden_output_data,
+ bw_output_data=bw_golden_output_data
+)
diff --git a/tests/nnapi/specs/skip/V1_2/bidirectional_sequence_lstm_cifg_peephole.mod.py b/tests/nnapi/specs/skip/V1_2/bidirectional_sequence_lstm_cifg_peephole.mod.py
new file mode 100644
index 000000000..5def31402
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/bidirectional_sequence_lstm_cifg_peephole.mod.py
@@ -0,0 +1,447 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Bidirectional Sequence LSTM Test:
+# FLOAT32, No Layer Normalization, Cifg, Peephole, No Projection, and No Clipping.
+
+n_batch = 1
+n_input = 2
+n_cell = 4
+n_output = 4
+max_time = 3
+
+input = Input("input", "TENSOR_FLOAT32", "{{{}, {}, {}}}".format(max_time, n_batch, n_input))
+
+fw_input_to_input_weights = Input(
+ "fw_input_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+fw_input_to_forget_weights = Input(
+ "fw_input_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+fw_input_to_cell_weights = Input(
+ "fw_input_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+fw_input_to_output_weights = Input(
+ "fw_input_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+
+fw_recurrent_to_input_weights = Input(
+ "fw_recurrent_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+fw_recurrent_to_forget_weights = Input(
+ "fw_recurrent_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+fw_recurrent_to_cell_weights = Input(
+ "fw_recurrent_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+fw_recurrent_to_output_weights = Input(
+ "fw_recurrent_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+
+fw_cell_to_input_weights = Input(
+ "fw_cell_to_input_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+fw_cell_to_forget_weights = Input(
+ "fw_cell_to_forget_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+fw_cell_to_output_weights = Input(
+ "fw_cell_to_output_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+
+fw_input_gate_bias = Input(
+ "fw_input_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+fw_forget_gate_bias = Input(
+ "fw_forget_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+fw_cell_bias = Input(
+ "fw_cell_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+fw_output_gate_bias = Input(
+ "fw_output_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+
+fw_projection_weights = Input(
+ "fw_projection_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_output, n_cell))
+fw_projection_bias = Input(
+ "fw_projection_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_output))
+
+bw_input_to_input_weights = Input(
+ "bw_input_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+bw_input_to_forget_weights = Input(
+ "bw_input_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+bw_input_to_cell_weights = Input(
+ "bw_input_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+bw_input_to_output_weights = Input(
+ "bw_input_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+
+bw_recurrent_to_input_weights = Input(
+ "bw_recurrent_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+bw_recurrent_to_forget_weights = Input(
+ "bw_recurrent_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+bw_recurrent_to_cell_weights = Input(
+ "bw_recurrent_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+bw_recurrent_to_output_weights = Input(
+ "bw_recurrent_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+
+bw_cell_to_input_weights = Input(
+ "bw_cell_to_input_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+bw_cell_to_forget_weights = Input(
+ "bw_cell_to_forget_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+bw_cell_to_output_weights = Input(
+ "bw_cell_to_output_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+
+bw_input_gate_bias = Input(
+ "bw_input_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+bw_forget_gate_bias = Input(
+ "bw_forget_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+bw_cell_bias = Input(
+ "bw_cell_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+bw_output_gate_bias = Input(
+ "bw_output_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+
+bw_projection_weights = Input(
+ "bw_projection_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_output, n_cell))
+bw_projection_bias = Input(
+ "bw_projection_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_output))
+
+fw_activation_state = Input(
+ "fw_activatiom_state", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_batch, n_output))
+fw_cell_state = Input(
+ "fw_cell_state", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_batch, n_cell))
+
+bw_activation_state = Input(
+ "bw_activatiom_state", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_batch, n_output))
+bw_cell_state = Input(
+ "bw_cell_state", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_batch, n_cell))
+
+aux_input = Input("input", "TENSOR_FLOAT32", "{{{}, {}, {}}}".format(max_time, n_batch, n_input))
+
+fw_aux_input_to_input_weights = Input(
+ "fw_aux_input_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+fw_aux_input_to_forget_weights = Input(
+ "fw_input_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+fw_aux_input_to_cell_weights = Input(
+ "fw_aux_input_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+fw_aux_input_to_output_weights = Input(
+ "fw_aux_input_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+
+bw_aux_input_to_input_weights = Input(
+ "bw_aux_input_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+bw_aux_input_to_forget_weights = Input(
+ "bw_input_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+bw_aux_input_to_cell_weights = Input(
+ "bw_aux_input_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+bw_aux_input_to_output_weights = Input(
+ "bw_aux_input_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+
+fw_input_layer_norm_weights = Input("input_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+fw_forget_layer_norm_weights = Input("forget_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+fw_cell_layer_norm_weights = Input("cell_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+fw_output_layer_norm_weights = Input("output_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+
+bw_input_layer_norm_weights = Input("input_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+bw_forget_layer_norm_weights = Input("forget_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+bw_cell_layer_norm_weights = Input("cell_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+bw_output_layer_norm_weights = Input("output_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+
+fw_output=Output("fw_output", "TENSOR_FLOAT32", "{{{}, {}, {}}}".format(max_time, n_batch, n_output))
+bw_output=Output("bw_output", "TENSOR_FLOAT32", "{{{}, {}, {}}}".format(max_time, n_batch, n_output))
+
+def test(
+ name,
+ input_data=[],
+ fw_input_to_input_weights_data=[],
+ fw_input_to_forget_weights_data=[],
+ fw_input_to_cell_weights_data=[],
+ fw_input_to_output_weights_data=[],
+ fw_recurrent_to_input_weights_data=[],
+ fw_recurrent_to_forget_weights_data=[],
+ fw_recurrent_to_cell_weights_data=[],
+ fw_recurrent_to_output_weights_data=[],
+ fw_cell_to_input_weights_data=[],
+ fw_cell_to_forget_weights_data=[],
+ fw_cell_to_output_weights_data=[],
+ fw_input_gate_bias_data=[],
+ fw_forget_gate_bias_data=[],
+ fw_cell_bias_data=[],
+ fw_output_gate_bias_data=[],
+ fw_projection_weights_data=[],
+ fw_projection_bias_data=[],
+ bw_input_to_input_weights_data=[],
+ bw_input_to_forget_weights_data=[],
+ bw_input_to_cell_weights_data=[],
+ bw_input_to_output_weights_data=[],
+ bw_recurrent_to_input_weights_data=[],
+ bw_recurrent_to_forget_weights_data=[],
+ bw_recurrent_to_cell_weights_data=[],
+ bw_recurrent_to_output_weights_data=[],
+ bw_cell_to_input_weights_data=[],
+ bw_cell_to_forget_weights_data=[],
+ bw_cell_to_output_weights_data=[],
+ bw_input_gate_bias_data=[],
+ bw_forget_gate_bias_data=[],
+ bw_cell_bias_data=[],
+ bw_output_gate_bias_data=[],
+ bw_projection_weights_data=[],
+ bw_projection_bias_data=[],
+ fw_activation_state_data=[],
+ fw_cell_state_data=[],
+ bw_activation_state_data=[],
+ bw_cell_state_data=[],
+ aux_input_data=[],
+ fw_aux_input_to_input_weights_data=[],
+ fw_aux_input_to_forget_weights_data=[],
+ fw_aux_input_to_cell_weights_data=[],
+ fw_aux_input_to_output_weights_data=[],
+ bw_aux_input_to_input_weights_data=[],
+ bw_aux_input_to_forget_weights_data=[],
+ bw_aux_input_to_cell_weights_data=[],
+ bw_aux_input_to_output_weights_data=[],
+ fw_input_layer_norm_weights_data=[],
+ fw_forget_layer_norm_weights_data=[],
+ fw_cell_layer_norm_weights_data=[],
+ fw_output_layer_norm_weights_data=[],
+ bw_input_layer_norm_weights_data=[],
+ bw_forget_layer_norm_weights_data=[],
+ bw_cell_layer_norm_weights_data=[],
+ bw_output_layer_norm_weights_data=[],
+ fw_output_data=[],
+ bw_output_data=[],):
+
+ activation = Int32Scalar("activation", 4)
+ cell_clip = Float32Scalar("cell_clip", 0.0)
+ proj_clip = Float32Scalar("proj_clip", 0.0)
+ merge_outputs = BoolScalar("merge_outputs", False)
+ time_major = BoolScalar("time_major", True)
+
+ model = Model().Operation(
+ "BIDIRECTIONAL_SEQUENCE_LSTM",
+ input,
+ fw_input_to_input_weights,
+ fw_input_to_forget_weights,
+ fw_input_to_cell_weights,
+ fw_input_to_output_weights,
+ fw_recurrent_to_input_weights,
+ fw_recurrent_to_forget_weights,
+ fw_recurrent_to_cell_weights,
+ fw_recurrent_to_output_weights,
+ fw_cell_to_input_weights,
+ fw_cell_to_forget_weights,
+ fw_cell_to_output_weights,
+ fw_input_gate_bias,
+ fw_forget_gate_bias,
+ fw_cell_bias,
+ fw_output_gate_bias,
+ fw_projection_weights,
+ fw_projection_bias,
+ bw_input_to_input_weights,
+ bw_input_to_forget_weights,
+ bw_input_to_cell_weights,
+ bw_input_to_output_weights,
+ bw_recurrent_to_input_weights,
+ bw_recurrent_to_forget_weights,
+ bw_recurrent_to_cell_weights,
+ bw_recurrent_to_output_weights,
+ bw_cell_to_input_weights,
+ bw_cell_to_forget_weights,
+ bw_cell_to_output_weights,
+ bw_input_gate_bias,
+ bw_forget_gate_bias,
+ bw_cell_bias,
+ bw_output_gate_bias,
+ bw_projection_weights,
+ bw_projection_bias,
+ fw_activation_state,
+ fw_cell_state,
+ bw_activation_state,
+ bw_cell_state,
+ aux_input,
+ fw_aux_input_to_input_weights,
+ fw_aux_input_to_forget_weights,
+ fw_aux_input_to_cell_weights,
+ fw_aux_input_to_output_weights,
+ bw_aux_input_to_input_weights,
+ bw_aux_input_to_forget_weights,
+ bw_aux_input_to_cell_weights,
+ bw_aux_input_to_output_weights,
+ activation, cell_clip, proj_clip, merge_outputs, time_major,
+ fw_input_layer_norm_weights,
+ fw_forget_layer_norm_weights,
+ fw_cell_layer_norm_weights,
+ fw_output_layer_norm_weights,
+ bw_input_layer_norm_weights,
+ bw_forget_layer_norm_weights,
+ bw_cell_layer_norm_weights,
+ bw_output_layer_norm_weights,).To(fw_output, bw_output)
+
+ example = Example(
+ {
+ input: input_data,
+ fw_input_to_input_weights: fw_input_to_input_weights_data,
+ fw_input_to_forget_weights: fw_input_to_forget_weights_data,
+ fw_input_to_cell_weights: fw_input_to_cell_weights_data,
+ fw_input_to_output_weights: fw_input_to_output_weights_data,
+ fw_recurrent_to_input_weights: fw_recurrent_to_input_weights_data,
+ fw_recurrent_to_forget_weights: fw_recurrent_to_forget_weights_data,
+ fw_recurrent_to_cell_weights: fw_recurrent_to_cell_weights_data,
+ fw_recurrent_to_output_weights: fw_recurrent_to_output_weights_data,
+ fw_cell_to_input_weights: fw_cell_to_input_weights_data,
+ fw_cell_to_forget_weights: fw_cell_to_forget_weights_data,
+ fw_cell_to_output_weights: fw_cell_to_output_weights_data,
+ fw_input_gate_bias: fw_input_gate_bias_data,
+ fw_forget_gate_bias: fw_forget_gate_bias_data,
+ fw_cell_bias: fw_cell_bias_data,
+ fw_output_gate_bias: fw_output_gate_bias_data,
+ fw_projection_weights: fw_projection_weights_data,
+ fw_projection_bias: fw_projection_bias_data,
+ bw_input_to_input_weights: bw_input_to_input_weights_data,
+ bw_input_to_forget_weights: bw_input_to_forget_weights_data,
+ bw_input_to_cell_weights: bw_input_to_cell_weights_data,
+ bw_input_to_output_weights: bw_input_to_output_weights_data,
+ bw_recurrent_to_input_weights: bw_recurrent_to_input_weights_data,
+ bw_recurrent_to_forget_weights: bw_recurrent_to_forget_weights_data,
+ bw_recurrent_to_cell_weights: bw_recurrent_to_cell_weights_data,
+ bw_recurrent_to_output_weights: bw_recurrent_to_output_weights_data,
+ bw_cell_to_input_weights: bw_cell_to_input_weights_data,
+ bw_cell_to_forget_weights: bw_cell_to_forget_weights_data,
+ bw_cell_to_output_weights: bw_cell_to_output_weights_data,
+ bw_input_gate_bias: bw_input_gate_bias_data,
+ bw_forget_gate_bias: bw_forget_gate_bias_data,
+ bw_cell_bias: bw_cell_bias_data,
+ bw_output_gate_bias: bw_output_gate_bias_data,
+ bw_projection_weights: bw_projection_weights_data,
+ bw_projection_bias: bw_projection_bias_data,
+ fw_activation_state: fw_activation_state_data,
+ fw_cell_state: fw_cell_state_data,
+ bw_activation_state: bw_activation_state_data,
+ bw_cell_state: bw_cell_state_data,
+ aux_input: aux_input_data,
+ fw_aux_input_to_input_weights: fw_aux_input_to_input_weights_data,
+ fw_aux_input_to_forget_weights: fw_aux_input_to_forget_weights_data,
+ fw_aux_input_to_cell_weights: fw_aux_input_to_cell_weights_data,
+ fw_aux_input_to_output_weights: fw_aux_input_to_output_weights_data,
+ bw_aux_input_to_input_weights: bw_aux_input_to_input_weights_data,
+ bw_aux_input_to_forget_weights: bw_aux_input_to_forget_weights_data,
+ bw_aux_input_to_cell_weights: bw_aux_input_to_cell_weights_data,
+ bw_aux_input_to_output_weights: bw_aux_input_to_output_weights_data,
+ fw_input_layer_norm_weights: fw_input_layer_norm_weights_data,
+ fw_forget_layer_norm_weights: fw_forget_layer_norm_weights_data,
+ fw_cell_layer_norm_weights: fw_cell_layer_norm_weights_data,
+ fw_output_layer_norm_weights: fw_output_layer_norm_weights_data,
+ bw_input_layer_norm_weights: bw_input_layer_norm_weights_data,
+ bw_forget_layer_norm_weights: bw_forget_layer_norm_weights_data,
+ bw_cell_layer_norm_weights: bw_cell_layer_norm_weights_data,
+ bw_output_layer_norm_weights: bw_output_layer_norm_weights_data,
+ fw_output: fw_output_data,
+ bw_output: bw_output_data,
+ },
+ model=model, name=name)
+
+fw_input_to_forget_weights_data = [
+ -0.55291498, -0.42866567, 0.13056988, -0.3633365,
+ -0.22755712, 0.28253698, 0.24407166, 0.33826375
+]
+bw_input_to_forget_weights_data = fw_input_to_forget_weights_data
+
+fw_input_to_cell_weights_data = [
+ -0.49770179, -0.27711356, -0.09624726, 0.05100781,
+ 0.04717243, 0.48944736, -0.38535351, -0.17212132
+]
+bw_input_to_cell_weights_data = fw_input_to_cell_weights_data
+
+fw_input_to_output_weights_data = [
+ 0.10725588, -0.02335852, -0.55932593, -0.09426838,
+ -0.44257352, 0.54939759, 0.01533556, 0.42751634
+]
+bw_input_to_output_weights_data = fw_input_to_output_weights_data
+
+fw_recurrent_to_forget_weights_data = [
+ -0.13832897, -0.0515101, -0.2359007, -0.16661474,
+ -0.14340827, 0.36986142, 0.23414481, 0.55899,
+ 0.10798943, -0.41174671, 0.17751795, -0.34484994,
+ -0.35874045, -0.11352962, 0.27268326, 0.54058349
+]
+bw_recurrent_to_forget_weights_data = fw_recurrent_to_forget_weights_data
+
+fw_recurrent_to_cell_weights_data = [
+ 0.54066205, -0.32668582, -0.43562764, -0.56094903,
+ 0.42957711, 0.01841056, -0.32764608, -0.33027974,
+ -0.10826075, 0.20675004, 0.19069612, -0.03026325,
+ -0.54532051, 0.33003211, 0.44901288, 0.21193194
+]
+bw_recurrent_to_cell_weights_data = fw_recurrent_to_cell_weights_data
+
+fw_recurrent_to_output_weights_data = [
+ 0.41613156, 0.42610586, -0.16495961, -0.5663873,
+ 0.30579174, -0.05115908, -0.33941799, 0.23364776,
+ 0.11178309, 0.09481031, -0.26424935, 0.46261835,
+ 0.50248802, 0.26114327, -0.43736315, 0.33149987
+]
+bw_recurrent_to_output_weights_data = fw_recurrent_to_output_weights_data
+
+fw_forget_gate_bias_data = [1.0, 1.0, 1.0, 1.0]
+bw_forget_gate_bias_data = [1.0, 1.0, 1.0, 1.0]
+
+fw_cell_bias_data = [0.0, 0.0, 0.0, 0.0]
+bw_cell_bias_data = [0.0, 0.0, 0.0, 0.0]
+
+fw_output_gate_bias_data = [0.0, 0.0, 0.0, 0.0]
+bw_output_gate_bias_data = [0.0, 0.0, 0.0, 0.0]
+
+fw_cell_to_forget_weights_data = [ 0.47485286, -0.51955009, -0.24458408, 0.31544167 ]
+bw_cell_to_forget_weights_data = fw_cell_to_forget_weights_data
+
+fw_cell_to_output_weights_data = [ -0.17135078, 0.82760304, 0.85573703, -0.77109635 ]
+bw_cell_to_output_weights_data = fw_cell_to_output_weights_data
+
+input_data = [2.0, 3.0, 3.0, 4.0, 1.0, 1.0]
+
+fw_activation_state_data = [0 for _ in range(n_batch * n_output)]
+bw_activation_state_data = [0 for _ in range(n_batch * n_output)]
+
+fw_cell_state_data = [0 for _ in range(n_batch * n_cell)]
+bw_cell_state_data = [0 for _ in range(n_batch * n_cell)]
+
+fw_golden_output_data = [
+ -0.36444446, -0.00352185, 0.12886585, -0.05163646,
+ -0.42312205, -0.01218222, 0.24201041, -0.08124574,
+ -0.358325, -0.04621704, 0.21641694, -0.06471302
+]
+bw_golden_output_data = [
+ -0.401685, -0.0232794, 0.288642, -0.123074,
+ -0.42915, -0.00871577, 0.20912, -0.103567,
+ -0.166398, -0.00486649, 0.0697471, -0.0537578
+]
+
+
+test(
+ name="blackbox",
+ input_data=input_data,
+ fw_input_to_forget_weights_data=fw_input_to_forget_weights_data,
+ fw_input_to_cell_weights_data=fw_input_to_cell_weights_data,
+ fw_input_to_output_weights_data=fw_input_to_output_weights_data,
+ fw_recurrent_to_forget_weights_data=fw_recurrent_to_forget_weights_data,
+ fw_recurrent_to_cell_weights_data=fw_recurrent_to_cell_weights_data,
+ fw_recurrent_to_output_weights_data=fw_recurrent_to_output_weights_data,
+ fw_cell_to_forget_weights_data = fw_cell_to_forget_weights_data,
+ fw_cell_to_output_weights_data = fw_cell_to_output_weights_data,
+ fw_forget_gate_bias_data=fw_forget_gate_bias_data,
+ fw_cell_bias_data=fw_cell_bias_data,
+ fw_output_gate_bias_data=fw_output_gate_bias_data,
+ bw_input_to_forget_weights_data=bw_input_to_forget_weights_data,
+ bw_input_to_cell_weights_data=bw_input_to_cell_weights_data,
+ bw_input_to_output_weights_data=bw_input_to_output_weights_data,
+ bw_recurrent_to_forget_weights_data=bw_recurrent_to_forget_weights_data,
+ bw_recurrent_to_cell_weights_data=bw_recurrent_to_cell_weights_data,
+ bw_recurrent_to_output_weights_data=bw_recurrent_to_output_weights_data,
+ bw_cell_to_forget_weights_data = bw_cell_to_forget_weights_data,
+ bw_cell_to_output_weights_data = bw_cell_to_output_weights_data,
+ bw_forget_gate_bias_data=bw_forget_gate_bias_data,
+ bw_cell_bias_data=bw_cell_bias_data,
+ bw_output_gate_bias_data=bw_output_gate_bias_data,
+ fw_activation_state_data = fw_activation_state_data,
+ bw_activation_state_data = bw_activation_state_data,
+ fw_cell_state_data = fw_cell_state_data,
+ bw_cell_state_data = bw_cell_state_data,
+ fw_output_data=fw_golden_output_data,
+ bw_output_data=bw_golden_output_data
+)
diff --git a/tests/nnapi/specs/skip/V1_2/bidirectional_sequence_lstm_float16_batch_major.mod.py b/tests/nnapi/specs/skip/V1_2/bidirectional_sequence_lstm_float16_batch_major.mod.py
new file mode 100644
index 000000000..06160adfe
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/bidirectional_sequence_lstm_float16_batch_major.mod.py
@@ -0,0 +1,461 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Bidirectional Sequence LSTM Test:
+# FLOAT16, Batch Major, No Layer Normalization, No Cifg, No Peephole, No Projection, and No Clipping.
+
# Model dimensions: one batch, three time steps, two input features,
# four cells and four outputs per direction.
n_batch = 1
n_input = 2
n_cell = 4
n_output = 4
max_time = 3

# Primary sequence input, batch-major layout: [n_batch, max_time, n_input] = {1, 3, 2}.
input = Input("input", "TENSOR_FLOAT16", "{{{}, {}, {}}}".format(n_batch, max_time, n_input))
+
# Forward-direction LSTM parameters.
# Input-to-gate weights have shape [n_cell, n_input].
fw_input_to_input_weights = Input(
    "fw_input_to_input_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
fw_input_to_forget_weights = Input(
    "fw_input_to_forget_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
fw_input_to_cell_weights = Input(
    "fw_input_to_cell_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
fw_input_to_output_weights = Input(
    "fw_input_to_output_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))

# Recurrent-to-gate weights have shape [n_cell, n_output].
fw_recurrent_to_input_weights = Input(
    "fw_recurrent_to_input_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_output))
fw_recurrent_to_forget_weights = Input(
    "fw_recurrent_to_forget_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_output))
fw_recurrent_to_cell_weights = Input(
    "fw_recurrent_to_cell_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_output))
fw_recurrent_to_output_weights = Input(
    "fw_recurrent_to_output_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_output))

# Peephole (cell-to-gate) weights, one value per cell.  This test feeds
# them empty data, i.e. peephole connections are disabled.
fw_cell_to_input_weights = Input(
    "fw_cell_to_input_weights", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))
fw_cell_to_forget_weights = Input(
    "fw_cell_to_forget_weights", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))
fw_cell_to_output_weights = Input(
    "fw_cell_to_output_weights", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))

# Gate biases, one value per cell.
fw_input_gate_bias = Input(
    "fw_input_gate_bias", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))
fw_forget_gate_bias = Input(
    "fw_forget_gate_bias", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))
fw_cell_bias = Input(
    "fw_cell_bias", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))
fw_output_gate_bias = Input(
    "fw_output_gate_bias", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))

# Projection layer; fed empty data in this test (projection disabled).
fw_projection_weights = Input(
    "fw_projection_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_output, n_cell))
fw_projection_bias = Input(
    "fw_projection_bias", "TENSOR_FLOAT16", "{{{}}}".format(n_output))
+
# Backward-direction LSTM parameters; shapes mirror the forward set.
bw_input_to_input_weights = Input(
    "bw_input_to_input_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
bw_input_to_forget_weights = Input(
    "bw_input_to_forget_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
bw_input_to_cell_weights = Input(
    "bw_input_to_cell_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
bw_input_to_output_weights = Input(
    "bw_input_to_output_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))

bw_recurrent_to_input_weights = Input(
    "bw_recurrent_to_input_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_output))
bw_recurrent_to_forget_weights = Input(
    "bw_recurrent_to_forget_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_output))
bw_recurrent_to_cell_weights = Input(
    "bw_recurrent_to_cell_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_output))
bw_recurrent_to_output_weights = Input(
    "bw_recurrent_to_output_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_output))

# Peephole weights; empty data in this test (disabled).
bw_cell_to_input_weights = Input(
    "bw_cell_to_input_weights", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))
bw_cell_to_forget_weights = Input(
    "bw_cell_to_forget_weights", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))
bw_cell_to_output_weights = Input(
    "bw_cell_to_output_weights", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))

bw_input_gate_bias = Input(
    "bw_input_gate_bias", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))
bw_forget_gate_bias = Input(
    "bw_forget_gate_bias", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))
bw_cell_bias = Input(
    "bw_cell_bias", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))
bw_output_gate_bias = Input(
    "bw_output_gate_bias", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))

# Projection layer; empty data in this test (disabled).
bw_projection_weights = Input(
    "bw_projection_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_output, n_cell))
bw_projection_bias = Input(
    "bw_projection_bias", "TENSOR_FLOAT16", "{{{}}}".format(n_output))
+
# Recurrent state inputs, zero-initialized by the caller.
# NOTE(review): the operand name string "fw_activatiom_state" carries an
# upstream typo ("activatiom" for "activation").  It is runtime data in the
# generated spec, so it is preserved byte-for-byte here.
fw_activation_state = Input(
    "fw_activatiom_state", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_batch, n_output))
fw_cell_state = Input(
    "fw_cell_state", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_batch, n_cell))

# NOTE(review): same "activatiom" typo as the forward state above; preserved.
bw_activation_state = Input(
    "bw_activatiom_state", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_batch, n_output))
bw_cell_state = Input(
    "bw_cell_state", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_batch, n_cell))

# Auxiliary input; this test feeds it empty data (no aux path).
# NOTE(review): the operand name "input" duplicates the primary input's
# name.  Operands appear to be referenced by object rather than by name,
# so this is presumably harmless — TODO confirm against the generator.
aux_input = Input("input", "TENSOR_FLOAT16", "{{{}, {}, {}}}".format(n_batch, max_time, n_input))

# Auxiliary input-to-gate weights; empty data in this test.
# NOTE(review): the forget-gate operands below are named without the
# "aux_" prefix, colliding with fw/bw_input_to_forget_weights' names —
# an apparent upstream copy-paste; preserved byte-for-byte.
fw_aux_input_to_input_weights = Input(
    "fw_aux_input_to_input_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
fw_aux_input_to_forget_weights = Input(
    "fw_input_to_forget_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
fw_aux_input_to_cell_weights = Input(
    "fw_aux_input_to_cell_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
fw_aux_input_to_output_weights = Input(
    "fw_aux_input_to_output_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))

bw_aux_input_to_input_weights = Input(
    "bw_aux_input_to_input_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
bw_aux_input_to_forget_weights = Input(
    "bw_input_to_forget_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
bw_aux_input_to_cell_weights = Input(
    "bw_aux_input_to_cell_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
bw_aux_input_to_output_weights = Input(
    "bw_aux_input_to_output_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))

# Layer-normalization weights; empty data in this test (disabled).
# NOTE(review): the fw_* and bw_* operands reuse identical name strings
# (no direction prefix); preserved as upstream wrote them.
fw_input_layer_norm_weights = Input("input_layer_norm_weights", "TENSOR_FLOAT16", "{%d}" % n_cell)
fw_forget_layer_norm_weights = Input("forget_layer_norm_weights", "TENSOR_FLOAT16", "{%d}" % n_cell)
fw_cell_layer_norm_weights = Input("cell_layer_norm_weights", "TENSOR_FLOAT16", "{%d}" % n_cell)
fw_output_layer_norm_weights = Input("output_layer_norm_weights", "TENSOR_FLOAT16", "{%d}" % n_cell)

bw_input_layer_norm_weights = Input("input_layer_norm_weights", "TENSOR_FLOAT16", "{%d}" % n_cell)
bw_forget_layer_norm_weights = Input("forget_layer_norm_weights", "TENSOR_FLOAT16", "{%d}" % n_cell)
bw_cell_layer_norm_weights = Input("cell_layer_norm_weights", "TENSOR_FLOAT16", "{%d}" % n_cell)
bw_output_layer_norm_weights = Input("output_layer_norm_weights", "TENSOR_FLOAT16", "{%d}" % n_cell)

# Per-direction outputs, batch-major: [n_batch, max_time, n_output].
fw_output=Output("fw_output", "TENSOR_FLOAT16", "{{{}, {}, {}}}".format(n_batch, max_time, n_output))
bw_output=Output("bw_output", "TENSOR_FLOAT16", "{{{}, {}, {}}}".format(n_batch, max_time, n_output))
+
# NOTE(review): the mutable list defaults (=[]) below are the usual Python
# pitfall, but none of these lists is ever mutated here, so they are safe.
def test(
    name,
    input_data=[],
    fw_input_to_input_weights_data=[],
    fw_input_to_forget_weights_data=[],
    fw_input_to_cell_weights_data=[],
    fw_input_to_output_weights_data=[],
    fw_recurrent_to_input_weights_data=[],
    fw_recurrent_to_forget_weights_data=[],
    fw_recurrent_to_cell_weights_data=[],
    fw_recurrent_to_output_weights_data=[],
    fw_cell_to_input_weights_data=[],
    fw_cell_to_forget_weights_data=[],
    fw_cell_to_output_weights_data=[],
    fw_input_gate_bias_data=[],
    fw_forget_gate_bias_data=[],
    fw_cell_bias_data=[],
    fw_output_gate_bias_data=[],
    fw_projection_weights_data=[],
    fw_projection_bias_data=[],
    bw_input_to_input_weights_data=[],
    bw_input_to_forget_weights_data=[],
    bw_input_to_cell_weights_data=[],
    bw_input_to_output_weights_data=[],
    bw_recurrent_to_input_weights_data=[],
    bw_recurrent_to_forget_weights_data=[],
    bw_recurrent_to_cell_weights_data=[],
    bw_recurrent_to_output_weights_data=[],
    bw_cell_to_input_weights_data=[],
    bw_cell_to_forget_weights_data=[],
    bw_cell_to_output_weights_data=[],
    bw_input_gate_bias_data=[],
    bw_forget_gate_bias_data=[],
    bw_cell_bias_data=[],
    bw_output_gate_bias_data=[],
    bw_projection_weights_data=[],
    bw_projection_bias_data=[],
    fw_activation_state_data=[],
    fw_cell_state_data=[],
    bw_activation_state_data=[],
    bw_cell_state_data=[],
    aux_input_data=[],
    fw_aux_input_to_input_weights_data=[],
    fw_aux_input_to_forget_weights_data=[],
    fw_aux_input_to_cell_weights_data=[],
    fw_aux_input_to_output_weights_data=[],
    bw_aux_input_to_input_weights_data=[],
    bw_aux_input_to_forget_weights_data=[],
    bw_aux_input_to_cell_weights_data=[],
    bw_aux_input_to_output_weights_data=[],
    fw_input_layer_norm_weights_data=[],
    fw_forget_layer_norm_weights_data=[],
    fw_cell_layer_norm_weights_data=[],
    fw_output_layer_norm_weights_data=[],
    bw_input_layer_norm_weights_data=[],
    bw_forget_layer_norm_weights_data=[],
    bw_cell_layer_norm_weights_data=[],
    bw_output_layer_norm_weights_data=[],
    fw_output_data=[],
    bw_output_data=[],):
  """Builds one BIDIRECTIONAL_SEQUENCE_LSTM model and one Example.

  Operands left as empty lists become omitted/optional inputs in the
  generated test, which is how this spec disables CIFG, peephole,
  projection, aux input and layer normalization.

  The positional order of operands in Model().Operation below is the
  NNAPI operand signature for this op — do not reorder.
  """
  # Fused activation 4 selects tanh (the sibling aux-input spec labels
  # the same constant "# Tanh"); clipping disabled via 0.0.
  activation = Int32Scalar("activation", 4)
  cell_clip = Float16Scalar("cell_clip", 0.0)
  proj_clip = Float16Scalar("proj_clip", 0.0)
  merge_outputs = BoolScalar("merge_outputs", False)
  time_major = BoolScalar("time_major", False)

  model = Model().Operation(
      "BIDIRECTIONAL_SEQUENCE_LSTM",
      input,
      # Forward weights and biases.
      fw_input_to_input_weights,
      fw_input_to_forget_weights,
      fw_input_to_cell_weights,
      fw_input_to_output_weights,
      fw_recurrent_to_input_weights,
      fw_recurrent_to_forget_weights,
      fw_recurrent_to_cell_weights,
      fw_recurrent_to_output_weights,
      fw_cell_to_input_weights,
      fw_cell_to_forget_weights,
      fw_cell_to_output_weights,
      fw_input_gate_bias,
      fw_forget_gate_bias,
      fw_cell_bias,
      fw_output_gate_bias,
      fw_projection_weights,
      fw_projection_bias,
      # Backward weights and biases.
      bw_input_to_input_weights,
      bw_input_to_forget_weights,
      bw_input_to_cell_weights,
      bw_input_to_output_weights,
      bw_recurrent_to_input_weights,
      bw_recurrent_to_forget_weights,
      bw_recurrent_to_cell_weights,
      bw_recurrent_to_output_weights,
      bw_cell_to_input_weights,
      bw_cell_to_forget_weights,
      bw_cell_to_output_weights,
      bw_input_gate_bias,
      bw_forget_gate_bias,
      bw_cell_bias,
      bw_output_gate_bias,
      bw_projection_weights,
      bw_projection_bias,
      # Initial states.
      fw_activation_state,
      fw_cell_state,
      bw_activation_state,
      bw_cell_state,
      # Auxiliary input path.
      aux_input,
      fw_aux_input_to_input_weights,
      fw_aux_input_to_forget_weights,
      fw_aux_input_to_cell_weights,
      fw_aux_input_to_output_weights,
      bw_aux_input_to_input_weights,
      bw_aux_input_to_forget_weights,
      bw_aux_input_to_cell_weights,
      bw_aux_input_to_output_weights,
      # Scalar parameters.
      activation, cell_clip, proj_clip, merge_outputs, time_major,
      # Layer-normalization weights.
      fw_input_layer_norm_weights,
      fw_forget_layer_norm_weights,
      fw_cell_layer_norm_weights,
      fw_output_layer_norm_weights,
      bw_input_layer_norm_weights,
      bw_forget_layer_norm_weights,
      bw_cell_layer_norm_weights,
      bw_output_layer_norm_weights,).To(fw_output, bw_output)

  # Bind every operand to its caller-supplied data (empty list == omitted).
  example = Example(
      {
          input: input_data,
          fw_input_to_input_weights: fw_input_to_input_weights_data,
          fw_input_to_forget_weights: fw_input_to_forget_weights_data,
          fw_input_to_cell_weights: fw_input_to_cell_weights_data,
          fw_input_to_output_weights: fw_input_to_output_weights_data,
          fw_recurrent_to_input_weights: fw_recurrent_to_input_weights_data,
          fw_recurrent_to_forget_weights: fw_recurrent_to_forget_weights_data,
          fw_recurrent_to_cell_weights: fw_recurrent_to_cell_weights_data,
          fw_recurrent_to_output_weights: fw_recurrent_to_output_weights_data,
          fw_cell_to_input_weights: fw_cell_to_input_weights_data,
          fw_cell_to_forget_weights: fw_cell_to_forget_weights_data,
          fw_cell_to_output_weights: fw_cell_to_output_weights_data,
          fw_input_gate_bias: fw_input_gate_bias_data,
          fw_forget_gate_bias: fw_forget_gate_bias_data,
          fw_cell_bias: fw_cell_bias_data,
          fw_output_gate_bias: fw_output_gate_bias_data,
          fw_projection_weights: fw_projection_weights_data,
          fw_projection_bias: fw_projection_bias_data,
          bw_input_to_input_weights: bw_input_to_input_weights_data,
          bw_input_to_forget_weights: bw_input_to_forget_weights_data,
          bw_input_to_cell_weights: bw_input_to_cell_weights_data,
          bw_input_to_output_weights: bw_input_to_output_weights_data,
          bw_recurrent_to_input_weights: bw_recurrent_to_input_weights_data,
          bw_recurrent_to_forget_weights: bw_recurrent_to_forget_weights_data,
          bw_recurrent_to_cell_weights: bw_recurrent_to_cell_weights_data,
          bw_recurrent_to_output_weights: bw_recurrent_to_output_weights_data,
          bw_cell_to_input_weights: bw_cell_to_input_weights_data,
          bw_cell_to_forget_weights: bw_cell_to_forget_weights_data,
          bw_cell_to_output_weights: bw_cell_to_output_weights_data,
          bw_input_gate_bias: bw_input_gate_bias_data,
          bw_forget_gate_bias: bw_forget_gate_bias_data,
          bw_cell_bias: bw_cell_bias_data,
          bw_output_gate_bias: bw_output_gate_bias_data,
          bw_projection_weights: bw_projection_weights_data,
          bw_projection_bias: bw_projection_bias_data,
          fw_activation_state: fw_activation_state_data,
          fw_cell_state: fw_cell_state_data,
          bw_activation_state: bw_activation_state_data,
          bw_cell_state: bw_cell_state_data,
          aux_input: aux_input_data,
          fw_aux_input_to_input_weights: fw_aux_input_to_input_weights_data,
          fw_aux_input_to_forget_weights: fw_aux_input_to_forget_weights_data,
          fw_aux_input_to_cell_weights: fw_aux_input_to_cell_weights_data,
          fw_aux_input_to_output_weights: fw_aux_input_to_output_weights_data,
          bw_aux_input_to_input_weights: bw_aux_input_to_input_weights_data,
          bw_aux_input_to_forget_weights: bw_aux_input_to_forget_weights_data,
          bw_aux_input_to_cell_weights: bw_aux_input_to_cell_weights_data,
          bw_aux_input_to_output_weights: bw_aux_input_to_output_weights_data,
          fw_input_layer_norm_weights: fw_input_layer_norm_weights_data,
          fw_forget_layer_norm_weights: fw_forget_layer_norm_weights_data,
          fw_cell_layer_norm_weights: fw_cell_layer_norm_weights_data,
          fw_output_layer_norm_weights: fw_output_layer_norm_weights_data,
          bw_input_layer_norm_weights: bw_input_layer_norm_weights_data,
          bw_forget_layer_norm_weights: bw_forget_layer_norm_weights_data,
          bw_cell_layer_norm_weights: bw_cell_layer_norm_weights_data,
          bw_output_layer_norm_weights: bw_output_layer_norm_weights_data,
          fw_output: fw_output_data,
          bw_output: bw_output_data,
      },
      model=model, name=name)
+
+
# Blackbox test data; the backward direction reuses the forward weights.
fw_input_to_input_weights_data = [
    -0.45018822, -0.02338299, -0.0870589, -0.34550029,
    0.04266912, -0.15680569, -0.34856534, 0.43890524,
]
bw_input_to_input_weights_data = fw_input_to_input_weights_data

fw_input_to_forget_weights_data = [
    0.09701663, 0.20334584, -0.50592935, -0.31343272,
    -0.40032279, 0.44781327, 0.01387155, -0.35593212,
]
bw_input_to_forget_weights_data = fw_input_to_forget_weights_data

fw_input_to_cell_weights_data = [
    -0.50013041, 0.1370284, 0.11810488, 0.2013163,
    -0.20583314, 0.44344562, 0.22077113, -0.29909778,
]
bw_input_to_cell_weights_data = fw_input_to_cell_weights_data

fw_input_to_output_weights_data = [
    -0.25065863, -0.28290087, 0.04613829, 0.40525138,
    0.44272184, 0.03897077, -0.1556896, 0.19487578,
]
bw_input_to_output_weights_data = fw_input_to_output_weights_data

fw_recurrent_to_input_weights_data = [
    -0.0063535, -0.2042388, 0.31454784, -0.35746509,
    0.28902304, 0.08183324, -0.16555229, 0.02286911,
    -0.13566875, 0.03034258, 0.48091322, -0.12528998,
    0.24077177, -0.51332325, -0.33502164, 0.10629296,
]
bw_recurrent_to_input_weights_data = fw_recurrent_to_input_weights_data

fw_recurrent_to_forget_weights_data = [
    -0.48684245, -0.06655136, 0.42224967, 0.2112639,
    0.27654213, 0.20864892, -0.07646349, 0.45877004,
    0.00141793, -0.14609534, 0.36447752, 0.09196436,
    0.28053468, 0.01560611, -0.20127171, -0.01140004,
]
bw_recurrent_to_forget_weights_data = fw_recurrent_to_forget_weights_data

fw_recurrent_to_cell_weights_data = [
    -0.3407414, 0.24443203, -0.2078532, 0.26320225,
    0.05695659, -0.00123841, -0.4744786, -0.35869038,
    -0.06418842, -0.13502428, -0.501764, 0.22830659,
    -0.46367589, 0.26016325, -0.03894562, -0.16368064,
]
bw_recurrent_to_cell_weights_data = fw_recurrent_to_cell_weights_data

fw_recurrent_to_output_weights_data = [
    0.43385774, -0.17194885, 0.2718237, 0.09215671,
    0.24107647, -0.39835793, 0.18212086, 0.01301402,
    0.48572797, -0.50656658, 0.20047462, -0.20607421,
    -0.51818722, -0.15390486, 0.0468148, 0.39922136,
]
bw_recurrent_to_output_weights_data = fw_recurrent_to_output_weights_data

# Gate biases: zeros everywhere except the customary forget-gate bias of 1.
fw_input_gate_bias_data = [0.0] * 4
bw_input_gate_bias_data = [0.0] * 4

fw_forget_gate_bias_data = [1.0] * 4
bw_forget_gate_bias_data = [1.0] * 4

fw_cell_bias_data = [0.0] * 4
bw_cell_bias_data = [0.0] * 4

fw_output_gate_bias_data = [0.0] * 4
bw_output_gate_bias_data = [0.0] * 4

# One batch, three time steps of two features each.
input_data = [2.0, 3.0, 3.0, 4.0, 1.0, 1.0]

# Initial activation and cell states start at zero.
fw_activation_state_data = [0] * (n_batch * n_output)
bw_activation_state_data = [0] * (n_batch * n_output)

fw_cell_state_data = [0] * (n_batch * n_cell)
bw_cell_state_data = [0] * (n_batch * n_cell)

# Expected per-direction outputs, [n_batch, max_time, n_output] flattened.
fw_golden_output_data = [
    -0.02973187, 0.1229473, 0.20885126, -0.15358765,
    -0.03716109, 0.12507336, 0.41193449, -0.20860538,
    -0.15053082, 0.09120187, 0.24278517, -0.12222792,
]
bw_golden_output_data = [
    -0.0806187, 0.139077, 0.400476, -0.197842,
    -0.0332076, 0.123838, 0.309777, -0.17621,
    -0.0490733, 0.0739237, 0.067706, -0.0208124,
]
+
+
# Run the blackbox case.  Peephole, projection, aux-input and layer-norm
# data are deliberately omitted so those optional operands stay empty.
test(
    name="blackbox",
    input_data=input_data,
    fw_input_to_input_weights_data=fw_input_to_input_weights_data,
    fw_input_to_forget_weights_data=fw_input_to_forget_weights_data,
    fw_input_to_cell_weights_data=fw_input_to_cell_weights_data,
    fw_input_to_output_weights_data=fw_input_to_output_weights_data,
    fw_recurrent_to_input_weights_data=fw_recurrent_to_input_weights_data,
    fw_recurrent_to_forget_weights_data=fw_recurrent_to_forget_weights_data,
    fw_recurrent_to_cell_weights_data=fw_recurrent_to_cell_weights_data,
    fw_recurrent_to_output_weights_data=fw_recurrent_to_output_weights_data,
    fw_input_gate_bias_data=fw_input_gate_bias_data,
    fw_forget_gate_bias_data=fw_forget_gate_bias_data,
    fw_cell_bias_data=fw_cell_bias_data,
    fw_output_gate_bias_data=fw_output_gate_bias_data,
    bw_input_to_input_weights_data=bw_input_to_input_weights_data,
    bw_input_to_forget_weights_data=bw_input_to_forget_weights_data,
    bw_input_to_cell_weights_data=bw_input_to_cell_weights_data,
    bw_input_to_output_weights_data=bw_input_to_output_weights_data,
    bw_recurrent_to_input_weights_data=bw_recurrent_to_input_weights_data,
    bw_recurrent_to_forget_weights_data=bw_recurrent_to_forget_weights_data,
    bw_recurrent_to_cell_weights_data=bw_recurrent_to_cell_weights_data,
    bw_recurrent_to_output_weights_data=bw_recurrent_to_output_weights_data,
    bw_input_gate_bias_data=bw_input_gate_bias_data,
    bw_forget_gate_bias_data=bw_forget_gate_bias_data,
    bw_cell_bias_data=bw_cell_bias_data,
    bw_output_gate_bias_data=bw_output_gate_bias_data,
    fw_activation_state_data=fw_activation_state_data,
    bw_activation_state_data=bw_activation_state_data,
    fw_cell_state_data=fw_cell_state_data,
    bw_cell_state_data=bw_cell_state_data,
    fw_output_data=fw_golden_output_data,
    bw_output_data=bw_golden_output_data,
)
diff --git a/tests/nnapi/specs/skip/V1_2/bidirectional_sequence_lstm_float16_batch_major_aux_input.mod.py b/tests/nnapi/specs/skip/V1_2/bidirectional_sequence_lstm_float16_batch_major_aux_input.mod.py
new file mode 100644
index 000000000..d73da9c6e
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/bidirectional_sequence_lstm_float16_batch_major_aux_input.mod.py
@@ -0,0 +1,483 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Bidirectional Sequence LSTM Test:
+# FLOAT16, Batch Major, Aux Input, No Layer Normalization, No Cifg, No Peephole, No Projection,
+# and No Clipping.
+#
+# Adapted from TFLite's LSTMOpTest.BlackBoxTestWithAuxInput.
+
+n_batch = 1
+n_input = 2
+n_cell = 4
+n_output = 4
+max_time = 3
+
+input = Input("input", "TENSOR_FLOAT16", "{{{}, {}, {}}}".format(n_batch, max_time, n_input))
+
+fw_input_to_input_weights = Input(
+ "fw_input_to_input_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
+fw_input_to_forget_weights = Input(
+ "fw_input_to_forget_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
+fw_input_to_cell_weights = Input(
+ "fw_input_to_cell_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
+fw_input_to_output_weights = Input(
+ "fw_input_to_output_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
+
+fw_recurrent_to_input_weights = Input(
+ "fw_recurrent_to_input_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_output))
+fw_recurrent_to_forget_weights = Input(
+ "fw_recurrent_to_forget_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_output))
+fw_recurrent_to_cell_weights = Input(
+ "fw_recurrent_to_cell_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_output))
+fw_recurrent_to_output_weights = Input(
+ "fw_recurrent_to_output_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_output))
+
+fw_cell_to_input_weights = Input(
+ "fw_cell_to_input_weights", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))
+fw_cell_to_forget_weights = Input(
+ "fw_cell_to_forget_weights", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))
+fw_cell_to_output_weights = Input(
+ "fw_cell_to_output_weights", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))
+
+fw_input_gate_bias = Input(
+ "fw_input_gate_bias", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))
+fw_forget_gate_bias = Input(
+ "fw_forget_gate_bias", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))
+fw_cell_bias = Input(
+ "fw_cell_bias", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))
+fw_output_gate_bias = Input(
+ "fw_output_gate_bias", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))
+
+fw_projection_weights = Input(
+ "fw_projection_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_output, n_cell))
+fw_projection_bias = Input(
+ "fw_projection_bias", "TENSOR_FLOAT16", "{{{}}}".format(n_output))
+
+bw_input_to_input_weights = Input(
+ "bw_input_to_input_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
+bw_input_to_forget_weights = Input(
+ "bw_input_to_forget_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
+bw_input_to_cell_weights = Input(
+ "bw_input_to_cell_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
+bw_input_to_output_weights = Input(
+ "bw_input_to_output_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
+
+bw_recurrent_to_input_weights = Input(
+ "bw_recurrent_to_input_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_output))
+bw_recurrent_to_forget_weights = Input(
+ "bw_recurrent_to_forget_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_output))
+bw_recurrent_to_cell_weights = Input(
+ "bw_recurrent_to_cell_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_output))
+bw_recurrent_to_output_weights = Input(
+ "bw_recurrent_to_output_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_output))
+
+bw_cell_to_input_weights = Input(
+ "bw_cell_to_input_weights", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))
+bw_cell_to_forget_weights = Input(
+ "bw_cell_to_forget_weights", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))
+bw_cell_to_output_weights = Input(
+ "bw_cell_to_output_weights", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))
+
+bw_input_gate_bias = Input(
+ "bw_input_gate_bias", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))
+bw_forget_gate_bias = Input(
+ "bw_forget_gate_bias", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))
+bw_cell_bias = Input(
+ "bw_cell_bias", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))
+bw_output_gate_bias = Input(
+ "bw_output_gate_bias", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))
+
+bw_projection_weights = Input(
+ "bw_projection_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_output, n_cell))
+bw_projection_bias = Input(
+ "bw_projection_bias", "TENSOR_FLOAT16", "{{{}}}".format(n_output))
+
+fw_activation_state = Input(
+ "fw_activatiom_state", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_batch, n_output))
+fw_cell_state = Input(
+ "fw_cell_state", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_batch, n_cell))
+
+bw_activation_state = Input(
+ "bw_activatiom_state", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_batch, n_output))
+bw_cell_state = Input(
+ "bw_cell_state", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_batch, n_cell))
+
+aux_input = Input("input", "TENSOR_FLOAT16", "{{{}, {}, {}}}".format(n_batch, max_time, n_input))
+
+fw_aux_input_to_input_weights = Input(
+ "fw_aux_input_to_input_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
+fw_aux_input_to_forget_weights = Input(
+ "fw_input_to_forget_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
+fw_aux_input_to_cell_weights = Input(
+ "fw_aux_input_to_cell_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
+fw_aux_input_to_output_weights = Input(
+ "fw_aux_input_to_output_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
+
+bw_aux_input_to_input_weights = Input(
+ "bw_aux_input_to_input_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
+bw_aux_input_to_forget_weights = Input(
+ "bw_input_to_forget_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
+bw_aux_input_to_cell_weights = Input(
+ "bw_aux_input_to_cell_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
+bw_aux_input_to_output_weights = Input(
+ "bw_aux_input_to_output_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
+
+fw_input_layer_norm_weights = Input("input_layer_norm_weights", "TENSOR_FLOAT16", "{%d}" % n_cell)
+fw_forget_layer_norm_weights = Input("forget_layer_norm_weights", "TENSOR_FLOAT16", "{%d}" % n_cell)
+fw_cell_layer_norm_weights = Input("cell_layer_norm_weights", "TENSOR_FLOAT16", "{%d}" % n_cell)
+fw_output_layer_norm_weights = Input("output_layer_norm_weights", "TENSOR_FLOAT16", "{%d}" % n_cell)
+
+bw_input_layer_norm_weights = Input("input_layer_norm_weights", "TENSOR_FLOAT16", "{%d}" % n_cell)
+bw_forget_layer_norm_weights = Input("forget_layer_norm_weights", "TENSOR_FLOAT16", "{%d}" % n_cell)
+bw_cell_layer_norm_weights = Input("cell_layer_norm_weights", "TENSOR_FLOAT16", "{%d}" % n_cell)
+bw_output_layer_norm_weights = Input("output_layer_norm_weights", "TENSOR_FLOAT16", "{%d}" % n_cell)
+
+fw_output=Output("fw_output", "TENSOR_FLOAT16", "{{{}, {}, {}}}".format(n_batch, max_time, n_output))
+bw_output=Output("bw_output", "TENSOR_FLOAT16", "{{{}, {}, {}}}".format(n_batch, max_time, n_output))
+
+def test(
+ name,
+ input_data=[],
+ fw_input_to_input_weights_data=[],
+ fw_input_to_forget_weights_data=[],
+ fw_input_to_cell_weights_data=[],
+ fw_input_to_output_weights_data=[],
+ fw_recurrent_to_input_weights_data=[],
+ fw_recurrent_to_forget_weights_data=[],
+ fw_recurrent_to_cell_weights_data=[],
+ fw_recurrent_to_output_weights_data=[],
+ fw_cell_to_input_weights_data=[],
+ fw_cell_to_forget_weights_data=[],
+ fw_cell_to_output_weights_data=[],
+ fw_input_gate_bias_data=[],
+ fw_forget_gate_bias_data=[],
+ fw_cell_bias_data=[],
+ fw_output_gate_bias_data=[],
+ fw_projection_weights_data=[],
+ fw_projection_bias_data=[],
+ bw_input_to_input_weights_data=[],
+ bw_input_to_forget_weights_data=[],
+ bw_input_to_cell_weights_data=[],
+ bw_input_to_output_weights_data=[],
+ bw_recurrent_to_input_weights_data=[],
+ bw_recurrent_to_forget_weights_data=[],
+ bw_recurrent_to_cell_weights_data=[],
+ bw_recurrent_to_output_weights_data=[],
+ bw_cell_to_input_weights_data=[],
+ bw_cell_to_forget_weights_data=[],
+ bw_cell_to_output_weights_data=[],
+ bw_input_gate_bias_data=[],
+ bw_forget_gate_bias_data=[],
+ bw_cell_bias_data=[],
+ bw_output_gate_bias_data=[],
+ bw_projection_weights_data=[],
+ bw_projection_bias_data=[],
+ fw_activation_state_data=[],
+ fw_cell_state_data=[],
+ bw_activation_state_data=[],
+ bw_cell_state_data=[],
+ aux_input_data=[],
+ fw_aux_input_to_input_weights_data=[],
+ fw_aux_input_to_forget_weights_data=[],
+ fw_aux_input_to_cell_weights_data=[],
+ fw_aux_input_to_output_weights_data=[],
+ bw_aux_input_to_input_weights_data=[],
+ bw_aux_input_to_forget_weights_data=[],
+ bw_aux_input_to_cell_weights_data=[],
+ bw_aux_input_to_output_weights_data=[],
+ fw_input_layer_norm_weights_data=[],
+ fw_forget_layer_norm_weights_data=[],
+ fw_cell_layer_norm_weights_data=[],
+ fw_output_layer_norm_weights_data=[],
+ bw_input_layer_norm_weights_data=[],
+ bw_forget_layer_norm_weights_data=[],
+ bw_cell_layer_norm_weights_data=[],
+ bw_output_layer_norm_weights_data=[],
+ fw_output_data=[],
+ bw_output_data=[],):
+
+ activation = Int32Scalar("activation", 4) # Tanh
+ cell_clip = Float16Scalar("cell_clip", 0.0)
+ proj_clip = Float16Scalar("proj_clip", 0.0)
+ merge_outputs = BoolScalar("merge_outputs", False)
+ time_major = BoolScalar("time_major", False)
+
+ model = Model().Operation(
+ "BIDIRECTIONAL_SEQUENCE_LSTM",
+ input,
+ fw_input_to_input_weights,
+ fw_input_to_forget_weights,
+ fw_input_to_cell_weights,
+ fw_input_to_output_weights,
+ fw_recurrent_to_input_weights,
+ fw_recurrent_to_forget_weights,
+ fw_recurrent_to_cell_weights,
+ fw_recurrent_to_output_weights,
+ fw_cell_to_input_weights,
+ fw_cell_to_forget_weights,
+ fw_cell_to_output_weights,
+ fw_input_gate_bias,
+ fw_forget_gate_bias,
+ fw_cell_bias,
+ fw_output_gate_bias,
+ fw_projection_weights,
+ fw_projection_bias,
+ bw_input_to_input_weights,
+ bw_input_to_forget_weights,
+ bw_input_to_cell_weights,
+ bw_input_to_output_weights,
+ bw_recurrent_to_input_weights,
+ bw_recurrent_to_forget_weights,
+ bw_recurrent_to_cell_weights,
+ bw_recurrent_to_output_weights,
+ bw_cell_to_input_weights,
+ bw_cell_to_forget_weights,
+ bw_cell_to_output_weights,
+ bw_input_gate_bias,
+ bw_forget_gate_bias,
+ bw_cell_bias,
+ bw_output_gate_bias,
+ bw_projection_weights,
+ bw_projection_bias,
+ fw_activation_state,
+ fw_cell_state,
+ bw_activation_state,
+ bw_cell_state,
+ aux_input,
+ fw_aux_input_to_input_weights,
+ fw_aux_input_to_forget_weights,
+ fw_aux_input_to_cell_weights,
+ fw_aux_input_to_output_weights,
+ bw_aux_input_to_input_weights,
+ bw_aux_input_to_forget_weights,
+ bw_aux_input_to_cell_weights,
+ bw_aux_input_to_output_weights,
+ activation, cell_clip, proj_clip, merge_outputs, time_major,
+ fw_input_layer_norm_weights,
+ fw_forget_layer_norm_weights,
+ fw_cell_layer_norm_weights,
+ fw_output_layer_norm_weights,
+ bw_input_layer_norm_weights,
+ bw_forget_layer_norm_weights,
+ bw_cell_layer_norm_weights,
+ bw_output_layer_norm_weights,).To(fw_output, bw_output)
+
+ example = Example(
+ {
+ input: input_data,
+ fw_input_to_input_weights: fw_input_to_input_weights_data,
+ fw_input_to_forget_weights: fw_input_to_forget_weights_data,
+ fw_input_to_cell_weights: fw_input_to_cell_weights_data,
+ fw_input_to_output_weights: fw_input_to_output_weights_data,
+ fw_recurrent_to_input_weights: fw_recurrent_to_input_weights_data,
+ fw_recurrent_to_forget_weights: fw_recurrent_to_forget_weights_data,
+ fw_recurrent_to_cell_weights: fw_recurrent_to_cell_weights_data,
+ fw_recurrent_to_output_weights: fw_recurrent_to_output_weights_data,
+ fw_cell_to_input_weights: fw_cell_to_input_weights_data,
+ fw_cell_to_forget_weights: fw_cell_to_forget_weights_data,
+ fw_cell_to_output_weights: fw_cell_to_output_weights_data,
+ fw_input_gate_bias: fw_input_gate_bias_data,
+ fw_forget_gate_bias: fw_forget_gate_bias_data,
+ fw_cell_bias: fw_cell_bias_data,
+ fw_output_gate_bias: fw_output_gate_bias_data,
+ fw_projection_weights: fw_projection_weights_data,
+ fw_projection_bias: fw_projection_bias_data,
+ bw_input_to_input_weights: bw_input_to_input_weights_data,
+ bw_input_to_forget_weights: bw_input_to_forget_weights_data,
+ bw_input_to_cell_weights: bw_input_to_cell_weights_data,
+ bw_input_to_output_weights: bw_input_to_output_weights_data,
+ bw_recurrent_to_input_weights: bw_recurrent_to_input_weights_data,
+ bw_recurrent_to_forget_weights: bw_recurrent_to_forget_weights_data,
+ bw_recurrent_to_cell_weights: bw_recurrent_to_cell_weights_data,
+ bw_recurrent_to_output_weights: bw_recurrent_to_output_weights_data,
+ bw_cell_to_input_weights: bw_cell_to_input_weights_data,
+ bw_cell_to_forget_weights: bw_cell_to_forget_weights_data,
+ bw_cell_to_output_weights: bw_cell_to_output_weights_data,
+ bw_input_gate_bias: bw_input_gate_bias_data,
+ bw_forget_gate_bias: bw_forget_gate_bias_data,
+ bw_cell_bias: bw_cell_bias_data,
+ bw_output_gate_bias: bw_output_gate_bias_data,
+ bw_projection_weights: bw_projection_weights_data,
+ bw_projection_bias: bw_projection_bias_data,
+ fw_activation_state: fw_activation_state_data,
+ fw_cell_state: fw_cell_state_data,
+ bw_activation_state: bw_activation_state_data,
+ bw_cell_state: bw_cell_state_data,
+ aux_input: aux_input_data,
+ fw_aux_input_to_input_weights: fw_aux_input_to_input_weights_data,
+ fw_aux_input_to_forget_weights: fw_aux_input_to_forget_weights_data,
+ fw_aux_input_to_cell_weights: fw_aux_input_to_cell_weights_data,
+ fw_aux_input_to_output_weights: fw_aux_input_to_output_weights_data,
+ bw_aux_input_to_input_weights: bw_aux_input_to_input_weights_data,
+ bw_aux_input_to_forget_weights: bw_aux_input_to_forget_weights_data,
+ bw_aux_input_to_cell_weights: bw_aux_input_to_cell_weights_data,
+ bw_aux_input_to_output_weights: bw_aux_input_to_output_weights_data,
+ fw_input_layer_norm_weights: fw_input_layer_norm_weights_data,
+ fw_forget_layer_norm_weights: fw_forget_layer_norm_weights_data,
+ fw_cell_layer_norm_weights: fw_cell_layer_norm_weights_data,
+ fw_output_layer_norm_weights: fw_output_layer_norm_weights_data,
+ bw_input_layer_norm_weights: bw_input_layer_norm_weights_data,
+ bw_forget_layer_norm_weights: bw_forget_layer_norm_weights_data,
+ bw_cell_layer_norm_weights: bw_cell_layer_norm_weights_data,
+ bw_output_layer_norm_weights: bw_output_layer_norm_weights_data,
+ fw_output: fw_output_data,
+ bw_output: bw_output_data,
+ },
+ model=model, name=name)
+
+
+fw_input_to_input_weights_data = [
+ -0.45018822, -0.02338299, -0.0870589,
+ -0.34550029, 0.04266912, -0.15680569,
+ -0.34856534, 0.43890524
+]
+bw_input_to_input_weights_data = fw_input_to_input_weights_data
+
+fw_input_to_forget_weights_data = [
+ 0.09701663, 0.20334584, -0.50592935,
+ -0.31343272, -0.40032279, 0.44781327,
+ 0.01387155, -0.35593212
+]
+bw_input_to_forget_weights_data = fw_input_to_forget_weights_data
+
+fw_input_to_cell_weights_data = [
+ -0.50013041, 0.1370284, 0.11810488, 0.2013163,
+ -0.20583314, 0.44344562, 0.22077113,
+ -0.29909778
+]
+bw_input_to_cell_weights_data = fw_input_to_cell_weights_data
+
+fw_input_to_output_weights_data = [
+ -0.25065863, -0.28290087, 0.04613829,
+ 0.40525138, 0.44272184, 0.03897077, -0.1556896,
+ 0.19487578
+]
+bw_input_to_output_weights_data = fw_input_to_output_weights_data
+
+fw_recurrent_to_input_weights_data = [
+ -0.0063535, -0.2042388, 0.31454784, -0.35746509, 0.28902304, 0.08183324,
+ -0.16555229, 0.02286911, -0.13566875, 0.03034258, 0.48091322,
+ -0.12528998, 0.24077177, -0.51332325, -0.33502164, 0.10629296
+]
+bw_recurrent_to_input_weights_data = fw_recurrent_to_input_weights_data
+
+fw_recurrent_to_forget_weights_data = [
+ -0.48684245, -0.06655136, 0.42224967, 0.2112639, 0.27654213, 0.20864892,
+ -0.07646349, 0.45877004, 0.00141793, -0.14609534, 0.36447752, 0.09196436,
+ 0.28053468, 0.01560611, -0.20127171, -0.01140004
+]
+bw_recurrent_to_forget_weights_data = fw_recurrent_to_forget_weights_data
+
+fw_recurrent_to_cell_weights_data = [
+ -0.3407414, 0.24443203, -0.2078532, 0.26320225, 0.05695659, -0.00123841,
+ -0.4744786, -0.35869038, -0.06418842, -0.13502428, -0.501764, 0.22830659,
+ -0.46367589, 0.26016325, -0.03894562, -0.16368064
+]
+bw_recurrent_to_cell_weights_data = fw_recurrent_to_cell_weights_data
+
+fw_recurrent_to_output_weights_data = [
+ 0.43385774, -0.17194885, 0.2718237, 0.09215671, 0.24107647, -0.39835793,
+ 0.18212086, 0.01301402, 0.48572797, -0.50656658, 0.20047462, -0.20607421,
+ -0.51818722, -0.15390486, 0.0468148, 0.39922136
+]
+bw_recurrent_to_output_weights_data = fw_recurrent_to_output_weights_data
+
+fw_input_gate_bias_data = [0.0, 0.0, 0.0, 0.0]
+bw_input_gate_bias_data = fw_input_gate_bias_data
+
+fw_forget_gate_bias_data = [1.0, 1.0, 1.0, 1.0]
+bw_forget_gate_bias_data = fw_forget_gate_bias_data
+
+fw_cell_bias_data = [0.0, 0.0, 0.0, 0.0]
+bw_cell_bias_data = fw_cell_bias_data
+
+fw_output_gate_bias_data = [0.0, 0.0, 0.0, 0.0]
+bw_output_gate_bias_data = fw_output_gate_bias_data
+
+input_data = [2.0, 3.0, 3.0, 4.0, 1.0, 1.0]
+aux_input_data = input_data
+
+fw_aux_input_to_input_weights_data = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]
+bw_aux_input_to_input_weights_data = fw_aux_input_to_input_weights_data
+fw_aux_input_to_forget_weights_data = [0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 1.0]
+bw_aux_input_to_forget_weights_data = fw_aux_input_to_forget_weights_data
+fw_aux_input_to_cell_weights_data = [0.5, 0.6, 0.7, 0.8, 0.5, 0.6, 0.7, 0.8]
+bw_aux_input_to_cell_weights_data = fw_aux_input_to_cell_weights_data
+fw_aux_input_to_output_weights_data = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]
+bw_aux_input_to_output_weights_data = fw_aux_input_to_output_weights_data
+
+fw_activation_state_data = [0 for _ in range(n_batch * n_output)]
+bw_activation_state_data = [0 for _ in range(n_batch * n_output)]
+
+fw_cell_state_data = [0 for _ in range(n_batch * n_cell)]
+bw_cell_state_data = [0 for _ in range(n_batch * n_cell)]
+
+fw_golden_output_data = [
+ 0.153335, 0.542754, 0.708602, 0.742855,
+ 0.247581, 0.835739, 0.947797, 0.958177,
+ 0.410892, 0.672268, 0.761909, 0.829133
+]
+bw_golden_output_data = [
+ 0.342275, 0.883431, 0.955930, 0.975621,
+ 0.204939, 0.806858, 0.914849, 0.934871,
+ 0.123236, 0.373087, 0.465377, 0.517630
+]
+
+
+test(
+ name="blackbox",
+ input_data=input_data,
+ fw_input_to_input_weights_data=fw_input_to_input_weights_data,
+ fw_input_to_forget_weights_data=fw_input_to_forget_weights_data,
+ fw_input_to_cell_weights_data=fw_input_to_cell_weights_data,
+ fw_input_to_output_weights_data=fw_input_to_output_weights_data,
+ fw_recurrent_to_input_weights_data=fw_recurrent_to_input_weights_data,
+ fw_recurrent_to_forget_weights_data=fw_recurrent_to_forget_weights_data,
+ fw_recurrent_to_cell_weights_data=fw_recurrent_to_cell_weights_data,
+ fw_recurrent_to_output_weights_data=fw_recurrent_to_output_weights_data,
+ fw_input_gate_bias_data=fw_input_gate_bias_data,
+ fw_forget_gate_bias_data=fw_forget_gate_bias_data,
+ fw_cell_bias_data=fw_cell_bias_data,
+ fw_output_gate_bias_data=fw_output_gate_bias_data,
+ bw_input_to_input_weights_data=bw_input_to_input_weights_data,
+ bw_input_to_forget_weights_data=bw_input_to_forget_weights_data,
+ bw_input_to_cell_weights_data=bw_input_to_cell_weights_data,
+ bw_input_to_output_weights_data=bw_input_to_output_weights_data,
+ bw_recurrent_to_input_weights_data=bw_recurrent_to_input_weights_data,
+ bw_recurrent_to_forget_weights_data=bw_recurrent_to_forget_weights_data,
+ bw_recurrent_to_cell_weights_data=bw_recurrent_to_cell_weights_data,
+ bw_recurrent_to_output_weights_data=bw_recurrent_to_output_weights_data,
+ bw_input_gate_bias_data=bw_input_gate_bias_data,
+ bw_forget_gate_bias_data=bw_forget_gate_bias_data,
+ bw_cell_bias_data=bw_cell_bias_data,
+ bw_output_gate_bias_data=bw_output_gate_bias_data,
+ fw_activation_state_data = fw_activation_state_data,
+ bw_activation_state_data = bw_activation_state_data,
+ fw_cell_state_data = fw_cell_state_data,
+ bw_cell_state_data = bw_cell_state_data,
+ aux_input_data = aux_input_data,
+ fw_aux_input_to_input_weights_data=fw_aux_input_to_input_weights_data,
+ fw_aux_input_to_forget_weights_data=fw_aux_input_to_forget_weights_data,
+ fw_aux_input_to_cell_weights_data=fw_aux_input_to_cell_weights_data,
+ fw_aux_input_to_output_weights_data=fw_aux_input_to_output_weights_data,
+ bw_aux_input_to_input_weights_data=bw_aux_input_to_input_weights_data,
+ bw_aux_input_to_forget_weights_data=bw_aux_input_to_forget_weights_data,
+ bw_aux_input_to_cell_weights_data=bw_aux_input_to_cell_weights_data,
+ bw_aux_input_to_output_weights_data=bw_aux_input_to_output_weights_data,
+ fw_output_data=fw_golden_output_data,
+ bw_output_data=bw_golden_output_data
+)
diff --git a/tests/nnapi/specs/skip/V1_2/bidirectional_sequence_lstm_float16_batch_major_merge_outputs.mod.py b/tests/nnapi/specs/skip/V1_2/bidirectional_sequence_lstm_float16_batch_major_merge_outputs.mod.py
new file mode 100644
index 000000000..cbf2ebdb7
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/bidirectional_sequence_lstm_float16_batch_major_merge_outputs.mod.py
@@ -0,0 +1,454 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Bidirectional Sequence LSTM Test:
+# FLOAT16, Batch Major, Merge Outputs.
+# No Layer Normalization, No Cifg, No Peephole, No Projection, and No Clipping.
+
+n_batch = 1
+n_input = 2
+n_cell = 4
+n_output = 4
+max_time = 3
+
+input = Input("input", "TENSOR_FLOAT16", "{{{}, {}, {}}}".format(n_batch, max_time, n_input))
+
+fw_input_to_input_weights = Input(
+ "fw_input_to_input_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
+fw_input_to_forget_weights = Input(
+ "fw_input_to_forget_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
+fw_input_to_cell_weights = Input(
+ "fw_input_to_cell_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
+fw_input_to_output_weights = Input(
+ "fw_input_to_output_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
+
+fw_recurrent_to_input_weights = Input(
+ "fw_recurrent_to_input_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_output))
+fw_recurrent_to_forget_weights = Input(
+ "fw_recurrent_to_forget_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_output))
+fw_recurrent_to_cell_weights = Input(
+ "fw_recurrent_to_cell_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_output))
+fw_recurrent_to_output_weights = Input(
+ "fw_recurrent_to_output_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_output))
+
+fw_cell_to_input_weights = Input(
+ "fw_cell_to_input_weights", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))
+fw_cell_to_forget_weights = Input(
+ "fw_cell_to_forget_weights", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))
+fw_cell_to_output_weights = Input(
+ "fw_cell_to_output_weights", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))
+
+fw_input_gate_bias = Input(
+ "fw_input_gate_bias", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))
+fw_forget_gate_bias = Input(
+ "fw_forget_gate_bias", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))
+fw_cell_bias = Input(
+ "fw_cell_bias", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))
+fw_output_gate_bias = Input(
+ "fw_output_gate_bias", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))
+
+fw_projection_weights = Input(
+ "fw_projection_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_output, n_cell))
+fw_projection_bias = Input(
+ "fw_projection_bias", "TENSOR_FLOAT16", "{{{}}}".format(n_output))
+
+bw_input_to_input_weights = Input(
+ "bw_input_to_input_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
+bw_input_to_forget_weights = Input(
+ "bw_input_to_forget_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
+bw_input_to_cell_weights = Input(
+ "bw_input_to_cell_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
+bw_input_to_output_weights = Input(
+ "bw_input_to_output_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
+
+bw_recurrent_to_input_weights = Input(
+ "bw_recurrent_to_input_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_output))
+bw_recurrent_to_forget_weights = Input(
+ "bw_recurrent_to_forget_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_output))
+bw_recurrent_to_cell_weights = Input(
+ "bw_recurrent_to_cell_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_output))
+bw_recurrent_to_output_weights = Input(
+ "bw_recurrent_to_output_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_output))
+
+bw_cell_to_input_weights = Input(
+ "bw_cell_to_input_weights", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))
+bw_cell_to_forget_weights = Input(
+ "bw_cell_to_forget_weights", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))
+bw_cell_to_output_weights = Input(
+ "bw_cell_to_output_weights", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))
+
+bw_input_gate_bias = Input(
+ "bw_input_gate_bias", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))
+bw_forget_gate_bias = Input(
+ "bw_forget_gate_bias", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))
+bw_cell_bias = Input(
+ "bw_cell_bias", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))
+bw_output_gate_bias = Input(
+ "bw_output_gate_bias", "TENSOR_FLOAT16", "{{{}}}".format(n_cell))
+
+bw_projection_weights = Input(
+ "bw_projection_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_output, n_cell))
+bw_projection_bias = Input(
+ "bw_projection_bias", "TENSOR_FLOAT16", "{{{}}}".format(n_output))
+
+fw_activation_state = Input(
+ "fw_activatiom_state", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_batch, n_output))
+fw_cell_state = Input(
+ "fw_cell_state", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_batch, n_cell))
+
+bw_activation_state = Input(
+ "bw_activatiom_state", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_batch, n_output))
+bw_cell_state = Input(
+ "bw_cell_state", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_batch, n_cell))
+
+aux_input = Input("input", "TENSOR_FLOAT16", "{{{}, {}, {}}}".format(n_batch, max_time, n_input))
+
+fw_aux_input_to_input_weights = Input(
+ "fw_aux_input_to_input_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
+fw_aux_input_to_forget_weights = Input(
+ "fw_input_to_forget_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
+fw_aux_input_to_cell_weights = Input(
+ "fw_aux_input_to_cell_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
+fw_aux_input_to_output_weights = Input(
+ "fw_aux_input_to_output_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
+
+bw_aux_input_to_input_weights = Input(
+ "bw_aux_input_to_input_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
+bw_aux_input_to_forget_weights = Input(
+ "bw_input_to_forget_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
+bw_aux_input_to_cell_weights = Input(
+ "bw_aux_input_to_cell_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
+bw_aux_input_to_output_weights = Input(
+ "bw_aux_input_to_output_weights", "TENSOR_FLOAT16", "{{{}, {}}}".format(n_cell, n_input))
+
+fw_input_layer_norm_weights = Input("input_layer_norm_weights", "TENSOR_FLOAT16", "{%d}" % n_cell)
+fw_forget_layer_norm_weights = Input("forget_layer_norm_weights", "TENSOR_FLOAT16", "{%d}" % n_cell)
+fw_cell_layer_norm_weights = Input("cell_layer_norm_weights", "TENSOR_FLOAT16", "{%d}" % n_cell)
+fw_output_layer_norm_weights = Input("output_layer_norm_weights", "TENSOR_FLOAT16", "{%d}" % n_cell)
+
+bw_input_layer_norm_weights = Input("input_layer_norm_weights", "TENSOR_FLOAT16", "{%d}" % n_cell)
+bw_forget_layer_norm_weights = Input("forget_layer_norm_weights", "TENSOR_FLOAT16", "{%d}" % n_cell)
+bw_cell_layer_norm_weights = Input("cell_layer_norm_weights", "TENSOR_FLOAT16", "{%d}" % n_cell)
+bw_output_layer_norm_weights = Input("output_layer_norm_weights", "TENSOR_FLOAT16", "{%d}" % n_cell)
+
+fw_output=Output("fw_output", "TENSOR_FLOAT16", "{{{}, {}, {}}}".format(n_batch, max_time,
+ 2 * n_output))
+
+def test(
+ name,
+ input_data=[],
+ fw_input_to_input_weights_data=[],
+ fw_input_to_forget_weights_data=[],
+ fw_input_to_cell_weights_data=[],
+ fw_input_to_output_weights_data=[],
+ fw_recurrent_to_input_weights_data=[],
+ fw_recurrent_to_forget_weights_data=[],
+ fw_recurrent_to_cell_weights_data=[],
+ fw_recurrent_to_output_weights_data=[],
+ fw_cell_to_input_weights_data=[],
+ fw_cell_to_forget_weights_data=[],
+ fw_cell_to_output_weights_data=[],
+ fw_input_gate_bias_data=[],
+ fw_forget_gate_bias_data=[],
+ fw_cell_bias_data=[],
+ fw_output_gate_bias_data=[],
+ fw_projection_weights_data=[],
+ fw_projection_bias_data=[],
+ bw_input_to_input_weights_data=[],
+ bw_input_to_forget_weights_data=[],
+ bw_input_to_cell_weights_data=[],
+ bw_input_to_output_weights_data=[],
+ bw_recurrent_to_input_weights_data=[],
+ bw_recurrent_to_forget_weights_data=[],
+ bw_recurrent_to_cell_weights_data=[],
+ bw_recurrent_to_output_weights_data=[],
+ bw_cell_to_input_weights_data=[],
+ bw_cell_to_forget_weights_data=[],
+ bw_cell_to_output_weights_data=[],
+ bw_input_gate_bias_data=[],
+ bw_forget_gate_bias_data=[],
+ bw_cell_bias_data=[],
+ bw_output_gate_bias_data=[],
+ bw_projection_weights_data=[],
+ bw_projection_bias_data=[],
+ fw_activation_state_data=[],
+ fw_cell_state_data=[],
+ bw_activation_state_data=[],
+ bw_cell_state_data=[],
+ aux_input_data=[],
+ fw_aux_input_to_input_weights_data=[],
+ fw_aux_input_to_forget_weights_data=[],
+ fw_aux_input_to_cell_weights_data=[],
+ fw_aux_input_to_output_weights_data=[],
+ bw_aux_input_to_input_weights_data=[],
+ bw_aux_input_to_forget_weights_data=[],
+ bw_aux_input_to_cell_weights_data=[],
+ bw_aux_input_to_output_weights_data=[],
+ fw_input_layer_norm_weights_data=[],
+ fw_forget_layer_norm_weights_data=[],
+ fw_cell_layer_norm_weights_data=[],
+ fw_output_layer_norm_weights_data=[],
+ bw_input_layer_norm_weights_data=[],
+ bw_forget_layer_norm_weights_data=[],
+ bw_cell_layer_norm_weights_data=[],
+ bw_output_layer_norm_weights_data=[],
+ fw_output_data=[]):
+
+ activation = Int32Scalar("activation", 4)
+ cell_clip = Float16Scalar("cell_clip", 0.0)
+ proj_clip = Float16Scalar("proj_clip", 0.0)
+ merge_outputs = BoolScalar("merge_outputs", True)
+ time_major = BoolScalar("time_major", False)
+
+ model = Model().Operation(
+ "BIDIRECTIONAL_SEQUENCE_LSTM",
+ input,
+ fw_input_to_input_weights,
+ fw_input_to_forget_weights,
+ fw_input_to_cell_weights,
+ fw_input_to_output_weights,
+ fw_recurrent_to_input_weights,
+ fw_recurrent_to_forget_weights,
+ fw_recurrent_to_cell_weights,
+ fw_recurrent_to_output_weights,
+ fw_cell_to_input_weights,
+ fw_cell_to_forget_weights,
+ fw_cell_to_output_weights,
+ fw_input_gate_bias,
+ fw_forget_gate_bias,
+ fw_cell_bias,
+ fw_output_gate_bias,
+ fw_projection_weights,
+ fw_projection_bias,
+ bw_input_to_input_weights,
+ bw_input_to_forget_weights,
+ bw_input_to_cell_weights,
+ bw_input_to_output_weights,
+ bw_recurrent_to_input_weights,
+ bw_recurrent_to_forget_weights,
+ bw_recurrent_to_cell_weights,
+ bw_recurrent_to_output_weights,
+ bw_cell_to_input_weights,
+ bw_cell_to_forget_weights,
+ bw_cell_to_output_weights,
+ bw_input_gate_bias,
+ bw_forget_gate_bias,
+ bw_cell_bias,
+ bw_output_gate_bias,
+ bw_projection_weights,
+ bw_projection_bias,
+ fw_activation_state,
+ fw_cell_state,
+ bw_activation_state,
+ bw_cell_state,
+ aux_input,
+ fw_aux_input_to_input_weights,
+ fw_aux_input_to_forget_weights,
+ fw_aux_input_to_cell_weights,
+ fw_aux_input_to_output_weights,
+ bw_aux_input_to_input_weights,
+ bw_aux_input_to_forget_weights,
+ bw_aux_input_to_cell_weights,
+ bw_aux_input_to_output_weights,
+ activation, cell_clip, proj_clip, merge_outputs, time_major,
+ fw_input_layer_norm_weights,
+ fw_forget_layer_norm_weights,
+ fw_cell_layer_norm_weights,
+ fw_output_layer_norm_weights,
+ bw_input_layer_norm_weights,
+ bw_forget_layer_norm_weights,
+ bw_cell_layer_norm_weights,
+ bw_output_layer_norm_weights,).To(fw_output)
+
+ example = Example(
+ {
+ input: input_data,
+ fw_input_to_input_weights: fw_input_to_input_weights_data,
+ fw_input_to_forget_weights: fw_input_to_forget_weights_data,
+ fw_input_to_cell_weights: fw_input_to_cell_weights_data,
+ fw_input_to_output_weights: fw_input_to_output_weights_data,
+ fw_recurrent_to_input_weights: fw_recurrent_to_input_weights_data,
+ fw_recurrent_to_forget_weights: fw_recurrent_to_forget_weights_data,
+ fw_recurrent_to_cell_weights: fw_recurrent_to_cell_weights_data,
+ fw_recurrent_to_output_weights: fw_recurrent_to_output_weights_data,
+ fw_cell_to_input_weights: fw_cell_to_input_weights_data,
+ fw_cell_to_forget_weights: fw_cell_to_forget_weights_data,
+ fw_cell_to_output_weights: fw_cell_to_output_weights_data,
+ fw_input_gate_bias: fw_input_gate_bias_data,
+ fw_forget_gate_bias: fw_forget_gate_bias_data,
+ fw_cell_bias: fw_cell_bias_data,
+ fw_output_gate_bias: fw_output_gate_bias_data,
+ fw_projection_weights: fw_projection_weights_data,
+ fw_projection_bias: fw_projection_bias_data,
+ bw_input_to_input_weights: bw_input_to_input_weights_data,
+ bw_input_to_forget_weights: bw_input_to_forget_weights_data,
+ bw_input_to_cell_weights: bw_input_to_cell_weights_data,
+ bw_input_to_output_weights: bw_input_to_output_weights_data,
+ bw_recurrent_to_input_weights: bw_recurrent_to_input_weights_data,
+ bw_recurrent_to_forget_weights: bw_recurrent_to_forget_weights_data,
+ bw_recurrent_to_cell_weights: bw_recurrent_to_cell_weights_data,
+ bw_recurrent_to_output_weights: bw_recurrent_to_output_weights_data,
+ bw_cell_to_input_weights: bw_cell_to_input_weights_data,
+ bw_cell_to_forget_weights: bw_cell_to_forget_weights_data,
+ bw_cell_to_output_weights: bw_cell_to_output_weights_data,
+ bw_input_gate_bias: bw_input_gate_bias_data,
+ bw_forget_gate_bias: bw_forget_gate_bias_data,
+ bw_cell_bias: bw_cell_bias_data,
+ bw_output_gate_bias: bw_output_gate_bias_data,
+ bw_projection_weights: bw_projection_weights_data,
+ bw_projection_bias: bw_projection_bias_data,
+ fw_activation_state: fw_activation_state_data,
+ fw_cell_state: fw_cell_state_data,
+ bw_activation_state: bw_activation_state_data,
+ bw_cell_state: bw_cell_state_data,
+ aux_input: aux_input_data,
+ fw_aux_input_to_input_weights: fw_aux_input_to_input_weights_data,
+ fw_aux_input_to_forget_weights: fw_aux_input_to_forget_weights_data,
+ fw_aux_input_to_cell_weights: fw_aux_input_to_cell_weights_data,
+ fw_aux_input_to_output_weights: fw_aux_input_to_output_weights_data,
+ bw_aux_input_to_input_weights: bw_aux_input_to_input_weights_data,
+ bw_aux_input_to_forget_weights: bw_aux_input_to_forget_weights_data,
+ bw_aux_input_to_cell_weights: bw_aux_input_to_cell_weights_data,
+ bw_aux_input_to_output_weights: bw_aux_input_to_output_weights_data,
+ fw_input_layer_norm_weights: fw_input_layer_norm_weights_data,
+ fw_forget_layer_norm_weights: fw_forget_layer_norm_weights_data,
+ fw_cell_layer_norm_weights: fw_cell_layer_norm_weights_data,
+ fw_output_layer_norm_weights: fw_output_layer_norm_weights_data,
+ bw_input_layer_norm_weights: bw_input_layer_norm_weights_data,
+ bw_forget_layer_norm_weights: bw_forget_layer_norm_weights_data,
+ bw_cell_layer_norm_weights: bw_cell_layer_norm_weights_data,
+ bw_output_layer_norm_weights: bw_output_layer_norm_weights_data,
+ fw_output: fw_output_data,
+ },
+ model=model, name=name)
+
+
+fw_input_to_input_weights_data = [
+ -0.45018822, -0.02338299, -0.0870589,
+ -0.34550029, 0.04266912, -0.15680569,
+ -0.34856534, 0.43890524
+]
+bw_input_to_input_weights_data = fw_input_to_input_weights_data
+
+fw_input_to_forget_weights_data = [
+ 0.09701663, 0.20334584, -0.50592935,
+ -0.31343272, -0.40032279, 0.44781327,
+ 0.01387155, -0.35593212
+]
+bw_input_to_forget_weights_data = fw_input_to_forget_weights_data
+
+fw_input_to_cell_weights_data = [
+ -0.50013041, 0.1370284, 0.11810488, 0.2013163,
+ -0.20583314, 0.44344562, 0.22077113,
+ -0.29909778
+]
+bw_input_to_cell_weights_data = fw_input_to_cell_weights_data
+
+fw_input_to_output_weights_data = [
+ -0.25065863, -0.28290087, 0.04613829,
+ 0.40525138, 0.44272184, 0.03897077, -0.1556896,
+ 0.19487578
+]
+bw_input_to_output_weights_data = fw_input_to_output_weights_data
+
+fw_recurrent_to_input_weights_data = [
+ -0.0063535, -0.2042388, 0.31454784, -0.35746509, 0.28902304, 0.08183324,
+ -0.16555229, 0.02286911, -0.13566875, 0.03034258, 0.48091322,
+ -0.12528998, 0.24077177, -0.51332325, -0.33502164, 0.10629296
+]
+bw_recurrent_to_input_weights_data = fw_recurrent_to_input_weights_data
+
+fw_recurrent_to_forget_weights_data = [
+ -0.48684245, -0.06655136, 0.42224967, 0.2112639, 0.27654213, 0.20864892,
+ -0.07646349, 0.45877004, 0.00141793, -0.14609534, 0.36447752, 0.09196436,
+ 0.28053468, 0.01560611, -0.20127171, -0.01140004
+]
+bw_recurrent_to_forget_weights_data = fw_recurrent_to_forget_weights_data
+
+fw_recurrent_to_cell_weights_data = [
+ -0.3407414, 0.24443203, -0.2078532, 0.26320225, 0.05695659, -0.00123841,
+ -0.4744786, -0.35869038, -0.06418842, -0.13502428, -0.501764, 0.22830659,
+ -0.46367589, 0.26016325, -0.03894562, -0.16368064
+]
+bw_recurrent_to_cell_weights_data = fw_recurrent_to_cell_weights_data
+
+fw_recurrent_to_output_weights_data = [
+ 0.43385774, -0.17194885, 0.2718237, 0.09215671, 0.24107647, -0.39835793,
+ 0.18212086, 0.01301402, 0.48572797, -0.50656658, 0.20047462, -0.20607421,
+ -0.51818722, -0.15390486, 0.0468148, 0.39922136
+]
+bw_recurrent_to_output_weights_data = fw_recurrent_to_output_weights_data
+
+fw_input_gate_bias_data = [0.0, 0.0, 0.0, 0.0]
+bw_input_gate_bias_data = [0.0, 0.0, 0.0, 0.0]
+
+fw_forget_gate_bias_data = [1.0, 1.0, 1.0, 1.0]
+bw_forget_gate_bias_data = [1.0, 1.0, 1.0, 1.0]
+
+fw_cell_bias_data = [0.0, 0.0, 0.0, 0.0]
+bw_cell_bias_data = [0.0, 0.0, 0.0, 0.0]
+
+fw_output_gate_bias_data = [0.0, 0.0, 0.0, 0.0]
+bw_output_gate_bias_data = [0.0, 0.0, 0.0, 0.0]
+
+input_data = [2.0, 3.0, 3.0, 4.0, 1.0, 1.0]
+
+fw_activation_state_data = [0 for _ in range(n_batch * n_output)]
+bw_activation_state_data = [0 for _ in range(n_batch * n_output)]
+
+fw_cell_state_data = [0 for _ in range(n_batch * n_cell)]
+bw_cell_state_data = [0 for _ in range(n_batch * n_cell)]
+
+fw_golden_output_data = [
+ -0.02973187, 0.1229473, 0.20885126, -0.15358765, -0.0806187, 0.139077, 0.400476, -0.197842,
+ -0.03716109, 0.12507336, 0.41193449, -0.20860538, -0.0332076, 0.123838, 0.309777, -0.17621,
+ -0.15053082, 0.09120187, 0.24278517, -0.12222792, -0.0490733, 0.0739237, 0.067706, -0.0208124
+]
+
+
+test(
+ name="blackbox",
+ input_data=input_data,
+ fw_input_to_input_weights_data=fw_input_to_input_weights_data,
+ fw_input_to_forget_weights_data=fw_input_to_forget_weights_data,
+ fw_input_to_cell_weights_data=fw_input_to_cell_weights_data,
+ fw_input_to_output_weights_data=fw_input_to_output_weights_data,
+ fw_recurrent_to_input_weights_data=fw_recurrent_to_input_weights_data,
+ fw_recurrent_to_forget_weights_data=fw_recurrent_to_forget_weights_data,
+ fw_recurrent_to_cell_weights_data=fw_recurrent_to_cell_weights_data,
+ fw_recurrent_to_output_weights_data=fw_recurrent_to_output_weights_data,
+ fw_input_gate_bias_data=fw_input_gate_bias_data,
+ fw_forget_gate_bias_data=fw_forget_gate_bias_data,
+ fw_cell_bias_data=fw_cell_bias_data,
+ fw_output_gate_bias_data=fw_output_gate_bias_data,
+ bw_input_to_input_weights_data=bw_input_to_input_weights_data,
+ bw_input_to_forget_weights_data=bw_input_to_forget_weights_data,
+ bw_input_to_cell_weights_data=bw_input_to_cell_weights_data,
+ bw_input_to_output_weights_data=bw_input_to_output_weights_data,
+ bw_recurrent_to_input_weights_data=bw_recurrent_to_input_weights_data,
+ bw_recurrent_to_forget_weights_data=bw_recurrent_to_forget_weights_data,
+ bw_recurrent_to_cell_weights_data=bw_recurrent_to_cell_weights_data,
+ bw_recurrent_to_output_weights_data=bw_recurrent_to_output_weights_data,
+ bw_input_gate_bias_data=bw_input_gate_bias_data,
+ bw_forget_gate_bias_data=bw_forget_gate_bias_data,
+ bw_cell_bias_data=bw_cell_bias_data,
+ bw_output_gate_bias_data=bw_output_gate_bias_data,
+ fw_activation_state_data = fw_activation_state_data,
+ bw_activation_state_data = bw_activation_state_data,
+ fw_cell_state_data = fw_cell_state_data,
+ bw_cell_state_data = bw_cell_state_data,
+ fw_output_data=fw_golden_output_data,
+)
diff --git a/tests/nnapi/specs/skip/V1_2/bidirectional_sequence_lstm_merge_outputs.mod.py b/tests/nnapi/specs/skip/V1_2/bidirectional_sequence_lstm_merge_outputs.mod.py
new file mode 100644
index 000000000..6e661001e
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/bidirectional_sequence_lstm_merge_outputs.mod.py
@@ -0,0 +1,453 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Bidirectional Sequence LSTM Test:
+# FLOAT32, No Layer Normalization, No Cifg, No Peephole, No Projection, and No Clipping.
+# Merge outputs.
+
+n_batch = 1
+n_input = 2
+n_cell = 4
+n_output = 4
+max_time = 3
+
+input = Input("input", "TENSOR_FLOAT32", "{{{}, {}, {}}}".format(max_time, n_batch, n_input))
+
+fw_input_to_input_weights = Input(
+ "fw_input_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+fw_input_to_forget_weights = Input(
+ "fw_input_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+fw_input_to_cell_weights = Input(
+ "fw_input_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+fw_input_to_output_weights = Input(
+ "fw_input_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+
+fw_recurrent_to_input_weights = Input(
+ "fw_recurrent_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+fw_recurrent_to_forget_weights = Input(
+ "fw_recurrent_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+fw_recurrent_to_cell_weights = Input(
+ "fw_recurrent_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+fw_recurrent_to_output_weights = Input(
+ "fw_recurrent_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+
+fw_cell_to_input_weights = Input(
+ "fw_cell_to_input_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+fw_cell_to_forget_weights = Input(
+ "fw_cell_to_forget_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+fw_cell_to_output_weights = Input(
+ "fw_cell_to_output_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+
+fw_input_gate_bias = Input(
+ "fw_input_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+fw_forget_gate_bias = Input(
+ "fw_forget_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+fw_cell_bias = Input(
+ "fw_cell_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+fw_output_gate_bias = Input(
+ "fw_output_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+
+fw_projection_weights = Input(
+ "fw_projection_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_output, n_cell))
+fw_projection_bias = Input(
+ "fw_projection_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_output))
+
+bw_input_to_input_weights = Input(
+ "bw_input_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+bw_input_to_forget_weights = Input(
+ "bw_input_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+bw_input_to_cell_weights = Input(
+ "bw_input_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+bw_input_to_output_weights = Input(
+ "bw_input_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+
+bw_recurrent_to_input_weights = Input(
+ "bw_recurrent_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+bw_recurrent_to_forget_weights = Input(
+ "bw_recurrent_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+bw_recurrent_to_cell_weights = Input(
+ "bw_recurrent_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+bw_recurrent_to_output_weights = Input(
+ "bw_recurrent_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+
+bw_cell_to_input_weights = Input(
+ "bw_cell_to_input_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+bw_cell_to_forget_weights = Input(
+ "bw_cell_to_forget_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+bw_cell_to_output_weights = Input(
+ "bw_cell_to_output_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+
+bw_input_gate_bias = Input(
+ "bw_input_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+bw_forget_gate_bias = Input(
+ "bw_forget_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+bw_cell_bias = Input(
+ "bw_cell_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+bw_output_gate_bias = Input(
+ "bw_output_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+
+bw_projection_weights = Input(
+ "bw_projection_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_output, n_cell))
+bw_projection_bias = Input(
+ "bw_projection_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_output))
+
+fw_activation_state = Input(
+    "fw_activation_state", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_batch, n_output))
+fw_cell_state = Input(
+ "fw_cell_state", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_batch, n_cell))
+
+bw_activation_state = Input(
+    "bw_activation_state", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_batch, n_output))
+bw_cell_state = Input(
+ "bw_cell_state", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_batch, n_cell))
+
+aux_input = Input("aux_input", "TENSOR_FLOAT32", "{{{}, {}, {}}}".format(max_time, n_batch, n_input))
+
+fw_aux_input_to_input_weights = Input(
+ "fw_aux_input_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+fw_aux_input_to_forget_weights = Input(
+    "fw_aux_input_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+fw_aux_input_to_cell_weights = Input(
+ "fw_aux_input_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+fw_aux_input_to_output_weights = Input(
+ "fw_aux_input_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+
+bw_aux_input_to_input_weights = Input(
+ "bw_aux_input_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+bw_aux_input_to_forget_weights = Input(
+    "bw_aux_input_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+bw_aux_input_to_cell_weights = Input(
+ "bw_aux_input_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+bw_aux_input_to_output_weights = Input(
+ "bw_aux_input_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+
+fw_input_layer_norm_weights = Input("fw_input_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+fw_forget_layer_norm_weights = Input("fw_forget_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+fw_cell_layer_norm_weights = Input("fw_cell_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+fw_output_layer_norm_weights = Input("fw_output_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+
+bw_input_layer_norm_weights = Input("bw_input_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+bw_forget_layer_norm_weights = Input("bw_forget_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+bw_cell_layer_norm_weights = Input("bw_cell_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+bw_output_layer_norm_weights = Input("bw_output_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+
+fw_output=Output("fw_output", "TENSOR_FLOAT32", "{{{}, {}, {}}}".format(max_time, n_batch, 2 * n_output))
+
+def test(
+ name,
+ input_data=[],
+ fw_input_to_input_weights_data=[],
+ fw_input_to_forget_weights_data=[],
+ fw_input_to_cell_weights_data=[],
+ fw_input_to_output_weights_data=[],
+ fw_recurrent_to_input_weights_data=[],
+ fw_recurrent_to_forget_weights_data=[],
+ fw_recurrent_to_cell_weights_data=[],
+ fw_recurrent_to_output_weights_data=[],
+ fw_cell_to_input_weights_data=[],
+ fw_cell_to_forget_weights_data=[],
+ fw_cell_to_output_weights_data=[],
+ fw_input_gate_bias_data=[],
+ fw_forget_gate_bias_data=[],
+ fw_cell_bias_data=[],
+ fw_output_gate_bias_data=[],
+ fw_projection_weights_data=[],
+ fw_projection_bias_data=[],
+ bw_input_to_input_weights_data=[],
+ bw_input_to_forget_weights_data=[],
+ bw_input_to_cell_weights_data=[],
+ bw_input_to_output_weights_data=[],
+ bw_recurrent_to_input_weights_data=[],
+ bw_recurrent_to_forget_weights_data=[],
+ bw_recurrent_to_cell_weights_data=[],
+ bw_recurrent_to_output_weights_data=[],
+ bw_cell_to_input_weights_data=[],
+ bw_cell_to_forget_weights_data=[],
+ bw_cell_to_output_weights_data=[],
+ bw_input_gate_bias_data=[],
+ bw_forget_gate_bias_data=[],
+ bw_cell_bias_data=[],
+ bw_output_gate_bias_data=[],
+ bw_projection_weights_data=[],
+ bw_projection_bias_data=[],
+ fw_activation_state_data=[],
+ fw_cell_state_data=[],
+ bw_activation_state_data=[],
+ bw_cell_state_data=[],
+ aux_input_data=[],
+ fw_aux_input_to_input_weights_data=[],
+ fw_aux_input_to_forget_weights_data=[],
+ fw_aux_input_to_cell_weights_data=[],
+ fw_aux_input_to_output_weights_data=[],
+ bw_aux_input_to_input_weights_data=[],
+ bw_aux_input_to_forget_weights_data=[],
+ bw_aux_input_to_cell_weights_data=[],
+ bw_aux_input_to_output_weights_data=[],
+ fw_input_layer_norm_weights_data=[],
+ fw_forget_layer_norm_weights_data=[],
+ fw_cell_layer_norm_weights_data=[],
+ fw_output_layer_norm_weights_data=[],
+ bw_input_layer_norm_weights_data=[],
+ bw_forget_layer_norm_weights_data=[],
+ bw_cell_layer_norm_weights_data=[],
+ bw_output_layer_norm_weights_data=[],
+ fw_output_data=[]):
+
+ activation = Int32Scalar("activation", 4)
+ cell_clip = Float32Scalar("cell_clip", 0.0)
+ proj_clip = Float32Scalar("proj_clip", 0.0)
+ merge_outputs = BoolScalar("merge_outputs", True)
+ time_major = BoolScalar("time_major", True)
+
+ model = Model().Operation(
+ "BIDIRECTIONAL_SEQUENCE_LSTM",
+ input,
+ fw_input_to_input_weights,
+ fw_input_to_forget_weights,
+ fw_input_to_cell_weights,
+ fw_input_to_output_weights,
+ fw_recurrent_to_input_weights,
+ fw_recurrent_to_forget_weights,
+ fw_recurrent_to_cell_weights,
+ fw_recurrent_to_output_weights,
+ fw_cell_to_input_weights,
+ fw_cell_to_forget_weights,
+ fw_cell_to_output_weights,
+ fw_input_gate_bias,
+ fw_forget_gate_bias,
+ fw_cell_bias,
+ fw_output_gate_bias,
+ fw_projection_weights,
+ fw_projection_bias,
+ bw_input_to_input_weights,
+ bw_input_to_forget_weights,
+ bw_input_to_cell_weights,
+ bw_input_to_output_weights,
+ bw_recurrent_to_input_weights,
+ bw_recurrent_to_forget_weights,
+ bw_recurrent_to_cell_weights,
+ bw_recurrent_to_output_weights,
+ bw_cell_to_input_weights,
+ bw_cell_to_forget_weights,
+ bw_cell_to_output_weights,
+ bw_input_gate_bias,
+ bw_forget_gate_bias,
+ bw_cell_bias,
+ bw_output_gate_bias,
+ bw_projection_weights,
+ bw_projection_bias,
+ fw_activation_state,
+ fw_cell_state,
+ bw_activation_state,
+ bw_cell_state,
+ aux_input,
+ fw_aux_input_to_input_weights,
+ fw_aux_input_to_forget_weights,
+ fw_aux_input_to_cell_weights,
+ fw_aux_input_to_output_weights,
+ bw_aux_input_to_input_weights,
+ bw_aux_input_to_forget_weights,
+ bw_aux_input_to_cell_weights,
+ bw_aux_input_to_output_weights,
+ activation, cell_clip, proj_clip, merge_outputs, time_major,
+ fw_input_layer_norm_weights,
+ fw_forget_layer_norm_weights,
+ fw_cell_layer_norm_weights,
+ fw_output_layer_norm_weights,
+ bw_input_layer_norm_weights,
+ bw_forget_layer_norm_weights,
+ bw_cell_layer_norm_weights,
+ bw_output_layer_norm_weights,).To(fw_output)
+
+ example = Example(
+ {
+ input: input_data,
+ fw_input_to_input_weights: fw_input_to_input_weights_data,
+ fw_input_to_forget_weights: fw_input_to_forget_weights_data,
+ fw_input_to_cell_weights: fw_input_to_cell_weights_data,
+ fw_input_to_output_weights: fw_input_to_output_weights_data,
+ fw_recurrent_to_input_weights: fw_recurrent_to_input_weights_data,
+ fw_recurrent_to_forget_weights: fw_recurrent_to_forget_weights_data,
+ fw_recurrent_to_cell_weights: fw_recurrent_to_cell_weights_data,
+ fw_recurrent_to_output_weights: fw_recurrent_to_output_weights_data,
+ fw_cell_to_input_weights: fw_cell_to_input_weights_data,
+ fw_cell_to_forget_weights: fw_cell_to_forget_weights_data,
+ fw_cell_to_output_weights: fw_cell_to_output_weights_data,
+ fw_input_gate_bias: fw_input_gate_bias_data,
+ fw_forget_gate_bias: fw_forget_gate_bias_data,
+ fw_cell_bias: fw_cell_bias_data,
+ fw_output_gate_bias: fw_output_gate_bias_data,
+ fw_projection_weights: fw_projection_weights_data,
+ fw_projection_bias: fw_projection_bias_data,
+ bw_input_to_input_weights: bw_input_to_input_weights_data,
+ bw_input_to_forget_weights: bw_input_to_forget_weights_data,
+ bw_input_to_cell_weights: bw_input_to_cell_weights_data,
+ bw_input_to_output_weights: bw_input_to_output_weights_data,
+ bw_recurrent_to_input_weights: bw_recurrent_to_input_weights_data,
+ bw_recurrent_to_forget_weights: bw_recurrent_to_forget_weights_data,
+ bw_recurrent_to_cell_weights: bw_recurrent_to_cell_weights_data,
+ bw_recurrent_to_output_weights: bw_recurrent_to_output_weights_data,
+ bw_cell_to_input_weights: bw_cell_to_input_weights_data,
+ bw_cell_to_forget_weights: bw_cell_to_forget_weights_data,
+ bw_cell_to_output_weights: bw_cell_to_output_weights_data,
+ bw_input_gate_bias: bw_input_gate_bias_data,
+ bw_forget_gate_bias: bw_forget_gate_bias_data,
+ bw_cell_bias: bw_cell_bias_data,
+ bw_output_gate_bias: bw_output_gate_bias_data,
+ bw_projection_weights: bw_projection_weights_data,
+ bw_projection_bias: bw_projection_bias_data,
+ fw_activation_state: fw_activation_state_data,
+ fw_cell_state: fw_cell_state_data,
+ bw_activation_state: bw_activation_state_data,
+ bw_cell_state: bw_cell_state_data,
+ aux_input: aux_input_data,
+ fw_aux_input_to_input_weights: fw_aux_input_to_input_weights_data,
+ fw_aux_input_to_forget_weights: fw_aux_input_to_forget_weights_data,
+ fw_aux_input_to_cell_weights: fw_aux_input_to_cell_weights_data,
+ fw_aux_input_to_output_weights: fw_aux_input_to_output_weights_data,
+ bw_aux_input_to_input_weights: bw_aux_input_to_input_weights_data,
+ bw_aux_input_to_forget_weights: bw_aux_input_to_forget_weights_data,
+ bw_aux_input_to_cell_weights: bw_aux_input_to_cell_weights_data,
+ bw_aux_input_to_output_weights: bw_aux_input_to_output_weights_data,
+ fw_input_layer_norm_weights: fw_input_layer_norm_weights_data,
+ fw_forget_layer_norm_weights: fw_forget_layer_norm_weights_data,
+ fw_cell_layer_norm_weights: fw_cell_layer_norm_weights_data,
+ fw_output_layer_norm_weights: fw_output_layer_norm_weights_data,
+ bw_input_layer_norm_weights: bw_input_layer_norm_weights_data,
+ bw_forget_layer_norm_weights: bw_forget_layer_norm_weights_data,
+ bw_cell_layer_norm_weights: bw_cell_layer_norm_weights_data,
+ bw_output_layer_norm_weights: bw_output_layer_norm_weights_data,
+ fw_output: fw_output_data,
+ },
+ model=model, name=name)
+
+
+fw_input_to_input_weights_data = [
+ -0.45018822, -0.02338299, -0.0870589,
+ -0.34550029, 0.04266912, -0.15680569,
+ -0.34856534, 0.43890524
+]
+bw_input_to_input_weights_data = fw_input_to_input_weights_data
+
+fw_input_to_forget_weights_data = [
+ 0.09701663, 0.20334584, -0.50592935,
+ -0.31343272, -0.40032279, 0.44781327,
+ 0.01387155, -0.35593212
+]
+bw_input_to_forget_weights_data = fw_input_to_forget_weights_data
+
+fw_input_to_cell_weights_data = [
+ -0.50013041, 0.1370284, 0.11810488, 0.2013163,
+ -0.20583314, 0.44344562, 0.22077113,
+ -0.29909778
+]
+bw_input_to_cell_weights_data = fw_input_to_cell_weights_data
+
+fw_input_to_output_weights_data = [
+ -0.25065863, -0.28290087, 0.04613829,
+ 0.40525138, 0.44272184, 0.03897077, -0.1556896,
+ 0.19487578
+]
+bw_input_to_output_weights_data = fw_input_to_output_weights_data
+
+fw_recurrent_to_input_weights_data = [
+ -0.0063535, -0.2042388, 0.31454784, -0.35746509, 0.28902304, 0.08183324,
+ -0.16555229, 0.02286911, -0.13566875, 0.03034258, 0.48091322,
+ -0.12528998, 0.24077177, -0.51332325, -0.33502164, 0.10629296
+]
+bw_recurrent_to_input_weights_data = fw_recurrent_to_input_weights_data
+
+fw_recurrent_to_forget_weights_data = [
+ -0.48684245, -0.06655136, 0.42224967, 0.2112639, 0.27654213, 0.20864892,
+ -0.07646349, 0.45877004, 0.00141793, -0.14609534, 0.36447752, 0.09196436,
+ 0.28053468, 0.01560611, -0.20127171, -0.01140004
+]
+bw_recurrent_to_forget_weights_data = fw_recurrent_to_forget_weights_data
+
+fw_recurrent_to_cell_weights_data = [
+ -0.3407414, 0.24443203, -0.2078532, 0.26320225, 0.05695659, -0.00123841,
+ -0.4744786, -0.35869038, -0.06418842, -0.13502428, -0.501764, 0.22830659,
+ -0.46367589, 0.26016325, -0.03894562, -0.16368064
+]
+bw_recurrent_to_cell_weights_data = fw_recurrent_to_cell_weights_data
+
+fw_recurrent_to_output_weights_data = [
+ 0.43385774, -0.17194885, 0.2718237, 0.09215671, 0.24107647, -0.39835793,
+ 0.18212086, 0.01301402, 0.48572797, -0.50656658, 0.20047462, -0.20607421,
+ -0.51818722, -0.15390486, 0.0468148, 0.39922136
+]
+bw_recurrent_to_output_weights_data = fw_recurrent_to_output_weights_data
+
+fw_input_gate_bias_data = [0.0, 0.0, 0.0, 0.0]
+bw_input_gate_bias_data = [0.0, 0.0, 0.0, 0.0]
+
+fw_forget_gate_bias_data = [1.0, 1.0, 1.0, 1.0]
+bw_forget_gate_bias_data = [1.0, 1.0, 1.0, 1.0]
+
+fw_cell_bias_data = [0.0, 0.0, 0.0, 0.0]
+bw_cell_bias_data = [0.0, 0.0, 0.0, 0.0]
+
+fw_output_gate_bias_data = [0.0, 0.0, 0.0, 0.0]
+bw_output_gate_bias_data = [0.0, 0.0, 0.0, 0.0]
+
+input_data = [2.0, 3.0, 3.0, 4.0, 1.0, 1.0]
+
+fw_activation_state_data = [0 for _ in range(n_batch * n_output)]
+bw_activation_state_data = [0 for _ in range(n_batch * n_output)]
+
+fw_cell_state_data = [0 for _ in range(n_batch * n_cell)]
+bw_cell_state_data = [0 for _ in range(n_batch * n_cell)]
+
+fw_golden_output_data = [
+ -0.02973187, 0.1229473, 0.20885126, -0.15358765, -0.0806187, 0.139077, 0.400476, -0.197842,
+ -0.03716109, 0.12507336, 0.41193449, -0.20860538, -0.0332076, 0.123838, 0.309777, -0.17621,
+ -0.15053082, 0.09120187, 0.24278517, -0.12222792, -0.0490733, 0.0739237, 0.067706, -0.0208124
+]
+
+
+test(
+ name="blackbox",
+ input_data=input_data,
+ fw_input_to_input_weights_data=fw_input_to_input_weights_data,
+ fw_input_to_forget_weights_data=fw_input_to_forget_weights_data,
+ fw_input_to_cell_weights_data=fw_input_to_cell_weights_data,
+ fw_input_to_output_weights_data=fw_input_to_output_weights_data,
+ fw_recurrent_to_input_weights_data=fw_recurrent_to_input_weights_data,
+ fw_recurrent_to_forget_weights_data=fw_recurrent_to_forget_weights_data,
+ fw_recurrent_to_cell_weights_data=fw_recurrent_to_cell_weights_data,
+ fw_recurrent_to_output_weights_data=fw_recurrent_to_output_weights_data,
+ fw_input_gate_bias_data=fw_input_gate_bias_data,
+ fw_forget_gate_bias_data=fw_forget_gate_bias_data,
+ fw_cell_bias_data=fw_cell_bias_data,
+ fw_output_gate_bias_data=fw_output_gate_bias_data,
+ bw_input_to_input_weights_data=bw_input_to_input_weights_data,
+ bw_input_to_forget_weights_data=bw_input_to_forget_weights_data,
+ bw_input_to_cell_weights_data=bw_input_to_cell_weights_data,
+ bw_input_to_output_weights_data=bw_input_to_output_weights_data,
+ bw_recurrent_to_input_weights_data=bw_recurrent_to_input_weights_data,
+ bw_recurrent_to_forget_weights_data=bw_recurrent_to_forget_weights_data,
+ bw_recurrent_to_cell_weights_data=bw_recurrent_to_cell_weights_data,
+ bw_recurrent_to_output_weights_data=bw_recurrent_to_output_weights_data,
+ bw_input_gate_bias_data=bw_input_gate_bias_data,
+ bw_forget_gate_bias_data=bw_forget_gate_bias_data,
+ bw_cell_bias_data=bw_cell_bias_data,
+ bw_output_gate_bias_data=bw_output_gate_bias_data,
+ fw_activation_state_data = fw_activation_state_data,
+ bw_activation_state_data = bw_activation_state_data,
+ fw_cell_state_data = fw_cell_state_data,
+ bw_cell_state_data = bw_cell_state_data,
+ fw_output_data=fw_golden_output_data,
+)
diff --git a/tests/nnapi/specs/skip/V1_2/bidirectional_sequence_lstm_norm_fw_output.mod.py b/tests/nnapi/specs/skip/V1_2/bidirectional_sequence_lstm_norm_fw_output.mod.py
new file mode 100644
index 000000000..93de203f4
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/bidirectional_sequence_lstm_norm_fw_output.mod.py
@@ -0,0 +1,482 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Bidirectional Sequence LSTM Test:
+# FLOAT32, Layer Normalization, No Cifg, Peephole, Projection, and No Clipping.
+# Verifies forward output only.
+
+n_batch = 2
+n_input = 5
+n_cell = 4
+n_output = 3
+max_time = 3
+
+input = Input("input", "TENSOR_FLOAT32", "{{{}, {}, {}}}".format(max_time, n_batch, n_input))
+
+fw_input_to_input_weights = Input(
+ "fw_input_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+fw_input_to_forget_weights = Input(
+ "fw_input_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+fw_input_to_cell_weights = Input(
+ "fw_input_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+fw_input_to_output_weights = Input(
+ "fw_input_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+
+fw_recurrent_to_input_weights = Input(
+ "fw_recurrent_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+fw_recurrent_to_forget_weights = Input(
+ "fw_recurrent_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+fw_recurrent_to_cell_weights = Input(
+ "fw_recurrent_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+fw_recurrent_to_output_weights = Input(
+ "fw_recurrent_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+
+fw_cell_to_input_weights = Input(
+ "fw_cell_to_input_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+fw_cell_to_forget_weights = Input(
+ "fw_cell_to_forget_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+fw_cell_to_output_weights = Input(
+ "fw_cell_to_output_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+
+fw_input_gate_bias = Input(
+ "fw_input_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+fw_forget_gate_bias = Input(
+ "fw_forget_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+fw_cell_bias = Input(
+ "fw_cell_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+fw_output_gate_bias = Input(
+ "fw_output_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+
+fw_projection_weights = Input(
+ "fw_projection_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_output, n_cell))
+fw_projection_bias = Input(
+ "fw_projection_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_output))
+
+bw_input_to_input_weights = Input(
+ "bw_input_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+bw_input_to_forget_weights = Input(
+ "bw_input_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+bw_input_to_cell_weights = Input(
+ "bw_input_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+bw_input_to_output_weights = Input(
+ "bw_input_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+
+bw_recurrent_to_input_weights = Input(
+ "bw_recurrent_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+bw_recurrent_to_forget_weights = Input(
+ "bw_recurrent_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+bw_recurrent_to_cell_weights = Input(
+ "bw_recurrent_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+bw_recurrent_to_output_weights = Input(
+ "bw_recurrent_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_output))
+
+bw_cell_to_input_weights = Input(
+ "bw_cell_to_input_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+bw_cell_to_forget_weights = Input(
+ "bw_cell_to_forget_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+bw_cell_to_output_weights = Input(
+ "bw_cell_to_output_weights", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+
+bw_input_gate_bias = Input(
+ "bw_input_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+bw_forget_gate_bias = Input(
+ "bw_forget_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+bw_cell_bias = Input(
+ "bw_cell_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+bw_output_gate_bias = Input(
+ "bw_output_gate_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_cell))
+
+bw_projection_weights = Input(
+ "bw_projection_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_output, n_cell))
+bw_projection_bias = Input(
+ "bw_projection_bias", "TENSOR_FLOAT32", "{{{}}}".format(n_output))
+
+fw_activation_state = Input(
+    "fw_activation_state", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_batch, n_output))
+fw_cell_state = Input(
+ "fw_cell_state", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_batch, n_cell))
+
+bw_activation_state = Input(
+    "bw_activation_state", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_batch, n_output))
+bw_cell_state = Input(
+ "bw_cell_state", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_batch, n_cell))
+
+aux_input = Input("aux_input", "TENSOR_FLOAT32", "{{{}, {}, {}}}".format(max_time, n_batch, n_input))
+
+fw_aux_input_to_input_weights = Input(
+ "fw_aux_input_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+fw_aux_input_to_forget_weights = Input(
+    "fw_aux_input_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+fw_aux_input_to_cell_weights = Input(
+ "fw_aux_input_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+fw_aux_input_to_output_weights = Input(
+ "fw_aux_input_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+
+bw_aux_input_to_input_weights = Input(
+ "bw_aux_input_to_input_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+bw_aux_input_to_forget_weights = Input(
+    "bw_aux_input_to_forget_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+bw_aux_input_to_cell_weights = Input(
+ "bw_aux_input_to_cell_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+bw_aux_input_to_output_weights = Input(
+ "bw_aux_input_to_output_weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(n_cell, n_input))
+
+fw_input_layer_norm_weights = Input("fw_input_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+fw_forget_layer_norm_weights = Input("fw_forget_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+fw_cell_layer_norm_weights = Input("fw_cell_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+fw_output_layer_norm_weights = Input("fw_output_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+
+bw_input_layer_norm_weights = Input("bw_input_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+bw_forget_layer_norm_weights = Input("bw_forget_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+bw_cell_layer_norm_weights = Input("bw_cell_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+bw_output_layer_norm_weights = Input("bw_output_layer_norm_weights", "TENSOR_FLOAT32", "{%d}" % n_cell)
+
+fw_output=Output("fw_output", "TENSOR_FLOAT32", "{{{}, {}, {}}}".format(max_time, n_batch, n_output))
+bw_output=IgnoredOutput("bw_output", "TENSOR_FLOAT32", "{{{}, {}, {}}}".format(max_time, n_batch, n_output))
+
+def test(
+ name,
+ input_data=[],
+ fw_input_to_input_weights_data=[],
+ fw_input_to_forget_weights_data=[],
+ fw_input_to_cell_weights_data=[],
+ fw_input_to_output_weights_data=[],
+ fw_recurrent_to_input_weights_data=[],
+ fw_recurrent_to_forget_weights_data=[],
+ fw_recurrent_to_cell_weights_data=[],
+ fw_recurrent_to_output_weights_data=[],
+ fw_cell_to_input_weights_data=[],
+ fw_cell_to_forget_weights_data=[],
+ fw_cell_to_output_weights_data=[],
+ fw_input_gate_bias_data=[],
+ fw_forget_gate_bias_data=[],
+ fw_cell_bias_data=[],
+ fw_output_gate_bias_data=[],
+ fw_projection_weights_data=[],
+ fw_projection_bias_data=[],
+ bw_input_to_input_weights_data=[],
+ bw_input_to_forget_weights_data=[],
+ bw_input_to_cell_weights_data=[],
+ bw_input_to_output_weights_data=[],
+ bw_recurrent_to_input_weights_data=[],
+ bw_recurrent_to_forget_weights_data=[],
+ bw_recurrent_to_cell_weights_data=[],
+ bw_recurrent_to_output_weights_data=[],
+ bw_cell_to_input_weights_data=[],
+ bw_cell_to_forget_weights_data=[],
+ bw_cell_to_output_weights_data=[],
+ bw_input_gate_bias_data=[],
+ bw_forget_gate_bias_data=[],
+ bw_cell_bias_data=[],
+ bw_output_gate_bias_data=[],
+ bw_projection_weights_data=[],
+ bw_projection_bias_data=[],
+ fw_activation_state_data=[],
+ fw_cell_state_data=[],
+ bw_activation_state_data=[],
+ bw_cell_state_data=[],
+ aux_input_data=[],
+ fw_aux_input_to_input_weights_data=[],
+ fw_aux_input_to_forget_weights_data=[],
+ fw_aux_input_to_cell_weights_data=[],
+ fw_aux_input_to_output_weights_data=[],
+ bw_aux_input_to_input_weights_data=[],
+ bw_aux_input_to_forget_weights_data=[],
+ bw_aux_input_to_cell_weights_data=[],
+ bw_aux_input_to_output_weights_data=[],
+ fw_input_layer_norm_weights_data=[],
+ fw_forget_layer_norm_weights_data=[],
+ fw_cell_layer_norm_weights_data=[],
+ fw_output_layer_norm_weights_data=[],
+ bw_input_layer_norm_weights_data=[],
+ bw_forget_layer_norm_weights_data=[],
+ bw_cell_layer_norm_weights_data=[],
+ bw_output_layer_norm_weights_data=[],
+ fw_output_data=[],
+ bw_output_data=[],):
+
+ activation = Int32Scalar("activation", 4)
+ cell_clip = Float32Scalar("cell_clip", 0.0)
+ proj_clip = Float32Scalar("proj_clip", 0.0)
+ merge_outputs = BoolScalar("merge_outputs", False)
+ time_major = BoolScalar("time_major", True)
+
+ model = Model().Operation(
+ "BIDIRECTIONAL_SEQUENCE_LSTM",
+ input,
+ fw_input_to_input_weights,
+ fw_input_to_forget_weights,
+ fw_input_to_cell_weights,
+ fw_input_to_output_weights,
+ fw_recurrent_to_input_weights,
+ fw_recurrent_to_forget_weights,
+ fw_recurrent_to_cell_weights,
+ fw_recurrent_to_output_weights,
+ fw_cell_to_input_weights,
+ fw_cell_to_forget_weights,
+ fw_cell_to_output_weights,
+ fw_input_gate_bias,
+ fw_forget_gate_bias,
+ fw_cell_bias,
+ fw_output_gate_bias,
+ fw_projection_weights,
+ fw_projection_bias,
+ bw_input_to_input_weights,
+ bw_input_to_forget_weights,
+ bw_input_to_cell_weights,
+ bw_input_to_output_weights,
+ bw_recurrent_to_input_weights,
+ bw_recurrent_to_forget_weights,
+ bw_recurrent_to_cell_weights,
+ bw_recurrent_to_output_weights,
+ bw_cell_to_input_weights,
+ bw_cell_to_forget_weights,
+ bw_cell_to_output_weights,
+ bw_input_gate_bias,
+ bw_forget_gate_bias,
+ bw_cell_bias,
+ bw_output_gate_bias,
+ bw_projection_weights,
+ bw_projection_bias,
+ fw_activation_state,
+ fw_cell_state,
+ bw_activation_state,
+ bw_cell_state,
+ aux_input,
+ fw_aux_input_to_input_weights,
+ fw_aux_input_to_forget_weights,
+ fw_aux_input_to_cell_weights,
+ fw_aux_input_to_output_weights,
+ bw_aux_input_to_input_weights,
+ bw_aux_input_to_forget_weights,
+ bw_aux_input_to_cell_weights,
+ bw_aux_input_to_output_weights,
+ activation, cell_clip, proj_clip, merge_outputs, time_major,
+ fw_input_layer_norm_weights,
+ fw_forget_layer_norm_weights,
+ fw_cell_layer_norm_weights,
+ fw_output_layer_norm_weights,
+ bw_input_layer_norm_weights,
+ bw_forget_layer_norm_weights,
+ bw_cell_layer_norm_weights,
+ bw_output_layer_norm_weights,).To(fw_output, bw_output)
+
+ example = Example(
+ {
+ input: input_data,
+ fw_input_to_input_weights: fw_input_to_input_weights_data,
+ fw_input_to_forget_weights: fw_input_to_forget_weights_data,
+ fw_input_to_cell_weights: fw_input_to_cell_weights_data,
+ fw_input_to_output_weights: fw_input_to_output_weights_data,
+ fw_recurrent_to_input_weights: fw_recurrent_to_input_weights_data,
+ fw_recurrent_to_forget_weights: fw_recurrent_to_forget_weights_data,
+ fw_recurrent_to_cell_weights: fw_recurrent_to_cell_weights_data,
+ fw_recurrent_to_output_weights: fw_recurrent_to_output_weights_data,
+ fw_cell_to_input_weights: fw_cell_to_input_weights_data,
+ fw_cell_to_forget_weights: fw_cell_to_forget_weights_data,
+ fw_cell_to_output_weights: fw_cell_to_output_weights_data,
+ fw_input_gate_bias: fw_input_gate_bias_data,
+ fw_forget_gate_bias: fw_forget_gate_bias_data,
+ fw_cell_bias: fw_cell_bias_data,
+ fw_output_gate_bias: fw_output_gate_bias_data,
+ fw_projection_weights: fw_projection_weights_data,
+ fw_projection_bias: fw_projection_bias_data,
+ bw_input_to_input_weights: bw_input_to_input_weights_data,
+ bw_input_to_forget_weights: bw_input_to_forget_weights_data,
+ bw_input_to_cell_weights: bw_input_to_cell_weights_data,
+ bw_input_to_output_weights: bw_input_to_output_weights_data,
+ bw_recurrent_to_input_weights: bw_recurrent_to_input_weights_data,
+ bw_recurrent_to_forget_weights: bw_recurrent_to_forget_weights_data,
+ bw_recurrent_to_cell_weights: bw_recurrent_to_cell_weights_data,
+ bw_recurrent_to_output_weights: bw_recurrent_to_output_weights_data,
+ bw_cell_to_input_weights: bw_cell_to_input_weights_data,
+ bw_cell_to_forget_weights: bw_cell_to_forget_weights_data,
+ bw_cell_to_output_weights: bw_cell_to_output_weights_data,
+ bw_input_gate_bias: bw_input_gate_bias_data,
+ bw_forget_gate_bias: bw_forget_gate_bias_data,
+ bw_cell_bias: bw_cell_bias_data,
+ bw_output_gate_bias: bw_output_gate_bias_data,
+ bw_projection_weights: bw_projection_weights_data,
+ bw_projection_bias: bw_projection_bias_data,
+ fw_activation_state: fw_activation_state_data,
+ fw_cell_state: fw_cell_state_data,
+ bw_activation_state: bw_activation_state_data,
+ bw_cell_state: bw_cell_state_data,
+ aux_input: aux_input_data,
+ fw_aux_input_to_input_weights: fw_aux_input_to_input_weights_data,
+ fw_aux_input_to_forget_weights: fw_aux_input_to_forget_weights_data,
+ fw_aux_input_to_cell_weights: fw_aux_input_to_cell_weights_data,
+ fw_aux_input_to_output_weights: fw_aux_input_to_output_weights_data,
+ bw_aux_input_to_input_weights: bw_aux_input_to_input_weights_data,
+ bw_aux_input_to_forget_weights: bw_aux_input_to_forget_weights_data,
+ bw_aux_input_to_cell_weights: bw_aux_input_to_cell_weights_data,
+ bw_aux_input_to_output_weights: bw_aux_input_to_output_weights_data,
+ fw_input_layer_norm_weights: fw_input_layer_norm_weights_data,
+ fw_forget_layer_norm_weights: fw_forget_layer_norm_weights_data,
+ fw_cell_layer_norm_weights: fw_cell_layer_norm_weights_data,
+ fw_output_layer_norm_weights: fw_output_layer_norm_weights_data,
+ bw_input_layer_norm_weights: bw_input_layer_norm_weights_data,
+ bw_forget_layer_norm_weights: bw_forget_layer_norm_weights_data,
+ bw_cell_layer_norm_weights: bw_cell_layer_norm_weights_data,
+ bw_output_layer_norm_weights: bw_output_layer_norm_weights_data,
+ fw_output: fw_output_data,
+ bw_output: bw_output_data,
+ },
+ model=model, name=name)
+
+
# ---------------------------------------------------------------------------
# Constant test data. The backward (bw) direction reuses the forward (fw)
# weights and biases, so each bw_* name is simply an alias of its fw_* twin.
# NOTE(review): n_batch, n_cell, n_output and max_time are defined earlier in
# this file (above this excerpt).
# ---------------------------------------------------------------------------
fw_input_to_input_weights_data = [
    0.5, 0.6, 0.7, -0.8, -0.9, 0.1, 0.2, 0.3, -0.4, 0.5, -0.8, 0.7, -0.6,
    0.5, -0.4, -0.5, -0.4, -0.3, -0.2, -0.1
]
bw_input_to_input_weights_data = fw_input_to_input_weights_data

fw_input_to_forget_weights_data = [
    -0.6, -0.1, 0.3, 0.2, 0.9, -0.5, -0.2, -0.4, 0.3, -0.8, -0.4, 0.3, -0.5,
    -0.4, -0.6, 0.3, -0.4, -0.6, -0.5, -0.5
]
bw_input_to_forget_weights_data = fw_input_to_forget_weights_data

fw_input_to_cell_weights_data = [
    -0.4, -0.3, -0.2, -0.1, -0.5, 0.5, -0.2, -0.3, -0.2, -0.6, 0.6, -0.1,
    -0.4, -0.3, -0.7, 0.7, -0.9, -0.5, 0.8, 0.6
]
bw_input_to_cell_weights_data = fw_input_to_cell_weights_data

fw_input_to_output_weights_data = [
    -0.8, -0.4, -0.2, -0.9, -0.1, -0.7, 0.3, -0.3, -0.8, -0.2, 0.6, -0.2,
    0.4, -0.7, -0.3, -0.5, 0.1, 0.5, -0.6, -0.4
]
bw_input_to_output_weights_data = fw_input_to_output_weights_data

fw_recurrent_to_input_weights_data = [
    -0.2, -0.3, 0.4, 0.1, -0.5, 0.9, -0.2, -0.3, -0.7, 0.05, -0.2, -0.6
]
bw_recurrent_to_input_weights_data = fw_recurrent_to_input_weights_data

fw_recurrent_to_forget_weights_data = [
    -0.5, -0.3, -0.5, -0.2, 0.6, 0.4, 0.9, 0.3, -0.1, 0.2, 0.5, 0.2
]
bw_recurrent_to_forget_weights_data = fw_recurrent_to_forget_weights_data

fw_recurrent_to_cell_weights_data = [
    -0.3, 0.2, 0.1, -0.3, 0.8, -0.08, -0.2, 0.3, 0.8, -0.6, -0.1, 0.2
]
bw_recurrent_to_cell_weights_data = fw_recurrent_to_cell_weights_data

fw_recurrent_to_output_weights_data = [
    0.3, -0.1, 0.1, -0.2, -0.5, -0.7, -0.2, -0.6, -0.1, -0.4, -0.7, -0.2
]
bw_recurrent_to_output_weights_data = fw_recurrent_to_output_weights_data

fw_cell_to_input_weights_data = [0.05, 0.1, 0.25, 0.15]
bw_cell_to_input_weights_data = fw_cell_to_input_weights_data

fw_cell_to_forget_weights_data = [-0.02, -0.15, -0.25, -0.03]
bw_cell_to_forget_weights_data = fw_cell_to_forget_weights_data

fw_cell_to_output_weights_data = [0.1, -0.1, -0.5, 0.05]
bw_cell_to_output_weights_data = fw_cell_to_output_weights_data

fw_projection_weights_data = [
    -0.1, 0.2, 0.01, -0.2, 0.1, 0.5, 0.3, 0.08, 0.07, 0.2, -0.4, 0.2
]
bw_projection_weights_data = fw_projection_weights_data

fw_input_gate_bias_data = [0.03, 0.15, 0.22, 0.38]
bw_input_gate_bias_data = fw_input_gate_bias_data

fw_forget_gate_bias_data = [0.1, -0.3, -0.2, 0.1]
bw_forget_gate_bias_data = fw_forget_gate_bias_data

fw_cell_bias_data = [-0.05, 0.72, 0.25, 0.08]
bw_cell_bias_data = fw_cell_bias_data

fw_output_gate_bias_data = [0.05, -0.01, 0.2, 0.1]
bw_output_gate_bias_data = fw_output_gate_bias_data

# Layer-normalization weights, shared between both directions (see the
# fw_*/bw_* keyword arguments in the test() call below).
input_layer_norm_weights_data = [0.1, 0.2, 0.3, 0.5]
forget_layer_norm_weights_data = [0.2, 0.2, 0.4, 0.3]
cell_layer_norm_weights_data = [0.7, 0.2, 0.3, 0.8]
output_layer_norm_weights_data = [0.6, 0.2, 0.2, 0.5]

input_data = [0.7, 0.8, 0.1, 0.2, 0.3, 0.3, 0.2, 0.9, 0.8, 0.1,
              0.8, 0.1, 0.2, 0.4, 0.5, 0.1, 0.5, 0.2, 0.4, 0.2,
              0.2, 0.7, 0.7, 0.1, 0.7, 0.6, 0.9, 0.2, 0.5, 0.7]

# Initial activation and cell states start at all zeros.
fw_activation_state_data = [0 for _ in range(n_batch * n_output)]
bw_activation_state_data = [0 for _ in range(n_batch * n_output)]

fw_cell_state_data = [0 for _ in range(n_batch * n_cell)]
bw_cell_state_data = [0 for _ in range(n_batch * n_cell)]

# Expected forward-direction output. The backward golden output is all
# zeros; presumably only the forward output is checked here, matching the
# file's "norm_fw_output" name -- TODO confirm against the harness.
fw_golden_output_data = [
    0.0244077, 0.128027, -0.00170918, -0.00692428, 0.0848741, 0.063445,
    0.0137642, 0.140751, 0.0395835, -0.00403912, 0.139963, 0.072681,
    -0.00459231, 0.155278, 0.0837377, 0.00752706, 0.161903, 0.0561371,
]
bw_golden_output_data = [0 for _ in range(n_batch * max_time * n_output)]

# Single test case. NOTE(review): no fw/bw *_projection_bias_data keyword is
# passed; presumably test() supplies a default for it -- verify its signature.
test(
    name="blackbox",
    input_data=input_data,
    fw_input_to_input_weights_data=fw_input_to_input_weights_data,
    fw_input_to_forget_weights_data=fw_input_to_forget_weights_data,
    fw_input_to_cell_weights_data=fw_input_to_cell_weights_data,
    fw_input_to_output_weights_data=fw_input_to_output_weights_data,
    fw_recurrent_to_input_weights_data=fw_recurrent_to_input_weights_data,
    fw_recurrent_to_forget_weights_data=fw_recurrent_to_forget_weights_data,
    fw_recurrent_to_cell_weights_data=fw_recurrent_to_cell_weights_data,
    fw_recurrent_to_output_weights_data=fw_recurrent_to_output_weights_data,
    fw_cell_to_input_weights_data=fw_cell_to_input_weights_data,
    fw_cell_to_forget_weights_data=fw_cell_to_forget_weights_data,
    fw_cell_to_output_weights_data=fw_cell_to_output_weights_data,
    fw_input_gate_bias_data=fw_input_gate_bias_data,
    fw_forget_gate_bias_data=fw_forget_gate_bias_data,
    fw_cell_bias_data=fw_cell_bias_data,
    fw_output_gate_bias_data=fw_output_gate_bias_data,
    fw_projection_weights_data=fw_projection_weights_data,
    bw_input_to_input_weights_data=bw_input_to_input_weights_data,
    bw_input_to_forget_weights_data=bw_input_to_forget_weights_data,
    bw_input_to_cell_weights_data=bw_input_to_cell_weights_data,
    bw_input_to_output_weights_data=bw_input_to_output_weights_data,
    bw_recurrent_to_input_weights_data=bw_recurrent_to_input_weights_data,
    bw_recurrent_to_forget_weights_data=bw_recurrent_to_forget_weights_data,
    bw_recurrent_to_cell_weights_data=bw_recurrent_to_cell_weights_data,
    bw_recurrent_to_output_weights_data=bw_recurrent_to_output_weights_data,
    bw_cell_to_input_weights_data=bw_cell_to_input_weights_data,
    bw_cell_to_forget_weights_data=bw_cell_to_forget_weights_data,
    bw_cell_to_output_weights_data=bw_cell_to_output_weights_data,
    bw_input_gate_bias_data=bw_input_gate_bias_data,
    bw_forget_gate_bias_data=bw_forget_gate_bias_data,
    bw_cell_bias_data=bw_cell_bias_data,
    bw_output_gate_bias_data=bw_output_gate_bias_data,
    bw_projection_weights_data=bw_projection_weights_data,
    fw_activation_state_data = fw_activation_state_data,
    bw_activation_state_data = bw_activation_state_data,
    fw_cell_state_data = fw_cell_state_data,
    bw_cell_state_data = bw_cell_state_data,
    fw_input_layer_norm_weights_data = input_layer_norm_weights_data,
    fw_forget_layer_norm_weights_data = forget_layer_norm_weights_data,
    fw_cell_layer_norm_weights_data = cell_layer_norm_weights_data,
    fw_output_layer_norm_weights_data = output_layer_norm_weights_data,
    bw_input_layer_norm_weights_data = input_layer_norm_weights_data,
    bw_forget_layer_norm_weights_data = forget_layer_norm_weights_data,
    bw_cell_layer_norm_weights_data = cell_layer_norm_weights_data,
    bw_output_layer_norm_weights_data = output_layer_norm_weights_data,
    fw_output_data=fw_golden_output_data,
    bw_output_data=bw_golden_output_data
)
diff --git a/tests/nnapi/specs/skip/V1_2/bidirectional_sequence_rnn.mod.py b/tests/nnapi/specs/skip/V1_2/bidirectional_sequence_rnn.mod.py
new file mode 100644
index 000000000..b0a250170
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/bidirectional_sequence_rnn.mod.py
@@ -0,0 +1,528 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import numpy as np
+import sys
+
+
def convert_to_time_major(tensor, tensor_shape):
  """Re-layout a flat batch-major [batch, time, feature] tensor as
  time-major [time, batch, feature], returned as a flat list."""
  batch_major = np.asarray(tensor).reshape(tensor_shape)
  time_major = np.swapaxes(batch_major, 0, 1)
  return time_major.ravel().tolist()
+
+
def merge_outputs(a, a_shape, b, b_shape):
  """Concatenate two flat 3-D tensors along their last (feature) axis and
  return the merged tensor as a flat list."""
  lhs = np.asarray(a).reshape(a_shape)
  rhs = np.asarray(b).reshape(b_shape)
  return np.concatenate([lhs, rhs], axis=2).ravel().tolist()
+
def reverse_batch_major(tensor, tensor_shape):
  """Reverse the time axis (axis 1) of a flat batch-major tensor and
  return it as a flat list."""
  batch_major = np.asarray(tensor).reshape(tensor_shape)
  return np.flip(batch_major, axis=1).ravel().tolist()
+
def split_tensor_in_two(tensor, tensor_shape):
  """Split a flat tensor into two equal halves along its last axis.

  Returns a (left_half, right_half) pair of flat lists; the last entry of
  tensor_shape must be even.
  """
  whole = np.asarray(tensor).reshape(tensor_shape)
  left, right = np.split(whole, 2, axis=whole.ndim - 1)
  return left.ravel().tolist(), right.ravel().tolist()
+
def test(name, input, fw_weights, fw_recurrent_weights, fw_bias,
         fw_hidden_state, bw_weights, bw_recurrent_weights, bw_bias,
         bw_hidden_state, aux_input, fw_aux_weights, bw_aux_weights, activation,
         time_major, merge_outputs, fw_output, bw_output, input_data,
         fw_weights_data, fw_recurrent_weights_data, fw_bias_data,
         fw_hidden_state_data, bw_weights_data, bw_recurrent_weights_data,
         bw_bias_data, bw_hidden_state_data, aux_input_data,
         fw_aux_weights_data, bw_aux_weights_data, fw_output_data,
         bw_output_data):
  """Build one BIDIRECTIONAL_SEQUENCE_RNN model plus its example data set.

  Operand objects (Input/Output) come first, then the plain scalar options
  (activation, time_major, merge_outputs), then the matching payload lists.
  NOTE(review): Model, Example, Int32Scalar and BoolScalar are injected by
  the NNAPI test-generator harness rather than imported here.  Also note the
  `merge_outputs` parameter shadows the module-level merge_outputs() helper
  inside this function; the plain bool is what is intended here.
  """
  # Wrap the plain scalars as NNAPI scalar operands.
  activation = Int32Scalar("activation", activation)
  time_major = BoolScalar("time_major", time_major)
  merge_outputs_scalar = BoolScalar("merge_outputs", merge_outputs)
  model = Model().Operation(
      "BIDIRECTIONAL_SEQUENCE_RNN", input, fw_weights, fw_recurrent_weights,
      fw_bias, fw_hidden_state, bw_weights, bw_recurrent_weights, bw_bias,
      bw_hidden_state, aux_input, fw_aux_weights, bw_aux_weights, activation,
      time_major, merge_outputs_scalar)
  # When outputs are merged the op produces a single concatenated tensor,
  # so only fw_output is declared as a model output.
  if merge_outputs:
    model = model.To(fw_output)
  else:
    model = model.To(fw_output, bw_output)

  data_dict = {
      input: input_data,
      fw_weights: fw_weights_data,
      fw_recurrent_weights: fw_recurrent_weights_data,
      fw_bias: fw_bias_data,
      fw_hidden_state: fw_hidden_state_data,
      bw_weights: bw_weights_data,
      bw_recurrent_weights: bw_recurrent_weights_data,
      bw_bias: bw_bias_data,
      bw_hidden_state: bw_hidden_state_data,
      aux_input: aux_input_data,
      fw_aux_weights: fw_aux_weights_data,
      bw_aux_weights: bw_aux_weights_data,
      fw_output: fw_output_data,
  }
  # bw_output only exists as a model output when outputs are not merged.
  if not merge_outputs:
    data_dict[bw_output] = bw_output_data

  example = Example(
      data_dict, model=model, name=name).AddVariations("relaxed", "float16")
+
+
# Model dimensions: two batches of 16 time steps with 8 features per step;
# both RNN directions use 16 hidden units.
num_batches = 2
max_time = 16
input_size = 8
fw_num_units = 16
bw_num_units = 16

# Flat batch-major input [num_batches, max_time, input_size]; the literal
# covers one batch (128 values) and is duplicated so both batches are equal.
input_data = [
    0.23689353, 0.285385, 0.037029743, -0.19858193, -0.27569133, 0.43773448,
    0.60379338, 0.35562468, -0.69424844, -0.93421471, -0.87287879, 0.37144363,
    -0.62476718, 0.23791671, 0.40060222, 0.1356622, -0.99774903, -0.98858172,
    -0.38952237, -0.47685933, 0.31073618, 0.71511042, -0.63767755, -0.31729108,
    0.33468103, 0.75801885, 0.30660987, -0.37354088, 0.77002847, -0.62747043,
    -0.68572164, 0.0069220066, 0.65791464, 0.35130811, 0.80834007, -0.61777675,
    -0.21095741, 0.41213346, 0.73784804, 0.094794154, 0.47791874, 0.86496925,
    -0.53376222, 0.85315156, 0.10288584, 0.86684, -0.011186242, 0.10513687,
    0.87825835, 0.59929144, 0.62827742, 0.18899453, 0.31440187, 0.99059987,
    0.87170351, -0.35091716, 0.74861872, 0.17831337, 0.2755419, 0.51864719,
    0.55084288, 0.58982027, -0.47443086, 0.20875752, -0.058871567, -0.66609079,
    0.59098077, 0.73017097, 0.74604273, 0.32882881, -0.17503482, 0.22396147,
    0.19379807, 0.29120302, 0.077113032, -0.70331609, 0.15804303, -0.93407321,
    0.40182066, 0.036301374, 0.66521823, 0.0300982, -0.7747041, -0.02038002,
    0.020698071, -0.90300065, 0.62870288, -0.23068321, 0.27531278, -0.095755219,
    -0.712036, -0.17384434, -0.50593495, -0.18646687, -0.96508682, 0.43519354,
    0.14744234, 0.62589407, 0.1653645, -0.10651493, -0.045277178, 0.99032974,
    -0.88255352, -0.85147917, 0.28153265, 0.19455957, -0.55479527, -0.56042433,
    0.26048636, 0.84702539, 0.47587705, -0.074295521, -0.12287641, 0.70117295,
    0.90532446, 0.89782166, 0.79817224, 0.53402734, -0.33286154, 0.073485017,
    -0.56172788, -0.044897556, 0.89964068, -0.067662835, 0.76863563, 0.93455386,
    -0.6324693, -0.083922029
] * 2

# Input-to-hidden weights [num_units, input_size], shared by both directions
# in the test cases below.
weights_data = [
    0.461459, 0.153381, 0.529743, -0.00371218, 0.676267, -0.211346, 0.317493,
    0.969689, -0.343251, 0.186423, 0.398151, 0.152399, 0.448504, 0.317662,
    0.523556, -0.323514, 0.480877, 0.333113, -0.757714, -0.674487, -0.643585,
    0.217766, -0.0251462, 0.79512, -0.595574, -0.422444, 0.371572, -0.452178,
    -0.556069, -0.482188, -0.685456, -0.727851, 0.841829, 0.551535, -0.232336,
    0.729158, -0.00294906, -0.69754, 0.766073, -0.178424, 0.369513, -0.423241,
    0.548547, -0.0152023, -0.757482, -0.85491, 0.251331, -0.989183, 0.306261,
    -0.340716, 0.886103, -0.0726757, -0.723523, -0.784303, 0.0354295, 0.566564,
    -0.485469, -0.620498, 0.832546, 0.697884, -0.279115, 0.294415, -0.584313,
    0.548772, 0.0648819, 0.968726, 0.723834, -0.0080452, -0.350386, -0.272803,
    0.115121, -0.412644, -0.824713, -0.992843, -0.592904, -0.417893, 0.863791,
    -0.423461, -0.147601, -0.770664, -0.479006, 0.654782, 0.587314, -0.639158,
    0.816969, -0.337228, 0.659878, 0.73107, 0.754768, -0.337042, 0.0960841,
    0.368357, 0.244191, -0.817703, -0.211223, 0.442012, 0.37225, -0.623598,
    -0.405423, 0.455101, 0.673656, -0.145345, -0.511346, -0.901675, -0.81252,
    -0.127006, 0.809865, -0.721884, 0.636255, 0.868989, -0.347973, -0.10179,
    -0.777449, 0.917274, 0.819286, 0.206218, -0.00785118, 0.167141, 0.45872,
    0.972934, -0.276798, 0.837861, 0.747958, -0.0151566, -0.330057, -0.469077,
    0.277308, 0.415818
]

# Recurrent weights [num_units, num_units]: 0.1 times the identity matrix.
recurrent_weights_data = [
    0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1
]

# Per-unit bias [num_units], shared by both directions.
bias_data = [
    0.065691948, -0.69055247, 0.1107955, -0.97084129, -0.23957068, -0.23566568,
    -0.389184, 0.47481549, -0.4791103, 0.29931796, 0.10463274, 0.83918178,
    0.37197268, 0.61957061, 0.3956964, -0.37609905
]

# Golden forward output, batch-major [num_batches, max_time, fw_num_units];
# one batch of values duplicated for the second (matching the input above).
fw_output_data = [
    0.496726, 0, 0.965996, 0, 0.0584254, 0, 0, 0.12315, 0, 0, 0.612266,
    0.456601, 0, 0.52286, 1.16099, 0.0291232, 0, 0, 0.524901, 0, 0, 0, 0,
    1.02116, 0, 1.35762, 0, 0.356909, 0.436415, 0.0355727, 0, 0, 0, 0, 0,
    0.262335, 0, 0, 0, 1.33992, 0, 2.9739, 0, 0, 1.31914, 2.66147, 0, 0,
    0.942568, 0, 0, 0, 0.025507, 0, 0, 0, 0.321429, 0.569141, 1.25274, 1.57719,
    0.8158, 1.21805, 0.586239, 0.25427, 1.04436, 0, 0.630725, 0, 0.133801,
    0.210693, 0.363026, 0, 0.533426, 0, 1.25926, 0.722707, 0, 1.22031, 1.30117,
    0.495867, 0.222187, 0, 0.72725, 0, 0.767003, 0, 0, 0.147835, 0, 0, 0,
    0.608758, 0.469394, 0.00720298, 0.927537, 0, 0.856974, 0.424257, 0, 0,
    0.937329, 0, 0, 0, 0.476425, 0, 0.566017, 0.418462, 0.141911, 0.996214,
    1.13063, 0, 0.967899, 0, 0, 0, 0.0831304, 0, 0, 1.00378, 0, 0, 0, 1.44818,
    1.01768, 0.943891, 0.502745, 0, 0.940135, 0, 0, 0, 0, 0, 0, 2.13243, 0,
    0.71208, 0.123918, 1.53907, 1.30225, 1.59644, 0.70222, 0, 0.804329, 0,
    0.430576, 0, 0.505872, 0.509603, 0.343448, 0, 0.107756, 0.614544, 1.44549,
    1.52311, 0.0454298, 0.300267, 0.562784, 0.395095, 0.228154, 0, 0.675323, 0,
    1.70536, 0.766217, 0, 0, 0, 0.735363, 0.0759267, 1.91017, 0.941888, 0, 0, 0,
    0, 0, 1.5909, 0, 0, 0, 0, 0.5755, 0, 0.184687, 0, 1.56296, 0.625285, 0, 0,
    0, 0, 0, 0.0857888, 0, 0, 0, 0, 0.488383, 0.252786, 0, 0, 0, 1.02817,
    1.85665, 0, 0, 0.00981836, 0, 1.06371, 0, 0, 0, 0, 0, 0, 0.290445, 0.316406,
    0, 0.304161, 1.25079, 0.0707152, 0, 0.986264, 0.309201, 0, 0, 0, 0, 0,
    1.64896, 0.346248, 0, 0.918175, 0.78884, 0.524981, 1.92076, 2.07013,
    0.333244, 0.415153, 0.210318, 0, 0, 0, 0, 0, 2.02616, 0, 0.728256, 0.84183,
    0.0907453, 0.628881, 3.58099, 1.49974, 0
] * 2

# Golden backward output, same layout as fw_output_data.
bw_output_data = [
    0.496726, 0, 1.00883, 0, 0.0584256, 0, 0, 0.236412, 0, 0, 0.612267,
    0.487726, 0, 0.54883, 1.16099, 0.0291233, 0, 0, 0.428302, 0, 0, 0, 0,
    1.13262, 0, 1.64415, 0, 0.311249, 0.570804, 0.259696, 0, 0, 0, 0, 0,
    0.262334, 0, 0, 0, 1.23781, 0, 2.86532, 0, 0, 1.34389, 2.76409, 0, 0,
    1.03969, 0, 0.00410865, 0, 0.0470295, 0, 0, 0, 0.371556, 0.27175, 1.36614,
    1.63956, 0.683887, 1.06176, 0.719552, 0.301314, 0.971195, 0, 0.697143, 0,
    0.215219, 0.210693, 0.363027, 0, 0.501283, 0, 1.13399, 0.623774, 0, 1.09851,
    1.33313, 0.470441, 0.210965, 0, 0.664178, 0, 0.839686, 0, 0, 0.147834, 0, 0,
    0, 0.58786, 0.490128, 0, 0.905806, 0, 0.932134, 0.424257, 0, 0, 0.860629, 0,
    0, 0, 0.476425, 0, 0.566017, 0.513721, 0.207341, 1.09508, 1.08385, 0,
    0.973787, 0, 0, 0, 0, 0, 0, 1.20698, 0, 0, 0, 1.56135, 1.12369, 0.99588,
    0.459803, 0, 0.915854, 0, 0, 0, 0, 0, 0, 2.03206, 0, 0.773264, 0.267228,
    1.55012, 1.202, 1.51611, 0.701202, 0, 0.725088, 0, 0.509069, 0, 0.671349,
    0.581129, 0.343447, 0, 0.107755, 0.611838, 1.4331, 1.55871, 0.015242,
    0.140624, 0.492562, 0.395095, 0.147722, 0, 0.784925, 0, 1.65477, 0.715257,
    0, 0, 0, 0.685024, 0, 1.89505, 1.00037, 0, 0, 0, 0, 0, 1.52659, 0, 0, 0, 0,
    0.618583, 0, 0.11115, 0, 1.37194, 0.630225, 0, 0, 0, 0, 0, 0.0322124, 0, 0,
    0, 0, 0.430834, 0.252786, 0, 0, 0, 0.991297, 1.98451, 0, 0, 0.111511, 0,
    1.05513, 0, 0, 0, 0, 0, 0, 0.290445, 0.412559, 0.0429958, 0.256564, 1.27858,
    0.289948, 0, 1.01693, 0.327141, 0, 0, 0, 0, 0, 1.83508, 0.346248, 0,
    0.961535, 0.790026, 0.552203, 2.13457, 2.19233, 0.333244, 0.316526,
    0.179398, 0, 0, 0, 0, 0, 1.86126, 0, 0.728256, 0.750013, 0.011861, 0.576383,
    3.38891, 1.29273, 0
] * 2
+
# Case 1: batch-major layout with separate fw/bw outputs (merge_outputs=0).
# The auxiliary input path is disabled: aux operands use shape "{0}" with
# empty data. activation=1 selects the RELU fused activation.
test(
    name="blackbox",
    input=Input("input", "TENSOR_FLOAT32", "{{ {}, {}, {} }}".format(
        num_batches, max_time, input_size)),
    fw_weights=Input("fw_weights", "TENSOR_FLOAT32", "{{ {}, {} }}".format(
        fw_num_units, input_size)),
    fw_recurrent_weights=Input(
        "fw_recurrent_weights", "TENSOR_FLOAT32", "{{ {}, {} }}".format(
            fw_num_units, fw_num_units)),
    fw_bias=Input("fw_bias", "TENSOR_FLOAT32", "{{ {} }}".format(fw_num_units)),
    fw_hidden_state=Input("fw_hidden_state", "TENSOR_FLOAT32",
                          "{{ {}, {} }}".format(num_batches, fw_num_units)),
    bw_weights=Input("bw_weights", "TENSOR_FLOAT32", "{{ {}, {} }}".format(
        bw_num_units, input_size)),
    bw_recurrent_weights=Input(
        "bw_recurrent_weights", "TENSOR_FLOAT32", "{{ {}, {} }}".format(
            bw_num_units, bw_num_units)),
    bw_bias=Input("bw_bias", "TENSOR_FLOAT32", "{{ {} }}".format(bw_num_units)),
    bw_hidden_state=Input("bw_hidden_state", "TENSOR_FLOAT32",
                          "{{ {}, {} }}".format(num_batches, bw_num_units)),
    aux_input=Input("aux_input", "TENSOR_FLOAT32", "{0}"),
    fw_aux_weights=Input("fw_aux_weights", "TENSOR_FLOAT32",
                         "{0}"),
    bw_aux_weights=Input("bw_aux_weights", "TENSOR_FLOAT32",
                         "{0}"),
    fw_output=Output(
        "fw_output", "TENSOR_FLOAT32", "{{ {}, {}, {} }}".format(
            num_batches, max_time, fw_num_units)),
    bw_output=Output(
        "bw_output", "TENSOR_FLOAT32", "{{ {}, {}, {} }}".format(
            num_batches, max_time, bw_num_units)),
    activation=1,
    time_major=0,
    merge_outputs=0,
    input_data=input_data,
    fw_weights_data=weights_data,
    fw_recurrent_weights_data=recurrent_weights_data,
    fw_bias_data=bias_data,
    fw_hidden_state_data=[0] * num_batches * fw_num_units,
    bw_weights_data=weights_data,
    bw_recurrent_weights_data=recurrent_weights_data,
    bw_bias_data=bias_data,
    bw_hidden_state_data=[0] * num_batches * bw_num_units,
    aux_input_data=[],
    fw_aux_weights_data=[],
    bw_aux_weights_data=[],
    fw_output_data=fw_output_data,
    bw_output_data=bw_output_data,
)
+
# Case 2: same weights and data as Case 1 but in time-major layout
# (time_major=1): input/output tensors are [max_time, num_batches, ...] and
# the batch-major reference data is transposed via convert_to_time_major().
test(
    name="blackbox_time_major",
    input=Input("input", "TENSOR_FLOAT32", "{{ {}, {}, {} }}".format(
        max_time, num_batches, input_size)),
    fw_weights=Input("fw_weights", "TENSOR_FLOAT32", "{{ {}, {} }}".format(
        fw_num_units, input_size)),
    fw_recurrent_weights=Input(
        "fw_recurrent_weights", "TENSOR_FLOAT32", "{{ {}, {} }}".format(
            fw_num_units, fw_num_units)),
    fw_bias=Input("fw_bias", "TENSOR_FLOAT32", "{{ {} }}".format(fw_num_units)),
    fw_hidden_state=Input("fw_hidden_state", "TENSOR_FLOAT32",
                          "{{ {}, {} }}".format(num_batches, fw_num_units)),
    bw_weights=Input("bw_weights", "TENSOR_FLOAT32", "{{ {}, {} }}".format(
        bw_num_units, input_size)),
    bw_recurrent_weights=Input(
        "bw_recurrent_weights", "TENSOR_FLOAT32", "{{ {}, {} }}".format(
            bw_num_units, bw_num_units)),
    bw_bias=Input("bw_bias", "TENSOR_FLOAT32", "{{ {} }}".format(bw_num_units)),
    bw_hidden_state=Input("bw_hidden_state", "TENSOR_FLOAT32",
                          "{{ {}, {} }}".format(num_batches, bw_num_units)),
    aux_input=Input("aux_input", "TENSOR_FLOAT32", "{0}"),
    fw_aux_weights=Input("fw_aux_weights", "TENSOR_FLOAT32",
                         "{0}"),
    bw_aux_weights=Input("bw_aux_weights", "TENSOR_FLOAT32",
                         "{0}"),
    fw_output=Output(
        "fw_output", "TENSOR_FLOAT32", "{{ {}, {}, {} }}".format(
            max_time, num_batches, fw_num_units)),
    bw_output=Output(
        "bw_output", "TENSOR_FLOAT32", "{{ {}, {}, {} }}".format(
            max_time, num_batches, bw_num_units)),
    activation=1,
    time_major=1,
    merge_outputs=0,
    input_data=convert_to_time_major(input_data,
                                     [num_batches, max_time, input_size]),
    fw_weights_data=weights_data,
    fw_recurrent_weights_data=recurrent_weights_data,
    fw_bias_data=bias_data,
    fw_hidden_state_data=[0] * num_batches * fw_num_units,
    bw_weights_data=weights_data,
    bw_recurrent_weights_data=recurrent_weights_data,
    bw_bias_data=bias_data,
    bw_hidden_state_data=[0] * num_batches * bw_num_units,
    aux_input_data=[],
    fw_aux_weights_data=[],
    bw_aux_weights_data=[],
    fw_output_data=convert_to_time_major(fw_output_data,
                                         [num_batches, max_time, fw_num_units]),
    bw_output_data=convert_to_time_major(bw_output_data,
                                         [num_batches, max_time, bw_num_units]),
)
+
# Case 3: time-major with merged outputs (merge_outputs=1). The single output
# tensor concatenates fw and bw features (width fw_num_units + bw_num_units),
# built by the module-level merge_outputs() helper; the bw_output operand and
# its data are therefore None.
test(
    name="blackbox_time_major_merge_outputs",
    input=Input("input", "TENSOR_FLOAT32", "{{ {}, {}, {} }}".format(
        max_time, num_batches, input_size)),
    fw_weights=Input("fw_weights", "TENSOR_FLOAT32", "{{ {}, {} }}".format(
        fw_num_units, input_size)),
    fw_recurrent_weights=Input(
        "fw_recurrent_weights", "TENSOR_FLOAT32", "{{ {}, {} }}".format(
            fw_num_units, fw_num_units)),
    fw_bias=Input("fw_bias", "TENSOR_FLOAT32", "{{ {} }}".format(fw_num_units)),
    fw_hidden_state=Input("fw_hidden_state", "TENSOR_FLOAT32",
                          "{{ {}, {} }}".format(num_batches, fw_num_units)),
    bw_weights=Input("bw_weights", "TENSOR_FLOAT32", "{{ {}, {} }}".format(
        bw_num_units, input_size)),
    bw_recurrent_weights=Input(
        "bw_recurrent_weights", "TENSOR_FLOAT32", "{{ {}, {} }}".format(
            bw_num_units, bw_num_units)),
    bw_bias=Input("bw_bias", "TENSOR_FLOAT32", "{{ {} }}".format(bw_num_units)),
    bw_hidden_state=Input("bw_hidden_state", "TENSOR_FLOAT32",
                          "{{ {}, {} }}".format(num_batches, bw_num_units)),
    aux_input=Input("aux_input", "TENSOR_FLOAT32", "{0}"),
    fw_aux_weights=Input("fw_aux_weights", "TENSOR_FLOAT32",
                         "{0}"),
    bw_aux_weights=Input("bw_aux_weights", "TENSOR_FLOAT32",
                         "{0}"),
    fw_output=Output(
        "fw_output", "TENSOR_FLOAT32", "{{ {}, {}, {} }}".format(
            max_time, num_batches, fw_num_units + bw_num_units)),
    bw_output=None,
    activation=1,
    time_major=1,
    merge_outputs=1,
    input_data=convert_to_time_major(input_data,
                                     [num_batches, max_time, input_size]),
    fw_weights_data=weights_data,
    fw_recurrent_weights_data=recurrent_weights_data,
    fw_bias_data=bias_data,
    fw_hidden_state_data=[0] * num_batches * fw_num_units,
    bw_weights_data=weights_data,
    bw_recurrent_weights_data=recurrent_weights_data,
    bw_bias_data=bias_data,
    bw_hidden_state_data=[0] * num_batches * bw_num_units,
    aux_input_data=[],
    fw_aux_weights_data=[],
    bw_aux_weights_data=[],
    fw_output_data=merge_outputs(
        convert_to_time_major(fw_output_data,
                              [num_batches, max_time, fw_num_units]),
        [max_time, num_batches, fw_num_units],
        convert_to_time_major(bw_output_data,
                              [num_batches, max_time, bw_num_units]),
        [max_time, num_batches, bw_num_units],
    ),
    bw_output_data=None,
)
+
# Case 4: reversing the time dimension of the input swaps the roles of the
# two directions, so the expected fw output is the time-reversed bw golden
# data and vice versa (hence the deliberate fw/bw swap in the last two args).
test(
    name="blackbox_reversed_inputs",
    input=Input("input", "TENSOR_FLOAT32", "{{ {}, {}, {} }}".format(
        num_batches, max_time, input_size)),
    fw_weights=Input("fw_weights", "TENSOR_FLOAT32", "{{ {}, {} }}".format(
        fw_num_units, input_size)),
    fw_recurrent_weights=Input(
        "fw_recurrent_weights", "TENSOR_FLOAT32", "{{ {}, {} }}".format(
            fw_num_units, fw_num_units)),
    fw_bias=Input("fw_bias", "TENSOR_FLOAT32", "{{ {} }}".format(fw_num_units)),
    fw_hidden_state=Input("fw_hidden_state", "TENSOR_FLOAT32",
                          "{{ {}, {} }}".format(num_batches, fw_num_units)),
    bw_weights=Input("bw_weights", "TENSOR_FLOAT32", "{{ {}, {} }}".format(
        bw_num_units, input_size)),
    bw_recurrent_weights=Input(
        "bw_recurrent_weights", "TENSOR_FLOAT32", "{{ {}, {} }}".format(
            bw_num_units, bw_num_units)),
    bw_bias=Input("bw_bias", "TENSOR_FLOAT32", "{{ {} }}".format(bw_num_units)),
    bw_hidden_state=Input("bw_hidden_state", "TENSOR_FLOAT32",
                          "{{ {}, {} }}".format(num_batches, bw_num_units)),
    aux_input=Input("aux_input", "TENSOR_FLOAT32", "{0}"),
    fw_aux_weights=Input("fw_aux_weights", "TENSOR_FLOAT32", "{0}"),
    bw_aux_weights=Input("bw_aux_weights", "TENSOR_FLOAT32", "{0}"),
    fw_output=Output(
        "fw_output", "TENSOR_FLOAT32", "{{ {}, {}, {} }}".format(
            num_batches, max_time, fw_num_units)),
    bw_output=Output(
        "bw_output", "TENSOR_FLOAT32", "{{ {}, {}, {} }}".format(
            num_batches, max_time, bw_num_units)),
    activation=1,
    time_major=0,
    merge_outputs=0,
    input_data=reverse_batch_major(input_data, [num_batches, max_time, input_size]),
    fw_weights_data=weights_data,
    fw_recurrent_weights_data=recurrent_weights_data,
    fw_bias_data=bias_data,
    fw_hidden_state_data=[0] * num_batches * fw_num_units,
    bw_weights_data=weights_data,
    bw_recurrent_weights_data=recurrent_weights_data,
    bw_bias_data=bias_data,
    bw_hidden_state_data=[0] * num_batches * bw_num_units,
    aux_input_data=[],
    fw_aux_weights_data=[],
    bw_aux_weights_data=[],
    fw_output_data=reverse_batch_major(bw_output_data, [num_batches, max_time, bw_num_units]),
    bw_output_data=reverse_batch_major(fw_output_data, [num_batches, max_time, fw_num_units]),
)
+
# Same test as blackbox but an input is passed to auxiliary input instead of the
# regular one. Regular input and weights are set to zero.
# Because the regular path contributes nothing, the expected outputs are
# identical to Case 1's golden data.
test(
    name="blackbox_aux_input",
    input=Input("input", "TENSOR_FLOAT32",
                "{{ {}, {}, {} }}".format(num_batches, max_time, input_size)),
    fw_weights=Input("fw_weights", "TENSOR_FLOAT32",
                     "{{ {}, {} }}".format(fw_num_units, input_size)),
    fw_recurrent_weights=Input(
        "fw_recurrent_weights", "TENSOR_FLOAT32",
        "{{ {}, {} }}".format(fw_num_units, fw_num_units)),
    fw_bias=Input("fw_bias", "TENSOR_FLOAT32", "{{ {} }}".format(fw_num_units)),
    fw_hidden_state=Input("fw_hidden_state", "TENSOR_FLOAT32",
                          "{{ {}, {} }}".format(num_batches, fw_num_units)),
    bw_weights=Input("bw_weights", "TENSOR_FLOAT32",
                     "{{ {}, {} }}".format(bw_num_units, input_size)),
    bw_recurrent_weights=Input(
        "bw_recurrent_weights", "TENSOR_FLOAT32",
        "{{ {}, {} }}".format(bw_num_units, bw_num_units)),
    bw_bias=Input("bw_bias", "TENSOR_FLOAT32", "{{ {} }}".format(bw_num_units)),
    bw_hidden_state=Input("bw_hidden_state", "TENSOR_FLOAT32",
                          "{{ {}, {} }}".format(num_batches, bw_num_units)),
    aux_input=Input(
        "aux_input", "TENSOR_FLOAT32",
        "{{ {}, {}, {} }}".format(num_batches, max_time, input_size)),
    fw_aux_weights=Input("fw_aux_weights", "TENSOR_FLOAT32",
                         "{{ {}, {} }}".format(fw_num_units, input_size)),
    bw_aux_weights=Input("bw_aux_weights", "TENSOR_FLOAT32",
                         "{{ {}, {} }}".format(bw_num_units, input_size)),
    fw_output=Output(
        "fw_output", "TENSOR_FLOAT32",
        "{{ {}, {}, {} }}".format(num_batches, max_time, fw_num_units)),
    bw_output=Output(
        "bw_output", "TENSOR_FLOAT32",
        "{{ {}, {}, {} }}".format(num_batches, max_time, bw_num_units)),
    activation=1,
    time_major=0,
    merge_outputs=0,
    input_data=[0] * num_batches * max_time * input_size,
    fw_weights_data=[0] * fw_num_units * input_size,
    fw_recurrent_weights_data=recurrent_weights_data,
    fw_bias_data=bias_data,
    fw_hidden_state_data=[0] * num_batches * fw_num_units,
    bw_weights_data=[0] * bw_num_units * input_size,
    bw_recurrent_weights_data=recurrent_weights_data,
    bw_bias_data=bias_data,
    bw_hidden_state_data=[0] * num_batches * bw_num_units,
    aux_input_data=input_data,
    fw_aux_weights_data=weights_data,
    bw_aux_weights_data=weights_data,
    fw_output_data=fw_output_data,
    bw_output_data=bw_output_data,
)
+
# Same test as blackbox but input is split in half and passed to both regular
# and auxiliary input to test their interaction.
# The split is along the last (feature) axis, so both paths see
# input_size // 2 features and the summed result matches Case 1's goldens.
regular_input_data, aux_input_data = split_tensor_in_two(
    input_data, [num_batches, max_time, input_size])
regular_fw_weights, aux_fw_weights = split_tensor_in_two(
    weights_data, [fw_num_units, input_size])
regular_bw_weights, aux_bw_weights = split_tensor_in_two(
    weights_data, [bw_num_units, input_size])

test(
    name="blackbox_regular_and_aux_input",
    input=Input(
        "input", "TENSOR_FLOAT32",
        "{{ {}, {}, {} }}".format(num_batches, max_time, input_size // 2)),
    fw_weights=Input("fw_weights", "TENSOR_FLOAT32",
                     "{{ {}, {} }}".format(fw_num_units, input_size // 2)),
    fw_recurrent_weights=Input(
        "fw_recurrent_weights", "TENSOR_FLOAT32",
        "{{ {}, {} }}".format(fw_num_units, fw_num_units)),
    fw_bias=Input("fw_bias", "TENSOR_FLOAT32", "{{ {} }}".format(fw_num_units)),
    fw_hidden_state=Input("fw_hidden_state", "TENSOR_FLOAT32",
                          "{{ {}, {} }}".format(num_batches, fw_num_units)),
    bw_weights=Input("bw_weights", "TENSOR_FLOAT32",
                     "{{ {}, {} }}".format(bw_num_units, input_size // 2)),
    bw_recurrent_weights=Input(
        "bw_recurrent_weights", "TENSOR_FLOAT32",
        "{{ {}, {} }}".format(bw_num_units, bw_num_units)),
    bw_bias=Input("bw_bias", "TENSOR_FLOAT32", "{{ {} }}".format(bw_num_units)),
    bw_hidden_state=Input("bw_hidden_state", "TENSOR_FLOAT32",
                          "{{ {}, {} }}".format(num_batches, bw_num_units)),
    aux_input=Input(
        "aux_input", "TENSOR_FLOAT32",
        "{{ {}, {}, {} }}".format(num_batches, max_time, input_size // 2)),
    fw_aux_weights=Input("fw_aux_weights", "TENSOR_FLOAT32",
                         "{{ {}, {} }}".format(fw_num_units, input_size // 2)),
    bw_aux_weights=Input("bw_aux_weights", "TENSOR_FLOAT32",
                         "{{ {}, {} }}".format(bw_num_units, input_size // 2)),
    fw_output=Output(
        "fw_output", "TENSOR_FLOAT32",
        "{{ {}, {}, {} }}".format(num_batches, max_time, fw_num_units)),
    bw_output=Output(
        "bw_output", "TENSOR_FLOAT32",
        "{{ {}, {}, {} }}".format(num_batches, max_time, bw_num_units)),
    activation=1,
    time_major=0,
    merge_outputs=0,
    input_data=regular_input_data,
    fw_weights_data=regular_fw_weights,
    fw_recurrent_weights_data=recurrent_weights_data,
    fw_bias_data=bias_data,
    fw_hidden_state_data=[0] * num_batches * fw_num_units,
    bw_weights_data=regular_bw_weights,
    bw_recurrent_weights_data=recurrent_weights_data,
    bw_bias_data=bias_data,
    bw_hidden_state_data=[0] * num_batches * bw_num_units,
    aux_input_data=aux_input_data,
    fw_aux_weights_data=aux_fw_weights,
    bw_aux_weights_data=aux_bw_weights,
    fw_output_data=fw_output_data,
    bw_output_data=bw_output_data,
)
diff --git a/tests/nnapi/specs/skip/V1_2/box_with_nms_limit_gaussian.mod.py b/tests/nnapi/specs/skip/V1_2/box_with_nms_limit_gaussian.mod.py
new file mode 100644
index 000000000..ed79ccc76
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/box_with_nms_limit_gaussian.mod.py
@@ -0,0 +1,198 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# TEST 1: BOX_WITH_NMS_LIMIT, score_threshold = 0.3, sigma = 0.5, max_detections = -1
+i1 = Input("scores", "TENSOR_FLOAT32", "{19, 3}") # scores
+i2 = Input("roi", "TENSOR_FLOAT32", "{19, 12}") # roi
+i3 = Input("batchSplit", "TENSOR_INT32", "{19}") # batchSplit
+
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{18}") # scores out
+o2 = Output("roiOut", "TENSOR_FLOAT32", "{18, 4}") # roi out
+o3 = Output("classesOut", "TENSOR_INT32", "{18}") # classes out
+o4 = Output("batchSplitOut", "TENSOR_INT32", "{18}") # batch split out
+model = Model().Operation("BOX_WITH_NMS_LIMIT", i1, i2, i3, 0.3, -1, 2, 0.4, 0.5, 0.3).To(o1, o2, o3, o4)
+
+quant8 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.01, 0),
+ i2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.01, 0),
+ o2: ("TENSOR_QUANT16_ASYMM", 0.125, 0)
+})
+
+input0 = {
+ i1: [ # scores
+ 0.90, 0.95, 0.75,
+ 0.80, 0.70, 0.85,
+ 0.60, 0.90, 0.95,
+ 0.90, 0.65, 0.90,
+ 0.80, 0.85, 0.80,
+ 0.60, 0.60, 0.20,
+ 0.60, 0.80, 0.40,
+ 0.90, 0.55, 0.60,
+ 0.90, 0.75, 0.70,
+ 0.80, 0.70, 0.85,
+ 0.90, 0.95, 0.75,
+ 0.80, 0.85, 0.80,
+ 0.60, 0.90, 0.95,
+ 0.60, 0.60, 0.20,
+ 0.50, 0.90, 0.80,
+ 0.90, 0.75, 0.70,
+ 0.90, 0.65, 0.90,
+ 0.90, 0.55, 0.60,
+ 0.60, 0.80, 0.40
+ ],
+ i2: [ # roi
+ 1, 1, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10,
+ 2, 2, 11, 11, 1, 1, 11, 11, 1, 1, 11, 11,
+ 3, 3, 12, 12, 2, 2, 12, 12, 2, 2, 12, 12,
+ 4, 4, 13, 13, 3, 3, 13, 13, 3, 3, 13, 13,
+ 5, 5, 14, 14, 4, 4, 14, 14, 4, 4, 14, 14,
+ 6, 6, 15, 15, 5, 5, 15, 15, 5, 5, 15, 15,
+ 7, 7, 16, 16, 6, 6, 16, 16, 6, 6, 16, 16,
+ 8, 8, 17, 17, 7, 7, 17, 17, 7, 7, 17, 17,
+ 9, 9, 18, 18, 8, 8, 18, 18, 8, 8, 18, 18,
+ 2, 2, 11, 11, 2, 2, 12, 12, 2, 2, 12, 12,
+ 1, 1, 10, 10, 1, 1, 11, 11, 1, 1, 11, 11,
+ 5, 5, 14, 14, 5, 5, 15, 15, 5, 5, 15, 15,
+ 3, 3, 12, 12, 3, 3, 13, 13, 3, 3, 13, 13,
+ 6, 6, 15, 15, 6, 6, 16, 16, 6, 6, 16, 16,
+ 0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 2, 2,
+ 9, 9, 18, 18, 9, 9, 19, 19, 9, 9, 19, 19,
+ 4, 4, 13, 13, 4, 4, 14, 14, 4, 4, 14, 14,
+ 8, 8, 17, 17, 8, 8, 18, 18, 8, 8, 18, 18,
+ 7, 7, 16, 16, 7, 7, 17, 17, 7, 7, 17, 17
+ ],
+ i3: [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] # batch split
+}
+
+output0 = {
+ o1: [
+ 0.95, 0.7879927, 0.52485234, 0.47400165, 0.95, 0.6894936, 0.4812244, 0.42367333,
+ 0.95, 0.89983034, 0.7879927, 0.52485234, 0.47400165, 0.95, 0.8, 0.6894936, 0.4811337, 0.42367333
+ ],
+ o2: [
+ 0, 0, 10, 10,
+ 6, 6, 16, 16,
+ 2, 2, 12, 12,
+ 8, 8, 18, 18,
+ 2, 2, 12, 12,
+ 8, 8, 18, 18,
+ 0, 0, 10, 10,
+ 4, 4, 14, 14,
+ 1, 1, 11, 11,
+ 0, 0, 2, 2,
+ 7, 7, 17, 17,
+ 3, 3, 13, 13,
+ 9, 9, 19, 19,
+ 3, 3, 13, 13,
+ 0, 0, 2, 2,
+ 9, 9, 19, 19,
+ 1, 1, 11, 11,
+ 5, 5, 15, 15
+ ],
+ o3: [1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2],
+ o4: [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
+}
+
+Example((input0, output0)).AddVariations("relaxed", "float16", quant8)
+
+
+# TEST 2: BOX_WITH_NMS_LIMIT, score_threshold = 0.3, nms_threshold = 0.4, max_detections = 5
+i1 = Input("scores", "TENSOR_FLOAT32", "{19, 3}") # scores
+i2 = Input("roi", "TENSOR_FLOAT32", "{19, 12}") # roi
+i3 = Input("batchSplit", "TENSOR_INT32", "{19}") # batchSplit
+
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{10}") # scores out
+o2 = Output("roiOut", "TENSOR_FLOAT32", "{10, 4}") # roi out
+o3 = Output("classesOut", "TENSOR_INT32", "{10}") # classes out
+o4 = Output("batchSplitOut", "TENSOR_INT32", "{10}") # batch split out
+model = Model().Operation("BOX_WITH_NMS_LIMIT", i1, i2, i3, 0.3, 5, 2, 0.4, 0.5, 0.3).To(o1, o2, o3, o4)
+
+quant8 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.01, 128),
+ i2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.01, 128),
+ o2: ("TENSOR_QUANT16_ASYMM", 0.125, 0)
+})
+
+input0 = {
+ i1: [ # scores
+ 0.90, 0.95, 0.75,
+ 0.80, 0.70, 0.85,
+ 0.60, 0.90, 0.95,
+ 0.90, 0.65, 0.90,
+ 0.80, 0.85, 0.80,
+ 0.60, 0.60, 0.20,
+ 0.60, 0.80, 0.40,
+ 0.90, 0.55, 0.60,
+ 0.90, 0.75, 0.70,
+ 0.80, 0.70, 0.85,
+ 0.90, 0.95, 0.75,
+ 0.80, 0.85, 0.80,
+ 0.60, 0.90, 0.95,
+ 0.60, 0.60, 0.20,
+ 0.50, 0.90, 0.80,
+ 0.90, 0.75, 0.70,
+ 0.90, 0.65, 0.90,
+ 0.90, 0.55, 0.60,
+ 0.60, 0.80, 0.40
+ ],
+ i2: [ # roi
+ 1, 1, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10,
+ 2, 2, 11, 11, 1, 1, 11, 11, 1, 1, 11, 11,
+ 3, 3, 12, 12, 2, 2, 12, 12, 2, 2, 12, 12,
+ 4, 4, 13, 13, 3, 3, 13, 13, 3, 3, 13, 13,
+ 5, 5, 14, 14, 4, 4, 14, 14, 4, 4, 14, 14,
+ 6, 6, 15, 15, 5, 5, 15, 15, 5, 5, 15, 15,
+ 7, 7, 16, 16, 6, 6, 16, 16, 6, 6, 16, 16,
+ 8, 8, 17, 17, 7, 7, 17, 17, 7, 7, 17, 17,
+ 9, 9, 18, 18, 8, 8, 18, 18, 8, 8, 18, 18,
+ 2, 2, 11, 11, 2, 2, 12, 12, 2, 2, 12, 12,
+ 1, 1, 10, 10, 1, 1, 11, 11, 1, 1, 11, 11,
+ 5, 5, 14, 14, 5, 5, 15, 15, 5, 5, 15, 15,
+ 3, 3, 12, 12, 3, 3, 13, 13, 3, 3, 13, 13,
+ 6, 6, 15, 15, 6, 6, 16, 16, 6, 6, 16, 16,
+ 0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 2, 2,
+ 9, 9, 18, 18, 9, 9, 19, 19, 9, 9, 19, 19,
+ 4, 4, 13, 13, 4, 4, 14, 14, 4, 4, 14, 14,
+ 8, 8, 17, 17, 8, 8, 18, 18, 8, 8, 18, 18,
+ 7, 7, 16, 16, 7, 7, 17, 17, 7, 7, 17, 17
+ ],
+ i3: [1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3] # batch split
+}
+
+output0 = {
+ o1: [
+ 0.95, 0.7879927, 0.52485234, 0.95, 0.6894936,
+ 0.95, 0.89983034, 0.7879927, 0.95, 0.8
+ ],
+ o2: [
+ 0, 0, 10, 10,
+ 6, 6, 16, 16,
+ 2, 2, 12, 12,
+ 2, 2, 12, 12,
+ 8, 8, 18, 18,
+ 1, 1, 11, 11,
+ 0, 0, 2, 2,
+ 7, 7, 17, 17,
+ 3, 3, 13, 13,
+ 0, 0, 2, 2,
+ ],
+ o3: [1, 1, 1, 2, 2, 1, 1, 1, 2, 2],
+ o4: [1, 1, 1, 1, 1, 3, 3, 3, 3, 3],
+}
+
+Example((input0, output0)).AddVariations("relaxed", "float16", quant8)
diff --git a/tests/nnapi/specs/skip/V1_2/box_with_nms_limit_hard.mod.py b/tests/nnapi/specs/skip/V1_2/box_with_nms_limit_hard.mod.py
new file mode 100644
index 000000000..b572a6468
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/box_with_nms_limit_hard.mod.py
@@ -0,0 +1,186 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# TEST 1: BOX_WITH_NMS_LIMIT, score_threshold = 0.3, nms_threshold = 0.4, max_detections = -1
+i1 = Input("scores", "TENSOR_FLOAT32", "{19, 3}") # scores
+i2 = Input("roi", "TENSOR_FLOAT32", "{19, 12}") # roi
+i3 = Input("batchSplit", "TENSOR_INT32", "{19}") # batchSplit
+
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{12}") # scores out
+o2 = Output("roiOut", "TENSOR_FLOAT32", "{12, 4}") # roi out
+o3 = Output("classesOut", "TENSOR_INT32", "{12}") # classes out
+o4 = Output("batchSplitOut", "TENSOR_INT32", "{12}") # batch split out
+model = Model().Operation("BOX_WITH_NMS_LIMIT", i1, i2, i3, 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, o2, o3, o4)
+
+quant8 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.01, 0),
+ i2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.01, 0),
+ o2: ("TENSOR_QUANT16_ASYMM", 0.125, 0)
+})
+
+input0 = {
+ i1: [ # scores
+ 0.90, 0.95, 0.75,
+ 0.80, 0.70, 0.85,
+ 0.60, 0.90, 0.95,
+ 0.90, 0.65, 0.90,
+ 0.80, 0.85, 0.80,
+ 0.60, 0.60, 0.20,
+ 0.60, 0.80, 0.40,
+ 0.90, 0.55, 0.60,
+ 0.90, 0.75, 0.70,
+ 0.80, 0.70, 0.85,
+ 0.90, 0.95, 0.75,
+ 0.80, 0.85, 0.80,
+ 0.60, 0.90, 0.95,
+ 0.60, 0.60, 0.20,
+ 0.50, 0.90, 0.80,
+ 0.90, 0.75, 0.70,
+ 0.90, 0.65, 0.90,
+ 0.90, 0.55, 0.60,
+ 0.60, 0.80, 0.40
+ ],
+ i2: [ # roi
+ 1, 1, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10,
+ 2, 2, 11, 11, 1, 1, 11, 11, 1, 1, 11, 11,
+ 3, 3, 12, 12, 2, 2, 12, 12, 2, 2, 12, 12,
+ 4, 4, 13, 13, 3, 3, 13, 13, 3, 3, 13, 13,
+ 5, 5, 14, 14, 4, 4, 14, 14, 4, 4, 14, 14,
+ 6, 6, 15, 15, 5, 5, 15, 15, 5, 5, 15, 15,
+ 7, 7, 16, 16, 6, 6, 16, 16, 6, 6, 16, 16,
+ 8, 8, 17, 17, 7, 7, 17, 17, 7, 7, 17, 17,
+ 9, 9, 18, 18, 8, 8, 18, 18, 8, 8, 18, 18,
+ 2, 2, 11, 11, 2, 2, 12, 12, 2, 2, 12, 12,
+ 1, 1, 10, 10, 1, 1, 11, 11, 1, 1, 11, 11,
+ 5, 5, 14, 14, 5, 5, 15, 15, 5, 5, 15, 15,
+ 3, 3, 12, 12, 3, 3, 13, 13, 3, 3, 13, 13,
+ 6, 6, 15, 15, 6, 6, 16, 16, 6, 6, 16, 16,
+ 0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 2, 2,
+ 9, 9, 18, 18, 9, 9, 19, 19, 9, 9, 19, 19,
+ 4, 4, 13, 13, 4, 4, 14, 14, 4, 4, 14, 14,
+ 8, 8, 17, 17, 8, 8, 18, 18, 8, 8, 18, 18,
+ 7, 7, 16, 16, 7, 7, 17, 17, 7, 7, 17, 17
+ ],
+ i3: [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] # batch split
+}
+
+output0 = {
+ o1: [0.95, 0.85, 0.75, 0.95, 0.7, 0.95, 0.9, 0.85, 0.75, 0.95, 0.8, 0.7],
+ o2: [
+ 0, 0, 10, 10,
+ 4, 4, 14, 14,
+ 8, 8, 18, 18,
+ 2, 2, 12, 12,
+ 8, 8, 18, 18,
+ 1, 1, 11, 11,
+ 0, 0, 2, 2,
+ 5, 5, 15, 15,
+ 9, 9, 19, 19,
+ 3, 3, 13, 13,
+ 0, 0, 2, 2,
+ 9, 9, 19, 19
+ ],
+ o3: [1, 1, 1, 2, 2, 1, 1, 1, 1, 2, 2, 2],
+ o4: [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1],
+}
+
+Example((input0, output0)).AddVariations("relaxed", "float16", quant8)
+
+
+# TEST 2: BOX_WITH_NMS_LIMIT, score_threshold = 0.3, nms_threshold = 0.4, max_detections = 5
+i1 = Input("scores", "TENSOR_FLOAT32", "{19, 3}") # scores
+i2 = Input("roi", "TENSOR_FLOAT32", "{19, 12}") # roi
+i3 = Input("batchSplit", "TENSOR_INT32", "{19}") # batchSplit
+
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{10}") # scores out
+o2 = Output("roiOut", "TENSOR_FLOAT32", "{10, 4}") # roi out
+o3 = Output("classesOut", "TENSOR_INT32", "{10}") # classes out
+o4 = Output("batchSplitOut", "TENSOR_INT32", "{10}") # batch split out
+model = Model().Operation("BOX_WITH_NMS_LIMIT", i1, i2, i3, 0.3, 5, 0, 0.4, 0.5, 0.3).To(o1, o2, o3, o4)
+
+quant8 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.01, 128),
+ i2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.01, 128),
+ o2: ("TENSOR_QUANT16_ASYMM", 0.125, 0)
+})
+
+input0 = {
+ i1: [ # scores
+ 0.90, 0.95, 0.75,
+ 0.80, 0.70, 0.85,
+ 0.60, 0.90, 0.95,
+ 0.90, 0.65, 0.90,
+ 0.80, 0.85, 0.80,
+ 0.60, 0.60, 0.20,
+ 0.60, 0.80, 0.40,
+ 0.90, 0.55, 0.60,
+ 0.90, 0.75, 0.70,
+ 0.80, 0.70, 0.85,
+ 0.90, 0.95, 0.75,
+ 0.80, 0.85, 0.80,
+ 0.60, 0.90, 0.95,
+ 0.60, 0.60, 0.20,
+ 0.50, 0.90, 0.80,
+ 0.90, 0.75, 0.70,
+ 0.90, 0.65, 0.90,
+ 0.90, 0.55, 0.60,
+ 0.60, 0.80, 0.40
+ ],
+ i2: [ # roi
+ 1, 1, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10,
+ 2, 2, 11, 11, 1, 1, 11, 11, 1, 1, 11, 11,
+ 3, 3, 12, 12, 2, 2, 12, 12, 2, 2, 12, 12,
+ 4, 4, 13, 13, 3, 3, 13, 13, 3, 3, 13, 13,
+ 5, 5, 14, 14, 4, 4, 14, 14, 4, 4, 14, 14,
+ 6, 6, 15, 15, 5, 5, 15, 15, 5, 5, 15, 15,
+ 7, 7, 16, 16, 6, 6, 16, 16, 6, 6, 16, 16,
+ 8, 8, 17, 17, 7, 7, 17, 17, 7, 7, 17, 17,
+ 9, 9, 18, 18, 8, 8, 18, 18, 8, 8, 18, 18,
+ 2, 2, 11, 11, 2, 2, 12, 12, 2, 2, 12, 12,
+ 1, 1, 10, 10, 1, 1, 11, 11, 1, 1, 11, 11,
+ 5, 5, 14, 14, 5, 5, 15, 15, 5, 5, 15, 15,
+ 3, 3, 12, 12, 3, 3, 13, 13, 3, 3, 13, 13,
+ 6, 6, 15, 15, 6, 6, 16, 16, 6, 6, 16, 16,
+ 0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 2, 2,
+ 9, 9, 18, 18, 9, 9, 19, 19, 9, 9, 19, 19,
+ 4, 4, 13, 13, 4, 4, 14, 14, 4, 4, 14, 14,
+ 8, 8, 17, 17, 8, 8, 18, 18, 8, 8, 18, 18,
+ 7, 7, 16, 16, 7, 7, 17, 17, 7, 7, 17, 17
+ ],
+ i3: [1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3] # batch split
+}
+
+output0 = {
+ o1: [0.95, 0.85, 0.75, 0.95, 0.7, 0.95, 0.9, 0.85, 0.95, 0.8],
+ o2: [
+ 0, 0, 10, 10,
+ 4, 4, 14, 14,
+ 8, 8, 18, 18,
+ 2, 2, 12, 12,
+ 8, 8, 18, 18,
+ 1, 1, 11, 11,
+ 0, 0, 2, 2,
+ 5, 5, 15, 15,
+ 3, 3, 13, 13,
+ 0, 0, 2, 2
+ ],
+ o3: [1, 1, 1, 2, 2, 1, 1, 1, 2, 2],
+ o4: [1, 1, 1, 1, 1, 3, 3, 3, 3, 3],
+}
+
+Example((input0, output0)).AddVariations("relaxed", "float16", quant8)
diff --git a/tests/nnapi/specs/skip/V1_2/box_with_nms_limit_linear.mod.py b/tests/nnapi/specs/skip/V1_2/box_with_nms_limit_linear.mod.py
new file mode 100644
index 000000000..4d3bc2001
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/box_with_nms_limit_linear.mod.py
@@ -0,0 +1,201 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# TEST 1: BOX_WITH_NMS_LIMIT, score_threshold = 0.3, nms_threshold = 0.4, max_detections = -1
+i1 = Input("scores", "TENSOR_FLOAT32", "{19, 3}") # scores
+i2 = Input("roi", "TENSOR_FLOAT32", "{19, 12}") # roi
+i3 = Input("batchSplit", "TENSOR_INT32", "{19}") # batchSplit
+
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{16}") # scores out
+o2 = Output("roiOut", "TENSOR_FLOAT32", "{16, 4}") # roi out
+o3 = Output("classesOut", "TENSOR_INT32", "{16}") # classes out
+o4 = Output("batchSplitOut", "TENSOR_INT32", "{16}") # batch split out
+model = Model().Operation("BOX_WITH_NMS_LIMIT", i1, i2, i3, 0.3, -1, 1, 0.4, 1.0, 0.3).To(o1, o2, o3, o4)
+
+quant8 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.01, 0),
+ i2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.01, 0),
+ o2: ("TENSOR_QUANT16_ASYMM", 0.125, 0)
+})
+
+input0 = {
+ i1: [ # scores
+ 0.90, 0.95, 0.75,
+ 0.80, 0.70, 0.85,
+ 0.60, 0.90, 0.95,
+ 0.90, 0.65, 0.90,
+ 0.80, 0.85, 0.80,
+ 0.60, 0.60, 0.20,
+ 0.60, 0.80, 0.40,
+ 0.90, 0.55, 0.60,
+ 0.90, 0.75, 0.70,
+ 0.80, 0.70, 0.85,
+ 0.90, 0.95, 0.75,
+ 0.80, 0.85, 0.80,
+ 0.60, 0.90, 0.95,
+ 0.60, 0.60, 0.20,
+ 0.50, 0.90, 0.80,
+ 0.90, 0.75, 0.70,
+ 0.90, 0.65, 0.90,
+ 0.90, 0.55, 0.60,
+ 0.60, 0.80, 0.40
+ ],
+ i2: [ # roi
+ 1, 1, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10,
+ 2, 2, 11, 11, 1, 1, 11, 11, 1, 1, 11, 11,
+ 3, 3, 12, 12, 2, 2, 12, 12, 2, 2, 12, 12,
+ 4, 4, 13, 13, 3, 3, 13, 13, 3, 3, 13, 13,
+ 5, 5, 14, 14, 4, 4, 14, 14, 4, 4, 14, 14,
+ 6, 6, 15, 15, 5, 5, 15, 15, 5, 5, 15, 15,
+ 7, 7, 16, 16, 6, 6, 16, 16, 6, 6, 16, 16,
+ 8, 8, 17, 17, 7, 7, 17, 17, 7, 7, 17, 17,
+ 9, 9, 18, 18, 8, 8, 18, 18, 8, 8, 18, 18,
+ 2, 2, 11, 11, 2, 2, 12, 12, 2, 2, 12, 12,
+ 1, 1, 10, 10, 1, 1, 11, 11, 1, 1, 11, 11,
+ 5, 5, 14, 14, 5, 5, 15, 15, 5, 5, 15, 15,
+ 3, 3, 12, 12, 3, 3, 13, 13, 3, 3, 13, 13,
+ 6, 6, 15, 15, 6, 6, 16, 16, 6, 6, 16, 16,
+ 0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 2, 2,
+ 9, 9, 18, 18, 9, 9, 19, 19, 9, 9, 19, 19,
+ 4, 4, 13, 13, 4, 4, 14, 14, 4, 4, 14, 14,
+ 8, 8, 17, 17, 8, 8, 18, 18, 8, 8, 18, 18,
+ 7, 7, 16, 16, 7, 7, 17, 17, 7, 7, 17, 17
+ ],
+ i3: [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] # batch split
+}
+
+output0 = {
+ o1: [
+ 0.95, 0.85, 0.75, 0.95, 0.7, 0.42352945, 0.39705884,
+ 0.95, 0.9, 0.85, 0.75, 0.95, 0.8, 0.7, 0.42352945, 0.39705884
+ ],
+ o2: [
+ 0, 0, 10, 10,
+ 4, 4, 14, 14,
+ 8, 8, 18, 18,
+ 2, 2, 12, 12,
+ 8, 8, 18, 18,
+ 4, 4, 14, 14,
+ 0, 0, 10, 10,
+ 1, 1, 11, 11,
+ 0, 0, 2, 2,
+ 5, 5, 15, 15,
+ 9, 9, 19, 19,
+ 3, 3, 13, 13,
+ 0, 0, 2, 2,
+ 9, 9, 19, 19,
+ 5, 5, 15, 15,
+ 1, 1, 11, 11
+ ],
+ o3: [1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2, 2],
+ o4: [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1],
+}
+
+Example((input0, output0)).AddVariations("relaxed", "float16", quant8)
+
+
+# TEST 2: BOX_WITH_NMS_LIMIT, score_threshold = 0.3, nms_threshold = 0.4, max_detections = 5
+i1 = Input("scores", "TENSOR_FLOAT32", "{19, 3}") # scores
+i2 = Input("roi", "TENSOR_FLOAT32", "{19, 12}") # roi
+i3 = Input("batchSplit", "TENSOR_INT32", "{19}") # batchSplit
+
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{15}") # scores out
+o2 = Output("roiOut", "TENSOR_FLOAT32", "{15, 4}") # roi out
+o3 = Output("classesOut", "TENSOR_INT32", "{15}") # classes out
+o4 = Output("batchSplitOut", "TENSOR_INT32", "{15}") # batch split out
+model = Model().Operation("BOX_WITH_NMS_LIMIT", i1, i2, i3, 0.3, 8, 1, 0.4, 0.5, 0.3).To(o1, o2, o3, o4)
+
+quant8 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.01, 128),
+ i2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.01, 128),
+ o2: ("TENSOR_QUANT16_ASYMM", 0.125, 0)
+})
+
+input0 = {
+ i1: [ # scores
+ 0.90, 0.95, 0.75,
+ 0.80, 0.70, 0.85,
+ 0.60, 0.90, 0.95,
+ 0.90, 0.65, 0.90,
+ 0.80, 0.85, 0.80,
+ 0.60, 0.60, 0.20,
+ 0.60, 0.80, 0.40,
+ 0.90, 0.55, 0.60,
+ 0.90, 0.75, 0.70,
+ 0.80, 0.70, 0.85,
+ 0.90, 0.95, 0.75,
+ 0.80, 0.85, 0.80,
+ 0.60, 0.90, 0.95,
+ 0.60, 0.60, 0.20,
+ 0.50, 0.90, 0.80,
+ 0.90, 0.75, 0.70,
+ 0.90, 0.65, 0.90,
+ 0.90, 0.55, 0.60,
+ 0.60, 0.80, 0.40
+ ],
+ i2: [ # roi
+ 1, 1, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10,
+ 2, 2, 11, 11, 1, 1, 11, 11, 1, 1, 11, 11,
+ 3, 3, 12, 12, 2, 2, 12, 12, 2, 2, 12, 12,
+ 4, 4, 13, 13, 3, 3, 13, 13, 3, 3, 13, 13,
+ 5, 5, 14, 14, 4, 4, 14, 14, 4, 4, 14, 14,
+ 6, 6, 15, 15, 5, 5, 15, 15, 5, 5, 15, 15,
+ 7, 7, 16, 16, 6, 6, 16, 16, 6, 6, 16, 16,
+ 8, 8, 17, 17, 7, 7, 17, 17, 7, 7, 17, 17,
+ 9, 9, 18, 18, 8, 8, 18, 18, 8, 8, 18, 18,
+ 2, 2, 11, 11, 2, 2, 12, 12, 2, 2, 12, 12,
+ 1, 1, 10, 10, 1, 1, 11, 11, 1, 1, 11, 11,
+ 5, 5, 14, 14, 5, 5, 15, 15, 5, 5, 15, 15,
+ 3, 3, 12, 12, 3, 3, 13, 13, 3, 3, 13, 13,
+ 6, 6, 15, 15, 6, 6, 16, 16, 6, 6, 16, 16,
+ 0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 2, 2,
+ 9, 9, 18, 18, 9, 9, 19, 19, 9, 9, 19, 19,
+ 4, 4, 13, 13, 4, 4, 14, 14, 4, 4, 14, 14,
+ 8, 8, 17, 17, 8, 8, 18, 18, 8, 8, 18, 18,
+ 7, 7, 16, 16, 7, 7, 17, 17, 7, 7, 17, 17
+ ],
+ i3: [1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3] # batch split
+}
+
+output0 = {
+ o1: [
+ 0.95, 0.85, 0.75, 0.95, 0.7, 0.42352945, 0.39705884,
+ 0.95, 0.9, 0.85, 0.75, 0.95, 0.8, 0.7, 0.42352945
+ ],
+ o2: [
+ 0, 0, 10, 10,
+ 4, 4, 14, 14,
+ 8, 8, 18, 18,
+ 2, 2, 12, 12,
+ 8, 8, 18, 18,
+ 4, 4, 14, 14,
+ 0, 0, 10, 10,
+ 1, 1, 11, 11,
+ 0, 0, 2, 2,
+ 5, 5, 15, 15,
+ 9, 9, 19, 19,
+ 3, 3, 13, 13,
+ 0, 0, 2, 2,
+ 9, 9, 19, 19,
+ 5, 5, 15, 15
+ ],
+ o3: [1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2],
+ o4: [1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3],
+}
+
+Example((input0, output0)).AddVariations("relaxed", "float16", quant8)
diff --git a/tests/nnapi/specs/skip/V1_2/channel_shuffle.mod.py b/tests/nnapi/specs/skip/V1_2/channel_shuffle.mod.py
new file mode 100644
index 000000000..bd8fcad83
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/channel_shuffle.mod.py
@@ -0,0 +1,42 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+i1 = Input("op1", "TENSOR_FLOAT32", "{2, 2, 3, 12}") # input 0
+o1 = Output("op2", "TENSOR_FLOAT32", "{2, 2, 3, 12}") # output 0
+axis = Int32Scalar("axis", -1) # last axis
+Model().Operation("CHANNEL_SHUFFLE", i1, 3, axis).To(o1)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.25, 128)
+})
+
+Example({
+ i1: list(range(2*2*3*12)),
+ o1: [ 0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11,
+ 12, 16, 20, 13, 17, 21, 14, 18, 22, 15, 19, 23,
+ 24, 28, 32, 25, 29, 33, 26, 30, 34, 27, 31, 35,
+ 36, 40, 44, 37, 41, 45, 38, 42, 46, 39, 43, 47,
+ 48, 52, 56, 49, 53, 57, 50, 54, 58, 51, 55, 59,
+ 60, 64, 68, 61, 65, 69, 62, 66, 70, 63, 67, 71,
+ 72, 76, 80, 73, 77, 81, 74, 78, 82, 75, 79, 83,
+ 84, 88, 92, 85, 89, 93, 86, 90, 94, 87, 91, 95,
+ 96, 100, 104, 97, 101, 105, 98, 102, 106, 99, 103, 107,
+ 108, 112, 116, 109, 113, 117, 110, 114, 118, 111, 115, 119,
+ 120, 124, 128, 121, 125, 129, 122, 126, 130, 123, 127, 131,
+ 132, 136, 140, 133, 137, 141, 134, 138, 142, 135, 139, 143]
+}).AddVariations("relaxed", quant8, "float16").AddAllDimsAndAxis(i1, o1, axis)
diff --git a/tests/nnapi/specs/skip/V1_2/concat_float16_1.mod.py b/tests/nnapi/specs/skip/V1_2/concat_float16_1.mod.py
new file mode 100644
index 000000000..f7c024a93
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/concat_float16_1.mod.py
@@ -0,0 +1,31 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT16", "{2, 3}") # input tensor 0
+i2 = Input("op2", "TENSOR_FLOAT16", "{2, 3}") # input tensor 1
+axis0 = Int32Scalar("axis0", 0)
+r = Output("result", "TENSOR_FLOAT16", "{4, 3}") # output
+model = model.Operation("CONCATENATION", i1, i2, axis0).To(r)
+
+# Example 1.
+input0 = {i1: [1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
+ i2: [7.0, 8.0, 9.0, 10.0, 11.0, 12.0]}
+output0 = {r: [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/skip/V1_2/concat_float16_2.mod.py b/tests/nnapi/specs/skip/V1_2/concat_float16_2.mod.py
new file mode 100644
index 000000000..64f2096b4
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/concat_float16_2.mod.py
@@ -0,0 +1,41 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# model
+model = Model()
+
+row1 = 52
+row2 = 40
+col = 230
+output_row = row1 + row2
+
+input1 = Input("input1", "TENSOR_FLOAT16", "{%d, %d}" % (row1, col)) # input tensor 1
+input2 = Input("input2", "TENSOR_FLOAT16", "{%d, %d}" % (row2, col)) # input tensor 2
+axis0 = Int32Scalar("axis0", 0)
+output = Output("output", "TENSOR_FLOAT16", "{%d, %d}" % (output_row, col)) # output
+model = model.Operation("CONCATENATION", input1, input2, axis0).To(output)
+
+# Example 1.
+input1_values = [x for x in range(row1 * col)]
+input2_values = (lambda s1 = row1 * col, s2 = row2 * col:
+ [x + s1 for x in range(s2)])()
+input0 = {input1: input1_values,
+ input2: input2_values}
+output_values = [x for x in range(output_row * col)]
+output0 = {output: output_values}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/skip/V1_2/concat_float16_3.mod.py b/tests/nnapi/specs/skip/V1_2/concat_float16_3.mod.py
new file mode 100644
index 000000000..7c7bc6964
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/concat_float16_3.mod.py
@@ -0,0 +1,47 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# model
+model = Model()
+
+row = 212
+col1 = 60
+col2 = 30
+output_col = col1 + col2
+
+input1 = Input("input1", "TENSOR_FLOAT16", "{%d, %d}" % (row, col1)) # input tensor 1
+input2 = Input("input2", "TENSOR_FLOAT16", "{%d, %d}" % (row, col2)) # input tensor 2
+axis1 = Int32Scalar("axis1", 1)
+output = Output("output", "TENSOR_FLOAT16", "{%d, %d}" % (row, output_col)) # output
+model = model.Operation("CONCATENATION", input1, input2, axis1).To(output)
+
+# Example 1.
+input1_values = [x for x in range(row * col1)]
+input2_values = [-x for x in range(row * col2)]
+input0 = {input1: input1_values,
+ input2: input2_values}
+
+output_values = [x for x in range(row * output_col)]
+for r in range(row):
+ for c1 in range(col1):
+ output_values[r * output_col + c1] = input1_values[r * col1 + c1]
+ for c2 in range(col2):
+ output_values[r * output_col + col1 + c2] = input2_values[r * col2 + c2]
+
+output0 = {output: output_values}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/skip/V1_2/concat_mixed_quant.mod.py b/tests/nnapi/specs/skip/V1_2/concat_mixed_quant.mod.py
new file mode 100644
index 000000000..6610fea9b
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/concat_mixed_quant.mod.py
@@ -0,0 +1,56 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Adapted from tensorflow/lite/kernels/concatenation_test.cc
+
+input0 = Input("input0", "TENSOR_FLOAT32", "{2, 1, 2}")
+input1 = Input("input1", "TENSOR_FLOAT32", "{2, 1, 2}")
+input2 = Input("input2", "TENSOR_FLOAT32", "{2, 1, 2}")
+input3 = Input("input3", "TENSOR_FLOAT32", "{2, 1, 2}")
+axis = 2
+output0 = Output("output0", "TENSOR_FLOAT32", "{2, 1, 8}")
+
+model = Model().Operation("CONCATENATION", input0, input1, input2, input3, axis).To(output0)
+
+# FourInputsQuantizedMixedRange
+Example({
+ input0: [1.0, -3.0, -4.0, -7.0],
+ input1: [1.1, 3.1, 4.1, 7.1],
+ input2: [1.2, -3.2, -4.2, 7.2],
+ input3: [1.3, 3.3, 4.3, 7.3],
+ output0: [1.0, -3.0, 1.1, 3.1, 1.2, -3.2, 1.3, 3.3, -4.0, -7.0, 4.1, 7.1, -4.2, 7.2, 4.3, 7.3],
+}).AddVariations(DataTypeConverter().Identify({
+ input0: ["TENSOR_QUANT8_ASYMM", 0.084, 127],
+ input1: ["TENSOR_QUANT8_ASYMM", 0.05, 0],
+ input2: ["TENSOR_QUANT8_ASYMM", 0.089, 123],
+ input3: ["TENSOR_QUANT8_ASYMM", 0.029, 0],
+ output0: ["TENSOR_QUANT8_ASYMM", 0.1, 127],
+}), includeDefault=False)
+
+# FourInputsQuantizedMixedRangeClampingLogic
+Example({
+ input0: [1.0, -3.0, -4.0, -7.0],
+ input1: [1.1, 3.1, 4.1, 7.1],
+ input2: [1.2, -3.2, -4.2, 7.2],
+ input3: [1.3, 3.3, 4.3, 7.3],
+ output0: [1.0, -1.0, 1.0, 1.0, 1.0, -1.0, 1.0, 1.0, -1.0, -1.0, 1.0, 1.0, -1.0, 1.0, 1.0, 1.0]
+}).AddVariations(DataTypeConverter().Identify({
+ input0: ["TENSOR_QUANT8_ASYMM", 0.084, 127],
+ input1: ["TENSOR_QUANT8_ASYMM", 0.05, 0],
+ input2: ["TENSOR_QUANT8_ASYMM", 0.089, 123],
+ input3: ["TENSOR_QUANT8_ASYMM", 0.029, 0],
+ output0: ["TENSOR_QUANT8_ASYMM", 0.0078125, 127],
+}), includeDefault=False)
diff --git a/tests/nnapi/specs/skip/V1_2/concat_zero_sized.mod.py b/tests/nnapi/specs/skip/V1_2/concat_zero_sized.mod.py
new file mode 100644
index 000000000..1e374b4e4
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/concat_zero_sized.mod.py
@@ -0,0 +1,97 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Zero-sized input: zero dimension is not "axis"
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model().Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+layout = BoolScalar("layout", False) # NHWC
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# CONCATENATION op with numBatches = 0.
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 2, 2, 2}") # out
+model = model.Operation("CONCATENATION", zero_sized, zero_sized, 3).To(o3)
+
+quant8 = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ zero_sized: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ o3: ("TENSOR_QUANT8_ASYMM", 0.1, 128)
+})
+
+# Create test case with dummy values.
+Example({
+ i1: [1],
+ o1: [0],
+ o2: [0],
+ o3: [0],
+}).AddVariations("relaxed", quant8, "float16")
+
+
+# Zero-sized input: zero dimension is "axis"
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model().Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+layout = BoolScalar("layout", False) # NHWC
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# CONCATENATION op with numBatches = 0.
+i2 = Input("in", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+o3 = Output("out", "TENSOR_FLOAT32", "{1, 2, 2, 1}") # out
+model = model.Operation("CONCATENATION", zero_sized, i2, 0).To(o3)
+
+quant8 = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ zero_sized: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ i2: ("TENSOR_QUANT8_ASYMM", 0.2, 128),
+ o3: ("TENSOR_QUANT8_ASYMM", 0.1, 128)
+})
+
+# Create test case with dummy values.
+Example({
+ i1: [1],
+ i2: [1, 2, 3, 4],
+ o1: [0],
+ o2: [0],
+ o3: [1, 2, 3, 4],
+}).AddVariations("relaxed", quant8, "float16")
diff --git a/tests/nnapi/specs/skip/V1_2/conv2d_dilation.mod.py b/tests/nnapi/specs/skip/V1_2/conv2d_dilation.mod.py
new file mode 100644
index 000000000..e30e5ede8
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/conv2d_dilation.mod.py
@@ -0,0 +1,146 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+layout = BoolScalar("layout", False) # NHWC
+
+# TEST 1: dilation set to 1 (default)
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 3, 3, 1}")
+f1 = Parameter("op2", "TENSOR_FLOAT32", "{1, 2, 2, 1}", [.25, .25, .25, .25])
+b1 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [0])
+o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+Model().Operation("CONV_2D", i1, f1, b1, 0, 0, 0, 0, 1, 1, 0, layout, 1, 1).To(o1)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
+ f1: ("TENSOR_QUANT8_ASYMM", 0.125, 0),
+ b1: ("TENSOR_INT32", 0.0625, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.125, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i1: [1.0, 1.0, 1.0, 1.0, 0.5, 1.0, 1.0, 1.0, 1.0],
+ o1: [.875, .875, .875, .875]
+}).AddNchw(i1, o1, layout).AddInput(f1, b1).AddVariations("relaxed", quant8, "float16")
+
+
+# TEST 2: dilation set to 3
+i2 = Input("op1", "TENSOR_FLOAT32", "{1, 9, 9, 1}")
+f2 = Parameter("op2", "TENSOR_FLOAT32", "{1, 3, 3, 1}", [1, 2, 3, 4, 5, 6, 7, 8, 9])
+b2 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [0])
+o2 = Output("op4", "TENSOR_FLOAT32", "{1, 3, 3, 1}")
+Model().Operation("CONV_2D", i2, f2, b2, 0, 0, 0, 0, 1, 1, 0, layout, 3, 3).To(o2)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
+ f2: ("TENSOR_QUANT8_ASYMM", 0.125, 0),
+ b2: ("TENSOR_INT32", 0.0625, 0),
+ o2: ("TENSOR_QUANT8_ASYMM", 0.125, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i2: [0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1, 1, 1, 0, 0, 0,
+ 0, 0, 0, 1, 1, 1, 0, 0, 0,
+ 0, 0, 0, 1, 1, 1, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ o2: [5, 5, 5, 5, 5, 5, 5, 5, 5]
+}).AddNchw(i2, o2, layout).AddInput(f2, b2).AddVariations("relaxed", quant8, "float16")
+
+# TEST 3: same as test 1 but with implicit VALID padding
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 3, 3, 1}")
+f1 = Parameter("op2", "TENSOR_FLOAT32", "{1, 2, 2, 1}", [.25, .25, .25, .25])
+b1 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [0])
+o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+Model().Operation("CONV_2D", i1, f1, b1, 2, 1, 1, 0, layout, 1, 1).To(o1)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
+ f1: ("TENSOR_QUANT8_ASYMM", 0.125, 0),
+ b1: ("TENSOR_INT32", 0.0625, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.125, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i1: [1.0, 1.0, 1.0, 1.0, 0.5, 1.0, 1.0, 1.0, 1.0],
+ o1: [.875, .875, .875, .875]
+}, name="valid_padding").AddNchw(i1, o1, layout).AddInput(f1, b1).AddVariations("relaxed", quant8, "float16")
+
+
+# TEST 4: same as test 2 but with implicit VALID padding
+i2 = Input("op1", "TENSOR_FLOAT32", "{1, 9, 9, 1}")
+f2 = Parameter("op2", "TENSOR_FLOAT32", "{1, 3, 3, 1}", [1, 2, 3, 4, 5, 6, 7, 8, 9])
+b2 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [0])
+o2 = Output("op4", "TENSOR_FLOAT32", "{1, 3, 3, 1}")
+Model().Operation("CONV_2D", i2, f2, b2, 2, 1, 1, 0, layout, 3, 3).To(o2)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
+ f2: ("TENSOR_QUANT8_ASYMM", 0.125, 0),
+ b2: ("TENSOR_INT32", 0.0625, 0),
+ o2: ("TENSOR_QUANT8_ASYMM", 0.125, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i2: [0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1, 1, 1, 0, 0, 0,
+ 0, 0, 0, 1, 1, 1, 0, 0, 0,
+ 0, 0, 0, 1, 1, 1, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ o2: [5, 5, 5, 5, 5, 5, 5, 5, 5]
+}, name="valid_padding").AddNchw(i2, o2, layout).AddInput(f2, b2).AddVariations("relaxed", quant8, "float16")
+
+
+# TEST 5: dilation set to 3, SAME padding
+i3 = Input("op1", "TENSOR_FLOAT32", "{1, 6, 6, 1}")
+f3 = Parameter("op2", "TENSOR_FLOAT32", "{1, 2, 2, 1}", [1, 2, 3, 4])
+b3 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [0])
+o3 = Output("op4", "TENSOR_FLOAT32", "{1, 3, 3, 1}")
+Model().Operation("CONV_2D", i3, f3, b3, 1, 2, 2, 0, layout, 3, 3).To(o3)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i3: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
+ f3: ("TENSOR_QUANT8_ASYMM", 0.125, 0),
+ b3: ("TENSOR_INT32", 0.0625, 0),
+ o3: ("TENSOR_QUANT8_ASYMM", 0.125, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i3: [0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 4, 3, 0, 0,
+ 0, 0, 2, 1, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0],
+ o3: [16, 0, 9, 0, 0, 0, 4, 0, 1]
+}).AddNchw(i3, o3, layout).AddInput(f3, b3).AddVariations("relaxed", quant8, "float16")
diff --git a/tests/nnapi/specs/skip/V1_2/conv2d_per_channel.mod.py b/tests/nnapi/specs/skip/V1_2/conv2d_per_channel.mod.py
new file mode 100644
index 000000000..8780b48d9
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/conv2d_per_channel.mod.py
@@ -0,0 +1,74 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TEST 1: No layout param specified
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM", "{1, 3, 1, 2}, 0.5f, 128")
+f1 = Parameter("op2", "TENSOR_QUANT8_SYMM_PER_CHANNEL", "{3, 1, 1, 2}, 0.0f, 0",
+ [1, 2, 1, 2, 1, 2], extraParams = SymmPerChannelQuantParams(channelDim=0, scales=[0.5, 0.75, 1.0]))
+b1 = Parameter("op3", "TENSOR_INT32", "{3}", [4, 4, 4])
+o1 = Output("op4", "TENSOR_QUANT8_ASYMM", "{1, 3, 1, 3}, 1.f, 128")
+Model().Operation("CONV_2D", i1, f1, b1, 0, 0, 0, 0, 1, 1, 0).To(o1)
+
+# Instantiate an example
+Example({
+ i1: [138, 138, 138, 138, 138, 138],
+ o1: [137, 141, 145, 137, 141, 145, 137, 141, 145]
+}).AddInput(f1, b1)
+
+# TEST 2: layout param, NHWC/NCHW layouts
+layout = BoolScalar("layout", False) # NHWC
+i2 = Input("op1", "TENSOR_QUANT8_ASYMM", "{1, 3, 1, 2}, 0.5f, 128")
+f2 = Parameter("op2", "TENSOR_QUANT8_SYMM_PER_CHANNEL", "{3, 1, 1, 2}, 0.0f, 0",
+ [1, 2, 1, 2, 1, 2], extraParams = SymmPerChannelQuantParams(channelDim=0, scales=[0.5, 0.75, 1.0]))
+b2 = Parameter("op3", "TENSOR_INT32", "{3}", [4, 4, 4])
+o2 = Output("op4", "TENSOR_QUANT8_ASYMM", "{1, 3, 1, 3}, 1.f, 128")
+Model("layouts").Operation("CONV_2D", i2, f2, b2, 0, 0, 0, 0, 1, 1, 0, layout).To(o2)
+
+# Instantiate an example
+Example({
+ i2: [138, 108, 138, 108, 138, 108],
+ o2: [121, 118, 115, 121, 118, 115, 121, 118, 115]
+}).AddNchw(i2, o2, layout).AddInput(f2, b2)
+
+# TEST 3: zero-sized input
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_QUANT8_ASYMM", "{1, 2}, 0.1f, 128", [137, 129]) # scores
+p2 = Parameter("roi", "TENSOR_QUANT16_ASYMM", "{1, 8}, 0.125f, 0", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_QUANT8_ASYMM", "{0}, 0.1f, 128") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_QUANT16_ASYMM", "{0, 4}, 0.125f, 0") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+i1 = Input("in", "TENSOR_QUANT8_ASYMM", "{1, 1, 1, 2}, 0.5f, 128")
+zero_sized = Internal("featureMap", "TENSOR_QUANT8_ASYMM", "{0, 2, 2, 2}, 0.5f, 128")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# CONV_2D op with numBatches = 0.
+w = Parameter("weights", "TENSOR_QUANT8_SYMM_PER_CHANNEL", "{3, 1, 1, 2}, 0.0f, 0",
+ [1, 2, 1, 2, 1, 2], extraParams = SymmPerChannelQuantParams(channelDim=0, scales=[0.5, 0.75, 1.0]))
+b = Parameter("bias", "TENSOR_INT32", "{3}", [4, 4, 4])
+o3 = Output("out", "TENSOR_QUANT8_ASYMM", "{0, 2, 2, 3}, 1.f, 128") # out
+model = model.Operation("CONV_2D", zero_sized, w, b, 0, 0, 0, 0, 1, 1, 0, layout).To(o3)
+
+# Create test case with dummy values.
+Example({
+ i1: [130, 130],
+ o1: [0],
+ o2: [0],
+ o3: [0],
+}).AddNchw(i1, zero_sized, o3, layout)
diff --git a/tests/nnapi/specs/skip/V1_2/conv2d_v1_2.mod.py b/tests/nnapi/specs/skip/V1_2/conv2d_v1_2.mod.py
new file mode 100644
index 000000000..3ea902b62
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/conv2d_v1_2.mod.py
@@ -0,0 +1,297 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+layout = BoolScalar("layout", False) # NHWC
+
+# TEST 1: CONV_NCHW_1
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 3, 3, 1}")
+f1 = Parameter("op2", "TENSOR_FLOAT32", "{1, 2, 2, 1}", [.25, .25, .25, .25])
+b1 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [0])
+o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+Model().Operation("CONV_2D", i1, f1, b1, 0, 0, 0, 0, 1, 1, 0, layout).To(o1)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
+ f1: ("TENSOR_QUANT8_ASYMM", 0.125, 0),
+ b1: ("TENSOR_INT32", 0.0625, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.125, 0)
+})
+channelQuant8 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
+ f1: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.125])),
+ b1: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.0625], hide=True)),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.125, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i1: [1.0, 1.0, 1.0, 1.0, 0.5, 1.0, 1.0, 1.0, 1.0],
+ o1: [.875, .875, .875, .875]
+}).AddNchw(i1, o1, layout).AddInput(f1, b1).AddVariations("relaxed", quant8, channelQuant8, "float16")
+
+
+# TEST 2: CONV_NCHW_2
+i2 = Input("op1", "TENSOR_FLOAT32", "{1, 3, 4, 1}")
+f2 = Parameter("op2", "TENSOR_FLOAT32", "{1, 3, 3, 1}", [1, 4, 7, 2, 5, 8, 3, 6, 9])
+b2 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [-200])
+o2 = Output("op4", "TENSOR_FLOAT32", "{1, 3, 4, 1}")
+Model().Operation("CONV_2D", i2, f2, b2, 1, 1, 1, 1, layout).To(o2)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM", 0.5, 127),
+ f2: ("TENSOR_QUANT8_ASYMM", 0.5, 127),
+ b2: ("TENSOR_INT32", 0.25, 0),
+ o2: ("TENSOR_QUANT8_ASYMM", 1.0, 50)
+})
+channelQuant8 = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM", 0.5, 127),
+ f2: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.5])),
+ b2: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.25], hide=True)),
+ o2: ("TENSOR_QUANT8_ASYMM", 1.0, 50)
+})
+
+# Instantiate an example
+example = Example({
+ i2: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
+ o2: [0, 0, 0, 0, 35, 112, 157, 0, 0, 34, 61, 0]
+}).AddNchw(i2, o2, layout).AddInput(f2, b2).AddVariations("relaxed", quant8, channelQuant8, "float16")
+
+
+# TEST 3: CONV_NCHW_CHANNEL
+i3 = Input("op1", "TENSOR_FLOAT32", "{1, 1, 1, 3}")
+f3 = Parameter("op2", "TENSOR_FLOAT32", "{3, 1, 1, 3}", [0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5])
+b3 = Parameter("op3", "TENSOR_FLOAT32", "{3}", [0., 0., 0.])
+o3 = Output("op4", "TENSOR_FLOAT32", "{1, 1, 1, 3}")
+Model("channel").Operation("CONV_2D", i3, f3, b3, 0, 0, 0, 0, 1, 1, 0, layout).To(o3)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i3: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
+ f3: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
+ b3: ("TENSOR_INT32", 0.25, 0),
+ o3: ("TENSOR_QUANT8_ASYMM", 0.5, 0)
+})
+channelQuant8 = DataTypeConverter().Identify({
+ i3: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
+ f3: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.5, 0.4, 0.3])),
+ b3: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.25, 0.2, 0.15], hide=True)),
+ o3: ("TENSOR_QUANT8_ASYMM", 0.5, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i3: [5., 5., 5.],
+ o3: [15., 37.5, 60.]
+}).AddNchw(i3, o3, layout).AddInput(f3, b3).AddVariations("relaxed", quant8, channelQuant8, "float16")
+
+
+# TEST 4: CONV_NCHW_LARGE
+i4 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 3, 3}")
+f4 = Parameter("op2", "TENSOR_FLOAT32", "{3, 1, 1, 3}", [1., 4., 7., 2., 5., 8., 3., 6., 9.])
+b4 = Parameter("op3", "TENSOR_FLOAT32", "{3}", [0., 0., 0.])
+o4 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 3, 3}")
+Model("large").Operation("CONV_2D", i4, f4, b4, 0, 0, 0, 0, 1, 1, 0, layout).To(o4)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i4: ("TENSOR_QUANT8_ASYMM", 0.5, 128),
+ f4: ("TENSOR_QUANT8_ASYMM", 0.5, 128),
+ b4: ("TENSOR_INT32", 0.25, 0),
+ o4: ("TENSOR_QUANT8_ASYMM", 2.0, 0)
+})
+channelQuant8 = DataTypeConverter().Identify({
+ i4: ("TENSOR_QUANT8_ASYMM", 0.5, 128),
+ f4: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.5, 1.0, 0.5])),
+ b4: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.25, 0.5, 0.25], hide=True)),
+ o4: ("TENSOR_QUANT8_ASYMM", 2.0, 0)
+})
+channelQuant8_mult_gt_1 = DataTypeConverter().Identify({
+ i4: ("TENSOR_QUANT8_ASYMM", 1.0, 127),
+ f4: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.5, 1.0, 1.005])),
+ b4: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.5, 1.0, 1.005], hide=True)),
+ o4: ("TENSOR_QUANT8_ASYMM", 1.0, 127)
+})
+
+# Instantiate an example
+example = Example({
+ i4: [1., 2., 3., 4., 5., 6., 7., 8., 9.,
+ 10., 11., 12., 13., 14., 15., 16., 17., 18.],
+ o4: [30., 36., 42.,
+ 66., 81., 96.,
+ 102., 126., 150.,
+ 138., 171., 204.,
+ 174., 216., 258.,
+ 210., 261., 312.]
+}).AddNchw(i4, o4, layout).AddInput(f4, b4).AddVariations("relaxed", quant8, channelQuant8, channelQuant8_mult_gt_1, "float16")
+
+
+# TEST 5/6: CONV_1_H3_W2_[SAME|VALID]
+i5 = Input("op1", "TENSOR_FLOAT32", "{1, 8, 8, 3}")
+f5 = Parameter("op2", "TENSOR_FLOAT32", "{1, 3, 2, 3}", [-0.966213, -0.467474, -0.82203, -0.579455, 0.0278809, -0.79946, -0.684259, 0.563238, 0.37289, 0.738216, 0.386045, -0.917775, 0.184325, -0.270568, 0.82236, 0.0973683, -0.941308, -0.144706])
+b5 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [0.])
+o5 = Output("op4", "TENSOR_FLOAT32", "{1, 8, 8, 1}")
+o6 = Output("op4", "TENSOR_FLOAT32", "{1, 6, 7, 1}")
+model_1_same = Model("1_H3_W2_SAME").Operation("CONV_2D", i5, f5, b5, 1, 1, 1, 0, layout).To(o5)
+model_1_valid = Model("1_H3_W2_VALID").Operation("CONV_2D", i5, f5, b5, 2, 1, 1, 0, layout).To(o6)
+
+example = Example({
+    i5: [-0.869931, 0.644628, -0.918393, 0.153672, 0.868562, -0.358177, -0.134931, -0.247565, 0.22174, -0.259157, -0.284296, -0.538065, 0.765559, 0.41986, -0.556241, 0.658494, 0.214355, -0.850169, -0.252893, -0.478935, 0.530526, -0.0700663, -0.988729, -0.303061, 0.150845, 0.829915, 0.476349, 0.406537, -0.355343, 0.757145, -0.356362, 0.800482, -0.713861, 0.210483, -0.634303, 0.718236, -0.752038, 0.457547, -0.550769, -0.551178, 0.446766, -0.227462, 0.216348, -0.852806, -0.351486, 0.55906, -0.668493, -0.303493, -0.363763, -0.162837, 0.0701012, 0.756097, -0.142269, 0.329724, -0.656317, -0.998086, -0.652949, -0.40316, -0.893682, 0.432744, 0.612362, -0.869588, -0.71327, -0.398092, -0.0423559, 0.436576, -0.925272, 0.176549, 0.822904, 0.096833, -0.296802, -0.427195, 0.031654, -0.254479, 0.244905, 0.0948254, 0.643769, -0.90391, 0.352665, -0.901179, 0.266159, -0.968068, -0.615401, -0.388975, 0.939052, -0.116289, 0.107523, -0.0582711, 0.435172, 0.334675, 0.459711, 0.717436, 0.496627, -0.680175, -0.415066, 0.339848, 0.506004, -0.337808, -0.107218, -0.172496, 0.870638, 0.931872, -0.953884, 0.903042, 0.760078, 0.209727, -0.285384, -0.45514, 0.113194, 0.0756611, 0.0924435, -0.472863, 0.960609, -0.160385, -0.839445, 0.457097, 0.163348, 0.344867, -0.131619, 0.688715, -0.540827, 0.571259, -0.95587, 0.506164, -0.155839, 0.0789621, 0.756772, -0.662069, 0.242908, 0.460821, 0.177872, -0.289839, -0.640603, 0.702598, -0.506406, -0.568262, -0.0713716, 0.413792, 0.159673, -0.305208, 0.133816, -0.160254, 0.787323, -0.753244, 0.600721, 0.263186, -0.162387, 0.477962, -0.702951, -0.731036, -0.939481, -0.524519, 0.934072, -0.511637, -0.503499, 0.106236, -0.323684, 0.534444, -0.843745, 0.364171, 0.0370358, -0.168801, -0.404559, -0.814178, 0.91745, -0.334276, 0.66925, -0.801201, 0.156511, -0.427949, 0.379153, 0.818597, -0.649902, 0.427087, -0.586015, -0.559789, -0.833923, 0.0892409, -0.621251, 0.213826, 0.465509, 0.4704, 0.380261, 0.413067, 0.180822, 0.172866, 0.59614, 0.825575, 0.662916, -0.704381, -0.297631, 0.697778],
+ o5: [1.85284, -0.0393656, -0.127353, 1.43115, -0.302294, -1.0402, 0.655023, -0.587614, 1.72003, 1.55816, 0.667546, 2.23663, 0.0661516, 0.290254, 0.770222, -0.346357, -1.58197, -0.850595, -0.484224, 0.949967, -0.577263, -0.871949, 2.34132, -0.104506, -0.135965, -0.985713, 0.815147, 1.03114, -1.41915, -0.515534, -0.373639, 1.42026, -1.50604, 0.673113, 3.06139, -0.388578, -1.76707, -0.315667, -1.03815, -0.343435, 0.432787, -1.41643, 1.12944, -0.175806, -0.846415, 1.40095, 0.70832, -1.46717, 2.19562, -2.61266, -0.705383, 1.26124, 1.46545, -2.35761, 2.04494, 1.23741, -0.527402, -0.39954, -0.0128623, 1.3644, 0.985755, -0.718118, -0.1008, 1.24327]
+}, {
+    i5: [-0.295335, -0.00387601, -0.552251, 0.166084, -0.28482, -0.152143, -0.719885, -0.869386, -0.745598, 0.823947, 0.473183, -0.331337, 0.187631, 0.0426571, -0.826897, -0.755085, -0.472453, -0.0233656, 0.0483436, 0.933418, -0.961974, 0.0125783, 0.219742, 0.342604, -0.15166, 0.0934905, 0.783221, 0.129664, 0.838844, -0.271388, 0.924519, 0.342843, 0.274418, 0.350817, 0.841638, -0.543993, -0.00283395, -0.128467, -0.682943, -0.319117, 0.84634, 0.283003, 0.32865, 0.0293755, -0.0335696, 0.591266, -0.0743476, -0.741271, 0.462056, -0.583625, -0.590183, 0.6234, 0.535269, -0.670818, -0.955642, -0.770173, 0.479986, 0.664377, 0.399445, -0.968874, -0.276263, -0.901951, 0.544104, -0.958981, 0.482658, -0.807284, 0.305369, -0.947818, 0.827498, -0.382887, -0.805741, -0.796678, -0.299804, -0.229828, 0.818783, -0.103055, -0.45568, -0.227827, 0.543743, -0.96073, 0.946747, -0.857182, -0.96426, -0.292411, -0.715614, 0.765278, -0.475043, -0.590142, -0.238507, 0.673002, -0.473357, -0.319626, 0.936014, 0.486607, 0.580844, 0.425352, -0.800994, 0.290763, -0.494953, -0.441162, 0.718677, -0.828427, 0.96965, 7.53637e-05, -0.699973, -0.526886, -0.352682, 0.799466, 0.332789, 0.723389, 0.407659, -0.934084, -0.284705, 0.961484, -0.700395, -0.985808, -0.595342, -0.691721, 0.49448, -0.0842649, 0.0390966, 0.298938, -0.128094, -0.97158, 0.86393, 0.270606, -0.468986, -0.256605, 0.47215, -0.273117, -0.590343, -0.826529, -0.725381, -0.194821, -0.259661, -0.0949207, -0.180302, 0.0446834, -0.222133, -0.40393, 0.295772, -0.92949, 0.580079, -0.169856, 0.330311, 0.0173551, -0.635823, 0.475942, 0.907175, 0.242777, -0.512208, 0.362463, 0.0496289, 0.65171, 0.990057, 0.690733, -0.469013, -0.101311, -0.68372, -0.157841, -0.677711, -0.708224, -0.659437, -0.407607, 0.677033, 0.89032, 0.228307, -0.749514, 0.772958, 0.054701, 0.551705, 0.917052, -0.895022, -0.702397, 0.484142, 0.108648, 0.833347, 0.478872, -0.984112, 0.387176, -0.73299, 0.7526, 0.443312, -0.0987856, 0.125415, 0.10876, -0.498108, 0.43209, 0.344609, 0.928941, -0.130732, -0.0569167],
+ o5: [-0.000614278, -1.21221, 0.443861, 0.102117, -2.52714, 1.47489, 0.173474, -0.237577, 1.28735, 1.91315, 2.51734, 0.375841, 0.637563, 2.653, 2.72959, -1.6271, 1.17389, -2.12119, 2.91417, -2.24246, 0.0497045, -0.127107, -0.144473, -0.133762, -0.393284, -2.02346, -0.239178, -0.246508, 1.29277, 1.32963, 0.117521, 1.22372, 0.0665713, 1.09438, -1.31426, 2.52594, -0.969211, 0.515478, -1.60926, -0.838905, 0.135211, 0.786415, -1.14382, -0.739102, -1.01731, 0.281615, 2.36311, 0.891823, 1.93872, -0.150491, 3.45217, 2.28219, 1.18282, -2.25086, 3.05468, 0.166228, 0.434554, -2.57529, -0.958662, -2.23978, 2.66776, 0.542601, 1.76107, -1.08134]
+}, model=model_1_same).AddNchw(i5, o5, layout).AddVariations("relaxed", "float16")
+
+example = Example({
+    i5: [-0.869931, 0.644628, -0.918393, 0.153672, 0.868562, -0.358177, -0.134931, -0.247565, 0.22174, -0.259157, -0.284296, -0.538065, 0.765559, 0.41986, -0.556241, 0.658494, 0.214355, -0.850169, -0.252893, -0.478935, 0.530526, -0.0700663, -0.988729, -0.303061, 0.150845, 0.829915, 0.476349, 0.406537, -0.355343, 0.757145, -0.356362, 0.800482, -0.713861, 0.210483, -0.634303, 0.718236, -0.752038, 0.457547, -0.550769, -0.551178, 0.446766, -0.227462, 0.216348, -0.852806, -0.351486, 0.55906, -0.668493, -0.303493, -0.363763, -0.162837, 0.0701012, 0.756097, -0.142269, 0.329724, -0.656317, -0.998086, -0.652949, -0.40316, -0.893682, 0.432744, 0.612362, -0.869588, -0.71327, -0.398092, -0.0423559, 0.436576, -0.925272, 0.176549, 0.822904, 0.096833, -0.296802, -0.427195, 0.031654, -0.254479, 0.244905, 0.0948254, 0.643769, -0.90391, 0.352665, -0.901179, 0.266159, -0.968068, -0.615401, -0.388975, 0.939052, -0.116289, 0.107523, -0.0582711, 0.435172, 0.334675, 0.459711, 0.717436, 0.496627, -0.680175, -0.415066, 0.339848, 0.506004, -0.337808, -0.107218, -0.172496, 0.870638, 0.931872, -0.953884, 0.903042, 0.760078, 0.209727, -0.285384, -0.45514, 0.113194, 0.0756611, 0.0924435, -0.472863, 0.960609, -0.160385, -0.839445, 0.457097, 0.163348, 0.344867, -0.131619, 0.688715, -0.540827, 0.571259, -0.95587, 0.506164, -0.155839, 0.0789621, 0.756772, -0.662069, 0.242908, 0.460821, 0.177872, -0.289839, -0.640603, 0.702598, -0.506406, -0.568262, -0.0713716, 0.413792, 0.159673, -0.305208, 0.133816, -0.160254, 0.787323, -0.753244, 0.600721, 0.263186, -0.162387, 0.477962, -0.702951, -0.731036, -0.939481, -0.524519, 0.934072, -0.511637, -0.503499, 0.106236, -0.323684, 0.534444, -0.843745, 0.364171, 0.0370358, -0.168801, -0.404559, -0.814178, 0.91745, -0.334276, 0.66925, -0.801201, 0.156511, -0.427949, 0.379153, 0.818597, -0.649902, 0.427087, -0.586015, -0.559789, -0.833923, 0.0892409, -0.621251, 0.213826, 0.465509, 0.4704, 0.380261, 0.413067, 0.180822, 0.172866, 0.59614, 0.825575, 0.662916, -0.704381, -0.297631, 0.697778],
+ o6: [1.72003, 1.55816, 0.667546, 2.23663, 0.0661516, 0.290254, 0.770222, -1.58197, -0.850595, -0.484224, 0.949967, -0.577263, -0.871949, 2.34132, -0.135965, -0.985713, 0.815147, 1.03114, -1.41915, -0.515534, -0.373639, -1.50604, 0.673113, 3.06139, -0.388578, -1.76707, -0.315667, -1.03815, 0.432787, -1.41643, 1.12944, -0.175806, -0.846415, 1.40095, 0.70832, 2.19562, -2.61266, -0.705383, 1.26124, 1.46545, -2.35761, 2.04494, ]
+}, {
+    i5: [-0.295335, -0.00387601, -0.552251, 0.166084, -0.28482, -0.152143, -0.719885, -0.869386, -0.745598, 0.823947, 0.473183, -0.331337, 0.187631, 0.0426571, -0.826897, -0.755085, -0.472453, -0.0233656, 0.0483436, 0.933418, -0.961974, 0.0125783, 0.219742, 0.342604, -0.15166, 0.0934905, 0.783221, 0.129664, 0.838844, -0.271388, 0.924519, 0.342843, 0.274418, 0.350817, 0.841638, -0.543993, -0.00283395, -0.128467, -0.682943, -0.319117, 0.84634, 0.283003, 0.32865, 0.0293755, -0.0335696, 0.591266, -0.0743476, -0.741271, 0.462056, -0.583625, -0.590183, 0.6234, 0.535269, -0.670818, -0.955642, -0.770173, 0.479986, 0.664377, 0.399445, -0.968874, -0.276263, -0.901951, 0.544104, -0.958981, 0.482658, -0.807284, 0.305369, -0.947818, 0.827498, -0.382887, -0.805741, -0.796678, -0.299804, -0.229828, 0.818783, -0.103055, -0.45568, -0.227827, 0.543743, -0.96073, 0.946747, -0.857182, -0.96426, -0.292411, -0.715614, 0.765278, -0.475043, -0.590142, -0.238507, 0.673002, -0.473357, -0.319626, 0.936014, 0.486607, 0.580844, 0.425352, -0.800994, 0.290763, -0.494953, -0.441162, 0.718677, -0.828427, 0.96965, 7.53637e-05, -0.699973, -0.526886, -0.352682, 0.799466, 0.332789, 0.723389, 0.407659, -0.934084, -0.284705, 0.961484, -0.700395, -0.985808, -0.595342, -0.691721, 0.49448, -0.0842649, 0.0390966, 0.298938, -0.128094, -0.97158, 0.86393, 0.270606, -0.468986, -0.256605, 0.47215, -0.273117, -0.590343, -0.826529, -0.725381, -0.194821, -0.259661, -0.0949207, -0.180302, 0.0446834, -0.222133, -0.40393, 0.295772, -0.92949, 0.580079, -0.169856, 0.330311, 0.0173551, -0.635823, 0.475942, 0.907175, 0.242777, -0.512208, 0.362463, 0.0496289, 0.65171, 0.990057, 0.690733, -0.469013, -0.101311, -0.68372, -0.157841, -0.677711, -0.708224, -0.659437, -0.407607, 0.677033, 0.89032, 0.228307, -0.749514, 0.772958, 0.054701, 0.551705, 0.917052, -0.895022, -0.702397, 0.484142, 0.108648, 0.833347, 0.478872, -0.984112, 0.387176, -0.73299, 0.7526, 0.443312, -0.0987856, 0.125415, 0.10876, -0.498108, 0.43209, 0.344609, 0.928941, -0.130732, -0.0569167],
+ o6: [1.28735, 1.91315, 2.51734, 0.375841, 0.637563, 2.653, 2.72959, 1.17389, -2.12119, 2.91417, -2.24246, 0.0497045, -0.127107, -0.144473, -0.393284, -2.02346, -0.239178, -0.246508, 1.29277, 1.32963, 0.117521, 0.0665713, 1.09438, -1.31426, 2.52594, -0.969211, 0.515478, -1.60926, 0.135211, 0.786415, -1.14382, -0.739102, -1.01731, 0.281615, 2.36311, 1.93872, -0.150491, 3.45217, 2.28219, 1.18282, -2.25086, 3.05468]
+}, model=model_1_valid).AddNchw(i5, o6, layout).AddVariations("relaxed", "float16")
+
+
+# TEST 7/8: CONV_3_H3_W2_[SAME|VALID]
+i7 = Input("op1", "TENSOR_FLOAT32", "{1, 8, 8, 3}")
+f7 = Parameter("op2", "TENSOR_FLOAT32", "{3, 3, 2, 3}", [-0.966213, -0.579455, -0.684259, 0.738216, 0.184325, 0.0973683, -0.176863, -0.23936, -0.000233404, 0.055546, -0.232658, -0.316404, -0.012904, 0.320705, -0.326657, -0.919674, 0.868081, -0.824608, -0.467474, 0.0278809, 0.563238, 0.386045, -0.270568, -0.941308, -0.779227, -0.261492, -0.774804, -0.79665, 0.22473, -0.414312, 0.685897, -0.327792, 0.77395, -0.714578, -0.972365, 0.0696099, -0.82203, -0.79946, 0.37289, -0.917775, 0.82236, -0.144706, -0.167188, 0.268062, 0.702641, -0.412223, 0.755759, 0.721547, -0.43637, -0.274905, -0.269165, 0.16102, 0.819857, -0.312008])
+b7 = Parameter("op3", "TENSOR_FLOAT32", "{3}", [0., 0., 0.])
+o7 = Output("op4", "TENSOR_FLOAT32", "{1, 8, 8, 3}")
+o8 = Output("op4", "TENSOR_FLOAT32", "{1, 6, 7, 3}")
+model_3_same = Model("3_H3_W2_SAME").Operation("CONV_2D", i7, f7, b7, 1, 1, 1, 0, layout).To(o7)
+model_3_valid = Model("3_H3_W2_VALID").Operation("CONV_2D", i7, f7, b7, 2, 1, 1, 0, layout).To(o8)
+
+example = Example({
+    i7: [-0.869931, 0.644628, -0.918393, 0.153672, 0.868562, -0.358177, -0.134931, -0.247565, 0.22174, -0.259157, -0.284296, -0.538065, 0.765559, 0.41986, -0.556241, 0.658494, 0.214355, -0.850169, -0.252893, -0.478935, 0.530526, -0.0700663, -0.988729, -0.303061, 0.150845, 0.829915, 0.476349, 0.406537, -0.355343, 0.757145, -0.356362, 0.800482, -0.713861, 0.210483, -0.634303, 0.718236, -0.752038, 0.457547, -0.550769, -0.551178, 0.446766, -0.227462, 0.216348, -0.852806, -0.351486, 0.55906, -0.668493, -0.303493, -0.363763, -0.162837, 0.0701012, 0.756097, -0.142269, 0.329724, -0.656317, -0.998086, -0.652949, -0.40316, -0.893682, 0.432744, 0.612362, -0.869588, -0.71327, -0.398092, -0.0423559, 0.436576, -0.925272, 0.176549, 0.822904, 0.096833, -0.296802, -0.427195, 0.031654, -0.254479, 0.244905, 0.0948254, 0.643769, -0.90391, 0.352665, -0.901179, 0.266159, -0.968068, -0.615401, -0.388975, 0.939052, -0.116289, 0.107523, -0.0582711, 0.435172, 0.334675, 0.459711, 0.717436, 0.496627, -0.680175, -0.415066, 0.339848, 0.506004, -0.337808, -0.107218, -0.172496, 0.870638, 0.931872, -0.953884, 0.903042, 0.760078, 0.209727, -0.285384, -0.45514, 0.113194, 0.0756611, 0.0924435, -0.472863, 0.960609, -0.160385, -0.839445, 0.457097, 0.163348, 0.344867, -0.131619, 0.688715, -0.540827, 0.571259, -0.95587, 0.506164, -0.155839, 0.0789621, 0.756772, -0.662069, 0.242908, 0.460821, 0.177872, -0.289839, -0.640603, 0.702598, -0.506406, -0.568262, -0.0713716, 0.413792, 0.159673, -0.305208, 0.133816, -0.160254, 0.787323, -0.753244, 0.600721, 0.263186, -0.162387, 0.477962, -0.702951, -0.731036, -0.939481, -0.524519, 0.934072, -0.511637, -0.503499, 0.106236, -0.323684, 0.534444, -0.843745, 0.364171, 0.0370358, -0.168801, -0.404559, -0.814178, 0.91745, -0.334276, 0.66925, -0.801201, 0.156511, -0.427949, 0.379153, 0.818597, -0.649902, 0.427087, -0.586015, -0.559789, -0.833923, 0.0892409, -0.621251, 0.213826, 0.465509, 0.4704, 0.380261, 0.413067, 0.180822, 0.172866, 0.59614, 0.825575, 0.662916, -0.704381, -0.297631, 0.697778],
+ o7: [-1.27853, 1.74987, -0.876718, 0.989692, 0.298548, 0.522103, -0.536896, -0.179382, -0.966914, 1.33708, 1.37042, -0.495494, 1.43859, -1.548, -0.430026, -0.662793, -0.0867897, -0.900658, -0.524396, 0.255731, -0.779081, 0.12666, 0.915651, -0.444765, -0.186842, -1.87308, 1.21135, -0.385009, 1.72032, -1.56036, -1.23059, 1.23694, 0.00200015, 0.359522, 1.60084, 0.434006, -0.282945, 2.37292, -1.28653, 0.0847837, -0.352093, -2.39659, 0.149246, 0.920351, -1.34346, 0.952311, -0.35811, 0.403449, 0.484796, -1.19989, -0.684298, -1.41301, 0.103177, -0.307039, 1.17741, 2.58936, -2.76237, -1.21565, -1.09619, 1.17432, 0.512143, 0.771379, 0.399879, -0.0533093, 0.290864, 0.95563, 1.16328, 1.80768, -1.52564, -0.126476, -0.185224, -0.114779, 1.2248, 0.237127, -0.213297, -0.619941, 0.497944, -1.68688, 1.59314, -0.127337, 0.111419, 1.13719, 1.68537, -0.479644, 1.18608, -2.52744, 1.34136, 0.548297, -2.0838, 2.64585, -0.993354, 0.128238, 1.26092, 0.318668, 0.893795, -0.0600559, -0.629126, -0.949229, 2.25828, -1.961, 0.00589599, -0.187854, -1.02403, 0.396121, 1.3704, 3.99355, 0.434221, 0.274464, -0.562438, -0.914871, 0.539129, -0.928687, 0.834954, 0.844178, -0.566053, -0.957341, 0.933336, 1.13613, -1.22109, 1.4649, -0.414666, -0.452821, -0.706006, -1.72657, -0.726574, -0.0979362, -0.478669, 1.78703, -0.639288, 1.48565, -0.179904, 1.01003, -0.317118, -0.675387, 1.90969, -1.38343, 0.697255, -0.292255, 1.81634, 0.717801, 0.862479, -0.407478, -0.343106, -0.0353232, -0.481893, -0.135565, -2.95941, 0.247846, 2.67757, -2.23999, -0.519673, 0.254447, 0.415283, -1.01065, 0.507911, 0.979926, -0.184304, -0.000950437, -0.734348, -0.196685, -0.713241, 0.594972, 0.0845042, 2.48496, 0.385019, -0.201145, 0.533332, -0.904872, -0.333518, -0.581063, -2.07065, 0.118687, -1.86708, -0.601987, 0.432037, 1.73923, 0.590007, 0.419788, 0.314198, 2.12817, 0.570793, -1.15998, -0.348587, -1.10231, -2.13091, 0.134467, -0.460382, 0.138338, 3.455, 0.679068, -0.190282, -0.0307461]
+}, {
+    i7: [-0.295335, -0.00387601, -0.552251, 0.166084, -0.28482, -0.152143, -0.719885, -0.869386, -0.745598, 0.823947, 0.473183, -0.331337, 0.187631, 0.0426571, -0.826897, -0.755085, -0.472453, -0.0233656, 0.0483436, 0.933418, -0.961974, 0.0125783, 0.219742, 0.342604, -0.15166, 0.0934905, 0.783221, 0.129664, 0.838844, -0.271388, 0.924519, 0.342843, 0.274418, 0.350817, 0.841638, -0.543993, -0.00283395, -0.128467, -0.682943, -0.319117, 0.84634, 0.283003, 0.32865, 0.0293755, -0.0335696, 0.591266, -0.0743476, -0.741271, 0.462056, -0.583625, -0.590183, 0.6234, 0.535269, -0.670818, -0.955642, -0.770173, 0.479986, 0.664377, 0.399445, -0.968874, -0.276263, -0.901951, 0.544104, -0.958981, 0.482658, -0.807284, 0.305369, -0.947818, 0.827498, -0.382887, -0.805741, -0.796678, -0.299804, -0.229828, 0.818783, -0.103055, -0.45568, -0.227827, 0.543743, -0.96073, 0.946747, -0.857182, -0.96426, -0.292411, -0.715614, 0.765278, -0.475043, -0.590142, -0.238507, 0.673002, -0.473357, -0.319626, 0.936014, 0.486607, 0.580844, 0.425352, -0.800994, 0.290763, -0.494953, -0.441162, 0.718677, -0.828427, 0.96965, 7.53637e-05, -0.699973, -0.526886, -0.352682, 0.799466, 0.332789, 0.723389, 0.407659, -0.934084, -0.284705, 0.961484, -0.700395, -0.985808, -0.595342, -0.691721, 0.49448, -0.0842649, 0.0390966, 0.298938, -0.128094, -0.97158, 0.86393, 0.270606, -0.468986, -0.256605, 0.47215, -0.273117, -0.590343, -0.826529, -0.725381, -0.194821, -0.259661, -0.0949207, -0.180302, 0.0446834, -0.222133, -0.40393, 0.295772, -0.92949, 0.580079, -0.169856, 0.330311, 0.0173551, -0.635823, 0.475942, 0.907175, 0.242777, -0.512208, 0.362463, 0.0496289, 0.65171, 0.990057, 0.690733, -0.469013, -0.101311, -0.68372, -0.157841, -0.677711, -0.708224, -0.659437, -0.407607, 0.677033, 0.89032, 0.228307, -0.749514, 0.772958, 0.054701, 0.551705, 0.917052, -0.895022, -0.702397, 0.484142, 0.108648, 0.833347, 0.478872, -0.984112, 0.387176, -0.73299, 0.7526, 0.443312, -0.0987856, 0.125415, 0.10876, -0.498108, 0.43209, 0.344609, 0.928941, -0.130732, -0.0569167],
+ o7: [0.78574, 0.0700466, -0.110245, 0.0141003, -0.621007, -0.979104, 1.24104, 0.580398, -0.512997, 0.900559, -0.683229, -1.0162, 1.0089, -0.0752488, 0.110969, 0.270558, 0.756819, -0.10753, -0.371484, 0.149005, 0.0973829, 0.155766, -0.476502, 0.259481, 1.06709, -1.16534, 1.52694, -0.797245, 0.802736, -0.997109, 2.2661, -1.45548, 2.15506, -1.33682, 1.15225, -3.09324, 0.943457, 0.885211, 0.987944, -0.345875, -0.114708, 1.7107, 0.104745, 0.828324, -2.49964, -0.453742, -0.288829, -0.0948694, -0.489415, 1.74889, -0.378257, -2.10237, 0.613022, -2.5225, -0.746785, 3.63816, -1.9287, 0.774279, -0.613917, -0.650011, 1.03753, -0.177923, 0.891815, -1.00373, 1.83859, -1.59239, -0.0662623, 0.218806, -1.088, 0.280837, 0.902901, -1.90127, 3.04734, -1.57302, 1.10881, -0.980369, -3.85305, -0.955859, 1.64909, 2.33573, 0.31144, -0.594375, 0.325747, -0.952566, -0.613449, 2.85073, 1.94692, 1.12977, 1.1351, -0.449652, 0.118765, -0.199547, 2.873, 1.35182, -1.85457, 1.22364, 1.38049, 2.38342, 0.882321, 1.03795, -0.321571, -2.60202, -1.6372, 1.09302, 0.461768, 1.8485, -0.158928, 4.28871, -0.437375, -1.5794, 1.59869, 0.0811864, 0.912054, 0.452176, 2.01812, 2.62907, 1.50304, -0.840276, -0.455854, -0.224913, 0.609824, -0.11105, 3.35635, 2.02386, 1.4687, -0.708365, -0.508992, -3.02602, -0.75725, 1.85277, 2.92817, -0.172997, -1.13279, -0.355636, -0.337669, -0.588752, 2.05759, 1.0651, 0.884758, -0.0712112, 3.81319, 0.771629, 0.949634, 0.0838967, -2.19264, 0.114521, 0.543556, -1.63197, -0.267442, 1.15701, -2.37862, 2.57646, 0.531208, 0.9499, -0.231441, 1.51461, 1.58888, 0.895931, -0.753084, 0.545251, 0.746903, 0.012994, -0.790398, -1.1055, 1.77789, 0.430923, 0.818241, -0.731412, 0.979546, -2.48707, -1.53658, -1.66798, -1.04585, -0.667911, 1.00299, -2.20339, 0.137826, -2.31281, 0.755535, 0.495396, 0.549629, 0.713128, 0.751369, 0.283996, -0.814532, 1.4866, 1.12105, 0.927998, 0.517938, -0.612661, -1.47756, -1.42422]
+}, model=model_3_same).AddNchw(i7, o7, layout).AddVariations("relaxed", "float16")
+
+example = Example({
+ i7: [-0.869931, 0.644628, -0.918393, 0.153672, 0.868562, -0.358177, -0.134931, -0.247565, 0.22174, -0.259157, -0.284296, -0.538065, 0.765559, 0.41986, -0.556241, 0.658494, 0.214355, -0.850169, -0.252893, -0.478935, 0.530526, -0.0700663, -0.988729, -0.303061, 0.150845, 0.829915, 0.476349, 0.406537, -0.355343, 0.757145, -0.356362, 0.800482, -0.713861, 0.210483, -0.634303, 0.718236, -0.752038, 0.457547, -0.550769, -0.551178, 0.446766, -0.227462, 0.216348, -0.852806, -0.351486, 0.55906, -0.668493, -0.303493, -0.363763, -0.162837, 0.0701012, 0.756097, -0.142269, 0.329724, -0.656317, -0.998086, -0.652949, -0.40316, -0.893682, 0.432744, 0.612362, -0.869588, -0.71327, -0.398092, -0.0423559, 0.436576, -0.925272, 0.176549, 0.822904, 0.096833, -0.296802, -0.427195, 0.031654, -0.254479, 0.244905, 0.0948254, 0.643769, -0.90391, 0.352665, -0.901179, 0.266159, -0.968068, -0.615401, -0.388975, 0.939052, -0.116289, 0.107523, -0.0582711, 0.435172, 0.334675, 0.459711, 0.717436, 0.496627, -0.680175, -0.415066, 0.339848, 0.506004, -0.337808, -0.107218, -0.172496, 0.870638, 0.931872, -0.953884, 0.903042, 0.760078, 0.209727, -0.285384, -0.45514, 0.113194, 0.0756611, 0.0924435, -0.472863, 0.960609, -0.160385, -0.839445, 0.457097, 0.163348, 0.344867, -0.131619, 0.688715, -0.540827, 0.571259, -0.95587, 0.506164, -0.155839, 0.0789621, 0.756772, -0.662069, 0.242908, 0.460821, 0.177872, -0.289839, -0.640603, 0.702598, -0.506406, -0.568262, -0.0713716, 0.413792, 0.159673, -0.305208, 0.133816, -0.160254, 0.787323, -0.753244, 0.600721, 0.263186, -0.162387, 0.477962, -0.702951, -0.731036, -0.939481, -0.524519, 0.934072, -0.511637, -0.503499, 0.106236, -0.323684, 0.534444, -0.843745, 0.364171, 0.0370358, -0.168801, -0.404559, -0.814178, 0.91745, -0.334276, 0.66925, -0.801201, 0.156511, -0.427949, 0.379153, 0.818597, -0.649902, 0.427087, -0.586015, -0.559789, -0.833923, 0.0892409, -0.621251, 0.213826, 0.465509, 0.4704, 0.380261, 0.413067, 0.180822, 0.172866, 0.59614, 0.825575, 0.662916, -0.704381, 
-0.297631, 0.697778],
+ o8: [-0.186842, -1.87308, 1.21135, -0.385009, 1.72032, -1.56036, -1.23059, 1.23694, 0.00200015, 0.359522, 1.60084, 0.434006, -0.282945, 2.37292, -1.28653, 0.0847837, -0.352093, -2.39659, 0.149246, 0.920351, -1.34346, 0.484796, -1.19989, -0.684298, -1.41301, 0.103177, -0.307039, 1.17741, 2.58936, -2.76237, -1.21565, -1.09619, 1.17432, 0.512143, 0.771379, 0.399879, -0.0533093, 0.290864, 0.95563, 1.16328, 1.80768, -1.52564, 1.2248, 0.237127, -0.213297, -0.619941, 0.497944, -1.68688, 1.59314, -0.127337, 0.111419, 1.13719, 1.68537, -0.479644, 1.18608, -2.52744, 1.34136, 0.548297, -2.0838, 2.64585, -0.993354, 0.128238, 1.26092, -0.629126, -0.949229, 2.25828, -1.961, 0.00589599, -0.187854, -1.02403, 0.396121, 1.3704, 3.99355, 0.434221, 0.274464, -0.562438, -0.914871, 0.539129, -0.928687, 0.834954, 0.844178, -0.566053, -0.957341, 0.933336, -0.414666, -0.452821, -0.706006, -1.72657, -0.726574, -0.0979362, -0.478669, 1.78703, -0.639288, 1.48565, -0.179904, 1.01003, -0.317118, -0.675387, 1.90969, -1.38343, 0.697255, -0.292255, 1.81634, 0.717801, 0.862479, -0.481893, -0.135565, -2.95941, 0.247846, 2.67757, -2.23999, -0.519673, 0.254447, 0.415283, -1.01065, 0.507911, 0.979926, -0.184304, -0.000950437, -0.734348, -0.196685, -0.713241, 0.594972, 0.0845044, 2.48496, 0.385019]
+}, {
+ i7: [-0.295335, -0.00387601, -0.552251, 0.166084, -0.28482, -0.152143, -0.719885, -0.869386, -0.745598, 0.823947, 0.473183, -0.331337, 0.187631, 0.0426571, -0.826897, -0.755085, -0.472453, -0.0233656, 0.0483436, 0.933418, -0.961974, 0.0125783, 0.219742, 0.342604, -0.15166, 0.0934905, 0.783221, 0.129664, 0.838844, -0.271388, 0.924519, 0.342843, 0.274418, 0.350817, 0.841638, -0.543993, -0.00283395, -0.128467, -0.682943, -0.319117, 0.84634, 0.283003, 0.32865, 0.0293755, -0.0335696, 0.591266, -0.0743476, -0.741271, 0.462056, -0.583625, -0.590183, 0.6234, 0.535269, -0.670818, -0.955642, -0.770173, 0.479986, 0.664377, 0.399445, -0.968874, -0.276263, -0.901951, 0.544104, -0.958981, 0.482658, -0.807284, 0.305369, -0.947818, 0.827498, -0.382887, -0.805741, -0.796678, -0.299804, -0.229828, 0.818783, -0.103055, -0.45568, -0.227827, 0.543743, -0.96073, 0.946747, -0.857182, -0.96426, -0.292411, -0.715614, 0.765278, -0.475043, -0.590142, -0.238507, 0.673002, -0.473357, -0.319626, 0.936014, 0.486607, 0.580844, 0.425352, -0.800994, 0.290763, -0.494953, -0.441162, 0.718677, -0.828427, 0.96965, 7.53637e-05, -0.699973, -0.526886, -0.352682, 0.799466, 0.332789, 0.723389, 0.407659, -0.934084, -0.284705, 0.961484, -0.700395, -0.985808, -0.595342, -0.691721, 0.49448, -0.0842649, 0.0390966, 0.298938, -0.128094, -0.97158, 0.86393, 0.270606, -0.468986, -0.256605, 0.47215, -0.273117, -0.590343, -0.826529, -0.725381, -0.194821, -0.259661, -0.0949207, -0.180302, 0.0446834, -0.222133, -0.40393, 0.295772, -0.92949, 0.580079, -0.169856, 0.330311, 0.0173551, -0.635823, 0.475942, 0.907175, 0.242777, -0.512208, 0.362463, 0.0496289, 0.65171, 0.990057, 0.690733, -0.469013, -0.101311, -0.68372, -0.157841, -0.677711, -0.708224, -0.659437, -0.407607, 0.677033, 0.89032, 0.228307, -0.749514, 0.772958, 0.054701, 0.551705, 0.917052, -0.895022, -0.702397, 0.484142, 0.108648, 0.833347, 0.478872, -0.984112, 0.387176, -0.73299, 0.7526, 0.443312, -0.0987856, 0.125415, 0.10876, -0.498108, 0.43209, 0.344609, 
0.928941, -0.130732, -0.0569167],
+ o8: [1.06709, -1.16534, 1.52694, -0.797245, 0.802736, -0.997109, 2.2661, -1.45548, 2.15506, -1.33682, 1.15225, -3.09324, 0.943457, 0.885211, 0.987944, -0.345875, -0.114708, 1.7107, 0.104745, 0.828324, -2.49964, -0.489415, 1.74889, -0.378257, -2.10237, 0.613022, -2.5225, -0.746785, 3.63816, -1.9287, 0.774279, -0.613917, -0.650011, 1.03753, -0.177923, 0.891815, -1.00373, 1.83859, -1.59239, -0.0662623, 0.218806, -1.088, 3.04734, -1.57302, 1.10881, -0.980369, -3.85305, -0.955859, 1.64909, 2.33573, 0.31144, -0.594375, 0.325747, -0.952566, -0.613449, 2.85073, 1.94692, 1.12977, 1.1351, -0.449652, 0.118765, -0.199547, 2.873, 1.38049, 2.38342, 0.882321, 1.03795, -0.321571, -2.60202, -1.6372, 1.09302, 0.461768, 1.8485, -0.158928, 4.28871, -0.437375, -1.5794, 1.59869, 0.0811864, 0.912054, 0.452176, 2.01812, 2.62907, 1.50304, 0.609824, -0.11105, 3.35635, 2.02386, 1.4687, -0.708365, -0.508992, -3.02602, -0.75725, 1.85277, 2.92817, -0.172997, -1.13279, -0.355636, -0.337669, -0.588752, 2.05759, 1.0651, 0.884758, -0.0712112, 3.81319, -2.19264, 0.114521, 0.543556, -1.63197, -0.267442, 1.15701, -2.37862, 2.57646, 0.531208, 0.9499, -0.231441, 1.51461, 1.58888, 0.895931, -0.753084, 0.545251, 0.746904, 0.0129939, -0.790398, -1.1055, 1.77789]
+}, model=model_3_valid).AddNchw(i7, o8, layout).AddVariations("relaxed", "float16")
+
+# TEST 9: quantized with scale product greater than output scale
+scale = 256.5 / 255
+zero_point = 128
+i9 = Input("op1", ("TENSOR_QUANT8_ASYMM", [2, 2, 4, 1], scale, zero_point))
+f9 = Parameter("op2", ("TENSOR_QUANT8_ASYMM", [3, 2, 2, 1], scale, zero_point),
+ [129, 130, 131, 132, 127, 129, 127, 129, 127, 127, 129, 129])
+b9 = Parameter("op3", ("TENSOR_INT32", [3], scale * scale, 0), [1, 2, 3])
+o9 = Output("op4", ("TENSOR_QUANT8_ASYMM", [2, 1, 2, 3], 1.0, 127))
+model9 = Model("quant_output_multiplier_gt_1").Operation("CONV_2D", i9, f9, b9, 2, 2, 2, 0).To(o9)
+
+# Instantiate an example
+example = Example({
+ i9: [
+ 129, 129, 129, 129, 130, 130, 130, 130, 129, 130, 131, 132, 129, 130,
+ 131, 132
+ ],
+ o9: [145, 129, 132, 145, 129, 132, 144, 131, 130, 164, 131, 130]
+}, model=model9).AddInput(f9, b9).AddVariations("relaxed")
+
+
+# TEST 10: zero-sized input, explicit padding
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box cooridnates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# CONV_2D op with numBatches = 0.
+w = Parameter("weights", "TENSOR_FLOAT32", "{2, 1, 1, 1}", [3, 4]) # weights
+b = Parameter("bias", "TENSOR_FLOAT32", "{2}", [1, 2]) # bias
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 2, 2, 2}") # out
+model = model.Operation("CONV_2D", zero_sized, w, b, 0, 0, 0, 0, 1, 1, 0, layout).To(o3)
+
+quant8 = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ zero_sized: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ w: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ b: ("TENSOR_INT32", 0.01, 0),
+ o3: ("TENSOR_QUANT8_ASYMM", 0.1, 128)
+})
+
+# Create test case with dummy values.
+Example({
+ i1: [1],
+ o1: [0],
+ o2: [0],
+ o3: [0],
+}).AddNchw(i1, zero_sized, o3, layout).AddVariations("relaxed", quant8, "float16")
+
+
+# TEST 11: zero-sized input, implicit padding
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box cooridnates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# CONV_2D op with numBatches = 0.
+w = Parameter("weights", "TENSOR_FLOAT32", "{2, 1, 1, 1}", [3, 4]) # weights
+b = Parameter("bias", "TENSOR_FLOAT32", "{2}", [1, 2]) # bias
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 2, 2, 2}") # out
+model = model.Operation("CONV_2D", zero_sized, w, b, 1, 1, 1, 0, layout).To(o3)
+
+quant8 = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ zero_sized: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ w: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ b: ("TENSOR_INT32", 0.01, 0),
+ o3: ("TENSOR_QUANT8_ASYMM", 0.1, 128)
+})
+
+# Create test case with dummy values.
+Example({
+ i1: [1],
+ o1: [0],
+ o2: [0],
+ o3: [0],
+}).AddNchw(i1, zero_sized, o3, layout).AddVariations("relaxed", quant8, "float16")
diff --git a/tests/nnapi/specs/skip/V1_2/depth_to_space_v1_2.mod.py b/tests/nnapi/specs/skip/V1_2/depth_to_space_v1_2.mod.py
new file mode 100644
index 000000000..8d9de4b91
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/depth_to_space_v1_2.mod.py
@@ -0,0 +1,76 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+layout = BoolScalar("layout", False) # NHWC
+
+# TEST 1: DEPTH_TO_SPACE_NCHW_1, block_size = 2
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 1, 1, 8}")
+o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 2}")
+Model().Operation("DEPTH_TO_SPACE", i1, 2, layout).To(o1)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.1, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.1, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i1: [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1],
+ o1: [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1]
+}).AddNchw(i1, o1, layout).AddVariations("relaxed", "float16", quant8)
+
+
+# TEST 2: DEPTH_TO_SPACE_NCHW_2, block_size = 2
+i2 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 4}")
+o2 = Output("op4", "TENSOR_FLOAT32", "{1, 4, 4, 1}")
+Model().Operation("DEPTH_TO_SPACE", i2, 2, layout).To(o2)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM", 0.5, 128),
+ o2: ("TENSOR_QUANT8_ASYMM", 0.5, 128)
+})
+
+# Instantiate an example
+example = Example({
+ i2: [1., 2., 5., 6., 3., 4., 7., 8., 9., 10., 13., 14., 11., 12., 15., 16.],
+ o2: [1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16.]
+}).AddNchw(i2, o2, layout).AddVariations("relaxed", "float16", quant8)
+
+
+# TEST 3: DEPTH_TO_SPACE_NCHW_3, block_size = 2
+i3 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 8}")
+o3 = Output("op4", "TENSOR_FLOAT32", "{1, 4, 4, 2}")
+Model().Operation("DEPTH_TO_SPACE", i3, 2, layout).To(o3)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i3: ("TENSOR_QUANT8_ASYMM", 1.0, 0),
+ o3: ("TENSOR_QUANT8_ASYMM", 1.0, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i3: [10, 20, 11, 21, 14, 24, 15, 25,
+ 12, 22, 13, 23, 16, 26, 17, 27,
+ 18, 28, 19, 29, 112, 212, 113, 213,
+ 110, 210, 111, 211, 114, 214, 115, 215],
+ o3: [10, 20, 11, 21, 12, 22, 13, 23,
+ 14, 24, 15, 25, 16, 26, 17, 27,
+ 18, 28, 19, 29, 110, 210, 111, 211,
+ 112, 212, 113, 213, 114, 214, 115, 215]
+}).AddNchw(i3, o3, layout).AddVariations("relaxed", "float16", quant8)
diff --git a/tests/nnapi/specs/skip/V1_2/depthwise_conv2d_dilation.mod.py b/tests/nnapi/specs/skip/V1_2/depthwise_conv2d_dilation.mod.py
new file mode 100644
index 000000000..4b90498e3
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/depthwise_conv2d_dilation.mod.py
@@ -0,0 +1,154 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+layout = BoolScalar("layout", False) # NHWC
+
+# TEST 1: dilation set to 1 (default)
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 3, 3, 2}")
+f1 = Parameter("op2", "TENSOR_FLOAT32", "{1, 2, 2, 4}", [.25, 0., .2, 0., .25, 0., 0., .3, .25, 0., 0., 0., .25, .1, 0., 0.])
+b1 = Parameter("op3", "TENSOR_FLOAT32", "{4}", [1, 2, 3, 4])
+o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 4}")
+Model().Operation("DEPTHWISE_CONV_2D", i1, f1, b1, 0, 0, 0, 0, 1, 1, 2, 0, layout, 1, 1).To(o1)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
+ f1: ("TENSOR_QUANT8_ASYMM", 0.01, 0),
+ b1: ("TENSOR_INT32", 0.005, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.1, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i1: [10, 21, 10, 22, 10, 23,
+ 10, 24, 10, 25, 10, 26,
+ 10, 27, 10, 28, 10, 29],
+ o1: [11, 3, 7.2, 10.6,
+ 11, 3, 7.4, 10.9,
+ 11, 3, 7.8, 11.5,
+ 11, 3, 8.0, 11.8]
+}).AddNchw(i1, o1, layout).AddInput(f1, b1).AddVariations("relaxed", "float16", quant8)
+
+
+# TEST 2: dilation set to 2
+i2 = Input("op1", "TENSOR_FLOAT32", "{1, 4, 4, 2}")
+f2 = Parameter("op2", "TENSOR_FLOAT32", "{1, 2, 2, 4}", [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16])
+b2 = Parameter("op3", "TENSOR_FLOAT32", "{4}", [0,0,0,0])
+o2 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 4}")
+Model().Operation("DEPTHWISE_CONV_2D", i2, f2, b2, 0, 0, 0, 0, 1, 1, 2, 0, layout, 2, 2).To(o2)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
+ f2: ("TENSOR_QUANT8_ASYMM", 0.01, 0),
+ b2: ("TENSOR_INT32", 0.005, 0),
+ o2: ("TENSOR_QUANT8_ASYMM", 0.1, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i2: [0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1, 1, 0, 0, 0,
+ 0, 0, 0, 1, 1, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,],
+ o2: [13, 14, 0, 0,
+ 0, 0, 11, 12,
+ 5, 6, 0, 0,
+ 0, 0, 3, 4]
+}).AddNchw(i2, o2, layout).AddInput(f2, b2)
+
+
+# TEST 3: same as test 1 but with implicit padding
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 3, 3, 2}")
+f1 = Parameter("op2", "TENSOR_FLOAT32", "{1, 2, 2, 4}", [.25, 0., .2, 0., .25, 0., 0., .3, .25, 0., 0., 0., .25, .1, 0., 0.])
+b1 = Parameter("op3", "TENSOR_FLOAT32", "{4}", [1, 2, 3, 4])
+o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 4}")
+Model().Operation("DEPTHWISE_CONV_2D", i1, f1, b1, 2, 1, 1, 2, 0, layout, 1, 1).To(o1)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
+ f1: ("TENSOR_QUANT8_ASYMM", 0.01, 0),
+ b1: ("TENSOR_INT32", 0.005, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.1, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i1: [10, 21, 10, 22, 10, 23,
+ 10, 24, 10, 25, 10, 26,
+ 10, 27, 10, 28, 10, 29],
+ o1: [11, 3, 7.2, 10.6,
+ 11, 3, 7.4, 10.9,
+ 11, 3, 7.8, 11.5,
+ 11, 3, 8.0, 11.8]
+}, name="valid_padding").AddNchw(i1, o1, layout).AddInput(f1, b1).AddVariations("relaxed", "float16", quant8)
+
+
+# TEST 4: same as test 2 but with implicit padding
+i2 = Input("op1", "TENSOR_FLOAT32", "{1, 4, 4, 2}")
+f2 = Parameter("op2", "TENSOR_FLOAT32", "{1, 2, 2, 4}", [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16])
+b2 = Parameter("op3", "TENSOR_FLOAT32", "{4}", [0,0,0,0])
+o2 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 4}")
+Model().Operation("DEPTHWISE_CONV_2D", i2, f2, b2, 2, 1, 1, 2, 0, layout, 2, 2).To(o2)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
+ f2: ("TENSOR_QUANT8_ASYMM", 0.01, 0),
+ b2: ("TENSOR_INT32", 0.005, 0),
+ o2: ("TENSOR_QUANT8_ASYMM", 0.1, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i2: [0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1, 1, 0, 0, 0,
+ 0, 0, 0, 1, 1, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,],
+ o2: [13, 14, 0, 0,
+ 0, 0, 11, 12,
+ 5, 6, 0, 0,
+ 0, 0, 3, 4]
+}, name="valid_padding").AddNchw(i2, o2, layout).AddInput(f2, b2)
+
+# TEST 5: dilation set to 3, padding SAME, stride 2
+i2 = Input("op1", "TENSOR_FLOAT32", "{1, 6, 6, 1}")
+f2 = Parameter("op2", "TENSOR_FLOAT32", "{1, 2, 2, 1}", [1, 2, 3, 4])
+b2 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [0])
+o2 = Output("op4", "TENSOR_FLOAT32", "{1, 3, 3, 1}")
+Model().Operation("DEPTHWISE_CONV_2D", i2, f2, b2, 1, 2, 2, 1, 0, layout, 3, 3).To(o2)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
+ f2: ("TENSOR_QUANT8_ASYMM", 0.125, 0),
+ b2: ("TENSOR_INT32", 0.0625, 0),
+ o2: ("TENSOR_QUANT8_ASYMM", 0.125, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i2: [0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 1, 1, 0, 0,
+ 0, 0, 1, 1, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0],
+ o2: [4, 0, 3,
+ 0, 0, 0,
+ 2, 0, 1]
+}, name="same_padding_stride_2").AddNchw(i2, o2, layout).AddInput(f2, b2).AddVariations("relaxed", quant8, "float16")
diff --git a/tests/nnapi/specs/skip/V1_2/depthwise_conv2d_per_channel.mod.py b/tests/nnapi/specs/skip/V1_2/depthwise_conv2d_per_channel.mod.py
new file mode 100644
index 000000000..2df79ee34
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/depthwise_conv2d_per_channel.mod.py
@@ -0,0 +1,65 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TEST 1: Same scales, zeroPoint = 0
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 2}, 0.5f, 0")
+f1 = Parameter("op2", "TENSOR_QUANT8_SYMM_PER_CHANNEL", "{1, 2, 2, 2}, 0.0f, 0",
+ [2, 4, 2, 0, 2, 2, 2, 0],
+ extraParams = SymmPerChannelQuantParams(channelDim=3, scales=[0.5, 0.5]))
+b1 = Parameter("op3", "TENSOR_INT32", "{2}", [0, 0])
+o1 = Output("op4", "TENSOR_QUANT8_ASYMM", "{1, 1, 1, 2}, 1.f, 0")
+Model("same").Operation("DEPTHWISE_CONV_2D", i1, f1, b1, 0, 0, 0, 0, 1, 1, 1, 0).To(o1)
+
+# Instantiate an example
+Example({
+ i1: [4, 16, 4, 32, 4, 64, 4, 128],
+ o1: [8, 48],
+}).AddInput(f1, b1)
+
+
+# TEST 2: Different scales, zeroPoint=128
+i2 = Input("op1", "TENSOR_QUANT8_ASYMM", "{1, 3, 3, 2}, 0.5f, 128")
+f2 = Parameter("op2", "TENSOR_QUANT8_SYMM_PER_CHANNEL", "{1, 2, 2, 4}, 0.0f, 0",
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
+ extraParams = SymmPerChannelQuantParams(channelDim=3, scales=[1.0, 0.5, 1.0, 0.5]))
+b2 = Parameter("op3", "TENSOR_INT32", "{4}", [4, 4, 4, 4])
+o2 = Output("op4", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 4}, 1.f, 128")
+Model("different").Operation("DEPTHWISE_CONV_2D", i2, f2, b2, 0, 0, 0, 0, 1, 1, 2, 0).To(o2)
+
+# Instantiate an example
+Example({
+ i2: [129, 130] * 9,
+ o2: [132, 130, 134, 131, 132, 130, 134, 131,
+ 132, 130, 134, 131, 132, 130, 134, 131],
+}).AddInput(f2, b2)
+
+
+layout = BoolScalar("layout", False) # NHWC
+
+# TEST 3: With layout param
+i3 = Input("op1", "TENSOR_QUANT8_ASYMM", "{1, 3, 3, 2}, 0.5f, 128")
+f3 = Parameter("op2", "TENSOR_QUANT8_SYMM_PER_CHANNEL", "{1, 2, 2, 4}, 0.0f, 0",
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
+ extraParams = SymmPerChannelQuantParams(channelDim=3, scales=[1.0, 0.5, 1.0, 0.5]))
+b3 = Parameter("op3", "TENSOR_INT32", "{4}", [4, 4, 4, 4])
+o3 = Output("op4", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 4}, 1.f, 128")
+Model("layout").Operation("DEPTHWISE_CONV_2D", i3, f3, b3, 0, 0, 0, 0, 1, 1, 2, 0, layout).To(o3)
+
+# Instantiate an example
+Example({
+ i3: [129, 130] * 9,
+ o3: [132, 130, 134, 131, 132, 130, 134, 131,
+ 132, 130, 134, 131, 132, 130, 134, 131],
+}).AddNchw(i3, o3, layout).AddInput(f3, b3)
diff --git a/tests/nnapi/specs/skip/V1_2/depthwise_conv2d_v1_2.mod.py b/tests/nnapi/specs/skip/V1_2/depthwise_conv2d_v1_2.mod.py
new file mode 100644
index 000000000..1ceb9c4eb
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/depthwise_conv2d_v1_2.mod.py
@@ -0,0 +1,167 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+layout = BoolScalar("layout", False) # NHWC
+
+# TEST 1: DEPTHWISE_CONV2D_NCHW, pad = 0, stride = 1, cm = 2, act = none
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 3, 3, 2}")
+f1 = Parameter("op2", "TENSOR_FLOAT32", "{1, 2, 2, 4}", [.25, 0., .2, 0., .25, 0., 0., .3, .25, 0., 0., 0., .25, .1, 0., 0.])
+b1 = Parameter("op3", "TENSOR_FLOAT32", "{4}", [1, 2, 3, 4])
+o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 4}")
+Model().Operation("DEPTHWISE_CONV_2D", i1, f1, b1, 0, 0, 0, 0, 1, 1, 2, 0, layout).To(o1)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
+ f1: ("TENSOR_QUANT8_ASYMM", 0.01, 0),
+ b1: ("TENSOR_INT32", 0.005, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.1, 0)
+})
+channelQuant8 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
+ f1: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=3, scales=[0.01, 0.005, 0.01, 0.005])),
+ b1: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.005, 0.0025, 0.005, 0.0025], hide=True)),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.1, 0)
+})
+channelQuant8_mult_gt_1 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
+ f1: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=3, scales=[0.01, 0.005, 0.01, 0.005])),
+ b1: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.005, 0.0025, 0.005, 0.0025], hide=True)),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.0001, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i1: [10, 21, 10, 22, 10, 23,
+ 10, 24, 10, 25, 10, 26,
+ 10, 27, 10, 28, 10, 29],
+ o1: [11, 3, 7.2, 10.6,
+ 11, 3, 7.4, 10.9,
+ 11, 3, 7.8, 11.5,
+ 11, 3, 8.0, 11.8]
+}).AddNchw(i1, o1, layout).AddInput(f1, b1).AddVariations("relaxed", "float16", channelQuant8, channelQuant8_mult_gt_1, quant8)
+
+
+# TEST 2: DEPTHWISE_CONV2D_NCHW_2, pad = valid, stride = 1, cm = 2, act = none
+i2 = Input("op1", "TENSOR_FLOAT32", "{1, 3, 2, 2}")
+f2 = Parameter("op2", "TENSOR_FLOAT32", "{1, 2, 2, 4}", [1, 2, 3, 4, -9, 10, -11, 12, 5, 6, 7, 8, 13, -14, 15, -16])
+b2 = Parameter("op3", "TENSOR_FLOAT32", "{4}", [1, 2, 3, 4])
+o2 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 1, 4}")
+Model().Operation("DEPTHWISE_CONV_2D", i2, f2, b2, 2, 1, 1, 2, 0, layout).To(o2)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM", 0.5, 128),
+ f2: ("TENSOR_QUANT8_ASYMM", 0.5, 128),
+ b2: ("TENSOR_INT32", 0.25, 0),
+ o2: ("TENSOR_QUANT8_ASYMM", 1.0, 100)
+})
+channelQuant8 = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM", 0.5, 128),
+ f2: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=3, scales=[0.5, 0.25, 0.5, 0.25])),
+ b2: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.25, 0.125, 0.25, 0.125], hide=True)),
+ o2: ("TENSOR_QUANT8_ASYMM", 1.0, 100)
+})
+
+# Instantiate an example
+example = Example({
+ i2: [1, 2, 7, 8, 3, 4, 9, 10, 5, 6, 11, 12],
+ o2: [71, -34, 99, -20, 91, -26, 127, -4]
+}).AddNchw(i2, o2, layout).AddInput(f2, b2).AddVariations("relaxed", "float16", quant8, channelQuant8)
+
+
+# TEST 3: DEPTHWISE_CONV2D_NCHW_LARGE, pad = 0, stride = 1, cm = 1, act = none
+i3 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 2}")
+f3 = Parameter("op2", "TENSOR_FLOAT32", "{1, 2, 2, 2}", [.25, 0, .25, 1, .25, 0, .25, 1])
+b3 = Parameter("op3", "TENSOR_FLOAT32", "{2}", [100, 200])
+o3 = Output("op4", "TENSOR_FLOAT32", "{1, 1, 1, 2}")
+Model("large").Operation("DEPTHWISE_CONV_2D", i3, f3, b3, 0, 0, 0, 0, 1, 1, 1, 0, layout).To(o3)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i3: ("TENSOR_QUANT8_ASYMM", 0.5, 100),
+ f3: ("TENSOR_QUANT8_ASYMM", 0.125, 128),
+ b3: ("TENSOR_INT32", 0.0625, 0),
+ o3: ("TENSOR_QUANT8_ASYMM", 2.0, 128)
+})
+channelQuant8 = DataTypeConverter().Identify({
+ i3: ("TENSOR_QUANT8_ASYMM", 0.5, 128),
+ f3: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=3, scales=[0.125, 0.25])),
+ b3: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.0625, 0.125], hide=True)),
+ o3: ("TENSOR_QUANT8_ASYMM", 2.0, 128)
+})
+
+# Instantiate an example
+example = Example({
+ i3: [10, 21, 10, 22, 10, 23, 10, 24],
+ o3: [110, 246]
+}).AddNchw(i3, o3, layout).AddInput(f3, b3).AddVariations("relaxed", "float16", quant8, channelQuant8)
+
+
+# TEST 4: DEPTHWISE_CONV2D_NCHW_LARGE, pad = 0, stride = 1, cm = 1, act = none
+i4 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 4}")
+f4 = Parameter("op2", "TENSOR_FLOAT32", "{1, 2, 2, 4}", [.25, 0, 10, 50, .25, 1, 20, 50, .25, 0, 30, 50, .25, 1, 40, 50])
+b4 = Parameter("op3", "TENSOR_FLOAT32", "{4}", [6000, 7000, 8000, 9000])
+o4 = Output("op4", "TENSOR_FLOAT32", "{1, 1, 1, 4}")
+Model("large").Operation("DEPTHWISE_CONV_2D", i4, f4, b4, 0, 0, 0, 0, 1, 1, 1, 0, layout).To(o4)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i4: ("TENSOR_QUANT8_ASYMM", 0.5, 128),
+ f4: ("TENSOR_QUANT8_ASYMM", 0.25, 0),
+ b4: ("TENSOR_INT32", 0.125, 0),
+ o4: ("TENSOR_QUANT8_ASYMM", 50.0, 0)
+})
+channelQuant8 = DataTypeConverter().Identify({
+ i4: ("TENSOR_QUANT8_ASYMM", 0.5, 128),
+ f4: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=3, scales=[1.0, 2.0, 1.0, 1.0])),
+ b4: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.5, 1.0, 0.5, 0.5], hide=True)),
+ o4: ("TENSOR_QUANT8_ASYMM", 50.0, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i4: [10, 21, 10, 0,
+ 10, 22, 20, 0,
+ 10, 23, 30, 0,
+ 10, 24, 40, 0],
+ o4: [6010, 7046, 11000, 9000]
+}).AddNchw(i4, o4, layout).AddInput(f4, b4).AddVariations("relaxed", "float16", quant8, channelQuant8)
+
+# TEST 9: quantized with scale product greater than output scale
+input_scale = 256.5 / 255
+input_zero_point = 127
+filter_scale = 256.5 / 255
+filter_zero_point = 128
+i9 = Input("op1",
+ ("TENSOR_QUANT8_ASYMM", [1, 3, 2, 2], input_scale, input_zero_point))
+f9 = Parameter(
+ "op2",
+ ("TENSOR_QUANT8_ASYMM", [1, 2, 2, 4], filter_scale, filter_zero_point), [
+ 129, 130, 131, 132, 119, 138, 117, 140, 133, 134, 135, 136, 141, 114,
+ 143, 112
+ ])
+b9 = Parameter("op3", ("TENSOR_INT32", [4], input_scale * filter_scale, 0),
+ [2, 4, 6, 8])
+o9 = Output("op4", ("TENSOR_QUANT8_ASYMM", [1, 2, 1, 4], 1.0, 127))
+model9 = Model("quant_output_multiplier_gt_1").Operation("DEPTHWISE_CONV_2D", i9, f9, b9, 2, 1, 1, 2,
+ 0).To(o9)
+
+# Instantiate an example
+example = Example({
+ i9: [129, 131, 141, 143, 133, 135, 145, 147, 137, 139, 149, 151],
+ o9: [255, 58, 255, 87, 255, 74, 255, 119]
+}, model=model9).AddInput(f9, b9).AddVariations("relaxed")
diff --git a/tests/nnapi/specs/skip/V1_2/detection_postprocess.mod.py b/tests/nnapi/specs/skip/V1_2/detection_postprocess.mod.py
new file mode 100644
index 000000000..b37989134
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/detection_postprocess.mod.py
@@ -0,0 +1,219 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# TEST 1: DETECTION_POSTPROCESSING
+i1 = Input("scores", "TENSOR_FLOAT32", "{1, 6, 3}") # scores
+i2 = Input("roi", "TENSOR_FLOAT32", "{1, 6, 4}") # roi
+i3 = Input("anchors", "TENSOR_FLOAT32", "{6, 4}") # anchors
+
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{1, 3}") # scores out
+o2 = Output("roiOut", "TENSOR_FLOAT32", "{1, 3, 4}") # roi out
+o3 = Output("classesOut", "TENSOR_INT32", "{1, 3}") # classes out
+o4 = Output("detectionOut", "TENSOR_INT32", "{1}") # num detections out
+Model("regular").Operation("DETECTION_POSTPROCESSING", i1, i2, i3, 10.0, 10.0, 5.0, 5.0, True, 3, 1, 1, 0.0, 0.5, False).To(o1, o2, o3, o4)
+
+input0 = {
+ i1: [ # class scores - two classes with background
+ 0., .9, .8,
+ 0., .75, .72,
+ 0., .6, .5,
+ 0., .93, .95,
+ 0., .5, .4,
+ 0., .3, .2
+ ],
+ i2: [ # six boxes in center-size encoding
+ 0.0, 0.0, 0.0, 0.0, # box #1
+ 0.0, 1.0, 0.0, 0.0, # box #2
+ 0.0, -1.0, 0.0, 0.0, # box #3
+ 0.0, 0.0, 0.0, 0.0, # box #4
+ 0.0, 1.0, 0.0, 0.0, # box #5
+ 0.0, 0.0, 0.0, 0.0 # box #6
+ ],
+ i3: [ # six anchors in center-size encoding
+ 0.5, 0.5, 1.0, 1.0, # anchor #1
+ 0.5, 0.5, 1.0, 1.0, # anchor #2
+ 0.5, 0.5, 1.0, 1.0, # anchor #3
+ 0.5, 10.5, 1.0, 1.0, # anchor #4
+ 0.5, 10.5, 1.0, 1.0, # anchor #5
+ 0.5, 100.5, 1.0, 1.0 # anchor #6
+ ]
+}
+
+output0 = {
+ o1: [0.95, 0.93, 0.0],
+ o2: [
+ 0.0, 10.0, 1.0, 11.0,
+ 0.0, 10.0, 1.0, 11.0,
+ 0.0, 0.0, 0.0, 0.0
+ ],
+ o3: [1, 0, 0],
+ o4: [2],
+}
+
+Example((input0, output0)).AddVariations("relaxed", "float16")
+
+# TEST 2: DETECTION_POSTPROCESSING
+i1 = Input("scores", "TENSOR_FLOAT32", "{1, 6, 3}") # scores
+i2 = Input("roi", "TENSOR_FLOAT32", "{1, 6, 4}") # roi
+i3 = Input("anchors", "TENSOR_FLOAT32", "{6, 4}") # anchors
+
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{1, 3}") # scores out
+o2 = Output("roiOut", "TENSOR_FLOAT32", "{1, 3, 4}") # roi out
+o3 = Output("classesOut", "TENSOR_INT32", "{1, 3}") # classes out
+o4 = Output("detectionOut", "TENSOR_INT32", "{1}") # num detections out
+Model().Operation("DETECTION_POSTPROCESSING", i1, i2, i3, 10.0, 10.0, 5.0, 5.0, False, 3, 1, 1, 0.0, 0.5, False).To(o1, o2, o3, o4)
+
+input0 = {
+ i1: [ # class scores - two classes with background
+ 0., .9, .8,
+ 0., .75, .72,
+ 0., .6, .5,
+ 0., .93, .95,
+ 0., .5, .4,
+ 0., .3, .2
+ ],
+ i2: [ # six boxes in center-size encoding
+ 0.0, 0.0, 0.0, 0.0, # box #1
+ 0.0, 1.0, 0.0, 0.0, # box #2
+ 0.0, -1.0, 0.0, 0.0, # box #3
+ 0.0, 0.0, 0.0, 0.0, # box #4
+ 0.0, 1.0, 0.0, 0.0, # box #5
+ 0.0, 0.0, 0.0, 0.0 # box #6
+ ],
+ i3: [ # six anchors in center-size encoding
+ 0.5, 0.5, 1.0, 1.0, # anchor #1
+ 0.5, 0.5, 1.0, 1.0, # anchor #2
+ 0.5, 0.5, 1.0, 1.0, # anchor #3
+ 0.5, 10.5, 1.0, 1.0, # anchor #4
+ 0.5, 10.5, 1.0, 1.0, # anchor #5
+ 0.5, 100.5, 1.0, 1.0 # anchor #6
+ ]
+}
+
+output0 = {
+ o1: [0.95, 0.9, 0.3],
+ o2: [
+ 0.0, 10.0, 1.0, 11.0,
+ 0.0, 0.0, 1.0, 1.0,
+ 0.0, 100.0, 1.0, 101.0
+ ],
+ o3: [1, 0, 0],
+ o4: [3],
+}
+
+Example((input0, output0)).AddVariations("relaxed", "float16")
+
+# TEST 3: DETECTION_POSTPROCESSING
+i1 = Input("scores", "TENSOR_FLOAT32", "{1, 6, 3}") # scores
+i2 = Input("roi", "TENSOR_FLOAT32", "{1, 6, 7}") # roi
+i3 = Input("anchors", "TENSOR_FLOAT32", "{6, 4}") # anchors
+
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{1, 3}") # scores out
+o2 = Output("roiOut", "TENSOR_FLOAT32", "{1, 3, 4}") # roi out
+o3 = Output("classesOut", "TENSOR_INT32", "{1, 3}") # classes out
+o4 = Output("detectionOut", "TENSOR_INT32", "{1}") # num detections out
+Model().Operation("DETECTION_POSTPROCESSING", i1, i2, i3, 10.0, 10.0, 5.0, 5.0, False, 3, 1, 1, 0.0, 0.5, False).To(o1, o2, o3, o4)
+
+input0 = {
+ i1: [ # class scores - two classes with background
+ 0., .9, .8,
+ 0., .75, .72,
+ 0., .6, .5,
+ 0., .93, .95,
+ 0., .5, .4,
+ 0., .3, .2
+ ],
+ i2: [ # six boxes in center-size encoding
+ 0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, # box #1
+ 0.0, 1.0, 0.0, 0.0, 1.0, 2.0, 3.0, # box #2
+ 0.0, -1.0, 0.0, 0.0, 1.0, 2.0, 3.0, # box #3
+ 0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, # box #4
+ 0.0, 1.0, 0.0, 0.0, 1.0, 2.0, 3.0, # box #5
+ 0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0 # box #6
+ ],
+ i3: [ # six anchors in center-size encoding
+ 0.5, 0.5, 1.0, 1.0, # anchor #1
+ 0.5, 0.5, 1.0, 1.0, # anchor #2
+ 0.5, 0.5, 1.0, 1.0, # anchor #3
+ 0.5, 10.5, 1.0, 1.0, # anchor #4
+ 0.5, 10.5, 1.0, 1.0, # anchor #5
+ 0.5, 100.5, 1.0, 1.0 # anchor #6
+ ]
+}
+
+output0 = {
+ o1: [0.95, 0.9, 0.3],
+ o2: [
+ 0.0, 10.0, 1.0, 11.0,
+ 0.0, 0.0, 1.0, 1.0,
+ 0.0, 100.0, 1.0, 101.0
+ ],
+ o3: [1, 0, 0],
+ o4: [3],
+}
+
+Example((input0, output0)).AddVariations("relaxed", "float16")
+
+# TEST 4: DETECTION_POSTPROCESSING
+i1 = Input("scores", "TENSOR_FLOAT32", "{1, 6, 3}") # scores
+i2 = Input("roi", "TENSOR_FLOAT32", "{1, 6, 7}") # roi
+i3 = Input("anchors", "TENSOR_FLOAT32", "{6, 4}") # anchors
+
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{1, 3}") # scores out
+o2 = Output("roiOut", "TENSOR_FLOAT32", "{1, 3, 4}") # roi out
+o3 = Output("classesOut", "TENSOR_INT32", "{1, 3}") # classes out
+o4 = Output("detectionOut", "TENSOR_INT32", "{1}") # num detections out
+Model().Operation("DETECTION_POSTPROCESSING", i1, i2, i3, 10.0, 10.0, 5.0, 5.0, False, 3, 1, 1, 0.0, 0.5, True).To(o1, o2, o3, o4)
+
+input0 = {
+ i1: [ # class scores - two classes with background
+ 0., .9, .8,
+ 0., .75, .72,
+ 0., .6, .5,
+ 0., .93, .95,
+ 0., .5, .4,
+ 0., .3, .2
+ ],
+ i2: [ # six boxes in center-size encoding
+ 0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, # box #1
+ 0.0, 1.0, 0.0, 0.0, 1.0, 2.0, 3.0, # box #2
+ 0.0, -1.0, 0.0, 0.0, 1.0, 2.0, 3.0, # box #3
+ 0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, # box #4
+ 0.0, 1.0, 0.0, 0.0, 1.0, 2.0, 3.0, # box #5
+ 0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0 # box #6
+ ],
+ i3: [ # six anchors in center-size encoding
+ 0.5, 0.5, 1.0, 1.0, # anchor #1
+ 0.5, 0.5, 1.0, 1.0, # anchor #2
+ 0.5, 0.5, 1.0, 1.0, # anchor #3
+ 0.5, 10.5, 1.0, 1.0, # anchor #4
+ 0.5, 10.5, 1.0, 1.0, # anchor #5
+ 0.5, 100.5, 1.0, 1.0 # anchor #6
+ ]
+}
+
+output0 = {
+ o1: [0.95, 0.9, 0.3],
+ o2: [
+ 0.0, 10.0, 1.0, 11.0,
+ 0.0, 0.0, 1.0, 1.0,
+ 0.0, 100.0, 1.0, 101.0
+ ],
+ o3: [2, 1, 1],
+ o4: [3],
+}
+
+Example((input0, output0)).AddVariations("relaxed", "float16")
diff --git a/tests/nnapi/specs/skip/V1_2/div_v1_2.mod.py b/tests/nnapi/specs/skip/V1_2/div_v1_2.mod.py
new file mode 100644
index 000000000..b92b67a99
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/div_v1_2.mod.py
@@ -0,0 +1,88 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# TEST 1: DIV float16
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT16", "{3}") # a vector of 3 float16s
+i2 = Input("op2", "TENSOR_FLOAT16", "{3}") # another vector of 3 float16s
+act = Int32Scalar("act", 0) # an int32_t scalar activation
+i3 = Output("op3", "TENSOR_FLOAT16", "{3}")
+model = model.Operation("DIV", i1, i2, act).To(i3)
+model = model.RelaxedExecution(False)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [2.001953125, 0.0001000165, 8.75],
+ i2: # input 1
+ [2, 0.0001, 3.5]}
+
+output0 = {i3: # output 0
+ [1.0009765625, 1.0, 2.5]}
+
+# Instantiate an example
+Example((input0, output0))
+
+
+# TEST 2: DIV broadcast float16
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT16", "{2, 2}")
+i2 = Input("op2", "TENSOR_FLOAT16", "{1, 2}")
+act = Int32Scalar("act", 0)
+i3 = Output("op3", "TENSOR_FLOAT16", "{2, 2}")
+model = model.Operation("DIV", i1, i2, act).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1, 4, 3, 8],
+ i2: # input 1
+ [1, 2]}
+
+output0 = {i3: # output 0
+ [1, 2, 3, 4]}
+
+# Instantiate an example
+Example((input0, output0))
+
+
+# TEST 3: DIV, zero-sized input
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box cooridnates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+layout = BoolScalar("layout", False) # NHWC
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 2}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 2}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# DIV op with numBatches = 0.
+i2 = Parameter("op", "TENSOR_FLOAT32", "{1, 2, 2, 1}", [1, 2, 3, 4]) # weights
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 2, 2, 2}") # out
+model = model.Operation("DIV", zero_sized, i2, 0).To(o3)
+
+# Create test case with dummy values.
+Example({
+ i1: [1, 2],
+ o1: [0],
+ o2: [0],
+ o3: [0],
+}).AddVariations("relaxed", "float16")
diff --git a/tests/nnapi/specs/skip/V1_2/floor_float16.mod.py b/tests/nnapi/specs/skip/V1_2/floor_float16.mod.py
new file mode 100644
index 000000000..5944b9bb9
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/floor_float16.mod.py
@@ -0,0 +1,17 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT16", "{1, 2, 2, 2}")
+i2 = Output("op2", "TENSOR_FLOAT16", "{1, 2, 2, 2}")
+model = model.Operation("FLOOR", i1).To(i2)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [-1.5, -1.0, -0.5, 0.0,
+ .5, 1.0, 1.5, 10.2]}
+
+output0 = {i2: # output 0
+ [-2.0, -1.0, -1.0, 0.0,
+ 0.0, 1.0, 1.0, 10]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/skip/V1_2/fully_connected_v1_2.mod.py b/tests/nnapi/specs/skip/V1_2/fully_connected_v1_2.mod.py
new file mode 100644
index 000000000..13b45fa92
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/fully_connected_v1_2.mod.py
@@ -0,0 +1,86 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# TEST 1: FULLY_CONNECTED
+model = Model()
+in0 = Input("op1", "TENSOR_FLOAT32", "{3, 1}")
+weights = Parameter("op2", "TENSOR_FLOAT32", "{1, 1}", [2])
+bias = Parameter("b0", "TENSOR_FLOAT32", "{1}", [4])
+out0 = Output("op3", "TENSOR_FLOAT32", "{3, 1}")
+act = Int32Scalar("act", 0)
+model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
+
+quant8_mult_gt_1 = DataTypeConverter(name="quant8_mult_gt_1").Identify({
+ in0: ("TENSOR_QUANT8_ASYMM", 0.5, 127),
+ weights: ("TENSOR_QUANT8_ASYMM", 0.5, 120),
+ bias: ("TENSOR_INT32", 0.25, 0),
+ out0: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+})
+
+# Example 1. Input in operand 0,
+input0 = {in0: # input 0
+ [2, 32, 16]}
+output0 = {out0: # output 0
+ [8, 68, 36]}
+
+# Instantiate an example
+Example((input0, output0)).AddVariations("relaxed", "float16", quant8_mult_gt_1)
+
+# FULLY_CONNECTED of data type TENSOR_FLOAT32 is introduced in V1_0.
+Example.SetVersion("V1_0", "fully_connected_v1_2")
+
+# TEST 2: FULLY_CONNECTED, zero-sized input
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box cooridnates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+layout = BoolScalar("layout", False) # NHWC
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 3}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 3}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# FULLY_CONNECTED op with numBatches = 0.
+w = Parameter("weights", "TENSOR_FLOAT32", "{1, 3}", [1, 2, 3]) # weights
+b = Parameter("bias", "TENSOR_FLOAT32", "{1}", [1]) # bias
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 1}") # out
+model = model.Operation("FULLY_CONNECTED", zero_sized, w, b, 0).To(o3)
+
+quant8 = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ zero_sized: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ w: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ b: ("TENSOR_INT32", 0.01, 0),
+ o3: ("TENSOR_QUANT8_ASYMM", 0.1, 128)
+})
+
+# Create test case with dummy values.
+Example({
+ i1: [1, 2, 3],
+ o1: [0],
+ o2: [0],
+ o3: [0],
+}).AddNchw(i1, zero_sized, layout).AddVariations("relaxed", quant8, "float16")
diff --git a/tests/nnapi/specs/skip/V1_2/generate_proposals.mod.py b/tests/nnapi/specs/skip/V1_2/generate_proposals.mod.py
new file mode 100644
index 000000000..41d9ef94a
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/generate_proposals.mod.py
@@ -0,0 +1,211 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+layout = BoolScalar("layout", False) # NHWC
+
+# TEST 1: GENERATE_PROPOSALS_1
+model = Model()
+i1 = Input("scores", "TENSOR_FLOAT32", "{1, 2, 2, 2}") # scores
+i2 = Input("bboxDeltas", "TENSOR_FLOAT32", "{1, 2, 2, 8}") # bounding box deltas
+i3 = Input("anchors", "TENSOR_FLOAT32", "{2, 4}") # anchors
+i4 = Input("imageInfo", "TENSOR_FLOAT32", "{1, 2}") # image info
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{4}") # scores out
+o2 = Output("roiOut", "TENSOR_FLOAT32", "{4, 4}") # roi out
+o3 = Output("batchSplit", "TENSOR_INT32", "{4}") # batch split out
+model = model.Operation("GENERATE_PROPOSALS",
+ i1, i2, i3, i4, 4.0, 4.0, -1, -1, 0.30, 1.0, layout).To(o1, o2, o3)
+
+quant8 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.01, 100),
+ i2: ("TENSOR_QUANT8_ASYMM", 0.05, 128),
+ i3: ("TENSOR_QUANT16_SYMM", 0.125, 0),
+ i4: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.01, 100),
+ o2: ("TENSOR_QUANT16_ASYMM", 0.125, 0)
+})
+
+input0 = {
+ i1: [ # scores
+ 0.8, 0.9, 0.85, 0.85,
+ 0.75, 0.8, 0.9, 0.95
+ ],
+ i2: [ # bounding box deltas
+ 0.5, 0.1, 0.1, 0.1, 0.5, 0.1, 0.5, 0.1,
+ -0.25, 0.1, -0.1, -0.1, -0.25, 0.1, 0.2, 0.1,
+ 0.4, -0.1, -0.2, 0.2, 0.4, -0.1, -0.2, 0.2,
+ -0.2, -0.2, 0.2, 0.2, -0.2, -0.2, 0.2, 0.2
+ ],
+ i3: [0, 1, 4, 3, 1, 0, 3, 4], # anchors
+ i4: [32, 32], # image info
+}
+
+output0 = {
+ o1: [0.95, 0.9, 0.85, 0.8], # scores out
+ o2: [ # roi out
+ 4.3785973, 2.7571943 , 6.8214025, 7.642805,
+ 1.3512788, 0.18965816, 4.648721 , 4.610342,
+ 3.1903253, 1.2951627 , 6.8096747, 3.1048374,
+ 1.9812691, 3.1571944 , 3.6187308, 8.042806
+ ],
+ o3: [0, 0, 0, 0]
+}
+
+Example((input0, output0)).AddNchw(i1, i2, layout).AddVariations("relaxed", quant8, "float16")
+
+# TEST 2: GENERATE_PROPOSALS_2
+model = Model()
+i1 = Input("scores", "TENSOR_FLOAT32", "{2, 4, 4, 4}") # scores
+i2 = Input("bboxDeltas", "TENSOR_FLOAT32", "{2, 4, 4, 16}") # bounding box deltas
+i3 = Input("anchors", "TENSOR_FLOAT32", "{4, 4}") # anchors
+i4 = Input("imageInfo", "TENSOR_FLOAT32", "{2, 2}") # image info
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{30}") # scores out
+o2 = Output("roiOut", "TENSOR_FLOAT32", "{30, 4}") # roi out
+o3 = Output("batchSplit", "TENSOR_INT32", "{30}") # batch split out
+model = model.Operation("GENERATE_PROPOSALS",
+ i1, i2, i3, i4, 10.0, 10.0, 32, 16, 0.20, 1.0, layout).To(o1, o2, o3)
+
+quant8 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.005, 0),
+ i2: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ i3: ("TENSOR_QUANT16_SYMM", 0.125, 0),
+ i4: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.005, 0),
+ o2: ("TENSOR_QUANT16_ASYMM", 0.125, 0)
+})
+
+input0 = {
+ i1: [ # scores
+ 0.885, 0.21 , 0.78 , 0.57 ,
+ 0.795, 0.66 , 0.915, 0.615,
+ 0.27 , 0.69 , 0.645, 0.945,
+ 0.465, 0.345, 0.855, 0.555,
+ 0.48 , 0.6 , 0.735, 0.63 ,
+ 0.495, 0.03 , 0.12 , 0.225,
+ 0.24 , 0.285, 0.51 , 0.315,
+ 0.435, 0.255, 0.585, 0.06 ,
+ 0.9 , 0.75 , 0.18 , 0.45 ,
+ 0.36 , 0.09 , 0.405, 0.15 ,
+ 0. , 0.195, 0.075, 0.81 ,
+ 0.87 , 0.93 , 0.39 , 0.165,
+ 0.825, 0.525, 0.765, 0.105,
+ 0.54 , 0.705, 0.675, 0.3 ,
+ 0.42 , 0.045, 0.33 , 0.015,
+ 0.84 , 0.135, 0.72 , 0.375,
+ 0.495, 0.315, 0.195, 0.24 ,
+ 0.21 , 0.54 , 0.78 , 0.72 ,
+ 0.045, 0.93 , 0.27 , 0.735,
+ 0.135, 0.09 , 0.81 , 0.705,
+ 0.39 , 0.885, 0.42 , 0.945,
+ 0.9 , 0.225, 0.75 , 0.3 ,
+ 0.375, 0.63 , 0.825, 0.675,
+ 0.015, 0.48 , 0.645, 0.615,
+ 0.33 , 0.465, 0.66 , 0.6 ,
+ 0.075, 0.84 , 0.285, 0.57 ,
+ 0.585, 0.165, 0.06 , 0.36 ,
+ 0.795, 0.855, 0.105, 0.45 ,
+ 0. , 0.87 , 0.525, 0.255,
+ 0.69 , 0.555, 0.15 , 0.345,
+ 0.03 , 0.915, 0.405, 0.435,
+ 0.765, 0.12 , 0.51 , 0.18
+ ],
+ i2: [ # bounding box deltas
+ -1.9, 0.4, 1.4, 0.5, -1.5, -0.2, 0.3, 1.2, 0. , -0.6, 0.4, -1.3, 0.8, 0.9, -0.2, 0.8,
+ -0.2, 0. , 0.4, 0.1, -0.2, -1.6, -0.6, -0.1, -1. , 0.6, 0.5, -0.2, -1.7, -1.4, 0.5, -0.1,
+ -1.5, 1.3, -0.7, -0.9, 0.9, 0.2, -0.2, 0. , -0.7, 0.3, -0.4, -0.3, -0.5, -0.3, 1. , -0.7,
+ 1.2, -0.3, 0. , 0.3, -0.7, 1. , -0.2, -0.6, -1.3, 0. , 0.3, 0.1, 0.4, 0.2, 2.4, 0. ,
+ 0.1, 0. , 0.7, -0.9, 0.1, -0.4, 0.3, -0.3, -0.7, 0.1, 0.7, 0. , -0.3, 1.6, 0. , 1.1,
+ 0.4, -0.7, -0.9, 0. , 0. , 0.4, -0.6, 0.4, -1.9, -1.2, 0. , -0.3, 0.2, 0. , 0.1, 0.8,
+ 0. , 0.9, -1.7, 0.3, 0.7, -0.7, 0.7, 1.2, -0.4, -0.1, -0.6, 0.6, -0.4, -0.2, 0.3, -0.5,
+ 0. , 1. , -0.1, -0.3, -0.8, 0.1, -1.2, -2.4, 0.1, 1.4, 0.4, 0.1, -1.1, 0.4, -0.4, -0.2,
+ 0.1, 0. , 0.7, 0.1, -1.3, 0.1, -0.4, -0.2, 0.2, 0.1, -0.8, 0. , -1.4, 2. , -0.6, -0.5,
+ 0. , 1. , -1.4, -1.1, 0.6, -0.7, 0.4, 1.1, -1.1, 1.6, -0.3, 0. , -0.7, 0.3, -1.3, 0. ,
+ 0. , 0. , -0.3, 0. , -1.1, -1.5, 0.9, -1.4, -0.7, 0.1, -1.4, 0.9, 0.1, 0.2, -0.1, -1.7,
+ 0.2, -0.3, -0.9, 1.1, 0.1, 1. , 1. , -0.9, 0.7, 0. , -0.3, 0.2, -0.8, -0.5, 0.6, -1.2,
+ 1. , 0.6, 0. , -1.6, 0.1, -1.2, 0.7, 0.8, 0.5, -0.2, -0.8, -1.3, -0.3, 0. , 0. , 0.3,
+ -0.6, -0.3, 1.3, 0.1, 2.2, 1.2, -1.1, 0.1, 1.2, 1.2, 1.3, -0.9, 0.1, -0.5, 0.1, -0.7,
+ -1.3, 1.3, 0.1, 2. , 0. , 0.2, 0.6, 0. , -0.1, -0.4, -0.5, 0.1, -0.6, -0.3, 0.2, -0.4,
+ -0.4, -0.7, -1.8, 0.4, -0.7, 0.4, 1.4, -0.3, 0.8, 0. , 0.4, -0.1, -1. , 0.2, 0.5, -0.6,
+ -1.1, 0.2, 1.6, -0.2, -0.4, -0.9, 0. , 0.3, 0. , 0.3, -0.3, 0.3, 0.3, 1.9, 0.3, -0.5,
+ -0.8, -1.3, -0.8, 0.2, 0.2, -0.4, -0.3, 0.6, 0.2, -0.2, 1.2, 0. , 0. , -0.3, 0.3, -1.5,
+ -1. , -0.3, -0.7, -0.3, -0.4, -1. , -0.6, -0.7, -0.2, 0.6, -0.3, 0.5, -0.2, 0.3, -0.5, -1.7,
+ 0. , -0.7, -0.1, -1.5, -0.9, 0.6, 0.3, -0.1, 0.2, 0.5, 0.6, -0.8, -0.3, 0.6, 0.9, -0.3,
+ 0.1, -1.7, -1.5, 0. , -0.1, -0.3, 0.7, -0.3, -0.4, 0. , -0.4, -0.3, 0.1, 1.1, 1.8, -0.9,
+ 0.6, 0.5, 0.2, -0.7, 0.2, 0.1, 1.2, 2.2, 0.3, 0.6, 0.4, 0.1, 0.2, 0. , -1.1, -0.2,
+ -0.7, 0. , -1.2, 0.6, -0.6, -0.2, -0.4, 0. , 0.7, -1.2, 0.8, 0. , -0.3, 0.2, 0.6, -1. ,
+ -0.1, -0.1, 0. , -0.4, -0.2, 0.4, -1.4, 0.3, 0.1, 1.3, -0.2, -0.7, 0.6, 0.7, 0.6, 0.1,
+ -0.4, 0.1, -0.2, -0.8, 0. , -1.3, 1.2, 1.4, 1.1, 0.5, 0.3, 0. , 0.1, -0.4, 0.5, -0.1,
+ -0.5, 0.3, -0.7, 0.9, -0.1, -0.4, 0.2, -0.8, 1. , 1. , 0.1, 0.1, -0.2, 0. , -0.4, -0.3,
+ -0.8, 0.7, -0.9, -0.3, -0.3, -2.8, 1. , 1.4, 0. , -2.6, 1.1, -1.1, 0.5, 0.1, -0.4, -1.5,
+ 0. , 0.3, -0.3, -0.2, 0.7, -0.8, -0.1, 0.5, 0.7, 1.4, -1.2, -1. , -0.6, 0.2, 1.1, -0.9,
+ 0.7, -0.4, 0. , 0. , -0.2, -0.2, 0.1, 0. , 0. , -0.7, -0.7, -1.4, -0.9, -0.5, -0.6, 0.4,
+ 0.3, 0. , 0.9, -0.2, 0.7, 1.2, 0.5, 0.8, -0.5, 1. , 0.2, -0.5, 1.3, -0.5, 0.3, 1.2,
+ -0.3, -0.1, 1.3, 0.2, 0.6, -1.4, -0.1, -0.2, -0.4, -0.9, 1.2, -0.9, -0.2, -1.2, -1. , -0.2,
+ -1.6, 2.1, -0.6, -0.2, -0.3, 0.5, 0.9, -0.4, 0. , -0.1, 0.1, -0.6, -1. , -0.7, 0.2, -0.2
+ ],
+ i3: [ # anchors
+ 0, 6, 16, 10,
+ 6, 0, 10, 16,
+ 3, 5, 13, 11,
+ 5, 3, 11, 13
+ ],
+ i4: [64, 64, 32, 32], # image info
+}
+
+output0 = {
+ o1: [ # scores out
+ 0.945, 0.93 , 0.915, 0.9 , 0.87 , 0.84 , 0.81, 0.795, 0.78, 0.765, 0.75, 0.735,
+ 0.72 , 0.705, 0.69 , 0.675, 0.945, 0.915, 0.9 , 0.885, 0.87, 0.84 , 0.81, 0.78,
+ 0.735, 0.72 , 0.63 , 0.6 , 0.585, 0.54
+ ],
+ o2: [ # roi out
+ 16.845154 , 2.5170734, 33.154846 , 7.4829264,
+ 32.96344 , 40.747444 , 43.836563 , 47.252556 ,
+ 0. , 9.143808 , 16.243607 , 14.056192 ,
+ 0. , 25.789658 , 25.710022 , 30.210342 ,
+ 37.947445 , 20.791668 , 44.452557 , 32.80833 ,
+ 30.277609 , 32.21635 , 32.92239 , 38.18365 ,
+ 25.885489 , 29.086582 , 31.314512 , 30.913418 ,
+ 2.8654022, 5.789658 , 26.734598 , 10.210342 ,
+ 0.5408764, 3.5824041, 15.459124 , 5.217595 ,
+ 10.753355 , 35.982403 , 15.246645 , 37.617596 ,
+ 1.4593601, 23.050154 , 4.1406403, 36.149845 ,
+ 0. , 15.6 , 11.068764 , 21.6 ,
+ 38.54088 , 35.28549 , 53.45912 , 40.71451 ,
+ 26.134256 , 48.358635 , 27.465742 , 64. ,
+ 29.96254 , 3.1999998, 33.23746 , 19.2 ,
+ 11.653517 , 43.980293 , 48.34648 , 46.41971 ,
+ 0. , 26.967152 , 26.748941 , 31.032848 ,
+ 28.590324 , 9.050154 , 32. , 22.149847 ,
+ 17.828777 , 19.00683 , 32. , 20.99317 ,
+ 3.5724945, 7.273454 , 11.627505 , 19.126545 ,
+ 4.989658 , 26.8 , 9.410341 , 32. ,
+ 15.157195 , 18.00537 , 20.042807 , 25.194632 ,
+ 30.889404 , 9.652013 , 32. , 12.347987 ,
+ 3.399414 , 3.8000002, 32. , 9.8 ,
+ 24.980408 , 10.086582 , 28.61959 , 11.913418 ,
+ 13.950423 , 3.884349 , 22.049576 , 6.115651 ,
+ 24.259361 , 6.8 , 26.94064 , 22.8 ,
+ 3.6538367, 19.475813 , 13.546164 , 28.524187 ,
+ 11.947443 , 29.318363 , 18.452557 , 32. ,
+ 17.318363 , 0. , 20.281635 , 16.17695
+ ],
+ o3: [
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+}
+
+Example((input0, output0)).AddNchw(i1, i2, layout).AddVariations("relaxed", quant8, "float16")
diff --git a/tests/nnapi/specs/skip/V1_2/greater.mod.py b/tests/nnapi/specs/skip/V1_2/greater.mod.py
new file mode 100644
index 000000000..d811a4733
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/greater.mod.py
@@ -0,0 +1,99 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+def test(name, input0, input1, output0, input0_data, input1_data, output_data, do_variations=True):
+ model = Model().Operation("GREATER", input0, input1).To(output0)
+ example = Example({
+ input0: input0_data,
+ input1: input1_data,
+ output0: output_data,
+ }, model=model, name=name)
+ if do_variations:
+ example.AddVariations("int32", "float16", "relaxed")
+
+test(
+ name="simple",
+ input0=Input("input0", "TENSOR_FLOAT32", "{3}"),
+ input1=Input("input1", "TENSOR_FLOAT32", "{3}"),
+ output0=Output("output0", "TENSOR_BOOL8", "{3}"),
+ input0_data=[5, 7, 10],
+ input1_data=[10, 7, 5],
+ output_data=[False, False, True],
+)
+
+test(
+ name="broadcast",
+ input0=Input("input0", "TENSOR_FLOAT32", "{2, 1}"),
+ input1=Input("input1", "TENSOR_FLOAT32", "{2}"),
+ output0=Output("output0", "TENSOR_BOOL8", "{2, 2}"),
+ input0_data=[5, 10],
+ input1_data=[10, 5],
+ output_data=[False, False, False, True],
+)
+
+test(
+ name="quantized_different_scale",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM", [3], 1.0, 128)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM", [1], 2.0, 128)),
+ output0=Output("output0", "TENSOR_BOOL8", "{3}"),
+ input0_data=[129, 130, 131], # effectively 1, 2, 3
+ input1_data=[129], # effectively 2
+ output_data=[False, False, True],
+ do_variations=False,
+)
+
+test(
+ name="quantized_different_zero_point",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM", [3], 1.0, 128)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM", [1], 1.0, 129)),
+ output0=Output("output0", "TENSOR_BOOL8", "{3}"),
+ input0_data=[129, 130, 131], # effectively 1, 2, 3
+ input1_data=[131], # effectively 2
+ output_data=[False, False, True],
+ do_variations=False,
+)
+
+test(
+ name="quantized_overflow_second_input_if_requantized",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM", [1], 1.64771, 31)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM", [1], 1.49725, 240)),
+ output0=Output("output0", "TENSOR_BOOL8", "{1}"),
+ input0_data=[0],
+ input1_data=[200],
+ output_data=[True],
+ do_variations=False,
+)
+
+test(
+ name="quantized_overflow_first_input_if_requantized",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM", [1], 1.49725, 240)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM", [1], 1.64771, 31)),
+ output0=Output("output0", "TENSOR_BOOL8", "{1}"),
+ input0_data=[200],
+ input1_data=[0],
+ output_data=[False],
+ do_variations=False,
+)
+
+test(
+ name="boolean",
+ input0=Input("input0", "TENSOR_BOOL8", "{4}"),
+ input1=Input("input1", "TENSOR_BOOL8", "{4}"),
+ output0=Output("output0", "TENSOR_BOOL8", "{4}"),
+ input0_data=[False, True, False, True],
+ input1_data=[False, False, True, True],
+ output_data=[False, True, False, False],
+ do_variations=False,
+)
diff --git a/tests/nnapi/specs/skip/V1_2/grouped_conv2d.mod.py b/tests/nnapi/specs/skip/V1_2/grouped_conv2d.mod.py
new file mode 100644
index 000000000..32819317b
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/grouped_conv2d.mod.py
@@ -0,0 +1,135 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+layout = BoolScalar("layout", False) # NHWC
+
+# TEST 1: GROUPED_CONV2D, pad = 0, stride = 1, numGroups = 2
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 3, 3, 2}") # input 0
+w1 = Parameter("op2", "TENSOR_FLOAT32", "{2, 2, 2, 1}", [1, 2, 2, 1, 4, 3, 2, 1]) # weight
+b1 = Parameter("op3", "TENSOR_FLOAT32", "{2}", [10, -33.5]) # bias
+act = Int32Scalar("act", 0) # act = none
+o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 2}") # output 0
+Model().Operation("GROUPED_CONV_2D", i1, w1, b1, 0, 0, 0, 0, 1, 1, 2, act, layout).To(o1)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.25, 100),
+ w1: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
+ b1: ("TENSOR_INT32", 0.0625, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.5, 80)
+})
+
+quant8_mult_gt_1 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.25, 100),
+ w1: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
+ b1: ("TENSOR_INT32", 0.0625, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.05, 80)
+})
+
+# Per-channel quantization
+channelQuant8 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.25, 100),
+ w1: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.25, 0.5])),
+ b1: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.0625, 0.125], hide=True)),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.5, 80)
+})
+
+channelQuant8_mult_gt_1 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.25, 100),
+ w1: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.25, 0.5])),
+ b1: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.0625, 0.125], hide=True)),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.1, 80)
+})
+
+example = Example({
+ i1: [1, 2, 3, 4, 5, 6,
+ 6, 5, 4, 3, 2, 1,
+ 2, 3, 3, 3, 3, 3],
+ o1: [33, -0.5,
+ 33, 7.5,
+ 31, 4.5,
+ 27, -9.5]
+}).AddNchw(i1, o1, layout).AddAllActivations(o1, act).AddVariations("relaxed", quant8, quant8_mult_gt_1, channelQuant8, channelQuant8_mult_gt_1, "float16").AddInput(w1, b1)
+
+
+# TEST 2: GROUPED_CONV2D_LARGE, pad = same, stride = 1, numGroups = 2, act = none
+i2 = Input("op1", "TENSOR_FLOAT32", "{1, 3, 2, 2}") # input 0
+w2 = Parameter("op2", "TENSOR_FLOAT32", "{2, 2, 3, 1}", [100, 20, 1, 200, 10, 2, 200, 30, 1, 100, 20, 3]) # weight
+b2 = Parameter("op3", "TENSOR_FLOAT32", "{2}", [500, -1000]) # bias
+o2 = Output("op4", "TENSOR_FLOAT32", "{1, 3, 2, 2}") # output 0
+Model("large").Operation("GROUPED_CONV_2D", i2, w2, b2, 1, 1, 1, 2, 0, layout).To(o2)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
+ w2: ("TENSOR_QUANT8_ASYMM", 1.0, 0),
+ b2: ("TENSOR_INT32", 0.25, 0),
+ o2: ("TENSOR_QUANT8_ASYMM", 10.0, 100)
+})
+
+# Per-channel quantization
+channelQuant8 = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
+ w2: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[2.0, 2.5])),
+ b2: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.5, 0.625], hide=True)),
+ o2: ("TENSOR_QUANT8_ASYMM", 10.0, 100)
+})
+
+example = Example({
+ i2: [1, 2, 3, 4,
+ 4, 3, 2, 1,
+ 2, 3, 3, 3],
+ o2: [567, -873,
+ 1480, -160,
+ 608, -840,
+ 1370, -10,
+ 543, -907,
+ 760, -310]
+}).AddNchw(i2, o2, layout).AddVariations("relaxed", quant8, channelQuant8, "float16").AddInput(w2, b2)
+
+
+# TEST 3: GROUPED_CONV2D_CHANNEL, pad = same, stride = 1, numGroups = 3, act = none
+i3 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 9}") # input 0
+w3 = Parameter("op2", "TENSOR_FLOAT32", "{6, 1, 1, 3}", [1, 2, 3, 2, 1, 0, 2, 3, 3, 6, 6, 6, 9, 8, 5, 2, 1, 1]) # weight
+b3 = Parameter("op3", "TENSOR_FLOAT32", "{6}", [10, -20, 30, -40, 50, -60]) # bias
+o3 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 6}") # output 0
+Model("channel").Operation("GROUPED_CONV_2D", i3, w3, b3, 1, 1, 1, 3, 0, layout).To(o3)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i3: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
+ w3: ("TENSOR_QUANT8_ASYMM", 0.25, 0),
+ b3: ("TENSOR_INT32", 0.125, 0),
+ o3: ("TENSOR_QUANT8_ASYMM", 2.0, 60)
+})
+
+channelQuant8 = DataTypeConverter().Identify({
+ i3: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
+ w3: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.25, 0.3] * 3)),
+ b3: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.125, 0.15] * 3, hide=True)),
+ o3: ("TENSOR_QUANT8_ASYMM", 2.0, 60)
+})
+
+example = Example({
+ i3: [1, 2, 3, 4, 55, 4, 3, 2, 1,
+ 5, 4, 3, 2, 11, 2, 3, 4, 5,
+ 2, 3, 2, 3, 22, 3, 2, 3, 2,
+ 1, 0, 2, 1, 33, 1, 2, 0, 1],
+ o3: [24, -16, 215, 338, 98, -51,
+ 32, -6, 73, 50, 134, -45,
+ 24, -13, 111, 128, 102, -51,
+ 17, -18, 134, 170, 73, -55]
+}).AddNchw(i3, o3, layout).AddVariations("relaxed", quant8, channelQuant8, "float16").AddInput(w3, b3)
diff --git a/tests/nnapi/specs/skip/V1_2/heatmap_max_keypoint.mod.py b/tests/nnapi/specs/skip/V1_2/heatmap_max_keypoint.mod.py
new file mode 100644
index 000000000..9b738112a
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/heatmap_max_keypoint.mod.py
@@ -0,0 +1,193 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+layout = BoolScalar("layout", False) # NHWC
+
+# TEST 1: HEATMAP_MAX_KEYPOINT_1
+heatmap1 = Input("heatmap", "TENSOR_FLOAT32", "{6, 4, 4, 1}")
+boxes1 = Input("boxes", "TENSOR_FLOAT32", "{6, 4}")
+score1 = Output("score", "TENSOR_FLOAT32", "{6, 1}")
+keypoint1 = Output("keypoint", "TENSOR_FLOAT32", "{6, 1, 2}")
+Model().Operation("HEATMAP_MAX_KEYPOINT", heatmap1, boxes1, layout).To(score1, keypoint1)
+
+# Instantiate an example
+Example({
+ heatmap1: [
+ -10, -1, 4, -5, # batch0
+ -8, -2, 9, 1,
+ 7, -2, 3, -7,
+ -2, 2, -3, 5,
+ -10, -1, 4, -5, # batch1 - test mirror bottom
+ -8, -2, 9, 1,
+ 7, -2, 3, -7,
+ -2, 10, -3, 5,
+ -10, -1, 4, -5, # batch2 - test mirror left
+ -8, -2, 4, 1,
+ 7, -2, 3, -7,
+ -2, 2, -3, 5,
+ -10, -1, 4, 10, # batch3 - test mirror top right
+ -8, -2, 4, 1,
+ 7, -2, 3, -7,
+ -2, 2, -3, 5,
+ -10,-56, 4, -5, # batch4 - test out of range delta
+ -8, -2, 9, 1,
+ 7, -2, 3, -7,
+ -2, 2, -3, 5,
+ -10,-57.827329175, 4, -5, # batch5 - test detA = 0
+ -8, -2, 9, 1,
+ 7, -2, 3, -7,
+ -2, 2, -3, 5
+ ],
+ boxes1: [
+ 5, 2, 10, 20,
+ 1, 7, 30, 10,
+ 8, 3, 15, 13,
+ 6, 5, 19, 12,
+ 5, 2, 10, 20,
+ 5, 2, 10, 20
+ ],
+ score1: [
+ 9.071493,
+ 10.00500,
+ 7.187500,
+ 10.00000,
+ 10.689667,
+ 9.000000
+ ],
+ keypoint1: [
+ 8.224462, 8.537316,
+ 11.73000, 9.625000,
+ 8.875000, 9.562500,
+ 17.37500, 5.875000,
+ 9.569672, 2.000000,
+ 8.125000, 8.750000
+ ]
+}).AddNchw(heatmap1, layout).AddVariations("relaxed", "float16")
+
+
+# TEST 2: HEATMAP_MAX_KEYPOINT_2
+heatmap2 = Input("heatmap", "TENSOR_FLOAT32", "{2, 4, 4, 4}")
+boxes2 = Input("boxes", "TENSOR_FLOAT32", "{2, 4}")
+score2 = Output("score", "TENSOR_FLOAT32", "{2, 4}")
+keypoint2 = Output("keypoint", "TENSOR_FLOAT32", "{2, 4, 2}")
+Model().Operation("HEATMAP_MAX_KEYPOINT", heatmap2, boxes2, layout).To(score2, keypoint2)
+
+quant8 = DataTypeConverter().Identify({
+ heatmap2: ("TENSOR_QUANT8_ASYMM", 0.01, 128),
+ boxes2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ score2: ("TENSOR_QUANT8_ASYMM", 0.01, 0),
+ keypoint2: ("TENSOR_QUANT16_ASYMM", 0.125, 0)
+})
+
+# Instantiate an example
+Example({
+ heatmap2: [
+ 0.19, 0.61, 0.49, 0.01, 0.98, 0.65, 0.64, 0.70, 0.76, 0.55,
+ 0.83, 0.19, 0.46, 0.03, 0.67, 0.71, 0.17, 0.23, 0.89, 0.08,
+ 0.96, 0.65, 0.52, 0.40, 0.36, 0.80, 0.55, 0.89, 0.58, 0.29,
+ 0.27, 0.69, 0.66, 0.06, 0.51, 0.26, 0.96, 0.38, 0.41, 0.89,
+ 0.88, 0.46, 0.96, 0.73, 0.54, 0.64, 0.84, 0.74, 0.51, 0.41,
+ 0.13, 0.19, 0.52, 0.21, 0.50, 0.75, 0.89, 0.89, 0.20, 0.58,
+ 0.70, 0.13, 0.29, 0.39,
+ 0.91, 0.06, 0.93, 0.34, 0.80, 0.87, 0.59, 0.67, 0.57, 0.85,
+ 0.24, 0.25, 0.76, 0.34, 0.37, 0.11, 0.00, 0.29, 0.30, 0.77,
+ 0.34, 0.57, 0.48, 0.76, 0.93, 0.18, 0.64, 0.12, 0.67, 0.47,
+ 0.56, 0.50, 0.48, 0.99, 0.46, 0.66, 0.98, 0.06, 0.10, 0.66,
+ 0.66, 0.91, 0.67, 0.23, 0.40, 0.37, 0.17, 0.35, 0.48, 0.98,
+ 0.47, 0.49, 0.56, 0.18, 0.75, 0.29, 0.04, 0.23, 0.42, 0.55,
+ 0.38, 0.07, 0.71, 0.80
+ ],
+ boxes2: [
+ 5, 2, 10, 20,
+ 1, 7, 30, 10
+ ],
+ score2: [
+ 1.020210, 0.890556, 1.007110, 0.945129,
+ 0.987798, 1.073820, 0.930000, 0.800000
+ ],
+ keypoint2: [
+ 7.227723, 4.250000,
+ 8.090278, 17.750000,
+ 8.523379, 12.589181,
+ 8.365580, 10.122508,
+ 12.431603, 8.934225,
+ 4.625000, 9.239437,
+ 4.625000, 7.375000,
+ 26.375000, 9.625000
+ ]
+}).AddNchw(heatmap2, layout).AddVariations("relaxed", "float16", quant8)
+
+
+# TEST 3: HEATMAP_MAX_KEYPOINT_3
+heatmap3 = Input("heatmap", "TENSOR_FLOAT32", "{5, 4, 4, 1}")
+boxes3 = Input("boxes", "TENSOR_FLOAT32", "{5, 4}")
+score3 = Output("score", "TENSOR_FLOAT32", "{5, 1}")
+keypoint3 = Output("keypoint", "TENSOR_FLOAT32", "{5, 1, 2}")
+Model().Operation("HEATMAP_MAX_KEYPOINT", heatmap3, boxes3, layout).To(score3, keypoint3)
+
+quant8 = DataTypeConverter().Identify({
+ heatmap3: ("TENSOR_QUANT8_ASYMM", 0.5, 128),
+ boxes3: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ score3: ("TENSOR_QUANT8_ASYMM", 0.1, 10),
+ keypoint3: ("TENSOR_QUANT16_ASYMM", 0.125, 0)
+})
+
+# Instantiate an example
+Example({
+ heatmap3: [
+ -10, -1, 4, -5, # batch0
+ -8, -2, 9, 1,
+ 7, -2, 3, -7,
+ -2, 2, -3, 5,
+ -10, -1, 4, -5, # batch1 - test mirror bottom
+ -8, -2, 9, 1,
+ 7, -2, 3, -7,
+ -2, 10, -3, 5,
+ -10, -1, 4, -5, # batch2 - test mirror left
+ -8, -2, 4, 1,
+ 7, -2, 3, -7,
+ -2, 2, -3, 5,
+ -10, -1, 4, 10, # batch3 - test mirror top right
+ -8, -2, 4, 1,
+ 7, -2, 3, -7,
+ -2, 2, -3, 5,
+ -10,-56, 4, -5, # batch4 - test out of range delta
+ -8, -2, 9, 1,
+ 7, -2, 3, -7,
+ -2, 2, -3, 5
+ ],
+ boxes3: [
+ 5, 2, 10, 20,
+ 1, 7, 30, 10,
+ 8, 3, 15, 13,
+ 6, 5, 19, 12,
+ 5, 2, 10, 20
+ ],
+ score3: [
+ 9.071493,
+ 10.00500,
+ 7.187500,
+ 10.00000,
+ 10.689667
+ ],
+ keypoint3: [
+ 8.224462, 8.537316,
+ 11.73000, 9.625000,
+ 8.875000, 9.562500,
+ 17.37500, 5.875000,
+ 9.569672, 2.000000
+ ]
+}).AddNchw(heatmap3, layout).AddVariations(quant8, includeDefault=False)
diff --git a/tests/nnapi/specs/skip/V1_2/instance_normalization.mod.py b/tests/nnapi/specs/skip/V1_2/instance_normalization.mod.py
new file mode 100644
index 000000000..9d540ff59
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/instance_normalization.mod.py
@@ -0,0 +1,56 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+layout = BoolScalar("layout", False) # NHWC
+
+# TEST 1: INSTANCE_NORMALIZATION, gamma = 1, beta = 0, epsilon = 0.0001
+i1 = Input("in", "TENSOR_FLOAT32", "{2, 2, 2, 2}")
+o1 = Output("out", "TENSOR_FLOAT32", "{2, 2, 2, 2}")
+Model().Operation("INSTANCE_NORMALIZATION", i1, 1.0, 0.0, 0.0001, layout).To(o1)
+
+# Instantiate an example
+Example({
+ i1: [
+ 0, 1, 0, 2, 0, 2, 0, 4,
+ 1, -1, -1, 2, -1, -2, 1, 4
+ ],
+ o1: [
+ 0.00, -0.499996, 0.00, -0.0999992,
+ 0.00, -0.0999992, 0.00, 0.6999944,
+ 0.99995, -0.6999944, -0.99995, 0.499996,
+ -0.99995, -1.0999912, 0.99995, 1.2999896
+ ]
+}).AddNchw(i1, o1, layout).AddVariations("relaxed", "float16")
+
+
+# TEST 2: INSTANCE_NORMALIZATION, gamma = 2, beta = 10, epsilon = 0.0001
+i2 = Input("in", "TENSOR_FLOAT32", "{2, 2, 2, 2}")
+o2 = Output("out", "TENSOR_FLOAT32", "{2, 2, 2, 2}")
+Model().Operation("INSTANCE_NORMALIZATION", i2, 2.0, 10.0, 0.0001, layout).To(o2)
+
+# Instantiate an example
+Example({
+ i2: [
+ 0, 1, 0, 2, 0, 2, 0, 4,
+ 1, -1, -1, 2, -1, -2, 1, 4
+ ],
+ o2: [
+ 10. , 9.000008 , 10. , 9.8000016, 10. ,
+ 9.8000016, 10. , 11.3999888, 11.9999 , 8.6000112,
+ 8.0001 , 10.999992 , 8.0001 , 7.8000176, 11.9999 ,
+ 12.5999792
+ ]
+}).AddNchw(i2, o2, layout).AddVariations("relaxed", "float16")
diff --git a/tests/nnapi/specs/skip/V1_2/l2_normalization_axis.mod.py b/tests/nnapi/specs/skip/V1_2/l2_normalization_axis.mod.py
new file mode 100644
index 000000000..b420032d4
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/l2_normalization_axis.mod.py
@@ -0,0 +1,47 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+i1 = Input("op1", "TENSOR_FLOAT32", "{2, 2, 2, 3}") # input 0
+o1 = Output("op2", "TENSOR_FLOAT32", "{2, 2, 2, 3}") # output 0
+axis = Int32Scalar("axis", -1) # last axis
+
+quant8 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.1, 32),
+ o1: ("TENSOR_QUANT8_ASYMM", 1.0 / 128, 128)
+})
+
+example0 = {
+ i1: [ 0, 3, 4,
+ 3, 0, 4,
+ 8, 6, 0,
+ 12, 0, 9,
+ 9, 12, 20,
+ 12, 15, 16,
+ 20, 9, 12,
+ 16, 15, 12],
+ o1: [0.00, 0.60, 0.80,
+ 0.60, 0.00, 0.80,
+ 0.80, 0.60, 0.00,
+ 0.80, 0.00, 0.60,
+ 0.36, 0.48, 0.80,
+ 0.48, 0.60, 0.64,
+ 0.80, 0.36, 0.48,
+ 0.64, 0.60, 0.48]
+}
+
+# All dimensions, with all possible axis parameter
+Model().Operation("L2_NORMALIZATION", i1, axis).To(o1)
+Example(example0).AddRelaxed().AddAllDimsAndAxis(i1, o1, axis).AddVariations("relaxed", "float16", quant8)
diff --git a/tests/nnapi/specs/skip/V1_2/l2_normalization_v1_2.mod.py b/tests/nnapi/specs/skip/V1_2/l2_normalization_v1_2.mod.py
new file mode 100644
index 000000000..f1cd7f517
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/l2_normalization_v1_2.mod.py
@@ -0,0 +1,50 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+i1 = Input("op1", "TENSOR_FLOAT32", "{2, 2, 2, 3}") # input 0
+o1 = Output("op2", "TENSOR_FLOAT32", "{2, 2, 2, 3}") # output 0
+axis = Int32Scalar("axis", -1) # last axis
+
+quant8 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.1, 32),
+ o1: ("TENSOR_QUANT8_ASYMM", 1.0 / 128, 128)
+})
+
+example0 = {
+ i1: [ 0, 3, 4,
+ 3, 0, 4,
+ 8, 6, 0,
+ 12, 0, 9,
+ 9, 12, 20,
+ 12, 15, 16,
+ 20, 9, 12,
+ 16, 15, 12],
+ o1: [0.00, 0.60, 0.80,
+ 0.60, 0.00, 0.80,
+ 0.80, 0.60, 0.00,
+ 0.80, 0.00, 0.60,
+ 0.36, 0.48, 0.80,
+ 0.48, 0.60, 0.64,
+ 0.80, 0.36, 0.48,
+ 0.64, 0.60, 0.48]
+}
+
+# All dimensions other than 4, without axis parameter
+Model().Operation("L2_NORMALIZATION", i1).To(o1)
+Example(example0).AddRelaxed().AddAllDims(i1, o1).AddVariations("relaxed", "float16", quant8)
+
+# L2_NORMALIZATION of rank 4 is introduced in V1_0.
+Example.SetVersion("V1_0", "l2_normalization_v1_2_dim4_axis3")
diff --git a/tests/nnapi/specs/skip/V1_2/l2_pool_v1_2.mod.py b/tests/nnapi/specs/skip/V1_2/l2_pool_v1_2.mod.py
new file mode 100644
index 000000000..1754478ea
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/l2_pool_v1_2.mod.py
@@ -0,0 +1,110 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+layout = BoolScalar("layout", False) # NHWC
+
+# TEST 1: L2_POOL_2D_NCHW_1, pad = 0, stride = 1, filter = 1, act = none
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+Model().Operation("L2_POOL_2D", i1, 0, 0, 0, 0, 1, 1, 1, 1, 0, layout).To(o1)
+
+# Instantiate an example
+example = Example({
+ i1: [1.0, 2.0, 3.0, 4.0],
+ o1: [1.0, 2.0, 3.0, 4.0]
+}).AddNchw(i1, o1, layout).AddRelaxed().AddVariations("float16")
+
+
+# TEST 2: L2_POOL_2D_NCHW_2, pad = same, stride = 2, filter = 2, act = none
+i2 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 4, 1}")
+o2 = Output("op4", "TENSOR_FLOAT32", "{1, 1, 2, 1}")
+Model().Operation("L2_POOL_2D", i2, 1, 2, 2, 2, 2, 0, layout).To(o2)
+
+# Instantiate an example
+example = Example({
+ i2: [0, 6, 2, 4, 3, 2, 10, 7],
+ o2: [3.5, 6.5]
+}).AddNchw(i2, o2, layout).AddRelaxed().AddVariations("float16")
+
+
+# TEST 3: L2_POOL_2D_NCHW_LARGE, pad = 0, stride = 1, filter = 2, act = none
+i3 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 3}")
+o3 = Output("op4", "TENSOR_FLOAT32", "{1, 1, 1, 3}")
+Model("large").Operation("L2_POOL_2D", i3, 0, 0, 0, 0, 1, 1, 2, 2, 0, layout).To(o3)
+
+# Instantiate an example
+example = Example({
+ i3: [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0],
+ o3: [6.442049503326416, 7.3143692016601562, 8.2158384323120117]
+}).AddNchw(i3, o3, layout).AddRelaxed().AddVariations("float16")
+
+
+# TEST 4: zero-sized input, explicit padding
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# L2_POOL_2D op with numBatches = 0.
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 1, 1, 1}") # out
+model = model.Operation("L2_POOL_2D", zero_sized, 0, 0, 0, 0, 1, 1, 2, 2, 0, layout).To(o3)
+
+# Create test case with dummy values.
+Example({
+ i1: [1],
+ o1: [0],
+ o2: [0],
+ o3: [0],
+}).AddNchw(i1, zero_sized, o3, layout).AddVariations("relaxed", "float16")
+
+
+# TEST 5: zero-sized input, implicit padding
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# L2_POOL_2D op with numBatches = 0.
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 2, 2, 1}") # out
+model = model.Operation("L2_POOL_2D", zero_sized, 1, 1, 1, 2, 2, 0, layout).To(o3)
+
+# Create test case with dummy values.
+Example({
+ i1: [1],
+ o1: [0],
+ o2: [0],
+ o3: [0],
+}).AddNchw(i1, zero_sized, o3, layout).AddVariations("relaxed", "float16")
diff --git a/tests/nnapi/specs/skip/V1_2/layer_norm_lstm.mod.py b/tests/nnapi/specs/skip/V1_2/layer_norm_lstm.mod.py
new file mode 100644
index 000000000..0a5edeaf5
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/layer_norm_lstm.mod.py
@@ -0,0 +1,357 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import copy
+
+# LSTM Test: Layer Normalization, No Cifg, Peephole, Projection, and No Clipping.
+model = Model()
+
+n_batch = 2
+n_input = 5
+n_cell = 4
+n_output = 3
+
+input = Input("input", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_input))
+
+input_to_input_weights = Input("input_to_input_weights", "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_cell, n_input))
+input_to_forget_weights = Input("input_to_forget_weights", "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_cell, n_input))
+input_to_cell_weights = Input("input_to_cell_weights", "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_cell, n_input))
+input_to_output_weights = Input("input_to_output_weights", "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_cell, n_input))
+
+recurrent_to_input_weights = Input("recurrent_to_intput_weights",
+ "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_cell, n_output))
+recurrent_to_forget_weights = Input("recurrent_to_forget_weights",
+ "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_cell, n_output))
+recurrent_to_cell_weights = Input("recurrent_to_cell_weights", "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_cell, n_output))
+recurrent_to_output_weights = Input("recurrent_to_output_weights",
+ "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_cell, n_output))
+
+cell_to_input_weights = Input("cell_to_input_weights", "TENSOR_FLOAT32",
+ "{%d}" % (n_cell))
+cell_to_forget_weights = Input("cell_to_forget_weights", "TENSOR_FLOAT32",
+ "{%d}" % (n_cell))
+cell_to_output_weights = Input("cell_to_output_weights", "TENSOR_FLOAT32",
+ "{%d}" % (n_cell))
+
+input_gate_bias = Input("input_gate_bias", "TENSOR_FLOAT32", "{%d}" % (n_cell))
+forget_gate_bias = Input("forget_gate_bias", "TENSOR_FLOAT32",
+ "{%d}" % (n_cell))
+cell_gate_bias = Input("cell_gate_bias", "TENSOR_FLOAT32", "{%d}" % (n_cell))
+output_gate_bias = Input("output_gate_bias", "TENSOR_FLOAT32",
+ "{%d}" % (n_cell))
+
+projection_weights = Input("projection_weights", "TENSOR_FLOAT32",
+ "{%d,%d}" % (n_output, n_cell))
+projection_bias = Input("projection_bias", "TENSOR_FLOAT32", "{0}")
+
+output_state_in = Input("output_state_in", "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_batch, n_output))
+cell_state_in = Input("cell_state_in", "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_batch, n_cell))
+
+activation_param = Int32Scalar("activation_param", 4) # Tanh
+cell_clip_param = Float32Scalar("cell_clip_param", 0.)
+proj_clip_param = Float32Scalar("proj_clip_param", 0.)
+
+input_layer_norm_weights = Input("input_layer_norm_weights", "TENSOR_FLOAT32",
+ "{%d}" % n_cell)
+forget_layer_norm_weights = Input("forget_layer_norm_weights", "TENSOR_FLOAT32",
+ "{%d}" % n_cell)
+cell_layer_norm_weights = Input("cell_layer_norm_weights", "TENSOR_FLOAT32",
+ "{%d}" % n_cell)
+output_layer_norm_weights = Input("output_layer_norm_weights", "TENSOR_FLOAT32",
+ "{%d}" % n_cell)
+
+scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_batch, (n_cell * 4)))
+output_state_out = Output("output_state_out", "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_batch, n_output))
+cell_state_out = Output("cell_state_out", "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_batch, n_cell))
+output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output))
+
+model = model.Operation(
+ "LSTM", input, input_to_input_weights, input_to_forget_weights,
+ input_to_cell_weights, input_to_output_weights, recurrent_to_input_weights,
+ recurrent_to_forget_weights, recurrent_to_cell_weights,
+ recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights,
+ cell_to_output_weights, input_gate_bias, forget_gate_bias, cell_gate_bias,
+ output_gate_bias, projection_weights, projection_bias, output_state_in,
+ cell_state_in, activation_param, cell_clip_param, proj_clip_param,
+ input_layer_norm_weights, forget_layer_norm_weights,
+ cell_layer_norm_weights, output_layer_norm_weights).To(
+ [scratch_buffer, output_state_out, cell_state_out, output])
+
+# Example 1. Input in operand 0,
+input0 = {
+ input_to_input_weights: [
+ 0.5, 0.6, 0.7, -0.8, -0.9, 0.1, 0.2, 0.3, -0.4, 0.5, -0.8, 0.7, -0.6,
+ 0.5, -0.4, -0.5, -0.4, -0.3, -0.2, -0.1
+ ],
+ input_to_forget_weights: [
+ -0.6, -0.1, 0.3, 0.2, 0.9, -0.5, -0.2, -0.4, 0.3, -0.8, -0.4, 0.3, -0.5,
+ -0.4, -0.6, 0.3, -0.4, -0.6, -0.5, -0.5
+ ],
+ input_to_cell_weights: [
+ -0.4, -0.3, -0.2, -0.1, -0.5, 0.5, -0.2, -0.3, -0.2, -0.6, 0.6, -0.1,
+ -0.4, -0.3, -0.7, 0.7, -0.9, -0.5, 0.8, 0.6
+ ],
+ input_to_output_weights: [
+ -0.8, -0.4, -0.2, -0.9, -0.1, -0.7, 0.3, -0.3, -0.8, -0.2, 0.6, -0.2,
+ 0.4, -0.7, -0.3, -0.5, 0.1, 0.5, -0.6, -0.4
+ ],
+ input_gate_bias: [0.03, 0.15, 0.22, 0.38],
+ forget_gate_bias: [0.1, -0.3, -0.2, 0.1],
+ cell_gate_bias: [-0.05, 0.72, 0.25, 0.08],
+ output_gate_bias: [0.05, -0.01, 0.2, 0.1],
+ recurrent_to_input_weights: [
+ -0.2, -0.3, 0.4, 0.1, -0.5, 0.9, -0.2, -0.3, -0.7, 0.05, -0.2, -0.6
+ ],
+ recurrent_to_cell_weights: [
+ -0.3, 0.2, 0.1, -0.3, 0.8, -0.08, -0.2, 0.3, 0.8, -0.6, -0.1, 0.2
+ ],
+ recurrent_to_forget_weights: [
+ -0.5, -0.3, -0.5, -0.2, 0.6, 0.4, 0.9, 0.3, -0.1, 0.2, 0.5, 0.2
+ ],
+ recurrent_to_output_weights: [
+ 0.3, -0.1, 0.1, -0.2, -0.5, -0.7, -0.2, -0.6, -0.1, -0.4, -0.7, -0.2
+ ],
+ cell_to_input_weights: [0.05, 0.1, 0.25, 0.15],
+ cell_to_forget_weights: [-0.02, -0.15, -0.25, -0.03],
+ cell_to_output_weights: [0.1, -0.1, -0.5, 0.05],
+ projection_weights: [
+ -0.1, 0.2, 0.01, -0.2, 0.1, 0.5, 0.3, 0.08, 0.07, 0.2, -0.4, 0.2
+ ],
+ projection_bias: [],
+ input_layer_norm_weights: [0.1, 0.2, 0.3, 0.5],
+ forget_layer_norm_weights: [0.2, 0.2, 0.4, 0.3],
+ cell_layer_norm_weights: [0.7, 0.2, 0.3, 0.8],
+ output_layer_norm_weights: [0.6, 0.2, 0.2, 0.5]
+}
+
+test_inputs = [[0.7, 0.8, 0.1, 0.2, 0.3, 0.3, 0.2, 0.9, 0.8, 0.1],
+ [0.8, 0.1, 0.2, 0.4, 0.5, 0.1, 0.5, 0.2, 0.4, 0.2],
+ [0.2, 0.7, 0.7, 0.1, 0.7, 0.6, 0.9, 0.2, 0.5, 0.7]]
+golden_cell_states = [
+ [
+ -0.451771229505539, 0.376915663480759, 0.225425109267235, 0.232406347990036, -0.252585828304291, 0.330421179533005, 0.017305245622993, 0.366601228713989
+ ],
+ [
+ -0.645632147789001, 0.518238246440887, 0.168679088354111, 0.555787742137909, -0.493674814701080, 0.475847363471985, 0.106874041259289, 0.504309654235840
+ ],
+ [-0.742560744285583, 0.579139292240143, 0.114988230168819, 0.649957716464996, -0.686565399169922, 0.548869132995605, 0.173138767480850, 0.587379336357117],
+]
+cell_states = [[0, 0, 0, 0, 0, 0, 0, 0]] + golden_cell_states[:2]
+
+golden_outputs = [
+ [0.024407668039203, 0.128027379512787, -0.001709178090096, -0.006924282759428, 0.084874063730240, 0.063444979488850],
+ [0.013764165341854, 0.140751048922539, 0.039583537727594, -0.004039138555527, 0.139963015913963, 0.072681039571762],
+ [-0.004592306911945, 0.155278354883194, 0.083737745881081, 0.007527053356171, 0.161902531981468, 0.056137066334486],
+]
+output_states = [[0, 0, 0, 0, 0, 0]] + golden_outputs[:2]
+
+tests = zip(
+ test_inputs, output_states, cell_states, golden_cell_states, golden_outputs)
+
+for test_input, output_state, cell_state, golden_state, golden_output in tests:
+ cur_input = copy.deepcopy(input0)
+ cur_input[input] = test_input
+ cur_input[output_state_in] = output_state
+ cur_input[cell_state_in] = cell_state
+ cur_output = {
+ scratch_buffer: [0] * (n_batch * n_cell * 4),
+ cell_state_out: golden_state,
+ output_state_out: golden_output,
+ output: golden_output
+ }
+ Example((cur_input, cur_output), name="NoCifgPeepholeProjectionNoClippingLayerNormLstm")
+
+
+# LSTM Test: Layer Normalization, Cifg, Peephole, Projection, and No Clipping.
+model = Model()
+
+n_batch = 2
+n_input = 5
+n_cell = 4
+n_output = 3
+
+input = Input("input", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_input))
+
+input_to_input_weights = Input("input_to_input_weights", "TENSOR_FLOAT32",
+ "{0, 0}")
+input_to_forget_weights = Input("input_to_forget_weights", "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_cell, n_input))
+input_to_cell_weights = Input("input_to_cell_weights", "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_cell, n_input))
+input_to_output_weights = Input("input_to_output_weights", "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_cell, n_input))
+
+recurrent_to_input_weights = Input("recurrent_to_intput_weights",
+ "TENSOR_FLOAT32",
+ "{0, 0}")
+recurrent_to_forget_weights = Input("recurrent_to_forget_weights",
+ "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_cell, n_output))
+recurrent_to_cell_weights = Input("recurrent_to_cell_weights", "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_cell, n_output))
+recurrent_to_output_weights = Input("recurrent_to_output_weights",
+ "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_cell, n_output))
+
+cell_to_input_weights = Input("cell_to_input_weights", "TENSOR_FLOAT32",
+ "{0}")
+cell_to_forget_weights = Input("cell_to_forget_weights", "TENSOR_FLOAT32",
+ "{%d}" % (n_cell))
+cell_to_output_weights = Input("cell_to_output_weights", "TENSOR_FLOAT32",
+ "{%d}" % (n_cell))
+
+input_gate_bias = Input("input_gate_bias", "TENSOR_FLOAT32", "{0}")
+forget_gate_bias = Input("forget_gate_bias", "TENSOR_FLOAT32",
+ "{%d}" % (n_cell))
+cell_gate_bias = Input("cell_gate_bias", "TENSOR_FLOAT32", "{%d}" % (n_cell))
+output_gate_bias = Input("output_gate_bias", "TENSOR_FLOAT32",
+ "{%d}" % (n_cell))
+
+projection_weights = Input("projection_weights", "TENSOR_FLOAT32",
+ "{%d,%d}" % (n_output, n_cell))
+projection_bias = Input("projection_bias", "TENSOR_FLOAT32", "{0}")
+
+output_state_in = Input("output_state_in", "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_batch, n_output))
+cell_state_in = Input("cell_state_in", "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_batch, n_cell))
+
+activation_param = Int32Scalar("activation_param", 4) # Tanh
+cell_clip_param = Float32Scalar("cell_clip_param", 0.)
+proj_clip_param = Float32Scalar("proj_clip_param", 0.)
+
+input_layer_norm_weights = Input("input_layer_norm_weights", "TENSOR_FLOAT32",
+ "{0}")
+forget_layer_norm_weights = Input("forget_layer_norm_weights", "TENSOR_FLOAT32",
+ "{%d}" % n_cell)
+cell_layer_norm_weights = Input("cell_layer_norm_weights", "TENSOR_FLOAT32",
+ "{%d}" % n_cell)
+output_layer_norm_weights = Input("output_layer_norm_weights", "TENSOR_FLOAT32",
+ "{%d}" % n_cell)
+
+scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_batch, (n_cell * 3)))
+output_state_out = IgnoredOutput("output_state_out", "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_batch, n_output))
+cell_state_out = Output("cell_state_out", "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_batch, n_cell))
+output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output))
+
+model = model.Operation(
+ "LSTM", input, input_to_input_weights, input_to_forget_weights,
+ input_to_cell_weights, input_to_output_weights, recurrent_to_input_weights,
+ recurrent_to_forget_weights, recurrent_to_cell_weights,
+ recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights,
+ cell_to_output_weights, input_gate_bias, forget_gate_bias, cell_gate_bias,
+ output_gate_bias, projection_weights, projection_bias, output_state_in,
+ cell_state_in, activation_param, cell_clip_param, proj_clip_param,
+ input_layer_norm_weights, forget_layer_norm_weights,
+ cell_layer_norm_weights, output_layer_norm_weights).To(
+ [scratch_buffer, output_state_out, cell_state_out, output])
+
+# Example 1. Input in operand 0,
+input0 = {
+ input_to_input_weights: [],
+ input_to_forget_weights: [
+ -0.6, -0.1, 0.3, 0.2, 0.9, -0.5, -0.2, -0.4, 0.3, -0.8, -0.4, 0.3, -0.5,
+ -0.4, -0.6, 0.3, -0.4, -0.6, -0.5, -0.5
+ ],
+ input_to_cell_weights: [
+ -0.4, -0.3, -0.2, -0.1, -0.5, 0.5, -0.2, -0.3, -0.2, -0.6, 0.6, -0.1,
+ -0.4, -0.3, -0.7, 0.7, -0.9, -0.5, 0.8, 0.6
+ ],
+ input_to_output_weights: [
+ -0.8, -0.4, -0.2, -0.9, -0.1, -0.7, 0.3, -0.3, -0.8, -0.2, 0.6, -0.2,
+ 0.4, -0.7, -0.3, -0.5, 0.1, 0.5, -0.6, -0.4
+ ],
+ input_gate_bias: [],
+ forget_gate_bias: [0.1, -0.3, -0.2, 0.1],
+ cell_gate_bias: [-0.05, 0.72, 0.25, 0.08],
+ output_gate_bias: [0.05, -0.01, 0.2, 0.1],
+ recurrent_to_input_weights: [],
+ recurrent_to_cell_weights: [
+ -0.3, 0.2, 0.1, -0.3, 0.8, -0.08, -0.2, 0.3, 0.8, -0.6, -0.1, 0.2
+ ],
+ recurrent_to_forget_weights: [
+ -0.5, -0.3, -0.5, -0.2, 0.6, 0.4, 0.9, 0.3, -0.1, 0.2, 0.5, 0.2
+ ],
+ recurrent_to_output_weights: [
+ 0.3, -0.1, 0.1, -0.2, -0.5, -0.7, -0.2, -0.6, -0.1, -0.4, -0.7, -0.2
+ ],
+ cell_to_input_weights: [],
+ cell_to_forget_weights: [-0.02, -0.15, -0.25, -0.03],
+ cell_to_output_weights: [0.1, -0.1, -0.5, 0.05],
+ projection_weights: [
+ -0.1, 0.2, 0.01, -0.2, 0.1, 0.5, 0.3, 0.08, 0.07, 0.2, -0.4, 0.2
+ ],
+ projection_bias: [],
+ input_layer_norm_weights: [],
+ forget_layer_norm_weights: [0.2, 0.2, 0.4, 0.3],
+ cell_layer_norm_weights: [0.7, 0.2, 0.3, 0.8],
+ output_layer_norm_weights: [0.6, 0.2, 0.2, 0.5]
+}
+
+test_inputs = [[0.7, 0.8, 0.1, 0.2, 0.3, 0.3, 0.2, 0.9, 0.8, 0.1],
+ [0.8, 0.1, 0.2, 0.4, 0.5, 0.1, 0.5, 0.2, 0.4, 0.2],
+ [0.2, 0.7, 0.7, 0.1, 0.7, 0.6, 0.9, 0.2, 0.5, 0.7]]
+golden_cell_states = [
+ [
+ -0.3510298, 0.4261035, 0.2146365, 0.2771652, -0.1885517, 0.3252200, 0.0203665, 0.4896766
+ ],
+ [
+ -0.5069088, 0.5386363, 0.1980069, 0.5355753, -0.3866257, 0.4749442, 0.1074765, 0.7124508
+ ],
+ [
+ -0.5736622, 0.5952501, 0.1292950, 0.7110270, -0.5323033, 0.5556133, 0.1800992, 0.7845056
+ ],
+]
+cell_states = [[0, 0, 0, 0, 0, 0, 0, 0]] + golden_cell_states[:2]
+
+golden_outputs = [
+ [0.02129706, 0.140816242, 0.0112733059, -0.0226350538, 0.0916948169, 0.0769175813],
+ [0.0132302344, 0.152308047, 0.0346313119, -0.0269966982, 0.149707705, 0.094149217],
+ [-0.0123688057, 0.165790111, 0.0893077999, -0.0103429332, 0.173016444, 0.0720508844],
+]
+output_states = [[0, 0, 0, 0, 0, 0]] + golden_outputs[:2]
+
+tests = zip(
+ test_inputs, output_states, cell_states, golden_cell_states, golden_outputs)
+
+for test_input, output_state, cell_state, golden_state, golden_output in tests:
+ cur_input = copy.deepcopy(input0)
+ cur_input[input] = test_input
+ cur_input[output_state_in] = output_state
+ cur_input[cell_state_in] = cell_state
+ cur_output = {
+ scratch_buffer: [0] * (n_batch * n_cell * 3),
+ cell_state_out: golden_state,
+ output_state_out: golden_output,
+ output: golden_output
+ }
+ Example((cur_input, cur_output), name="CifgPeepholeProjectionNoClippingLayerNormLstm")
diff --git a/tests/nnapi/specs/skip/V1_2/less_equal.mod.py b/tests/nnapi/specs/skip/V1_2/less_equal.mod.py
new file mode 100644
index 000000000..e57ca55c5
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/less_equal.mod.py
@@ -0,0 +1,99 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+def test(name, input0, input1, output0, input0_data, input1_data, output_data, do_variations=True):
+ model = Model().Operation("LESS_EQUAL", input0, input1).To(output0)
+ example = Example({
+ input0: input0_data,
+ input1: input1_data,
+ output0: output_data,
+ }, model=model, name=name)
+ if do_variations:
+ example.AddVariations("int32", "float16", "relaxed")
+
+test(
+ name="simple",
+ input0=Input("input0", "TENSOR_FLOAT32", "{3}"),
+ input1=Input("input1", "TENSOR_FLOAT32", "{3}"),
+ output0=Output("output0", "TENSOR_BOOL8", "{3}"),
+ input0_data=[5, 7, 10],
+ input1_data=[10, 7, 5],
+ output_data=[True, True, False],
+)
+
+test(
+ name="broadcast",
+ input0=Input("input0", "TENSOR_FLOAT32", "{2, 1}"),
+ input1=Input("input1", "TENSOR_FLOAT32", "{2}"),
+ output0=Output("output0", "TENSOR_BOOL8", "{2, 2}"),
+ input0_data=[5, 10],
+ input1_data=[10, 5],
+ output_data=[True, True, True, False],
+)
+
+test(
+ name="quantized_different_scale",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM", [3], 1.0, 128)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM", [1], 2.0, 128)),
+ output0=Output("output0", "TENSOR_BOOL8", "{3}"),
+ input0_data=[129, 130, 131], # effectively 1, 2, 3
+ input1_data=[129], # effectively 2
+ output_data=[True, True, False],
+ do_variations=False,
+)
+
+test(
+ name="quantized_different_zero_point",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM", [3], 1.0, 128)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM", [1], 1.0, 129)),
+ output0=Output("output0", "TENSOR_BOOL8", "{3}"),
+ input0_data=[129, 130, 131], # effectively 1, 2, 3
+ input1_data=[131], # effectively 2
+ output_data=[True, True, False],
+ do_variations=False,
+)
+
+test(
+ name="quantized_overflow_second_input_if_requantized",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM", [1], 1.64771, 31)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM", [1], 1.49725, 240)),
+ output0=Output("output0", "TENSOR_BOOL8", "{1}"),
+ input0_data=[0],
+ input1_data=[200],
+ output_data=[False],
+ do_variations=False,
+)
+
+test(
+ name="quantized_overflow_first_input_if_requantized",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM", [1], 1.49725, 240)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM", [1], 1.64771, 31)),
+ output0=Output("output0", "TENSOR_BOOL8", "{1}"),
+ input0_data=[200],
+ input1_data=[0],
+ output_data=[True],
+ do_variations=False,
+)
+
+test(
+ name="boolean",
+ input0=Input("input0", "TENSOR_BOOL8", "{4}"),
+ input1=Input("input1", "TENSOR_BOOL8", "{4}"),
+ output0=Output("output0", "TENSOR_BOOL8", "{4}"),
+ input0_data=[False, True, False, True],
+ input1_data=[False, False, True, True],
+ output_data=[True, False, True, True],
+ do_variations=False,
+)
diff --git a/tests/nnapi/specs/skip/V1_2/local_response_normalization_v1_2.mod.py b/tests/nnapi/specs/skip/V1_2/local_response_normalization_v1_2.mod.py
new file mode 100644
index 000000000..784ec9c06
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/local_response_normalization_v1_2.mod.py
@@ -0,0 +1,44 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+i = Input("op1", "TENSOR_FLOAT32", "{2, 2, 2, 6}") # input
+o = Output("op2", "TENSOR_FLOAT32", "{2, 2, 2, 6}") # output
+axis = Int32Scalar("axis", -1) # last axis
+input0 = {i: [-1.1, .6, .7, 1.2, -.7, .1] * 8}
+
+# TEST 1: radius = 20, bias = 0.0, alpha = 1.0, beta = 0.5
+Model("axis").Operation("LOCAL_RESPONSE_NORMALIZATION", i, 20, 0.0, 1.0, 0.5, axis).To(o)
+Example((input0, {
+ o: [-.55, .3, .35, .6, -.35, .05] * 8
+ })).AddRelaxed().AddAllDimsAndAxis(i, o, axis).AddVariations("float16")
+
+# TEST 2: radius = 20, bias = 9.0, alpha = 4.0, beta = 0.5
+Model("axis").Operation("LOCAL_RESPONSE_NORMALIZATION", i, 20, 9.0, 4.0, 0.5, axis).To(o)
+Example((input0, {
+ o: [-.22, .12, .14, .24, -.14, .02] * 8
+ })).AddRelaxed().AddAllDimsAndAxis(i, o, axis).AddVariations("float16")
+
+# TEST 4: radius = 2, bias = 9.0, alpha = 4.0, beta = 0.5
+Model("axis").Operation("LOCAL_RESPONSE_NORMALIZATION", i, 2, 9.0, 4.0, 0.5, axis).To(o)
+Example((input0, {
+ o: [-.26492569, .12510864, .14011213, .26726127, -.16178755, .0244266] * 8
+ })).AddRelaxed().AddAllDimsAndAxis(i, o, axis).AddVariations("float16")
+
+# TEST 5: All dimensions other than 4, without axis parameter
+Model().Operation("LOCAL_RESPONSE_NORMALIZATION", i, 2, 9.0, 4.0, 0.5).To(o)
+Example((input0, {
+ o: [-.26492569, .12510864, .14011213, .26726127, -.16178755, .0244266] * 8
+ })).AddRelaxed().AddDims([1, 2, 3], i, o, includeDefault=False).AddVariations("float16")
diff --git a/tests/nnapi/specs/skip/V1_2/log.mod.py b/tests/nnapi/specs/skip/V1_2/log.mod.py
new file mode 100644
index 000000000..d93f6b144
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/log.mod.py
@@ -0,0 +1,27 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+input0 = Input("input0", "TENSOR_FLOAT32", "{1, 2, 3, 4, 5}")
+output0 = Output("output0", "TENSOR_FLOAT32", "{1, 2, 3, 4, 5}")
+model = Model().Operation("LOG", input0).To(output0)
+
+input_data = [(i + 1) / 10 for i in range(120)]
+output_data = [math.log(x) for x in input_data]
+
+Example({
+ input0: input_data,
+ output0: output_data,
+}).AddVariations("relaxed", "float16")
diff --git a/tests/nnapi/specs/skip/V1_2/log_softmax.mod.py b/tests/nnapi/specs/skip/V1_2/log_softmax.mod.py
new file mode 100644
index 000000000..7e4a6774f
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/log_softmax.mod.py
@@ -0,0 +1,72 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Derived from tensorflow/lite/kernels/activations_test.cc
+
+def test(input0, output0, input_data, beta, axis, output_data):
+ model = Model().Operation("LOG_SOFTMAX", input0, beta, axis).To(output0)
+ Example({
+ input0: input_data,
+ output0: output_data,
+ }, model=model).AddVariations("relaxed", "float16")
+
+test(
+ input0=Input("input0", "TENSOR_FLOAT32", "{1, 1, 1, 2, 4}"),
+ output0=Output("output0", "TENSOR_FLOAT32", "{1, 1, 1, 2, 4}"),
+ input_data=[0, -6, 2, 4,
+ 3, -2, 10, 1],
+ beta=1.0,
+ axis=4,
+ output_data=[-4.14297, -10.14297, -2.14297, -.142971,
+ -7.00104, -12.00104, -.00104087, -9.00104],
+)
+
+test(
+ input0=Input("input0", "TENSOR_FLOAT32", "{1, 1, 1, 4, 2}"),
+ output0=Output("output0", "TENSOR_FLOAT32", "{1, 1, 1, 4, 2}"),
+ input_data=[0, -6,
+ 2, 4,
+ 3, -2,
+ 10, 1],
+ beta=1.0,
+ axis=-1,
+ output_data=[-.00247565, -6.00247,
+ -2.12692, -.126928,
+ -.00671534, -5.00671,
+ -.000123374, -9.00012],
+)
+
+test(
+ input0=Input("input0", "TENSOR_FLOAT32", "{1, 1, 2, 4, 1}"),
+ output0=Output("output0", "TENSOR_FLOAT32", "{1, 1, 2, 4, 1}"),
+ input_data=[0, 2, 3, 10,
+ -6, 4, -2, 1],
+ beta=1.0,
+ axis=-3,
+ output_data=[-.00247565, -2.12692, -.00671534, -.000123374,
+ -6.00247, -.126928, -5.00671, -9.00012],
+)
+
+test(
+ input0=Input("input0", "TENSOR_FLOAT32", "{1, 1, 1, 2, 4}"),
+ output0=Output("output0", "TENSOR_FLOAT32", "{1, 1, 1, 2, 4}"),
+ input_data=[0, -.6, .2, .4,
+ .3, -.2, 1, .1],
+ beta=10.0,
+ axis=4,
+ output_data=[-4.14297, -10.14297, -2.14297, -.142971,
+ -7.00104, -12.00104, -.00104087, -9.00104],
+)
diff --git a/tests/nnapi/specs/skip/V1_2/logistic_v1_2.mod.py b/tests/nnapi/specs/skip/V1_2/logistic_v1_2.mod.py
new file mode 100644
index 000000000..fe91a814d
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/logistic_v1_2.mod.py
@@ -0,0 +1,94 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# TEST 1
+i1 = Input("op1", "TENSOR_FLOAT16", "{1, 2, 2, 1}")
+i3 = Output("op3", "TENSOR_FLOAT16", "{1, 2, 2, 1}")
+model = Model().Operation("LOGISTIC", i1).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1.0, 2.0, 4.0, 8.0]}
+
+output0 = {i3: # output 0
+ [0.73105859756469727,
+ 0.88079702854156494,
+ 0.9820137619972229,
+ 0.99966466426849365]}
+
+# Instantiate an example
+Example((input0, output0))
+
+
+# TEST 2
+d0 = 2
+d1 = 32
+d2 = 40
+d3 = 2
+
+i0 = Input("input", "TENSOR_FLOAT16", "{%d, %d, %d, %d}" % (d0, d1, d2, d3))
+output = Output("output", "TENSOR_FLOAT16", "{%d, %d, %d, %d}" % (d0, d1, d2, d3))
+model = Model().Operation("LOGISTIC", i0).To(output)
+
+# Example 1. Input in operand 0,
+rng = d0 * d1 * d2 * d3
+input_values = (lambda r = rng: [x * (x % 2 - .5) * 2 % 512 for x in range(r)])()
+input0 = {i0: input_values}
+output_values = [1. / (1. + math.exp(-x)) for x in input_values]
+output0 = {output: output_values}
+
+# Instantiate an example
+Example((input0, output0))
+
+
+# TEST 3: zero-sized input
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+layout = BoolScalar("layout", False) # NHWC
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# LOGISTIC op with numBatches = 0.
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 2, 2, 1}") # out
+model = model.Operation("LOGISTIC", zero_sized).To(o3)
+
+quant8 = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ zero_sized: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ o3: ("TENSOR_QUANT8_ASYMM", 1.0 / 256, 0)
+})
+
+# Create test case with dummy values.
+Example({
+ i1: [1],
+ o1: [0],
+ o2: [0],
+ o3: [0],
+}).AddVariations("relaxed", quant8, "float16")
diff --git a/tests/nnapi/specs/skip/V1_2/lsh_projection_3_relaxed.mod.py b/tests/nnapi/specs/skip/V1_2/lsh_projection_3_relaxed.mod.py
new file mode 100644
index 000000000..de7cec111
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/lsh_projection_3_relaxed.mod.py
@@ -0,0 +1,42 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+num_input = 3
+num_hash = 4
+num_bits = 2
+
+model = Model()
+
+hhash = Parameter("hash", "TENSOR_FLOAT32", "{%d, %d}" % (num_hash, num_bits),
+ [0.123, 0.456, -0.321, -0.654, 1.234, 5.678, -4.321, -8.765])
+lookup = Input("lookup", "TENSOR_INT32", "{%d, %d}" % (num_input, num_bits))
+weight = Input("weight", "TENSOR_FLOAT32", "{%d}" % (num_input))
+type_param = Int32Scalar("type_param", 3) # SPARSE
+output = Output("output", "TENSOR_INT32", "{%d}" % (num_hash))
+model = model.Operation("LSH_PROJECTION", hhash, lookup, weight,
+ type_param).To(output)
+model = model.RelaxedExecution(True)
+
+# Omit weight, since this is a sparse projection, for which the optional weight
+# input should be left unset.
+input0 = {
+ lookup: [12345, 54321, 67890, 9876, -12345678, -87654321],
+ weight: [],
+}
+
+output0 = {output: [1, 6, 10, 12]}
+
+Example((input0, output0))
diff --git a/tests/nnapi/specs/skip/V1_2/lsh_projection_4_relaxed.mod.py b/tests/nnapi/specs/skip/V1_2/lsh_projection_4_relaxed.mod.py
new file mode 100644
index 000000000..2b3b33a1e
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/lsh_projection_4_relaxed.mod.py
@@ -0,0 +1,42 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+num_input = 3
+num_hash = 4
+num_bits = 2
+
+model = Model()
+
+hhash = Parameter("hash", "TENSOR_FLOAT32", "{%d, %d}" % (num_hash, num_bits),
+ [0.123, 0.456, -0.321, -0.654, 1.234, 5.678, -4.321, -8.765])
+lookup = Input("lookup", "TENSOR_INT32", "{%d, %d}" % (num_input, num_bits))
+weight = Input("weight", "TENSOR_FLOAT32", "{%d}" % (num_input))
+type_param = Int32Scalar("type_param", 1) # SPARSE DEPRECATED
+output = Output("output", "TENSOR_INT32", "{%d}" % (num_hash))
+model = model.Operation("LSH_PROJECTION", hhash, lookup, weight,
+ type_param).To(output)
+model = model.RelaxedExecution(True)
+
+# Omit weight, since this is a sparse projection, for which the optional weight
+# input should be left unset.
+input0 = {
+ lookup: [12345, 54321, 67890, 9876, -12345678, -87654321],
+ weight: [],
+}
+
+output0 = {output: [1, 2, 2, 0]}
+
+Example((input0, output0))
diff --git a/tests/nnapi/specs/skip/V1_2/lsh_projection_deprecated.mod.py b/tests/nnapi/specs/skip/V1_2/lsh_projection_deprecated.mod.py
new file mode 100644
index 000000000..2b3b33a1e
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/lsh_projection_deprecated.mod.py
@@ -0,0 +1,42 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+num_input = 3
+num_hash = 4
+num_bits = 2
+
+model = Model()
+
+hhash = Parameter("hash", "TENSOR_FLOAT32", "{%d, %d}" % (num_hash, num_bits),
+ [0.123, 0.456, -0.321, -0.654, 1.234, 5.678, -4.321, -8.765])
+lookup = Input("lookup", "TENSOR_INT32", "{%d, %d}" % (num_input, num_bits))
+weight = Input("weight", "TENSOR_FLOAT32", "{%d}" % (num_input))
+type_param = Int32Scalar("type_param", 1) # SPARSE DEPRECATED
+output = Output("output", "TENSOR_INT32", "{%d}" % (num_hash))
+model = model.Operation("LSH_PROJECTION", hhash, lookup, weight,
+ type_param).To(output)
+model = model.RelaxedExecution(True)
+
+# Omit weight, since this is a sparse projection, for which the optional weight
+# input should be left unset.
+input0 = {
+ lookup: [12345, 54321, 67890, 9876, -12345678, -87654321],
+ weight: [],
+}
+
+output0 = {output: [1, 2, 2, 0]}
+
+Example((input0, output0))
diff --git a/tests/nnapi/specs/skip/V1_2/lsh_projection_float16.mod.py b/tests/nnapi/specs/skip/V1_2/lsh_projection_float16.mod.py
new file mode 100644
index 000000000..ed19b17f7
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/lsh_projection_float16.mod.py
@@ -0,0 +1,39 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+num_input = 3
+num_hash = 4
+num_bits = 2
+
+model = Model()
+
+hhash = Parameter("hash", "TENSOR_FLOAT16", "{%d, %d}" % (num_hash, num_bits),
+ [0.123, 0.456, -0.321, -0.654, 1.234, 5.678, -4.321, -8.765])
+lookup = Input("lookup", "TENSOR_INT32", "{%d, %d}" % (num_input, num_bits))
+weight = Input("weight", "TENSOR_FLOAT16", "{%d}" % (num_input))
+type_param = Int32Scalar("type_param", 2) # DENSE
+output = Output("output", "TENSOR_INT32", "{%d}" % (num_hash * num_bits))
+model = model.Operation("LSH_PROJECTION", hhash, lookup, weight,
+ type_param).To(output)
+
+# TODO: weight should be a constant, too.
+input0 = {
+ lookup: [12345, 54321, 67890, 9876, -12345678, -87654321],
+ weight: [0.12, 0.34, 0.56]
+}
+output0 = {output: [1, 1, 1, 1, 1, 0, 0, 0]}
+
+Example((input0, output0)).AddVariations("float16");
diff --git a/tests/nnapi/specs/skip/V1_2/lstm2_float16.mod.py b/tests/nnapi/specs/skip/V1_2/lstm2_float16.mod.py
new file mode 100644
index 000000000..6ca648a6c
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/lstm2_float16.mod.py
@@ -0,0 +1,142 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# LSTM Test, With Cifg, With Peephole, No Projection, No Clipping.
+
+model = Model()
+
+n_batch = 1
+n_input = 2
+# n_cell and n_output have the same size when there is no projection.
+n_cell = 4
+n_output = 4
+
+input = Input("input", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_input))
+
+input_to_input_weights = Input("input_to_input_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+input_to_forget_weights = Input("input_to_forget_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+input_to_cell_weights = Input("input_to_cell_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+input_to_output_weights = Input("input_to_output_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+
+recurrent_to_input_weights = Input("recurrent_to_intput_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+recurrent_to_forget_weights = Input("recurrent_to_forget_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+recurrent_to_cell_weights = Input("recurrent_to_cell_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+recurrent_to_output_weights = Input("recurrent_to_output_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+
+cell_to_input_weights = Input("cell_to_input_weights", "TENSOR_FLOAT16", "{0}")
+cell_to_forget_weights = Input("cell_to_forget_weights", "TENSOR_FLOAT16", "{%d}" % (n_cell))
+cell_to_output_weights = Input("cell_to_output_weights", "TENSOR_FLOAT16", "{%d}" % (n_cell))
+
+input_gate_bias = Input("input_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+forget_gate_bias = Input("forget_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+cell_gate_bias = Input("cell_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+output_gate_bias = Input("output_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+
+projection_weights = Input("projection_weights", "TENSOR_FLOAT16", "{0,0}")
+projection_bias = Input("projection_bias", "TENSOR_FLOAT16", "{0}")
+
+output_state_in = Input("output_state_in", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_output))
+cell_state_in = Input("cell_state_in", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_cell))
+
+activation_param = Int32Scalar("activation_param", 4) # Tanh
+cell_clip_param = Float16Scalar("cell_clip_param", 0.)
+proj_clip_param = Float16Scalar("proj_clip_param", 0.)
+
+scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_cell * 3))
+output_state_out = Output("output_state_out", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_output))
+cell_state_out = Output("cell_state_out", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_cell))
+output = Output("output", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_output))
+
+model = model.Operation("LSTM",
+ input,
+
+ input_to_input_weights,
+ input_to_forget_weights,
+ input_to_cell_weights,
+ input_to_output_weights,
+
+ recurrent_to_input_weights,
+ recurrent_to_forget_weights,
+ recurrent_to_cell_weights,
+ recurrent_to_output_weights,
+
+ cell_to_input_weights,
+ cell_to_forget_weights,
+ cell_to_output_weights,
+
+ input_gate_bias,
+ forget_gate_bias,
+ cell_gate_bias,
+ output_gate_bias,
+
+ projection_weights,
+ projection_bias,
+
+ output_state_in,
+ cell_state_in,
+
+ activation_param,
+ cell_clip_param,
+ proj_clip_param
+).To([scratch_buffer, output_state_out, cell_state_out, output])
+
+input0 = {input_to_input_weights:[],
+ input_to_cell_weights: [-0.49770179, -0.27711356, -0.09624726, 0.05100781, 0.04717243, 0.48944736, -0.38535351, -0.17212132],
+ input_to_forget_weights: [-0.55291498, -0.42866567, 0.13056988, -0.3633365, -0.22755712, 0.28253698, 0.24407166, 0.33826375],
+ input_to_output_weights: [0.10725588, -0.02335852, -0.55932593, -0.09426838, -0.44257352, 0.54939759, 0.01533556, 0.42751634],
+
+ input_gate_bias: [],
+ forget_gate_bias: [1.,1.,1.,1.],
+ cell_gate_bias: [0.,0.,0.,0.],
+ output_gate_bias: [0.,0.,0.,0.],
+
+ recurrent_to_input_weights: [],
+ recurrent_to_cell_weights: [
+ 0.54066205, -0.32668582, -0.43562764, -0.56094903, 0.42957711,
+ 0.01841056, -0.32764608, -0.33027974, -0.10826075, 0.20675004,
+ 0.19069612, -0.03026325, -0.54532051, 0.33003211, 0.44901288,
+ 0.21193194],
+
+ recurrent_to_forget_weights: [
+ -0.13832897, -0.0515101, -0.2359007, -0.16661474, -0.14340827,
+ 0.36986142, 0.23414481, 0.55899, 0.10798943, -0.41174671, 0.17751795,
+ -0.34484994, -0.35874045, -0.11352962, 0.27268326, 0.54058349],
+
+ recurrent_to_output_weights: [
+ 0.41613156, 0.42610586, -0.16495961, -0.5663873, 0.30579174, -0.05115908,
+ -0.33941799, 0.23364776, 0.11178309, 0.09481031, -0.26424935, 0.46261835,
+ 0.50248802, 0.26114327, -0.43736315, 0.33149987],
+
+ cell_to_input_weights: [],
+ cell_to_forget_weights: [0.47485286, -0.51955009, -0.24458408, 0.31544167],
+ cell_to_output_weights: [-0.17135078, 0.82760304, 0.85573703, -0.77109635],
+
+ projection_weights: [],
+ projection_bias: [],
+}
+
+output0 = {
+ scratch_buffer: [ 0 for x in range(n_batch * n_cell * 3) ],
+ cell_state_out: [ -0.760444, -0.0180416, 0.182264, -0.0649371 ],
+ output_state_out: [ -0.364445, -0.00352185, 0.128866, -0.0516365 ],
+}
+
+input0[input] = [2., 3.]
+input0[output_state_in] = [ 0 for _ in range(n_batch * n_output) ]
+input0[cell_state_in] = [ 0 for _ in range(n_batch * n_cell) ]
+output0[output] = [-0.36444446, -0.00352185, 0.12886585, -0.05163646]
+
+Example((input0, output0))
diff --git a/tests/nnapi/specs/skip/V1_2/lstm2_state2_float16.mod.py b/tests/nnapi/specs/skip/V1_2/lstm2_state2_float16.mod.py
new file mode 100644
index 000000000..e9a143417
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/lstm2_state2_float16.mod.py
@@ -0,0 +1,142 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# LSTM Test, With Cifg, With Peephole, No Projection, No Clipping.
+
+model = Model()
+
+n_batch = 1
+n_input = 2
+# n_cell and n_output have the same size when there is no projection.
+n_cell = 4
+n_output = 4
+
+input = Input("input", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_input))
+
+input_to_input_weights = Input("input_to_input_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+input_to_forget_weights = Input("input_to_forget_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+input_to_cell_weights = Input("input_to_cell_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+input_to_output_weights = Input("input_to_output_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+
+recurrent_to_input_weights = Input("recurrent_to_intput_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+recurrent_to_forget_weights = Input("recurrent_to_forget_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+recurrent_to_cell_weights = Input("recurrent_to_cell_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+recurrent_to_output_weights = Input("recurrent_to_output_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+
+cell_to_input_weights = Input("cell_to_input_weights", "TENSOR_FLOAT16", "{0}")
+cell_to_forget_weights = Input("cell_to_forget_weights", "TENSOR_FLOAT16", "{%d}" % (n_cell))
+cell_to_output_weights = Input("cell_to_output_weights", "TENSOR_FLOAT16", "{%d}" % (n_cell))
+
+input_gate_bias = Input("input_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+forget_gate_bias = Input("forget_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+cell_gate_bias = Input("cell_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+output_gate_bias = Input("output_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+
+projection_weights = Input("projection_weights", "TENSOR_FLOAT16", "{0,0}")
+projection_bias = Input("projection_bias", "TENSOR_FLOAT16", "{0}")
+
+output_state_in = Input("output_state_in", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_output))
+cell_state_in = Input("cell_state_in", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_cell))
+
+activation_param = Int32Scalar("activation_param", 4) # Tanh
+cell_clip_param = Float16Scalar("cell_clip_param", 0.)
+proj_clip_param = Float16Scalar("proj_clip_param", 0.)
+
+scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_cell * 3))
+output_state_out = IgnoredOutput("output_state_out", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_output))
+cell_state_out = IgnoredOutput("cell_state_out", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_cell))
+output = Output("output", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_output))
+
+model = model.Operation("LSTM",
+ input,
+
+ input_to_input_weights,
+ input_to_forget_weights,
+ input_to_cell_weights,
+ input_to_output_weights,
+
+ recurrent_to_input_weights,
+ recurrent_to_forget_weights,
+ recurrent_to_cell_weights,
+ recurrent_to_output_weights,
+
+ cell_to_input_weights,
+ cell_to_forget_weights,
+ cell_to_output_weights,
+
+ input_gate_bias,
+ forget_gate_bias,
+ cell_gate_bias,
+ output_gate_bias,
+
+ projection_weights,
+ projection_bias,
+
+ output_state_in,
+ cell_state_in,
+
+ activation_param,
+ cell_clip_param,
+ proj_clip_param
+).To([scratch_buffer, output_state_out, cell_state_out, output])
+
+input0 = {input_to_input_weights:[],
+ input_to_cell_weights: [-0.49770179, -0.27711356, -0.09624726, 0.05100781, 0.04717243, 0.48944736, -0.38535351, -0.17212132],
+ input_to_forget_weights: [-0.55291498, -0.42866567, 0.13056988, -0.3633365, -0.22755712, 0.28253698, 0.24407166, 0.33826375],
+ input_to_output_weights: [0.10725588, -0.02335852, -0.55932593, -0.09426838, -0.44257352, 0.54939759, 0.01533556, 0.42751634],
+
+ input_gate_bias: [],
+ forget_gate_bias: [1.,1.,1.,1.],
+ cell_gate_bias: [0.,0.,0.,0.],
+ output_gate_bias: [0.,0.,0.,0.],
+
+ recurrent_to_input_weights: [],
+ recurrent_to_cell_weights: [
+ 0.54066205, -0.32668582, -0.43562764, -0.56094903, 0.42957711,
+ 0.01841056, -0.32764608, -0.33027974, -0.10826075, 0.20675004,
+ 0.19069612, -0.03026325, -0.54532051, 0.33003211, 0.44901288,
+ 0.21193194],
+
+ recurrent_to_forget_weights: [
+ -0.13832897, -0.0515101, -0.2359007, -0.16661474, -0.14340827,
+ 0.36986142, 0.23414481, 0.55899, 0.10798943, -0.41174671, 0.17751795,
+ -0.34484994, -0.35874045, -0.11352962, 0.27268326, 0.54058349],
+
+ recurrent_to_output_weights: [
+ 0.41613156, 0.42610586, -0.16495961, -0.5663873, 0.30579174, -0.05115908,
+ -0.33941799, 0.23364776, 0.11178309, 0.09481031, -0.26424935, 0.46261835,
+ 0.50248802, 0.26114327, -0.43736315, 0.33149987],
+
+ cell_to_input_weights: [],
+ cell_to_forget_weights: [0.47485286, -0.51955009, -0.24458408, 0.31544167],
+ cell_to_output_weights: [-0.17135078, 0.82760304, 0.85573703, -0.77109635],
+
+ projection_weights: [],
+ projection_bias: [],
+}
+
+output0 = {
+ scratch_buffer: [ 0 for x in range(n_batch * n_cell * 3) ],
+ cell_state_out: [ 0 for x in range(n_batch * n_cell) ],
+ output_state_out: [ 0 for x in range(n_batch * n_output) ],
+}
+
+input0[input] = [1., 1.]
+input0[output_state_in] = [-0.423122, -0.0121822, 0.24201, -0.0812458]
+input0[cell_state_in] = [-0.978419, -0.139203, 0.338163, -0.0983904]
+output0[output] = [-0.358325, -0.04621704, 0.21641694, -0.06471302]
+
+Example((input0, output0))
diff --git a/tests/nnapi/specs/skip/V1_2/lstm2_state_float16.mod.py b/tests/nnapi/specs/skip/V1_2/lstm2_state_float16.mod.py
new file mode 100644
index 000000000..4d1500161
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/lstm2_state_float16.mod.py
@@ -0,0 +1,141 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# LSTM Test, With Cifg, With Peephole, No Projection, No Clipping.
+
+model = Model()
+
+n_batch = 1
+n_input = 2
+# n_cell and n_output have the same size when there is no projection.
+n_cell = 4
+n_output = 4
+
+input = Input("input", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_input))
+
+input_to_input_weights = Input("input_to_input_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+input_to_forget_weights = Input("input_to_forget_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+input_to_cell_weights = Input("input_to_cell_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+input_to_output_weights = Input("input_to_output_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+
+recurrent_to_input_weights = Input("recurrent_to_intput_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+recurrent_to_forget_weights = Input("recurrent_to_forget_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+recurrent_to_cell_weights = Input("recurrent_to_cell_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+recurrent_to_output_weights = Input("recurrent_to_output_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+
+cell_to_input_weights = Input("cell_to_input_weights", "TENSOR_FLOAT16", "{0}")
+cell_to_forget_weights = Input("cell_to_forget_weights", "TENSOR_FLOAT16", "{%d}" % (n_cell))
+cell_to_output_weights = Input("cell_to_output_weights", "TENSOR_FLOAT16", "{%d}" % (n_cell))
+
+input_gate_bias = Input("input_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+forget_gate_bias = Input("forget_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+cell_gate_bias = Input("cell_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+output_gate_bias = Input("output_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+
+projection_weights = Input("projection_weights", "TENSOR_FLOAT16", "{0,0}")
+projection_bias = Input("projection_bias", "TENSOR_FLOAT16", "{0}")
+
+output_state_in = Input("output_state_in", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_output))
+cell_state_in = Input("cell_state_in", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_cell))
+
+activation_param = Int32Scalar("activation_param", 4) # Tanh
+cell_clip_param = Float16Scalar("cell_clip_param", 0.)
+proj_clip_param = Float16Scalar("proj_clip_param", 0.)
+
+scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_cell * 3))
+output_state_out = Output("output_state_out", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_output))
+cell_state_out = Output("cell_state_out", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_cell))
+output = Output("output", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_output))
+
+model = model.Operation("LSTM",
+ input,
+
+ input_to_input_weights,
+ input_to_forget_weights,
+ input_to_cell_weights,
+ input_to_output_weights,
+
+ recurrent_to_input_weights,
+ recurrent_to_forget_weights,
+ recurrent_to_cell_weights,
+ recurrent_to_output_weights,
+
+ cell_to_input_weights,
+ cell_to_forget_weights,
+ cell_to_output_weights,
+
+ input_gate_bias,
+ forget_gate_bias,
+ cell_gate_bias,
+ output_gate_bias,
+
+ projection_weights,
+ projection_bias,
+
+ output_state_in,
+ cell_state_in,
+
+ activation_param,
+ cell_clip_param,
+ proj_clip_param
+).To([scratch_buffer, output_state_out, cell_state_out, output])
+
+input0 = {input_to_input_weights:[],
+ input_to_cell_weights: [-0.49770179, -0.27711356, -0.09624726, 0.05100781, 0.04717243, 0.48944736, -0.38535351, -0.17212132],
+ input_to_forget_weights: [-0.55291498, -0.42866567, 0.13056988, -0.3633365, -0.22755712, 0.28253698, 0.24407166, 0.33826375],
+ input_to_output_weights: [0.10725588, -0.02335852, -0.55932593, -0.09426838, -0.44257352, 0.54939759, 0.01533556, 0.42751634],
+
+ input_gate_bias: [],
+ forget_gate_bias: [1.,1.,1.,1.],
+ cell_gate_bias: [0.,0.,0.,0.],
+ output_gate_bias: [0.,0.,0.,0.],
+
+ recurrent_to_input_weights: [],
+ recurrent_to_cell_weights: [
+ 0.54066205, -0.32668582, -0.43562764, -0.56094903, 0.42957711,
+ 0.01841056, -0.32764608, -0.33027974, -0.10826075, 0.20675004,
+ 0.19069612, -0.03026325, -0.54532051, 0.33003211, 0.44901288,
+ 0.21193194],
+
+ recurrent_to_forget_weights: [
+ -0.13832897, -0.0515101, -0.2359007, -0.16661474, -0.14340827,
+ 0.36986142, 0.23414481, 0.55899, 0.10798943, -0.41174671, 0.17751795,
+ -0.34484994, -0.35874045, -0.11352962, 0.27268326, 0.54058349],
+
+ recurrent_to_output_weights: [
+ 0.41613156, 0.42610586, -0.16495961, -0.5663873, 0.30579174, -0.05115908,
+ -0.33941799, 0.23364776, 0.11178309, 0.09481031, -0.26424935, 0.46261835,
+ 0.50248802, 0.26114327, -0.43736315, 0.33149987],
+
+ cell_to_input_weights: [],
+ cell_to_forget_weights: [0.47485286, -0.51955009, -0.24458408, 0.31544167],
+ cell_to_output_weights: [-0.17135078, 0.82760304, 0.85573703, -0.77109635],
+
+ projection_weights: [],
+ projection_bias: [],
+}
+
+output0 = {
+ scratch_buffer: [ 0 for x in range(n_batch * n_cell * 3) ],
+ cell_state_out: [ -0.978419, -0.139203, 0.338163, -0.0983904 ],
+ output_state_out: [ -0.423122, -0.0121822, 0.24201, -0.0812458 ],
+}
+
+input0[input] = [3., 4.]
+input0[output_state_in] = [-0.364445, -0.00352185, 0.128866, -0.0516365]
+input0[cell_state_in] = [-0.760444, -0.0180416, 0.182264, -0.0649371]
+output0[output] = [-0.42312205, -0.01218222, 0.24201041, -0.08124574]
+Example((input0, output0))
diff --git a/tests/nnapi/specs/skip/V1_2/lstm3_float16.mod.py b/tests/nnapi/specs/skip/V1_2/lstm3_float16.mod.py
new file mode 100644
index 000000000..aee476303
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/lstm3_float16.mod.py
@@ -0,0 +1,662 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# LSTM Test, With Peephole, With Projection, No Clipping
+
+model = Model()
+
+n_batch = 2
+n_input = 5
+# A projection layer is used here, so n_output (16) differs from n_cell (20).
+n_cell = 20
+n_output = 16
+
+input = Input("input", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_input))
+
+input_to_input_weights = Input("input_to_input_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+input_to_forget_weights = Input("input_to_forget_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+input_to_cell_weights = Input("input_to_cell_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+input_to_output_weights = Input("input_to_output_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+
+recurrent_to_input_weights = Input("recurrent_to_intput_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+recurrent_to_forget_weights = Input("recurrent_to_forget_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+recurrent_to_cell_weights = Input("recurrent_to_cell_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+recurrent_to_output_weights = Input("recurrent_to_output_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+
+cell_to_input_weights = Input("cell_to_input_weights", "TENSOR_FLOAT16", "{%d}" % (n_cell))
+cell_to_forget_weights = Input("cell_to_forget_weights", "TENSOR_FLOAT16", "{%d}" %(n_cell))
+cell_to_output_weights = Input("cell_to_output_weights", "TENSOR_FLOAT16", "{%d}" % (n_cell))
+
+input_gate_bias = Input("input_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+forget_gate_bias = Input("forget_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+cell_gate_bias = Input("cell_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+output_gate_bias = Input("output_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+
+projection_weights = Input("projection_weights", "TENSOR_FLOAT16", "{%d,%d}" % (n_output, n_cell))
+projection_bias = Input("projection_bias", "TENSOR_FLOAT16", "{0}")
+
+output_state_in = Input("output_state_in", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_output))
+cell_state_in = Input("cell_state_in", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_cell))
+
+activation_param = Int32Scalar("activation_param", 4) # Tanh
+cell_clip_param = Float16Scalar("cell_clip_param", 0.)
+proj_clip_param = Float16Scalar("proj_clip_param", 0.)
+
+scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, (n_cell * 4)))
+output_state_out = Output("output_state_out", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_output))
+cell_state_out = Output("cell_state_out", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_cell))
+output = Output("output", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_output))
+
+# LSTM declares four outputs: scratch buffer, output state, cell state, output.
+model = model.Operation("LSTM",
+ input,
+
+ input_to_input_weights,
+ input_to_forget_weights,
+ input_to_cell_weights,
+ input_to_output_weights,
+
+ recurrent_to_input_weights,
+ recurrent_to_forget_weights,
+ recurrent_to_cell_weights,
+ recurrent_to_output_weights,
+
+ cell_to_input_weights,
+ cell_to_forget_weights,
+ cell_to_output_weights,
+
+ input_gate_bias,
+ forget_gate_bias,
+ cell_gate_bias,
+ output_gate_bias,
+
+ projection_weights,
+ projection_bias,
+
+ output_state_in,
+ cell_state_in,
+
+ activation_param,
+ cell_clip_param,
+ proj_clip_param
+).To([scratch_buffer, output_state_out, cell_state_out, output])
+
+input0 = {input_to_input_weights: [
+ 0.021393683, 0.06124551, 0.046905167, -0.014657677, -0.03149463,
+ 0.09171803, 0.14647801, 0.10797193, -0.0057968358, 0.0019193048,
+ -0.2726754, 0.10154029, -0.018539885, 0.080349885, -0.10262385,
+ -0.022599787, -0.09121155, -0.008675967, -0.045206103, -0.0821282,
+ -0.008045952, 0.015478081, 0.055217247, 0.038719587, 0.044153627,
+ -0.06453243, 0.05031825, -0.046935108, -0.008164439, 0.014574226,
+ -0.1671009, -0.15519552, -0.16819797, -0.13971269, -0.11953059,
+ 0.25005487, -0.22790983, 0.009855087, -0.028140958, -0.11200698,
+ 0.11295408, -0.0035217577, 0.054485075, 0.05184695, 0.064711206,
+ 0.10989193, 0.11674786, 0.03490607, 0.07727357, 0.11390585,
+ -0.1863375, -0.1034451, -0.13945189, -0.049401227, -0.18767063,
+ 0.042483903, 0.14233552, 0.13832581, 0.18350165, 0.14545603,
+ -0.028545704, 0.024939531, 0.050929718, 0.0076203286, -0.0029723682,
+ -0.042484224, -0.11827596, -0.09171104, -0.10808628, -0.16327988,
+ -0.2273378, -0.0993647, -0.017155107, 0.0023917493, 0.049272764,
+ 0.0038534778, 0.054764505, 0.089753784, 0.06947234, 0.08014476,
+ -0.04544234, -0.0497073, -0.07135631, -0.048929106, -0.004042012,
+ -0.009284026, 0.018042054, 0.0036860977, -0.07427302, -0.11434604,
+ -0.018995456, 0.031487543, 0.012834908, 0.019977754, 0.044256654,
+ -0.39292613, -0.18519334, -0.11651281, -0.06809892, 0.011373677],
+
+ input_to_forget_weights: [
+ -0.0018401089, -0.004852237, 0.03698424, 0.014181704, 0.028273236,
+ -0.016726194, -0.05249759, -0.10204261, 0.00861066, -0.040979505,
+ -0.009899187, 0.01923892, -0.028177269, -0.08535103, -0.14585495,
+ 0.10662567, -0.01909731, -0.017883534, -0.0047269356, -0.045103323,
+ 0.0030784295, 0.076784775, 0.07463696, 0.094531395, 0.0814421,
+ -0.12257899, -0.033945758, -0.031303465, 0.045630626, 0.06843887,
+ -0.13492945, -0.012480007, -0.0811829, -0.07224499, -0.09628791,
+ 0.045100946, 0.0012300825, 0.013964662, 0.099372394, 0.02543059,
+ 0.06958324, 0.034257296, 0.0482646, 0.06267997, 0.052625068,
+ 0.12784666, 0.07077897, 0.025725935, 0.04165009, 0.07241905,
+ 0.018668644, -0.037377294, -0.06277783, -0.08833636, -0.040120605,
+ -0.011405586, -0.007808335, -0.010301386, -0.005102167, 0.027717464,
+ 0.05483423, 0.11449111, 0.11289652, 0.10939839, 0.13396506,
+ -0.08402166, -0.01901462, -0.044678304, -0.07720565, 0.014350063,
+ -0.11757958, -0.0652038, -0.08185733, -0.076754324, -0.092614375,
+ 0.10405491, 0.052960336, 0.035755895, 0.035839386, -0.012540553,
+ 0.036881298, 0.02913376, 0.03420159, 0.05448447, -0.054523353,
+ 0.02582715, 0.02327355, -0.011857179, -0.0011980024, -0.034641717,
+ -0.026125094, -0.17582615, -0.15923657, -0.27486774, -0.0006143371,
+ 0.0001771948, -8.470171e-05, 0.02651807, 0.045790765, 0.06956496],
+
+ input_to_cell_weights: [
+ -0.04580283, -0.09549462, -0.032418985, -0.06454633,
+ -0.043528453, 0.043018587, -0.049152344, -0.12418144,
+ -0.078985475, -0.07596889, 0.019484362, -0.11434962,
+ -0.0074034138, -0.06314844, -0.092981495, 0.0062155537,
+ -0.025034338, -0.0028890965, 0.048929527, 0.06235075,
+ 0.10665918, -0.032036792, -0.08505916, -0.10843358,
+ -0.13002433, -0.036816437, -0.02130134, -0.016518239,
+ 0.0047691227, -0.0025825808, 0.066017866, 0.029991534,
+ -0.10652836, -0.1037554, -0.13056071, -0.03266643,
+ -0.033702414, -0.006473424, -0.04611692, 0.014419339,
+ -0.025174323, 0.0396852, 0.081777506, 0.06157468,
+ 0.10210095, -0.009658194, 0.046511717, 0.03603906,
+ 0.0069369148, 0.015960095, -0.06507666, 0.09551598,
+ 0.053568836, 0.06408714, 0.12835667, -0.008714329,
+ -0.20211966, -0.12093674, 0.029450472, 0.2849013,
+ -0.029227901, 0.1164364, -0.08560263, 0.09941786,
+ -0.036999565, -0.028842626, -0.0033637602, -0.017012902,
+ -0.09720865, -0.11193351, -0.029155117, -0.017936034,
+ -0.009768936, -0.04223324, -0.036159635, 0.06505112,
+ -0.021742892, -0.023377212, -0.07221364, -0.06430552,
+ 0.05453865, 0.091149814, 0.06387331, 0.007518393,
+ 0.055960953, 0.069779344, 0.046411168, 0.10509911,
+ 0.07463894, 0.0075130584, 0.012850982, 0.04555431,
+ 0.056955688, 0.06555285, 0.050801456, -0.009862683,
+ 0.00826772, -0.026555609, -0.0073611983, -0.0014897042],
+
+ input_to_output_weights: [
+ -0.0998932, -0.07201956, -0.052803773, -0.15629593, -0.15001918,
+ -0.07650751, 0.02359855, -0.075155355, -0.08037709, -0.15093534,
+ 0.029517552, -0.04751393, 0.010350531, -0.02664851, -0.016839722,
+ -0.023121163, 0.0077019283, 0.012851257, -0.05040649, -0.0129761,
+ -0.021737747, -0.038305793, -0.06870586, -0.01481247, -0.001285394,
+ 0.10124236, 0.083122835, 0.053313006, -0.062235646, -0.075637154,
+ -0.027833903, 0.029774971, 0.1130802, 0.09218906, 0.09506135,
+ -0.086665764, -0.037162706, -0.038880914, -0.035832845, -0.014481564,
+ -0.09825003, -0.12048569, -0.097665586, -0.05287633, -0.0964047,
+ -0.11366429, 0.035777505, 0.13568819, 0.052451383, 0.050649304,
+ 0.05798951, -0.021852335, -0.099848844, 0.014740475, -0.078897946,
+ 0.04974699, 0.014160473, 0.06973932, 0.04964942, 0.033364646,
+ 0.08190124, 0.025535367, 0.050893165, 0.048514254, 0.06945813,
+ -0.078907564, -0.06707616, -0.11844508, -0.09986688, -0.07509403,
+ 0.06263226, 0.14925587, 0.20188436, 0.12098451, 0.14639415,
+ 0.0015017595, -0.014267382, -0.03417257, 0.012711468, 0.0028300495,
+ -0.024758482, -0.05098548, -0.0821182, 0.014225672, 0.021544158,
+ 0.08949725, 0.07505268, -0.0020780868, 0.04908258, 0.06476295,
+ -0.022907063, 0.027562456, 0.040185735, 0.019567577, -0.015598739,
+ -0.049097303, -0.017121866, -0.083368234, -0.02332002, -0.0840956],
+
+ input_gate_bias: [
+ 0.02234832, 0.14757581, 0.18176508, 0.10380666, 0.053110216,
+ -0.06928846, -0.13942584, -0.11816189, 0.19483899, 0.03652339,
+ -0.10250295, 0.036714908, -0.18426876, 0.036065217, 0.21810818,
+ 0.02383196, -0.043370757, 0.08690144, -0.04444982, 0.00030581196],
+
+ forget_gate_bias: [
+ 0.035185695, -0.042891346, -0.03032477, 0.23027696,
+ 0.11098921, 0.15378423, 0.09263801, 0.09790885,
+ 0.09508917, 0.061199076, 0.07665568, -0.015443159,
+ -0.03499149, 0.046190713, 0.08895977, 0.10899629,
+ 0.40694186, 0.06030037, 0.012413437, -0.06108739],
+
+ cell_gate_bias: [
+ -0.024379363, 0.0055531194, 0.23377132, 0.033463873,
+ -0.1483596, -0.10639995, -0.091433935, 0.058573797,
+ -0.06809782, -0.07889636, -0.043246906, -0.09829136,
+ -0.4279842, 0.034901652, 0.18797937, 0.0075234566,
+ 0.016178843, 0.1749513, 0.13975595, 0.92058027],
+
+ output_gate_bias: [
+ 0.046159424, -0.0012809046, 0.03563469, 0.12648113, 0.027195795,
+ 0.35373217, -0.018957434, 0.008907322, -0.0762701, 0.12018895,
+ 0.04216877, 0.0022856654, 0.040952638, 0.3147856, 0.08225149,
+ -0.057416286, -0.14995944, -0.008040261, 0.13208859, 0.029760877],
+
+ recurrent_to_input_weights: [
+ -0.001374326, -0.078856036, 0.10672688, 0.029162422,
+ -0.11585556, 0.02557986, -0.13446963, -0.035785314,
+ -0.01244275, 0.025961924, -0.02337298, -0.044228926,
+ -0.055839065, -0.046598054, -0.010546039, -0.06900766,
+ 0.027239809, 0.022582639, -0.013296484, -0.05459212,
+ 0.08981, -0.045407712, 0.08682226, -0.06867011,
+ -0.14390695, -0.02916037, 0.000996957, 0.091420636,
+ 0.14283475, -0.07390571, -0.06402044, 0.062524505,
+ -0.093129106, 0.04860203, -0.08364217, -0.08119002,
+ 0.009352075, 0.22920375, 0.0016303885, 0.11583097,
+ -0.13732095, 0.012405723, -0.07551853, 0.06343048,
+ 0.12162708, -0.031923793, -0.014335606, 0.01790974,
+ -0.10650317, -0.0724401, 0.08554849, -0.05727212,
+ 0.06556731, -0.042729504, -0.043227166, 0.011683251,
+ -0.013082158, -0.029302018, -0.010899579, -0.062036745,
+ -0.022509435, -0.00964907, -0.01567329, 0.04260106,
+ -0.07787477, -0.11576462, 0.017356863, 0.048673786,
+ -0.017577527, -0.05527947, -0.082487635, -0.040137455,
+ -0.10820036, -0.04666372, 0.022746278, -0.07851417,
+ 0.01068115, 0.032956902, 0.022433773, 0.0026891115,
+ 0.08944216, -0.0685835, 0.010513544, 0.07228705,
+ 0.02032331, -0.059686817, -0.0005566496, -0.086984694,
+ 0.040414046, -0.1380399, 0.094208956, -0.05722982,
+ 0.012092817, -0.04989123, -0.086576, -0.003399834,
+ -0.04696032, -0.045747425, 0.10091314, 0.048676282,
+ -0.029037097, 0.031399418, -0.0040285117, 0.047237843,
+ 0.09504992, 0.041799378, -0.049185462, -0.031518843,
+ -0.10516937, 0.026374253, 0.10058866, -0.0033195973,
+ -0.041975245, 0.0073591834, 0.0033782164, -0.004325073,
+ -0.10167381, 0.042500053, -0.01447153, 0.06464186,
+ -0.017142897, 0.03312627, 0.009205989, 0.024138335,
+ -0.011337001, 0.035530265, -0.010912711, 0.0706555,
+ -0.005894094, 0.051841937, -0.1401738, -0.02351249,
+ 0.0365468, 0.07590991, 0.08838724, 0.021681072,
+ -0.10086113, 0.019608743, -0.06195883, 0.077335775,
+ 0.023646897, -0.095322326, 0.02233014, 0.09756986,
+ -0.048691444, -0.009579111, 0.07595467, 0.11480546,
+ -0.09801813, 0.019894179, 0.08502348, 0.004032281,
+ 0.037211012, 0.068537936, -0.048005626, -0.091520436,
+ -0.028379958, -0.01556313, 0.06554592, -0.045599163,
+ -0.01672207, -0.020169014, -0.011877351, -0.20212261,
+ 0.010889619, 0.0047078193, 0.038385306, 0.08540671,
+ -0.017140968, -0.0035865551, 0.016678626, 0.005633034,
+ 0.015963363, 0.00871737, 0.060130805, 0.028611384,
+ 0.10109069, -0.015060172, -0.07894427, 0.06401885,
+ 0.011584063, -0.024466386, 0.0047652307, -0.09041358,
+ 0.030737216, -0.0046374933, 0.14215417, -0.11823516,
+ 0.019899689, 0.006106124, -0.027092824, 0.0786356,
+ 0.05052217, -0.058925, -0.011402121, -0.024987547,
+ -0.0013661642, -0.06832946, -0.015667673, -0.1083353,
+ -0.00096863037, -0.06988685, -0.053350925, -0.027275559,
+ -0.033664223, -0.07978348, -0.025200296, -0.017207067,
+ -0.058403496, -0.055697463, 0.005798788, 0.12965427,
+ -0.062582195, 0.0013350133, -0.10482091, 0.0379771,
+ 0.072521195, -0.0029455067, -0.13797039, -0.03628521,
+ 0.013806405, -0.017858358, -0.01008298, -0.07700066,
+ -0.017081132, 0.019358726, 0.0027079724, 0.004635139,
+ 0.062634714, -0.02338735, -0.039547626, -0.02050681,
+ 0.03385117, -0.083611414, 0.002862572, -0.09421313,
+ 0.058618143, -0.08598433, 0.00972939, 0.023867095,
+ -0.053934585, -0.023203006, 0.07452513, -0.048767887,
+ -0.07314807, -0.056307215, -0.10433547, -0.06440842,
+ 0.04328182, 0.04389765, -0.020006588, -0.09076438,
+ -0.11652589, -0.021705797, 0.03345259, -0.010329105,
+ -0.025767034, 0.013057034, -0.07316461, -0.10145612,
+ 0.06358255, 0.18531723, 0.07759293, 0.12006465,
+ 0.1305557, 0.058638252, -0.03393652, 0.09622831,
+ -0.16253184, -2.4580743e-06, 0.079869635, -0.070196845,
+ -0.005644518, 0.06857898, -0.12598175, -0.035084512,
+ 0.03156317, -0.12794146, -0.031963028, 0.04692781,
+ 0.030070418, 0.0071660685, -0.095516115, -0.004643372,
+ 0.040170413, -0.062104587, -0.0037324072, 0.0554317,
+ 0.08184801, -0.019164372, 0.06791302, 0.034257166,
+ -0.10307039, 0.021943003, 0.046745934, 0.0790918,
+ -0.0265588, -0.007824208, 0.042546265, -0.00977924,
+ -0.0002440307, -0.017384544, -0.017990116, 0.12252321,
+ -0.014512694, -0.08251313, 0.08861942, 0.13589665,
+ 0.026351685, 0.012641483, 0.07466548, 0.044301085,
+ -0.045414884, -0.051112458, 0.03444247, -0.08502782,
+ -0.04106223, -0.028126027, 0.028473156, 0.10467447],
+
+ recurrent_to_forget_weights: [
+ -0.057784554, -0.026057621, -0.068447545, -0.022581743,
+ 0.14811787, 0.10826372, 0.09471067, 0.03987225,
+ -0.0039523416, 0.00030638507, 0.053185795, 0.10572994,
+ 0.08414449, -0.022036452, -0.00066928595, -0.09203576,
+ 0.032950465, -0.10985798, -0.023809856, 0.0021431844,
+ -0.02196096, -0.00326074, 0.00058621005, -0.074678116,
+ -0.06193199, 0.055729095, 0.03736828, 0.020123724,
+ 0.061878487, -0.04729229, 0.034919553, -0.07585433,
+ -0.04421272, -0.044019096, 0.085488975, 0.04058006,
+ -0.06890133, -0.030951202, -0.024628663, -0.07672815,
+ 0.034293607, 0.08556707, -0.05293577, -0.033561368,
+ -0.04899627, 0.0241671, 0.015736353, -0.095442444,
+ -0.029564252, 0.016493602, -0.035026584, 0.022337519,
+ -0.026871363, 0.004780428, 0.0077918363, -0.03601621,
+ 0.016435321, -0.03263031, -0.09543275, -0.047392778,
+ 0.013454138, 0.028934088, 0.01685226, -0.086110644,
+ -0.046250615, -0.01847454, 0.047608484, 0.07339695,
+ 0.034546845, -0.04881143, 0.009128804, -0.08802852,
+ 0.03761666, 0.008096139, -0.014454086, 0.014361001,
+ -0.023502491, -0.0011840804, -0.07607001, 0.001856849,
+ -0.06509276, -0.006021153, -0.08570962, -0.1451793,
+ 0.060212336, 0.055259194, 0.06974018, 0.049454916,
+ -0.027794661, -0.08077226, -0.016179763, 0.1169753,
+ 0.17213494, -0.0056326236, -0.053934924, -0.0124349,
+ -0.11520337, 0.05409887, 0.088759385, 0.0019655675,
+ 0.0042065294, 0.03881498, 0.019844765, 0.041858196,
+ -0.05695512, 0.047233116, 0.038937137, -0.06542224,
+ 0.014429736, -0.09719407, 0.13908425, -0.05379757,
+ 0.012321099, 0.082840554, -0.029899208, 0.044217527,
+ 0.059855383, 0.07711018, -0.045319796, 0.0948846,
+ -0.011724666, -0.0033288454, -0.033542685, -0.04764985,
+ -0.13873616, 0.040668588, 0.034832682, -0.015319203,
+ -0.018715994, 0.046002675, 0.0599172, -0.043107376,
+ 0.0294216, -0.002314414, -0.022424703, 0.0030315618,
+ 0.0014641669, 0.0029166266, -0.11878115, 0.013738511,
+ 0.12375372, -0.0006038222, 0.029104086, 0.087442465,
+ 0.052958444, 0.07558703, 0.04817258, 0.044462286,
+ -0.015213451, -0.08783778, -0.0561384, -0.003008196,
+ 0.047060397, -0.002058388, 0.03429439, -0.018839769,
+ 0.024734668, 0.024614193, -0.042046934, 0.09597743,
+ -0.0043254104, 0.04320769, 0.0064070094, -0.0019131786,
+ -0.02558259, -0.022822596, -0.023273505, -0.02464396,
+ -0.10991725, -0.006240552, 0.0074488563, 0.024044557,
+ 0.04383914, -0.046476185, 0.028658995, 0.060410924,
+ 0.050786525, 0.009452605, -0.0073054377, -0.024810238,
+ 0.0052906186, 0.0066939713, -0.0020913032, 0.014515517,
+ 0.015898481, 0.021362653, -0.030262267, 0.016587038,
+ -0.011442813, 0.041154444, -0.007631438, -0.03423484,
+ -0.010977775, 0.036152758, 0.0066366293, 0.11915515,
+ 0.02318443, -0.041350313, 0.021485701, -0.10906167,
+ -0.028218046, -0.00954771, 0.020531068, -0.11995105,
+ -0.03672871, 0.024019798, 0.014255957, -0.05221243,
+ -0.00661567, -0.04630967, 0.033188973, 0.10107534,
+ -0.014027541, 0.030796422, -0.10270911, -0.035999842,
+ 0.15443139, 0.07684145, 0.036571592, -0.035900835,
+ -0.0034699554, 0.06209149, 0.015920248, -0.031122351,
+ -0.03858649, 0.01849943, 0.13872518, 0.01503974,
+ 0.069941424, -0.06948533, -0.0088794185, 0.061282158,
+ -0.047401894, 0.03100163, -0.041533746, -0.10430945,
+ 0.044574402, -0.01425562, -0.024290353, 0.034563623,
+ 0.05866852, 0.023947537, -0.09445152, 0.035450947,
+ 0.02247216, -0.0042998926, 0.061146557, -0.10250651,
+ 0.020881841, -0.06747029, 0.10062043, -0.0023941975,
+ 0.03532124, -0.016341697, 0.09685456, -0.016764693,
+ 0.051808182, 0.05875331, -0.04536488, 0.001626336,
+ -0.028892258, -0.01048663, -0.009793449, -0.017093895,
+ 0.010987891, 0.02357273, -0.00010856845, 0.0099760275,
+ -0.001845119, -0.03551521, 0.0018358806, 0.05763657,
+ -0.01769146, 0.040995963, 0.02235177, -0.060430344,
+ 0.11475477, -0.023854522, 0.10071741, 0.0686208,
+ -0.014250481, 0.034261297, 0.047418304, 0.08562733,
+ -0.030519066, 0.0060542435, 0.014653856, -0.038836084,
+ 0.04096551, 0.032249358, -0.08355519, -0.026823482,
+ 0.056386515, -0.010401743, -0.028396193, 0.08507674,
+ 0.014410365, 0.020995233, 0.17040324, 0.11511526,
+ 0.02459721, 0.0066619175, 0.025853224, -0.023133837,
+ -0.081302024, 0.017264642, -0.009585969, 0.09491168,
+ -0.051313367, 0.054532815, -0.014298593, 0.10657464,
+ 0.007076659, 0.10964551, 0.0409152, 0.008275321,
+ -0.07283536, 0.07937492, 0.04192024, -0.1075027],
+
+ recurrent_to_cell_weights: [
+ -0.037322544, 0.018592842, 0.0056175636, -0.06253426,
+ 0.055647098, -0.05713207, -0.05626563, 0.005559383,
+ 0.03375411, -0.025757805, -0.088049285, 0.06017052,
+ -0.06570978, 0.007384076, 0.035123326, -0.07920549,
+ 0.053676967, 0.044480428, -0.07663568, 0.0071805613,
+ 0.08089997, 0.05143358, 0.038261272, 0.03339287,
+ -0.027673481, 0.044746667, 0.028349208, 0.020090483,
+ -0.019443132, -0.030755889, -0.0040000007, 0.04465846,
+ -0.021585021, 0.0031670958, 0.0053199246, -0.056117613,
+ -0.10893326, 0.076739706, -0.08509834, -0.027997585,
+ 0.037871376, 0.01449768, -0.09002357, -0.06111149,
+ -0.046195522, 0.0422062, -0.005683705, -0.1253618,
+ -0.012925729, -0.04890792, 0.06985068, 0.037654128,
+ 0.03398274, -0.004781977, 0.007032333, -0.031787455,
+ 0.010868644, -0.031489216, 0.09525667, 0.013939797,
+ 0.0058680447, 0.0167067, 0.02668468, -0.04797466,
+ -0.048885044, -0.12722108, 0.035304096, 0.06554885,
+ 0.00972396, -0.039238118, -0.05159735, -0.11329045,
+ 0.1613692, -0.03750952, 0.06529313, -0.071974665,
+ -0.11769596, 0.015524369, -0.0013754242, -0.12446318,
+ 0.02786344, -0.014179351, 0.005264273, 0.14376344,
+ 0.015983658, 0.03406988, -0.06939408, 0.040699873,
+ 0.02111075, 0.09669095, 0.041345075, -0.08316494,
+ -0.07684199, -0.045768797, 0.032298047, -0.041805092,
+ 0.0119405, 0.0061010392, 0.12652606, 0.0064572375,
+ -0.024950314, 0.11574242, 0.04508852, -0.04335324,
+ 0.06760663, -0.027437469, 0.07216407, 0.06977076,
+ -0.05438599, 0.034033038, -0.028602652, 0.05346137,
+ 0.043184172, -0.037189785, 0.10420091, 0.00882477,
+ -0.054019816, -0.074273005, -0.030617684, -0.0028467078,
+ 0.024302477, -0.0038869337, 0.005332455, 0.0013399826,
+ 0.04361412, -0.007001822, 0.09631092, -0.06702025,
+ -0.042049985, -0.035070654, -0.04103342, -0.10273396,
+ 0.0544271, 0.037184782, -0.13150354, -0.0058036847,
+ -0.008264958, 0.042035464, 0.05891794, 0.029673764,
+ 0.0063542654, 0.044788733, 0.054816857, 0.062257513,
+ -0.00093483756, 0.048938446, -0.004952862, -0.007730018,
+ -0.04043371, -0.017094059, 0.07229206, -0.023670016,
+ -0.052195564, -0.025616996, -0.01520939, 0.045104615,
+ -0.007376126, 0.003533447, 0.006570588, 0.056037236,
+ 0.12436656, 0.051817212, 0.028532185, -0.08686856,
+ 0.11868599, 0.07663395, -0.07323171, 0.03463402,
+ -0.050708205, -0.04458982, -0.11590894, 0.021273347,
+ 0.1251325, -0.15313013, -0.12224372, 0.17228661,
+ 0.023029093, 0.086124025, 0.006445803, -0.03496501,
+ 0.028332196, 0.04449512, -0.042436164, -0.026587414,
+ -0.006041347, -0.09292539, -0.05678812, 0.03897832,
+ 0.09465633, 0.008115513, -0.02171956, 0.08304309,
+ 0.071401566, 0.019622514, 0.032163795, -0.004167056,
+ 0.02295182, 0.030739572, 0.056506045, 0.004612461,
+ 0.06524936, 0.059999723, 0.046395954, -0.0045512207,
+ -0.1335546, -0.030136576, 0.11584653, -0.014678886,
+ 0.0020118146, -0.09688814, -0.0790206, 0.039770417,
+ -0.0329582, 0.07922767, 0.029322514, 0.026405897,
+ 0.04207835, -0.07073373, 0.063781224, 0.0859677,
+ -0.10925287, -0.07011058, 0.048005477, 0.03438226,
+ -0.09606514, -0.006669445, -0.043381985, 0.04240257,
+ -0.06955775, -0.06769346, 0.043903265, -0.026784198,
+ -0.017840602, 0.024307009, -0.040079936, -0.019946516,
+ 0.045318738, -0.12233574, 0.026170589, 0.0074471775,
+ 0.15978073, 0.10185836, 0.10298046, -0.015476589,
+ -0.039390966, -0.072174534, 0.0739445, -0.1211869,
+ -0.0347889, -0.07943156, 0.014809798, -0.12412325,
+ -0.0030663363, 0.039695457, 0.0647603, -0.08291318,
+ -0.018529687, -0.004423833, 0.0037507233, 0.084633216,
+ -0.01514876, -0.056505352, -0.012800942, -0.06994386,
+ 0.012962922, -0.031234352, 0.07029052, 0.016418684,
+ 0.03618972, 0.055686004, -0.08663945, -0.017404709,
+ -0.054761406, 0.029065743, 0.052404847, 0.020238016,
+ 0.0048197987, -0.0214882, 0.07078733, 0.013016777,
+ 0.06262858, 0.009184685, 0.020785125, -0.043904778,
+ -0.0270329, -0.03299152, -0.060088247, -0.015162964,
+ -0.001828936, 0.12642565, -0.056757294, 0.013586685,
+ 0.09232601, -0.035886683, 0.06000002, 0.05229691,
+ -0.052580316, -0.082029596, -0.010794592, 0.012947712,
+ -0.036429964, -0.085508935, -0.13127148, -0.017744139,
+ 0.031502828, 0.036232427, -0.031581745, 0.023051167,
+ -0.05325106, -0.03421577, 0.028793324, -0.034633752,
+ -0.009881397, -0.043551125, -0.018609839, 0.0019097115,
+ -0.008799762, 0.056595087, 0.0022273948, 0.055752404],
+
+ recurrent_to_output_weights: [
+ 0.025825322, -0.05813119, 0.09495884, -0.045984812, -0.01255415,
+ -0.0026479573, -0.08196161, -0.054914974, -0.0046604523, -0.029587349,
+ -0.044576716, -0.07480124, -0.082868785, 0.023254942, 0.027502948,
+ -0.0039728214, -0.08683098, -0.08116779, -0.014675607, -0.037924774,
+ -0.023314456, -0.007401714, -0.09255757, 0.029460307, -0.08829125,
+ -0.005139627, -0.08989442, -0.0555066, 0.13596267, -0.025062224,
+ -0.048351806, -0.03850004, 0.07266485, -0.022414139, 0.05940088,
+ 0.075114764, 0.09597592, -0.010211725, -0.0049794707, -0.011523867,
+ -0.025980417, 0.072999895, 0.11091378, -0.081685916, 0.014416728,
+ 0.043229222, 0.034178585, -0.07530371, 0.035837382, -0.085607,
+ -0.007721233, -0.03287832, -0.043848954, -0.06404588, -0.06632928,
+ -0.073643476, 0.008214239, -0.045984086, 0.039764922, 0.03474462,
+ 0.060612556, -0.080590084, 0.049127717, 0.04151091, -0.030063879,
+ 0.008801774, -0.023021035, -0.019558564, 0.05158114, -0.010947698,
+ -0.011825728, 0.0075720972, 0.0699727, -0.0039981045, 0.069350146,
+ 0.08799282, 0.016156472, 0.035502106, 0.11695009, 0.006217345,
+ 0.13392477, -0.037875112, 0.025745004, 0.08940699, -0.00924166,
+ 0.0046702605, -0.036598757, -0.08811812, 0.10522024, -0.032441203,
+ 0.008176899, -0.04454919, 0.07058152, 0.0067963637, 0.039206743,
+ 0.03259838, 0.03725492, -0.09515802, 0.013326398, -0.052055415,
+ -0.025676316, 0.03198509, -0.015951829, -0.058556724, 0.036879618,
+ 0.043357447, 0.028362012, -0.05908629, 0.0059240665, -0.04995891,
+ -0.019187413, 0.0276265, -0.01628143, 0.0025863599, 0.08800015,
+ 0.035250366, -0.022165963, -0.07328642, -0.009415526, -0.07455109,
+ 0.11690406, 0.0363299, 0.07411125, 0.042103454, -0.009660886,
+ 0.019076364, 0.018299393, -0.046004917, 0.08891175, 0.0431396,
+ -0.026327137, -0.051502608, 0.08979574, -0.051670972, 0.04940282,
+ -0.07491107, -0.021240504, 0.022596184, -0.034280192, 0.060163025,
+ -0.058211457, -0.051837247, -0.01349775, -0.04639988, -0.035936575,
+ -0.011681591, 0.064818054, 0.0073146066, -0.021745546, -0.043124277,
+ -0.06471268, -0.07053354, -0.029321948, -0.05330136, 0.016933719,
+ -0.053782392, 0.13747959, -0.1361751, -0.11569455, 0.0033329215,
+ 0.05693899, -0.053219706, 0.063698, 0.07977434, -0.07924483,
+ 0.06936997, 0.0034815092, -0.007305279, -0.037325785, -0.07251102,
+ -0.033633437, -0.08677009, 0.091591336, -0.14165086, 0.021752775,
+ 0.019683983, 0.0011612234, -0.058154266, 0.049996935, 0.0288841,
+ -0.0024567875, -0.14345716, 0.010955264, -0.10234828, 0.1183656,
+ -0.0010731248, -0.023590032, -0.072285876, -0.0724771, -0.026382286,
+ -0.0014920527, 0.042667855, 0.0018776858, 0.02986552, 0.009814309,
+ 0.0733756, 0.12289186, 0.018043943, -0.0458958, 0.049412545,
+ 0.033632483, 0.05495232, 0.036686596, -0.013781798, -0.010036754,
+ 0.02576849, -0.08307328, 0.010112348, 0.042521734, -0.05869831,
+ -0.071689695, 0.03876447, -0.13275425, -0.0352966, -0.023077697,
+ 0.10285965, 0.084736146, 0.15568255, -0.00040734606, 0.027835453,
+ -0.10292561, -0.032401145, 0.10053256, -0.026142767, -0.08271222,
+ -0.0030240538, -0.016368777, 0.1070414, 0.042672627, 0.013456989,
+ -0.0437609, -0.022309763, 0.11576483, 0.04108048, 0.061026827,
+ -0.0190714, -0.0869359, 0.037901703, 0.0610107, 0.07202949,
+ 0.01675338, 0.086139716, -0.08795751, -0.014898893, -0.023771819,
+ -0.01965048, 0.007955471, -0.043740474, 0.03346837, -0.10549954,
+ 0.090567775, 0.042013682, -0.03176985, 0.12569028, -0.02421228,
+ -0.029526481, 0.023851605, 0.031539805, 0.05292009, -0.02344001,
+ -0.07811758, -0.08834428, 0.10094801, 0.16594367, -0.06861939,
+ -0.021256343, -0.041093912, -0.06669611, 0.035498552, 0.021757556,
+ -0.09302526, -0.015403468, -0.06614931, -0.051798206, -0.013874718,
+ 0.03630673, 0.010412845, -0.08077351, 0.046185967, 0.0035662893,
+ 0.03541868, -0.094149634, -0.034814864, 0.003128424, -0.020674974,
+ -0.03944324, -0.008110165, -0.11113267, 0.08484226, 0.043586485,
+ 0.040582247, 0.0968012, -0.065249965, -0.028036479, 0.0050708856,
+ 0.0017462453, 0.0326779, 0.041296225, 0.09164146, -0.047743853,
+ -0.015952192, -0.034451712, 0.084197424, -0.05347844, -0.11768019,
+ 0.085926116, -0.08251791, -0.045081906, 0.0948852, 0.068401024,
+ 0.024856757, 0.06978981, -0.057309967, -0.012775832, -0.0032452994,
+ 0.01977615, -0.041040014, -0.024264973, 0.063464895, 0.05431621],
+
+ cell_to_input_weights: [
+ 0.040369894, 0.030746894, 0.24704495, 0.018586371, -0.037586458,
+ -0.15312155, -0.11812848, -0.11465643, 0.20259799, 0.11418174,
+ -0.10116027, -0.011334949, 0.12411352, -0.076769054, -0.052169047,
+ 0.21198851, -0.38871562, -0.09061183, -0.09683246, -0.21929175],
+
+ cell_to_forget_weights: [
+ -0.01998659, -0.15568835, -0.24248174, -0.012770197, 0.041331276,
+ -0.072311886, -0.052123554, -0.0066330447, -0.043891653, 0.036225766,
+ -0.047248036, 0.021479502, 0.033189066, 0.11952997, -0.020432774,
+ 0.64658105, -0.06650122, -0.03467612, 0.095340036, 0.23647355],
+
+ cell_to_output_weights: [
+ 0.08286371, -0.08261836, -0.51210177, 0.002913762, 0.17764764,
+ -0.5495371, -0.08460716, -0.24552552, 0.030037103, 0.04123544,
+ -0.11940523, 0.007358328, 0.1890978, 0.4833202, -0.34441817,
+ 0.36312827, -0.26375428, 0.1457655, -0.19724406, 0.15548733],
+
+ projection_weights: [
+ -0.009802181, 0.09401916, 0.0717386, -0.13895074, 0.09641832,
+ 0.060420845, 0.08539281, 0.054285463, 0.061395317, 0.034448683,
+ -0.042991187, 0.019801661, -0.16840284, -0.015726732, -0.23041931,
+ -0.024478018, -0.10959692, -0.013875541, 0.18600968, -0.061274476,
+ 0.0138165, -0.08160894, -0.07661644, 0.032372914, 0.16169067,
+ 0.22465782, -0.03993472, -0.004017731, 0.08633481, -0.28869787,
+ 0.08682067, 0.17240396, 0.014975425, 0.056431185, 0.031037588,
+ 0.16702051, 0.0077946745, 0.15140012, 0.29405436, 0.120285,
+ -0.188994, -0.027265169, 0.043389652, -0.022061434, 0.014777949,
+ -0.20203483, 0.094781205, 0.19100232, 0.13987629, -0.036132768,
+ -0.06426278, -0.05108664, 0.13221376, 0.009441198, -0.16715929,
+ 0.15859416, -0.040437475, 0.050779544, -0.022187516, 0.012166504,
+ 0.027685808, -0.07675938, -0.0055694645, -0.09444123, 0.0046453946,
+ 0.050794356, 0.10770313, -0.20790008, -0.07149004, -0.11425117,
+ 0.008225835, -0.035802525, 0.14374903, 0.15262283, 0.048710253,
+ 0.1847461, -0.007487823, 0.11000021, -0.09542012, 0.22619456,
+ -0.029149994, 0.08527916, 0.009043713, 0.0042746216, 0.016261552,
+ 0.022461696, 0.12689082, -0.043589946, -0.12035478, -0.08361797,
+ -0.050666027, -0.1248618, -0.1275799, -0.071875185, 0.07377272,
+ 0.09944291, -0.18897448, -0.1593054, -0.06526116, -0.040107165,
+ -0.004618631, -0.067624845, -0.007576253, 0.10727444, 0.041546922,
+ -0.20424393, 0.06907816, 0.050412357, 0.00724631, 0.039827548,
+ 0.12449835, 0.10747581, 0.13708383, 0.09134148, -0.12617786,
+ -0.06428341, 0.09956831, 0.1208086, -0.14676677, -0.0727722,
+ 0.1126304, 0.010139365, 0.015571211, -0.038128063, 0.022913318,
+ -0.042050496, 0.16842307, -0.060597885, 0.10531834, -0.06411776,
+ -0.07451711, -0.03410368, -0.13393489, 0.06534304, 0.003620307,
+ 0.04490757, 0.05970546, 0.05197996, 0.02839995, 0.10434969,
+ -0.013699693, -0.028353551, -0.07260381, 0.047201227, -0.024575593,
+ -0.036445823, 0.07155557, 0.009672501, -0.02328883, 0.009533515,
+ -0.03606021, -0.07421458, -0.028082801, -0.2678904, -0.13221288,
+ 0.18419984, -0.13012612, -0.014588381, -0.035059117, -0.04824723,
+ 0.07830115, -0.056184657, 0.03277091, 0.025466874, 0.14494097,
+ -0.12522776, -0.098633975, -0.10766018, -0.08317623, 0.08594209,
+ 0.07749552, 0.039474737, 0.1776665, -0.07409566, -0.0477268,
+ 0.29323658, 0.10801441, 0.1154011, 0.013952499, 0.10739139,
+ 0.10708251, -0.051456142, 0.0074137426, -0.10430189, 0.10034707,
+ 0.045594677, 0.0635285, -0.0715442, -0.089667566, -0.10811871,
+ 0.00026344223, 0.08298446, -0.009525053, 0.006585689, -0.24567553,
+ -0.09450807, 0.09648481, 0.026996298, -0.06419476, -0.04752702,
+ -0.11063944, -0.23441927, -0.17608605, -0.052156363, 0.067035615,
+ 0.19271925, -0.0032889997, -0.043264326, 0.09663576, -0.057112187,
+ -0.10100678, 0.0628376, 0.04447668, 0.017961001, -0.10094388,
+ -0.10190601, 0.18335468, 0.10494553, -0.052095775, -0.0026118709,
+ 0.10539724, -0.04383912, -0.042349473, 0.08438151, -0.1947263,
+ 0.02251204, 0.11216432, -0.10307853, 0.17351969, -0.039091777,
+ 0.08066188, -0.00561982, 0.12633002, 0.11335965, -0.0088127935,
+ -0.019777594, 0.06864014, -0.059751723, 0.016233567, -0.06894641,
+ -0.28651384, -0.004228674, 0.019708522, -0.16305895, -0.07468996,
+ -0.0855457, 0.099339016, -0.07580735, -0.13775392, 0.08434318,
+ 0.08330512, -0.12131499, 0.031935584, 0.09180414, -0.08876437,
+ -0.08049874, 0.008753825, 0.03498998, 0.030215185, 0.03907079,
+ 0.089751154, 0.029194152, -0.03337423, -0.019092513, 0.04331237,
+ 0.04299654, -0.036394123, -0.12915532, 0.09793732, 0.07512415,
+ -0.11319543, -0.032502122, 0.15661901, 0.07671967, -0.005491124,
+ -0.19379048, -0.218606, 0.21448623, 0.017840758, 0.1416943,
+ -0.07051762, 0.19488361, 0.02664691, -0.18104725, -0.09334311,
+ 0.15026465, -0.15493552, -0.057762887, -0.11604192, -0.262013,
+ -0.01391798, 0.012185008, 0.11156489, -0.07483202, 0.06693364,
+ -0.26151478, 0.046425626, 0.036540434, -0.16435726, 0.17338543,
+ -0.21401681, -0.11385144, -0.08283257, -0.069031075, 0.030635102,
+ 0.010969227, 0.11109743, 0.010919218, 0.027526086, 0.13519906,
+ 0.01891392, -0.046839405, -0.040167913, 0.017953383, -0.09700955,
+ 0.0061885654, -0.07000971, 0.026893595, -0.038844477, 0.14543656],
+
+ projection_bias: [],
+}
+
+# Batch0: 4 (input_sequence_size) * 5 (n_input)
+input0[input] = [0.787926, 0.151646, 0.071352, 0.118426, 0.458058]
+# Batch1: 4 (input_sequence_size) * 5 (n_input)
+input0[input].extend(
+ [0.295743, 0.544053, 0.690064, 0.858138, 0.497181],
+)
+input0[cell_state_in] = [ 0 for _ in range(n_batch * n_cell) ]
+input0[output_state_in] = [ 0 for _ in range(n_batch * n_output) ]
+output0 = {
+ scratch_buffer: [ 0 for x in range(n_batch * n_cell * 4) ],
+ cell_state_out: [
+ -0.0531632, -0.0118138, 0.0870833, 0.0347929,
+ -0.076144, -0.0659219, -0.0463811, 0.0141307,
+ -0.0127706, -0.03782, -0.00402401, -0.00571876,
+ -0.187957, -0.0247127, 0.0711425, 0.008244,
+ 0.0492649, 0.126972, 0.0933097, 0.29848,
+ -0.0966178, -0.114417, 0.0387229, 0.0453255,
+ -0.181286, -0.0651251, -0.0996879, -0.00276995,
+ 0.0617558, -0.0100728, 0.056304, -0.077416,
+ -0.162858, -0.0541251, 0.0571202, -0.0525331,
+ 0.0724297, 0.171029, 0.141738, 0.295483,
+ ],
+ output_state_out: [
+ -0.00396806, 0.029352, -0.00279226, 0.0159977,
+ -0.00835577, -0.0211779, 0.0283512, -0.0114597,
+ 0.00907307, -0.0244004, -0.0152191, -0.0259063,
+ 0.00914318, 0.00415119, 0.017147, 0.0134203,
+ -0.013869, 0.0287268, -0.00334694, 0.00733397,
+ -0.0287926, -0.0186926, 0.0193662, -0.0115437,
+ 0.00422612, -0.0345232, 0.00223253, -0.00957321,
+ 0.0210624, 0.013331, 0.0150954, 0.0216801
+ ],
+}
+# Batch0: 4 (input_sequence_size) * 16 (n_output)
+output0[output] = [
+ -0.00396806, 0.029352, -0.00279226, 0.0159977, -0.00835576,
+ -0.0211779, 0.0283512, -0.0114597, 0.00907307, -0.0244004,
+ -0.0152191, -0.0259063, 0.00914318, 0.00415118, 0.017147,
+ 0.0134203]
+# Batch1: 4 (input_sequence_size) * 16 (n_output)
+output0[output].extend(
+ [-0.013869, 0.0287268, -0.00334693, 0.00733398, -0.0287926,
+ -0.0186926, 0.0193662, -0.0115437, 0.00422612, -0.0345232,
+ 0.00223253, -0.00957321, 0.0210624, 0.013331, 0.0150954,
+ 0.02168],
+ )
+
+Example((input0, output0))
diff --git a/tests/nnapi/specs/skip/V1_2/lstm3_state2_float16.mod.py b/tests/nnapi/specs/skip/V1_2/lstm3_state2_float16.mod.py
new file mode 100644
index 000000000..494c12aa5
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/lstm3_state2_float16.mod.py
@@ -0,0 +1,683 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# LSTM Test, With Peephole, With Projection, No Clipping
+
+model = Model()
+
+n_batch = 2
+n_input = 5
+# n_cell and n_output have the same size when there is no projection.
+n_cell = 20
+n_output = 16
+
+input = Input("input", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_input))
+
+input_to_input_weights = Input("input_to_input_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+input_to_forget_weights = Input("input_to_forget_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+input_to_cell_weights = Input("input_to_cell_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+input_to_output_weights = Input("input_to_output_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+
+recurrent_to_input_weights = Input("recurrent_to_intput_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+recurrent_to_forget_weights = Input("recurrent_to_forget_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+recurrent_to_cell_weights = Input("recurrent_to_cell_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+recurrent_to_output_weights = Input("recurrent_to_output_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+
+cell_to_input_weights = Input("cell_to_input_weights", "TENSOR_FLOAT16", "{%d}" % (n_cell))
+cell_to_forget_weights = Input("cell_to_forget_weights", "TENSOR_FLOAT16", "{%d}" %(n_cell))
+cell_to_output_weights = Input("cell_to_output_weights", "TENSOR_FLOAT16", "{%d}" % (n_cell))
+
+input_gate_bias = Input("input_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+forget_gate_bias = Input("forget_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+cell_gate_bias = Input("cell_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+output_gate_bias = Input("output_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+
+projection_weights = Input("projection_weights", "TENSOR_FLOAT16", "{%d,%d}" % (n_output, n_cell))
+projection_bias = Input("projection_bias", "TENSOR_FLOAT16", "{0}")
+
+output_state_in = Input("output_state_in", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_output))
+cell_state_in = Input("cell_state_in", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_cell))
+
+activation_param = Int32Scalar("activation_param", 4) # Tanh
+cell_clip_param = Float16Scalar("cell_clip_param", 0.)
+proj_clip_param = Float16Scalar("proj_clip_param", 0.)
+
+scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, (n_cell * 4)))
+output_state_out = Output("output_state_out", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_output))
+cell_state_out = Output("cell_state_out", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_cell))
+output = Output("output", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_output))
+
+# TODO: need support for more than one output
+model = model.Operation("LSTM",
+ input,
+
+ input_to_input_weights,
+ input_to_forget_weights,
+ input_to_cell_weights,
+ input_to_output_weights,
+
+ recurrent_to_input_weights,
+ recurrent_to_forget_weights,
+ recurrent_to_cell_weights,
+ recurrent_to_output_weights,
+
+ cell_to_input_weights,
+ cell_to_forget_weights,
+ cell_to_output_weights,
+
+ input_gate_bias,
+ forget_gate_bias,
+ cell_gate_bias,
+ output_gate_bias,
+
+ projection_weights,
+ projection_bias,
+
+ output_state_in,
+ cell_state_in,
+
+ activation_param,
+ cell_clip_param,
+ proj_clip_param
+).To([scratch_buffer, output_state_out, cell_state_out, output])
+
+input0 = {input_to_input_weights: [
+ 0.021393683, 0.06124551, 0.046905167, -0.014657677, -0.03149463,
+ 0.09171803, 0.14647801, 0.10797193, -0.0057968358, 0.0019193048,
+ -0.2726754, 0.10154029, -0.018539885, 0.080349885, -0.10262385,
+ -0.022599787, -0.09121155, -0.008675967, -0.045206103, -0.0821282,
+ -0.008045952, 0.015478081, 0.055217247, 0.038719587, 0.044153627,
+ -0.06453243, 0.05031825, -0.046935108, -0.008164439, 0.014574226,
+ -0.1671009, -0.15519552, -0.16819797, -0.13971269, -0.11953059,
+ 0.25005487, -0.22790983, 0.009855087, -0.028140958, -0.11200698,
+ 0.11295408, -0.0035217577, 0.054485075, 0.05184695, 0.064711206,
+ 0.10989193, 0.11674786, 0.03490607, 0.07727357, 0.11390585,
+ -0.1863375, -0.1034451, -0.13945189, -0.049401227, -0.18767063,
+ 0.042483903, 0.14233552, 0.13832581, 0.18350165, 0.14545603,
+ -0.028545704, 0.024939531, 0.050929718, 0.0076203286, -0.0029723682,
+ -0.042484224, -0.11827596, -0.09171104, -0.10808628, -0.16327988,
+ -0.2273378, -0.0993647, -0.017155107, 0.0023917493, 0.049272764,
+ 0.0038534778, 0.054764505, 0.089753784, 0.06947234, 0.08014476,
+ -0.04544234, -0.0497073, -0.07135631, -0.048929106, -0.004042012,
+ -0.009284026, 0.018042054, 0.0036860977, -0.07427302, -0.11434604,
+ -0.018995456, 0.031487543, 0.012834908, 0.019977754, 0.044256654,
+ -0.39292613, -0.18519334, -0.11651281, -0.06809892, 0.011373677],
+
+ input_to_forget_weights: [
+ -0.0018401089, -0.004852237, 0.03698424, 0.014181704, 0.028273236,
+ -0.016726194, -0.05249759, -0.10204261, 0.00861066, -0.040979505,
+ -0.009899187, 0.01923892, -0.028177269, -0.08535103, -0.14585495,
+ 0.10662567, -0.01909731, -0.017883534, -0.0047269356, -0.045103323,
+ 0.0030784295, 0.076784775, 0.07463696, 0.094531395, 0.0814421,
+ -0.12257899, -0.033945758, -0.031303465, 0.045630626, 0.06843887,
+ -0.13492945, -0.012480007, -0.0811829, -0.07224499, -0.09628791,
+ 0.045100946, 0.0012300825, 0.013964662, 0.099372394, 0.02543059,
+ 0.06958324, 0.034257296, 0.0482646, 0.06267997, 0.052625068,
+ 0.12784666, 0.07077897, 0.025725935, 0.04165009, 0.07241905,
+ 0.018668644, -0.037377294, -0.06277783, -0.08833636, -0.040120605,
+ -0.011405586, -0.007808335, -0.010301386, -0.005102167, 0.027717464,
+ 0.05483423, 0.11449111, 0.11289652, 0.10939839, 0.13396506,
+ -0.08402166, -0.01901462, -0.044678304, -0.07720565, 0.014350063,
+ -0.11757958, -0.0652038, -0.08185733, -0.076754324, -0.092614375,
+ 0.10405491, 0.052960336, 0.035755895, 0.035839386, -0.012540553,
+ 0.036881298, 0.02913376, 0.03420159, 0.05448447, -0.054523353,
+ 0.02582715, 0.02327355, -0.011857179, -0.0011980024, -0.034641717,
+ -0.026125094, -0.17582615, -0.15923657, -0.27486774, -0.0006143371,
+ 0.0001771948, -8.470171e-05, 0.02651807, 0.045790765, 0.06956496],
+
+ input_to_cell_weights: [
+ -0.04580283, -0.09549462, -0.032418985, -0.06454633,
+ -0.043528453, 0.043018587, -0.049152344, -0.12418144,
+ -0.078985475, -0.07596889, 0.019484362, -0.11434962,
+ -0.0074034138, -0.06314844, -0.092981495, 0.0062155537,
+ -0.025034338, -0.0028890965, 0.048929527, 0.06235075,
+ 0.10665918, -0.032036792, -0.08505916, -0.10843358,
+ -0.13002433, -0.036816437, -0.02130134, -0.016518239,
+ 0.0047691227, -0.0025825808, 0.066017866, 0.029991534,
+ -0.10652836, -0.1037554, -0.13056071, -0.03266643,
+ -0.033702414, -0.006473424, -0.04611692, 0.014419339,
+ -0.025174323, 0.0396852, 0.081777506, 0.06157468,
+ 0.10210095, -0.009658194, 0.046511717, 0.03603906,
+ 0.0069369148, 0.015960095, -0.06507666, 0.09551598,
+ 0.053568836, 0.06408714, 0.12835667, -0.008714329,
+ -0.20211966, -0.12093674, 0.029450472, 0.2849013,
+ -0.029227901, 0.1164364, -0.08560263, 0.09941786,
+ -0.036999565, -0.028842626, -0.0033637602, -0.017012902,
+ -0.09720865, -0.11193351, -0.029155117, -0.017936034,
+ -0.009768936, -0.04223324, -0.036159635, 0.06505112,
+ -0.021742892, -0.023377212, -0.07221364, -0.06430552,
+ 0.05453865, 0.091149814, 0.06387331, 0.007518393,
+ 0.055960953, 0.069779344, 0.046411168, 0.10509911,
+ 0.07463894, 0.0075130584, 0.012850982, 0.04555431,
+ 0.056955688, 0.06555285, 0.050801456, -0.009862683,
+ 0.00826772, -0.026555609, -0.0073611983, -0.0014897042],
+
+ input_to_output_weights: [
+ -0.0998932, -0.07201956, -0.052803773, -0.15629593, -0.15001918,
+ -0.07650751, 0.02359855, -0.075155355, -0.08037709, -0.15093534,
+ 0.029517552, -0.04751393, 0.010350531, -0.02664851, -0.016839722,
+ -0.023121163, 0.0077019283, 0.012851257, -0.05040649, -0.0129761,
+ -0.021737747, -0.038305793, -0.06870586, -0.01481247, -0.001285394,
+ 0.10124236, 0.083122835, 0.053313006, -0.062235646, -0.075637154,
+ -0.027833903, 0.029774971, 0.1130802, 0.09218906, 0.09506135,
+ -0.086665764, -0.037162706, -0.038880914, -0.035832845, -0.014481564,
+ -0.09825003, -0.12048569, -0.097665586, -0.05287633, -0.0964047,
+ -0.11366429, 0.035777505, 0.13568819, 0.052451383, 0.050649304,
+ 0.05798951, -0.021852335, -0.099848844, 0.014740475, -0.078897946,
+ 0.04974699, 0.014160473, 0.06973932, 0.04964942, 0.033364646,
+ 0.08190124, 0.025535367, 0.050893165, 0.048514254, 0.06945813,
+ -0.078907564, -0.06707616, -0.11844508, -0.09986688, -0.07509403,
+ 0.06263226, 0.14925587, 0.20188436, 0.12098451, 0.14639415,
+ 0.0015017595, -0.014267382, -0.03417257, 0.012711468, 0.0028300495,
+ -0.024758482, -0.05098548, -0.0821182, 0.014225672, 0.021544158,
+ 0.08949725, 0.07505268, -0.0020780868, 0.04908258, 0.06476295,
+ -0.022907063, 0.027562456, 0.040185735, 0.019567577, -0.015598739,
+ -0.049097303, -0.017121866, -0.083368234, -0.02332002, -0.0840956],
+
+ input_gate_bias: [
+ 0.02234832, 0.14757581, 0.18176508, 0.10380666, 0.053110216,
+ -0.06928846, -0.13942584, -0.11816189, 0.19483899, 0.03652339,
+ -0.10250295, 0.036714908, -0.18426876, 0.036065217, 0.21810818,
+ 0.02383196, -0.043370757, 0.08690144, -0.04444982, 0.00030581196],
+
+ forget_gate_bias: [
+ 0.035185695, -0.042891346, -0.03032477, 0.23027696,
+ 0.11098921, 0.15378423, 0.09263801, 0.09790885,
+ 0.09508917, 0.061199076, 0.07665568, -0.015443159,
+ -0.03499149, 0.046190713, 0.08895977, 0.10899629,
+ 0.40694186, 0.06030037, 0.012413437, -0.06108739],
+
+ cell_gate_bias: [
+ -0.024379363, 0.0055531194, 0.23377132, 0.033463873,
+ -0.1483596, -0.10639995, -0.091433935, 0.058573797,
+ -0.06809782, -0.07889636, -0.043246906, -0.09829136,
+ -0.4279842, 0.034901652, 0.18797937, 0.0075234566,
+ 0.016178843, 0.1749513, 0.13975595, 0.92058027],
+
+ output_gate_bias: [
+ 0.046159424, -0.0012809046, 0.03563469, 0.12648113, 0.027195795,
+ 0.35373217, -0.018957434, 0.008907322, -0.0762701, 0.12018895,
+ 0.04216877, 0.0022856654, 0.040952638, 0.3147856, 0.08225149,
+ -0.057416286, -0.14995944, -0.008040261, 0.13208859, 0.029760877],
+
+ recurrent_to_input_weights: [
+ -0.001374326, -0.078856036, 0.10672688, 0.029162422,
+ -0.11585556, 0.02557986, -0.13446963, -0.035785314,
+ -0.01244275, 0.025961924, -0.02337298, -0.044228926,
+ -0.055839065, -0.046598054, -0.010546039, -0.06900766,
+ 0.027239809, 0.022582639, -0.013296484, -0.05459212,
+ 0.08981, -0.045407712, 0.08682226, -0.06867011,
+ -0.14390695, -0.02916037, 0.000996957, 0.091420636,
+ 0.14283475, -0.07390571, -0.06402044, 0.062524505,
+ -0.093129106, 0.04860203, -0.08364217, -0.08119002,
+ 0.009352075, 0.22920375, 0.0016303885, 0.11583097,
+ -0.13732095, 0.012405723, -0.07551853, 0.06343048,
+ 0.12162708, -0.031923793, -0.014335606, 0.01790974,
+ -0.10650317, -0.0724401, 0.08554849, -0.05727212,
+ 0.06556731, -0.042729504, -0.043227166, 0.011683251,
+ -0.013082158, -0.029302018, -0.010899579, -0.062036745,
+ -0.022509435, -0.00964907, -0.01567329, 0.04260106,
+ -0.07787477, -0.11576462, 0.017356863, 0.048673786,
+ -0.017577527, -0.05527947, -0.082487635, -0.040137455,
+ -0.10820036, -0.04666372, 0.022746278, -0.07851417,
+ 0.01068115, 0.032956902, 0.022433773, 0.0026891115,
+ 0.08944216, -0.0685835, 0.010513544, 0.07228705,
+ 0.02032331, -0.059686817, -0.0005566496, -0.086984694,
+ 0.040414046, -0.1380399, 0.094208956, -0.05722982,
+ 0.012092817, -0.04989123, -0.086576, -0.003399834,
+ -0.04696032, -0.045747425, 0.10091314, 0.048676282,
+ -0.029037097, 0.031399418, -0.0040285117, 0.047237843,
+ 0.09504992, 0.041799378, -0.049185462, -0.031518843,
+ -0.10516937, 0.026374253, 0.10058866, -0.0033195973,
+ -0.041975245, 0.0073591834, 0.0033782164, -0.004325073,
+ -0.10167381, 0.042500053, -0.01447153, 0.06464186,
+ -0.017142897, 0.03312627, 0.009205989, 0.024138335,
+ -0.011337001, 0.035530265, -0.010912711, 0.0706555,
+ -0.005894094, 0.051841937, -0.1401738, -0.02351249,
+ 0.0365468, 0.07590991, 0.08838724, 0.021681072,
+ -0.10086113, 0.019608743, -0.06195883, 0.077335775,
+ 0.023646897, -0.095322326, 0.02233014, 0.09756986,
+ -0.048691444, -0.009579111, 0.07595467, 0.11480546,
+ -0.09801813, 0.019894179, 0.08502348, 0.004032281,
+ 0.037211012, 0.068537936, -0.048005626, -0.091520436,
+ -0.028379958, -0.01556313, 0.06554592, -0.045599163,
+ -0.01672207, -0.020169014, -0.011877351, -0.20212261,
+ 0.010889619, 0.0047078193, 0.038385306, 0.08540671,
+ -0.017140968, -0.0035865551, 0.016678626, 0.005633034,
+ 0.015963363, 0.00871737, 0.060130805, 0.028611384,
+ 0.10109069, -0.015060172, -0.07894427, 0.06401885,
+ 0.011584063, -0.024466386, 0.0047652307, -0.09041358,
+ 0.030737216, -0.0046374933, 0.14215417, -0.11823516,
+ 0.019899689, 0.006106124, -0.027092824, 0.0786356,
+ 0.05052217, -0.058925, -0.011402121, -0.024987547,
+ -0.0013661642, -0.06832946, -0.015667673, -0.1083353,
+ -0.00096863037, -0.06988685, -0.053350925, -0.027275559,
+ -0.033664223, -0.07978348, -0.025200296, -0.017207067,
+ -0.058403496, -0.055697463, 0.005798788, 0.12965427,
+ -0.062582195, 0.0013350133, -0.10482091, 0.0379771,
+ 0.072521195, -0.0029455067, -0.13797039, -0.03628521,
+ 0.013806405, -0.017858358, -0.01008298, -0.07700066,
+ -0.017081132, 0.019358726, 0.0027079724, 0.004635139,
+ 0.062634714, -0.02338735, -0.039547626, -0.02050681,
+ 0.03385117, -0.083611414, 0.002862572, -0.09421313,
+ 0.058618143, -0.08598433, 0.00972939, 0.023867095,
+ -0.053934585, -0.023203006, 0.07452513, -0.048767887,
+ -0.07314807, -0.056307215, -0.10433547, -0.06440842,
+ 0.04328182, 0.04389765, -0.020006588, -0.09076438,
+ -0.11652589, -0.021705797, 0.03345259, -0.010329105,
+ -0.025767034, 0.013057034, -0.07316461, -0.10145612,
+ 0.06358255, 0.18531723, 0.07759293, 0.12006465,
+ 0.1305557, 0.058638252, -0.03393652, 0.09622831,
+ -0.16253184, -2.4580743e-06, 0.079869635, -0.070196845,
+ -0.005644518, 0.06857898, -0.12598175, -0.035084512,
+ 0.03156317, -0.12794146, -0.031963028, 0.04692781,
+ 0.030070418, 0.0071660685, -0.095516115, -0.004643372,
+ 0.040170413, -0.062104587, -0.0037324072, 0.0554317,
+ 0.08184801, -0.019164372, 0.06791302, 0.034257166,
+ -0.10307039, 0.021943003, 0.046745934, 0.0790918,
+ -0.0265588, -0.007824208, 0.042546265, -0.00977924,
+ -0.0002440307, -0.017384544, -0.017990116, 0.12252321,
+ -0.014512694, -0.08251313, 0.08861942, 0.13589665,
+ 0.026351685, 0.012641483, 0.07466548, 0.044301085,
+ -0.045414884, -0.051112458, 0.03444247, -0.08502782,
+ -0.04106223, -0.028126027, 0.028473156, 0.10467447],
+
+ recurrent_to_forget_weights: [
+ -0.057784554, -0.026057621, -0.068447545, -0.022581743,
+ 0.14811787, 0.10826372, 0.09471067, 0.03987225,
+ -0.0039523416, 0.00030638507, 0.053185795, 0.10572994,
+ 0.08414449, -0.022036452, -0.00066928595, -0.09203576,
+ 0.032950465, -0.10985798, -0.023809856, 0.0021431844,
+ -0.02196096, -0.00326074, 0.00058621005, -0.074678116,
+ -0.06193199, 0.055729095, 0.03736828, 0.020123724,
+ 0.061878487, -0.04729229, 0.034919553, -0.07585433,
+ -0.04421272, -0.044019096, 0.085488975, 0.04058006,
+ -0.06890133, -0.030951202, -0.024628663, -0.07672815,
+ 0.034293607, 0.08556707, -0.05293577, -0.033561368,
+ -0.04899627, 0.0241671, 0.015736353, -0.095442444,
+ -0.029564252, 0.016493602, -0.035026584, 0.022337519,
+ -0.026871363, 0.004780428, 0.0077918363, -0.03601621,
+ 0.016435321, -0.03263031, -0.09543275, -0.047392778,
+ 0.013454138, 0.028934088, 0.01685226, -0.086110644,
+ -0.046250615, -0.01847454, 0.047608484, 0.07339695,
+ 0.034546845, -0.04881143, 0.009128804, -0.08802852,
+ 0.03761666, 0.008096139, -0.014454086, 0.014361001,
+ -0.023502491, -0.0011840804, -0.07607001, 0.001856849,
+ -0.06509276, -0.006021153, -0.08570962, -0.1451793,
+ 0.060212336, 0.055259194, 0.06974018, 0.049454916,
+ -0.027794661, -0.08077226, -0.016179763, 0.1169753,
+ 0.17213494, -0.0056326236, -0.053934924, -0.0124349,
+ -0.11520337, 0.05409887, 0.088759385, 0.0019655675,
+ 0.0042065294, 0.03881498, 0.019844765, 0.041858196,
+ -0.05695512, 0.047233116, 0.038937137, -0.06542224,
+ 0.014429736, -0.09719407, 0.13908425, -0.05379757,
+ 0.012321099, 0.082840554, -0.029899208, 0.044217527,
+ 0.059855383, 0.07711018, -0.045319796, 0.0948846,
+ -0.011724666, -0.0033288454, -0.033542685, -0.04764985,
+ -0.13873616, 0.040668588, 0.034832682, -0.015319203,
+ -0.018715994, 0.046002675, 0.0599172, -0.043107376,
+ 0.0294216, -0.002314414, -0.022424703, 0.0030315618,
+ 0.0014641669, 0.0029166266, -0.11878115, 0.013738511,
+ 0.12375372, -0.0006038222, 0.029104086, 0.087442465,
+ 0.052958444, 0.07558703, 0.04817258, 0.044462286,
+ -0.015213451, -0.08783778, -0.0561384, -0.003008196,
+ 0.047060397, -0.002058388, 0.03429439, -0.018839769,
+ 0.024734668, 0.024614193, -0.042046934, 0.09597743,
+ -0.0043254104, 0.04320769, 0.0064070094, -0.0019131786,
+ -0.02558259, -0.022822596, -0.023273505, -0.02464396,
+ -0.10991725, -0.006240552, 0.0074488563, 0.024044557,
+ 0.04383914, -0.046476185, 0.028658995, 0.060410924,
+ 0.050786525, 0.009452605, -0.0073054377, -0.024810238,
+ 0.0052906186, 0.0066939713, -0.0020913032, 0.014515517,
+ 0.015898481, 0.021362653, -0.030262267, 0.016587038,
+ -0.011442813, 0.041154444, -0.007631438, -0.03423484,
+ -0.010977775, 0.036152758, 0.0066366293, 0.11915515,
+ 0.02318443, -0.041350313, 0.021485701, -0.10906167,
+ -0.028218046, -0.00954771, 0.020531068, -0.11995105,
+ -0.03672871, 0.024019798, 0.014255957, -0.05221243,
+ -0.00661567, -0.04630967, 0.033188973, 0.10107534,
+ -0.014027541, 0.030796422, -0.10270911, -0.035999842,
+ 0.15443139, 0.07684145, 0.036571592, -0.035900835,
+ -0.0034699554, 0.06209149, 0.015920248, -0.031122351,
+ -0.03858649, 0.01849943, 0.13872518, 0.01503974,
+ 0.069941424, -0.06948533, -0.0088794185, 0.061282158,
+ -0.047401894, 0.03100163, -0.041533746, -0.10430945,
+ 0.044574402, -0.01425562, -0.024290353, 0.034563623,
+ 0.05866852, 0.023947537, -0.09445152, 0.035450947,
+ 0.02247216, -0.0042998926, 0.061146557, -0.10250651,
+ 0.020881841, -0.06747029, 0.10062043, -0.0023941975,
+ 0.03532124, -0.016341697, 0.09685456, -0.016764693,
+ 0.051808182, 0.05875331, -0.04536488, 0.001626336,
+ -0.028892258, -0.01048663, -0.009793449, -0.017093895,
+ 0.010987891, 0.02357273, -0.00010856845, 0.0099760275,
+ -0.001845119, -0.03551521, 0.0018358806, 0.05763657,
+ -0.01769146, 0.040995963, 0.02235177, -0.060430344,
+ 0.11475477, -0.023854522, 0.10071741, 0.0686208,
+ -0.014250481, 0.034261297, 0.047418304, 0.08562733,
+ -0.030519066, 0.0060542435, 0.014653856, -0.038836084,
+ 0.04096551, 0.032249358, -0.08355519, -0.026823482,
+ 0.056386515, -0.010401743, -0.028396193, 0.08507674,
+ 0.014410365, 0.020995233, 0.17040324, 0.11511526,
+ 0.02459721, 0.0066619175, 0.025853224, -0.023133837,
+ -0.081302024, 0.017264642, -0.009585969, 0.09491168,
+ -0.051313367, 0.054532815, -0.014298593, 0.10657464,
+ 0.007076659, 0.10964551, 0.0409152, 0.008275321,
+ -0.07283536, 0.07937492, 0.04192024, -0.1075027],
+
+ recurrent_to_cell_weights: [
+ -0.037322544, 0.018592842, 0.0056175636, -0.06253426,
+ 0.055647098, -0.05713207, -0.05626563, 0.005559383,
+ 0.03375411, -0.025757805, -0.088049285, 0.06017052,
+ -0.06570978, 0.007384076, 0.035123326, -0.07920549,
+ 0.053676967, 0.044480428, -0.07663568, 0.0071805613,
+ 0.08089997, 0.05143358, 0.038261272, 0.03339287,
+ -0.027673481, 0.044746667, 0.028349208, 0.020090483,
+ -0.019443132, -0.030755889, -0.0040000007, 0.04465846,
+ -0.021585021, 0.0031670958, 0.0053199246, -0.056117613,
+ -0.10893326, 0.076739706, -0.08509834, -0.027997585,
+ 0.037871376, 0.01449768, -0.09002357, -0.06111149,
+ -0.046195522, 0.0422062, -0.005683705, -0.1253618,
+ -0.012925729, -0.04890792, 0.06985068, 0.037654128,
+ 0.03398274, -0.004781977, 0.007032333, -0.031787455,
+ 0.010868644, -0.031489216, 0.09525667, 0.013939797,
+ 0.0058680447, 0.0167067, 0.02668468, -0.04797466,
+ -0.048885044, -0.12722108, 0.035304096, 0.06554885,
+ 0.00972396, -0.039238118, -0.05159735, -0.11329045,
+ 0.1613692, -0.03750952, 0.06529313, -0.071974665,
+ -0.11769596, 0.015524369, -0.0013754242, -0.12446318,
+ 0.02786344, -0.014179351, 0.005264273, 0.14376344,
+ 0.015983658, 0.03406988, -0.06939408, 0.040699873,
+ 0.02111075, 0.09669095, 0.041345075, -0.08316494,
+ -0.07684199, -0.045768797, 0.032298047, -0.041805092,
+ 0.0119405, 0.0061010392, 0.12652606, 0.0064572375,
+ -0.024950314, 0.11574242, 0.04508852, -0.04335324,
+ 0.06760663, -0.027437469, 0.07216407, 0.06977076,
+ -0.05438599, 0.034033038, -0.028602652, 0.05346137,
+ 0.043184172, -0.037189785, 0.10420091, 0.00882477,
+ -0.054019816, -0.074273005, -0.030617684, -0.0028467078,
+ 0.024302477, -0.0038869337, 0.005332455, 0.0013399826,
+ 0.04361412, -0.007001822, 0.09631092, -0.06702025,
+ -0.042049985, -0.035070654, -0.04103342, -0.10273396,
+ 0.0544271, 0.037184782, -0.13150354, -0.0058036847,
+ -0.008264958, 0.042035464, 0.05891794, 0.029673764,
+ 0.0063542654, 0.044788733, 0.054816857, 0.062257513,
+ -0.00093483756, 0.048938446, -0.004952862, -0.007730018,
+ -0.04043371, -0.017094059, 0.07229206, -0.023670016,
+ -0.052195564, -0.025616996, -0.01520939, 0.045104615,
+ -0.007376126, 0.003533447, 0.006570588, 0.056037236,
+ 0.12436656, 0.051817212, 0.028532185, -0.08686856,
+ 0.11868599, 0.07663395, -0.07323171, 0.03463402,
+ -0.050708205, -0.04458982, -0.11590894, 0.021273347,
+ 0.1251325, -0.15313013, -0.12224372, 0.17228661,
+ 0.023029093, 0.086124025, 0.006445803, -0.03496501,
+ 0.028332196, 0.04449512, -0.042436164, -0.026587414,
+ -0.006041347, -0.09292539, -0.05678812, 0.03897832,
+ 0.09465633, 0.008115513, -0.02171956, 0.08304309,
+ 0.071401566, 0.019622514, 0.032163795, -0.004167056,
+ 0.02295182, 0.030739572, 0.056506045, 0.004612461,
+ 0.06524936, 0.059999723, 0.046395954, -0.0045512207,
+ -0.1335546, -0.030136576, 0.11584653, -0.014678886,
+ 0.0020118146, -0.09688814, -0.0790206, 0.039770417,
+ -0.0329582, 0.07922767, 0.029322514, 0.026405897,
+ 0.04207835, -0.07073373, 0.063781224, 0.0859677,
+ -0.10925287, -0.07011058, 0.048005477, 0.03438226,
+ -0.09606514, -0.006669445, -0.043381985, 0.04240257,
+ -0.06955775, -0.06769346, 0.043903265, -0.026784198,
+ -0.017840602, 0.024307009, -0.040079936, -0.019946516,
+ 0.045318738, -0.12233574, 0.026170589, 0.0074471775,
+ 0.15978073, 0.10185836, 0.10298046, -0.015476589,
+ -0.039390966, -0.072174534, 0.0739445, -0.1211869,
+ -0.0347889, -0.07943156, 0.014809798, -0.12412325,
+ -0.0030663363, 0.039695457, 0.0647603, -0.08291318,
+ -0.018529687, -0.004423833, 0.0037507233, 0.084633216,
+ -0.01514876, -0.056505352, -0.012800942, -0.06994386,
+ 0.012962922, -0.031234352, 0.07029052, 0.016418684,
+ 0.03618972, 0.055686004, -0.08663945, -0.017404709,
+ -0.054761406, 0.029065743, 0.052404847, 0.020238016,
+ 0.0048197987, -0.0214882, 0.07078733, 0.013016777,
+ 0.06262858, 0.009184685, 0.020785125, -0.043904778,
+ -0.0270329, -0.03299152, -0.060088247, -0.015162964,
+ -0.001828936, 0.12642565, -0.056757294, 0.013586685,
+ 0.09232601, -0.035886683, 0.06000002, 0.05229691,
+ -0.052580316, -0.082029596, -0.010794592, 0.012947712,
+ -0.036429964, -0.085508935, -0.13127148, -0.017744139,
+ 0.031502828, 0.036232427, -0.031581745, 0.023051167,
+ -0.05325106, -0.03421577, 0.028793324, -0.034633752,
+ -0.009881397, -0.043551125, -0.018609839, 0.0019097115,
+ -0.008799762, 0.056595087, 0.0022273948, 0.055752404],
+
+ recurrent_to_output_weights: [
+ 0.025825322, -0.05813119, 0.09495884, -0.045984812, -0.01255415,
+ -0.0026479573, -0.08196161, -0.054914974, -0.0046604523, -0.029587349,
+ -0.044576716, -0.07480124, -0.082868785, 0.023254942, 0.027502948,
+ -0.0039728214, -0.08683098, -0.08116779, -0.014675607, -0.037924774,
+ -0.023314456, -0.007401714, -0.09255757, 0.029460307, -0.08829125,
+ -0.005139627, -0.08989442, -0.0555066, 0.13596267, -0.025062224,
+ -0.048351806, -0.03850004, 0.07266485, -0.022414139, 0.05940088,
+ 0.075114764, 0.09597592, -0.010211725, -0.0049794707, -0.011523867,
+ -0.025980417, 0.072999895, 0.11091378, -0.081685916, 0.014416728,
+ 0.043229222, 0.034178585, -0.07530371, 0.035837382, -0.085607,
+ -0.007721233, -0.03287832, -0.043848954, -0.06404588, -0.06632928,
+ -0.073643476, 0.008214239, -0.045984086, 0.039764922, 0.03474462,
+ 0.060612556, -0.080590084, 0.049127717, 0.04151091, -0.030063879,
+ 0.008801774, -0.023021035, -0.019558564, 0.05158114, -0.010947698,
+ -0.011825728, 0.0075720972, 0.0699727, -0.0039981045, 0.069350146,
+ 0.08799282, 0.016156472, 0.035502106, 0.11695009, 0.006217345,
+ 0.13392477, -0.037875112, 0.025745004, 0.08940699, -0.00924166,
+ 0.0046702605, -0.036598757, -0.08811812, 0.10522024, -0.032441203,
+ 0.008176899, -0.04454919, 0.07058152, 0.0067963637, 0.039206743,
+ 0.03259838, 0.03725492, -0.09515802, 0.013326398, -0.052055415,
+ -0.025676316, 0.03198509, -0.015951829, -0.058556724, 0.036879618,
+ 0.043357447, 0.028362012, -0.05908629, 0.0059240665, -0.04995891,
+ -0.019187413, 0.0276265, -0.01628143, 0.0025863599, 0.08800015,
+ 0.035250366, -0.022165963, -0.07328642, -0.009415526, -0.07455109,
+ 0.11690406, 0.0363299, 0.07411125, 0.042103454, -0.009660886,
+ 0.019076364, 0.018299393, -0.046004917, 0.08891175, 0.0431396,
+ -0.026327137, -0.051502608, 0.08979574, -0.051670972, 0.04940282,
+ -0.07491107, -0.021240504, 0.022596184, -0.034280192, 0.060163025,
+ -0.058211457, -0.051837247, -0.01349775, -0.04639988, -0.035936575,
+ -0.011681591, 0.064818054, 0.0073146066, -0.021745546, -0.043124277,
+ -0.06471268, -0.07053354, -0.029321948, -0.05330136, 0.016933719,
+ -0.053782392, 0.13747959, -0.1361751, -0.11569455, 0.0033329215,
+ 0.05693899, -0.053219706, 0.063698, 0.07977434, -0.07924483,
+ 0.06936997, 0.0034815092, -0.007305279, -0.037325785, -0.07251102,
+ -0.033633437, -0.08677009, 0.091591336, -0.14165086, 0.021752775,
+ 0.019683983, 0.0011612234, -0.058154266, 0.049996935, 0.0288841,
+ -0.0024567875, -0.14345716, 0.010955264, -0.10234828, 0.1183656,
+ -0.0010731248, -0.023590032, -0.072285876, -0.0724771, -0.026382286,
+ -0.0014920527, 0.042667855, 0.0018776858, 0.02986552, 0.009814309,
+ 0.0733756, 0.12289186, 0.018043943, -0.0458958, 0.049412545,
+ 0.033632483, 0.05495232, 0.036686596, -0.013781798, -0.010036754,
+ 0.02576849, -0.08307328, 0.010112348, 0.042521734, -0.05869831,
+ -0.071689695, 0.03876447, -0.13275425, -0.0352966, -0.023077697,
+ 0.10285965, 0.084736146, 0.15568255, -0.00040734606, 0.027835453,
+ -0.10292561, -0.032401145, 0.10053256, -0.026142767, -0.08271222,
+ -0.0030240538, -0.016368777, 0.1070414, 0.042672627, 0.013456989,
+ -0.0437609, -0.022309763, 0.11576483, 0.04108048, 0.061026827,
+ -0.0190714, -0.0869359, 0.037901703, 0.0610107, 0.07202949,
+ 0.01675338, 0.086139716, -0.08795751, -0.014898893, -0.023771819,
+ -0.01965048, 0.007955471, -0.043740474, 0.03346837, -0.10549954,
+ 0.090567775, 0.042013682, -0.03176985, 0.12569028, -0.02421228,
+ -0.029526481, 0.023851605, 0.031539805, 0.05292009, -0.02344001,
+ -0.07811758, -0.08834428, 0.10094801, 0.16594367, -0.06861939,
+ -0.021256343, -0.041093912, -0.06669611, 0.035498552, 0.021757556,
+ -0.09302526, -0.015403468, -0.06614931, -0.051798206, -0.013874718,
+ 0.03630673, 0.010412845, -0.08077351, 0.046185967, 0.0035662893,
+ 0.03541868, -0.094149634, -0.034814864, 0.003128424, -0.020674974,
+ -0.03944324, -0.008110165, -0.11113267, 0.08484226, 0.043586485,
+ 0.040582247, 0.0968012, -0.065249965, -0.028036479, 0.0050708856,
+ 0.0017462453, 0.0326779, 0.041296225, 0.09164146, -0.047743853,
+ -0.015952192, -0.034451712, 0.084197424, -0.05347844, -0.11768019,
+ 0.085926116, -0.08251791, -0.045081906, 0.0948852, 0.068401024,
+ 0.024856757, 0.06978981, -0.057309967, -0.012775832, -0.0032452994,
+ 0.01977615, -0.041040014, -0.024264973, 0.063464895, 0.05431621],
+
+ cell_to_input_weights: [
+ 0.040369894, 0.030746894, 0.24704495, 0.018586371, -0.037586458,
+ -0.15312155, -0.11812848, -0.11465643, 0.20259799, 0.11418174,
+ -0.10116027, -0.011334949, 0.12411352, -0.076769054, -0.052169047,
+ 0.21198851, -0.38871562, -0.09061183, -0.09683246, -0.21929175],
+
+ cell_to_forget_weights: [
+ -0.01998659, -0.15568835, -0.24248174, -0.012770197, 0.041331276,
+ -0.072311886, -0.052123554, -0.0066330447, -0.043891653, 0.036225766,
+ -0.047248036, 0.021479502, 0.033189066, 0.11952997, -0.020432774,
+ 0.64658105, -0.06650122, -0.03467612, 0.095340036, 0.23647355],
+
+ cell_to_output_weights: [
+ 0.08286371, -0.08261836, -0.51210177, 0.002913762, 0.17764764,
+ -0.5495371, -0.08460716, -0.24552552, 0.030037103, 0.04123544,
+ -0.11940523, 0.007358328, 0.1890978, 0.4833202, -0.34441817,
+ 0.36312827, -0.26375428, 0.1457655, -0.19724406, 0.15548733],
+
+ projection_weights: [
+ -0.009802181, 0.09401916, 0.0717386, -0.13895074, 0.09641832,
+ 0.060420845, 0.08539281, 0.054285463, 0.061395317, 0.034448683,
+ -0.042991187, 0.019801661, -0.16840284, -0.015726732, -0.23041931,
+ -0.024478018, -0.10959692, -0.013875541, 0.18600968, -0.061274476,
+ 0.0138165, -0.08160894, -0.07661644, 0.032372914, 0.16169067,
+ 0.22465782, -0.03993472, -0.004017731, 0.08633481, -0.28869787,
+ 0.08682067, 0.17240396, 0.014975425, 0.056431185, 0.031037588,
+ 0.16702051, 0.0077946745, 0.15140012, 0.29405436, 0.120285,
+ -0.188994, -0.027265169, 0.043389652, -0.022061434, 0.014777949,
+ -0.20203483, 0.094781205, 0.19100232, 0.13987629, -0.036132768,
+ -0.06426278, -0.05108664, 0.13221376, 0.009441198, -0.16715929,
+ 0.15859416, -0.040437475, 0.050779544, -0.022187516, 0.012166504,
+ 0.027685808, -0.07675938, -0.0055694645, -0.09444123, 0.0046453946,
+ 0.050794356, 0.10770313, -0.20790008, -0.07149004, -0.11425117,
+ 0.008225835, -0.035802525, 0.14374903, 0.15262283, 0.048710253,
+ 0.1847461, -0.007487823, 0.11000021, -0.09542012, 0.22619456,
+ -0.029149994, 0.08527916, 0.009043713, 0.0042746216, 0.016261552,
+ 0.022461696, 0.12689082, -0.043589946, -0.12035478, -0.08361797,
+ -0.050666027, -0.1248618, -0.1275799, -0.071875185, 0.07377272,
+ 0.09944291, -0.18897448, -0.1593054, -0.06526116, -0.040107165,
+ -0.004618631, -0.067624845, -0.007576253, 0.10727444, 0.041546922,
+ -0.20424393, 0.06907816, 0.050412357, 0.00724631, 0.039827548,
+ 0.12449835, 0.10747581, 0.13708383, 0.09134148, -0.12617786,
+ -0.06428341, 0.09956831, 0.1208086, -0.14676677, -0.0727722,
+ 0.1126304, 0.010139365, 0.015571211, -0.038128063, 0.022913318,
+ -0.042050496, 0.16842307, -0.060597885, 0.10531834, -0.06411776,
+ -0.07451711, -0.03410368, -0.13393489, 0.06534304, 0.003620307,
+ 0.04490757, 0.05970546, 0.05197996, 0.02839995, 0.10434969,
+ -0.013699693, -0.028353551, -0.07260381, 0.047201227, -0.024575593,
+ -0.036445823, 0.07155557, 0.009672501, -0.02328883, 0.009533515,
+ -0.03606021, -0.07421458, -0.028082801, -0.2678904, -0.13221288,
+ 0.18419984, -0.13012612, -0.014588381, -0.035059117, -0.04824723,
+ 0.07830115, -0.056184657, 0.03277091, 0.025466874, 0.14494097,
+ -0.12522776, -0.098633975, -0.10766018, -0.08317623, 0.08594209,
+ 0.07749552, 0.039474737, 0.1776665, -0.07409566, -0.0477268,
+ 0.29323658, 0.10801441, 0.1154011, 0.013952499, 0.10739139,
+ 0.10708251, -0.051456142, 0.0074137426, -0.10430189, 0.10034707,
+ 0.045594677, 0.0635285, -0.0715442, -0.089667566, -0.10811871,
+ 0.00026344223, 0.08298446, -0.009525053, 0.006585689, -0.24567553,
+ -0.09450807, 0.09648481, 0.026996298, -0.06419476, -0.04752702,
+ -0.11063944, -0.23441927, -0.17608605, -0.052156363, 0.067035615,
+ 0.19271925, -0.0032889997, -0.043264326, 0.09663576, -0.057112187,
+ -0.10100678, 0.0628376, 0.04447668, 0.017961001, -0.10094388,
+ -0.10190601, 0.18335468, 0.10494553, -0.052095775, -0.0026118709,
+ 0.10539724, -0.04383912, -0.042349473, 0.08438151, -0.1947263,
+ 0.02251204, 0.11216432, -0.10307853, 0.17351969, -0.039091777,
+ 0.08066188, -0.00561982, 0.12633002, 0.11335965, -0.0088127935,
+ -0.019777594, 0.06864014, -0.059751723, 0.016233567, -0.06894641,
+ -0.28651384, -0.004228674, 0.019708522, -0.16305895, -0.07468996,
+ -0.0855457, 0.099339016, -0.07580735, -0.13775392, 0.08434318,
+ 0.08330512, -0.12131499, 0.031935584, 0.09180414, -0.08876437,
+ -0.08049874, 0.008753825, 0.03498998, 0.030215185, 0.03907079,
+ 0.089751154, 0.029194152, -0.03337423, -0.019092513, 0.04331237,
+ 0.04299654, -0.036394123, -0.12915532, 0.09793732, 0.07512415,
+ -0.11319543, -0.032502122, 0.15661901, 0.07671967, -0.005491124,
+ -0.19379048, -0.218606, 0.21448623, 0.017840758, 0.1416943,
+ -0.07051762, 0.19488361, 0.02664691, -0.18104725, -0.09334311,
+ 0.15026465, -0.15493552, -0.057762887, -0.11604192, -0.262013,
+ -0.01391798, 0.012185008, 0.11156489, -0.07483202, 0.06693364,
+ -0.26151478, 0.046425626, 0.036540434, -0.16435726, 0.17338543,
+ -0.21401681, -0.11385144, -0.08283257, -0.069031075, 0.030635102,
+ 0.010969227, 0.11109743, 0.010919218, 0.027526086, 0.13519906,
+ 0.01891392, -0.046839405, -0.040167913, 0.017953383, -0.09700955,
+ 0.0061885654, -0.07000971, 0.026893595, -0.038844477, 0.14543656],
+
+ projection_bias: [],
+}
+
+# Batch0: 4 (input_sequence_size) * 5 (n_input)
+input0[input] = [0.073204, 0.296072, 0.743333, 0.069199, 0.045348]
+# Batch1: 4 (input_sequence_size) * 5 (n_input)
+input0[input].extend(
+ [0.640394, 0.930399, 0.050782, 0.432485, 0.988078]
+)
+input0[output_state_in] = [
+ -0.0166936, 0.0381209, 0.000889684, 0.0143363,
+ -0.0328911, -0.0234288, 0.0333051, -0.012229,
+ 0.0110322, -0.0457725, -0.000832209, -0.0202817,
+ 0.0327257, 0.0121309, 0.0155969, 0.0312091,
+ -0.0141913, 0.0322082, 0.00227024, 0.0260507,
+ -0.0188721, -0.0296489, 0.0399134, -0.0160509,
+ 0.011604, -0.0447318, -0.0150515, -0.0277406,
+ 0.0316596, 0.0118233, 0.0214762, 0.0293641,
+]
+input0[cell_state_in] = [
+ -0.154022, -0.124934, 0.0478463, 0.0607819,
+ -0.218727, -0.111053, -0.103885, -0.00447221,
+ 0.0554757, -0.0207068, 0.0595767, -0.116297,
+ -0.249466, -0.0723206, 0.0794942, -0.0377107,
+ 0.124532, 0.249952, 0.188641, 0.411865,
+ -0.11012, -0.0694494, 0.103501, 0.0428427,
+ -0.167345, -0.106061, -0.0775679, 0.00936161,
+ 0.0105526, -0.0314523, 0.0243475, -0.132179,
+ -0.258763, -0.0307266, 0.107047, -0.0115197,
+ 0.0995485, 0.220027, 0.158355, 0.436369,
+]
+output0 = {
+ scratch_buffer: [ 0 for x in range(n_batch * n_cell * 4) ],
+ cell_state_out: [
+ -0.126572, -0.121882, 0.121569, 0.0489971,
+ -0.240177, -0.124685, -0.122565, 0.0162748,
+ 0.0317536, -0.0270355, 0.0418199, -0.179755,
+ -0.327279, -0.0342741, 0.133831, -0.0238279,
+ 0.122148, 0.269115, 0.185989, 0.525976,
+ -0.167208, -0.109612, 0.0531226, 0.0695387,
+ -0.248335, -0.134123, -0.108246, 0.00628498,
+ 0.0492984, -0.0264919, 0.0698144, -0.0635602,
+ -0.295363, -0.0760078, 0.102725, -0.0351708,
+ 0.149804, 0.259131, 0.202573, 0.500664,
+ ],
+ output_state_out: [
+ -0.0213783, 0.0350169, 0.000324787, 0.0276012,
+ -0.0263374, -0.0371449, 0.0446149, -0.0205474,
+ 0.0103729, -0.0576349, -0.0150052, -0.0292043,
+ 0.0376827, 0.0136115, 0.0243435, 0.0354492,
+ -0.0204549, 0.0450315, -0.00117379, 0.0167673,
+ -0.0375007, -0.0238314, 0.038784, -0.0174034,
+ 0.0131743, -0.0506589, -0.00484469, -0.0240239,
+ 0.0325789, 0.00790064, 0.0220157, 0.0333314,
+ ],
+}
+
+# Batch0: 4 (input_sequence_size) * 16 (n_output)
+output0[output] = [
+ -0.0213783, 0.0350169, 0.000324794,
+ 0.0276012, -0.0263374, -0.0371449, 0.0446149, -0.0205474,
+ 0.0103729, -0.0576349, -0.0150052, -0.0292043, 0.0376827,
+ 0.0136115, 0.0243435, 0.0354492]
+# Batch1: 4 (input_sequence_size) * 16 (n_output)
+output0[output].extend(
+ [-0.0204549, 0.0450315, -0.00117378,
+ 0.0167673, -0.0375007, -0.0238314, 0.038784, -0.0174034,
+ 0.0131743, -0.0506589, -0.0048447, -0.0240239, 0.0325789,
+ 0.00790065, 0.0220157, 0.0333314],
+ )
+
+Example((input0, output0))
diff --git a/tests/nnapi/specs/skip/V1_2/lstm3_state3_float16.mod.py b/tests/nnapi/specs/skip/V1_2/lstm3_state3_float16.mod.py
new file mode 100644
index 000000000..d359f84e1
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/lstm3_state3_float16.mod.py
@@ -0,0 +1,663 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# LSTM Test, With Peephole, With Projection, No Clipping
+
+model = Model()
+
+n_batch = 2
+n_input = 5
+# n_cell and n_output have the same size when there is no projection.
+n_cell = 20
+n_output = 16
+
+input = Input("input", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_input))
+
+input_to_input_weights = Input("input_to_input_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+input_to_forget_weights = Input("input_to_forget_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+input_to_cell_weights = Input("input_to_cell_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+input_to_output_weights = Input("input_to_output_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+
+recurrent_to_input_weights = Input("recurrent_to_intput_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+recurrent_to_forget_weights = Input("recurrent_to_forget_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+recurrent_to_cell_weights = Input("recurrent_to_cell_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+recurrent_to_output_weights = Input("recurrent_to_output_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+
+cell_to_input_weights = Input("cell_to_input_weights", "TENSOR_FLOAT16", "{%d}" % (n_cell))
+cell_to_forget_weights = Input("cell_to_forget_weights", "TENSOR_FLOAT16", "{%d}" %(n_cell))
+cell_to_output_weights = Input("cell_to_output_weights", "TENSOR_FLOAT16", "{%d}" % (n_cell))
+
+input_gate_bias = Input("input_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+forget_gate_bias = Input("forget_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+cell_gate_bias = Input("cell_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+output_gate_bias = Input("output_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+
+projection_weights = Input("projection_weights", "TENSOR_FLOAT16", "{%d,%d}" % (n_output, n_cell))
+projection_bias = Input("projection_bias", "TENSOR_FLOAT16", "{0}")
+
+output_state_in = Input("output_state_in", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_output))
+cell_state_in = Input("cell_state_in", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_cell))
+
+activation_param = Int32Scalar("activation_param", 4) # Tanh
+cell_clip_param = Float16Scalar("cell_clip_param", 0.)
+proj_clip_param = Float16Scalar("proj_clip_param", 0.)
+
+scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, (n_cell * 4)))
+output_state_out = IgnoredOutput("output_state_out", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_output))
+cell_state_out = IgnoredOutput("cell_state_out", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_cell))
+output = Output("output", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_output))
+
+# TODO: need support for more than one output
+model = model.Operation("LSTM",
+ input,
+
+ input_to_input_weights,
+ input_to_forget_weights,
+ input_to_cell_weights,
+ input_to_output_weights,
+
+ recurrent_to_input_weights,
+ recurrent_to_forget_weights,
+ recurrent_to_cell_weights,
+ recurrent_to_output_weights,
+
+ cell_to_input_weights,
+ cell_to_forget_weights,
+ cell_to_output_weights,
+
+ input_gate_bias,
+ forget_gate_bias,
+ cell_gate_bias,
+ output_gate_bias,
+
+ projection_weights,
+ projection_bias,
+
+ output_state_in,
+ cell_state_in,
+
+ activation_param,
+ cell_clip_param,
+ proj_clip_param
+).To([scratch_buffer, output_state_out, cell_state_out, output])
+
+input0 = {input_to_input_weights: [
+ 0.021393683, 0.06124551, 0.046905167, -0.014657677, -0.03149463,
+ 0.09171803, 0.14647801, 0.10797193, -0.0057968358, 0.0019193048,
+ -0.2726754, 0.10154029, -0.018539885, 0.080349885, -0.10262385,
+ -0.022599787, -0.09121155, -0.008675967, -0.045206103, -0.0821282,
+ -0.008045952, 0.015478081, 0.055217247, 0.038719587, 0.044153627,
+ -0.06453243, 0.05031825, -0.046935108, -0.008164439, 0.014574226,
+ -0.1671009, -0.15519552, -0.16819797, -0.13971269, -0.11953059,
+ 0.25005487, -0.22790983, 0.009855087, -0.028140958, -0.11200698,
+ 0.11295408, -0.0035217577, 0.054485075, 0.05184695, 0.064711206,
+ 0.10989193, 0.11674786, 0.03490607, 0.07727357, 0.11390585,
+ -0.1863375, -0.1034451, -0.13945189, -0.049401227, -0.18767063,
+ 0.042483903, 0.14233552, 0.13832581, 0.18350165, 0.14545603,
+ -0.028545704, 0.024939531, 0.050929718, 0.0076203286, -0.0029723682,
+ -0.042484224, -0.11827596, -0.09171104, -0.10808628, -0.16327988,
+ -0.2273378, -0.0993647, -0.017155107, 0.0023917493, 0.049272764,
+ 0.0038534778, 0.054764505, 0.089753784, 0.06947234, 0.08014476,
+ -0.04544234, -0.0497073, -0.07135631, -0.048929106, -0.004042012,
+ -0.009284026, 0.018042054, 0.0036860977, -0.07427302, -0.11434604,
+ -0.018995456, 0.031487543, 0.012834908, 0.019977754, 0.044256654,
+ -0.39292613, -0.18519334, -0.11651281, -0.06809892, 0.011373677],
+
+ input_to_forget_weights: [
+ -0.0018401089, -0.004852237, 0.03698424, 0.014181704, 0.028273236,
+ -0.016726194, -0.05249759, -0.10204261, 0.00861066, -0.040979505,
+ -0.009899187, 0.01923892, -0.028177269, -0.08535103, -0.14585495,
+ 0.10662567, -0.01909731, -0.017883534, -0.0047269356, -0.045103323,
+ 0.0030784295, 0.076784775, 0.07463696, 0.094531395, 0.0814421,
+ -0.12257899, -0.033945758, -0.031303465, 0.045630626, 0.06843887,
+ -0.13492945, -0.012480007, -0.0811829, -0.07224499, -0.09628791,
+ 0.045100946, 0.0012300825, 0.013964662, 0.099372394, 0.02543059,
+ 0.06958324, 0.034257296, 0.0482646, 0.06267997, 0.052625068,
+ 0.12784666, 0.07077897, 0.025725935, 0.04165009, 0.07241905,
+ 0.018668644, -0.037377294, -0.06277783, -0.08833636, -0.040120605,
+ -0.011405586, -0.007808335, -0.010301386, -0.005102167, 0.027717464,
+ 0.05483423, 0.11449111, 0.11289652, 0.10939839, 0.13396506,
+ -0.08402166, -0.01901462, -0.044678304, -0.07720565, 0.014350063,
+ -0.11757958, -0.0652038, -0.08185733, -0.076754324, -0.092614375,
+ 0.10405491, 0.052960336, 0.035755895, 0.035839386, -0.012540553,
+ 0.036881298, 0.02913376, 0.03420159, 0.05448447, -0.054523353,
+ 0.02582715, 0.02327355, -0.011857179, -0.0011980024, -0.034641717,
+ -0.026125094, -0.17582615, -0.15923657, -0.27486774, -0.0006143371,
+ 0.0001771948, -8.470171e-05, 0.02651807, 0.045790765, 0.06956496],
+
+ input_to_cell_weights: [
+ -0.04580283, -0.09549462, -0.032418985, -0.06454633,
+ -0.043528453, 0.043018587, -0.049152344, -0.12418144,
+ -0.078985475, -0.07596889, 0.019484362, -0.11434962,
+ -0.0074034138, -0.06314844, -0.092981495, 0.0062155537,
+ -0.025034338, -0.0028890965, 0.048929527, 0.06235075,
+ 0.10665918, -0.032036792, -0.08505916, -0.10843358,
+ -0.13002433, -0.036816437, -0.02130134, -0.016518239,
+ 0.0047691227, -0.0025825808, 0.066017866, 0.029991534,
+ -0.10652836, -0.1037554, -0.13056071, -0.03266643,
+ -0.033702414, -0.006473424, -0.04611692, 0.014419339,
+ -0.025174323, 0.0396852, 0.081777506, 0.06157468,
+ 0.10210095, -0.009658194, 0.046511717, 0.03603906,
+ 0.0069369148, 0.015960095, -0.06507666, 0.09551598,
+ 0.053568836, 0.06408714, 0.12835667, -0.008714329,
+ -0.20211966, -0.12093674, 0.029450472, 0.2849013,
+ -0.029227901, 0.1164364, -0.08560263, 0.09941786,
+ -0.036999565, -0.028842626, -0.0033637602, -0.017012902,
+ -0.09720865, -0.11193351, -0.029155117, -0.017936034,
+ -0.009768936, -0.04223324, -0.036159635, 0.06505112,
+ -0.021742892, -0.023377212, -0.07221364, -0.06430552,
+ 0.05453865, 0.091149814, 0.06387331, 0.007518393,
+ 0.055960953, 0.069779344, 0.046411168, 0.10509911,
+ 0.07463894, 0.0075130584, 0.012850982, 0.04555431,
+ 0.056955688, 0.06555285, 0.050801456, -0.009862683,
+ 0.00826772, -0.026555609, -0.0073611983, -0.0014897042],
+
+ input_to_output_weights: [
+ -0.0998932, -0.07201956, -0.052803773, -0.15629593, -0.15001918,
+ -0.07650751, 0.02359855, -0.075155355, -0.08037709, -0.15093534,
+ 0.029517552, -0.04751393, 0.010350531, -0.02664851, -0.016839722,
+ -0.023121163, 0.0077019283, 0.012851257, -0.05040649, -0.0129761,
+ -0.021737747, -0.038305793, -0.06870586, -0.01481247, -0.001285394,
+ 0.10124236, 0.083122835, 0.053313006, -0.062235646, -0.075637154,
+ -0.027833903, 0.029774971, 0.1130802, 0.09218906, 0.09506135,
+ -0.086665764, -0.037162706, -0.038880914, -0.035832845, -0.014481564,
+ -0.09825003, -0.12048569, -0.097665586, -0.05287633, -0.0964047,
+ -0.11366429, 0.035777505, 0.13568819, 0.052451383, 0.050649304,
+ 0.05798951, -0.021852335, -0.099848844, 0.014740475, -0.078897946,
+ 0.04974699, 0.014160473, 0.06973932, 0.04964942, 0.033364646,
+ 0.08190124, 0.025535367, 0.050893165, 0.048514254, 0.06945813,
+ -0.078907564, -0.06707616, -0.11844508, -0.09986688, -0.07509403,
+ 0.06263226, 0.14925587, 0.20188436, 0.12098451, 0.14639415,
+ 0.0015017595, -0.014267382, -0.03417257, 0.012711468, 0.0028300495,
+ -0.024758482, -0.05098548, -0.0821182, 0.014225672, 0.021544158,
+ 0.08949725, 0.07505268, -0.0020780868, 0.04908258, 0.06476295,
+ -0.022907063, 0.027562456, 0.040185735, 0.019567577, -0.015598739,
+ -0.049097303, -0.017121866, -0.083368234, -0.02332002, -0.0840956],
+
+ input_gate_bias: [
+ 0.02234832, 0.14757581, 0.18176508, 0.10380666, 0.053110216,
+ -0.06928846, -0.13942584, -0.11816189, 0.19483899, 0.03652339,
+ -0.10250295, 0.036714908, -0.18426876, 0.036065217, 0.21810818,
+ 0.02383196, -0.043370757, 0.08690144, -0.04444982, 0.00030581196],
+
+ forget_gate_bias: [
+ 0.035185695, -0.042891346, -0.03032477, 0.23027696,
+ 0.11098921, 0.15378423, 0.09263801, 0.09790885,
+ 0.09508917, 0.061199076, 0.07665568, -0.015443159,
+ -0.03499149, 0.046190713, 0.08895977, 0.10899629,
+ 0.40694186, 0.06030037, 0.012413437, -0.06108739],
+
+ cell_gate_bias: [
+ -0.024379363, 0.0055531194, 0.23377132, 0.033463873,
+ -0.1483596, -0.10639995, -0.091433935, 0.058573797,
+ -0.06809782, -0.07889636, -0.043246906, -0.09829136,
+ -0.4279842, 0.034901652, 0.18797937, 0.0075234566,
+ 0.016178843, 0.1749513, 0.13975595, 0.92058027],
+
+ output_gate_bias: [
+ 0.046159424, -0.0012809046, 0.03563469, 0.12648113, 0.027195795,
+ 0.35373217, -0.018957434, 0.008907322, -0.0762701, 0.12018895,
+ 0.04216877, 0.0022856654, 0.040952638, 0.3147856, 0.08225149,
+ -0.057416286, -0.14995944, -0.008040261, 0.13208859, 0.029760877],
+
+ recurrent_to_input_weights: [
+ -0.001374326, -0.078856036, 0.10672688, 0.029162422,
+ -0.11585556, 0.02557986, -0.13446963, -0.035785314,
+ -0.01244275, 0.025961924, -0.02337298, -0.044228926,
+ -0.055839065, -0.046598054, -0.010546039, -0.06900766,
+ 0.027239809, 0.022582639, -0.013296484, -0.05459212,
+ 0.08981, -0.045407712, 0.08682226, -0.06867011,
+ -0.14390695, -0.02916037, 0.000996957, 0.091420636,
+ 0.14283475, -0.07390571, -0.06402044, 0.062524505,
+ -0.093129106, 0.04860203, -0.08364217, -0.08119002,
+ 0.009352075, 0.22920375, 0.0016303885, 0.11583097,
+ -0.13732095, 0.012405723, -0.07551853, 0.06343048,
+ 0.12162708, -0.031923793, -0.014335606, 0.01790974,
+ -0.10650317, -0.0724401, 0.08554849, -0.05727212,
+ 0.06556731, -0.042729504, -0.043227166, 0.011683251,
+ -0.013082158, -0.029302018, -0.010899579, -0.062036745,
+ -0.022509435, -0.00964907, -0.01567329, 0.04260106,
+ -0.07787477, -0.11576462, 0.017356863, 0.048673786,
+ -0.017577527, -0.05527947, -0.082487635, -0.040137455,
+ -0.10820036, -0.04666372, 0.022746278, -0.07851417,
+ 0.01068115, 0.032956902, 0.022433773, 0.0026891115,
+ 0.08944216, -0.0685835, 0.010513544, 0.07228705,
+ 0.02032331, -0.059686817, -0.0005566496, -0.086984694,
+ 0.040414046, -0.1380399, 0.094208956, -0.05722982,
+ 0.012092817, -0.04989123, -0.086576, -0.003399834,
+ -0.04696032, -0.045747425, 0.10091314, 0.048676282,
+ -0.029037097, 0.031399418, -0.0040285117, 0.047237843,
+ 0.09504992, 0.041799378, -0.049185462, -0.031518843,
+ -0.10516937, 0.026374253, 0.10058866, -0.0033195973,
+ -0.041975245, 0.0073591834, 0.0033782164, -0.004325073,
+ -0.10167381, 0.042500053, -0.01447153, 0.06464186,
+ -0.017142897, 0.03312627, 0.009205989, 0.024138335,
+ -0.011337001, 0.035530265, -0.010912711, 0.0706555,
+ -0.005894094, 0.051841937, -0.1401738, -0.02351249,
+ 0.0365468, 0.07590991, 0.08838724, 0.021681072,
+ -0.10086113, 0.019608743, -0.06195883, 0.077335775,
+ 0.023646897, -0.095322326, 0.02233014, 0.09756986,
+ -0.048691444, -0.009579111, 0.07595467, 0.11480546,
+ -0.09801813, 0.019894179, 0.08502348, 0.004032281,
+ 0.037211012, 0.068537936, -0.048005626, -0.091520436,
+ -0.028379958, -0.01556313, 0.06554592, -0.045599163,
+ -0.01672207, -0.020169014, -0.011877351, -0.20212261,
+ 0.010889619, 0.0047078193, 0.038385306, 0.08540671,
+ -0.017140968, -0.0035865551, 0.016678626, 0.005633034,
+ 0.015963363, 0.00871737, 0.060130805, 0.028611384,
+ 0.10109069, -0.015060172, -0.07894427, 0.06401885,
+ 0.011584063, -0.024466386, 0.0047652307, -0.09041358,
+ 0.030737216, -0.0046374933, 0.14215417, -0.11823516,
+ 0.019899689, 0.006106124, -0.027092824, 0.0786356,
+ 0.05052217, -0.058925, -0.011402121, -0.024987547,
+ -0.0013661642, -0.06832946, -0.015667673, -0.1083353,
+ -0.00096863037, -0.06988685, -0.053350925, -0.027275559,
+ -0.033664223, -0.07978348, -0.025200296, -0.017207067,
+ -0.058403496, -0.055697463, 0.005798788, 0.12965427,
+ -0.062582195, 0.0013350133, -0.10482091, 0.0379771,
+ 0.072521195, -0.0029455067, -0.13797039, -0.03628521,
+ 0.013806405, -0.017858358, -0.01008298, -0.07700066,
+ -0.017081132, 0.019358726, 0.0027079724, 0.004635139,
+ 0.062634714, -0.02338735, -0.039547626, -0.02050681,
+ 0.03385117, -0.083611414, 0.002862572, -0.09421313,
+ 0.058618143, -0.08598433, 0.00972939, 0.023867095,
+ -0.053934585, -0.023203006, 0.07452513, -0.048767887,
+ -0.07314807, -0.056307215, -0.10433547, -0.06440842,
+ 0.04328182, 0.04389765, -0.020006588, -0.09076438,
+ -0.11652589, -0.021705797, 0.03345259, -0.010329105,
+ -0.025767034, 0.013057034, -0.07316461, -0.10145612,
+ 0.06358255, 0.18531723, 0.07759293, 0.12006465,
+ 0.1305557, 0.058638252, -0.03393652, 0.09622831,
+ -0.16253184, -2.4580743e-06, 0.079869635, -0.070196845,
+ -0.005644518, 0.06857898, -0.12598175, -0.035084512,
+ 0.03156317, -0.12794146, -0.031963028, 0.04692781,
+ 0.030070418, 0.0071660685, -0.095516115, -0.004643372,
+ 0.040170413, -0.062104587, -0.0037324072, 0.0554317,
+ 0.08184801, -0.019164372, 0.06791302, 0.034257166,
+ -0.10307039, 0.021943003, 0.046745934, 0.0790918,
+ -0.0265588, -0.007824208, 0.042546265, -0.00977924,
+ -0.0002440307, -0.017384544, -0.017990116, 0.12252321,
+ -0.014512694, -0.08251313, 0.08861942, 0.13589665,
+ 0.026351685, 0.012641483, 0.07466548, 0.044301085,
+ -0.045414884, -0.051112458, 0.03444247, -0.08502782,
+ -0.04106223, -0.028126027, 0.028473156, 0.10467447],
+
+ recurrent_to_forget_weights: [
+ -0.057784554, -0.026057621, -0.068447545, -0.022581743,
+ 0.14811787, 0.10826372, 0.09471067, 0.03987225,
+ -0.0039523416, 0.00030638507, 0.053185795, 0.10572994,
+ 0.08414449, -0.022036452, -0.00066928595, -0.09203576,
+ 0.032950465, -0.10985798, -0.023809856, 0.0021431844,
+ -0.02196096, -0.00326074, 0.00058621005, -0.074678116,
+ -0.06193199, 0.055729095, 0.03736828, 0.020123724,
+ 0.061878487, -0.04729229, 0.034919553, -0.07585433,
+ -0.04421272, -0.044019096, 0.085488975, 0.04058006,
+ -0.06890133, -0.030951202, -0.024628663, -0.07672815,
+ 0.034293607, 0.08556707, -0.05293577, -0.033561368,
+ -0.04899627, 0.0241671, 0.015736353, -0.095442444,
+ -0.029564252, 0.016493602, -0.035026584, 0.022337519,
+ -0.026871363, 0.004780428, 0.0077918363, -0.03601621,
+ 0.016435321, -0.03263031, -0.09543275, -0.047392778,
+ 0.013454138, 0.028934088, 0.01685226, -0.086110644,
+ -0.046250615, -0.01847454, 0.047608484, 0.07339695,
+ 0.034546845, -0.04881143, 0.009128804, -0.08802852,
+ 0.03761666, 0.008096139, -0.014454086, 0.014361001,
+ -0.023502491, -0.0011840804, -0.07607001, 0.001856849,
+ -0.06509276, -0.006021153, -0.08570962, -0.1451793,
+ 0.060212336, 0.055259194, 0.06974018, 0.049454916,
+ -0.027794661, -0.08077226, -0.016179763, 0.1169753,
+ 0.17213494, -0.0056326236, -0.053934924, -0.0124349,
+ -0.11520337, 0.05409887, 0.088759385, 0.0019655675,
+ 0.0042065294, 0.03881498, 0.019844765, 0.041858196,
+ -0.05695512, 0.047233116, 0.038937137, -0.06542224,
+ 0.014429736, -0.09719407, 0.13908425, -0.05379757,
+ 0.012321099, 0.082840554, -0.029899208, 0.044217527,
+ 0.059855383, 0.07711018, -0.045319796, 0.0948846,
+ -0.011724666, -0.0033288454, -0.033542685, -0.04764985,
+ -0.13873616, 0.040668588, 0.034832682, -0.015319203,
+ -0.018715994, 0.046002675, 0.0599172, -0.043107376,
+ 0.0294216, -0.002314414, -0.022424703, 0.0030315618,
+ 0.0014641669, 0.0029166266, -0.11878115, 0.013738511,
+ 0.12375372, -0.0006038222, 0.029104086, 0.087442465,
+ 0.052958444, 0.07558703, 0.04817258, 0.044462286,
+ -0.015213451, -0.08783778, -0.0561384, -0.003008196,
+ 0.047060397, -0.002058388, 0.03429439, -0.018839769,
+ 0.024734668, 0.024614193, -0.042046934, 0.09597743,
+ -0.0043254104, 0.04320769, 0.0064070094, -0.0019131786,
+ -0.02558259, -0.022822596, -0.023273505, -0.02464396,
+ -0.10991725, -0.006240552, 0.0074488563, 0.024044557,
+ 0.04383914, -0.046476185, 0.028658995, 0.060410924,
+ 0.050786525, 0.009452605, -0.0073054377, -0.024810238,
+ 0.0052906186, 0.0066939713, -0.0020913032, 0.014515517,
+ 0.015898481, 0.021362653, -0.030262267, 0.016587038,
+ -0.011442813, 0.041154444, -0.007631438, -0.03423484,
+ -0.010977775, 0.036152758, 0.0066366293, 0.11915515,
+ 0.02318443, -0.041350313, 0.021485701, -0.10906167,
+ -0.028218046, -0.00954771, 0.020531068, -0.11995105,
+ -0.03672871, 0.024019798, 0.014255957, -0.05221243,
+ -0.00661567, -0.04630967, 0.033188973, 0.10107534,
+ -0.014027541, 0.030796422, -0.10270911, -0.035999842,
+ 0.15443139, 0.07684145, 0.036571592, -0.035900835,
+ -0.0034699554, 0.06209149, 0.015920248, -0.031122351,
+ -0.03858649, 0.01849943, 0.13872518, 0.01503974,
+ 0.069941424, -0.06948533, -0.0088794185, 0.061282158,
+ -0.047401894, 0.03100163, -0.041533746, -0.10430945,
+ 0.044574402, -0.01425562, -0.024290353, 0.034563623,
+ 0.05866852, 0.023947537, -0.09445152, 0.035450947,
+ 0.02247216, -0.0042998926, 0.061146557, -0.10250651,
+ 0.020881841, -0.06747029, 0.10062043, -0.0023941975,
+ 0.03532124, -0.016341697, 0.09685456, -0.016764693,
+ 0.051808182, 0.05875331, -0.04536488, 0.001626336,
+ -0.028892258, -0.01048663, -0.009793449, -0.017093895,
+ 0.010987891, 0.02357273, -0.00010856845, 0.0099760275,
+ -0.001845119, -0.03551521, 0.0018358806, 0.05763657,
+ -0.01769146, 0.040995963, 0.02235177, -0.060430344,
+ 0.11475477, -0.023854522, 0.10071741, 0.0686208,
+ -0.014250481, 0.034261297, 0.047418304, 0.08562733,
+ -0.030519066, 0.0060542435, 0.014653856, -0.038836084,
+ 0.04096551, 0.032249358, -0.08355519, -0.026823482,
+ 0.056386515, -0.010401743, -0.028396193, 0.08507674,
+ 0.014410365, 0.020995233, 0.17040324, 0.11511526,
+ 0.02459721, 0.0066619175, 0.025853224, -0.023133837,
+ -0.081302024, 0.017264642, -0.009585969, 0.09491168,
+ -0.051313367, 0.054532815, -0.014298593, 0.10657464,
+ 0.007076659, 0.10964551, 0.0409152, 0.008275321,
+ -0.07283536, 0.07937492, 0.04192024, -0.1075027],
+
+ recurrent_to_cell_weights: [
+ -0.037322544, 0.018592842, 0.0056175636, -0.06253426,
+ 0.055647098, -0.05713207, -0.05626563, 0.005559383,
+ 0.03375411, -0.025757805, -0.088049285, 0.06017052,
+ -0.06570978, 0.007384076, 0.035123326, -0.07920549,
+ 0.053676967, 0.044480428, -0.07663568, 0.0071805613,
+ 0.08089997, 0.05143358, 0.038261272, 0.03339287,
+ -0.027673481, 0.044746667, 0.028349208, 0.020090483,
+ -0.019443132, -0.030755889, -0.0040000007, 0.04465846,
+ -0.021585021, 0.0031670958, 0.0053199246, -0.056117613,
+ -0.10893326, 0.076739706, -0.08509834, -0.027997585,
+ 0.037871376, 0.01449768, -0.09002357, -0.06111149,
+ -0.046195522, 0.0422062, -0.005683705, -0.1253618,
+ -0.012925729, -0.04890792, 0.06985068, 0.037654128,
+ 0.03398274, -0.004781977, 0.007032333, -0.031787455,
+ 0.010868644, -0.031489216, 0.09525667, 0.013939797,
+ 0.0058680447, 0.0167067, 0.02668468, -0.04797466,
+ -0.048885044, -0.12722108, 0.035304096, 0.06554885,
+ 0.00972396, -0.039238118, -0.05159735, -0.11329045,
+ 0.1613692, -0.03750952, 0.06529313, -0.071974665,
+ -0.11769596, 0.015524369, -0.0013754242, -0.12446318,
+ 0.02786344, -0.014179351, 0.005264273, 0.14376344,
+ 0.015983658, 0.03406988, -0.06939408, 0.040699873,
+ 0.02111075, 0.09669095, 0.041345075, -0.08316494,
+ -0.07684199, -0.045768797, 0.032298047, -0.041805092,
+ 0.0119405, 0.0061010392, 0.12652606, 0.0064572375,
+ -0.024950314, 0.11574242, 0.04508852, -0.04335324,
+ 0.06760663, -0.027437469, 0.07216407, 0.06977076,
+ -0.05438599, 0.034033038, -0.028602652, 0.05346137,
+ 0.043184172, -0.037189785, 0.10420091, 0.00882477,
+ -0.054019816, -0.074273005, -0.030617684, -0.0028467078,
+ 0.024302477, -0.0038869337, 0.005332455, 0.0013399826,
+ 0.04361412, -0.007001822, 0.09631092, -0.06702025,
+ -0.042049985, -0.035070654, -0.04103342, -0.10273396,
+ 0.0544271, 0.037184782, -0.13150354, -0.0058036847,
+ -0.008264958, 0.042035464, 0.05891794, 0.029673764,
+ 0.0063542654, 0.044788733, 0.054816857, 0.062257513,
+ -0.00093483756, 0.048938446, -0.004952862, -0.007730018,
+ -0.04043371, -0.017094059, 0.07229206, -0.023670016,
+ -0.052195564, -0.025616996, -0.01520939, 0.045104615,
+ -0.007376126, 0.003533447, 0.006570588, 0.056037236,
+ 0.12436656, 0.051817212, 0.028532185, -0.08686856,
+ 0.11868599, 0.07663395, -0.07323171, 0.03463402,
+ -0.050708205, -0.04458982, -0.11590894, 0.021273347,
+ 0.1251325, -0.15313013, -0.12224372, 0.17228661,
+ 0.023029093, 0.086124025, 0.006445803, -0.03496501,
+ 0.028332196, 0.04449512, -0.042436164, -0.026587414,
+ -0.006041347, -0.09292539, -0.05678812, 0.03897832,
+ 0.09465633, 0.008115513, -0.02171956, 0.08304309,
+ 0.071401566, 0.019622514, 0.032163795, -0.004167056,
+ 0.02295182, 0.030739572, 0.056506045, 0.004612461,
+ 0.06524936, 0.059999723, 0.046395954, -0.0045512207,
+ -0.1335546, -0.030136576, 0.11584653, -0.014678886,
+ 0.0020118146, -0.09688814, -0.0790206, 0.039770417,
+ -0.0329582, 0.07922767, 0.029322514, 0.026405897,
+ 0.04207835, -0.07073373, 0.063781224, 0.0859677,
+ -0.10925287, -0.07011058, 0.048005477, 0.03438226,
+ -0.09606514, -0.006669445, -0.043381985, 0.04240257,
+ -0.06955775, -0.06769346, 0.043903265, -0.026784198,
+ -0.017840602, 0.024307009, -0.040079936, -0.019946516,
+ 0.045318738, -0.12233574, 0.026170589, 0.0074471775,
+ 0.15978073, 0.10185836, 0.10298046, -0.015476589,
+ -0.039390966, -0.072174534, 0.0739445, -0.1211869,
+ -0.0347889, -0.07943156, 0.014809798, -0.12412325,
+ -0.0030663363, 0.039695457, 0.0647603, -0.08291318,
+ -0.018529687, -0.004423833, 0.0037507233, 0.084633216,
+ -0.01514876, -0.056505352, -0.012800942, -0.06994386,
+ 0.012962922, -0.031234352, 0.07029052, 0.016418684,
+ 0.03618972, 0.055686004, -0.08663945, -0.017404709,
+ -0.054761406, 0.029065743, 0.052404847, 0.020238016,
+ 0.0048197987, -0.0214882, 0.07078733, 0.013016777,
+ 0.06262858, 0.009184685, 0.020785125, -0.043904778,
+ -0.0270329, -0.03299152, -0.060088247, -0.015162964,
+ -0.001828936, 0.12642565, -0.056757294, 0.013586685,
+ 0.09232601, -0.035886683, 0.06000002, 0.05229691,
+ -0.052580316, -0.082029596, -0.010794592, 0.012947712,
+ -0.036429964, -0.085508935, -0.13127148, -0.017744139,
+ 0.031502828, 0.036232427, -0.031581745, 0.023051167,
+ -0.05325106, -0.03421577, 0.028793324, -0.034633752,
+ -0.009881397, -0.043551125, -0.018609839, 0.0019097115,
+ -0.008799762, 0.056595087, 0.0022273948, 0.055752404],
+
+ recurrent_to_output_weights: [
+ 0.025825322, -0.05813119, 0.09495884, -0.045984812, -0.01255415,
+ -0.0026479573, -0.08196161, -0.054914974, -0.0046604523, -0.029587349,
+ -0.044576716, -0.07480124, -0.082868785, 0.023254942, 0.027502948,
+ -0.0039728214, -0.08683098, -0.08116779, -0.014675607, -0.037924774,
+ -0.023314456, -0.007401714, -0.09255757, 0.029460307, -0.08829125,
+ -0.005139627, -0.08989442, -0.0555066, 0.13596267, -0.025062224,
+ -0.048351806, -0.03850004, 0.07266485, -0.022414139, 0.05940088,
+ 0.075114764, 0.09597592, -0.010211725, -0.0049794707, -0.011523867,
+ -0.025980417, 0.072999895, 0.11091378, -0.081685916, 0.014416728,
+ 0.043229222, 0.034178585, -0.07530371, 0.035837382, -0.085607,
+ -0.007721233, -0.03287832, -0.043848954, -0.06404588, -0.06632928,
+ -0.073643476, 0.008214239, -0.045984086, 0.039764922, 0.03474462,
+ 0.060612556, -0.080590084, 0.049127717, 0.04151091, -0.030063879,
+ 0.008801774, -0.023021035, -0.019558564, 0.05158114, -0.010947698,
+ -0.011825728, 0.0075720972, 0.0699727, -0.0039981045, 0.069350146,
+ 0.08799282, 0.016156472, 0.035502106, 0.11695009, 0.006217345,
+ 0.13392477, -0.037875112, 0.025745004, 0.08940699, -0.00924166,
+ 0.0046702605, -0.036598757, -0.08811812, 0.10522024, -0.032441203,
+ 0.008176899, -0.04454919, 0.07058152, 0.0067963637, 0.039206743,
+ 0.03259838, 0.03725492, -0.09515802, 0.013326398, -0.052055415,
+ -0.025676316, 0.03198509, -0.015951829, -0.058556724, 0.036879618,
+ 0.043357447, 0.028362012, -0.05908629, 0.0059240665, -0.04995891,
+ -0.019187413, 0.0276265, -0.01628143, 0.0025863599, 0.08800015,
+ 0.035250366, -0.022165963, -0.07328642, -0.009415526, -0.07455109,
+ 0.11690406, 0.0363299, 0.07411125, 0.042103454, -0.009660886,
+ 0.019076364, 0.018299393, -0.046004917, 0.08891175, 0.0431396,
+ -0.026327137, -0.051502608, 0.08979574, -0.051670972, 0.04940282,
+ -0.07491107, -0.021240504, 0.022596184, -0.034280192, 0.060163025,
+ -0.058211457, -0.051837247, -0.01349775, -0.04639988, -0.035936575,
+ -0.011681591, 0.064818054, 0.0073146066, -0.021745546, -0.043124277,
+ -0.06471268, -0.07053354, -0.029321948, -0.05330136, 0.016933719,
+ -0.053782392, 0.13747959, -0.1361751, -0.11569455, 0.0033329215,
+ 0.05693899, -0.053219706, 0.063698, 0.07977434, -0.07924483,
+ 0.06936997, 0.0034815092, -0.007305279, -0.037325785, -0.07251102,
+ -0.033633437, -0.08677009, 0.091591336, -0.14165086, 0.021752775,
+ 0.019683983, 0.0011612234, -0.058154266, 0.049996935, 0.0288841,
+ -0.0024567875, -0.14345716, 0.010955264, -0.10234828, 0.1183656,
+ -0.0010731248, -0.023590032, -0.072285876, -0.0724771, -0.026382286,
+ -0.0014920527, 0.042667855, 0.0018776858, 0.02986552, 0.009814309,
+ 0.0733756, 0.12289186, 0.018043943, -0.0458958, 0.049412545,
+ 0.033632483, 0.05495232, 0.036686596, -0.013781798, -0.010036754,
+ 0.02576849, -0.08307328, 0.010112348, 0.042521734, -0.05869831,
+ -0.071689695, 0.03876447, -0.13275425, -0.0352966, -0.023077697,
+ 0.10285965, 0.084736146, 0.15568255, -0.00040734606, 0.027835453,
+ -0.10292561, -0.032401145, 0.10053256, -0.026142767, -0.08271222,
+ -0.0030240538, -0.016368777, 0.1070414, 0.042672627, 0.013456989,
+ -0.0437609, -0.022309763, 0.11576483, 0.04108048, 0.061026827,
+ -0.0190714, -0.0869359, 0.037901703, 0.0610107, 0.07202949,
+ 0.01675338, 0.086139716, -0.08795751, -0.014898893, -0.023771819,
+ -0.01965048, 0.007955471, -0.043740474, 0.03346837, -0.10549954,
+ 0.090567775, 0.042013682, -0.03176985, 0.12569028, -0.02421228,
+ -0.029526481, 0.023851605, 0.031539805, 0.05292009, -0.02344001,
+ -0.07811758, -0.08834428, 0.10094801, 0.16594367, -0.06861939,
+ -0.021256343, -0.041093912, -0.06669611, 0.035498552, 0.021757556,
+ -0.09302526, -0.015403468, -0.06614931, -0.051798206, -0.013874718,
+ 0.03630673, 0.010412845, -0.08077351, 0.046185967, 0.0035662893,
+ 0.03541868, -0.094149634, -0.034814864, 0.003128424, -0.020674974,
+ -0.03944324, -0.008110165, -0.11113267, 0.08484226, 0.043586485,
+ 0.040582247, 0.0968012, -0.065249965, -0.028036479, 0.0050708856,
+ 0.0017462453, 0.0326779, 0.041296225, 0.09164146, -0.047743853,
+ -0.015952192, -0.034451712, 0.084197424, -0.05347844, -0.11768019,
+ 0.085926116, -0.08251791, -0.045081906, 0.0948852, 0.068401024,
+ 0.024856757, 0.06978981, -0.057309967, -0.012775832, -0.0032452994,
+ 0.01977615, -0.041040014, -0.024264973, 0.063464895, 0.05431621],
+
+ cell_to_input_weights: [
+ 0.040369894, 0.030746894, 0.24704495, 0.018586371, -0.037586458,
+ -0.15312155, -0.11812848, -0.11465643, 0.20259799, 0.11418174,
+ -0.10116027, -0.011334949, 0.12411352, -0.076769054, -0.052169047,
+ 0.21198851, -0.38871562, -0.09061183, -0.09683246, -0.21929175],
+
+ cell_to_forget_weights: [
+ -0.01998659, -0.15568835, -0.24248174, -0.012770197, 0.041331276,
+ -0.072311886, -0.052123554, -0.0066330447, -0.043891653, 0.036225766,
+ -0.047248036, 0.021479502, 0.033189066, 0.11952997, -0.020432774,
+ 0.64658105, -0.06650122, -0.03467612, 0.095340036, 0.23647355],
+
+ cell_to_output_weights: [
+ 0.08286371, -0.08261836, -0.51210177, 0.002913762, 0.17764764,
+ -0.5495371, -0.08460716, -0.24552552, 0.030037103, 0.04123544,
+ -0.11940523, 0.007358328, 0.1890978, 0.4833202, -0.34441817,
+ 0.36312827, -0.26375428, 0.1457655, -0.19724406, 0.15548733],
+
+ projection_weights: [
+ -0.009802181, 0.09401916, 0.0717386, -0.13895074, 0.09641832,
+ 0.060420845, 0.08539281, 0.054285463, 0.061395317, 0.034448683,
+ -0.042991187, 0.019801661, -0.16840284, -0.015726732, -0.23041931,
+ -0.024478018, -0.10959692, -0.013875541, 0.18600968, -0.061274476,
+ 0.0138165, -0.08160894, -0.07661644, 0.032372914, 0.16169067,
+ 0.22465782, -0.03993472, -0.004017731, 0.08633481, -0.28869787,
+ 0.08682067, 0.17240396, 0.014975425, 0.056431185, 0.031037588,
+ 0.16702051, 0.0077946745, 0.15140012, 0.29405436, 0.120285,
+ -0.188994, -0.027265169, 0.043389652, -0.022061434, 0.014777949,
+ -0.20203483, 0.094781205, 0.19100232, 0.13987629, -0.036132768,
+ -0.06426278, -0.05108664, 0.13221376, 0.009441198, -0.16715929,
+ 0.15859416, -0.040437475, 0.050779544, -0.022187516, 0.012166504,
+ 0.027685808, -0.07675938, -0.0055694645, -0.09444123, 0.0046453946,
+ 0.050794356, 0.10770313, -0.20790008, -0.07149004, -0.11425117,
+ 0.008225835, -0.035802525, 0.14374903, 0.15262283, 0.048710253,
+ 0.1847461, -0.007487823, 0.11000021, -0.09542012, 0.22619456,
+ -0.029149994, 0.08527916, 0.009043713, 0.0042746216, 0.016261552,
+ 0.022461696, 0.12689082, -0.043589946, -0.12035478, -0.08361797,
+ -0.050666027, -0.1248618, -0.1275799, -0.071875185, 0.07377272,
+ 0.09944291, -0.18897448, -0.1593054, -0.06526116, -0.040107165,
+ -0.004618631, -0.067624845, -0.007576253, 0.10727444, 0.041546922,
+ -0.20424393, 0.06907816, 0.050412357, 0.00724631, 0.039827548,
+ 0.12449835, 0.10747581, 0.13708383, 0.09134148, -0.12617786,
+ -0.06428341, 0.09956831, 0.1208086, -0.14676677, -0.0727722,
+ 0.1126304, 0.010139365, 0.015571211, -0.038128063, 0.022913318,
+ -0.042050496, 0.16842307, -0.060597885, 0.10531834, -0.06411776,
+ -0.07451711, -0.03410368, -0.13393489, 0.06534304, 0.003620307,
+ 0.04490757, 0.05970546, 0.05197996, 0.02839995, 0.10434969,
+ -0.013699693, -0.028353551, -0.07260381, 0.047201227, -0.024575593,
+ -0.036445823, 0.07155557, 0.009672501, -0.02328883, 0.009533515,
+ -0.03606021, -0.07421458, -0.028082801, -0.2678904, -0.13221288,
+ 0.18419984, -0.13012612, -0.014588381, -0.035059117, -0.04824723,
+ 0.07830115, -0.056184657, 0.03277091, 0.025466874, 0.14494097,
+ -0.12522776, -0.098633975, -0.10766018, -0.08317623, 0.08594209,
+ 0.07749552, 0.039474737, 0.1776665, -0.07409566, -0.0477268,
+ 0.29323658, 0.10801441, 0.1154011, 0.013952499, 0.10739139,
+ 0.10708251, -0.051456142, 0.0074137426, -0.10430189, 0.10034707,
+ 0.045594677, 0.0635285, -0.0715442, -0.089667566, -0.10811871,
+ 0.00026344223, 0.08298446, -0.009525053, 0.006585689, -0.24567553,
+ -0.09450807, 0.09648481, 0.026996298, -0.06419476, -0.04752702,
+ -0.11063944, -0.23441927, -0.17608605, -0.052156363, 0.067035615,
+ 0.19271925, -0.0032889997, -0.043264326, 0.09663576, -0.057112187,
+ -0.10100678, 0.0628376, 0.04447668, 0.017961001, -0.10094388,
+ -0.10190601, 0.18335468, 0.10494553, -0.052095775, -0.0026118709,
+ 0.10539724, -0.04383912, -0.042349473, 0.08438151, -0.1947263,
+ 0.02251204, 0.11216432, -0.10307853, 0.17351969, -0.039091777,
+ 0.08066188, -0.00561982, 0.12633002, 0.11335965, -0.0088127935,
+ -0.019777594, 0.06864014, -0.059751723, 0.016233567, -0.06894641,
+ -0.28651384, -0.004228674, 0.019708522, -0.16305895, -0.07468996,
+ -0.0855457, 0.099339016, -0.07580735, -0.13775392, 0.08434318,
+ 0.08330512, -0.12131499, 0.031935584, 0.09180414, -0.08876437,
+ -0.08049874, 0.008753825, 0.03498998, 0.030215185, 0.03907079,
+ 0.089751154, 0.029194152, -0.03337423, -0.019092513, 0.04331237,
+ 0.04299654, -0.036394123, -0.12915532, 0.09793732, 0.07512415,
+ -0.11319543, -0.032502122, 0.15661901, 0.07671967, -0.005491124,
+ -0.19379048, -0.218606, 0.21448623, 0.017840758, 0.1416943,
+ -0.07051762, 0.19488361, 0.02664691, -0.18104725, -0.09334311,
+ 0.15026465, -0.15493552, -0.057762887, -0.11604192, -0.262013,
+ -0.01391798, 0.012185008, 0.11156489, -0.07483202, 0.06693364,
+ -0.26151478, 0.046425626, 0.036540434, -0.16435726, 0.17338543,
+ -0.21401681, -0.11385144, -0.08283257, -0.069031075, 0.030635102,
+ 0.010969227, 0.11109743, 0.010919218, 0.027526086, 0.13519906,
+ 0.01891392, -0.046839405, -0.040167913, 0.017953383, -0.09700955,
+ 0.0061885654, -0.07000971, 0.026893595, -0.038844477, 0.14543656],
+
+ projection_bias: [],
+}
+
+# Batch0: 4 (input_sequence_size) * 5 (n_input)
+input0[input] = [0.867394, 0.291279, 0.013714, 0.482521, 0.626339]
+# Batch1: 4 (input_sequence_size) * 5 (n_input)
+input0[input].extend(
+ [0.082922, 0.563329, 0.865614, 0.333232, 0.259916]
+)
+input0[output_state_in] = [
+ -0.0213783, 0.0350169, 0.000324787, 0.0276012,
+ -0.0263374, -0.0371449, 0.0446149, -0.0205474,
+ 0.0103729, -0.0576349, -0.0150052, -0.0292043,
+ 0.0376827, 0.0136115, 0.0243435, 0.0354492,
+ -0.0204549, 0.0450315, -0.00117379, 0.0167673,
+ -0.0375007, -0.0238314, 0.038784, -0.0174034,
+ 0.0131743, -0.0506589, -0.00484469, -0.0240239,
+ 0.0325789, 0.00790064, 0.0220157, 0.0333314,
+]
+input0[cell_state_in] = [
+ -0.126572, -0.121882, 0.121569, 0.0489971,
+ -0.240177, -0.124685, -0.122565, 0.0162748,
+ 0.0317536, -0.0270355, 0.0418199, -0.179755,
+ -0.327279, -0.0342741, 0.133831, -0.0238279,
+ 0.122148, 0.269115, 0.185989, 0.525976,
+ -0.167208, -0.109612, 0.0531226, 0.0695387,
+ -0.248335, -0.134123, -0.108246, 0.00628498,
+ 0.0492984, -0.0264919, 0.0698144, -0.0635602,
+ -0.295363, -0.0760078, 0.102725, -0.0351708,
+ 0.149804, 0.259131, 0.202573, 0.500664,
+]
+output0 = {
+ scratch_buffer: [ 0 for x in range(n_batch * n_cell * 4) ],
+ cell_state_out: [ 0 for x in range(n_batch * n_cell) ],
+ output_state_out: [ 0 for x in range(n_batch * n_output) ],
+}
+
+# Batch0: 4 (input_sequence_size) * 16 (n_output)
+output0[output] = [
+ -0.0189322, 0.0464512, -0.00251373, 0.0225745,
+ -0.0308346, -0.0317124, 0.0460407, -0.0189395,
+ 0.0149363, -0.0530162, -0.0150767, -0.0340193,
+ 0.0286833, 0.00824207, 0.0264887, 0.0305169]
+# Batch1: 4 (input_sequence_size) * 16 (n_output)
+output0[output].extend(
+ [-0.0264787, 0.0387855, -0.000764675, 0.0217599,
+ -0.037537, -0.0335206, 0.0431679, -0.0211424,
+ 0.010203, -0.062785, -0.00832363, -0.025181,
+ 0.0412031, 0.0118723, 0.0239643, 0.0394009]
+ )
+
+Example((input0, output0))
diff --git a/tests/nnapi/specs/skip/V1_2/lstm3_state_float16.mod.py b/tests/nnapi/specs/skip/V1_2/lstm3_state_float16.mod.py
new file mode 100644
index 000000000..695f0f53b
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/lstm3_state_float16.mod.py
@@ -0,0 +1,683 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# LSTM Test, With Peephole, With Projection, No Clipping
+
+model = Model()
+
+n_batch = 2
+n_input = 5
+# n_cell and n_output have the same size when there is no projection.
+n_cell = 20
+n_output = 16
+
+input = Input("input", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_input))
+
+input_to_input_weights = Input("input_to_input_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+input_to_forget_weights = Input("input_to_forget_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+input_to_cell_weights = Input("input_to_cell_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+input_to_output_weights = Input("input_to_output_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+
+recurrent_to_input_weights = Input("recurrent_to_intput_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+recurrent_to_forget_weights = Input("recurrent_to_forget_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+recurrent_to_cell_weights = Input("recurrent_to_cell_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+recurrent_to_output_weights = Input("recurrent_to_output_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+
+cell_to_input_weights = Input("cell_to_input_weights", "TENSOR_FLOAT16", "{%d}" % (n_cell))
+cell_to_forget_weights = Input("cell_to_forget_weights", "TENSOR_FLOAT16", "{%d}" %(n_cell))
+cell_to_output_weights = Input("cell_to_output_weights", "TENSOR_FLOAT16", "{%d}" % (n_cell))
+
+input_gate_bias = Input("input_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+forget_gate_bias = Input("forget_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+cell_gate_bias = Input("cell_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+output_gate_bias = Input("output_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+
+projection_weights = Input("projection_weights", "TENSOR_FLOAT16", "{%d,%d}" % (n_output, n_cell))
+projection_bias = Input("projection_bias", "TENSOR_FLOAT16", "{0}")
+
+output_state_in = Input("output_state_in", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_output))
+cell_state_in = Input("cell_state_in", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_cell))
+
+activation_param = Int32Scalar("activation_param", 4) # Tanh
+cell_clip_param = Float16Scalar("cell_clip_param", 0.)
+proj_clip_param = Float16Scalar("proj_clip_param", 0.)
+
+scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, (n_cell * 4)))
+output_state_out = Output("output_state_out", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_output))
+cell_state_out = Output("cell_state_out", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_cell))
+output = Output("output", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_output))
+
+# TODO: need support for more than one output
+model = model.Operation("LSTM",
+ input,
+
+ input_to_input_weights,
+ input_to_forget_weights,
+ input_to_cell_weights,
+ input_to_output_weights,
+
+ recurrent_to_input_weights,
+ recurrent_to_forget_weights,
+ recurrent_to_cell_weights,
+ recurrent_to_output_weights,
+
+ cell_to_input_weights,
+ cell_to_forget_weights,
+ cell_to_output_weights,
+
+ input_gate_bias,
+ forget_gate_bias,
+ cell_gate_bias,
+ output_gate_bias,
+
+ projection_weights,
+ projection_bias,
+
+ output_state_in,
+ cell_state_in,
+
+ activation_param,
+ cell_clip_param,
+ proj_clip_param
+).To([scratch_buffer, output_state_out, cell_state_out, output])
+
+input0 = {input_to_input_weights: [
+ 0.021393683, 0.06124551, 0.046905167, -0.014657677, -0.03149463,
+ 0.09171803, 0.14647801, 0.10797193, -0.0057968358, 0.0019193048,
+ -0.2726754, 0.10154029, -0.018539885, 0.080349885, -0.10262385,
+ -0.022599787, -0.09121155, -0.008675967, -0.045206103, -0.0821282,
+ -0.008045952, 0.015478081, 0.055217247, 0.038719587, 0.044153627,
+ -0.06453243, 0.05031825, -0.046935108, -0.008164439, 0.014574226,
+ -0.1671009, -0.15519552, -0.16819797, -0.13971269, -0.11953059,
+ 0.25005487, -0.22790983, 0.009855087, -0.028140958, -0.11200698,
+ 0.11295408, -0.0035217577, 0.054485075, 0.05184695, 0.064711206,
+ 0.10989193, 0.11674786, 0.03490607, 0.07727357, 0.11390585,
+ -0.1863375, -0.1034451, -0.13945189, -0.049401227, -0.18767063,
+ 0.042483903, 0.14233552, 0.13832581, 0.18350165, 0.14545603,
+ -0.028545704, 0.024939531, 0.050929718, 0.0076203286, -0.0029723682,
+ -0.042484224, -0.11827596, -0.09171104, -0.10808628, -0.16327988,
+ -0.2273378, -0.0993647, -0.017155107, 0.0023917493, 0.049272764,
+ 0.0038534778, 0.054764505, 0.089753784, 0.06947234, 0.08014476,
+ -0.04544234, -0.0497073, -0.07135631, -0.048929106, -0.004042012,
+ -0.009284026, 0.018042054, 0.0036860977, -0.07427302, -0.11434604,
+ -0.018995456, 0.031487543, 0.012834908, 0.019977754, 0.044256654,
+ -0.39292613, -0.18519334, -0.11651281, -0.06809892, 0.011373677],
+
+ input_to_forget_weights: [
+ -0.0018401089, -0.004852237, 0.03698424, 0.014181704, 0.028273236,
+ -0.016726194, -0.05249759, -0.10204261, 0.00861066, -0.040979505,
+ -0.009899187, 0.01923892, -0.028177269, -0.08535103, -0.14585495,
+ 0.10662567, -0.01909731, -0.017883534, -0.0047269356, -0.045103323,
+ 0.0030784295, 0.076784775, 0.07463696, 0.094531395, 0.0814421,
+ -0.12257899, -0.033945758, -0.031303465, 0.045630626, 0.06843887,
+ -0.13492945, -0.012480007, -0.0811829, -0.07224499, -0.09628791,
+ 0.045100946, 0.0012300825, 0.013964662, 0.099372394, 0.02543059,
+ 0.06958324, 0.034257296, 0.0482646, 0.06267997, 0.052625068,
+ 0.12784666, 0.07077897, 0.025725935, 0.04165009, 0.07241905,
+ 0.018668644, -0.037377294, -0.06277783, -0.08833636, -0.040120605,
+ -0.011405586, -0.007808335, -0.010301386, -0.005102167, 0.027717464,
+ 0.05483423, 0.11449111, 0.11289652, 0.10939839, 0.13396506,
+ -0.08402166, -0.01901462, -0.044678304, -0.07720565, 0.014350063,
+ -0.11757958, -0.0652038, -0.08185733, -0.076754324, -0.092614375,
+ 0.10405491, 0.052960336, 0.035755895, 0.035839386, -0.012540553,
+ 0.036881298, 0.02913376, 0.03420159, 0.05448447, -0.054523353,
+ 0.02582715, 0.02327355, -0.011857179, -0.0011980024, -0.034641717,
+ -0.026125094, -0.17582615, -0.15923657, -0.27486774, -0.0006143371,
+ 0.0001771948, -8.470171e-05, 0.02651807, 0.045790765, 0.06956496],
+
+ input_to_cell_weights: [
+ -0.04580283, -0.09549462, -0.032418985, -0.06454633,
+ -0.043528453, 0.043018587, -0.049152344, -0.12418144,
+ -0.078985475, -0.07596889, 0.019484362, -0.11434962,
+ -0.0074034138, -0.06314844, -0.092981495, 0.0062155537,
+ -0.025034338, -0.0028890965, 0.048929527, 0.06235075,
+ 0.10665918, -0.032036792, -0.08505916, -0.10843358,
+ -0.13002433, -0.036816437, -0.02130134, -0.016518239,
+ 0.0047691227, -0.0025825808, 0.066017866, 0.029991534,
+ -0.10652836, -0.1037554, -0.13056071, -0.03266643,
+ -0.033702414, -0.006473424, -0.04611692, 0.014419339,
+ -0.025174323, 0.0396852, 0.081777506, 0.06157468,
+ 0.10210095, -0.009658194, 0.046511717, 0.03603906,
+ 0.0069369148, 0.015960095, -0.06507666, 0.09551598,
+ 0.053568836, 0.06408714, 0.12835667, -0.008714329,
+ -0.20211966, -0.12093674, 0.029450472, 0.2849013,
+ -0.029227901, 0.1164364, -0.08560263, 0.09941786,
+ -0.036999565, -0.028842626, -0.0033637602, -0.017012902,
+ -0.09720865, -0.11193351, -0.029155117, -0.017936034,
+ -0.009768936, -0.04223324, -0.036159635, 0.06505112,
+ -0.021742892, -0.023377212, -0.07221364, -0.06430552,
+ 0.05453865, 0.091149814, 0.06387331, 0.007518393,
+ 0.055960953, 0.069779344, 0.046411168, 0.10509911,
+ 0.07463894, 0.0075130584, 0.012850982, 0.04555431,
+ 0.056955688, 0.06555285, 0.050801456, -0.009862683,
+ 0.00826772, -0.026555609, -0.0073611983, -0.0014897042],
+
+ input_to_output_weights: [
+ -0.0998932, -0.07201956, -0.052803773, -0.15629593, -0.15001918,
+ -0.07650751, 0.02359855, -0.075155355, -0.08037709, -0.15093534,
+ 0.029517552, -0.04751393, 0.010350531, -0.02664851, -0.016839722,
+ -0.023121163, 0.0077019283, 0.012851257, -0.05040649, -0.0129761,
+ -0.021737747, -0.038305793, -0.06870586, -0.01481247, -0.001285394,
+ 0.10124236, 0.083122835, 0.053313006, -0.062235646, -0.075637154,
+ -0.027833903, 0.029774971, 0.1130802, 0.09218906, 0.09506135,
+ -0.086665764, -0.037162706, -0.038880914, -0.035832845, -0.014481564,
+ -0.09825003, -0.12048569, -0.097665586, -0.05287633, -0.0964047,
+ -0.11366429, 0.035777505, 0.13568819, 0.052451383, 0.050649304,
+ 0.05798951, -0.021852335, -0.099848844, 0.014740475, -0.078897946,
+ 0.04974699, 0.014160473, 0.06973932, 0.04964942, 0.033364646,
+ 0.08190124, 0.025535367, 0.050893165, 0.048514254, 0.06945813,
+ -0.078907564, -0.06707616, -0.11844508, -0.09986688, -0.07509403,
+ 0.06263226, 0.14925587, 0.20188436, 0.12098451, 0.14639415,
+ 0.0015017595, -0.014267382, -0.03417257, 0.012711468, 0.0028300495,
+ -0.024758482, -0.05098548, -0.0821182, 0.014225672, 0.021544158,
+ 0.08949725, 0.07505268, -0.0020780868, 0.04908258, 0.06476295,
+ -0.022907063, 0.027562456, 0.040185735, 0.019567577, -0.015598739,
+ -0.049097303, -0.017121866, -0.083368234, -0.02332002, -0.0840956],
+
+ input_gate_bias: [
+ 0.02234832, 0.14757581, 0.18176508, 0.10380666, 0.053110216,
+ -0.06928846, -0.13942584, -0.11816189, 0.19483899, 0.03652339,
+ -0.10250295, 0.036714908, -0.18426876, 0.036065217, 0.21810818,
+ 0.02383196, -0.043370757, 0.08690144, -0.04444982, 0.00030581196],
+
+ forget_gate_bias: [
+ 0.035185695, -0.042891346, -0.03032477, 0.23027696,
+ 0.11098921, 0.15378423, 0.09263801, 0.09790885,
+ 0.09508917, 0.061199076, 0.07665568, -0.015443159,
+ -0.03499149, 0.046190713, 0.08895977, 0.10899629,
+ 0.40694186, 0.06030037, 0.012413437, -0.06108739],
+
+ cell_gate_bias: [
+ -0.024379363, 0.0055531194, 0.23377132, 0.033463873,
+ -0.1483596, -0.10639995, -0.091433935, 0.058573797,
+ -0.06809782, -0.07889636, -0.043246906, -0.09829136,
+ -0.4279842, 0.034901652, 0.18797937, 0.0075234566,
+ 0.016178843, 0.1749513, 0.13975595, 0.92058027],
+
+ output_gate_bias: [
+ 0.046159424, -0.0012809046, 0.03563469, 0.12648113, 0.027195795,
+ 0.35373217, -0.018957434, 0.008907322, -0.0762701, 0.12018895,
+ 0.04216877, 0.0022856654, 0.040952638, 0.3147856, 0.08225149,
+ -0.057416286, -0.14995944, -0.008040261, 0.13208859, 0.029760877],
+
+ recurrent_to_input_weights: [
+ -0.001374326, -0.078856036, 0.10672688, 0.029162422,
+ -0.11585556, 0.02557986, -0.13446963, -0.035785314,
+ -0.01244275, 0.025961924, -0.02337298, -0.044228926,
+ -0.055839065, -0.046598054, -0.010546039, -0.06900766,
+ 0.027239809, 0.022582639, -0.013296484, -0.05459212,
+ 0.08981, -0.045407712, 0.08682226, -0.06867011,
+ -0.14390695, -0.02916037, 0.000996957, 0.091420636,
+ 0.14283475, -0.07390571, -0.06402044, 0.062524505,
+ -0.093129106, 0.04860203, -0.08364217, -0.08119002,
+ 0.009352075, 0.22920375, 0.0016303885, 0.11583097,
+ -0.13732095, 0.012405723, -0.07551853, 0.06343048,
+ 0.12162708, -0.031923793, -0.014335606, 0.01790974,
+ -0.10650317, -0.0724401, 0.08554849, -0.05727212,
+ 0.06556731, -0.042729504, -0.043227166, 0.011683251,
+ -0.013082158, -0.029302018, -0.010899579, -0.062036745,
+ -0.022509435, -0.00964907, -0.01567329, 0.04260106,
+ -0.07787477, -0.11576462, 0.017356863, 0.048673786,
+ -0.017577527, -0.05527947, -0.082487635, -0.040137455,
+ -0.10820036, -0.04666372, 0.022746278, -0.07851417,
+ 0.01068115, 0.032956902, 0.022433773, 0.0026891115,
+ 0.08944216, -0.0685835, 0.010513544, 0.07228705,
+ 0.02032331, -0.059686817, -0.0005566496, -0.086984694,
+ 0.040414046, -0.1380399, 0.094208956, -0.05722982,
+ 0.012092817, -0.04989123, -0.086576, -0.003399834,
+ -0.04696032, -0.045747425, 0.10091314, 0.048676282,
+ -0.029037097, 0.031399418, -0.0040285117, 0.047237843,
+ 0.09504992, 0.041799378, -0.049185462, -0.031518843,
+ -0.10516937, 0.026374253, 0.10058866, -0.0033195973,
+ -0.041975245, 0.0073591834, 0.0033782164, -0.004325073,
+ -0.10167381, 0.042500053, -0.01447153, 0.06464186,
+ -0.017142897, 0.03312627, 0.009205989, 0.024138335,
+ -0.011337001, 0.035530265, -0.010912711, 0.0706555,
+ -0.005894094, 0.051841937, -0.1401738, -0.02351249,
+ 0.0365468, 0.07590991, 0.08838724, 0.021681072,
+ -0.10086113, 0.019608743, -0.06195883, 0.077335775,
+ 0.023646897, -0.095322326, 0.02233014, 0.09756986,
+ -0.048691444, -0.009579111, 0.07595467, 0.11480546,
+ -0.09801813, 0.019894179, 0.08502348, 0.004032281,
+ 0.037211012, 0.068537936, -0.048005626, -0.091520436,
+ -0.028379958, -0.01556313, 0.06554592, -0.045599163,
+ -0.01672207, -0.020169014, -0.011877351, -0.20212261,
+ 0.010889619, 0.0047078193, 0.038385306, 0.08540671,
+ -0.017140968, -0.0035865551, 0.016678626, 0.005633034,
+ 0.015963363, 0.00871737, 0.060130805, 0.028611384,
+ 0.10109069, -0.015060172, -0.07894427, 0.06401885,
+ 0.011584063, -0.024466386, 0.0047652307, -0.09041358,
+ 0.030737216, -0.0046374933, 0.14215417, -0.11823516,
+ 0.019899689, 0.006106124, -0.027092824, 0.0786356,
+ 0.05052217, -0.058925, -0.011402121, -0.024987547,
+ -0.0013661642, -0.06832946, -0.015667673, -0.1083353,
+ -0.00096863037, -0.06988685, -0.053350925, -0.027275559,
+ -0.033664223, -0.07978348, -0.025200296, -0.017207067,
+ -0.058403496, -0.055697463, 0.005798788, 0.12965427,
+ -0.062582195, 0.0013350133, -0.10482091, 0.0379771,
+ 0.072521195, -0.0029455067, -0.13797039, -0.03628521,
+ 0.013806405, -0.017858358, -0.01008298, -0.07700066,
+ -0.017081132, 0.019358726, 0.0027079724, 0.004635139,
+ 0.062634714, -0.02338735, -0.039547626, -0.02050681,
+ 0.03385117, -0.083611414, 0.002862572, -0.09421313,
+ 0.058618143, -0.08598433, 0.00972939, 0.023867095,
+ -0.053934585, -0.023203006, 0.07452513, -0.048767887,
+ -0.07314807, -0.056307215, -0.10433547, -0.06440842,
+ 0.04328182, 0.04389765, -0.020006588, -0.09076438,
+ -0.11652589, -0.021705797, 0.03345259, -0.010329105,
+ -0.025767034, 0.013057034, -0.07316461, -0.10145612,
+ 0.06358255, 0.18531723, 0.07759293, 0.12006465,
+ 0.1305557, 0.058638252, -0.03393652, 0.09622831,
+ -0.16253184, -2.4580743e-06, 0.079869635, -0.070196845,
+ -0.005644518, 0.06857898, -0.12598175, -0.035084512,
+ 0.03156317, -0.12794146, -0.031963028, 0.04692781,
+ 0.030070418, 0.0071660685, -0.095516115, -0.004643372,
+ 0.040170413, -0.062104587, -0.0037324072, 0.0554317,
+ 0.08184801, -0.019164372, 0.06791302, 0.034257166,
+ -0.10307039, 0.021943003, 0.046745934, 0.0790918,
+ -0.0265588, -0.007824208, 0.042546265, -0.00977924,
+ -0.0002440307, -0.017384544, -0.017990116, 0.12252321,
+ -0.014512694, -0.08251313, 0.08861942, 0.13589665,
+ 0.026351685, 0.012641483, 0.07466548, 0.044301085,
+ -0.045414884, -0.051112458, 0.03444247, -0.08502782,
+ -0.04106223, -0.028126027, 0.028473156, 0.10467447],
+
+ recurrent_to_forget_weights: [
+ -0.057784554, -0.026057621, -0.068447545, -0.022581743,
+ 0.14811787, 0.10826372, 0.09471067, 0.03987225,
+ -0.0039523416, 0.00030638507, 0.053185795, 0.10572994,
+ 0.08414449, -0.022036452, -0.00066928595, -0.09203576,
+ 0.032950465, -0.10985798, -0.023809856, 0.0021431844,
+ -0.02196096, -0.00326074, 0.00058621005, -0.074678116,
+ -0.06193199, 0.055729095, 0.03736828, 0.020123724,
+ 0.061878487, -0.04729229, 0.034919553, -0.07585433,
+ -0.04421272, -0.044019096, 0.085488975, 0.04058006,
+ -0.06890133, -0.030951202, -0.024628663, -0.07672815,
+ 0.034293607, 0.08556707, -0.05293577, -0.033561368,
+ -0.04899627, 0.0241671, 0.015736353, -0.095442444,
+ -0.029564252, 0.016493602, -0.035026584, 0.022337519,
+ -0.026871363, 0.004780428, 0.0077918363, -0.03601621,
+ 0.016435321, -0.03263031, -0.09543275, -0.047392778,
+ 0.013454138, 0.028934088, 0.01685226, -0.086110644,
+ -0.046250615, -0.01847454, 0.047608484, 0.07339695,
+ 0.034546845, -0.04881143, 0.009128804, -0.08802852,
+ 0.03761666, 0.008096139, -0.014454086, 0.014361001,
+ -0.023502491, -0.0011840804, -0.07607001, 0.001856849,
+ -0.06509276, -0.006021153, -0.08570962, -0.1451793,
+ 0.060212336, 0.055259194, 0.06974018, 0.049454916,
+ -0.027794661, -0.08077226, -0.016179763, 0.1169753,
+ 0.17213494, -0.0056326236, -0.053934924, -0.0124349,
+ -0.11520337, 0.05409887, 0.088759385, 0.0019655675,
+ 0.0042065294, 0.03881498, 0.019844765, 0.041858196,
+ -0.05695512, 0.047233116, 0.038937137, -0.06542224,
+ 0.014429736, -0.09719407, 0.13908425, -0.05379757,
+ 0.012321099, 0.082840554, -0.029899208, 0.044217527,
+ 0.059855383, 0.07711018, -0.045319796, 0.0948846,
+ -0.011724666, -0.0033288454, -0.033542685, -0.04764985,
+ -0.13873616, 0.040668588, 0.034832682, -0.015319203,
+ -0.018715994, 0.046002675, 0.0599172, -0.043107376,
+ 0.0294216, -0.002314414, -0.022424703, 0.0030315618,
+ 0.0014641669, 0.0029166266, -0.11878115, 0.013738511,
+ 0.12375372, -0.0006038222, 0.029104086, 0.087442465,
+ 0.052958444, 0.07558703, 0.04817258, 0.044462286,
+ -0.015213451, -0.08783778, -0.0561384, -0.003008196,
+ 0.047060397, -0.002058388, 0.03429439, -0.018839769,
+ 0.024734668, 0.024614193, -0.042046934, 0.09597743,
+ -0.0043254104, 0.04320769, 0.0064070094, -0.0019131786,
+ -0.02558259, -0.022822596, -0.023273505, -0.02464396,
+ -0.10991725, -0.006240552, 0.0074488563, 0.024044557,
+ 0.04383914, -0.046476185, 0.028658995, 0.060410924,
+ 0.050786525, 0.009452605, -0.0073054377, -0.024810238,
+ 0.0052906186, 0.0066939713, -0.0020913032, 0.014515517,
+ 0.015898481, 0.021362653, -0.030262267, 0.016587038,
+ -0.011442813, 0.041154444, -0.007631438, -0.03423484,
+ -0.010977775, 0.036152758, 0.0066366293, 0.11915515,
+ 0.02318443, -0.041350313, 0.021485701, -0.10906167,
+ -0.028218046, -0.00954771, 0.020531068, -0.11995105,
+ -0.03672871, 0.024019798, 0.014255957, -0.05221243,
+ -0.00661567, -0.04630967, 0.033188973, 0.10107534,
+ -0.014027541, 0.030796422, -0.10270911, -0.035999842,
+ 0.15443139, 0.07684145, 0.036571592, -0.035900835,
+ -0.0034699554, 0.06209149, 0.015920248, -0.031122351,
+ -0.03858649, 0.01849943, 0.13872518, 0.01503974,
+ 0.069941424, -0.06948533, -0.0088794185, 0.061282158,
+ -0.047401894, 0.03100163, -0.041533746, -0.10430945,
+ 0.044574402, -0.01425562, -0.024290353, 0.034563623,
+ 0.05866852, 0.023947537, -0.09445152, 0.035450947,
+ 0.02247216, -0.0042998926, 0.061146557, -0.10250651,
+ 0.020881841, -0.06747029, 0.10062043, -0.0023941975,
+ 0.03532124, -0.016341697, 0.09685456, -0.016764693,
+ 0.051808182, 0.05875331, -0.04536488, 0.001626336,
+ -0.028892258, -0.01048663, -0.009793449, -0.017093895,
+ 0.010987891, 0.02357273, -0.00010856845, 0.0099760275,
+ -0.001845119, -0.03551521, 0.0018358806, 0.05763657,
+ -0.01769146, 0.040995963, 0.02235177, -0.060430344,
+ 0.11475477, -0.023854522, 0.10071741, 0.0686208,
+ -0.014250481, 0.034261297, 0.047418304, 0.08562733,
+ -0.030519066, 0.0060542435, 0.014653856, -0.038836084,
+ 0.04096551, 0.032249358, -0.08355519, -0.026823482,
+ 0.056386515, -0.010401743, -0.028396193, 0.08507674,
+ 0.014410365, 0.020995233, 0.17040324, 0.11511526,
+ 0.02459721, 0.0066619175, 0.025853224, -0.023133837,
+ -0.081302024, 0.017264642, -0.009585969, 0.09491168,
+ -0.051313367, 0.054532815, -0.014298593, 0.10657464,
+ 0.007076659, 0.10964551, 0.0409152, 0.008275321,
+ -0.07283536, 0.07937492, 0.04192024, -0.1075027],
+
+ recurrent_to_cell_weights: [
+ -0.037322544, 0.018592842, 0.0056175636, -0.06253426,
+ 0.055647098, -0.05713207, -0.05626563, 0.005559383,
+ 0.03375411, -0.025757805, -0.088049285, 0.06017052,
+ -0.06570978, 0.007384076, 0.035123326, -0.07920549,
+ 0.053676967, 0.044480428, -0.07663568, 0.0071805613,
+ 0.08089997, 0.05143358, 0.038261272, 0.03339287,
+ -0.027673481, 0.044746667, 0.028349208, 0.020090483,
+ -0.019443132, -0.030755889, -0.0040000007, 0.04465846,
+ -0.021585021, 0.0031670958, 0.0053199246, -0.056117613,
+ -0.10893326, 0.076739706, -0.08509834, -0.027997585,
+ 0.037871376, 0.01449768, -0.09002357, -0.06111149,
+ -0.046195522, 0.0422062, -0.005683705, -0.1253618,
+ -0.012925729, -0.04890792, 0.06985068, 0.037654128,
+ 0.03398274, -0.004781977, 0.007032333, -0.031787455,
+ 0.010868644, -0.031489216, 0.09525667, 0.013939797,
+ 0.0058680447, 0.0167067, 0.02668468, -0.04797466,
+ -0.048885044, -0.12722108, 0.035304096, 0.06554885,
+ 0.00972396, -0.039238118, -0.05159735, -0.11329045,
+ 0.1613692, -0.03750952, 0.06529313, -0.071974665,
+ -0.11769596, 0.015524369, -0.0013754242, -0.12446318,
+ 0.02786344, -0.014179351, 0.005264273, 0.14376344,
+ 0.015983658, 0.03406988, -0.06939408, 0.040699873,
+ 0.02111075, 0.09669095, 0.041345075, -0.08316494,
+ -0.07684199, -0.045768797, 0.032298047, -0.041805092,
+ 0.0119405, 0.0061010392, 0.12652606, 0.0064572375,
+ -0.024950314, 0.11574242, 0.04508852, -0.04335324,
+ 0.06760663, -0.027437469, 0.07216407, 0.06977076,
+ -0.05438599, 0.034033038, -0.028602652, 0.05346137,
+ 0.043184172, -0.037189785, 0.10420091, 0.00882477,
+ -0.054019816, -0.074273005, -0.030617684, -0.0028467078,
+ 0.024302477, -0.0038869337, 0.005332455, 0.0013399826,
+ 0.04361412, -0.007001822, 0.09631092, -0.06702025,
+ -0.042049985, -0.035070654, -0.04103342, -0.10273396,
+ 0.0544271, 0.037184782, -0.13150354, -0.0058036847,
+ -0.008264958, 0.042035464, 0.05891794, 0.029673764,
+ 0.0063542654, 0.044788733, 0.054816857, 0.062257513,
+ -0.00093483756, 0.048938446, -0.004952862, -0.007730018,
+ -0.04043371, -0.017094059, 0.07229206, -0.023670016,
+ -0.052195564, -0.025616996, -0.01520939, 0.045104615,
+ -0.007376126, 0.003533447, 0.006570588, 0.056037236,
+ 0.12436656, 0.051817212, 0.028532185, -0.08686856,
+ 0.11868599, 0.07663395, -0.07323171, 0.03463402,
+ -0.050708205, -0.04458982, -0.11590894, 0.021273347,
+ 0.1251325, -0.15313013, -0.12224372, 0.17228661,
+ 0.023029093, 0.086124025, 0.006445803, -0.03496501,
+ 0.028332196, 0.04449512, -0.042436164, -0.026587414,
+ -0.006041347, -0.09292539, -0.05678812, 0.03897832,
+ 0.09465633, 0.008115513, -0.02171956, 0.08304309,
+ 0.071401566, 0.019622514, 0.032163795, -0.004167056,
+ 0.02295182, 0.030739572, 0.056506045, 0.004612461,
+ 0.06524936, 0.059999723, 0.046395954, -0.0045512207,
+ -0.1335546, -0.030136576, 0.11584653, -0.014678886,
+ 0.0020118146, -0.09688814, -0.0790206, 0.039770417,
+ -0.0329582, 0.07922767, 0.029322514, 0.026405897,
+ 0.04207835, -0.07073373, 0.063781224, 0.0859677,
+ -0.10925287, -0.07011058, 0.048005477, 0.03438226,
+ -0.09606514, -0.006669445, -0.043381985, 0.04240257,
+ -0.06955775, -0.06769346, 0.043903265, -0.026784198,
+ -0.017840602, 0.024307009, -0.040079936, -0.019946516,
+ 0.045318738, -0.12233574, 0.026170589, 0.0074471775,
+ 0.15978073, 0.10185836, 0.10298046, -0.015476589,
+ -0.039390966, -0.072174534, 0.0739445, -0.1211869,
+ -0.0347889, -0.07943156, 0.014809798, -0.12412325,
+ -0.0030663363, 0.039695457, 0.0647603, -0.08291318,
+ -0.018529687, -0.004423833, 0.0037507233, 0.084633216,
+ -0.01514876, -0.056505352, -0.012800942, -0.06994386,
+ 0.012962922, -0.031234352, 0.07029052, 0.016418684,
+ 0.03618972, 0.055686004, -0.08663945, -0.017404709,
+ -0.054761406, 0.029065743, 0.052404847, 0.020238016,
+ 0.0048197987, -0.0214882, 0.07078733, 0.013016777,
+ 0.06262858, 0.009184685, 0.020785125, -0.043904778,
+ -0.0270329, -0.03299152, -0.060088247, -0.015162964,
+ -0.001828936, 0.12642565, -0.056757294, 0.013586685,
+ 0.09232601, -0.035886683, 0.06000002, 0.05229691,
+ -0.052580316, -0.082029596, -0.010794592, 0.012947712,
+ -0.036429964, -0.085508935, -0.13127148, -0.017744139,
+ 0.031502828, 0.036232427, -0.031581745, 0.023051167,
+ -0.05325106, -0.03421577, 0.028793324, -0.034633752,
+ -0.009881397, -0.043551125, -0.018609839, 0.0019097115,
+ -0.008799762, 0.056595087, 0.0022273948, 0.055752404],
+
+ recurrent_to_output_weights: [
+ 0.025825322, -0.05813119, 0.09495884, -0.045984812, -0.01255415,
+ -0.0026479573, -0.08196161, -0.054914974, -0.0046604523, -0.029587349,
+ -0.044576716, -0.07480124, -0.082868785, 0.023254942, 0.027502948,
+ -0.0039728214, -0.08683098, -0.08116779, -0.014675607, -0.037924774,
+ -0.023314456, -0.007401714, -0.09255757, 0.029460307, -0.08829125,
+ -0.005139627, -0.08989442, -0.0555066, 0.13596267, -0.025062224,
+ -0.048351806, -0.03850004, 0.07266485, -0.022414139, 0.05940088,
+ 0.075114764, 0.09597592, -0.010211725, -0.0049794707, -0.011523867,
+ -0.025980417, 0.072999895, 0.11091378, -0.081685916, 0.014416728,
+ 0.043229222, 0.034178585, -0.07530371, 0.035837382, -0.085607,
+ -0.007721233, -0.03287832, -0.043848954, -0.06404588, -0.06632928,
+ -0.073643476, 0.008214239, -0.045984086, 0.039764922, 0.03474462,
+ 0.060612556, -0.080590084, 0.049127717, 0.04151091, -0.030063879,
+ 0.008801774, -0.023021035, -0.019558564, 0.05158114, -0.010947698,
+ -0.011825728, 0.0075720972, 0.0699727, -0.0039981045, 0.069350146,
+ 0.08799282, 0.016156472, 0.035502106, 0.11695009, 0.006217345,
+ 0.13392477, -0.037875112, 0.025745004, 0.08940699, -0.00924166,
+ 0.0046702605, -0.036598757, -0.08811812, 0.10522024, -0.032441203,
+ 0.008176899, -0.04454919, 0.07058152, 0.0067963637, 0.039206743,
+ 0.03259838, 0.03725492, -0.09515802, 0.013326398, -0.052055415,
+ -0.025676316, 0.03198509, -0.015951829, -0.058556724, 0.036879618,
+ 0.043357447, 0.028362012, -0.05908629, 0.0059240665, -0.04995891,
+ -0.019187413, 0.0276265, -0.01628143, 0.0025863599, 0.08800015,
+ 0.035250366, -0.022165963, -0.07328642, -0.009415526, -0.07455109,
+ 0.11690406, 0.0363299, 0.07411125, 0.042103454, -0.009660886,
+ 0.019076364, 0.018299393, -0.046004917, 0.08891175, 0.0431396,
+ -0.026327137, -0.051502608, 0.08979574, -0.051670972, 0.04940282,
+ -0.07491107, -0.021240504, 0.022596184, -0.034280192, 0.060163025,
+ -0.058211457, -0.051837247, -0.01349775, -0.04639988, -0.035936575,
+ -0.011681591, 0.064818054, 0.0073146066, -0.021745546, -0.043124277,
+ -0.06471268, -0.07053354, -0.029321948, -0.05330136, 0.016933719,
+ -0.053782392, 0.13747959, -0.1361751, -0.11569455, 0.0033329215,
+ 0.05693899, -0.053219706, 0.063698, 0.07977434, -0.07924483,
+ 0.06936997, 0.0034815092, -0.007305279, -0.037325785, -0.07251102,
+ -0.033633437, -0.08677009, 0.091591336, -0.14165086, 0.021752775,
+ 0.019683983, 0.0011612234, -0.058154266, 0.049996935, 0.0288841,
+ -0.0024567875, -0.14345716, 0.010955264, -0.10234828, 0.1183656,
+ -0.0010731248, -0.023590032, -0.072285876, -0.0724771, -0.026382286,
+ -0.0014920527, 0.042667855, 0.0018776858, 0.02986552, 0.009814309,
+ 0.0733756, 0.12289186, 0.018043943, -0.0458958, 0.049412545,
+ 0.033632483, 0.05495232, 0.036686596, -0.013781798, -0.010036754,
+ 0.02576849, -0.08307328, 0.010112348, 0.042521734, -0.05869831,
+ -0.071689695, 0.03876447, -0.13275425, -0.0352966, -0.023077697,
+ 0.10285965, 0.084736146, 0.15568255, -0.00040734606, 0.027835453,
+ -0.10292561, -0.032401145, 0.10053256, -0.026142767, -0.08271222,
+ -0.0030240538, -0.016368777, 0.1070414, 0.042672627, 0.013456989,
+ -0.0437609, -0.022309763, 0.11576483, 0.04108048, 0.061026827,
+ -0.0190714, -0.0869359, 0.037901703, 0.0610107, 0.07202949,
+ 0.01675338, 0.086139716, -0.08795751, -0.014898893, -0.023771819,
+ -0.01965048, 0.007955471, -0.043740474, 0.03346837, -0.10549954,
+ 0.090567775, 0.042013682, -0.03176985, 0.12569028, -0.02421228,
+ -0.029526481, 0.023851605, 0.031539805, 0.05292009, -0.02344001,
+ -0.07811758, -0.08834428, 0.10094801, 0.16594367, -0.06861939,
+ -0.021256343, -0.041093912, -0.06669611, 0.035498552, 0.021757556,
+ -0.09302526, -0.015403468, -0.06614931, -0.051798206, -0.013874718,
+ 0.03630673, 0.010412845, -0.08077351, 0.046185967, 0.0035662893,
+ 0.03541868, -0.094149634, -0.034814864, 0.003128424, -0.020674974,
+ -0.03944324, -0.008110165, -0.11113267, 0.08484226, 0.043586485,
+ 0.040582247, 0.0968012, -0.065249965, -0.028036479, 0.0050708856,
+ 0.0017462453, 0.0326779, 0.041296225, 0.09164146, -0.047743853,
+ -0.015952192, -0.034451712, 0.084197424, -0.05347844, -0.11768019,
+ 0.085926116, -0.08251791, -0.045081906, 0.0948852, 0.068401024,
+ 0.024856757, 0.06978981, -0.057309967, -0.012775832, -0.0032452994,
+ 0.01977615, -0.041040014, -0.024264973, 0.063464895, 0.05431621],
+
+ cell_to_input_weights: [
+ 0.040369894, 0.030746894, 0.24704495, 0.018586371, -0.037586458,
+ -0.15312155, -0.11812848, -0.11465643, 0.20259799, 0.11418174,
+ -0.10116027, -0.011334949, 0.12411352, -0.076769054, -0.052169047,
+ 0.21198851, -0.38871562, -0.09061183, -0.09683246, -0.21929175],
+
+ cell_to_forget_weights: [
+ -0.01998659, -0.15568835, -0.24248174, -0.012770197, 0.041331276,
+ -0.072311886, -0.052123554, -0.0066330447, -0.043891653, 0.036225766,
+ -0.047248036, 0.021479502, 0.033189066, 0.11952997, -0.020432774,
+ 0.64658105, -0.06650122, -0.03467612, 0.095340036, 0.23647355],
+
+ cell_to_output_weights: [
+ 0.08286371, -0.08261836, -0.51210177, 0.002913762, 0.17764764,
+ -0.5495371, -0.08460716, -0.24552552, 0.030037103, 0.04123544,
+ -0.11940523, 0.007358328, 0.1890978, 0.4833202, -0.34441817,
+ 0.36312827, -0.26375428, 0.1457655, -0.19724406, 0.15548733],
+
+ projection_weights: [
+ -0.009802181, 0.09401916, 0.0717386, -0.13895074, 0.09641832,
+ 0.060420845, 0.08539281, 0.054285463, 0.061395317, 0.034448683,
+ -0.042991187, 0.019801661, -0.16840284, -0.015726732, -0.23041931,
+ -0.024478018, -0.10959692, -0.013875541, 0.18600968, -0.061274476,
+ 0.0138165, -0.08160894, -0.07661644, 0.032372914, 0.16169067,
+ 0.22465782, -0.03993472, -0.004017731, 0.08633481, -0.28869787,
+ 0.08682067, 0.17240396, 0.014975425, 0.056431185, 0.031037588,
+ 0.16702051, 0.0077946745, 0.15140012, 0.29405436, 0.120285,
+ -0.188994, -0.027265169, 0.043389652, -0.022061434, 0.014777949,
+ -0.20203483, 0.094781205, 0.19100232, 0.13987629, -0.036132768,
+ -0.06426278, -0.05108664, 0.13221376, 0.009441198, -0.16715929,
+ 0.15859416, -0.040437475, 0.050779544, -0.022187516, 0.012166504,
+ 0.027685808, -0.07675938, -0.0055694645, -0.09444123, 0.0046453946,
+ 0.050794356, 0.10770313, -0.20790008, -0.07149004, -0.11425117,
+ 0.008225835, -0.035802525, 0.14374903, 0.15262283, 0.048710253,
+ 0.1847461, -0.007487823, 0.11000021, -0.09542012, 0.22619456,
+ -0.029149994, 0.08527916, 0.009043713, 0.0042746216, 0.016261552,
+ 0.022461696, 0.12689082, -0.043589946, -0.12035478, -0.08361797,
+ -0.050666027, -0.1248618, -0.1275799, -0.071875185, 0.07377272,
+ 0.09944291, -0.18897448, -0.1593054, -0.06526116, -0.040107165,
+ -0.004618631, -0.067624845, -0.007576253, 0.10727444, 0.041546922,
+ -0.20424393, 0.06907816, 0.050412357, 0.00724631, 0.039827548,
+ 0.12449835, 0.10747581, 0.13708383, 0.09134148, -0.12617786,
+ -0.06428341, 0.09956831, 0.1208086, -0.14676677, -0.0727722,
+ 0.1126304, 0.010139365, 0.015571211, -0.038128063, 0.022913318,
+ -0.042050496, 0.16842307, -0.060597885, 0.10531834, -0.06411776,
+ -0.07451711, -0.03410368, -0.13393489, 0.06534304, 0.003620307,
+ 0.04490757, 0.05970546, 0.05197996, 0.02839995, 0.10434969,
+ -0.013699693, -0.028353551, -0.07260381, 0.047201227, -0.024575593,
+ -0.036445823, 0.07155557, 0.009672501, -0.02328883, 0.009533515,
+ -0.03606021, -0.07421458, -0.028082801, -0.2678904, -0.13221288,
+ 0.18419984, -0.13012612, -0.014588381, -0.035059117, -0.04824723,
+ 0.07830115, -0.056184657, 0.03277091, 0.025466874, 0.14494097,
+ -0.12522776, -0.098633975, -0.10766018, -0.08317623, 0.08594209,
+ 0.07749552, 0.039474737, 0.1776665, -0.07409566, -0.0477268,
+ 0.29323658, 0.10801441, 0.1154011, 0.013952499, 0.10739139,
+ 0.10708251, -0.051456142, 0.0074137426, -0.10430189, 0.10034707,
+ 0.045594677, 0.0635285, -0.0715442, -0.089667566, -0.10811871,
+ 0.00026344223, 0.08298446, -0.009525053, 0.006585689, -0.24567553,
+ -0.09450807, 0.09648481, 0.026996298, -0.06419476, -0.04752702,
+ -0.11063944, -0.23441927, -0.17608605, -0.052156363, 0.067035615,
+ 0.19271925, -0.0032889997, -0.043264326, 0.09663576, -0.057112187,
+ -0.10100678, 0.0628376, 0.04447668, 0.017961001, -0.10094388,
+ -0.10190601, 0.18335468, 0.10494553, -0.052095775, -0.0026118709,
+ 0.10539724, -0.04383912, -0.042349473, 0.08438151, -0.1947263,
+ 0.02251204, 0.11216432, -0.10307853, 0.17351969, -0.039091777,
+ 0.08066188, -0.00561982, 0.12633002, 0.11335965, -0.0088127935,
+ -0.019777594, 0.06864014, -0.059751723, 0.016233567, -0.06894641,
+ -0.28651384, -0.004228674, 0.019708522, -0.16305895, -0.07468996,
+ -0.0855457, 0.099339016, -0.07580735, -0.13775392, 0.08434318,
+ 0.08330512, -0.12131499, 0.031935584, 0.09180414, -0.08876437,
+ -0.08049874, 0.008753825, 0.03498998, 0.030215185, 0.03907079,
+ 0.089751154, 0.029194152, -0.03337423, -0.019092513, 0.04331237,
+ 0.04299654, -0.036394123, -0.12915532, 0.09793732, 0.07512415,
+ -0.11319543, -0.032502122, 0.15661901, 0.07671967, -0.005491124,
+ -0.19379048, -0.218606, 0.21448623, 0.017840758, 0.1416943,
+ -0.07051762, 0.19488361, 0.02664691, -0.18104725, -0.09334311,
+ 0.15026465, -0.15493552, -0.057762887, -0.11604192, -0.262013,
+ -0.01391798, 0.012185008, 0.11156489, -0.07483202, 0.06693364,
+ -0.26151478, 0.046425626, 0.036540434, -0.16435726, 0.17338543,
+ -0.21401681, -0.11385144, -0.08283257, -0.069031075, 0.030635102,
+ 0.010969227, 0.11109743, 0.010919218, 0.027526086, 0.13519906,
+ 0.01891392, -0.046839405, -0.040167913, 0.017953383, -0.09700955,
+ 0.0061885654, -0.07000971, 0.026893595, -0.038844477, 0.14543656],
+
+ projection_bias: [],
+}
+
+# Batch0: 4 (input_sequence_size) * 5 (n_input)
+input0[input] = [0.596268, 0.998386, 0.568695, 0.864524, 0.571277]
+# Batch1: 4 (input_sequence_size) * 5 (n_input)
+input0[input].extend(
+ [0.642421, 0.524260, 0.134799, 0.003639, 0.162482]
+)
+input0[output_state_in] = [
+ -0.00396806, 0.029352, -0.00279226, 0.0159977,
+ -0.00835577, -0.0211779, 0.0283512, -0.0114597,
+ 0.00907307, -0.0244004, -0.0152191, -0.0259063,
+ 0.00914318, 0.00415119, 0.017147, 0.0134203,
+ -0.013869, 0.0287268, -0.00334694, 0.00733397,
+ -0.0287926, -0.0186926, 0.0193662, -0.0115437,
+ 0.00422612, -0.0345232, 0.00223253, -0.00957321,
+ 0.0210624, 0.013331, 0.0150954, 0.0216801,
+]
+input0[cell_state_in] = [
+ -0.0531632, -0.0118138, 0.0870833, 0.0347929,
+ -0.076144, -0.0659219, -0.0463811, 0.0141307,
+ -0.0127706, -0.03782, -0.00402401, -0.00571876,
+ -0.187957, -0.0247127, 0.0711425, 0.008244,
+ 0.0492649, 0.126972, 0.0933097, 0.29848,
+ -0.0966178, -0.114417, 0.0387229, 0.0453255,
+ -0.181286, -0.0651251, -0.0996879, -0.00276995,
+ 0.0617558, -0.0100728, 0.056304, -0.077416,
+ -0.162858, -0.0541251, 0.0571202, -0.0525331,
+ 0.0724297, 0.171029, 0.141738, 0.295483,
+]
+output0 = {
+ scratch_buffer: [ 0 for x in range(n_batch * n_cell * 4) ],
+ cell_state_out: [
+ -0.154022, -0.124934, 0.0478463, 0.0607819,
+ -0.218727, -0.111053, -0.103885, -0.00447221,
+ 0.0554757, -0.0207068, 0.0595767, -0.116297,
+ -0.249466, -0.0723206, 0.0794942, -0.0377107,
+ 0.124532, 0.249952, 0.188641, 0.411865,
+ -0.11012, -0.0694494, 0.103501, 0.0428427,
+ -0.167345, -0.106061, -0.0775679, 0.00936161,
+ 0.0105526, -0.0314523, 0.0243475, -0.132179,
+ -0.258763, -0.0307266, 0.107047, -0.0115197,
+ 0.0995485, 0.220027, 0.158355, 0.436369,
+ ],
+ output_state_out: [
+ -0.0166936, 0.0381209, 0.000889684, 0.0143363,
+ -0.0328911, -0.0234288, 0.0333051, -0.012229,
+ 0.0110322, -0.0457725, -0.000832209, -0.0202817,
+ 0.0327257, 0.0121309, 0.0155969, 0.0312091,
+ -0.0141913, 0.0322082, 0.00227024, 0.0260507,
+ -0.0188721, -0.0296489, 0.0399134, -0.0160509,
+ 0.011604, -0.0447318, -0.0150515, -0.0277406,
+ 0.0316596, 0.0118233, 0.0214762, 0.0293641
+ ],
+}
+
+# Batch0: 4 (input_sequence_size) * 16 (n_output)
+output0[output] = [
+ -0.0166936, 0.0381209, 0.000889694, 0.0143363,
+ -0.0328911, -0.0234288, 0.0333051, -0.012229, 0.0110322,
+ -0.0457725, -0.000832209, -0.0202817, 0.0327257, 0.0121308,
+ 0.0155969, 0.0312091]
+# Batch1: 4 (input_sequence_size) * 16 (n_output)
+output0[output].extend(
+ [-0.0141913, 0.0322082, 0.00227024, 0.0260507,
+ -0.0188721, -0.0296489, 0.0399134, -0.0160509, 0.0116039,
+ -0.0447318, -0.0150515, -0.0277406, 0.0316596, 0.0118233,
+ 0.0214762, 0.0293641]
+ )
+
+Example((input0, output0))
diff --git a/tests/nnapi/specs/skip/V1_2/lstm_float16.mod.py b/tests/nnapi/specs/skip/V1_2/lstm_float16.mod.py
new file mode 100644
index 000000000..ff7be5ebb
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/lstm_float16.mod.py
@@ -0,0 +1,148 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# LSTM Test: No Cifg, No Peephole, No Projection, and No Clipping.
+
+model = Model()
+
+n_batch = 1
+n_input = 2
+# n_cell and n_output have the same size when there is no projection.
+n_cell = 4
+n_output = 4
+
+input = Input("input", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_input))
+
+input_to_input_weights = Input("input_to_input_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+input_to_forget_weights = Input("input_to_forget_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+input_to_cell_weights = Input("input_to_cell_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+input_to_output_weights = Input("input_to_output_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+
+recurrent_to_input_weights = Input("recurrent_to_intput_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+recurrent_to_forget_weights = Input("recurrent_to_forget_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+recurrent_to_cell_weights = Input("recurrent_to_cell_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+recurrent_to_output_weights = Input("recurrent_to_output_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+
+cell_to_input_weights = Input("cell_to_input_weights", "TENSOR_FLOAT16", "{0}")
+cell_to_forget_weights = Input("cell_to_forget_weights", "TENSOR_FLOAT16", "{0}")
+cell_to_output_weights = Input("cell_to_output_weights", "TENSOR_FLOAT16", "{0}")
+
+input_gate_bias = Input("input_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+forget_gate_bias = Input("forget_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+cell_gate_bias = Input("cell_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+output_gate_bias = Input("output_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+
+projection_weights = Input("projection_weights", "TENSOR_FLOAT16", "{0,0}")
+projection_bias = Input("projection_bias", "TENSOR_FLOAT16", "{0}")
+
+output_state_in = Input("output_state_in", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_output))
+cell_state_in = Input("cell_state_in", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_cell))
+
+activation_param = Int32Scalar("activation_param", 4) # Tanh
+cell_clip_param = Float16Scalar("cell_clip_param", 0.)
+proj_clip_param = Float16Scalar("proj_clip_param", 0.)
+
+scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, (n_cell * 4)))
+output_state_out = Output("output_state_out", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_output))
+cell_state_out = Output("cell_state_out", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_cell))
+output = Output("output", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_output))
+
+model = model.Operation("LSTM",
+ input,
+
+ input_to_input_weights,
+ input_to_forget_weights,
+ input_to_cell_weights,
+ input_to_output_weights,
+
+ recurrent_to_input_weights,
+ recurrent_to_forget_weights,
+ recurrent_to_cell_weights,
+ recurrent_to_output_weights,
+
+ cell_to_input_weights,
+ cell_to_forget_weights,
+ cell_to_output_weights,
+
+ input_gate_bias,
+ forget_gate_bias,
+ cell_gate_bias,
+ output_gate_bias,
+
+ projection_weights,
+ projection_bias,
+
+ output_state_in,
+ cell_state_in,
+
+ activation_param,
+ cell_clip_param,
+ proj_clip_param
+).To([scratch_buffer, output_state_out, cell_state_out, output])
+
+# Example 1. Input in operand 0,
+input0 = {input_to_input_weights: [-0.45018822, -0.02338299, -0.0870589, -0.34550029, 0.04266912, -0.15680569, -0.34856534, 0.43890524],
+ input_to_forget_weights: [0.09701663, 0.20334584, -0.50592935, -0.31343272, -0.40032279, 0.44781327, 0.01387155, -0.35593212],
+ input_to_cell_weights: [-0.50013041, 0.1370284, 0.11810488, 0.2013163, -0.20583314, 0.44344562, 0.22077113, -0.29909778],
+ input_to_output_weights: [-0.25065863, -0.28290087, 0.04613829, 0.40525138, 0.44272184, 0.03897077, -0.1556896, 0.19487578],
+
+ input_gate_bias: [0.,0.,0.,0.],
+ forget_gate_bias: [1.,1.,1.,1.],
+ cell_gate_bias: [0.,0.,0.,0.],
+ output_gate_bias: [0.,0.,0.,0.],
+
+ recurrent_to_input_weights: [
+ -0.0063535, -0.2042388, 0.31454784, -0.35746509, 0.28902304, 0.08183324,
+ -0.16555229, 0.02286911, -0.13566875, 0.03034258, 0.48091322,
+ -0.12528998, 0.24077177, -0.51332325, -0.33502164, 0.10629296],
+
+ recurrent_to_cell_weights: [
+ -0.3407414, 0.24443203, -0.2078532, 0.26320225, 0.05695659, -0.00123841,
+ -0.4744786, -0.35869038, -0.06418842, -0.13502428, -0.501764, 0.22830659,
+ -0.46367589, 0.26016325, -0.03894562, -0.16368064],
+
+ recurrent_to_forget_weights: [
+ -0.48684245, -0.06655136, 0.42224967, 0.2112639, 0.27654213, 0.20864892,
+ -0.07646349, 0.45877004, 0.00141793, -0.14609534, 0.36447752, 0.09196436,
+ 0.28053468, 0.01560611, -0.20127171, -0.01140004],
+
+ recurrent_to_output_weights: [
+ 0.43385774, -0.17194885, 0.2718237, 0.09215671, 0.24107647, -0.39835793,
+ 0.18212086, 0.01301402, 0.48572797, -0.50656658, 0.20047462, -0.20607421,
+ -0.51818722, -0.15390486, 0.0468148, 0.39922136],
+
+ cell_to_input_weights: [],
+ cell_to_forget_weights: [],
+ cell_to_output_weights: [],
+
+ projection_weights: [],
+ projection_bias: [],
+}
+
+test_input = [2., 3.]
+output_state = [0, 0, 0, 0]
+cell_state = [0, 0, 0, 0]
+golden_output = [-0.02973187, 0.1229473, 0.20885126, -0.15358765,]
+output0 = {
+ scratch_buffer: [ 0 for x in range(n_batch * n_cell * 4) ],
+ cell_state_out: [ -0.145439, 0.157475, 0.293663, -0.277353 ],
+ output_state_out: [ -0.0297319, 0.122947, 0.208851, -0.153588 ],
+ output: golden_output
+}
+input0[input] = test_input
+input0[output_state_in] = output_state
+input0[cell_state_in] = cell_state
+Example((input0, output0))
diff --git a/tests/nnapi/specs/skip/V1_2/lstm_state2_float16.mod.py b/tests/nnapi/specs/skip/V1_2/lstm_state2_float16.mod.py
new file mode 100644
index 000000000..470dd7b50
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/lstm_state2_float16.mod.py
@@ -0,0 +1,148 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# LSTM Test: No Cifg, No Peephole, No Projection, and No Clipping.
+
+model = Model()
+
+n_batch = 1
+n_input = 2
+# n_cell and n_output have the same size when there is no projection.
+n_cell = 4
+n_output = 4
+
+input = Input("input", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_input))
+
+input_to_input_weights = Input("input_to_input_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+input_to_forget_weights = Input("input_to_forget_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+input_to_cell_weights = Input("input_to_cell_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+input_to_output_weights = Input("input_to_output_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+
+recurrent_to_input_weights = Input("recurrent_to_intput_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+recurrent_to_forget_weights = Input("recurrent_to_forget_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+recurrent_to_cell_weights = Input("recurrent_to_cell_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+recurrent_to_output_weights = Input("recurrent_to_output_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+
+cell_to_input_weights = Input("cell_to_input_weights", "TENSOR_FLOAT16", "{0}")
+cell_to_forget_weights = Input("cell_to_forget_weights", "TENSOR_FLOAT16", "{0}")
+cell_to_output_weights = Input("cell_to_output_weights", "TENSOR_FLOAT16", "{0}")
+
+input_gate_bias = Input("input_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+forget_gate_bias = Input("forget_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+cell_gate_bias = Input("cell_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+output_gate_bias = Input("output_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+
+projection_weights = Input("projection_weights", "TENSOR_FLOAT16", "{0,0}")
+projection_bias = Input("projection_bias", "TENSOR_FLOAT16", "{0}")
+
+output_state_in = Input("output_state_in", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_output))
+cell_state_in = Input("cell_state_in", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_cell))
+
+activation_param = Int32Scalar("activation_param", 4) # Tanh
+cell_clip_param = Float16Scalar("cell_clip_param", 0.)
+proj_clip_param = Float16Scalar("proj_clip_param", 0.)
+
+scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, (n_cell * 4)))
+output_state_out = IgnoredOutput("output_state_out", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_output))
+cell_state_out = IgnoredOutput("cell_state_out", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_cell))
+output = Output("output", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_output))
+
+model = model.Operation("LSTM",
+ input,
+
+ input_to_input_weights,
+ input_to_forget_weights,
+ input_to_cell_weights,
+ input_to_output_weights,
+
+ recurrent_to_input_weights,
+ recurrent_to_forget_weights,
+ recurrent_to_cell_weights,
+ recurrent_to_output_weights,
+
+ cell_to_input_weights,
+ cell_to_forget_weights,
+ cell_to_output_weights,
+
+ input_gate_bias,
+ forget_gate_bias,
+ cell_gate_bias,
+ output_gate_bias,
+
+ projection_weights,
+ projection_bias,
+
+ output_state_in,
+ cell_state_in,
+
+ activation_param,
+ cell_clip_param,
+ proj_clip_param
+).To([scratch_buffer, output_state_out, cell_state_out, output])
+
+# Example 1. Input in operand 0,
+input0 = {input_to_input_weights: [-0.45018822, -0.02338299, -0.0870589, -0.34550029, 0.04266912, -0.15680569, -0.34856534, 0.43890524],
+ input_to_forget_weights: [0.09701663, 0.20334584, -0.50592935, -0.31343272, -0.40032279, 0.44781327, 0.01387155, -0.35593212],
+ input_to_cell_weights: [-0.50013041, 0.1370284, 0.11810488, 0.2013163, -0.20583314, 0.44344562, 0.22077113, -0.29909778],
+ input_to_output_weights: [-0.25065863, -0.28290087, 0.04613829, 0.40525138, 0.44272184, 0.03897077, -0.1556896, 0.19487578],
+
+ input_gate_bias: [0.,0.,0.,0.],
+ forget_gate_bias: [1.,1.,1.,1.],
+ cell_gate_bias: [0.,0.,0.,0.],
+ output_gate_bias: [0.,0.,0.,0.],
+
+ recurrent_to_input_weights: [
+ -0.0063535, -0.2042388, 0.31454784, -0.35746509, 0.28902304, 0.08183324,
+ -0.16555229, 0.02286911, -0.13566875, 0.03034258, 0.48091322,
+ -0.12528998, 0.24077177, -0.51332325, -0.33502164, 0.10629296],
+
+ recurrent_to_cell_weights: [
+ -0.3407414, 0.24443203, -0.2078532, 0.26320225, 0.05695659, -0.00123841,
+ -0.4744786, -0.35869038, -0.06418842, -0.13502428, -0.501764, 0.22830659,
+ -0.46367589, 0.26016325, -0.03894562, -0.16368064],
+
+ recurrent_to_forget_weights: [
+ -0.48684245, -0.06655136, 0.42224967, 0.2112639, 0.27654213, 0.20864892,
+ -0.07646349, 0.45877004, 0.00141793, -0.14609534, 0.36447752, 0.09196436,
+ 0.28053468, 0.01560611, -0.20127171, -0.01140004],
+
+ recurrent_to_output_weights: [
+ 0.43385774, -0.17194885, 0.2718237, 0.09215671, 0.24107647, -0.39835793,
+ 0.18212086, 0.01301402, 0.48572797, -0.50656658, 0.20047462, -0.20607421,
+ -0.51818722, -0.15390486, 0.0468148, 0.39922136],
+
+ cell_to_input_weights: [],
+ cell_to_forget_weights: [],
+ cell_to_output_weights: [],
+
+ projection_weights: [],
+ projection_bias: [],
+}
+
+test_input = [1., 1.]
+output_state = [-0.0371611, 0.125073, 0.411934, -0.208605]
+cell_state = [-0.287121, 0.148115, 0.556837, -0.388276]
+golden_output = [-0.15053082, 0.09120187, 0.24278517, -0.12222792]
+output0 = {
+ scratch_buffer: [ 0 for x in range(n_batch * n_cell * 4) ],
+ cell_state_out: [ 0 for x in range(n_batch * n_cell) ],
+ output_state_out: [ 0 for x in range(n_batch * n_output) ],
+ output: golden_output
+}
+input0[input] = test_input
+input0[output_state_in] = output_state
+input0[cell_state_in] = cell_state
+Example((input0, output0))
diff --git a/tests/nnapi/specs/skip/V1_2/lstm_state_float16.mod.py b/tests/nnapi/specs/skip/V1_2/lstm_state_float16.mod.py
new file mode 100644
index 000000000..78f65366d
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/lstm_state_float16.mod.py
@@ -0,0 +1,148 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# LSTM Test: No Cifg, No Peephole, No Projection, and No Clipping.
+
+model = Model()
+
+n_batch = 1
+n_input = 2
+# n_cell and n_output have the same size when there is no projection.
+n_cell = 4
+n_output = 4
+
+input = Input("input", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_input))
+
+input_to_input_weights = Input("input_to_input_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+input_to_forget_weights = Input("input_to_forget_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+input_to_cell_weights = Input("input_to_cell_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+input_to_output_weights = Input("input_to_output_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_input))
+
+recurrent_to_input_weights = Input("recurrent_to_intput_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+recurrent_to_forget_weights = Input("recurrent_to_forget_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+recurrent_to_cell_weights = Input("recurrent_to_cell_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+recurrent_to_output_weights = Input("recurrent_to_output_weights", "TENSOR_FLOAT16", "{%d, %d}" % (n_cell, n_output))
+
+cell_to_input_weights = Input("cell_to_input_weights", "TENSOR_FLOAT16", "{0}")
+cell_to_forget_weights = Input("cell_to_forget_weights", "TENSOR_FLOAT16", "{0}")
+cell_to_output_weights = Input("cell_to_output_weights", "TENSOR_FLOAT16", "{0}")
+
+input_gate_bias = Input("input_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+forget_gate_bias = Input("forget_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+cell_gate_bias = Input("cell_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+output_gate_bias = Input("output_gate_bias", "TENSOR_FLOAT16", "{%d}"%(n_cell))
+
+projection_weights = Input("projection_weights", "TENSOR_FLOAT16", "{0,0}")
+projection_bias = Input("projection_bias", "TENSOR_FLOAT16", "{0}")
+
+output_state_in = Input("output_state_in", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_output))
+cell_state_in = Input("cell_state_in", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_cell))
+
+activation_param = Int32Scalar("activation_param", 4) # Tanh
+cell_clip_param = Float16Scalar("cell_clip_param", 0.)
+proj_clip_param = Float16Scalar("proj_clip_param", 0.)
+
+scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, (n_cell * 4)))
+output_state_out = Output("output_state_out", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_output))
+cell_state_out = Output("cell_state_out", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_cell))
+output = Output("output", "TENSOR_FLOAT16", "{%d, %d}" % (n_batch, n_output))
+
+model = model.Operation("LSTM",
+ input,
+
+ input_to_input_weights,
+ input_to_forget_weights,
+ input_to_cell_weights,
+ input_to_output_weights,
+
+ recurrent_to_input_weights,
+ recurrent_to_forget_weights,
+ recurrent_to_cell_weights,
+ recurrent_to_output_weights,
+
+ cell_to_input_weights,
+ cell_to_forget_weights,
+ cell_to_output_weights,
+
+ input_gate_bias,
+ forget_gate_bias,
+ cell_gate_bias,
+ output_gate_bias,
+
+ projection_weights,
+ projection_bias,
+
+ output_state_in,
+ cell_state_in,
+
+ activation_param,
+ cell_clip_param,
+ proj_clip_param
+).To([scratch_buffer, output_state_out, cell_state_out, output])
+
+# Example 1. Input in operand 0,
+input0 = {input_to_input_weights: [-0.45018822, -0.02338299, -0.0870589, -0.34550029, 0.04266912, -0.15680569, -0.34856534, 0.43890524],
+ input_to_forget_weights: [0.09701663, 0.20334584, -0.50592935, -0.31343272, -0.40032279, 0.44781327, 0.01387155, -0.35593212],
+ input_to_cell_weights: [-0.50013041, 0.1370284, 0.11810488, 0.2013163, -0.20583314, 0.44344562, 0.22077113, -0.29909778],
+ input_to_output_weights: [-0.25065863, -0.28290087, 0.04613829, 0.40525138, 0.44272184, 0.03897077, -0.1556896, 0.19487578],
+
+ input_gate_bias: [0.,0.,0.,0.],
+ forget_gate_bias: [1.,1.,1.,1.],
+ cell_gate_bias: [0.,0.,0.,0.],
+ output_gate_bias: [0.,0.,0.,0.],
+
+ recurrent_to_input_weights: [
+ -0.0063535, -0.2042388, 0.31454784, -0.35746509, 0.28902304, 0.08183324,
+ -0.16555229, 0.02286911, -0.13566875, 0.03034258, 0.48091322,
+ -0.12528998, 0.24077177, -0.51332325, -0.33502164, 0.10629296],
+
+ recurrent_to_cell_weights: [
+ -0.3407414, 0.24443203, -0.2078532, 0.26320225, 0.05695659, -0.00123841,
+ -0.4744786, -0.35869038, -0.06418842, -0.13502428, -0.501764, 0.22830659,
+ -0.46367589, 0.26016325, -0.03894562, -0.16368064],
+
+ recurrent_to_forget_weights: [
+ -0.48684245, -0.06655136, 0.42224967, 0.2112639, 0.27654213, 0.20864892,
+ -0.07646349, 0.45877004, 0.00141793, -0.14609534, 0.36447752, 0.09196436,
+ 0.28053468, 0.01560611, -0.20127171, -0.01140004],
+
+ recurrent_to_output_weights: [
+ 0.43385774, -0.17194885, 0.2718237, 0.09215671, 0.24107647, -0.39835793,
+ 0.18212086, 0.01301402, 0.48572797, -0.50656658, 0.20047462, -0.20607421,
+ -0.51818722, -0.15390486, 0.0468148, 0.39922136],
+
+ cell_to_input_weights: [],
+ cell_to_forget_weights: [],
+ cell_to_output_weights: [],
+
+ projection_weights: [],
+ projection_bias: [],
+}
+
+test_input = [3., 4.]
+output_state = [-0.0297319, 0.122947, 0.208851, -0.153588]
+cell_state = [-0.145439, 0.157475, 0.293663, -0.277353,]
+golden_output = [-0.03716109, 0.12507336, 0.41193449, -0.20860538]
+output0 = {
+ scratch_buffer: [ 0 for x in range(n_batch * n_cell * 4) ],
+ cell_state_out: [ -0.287121, 0.148115, 0.556837, -0.388276 ],
+ output_state_out: [ -0.0371611, 0.125073, 0.411934, -0.208605 ],
+ output: golden_output
+}
+input0[input] = test_input
+input0[output_state_in] = output_state
+input0[cell_state_in] = cell_state
+Example((input0, output0))
diff --git a/tests/nnapi/specs/skip/V1_2/max_pool_v1_2.mod.py b/tests/nnapi/specs/skip/V1_2/max_pool_v1_2.mod.py
new file mode 100644
index 000000000..979cf2ea3
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/max_pool_v1_2.mod.py
@@ -0,0 +1,186 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+layout = BoolScalar("layout", False) # NHWC
+
+# TEST 1: MAX_POOL_2D_NCHW_1, pad = 0, stride = 1, filter = 1, act = none
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+Model().Operation("MAX_POOL_2D", i1, 0, 0, 0, 0, 1, 1, 1, 1, 0, layout).To(o1)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.5, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i1: [1.0, 2.0, 3.0, 4.0],
+ o1: [1.0, 2.0, 3.0, 4.0]
+}).AddNchw(i1, o1, layout).AddVariations("relaxed", quant8, "float16")
+
+
+# TEST 2: MAX_POOL_2D_NCHW_2, act = none
+bat = 5
+row = 50
+col = 70
+chn = 3
+std = 20
+flt = 20
+pad = 0
+output_row = (row + 2 * pad - flt + std) // std
+output_col = (col + 2 * pad - flt + std) // std
+
+i2 = Input("op1", ("TENSOR_FLOAT32", [bat, row, col, chn]))
+o2 = Output("op4", ("TENSOR_FLOAT32", [bat, output_row, output_col, chn]))
+Model().Operation("MAX_POOL_2D", i2, pad, pad, pad, pad, std, std, flt, flt, 0, layout).To(o2)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
+ o2: ("TENSOR_QUANT8_ASYMM", 0.5, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i2: [x % std + 1 for x in range(bat * row * col * chn)],
+ o2: [std for _ in range(bat * output_row * output_col * chn)]
+}).AddNchw(i2, o2, layout).AddVariations("relaxed", quant8, "float16")
+
+
+# TEST 3: MAX_POOL_2D_NCHW_3, act = relu6
+bat = 5
+row = 50
+col = 70
+chn = 3
+std = 20
+flt = 20
+pad = 0
+output_row = (row + 2 * pad - flt + std) // std
+output_col = (col + 2 * pad - flt + std) // std
+
+i3 = Input("op1", ("TENSOR_FLOAT32", [bat, row, col, chn]))
+o3 = Output("op4", ("TENSOR_FLOAT32", [bat, output_row, output_col, chn]))
+Model().Operation("MAX_POOL_2D", i3, pad, pad, pad, pad, std, std, flt, flt, 3, layout).To(o3)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i3: ("TENSOR_QUANT8_ASYMM", 0.25, 0),
+ o3: ("TENSOR_QUANT8_ASYMM", 0.25, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i3: [x % std + 1 for x in range(bat * row * col * chn)],
+ o3: [6 for _ in range(bat * output_row * output_col * chn)]
+}).AddNchw(i3, o3, layout).AddVariations("relaxed", quant8, "float16")
+
+
+# TEST 4: MAX_POOL_2D_NCHW_4, pad = same, stride = 2, filter = 2, act = none
+i4 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 4, 1}")
+o4 = Output("op4", "TENSOR_FLOAT32", "{1, 1, 2, 1}")
+Model().Operation("MAX_POOL_2D", i4, 1, 2, 2, 2, 2, 0, layout).To(o4)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i4: ("TENSOR_QUANT8_ASYMM", 0.25, 0),
+ o4: ("TENSOR_QUANT8_ASYMM", 0.25, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i4: [0, 6, 2, 4, 3, 2, 10, 7],
+ o4: [6, 10]
+}).AddNchw(i4, o4, layout).AddVariations("relaxed", quant8, "float16")
+
+
+# TEST 5: zero-sized input, explicit padding
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# MAX_POOL_2D op with numBatches = 0.
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 1, 1, 1}") # out
+model = model.Operation("MAX_POOL_2D", zero_sized, 0, 0, 0, 0, 1, 1, 2, 2, 0, layout).To(o3)
+
+quant8 = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ zero_sized: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ o3: ("TENSOR_QUANT8_ASYMM", 0.1, 128)
+})
+
+# Create test case with dummy values.
+Example({
+ i1: [1],
+ o1: [0],
+ o2: [0],
+ o3: [0],
+}).AddNchw(i1, zero_sized, o3, layout).AddVariations("relaxed", quant8, "float16")
+
+
+# TEST 6: zero-sized input, implicit padding
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# MAX_POOL_2D op with numBatches = 0.
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 2, 2, 1}") # out
+model = model.Operation("MAX_POOL_2D", zero_sized, 1, 1, 1, 2, 2, 0, layout).To(o3)
+
+quant8 = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ zero_sized: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ o3: ("TENSOR_QUANT8_ASYMM", 0.1, 128)
+})
+
+# Create test case with dummy values.
+Example({
+ i1: [1],
+ o1: [0],
+ o2: [0],
+ o3: [0],
+}).AddNchw(i1, zero_sized, o3, layout).AddVariations("relaxed", quant8, "float16")
diff --git a/tests/nnapi/specs/skip/V1_2/mean_float16.mod.py b/tests/nnapi/specs/skip/V1_2/mean_float16.mod.py
new file mode 100644
index 000000000..5814f60fe
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/mean_float16.mod.py
@@ -0,0 +1,19 @@
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT16", "{1, 2, 2, 1}")
+axis = Parameter("axis", "TENSOR_INT32", "{1}", [2])
+keepDims = Int32Scalar("keepDims", 0)
+output = Output("output", "TENSOR_FLOAT16", "{1, 2, 1}")
+
+model = model.Operation("MEAN", i1, axis, keepDims).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1.0, 2.0,
+ 3.0, 4.0]}
+
+output0 = {output: # output 0
+ [1.5,
+ 3.5]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/skip/V1_2/mul_v1_2.mod.py b/tests/nnapi/specs/skip/V1_2/mul_v1_2.mod.py
new file mode 100644
index 000000000..8d1002b92
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/mul_v1_2.mod.py
@@ -0,0 +1,99 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# TEST 1: MUL float16
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT16", "{3}") # a vector of 3 float16s
+i2 = Input("op2", "TENSOR_FLOAT16", "{3}") # another vector of 3 float16s
+act = Int32Scalar("act", 0) # an int32_t scalar activation
+i3 = Output("op3", "TENSOR_FLOAT16", "{3}")
+model = model.Operation("MUL", i1, i2, act).To(i3)
+model = model.RelaxedExecution(False)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1.0009765625, 1.0, 2.5],
+ i2: # input 1
+ [2, 0.0001, 3.5]}
+
+output0 = {i3: # output 0
+ [2.001953125, 0.0001000165, 8.75]}
+
+# Instantiate an example
+Example((input0, output0))
+
+
+# TEST 2: MUL broadcast float16
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT16", "{1, 2}")
+i2 = Input("op2", "TENSOR_FLOAT16", "{2, 2}")
+act = Int32Scalar("act", 0)
+i3 = Output("op3", "TENSOR_FLOAT16", "{2, 2}")
+model = model.Operation("MUL", i1, i2, act).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1, 2],
+ i2: # input 1
+ [1, 2, 3, 4]}
+
+output0 = {i3: # output 0
+ [1, 4, 3, 8]}
+
+# Instantiate an example
+Example((input0, output0))
+
+
+# TEST 3: MUL, zero-sized input
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+layout = BoolScalar("layout", False) # NHWC
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 2}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 2}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# MUL op with numBatches = 0.
+i2 = Parameter("op", "TENSOR_FLOAT32", "{1, 2, 2, 1}", [1, 2, 3, 4]) # weights
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 2, 2, 2}") # out
+model = model.Operation("MUL", zero_sized, i2, 0).To(o3)
+
+quant8 = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ zero_sized: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ i2: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ o3: ("TENSOR_QUANT8_ASYMM", 0.1, 128)
+})
+
+# Create test case with dummy values.
+Example({
+ i1: [1, 2],
+ o1: [0],
+ o2: [0],
+ o3: [0],
+}).AddVariations("relaxed", quant8, "float16")
diff --git a/tests/nnapi/specs/skip/V1_2/pad_all_dims.mod.py b/tests/nnapi/specs/skip/V1_2/pad_all_dims.mod.py
new file mode 100644
index 000000000..3ae88b87e
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/pad_all_dims.mod.py
@@ -0,0 +1,46 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import numpy as np
+
+input0 = Input("input0", "TENSOR_FLOAT32", "{1, 1, 2, 3}")
+paddings = Parameter("paddings", "TENSOR_INT32", "{4, 2}", [1, 2,
+ 3, 4,
+ 3, 3,
+ 2, 1])
+output0 = Output("output0", "TENSOR_FLOAT32", "{4, 8, 8, 6}")
+
+model = Model().Operation("PAD", input0, paddings).To(output0)
+
+quant8 = DataTypeConverter().Identify({
+ input0: ("TENSOR_QUANT8_ASYMM", 2.3, 0),
+ output0: ("TENSOR_QUANT8_ASYMM", 2.3, 0),
+})
+
+Example({
+ input0: [1.0, 2.0, 3.0,
+ 4.0, 5.0, 6.0],
+ output0: np.pad([[[[1.0, 2.0, 3.0],
+ [4.0, 5.0, 6.0]]]],
+ [[1, 2],
+ [3, 4],
+ [3, 3],
+ [2, 1]],
+ "constant").flatten().tolist(),
+}).AddVariations("float16", "relaxed", quant8)
+
+# PAD of TENSOR_FLOAT32 and TENSOR_QUANT8_ASYMM data type is introduced in V1_1.
+Example.SetVersion("V1_1", "pad_all_dims", "pad_all_dims_quant8")
diff --git a/tests/nnapi/specs/skip/V1_2/pad_float16.mod.py b/tests/nnapi/specs/skip/V1_2/pad_float16.mod.py
new file mode 100644
index 000000000..7a6b29ccf
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/pad_float16.mod.py
@@ -0,0 +1,20 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT16", "{1, 2, 2, 1}")
+i2 = Parameter("op2", "TENSOR_INT32", "{4, 2}", [0, 0, 1, 1, 1, 1, 0, 0])
+i3 = Output("op3", "TENSOR_FLOAT16", "{1, 4, 4, 1}")
+model = model.Operation("PAD", i1, i2).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1.0, 2.0,
+ 3.0, 4.0,]}
+
+output0 = {i3: # output 0
+ [0.0, 0.0, 0.0, 0.0,
+ 0.0, 1.0, 2.0, 0.0,
+ 0.0, 3.0, 4.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/skip/V1_2/pad_low_rank.mod.py b/tests/nnapi/specs/skip/V1_2/pad_low_rank.mod.py
new file mode 100644
index 000000000..7394f796d
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/pad_low_rank.mod.py
@@ -0,0 +1,29 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+input0 = Input("input0", "TENSOR_FLOAT32", "{3}")
+paddings = Parameter("paddings", "TENSOR_INT32", "{1, 2}", [3, 1])
+output0 = Output("output0", "TENSOR_FLOAT32", "{7}")
+
+model = Model().Operation("PAD", input0, paddings).To(output0)
+
+Example({
+ input0: [1.0, 2.0, 3.0],
+ output0: [0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 0.0],
+}).AddVariations("float16")
+
+# PAD of TENSOR_FLOAT32 data type is introduced in V1_1.
+Example.SetVersion("V1_1", "pad_low_rank")
diff --git a/tests/nnapi/specs/skip/V1_2/pad_low_rank_quant8.mod.py b/tests/nnapi/specs/skip/V1_2/pad_low_rank_quant8.mod.py
new file mode 100644
index 000000000..be939341e
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/pad_low_rank_quant8.mod.py
@@ -0,0 +1,26 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+input0 = Input("input0", "TENSOR_QUANT8_ASYMM", "{3}, 2.3, 0")
+paddings = Parameter("paddings", "TENSOR_INT32", "{1, 2}", [3, 1])
+output0 = Output("output0", "TENSOR_QUANT8_ASYMM", "{7}, 2.3, 0")
+
+model = Model().IntroducedIn("V1_1").Operation("PAD", input0, paddings).To(output0)
+
+Example({
+ input0: [1, 2, 3],
+ output0: [0, 0, 0, 1, 2, 3, 0],
+})
diff --git a/tests/nnapi/specs/skip/V1_2/pad_quant8.mod.py b/tests/nnapi/specs/skip/V1_2/pad_quant8.mod.py
new file mode 100644
index 000000000..9257f6972
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/pad_quant8.mod.py
@@ -0,0 +1,33 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+input0 = Input("input0", "TENSOR_QUANT8_ASYMM", "{1, 2, 3, 1}, 2.3, 0")
+paddings = Parameter("paddings", "TENSOR_INT32", "{4, 2}", [0, 0,
+ 0, 2,
+ 1, 3,
+ 0, 0])
+output0 = Output("output0", "TENSOR_QUANT8_ASYMM", "{1, 4, 7, 1}, 2.3, 0")
+
+model = Model().IntroducedIn("V1_1").Operation("PAD", input0, paddings).To(output0)
+
+Example({
+ input0: [1, 2, 3,
+ 4, 5, 6],
+ output0: [0, 1, 2, 3, 0, 0, 0,
+ 0, 4, 5, 6, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0],
+})
diff --git a/tests/nnapi/specs/skip/V1_2/pad_quant8_nonzero.mod.py b/tests/nnapi/specs/skip/V1_2/pad_quant8_nonzero.mod.py
new file mode 100644
index 000000000..75e7f7b60
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/pad_quant8_nonzero.mod.py
@@ -0,0 +1,36 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Quantized PAD with non-zero zeroPoint is supported since 1.2.
+# See http://b/132112227.
+
+input0 = Input("input0", "TENSOR_QUANT8_ASYMM", "{1, 2, 3, 1}, 2.3, 9")
+paddings = Parameter("paddings", "TENSOR_INT32", "{4, 2}", [0, 0,
+ 0, 2,
+ 1, 3,
+ 0, 0])
+output0 = Output("output0", "TENSOR_QUANT8_ASYMM", "{1, 4, 7, 1}, 2.3, 9")
+
+model = Model().Operation("PAD", input0, paddings).To(output0)
+
+Example({
+ input0: [1, 2, 3,
+ 4, 5, 6],
+ output0: [9, 1, 2, 3, 9, 9, 9,
+ 9, 4, 5, 6, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9],
+})
diff --git a/tests/nnapi/specs/skip/V1_2/pow.mod.py b/tests/nnapi/specs/skip/V1_2/pow.mod.py
new file mode 100644
index 000000000..2d174ed3d
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/pow.mod.py
@@ -0,0 +1,41 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+base = Input("base", "TENSOR_FLOAT32", "{2, 1}")
+
+exponents = [Input("exponent", "TENSOR_FLOAT32", "{1}"),
+ Input("exponent", "TENSOR_FLOAT32", "{1, 2}"),
+ Input("exponent", "TENSOR_FLOAT32", "{3, 1, 2}")]
+
+outputs = [Output("output", "TENSOR_FLOAT32", "{2, 1}"),
+ Output("output", "TENSOR_FLOAT32", "{2, 2}"),
+ Output("output", "TENSOR_FLOAT32", "{3, 2, 2}")]
+
+base_data = [2., 3.]
+exponents_data = [[2.],
+ [2., 3.],
+ [0., 0.5, 1., 2., 3., 4.]]
+
+outputs_data = [[4., 9.],
+ [4., 8., 9., 27.],
+ [1., 2 ** 0.5, 1., 3 ** 0.5, 2., 4., 3., 9., 8., 16., 27., 81.]]
+
+for exponent, output, exponent_data, output_data in zip(exponents, outputs, exponents_data, outputs_data):
+ model = Model().Operation("POW", base, exponent).To(output)
+ Example({
+ base: base_data,
+ exponent: exponent_data,
+ output: output_data
+ }, model=model).AddVariations("relaxed", "float16")
diff --git a/tests/nnapi/specs/skip/V1_2/quantized_lstm.mod.py b/tests/nnapi/specs/skip/V1_2/quantized_lstm.mod.py
new file mode 100644
index 000000000..5fd4c7a84
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/quantized_lstm.mod.py
@@ -0,0 +1,199 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License. #
+
+# LSTM Test: No Cifg, No Peephole, No Projection, and No Clipping.
+
+model = Model()
+
+n_batch = 2
+n_input = 2
+n_cell = 4
+n_output = n_cell
+
+input_ = Input("input", ("TENSOR_QUANT8_ASYMM", (n_batch, n_input), 1 / 128, 128))
+
+weights_scale = 0.00408021
+weights_zero_point = 100
+
+input_to_input_weights = Input("inputToInputWeights", ("TENSOR_QUANT8_ASYMM", (n_output, n_input), weights_scale, weights_zero_point))
+input_to_forget_weights = Input("inputToForgetWeights", ("TENSOR_QUANT8_ASYMM", (n_output, n_input), weights_scale, weights_zero_point))
+input_to_cell_weights = Input("inputToCellWeights", ("TENSOR_QUANT8_ASYMM", (n_output, n_input), weights_scale, weights_zero_point))
+input_to_output_weights = Input("inputToOutputWeights", ("TENSOR_QUANT8_ASYMM", (n_output, n_input), weights_scale, weights_zero_point))
+
+recurrent_to_input_weights = Input("recurrentToInputWeights", ("TENSOR_QUANT8_ASYMM", (n_output, n_output), weights_scale, weights_zero_point))
+recurrent_to_forget_weights = Input("recurrentToForgetWeights", ("TENSOR_QUANT8_ASYMM", (n_output, n_output), weights_scale, weights_zero_point))
+recurrent_to_cell_weights = Input("recurrentToCellWeights", ("TENSOR_QUANT8_ASYMM", (n_output, n_output), weights_scale, weights_zero_point))
+recurrent_to_output_weights = Input("recurrentToOutputWeights", ("TENSOR_QUANT8_ASYMM", (n_output, n_output), weights_scale, weights_zero_point))
+
+input_gate_bias = Input("inputGateBias", ("TENSOR_INT32", (n_output,), weights_scale / 128., 0))
+forget_gate_bias = Input("forgetGateBias", ("TENSOR_INT32", (n_output,), weights_scale / 128., 0))
+cell_gate_bias = Input("cellGateBias", ("TENSOR_INT32", (n_output,), weights_scale / 128., 0))
+output_gate_bias = Input("outputGateBias", ("TENSOR_INT32", (n_output,), weights_scale / 128., 0))
+
+prev_cell_state = Input("prevCellState", ("TENSOR_QUANT16_SYMM", (n_batch, n_cell), 1 / 2048, 0))
+prev_output = Input("prevOutput", ("TENSOR_QUANT8_ASYMM", (n_batch, n_output), 1 / 128, 128))
+
+cell_state_out = Output("cellStateOut", ("TENSOR_QUANT16_SYMM", (n_batch, n_cell), 1 / 2048, 0))
+output = Output("output", ("TENSOR_QUANT8_ASYMM", (n_batch, n_output), 1 / 128, 128))
+
+
+model = model.Operation("QUANTIZED_16BIT_LSTM",
+ input_,
+ input_to_input_weights,
+ input_to_forget_weights,
+ input_to_cell_weights,
+ input_to_output_weights,
+ recurrent_to_input_weights,
+ recurrent_to_forget_weights,
+ recurrent_to_cell_weights,
+ recurrent_to_output_weights,
+ input_gate_bias,
+ forget_gate_bias,
+ cell_gate_bias,
+ output_gate_bias,
+ prev_cell_state,
+ prev_output
+).To([cell_state_out, output])
+
+input_dict = {
+ input_: [166, 179, 50, 150],
+ input_to_input_weights: [146, 250, 235, 171, 10, 218, 171, 108],
+ input_to_forget_weights: [24, 50, 132, 179, 158, 110, 3, 169],
+ input_to_cell_weights: [133, 34, 29, 49, 206, 109, 54, 183],
+ input_to_output_weights: [195, 187, 11, 99, 109, 10, 218, 48],
+ recurrent_to_input_weights: [254, 206, 77, 168, 71, 20, 215, 6, 223, 7, 118, 225, 59, 130, 174, 26],
+ recurrent_to_forget_weights: [137, 240, 103, 52, 68, 51, 237, 112, 0, 220, 89, 23, 69, 4, 207, 253],
+ recurrent_to_cell_weights: [172, 60, 205, 65, 14, 0, 140, 168, 240, 223, 133, 56, 142, 64, 246, 216],
+ recurrent_to_output_weights: [106, 214, 67, 23, 59, 158, 45, 3, 119, 132, 49, 205, 129, 218, 11, 98],
+ input_gate_bias: [-7876, 13488, -726, 32839],
+ forget_gate_bias: [9206, -46884, -11693, -38724],
+ cell_gate_bias: [39481, 48624, 48976, -21419],
+ output_gate_bias: [-58999, -17050, -41852, -40538],
+ prev_cell_state: [876, 1034, 955, -909, 761, 1029, 796, -1036],
+ prev_output: [136, 150, 140, 115, 135, 152, 138, 112],
+}
+
+output_dict = {
+ cell_state_out: [1485, 1177, 1373, -1023, 1019, 1355, 1097, -1235],
+ output: [140, 151, 146, 112, 136, 156, 142, 112]
+}
+Example((input_dict, output_dict), model=model).AddVariations("relaxed")
+
+
+# TEST 2: same as the first one but only the first batch is tested and weights
+# are compile time constants
+model = Model()
+
+n_batch = 1
+n_input = 2
+n_cell = 4
+n_output = n_cell
+
+input_ = Input("input",
+ ("TENSOR_QUANT8_ASYMM", (n_batch, n_input), 1 / 128, 128))
+
+weights_scale = 0.00408021
+weights_zero_point = 100
+
+input_to_input_weights = Parameter(
+ "inputToInputWeights",
+ ("TENSOR_QUANT8_ASYMM",
+ (n_output, n_input), weights_scale, weights_zero_point),
+ [146, 250, 235, 171, 10, 218, 171, 108])
+input_to_forget_weights = Parameter(
+ "inputToForgetWeights",
+ ("TENSOR_QUANT8_ASYMM",
+ (n_output, n_input), weights_scale, weights_zero_point),
+ [24, 50, 132, 179, 158, 110, 3, 169])
+input_to_cell_weights = Parameter(
+ "inputToCellWeights",
+ ("TENSOR_QUANT8_ASYMM",
+ (n_output, n_input), weights_scale, weights_zero_point),
+ [133, 34, 29, 49, 206, 109, 54, 183])
+input_to_output_weights = Parameter(
+ "inputToOutputWeights",
+ ("TENSOR_QUANT8_ASYMM",
+ (n_output, n_input), weights_scale, weights_zero_point),
+ [195, 187, 11, 99, 109, 10, 218, 48])
+
+recurrent_to_input_weights = Parameter(
+ "recurrentToInputWeights",
+ ("TENSOR_QUANT8_ASYMM",
+ (n_output, n_output), weights_scale, weights_zero_point),
+ [254, 206, 77, 168, 71, 20, 215, 6, 223, 7, 118, 225, 59, 130, 174, 26])
+recurrent_to_forget_weights = Parameter(
+ "recurrentToForgetWeights",
+ ("TENSOR_QUANT8_ASYMM",
+ (n_output, n_output), weights_scale, weights_zero_point),
+ [137, 240, 103, 52, 68, 51, 237, 112, 0, 220, 89, 23, 69, 4, 207, 253])
+recurrent_to_cell_weights = Parameter(
+ "recurrentToCellWeights",
+ ("TENSOR_QUANT8_ASYMM",
+ (n_output, n_output), weights_scale, weights_zero_point),
+ [172, 60, 205, 65, 14, 0, 140, 168, 240, 223, 133, 56, 142, 64, 246, 216])
+recurrent_to_output_weights = Parameter(
+ "recurrentToOutputWeights",
+ ("TENSOR_QUANT8_ASYMM",
+ (n_output, n_output), weights_scale, weights_zero_point),
+ [106, 214, 67, 23, 59, 158, 45, 3, 119, 132, 49, 205, 129, 218, 11, 98])
+
+input_gate_bias = Parameter("inputGateBias",
+ ("TENSOR_INT32",
+ (n_output,), weights_scale / 128., 0),
+ [-7876, 13488, -726, 32839])
+forget_gate_bias = Parameter("forgetGateBias",
+ ("TENSOR_INT32",
+ (n_output,), weights_scale / 128., 0),
+ [9206, -46884, -11693, -38724])
+cell_gate_bias = Parameter("cellGateBias",
+ ("TENSOR_INT32",
+ (n_output,), weights_scale / 128., 0),
+ [39481, 48624, 48976, -21419])
+output_gate_bias = Parameter("outputGateBias",
+ ("TENSOR_INT32",
+ (n_output,), weights_scale / 128., 0),
+ [-58999, -17050, -41852, -40538])
+
+prev_cell_state = Input("prevCellState",
+ ("TENSOR_QUANT16_SYMM", (n_batch, n_cell), 1 / 2048, 0))
+prev_output = Input("prevOutput",
+ ("TENSOR_QUANT8_ASYMM", (n_batch, n_output), 1 / 128, 128))
+
+cell_state_out = Output("cellStateOut",
+ ("TENSOR_QUANT16_SYMM", (n_batch, n_cell), 1 / 2048, 0))
+output = Output("output",
+ ("TENSOR_QUANT8_ASYMM", (n_batch, n_output), 1 / 128, 128))
+
+model = model.Operation("QUANTIZED_16BIT_LSTM", input_, input_to_input_weights,
+ input_to_forget_weights, input_to_cell_weights,
+ input_to_output_weights, recurrent_to_input_weights,
+ recurrent_to_forget_weights, recurrent_to_cell_weights,
+ recurrent_to_output_weights, input_gate_bias,
+ forget_gate_bias, cell_gate_bias, output_gate_bias,
+ prev_cell_state,
+ prev_output).To([cell_state_out, output])
+
+input_dict = {
+ input_: [166, 179],
+ prev_cell_state: [876, 1034, 955, -909],
+ prev_output: [136, 150, 140, 115],
+}
+
+output_dict = {
+ cell_state_out: [1485, 1177, 1373, -1023],
+ output: [140, 151, 146, 112]
+}
+Example((input_dict, output_dict), model=model,
+ name="constant_weights").AddVariations("relaxed")
diff --git a/tests/nnapi/specs/skip/V1_2/random_multinomial.mod.py b/tests/nnapi/specs/skip/V1_2/random_multinomial.mod.py
new file mode 100644
index 000000000..bea061723
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/random_multinomial.mod.py
@@ -0,0 +1,285 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+input0 = Input("input0", "TENSOR_FLOAT32", "{1, 1024}")
+sample_count = Int32Scalar("sample_count", 128)
+seeds = Parameter("seeds", "TENSOR_INT32", "{2}", [37, 42])
+output0 = IgnoredOutput("output", "TENSOR_INT32", "{1, 128}")
+
+model = Model().Operation("RANDOM_MULTINOMIAL", input0, sample_count, seeds).To(output0)
+
+Example(({
+ input0: [
+ 0.18163621, 0.33273480, 0.50951556, 0.08877703,
+ 0.51185218, 0.16734240, 0.63870587, 0.56538613,
+ 0.96808477, 0.56056615, 0.02934992, 0.03258404,
+ 0.72106282, 0.54496657, 0.22183018, 0.14761066,
+ 0.38261428, 0.73637053, 0.78623964, 0.00538754,
+ 0.17758578, 0.21533023, 0.83887367, 0.71035332,
+ 0.28841254, 0.24017199, 0.11599192, 0.55738622,
+ 0.35368502, 0.09133554, 0.81038602, 0.03550774,
+ 0.88201054, 0.07141711, 0.20562562, 0.99982140,
+ 0.36866199, 0.84549652, 0.68196711, 0.60800431,
+ 0.04897644, 0.79349817, 0.34294643, 0.31649012,
+ 0.61759858, 0.67423009, 0.14606593, 0.82548304,
+ 0.14200278, 0.58680437, 0.41861224, 0.86453261,
+ 0.80694849, 0.23185477, 0.81298628, 0.14150890,
+ 0.16687062, 0.03645446, 0.50345389, 0.82756624,
+ 0.84377461, 0.46665451, 0.91611352, 0.29161655,
+ 0.86669246, 0.21630808, 0.98051105, 0.71589220,
+ 0.54117114, 0.37597655, 0.61811207, 0.39746145,
+ 0.19850883, 0.10498004, 0.45398218, 0.84427816,
+ 0.44244307, 0.97186493, 0.02943687, 0.05995579,
+ 0.91047162, 0.84049659, 0.29692092, 0.37859579,
+ 0.68067858, 0.53199727, 0.30587859, 0.96904311,
+ 0.53895138, 0.80350520, 0.56936886, 0.90789923,
+ 0.48829865, 0.74295622, 0.07730283, 0.35745998,
+ 0.63879813, 0.88581710, 0.78119555, 0.34389121,
+ 0.00664631, 0.43921788, 0.38457199, 0.95430791,
+ 0.92432083, 0.29473732, 0.25805162, 0.94078243,
+ 0.63156303, 0.49350546, 0.60020588, 0.16993110,
+ 0.78426143, 0.31030305, 0.74027296, 0.86801874,
+ 0.71064432, 0.98042120, 0.26738505, 0.22304029,
+ 0.97474880, 0.93888746, 0.02851034, 0.22468271,
+ 0.67886092, 0.79198019, 0.56653174, 0.17544579,
+ 0.89579936, 0.96279060, 0.04028579, 0.58690500,
+ 0.72510547, 0.60705131, 0.43325570, 0.15888959,
+ 0.34523460, 0.56390766, 0.31160624, 0.14503308,
+ 0.19243339, 0.26307077, 0.53711017, 0.90120554,
+ 0.20798102, 0.44519176, 0.83232068, 0.82690943,
+ 0.23728192, 0.27897126, 0.24336233, 0.56491850,
+ 0.80009130, 0.91884854, 0.00823675, 0.30183011,
+ 0.64548693, 0.06524323, 0.02960910, 0.10885612,
+ 0.93308847, 0.01737334, 0.87532111, 0.64499222,
+ 0.80151762, 0.68087718, 0.48854077, 0.33778072,
+ 0.89418992, 0.00524248, 0.49021969, 0.42403950,
+ 0.74438303, 0.93005140, 0.98648675, 0.06316910,
+ 0.77617813, 0.77113286, 0.51341796, 0.49668114,
+ 0.85817043, 0.12039487, 0.69007245, 0.63516463,
+ 0.43368987, 0.03034840, 0.75404114, 0.78141053,
+ 0.23417318, 0.07462540, 0.02689441, 0.66631840,
+ 0.86519194, 0.91861606, 0.10818770, 0.42462775,
+ 0.26566337, 0.68942528, 0.27363712, 0.54874752,
+ 0.58072208, 0.18649499, 0.79037057, 0.14188329,
+ 0.29430633, 0.91596697, 0.40646783, 0.77726510,
+ 0.75446749, 0.01020716, 0.20875567, 0.73854318,
+ 0.73204509, 0.02641734, 0.15423192, 0.97863180,
+ 0.14997906, 0.56356818, 0.71459404, 0.74379692,
+ 0.40063276, 0.05400237, 0.51403009, 0.27387991,
+ 0.68738814, 0.44641846, 0.38901113, 0.52327729,
+ 0.97282648, 0.43078061, 0.66980505, 0.21611701,
+ 0.78165645, 0.01494616, 0.14573566, 0.89647321,
+ 0.27992757, 0.93745905, 0.95605115, 0.14323041,
+ 0.93223624, 0.13351544, 0.48690382, 0.48056268,
+ 0.58399360, 0.38147627, 0.57308770, 0.88455851,
+ 0.61266891, 0.88541185, 0.88516824, 0.53789164,
+ 0.04956664, 0.11410213, 0.47340589, 0.30371802,
+ 0.68811040, 0.60006376, 0.98732277, 0.11725557,
+ 0.91365836, 0.49581686, 0.43231324, 0.88435984,
+ 0.07224436, 0.35188695, 0.74323035, 0.20613255,
+ 0.80579434, 0.19316965, 0.05335943, 0.77709435,
+ 0.20689616, 0.85582175, 0.14171426, 0.05923329,
+ 0.57708418, 0.58697364, 0.50752432, 0.88097219,
+ 0.93936580, 0.78270476, 0.07068334, 0.80061511,
+ 0.66090995, 0.40403670, 0.37639738, 0.31872702,
+ 0.10560548, 0.32295307, 0.71104409, 0.95057601,
+ 0.62568311, 0.82158469, 0.87822325, 0.34216374,
+ 0.42095343, 0.49155748, 0.04824981, 0.93577404,
+ 0.02006045, 0.52143329, 0.35735855, 0.22339355,
+ 0.21191254, 0.25463790, 0.00673706, 0.02776729,
+ 0.29688424, 0.28939652, 0.93429233, 0.54223604,
+ 0.24985107, 0.83891685, 0.16851543, 0.09681473,
+ 0.12913905, 0.41894106, 0.88119316, 0.56941667,
+ 0.98743163, 0.24583594, 0.26439969, 0.98900542,
+ 0.59552390, 0.10598290, 0.19145128, 0.82124177,
+ 0.16724271, 0.44599363, 0.59829451, 0.72605966,
+ 0.09989227, 0.82615394, 0.58065050, 0.11331605,
+ 0.28981000, 0.84587381, 0.07083202, 0.14833035,
+ 0.65867223, 0.59021865, 0.58735805, 0.59678862,
+ 0.70551718, 0.19207017, 0.73940653, 0.22106109,
+ 0.33934016, 0.77465068, 0.22509303, 0.12357820,
+ 0.94794034, 0.97312112, 0.73280797, 0.59965500,
+ 0.18524258, 0.11258899, 0.55591609, 0.46436632,
+ 0.21880912, 0.19875870, 0.38389680, 0.14392435,
+ 0.70625112, 0.93164951, 0.79722014, 0.48610288,
+ 0.72093904, 0.72137738, 0.27261254, 0.76680176,
+ 0.11663760, 0.18397960, 0.03195002, 0.82115076,
+ 0.73068862, 0.28732616, 0.61875003, 0.88276158,
+ 0.02675303, 0.93052378, 0.45810254, 0.80719106,
+ 0.78505935, 0.15465711, 0.81852908, 0.01035686,
+ 0.63759970, 0.57701143, 0.61182946, 0.81902549,
+ 0.64849716, 0.63775381, 0.33944463, 0.01088021,
+ 0.81885416, 0.06320171, 0.09684302, 0.04174445,
+ 0.93106984, 0.07546183, 0.24547596, 0.93751226,
+ 0.74065679, 0.54327627, 0.17024274, 0.94692311,
+ 0.09290775, 0.53920561, 0.72992514, 0.62150301,
+ 0.40669172, 0.06787872, 0.72004642, 0.39490120,
+ 0.98234857, 0.94828936, 0.74268101, 0.73370598,
+ 0.68092173, 0.37850705, 0.61021436, 0.26261629,
+ 0.52934261, 0.06334639, 0.76891534, 0.32289764,
+ 0.92373486, 0.42402109, 0.41378012, 0.10279785,
+ 0.67314394, 0.48186146, 0.50675380, 0.86822955,
+ 0.82499410, 0.15719373, 0.29668140, 0.92028725,
+ 0.95904319, 0.25983566, 0.75594963, 0.46969604,
+ 0.82638328, 0.56783068, 0.96105872, 0.43980714,
+ 0.98960801, 0.70075472, 0.15540090, 0.57160886,
+ 0.88793223, 0.57795871, 0.56514445, 0.65909586,
+ 0.69458952, 0.42359339, 0.09707922, 0.04027207,
+ 0.79641460, 0.55542973, 0.57159987, 0.41985797,
+ 0.61658945, 0.03278444, 0.63403447, 0.09048499,
+ 0.84939516, 0.04907535, 0.57921900, 0.96982613,
+ 0.96066375, 0.60938927, 0.98017393, 0.47613619,
+ 0.04483615, 0.35458106, 0.77925608, 0.09762995,
+ 0.17605426, 0.65475580, 0.49400027, 0.74430323,
+ 0.66834557, 0.55180554, 0.56149147, 0.17780739,
+ 0.45887371, 0.87113438, 0.34729137, 0.03621890,
+ 0.02752394, 0.58748568, 0.94949200, 0.01234387,
+ 0.22512224, 0.20421475, 0.30241591, 0.44287630,
+ 0.92693591, 0.85988589, 0.58523018, 0.75493725,
+ 0.67976038, 0.90210808, 0.16193264, 0.06854948,
+ 0.78065400, 0.56588785, 0.06676102, 0.42662219,
+ 0.12653993, 0.18016388, 0.74432183, 0.80186216,
+ 0.61353588, 0.30734192, 0.60950496, 0.73033964,
+ 0.45933113, 0.95340344, 0.95873238, 0.22091518,
+ 0.41664395, 0.89282994, 0.12649949, 0.94538995,
+ 0.38797159, 0.21194355, 0.93976699, 0.13237574,
+ 0.17614998, 0.63638084, 0.74515463, 0.15821088,
+ 0.26233025, 0.97151094, 0.84639784, 0.96858076,
+ 0.12372874, 0.00061914, 0.47989416, 0.46585169,
+ 0.93181998, 0.24168970, 0.51493176, 0.84527806,
+ 0.71583991, 0.47779283, 0.74898920, 0.14752760,
+ 0.50176804, 0.23792488, 0.36169898, 0.07560302,
+ 0.38701148, 0.76247368, 0.20033977, 0.48230152,
+ 0.30565115, 0.22688719, 0.31492229, 0.73908020,
+ 0.63944999, 0.63692535, 0.36776983, 0.99915443,
+ 0.37796898, 0.57700454, 0.19073928, 0.35272975,
+ 0.47523137, 0.86415822, 0.14259931, 0.86897617,
+ 0.87083832, 0.09469065, 0.01176569, 0.66519020,
+ 0.53688186, 0.57907948, 0.92104488, 0.53224148,
+ 0.94279853, 0.66933028, 0.76264173, 0.00846143,
+ 0.17787411, 0.27029984, 0.94069575, 0.97091936,
+ 0.34419143, 0.80513430, 0.97102144, 0.56356255,
+ 0.96426302, 0.36658938, 0.83537716, 0.99772803,
+ 0.44309853, 0.82985523, 0.10590215, 0.26874156,
+ 0.99451632, 0.40830606, 0.55523556, 0.66017859,
+ 0.55543373, 0.33966445, 0.68147221, 0.15953739,
+ 0.70994904, 0.34768995, 0.26252758, 0.61505059,
+ 0.73006930, 0.19949312, 0.20781777, 0.56998090,
+ 0.08808883, 0.90775056, 0.64990724, 0.85463078,
+ 0.57091962, 0.37328744, 0.94231607, 0.48375077,
+ 0.51243150, 0.08293697, 0.84244579, 0.71510894,
+ 0.16874849, 0.98483478, 0.79377902, 0.71630545,
+ 0.02827830, 0.05768694, 0.19752560, 0.91946121,
+ 0.75047528, 0.77643189, 0.55484145, 0.09883586,
+ 0.95207175, 0.61484315, 0.65478232, 0.89697994,
+ 0.81336748, 0.68487048, 0.30860410, 0.69941932,
+ 0.81105303, 0.30717890, 0.98674485, 0.61447425,
+ 0.69822731, 0.75686959, 0.52146685, 0.40302938,
+ 0.26923451, 0.51424179, 0.65125432, 0.35501958,
+ 0.51128504, 0.62502966, 0.93869369, 0.04485744,
+ 0.46558787, 0.36337906, 0.06694895, 0.56433501,
+ 0.11381991, 0.25193077, 0.98502529, 0.05704914,
+ 0.42741233, 0.94695681, 0.34237149, 0.21235143,
+ 0.38026753, 0.87707973, 0.19586441, 0.12177076,
+ 0.50809963, 0.75425738, 0.73740277, 0.95442052,
+ 0.30532292, 0.28454304, 0.11094620, 0.28705514,
+ 0.60379470, 0.82317726, 0.68476054, 0.19807496,
+ 0.62396085, 0.93379787, 0.54316971, 0.63767898,
+ 0.48464992, 0.62082514, 0.88571107, 0.53376650,
+ 0.33199652, 0.73353233, 0.40077416, 0.74618470,
+ 0.59866563, 0.21305606, 0.12555324, 0.99799893,
+ 0.55033241, 0.03249085, 0.26086445, 0.98521994,
+ 0.99166855, 0.53523486, 0.69955169, 0.04899369,
+ 0.23795922, 0.47763494, 0.76727401, 0.33971988,
+ 0.13467868, 0.61420180, 0.15563938, 0.55256845,
+ 0.26988188, 0.13261020, 0.27974280, 0.11176598,
+ 0.32525126, 0.88784146, 0.26752581, 0.03067154,
+ 0.60569129, 0.02002373, 0.48760334, 0.62073825,
+ 0.21474893, 0.76444057, 0.55800774, 0.73889036,
+ 0.29518644, 0.94996021, 0.56444047, 0.47058584,
+ 0.43366718, 0.85572272, 0.90300854, 0.59503714,
+ 0.65801756, 0.56853684, 0.58558048, 0.72015027,
+ 0.17025921, 0.30271306, 0.53116499, 0.97653227,
+ 0.51964288, 0.52717848, 0.05840294, 0.52634715,
+ 0.29470665, 0.99334131, 0.65047692, 0.47785087,
+ 0.90506666, 0.67078885, 0.90046675, 0.32475029,
+ 0.34752749, 0.91294030, 0.03475684, 0.26864050,
+ 0.50324954, 0.46955497, 0.53413073, 0.84181129,
+ 0.36085900, 0.59277558, 0.88209431, 0.25836241,
+ 0.70103928, 0.02857411, 0.36042473, 0.56857452,
+ 0.45256708, 0.61420176, 0.18950828, 0.57047725,
+ 0.27502452, 0.76338308, 0.54628702, 0.97671683,
+ 0.91241649, 0.87801976, 0.90878537, 0.53572628,
+ 0.28748983, 0.38151063, 0.53979463, 0.02287989,
+ 0.16685784, 0.29065976, 0.90010275, 0.22090120,
+ 0.00914414, 0.02322095, 0.25122691, 0.39070380,
+ 0.52512120, 0.72430885, 0.72957361, 0.97871460,
+ 0.93695260, 0.21384469, 0.68111323, 0.93152877,
+ 0.38167531, 0.64671057, 0.99357667, 0.81439462,
+ 0.18172161, 0.34157997, 0.14163516, 0.97008374,
+ 0.00017817, 0.17492667, 0.89017036, 0.10573359,
+ 0.01900931, 0.16774126, 0.79037446, 0.84010306,
+ 0.47519226, 0.00439313, 0.18381522, 0.84613238,
+ 0.75610369, 0.38004291, 0.12868142, 0.35629285,
+ 0.80828631, 0.28274608, 0.44106362, 0.73265737,
+ 0.61325191, 0.24187840, 0.97955674, 0.93618438,
+ 0.96051047, 0.23422243, 0.97520706, 0.82584169,
+ 0.88025727, 0.35319169, 0.11022647, 0.48966716,
+ 0.33862352, 0.46629508, 0.35234246, 0.99066635,
+ 0.99262152, 0.00977917, 0.61749715, 0.22281960,
+ 0.71707526, 0.87362648, 0.91055938, 0.47073659,
+ 0.88101976, 0.21494194, 0.33205552, 0.54351819,
+ 0.55835019, 0.01768484, 0.02116836, 0.70469912,
+ 0.76899386, 0.64696939, 0.40084197, 0.59803212,
+ 0.52970593, 0.89719532, 0.87168575, 0.35151884,
+ 0.56087250, 0.38735172, 0.10494279, 0.41009167,
+ 0.74723117, 0.32829241, 0.92508072, 0.08944341,
+ 0.61823771, 0.95199810, 0.38566778, 0.45387474,
+ 0.14874216, 0.06920534, 0.16466161, 0.57534195,
+ 0.72012502, 0.22840780, 0.64040413, 0.72252710,
+ 0.46071354, 0.95938459, 0.15282101, 0.25416612,
+ 0.27624054, 0.40343682, 0.21556083, 0.10719734,
+ 0.01178395, 0.81544681, 0.61108854, 0.58873211,
+ 0.08313659, 0.31389776, 0.26683639, 0.17408690,
+ 0.19860426, 0.54852056, 0.45089482, 0.29739356,
+ 0.06490634, 0.94512628, 0.75476861, 0.79302202,
+ 0.91709407, 0.44093711, 0.42285809, 0.87353064,
+ 0.05154859, 0.05673061, 0.03360053, 0.47314265,
+ 0.14352713, 0.86919501, 0.89407749, 0.71384359,
+ 0.43505102, 0.76105734, 0.48072900, 0.26590561,
+ 0.23636561, 0.53526685, 0.43621137, 0.70461497,
+ 0.04695302, 0.29312615, 0.47657411, 0.47776949,
+ 0.67893515, 0.74761854, 0.19647090, 0.90858326,
+ 0.65050969, 0.78866488, 0.56645663, 0.28301728,
+ 0.21439215, 0.23534408, 0.99123621, 0.33798052,
+ 0.57132079, 0.13509136, 0.23913264, 0.98822790,
+ 0.12259069, 0.59413715, 0.98916346, 0.15804781,
+ 0.53868433, 0.82989573, 0.31032958, 0.52338512,
+ 0.43014882, 0.80809309, 0.58102790, 0.41232677,
+ 0.72325580, 0.15152519, 0.61332742, 0.69908457,
+ 0.68902723, 0.40867770, 0.56938072, 0.30977628,
+ 0.75155389, 0.77055871, 0.24496359, 0.00515177,
+ 0.68565391, 0.04478322, 0.74595021, 0.44720965,
+ 0.25587623, 0.42443591, 0.05974449, 0.20046287,
+ 0.30343490, 0.90622420, 0.64120083, 0.52238185,
+ 0.11133412, 0.43655075, 0.76620214, 0.36598683,
+ 0.67400905, 0.89241105, 0.69407209, 0.64427034,
+ 0.18430072, 0.92961135, 0.37992458, 0.41103806,
+ 0.99307206, 0.62659181, 0.44814843, 0.07694981,
+ ],
+}, {
+ output0: [],
+})).WithMultinomialDistributionTolerance(0.025)
diff --git a/tests/nnapi/specs/skip/V1_2/random_multinomial_float16.mod.py b/tests/nnapi/specs/skip/V1_2/random_multinomial_float16.mod.py
new file mode 100644
index 000000000..33d774ec7
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/random_multinomial_float16.mod.py
@@ -0,0 +1,285 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+input0 = Input("input0", "TENSOR_FLOAT16", "{1, 1024}")
+sample_count = Int32Scalar("sample_count", 128)
+seeds = Parameter("seeds", "TENSOR_INT32", "{2}", [37, 42])
+output0 = IgnoredOutput("output", "TENSOR_INT32", "{1, 128}")
+
+model = Model().Operation("RANDOM_MULTINOMIAL", input0, sample_count, seeds).To(output0)
+
+Example(({
+ input0: [
+ 0.18163621, 0.33273480, 0.50951556, 0.08877703,
+ 0.51185218, 0.16734240, 0.63870587, 0.56538613,
+ 0.96808477, 0.56056615, 0.02934992, 0.03258404,
+ 0.72106282, 0.54496657, 0.22183018, 0.14761066,
+ 0.38261428, 0.73637053, 0.78623964, 0.00538754,
+ 0.17758578, 0.21533023, 0.83887367, 0.71035332,
+ 0.28841254, 0.24017199, 0.11599192, 0.55738622,
+ 0.35368502, 0.09133554, 0.81038602, 0.03550774,
+ 0.88201054, 0.07141711, 0.20562562, 0.99982140,
+ 0.36866199, 0.84549652, 0.68196711, 0.60800431,
+ 0.04897644, 0.79349817, 0.34294643, 0.31649012,
+ 0.61759858, 0.67423009, 0.14606593, 0.82548304,
+ 0.14200278, 0.58680437, 0.41861224, 0.86453261,
+ 0.80694849, 0.23185477, 0.81298628, 0.14150890,
+ 0.16687062, 0.03645446, 0.50345389, 0.82756624,
+ 0.84377461, 0.46665451, 0.91611352, 0.29161655,
+ 0.86669246, 0.21630808, 0.98051105, 0.71589220,
+ 0.54117114, 0.37597655, 0.61811207, 0.39746145,
+ 0.19850883, 0.10498004, 0.45398218, 0.84427816,
+ 0.44244307, 0.97186493, 0.02943687, 0.05995579,
+ 0.91047162, 0.84049659, 0.29692092, 0.37859579,
+ 0.68067858, 0.53199727, 0.30587859, 0.96904311,
+ 0.53895138, 0.80350520, 0.56936886, 0.90789923,
+ 0.48829865, 0.74295622, 0.07730283, 0.35745998,
+ 0.63879813, 0.88581710, 0.78119555, 0.34389121,
+ 0.00664631, 0.43921788, 0.38457199, 0.95430791,
+ 0.92432083, 0.29473732, 0.25805162, 0.94078243,
+ 0.63156303, 0.49350546, 0.60020588, 0.16993110,
+ 0.78426143, 0.31030305, 0.74027296, 0.86801874,
+ 0.71064432, 0.98042120, 0.26738505, 0.22304029,
+ 0.97474880, 0.93888746, 0.02851034, 0.22468271,
+ 0.67886092, 0.79198019, 0.56653174, 0.17544579,
+ 0.89579936, 0.96279060, 0.04028579, 0.58690500,
+ 0.72510547, 0.60705131, 0.43325570, 0.15888959,
+ 0.34523460, 0.56390766, 0.31160624, 0.14503308,
+ 0.19243339, 0.26307077, 0.53711017, 0.90120554,
+ 0.20798102, 0.44519176, 0.83232068, 0.82690943,
+ 0.23728192, 0.27897126, 0.24336233, 0.56491850,
+ 0.80009130, 0.91884854, 0.00823675, 0.30183011,
+ 0.64548693, 0.06524323, 0.02960910, 0.10885612,
+ 0.93308847, 0.01737334, 0.87532111, 0.64499222,
+ 0.80151762, 0.68087718, 0.48854077, 0.33778072,
+ 0.89418992, 0.00524248, 0.49021969, 0.42403950,
+ 0.74438303, 0.93005140, 0.98648675, 0.06316910,
+ 0.77617813, 0.77113286, 0.51341796, 0.49668114,
+ 0.85817043, 0.12039487, 0.69007245, 0.63516463,
+ 0.43368987, 0.03034840, 0.75404114, 0.78141053,
+ 0.23417318, 0.07462540, 0.02689441, 0.66631840,
+ 0.86519194, 0.91861606, 0.10818770, 0.42462775,
+ 0.26566337, 0.68942528, 0.27363712, 0.54874752,
+ 0.58072208, 0.18649499, 0.79037057, 0.14188329,
+ 0.29430633, 0.91596697, 0.40646783, 0.77726510,
+ 0.75446749, 0.01020716, 0.20875567, 0.73854318,
+ 0.73204509, 0.02641734, 0.15423192, 0.97863180,
+ 0.14997906, 0.56356818, 0.71459404, 0.74379692,
+ 0.40063276, 0.05400237, 0.51403009, 0.27387991,
+ 0.68738814, 0.44641846, 0.38901113, 0.52327729,
+ 0.97282648, 0.43078061, 0.66980505, 0.21611701,
+ 0.78165645, 0.01494616, 0.14573566, 0.89647321,
+ 0.27992757, 0.93745905, 0.95605115, 0.14323041,
+ 0.93223624, 0.13351544, 0.48690382, 0.48056268,
+ 0.58399360, 0.38147627, 0.57308770, 0.88455851,
+ 0.61266891, 0.88541185, 0.88516824, 0.53789164,
+ 0.04956664, 0.11410213, 0.47340589, 0.30371802,
+ 0.68811040, 0.60006376, 0.98732277, 0.11725557,
+ 0.91365836, 0.49581686, 0.43231324, 0.88435984,
+ 0.07224436, 0.35188695, 0.74323035, 0.20613255,
+ 0.80579434, 0.19316965, 0.05335943, 0.77709435,
+ 0.20689616, 0.85582175, 0.14171426, 0.05923329,
+ 0.57708418, 0.58697364, 0.50752432, 0.88097219,
+ 0.93936580, 0.78270476, 0.07068334, 0.80061511,
+ 0.66090995, 0.40403670, 0.37639738, 0.31872702,
+ 0.10560548, 0.32295307, 0.71104409, 0.95057601,
+ 0.62568311, 0.82158469, 0.87822325, 0.34216374,
+ 0.42095343, 0.49155748, 0.04824981, 0.93577404,
+ 0.02006045, 0.52143329, 0.35735855, 0.22339355,
+ 0.21191254, 0.25463790, 0.00673706, 0.02776729,
+ 0.29688424, 0.28939652, 0.93429233, 0.54223604,
+ 0.24985107, 0.83891685, 0.16851543, 0.09681473,
+ 0.12913905, 0.41894106, 0.88119316, 0.56941667,
+ 0.98743163, 0.24583594, 0.26439969, 0.98900542,
+ 0.59552390, 0.10598290, 0.19145128, 0.82124177,
+ 0.16724271, 0.44599363, 0.59829451, 0.72605966,
+ 0.09989227, 0.82615394, 0.58065050, 0.11331605,
+ 0.28981000, 0.84587381, 0.07083202, 0.14833035,
+ 0.65867223, 0.59021865, 0.58735805, 0.59678862,
+ 0.70551718, 0.19207017, 0.73940653, 0.22106109,
+ 0.33934016, 0.77465068, 0.22509303, 0.12357820,
+ 0.94794034, 0.97312112, 0.73280797, 0.59965500,
+ 0.18524258, 0.11258899, 0.55591609, 0.46436632,
+ 0.21880912, 0.19875870, 0.38389680, 0.14392435,
+ 0.70625112, 0.93164951, 0.79722014, 0.48610288,
+ 0.72093904, 0.72137738, 0.27261254, 0.76680176,
+ 0.11663760, 0.18397960, 0.03195002, 0.82115076,
+ 0.73068862, 0.28732616, 0.61875003, 0.88276158,
+ 0.02675303, 0.93052378, 0.45810254, 0.80719106,
+ 0.78505935, 0.15465711, 0.81852908, 0.01035686,
+ 0.63759970, 0.57701143, 0.61182946, 0.81902549,
+ 0.64849716, 0.63775381, 0.33944463, 0.01088021,
+ 0.81885416, 0.06320171, 0.09684302, 0.04174445,
+ 0.93106984, 0.07546183, 0.24547596, 0.93751226,
+ 0.74065679, 0.54327627, 0.17024274, 0.94692311,
+ 0.09290775, 0.53920561, 0.72992514, 0.62150301,
+ 0.40669172, 0.06787872, 0.72004642, 0.39490120,
+ 0.98234857, 0.94828936, 0.74268101, 0.73370598,
+ 0.68092173, 0.37850705, 0.61021436, 0.26261629,
+ 0.52934261, 0.06334639, 0.76891534, 0.32289764,
+ 0.92373486, 0.42402109, 0.41378012, 0.10279785,
+ 0.67314394, 0.48186146, 0.50675380, 0.86822955,
+ 0.82499410, 0.15719373, 0.29668140, 0.92028725,
+ 0.95904319, 0.25983566, 0.75594963, 0.46969604,
+ 0.82638328, 0.56783068, 0.96105872, 0.43980714,
+ 0.98960801, 0.70075472, 0.15540090, 0.57160886,
+ 0.88793223, 0.57795871, 0.56514445, 0.65909586,
+ 0.69458952, 0.42359339, 0.09707922, 0.04027207,
+ 0.79641460, 0.55542973, 0.57159987, 0.41985797,
+ 0.61658945, 0.03278444, 0.63403447, 0.09048499,
+ 0.84939516, 0.04907535, 0.57921900, 0.96982613,
+ 0.96066375, 0.60938927, 0.98017393, 0.47613619,
+ 0.04483615, 0.35458106, 0.77925608, 0.09762995,
+ 0.17605426, 0.65475580, 0.49400027, 0.74430323,
+ 0.66834557, 0.55180554, 0.56149147, 0.17780739,
+ 0.45887371, 0.87113438, 0.34729137, 0.03621890,
+ 0.02752394, 0.58748568, 0.94949200, 0.01234387,
+ 0.22512224, 0.20421475, 0.30241591, 0.44287630,
+ 0.92693591, 0.85988589, 0.58523018, 0.75493725,
+ 0.67976038, 0.90210808, 0.16193264, 0.06854948,
+ 0.78065400, 0.56588785, 0.06676102, 0.42662219,
+ 0.12653993, 0.18016388, 0.74432183, 0.80186216,
+ 0.61353588, 0.30734192, 0.60950496, 0.73033964,
+ 0.45933113, 0.95340344, 0.95873238, 0.22091518,
+ 0.41664395, 0.89282994, 0.12649949, 0.94538995,
+ 0.38797159, 0.21194355, 0.93976699, 0.13237574,
+ 0.17614998, 0.63638084, 0.74515463, 0.15821088,
+ 0.26233025, 0.97151094, 0.84639784, 0.96858076,
+ 0.12372874, 0.00061914, 0.47989416, 0.46585169,
+ 0.93181998, 0.24168970, 0.51493176, 0.84527806,
+ 0.71583991, 0.47779283, 0.74898920, 0.14752760,
+ 0.50176804, 0.23792488, 0.36169898, 0.07560302,
+ 0.38701148, 0.76247368, 0.20033977, 0.48230152,
+ 0.30565115, 0.22688719, 0.31492229, 0.73908020,
+ 0.63944999, 0.63692535, 0.36776983, 0.99915443,
+ 0.37796898, 0.57700454, 0.19073928, 0.35272975,
+ 0.47523137, 0.86415822, 0.14259931, 0.86897617,
+ 0.87083832, 0.09469065, 0.01176569, 0.66519020,
+ 0.53688186, 0.57907948, 0.92104488, 0.53224148,
+ 0.94279853, 0.66933028, 0.76264173, 0.00846143,
+ 0.17787411, 0.27029984, 0.94069575, 0.97091936,
+ 0.34419143, 0.80513430, 0.97102144, 0.56356255,
+ 0.96426302, 0.36658938, 0.83537716, 0.99772803,
+ 0.44309853, 0.82985523, 0.10590215, 0.26874156,
+ 0.99451632, 0.40830606, 0.55523556, 0.66017859,
+ 0.55543373, 0.33966445, 0.68147221, 0.15953739,
+ 0.70994904, 0.34768995, 0.26252758, 0.61505059,
+ 0.73006930, 0.19949312, 0.20781777, 0.56998090,
+ 0.08808883, 0.90775056, 0.64990724, 0.85463078,
+ 0.57091962, 0.37328744, 0.94231607, 0.48375077,
+ 0.51243150, 0.08293697, 0.84244579, 0.71510894,
+ 0.16874849, 0.98483478, 0.79377902, 0.71630545,
+ 0.02827830, 0.05768694, 0.19752560, 0.91946121,
+ 0.75047528, 0.77643189, 0.55484145, 0.09883586,
+ 0.95207175, 0.61484315, 0.65478232, 0.89697994,
+ 0.81336748, 0.68487048, 0.30860410, 0.69941932,
+ 0.81105303, 0.30717890, 0.98674485, 0.61447425,
+ 0.69822731, 0.75686959, 0.52146685, 0.40302938,
+ 0.26923451, 0.51424179, 0.65125432, 0.35501958,
+ 0.51128504, 0.62502966, 0.93869369, 0.04485744,
+ 0.46558787, 0.36337906, 0.06694895, 0.56433501,
+ 0.11381991, 0.25193077, 0.98502529, 0.05704914,
+ 0.42741233, 0.94695681, 0.34237149, 0.21235143,
+ 0.38026753, 0.87707973, 0.19586441, 0.12177076,
+ 0.50809963, 0.75425738, 0.73740277, 0.95442052,
+ 0.30532292, 0.28454304, 0.11094620, 0.28705514,
+ 0.60379470, 0.82317726, 0.68476054, 0.19807496,
+ 0.62396085, 0.93379787, 0.54316971, 0.63767898,
+ 0.48464992, 0.62082514, 0.88571107, 0.53376650,
+ 0.33199652, 0.73353233, 0.40077416, 0.74618470,
+ 0.59866563, 0.21305606, 0.12555324, 0.99799893,
+ 0.55033241, 0.03249085, 0.26086445, 0.98521994,
+ 0.99166855, 0.53523486, 0.69955169, 0.04899369,
+ 0.23795922, 0.47763494, 0.76727401, 0.33971988,
+ 0.13467868, 0.61420180, 0.15563938, 0.55256845,
+ 0.26988188, 0.13261020, 0.27974280, 0.11176598,
+ 0.32525126, 0.88784146, 0.26752581, 0.03067154,
+ 0.60569129, 0.02002373, 0.48760334, 0.62073825,
+ 0.21474893, 0.76444057, 0.55800774, 0.73889036,
+ 0.29518644, 0.94996021, 0.56444047, 0.47058584,
+ 0.43366718, 0.85572272, 0.90300854, 0.59503714,
+ 0.65801756, 0.56853684, 0.58558048, 0.72015027,
+ 0.17025921, 0.30271306, 0.53116499, 0.97653227,
+ 0.51964288, 0.52717848, 0.05840294, 0.52634715,
+ 0.29470665, 0.99334131, 0.65047692, 0.47785087,
+ 0.90506666, 0.67078885, 0.90046675, 0.32475029,
+ 0.34752749, 0.91294030, 0.03475684, 0.26864050,
+ 0.50324954, 0.46955497, 0.53413073, 0.84181129,
+ 0.36085900, 0.59277558, 0.88209431, 0.25836241,
+ 0.70103928, 0.02857411, 0.36042473, 0.56857452,
+ 0.45256708, 0.61420176, 0.18950828, 0.57047725,
+ 0.27502452, 0.76338308, 0.54628702, 0.97671683,
+ 0.91241649, 0.87801976, 0.90878537, 0.53572628,
+ 0.28748983, 0.38151063, 0.53979463, 0.02287989,
+ 0.16685784, 0.29065976, 0.90010275, 0.22090120,
+ 0.00914414, 0.02322095, 0.25122691, 0.39070380,
+ 0.52512120, 0.72430885, 0.72957361, 0.97871460,
+ 0.93695260, 0.21384469, 0.68111323, 0.93152877,
+ 0.38167531, 0.64671057, 0.99357667, 0.81439462,
+ 0.18172161, 0.34157997, 0.14163516, 0.97008374,
+ 0.00017817, 0.17492667, 0.89017036, 0.10573359,
+ 0.01900931, 0.16774126, 0.79037446, 0.84010306,
+ 0.47519226, 0.00439313, 0.18381522, 0.84613238,
+ 0.75610369, 0.38004291, 0.12868142, 0.35629285,
+ 0.80828631, 0.28274608, 0.44106362, 0.73265737,
+ 0.61325191, 0.24187840, 0.97955674, 0.93618438,
+ 0.96051047, 0.23422243, 0.97520706, 0.82584169,
+ 0.88025727, 0.35319169, 0.11022647, 0.48966716,
+ 0.33862352, 0.46629508, 0.35234246, 0.99066635,
+ 0.99262152, 0.00977917, 0.61749715, 0.22281960,
+ 0.71707526, 0.87362648, 0.91055938, 0.47073659,
+ 0.88101976, 0.21494194, 0.33205552, 0.54351819,
+ 0.55835019, 0.01768484, 0.02116836, 0.70469912,
+ 0.76899386, 0.64696939, 0.40084197, 0.59803212,
+ 0.52970593, 0.89719532, 0.87168575, 0.35151884,
+ 0.56087250, 0.38735172, 0.10494279, 0.41009167,
+ 0.74723117, 0.32829241, 0.92508072, 0.08944341,
+ 0.61823771, 0.95199810, 0.38566778, 0.45387474,
+ 0.14874216, 0.06920534, 0.16466161, 0.57534195,
+ 0.72012502, 0.22840780, 0.64040413, 0.72252710,
+ 0.46071354, 0.95938459, 0.15282101, 0.25416612,
+ 0.27624054, 0.40343682, 0.21556083, 0.10719734,
+ 0.01178395, 0.81544681, 0.61108854, 0.58873211,
+ 0.08313659, 0.31389776, 0.26683639, 0.17408690,
+ 0.19860426, 0.54852056, 0.45089482, 0.29739356,
+ 0.06490634, 0.94512628, 0.75476861, 0.79302202,
+ 0.91709407, 0.44093711, 0.42285809, 0.87353064,
+ 0.05154859, 0.05673061, 0.03360053, 0.47314265,
+ 0.14352713, 0.86919501, 0.89407749, 0.71384359,
+ 0.43505102, 0.76105734, 0.48072900, 0.26590561,
+ 0.23636561, 0.53526685, 0.43621137, 0.70461497,
+ 0.04695302, 0.29312615, 0.47657411, 0.47776949,
+ 0.67893515, 0.74761854, 0.19647090, 0.90858326,
+ 0.65050969, 0.78866488, 0.56645663, 0.28301728,
+ 0.21439215, 0.23534408, 0.99123621, 0.33798052,
+ 0.57132079, 0.13509136, 0.23913264, 0.98822790,
+ 0.12259069, 0.59413715, 0.98916346, 0.15804781,
+ 0.53868433, 0.82989573, 0.31032958, 0.52338512,
+ 0.43014882, 0.80809309, 0.58102790, 0.41232677,
+ 0.72325580, 0.15152519, 0.61332742, 0.69908457,
+ 0.68902723, 0.40867770, 0.56938072, 0.30977628,
+ 0.75155389, 0.77055871, 0.24496359, 0.00515177,
+ 0.68565391, 0.04478322, 0.74595021, 0.44720965,
+ 0.25587623, 0.42443591, 0.05974449, 0.20046287,
+ 0.30343490, 0.90622420, 0.64120083, 0.52238185,
+ 0.11133412, 0.43655075, 0.76620214, 0.36598683,
+ 0.67400905, 0.89241105, 0.69407209, 0.64427034,
+ 0.18430072, 0.92961135, 0.37992458, 0.41103806,
+ 0.99307206, 0.62659181, 0.44814843, 0.07694981,
+ ],
+}, {
+ output0: [],
+})).WithMultinomialDistributionTolerance(0.025)
diff --git a/tests/nnapi/specs/skip/V1_2/relu1_v1_2.mod.py b/tests/nnapi/specs/skip/V1_2/relu1_v1_2.mod.py
new file mode 100644
index 000000000..9b69ea6cc
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/relu1_v1_2.mod.py
@@ -0,0 +1,88 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# TEST 1
+i1 = Input("op1", "TENSOR_FLOAT16", "{1, 2, 2, 1}") # input 0
+i2 = Output("op2", "TENSOR_FLOAT16", "{1, 2, 2, 1}") # output 0
+model = Model().Operation("RELU1", i1).To(i2)
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [-10.0, -0.5, 0.5, 10.0]}
+output0 = {i2: # output 0
+ [-1.0, -0.5, 0.5, 1.0]}
+# Instantiate an example
+Example((input0, output0))
+
+
+# TEST 2
+d0 = 2
+d1 = 30
+d2 = 24
+d3 = 2
+
+i0 = Input("input", "TENSOR_FLOAT16", "{%d, %d, %d, %d}" % (d0, d1, d2, d3))
+output = Output("output", "TENSOR_FLOAT16", "{%d, %d, %d, %d}" % (d0, d1, d2, d3))
+model = Model().Operation("RELU1", i0).To(output)
+
+# Example 1. Input in operand 0,
+rng = d0 * d1 * d2 * d3
+input_values = (lambda r = rng: [x * (x % 2 - .5) * .002 for x in range(r)])()
+input0 = {i0: input_values}
+output_values = [-1 if x < -1 else 1 if x > 1 else x for x in input_values]
+output0 = {output: output_values}
+
+# Instantiate an example
+Example((input0, output0))
+
+
+# TEST 3: zero-sized input
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box cooridnates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+layout = BoolScalar("layout", False) # NHWC
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# RELU1 op with numBatches = 0.
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 2, 2, 1}") # out
+model = model.Operation("RELU1", zero_sized).To(o3)
+
+quant8 = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ zero_sized: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ o3: ("TENSOR_QUANT8_ASYMM", 0.1, 128)
+})
+
+# Create test case with dummy values.
+Example({
+ i1: [1],
+ o1: [0],
+ o2: [0],
+ o3: [0],
+}).AddVariations("relaxed", quant8, "float16")
diff --git a/tests/nnapi/specs/skip/V1_2/relu6_v1_2.mod.py b/tests/nnapi/specs/skip/V1_2/relu6_v1_2.mod.py
new file mode 100644
index 000000000..068f53ffb
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/relu6_v1_2.mod.py
@@ -0,0 +1,88 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# TEST 1
+i1 = Input("op1", "TENSOR_FLOAT16", "{1, 2, 2, 1}") # input 0
+i2 = Output("op2", "TENSOR_FLOAT16", "{1, 2, 2, 1}") # output 0
+model = Model().Operation("RELU6", i1).To(i2)
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [-10.0, -0.5, 0.5, 10.0]}
+output0 = {i2: # output 0
+ [0.0, 0.0, 0.5, 6.0]}
+# Instantiate an example
+Example((input0, output0))
+
+
+# TEST 2
+d0 = 2
+d1 = 26
+d2 = 40
+d3 = 2
+
+i0 = Input("input", "TENSOR_FLOAT16", "{%d, %d, %d, %d}" % (d0, d1, d2, d3))
+output = Output("output", "TENSOR_FLOAT16", "{%d, %d, %d, %d}" % (d0, d1, d2, d3))
+model = Model().Operation("RELU6", i0).To(output)
+
+# Example 1. Input in operand 0,
+rng = d0 * d1 * d2 * d3
+input_values = (lambda r = rng: [x * (x % 2 - .5) * .002 for x in range(r)])()
+input0 = {i0: input_values}
+output_values = [0 if x < 0 else 6 if x > 6 else x for x in input_values]
+output0 = {output: output_values}
+
+# Instantiate an example
+Example((input0, output0))
+
+
+# TEST 3: zero-sized input
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box cooridnates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+layout = BoolScalar("layout", False) # NHWC
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# RELU6 op with numBatches = 0.
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 2, 2, 1}") # out
+model = model.Operation("RELU6", zero_sized).To(o3)
+
+quant8 = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ zero_sized: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ o3: ("TENSOR_QUANT8_ASYMM", 0.1, 128)
+})
+
+# Create test case with dummy values.
+Example({
+ i1: [1],
+ o1: [0],
+ o2: [0],
+ o3: [0],
+}).AddVariations("relaxed", quant8, "float16")
diff --git a/tests/nnapi/specs/skip/V1_2/relu_v1_2.mod.py b/tests/nnapi/specs/skip/V1_2/relu_v1_2.mod.py
new file mode 100644
index 000000000..9065fc950
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/relu_v1_2.mod.py
@@ -0,0 +1,88 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# TEST 1
+i1 = Input("op1", "TENSOR_FLOAT16", "{1, 2, 2, 1}") # input 0
+i2 = Output("op2", "TENSOR_FLOAT16", "{1, 2, 2, 1}") # output 0
+model = Model().Operation("RELU", i1).To(i2)
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [-10.0, -0.5, 0.5, 10.0]}
+output0 = {i2: # output 0
+ [0.0, 0.0, 0.5, 10.0]}
+# Instantiate an example
+Example((input0, output0))
+
+
+# TEST 2
+d0 = 2
+d1 = 64
+d2 = 40
+d3 = 2
+
+i0 = Input("input", "TENSOR_FLOAT16", "{%d, %d, %d, %d}" % (d0, d1, d2, d3))
+output = Output("output", "TENSOR_FLOAT16", "{%d, %d, %d, %d}" % (d0, d1, d2, d3))
+model = Model().Operation("RELU", i0).To(output)
+
+# Example 1. Input in operand 0,
+rng = d0 * d1 * d2 * d3
+input_values = (lambda r = rng: [x * (x % 2 - .5) * 2 for x in range(r)])()
+input0 = {i0: input_values}
+output_values = (lambda r = rng: [x * (x % 2) for x in range(r)])()
+output0 = {output: output_values}
+
+# Instantiate an example
+Example((input0, output0))
+
+
+# TEST 3: zero-sized input
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box cooridnates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+layout = BoolScalar("layout", False) # NHWC
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# RELU op with numBatches = 0.
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 2, 2, 1}") # out
+model = model.Operation("RELU", zero_sized).To(o3)
+
+quant8 = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ zero_sized: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ o3: ("TENSOR_QUANT8_ASYMM", 0.1, 128)
+})
+
+# Create test case with dummy values.
+Example({
+ i1: [1],
+ o1: [0],
+ o2: [0],
+ o3: [0],
+}).AddVariations("relaxed", quant8, "float16")
diff --git a/tests/nnapi/specs/skip/V1_2/reshape_float16.mod.py b/tests/nnapi/specs/skip/V1_2/reshape_float16.mod.py
new file mode 100644
index 000000000..f54cba6d8
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/reshape_float16.mod.py
@@ -0,0 +1,18 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT16", "{1, 1, 3, 3}") # a line of 3 pixels, 3 components/pixel
+i2 = Parameter("op2", "TENSOR_INT32", "{1}", [-1]) # another vector of 2 float32s
+i3 = Output("op3", "TENSOR_FLOAT16", "{9}")
+model = model.Operation("RESHAPE", i1, i2).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1, 2, 3,
+ 4, 5, 6,
+ 7, 8, 9]}
+
+output0 = {i3: # output 0
+ [1, 2, 3, 4, 5, 6, 7, 8, 9]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/skip/V1_2/resize_bilinear_v1_2.mod.py b/tests/nnapi/specs/skip/V1_2/resize_bilinear_v1_2.mod.py
new file mode 100644
index 000000000..572d06b75
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/resize_bilinear_v1_2.mod.py
@@ -0,0 +1,166 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+layout = BoolScalar("layout", False) # NHWC
+
+# TEST 1: RESIZE_BILINEAR_NCHW_1, w = 3, h = 3
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+o1 = Output("op4", "TENSOR_FLOAT32", "{1, 3, 3, 1}")
+model_shape = Model("shape").Operation("RESIZE_BILINEAR", i1, 3, 3, layout).To(o1)
+model_scale = Model("scale").Operation("RESIZE_BILINEAR", i1, 1.5, 1.5, layout).To(o1)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.01, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.01, 0)
+})
+
+test1 = {
+ i1: [1.0, 1.0, 2.0, 2.0],
+ o1: [1.0, 1.0, 1.0,
+ 1.666666667, 1.666666667, 1.666666667,
+ 2.0, 2.0, 2.0]
+}
+
+# Instantiate an example
+Example(test1, model=model_shape).AddNchw(i1, o1, layout).AddVariations("relaxed", "float16", quant8)
+Example(test1, model=model_scale).AddNchw(i1, o1, layout).AddVariations("relaxed", "float16", quant8)
+
+
+# TEST 2: RESIZE_BILINEAR_NCHW_2, w = 3, h = 3
+i2 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 2}")
+o2 = Output("op4", "TENSOR_FLOAT32", "{1, 3, 3, 2}")
+model_shape = Model("shape").Operation("RESIZE_BILINEAR", i2, 3, 3, layout).To(o2)
+model_scale = Model("scale").Operation("RESIZE_BILINEAR", i2, 1.6, 1.6, layout).To(o2)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM", 0.25, 0),
+ o2: ("TENSOR_QUANT8_ASYMM", 0.25, 0)
+})
+
+test2 = {
+ i2: [3, 4, 6, 10, 9, 10, 12, 16],
+ o2: [3, 4, 5, 8, 6, 10,
+ 7, 8, 9, 12, 10, 14,
+ 9, 10, 11, 14, 12, 16,]
+}
+
+# Instantiate an example
+Example(test2, model=model_shape).AddNchw(i2, o2, layout).AddVariations("relaxed", "float16", quant8)
+Example(test2, model=model_scale).AddNchw(i2, o2, layout).AddVariations("relaxed", "float16", quant8)
+
+
+# TEST 3: RESIZE_BILINEAR, w = 3, h = 3
+i3 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+o3 = Output("op4", "TENSOR_FLOAT32", "{1, 3, 3, 1}")
+model_shape = Model("shape").Operation("RESIZE_BILINEAR", i3, 3, 3).To(o3)
+model_scale = Model("scale").Operation("RESIZE_BILINEAR", i3, 1.8, 1.8).To(o3)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i3: ("TENSOR_QUANT8_ASYMM", 0.01, 0),
+ o3: ("TENSOR_QUANT8_ASYMM", 0.01, 0)
+})
+
+test3 = {
+ i3: [1.0, 1.0, 2.0, 2.0],
+ o3: [1.0, 1.0, 1.0,
+ 1.666666667, 1.666666667, 1.666666667,
+ 2.0, 2.0, 2.0]
+}
+
+# Instantiate an example
+Example(test3, model=model_shape).AddVariations("float16", quant8, includeDefault=False)
+Example(test3, model=model_scale).AddVariations("float16", quant8, includeDefault=False)
+
+
+# TEST 4: zero-sized input, resize by output shape
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box cooridnates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# RESIZE_BILINEAR op with numBatches = 0.
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 3, 3, 1}") # out
+model = model.Operation("RESIZE_BILINEAR", zero_sized, 3, 3, layout).To(o3)
+
+quant8 = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ zero_sized: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ o3: ("TENSOR_QUANT8_ASYMM", 0.1, 128)
+})
+
+# Create test case with dummy values.
+Example({
+ i1: [1],
+ o1: [0],
+ o2: [0],
+ o3: [0],
+}).AddNchw(i1, zero_sized, o3, layout).AddVariations("relaxed", quant8, "float16")
+
+
+# TEST 5: zero-sized input, resize by scale
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box cooridnates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# RESIZE_BILINEAR op with numBatches = 0.
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 3, 3, 1}") # out
+model = model.Operation("RESIZE_BILINEAR", zero_sized, 1.6, 1.6, layout).To(o3)
+
+quant8 = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ zero_sized: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ o3: ("TENSOR_QUANT8_ASYMM", 0.1, 128)
+})
+
+# Create test case with dummy values.
+Example({
+ i1: [1],
+ o1: [0],
+ o2: [0],
+ o3: [0],
+}).AddNchw(i1, zero_sized, o3, layout).AddVariations("relaxed", quant8, "float16")
diff --git a/tests/nnapi/specs/skip/V1_2/rnn_float16.mod.py b/tests/nnapi/specs/skip/V1_2/rnn_float16.mod.py
new file mode 100644
index 000000000..7968c556c
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/rnn_float16.mod.py
@@ -0,0 +1,201 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+batches = 2
+units = 16
+input_size = 8
+
+model = Model()
+
+input = Input("input", "TENSOR_FLOAT16", "{%d, %d}" % (batches, input_size))
+weights = Input("weights", "TENSOR_FLOAT16", "{%d, %d}" % (units, input_size))
+recurrent_weights = Input("recurrent_weights", "TENSOR_FLOAT16", "{%d, %d}" % (units, units))
+bias = Input("bias", "TENSOR_FLOAT16", "{%d}" % (units))
+hidden_state_in = Input("hidden_state_in", "TENSOR_FLOAT16", "{%d, %d}" % (batches, units))
+
+activation_param = Int32Scalar("activation_param", 1) # Relu
+
+hidden_state_out = IgnoredOutput("hidden_state_out", "TENSOR_FLOAT16", "{%d, %d}" % (batches, units))
+output = Output("output", "TENSOR_FLOAT16", "{%d, %d}" % (batches, units))
+
+model = model.Operation("RNN", input, weights, recurrent_weights, bias, hidden_state_in,
+ activation_param).To([hidden_state_out, output])
+
+input0 = {
+ weights: [
+ 0.461459, 0.153381, 0.529743, -0.00371218, 0.676267, -0.211346,
+ 0.317493, 0.969689, -0.343251, 0.186423, 0.398151, 0.152399,
+ 0.448504, 0.317662, 0.523556, -0.323514, 0.480877, 0.333113,
+ -0.757714, -0.674487, -0.643585, 0.217766, -0.0251462, 0.79512,
+ -0.595574, -0.422444, 0.371572, -0.452178, -0.556069, -0.482188,
+ -0.685456, -0.727851, 0.841829, 0.551535, -0.232336, 0.729158,
+ -0.00294906, -0.69754, 0.766073, -0.178424, 0.369513, -0.423241,
+ 0.548547, -0.0152023, -0.757482, -0.85491, 0.251331, -0.989183,
+ 0.306261, -0.340716, 0.886103, -0.0726757, -0.723523, -0.784303,
+ 0.0354295, 0.566564, -0.485469, -0.620498, 0.832546, 0.697884,
+ -0.279115, 0.294415, -0.584313, 0.548772, 0.0648819, 0.968726,
+ 0.723834, -0.0080452, -0.350386, -0.272803, 0.115121, -0.412644,
+ -0.824713, -0.992843, -0.592904, -0.417893, 0.863791, -0.423461,
+ -0.147601, -0.770664, -0.479006, 0.654782, 0.587314, -0.639158,
+ 0.816969, -0.337228, 0.659878, 0.73107, 0.754768, -0.337042,
+ 0.0960841, 0.368357, 0.244191, -0.817703, -0.211223, 0.442012,
+ 0.37225, -0.623598, -0.405423, 0.455101, 0.673656, -0.145345,
+ -0.511346, -0.901675, -0.81252, -0.127006, 0.809865, -0.721884,
+ 0.636255, 0.868989, -0.347973, -0.10179, -0.777449, 0.917274,
+ 0.819286, 0.206218, -0.00785118, 0.167141, 0.45872, 0.972934,
+ -0.276798, 0.837861, 0.747958, -0.0151566, -0.330057, -0.469077,
+ 0.277308, 0.415818
+ ],
+ recurrent_weights: [
+ 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0.1
+ ],
+ bias: [
+ 0.065691948, -0.69055247, 0.1107955, -0.97084129, -0.23957068,
+ -0.23566568, -0.389184, 0.47481549, -0.4791103, 0.29931796,
+ 0.10463274, 0.83918178, 0.37197268, 0.61957061, 0.3956964,
+ -0.37609905
+ ],
+}
+
+
+test_inputs = [
+ 0.23689353, 0.285385, 0.037029743, -0.19858193, -0.27569133,
+ 0.43773448, 0.60379338, 0.35562468, -0.69424844, -0.93421471,
+ -0.87287879, 0.37144363, -0.62476718, 0.23791671, 0.40060222,
+ 0.1356622, -0.99774903, -0.98858172, -0.38952237, -0.47685933,
+ 0.31073618, 0.71511042, -0.63767755, -0.31729108, 0.33468103,
+ 0.75801885, 0.30660987, -0.37354088, 0.77002847, -0.62747043,
+ -0.68572164, 0.0069220066, 0.65791464, 0.35130811, 0.80834007,
+ -0.61777675, -0.21095741, 0.41213346, 0.73784804, 0.094794154,
+ 0.47791874, 0.86496925, -0.53376222, 0.85315156, 0.10288584,
+ 0.86684, -0.011186242, 0.10513687, 0.87825835, 0.59929144,
+ 0.62827742, 0.18899453, 0.31440187, 0.99059987, 0.87170351,
+ -0.35091716, 0.74861872, 0.17831337, 0.2755419, 0.51864719,
+ 0.55084288, 0.58982027, -0.47443086, 0.20875752, -0.058871567,
+ -0.66609079, 0.59098077, 0.73017097, 0.74604273, 0.32882881,
+ -0.17503482, 0.22396147, 0.19379807, 0.29120302, 0.077113032,
+ -0.70331609, 0.15804303, -0.93407321, 0.40182066, 0.036301374,
+ 0.66521823, 0.0300982, -0.7747041, -0.02038002, 0.020698071,
+ -0.90300065, 0.62870288, -0.23068321, 0.27531278, -0.095755219,
+ -0.712036, -0.17384434, -0.50593495, -0.18646687, -0.96508682,
+ 0.43519354, 0.14744234, 0.62589407, 0.1653645, -0.10651493,
+ -0.045277178, 0.99032974, -0.88255352, -0.85147917, 0.28153265,
+ 0.19455957, -0.55479527, -0.56042433, 0.26048636, 0.84702539,
+ 0.47587705, -0.074295521, -0.12287641, 0.70117295, 0.90532446,
+ 0.89782166, 0.79817224, 0.53402734, -0.33286154, 0.073485017,
+ -0.56172788, -0.044897556, 0.89964068, -0.067662835, 0.76863563,
+ 0.93455386, -0.6324693, -0.083922029
+]
+
+golden_outputs = [
+ 0.496726, 0, 0.965996, 0, 0.0584254, 0,
+ 0, 0.12315, 0, 0, 0.612266, 0.456601,
+ 0, 0.52286, 1.16099, 0.0291232,
+
+ 0, 0, 0.524901, 0, 0, 0,
+ 0, 1.02116, 0, 1.35762, 0, 0.356909,
+ 0.436415, 0.0355727, 0, 0,
+
+ 0, 0, 0, 0.262335, 0, 0,
+ 0, 1.33992, 0, 2.9739, 0, 0,
+ 1.31914, 2.66147, 0, 0,
+
+ 0.942568, 0, 0, 0, 0.025507, 0,
+ 0, 0, 0.321429, 0.569141, 1.25274, 1.57719,
+ 0.8158, 1.21805, 0.586239, 0.25427,
+
+ 1.04436, 0, 0.630725, 0, 0.133801, 0.210693,
+ 0.363026, 0, 0.533426, 0, 1.25926, 0.722707,
+ 0, 1.22031, 1.30117, 0.495867,
+
+ 0.222187, 0, 0.72725, 0, 0.767003, 0,
+ 0, 0.147835, 0, 0, 0, 0.608758,
+ 0.469394, 0.00720298, 0.927537, 0,
+
+ 0.856974, 0.424257, 0, 0, 0.937329, 0,
+ 0, 0, 0.476425, 0, 0.566017, 0.418462,
+ 0.141911, 0.996214, 1.13063, 0,
+
+ 0.967899, 0, 0, 0, 0.0831304, 0,
+ 0, 1.00378, 0, 0, 0, 1.44818,
+ 1.01768, 0.943891, 0.502745, 0,
+
+ 0.940135, 0, 0, 0, 0, 0,
+ 0, 2.13243, 0, 0.71208, 0.123918, 1.53907,
+ 1.30225, 1.59644, 0.70222, 0,
+
+ 0.804329, 0, 0.430576, 0, 0.505872, 0.509603,
+ 0.343448, 0, 0.107756, 0.614544, 1.44549, 1.52311,
+ 0.0454298, 0.300267, 0.562784, 0.395095,
+
+ 0.228154, 0, 0.675323, 0, 1.70536, 0.766217,
+ 0, 0, 0, 0.735363, 0.0759267, 1.91017,
+ 0.941888, 0, 0, 0,
+
+ 0, 0, 1.5909, 0, 0, 0,
+ 0, 0.5755, 0, 0.184687, 0, 1.56296,
+ 0.625285, 0, 0, 0,
+
+ 0, 0, 0.0857888, 0, 0, 0,
+ 0, 0.488383, 0.252786, 0, 0, 0,
+ 1.02817, 1.85665, 0, 0,
+
+ 0.00981836, 0, 1.06371, 0, 0, 0,
+ 0, 0, 0, 0.290445, 0.316406, 0,
+ 0.304161, 1.25079, 0.0707152, 0,
+
+ 0.986264, 0.309201, 0, 0, 0, 0,
+ 0, 1.64896, 0.346248, 0, 0.918175, 0.78884,
+ 0.524981, 1.92076, 2.07013, 0.333244,
+
+ 0.415153, 0.210318, 0, 0, 0, 0,
+ 0, 2.02616, 0, 0.728256, 0.84183, 0.0907453,
+ 0.628881, 3.58099, 1.49974, 0
+]
+
+input_sequence_size = int(len(test_inputs) / input_size / batches)
+
+# TODO: enable the other data points after fixing reference issues
+#for i in range(input_sequence_size):
+for i in range(1):
+ input_begin = i * input_size
+ input_end = input_begin + input_size
+ input0[input] = test_inputs[input_begin:input_end]
+ input0[input].extend(input0[input])
+ input0[hidden_state_in] = [0 for x in range(batches * units)]
+ output0 = {
+ hidden_state_out: [0 for x in range(batches * units)],
+ }
+ golden_start = i * units
+ golden_end = golden_start + units
+ output0[output] = golden_outputs[golden_start:golden_end]
+ output0[output].extend(output0[output])
+ Example((input0, output0))
diff --git a/tests/nnapi/specs/skip/V1_2/roi_align.mod.py b/tests/nnapi/specs/skip/V1_2/roi_align.mod.py
new file mode 100644
index 000000000..d1b1303c2
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/roi_align.mod.py
@@ -0,0 +1,265 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+layout = BoolScalar("layout", False) # NHWC
+
+# TEST 1: ROI_ALIGN_1, outputShape = [2, 2], spatialScale = [0.5, 0.5], samplingRatio = [4, 4]
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 4, 4, 1}")
+roi1 = Input("roi", "TENSOR_FLOAT32", "{4, 4}")
+o1 = Output("out", "TENSOR_FLOAT32", "{4, 2, 2, 1}")
+Model().Operation("ROI_ALIGN", i1, roi1, [0, 0, 0, 0], 2, 2, 2.0, 2.0, 4, 4, layout).To(o1)
+
+quant8 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
+ roi1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.0625, 128)
+})
+
+# Instantiate an example
+Example({
+ i1: [
+ -10, -1, 4, -5,
+ -8, -2, 9, 1,
+ 7, -2, 3, -7,
+ -2, 10, -3, 5
+ ],
+ roi1: [
+ 2, 2, 4, 4,
+ 0, 0, 8, 8,
+ 2, 0, 4, 8,
+ 0, 2, 8, 4
+ ],
+ o1: [
+ 0.375, 5.125, -0.375, 2.875,
+ -0.5, -0.3125, 3.1875, 1.125,
+ 0.25, 4.25, 4.875, 0.625,
+ -0.1875, 1.125, 0.9375, -2.625
+ ]
+}).AddNchw(i1, o1, layout).AddVariations("relaxed", quant8, "float16")
+
+
+# TEST 2: ROI_ALIGN_2, outputShape = [2, 3], spatialScale = [0.25, 0.25], samplingRatio = [4, 4]
+i2 = Input("in", "TENSOR_FLOAT32", "{4, 4, 8, 2}")
+roi2 = Input("roi", "TENSOR_FLOAT32", "{4, 4}")
+o2 = Output("out", "TENSOR_FLOAT32", "{4, 2, 3, 2}")
+Model().Operation("ROI_ALIGN", i2, roi2, [0, 0, 3, 3], 2, 3, 4.0, 4.0, 4, 4, layout).To(o2)
+
+quant8 = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM", 0.04, 0),
+ roi2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o2: ("TENSOR_QUANT8_ASYMM", 0.03125, 10)
+})
+
+# Instantiate an example
+Example({
+ i2: [
+ 8.84, 8.88, 7.41, 5.60, 9.95, 4.37, 0.10, 7.64, 6.50, 9.47,
+ 7.55, 3.00, 0.89, 3.01, 6.30, 4.40, 1.64, 6.74, 6.16, 8.60,
+ 5.85, 3.17, 7.12, 6.79, 5.77, 6.62, 5.13, 8.44, 5.08, 7.12,
+ 2.84, 1.19, 8.37, 0.90, 7.86, 9.69, 1.97, 1.31, 4.42, 9.89,
+ 0.18, 9.00, 9.30, 0.44, 5.05, 6.47, 1.09, 9.50, 1.30, 2.18,
+ 2.05, 7.74, 7.66, 0.65, 4.18, 7.14, 5.35, 7.90, 1.04, 1.47,
+ 9.01, 0.95, 4.07, 0.65,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00,
+ 5.47, 2.64, 0.86, 4.86, 2.38, 2.45, 8.77, 0.06, 3.60, 9.28,
+ 5.84, 8.97, 6.89, 1.43, 3.90, 5.91, 7.40, 9.25, 3.12, 4.92,
+ 1.87, 3.22, 9.50, 6.73, 2.07, 7.30, 3.07, 4.97, 0.24, 8.91,
+ 1.09, 0.27, 7.29, 6.94, 2.31, 6.88, 4.33, 1.37, 0.86, 0.46,
+ 6.07, 3.81, 0.86, 6.99, 4.36, 1.92, 8.19, 3.57, 7.90, 6.78,
+ 4.64, 6.82, 6.18, 9.63, 2.63, 2.33, 1.36, 2.70, 9.99, 9.85,
+ 8.06, 4.80, 7.80, 5.43
+ ],
+ roi2: [
+ 4, 4, 28, 12,
+ 4, 4, 32, 16,
+ 7, 1, 29, 15, # test rounding
+ 1, 7, 9, 11 # test roi with shape smaller than output
+ ],
+ o2: [
+ 5.150000, 5.491250, 4.733750, 7.100000, 4.827500,
+ 5.843750, 4.721250, 4.797500, 3.750000, 6.592500,
+ 5.452500, 3.362500,
+ 4.899396, 5.861696, 4.941504, 5.979741, 3.182904,
+ 6.111551, 5.141833, 4.631891, 3.903325, 4.627793,
+ 5.537240, 1.356019,
+ 4.845915, 3.618338, 3.301958, 6.250566, 2.930461,
+ 4.269676, 3.642174, 4.201423, 5.008657, 5.735293,
+ 7.426004, 4.819665,
+ 4.518229, 6.887344, 2.952656, 5.565781, 3.952786,
+ 2.552812, 5.191667, 6.854167, 3.920000, 6.512500,
+ 4.886250, 5.497708
+ ]
+}).AddNchw(i2, o2, layout).AddVariations("relaxed", quant8, "float16")
+
+
+# TEST 3: ROI_ALIGN_3, outputShape = [2, 3], spatialScale = [0.25, 0.25], samplingRatio = [0, 0]
+i3 = Input("in", "TENSOR_FLOAT32", "{2, 4, 8, 2}")
+roi3 = Input("roi", "TENSOR_FLOAT32", "{4, 4}")
+o3 = Output("out", "TENSOR_FLOAT32", "{4, 2, 3, 2}")
+Model().Operation("ROI_ALIGN", i3, roi3, [0, 0, 1, 1], 2, 3, 4.0, 4.0, 0, 0, layout).To(o3)
+
+quant8 = DataTypeConverter().Identify({
+ i3: ("TENSOR_QUANT8_ASYMM", 0.04, 0),
+ roi3: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o3: ("TENSOR_QUANT8_ASYMM", 0.03125, 10)
+})
+
+# Instantiate an example
+Example({
+ i3: [
+ 8.84, 8.88, 7.41, 5.60, 9.95, 4.37, 0.10, 7.64, 6.50, 9.47,
+ 7.55, 3.00, 0.89, 3.01, 6.30, 4.40, 1.64, 6.74, 6.16, 8.60,
+ 5.85, 3.17, 7.12, 6.79, 5.77, 6.62, 5.13, 8.44, 5.08, 7.12,
+ 2.84, 1.19, 8.37, 0.90, 7.86, 9.69, 1.97, 1.31, 4.42, 9.89,
+ 0.18, 9.00, 9.30, 0.44, 5.05, 6.47, 1.09, 9.50, 1.30, 2.18,
+ 2.05, 7.74, 7.66, 0.65, 4.18, 7.14, 5.35, 7.90, 1.04, 1.47,
+ 9.01, 0.95, 4.07, 0.65,
+ 5.47, 2.64, 0.86, 4.86, 2.38, 2.45, 8.77, 0.06, 3.60, 9.28,
+ 5.84, 8.97, 6.89, 1.43, 3.90, 5.91, 7.40, 9.25, 3.12, 4.92,
+ 1.87, 3.22, 9.50, 6.73, 2.07, 7.30, 3.07, 4.97, 0.24, 8.91,
+ 1.09, 0.27, 7.29, 6.94, 2.31, 6.88, 4.33, 1.37, 0.86, 0.46,
+ 6.07, 3.81, 0.86, 6.99, 4.36, 1.92, 8.19, 3.57, 7.90, 6.78,
+ 4.64, 6.82, 6.18, 9.63, 2.63, 2.33, 1.36, 2.70, 9.99, 9.85,
+ 8.06, 4.80, 7.80, 5.43
+ ],
+ roi3: [
+ 4, 4, 28, 12,
+ 4, 4, 32, 16,
+ 7, 1, 29, 15, # test rounding
+ 1, 7, 9, 11 # test roi with shape smaller than output
+ ],
+ o3: [
+ 5.150000, 5.491250, 4.733750, 7.100000, 4.827500,
+ 5.843750, 4.721250, 4.797500, 3.750000, 6.592500,
+ 5.452500, 3.362500,
+ 4.869884, 5.908148, 4.941701, 5.955718, 3.113403,
+ 6.341898, 5.156389, 4.604016, 3.881782, 4.616123,
+ 5.690694, 1.237153,
+ 5.028047, 3.560944, 3.157656, 6.395469, 2.896243,
+ 4.336576, 3.563021, 4.057767, 5.053437, 6.028906,
+ 7.396966, 4.668906,
+ 4.385000, 6.905000, 2.815000, 5.502500, 4.161667,
+ 1.829167, 5.191667, 6.854167, 3.920000, 6.512500,
+ 5.106667, 5.612500
+ ]
+}).AddNchw(i3, o3, layout).AddVariations("relaxed", quant8, "float16")
+
+
+# TEST 4: ROI_ALIGN_4, outputShape = [2, 2], spatialScale = [0.5, 1.0], samplingRatio = [0, 4]
+i4 = Input("in", "TENSOR_FLOAT32", "{4, 4, 4, 1}")
+roi4 = Input("roi", "TENSOR_FLOAT32", "{5, 4}")
+o4 = Output("out", "TENSOR_FLOAT32", "{5, 2, 2, 1}")
+Model().Operation("ROI_ALIGN", i4, roi4, [2, 2, 2, 2, 2], 2, 2, 2.0, 1.0, 0, 4, layout).To(o4)
+
+quant8 = DataTypeConverter().Identify({
+ i4: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
+ roi4: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o4: ("TENSOR_QUANT8_ASYMM", 0.0625, 128)
+})
+
+# Instantiate an example
+Example({
+ i4: [
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ -10, -1, 4, -5,
+ -8, -2, 9, 1,
+ 7, -2, 3, -7,
+ -2, 10, -3, 5,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ],
+ roi4: [
+ 1, 2, 2, 4,
+ 0, 0, 4, 8,
+ 1, 0, 2, 8,
+ 0, 2, 4, 4,
+ 0, 0, 0, 0
+ ],
+ o4: [
+ 0.375, 5.125, -0.375, 2.875,
+ -0.5, -0.3125, 3.1875, 1.125,
+ 0.25, 4.25, 4.875, 0.625,
+ -0.1875, 1.125, 0.9375, -2.625,
+ -7.4375, -3.3125, -6.8125, -3.4375
+ ]
+}).AddNchw(i4, o4, layout).AddVariations("relaxed", quant8, "float16")
+
+
+# TEST 5: ROI_ALIGN_zero_sized
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box cooridnates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# ROI_ALIGN op with numRois = 0.
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Output("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+quant8 = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ zero_sized: ("TENSOR_QUANT8_ASYMM", 0.1, 128)
+})
+
+# Create test case with dummy values.
+Example({
+ i1: [0],
+ o1: [0],
+ o2: [0],
+ zero_sized: [0],
+}).AddNchw(i1, zero_sized, layout).AddVariations("relaxed", quant8, "float16")
+
+
+# TEST 6: ROI_ALIGN_6, hanging issue
+i4 = Input("in", "TENSOR_FLOAT32", "{1, 512, 8, 1}")
+roi4 = Input("roi", "TENSOR_FLOAT32", "{1, 4}")
+o4 = Output("out", "TENSOR_FLOAT32", "{1, 128, 4, 1}")
+Model().Operation("ROI_ALIGN", i4, roi4, [0], 128, 4, 1.0, 64.0, 10, 10, layout).To(o4)
+
+quant8 = DataTypeConverter().Identify({
+ i4: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
+ roi4: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o4: ("TENSOR_QUANT8_ASYMM", 0.0625, 128)
+})
+
+# Instantiate an example
+Example({
+ i4: [0] * (512 * 8),
+ roi4: [450, 500, 466, 508],
+ o4: [0] * (128 * 4)
+}).AddNchw(i4, o4, layout).AddVariations("relaxed", quant8, "float16")
diff --git a/tests/nnapi/specs/skip/V1_2/roi_pooling.mod.py b/tests/nnapi/specs/skip/V1_2/roi_pooling.mod.py
new file mode 100644
index 000000000..f4135c5b4
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/roi_pooling.mod.py
@@ -0,0 +1,152 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+layout = BoolScalar("layout", False) # NHWC
+
+# TEST 1: ROI_POOLING_1, outputShape = [2, 2], spatialScale = [0.5, 0.5]
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 4, 4, 1}")
+roi1 = Input("roi", "TENSOR_FLOAT32", "{5, 4}")
+o1 = Output("out", "TENSOR_FLOAT32", "{5, 2, 2, 1}")
+Model().Operation("ROI_POOLING", i1, roi1, [0, 0, 0, 0, 0], 2, 2, 2.0, 2.0, layout).To(o1)
+
+quant8 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
+ roi1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.25, 128)
+})
+
+# Instantiate an example
+Example({
+ i1: [
+ -10, -1, 4, -5,
+ -8, -2, 9, 1,
+ 7, -2, 3, -7,
+ -2, 10, -3, 5
+ ],
+ roi1: [
+ 2, 2, 4, 4,
+ 0, 0, 6, 6,
+ 2, 0, 4, 6,
+ 0, 2, 6, 4,
+ 8, 8, 8, 8 # empty region
+ ],
+ o1: [
+ -2, 9, -2, 3,
+ -1, 9, 10, 5,
+ -1, 9, 10, 3,
+ -2, 9, 7, 3,
+ 0, 0, 0, 0
+ ]
+}).AddNchw(i1, o1, layout).AddVariations("relaxed", quant8, "float16")
+
+
+# TEST 2: ROI_POOLING_2, outputShape = [2, 3], spatialScale = 0.25
+i2 = Input("in", "TENSOR_FLOAT32", "{4, 4, 8, 2}")
+roi2 = Input("roi", "TENSOR_FLOAT32", "{4, 4}")
+o2 = Output("out", "TENSOR_FLOAT32", "{4, 2, 3, 2}")
+Model().Operation("ROI_POOLING", i2, roi2, [0, 0, 3, 3], 2, 3, 4.0, 4.0, layout).To(o2)
+
+quant8 = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM", 0.04, 0),
+ roi2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o2: ("TENSOR_QUANT8_ASYMM", 0.04, 0)
+})
+
+# Instantiate an example
+Example({
+ i2: [
+ 8.84, 8.88, 7.41, 5.60, 9.95, 4.37, 0.10, 7.64, 6.50, 9.47,
+ 7.55, 3.00, 0.89, 3.01, 6.30, 4.40, 1.64, 6.74, 6.16, 8.60,
+ 5.85, 3.17, 7.12, 6.79, 5.77, 6.62, 5.13, 8.44, 5.08, 7.12,
+ 2.84, 1.19, 8.37, 0.90, 7.86, 9.69, 1.97, 1.31, 4.42, 9.89,
+ 0.18, 9.00, 9.30, 0.44, 5.05, 6.47, 1.09, 9.50, 1.30, 2.18,
+ 2.05, 7.74, 7.66, 0.65, 4.18, 7.14, 5.35, 7.90, 1.04, 1.47,
+ 9.01, 0.95, 4.07, 0.65,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
+ 0.00, 0.00, 0.00, 0.00,
+ 5.47, 2.64, 0.86, 4.86, 2.38, 2.45, 8.77, 0.06, 3.60, 9.28,
+ 5.84, 8.97, 6.89, 1.43, 3.90, 5.91, 7.40, 9.25, 3.12, 4.92,
+ 1.87, 3.22, 9.50, 6.73, 2.07, 7.30, 3.07, 4.97, 0.24, 8.91,
+ 1.09, 0.27, 7.29, 6.94, 2.31, 6.88, 4.33, 1.37, 0.86, 0.46,
+ 6.07, 3.81, 0.86, 6.99, 4.36, 1.92, 8.19, 3.57, 7.90, 6.78,
+ 4.64, 6.82, 6.18, 9.63, 2.63, 2.33, 1.36, 2.70, 9.99, 9.85,
+ 8.06, 4.80, 7.80, 5.43
+ ],
+ roi2: [
+ 4, 4, 24, 8,
+ 4, 4, 28, 12,
+ 7, 1, 25, 11, # test rounding
+ 1, 7, 5, 11 # test roi with shape smaller than output
+ ],
+ o2: [
+ 6.16, 8.60, 7.12, 6.79, 5.13, 8.44, 7.86, 9.69, 4.42, 9.89, 9.30, 6.47,
+ 7.86, 9.89, 9.30, 9.89, 9.30, 9.50, 7.86, 9.89, 9.30, 9.89, 9.30, 9.50,
+ 9.50, 6.73, 9.50, 9.28, 6.89, 8.97, 6.18, 9.63, 9.99, 9.85, 9.99, 9.85,
+ 7.29, 6.94, 7.29, 6.94, 2.31, 6.88, 7.90, 6.78, 7.90, 6.82, 4.64, 6.82
+ ]
+}).AddNchw(i2, o2, layout).AddVariations("relaxed", quant8, "float16")
+
+
+# TEST 3: ROI_POOLING_3, outputShape = [2, 2], spatialScale = [0.5, 1]
+i3 = Input("in", "TENSOR_FLOAT32", "{4, 4, 4, 1}")
+roi3 = Input("roi", "TENSOR_FLOAT32", "{5, 4}")
+o3 = Output("out", "TENSOR_FLOAT32", "{5, 2, 2, 1}")
+Model().Operation("ROI_POOLING", i3, roi3, [2, 2, 2, 2, 2], 2, 2, 2.0, 1.0, layout).To(o3)
+
+quant8 = DataTypeConverter().Identify({
+ i3: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
+ roi3: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o3: ("TENSOR_QUANT8_ASYMM", 0.25, 128)
+})
+
+# Instantiate an example
+Example({
+ i3: [
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ -10, -1, 4, -5,
+ -8, -2, 9, 1,
+ 7, -2, 3, -7,
+ -2, 10, -3, 5,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ],
+ roi3: [
+ 1, 2, 2, 4,
+ 0, 0, 3, 6,
+ 1, 0, 2, 6,
+ 0, 2, 3, 4,
+ 0, 0, 0, 0
+ ],
+ o3: [
+ -2, 9, -2, 3,
+ -1, 9, 10, 5,
+ -1, 9, 10, 3,
+ -2, 9, 7, 3,
+ -10, -10, -10, -10
+ ]
+}).AddNchw(i3, o3, layout).AddVariations("relaxed", quant8, "float16")
diff --git a/tests/nnapi/specs/skip/V1_2/sin.mod.py b/tests/nnapi/specs/skip/V1_2/sin.mod.py
new file mode 100644
index 000000000..7f2fcbb8b
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/sin.mod.py
@@ -0,0 +1,27 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+input0 = Input("input0", "TENSOR_FLOAT32", "{1, 2, 3, 4, 5}")
+output0 = Output("output0", "TENSOR_FLOAT32", "{1, 2, 3, 4, 5}")
+model = Model().Operation("SIN", input0).To(output0)
+
+input_data = [(i - 60) / 10 for i in range(120)]
+output_data = [math.sin(x) for x in input_data]
+
+Example({
+ input0: input_data,
+ output0: output_data,
+}).AddVariations("relaxed", "float16")
diff --git a/tests/nnapi/specs/skip/V1_2/softmax_v1_2.mod.py b/tests/nnapi/specs/skip/V1_2/softmax_v1_2.mod.py
new file mode 100644
index 000000000..5983b05a0
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/softmax_v1_2.mod.py
@@ -0,0 +1,99 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+i = Input("op1", "TENSOR_FLOAT32", "{2, 2, 2, 5}") # input 0
+o = Output("op2", "TENSOR_FLOAT32", "{2, 2, 2, 5}") # output 0
+axis = Int32Scalar("axis", -1) # last axis
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
+ o: ("TENSOR_QUANT8_ASYMM", 1./256, 0)
+})
+
+example1 = {
+ i: [17., 16., 15., 14., 1.,
+ -1., -2., -3., -4., -17.] * 4,
+ o: [0.643914213228014,
+ 0.236882800924671,
+ 0.087144312427294,
+ 0.032058600957022,
+ 7.246299848982885e-08] * 8
+}
+example2 = {
+ i: [1., 2., 3., 4., 5., -1., -2., -3., -4., -5.] * 4,
+ o: [0.2] * 40
+}
+
+# TEST 1: All dimensions other than 2 or 4, without axis parameter
+# beta = 1.0
+Model().Operation("SOFTMAX", i, 1.0).To(o)
+Example(example1).AddVariations("relaxed", "float16", quant8).AddDims([1, 3], i, o)
+# beta = 0.000001
+Model().Operation("SOFTMAX", i, 0.000001).To(o)
+Example(example2).AddVariations("relaxed", "float16", quant8).AddDims([1, 3], i, o)
+
+# TEST 2: All dimensions, with all possible axis parameter
+# beta = 1.0
+Model("axis").Operation("SOFTMAX", i, 1.0, axis).To(o)
+Example(example1).AddVariations("relaxed", "float16", quant8).AddAllDimsAndAxis(i, o, axis)
+# beta = 0.000001
+Model("axis").Operation("SOFTMAX", i, 0.000001, axis).To(o)
+Example(example2).AddVariations("relaxed", "float16", quant8).AddAllDimsAndAxis(i, o, axis)
+
+# SOFTMAX of rank 4 and TENSOR_FLOAT32 and TENSOR_QUANT8_ASYMM data type is introduced in V1_0.
+Example.SetVersion("V1_0", "softmax_v1_2", "softmax_v1_2_quant8", \
+ "softmax_v1_2_2", "softmax_v1_2_quant8_2")
+
+
+# TEST 3: zero-sized input
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box cooridnates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+layout = BoolScalar("layout", False) # NHWC
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# SOFTMAX op with numBatches = 0.
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 2, 2, 1}") # out
+model = model.Operation("SOFTMAX", zero_sized, 1.0).To(o3)
+
+quant8 = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ zero_sized: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ o3: ("TENSOR_QUANT8_ASYMM", 1./256, 0)
+})
+
+# Create test case with dummy values.
+Example({
+ i1: [1],
+ o1: [0],
+ o2: [0],
+ o3: [0],
+}).AddVariations("relaxed", quant8, "float16")
diff --git a/tests/nnapi/specs/skip/V1_2/space_to_batch_quant8_nonzero.mod.py b/tests/nnapi/specs/skip/V1_2/space_to_batch_quant8_nonzero.mod.py
new file mode 100644
index 000000000..2d0d710e8
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/space_to_batch_quant8_nonzero.mod.py
@@ -0,0 +1,37 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Quantized SPACE_TO_BATCH_ND with non-zero zeroPoint is supported since 1.2.
+# See http://b/132112227.
+
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{1, 5, 2, 1}, 1.0, 9")
+block = Parameter("block_size", "TENSOR_INT32", "{2}", [3, 2])
+paddings = Parameter("paddings", "TENSOR_INT32", "{2, 2}", [1, 0, 2, 0])
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{6, 2, 2, 1}, 1.0, 9")
+
+model = model.Operation("SPACE_TO_BATCH_ND", i1, block, paddings).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
+
+output0 = {output: # output 0
+ [9, 9, 9, 5, 9, 9, 9, 6, 9, 1, 9, 7,
+ 9, 2, 9, 8, 9, 3, 9, 9, 9, 4, 9, 10]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/skip/V1_2/space_to_batch_v1_2.mod.py b/tests/nnapi/specs/skip/V1_2/space_to_batch_v1_2.mod.py
new file mode 100644
index 000000000..356fae5d8
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/space_to_batch_v1_2.mod.py
@@ -0,0 +1,94 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+layout = BoolScalar("layout", False) # NHWC
+
+# TEST 1: SPACE_TO_BATCH_NCHW_1, block_size = [2, 2]
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 2}")
+pad1 = Parameter("paddings", "TENSOR_INT32", "{2, 2}", [0, 0, 0, 0])
+o1 = Output("op4", "TENSOR_FLOAT32", "{4, 1, 1, 2}")
+Model().Operation("SPACE_TO_BATCH_ND", i1, [2, 2], pad1, layout).To(o1)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.1, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.1, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i1: [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1],
+ o1: [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1]
+}).AddNchw(i1, o1, layout).AddVariations("relaxed", "float16", quant8)
+
+
+# TEST 2: SPACE_TO_BATCH_NCHW_2, block_size = [2, 2]
+i2 = Input("op1", "TENSOR_FLOAT32", "{1, 4, 4, 1}")
+o2 = Output("op4", "TENSOR_FLOAT32", "{4, 2, 2, 1}")
+Model().Operation("SPACE_TO_BATCH_ND", i2, [2, 2], pad1, layout).To(o2)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
+ o2: ("TENSOR_QUANT8_ASYMM", 0.5, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i2: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
+ o2: [1, 3, 9, 11, 2, 4, 10, 12, 5, 7, 13, 15, 6, 8, 14, 16]
+}).AddNchw(i2, o2, layout).AddVariations("relaxed", "float16", quant8)
+
+
+# TEST 3: SPACE_TO_BATCH_NCHW_3, block_size = [3, 2]
+i3 = Input("op1", "TENSOR_FLOAT32", "{1, 5, 2, 1}")
+pad3 = Parameter("paddings", "TENSOR_INT32", "{2, 2}", [1, 0, 2, 0])
+o3 = Output("op4", "TENSOR_FLOAT32", "{6, 2, 2, 1}")
+Model().Operation("SPACE_TO_BATCH_ND", i3, [3, 2], pad3, layout).To(o3)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i3: ("TENSOR_QUANT8_ASYMM", 0.5, 128),
+ o3: ("TENSOR_QUANT8_ASYMM", 0.5, 128)
+})
+
+# Instantiate an example
+example = Example({
+ i3: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
+ o3: [0, 0, 0, 5, 0, 0, 0, 6, 0, 1, 0, 7,
+ 0, 2, 0, 8, 0, 3, 0, 9, 0, 4, 0, 10]
+}).AddNchw(i3, o3, layout).AddVariations("relaxed", "float16", quant8)
+
+
+# TEST 4: SPACE_TO_BATCH_NCHW_4, block_size = [3, 2]
+i4 = Input("op1", "TENSOR_FLOAT32", "{1, 4, 2, 1}")
+pad4 = Parameter("paddings", "TENSOR_INT32", "{2, 2}", [1, 1, 2, 4])
+o4 = Output("op4", "TENSOR_FLOAT32", "{6, 2, 4, 1}")
+Model().Operation("SPACE_TO_BATCH_ND", i4, [3, 2], pad4, layout).To(o4)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i4: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
+ o4: ("TENSOR_QUANT8_ASYMM", 0.25, 128)
+})
+
+# Instantiate an example
+example = Example({
+ i4: [1, 2, 3, 4, 5, 6, 7, 8],
+ o4: [0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0,
+ 0, 1, 0, 0, 0, 7, 0, 0, 0, 2, 0, 0, 0, 8, 0, 0,
+ 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0]
+}).AddNchw(i4, o4, layout).AddVariations("relaxed", "float16", quant8)
diff --git a/tests/nnapi/specs/skip/V1_2/space_to_depth_v1_2.mod.py b/tests/nnapi/specs/skip/V1_2/space_to_depth_v1_2.mod.py
new file mode 100644
index 000000000..9f1a799fe
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/space_to_depth_v1_2.mod.py
@@ -0,0 +1,76 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+layout = BoolScalar("layout", False) # NHWC
+
+# TEST 1: SPACE_TO_DEPTH_NCHW_1, block_size = 2
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 2}")
+o1 = Output("op4", "TENSOR_FLOAT32", "{1, 1, 1, 8}")
+Model().Operation("SPACE_TO_DEPTH", i1, 2, layout).To(o1)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.1, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.1, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i1: [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1],
+ o1: [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1]
+}).AddNchw(i1, o1, layout).AddVariations("relaxed", "float16", quant8)
+
+
+# TEST 2: SPACE_TO_DEPTH_NCHW_2, block_size = 2
+i2 = Input("op1", "TENSOR_FLOAT32", "{1, 4, 4, 1}")
+o2 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 4}")
+Model().Operation("SPACE_TO_DEPTH", i2, 2, layout).To(o2)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM", 0.5, 128),
+ o2: ("TENSOR_QUANT8_ASYMM", 0.5, 128)
+})
+
+# Instantiate an example
+example = Example({
+ i2: [1., 2., 5., 6., 3., 4., 7., 8., 9., 10., 13., 14., 11., 12., 15., 16.],
+ o2: [1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16.]
+}).AddNchw(i2, o2, layout).AddVariations("relaxed", "float16", quant8)
+
+
+# TEST 3: SPACE_TO_DEPTH_NCHW_3, block_size = 2
+i3 = Input("op1", "TENSOR_FLOAT32", "{1, 4, 4, 2}")
+o3 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 8}")
+Model().Operation("SPACE_TO_DEPTH", i3, 2, layout).To(o3)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i3: ("TENSOR_QUANT8_ASYMM", 1.0, 0),
+ o3: ("TENSOR_QUANT8_ASYMM", 1.0, 0)
+})
+
+# Instantiate an example
+example = Example({
+ i3: [10, 20, 11, 21, 12, 22, 13, 23,
+ 14, 24, 15, 25, 16, 26, 17, 27,
+ 18, 28, 19, 29, 110, 210, 111, 211,
+ 112, 212, 113, 213, 114, 214, 115, 215],
+ o3: [10, 20, 11, 21, 14, 24, 15, 25,
+ 12, 22, 13, 23, 16, 26, 17, 27,
+ 18, 28, 19, 29, 112, 212, 113, 213,
+ 110, 210, 111, 211, 114, 214, 115, 215]
+}).AddNchw(i3, o3, layout).AddVariations("relaxed", "float16", quant8)
diff --git a/tests/nnapi/specs/skip/V1_2/squeeze_float16.mod.py b/tests/nnapi/specs/skip/V1_2/squeeze_float16.mod.py
new file mode 100644
index 000000000..e5f18a524
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/squeeze_float16.mod.py
@@ -0,0 +1,16 @@
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT16", "{4, 1, 1, 2}")
+squeezeDims = Parameter("squeezeDims", "TENSOR_INT32", "{2}", [1, 2])
+output = Output("output", "TENSOR_FLOAT16", "{4, 2}")
+
+model = model.Operation("SQUEEZE", i1, squeezeDims).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1]}
+
+output0 = {output: # output 0
+ [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/skip/V1_2/strided_slice_float16.mod.py b/tests/nnapi/specs/skip/V1_2/strided_slice_float16.mod.py
new file mode 100644
index 000000000..88735f54e
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/strided_slice_float16.mod.py
@@ -0,0 +1,23 @@
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT16", "{2, 3}")
+begins = Parameter("begins", "TENSOR_INT32", "{2}", [0, 0])
+ends = Parameter("ends", "TENSOR_INT32", "{2}", [2, 3])
+strides = Parameter("strides", "TENSOR_INT32", "{2}", [2, 2])
+beginMask = Int32Scalar("beginMask", 0)
+endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
+
+output = Output("output", "TENSOR_FLOAT16", "{1, 2}")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1.0, 2.0, 3.0,
+ 4.0, 5.0, 6.0]}
+
+output0 = {output: # output 0
+ [1.0, 3.0]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/skip/V1_2/sub_quantized_different_scales.mod.py b/tests/nnapi/specs/skip/V1_2/sub_quantized_different_scales.mod.py
new file mode 100644
index 000000000..61bda3747
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/sub_quantized_different_scales.mod.py
@@ -0,0 +1,60 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import itertools
+
+def dequantize(x, scale, offset):
+ return (x - offset) * scale
+
+def quantize(x, scale, offset):
+ return max(0, min(255, int(round(x / scale)) + offset))
+
+def create_test(input0_scale, input0_offset,
+ input1_scale, input1_offset,
+ output_scale, output_offset):
+ def sub_quantized(a, b):
+ a_dequantized = dequantize(a, input0_scale, input0_offset)
+ b_dequantized = dequantize(b, input1_scale, input1_offset)
+ return quantize(a_dequantized - b_dequantized, output_scale, output_offset)
+
+ values = [0, 1, 2, 3, 4, 5, 250, 251, 252, 253, 254, 255]
+ inputs = list(itertools.product(values, values))
+ input0_values, input1_values = zip(*inputs)
+ output_values = [sub_quantized(a, b) for a, b in inputs]
+ size = len(output_values)
+ input0 = Input("input0", "TENSOR_QUANT8_ASYMM",
+ "{%d}, %g, %d" % (size, input0_scale, input0_offset))
+ input1 = Input("input1", "TENSOR_QUANT8_ASYMM",
+ "{%d}, %g, %d" % (size, input1_scale, input1_offset))
+ activation = 0
+ output0 = Output("output0", "TENSOR_QUANT8_ASYMM",
+ "{%d}, %g, %d" % (size, output_scale, output_offset))
+ model = Model().Operation("SUB", input0, input1, activation).To(output0)
+ Example({
+ input0: input0_values,
+ input1: input1_values,
+ output0: output_values,
+ })
+
+scales_and_offsets = [(1.0, 0),
+ (1.0, 1),
+ (0.01, 120),
+ (10.0, 120)]
+for params in itertools.product(scales_and_offsets,
+ scales_and_offsets,
+ scales_and_offsets):
+ input0_params, input1_params, output_params = params
+ create_test(*input0_params, *input1_params, *output_params)
diff --git a/tests/nnapi/specs/skip/V1_2/svdf_bias_present_float16.mod.py b/tests/nnapi/specs/skip/V1_2/svdf_bias_present_float16.mod.py
new file mode 100644
index 000000000..4dc691400
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/svdf_bias_present_float16.mod.py
@@ -0,0 +1,138 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+batches = 2
+features = 4
+rank = 1
+units = int(features / rank)
+input_size = 3
+memory_size = 10
+
+model = Model()
+
+input = Input("input", "TENSOR_FLOAT16", "{%d, %d}" % (batches, input_size))
+weights_feature = Input("weights_feature", "TENSOR_FLOAT16", "{%d, %d}" % (features, input_size))
+weights_time = Input("weights_time", "TENSOR_FLOAT16", "{%d, %d}" % (features, memory_size))
+bias = Input("bias", "TENSOR_FLOAT16", "{%d}" % (units))
+state_in = Input("state_in", "TENSOR_FLOAT16", "{%d, %d}" % (batches, memory_size*features))
+rank_param = Int32Scalar("rank_param", rank)
+activation_param = Int32Scalar("activation_param", 0)
+state_out = IgnoredOutput("state_out", "TENSOR_FLOAT16", "{%d, %d}" % (batches, memory_size*features))
+output = Output("output", "TENSOR_FLOAT16", "{%d, %d}" % (batches, units))
+
+model = model.Operation("SVDF", input, weights_feature, weights_time, bias, state_in,
+ rank_param, activation_param).To([state_out, output])
+
+input0 = {
+ input: [],
+ weights_feature: [
+ -0.31930989, -0.36118156, 0.0079667, 0.37613347,
+ 0.22197971, 0.12416199, 0.27901134, 0.27557442,
+ 0.3905206, -0.36137494, -0.06634006, -0.10640851
+ ],
+ weights_time: [
+ -0.31930989, 0.37613347, 0.27901134, -0.36137494, -0.36118156,
+ 0.22197971, 0.27557442, -0.06634006, 0.0079667, 0.12416199,
+
+ 0.3905206, -0.10640851, -0.0976817, 0.15294972, 0.39635518,
+ -0.02702999, 0.39296314, 0.15785322, 0.21931258, 0.31053296,
+
+ -0.36916667, 0.38031587, -0.21580373, 0.27072677, 0.23622236,
+ 0.34936687, 0.18174365, 0.35907319, -0.17493086, 0.324846,
+
+ -0.10781813, 0.27201805, 0.14324132, -0.23681851, -0.27115166,
+ -0.01580888, -0.14943552, 0.15465137, 0.09784451, -0.0337657
+ ],
+ bias: [1.0, 2.0, 3.0, 4.0],
+ state_in: [0 for _ in range(batches * memory_size * features)],
+}
+
+test_inputs = [
+ 0.12609188, -0.46347019, -0.89598465,
+ 0.12609188, -0.46347019, -0.89598465,
+
+ 0.14278367, -1.64410412, -0.75222826,
+ 0.14278367, -1.64410412, -0.75222826,
+
+ 0.49837467, 0.19278903, 0.26584083,
+ 0.49837467, 0.19278903, 0.26584083,
+
+ -0.11186574, 0.13164264, -0.05349274,
+ -0.11186574, 0.13164264, -0.05349274,
+
+ -0.68892461, 0.37783599, 0.18263303,
+ -0.68892461, 0.37783599, 0.18263303,
+
+ -0.81299269, -0.86831826, 1.43940818,
+ -0.81299269, -0.86831826, 1.43940818,
+
+ -1.45006323, -0.82251364, -1.69082689,
+ -1.45006323, -0.82251364, -1.69082689,
+
+ 0.03966608, -0.24936394, -0.77526885,
+ 0.03966608, -0.24936394, -0.77526885,
+
+ 0.11771342, -0.23761693, -0.65898693,
+ 0.11771342, -0.23761693, -0.65898693,
+
+ -0.89477462, 1.67204106, -0.53235275,
+ -0.89477462, 1.67204106, -0.53235275
+]
+
+golden_outputs = [
+ 1.014899, 1.9482339, 2.856275, 3.99728117,
+ 1.014899, 1.9482339, 2.856275, 3.99728117,
+
+ 1.068281, 1.837783, 2.847732, 4.00323521,
+ 1.068281, 1.837783, 2.847732, 4.00323521,
+
+ 0.9682179, 1.9666911, 3.0609602, 4.0333759,
+ 0.9682179, 1.9666911, 3.0609602, 4.0333759,
+
+ 0.99376901, 1.922299, 2.608807, 3.9863309,
+ 0.99376901, 1.922299, 2.608807, 3.9863309,
+
+ 1.201551, 1.835393, 2.820538, 3.9407261,
+ 1.201551, 1.835393, 2.820538, 3.9407261,
+
+ 1.0886511, 1.9124599, 2.730717, 4.0281379,
+ 1.0886511, 1.9124599, 2.730717, 4.0281379,
+
+ 0.798826, 1.413855, 2.371376, 3.9669588,
+ 0.798826, 1.413855, 2.371376, 3.9669588,
+
+ 0.9160904, 1.700671, 3.108746, 4.109808,
+ 0.9160904, 1.700671, 3.108746, 4.109808,
+
+ 1.419114, 1.762176, 2.577373, 4.175115,
+ 1.419114, 1.762176, 2.577373, 4.175115,
+
+ 1.36726, 1.477697, 2.543498, 3.824525,
+ 1.36726, 1.477697, 2.543498, 3.824525
+]
+
+output0 = {state_out: [0 for _ in range(batches * memory_size * features)],
+ output: []}
+
+# TODO: enable more data points after fixing the reference issue
+for i in range(1):
+ batch_start = i * input_size * batches
+ batch_end = batch_start + input_size * batches
+ input0[input] = test_inputs[batch_start:batch_end]
+ golden_start = i * units * batches
+ golden_end = golden_start + units * batches
+ output0[output] = golden_outputs[golden_start:golden_end]
+ Example((input0, output0))
diff --git a/tests/nnapi/specs/skip/V1_2/svdf_float16.mod.py b/tests/nnapi/specs/skip/V1_2/svdf_float16.mod.py
new file mode 100644
index 000000000..2b0f368d3
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/svdf_float16.mod.py
@@ -0,0 +1,138 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+batches = 2
+features = 4
+rank = 1
+units = int(features / rank)
+input_size = 3
+memory_size = 10
+
+model = Model()
+
+input = Input("input", "TENSOR_FLOAT16", "{%d, %d}" % (batches, input_size))
+weights_feature = Input("weights_feature", "TENSOR_FLOAT16", "{%d, %d}" % (features, input_size))
+weights_time = Input("weights_time", "TENSOR_FLOAT16", "{%d, %d}" % (features, memory_size))
+bias = Input("bias", "TENSOR_FLOAT16", "{%d}" % (units))
+state_in = Input("state_in", "TENSOR_FLOAT16", "{%d, %d}" % (batches, memory_size*features))
+rank_param = Int32Scalar("rank_param", rank)
+activation_param = Int32Scalar("activation_param", 0)
+state_out = IgnoredOutput("state_out", "TENSOR_FLOAT16", "{%d, %d}" % (batches, memory_size*features))
+output = Output("output", "TENSOR_FLOAT16", "{%d, %d}" % (batches, units))
+
+model = model.Operation("SVDF", input, weights_feature, weights_time, bias, state_in,
+ rank_param, activation_param).To([state_out, output])
+
+input0 = {
+ input: [],
+ weights_feature: [
+ -0.31930989, -0.36118156, 0.0079667, 0.37613347,
+ 0.22197971, 0.12416199, 0.27901134, 0.27557442,
+ 0.3905206, -0.36137494, -0.06634006, -0.10640851
+ ],
+ weights_time: [
+ -0.31930989, 0.37613347, 0.27901134, -0.36137494, -0.36118156,
+ 0.22197971, 0.27557442, -0.06634006, 0.0079667, 0.12416199,
+
+ 0.3905206, -0.10640851, -0.0976817, 0.15294972, 0.39635518,
+ -0.02702999, 0.39296314, 0.15785322, 0.21931258, 0.31053296,
+
+ -0.36916667, 0.38031587, -0.21580373, 0.27072677, 0.23622236,
+ 0.34936687, 0.18174365, 0.35907319, -0.17493086, 0.324846,
+
+ -0.10781813, 0.27201805, 0.14324132, -0.23681851, -0.27115166,
+ -0.01580888, -0.14943552, 0.15465137, 0.09784451, -0.0337657
+ ],
+ bias: [],
+ state_in: [0 for _ in range(batches * memory_size * features)],
+}
+
+test_inputs = [
+ 0.12609188, -0.46347019, -0.89598465,
+ 0.12609188, -0.46347019, -0.89598465,
+
+ 0.14278367, -1.64410412, -0.75222826,
+ 0.14278367, -1.64410412, -0.75222826,
+
+ 0.49837467, 0.19278903, 0.26584083,
+ 0.49837467, 0.19278903, 0.26584083,
+
+ -0.11186574, 0.13164264, -0.05349274,
+ -0.11186574, 0.13164264, -0.05349274,
+
+ -0.68892461, 0.37783599, 0.18263303,
+ -0.68892461, 0.37783599, 0.18263303,
+
+ -0.81299269, -0.86831826, 1.43940818,
+ -0.81299269, -0.86831826, 1.43940818,
+
+ -1.45006323, -0.82251364, -1.69082689,
+ -1.45006323, -0.82251364, -1.69082689,
+
+ 0.03966608, -0.24936394, -0.77526885,
+ 0.03966608, -0.24936394, -0.77526885,
+
+ 0.11771342, -0.23761693, -0.65898693,
+ 0.11771342, -0.23761693, -0.65898693,
+
+ -0.89477462, 1.67204106, -0.53235275,
+ -0.89477462, 1.67204106, -0.53235275
+]
+
+golden_outputs = [
+ 0.014899, -0.0517661, -0.143725, -0.00271883,
+ 0.014899, -0.0517661, -0.143725, -0.00271883,
+
+ 0.068281, -0.162217, -0.152268, 0.00323521,
+ 0.068281, -0.162217, -0.152268, 0.00323521,
+
+ -0.0317821, -0.0333089, 0.0609602, 0.0333759,
+ -0.0317821, -0.0333089, 0.0609602, 0.0333759,
+
+ -0.00623099, -0.077701, -0.391193, -0.0136691,
+ -0.00623099, -0.077701, -0.391193, -0.0136691,
+
+ 0.201551, -0.164607, -0.179462, -0.0592739,
+ 0.201551, -0.164607, -0.179462, -0.0592739,
+
+ 0.0886511, -0.0875401, -0.269283, 0.0281379,
+ 0.0886511, -0.0875401, -0.269283, 0.0281379,
+
+ -0.201174, -0.586145, -0.628624, -0.0330412,
+ -0.201174, -0.586145, -0.628624, -0.0330412,
+
+ -0.0839096, -0.299329, 0.108746, 0.109808,
+ -0.0839096, -0.299329, 0.108746, 0.109808,
+
+ 0.419114, -0.237824, -0.422627, 0.175115,
+ 0.419114, -0.237824, -0.422627, 0.175115,
+
+ 0.36726, -0.522303, -0.456502, -0.175475,
+ 0.36726, -0.522303, -0.456502, -0.175475
+]
+
+output0 = {state_out: [0 for _ in range(batches * memory_size * features)],
+ output: []}
+
+# TODO: enable more data points after fixing the reference issue
+for i in range(1):
+ batch_start = i * input_size * batches
+ batch_end = batch_start + input_size * batches
+ input0[input] = test_inputs[batch_start:batch_end]
+ golden_start = i * units * batches
+ golden_end = golden_start + units * batches
+ output0[output] = golden_outputs[golden_start:golden_end]
+ Example((input0, output0))
diff --git a/tests/nnapi/specs/skip/V1_2/svdf_state_float16.mod.py b/tests/nnapi/specs/skip/V1_2/svdf_state_float16.mod.py
new file mode 100644
index 000000000..f8fcae7b0
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/svdf_state_float16.mod.py
@@ -0,0 +1,114 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+batches = 2
+units = 4
+input_size = 3
+memory_size = 10
+
+model = Model()
+
+input = Input("input", "TENSOR_FLOAT16", "{%d, %d}" % (batches, input_size))
+weights_feature = Input("weights_feature", "TENSOR_FLOAT16", "{%d, %d}" % (units, input_size))
+weights_time = Input("weights_time", "TENSOR_FLOAT16", "{%d, %d}" % (units, memory_size))
+bias = Input("bias", "TENSOR_FLOAT16", "{%d}" % (units))
+state_in = Input("state_in", "TENSOR_FLOAT16", "{%d, %d}" % (batches, memory_size*units))
+rank_param = Int32Scalar("rank_param", 1)
+activation_param = Int32Scalar("activation_param", 0)
+state_out = Output("state_out", "TENSOR_FLOAT16", "{%d, %d}" % (batches, memory_size*units))
+output = Output("output", "TENSOR_FLOAT16", "{%d, %d}" % (batches, units))
+
+model = model.Operation("SVDF", input, weights_feature, weights_time, bias, state_in,
+ rank_param, activation_param).To([state_out, output])
+
+input0 = {
+ weights_feature: [
+ -0.31930989, -0.36118156, 0.0079667, 0.37613347,
+ 0.22197971, 0.12416199, 0.27901134, 0.27557442,
+ 0.3905206, -0.36137494, -0.06634006, -0.10640851
+ ],
+ weights_time: [
+ -0.31930989, 0.37613347, 0.27901134, -0.36137494, -0.36118156,
+ 0.22197971, 0.27557442, -0.06634006, 0.0079667, 0.12416199,
+
+ 0.3905206, -0.10640851, -0.0976817, 0.15294972, 0.39635518,
+ -0.02702999, 0.39296314, 0.15785322, 0.21931258, 0.31053296,
+
+ -0.36916667, 0.38031587, -0.21580373, 0.27072677, 0.23622236,
+ 0.34936687, 0.18174365, 0.35907319, -0.17493086, 0.324846,
+
+ -0.10781813, 0.27201805, 0.14324132, -0.23681851, -0.27115166,
+ -0.01580888, -0.14943552, 0.15465137, 0.09784451, -0.0337657
+ ],
+ bias: [],
+}
+
+input0[input] = [
+ 0.14278367, -1.64410412, -0.75222826,
+ 0.14278367, -1.64410412, -0.75222826,
+]
+input0[state_in] = [
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0.119996, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, -0.166701, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ -0.44244, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0.0805206, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0.119996, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, -0.166701, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ -0.44244, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0.0805206, 0,
+]
+output0 = {
+ state_out : [
+ 0, 0, 0, 0,
+ 0, 0, 0, 0.119996,
+ 0.542235, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, -0.166701, -0.40465, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, -0.44244,
+ -0.706995, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0.0805206, 0.137515, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0.119996,
+ 0.542235, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, -0.166701, -0.40465, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, -0.44244,
+ -0.706995, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0.0805206, 0.137515, 0,
+ ],
+ output : [
+ 0.068281, -0.162217, -0.152268, 0.00323521,
+ 0.068281, -0.162217, -0.152268, 0.00323521,
+ ]
+}
+
+Example((input0, output0))
diff --git a/tests/nnapi/specs/skip/V1_2/transpose_conv2d.mod.py b/tests/nnapi/specs/skip/V1_2/transpose_conv2d.mod.py
new file mode 100644
index 000000000..5a91c12fe
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/transpose_conv2d.mod.py
@@ -0,0 +1,284 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+layout = BoolScalar("layout", False) # NHWC
+
+# TEST 1: TRANSPOSE_CONV2D, pad = valid, stride = 2
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 1}") # input 0
+w1 = Parameter("op2", "TENSOR_FLOAT32", "{2, 3, 3, 1}", [1, 3, 5, 7, 9, 11, 13, 15, 17, 2, 4, 6, 8, 10, 12, 14, 16, 18]) # weight
+b1 = Parameter("op3", "TENSOR_FLOAT32", "{2}", [-1.5, -2]) # bias
+s1 = Int32Vector("shape", [1, 5, 5, 2]) # output shape
+act = Int32Scalar("act", 0) # act = none
+o1 = Output("op4", "TENSOR_FLOAT32", "{1, 5, 5, 2}") # output
+Model().Operation("TRANSPOSE_CONV_2D", i1, w1, b1, s1, 2, 2, 2, act, layout).To(o1)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
+ w1: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
+ b1: ("TENSOR_INT32", 0.25, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.5, 0)
+})
+
+quant8_mult_gt_1 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.5, 100),
+ w1: ("TENSOR_QUANT8_ASYMM", 0.5, 128),
+ b1: ("TENSOR_INT32", 0.25, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.1, 80)
+})
+
+# Per-channel quantization
+channelQuant8 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.25, 100),
+ w1: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.25, 0.5])),
+ b1: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.0625, 0.125], hide=True)),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.5, 80)
+})
+
+channelQuant8_mult_gt_1 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.25, 100),
+ w1: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.25, 0.5])),
+ b1: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.0625, 0.125], hide=True)),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.1, 80)
+})
+
+Example({
+ i1: [1, 2, 3, 4],
+ o1: [-0.5, 0, 1.5, 2, 5.5, 8, 4.5, 6, 8.5, 10,
+ 5.5, 6, 7.5, 8, 23.5, 26, 16.5, 18, 20.5, 22,
+ 14.5, 18, 22.5, 26, 60.5, 70, 40.5, 46, 52.5, 58,
+ 19.5, 22, 25.5, 28, 59.5, 66, 34.5, 38, 42.5, 46,
+ 37.5, 40, 43.5, 46, 101.5, 108, 58.5, 62, 66.5, 70]
+}).AddNchw(i1, o1, s1, layout).AddAllActivations(o1, act).AddVariations("relaxed", quant8, quant8_mult_gt_1, channelQuant8, channelQuant8_mult_gt_1, "float16").AddInput(w1, b1)
+
+
+# TEST 2: TRANSPOSE_CONV2D_LARGE, pad = same, stride = 3, act = relu
+i2 = Input("op1", "TENSOR_FLOAT32", "{1, 1, 2, 1}") # input 0
+w2 = Parameter("op2", "TENSOR_FLOAT32", "{1, 3, 3, 1}", [9, 5, 6, 9, 8, 5, 3, 1, 4]) # weight
+b2 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [-1000]) # bias
+s2 = Int32Vector("shape", [1, 3, 4, 1]) # output shape
+o2 = Output("op4", "TENSOR_FLOAT32", "{1, 3, 4, 1}") # output
+Model().Operation("TRANSPOSE_CONV_2D", i2, w2, b2, s2, 1, 3, 3, 1, layout).To(o2)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM", 2.0, 0),
+ w2: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
+ b2: ("TENSOR_INT32", 0.5, 0),
+ o2: ("TENSOR_QUANT8_ASYMM", 20.0, 50)
+})
+
+# Per-channel quantization
+channelQuant8 = DataTypeConverter().Identify({
+ i2: ("TENSOR_QUANT8_ASYMM", 2.0, 0),
+ w2: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.25])),
+ b2: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.5], hide=True)),
+ o2: ("TENSOR_QUANT8_ASYMM", 20.0, 50)
+})
+
+Example({
+ i2: [300, 500],
+ o2: [500., 800., 3500., 1500.,
+ 1400., 500., 3500., 3000.,
+ 0., 200., 500., 0.]
+}).AddNchw(i2, o2, s2, layout).AddVariations("relaxed", quant8, channelQuant8, "float16").AddInput(w2, b2)
+
+
+# TEST 3: TRANSPOSE_CONV2D_SAME, outputShape = [1, 4, 4, 1], pad = same, stride = 1, act = none
+i3 = Input("op1", "TENSOR_FLOAT32", "{1, 4, 4, 2}") # input 0
+w3 = Parameter("op2", "TENSOR_FLOAT32", "{1, 3, 3, 2}", [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]) # weight
+b3 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [0]) # bias
+s3 = Int32Vector("shape", [1, 4, 4, 1]) # output shape
+o3 = Output("op4", "TENSOR_FLOAT32", "{1, 4, 4, 1}") # output
+Model().Operation("TRANSPOSE_CONV_2D", i3, w3, b3, s3, 1, 1, 1, 0, layout).To(o3)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i3: ("TENSOR_QUANT8_ASYMM", 0.5, 100),
+ w3: ("TENSOR_QUANT8_ASYMM", 0.5, 128),
+ b3: ("TENSOR_INT32", 0.25, 0),
+ o3: ("TENSOR_QUANT8_ASYMM", 16.0, 0)
+})
+
+Example({
+ i3: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32],
+ o3: [184, 412, 568, 528,
+ 678, 1347, 1689, 1434,
+ 1494, 2715, 3057, 2442,
+ 1968, 3352, 3652, 2760]
+}).AddNchw(i3, o3, s3, layout).AddVariations("relaxed", quant8, "float16").AddInput(w3, b3)
+
+
+# TEST 4: TRANSPOSE_CONV2D_VALID, outputShape = [1, 6, 6, 1], pad = valid, stride = 1, act = none
+i4 = Input("op1", "TENSOR_FLOAT32", "{1, 4, 4, 2}") # input 0
+w4 = Parameter("op2", "TENSOR_FLOAT32", "{1, 3, 3, 2}", [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]) # weight
+b4 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [0]) # bias
+s4 = Int32Vector("shape", [1, 6, 6, 1]) # output shape
+o4 = Output("op4", "TENSOR_FLOAT32", "{1, 6, 6, 1}") # output
+Model().Operation("TRANSPOSE_CONV_2D", i4, w4, b4, s4, 2, 1, 1, 0, layout).To(o4)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i4: ("TENSOR_QUANT8_ASYMM", 0.25, 10),
+ w4: ("TENSOR_QUANT8_ASYMM", 0.5, 128),
+ b4: ("TENSOR_INT32", 0.125, 0),
+ o4: ("TENSOR_QUANT8_ASYMM", 32.0, 80)
+})
+
+Example({
+ i4: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32],
+ o4: [5, 22, 59, 101, 114, 83,
+ 52, 184, 412, 568, 528, 344,
+ 237, 678, 1347, 1689, 1434, 879,
+ 597, 1494, 2715, 3057, 2442, 1431,
+ 856, 1968, 3352, 3652, 2760, 1548,
+ 689, 1534, 2543, 2729, 2010, 1103]
+}).AddNchw(i4, o4, s4, layout).AddVariations("relaxed", quant8, "float16").AddInput(w4, b4)
+
+
+# TEST 5: TRANSPOSE_CONV2D_EXPLICIT, pad = [1, 2, 2, 1], stride = 1, act = none
+i5 = Input("op1", "TENSOR_FLOAT32", "{1, 4, 4, 2}") # input 0
+w5 = Parameter("op2", "TENSOR_FLOAT32", "{1, 3, 3, 2}", [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]) # weight
+b5 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [0]) # bias
+o5 = Output("op4", "TENSOR_FLOAT32", "{1, 3, 3, 1}") # output
+Model().Operation("TRANSPOSE_CONV_2D", i5, w5, b5, 1, 2, 2, 1, 1, 1, 0, layout).To(o5)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i5: ("TENSOR_QUANT8_ASYMM", 0.5, 100),
+ w5: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
+ b5: ("TENSOR_INT32", 0.125, 0),
+ o5: ("TENSOR_QUANT8_ASYMM", 20.0, 50)
+})
+
+Example({
+ i5: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32],
+ o5: [678, 1347, 1689,
+ 1494, 2715, 3057,
+ 1968, 3352, 3652]
+}).AddNchw(i5, o5, layout).AddVariations("relaxed", quant8, "float16").AddInput(w5, b5)
+
+
+# TEST 6: zero-sized input, implicit padding
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# TRANSPOSE_CONV_2D op with numBatches = 0.
+w = Parameter("weights", "TENSOR_FLOAT32", "{2, 3, 3, 1}", [1, 3, 5, 7, 9, 11, 9, 7, 5, 2, 4, 6, 8, 10, 12, 10, 8, 6]) # weight
+b = Parameter("bias", "TENSOR_FLOAT32", "{2}", [-1.5, -2]) # bias
+s = Int32Vector("shape", [0, 5, 5, 2]) # output shape
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 5, 5, 2}") # out
+model = model.Operation("TRANSPOSE_CONV_2D", zero_sized, w, b, s, 2, 2, 2, 0, layout).To(o3)
+
+quant8 = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ zero_sized: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ w: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ b: ("TENSOR_INT32", 0.01, 0),
+ o3: ("TENSOR_QUANT8_ASYMM", 0.1, 128)
+})
+
+# Create test case with dummy values.
+Example({
+ i1: [1],
+ o1: [0],
+ o2: [0],
+ o3: [0],
+}).AddNchw(i1, zero_sized, o3, s, layout).AddVariations("relaxed", quant8, "float16")
+
+
+# TEST 7: zero-sized input, explicit padding
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 4, 4, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 4, 4, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# TRANSPOSE_CONV_2D op with numBatches = 0.
+w = Parameter("weights", "TENSOR_FLOAT32", "{1, 3, 3, 1}", [1, 3, 5, 7, 9, 11, 9, 7, 5]) # weight
+b = Parameter("bias", "TENSOR_FLOAT32", "{1}", [-1.5]) # bias
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 3, 3, 1}") # out
+model = model.Operation("TRANSPOSE_CONV_2D", zero_sized, w, b, 1, 2, 2, 1, 1, 1, 0, layout).To(o3)
+
+quant8 = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ zero_sized: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ w: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ b: ("TENSOR_INT32", 0.01, 0),
+ o3: ("TENSOR_QUANT8_ASYMM", 0.1, 128)
+})
+
+# Create test case with dummy values.
+Example({
+ i1: [1],
+ o1: [0],
+ o2: [0],
+ o3: [0],
+}).AddNchw(i1, zero_sized, o3, layout).AddVariations("relaxed", quant8, "float16")
+
+
+# TEST 8: TRANSPOSE_CONV2D_SAME, outputShape = [1, 4, 4, 1], pad = same, stride = 2, act = none
+i8 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 1}") # input 0
+w8 = Parameter("op2", "TENSOR_FLOAT32", "{1, 1, 1, 1}", [2]) # weight
+b8 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [0]) # bias
+s8 = Int32Vector("shape", [1, 4, 4, 1]) # output shape
+o8 = Output("op4", "TENSOR_FLOAT32", "{1, 4, 4, 1}") # output
+Model().Operation("TRANSPOSE_CONV_2D", i8, w8, b8, s8, 1, 2, 2, 0, layout).To(o8)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i8: ("TENSOR_QUANT8_ASYMM", 0.5, 100),
+ w8: ("TENSOR_QUANT8_ASYMM", 0.5, 128),
+ b8: ("TENSOR_INT32", 0.25, 0),
+ o8: ("TENSOR_QUANT8_ASYMM", 16.0, 0)
+})
+
+Example({
+ i8: [1, 2, 3, 4],
+ o8: [2, 0, 4, 0, 0, 0, 0, 0, 6, 0, 8, 0, 0, 0, 0, 0]
+}).AddNchw(i8, o8, s8, layout).AddVariations("relaxed", quant8, "float16").AddInput(w8, b8)
diff --git a/tests/nnapi/specs/skip/V1_2/transpose_conv2d_large.mod.py b/tests/nnapi/specs/skip/V1_2/transpose_conv2d_large.mod.py
new file mode 100644
index 000000000..f9a87c1bf
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/transpose_conv2d_large.mod.py
@@ -0,0 +1,48 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+layout = BoolScalar("layout", False) # NHWC
+
+# TEST 1: TRANSPOSE_CONV2D_LARGE, pad = same, stride = 32
+i1 = Input("op1", "TENSOR_FLOAT32", "{25, 1, 1, 1}") # input 0
+w1 = Parameter("op2", "TENSOR_FLOAT32", "{16, 1, 1, 1}", [1] * 16) # weight
+b1 = Parameter("op3", "TENSOR_FLOAT32", "{16}", [0] * 16) # bias
+s1 = Int32Vector("shape", [25, 32, 32, 16]) # output shape
+act = Int32Scalar("act", 0) # act = none
+o1 = Output("op4", "TENSOR_FLOAT32", "{25, 32, 32, 16}") # output
+Model().Operation("TRANSPOSE_CONV_2D", i1, w1, b1, s1, 1, 32, 32, act, layout).To(o1)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
+ w1: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
+ b1: ("TENSOR_INT32", 0.25, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.5, 0)
+})
+
+# Per-channel quantization
+channelQuant8 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.25, 100),
+ w1: ("TENSOR_QUANT8_SYMM_PER_CHANNEL", 0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.5] * 16)),
+ b1: ("TENSOR_INT32", 0.0, 0, SymmPerChannelQuantParams(channelDim=0, scales=[0.125] * 16, hide=True)),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.5, 80)
+})
+
+Example({
+ i1: [1] * 25,
+ o1: ([1] * 16 + [0] * (32 * 32 - 1) * 16) * 25
+}).AddVariations(quant8, channelQuant8, includeDefault=False)
+
diff --git a/tests/nnapi/specs/skip/V1_2/transpose_float16.mod.py b/tests/nnapi/specs/skip/V1_2/transpose_float16.mod.py
new file mode 100644
index 000000000..79b3796fa
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/transpose_float16.mod.py
@@ -0,0 +1,18 @@
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT16", "{1, 2, 2, 1}")
+perms = Parameter("perms", "TENSOR_INT32", "{4}", [0, 2, 1, 3])
+output = Output("output", "TENSOR_FLOAT16", "{1, 2, 2, 1}")
+
+model = model.Operation("TRANSPOSE", i1, perms).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1.0, 2.0,
+ 3.0, 4.0]}
+
+output0 = {output: # output 0
+ [1.0, 3.0,
+ 2.0, 4.0]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/skip/V1_2/unidirectional_sequence_lstm_cifg_peephole.mod.py b/tests/nnapi/specs/skip/V1_2/unidirectional_sequence_lstm_cifg_peephole.mod.py
new file mode 100644
index 000000000..91bde56ee
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/unidirectional_sequence_lstm_cifg_peephole.mod.py
@@ -0,0 +1,168 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Unidirectional Sequence LSTM Test:
+# 3 Time Step, No Layer Normalization, Cifg, Peephole, No Projection, and No Clipping.
+import copy
+
+model = Model()
+
+max_time = 3
+n_batch = 1
+n_input = 2
+# n_cell and n_output have the same size when there is no projection.
+n_cell = 4
+n_output = 4
+
+input = Input("input", "TENSOR_FLOAT32", "{%d, %d, %d}" % (max_time, n_batch, n_input))
+
+input_to_input_weights = Input("input_to_input_weights", "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_cell, n_input))
+input_to_forget_weights = Input("input_to_forget_weights", "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_cell, n_input))
+input_to_cell_weights = Input("input_to_cell_weights", "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_cell, n_input))
+input_to_output_weights = Input("input_to_output_weights", "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_cell, n_input))
+
+recurrent_to_input_weights = Input("recurrent_to_intput_weights",
+ "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_cell, n_output))
+recurrent_to_forget_weights = Input("recurrent_to_forget_weights",
+ "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_cell, n_output))
+recurrent_to_cell_weights = Input("recurrent_to_cell_weights", "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_cell, n_output))
+recurrent_to_output_weights = Input("recurrent_to_output_weights",
+ "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_cell, n_output))
+
+cell_to_input_weights = Input("cell_to_input_weights", "TENSOR_FLOAT32",
+ "{%d}" % (n_cell))
+cell_to_forget_weights = Input("cell_to_forget_weights", "TENSOR_FLOAT32",
+ "{%d}" % (n_cell))
+cell_to_output_weights = Input("cell_to_output_weights", "TENSOR_FLOAT32",
+ "{%d}" % (n_cell))
+
+input_gate_bias = Input("input_gate_bias", "TENSOR_FLOAT32", "{%d}" % (n_cell))
+forget_gate_bias = Input("forget_gate_bias", "TENSOR_FLOAT32",
+ "{%d}" % (n_cell))
+cell_gate_bias = Input("cell_gate_bias", "TENSOR_FLOAT32", "{%d}" % (n_cell))
+output_gate_bias = Input("output_gate_bias", "TENSOR_FLOAT32",
+ "{%d}" % (n_cell))
+
+projection_weights = Input("projection_weights", "TENSOR_FLOAT32",
+ "{%d,%d}" % (n_output, n_cell))
+projection_bias = Input("projection_bias", "TENSOR_FLOAT32", "{0}")
+
+output_state_in = Input("output_state_in", "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_batch, n_output))
+cell_state_in = Input("cell_state_in", "TENSOR_FLOAT32",
+ "{%d, %d}" % (n_batch, n_cell))
+
+activation_param = Int32Scalar("activation_param", 4) # Tanh
+cell_clip_param = Float32Scalar("cell_clip_param", 0.)
+proj_clip_param = Float32Scalar("proj_clip_param", 0.)
+time_major_param = BoolScalar("time_major_param", True)
+
+input_layer_norm_weights = Input("input_layer_norm_weights", "TENSOR_FLOAT32",
+ "{%d}" % n_cell)
+forget_layer_norm_weights = Input("forget_layer_norm_weights", "TENSOR_FLOAT32",
+ "{%d}" % n_cell)
+cell_layer_norm_weights = Input("cell_layer_norm_weights", "TENSOR_FLOAT32",
+ "{%d}" % n_cell)
+output_layer_norm_weights = Input("output_layer_norm_weights", "TENSOR_FLOAT32",
+ "{%d}" % n_cell)
+
+output = Output("output", "TENSOR_FLOAT32", "{%d, %d, %d}" % (max_time, n_batch, n_output))
+
+model = model.Operation(
+ "UNIDIRECTIONAL_SEQUENCE_LSTM", input, input_to_input_weights, input_to_forget_weights,
+ input_to_cell_weights, input_to_output_weights, recurrent_to_input_weights,
+ recurrent_to_forget_weights, recurrent_to_cell_weights,
+ recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights,
+ cell_to_output_weights, input_gate_bias, forget_gate_bias, cell_gate_bias,
+ output_gate_bias, projection_weights, projection_bias, output_state_in,
+ cell_state_in, activation_param, cell_clip_param, proj_clip_param, time_major_param,
+ input_layer_norm_weights, forget_layer_norm_weights,
+ cell_layer_norm_weights, output_layer_norm_weights).To([output])
+
+# Example 1. Input in operand 0,
+input0 = {
+ input_to_input_weights: [],
+ input_to_forget_weights: [
+ -0.55291498, -0.42866567, 0.13056988, -0.3633365,
+ -0.22755712, 0.28253698, 0.24407166, 0.33826375
+ ],
+ input_to_cell_weights: [
+ -0.49770179, -0.27711356, -0.09624726, 0.05100781,
+ 0.04717243, 0.48944736, -0.38535351, -0.17212132
+ ],
+ input_to_output_weights: [
+ 0.10725588, -0.02335852, -0.55932593, -0.09426838,
+ -0.44257352, 0.54939759, 0.01533556, 0.42751634
+ ],
+ input_gate_bias: [],
+ forget_gate_bias: [1., 1., 1., 1.],
+ cell_gate_bias: [0., 0., 0., 0.],
+ output_gate_bias: [0., 0., 0., 0.],
+ recurrent_to_input_weights: [],
+ recurrent_to_cell_weights: [
+ 0.54066205, -0.32668582, -0.43562764, -0.56094903,
+ 0.42957711, 0.01841056, -0.32764608, -0.33027974,
+ -0.10826075, 0.20675004, 0.19069612, -0.03026325,
+ -0.54532051, 0.33003211, 0.44901288, 0.21193194
+ ],
+ recurrent_to_forget_weights: [
+ -0.13832897, -0.0515101, -0.2359007, -0.16661474,
+ -0.14340827, 0.36986142, 0.23414481, 0.55899,
+ 0.10798943, -0.41174671, 0.17751795, -0.34484994,
+ -0.35874045, -0.11352962, 0.27268326, 0.54058349
+ ],
+ recurrent_to_output_weights: [
+ 0.41613156, 0.42610586, -0.16495961, -0.5663873,
+ 0.30579174, -0.05115908, -0.33941799, 0.23364776,
+ 0.11178309, 0.09481031, -0.26424935, 0.46261835,
+ 0.50248802, 0.26114327, -0.43736315, 0.33149987
+ ],
+ cell_to_input_weights: [],
+ cell_to_forget_weights: [0.47485286, -0.51955009, -0.24458408, 0.31544167],
+ cell_to_output_weights: [-0.17135078, 0.82760304, 0.85573703, -0.77109635],
+ projection_weights: [],
+ projection_bias: [],
+ input_layer_norm_weights: [],
+ forget_layer_norm_weights: [],
+ cell_layer_norm_weights: [],
+ output_layer_norm_weights: []
+}
+
+test_input = [2., 3., 3., 4., 1., 1.]
+
+golden_output = [
+ -0.36444446, -0.00352185, 0.12886585, -0.05163646,
+ -0.42312205, -0.01218222, 0.24201041, -0.08124574,
+ -0.358325, -0.04621704, 0.21641694, -0.06471302
+]
+
+output0 = {
+ output: golden_output,
+}
+
+input0[input] = test_input
+input0[output_state_in] = [ 0 for _ in range(n_batch * n_output) ]
+input0[cell_state_in] = [ 0 for _ in range(n_batch * n_cell) ]
+
+Example((input0, output0))
diff --git a/tests/nnapi/specs/skip/V1_2/unidirectional_sequence_lstm_f16_batch_major.mod.py b/tests/nnapi/specs/skip/V1_2/unidirectional_sequence_lstm_f16_batch_major.mod.py
new file mode 100644
index 000000000..4c4e7a8fa
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/unidirectional_sequence_lstm_f16_batch_major.mod.py
@@ -0,0 +1,177 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Unidirectional Sequence LSTM Test:
+# FLOAT16, Batch Major, 3 Time Step, No Layer Normalization, No Cifg, No Peephole, No Projection,
+# and No Clipping.
+import copy
+
+model = Model()
+
+max_time = 3
+n_batch = 1
+n_input = 2
+# n_cell and n_output have the same size when there is no projection.
+n_cell = 4
+n_output = 4
+
+input = Input("input", "TENSOR_FLOAT16", "{%d, %d, %d}" % (n_batch, max_time, n_input))
+
+input_to_input_weights = Input("input_to_input_weights", "TENSOR_FLOAT16",
+ "{%d, %d}" % (n_cell, n_input))
+input_to_forget_weights = Input("input_to_forget_weights", "TENSOR_FLOAT16",
+ "{%d, %d}" % (n_cell, n_input))
+input_to_cell_weights = Input("input_to_cell_weights", "TENSOR_FLOAT16",
+ "{%d, %d}" % (n_cell, n_input))
+input_to_output_weights = Input("input_to_output_weights", "TENSOR_FLOAT16",
+ "{%d, %d}" % (n_cell, n_input))
+
+recurrent_to_input_weights = Input("recurrent_to_intput_weights",
+ "TENSOR_FLOAT16",
+ "{%d, %d}" % (n_cell, n_output))
+recurrent_to_forget_weights = Input("recurrent_to_forget_weights",
+ "TENSOR_FLOAT16",
+ "{%d, %d}" % (n_cell, n_output))
+recurrent_to_cell_weights = Input("recurrent_to_cell_weights", "TENSOR_FLOAT16",
+ "{%d, %d}" % (n_cell, n_output))
+recurrent_to_output_weights = Input("recurrent_to_output_weights",
+ "TENSOR_FLOAT16",
+ "{%d, %d}" % (n_cell, n_output))
+
+cell_to_input_weights = Input("cell_to_input_weights", "TENSOR_FLOAT16",
+ "{%d}" % (n_cell))
+cell_to_forget_weights = Input("cell_to_forget_weights", "TENSOR_FLOAT16",
+ "{%d}" % (n_cell))
+cell_to_output_weights = Input("cell_to_output_weights", "TENSOR_FLOAT16",
+ "{%d}" % (n_cell))
+
+input_gate_bias = Input("input_gate_bias", "TENSOR_FLOAT16", "{%d}" % (n_cell))
+forget_gate_bias = Input("forget_gate_bias", "TENSOR_FLOAT16",
+ "{%d}" % (n_cell))
+cell_gate_bias = Input("cell_gate_bias", "TENSOR_FLOAT16", "{%d}" % (n_cell))
+output_gate_bias = Input("output_gate_bias", "TENSOR_FLOAT16",
+ "{%d}" % (n_cell))
+
+projection_weights = Input("projection_weights", "TENSOR_FLOAT16",
+ "{%d,%d}" % (n_output, n_cell))
+projection_bias = Input("projection_bias", "TENSOR_FLOAT16", "{0}")
+
+output_state_in = Input("output_state_in", "TENSOR_FLOAT16",
+ "{%d, %d}" % (n_batch, n_output))
+cell_state_in = Input("cell_state_in", "TENSOR_FLOAT16",
+ "{%d, %d}" % (n_batch, n_cell))
+
+activation_param = Int32Scalar("activation_param", 4) # Tanh
+cell_clip_param = Float16Scalar("cell_clip_param", 0.)
+proj_clip_param = Float16Scalar("proj_clip_param", 0.)
+time_major_param = BoolScalar("time_major_param", False)
+
+input_layer_norm_weights = Input("input_layer_norm_weights", "TENSOR_FLOAT16",
+ "{%d}" % n_cell)
+forget_layer_norm_weights = Input("forget_layer_norm_weights", "TENSOR_FLOAT16",
+ "{%d}" % n_cell)
+cell_layer_norm_weights = Input("cell_layer_norm_weights", "TENSOR_FLOAT16",
+ "{%d}" % n_cell)
+output_layer_norm_weights = Input("output_layer_norm_weights", "TENSOR_FLOAT16",
+ "{%d}" % n_cell)
+
+output = Output("output", "TENSOR_FLOAT16", "{%d, %d, %d}" % (n_batch, max_time, n_output))
+
+model = model.Operation(
+ "UNIDIRECTIONAL_SEQUENCE_LSTM", input, input_to_input_weights, input_to_forget_weights,
+ input_to_cell_weights, input_to_output_weights, recurrent_to_input_weights,
+ recurrent_to_forget_weights, recurrent_to_cell_weights,
+ recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights,
+ cell_to_output_weights, input_gate_bias, forget_gate_bias, cell_gate_bias,
+ output_gate_bias, projection_weights, projection_bias, output_state_in,
+ cell_state_in, activation_param, cell_clip_param, proj_clip_param, time_major_param,
+ input_layer_norm_weights, forget_layer_norm_weights,
+ cell_layer_norm_weights, output_layer_norm_weights).To([output])
+
+# Example 1. Input in operand 0,
+input0 = {
+ input_to_input_weights: [
+ -0.45018822, -0.02338299, -0.0870589, -0.34550029,
+ 0.04266912, -0.15680569, -0.34856534, 0.43890524
+ ],
+ input_to_forget_weights: [
+ 0.09701663, 0.20334584, -0.50592935, -0.31343272,
+ -0.40032279, 0.44781327, 0.01387155, -0.35593212
+ ],
+ input_to_cell_weights: [
+ -0.50013041, 0.1370284, 0.11810488, 0.2013163,
+ -0.20583314, 0.44344562, 0.22077113, -0.29909778
+ ],
+ input_to_output_weights: [
+ -0.25065863, -0.28290087, 0.04613829, 0.40525138,
+ 0.44272184, 0.03897077, -0.1556896, 0.19487578
+ ],
+ input_gate_bias: [0., 0., 0., 0.],
+ forget_gate_bias: [1., 1., 1., 1.],
+ cell_gate_bias: [0., 0., 0., 0.],
+ output_gate_bias: [0., 0., 0., 0.],
+ recurrent_to_input_weights: [
+ -0.0063535, -0.2042388, 0.31454784, -0.35746509,
+ 0.28902304, 0.08183324, -0.16555229, 0.02286911,
+ -0.13566875, 0.03034258, 0.48091322, -0.12528998,
+ 0.24077177, -0.51332325, -0.33502164, 0.10629296
+ ],
+ recurrent_to_cell_weights: [
+ -0.3407414, 0.24443203, -0.2078532, 0.26320225,
+ 0.05695659, -0.00123841, -0.4744786, -0.35869038,
+ -0.06418842, -0.13502428, -0.501764, 0.22830659,
+ -0.46367589, 0.26016325, -0.03894562, -0.16368064
+ ],
+ recurrent_to_forget_weights: [
+ -0.48684245, -0.06655136, 0.42224967, 0.2112639,
+ 0.27654213, 0.20864892, -0.07646349, 0.45877004,
+ 0.00141793, -0.14609534, 0.36447752, 0.09196436,
+ 0.28053468, 0.01560611, -0.20127171, -0.01140004
+ ],
+ recurrent_to_output_weights: [
+ 0.43385774, -0.17194885, 0.2718237, 0.09215671,
+ 0.24107647, -0.39835793, 0.18212086, 0.01301402,
+ 0.48572797, -0.50656658, 0.20047462, -0.20607421,
+ -0.51818722, -0.15390486, 0.0468148, 0.39922136
+ ],
+ cell_to_input_weights: [],
+ cell_to_forget_weights: [],
+ cell_to_output_weights: [],
+ projection_weights: [],
+ projection_bias: [],
+ input_layer_norm_weights: [],
+ forget_layer_norm_weights: [],
+ cell_layer_norm_weights: [],
+ output_layer_norm_weights: []
+}
+
+test_input = [2., 3., 3., 4., 1., 1.]
+
+golden_output = [
+ -0.02973187, 0.1229473, 0.20885126, -0.15358765,
+ -0.03716109, 0.12507336, 0.41193449, -0.20860538,
+ -0.15053082, 0.09120187, 0.24278517, -0.12222792
+]
+
+output0 = {
+ output: golden_output,
+}
+
+input0[input] = test_input
+input0[output_state_in] = [ 0 for _ in range(n_batch * n_output) ]
+input0[cell_state_in] = [ 0 for _ in range(n_batch * n_cell) ]
+
+Example((input0, output0))
diff --git a/tests/nnapi/specs/skip/V1_2/unidirectional_sequence_lstm_f16_norm_peephole_projection.mod.py b/tests/nnapi/specs/skip/V1_2/unidirectional_sequence_lstm_f16_norm_peephole_projection.mod.py
new file mode 100644
index 000000000..b29308954
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/unidirectional_sequence_lstm_f16_norm_peephole_projection.mod.py
@@ -0,0 +1,168 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Unidirectional Sequence LSTM Test:
+# FLOAT16, 3 Time Step, Layer Normalization, No Cifg, Peephole, Projection, and No Clipping.
+import copy
+
+model = Model()
+
+max_time = 3
+n_batch = 2
+n_input = 5
+# n_cell and n_output have the same size when there is no projection.
+n_cell = 4
+n_output = 3
+
+input = Input("input", "TENSOR_FLOAT16", "{%d, %d, %d}" % (max_time, n_batch, n_input))
+
+input_to_input_weights = Input("input_to_input_weights", "TENSOR_FLOAT16",
+ "{%d, %d}" % (n_cell, n_input))
+input_to_forget_weights = Input("input_to_forget_weights", "TENSOR_FLOAT16",
+ "{%d, %d}" % (n_cell, n_input))
+input_to_cell_weights = Input("input_to_cell_weights", "TENSOR_FLOAT16",
+ "{%d, %d}" % (n_cell, n_input))
+input_to_output_weights = Input("input_to_output_weights", "TENSOR_FLOAT16",
+ "{%d, %d}" % (n_cell, n_input))
+
+recurrent_to_input_weights = Input("recurrent_to_intput_weights",
+ "TENSOR_FLOAT16",
+ "{%d, %d}" % (n_cell, n_output))
+recurrent_to_forget_weights = Input("recurrent_to_forget_weights",
+ "TENSOR_FLOAT16",
+ "{%d, %d}" % (n_cell, n_output))
+recurrent_to_cell_weights = Input("recurrent_to_cell_weights", "TENSOR_FLOAT16",
+ "{%d, %d}" % (n_cell, n_output))
+recurrent_to_output_weights = Input("recurrent_to_output_weights",
+ "TENSOR_FLOAT16",
+ "{%d, %d}" % (n_cell, n_output))
+
+cell_to_input_weights = Input("cell_to_input_weights", "TENSOR_FLOAT16",
+ "{%d}" % (n_cell))
+cell_to_forget_weights = Input("cell_to_forget_weights", "TENSOR_FLOAT16",
+ "{%d}" % (n_cell))
+cell_to_output_weights = Input("cell_to_output_weights", "TENSOR_FLOAT16",
+ "{%d}" % (n_cell))
+
+input_gate_bias = Input("input_gate_bias", "TENSOR_FLOAT16", "{%d}" % (n_cell))
+forget_gate_bias = Input("forget_gate_bias", "TENSOR_FLOAT16",
+ "{%d}" % (n_cell))
+cell_gate_bias = Input("cell_gate_bias", "TENSOR_FLOAT16", "{%d}" % (n_cell))
+output_gate_bias = Input("output_gate_bias", "TENSOR_FLOAT16",
+ "{%d}" % (n_cell))
+
+projection_weights = Input("projection_weights", "TENSOR_FLOAT16",
+ "{%d,%d}" % (n_output, n_cell))
+projection_bias = Input("projection_bias", "TENSOR_FLOAT16", "{0}")
+
+output_state_in = Input("output_state_in", "TENSOR_FLOAT16",
+ "{%d, %d}" % (n_batch, n_output))
+cell_state_in = Input("cell_state_in", "TENSOR_FLOAT16",
+ "{%d, %d}" % (n_batch, n_cell))
+
+activation_param = Int32Scalar("activation_param", 4) # Tanh
+cell_clip_param = Float16Scalar("cell_clip_param", 0.)
+proj_clip_param = Float16Scalar("proj_clip_param", 0.)
+time_major_param = BoolScalar("time_major_param", True)
+
+input_layer_norm_weights = Input("input_layer_norm_weights", "TENSOR_FLOAT16",
+ "{%d}" % n_cell)
+forget_layer_norm_weights = Input("forget_layer_norm_weights", "TENSOR_FLOAT16",
+ "{%d}" % n_cell)
+cell_layer_norm_weights = Input("cell_layer_norm_weights", "TENSOR_FLOAT16",
+ "{%d}" % n_cell)
+output_layer_norm_weights = Input("output_layer_norm_weights", "TENSOR_FLOAT16",
+ "{%d}" % n_cell)
+
+output = Output("output", "TENSOR_FLOAT16", "{%d, %d, %d}" % (max_time, n_batch, n_output))
+
+model = model.Operation(
+ "UNIDIRECTIONAL_SEQUENCE_LSTM", input, input_to_input_weights, input_to_forget_weights,
+ input_to_cell_weights, input_to_output_weights, recurrent_to_input_weights,
+ recurrent_to_forget_weights, recurrent_to_cell_weights,
+ recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights,
+ cell_to_output_weights, input_gate_bias, forget_gate_bias, cell_gate_bias,
+ output_gate_bias, projection_weights, projection_bias, output_state_in,
+ cell_state_in, activation_param, cell_clip_param, proj_clip_param, time_major_param,
+ input_layer_norm_weights, forget_layer_norm_weights,
+ cell_layer_norm_weights, output_layer_norm_weights).To([output])
+
+# Example 1. Input in operand 0,
+input0 = {
+ input_to_input_weights: [
+ 0.5, 0.6, 0.7, -0.8, -0.9, 0.1, 0.2, 0.3, -0.4, 0.5, -0.8, 0.7, -0.6,
+ 0.5, -0.4, -0.5, -0.4, -0.3, -0.2, -0.1
+ ],
+ input_to_forget_weights: [
+ -0.6, -0.1, 0.3, 0.2, 0.9, -0.5, -0.2, -0.4, 0.3, -0.8, -0.4, 0.3, -0.5,
+ -0.4, -0.6, 0.3, -0.4, -0.6, -0.5, -0.5
+ ],
+ input_to_cell_weights: [
+ -0.4, -0.3, -0.2, -0.1, -0.5, 0.5, -0.2, -0.3, -0.2, -0.6, 0.6, -0.1,
+ -0.4, -0.3, -0.7, 0.7, -0.9, -0.5, 0.8, 0.6
+ ],
+ input_to_output_weights: [
+ -0.8, -0.4, -0.2, -0.9, -0.1, -0.7, 0.3, -0.3, -0.8, -0.2, 0.6, -0.2,
+ 0.4, -0.7, -0.3, -0.5, 0.1, 0.5, -0.6, -0.4
+ ],
+ input_gate_bias: [0.03, 0.15, 0.22, 0.38],
+ forget_gate_bias: [0.1, -0.3, -0.2, 0.1],
+ cell_gate_bias: [-0.05, 0.72, 0.25, 0.08],
+ output_gate_bias: [0.05, -0.01, 0.2, 0.1],
+ recurrent_to_input_weights: [
+ -0.2, -0.3, 0.4, 0.1, -0.5, 0.9, -0.2, -0.3, -0.7, 0.05, -0.2, -0.6
+ ],
+ recurrent_to_cell_weights: [
+ -0.3, 0.2, 0.1, -0.3, 0.8, -0.08, -0.2, 0.3, 0.8, -0.6, -0.1, 0.2
+ ],
+ recurrent_to_forget_weights: [
+ -0.5, -0.3, -0.5, -0.2, 0.6, 0.4, 0.9, 0.3, -0.1, 0.2, 0.5, 0.2
+ ],
+ recurrent_to_output_weights: [
+ 0.3, -0.1, 0.1, -0.2, -0.5, -0.7, -0.2, -0.6, -0.1, -0.4, -0.7, -0.2
+ ],
+ cell_to_input_weights: [0.05, 0.1, 0.25, 0.15],
+ cell_to_forget_weights: [-0.02, -0.15, -0.25, -0.03],
+ cell_to_output_weights: [0.1, -0.1, -0.5, 0.05],
+ projection_weights: [
+ -0.1, 0.2, 0.01, -0.2, 0.1, 0.5, 0.3, 0.08, 0.07, 0.2, -0.4, 0.2
+ ],
+ projection_bias: [],
+ input_layer_norm_weights: [0.1, 0.2, 0.3, 0.5],
+ forget_layer_norm_weights: [0.2, 0.2, 0.4, 0.3],
+ cell_layer_norm_weights: [0.7, 0.2, 0.3, 0.8],
+ output_layer_norm_weights: [0.6, 0.2, 0.2, 0.5]
+}
+
+test_input = [0.7, 0.8, 0.1, 0.2, 0.3, 0.3, 0.2, 0.9, 0.8, 0.1,
+ 0.8, 0.1, 0.2, 0.4, 0.5, 0.1, 0.5, 0.2, 0.4, 0.2,
+ 0.2, 0.7, 0.7, 0.1, 0.7, 0.6, 0.9, 0.2, 0.5, 0.7]
+
+golden_output = [
+ 0.024407668039203, 0.128027379512787, -0.001709178090096, -0.006924282759428, 0.084874063730240, 0.063444979488850,
+ 0.013764165341854, 0.140751048922539, 0.039583537727594, -0.004039138555527, 0.139963015913963, 0.072681039571762,
+ -0.004592306911945, 0.155278354883194, 0.083737745881081, 0.007527053356171, 0.161902531981468, 0.056137066334486,
+]
+
+output0 = {
+ output: golden_output,
+}
+
+input0[input] = test_input
+input0[output_state_in] = [ 0 for _ in range(n_batch * n_output) ]
+input0[cell_state_in] = [ 0 for _ in range(n_batch * n_cell) ]
+
+Example((input0, output0))
diff --git a/tests/nnapi/specs/skip/V1_2/unidirectional_sequence_rnn.mod.py b/tests/nnapi/specs/skip/V1_2/unidirectional_sequence_rnn.mod.py
new file mode 100644
index 000000000..84ae779eb
--- /dev/null
+++ b/tests/nnapi/specs/skip/V1_2/unidirectional_sequence_rnn.mod.py
@@ -0,0 +1,183 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import numpy as np
+
+
+def test(name, input, weights, recurrent_weights, bias, hidden_state,
+ activation, time_major, output, input_data, weights_data,
+ recurrent_weights_data, bias_data, hidden_state_data, output_data):
+ activation = Int32Scalar("activation", activation)
+ time_major = Int32Scalar("time_major", time_major)
+ model = Model().Operation("UNIDIRECTIONAL_SEQUENCE_RNN", input, weights,
+ recurrent_weights, bias, hidden_state, activation,
+ time_major).To(output)
+ example = Example({
+ input: input_data,
+ weights: weights_data,
+ recurrent_weights: recurrent_weights_data,
+ bias: bias_data,
+ hidden_state: hidden_state_data,
+ output: output_data
+ },
+ model=model,
+ name=name).AddVariations("relaxed", "float16")
+
+
+def convert_to_time_major(tensor, num_batches, max_time, input_size):
+ return np.array(tensor).reshape([num_batches, max_time,
+ input_size]).transpose([1, 0, 2]).flatten().tolist()
+
+
+num_batches = 2
+max_time = 16
+input_size = 8
+num_units = 16
+
+input_data = [
+ 0.23689353, 0.285385, 0.037029743, -0.19858193, -0.27569133, 0.43773448,
+ 0.60379338, 0.35562468, -0.69424844, -0.93421471, -0.87287879, 0.37144363,
+ -0.62476718, 0.23791671, 0.40060222, 0.1356622, -0.99774903, -0.98858172,
+ -0.38952237, -0.47685933, 0.31073618, 0.71511042, -0.63767755, -0.31729108,
+ 0.33468103, 0.75801885, 0.30660987, -0.37354088, 0.77002847, -0.62747043,
+ -0.68572164, 0.0069220066, 0.65791464, 0.35130811, 0.80834007, -0.61777675,
+ -0.21095741, 0.41213346, 0.73784804, 0.094794154, 0.47791874, 0.86496925,
+ -0.53376222, 0.85315156, 0.10288584, 0.86684, -0.011186242, 0.10513687,
+ 0.87825835, 0.59929144, 0.62827742, 0.18899453, 0.31440187, 0.99059987,
+ 0.87170351, -0.35091716, 0.74861872, 0.17831337, 0.2755419, 0.51864719,
+ 0.55084288, 0.58982027, -0.47443086, 0.20875752, -0.058871567, -0.66609079,
+ 0.59098077, 0.73017097, 0.74604273, 0.32882881, -0.17503482, 0.22396147,
+ 0.19379807, 0.29120302, 0.077113032, -0.70331609, 0.15804303, -0.93407321,
+ 0.40182066, 0.036301374, 0.66521823, 0.0300982, -0.7747041, -0.02038002,
+ 0.020698071, -0.90300065, 0.62870288, -0.23068321, 0.27531278, -0.095755219,
+ -0.712036, -0.17384434, -0.50593495, -0.18646687, -0.96508682, 0.43519354,
+ 0.14744234, 0.62589407, 0.1653645, -0.10651493, -0.045277178, 0.99032974,
+ -0.88255352, -0.85147917, 0.28153265, 0.19455957, -0.55479527, -0.56042433,
+ 0.26048636, 0.84702539, 0.47587705, -0.074295521, -0.12287641, 0.70117295,
+ 0.90532446, 0.89782166, 0.79817224, 0.53402734, -0.33286154, 0.073485017,
+ -0.56172788, -0.044897556, 0.89964068, -0.067662835, 0.76863563, 0.93455386,
+ -0.6324693, -0.083922029
+] * 2
+weights_data = [
+ 0.461459, 0.153381, 0.529743, -0.00371218, 0.676267, -0.211346, 0.317493,
+ 0.969689, -0.343251, 0.186423, 0.398151, 0.152399, 0.448504, 0.317662,
+ 0.523556, -0.323514, 0.480877, 0.333113, -0.757714, -0.674487, -0.643585,
+ 0.217766, -0.0251462, 0.79512, -0.595574, -0.422444, 0.371572, -0.452178,
+ -0.556069, -0.482188, -0.685456, -0.727851, 0.841829, 0.551535, -0.232336,
+ 0.729158, -0.00294906, -0.69754, 0.766073, -0.178424, 0.369513, -0.423241,
+ 0.548547, -0.0152023, -0.757482, -0.85491, 0.251331, -0.989183, 0.306261,
+ -0.340716, 0.886103, -0.0726757, -0.723523, -0.784303, 0.0354295, 0.566564,
+ -0.485469, -0.620498, 0.832546, 0.697884, -0.279115, 0.294415, -0.584313,
+ 0.548772, 0.0648819, 0.968726, 0.723834, -0.0080452, -0.350386, -0.272803,
+ 0.115121, -0.412644, -0.824713, -0.992843, -0.592904, -0.417893, 0.863791,
+ -0.423461, -0.147601, -0.770664, -0.479006, 0.654782, 0.587314, -0.639158,
+ 0.816969, -0.337228, 0.659878, 0.73107, 0.754768, -0.337042, 0.0960841,
+ 0.368357, 0.244191, -0.817703, -0.211223, 0.442012, 0.37225, -0.623598,
+ -0.405423, 0.455101, 0.673656, -0.145345, -0.511346, -0.901675, -0.81252,
+ -0.127006, 0.809865, -0.721884, 0.636255, 0.868989, -0.347973, -0.10179,
+ -0.777449, 0.917274, 0.819286, 0.206218, -0.00785118, 0.167141, 0.45872,
+ 0.972934, -0.276798, 0.837861, 0.747958, -0.0151566, -0.330057, -0.469077,
+ 0.277308, 0.415818
+]
+recurrent_weights_data = [
+ 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1
+]
+bias_data = [
+ 0.065691948, -0.69055247, 0.1107955, -0.97084129, -0.23957068, -0.23566568,
+ -0.389184, 0.47481549, -0.4791103, 0.29931796, 0.10463274, 0.83918178,
+ 0.37197268, 0.61957061, 0.3956964, -0.37609905
+]
+
+output_data = [
+ 0.496726, 0, 0.965996, 0, 0.0584254, 0, 0, 0.12315, 0, 0, 0.612266,
+ 0.456601, 0, 0.52286, 1.16099, 0.0291232, 0, 0, 0.524901, 0, 0, 0, 0,
+ 1.02116, 0, 1.35762, 0, 0.356909, 0.436415, 0.0355727, 0, 0, 0, 0, 0,
+ 0.262335, 0, 0, 0, 1.33992, 0, 2.9739, 0, 0, 1.31914, 2.66147, 0, 0,
+ 0.942568, 0, 0, 0, 0.025507, 0, 0, 0, 0.321429, 0.569141, 1.25274, 1.57719,
+ 0.8158, 1.21805, 0.586239, 0.25427, 1.04436, 0, 0.630725, 0, 0.133801,
+ 0.210693, 0.363026, 0, 0.533426, 0, 1.25926, 0.722707, 0, 1.22031, 1.30117,
+ 0.495867, 0.222187, 0, 0.72725, 0, 0.767003, 0, 0, 0.147835, 0, 0, 0,
+ 0.608758, 0.469394, 0.00720298, 0.927537, 0, 0.856974, 0.424257, 0, 0,
+ 0.937329, 0, 0, 0, 0.476425, 0, 0.566017, 0.418462, 0.141911, 0.996214,
+ 1.13063, 0, 0.967899, 0, 0, 0, 0.0831304, 0, 0, 1.00378, 0, 0, 0, 1.44818,
+ 1.01768, 0.943891, 0.502745, 0, 0.940135, 0, 0, 0, 0, 0, 0, 2.13243, 0,
+ 0.71208, 0.123918, 1.53907, 1.30225, 1.59644, 0.70222, 0, 0.804329, 0,
+ 0.430576, 0, 0.505872, 0.509603, 0.343448, 0, 0.107756, 0.614544, 1.44549,
+ 1.52311, 0.0454298, 0.300267, 0.562784, 0.395095, 0.228154, 0, 0.675323, 0,
+ 1.70536, 0.766217, 0, 0, 0, 0.735363, 0.0759267, 1.91017, 0.941888, 0, 0, 0,
+ 0, 0, 1.5909, 0, 0, 0, 0, 0.5755, 0, 0.184687, 0, 1.56296, 0.625285, 0, 0,
+ 0, 0, 0, 0.0857888, 0, 0, 0, 0, 0.488383, 0.252786, 0, 0, 0, 1.02817,
+ 1.85665, 0, 0, 0.00981836, 0, 1.06371, 0, 0, 0, 0, 0, 0, 0.290445, 0.316406,
+ 0, 0.304161, 1.25079, 0.0707152, 0, 0.986264, 0.309201, 0, 0, 0, 0, 0,
+ 1.64896, 0.346248, 0, 0.918175, 0.78884, 0.524981, 1.92076, 2.07013,
+ 0.333244, 0.415153, 0.210318, 0, 0, 0, 0, 0, 2.02616, 0, 0.728256, 0.84183,
+ 0.0907453, 0.628881, 3.58099, 1.49974, 0
+] * 2
+
+test(
+ name="blackbox",
+ input=Input("input", "TENSOR_FLOAT32", "{{{}, {}, {}}}".format(
+ num_batches, max_time, input_size)),
+ weights=Input("weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(
+ num_units, input_size)),
+ recurrent_weights=Input("recurrent_weights", "TENSOR_FLOAT32",
+ "{{{}, {}}}".format(num_units, num_units)),
+ bias=Input("bias", "TENSOR_FLOAT32", "{{{}}}".format(num_units)),
+ hidden_state=Input("hidden_state", "TENSOR_FLOAT32", "{{{}, {}}}".format(
+ num_batches, num_units)),
+ output=Output("output", "TENSOR_FLOAT32", "{{{}, {}, {}}}".format(
+ num_batches, max_time, num_units)),
+ activation=1,
+ time_major=0,
+ input_data=input_data,
+ weights_data=weights_data,
+ recurrent_weights_data=recurrent_weights_data,
+ bias_data=bias_data,
+ hidden_state_data=[0] * num_batches * num_units,
+ output_data=output_data)
+
+test(
+ name="blackbox_time_major",
+ input=Input("input", "TENSOR_FLOAT32", "{{{}, {}, {}}}".format(
+ max_time, num_batches, input_size)),
+ weights=Input("weights", "TENSOR_FLOAT32", "{{{}, {}}}".format(
+ num_units, input_size)),
+ recurrent_weights=Input("recurrent_weights", "TENSOR_FLOAT32",
+ "{{{}, {}}}".format(num_units, num_units)),
+ bias=Input("bias", "TENSOR_FLOAT32", "{{{}}}".format(num_units)),
+ hidden_state=Input("hidden_state", "TENSOR_FLOAT32", "{{{}, {}}}".format(
+ num_batches, num_units)),
+ output=Output("output", "TENSOR_FLOAT32", "{{{}, {}, {}}}".format(
+ max_time, num_batches, num_units)),
+ activation=1,
+ time_major=1,
+ input_data=convert_to_time_major(input_data, num_batches, max_time,
+ input_size),
+ weights_data=weights_data,
+ recurrent_weights_data=recurrent_weights_data,
+ bias_data=bias_data,
+ hidden_state_data=[0] * num_batches * num_units,
+ output_data=convert_to_time_major(output_data, num_batches, max_time,
+ num_units))