Diffstat (limited to 'tests/nnapi/specs/V1_2')
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/abs_.mod.py | 0
-rwxr-xr-x  tests/nnapi/specs/V1_2/abs_1D_float_nnfw.mod.py | 20
-rwxr-xr-x  tests/nnapi/specs/V1_2/abs_2D_float_nnfw.mod.py | 20
-rwxr-xr-x  tests/nnapi/specs/V1_2/abs_3D_float_nnfw.mod.py | 20
-rwxr-xr-x  tests/nnapi/specs/V1_2/abs_4D_float_nnfw.mod.py | 18
-rwxr-xr-x  tests/nnapi/specs/V1_2/argmax_1.mod.py | 31
-rwxr-xr-x  tests/nnapi/specs/V1_2/argmax_2.mod.py | 31
-rwxr-xr-x  tests/nnapi/specs/V1_2/argmax_3.mod.py | 33
-rwxr-xr-x  tests/nnapi/specs/V1_2/argmax_float_1_nnfw.mod.py | 18
-rwxr-xr-x  tests/nnapi/specs/V1_2/argmax_float_2_nnfw.mod.py | 18
-rwxr-xr-x  tests/nnapi/specs/V1_2/argmax_int32_nnfw.mod.py | 18
-rwxr-xr-x  tests/nnapi/specs/V1_2/argmax_neg_axis_float_nnfw.mod.py | 17
-rwxr-xr-x  tests/nnapi/specs/V1_2/argmax_neg_axis_int32_nnfw.mod.py | 17
-rwxr-xr-x  tests/nnapi/specs/V1_2/argmax_quant8_neg_axis_nnfw.mod.py | 17
-rwxr-xr-x  tests/nnapi/specs/V1_2/argmax_quant8_nnfw.mod.py | 18
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/cast.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/cast_float32_to_int32_nnfw.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/cast_int32_to_float32_nnfw.mod.py | 0
-rwxr-xr-x  tests/nnapi/specs/V1_2/equal.mod.py | 99
-rwxr-xr-x  tests/nnapi/specs/V1_2/equal_1D_float_nnfw.mod.py | 18
-rwxr-xr-x  tests/nnapi/specs/V1_2/equal_4D_float_nnfw.mod.py | 18
-rwxr-xr-x  tests/nnapi/specs/V1_2/equal_broadcast_4D_2D_float_nnfw.mod.py | 30
-rwxr-xr-x  tests/nnapi/specs/V1_2/equal_broadcast_float_nnfw.mod.py | 19
-rwxr-xr-x  tests/nnapi/specs/V1_2/equal_quant8_nnfw.mod.py | 18
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/exp_.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/exp_1D_float_nnfw.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/exp_2D_float_nnfw.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/exp_3D_float_nnfw.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/exp_4D_float_nnfw.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/gather.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/gather_1D_float_nnfw.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/gather_1D_int32_nnfw.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/gather_1D_quant8_nnfw.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/gather_2D_2D_float_1_nnfw.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/gather_2D_2D_float_2_nnfw.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/gather_2D_3D_float_1_nnfw.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/gather_2D_3D_float_2_nnfw.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/gather_2D_float_nnfw.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/gather_2D_int32_nnfw.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/gather_2D_quant8_nnfw.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/gather_3D_2D_float_1_nnfw.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/gather_3D_2D_float_2_nnfw.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/gather_3D_2D_float_3_nnfw.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/gather_4D_float_nnfw.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/gather_higher_rank.mod.py | 0
-rwxr-xr-x  tests/nnapi/specs/V1_2/greater_equal.mod.py | 99
-rwxr-xr-x  tests/nnapi/specs/V1_2/greater_equal_nnfw.mod.py | 35
-rwxr-xr-x  tests/nnapi/specs/V1_2/less.mod.py | 99
-rwxr-xr-x  tests/nnapi/specs/V1_2/less_nnfw.mod.py | 35
-rwxr-xr-x  tests/nnapi/specs/V1_2/logical_and.mod.py | 43
-rwxr-xr-x  tests/nnapi/specs/V1_2/logical_and_1D_nnfw.mod.py | 19
-rwxr-xr-x  tests/nnapi/specs/V1_2/logical_and_2D_nnfw.mod.py | 19
-rwxr-xr-x  tests/nnapi/specs/V1_2/logical_and_3D_nnfw.mod.py | 19
-rwxr-xr-x  tests/nnapi/specs/V1_2/logical_and_4D_nnfw.mod.py | 19
-rwxr-xr-x  tests/nnapi/specs/V1_2/logical_and_broadcast_4D_2D_nnfw.mod.py | 25
-rwxr-xr-x  tests/nnapi/specs/V1_2/logical_and_broadcast_nnfw.mod.py | 19
-rwxr-xr-x  tests/nnapi/specs/V1_2/logical_not.mod.py | 25
-rwxr-xr-x  tests/nnapi/specs/V1_2/logical_not_1D_nnfw.mod.py | 16
-rwxr-xr-x  tests/nnapi/specs/V1_2/logical_not_4D_nnfw.mod.py | 16
-rwxr-xr-x  tests/nnapi/specs/V1_2/logical_or.mod.py | 43
-rwxr-xr-x  tests/nnapi/specs/V1_2/logical_or_1D_nnfw.mod.py | 19
-rwxr-xr-x  tests/nnapi/specs/V1_2/logical_or_2D_nnfw.mod.py | 19
-rwxr-xr-x  tests/nnapi/specs/V1_2/logical_or_3D_nnfw.mod.py | 19
-rwxr-xr-x  tests/nnapi/specs/V1_2/logical_or_4D_nnfw.mod.py | 19
-rwxr-xr-x  tests/nnapi/specs/V1_2/logical_or_broadcast_4D_2D_nnfw.mod.py | 25
-rwxr-xr-x  tests/nnapi/specs/V1_2/logical_or_broadcast_nnfw.mod.py | 19
-rwxr-xr-x  tests/nnapi/specs/V1_2/maximum.mod.py | 64
-rwxr-xr-x  tests/nnapi/specs/V1_2/minimum.mod.py | 64
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/neg.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/neg_1D_float_nnfw.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/neg_2D_float_nnfw.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/neg_3D_float_nnfw.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/neg_3D_int_nnfw.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/neg_4D_float_nnfw.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/neg_4D_int_nnfw.mod.py | 0
-rwxr-xr-x  tests/nnapi/specs/V1_2/not_equal.mod.py | 99
-rwxr-xr-x  tests/nnapi/specs/V1_2/not_equal_broadcast_4D_2D_float_nnfw.mod.py | 30
-rwxr-xr-x  tests/nnapi/specs/V1_2/not_equal_broadcast_float_nnfw.mod.py | 19
-rwxr-xr-x  tests/nnapi/specs/V1_2/not_equal_float_nnfw.mod.py | 18
-rwxr-xr-x  tests/nnapi/specs/V1_2/not_equal_quant8_nnfw.mod.py | 18
-rwxr-xr-x  tests/nnapi/specs/V1_2/prelu.mod.py | 61
-rwxr-xr-x  tests/nnapi/specs/V1_2/prelu_broadcast_float_1_nnfw.mod.py | 23
-rwxr-xr-x  tests/nnapi/specs/V1_2/prelu_broadcast_quant8_1_nnfw.mod.py | 24
-rwxr-xr-x  tests/nnapi/specs/V1_2/prelu_float_1_nnfw.mod.py | 22
-rwxr-xr-x  tests/nnapi/specs/V1_2/prelu_quant8_1_nnfw.mod.py | 23
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/reduce_max.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/reduce_max_2D_float_nnfw.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/reduce_max_2D_int32_nnfw.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/reduce_max_4D_float_reducing_C_nnfw.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/reduce_max_4D_float_reducing_HW_nnfw.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/reduce_max_float_1_nnfw.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/reduce_max_float_2_nnfw.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/reduce_max_float_nnfw.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/reduce_max_quant8_1_nnfw.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/reduce_max_quant8_2_nnfw.mod.py | 0
-rwxr-xr-x  tests/nnapi/specs/V1_2/reduce_min.mod.py | 70
-rwxr-xr-x  tests/nnapi/specs/V1_2/reduce_min_float_1_nnfw.mod.py | 18
-rwxr-xr-x  tests/nnapi/specs/V1_2/reduce_min_float_2_nnfw.mod.py | 18
-rwxr-xr-x  tests/nnapi/specs/V1_2/reduce_min_float_nnfw.mod.py | 19
-rwxr-xr-x  tests/nnapi/specs/V1_2/reduce_sum.mod.py | 66
-rwxr-xr-x  tests/nnapi/specs/V1_2/reduce_sum_2D_float_nnfw.mod.py | 19
-rwxr-xr-x  tests/nnapi/specs/V1_2/reduce_sum_4D_float_nnfw.mod.py | 19
-rwxr-xr-x  tests/nnapi/specs/V1_2/reduce_sum_4D_float_reducing_C_nnfw.mod.py | 33
-rwxr-xr-x  tests/nnapi/specs/V1_2/reduce_sum_4D_float_reducing_HW_nnfw.mod.py | 33
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/rsqrt.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/rsqrt_1D_float_nnfw.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/rsqrt_2D_float_nnfw.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/rsqrt_3D_float_nnfw.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/rsqrt_4D_float_nnfw.mod.py | 0
-rwxr-xr-x  tests/nnapi/specs/V1_2/sin_1D_float_nnfw.mod.py | 13
-rwxr-xr-x  tests/nnapi/specs/V1_2/sin_4D_float_nnfw.mod.py | 18
-rwxr-xr-x  tests/nnapi/specs/V1_2/slice.mod.py | 147
-rwxr-xr-x  tests/nnapi/specs/V1_2/split_1D_float_nnfw.mod.py | 40
-rwxr-xr-x  tests/nnapi/specs/V1_2/split_1D_int32_nnfw.mod.py | 40
-rwxr-xr-x  tests/nnapi/specs/V1_2/split_4D_float_1_nnfw.mod.py | 21
-rwxr-xr-x  tests/nnapi/specs/V1_2/split_4D_float_2_nnfw.mod.py | 21
-rwxr-xr-x  tests/nnapi/specs/V1_2/split_4D_float_3_nnfw.mod.py | 21
-rwxr-xr-x  tests/nnapi/specs/V1_2/split_4D_int32_1_nnfw.mod.py | 21
-rwxr-xr-x  tests/nnapi/specs/V1_2/split_4D_int32_2_nnfw.mod.py | 21
-rwxr-xr-x  tests/nnapi/specs/V1_2/split_4D_int32_3_nnfw.mod.py | 21
-rwxr-xr-x  tests/nnapi/specs/V1_2/split_4D_int32_4_nnfw.mod.py | 21
-rwxr-xr-x  tests/nnapi/specs/V1_2/split_4D_int32_5_nnfw.mod.py | 21
-rwxr-xr-x  tests/nnapi/specs/V1_2/split_4D_quant8_nnfw.mod.py | 21
-rwxr-xr-x  tests/nnapi/specs/V1_2/split_float_1.mod.py | 38
-rwxr-xr-x  tests/nnapi/specs/V1_2/split_float_2.mod.py | 37
-rwxr-xr-x  tests/nnapi/specs/V1_2/split_float_3.mod.py | 39
-rwxr-xr-x  tests/nnapi/specs/V1_2/split_float_4.mod.py | 36
-rwxr-xr-x  tests/nnapi/specs/V1_2/split_float_5.mod.py | 36
-rwxr-xr-x  tests/nnapi/specs/V1_2/split_int32_1.mod.py | 38
-rwxr-xr-x  tests/nnapi/specs/V1_2/split_int32_2.mod.py | 37
-rwxr-xr-x  tests/nnapi/specs/V1_2/split_int32_3.mod.py | 39
-rwxr-xr-x  tests/nnapi/specs/V1_2/split_int32_4.mod.py | 36
-rwxr-xr-x  tests/nnapi/specs/V1_2/split_quant8_1.mod.py | 38
-rwxr-xr-x  tests/nnapi/specs/V1_2/split_quant8_2.mod.py | 37
-rwxr-xr-x  tests/nnapi/specs/V1_2/split_quant8_3.mod.py | 39
-rwxr-xr-x  tests/nnapi/specs/V1_2/split_quant8_4.mod.py | 36
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/sqrt_.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/sqrt_1D_float_nnfw.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/sqrt_2D_float_nnfw.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/sqrt_3D_float_nnfw.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/sqrt_4D_float_nnfw.mod.py | 0
-rwxr-xr-x  tests/nnapi/specs/V1_2/sub_v1_2.mod.py | 99
-rwxr-xr-x  tests/nnapi/specs/V1_2/sub_v1_2_broadcast.mod.py | 60
-rwxr-xr-x  tests/nnapi/specs/V1_2/tanh_v1_2.mod.py | 89
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/topk_v2.mod.py | 0
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/topk_v2_1D_float_nnfw.mod.py | 2
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/topk_v2_1D_int32_nnfw.mod.py | 2
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/topk_v2_1D_quant8_nnfw.mod.py | 2
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/topk_v2_2D_float_nnfw.mod.py | 4
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/topk_v2_2D_int32_nnfw.mod.py | 2
-rwxr-xr-x [-rw-r--r--]  tests/nnapi/specs/V1_2/topk_v2_2D_quant8_nnfw.mod.py | 2
-rwxr-xr-x  tests/nnapi/specs/V1_2/transpose_v1_2.mod.py | 81
152 files changed, 3178 insertions, 7 deletions
diff --git a/tests/nnapi/specs/V1_2/abs_.mod.py b/tests/nnapi/specs/V1_2/abs_.mod.py
index 376769e55..376769e55 100644..100755
--- a/tests/nnapi/specs/V1_2/abs_.mod.py
+++ b/tests/nnapi/specs/V1_2/abs_.mod.py
diff --git a/tests/nnapi/specs/V1_2/abs_1D_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/abs_1D_float_nnfw.mod.py
new file mode 100755
index 000000000..6366f6e04
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/abs_1D_float_nnfw.mod.py
@@ -0,0 +1,20 @@
+# model
+model = Model()
+
+i1 = Input("input", "TENSOR_FLOAT32", "{10}")
+i2 = Output("output", "TENSOR_FLOAT32", "{10}")
+model = model.Operation("ABS", i1).To(i2)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [0.778, -0.48, -241, 0.9118, -0.466,
+ -30.29, -0.4951, -0.4460, 0.555,
+ 0.11310]}
+
+output0 = {i2: # output 0
+ [0.778, 0.48, 241, 0.9118, 0.466,
+ 30.29, 0.4951, 0.4460, 0.555,
+ 0.11310]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/abs_2D_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/abs_2D_float_nnfw.mod.py
new file mode 100755
index 000000000..901127b8d
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/abs_2D_float_nnfw.mod.py
@@ -0,0 +1,20 @@
+# model
+model = Model()
+
+i1 = Input("input", "TENSOR_FLOAT32", "{5, 2}")
+i2 = Output("output", "TENSOR_FLOAT32", "{5, 2}")
+model = model.Operation("ABS", i1).To(i2)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [0.735078, -0.46738, -241, 0.9118, -0.46686,
+ -3150.219, -0.495291, -0.42874460, 0.5005046655,
+ 0.1131106620]}
+
+output0 = {i2: # output 0
+ [0.735078, 0.46738, 241, 0.9118, 0.46686,
+ 3150.219, 0.495291, 0.42874460, 0.5005046655,
+ 0.1131106620]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/abs_3D_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/abs_3D_float_nnfw.mod.py
new file mode 100755
index 000000000..b5ab39482
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/abs_3D_float_nnfw.mod.py
@@ -0,0 +1,20 @@
+# model
+model = Model()
+
+i1 = Input("input", "TENSOR_FLOAT32", "{2, 3, 2}")
+i2 = Output("output", "TENSOR_FLOAT32", "{2, 3, 2}")
+model = model.Operation("ABS", i1).To(i2)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [0.735078, -0.46738, -241, 0.9118, -0.46686,
+ -3150.219, -0.495291, -0.42874460, 0.5005046655,
+ 0.1131106620, -40.0, 15.0]}
+
+output0 = {i2: # output 0
+ [0.735078, 0.46738, 241, 0.9118, 0.46686,
+ 3150.219, 0.495291, 0.42874460, 0.5005046655,
+ 0.1131106620, 40.0, 15.0]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/abs_4D_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/abs_4D_float_nnfw.mod.py
new file mode 100755
index 000000000..b7e749f5d
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/abs_4D_float_nnfw.mod.py
@@ -0,0 +1,18 @@
+# model
+model = Model()
+
+i1 = Input("input", "TENSOR_FLOAT32", "{2, 2, 2, 2}")
+i2 = Output("output", "TENSOR_FLOAT32", "{2, 2, 2, 2}")
+model = model.Operation("ABS", i1).To(i2)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1.0, 1.99, -1.4, 0.0001, 0.0002, 16.0, 25.0, 100.0,
+ 23.0, 19.0, -40.0, 15.0, 4.0, -43.0, -0.35355339059, 0.35355339059]}
+
+output0 = {i2: # output 0
+ [1.0, 1.99, 1.4, 0.0001, 0.0002, 16.0, 25.0, 100.0,
+ 23.0, 19.0, 40.0, 15.0, 4.0, 43.0, 0.35355339059, 0.35355339059]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/argmax_1.mod.py b/tests/nnapi/specs/V1_2/argmax_1.mod.py
new file mode 100755
index 000000000..6dc7430af
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/argmax_1.mod.py
@@ -0,0 +1,31 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+input0 = Input("input0", "TENSOR_FLOAT32", "{2, 2}")
+axis = Int32Scalar("axis", 1)
+output0 = Output("output", "TENSOR_INT32", "{2}")
+
+model = Model().Operation("ARGMAX", input0, axis).To(output0)
+
+quant8 = DataTypeConverter().Identify({
+ input0: ["TENSOR_QUANT8_ASYMM", 1.0, 0],
+})
+
+Example({
+ input0: [1.0, 2.0,
+ 4.0, 3.0],
+ output0: [1, 0],
+}).AddVariations("relaxed", "float16", "int32", quant8)
diff --git a/tests/nnapi/specs/V1_2/argmax_2.mod.py b/tests/nnapi/specs/V1_2/argmax_2.mod.py
new file mode 100755
index 000000000..69be60740
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/argmax_2.mod.py
@@ -0,0 +1,31 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+input0 = Input("input0", "TENSOR_FLOAT32", "{2, 2}")
+axis = Int32Scalar("axis", 0)
+output0 = Output("output", "TENSOR_INT32", "{2}")
+
+model = Model().Operation("ARGMAX", input0, axis).To(output0)
+
+quant8 = DataTypeConverter().Identify({
+ input0: ["TENSOR_QUANT8_ASYMM", 1.0, 0],
+})
+
+Example({
+ input0: [1.0, 2.0,
+ 4.0, 3.0],
+ output0: [1, 1],
+}).AddVariations("relaxed", "float16", "int32", quant8)
diff --git a/tests/nnapi/specs/V1_2/argmax_3.mod.py b/tests/nnapi/specs/V1_2/argmax_3.mod.py
new file mode 100755
index 000000000..ab7afc60a
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/argmax_3.mod.py
@@ -0,0 +1,33 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Negative axis support test.
+
+input0 = Input("input0", "TENSOR_FLOAT32", "{2, 2}")
+axis = Int32Scalar("axis", -1)
+output0 = Output("output", "TENSOR_INT32", "{2}")
+
+model = Model().Operation("ARGMAX", input0, axis).To(output0)
+
+quant8 = DataTypeConverter().Identify({
+ input0: ["TENSOR_QUANT8_ASYMM", 1.0, 0],
+})
+
+Example({
+ input0: [1.0, 2.0,
+ 4.0, 3.0],
+ output0: [1, 0],
+}).AddVariations("relaxed", "float16", "int32", quant8)
diff --git a/tests/nnapi/specs/V1_2/argmax_float_1_nnfw.mod.py b/tests/nnapi/specs/V1_2/argmax_float_1_nnfw.mod.py
new file mode 100755
index 000000000..b7d4b8a8e
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/argmax_float_1_nnfw.mod.py
@@ -0,0 +1,18 @@
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+axis = Parameter("axis", "TENSOR_INT32", "{1}", [1])
+output = Output("output", "TENSOR_INT32", "{1, 2, 1}")
+
+model = model.Operation("ARGMAX", i1, axis).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1.0, 4.0,
+ 2.0, 3.0]}
+
+output0 = {output: # output 0
+ [1,
+ 0]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/argmax_float_2_nnfw.mod.py b/tests/nnapi/specs/V1_2/argmax_float_2_nnfw.mod.py
new file mode 100755
index 000000000..2e614f7ea
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/argmax_float_2_nnfw.mod.py
@@ -0,0 +1,18 @@
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{1, 2, 2, 2}")
+axis = Parameter("axis", "TENSOR_INT32", "{1}", [3])
+output = Output("output", "TENSOR_INT32", "{1, 2, 2}")
+
+model = model.Operation("ARGMAX", i1, axis).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1.0, 2.0, 7.0, 8.0,
+ 1.0, 9.0, 7.0, 3.0]}
+
+output0 = {output: # output 0
+ [1,1,
+ 1,0]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/argmax_int32_nnfw.mod.py b/tests/nnapi/specs/V1_2/argmax_int32_nnfw.mod.py
new file mode 100755
index 000000000..fb3151f89
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/argmax_int32_nnfw.mod.py
@@ -0,0 +1,18 @@
+model = Model()
+i1 = Input("input", "TENSOR_INT32", "{1, 2, 2, 1}")
+axis = Parameter("axis", "TENSOR_INT32", "{1}", [1])
+output = Output("output", "TENSOR_INT32", "{1, 2, 1}")
+
+model = model.Operation("ARGMAX", i1, axis).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1, 4,
+ 2, 3]}
+
+output0 = {output: # output 0
+ [1,
+ 0]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/argmax_neg_axis_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/argmax_neg_axis_float_nnfw.mod.py
new file mode 100755
index 000000000..4fc573b31
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/argmax_neg_axis_float_nnfw.mod.py
@@ -0,0 +1,17 @@
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{1, 2, 4, 1}")
+axis = Parameter("axis", "TENSOR_INT32", "{1}", [-3])
+output = Output("output", "TENSOR_INT32", "{1, 4, 1}")
+
+model = model.Operation("ARGMAX", i1, axis).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1.0, 2.0, 7.0, 8.0,
+ 1.0, 9.0, 7.0, 3.0]}
+
+output0 = {output: # output 0
+ [0, 1, 0, 0]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/argmax_neg_axis_int32_nnfw.mod.py b/tests/nnapi/specs/V1_2/argmax_neg_axis_int32_nnfw.mod.py
new file mode 100755
index 000000000..426a03591
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/argmax_neg_axis_int32_nnfw.mod.py
@@ -0,0 +1,17 @@
+model = Model()
+i1 = Input("input", "TENSOR_INT32", "{1, 2, 4, 1}")
+axis = Parameter("axis", "TENSOR_INT32", "{1}", [-3])
+output = Output("output", "TENSOR_INT32", "{1, 4, 1}")
+
+model = model.Operation("ARGMAX", i1, axis).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1, 2, 7, 8,
+ 1, 9, 7, 3]}
+
+output0 = {output: # output 0
+ [0, 1, 0, 0]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/argmax_quant8_neg_axis_nnfw.mod.py b/tests/nnapi/specs/V1_2/argmax_quant8_neg_axis_nnfw.mod.py
new file mode 100755
index 000000000..a6a1a6500
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/argmax_quant8_neg_axis_nnfw.mod.py
@@ -0,0 +1,17 @@
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{1, 2, 4, 1}, 0.5f, 5")
+axis = Parameter("axis", "TENSOR_INT32", "{1}", [-3])
+output = Output("output", "TENSOR_INT32", "{1, 4, 1}")
+
+model = model.Operation("ARGMAX", i1, axis).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1, 2, 7, 8,
+ 1, 9, 7, 3]}
+
+output0 = {output: # output 0
+ [0, 1, 0, 0]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/argmax_quant8_nnfw.mod.py b/tests/nnapi/specs/V1_2/argmax_quant8_nnfw.mod.py
new file mode 100755
index 000000000..38d6a0b63
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/argmax_quant8_nnfw.mod.py
@@ -0,0 +1,18 @@
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 1}, 0.5f, 2")
+axis = Parameter("axis", "TENSOR_INT32", "{1}", [1])
+output = Output("output", "TENSOR_INT32", "{1, 2, 1}")
+
+model = model.Operation("ARGMAX", i1, axis).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1, 4,
+ 2, 3]}
+
+output0 = {output: # output 0
+ [1,
+ 0]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/cast.mod.py b/tests/nnapi/specs/V1_2/cast.mod.py
index f1d93ce7d..f1d93ce7d 100644..100755
--- a/tests/nnapi/specs/V1_2/cast.mod.py
+++ b/tests/nnapi/specs/V1_2/cast.mod.py
diff --git a/tests/nnapi/specs/V1_2/cast_float32_to_int32_nnfw.mod.py b/tests/nnapi/specs/V1_2/cast_float32_to_int32_nnfw.mod.py
index 926508d99..926508d99 100644..100755
--- a/tests/nnapi/specs/V1_2/cast_float32_to_int32_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/cast_float32_to_int32_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/cast_int32_to_float32_nnfw.mod.py b/tests/nnapi/specs/V1_2/cast_int32_to_float32_nnfw.mod.py
index a4f2aeb42..a4f2aeb42 100644..100755
--- a/tests/nnapi/specs/V1_2/cast_int32_to_float32_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/cast_int32_to_float32_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/equal.mod.py b/tests/nnapi/specs/V1_2/equal.mod.py
new file mode 100755
index 000000000..d7c40fe63
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/equal.mod.py
@@ -0,0 +1,99 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+def test(name, input0, input1, output0, input0_data, input1_data, output_data, do_variations=True):
+ model = Model().Operation("EQUAL", input0, input1).To(output0)
+ example = Example({
+ input0: input0_data,
+ input1: input1_data,
+ output0: output_data,
+ }, model=model, name=name)
+ if do_variations:
+ example.AddVariations("int32", "float16", "relaxed")
+
+test(
+ name="simple",
+ input0=Input("input0", "TENSOR_FLOAT32", "{3}"),
+ input1=Input("input1", "TENSOR_FLOAT32", "{3}"),
+ output0=Output("output0", "TENSOR_BOOL8", "{3}"),
+ input0_data=[5, 7, 10],
+ input1_data=[10, 7, 5],
+ output_data=[False, True, False],
+)
+
+test(
+ name="broadcast",
+ input0=Input("input0", "TENSOR_FLOAT32", "{2, 1}"),
+ input1=Input("input1", "TENSOR_FLOAT32", "{2}"),
+ output0=Output("output0", "TENSOR_BOOL8", "{2, 2}"),
+ input0_data=[5, 10],
+ input1_data=[10, 5],
+ output_data=[False, True, True, False],
+)
+
+test(
+ name="quantized_different_scale",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM", [3], 1.0, 128)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM", [1], 2.0, 128)),
+ output0=Output("output0", "TENSOR_BOOL8", "{3}"),
+ input0_data=[129, 130, 131], # effectively 1, 2, 3
+ input1_data=[129], # effectively 2
+ output_data=[False, True, False],
+ do_variations=False,
+)
+
+test(
+ name="quantized_different_zero_point",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM", [3], 1.0, 128)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM", [1], 1.0, 129)),
+ output0=Output("output0", "TENSOR_BOOL8", "{3}"),
+ input0_data=[129, 130, 131], # effectively 1, 2, 3
+ input1_data=[131], # effectively 2
+ output_data=[False, True, False],
+ do_variations=False,
+)
+
+test(
+ name="quantized_overflow_second_input_if_requantized",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM", [1], 1.64771, 31)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM", [1], 1.49725, 240)),
+ output0=Output("output0", "TENSOR_BOOL8", "{1}"),
+ input0_data=[0],
+ input1_data=[200],
+ output_data=[False],
+ do_variations=False,
+)
+
+test(
+ name="quantized_overflow_first_input_if_requantized",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM", [1], 1.49725, 240)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM", [1], 1.64771, 31)),
+ output0=Output("output0", "TENSOR_BOOL8", "{1}"),
+ input0_data=[200],
+ input1_data=[0],
+ output_data=[False],
+ do_variations=False,
+)
+
+test(
+ name="boolean",
+ input0=Input("input0", "TENSOR_BOOL8", "{4}"),
+ input1=Input("input1", "TENSOR_BOOL8", "{4}"),
+ output0=Output("output0", "TENSOR_BOOL8", "{4}"),
+ input0_data=[False, True, False, True],
+ input1_data=[False, False, True, True],
+ output_data=[True, False, False, True],
+ do_variations=False,
+)
diff --git a/tests/nnapi/specs/V1_2/equal_1D_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/equal_1D_float_nnfw.mod.py
new file mode 100755
index 000000000..5b79b679a
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/equal_1D_float_nnfw.mod.py
@@ -0,0 +1,18 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT32", "{3}") # a vector of input
+i2 = Input("op2", "TENSOR_FLOAT32", "{3}") # a vector of input
+i3 = Output("op3", "TENSOR_BOOL8", "{3}") # a vector of output
+model = model.Operation("EQUAL", i1, i2).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [2.0, 3.254232, 5.1232],
+ i2: # input 1
+ [2.0, 3.254111, 5.1232]}
+
+output0 = {i3: # output 0
+ [True, False, True]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/equal_4D_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/equal_4D_float_nnfw.mod.py
new file mode 100755
index 000000000..19925506b
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/equal_4D_float_nnfw.mod.py
@@ -0,0 +1,18 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 1}") # a vector of input
+i2 = Input("op2", "TENSOR_FLOAT32", "{1, 2, 2, 1}") # a vector of input
+i3 = Output("op3", "TENSOR_BOOL8", "{1, 2, 2, 1}") # a vector of output
+model = model.Operation("EQUAL", i1, i2).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [0, 1543.25454532, 5.1232, 10.1],
+ i2: # input 1
+ [0, 5313.25414521, 5.1, 10.1]}
+
+output0 = {i3: # output 0
+ [True, False, False, True]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/equal_broadcast_4D_2D_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/equal_broadcast_4D_2D_float_nnfw.mod.py
new file mode 100755
index 000000000..5d07548d7
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/equal_broadcast_4D_2D_float_nnfw.mod.py
@@ -0,0 +1,30 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT32", "{3, 2, 2, 2}")
+i2 = Input("op2", "TENSOR_FLOAT32", "{2, 2}")
+
+i3 = Output("op3", "TENSOR_BOOL8", "{3, 2, 2, 2}")
+model = model.Operation("EQUAL", i1, i2).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [4.89, 11.0, 9.75, 10.20,
+ 8.25, 2.0, 1.15, 0.0,
+ 3.0, 1.0, 8.25, 6.0,
+ 8.45, 3.0, 8.25, 1.2,
+ 0.0, 3.0, 2.0, 7.34,
+ 4.3, 9.56, 11.0, 3.0],
+ i2: # input 1
+ [8.25, 3.0, 2.0, 10.20]}
+
+output0 = {i3: # output 0
+ [False, False, False, True,
+ True, False, False, False,
+ False, False, False, False,
+ False, True, False, False,
+ False, True, True, False,
+ False, False, False, False]
+ }
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/equal_broadcast_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/equal_broadcast_float_nnfw.mod.py
new file mode 100755
index 000000000..279c000ba
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/equal_broadcast_float_nnfw.mod.py
@@ -0,0 +1,19 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT32", "{2, 2}")
+i2 = Input("op2", "TENSOR_FLOAT32", "{1, 2}")
+
+i3 = Output("op3", "TENSOR_QUANT8_ASYMM", "{2, 2}, 1.f, 0")
+model = model.Operation("EQUAL_EX", i1, i2).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [7.45, 3.21, 2.0, 7.67],
+ i2: # input 1
+ [0.0, 7.67]}
+
+output0 = {i3: # output 0
+ [0, 0, 0, 255]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/equal_quant8_nnfw.mod.py b/tests/nnapi/specs/V1_2/equal_quant8_nnfw.mod.py
new file mode 100755
index 000000000..8e4e0f23c
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/equal_quant8_nnfw.mod.py
@@ -0,0 +1,18 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM", "{3}, 1.f, 0") # a vector of input
+i2 = Input("op2", "TENSOR_QUANT8_ASYMM", "{3}, 1.f, 0") # a vector of input
+i3 = Output("op3", "TENSOR_BOOL8", "{3}") # a vector of output
+model = model.Operation("EQUAL", i1, i2).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [2, 3, 0],
+ i2: # input 1
+ [2, 9, 0]}
+
+output0 = {i3: # output 0
+ [True, False, True]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/exp_.mod.py b/tests/nnapi/specs/V1_2/exp_.mod.py
index 135f45125..135f45125 100644..100755
--- a/tests/nnapi/specs/V1_2/exp_.mod.py
+++ b/tests/nnapi/specs/V1_2/exp_.mod.py
diff --git a/tests/nnapi/specs/V1_2/exp_1D_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/exp_1D_float_nnfw.mod.py
index 8258970f0..8258970f0 100644..100755
--- a/tests/nnapi/specs/V1_2/exp_1D_float_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/exp_1D_float_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/exp_2D_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/exp_2D_float_nnfw.mod.py
index 4cdb7b7e5..4cdb7b7e5 100644..100755
--- a/tests/nnapi/specs/V1_2/exp_2D_float_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/exp_2D_float_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/exp_3D_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/exp_3D_float_nnfw.mod.py
index 9ed45c7c1..9ed45c7c1 100644..100755
--- a/tests/nnapi/specs/V1_2/exp_3D_float_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/exp_3D_float_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/exp_4D_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/exp_4D_float_nnfw.mod.py
index 664336270..664336270 100644..100755
--- a/tests/nnapi/specs/V1_2/exp_4D_float_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/exp_4D_float_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/gather.mod.py b/tests/nnapi/specs/V1_2/gather.mod.py
index d5e1ef73d..d5e1ef73d 100644..100755
--- a/tests/nnapi/specs/V1_2/gather.mod.py
+++ b/tests/nnapi/specs/V1_2/gather.mod.py
diff --git a/tests/nnapi/specs/V1_2/gather_1D_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/gather_1D_float_nnfw.mod.py
index 4596467d8..4596467d8 100644..100755
--- a/tests/nnapi/specs/V1_2/gather_1D_float_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/gather_1D_float_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/gather_1D_int32_nnfw.mod.py b/tests/nnapi/specs/V1_2/gather_1D_int32_nnfw.mod.py
index 8fe961bbc..8fe961bbc 100644..100755
--- a/tests/nnapi/specs/V1_2/gather_1D_int32_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/gather_1D_int32_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/gather_1D_quant8_nnfw.mod.py b/tests/nnapi/specs/V1_2/gather_1D_quant8_nnfw.mod.py
index 7699d6c50..7699d6c50 100644..100755
--- a/tests/nnapi/specs/V1_2/gather_1D_quant8_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/gather_1D_quant8_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/gather_2D_2D_float_1_nnfw.mod.py b/tests/nnapi/specs/V1_2/gather_2D_2D_float_1_nnfw.mod.py
index 13be0df06..13be0df06 100644..100755
--- a/tests/nnapi/specs/V1_2/gather_2D_2D_float_1_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/gather_2D_2D_float_1_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/gather_2D_2D_float_2_nnfw.mod.py b/tests/nnapi/specs/V1_2/gather_2D_2D_float_2_nnfw.mod.py
index 4903c97ef..4903c97ef 100644..100755
--- a/tests/nnapi/specs/V1_2/gather_2D_2D_float_2_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/gather_2D_2D_float_2_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/gather_2D_3D_float_1_nnfw.mod.py b/tests/nnapi/specs/V1_2/gather_2D_3D_float_1_nnfw.mod.py
index f4c81cd27..f4c81cd27 100644..100755
--- a/tests/nnapi/specs/V1_2/gather_2D_3D_float_1_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/gather_2D_3D_float_1_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/gather_2D_3D_float_2_nnfw.mod.py b/tests/nnapi/specs/V1_2/gather_2D_3D_float_2_nnfw.mod.py
index eb1cbcbf9..eb1cbcbf9 100644..100755
--- a/tests/nnapi/specs/V1_2/gather_2D_3D_float_2_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/gather_2D_3D_float_2_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/gather_2D_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/gather_2D_float_nnfw.mod.py
index 5d35080ed..5d35080ed 100644..100755
--- a/tests/nnapi/specs/V1_2/gather_2D_float_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/gather_2D_float_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/gather_2D_int32_nnfw.mod.py b/tests/nnapi/specs/V1_2/gather_2D_int32_nnfw.mod.py
index 7a5d7526e..7a5d7526e 100644..100755
--- a/tests/nnapi/specs/V1_2/gather_2D_int32_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/gather_2D_int32_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/gather_2D_quant8_nnfw.mod.py b/tests/nnapi/specs/V1_2/gather_2D_quant8_nnfw.mod.py
index c777d34b9..c777d34b9 100644..100755
--- a/tests/nnapi/specs/V1_2/gather_2D_quant8_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/gather_2D_quant8_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/gather_3D_2D_float_1_nnfw.mod.py b/tests/nnapi/specs/V1_2/gather_3D_2D_float_1_nnfw.mod.py
index be138a0cf..be138a0cf 100644..100755
--- a/tests/nnapi/specs/V1_2/gather_3D_2D_float_1_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/gather_3D_2D_float_1_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/gather_3D_2D_float_2_nnfw.mod.py b/tests/nnapi/specs/V1_2/gather_3D_2D_float_2_nnfw.mod.py
index 9e16ee255..9e16ee255 100644..100755
--- a/tests/nnapi/specs/V1_2/gather_3D_2D_float_2_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/gather_3D_2D_float_2_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/gather_3D_2D_float_3_nnfw.mod.py b/tests/nnapi/specs/V1_2/gather_3D_2D_float_3_nnfw.mod.py
index 6b96b0841..6b96b0841 100644..100755
--- a/tests/nnapi/specs/V1_2/gather_3D_2D_float_3_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/gather_3D_2D_float_3_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/gather_4D_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/gather_4D_float_nnfw.mod.py
index b5a4ec0fc..b5a4ec0fc 100644..100755
--- a/tests/nnapi/specs/V1_2/gather_4D_float_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/gather_4D_float_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/gather_higher_rank.mod.py b/tests/nnapi/specs/V1_2/gather_higher_rank.mod.py
index a7c5b3848..a7c5b3848 100644..100755
--- a/tests/nnapi/specs/V1_2/gather_higher_rank.mod.py
+++ b/tests/nnapi/specs/V1_2/gather_higher_rank.mod.py
diff --git a/tests/nnapi/specs/V1_2/greater_equal.mod.py b/tests/nnapi/specs/V1_2/greater_equal.mod.py
new file mode 100755
index 000000000..d6c76faff
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/greater_equal.mod.py
@@ -0,0 +1,99 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+def test(name, input0, input1, output0, input0_data, input1_data, output_data, do_variations=True):
+ model = Model().Operation("GREATER_EQUAL", input0, input1).To(output0)
+ example = Example({
+ input0: input0_data,
+ input1: input1_data,
+ output0: output_data,
+ }, model=model, name=name)
+ if do_variations:
+ example.AddVariations("int32", "float16", "relaxed")
+
+test(
+ name="simple",
+ input0=Input("input0", "TENSOR_FLOAT32", "{3}"),
+ input1=Input("input1", "TENSOR_FLOAT32", "{3}"),
+ output0=Output("output0", "TENSOR_BOOL8", "{3}"),
+ input0_data=[5, 7, 10],
+ input1_data=[10, 7, 5],
+ output_data=[False, True, True],
+)
+
+test(
+ name="broadcast",
+ input0=Input("input0", "TENSOR_FLOAT32", "{2, 1}"),
+ input1=Input("input1", "TENSOR_FLOAT32", "{2}"),
+ output0=Output("output0", "TENSOR_BOOL8", "{2, 2}"),
+ input0_data=[5, 10],
+ input1_data=[10, 5],
+ output_data=[False, True, True, True],
+)
+
+test(
+ name="quantized_different_scale",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM", [3], 1.0, 128)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM", [1], 2.0, 128)),
+ output0=Output("output0", "TENSOR_BOOL8", "{3}"),
+ input0_data=[129, 130, 131], # effectively 1, 2, 3
+ input1_data=[129], # effectively 2
+ output_data=[False, True, True],
+ do_variations=False,
+)
+
+test(
+ name="quantized_different_zero_point",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM", [3], 1.0, 128)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM", [1], 1.0, 129)),
+ output0=Output("output0", "TENSOR_BOOL8", "{3}"),
+ input0_data=[129, 130, 131], # effectively 1, 2, 3
+ input1_data=[131], # effectively 2
+ output_data=[False, True, True],
+ do_variations=False,
+)
+
+test(
+ name="quantized_overflow_second_input_if_requantized",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM", [1], 1.64771, 31)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM", [1], 1.49725, 240)),
+ output0=Output("output0", "TENSOR_BOOL8", "{1}"),
+ input0_data=[0],
+ input1_data=[200],
+ output_data=[True],
+ do_variations=False,
+)
+
+test(
+ name="quantized_overflow_first_input_if_requantized",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM", [1], 1.49725, 240)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM", [1], 1.64771, 31)),
+ output0=Output("output0", "TENSOR_BOOL8", "{1}"),
+ input0_data=[200],
+ input1_data=[0],
+ output_data=[False],
+ do_variations=False,
+)
+
+test(
+ name="boolean",
+ input0=Input("input0", "TENSOR_BOOL8", "{4}"),
+ input1=Input("input1", "TENSOR_BOOL8", "{4}"),
+ output0=Output("output0", "TENSOR_BOOL8", "{4}"),
+ input0_data=[False, True, False, True],
+ input1_data=[False, False, True, True],
+ output_data=[True, True, False, True],
+ do_variations=False,
+)
diff --git a/tests/nnapi/specs/V1_2/greater_equal_nnfw.mod.py b/tests/nnapi/specs/V1_2/greater_equal_nnfw.mod.py
new file mode 100755
index 000000000..8fd7b710f
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/greater_equal_nnfw.mod.py
@@ -0,0 +1,35 @@
+#
+# Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT32", "{2, 1}")
+i2 = Input("op2", "TENSOR_FLOAT32", "{2}")
+i3 = Output("op3", "TENSOR_BOOL8", "{2, 2}")
+model = model.Operation("GREATER_EQUAL", i1, i2).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [5, 10],
+ i2: # input 1
+ [10, 5]}
+
+output0 = {i3: # output 0
+ [False, True, True, True]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/less.mod.py b/tests/nnapi/specs/V1_2/less.mod.py
new file mode 100755
index 000000000..182d69d67
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/less.mod.py
@@ -0,0 +1,99 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+def test(name, input0, input1, output0, input0_data, input1_data, output_data, do_variations=True):
+ model = Model().Operation("LESS", input0, input1).To(output0)
+ example = Example({
+ input0: input0_data,
+ input1: input1_data,
+ output0: output_data,
+ }, model=model, name=name)
+ if do_variations:
+ example.AddVariations("int32", "float16", "relaxed")
+
+test(
+ name="simple",
+ input0=Input("input0", "TENSOR_FLOAT32", "{3}"),
+ input1=Input("input1", "TENSOR_FLOAT32", "{3}"),
+ output0=Output("output0", "TENSOR_BOOL8", "{3}"),
+ input0_data=[5, 7, 10],
+ input1_data=[10, 7, 5],
+ output_data=[True, False, False],
+)
+
+test(
+ name="broadcast",
+ input0=Input("input0", "TENSOR_FLOAT32", "{2, 1}"),
+ input1=Input("input1", "TENSOR_FLOAT32", "{2}"),
+ output0=Output("output0", "TENSOR_BOOL8", "{2, 2}"),
+ input0_data=[5, 10],
+ input1_data=[10, 5],
+ output_data=[True, False, False, False],
+)
+
+test(
+ name="quantized_different_scale",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM", [3], 1.0, 128)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM", [1], 2.0, 128)),
+ output0=Output("output0", "TENSOR_BOOL8", "{3}"),
+ input0_data=[129, 130, 131], # effectively 1, 2, 3
+ input1_data=[129], # effectively 2
+ output_data=[True, False, False],
+ do_variations=False,
+)
+
+test(
+ name="quantized_different_zero_point",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM", [3], 1.0, 128)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM", [1], 1.0, 129)),
+ output0=Output("output0", "TENSOR_BOOL8", "{3}"),
+ input0_data=[129, 130, 131], # effectively 1, 2, 3
+ input1_data=[131], # effectively 2
+ output_data=[True, False, False],
+ do_variations=False,
+)
+
+test(
+ name="quantized_overflow_second_input_if_requantized",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM", [1], 1.64771, 31)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM", [1], 1.49725, 240)),
+ output0=Output("output0", "TENSOR_BOOL8", "{1}"),
+ input0_data=[0],
+ input1_data=[200],
+ output_data=[False],
+ do_variations=False,
+)
+
+test(
+ name="quantized_overflow_first_input_if_requantized",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM", [1], 1.49725, 240)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM", [1], 1.64771, 31)),
+ output0=Output("output0", "TENSOR_BOOL8", "{1}"),
+ input0_data=[200],
+ input1_data=[0],
+ output_data=[True],
+ do_variations=False,
+)
+
+test(
+ name="boolean",
+ input0=Input("input0", "TENSOR_BOOL8", "{4}"),
+ input1=Input("input1", "TENSOR_BOOL8", "{4}"),
+ output0=Output("output0", "TENSOR_BOOL8", "{4}"),
+ input0_data=[False, True, False, True],
+ input1_data=[False, False, True, True],
+ output_data=[False, False, True, False],
+ do_variations=False,
+)
diff --git a/tests/nnapi/specs/V1_2/less_nnfw.mod.py b/tests/nnapi/specs/V1_2/less_nnfw.mod.py
new file mode 100755
index 000000000..b03c0c14d
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/less_nnfw.mod.py
@@ -0,0 +1,35 @@
+#
+# Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT32", "{2, 1}")
+i2 = Input("op2", "TENSOR_FLOAT32", "{2}")
+i3 = Output("op3", "TENSOR_BOOL8", "{2, 2}")
+model = model.Operation("LESS", i1, i2).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [5, 10],
+ i2: # input 1
+ [10, 5]}
+
+output0 = {i3: # output 0
+ [True, False, False, False]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/logical_and.mod.py b/tests/nnapi/specs/V1_2/logical_and.mod.py
new file mode 100755
index 000000000..c831bb2a6
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/logical_and.mod.py
@@ -0,0 +1,43 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+def test(name, input0, input1, output0, input0_data, input1_data, output_data):
+ model = Model().Operation("LOGICAL_AND", input0, input1).To(output0)
+ Example({
+ input0: input0_data,
+ input1: input1_data,
+ output0: output_data,
+ }, model=model, name=name)
+
+test(
+ name="simple",
+ input0=Input("input0", "TENSOR_BOOL8", "{1, 1, 1, 4}"),
+ input1=Input("input1", "TENSOR_BOOL8", "{1, 1, 1, 4}"),
+ output0=Output("output0", "TENSOR_BOOL8", "{1, 1, 1, 4}"),
+ input0_data=[True, False, False, True],
+ input1_data=[True, False, True, False],
+ output_data=[True, False, False, False],
+)
+
+test(
+ name="broadcast",
+ input0=Input("input0", "TENSOR_BOOL8", "{1, 1, 1, 4}"),
+ input1=Input("input1", "TENSOR_BOOL8", "{1, 1}"),
+ output0=Output("output0", "TENSOR_BOOL8", "{1, 1, 1, 4}"),
+ input0_data=[True, False, False, True],
+ input1_data=[True],
+ output_data=[True, False, False, True],
+)
diff --git a/tests/nnapi/specs/V1_2/logical_and_1D_nnfw.mod.py b/tests/nnapi/specs/V1_2/logical_and_1D_nnfw.mod.py
new file mode 100755
index 000000000..173ccfab0
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/logical_and_1D_nnfw.mod.py
@@ -0,0 +1,19 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_BOOL8", "{4}")
+i2 = Input("op2", "TENSOR_BOOL8", "{4}")
+
+i3 = Output("op3", "TENSOR_BOOL8", "{4}")
+model = model.Operation("LOGICAL_AND", i1, i2).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [False, False, True, True],
+ i2: # input 1
+ [False, True, False, True]}
+
+output0 = {i3: # output 0
+ [False, False, False, True]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/logical_and_2D_nnfw.mod.py b/tests/nnapi/specs/V1_2/logical_and_2D_nnfw.mod.py
new file mode 100755
index 000000000..3b9b4a4c8
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/logical_and_2D_nnfw.mod.py
@@ -0,0 +1,19 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_BOOL8", "{2, 2}")
+i2 = Input("op2", "TENSOR_BOOL8", "{2, 2}")
+
+i3 = Output("op3", "TENSOR_BOOL8", "{2, 2}")
+model = model.Operation("LOGICAL_AND", i1, i2).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [False, False, True, True],
+ i2: # input 1
+ [False, True, False, True]}
+
+output0 = {i3: # output 0
+ [False, False, False, True]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/logical_and_3D_nnfw.mod.py b/tests/nnapi/specs/V1_2/logical_and_3D_nnfw.mod.py
new file mode 100755
index 000000000..3f0372e71
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/logical_and_3D_nnfw.mod.py
@@ -0,0 +1,19 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_BOOL8", "{2, 2, 2}")
+i2 = Input("op2", "TENSOR_BOOL8", "{2, 2, 2}")
+
+i3 = Output("op3", "TENSOR_BOOL8", "{2, 2, 2}")
+model = model.Operation("LOGICAL_AND", i1, i2).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [False, False, True, True, False, True, False, True],
+ i2: # input 1
+ [False, True, False, True, False, False, True, True]}
+
+output0 = {i3: # output 0
+ [False, False, False, True, False, False, False, True]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/logical_and_4D_nnfw.mod.py b/tests/nnapi/specs/V1_2/logical_and_4D_nnfw.mod.py
new file mode 100755
index 000000000..26820d866
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/logical_and_4D_nnfw.mod.py
@@ -0,0 +1,19 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_BOOL8", "{2, 1, 2, 2}")
+i2 = Input("op2", "TENSOR_BOOL8", "{2, 1, 2, 2}")
+
+i3 = Output("op3", "TENSOR_BOOL8", "{2, 1, 2, 2}")
+model = model.Operation("LOGICAL_AND", i1, i2).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [False, False, True, True, False, True, False, True],
+ i2: # input 1
+ [False, True, False, True, False, False, True, True]}
+
+output0 = {i3: # output 0
+ [False, False, False, True, False, False, False, True]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/logical_and_broadcast_4D_2D_nnfw.mod.py b/tests/nnapi/specs/V1_2/logical_and_broadcast_4D_2D_nnfw.mod.py
new file mode 100755
index 000000000..1a2b5bedc
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/logical_and_broadcast_4D_2D_nnfw.mod.py
@@ -0,0 +1,25 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_BOOL8", "{2, 2, 2, 2}")
+i2 = Input("op2", "TENSOR_BOOL8", "{2, 2}")
+
+i3 = Output("op3", "TENSOR_BOOL8", "{2, 2, 2, 2}")
+model = model.Operation("LOGICAL_AND", i1, i2).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [False, False, True, True,
+ True, True, False, False,
+ False, False, False, False,
+ True, True, True, True],
+ i2: # input 1
+ [False, True, False, True]}
+
+output0 = {i3: # output 0
+ [False, False, False, True,
+ False, True, False, False,
+ False, False, False, False,
+ False, True, False, True]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/logical_and_broadcast_nnfw.mod.py b/tests/nnapi/specs/V1_2/logical_and_broadcast_nnfw.mod.py
new file mode 100755
index 000000000..817aab322
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/logical_and_broadcast_nnfw.mod.py
@@ -0,0 +1,19 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_BOOL8", "{1, 2, 2, 1}")
+i2 = Input("op2", "TENSOR_BOOL8", "{1, 1, 2, 1}")
+
+i3 = Output("op3", "TENSOR_BOOL8", "{1, 2, 2, 1}")
+model = model.Operation("LOGICAL_AND", i1, i2).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [False, False, True, True],
+ i2: # input 1
+ [False, True]}
+
+output0 = {i3: # output 0
+ [False, False, False, True]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/logical_not.mod.py b/tests/nnapi/specs/V1_2/logical_not.mod.py
new file mode 100755
index 000000000..04ca64680
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/logical_not.mod.py
@@ -0,0 +1,25 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+input0 = Input("input0", "TENSOR_BOOL8", "{1, 1, 4, 1, 1}")
+output0 = Output("output0", "TENSOR_BOOL8", "{1, 1, 4, 1, 1}")
+
+model = Model().Operation("LOGICAL_NOT", input0).To(output0)
+
+Example({
+ input0: [True, False, False, True],
+ output0: [False, True, True, False],
+})
diff --git a/tests/nnapi/specs/V1_2/logical_not_1D_nnfw.mod.py b/tests/nnapi/specs/V1_2/logical_not_1D_nnfw.mod.py
new file mode 100755
index 000000000..5c39692e8
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/logical_not_1D_nnfw.mod.py
@@ -0,0 +1,16 @@
+# model
+model = Model()
+i = Input("op1", "TENSOR_BOOL8", "{4}")
+
+o = Output("op2", "TENSOR_BOOL8", "{4}")
+model = model.Operation("LOGICAL_NOT", i).To(o)
+
+# Example 1. Input
+input0 = {i: # input
+ [True, False, True, True]}
+
+output0 = {o: # output
+ [False, True, False, False]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/logical_not_4D_nnfw.mod.py b/tests/nnapi/specs/V1_2/logical_not_4D_nnfw.mod.py
new file mode 100755
index 000000000..34fecbcf5
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/logical_not_4D_nnfw.mod.py
@@ -0,0 +1,16 @@
+# model
+model = Model()
+i = Input("op1", "TENSOR_BOOL8", "{1, 2, 2, 1}") # a vector of input
+
+o = Output("op2", "TENSOR_BOOL8", "{1, 2, 2, 1}") # a vector of output
+model = model.Operation("LOGICAL_NOT", i).To(o)
+
+# Example 1. Input
+input0 = {i: # input
+ [False, True, True, True]}
+
+output0 = {o: # output
+ [True, False, False, False]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/logical_or.mod.py b/tests/nnapi/specs/V1_2/logical_or.mod.py
new file mode 100755
index 000000000..e4f720dd5
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/logical_or.mod.py
@@ -0,0 +1,43 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+def test(name, input0, input1, output0, input0_data, input1_data, output_data):
+ model = Model().Operation("LOGICAL_OR", input0, input1).To(output0)
+ Example({
+ input0: input0_data,
+ input1: input1_data,
+ output0: output_data,
+ }, model=model, name=name)
+
+test(
+ name="simple",
+ input0=Input("input0", "TENSOR_BOOL8", "{1, 1, 1, 4}"),
+ input1=Input("input1", "TENSOR_BOOL8", "{1, 1, 1, 4}"),
+ output0=Output("output0", "TENSOR_BOOL8", "{1, 1, 1, 4}"),
+ input0_data=[True, False, False, True],
+ input1_data=[True, False, True, False],
+ output_data=[True, False, True, True],
+)
+
+test(
+ name="broadcast",
+ input0=Input("input0", "TENSOR_BOOL8", "{1, 1, 1, 4}"),
+ input1=Input("input1", "TENSOR_BOOL8", "{1, 1}"),
+ output0=Output("output0", "TENSOR_BOOL8", "{1, 1, 1, 4}"),
+ input0_data=[True, False, False, True],
+ input1_data=[False],
+ output_data=[True, False, False, True],
+)
diff --git a/tests/nnapi/specs/V1_2/logical_or_1D_nnfw.mod.py b/tests/nnapi/specs/V1_2/logical_or_1D_nnfw.mod.py
new file mode 100755
index 000000000..77843ae06
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/logical_or_1D_nnfw.mod.py
@@ -0,0 +1,19 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_BOOL8", "{4}")
+i2 = Input("op2", "TENSOR_BOOL8", "{4}")
+
+i3 = Output("op3", "TENSOR_BOOL8", "{4}")
+model = model.Operation("LOGICAL_OR", i1, i2).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [False, False, True, True],
+ i2: # input 1
+ [False, True, False, True]}
+
+output0 = {i3: # output 0
+ [False, True, True, True]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/logical_or_2D_nnfw.mod.py b/tests/nnapi/specs/V1_2/logical_or_2D_nnfw.mod.py
new file mode 100755
index 000000000..2ba17edd3
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/logical_or_2D_nnfw.mod.py
@@ -0,0 +1,19 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_BOOL8", "{2, 2}")
+i2 = Input("op2", "TENSOR_BOOL8", "{2, 2}")
+
+i3 = Output("op3", "TENSOR_BOOL8", "{2, 2}")
+model = model.Operation("LOGICAL_OR", i1, i2).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [False, False, True, True],
+ i2: # input 1
+ [False, True, False, True]}
+
+output0 = {i3: # output 0
+ [False, True, True, True]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/logical_or_3D_nnfw.mod.py b/tests/nnapi/specs/V1_2/logical_or_3D_nnfw.mod.py
new file mode 100755
index 000000000..0fb529c97
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/logical_or_3D_nnfw.mod.py
@@ -0,0 +1,19 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_BOOL8", "{2, 2, 2}")
+i2 = Input("op2", "TENSOR_BOOL8", "{2, 2, 2}")
+
+i3 = Output("op3", "TENSOR_BOOL8", "{2, 2, 2}")
+model = model.Operation("LOGICAL_OR", i1, i2).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [False, False, True, True, False, True, False, True],
+ i2: # input 1
+ [False, True, False, True, False, False, True, True]}
+
+output0 = {i3: # output 0
+ [False, True, True, True, False, True, True, True]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/logical_or_4D_nnfw.mod.py b/tests/nnapi/specs/V1_2/logical_or_4D_nnfw.mod.py
new file mode 100755
index 000000000..060900bee
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/logical_or_4D_nnfw.mod.py
@@ -0,0 +1,19 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_BOOL8", "{2, 1, 2, 2}")
+i2 = Input("op2", "TENSOR_BOOL8", "{2, 1, 2, 2}")
+
+i3 = Output("op3", "TENSOR_BOOL8", "{2, 1, 2, 2}")
+model = model.Operation("LOGICAL_OR", i1, i2).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [False, False, True, True, False, True, False, True],
+ i2: # input 1
+ [False, True, False, True, False, False, True, True]}
+
+output0 = {i3: # output 0
+ [False, True, True, True, False, True, True, True]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/logical_or_broadcast_4D_2D_nnfw.mod.py b/tests/nnapi/specs/V1_2/logical_or_broadcast_4D_2D_nnfw.mod.py
new file mode 100755
index 000000000..7f6603961
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/logical_or_broadcast_4D_2D_nnfw.mod.py
@@ -0,0 +1,25 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_BOOL8", "{2, 2, 2, 2}")
+i2 = Input("op2", "TENSOR_BOOL8", "{2, 2}")
+
+i3 = Output("op3", "TENSOR_BOOL8", "{2, 2, 2, 2}")
+model = model.Operation("LOGICAL_OR", i1, i2).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [False, False, True, True,
+ True, True, False, False,
+ False, False, False, False,
+ True, True, True, True],
+ i2: # input 1
+ [False, True, False, True]}
+
+output0 = {i3: # output 0
+ [False, True, True, True,
+ True, True, False, True,
+ False, True, False, True,
+ True, True, True, True]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/logical_or_broadcast_nnfw.mod.py b/tests/nnapi/specs/V1_2/logical_or_broadcast_nnfw.mod.py
new file mode 100755
index 000000000..c30cb8659
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/logical_or_broadcast_nnfw.mod.py
@@ -0,0 +1,19 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_BOOL8", "{1, 2, 2, 1}")
+i2 = Input("op2", "TENSOR_BOOL8", "{1, 1, 2, 1}")
+
+i3 = Output("op3", "TENSOR_BOOL8", "{1, 2, 2, 1}")
+model = model.Operation("LOGICAL_OR", i1, i2).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [False, False, True, True],
+ i2: # input 1
+ [False, True]}
+
+output0 = {i3: # output 0
+ [False, True, True, True]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/maximum.mod.py b/tests/nnapi/specs/V1_2/maximum.mod.py
new file mode 100755
index 000000000..0d37a203c
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/maximum.mod.py
@@ -0,0 +1,64 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+def test(name, input0, input1, output0, input0_data, input1_data, output_data):
+ model = Model().Operation("MAXIMUM", input0, input1).To(output0)
+
+ quant8 = DataTypeConverter().Identify({
+ input0: ["TENSOR_QUANT8_ASYMM", 0.5, 127],
+ input1: ["TENSOR_QUANT8_ASYMM", 1.0, 100],
+ output0: ["TENSOR_QUANT8_ASYMM", 2.0, 80],
+ })
+
+ Example({
+ input0: input0_data,
+ input1: input1_data,
+ output0: output_data,
+ }, model=model, name=name).AddVariations("relaxed", "float16", "int32", quant8)
+
+
+test(
+ name="simple",
+ input0=Input("input0", "TENSOR_FLOAT32", "{3, 1, 2}"),
+ input1=Input("input1", "TENSOR_FLOAT32", "{3, 1, 2}"),
+ output0=Output("output0", "TENSOR_FLOAT32", "{3, 1, 2}"),
+ input0_data=[1.0, 0.0, -1.0, 11.0, -2.0, -1.44],
+ input1_data=[-1.0, 0.0, 1.0, 12.0, -3.0, -1.43],
+ output_data=[1.0, 0.0, 1.0, 12.0, -2.0, -1.43],
+)
+
+test(
+ name="broadcast",
+ input0=Input("input0", "TENSOR_FLOAT32", "{3, 1, 2}"),
+ input1=Input("input1", "TENSOR_FLOAT32", "{2}"),
+ output0=Output("output0", "TENSOR_FLOAT32", "{3, 1, 2}"),
+ input0_data=[1.0, 0.0, -1.0, -2.0, -1.44, 11.0],
+ input1_data=[0.5, 2.0],
+ output_data=[1.0, 2.0, 0.5, 2.0, 0.5, 11.0],
+)
+
+
+# Test overflow and underflow.
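+# With input scale 1.0 / zeroPoint 128 and output scale 0.5 / zeroPoint 128:
+#   max(60, 128)  -> real max(-68, 0) = 0  -> 0 / 0.5 + 128 = 128
+#   max(128, 200) -> real max(0, 72)  = 72 -> 72 / 0.5 + 128 = 272, clamped to 255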
+input0 = Input("input0", "TENSOR_QUANT8_ASYMM", "{2}, 1.0f, 128")
+input1 = Input("input1", "TENSOR_QUANT8_ASYMM", "{2}, 1.0f, 128")
+output0 = Output("output0", "TENSOR_QUANT8_ASYMM", "{2}, 0.5f, 128")
+model = Model().Operation("MAXIMUM", input0, input1).To(output0)
+
+Example({
+ input0: [60, 128],
+ input1: [128, 200],
+ output0: [128, 255],
+}, model=model, name="overflow")
diff --git a/tests/nnapi/specs/V1_2/minimum.mod.py b/tests/nnapi/specs/V1_2/minimum.mod.py
new file mode 100755
index 000000000..76b058612
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/minimum.mod.py
@@ -0,0 +1,64 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+def test(name, input0, input1, output0, input0_data, input1_data, output_data):
+ model = Model().Operation("MINIMUM", input0, input1).To(output0)
+
+ quant8 = DataTypeConverter().Identify({
+ input0: ["TENSOR_QUANT8_ASYMM", 0.5, 127],
+ input1: ["TENSOR_QUANT8_ASYMM", 1.0, 100],
+ output0: ["TENSOR_QUANT8_ASYMM", 2.0, 80],
+ })
+
+ Example({
+ input0: input0_data,
+ input1: input1_data,
+ output0: output_data,
+ }, model=model, name=name).AddVariations("relaxed", "float16", "int32", quant8)
+
+
+test(
+ name="simple",
+ input0=Input("input0", "TENSOR_FLOAT32", "{3, 1, 2}"),
+ input1=Input("input1", "TENSOR_FLOAT32", "{3, 1, 2}"),
+ output0=Output("output0", "TENSOR_FLOAT32", "{3, 1, 2}"),
+ input0_data=[1.0, 0.0, -1.0, 11.0, -2.0, -1.44],
+ input1_data=[-1.0, 0.0, 1.0, 12.0, -3.0, -1.43],
+ output_data=[-1.0, 0.0, -1.0, 11.0, -3.0, -1.44],
+)
+
+test(
+ name="broadcast",
+ input0=Input("input0", "TENSOR_FLOAT32", "{3, 1, 2}"),
+ input1=Input("input1", "TENSOR_FLOAT32", "{2}"),
+ output0=Output("output0", "TENSOR_FLOAT32", "{3, 1, 2}"),
+ input0_data=[1.0, 0.0, -1.0, -2.0, -1.44, 11.0],
+ input1_data=[0.5, 2.0],
+ output_data=[0.5, 0.0, -1.0, -2.0, -1.44, 2.0],
+)
+
+
+# Test overflow and underflow.
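+# With input scale 1.0 / zeroPoint 128 and output scale 0.5 / zeroPoint 128:
+#   min(60, 128)  -> real min(-68, 0) = -68 -> -68 / 0.5 + 128 = -8, clamped to 0
+#   min(128, 200) -> real min(0, 72)  = 0   -> 0 / 0.5 + 128 = 128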
+input0 = Input("input0", "TENSOR_QUANT8_ASYMM", "{2}, 1.0f, 128")
+input1 = Input("input1", "TENSOR_QUANT8_ASYMM", "{2}, 1.0f, 128")
+output0 = Output("output0", "TENSOR_QUANT8_ASYMM", "{2}, 0.5f, 128")
+model = Model().Operation("MINIMUM", input0, input1).To(output0)
+
+Example({
+ input0: [60, 128],
+ input1: [128, 200],
+ output0: [0, 128],
+}, model=model, name="overflow")
diff --git a/tests/nnapi/specs/V1_2/neg.mod.py b/tests/nnapi/specs/V1_2/neg.mod.py
index 82119d28c..82119d28c 100644..100755
--- a/tests/nnapi/specs/V1_2/neg.mod.py
+++ b/tests/nnapi/specs/V1_2/neg.mod.py
diff --git a/tests/nnapi/specs/V1_2/neg_1D_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/neg_1D_float_nnfw.mod.py
index 6791f2b5e..6791f2b5e 100644..100755
--- a/tests/nnapi/specs/V1_2/neg_1D_float_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/neg_1D_float_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/neg_2D_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/neg_2D_float_nnfw.mod.py
index c5b559046..c5b559046 100644..100755
--- a/tests/nnapi/specs/V1_2/neg_2D_float_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/neg_2D_float_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/neg_3D_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/neg_3D_float_nnfw.mod.py
index ef0faba1a..ef0faba1a 100644..100755
--- a/tests/nnapi/specs/V1_2/neg_3D_float_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/neg_3D_float_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/neg_3D_int_nnfw.mod.py b/tests/nnapi/specs/V1_2/neg_3D_int_nnfw.mod.py
index d3e1e435b..d3e1e435b 100644..100755
--- a/tests/nnapi/specs/V1_2/neg_3D_int_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/neg_3D_int_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/neg_4D_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/neg_4D_float_nnfw.mod.py
index e29a46ac5..e29a46ac5 100644..100755
--- a/tests/nnapi/specs/V1_2/neg_4D_float_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/neg_4D_float_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/neg_4D_int_nnfw.mod.py b/tests/nnapi/specs/V1_2/neg_4D_int_nnfw.mod.py
index be3d07a3c..be3d07a3c 100644..100755
--- a/tests/nnapi/specs/V1_2/neg_4D_int_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/neg_4D_int_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/not_equal.mod.py b/tests/nnapi/specs/V1_2/not_equal.mod.py
new file mode 100755
index 000000000..2c36b5abb
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/not_equal.mod.py
@@ -0,0 +1,99 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+def test(name, input0, input1, output0, input0_data, input1_data, output_data, do_variations=True):
+ model = Model().Operation("NOT_EQUAL", input0, input1).To(output0)
+ example = Example({
+ input0: input0_data,
+ input1: input1_data,
+ output0: output_data,
+ }, model=model, name=name)
+ if do_variations:
+ example.AddVariations("int32", "float16", "relaxed")
+
+test(
+ name="simple",
+ input0=Input("input0", "TENSOR_FLOAT32", "{3}"),
+ input1=Input("input1", "TENSOR_FLOAT32", "{3}"),
+ output0=Output("output0", "TENSOR_BOOL8", "{3}"),
+ input0_data=[5, 7, 10],
+ input1_data=[10, 7, 5],
+ output_data=[True, False, True],
+)
+
+test(
+ name="broadcast",
+ input0=Input("input0", "TENSOR_FLOAT32", "{2, 1}"),
+ input1=Input("input1", "TENSOR_FLOAT32", "{2}"),
+ output0=Output("output0", "TENSOR_BOOL8", "{2, 2}"),
+ input0_data=[5, 10],
+ input1_data=[10, 5],
+ output_data=[True, False, False, True],
+)
+
+test(
+ name="quantized_different_scale",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM", [3], 1.0, 128)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM", [1], 2.0, 128)),
+ output0=Output("output0", "TENSOR_BOOL8", "{3}"),
+ input0_data=[129, 130, 131], # effectively 1, 2, 3
+ input1_data=[129], # effectively 2
+ output_data=[True, False, True],
+ do_variations=False,
+)
+
+test(
+ name="quantized_different_zero_point",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM", [3], 1.0, 128)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM", [1], 1.0, 129)),
+ output0=Output("output0", "TENSOR_BOOL8", "{3}"),
+ input0_data=[129, 130, 131], # effectively 1, 2, 3
+ input1_data=[131], # effectively 2
+ output_data=[True, False, True],
+ do_variations=False,
+)
+
+test(
+ name="quantized_overflow_second_input_if_requantized",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM", [1], 1.64771, 31)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM", [1], 1.49725, 240)),
+ output0=Output("output0", "TENSOR_BOOL8", "{1}"),
+ input0_data=[0],
+ input1_data=[200],
+ output_data=[True],
+ do_variations=False,
+)
+
+test(
+ name="quantized_overflow_first_input_if_requantized",
+ input0=Input("input0", ("TENSOR_QUANT8_ASYMM", [1], 1.49725, 240)),
+ input1=Input("input1", ("TENSOR_QUANT8_ASYMM", [1], 1.64771, 31)),
+ output0=Output("output0", "TENSOR_BOOL8", "{1}"),
+ input0_data=[200],
+ input1_data=[0],
+ output_data=[True],
+ do_variations=False,
+)
+
+test(
+ name="boolean",
+ input0=Input("input0", "TENSOR_BOOL8", "{4}"),
+ input1=Input("input1", "TENSOR_BOOL8", "{4}"),
+ output0=Output("output0", "TENSOR_BOOL8", "{4}"),
+ input0_data=[False, True, False, True],
+ input1_data=[False, False, True, True],
+ output_data=[False, True, True, False],
+ do_variations=False,
+)
diff --git a/tests/nnapi/specs/V1_2/not_equal_broadcast_4D_2D_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/not_equal_broadcast_4D_2D_float_nnfw.mod.py
new file mode 100755
index 000000000..c732e592a
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/not_equal_broadcast_4D_2D_float_nnfw.mod.py
@@ -0,0 +1,30 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT32", "{3, 2, 2, 2}")
+i2 = Input("op2", "TENSOR_FLOAT32", "{2, 2}")
+
+i3 = Output("op3", "TENSOR_BOOL8", "{3, 2, 2, 2}")
+model = model.Operation("NOT_EQUAL", i1, i2).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [4.25, 11.0, 2.2, 10.3,
+ 8.5, 2.1, 1.0, 0.5,
+ 3.1, 1.0, 8.5, 6.5,
+ 11.2, 3.0, 8.5, 1.0,
+ 0.3, 3.0, 2.1, 7.5,
+ 4.3, 9.2, 11.1, 3.0],
+ i2: # input 1
+ [8.5, 3.0, 2.1, 10.3]}
+
+output0 = {i3: # output 0
+ [True, True, True, False,
+ False, True, True, True,
+ True, True, True, True,
+ True, False, True, True,
+ True, False, False, True,
+ True, True, True, True]
+ }
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/not_equal_broadcast_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/not_equal_broadcast_float_nnfw.mod.py
new file mode 100755
index 000000000..9c1071a2b
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/not_equal_broadcast_float_nnfw.mod.py
@@ -0,0 +1,19 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT32", "{2, 2}")
+i2 = Input("op2", "TENSOR_FLOAT32", "{1, 2}")
+
+i3 = Output("op3", "TENSOR_BOOL8", "{2, 2}")
+model = model.Operation("NOT_EQUAL", i1, i2).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [0.2, 3.21, 2.4, 7.44],
+ i2: # input 1
+ [0.21, 7.44]}
+
+output0 = {i3: # output 0
+ [True, True, True, False]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/not_equal_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/not_equal_float_nnfw.mod.py
new file mode 100755
index 000000000..71ca61d6a
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/not_equal_float_nnfw.mod.py
@@ -0,0 +1,18 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT32", "{3}") # a vector of input
+i2 = Input("op2", "TENSOR_FLOAT32", "{3}") # a vector of input
+i3 = Output("op3", "TENSOR_BOOL8", "{3}") # a vector of output
+model = model.Operation("NOT_EQUAL", i1, i2).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [2.0, 3.254232, 5.1232],
+ i2: # input 1
+ [2.0, 3.254111, 5.1232]}
+
+output0 = {i3: # output 0
+ [False, True, False]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/not_equal_quant8_nnfw.mod.py b/tests/nnapi/specs/V1_2/not_equal_quant8_nnfw.mod.py
new file mode 100755
index 000000000..0f775496c
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/not_equal_quant8_nnfw.mod.py
@@ -0,0 +1,18 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM", "{3}, 1.f, 0") # a vector of input
+i2 = Input("op2", "TENSOR_QUANT8_ASYMM", "{3}, 1.f, 0") # a vector of input
+i3 = Output("op3", "TENSOR_BOOL8", "{3}") # a vector of output
+model = model.Operation("NOT_EQUAL", i1, i2).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [2, 3, 0],
+ i2: # input 1
+ [2, 9, 0]}
+
+output0 = {i3: # output 0
+ [False, True, False]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/prelu.mod.py b/tests/nnapi/specs/V1_2/prelu.mod.py
new file mode 100755
index 000000000..bbbeeb5dc
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/prelu.mod.py
@@ -0,0 +1,61 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# TEST 1: PRELU
+i1 = Input("input", "TENSOR_FLOAT32", "{1, 2, 2, 3}")
+a1 = Parameter("alpha", "TENSOR_FLOAT32", "{1, 1, 3}", [0, 1, 2])
+o1 = Output("output", "TENSOR_FLOAT32", "{1, 2, 2, 3}")
+Model().Operation("PRELU", i1, a1).To(o1)
+
+# output.scale > input.scale && output.scale > input.scale * alpha.scale
+quant8_gt = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
+ a1: ("TENSOR_QUANT8_ASYMM", 0.25, 50),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.5, 120)
+})
+
+# output.scale == input.scale
+quant8_eq1 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
+ a1: ("TENSOR_QUANT8_ASYMM", 0.25, 50),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.25, 120)
+})
+
+# output.scale == input.scale * alpha.scale
+quant8_eq2 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
+ a1: ("TENSOR_QUANT8_ASYMM", 0.5, 50),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.125, 120)
+})
+
+# output.scale < input.scale && output.scale < input.scale * alpha.scale
+quant8_lt = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
+ a1: ("TENSOR_QUANT8_ASYMM", 0.5, 50),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.1, 120)
+})
+
+# Instantiate an example
+Example({
+ i1: [ 0, 0, 0,
+ 1, 1, 1,
+ -1, -1, -1,
+ -2, -2, -2],
+ o1: [ 0, 0, 0,
+ 1, 1, 1,
+ 0, -1, -2,
+ 0, -2, -4]
+}).AddInput(a1).AddVariations("relaxed", quant8_gt, quant8_eq1, quant8_eq2, quant8_lt, "float16")
diff --git a/tests/nnapi/specs/V1_2/prelu_broadcast_float_1_nnfw.mod.py b/tests/nnapi/specs/V1_2/prelu_broadcast_float_1_nnfw.mod.py
new file mode 100755
index 000000000..1be24a9ab
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/prelu_broadcast_float_1_nnfw.mod.py
@@ -0,0 +1,23 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 3}") # a vector of input
+i2 = Input("op2", "TENSOR_FLOAT32", "{1, 1, 1, 3}") # a vector of alpha
+i3 = Output("op3", "TENSOR_FLOAT32", "{1, 2, 2, 3}") # a vector of output
+model = model.Operation("PRELU", i1, i2).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [0.0, 0.0, 0.0,
+ 1.0, 1.0, 1.0,
+ -1.0, -1.0, -1.0,
+ -2.0, -2.0, -2.0],
+ i2: # input 1
+ [0.0, 1.0, 2.0]}
+
+output0 = {i3: # output 0
+ [0.0, 0.0, 0.0,
+ 1.0, 1.0, 1.0,
+ 0.0, -1.0, -2.0,
+ 0.0, -2.0, -4.0]}
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/prelu_broadcast_quant8_1_nnfw.mod.py b/tests/nnapi/specs/V1_2/prelu_broadcast_quant8_1_nnfw.mod.py
new file mode 100755
index 000000000..3ad493429
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/prelu_broadcast_quant8_1_nnfw.mod.py
@@ -0,0 +1,24 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 3}, 1.0f, 2") # a vector of input
+i2 = Input("op2", "TENSOR_QUANT8_ASYMM", "{1, 1, 3}, 1.0f, 1") # a vector of alpha
+i3 = Output("op3", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 3}, 0.5f, 3") # a vector of output
+model = model.Operation("PRELU", i1, i2).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1, 1, 1,
+ 2, 2, 2,
+ 3, 3, 3,
+ 1, 2, 3],
+ i2: # input 1
+ [0, 1, 2]}
+
+output0 = {i3: # output 0
+ [5, 3, 1,
+ 3, 3, 3,
+ 5, 5, 5,
+ 5, 3, 5]}
+# Instantiate an example
+Example((input0, output0))
+
diff --git a/tests/nnapi/specs/V1_2/prelu_float_1_nnfw.mod.py b/tests/nnapi/specs/V1_2/prelu_float_1_nnfw.mod.py
new file mode 100755
index 000000000..f39e7957e
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/prelu_float_1_nnfw.mod.py
@@ -0,0 +1,22 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 1}") # a vector of input
+i2 = Input("op2", "TENSOR_FLOAT32", "{1, 2, 2, 1}") # a vector of alpha
+i3 = Output("op3", "TENSOR_FLOAT32", "{1, 2, 2, 1}") # a vector of output
+model = model.Operation("PRELU", i1, i2).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [3.0, -2.0,
+ -1.0, -2.0
+ ],
+ i2: # input 1
+ [0.0, 1.0,
+ 1.0, 2.0]}
+
+output0 = {i3: # output 0
+ [3.0, -2.0,
+ -1.0, -4.0
+ ]}
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/prelu_quant8_1_nnfw.mod.py b/tests/nnapi/specs/V1_2/prelu_quant8_1_nnfw.mod.py
new file mode 100755
index 000000000..97984b116
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/prelu_quant8_1_nnfw.mod.py
@@ -0,0 +1,23 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 1}, 0.5f, 5") # a vector of input
+i2 = Input("op2", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 1}, 0.5f, 1") # a vector of alpha
+i3 = Output("op3", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 1}, 0.1f, 3") # a vector of output
+model = model.Operation("PRELU", i1, i2).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [3, 1,
+ 7, 11
+ ],
+ i2: # input 1
+ [0, 1,
+ 2, 2]}
+
+output0 = {i3: # output 0
+ [8, 3,
+ 13, 33
+ ]}
+# Instantiate an example
+Example((input0, output0))
+
diff --git a/tests/nnapi/specs/V1_2/reduce_max.mod.py b/tests/nnapi/specs/V1_2/reduce_max.mod.py
index f08041ddd..f08041ddd 100644..100755
--- a/tests/nnapi/specs/V1_2/reduce_max.mod.py
+++ b/tests/nnapi/specs/V1_2/reduce_max.mod.py
diff --git a/tests/nnapi/specs/V1_2/reduce_max_2D_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/reduce_max_2D_float_nnfw.mod.py
index be7570eae..be7570eae 100644..100755
--- a/tests/nnapi/specs/V1_2/reduce_max_2D_float_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/reduce_max_2D_float_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/reduce_max_2D_int32_nnfw.mod.py b/tests/nnapi/specs/V1_2/reduce_max_2D_int32_nnfw.mod.py
index 631cd23e8..631cd23e8 100644..100755
--- a/tests/nnapi/specs/V1_2/reduce_max_2D_int32_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/reduce_max_2D_int32_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/reduce_max_4D_float_reducing_C_nnfw.mod.py b/tests/nnapi/specs/V1_2/reduce_max_4D_float_reducing_C_nnfw.mod.py
index 2290dd8b9..2290dd8b9 100644..100755
--- a/tests/nnapi/specs/V1_2/reduce_max_4D_float_reducing_C_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/reduce_max_4D_float_reducing_C_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/reduce_max_4D_float_reducing_HW_nnfw.mod.py b/tests/nnapi/specs/V1_2/reduce_max_4D_float_reducing_HW_nnfw.mod.py
index 057d512ea..057d512ea 100644..100755
--- a/tests/nnapi/specs/V1_2/reduce_max_4D_float_reducing_HW_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/reduce_max_4D_float_reducing_HW_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/reduce_max_float_1_nnfw.mod.py b/tests/nnapi/specs/V1_2/reduce_max_float_1_nnfw.mod.py
index 061f436ac..061f436ac 100644..100755
--- a/tests/nnapi/specs/V1_2/reduce_max_float_1_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/reduce_max_float_1_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/reduce_max_float_2_nnfw.mod.py b/tests/nnapi/specs/V1_2/reduce_max_float_2_nnfw.mod.py
index ab99c6244..ab99c6244 100644..100755
--- a/tests/nnapi/specs/V1_2/reduce_max_float_2_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/reduce_max_float_2_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/reduce_max_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/reduce_max_float_nnfw.mod.py
index eba25a534..eba25a534 100644..100755
--- a/tests/nnapi/specs/V1_2/reduce_max_float_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/reduce_max_float_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/reduce_max_quant8_1_nnfw.mod.py b/tests/nnapi/specs/V1_2/reduce_max_quant8_1_nnfw.mod.py
index 7c8df05f8..7c8df05f8 100644..100755
--- a/tests/nnapi/specs/V1_2/reduce_max_quant8_1_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/reduce_max_quant8_1_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/reduce_max_quant8_2_nnfw.mod.py b/tests/nnapi/specs/V1_2/reduce_max_quant8_2_nnfw.mod.py
index 7a54866c1..7a54866c1 100644..100755
--- a/tests/nnapi/specs/V1_2/reduce_max_quant8_2_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/reduce_max_quant8_2_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/reduce_min.mod.py b/tests/nnapi/specs/V1_2/reduce_min.mod.py
new file mode 100755
index 000000000..57b827911
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/reduce_min.mod.py
@@ -0,0 +1,70 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+def test(input0, output0, axes, keep_dims, input_data, output_data):
+ model = Model().Operation("REDUCE_MIN", input0, axes, keep_dims).To(output0)
+ quant8 = DataTypeConverter().Identify({
+ input0: ["TENSOR_QUANT8_ASYMM", 0.5, 127],
+ output0: ["TENSOR_QUANT8_ASYMM", 0.5, 127],
+ })
+ Example({
+ input0: input_data,
+ output0: output_data,
+ }, model=model).AddVariations("relaxed", "float16", quant8)
+
+test(
+ input0=Input("input0", "TENSOR_FLOAT32", "{3, 2}"),
+ input_data=[-1, -2,
+ 3, 4,
+ 5, -6],
+ axes=[-1],
+ keep_dims=False,
+ output0=Output("output0", "TENSOR_FLOAT32", "{3}"),
+ output_data=[-2, 3, -6],
+)
+
+# Tests below were adapted from tensorflow/lite/kernels/reduce_test.cc
+
+test(
+ input0=Input("input0", "TENSOR_FLOAT32", "{1}"),
+ input_data=[9.527],
+ axes=[0],
+ keep_dims=True,
+ output0=Output("output0", "TENSOR_FLOAT32", "{1}"),
+ output_data=[9.527],
+)
+
+test(
+ input0=Input("input0", "TENSOR_FLOAT32", "{4, 3, 2}"),
+ input_data=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8,
+ 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6,
+ 1.7, 1.8, 1.9, 2.0, 2.1, 2.2, 2.3, 2.4],
+ axes=[1, 0, -3, -3],
+ keep_dims=False,
+ output0=Output("output0", "TENSOR_FLOAT32", "{2}"),
+ output_data=[0.1, 0.2],
+)
+
+test(
+ input0=Input("input0", "TENSOR_FLOAT32", "{4, 3, 2}"),
+ input_data=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8,
+ 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6,
+ 1.7, 1.8, 1.9, 2.0, 2.1, 2.2, 2.3, 2.4],
+ axes=[0, 2],
+ keep_dims=True,
+ output0=Output("output0", "TENSOR_FLOAT32", "{1, 3, 1}"),
+ output_data=[0.1, 0.3, 0.5],
+)
diff --git a/tests/nnapi/specs/V1_2/reduce_min_float_1_nnfw.mod.py b/tests/nnapi/specs/V1_2/reduce_min_float_1_nnfw.mod.py
new file mode 100755
index 000000000..853cbc029
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/reduce_min_float_1_nnfw.mod.py
@@ -0,0 +1,18 @@
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{4, 3, 2}")
+axis = Parameter("axis", "TENSOR_INT32", "{4}", [1, 0, -3, -3])
+keepDims = False
+output = Output("output", "TENSOR_FLOAT32", "{2}")
+
+model = model.Operation("REDUCE_MIN", i1, axis, keepDims).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [23.0, 24.0, 13.0, 22.0, 5.0, 18.0, 7.0, 8.0, 9.0, 15.0, 11.0, 12.0,
+ 3.0, 14.0, 10.0, 16.0, 17.0, 6.0, 19.0, 20.0, 21.0, 4.0, 1.0, 2.0]}
+
+output0 = {output: # output 0
+ [1.0, 2.0]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/reduce_min_float_2_nnfw.mod.py b/tests/nnapi/specs/V1_2/reduce_min_float_2_nnfw.mod.py
new file mode 100755
index 000000000..4fccaa7b8
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/reduce_min_float_2_nnfw.mod.py
@@ -0,0 +1,18 @@
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{4, 3, 2}")
+axis = Parameter("axis", "TENSOR_INT32", "{2}", [0, 2])
+keepDims = True
+output = Output("output", "TENSOR_FLOAT32", "{1, 3, 1}")
+
+model = model.Operation("REDUCE_MIN", i1, axis, keepDims).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [20.0, 2.0, 22.0, 4.0, 24.0, 18.0, 7.0, 8.0, 19.0, 10.0, 14.0, 12.0,
+ 13.0, 11.0, 15.0, 16.0, 17.0, 6.0, 9.0, 1.0, 21.0, 3.0, 23.0, 5.0]}
+
+output0 = {output: # output 0
+ [1.0, 3.0, 5.0]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/reduce_min_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/reduce_min_float_nnfw.mod.py
new file mode 100755
index 000000000..81ddc5ba1
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/reduce_min_float_nnfw.mod.py
@@ -0,0 +1,19 @@
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+axis = Parameter("axis", "TENSOR_INT32", "{1}", [2])
+keepDims = False
+output = Output("output", "TENSOR_FLOAT32", "{1, 2, 1}")
+
+model = model.Operation("REDUCE_MIN", i1, axis, keepDims).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [2.0, 1.0,
+ 3.0, 4.0]}
+
+output0 = {output: # output 0
+ [1.0,
+ 3.0]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/reduce_sum.mod.py b/tests/nnapi/specs/V1_2/reduce_sum.mod.py
new file mode 100755
index 000000000..c59579e0f
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/reduce_sum.mod.py
@@ -0,0 +1,66 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+def test(input0, output0, axes, keep_dims, input_data, output_data):
+ model = Model().Operation("REDUCE_SUM", input0, axes, keep_dims).To(output0)
+ Example({
+ input0: input_data,
+ output0: output_data,
+ }, model=model).AddVariations("relaxed", "float16")
+
+test(
+ input0=Input("input0", "TENSOR_FLOAT32", "{3, 2}"),
+ input_data=[-1, -2,
+ 3, 4,
+ 5, -6],
+ axes=[-1],
+ keep_dims=False,
+ output0=Output("output0", "TENSOR_FLOAT32", "{3}"),
+ output_data=[-1 - 2, 3 + 4, 5 - 6],
+)
+
+# Tests below were adapted from tensorflow/lite/kernels/reduce_test.cc
+
+test(
+ input0=Input("input0", "TENSOR_FLOAT32", "{1}"),
+ input_data=[9.527],
+ axes=[0],
+ keep_dims=True,
+ output0=Output("output0", "TENSOR_FLOAT32", "{1}"),
+ output_data=[9.527],
+)
+
+test(
+ input0=Input("input0", "TENSOR_FLOAT32", "{4, 3, 2}"),
+ input_data=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8,
+ 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6,
+ 1.7, 1.8, 1.9, 2.0, 2.1, 2.2, 2.3, 2.4],
+ axes=[1, 0, -3, -3],
+ keep_dims=False,
+ output0=Output("output0", "TENSOR_FLOAT32", "{2}"),
+ output_data=[14.4, 15.6],
+)
+
+test(
+ input0=Input("input0", "TENSOR_FLOAT32", "{4, 3, 2}"),
+ input_data=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8,
+ 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6,
+ 1.7, 1.8, 1.9, 2.0, 2.1, 2.2, 2.3, 2.4],
+ axes=[0, 2],
+ keep_dims=True,
+ output0=Output("output0", "TENSOR_FLOAT32", "{1, 3, 1}"),
+ output_data=[8.4, 10.0, 11.6],
+)
diff --git a/tests/nnapi/specs/V1_2/reduce_sum_2D_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/reduce_sum_2D_float_nnfw.mod.py
new file mode 100755
index 000000000..551ef4aa5
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/reduce_sum_2D_float_nnfw.mod.py
@@ -0,0 +1,19 @@
+# model
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{3, 4}")
+axis = Int32Scalar("axis", 1)
+keepDims = False
+out1 = Output("output", "TENSOR_FLOAT32", "{3}")
+model = model.Operation("REDUCE_SUM", i1, axis, keepDims).To(out1)
+
+# Example 1. Input in operand 0, 1
+input0 = {i1: # input 0
+ [3.2, 11.47, 3.8, 5.76,
+ 28.2, 0.999, -1.3, -13.5,
+ -3.4, -22.1, -2.2, -49.7]}
+
+output0 = {out1: # output 0
+ [24.23, 14.399002, -77.4]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/reduce_sum_4D_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/reduce_sum_4D_float_nnfw.mod.py
new file mode 100755
index 000000000..c8365cbc3
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/reduce_sum_4D_float_nnfw.mod.py
@@ -0,0 +1,19 @@
+# model
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{1, 3, 4, 1}")
+axis = Int32Scalar("axis", 1)
+keepDims = False
+out1 = Output("output", "TENSOR_FLOAT32", "{1, 4, 1}")
+model = model.Operation("REDUCE_SUM", i1, axis, keepDims).To(out1)
+
+# Example 1. Input in operand 0, 1
+input0 = {i1: # input 0
+ [6.4, 7.3, 19.3, -2.3,
+ 8.3, 2.0, 11.8, -3.4,
+ 22.8, 3.0, -28.7, 4.9]}
+
+output0 = {out1: # output 0
+ [37.5, 12.3, 2.3999977, -0.7999997]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/reduce_sum_4D_float_reducing_C_nnfw.mod.py b/tests/nnapi/specs/V1_2/reduce_sum_4D_float_reducing_C_nnfw.mod.py
new file mode 100755
index 000000000..2ae69a90c
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/reduce_sum_4D_float_reducing_C_nnfw.mod.py
@@ -0,0 +1,33 @@
+batch = 2
+rows = 3
+cols = 4
+depth = 5
+
+input_table = [x for x in range(batch * rows * cols * depth)]
+
+output_table = [0 for x in range(batch * rows * cols)]
+for i in range(batch):
+    for j in range(rows):
+        for k in range(cols):
+            for l in range(depth):
+                # The value of output_table is the depthwise sum of input_table.
+                output_table[i * rows * cols + j * cols + k] += input_table[i * rows * cols * depth + j * cols * depth + k * depth + l]
+
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{%d, %d, %d, %d}" % (batch, rows, cols, depth))
+# Axis values must be in the range [-rank, rank), and axis '-n' is equivalent to 'rank - n', so '3' and '-1' refer to the same axis.
+axis = Parameter("axis", "TENSOR_INT32", "{2}", [3, -1])
+keepDims = False
+output = Output("output", "TENSOR_FLOAT32", "{%d, %d, %d}" % (batch, rows, cols))
+
+model = model.Operation("REDUCE_SUM", i1, axis, keepDims).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ input_table}
+
+output0 = {output: # output 0
+ output_table}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/reduce_sum_4D_float_reducing_HW_nnfw.mod.py b/tests/nnapi/specs/V1_2/reduce_sum_4D_float_reducing_HW_nnfw.mod.py
new file mode 100755
index 000000000..9f53d1061
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/reduce_sum_4D_float_reducing_HW_nnfw.mod.py
@@ -0,0 +1,33 @@
+batch = 2
+rows = 3
+cols = 4
+depth = 5
+
+input_table = [x for x in range(batch * rows * cols * depth)]
+
+output_table = [0 for x in range(batch * depth)]
+for i in range(batch):
+    for j in range(rows):
+        for k in range(cols):
+            for l in range(depth):
+                # The value of output_table is the sum of input_table over the row and column axes.
+                output_table[i * depth + l] += input_table[i * rows * cols * depth + j * cols * depth + k * depth + l]
+
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{%d, %d, %d, %d}" % (batch, rows, cols, depth))
+# Axis values must be in the range [-rank, rank), and axis '-n' is equivalent to 'rank - n', so this test's axis values reduce to [1, 2].
+axis = Parameter("axis", "TENSOR_INT32", "{4}", [1, 2, -3, -2])
+keepDims = False
+output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (batch, depth))
+
+model = model.Operation("REDUCE_SUM", i1, axis, keepDims).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ input_table}
+
+output0 = {output: # output 0
+ output_table}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/rsqrt.mod.py b/tests/nnapi/specs/V1_2/rsqrt.mod.py
index bfce569c6..bfce569c6 100644..100755
--- a/tests/nnapi/specs/V1_2/rsqrt.mod.py
+++ b/tests/nnapi/specs/V1_2/rsqrt.mod.py
diff --git a/tests/nnapi/specs/V1_2/rsqrt_1D_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/rsqrt_1D_float_nnfw.mod.py
index 74e23048e..74e23048e 100644..100755
--- a/tests/nnapi/specs/V1_2/rsqrt_1D_float_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/rsqrt_1D_float_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/rsqrt_2D_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/rsqrt_2D_float_nnfw.mod.py
index 45bb0ce70..45bb0ce70 100644..100755
--- a/tests/nnapi/specs/V1_2/rsqrt_2D_float_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/rsqrt_2D_float_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/rsqrt_3D_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/rsqrt_3D_float_nnfw.mod.py
index 084e02020..084e02020 100644..100755
--- a/tests/nnapi/specs/V1_2/rsqrt_3D_float_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/rsqrt_3D_float_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/rsqrt_4D_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/rsqrt_4D_float_nnfw.mod.py
index f479c50b3..f479c50b3 100644..100755
--- a/tests/nnapi/specs/V1_2/rsqrt_4D_float_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/rsqrt_4D_float_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/sin_1D_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/sin_1D_float_nnfw.mod.py
new file mode 100755
index 000000000..695ad491a
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/sin_1D_float_nnfw.mod.py
@@ -0,0 +1,13 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT32", "{4}") #A vector of inputs
+i2 = Output("op2", "TENSOR_FLOAT32", "{4}") #A vector of outputs
+model = model.Operation("SIN", i1).To(i2)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [2.0, 90.0, 1.0, 0.012]}
+output0 = {i2: # output 0
+ [0.909297427, 0.893996664, 0.841470985, 0.011999712]}
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/sin_4D_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/sin_4D_float_nnfw.mod.py
new file mode 100755
index 000000000..87877df4c
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/sin_4D_float_nnfw.mod.py
@@ -0,0 +1,18 @@
+# model
+model = Model()
+
+i1 = Input("op1", "TENSOR_FLOAT32", "{2, 1, 2, 2}")
+i3 = Output("op3", "TENSOR_FLOAT32", "{2, 1, 2, 2}")
+model = model.Operation("SIN", i1).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [12.0, 36.1, 2.0, 90, 1.0, 0.012, 0.001, 5]}
+
+output0 = {i3: # output 0
+ [-0.536572918, -0.999599143, 0.909297427, 0.893996664,
+ 0.841470985, 0.011999712, 0.001, -0.958924275]}
+
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/slice.mod.py b/tests/nnapi/specs/V1_2/slice.mod.py
new file mode 100755
index 000000000..f3683ba10
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/slice.mod.py
@@ -0,0 +1,147 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import collections
+
+TestCase = collections.namedtuple("TestCase", [
+ "inp", "inp_data", "begin", "begin_data", "size", "size_data", "output",
+ "output_data"
+])
+
+test_cases = [
+ TestCase(
+ inp=Input("input", "TENSOR_FLOAT32", "{4}"),
+ inp_data=[1, 2, 3, 4],
+ begin=Input("begin", "TENSOR_INT32", "{1}"),
+ begin_data=[1],
+ size=Input("size", "TENSOR_INT32", "{1}"),
+ size_data=[2],
+ output=Output("output", "TENSOR_FLOAT32", "{2}"),
+ output_data=[2, 3]),
+ TestCase(
+ inp=Input("input", "TENSOR_FLOAT32", "{2,3}"),
+ inp_data=[1, 2, 3, 4, 5, 6],
+ begin=Input("begin", "TENSOR_INT32", "{2}"),
+ begin_data=[1, 0],
+ size=Input("size", "TENSOR_INT32", "{2}"),
+ size_data=[1, 2],
+ output=Output("output", "TENSOR_FLOAT32", "{1, 2}"),
+ output_data=[4, 5]),
+ TestCase(
+ inp=Input("input", "TENSOR_FLOAT32", "{2,3,2}"),
+ inp_data=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
+ begin=Input("begin", "TENSOR_INT32", "{3}"),
+ begin_data=[0, 0, 0],
+ size=Input("size", "TENSOR_INT32", "{3}"),
+ size_data=[2, 3, 2],
+ output=Output("output", "TENSOR_FLOAT32", "{2, 3, 2}"),
+ output_data=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]),
+ TestCase(
+ inp=Input("input", "TENSOR_FLOAT32", "{4, 1, 1, 1}"),
+ inp_data=[1, 2, 3, 4],
+ begin=Input("begin", "TENSOR_INT32", "{4}"),
+ begin_data=[1, 0, 0, 0],
+ size=Input("size", "TENSOR_INT32", "{4}"),
+ size_data=[3, 1, 1, 1],
+ output=Output("output", "TENSOR_FLOAT32", "{3, 1, 1, 1}"),
+ output_data=[2, 3, 4]),
+ TestCase(
+ inp=Input("input", "TENSOR_INT32", "{3, 2, 3, 1}"),
+ inp_data=[1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6],
+ begin=Input("begin", "TENSOR_INT32", "{4}"),
+ begin_data=[1, 0, 0, 0],
+ size=Input("size", "TENSOR_INT32", "{4}"),
+ size_data=[1, 1, 3, 1],
+ output=Output("output", "TENSOR_INT32", "{1, 1, 3, 1}"),
+ output_data=[3, 3, 3]),
+ TestCase(
+ inp=Input("input", "TENSOR_INT32", "{3, 2, 3, 1}"),
+ inp_data=[1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6],
+ begin=Input("begin", "TENSOR_INT32", "{4}"),
+ begin_data=[1, 0, 0, 0],
+ size=Input("size", "TENSOR_INT32", "{4}"),
+ size_data=[2, 1, 3, 1],
+ output=Output("output", "TENSOR_INT32", "{2, 1, 3, 1}"),
+ output_data=[3, 3, 3, 5, 5, 5]),
+ TestCase(
+ inp=Input("input", "TENSOR_QUANT8_ASYMM", "{3, 2, 3, 1}, 2.0, 128"),
+ inp_data=[1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6],
+ begin=Input("begin", "TENSOR_INT32", "{4}"),
+ begin_data=[1, 0, 0, 0],
+ size=Input("size", "TENSOR_INT32", "{4}"),
+ size_data=[2, 1, 3, 1],
+ output=Output("output", "TENSOR_QUANT8_ASYMM", "{2, 1, 3, 1}, 2.0, 128"),
+ output_data=[3, 3, 3, 5, 5, 5]),
+ TestCase(
+ inp=Input("input", "TENSOR_INT32", "{3, 2, 3, 1}"),
+ inp_data=[1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6],
+ begin=Input("begin", "TENSOR_INT32", "{4}"),
+ begin_data=[1, 0, 0, 0],
+ size=Input("size", "TENSOR_INT32", "{4}"),
+ size_data=[2, 1, -1, 1],
+ output=Output("output", "TENSOR_INT32", "{2, 1, 3, 1}"),
+ output_data=[3, 3, 3, 5, 5, 5]),
+]
+
+for test_case in test_cases:
+ model = Model().Operation("SLICE", test_case.inp, test_case.begin,
+ test_case.size).To(test_case.output)
+ Example({
+ test_case.inp: test_case.inp_data,
+ test_case.begin: test_case.begin_data,
+ test_case.size: test_case.size_data,
+ test_case.output: test_case.output_data,
+ },
+ model=model).AddVariations("relaxed", "float16")
+
+
+# zero-sized input
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+layout = BoolScalar("layout", False) # NHWC
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# SLICE op with numBatches = 0.
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 1, 1, 1}") # out
+model = model.Operation("SLICE", zero_sized, [0, 1, 1, 0], [-1, 1, -1, 1]).To(o3)
+
+quant8 = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ zero_sized: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ o3: ("TENSOR_QUANT8_ASYMM", 0.1, 128)
+})
+
+# Create test case with dummy values.
+Example({
+ i1: [1],
+ o1: [0],
+ o2: [0],
+ o3: [0],
+}).AddVariations("relaxed", quant8, "float16")
diff --git a/tests/nnapi/specs/V1_2/split_1D_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/split_1D_float_nnfw.mod.py
new file mode 100755
index 000000000..96d7b7987
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/split_1D_float_nnfw.mod.py
@@ -0,0 +1,40 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT32", "{8}")
+axis = Int32Scalar("axis", 0)
+num_out = Int32Scalar("num_out", 8)
+i2 = Output("op2", "TENSOR_FLOAT32", "{1}")
+i3 = Output("op3", "TENSOR_FLOAT32", "{1}")
+i4 = Output("op4", "TENSOR_FLOAT32", "{1}")
+i5 = Output("op5", "TENSOR_FLOAT32", "{1}")
+i6 = Output("op6", "TENSOR_FLOAT32", "{1}")
+i7 = Output("op7", "TENSOR_FLOAT32", "{1}")
+i8 = Output("op8", "TENSOR_FLOAT32", "{1}")
+i9 = Output("op9", "TENSOR_FLOAT32", "{1}")
+
+model = model.Operation("SPLIT", i1, axis, num_out).To([i2, i3, i4, i5, i6, i7, i8, i9])
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]}
+
+output0 = {
+ i2: # output 0
+ [1.0],
+ i3: # output 1
+ [2.0],
+ i4: # output 2
+ [3.0],
+ i5: # output 3
+ [4.0],
+ i6: # output 4
+ [5.0],
+ i7: # output 5
+ [6.0],
+ i8: # output 6
+ [7.0],
+ i9: # output 7
+ [8.0]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/split_1D_int32_nnfw.mod.py b/tests/nnapi/specs/V1_2/split_1D_int32_nnfw.mod.py
new file mode 100755
index 000000000..c2da36fbb
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/split_1D_int32_nnfw.mod.py
@@ -0,0 +1,40 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_INT32", "{8}")
+axis = Int32Scalar("axis", 0)
+num_out = Int32Scalar("num_out", 8)
+i2 = Output("op2", "TENSOR_INT32", "{1}")
+i3 = Output("op3", "TENSOR_INT32", "{1}")
+i4 = Output("op4", "TENSOR_INT32", "{1}")
+i5 = Output("op5", "TENSOR_INT32", "{1}")
+i6 = Output("op6", "TENSOR_INT32", "{1}")
+i7 = Output("op7", "TENSOR_INT32", "{1}")
+i8 = Output("op8", "TENSOR_INT32", "{1}")
+i9 = Output("op9", "TENSOR_INT32", "{1}")
+
+model = model.Operation("SPLIT", i1, axis, num_out).To([i2, i3, i4, i5, i6, i7, i8, i9])
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1, 2, 3, 4, 5, 6, 7, 8]}
+
+output0 = {
+ i2: # output 0
+ [1],
+ i3: # output 1
+ [2],
+ i4: # output 2
+ [3],
+ i5: # output 3
+ [4],
+ i6: # output 4
+ [5],
+ i7: # output 5
+ [6],
+ i8: # output 6
+ [7],
+ i9: # output 7
+ [8]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/split_4D_float_1_nnfw.mod.py b/tests/nnapi/specs/V1_2/split_4D_float_1_nnfw.mod.py
new file mode 100755
index 000000000..909af1920
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/split_4D_float_1_nnfw.mod.py
@@ -0,0 +1,21 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT32", "{2,2,2,2}")
+axis = Int32Scalar("axis", 0)
+num_out = Int32Scalar("num_out", 2)
+i2 = Output("op2", "TENSOR_FLOAT32", "{1,2,2,2}")
+i3 = Output("op3", "TENSOR_FLOAT32", "{1,2,2,2}")
+model = model.Operation("SPLIT", i1, axis, num_out).To([i2, i3])
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0]}
+
+output0 = {
+ i2: # output 0
+ [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0],
+ i3: # output 1
+ [9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/split_4D_float_2_nnfw.mod.py b/tests/nnapi/specs/V1_2/split_4D_float_2_nnfw.mod.py
new file mode 100755
index 000000000..d1ccda689
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/split_4D_float_2_nnfw.mod.py
@@ -0,0 +1,21 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT32", "{2,2,2,2}")
+axis = Int32Scalar("axis", 3)
+num_out = Int32Scalar("num_out", 2)
+i2 = Output("op2", "TENSOR_FLOAT32", "{2,2,2,1}")
+i3 = Output("op3", "TENSOR_FLOAT32", "{2,2,2,1}")
+model = model.Operation("SPLIT", i1, axis, num_out).To([i2, i3])
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0]}
+
+output0 = {
+ i2: # output 0
+ [1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0],
+ i3: # output 1
+ [2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/split_4D_float_3_nnfw.mod.py b/tests/nnapi/specs/V1_2/split_4D_float_3_nnfw.mod.py
new file mode 100755
index 000000000..2c218f329
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/split_4D_float_3_nnfw.mod.py
@@ -0,0 +1,21 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT32", "{2,2,2,2}")
+axis = Int32Scalar("axis", -4) # Negative axis
+num_out = Int32Scalar("num_out", 2)
+i2 = Output("op2", "TENSOR_FLOAT32", "{1,2,2,2}")
+i3 = Output("op3", "TENSOR_FLOAT32", "{1,2,2,2}")
+model = model.Operation("SPLIT", i1, axis, num_out).To([i2, i3])
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0]}
+
+output0 = {
+ i2: # output 0
+ [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0],
+ i3: # output 1
+ [9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/split_4D_int32_1_nnfw.mod.py b/tests/nnapi/specs/V1_2/split_4D_int32_1_nnfw.mod.py
new file mode 100755
index 000000000..c5d95fcd8
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/split_4D_int32_1_nnfw.mod.py
@@ -0,0 +1,21 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_INT32", "{2,2,2,2}")
+axis = Int32Scalar("axis", 0)
+num_out = Int32Scalar("num_out", 2)
+i2 = Output("op2", "TENSOR_INT32", "{1,2,2,2}")
+i3 = Output("op3", "TENSOR_INT32", "{1,2,2,2}")
+model = model.Operation("SPLIT", i1, axis, num_out).To([i2, i3])
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]}
+
+output0 = {
+ i2: # output 0
+ [1, 2, 3, 4, 5, 6, 7, 8],
+ i3: # output 1
+ [9, 10, 11, 12, 13, 14, 15, 16]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/split_4D_int32_2_nnfw.mod.py b/tests/nnapi/specs/V1_2/split_4D_int32_2_nnfw.mod.py
new file mode 100755
index 000000000..51a43d8f9
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/split_4D_int32_2_nnfw.mod.py
@@ -0,0 +1,21 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_INT32", "{2,2,2,2}")
+axis = Int32Scalar("axis", 1)
+num_out = Int32Scalar("num_out", 2)
+i2 = Output("op2", "TENSOR_INT32", "{2,1,2,2}")
+i3 = Output("op3", "TENSOR_INT32", "{2,1,2,2}")
+model = model.Operation("SPLIT", i1, axis, num_out).To([i2, i3])
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]}
+
+output0 = {
+ i2: # output 0
+ [1, 2, 3, 4, 9, 10, 11, 12],
+ i3: # output 1
+ [5, 6, 7, 8, 13, 14, 15, 16]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/split_4D_int32_3_nnfw.mod.py b/tests/nnapi/specs/V1_2/split_4D_int32_3_nnfw.mod.py
new file mode 100755
index 000000000..a9709e315
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/split_4D_int32_3_nnfw.mod.py
@@ -0,0 +1,21 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_INT32", "{2,2,2,2}")
+axis = Int32Scalar("axis", 2)
+num_out = Int32Scalar("num_out", 2)
+i2 = Output("op2", "TENSOR_INT32", "{2,2,1,2}")
+i3 = Output("op3", "TENSOR_INT32", "{2,2,1,2}")
+model = model.Operation("SPLIT", i1, axis, num_out).To([i2, i3])
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]}
+
+output0 = {
+ i2: # output 0
+ [1, 2, 5, 6, 9, 10, 13, 14],
+ i3: # output 1
+ [3, 4, 7, 8, 11, 12, 15, 16]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/split_4D_int32_4_nnfw.mod.py b/tests/nnapi/specs/V1_2/split_4D_int32_4_nnfw.mod.py
new file mode 100755
index 000000000..98d70f9a6
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/split_4D_int32_4_nnfw.mod.py
@@ -0,0 +1,21 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_INT32", "{2,2,2,2}")
+axis = Int32Scalar("axis", 3)
+num_out = Int32Scalar("num_out", 2)
+i2 = Output("op2", "TENSOR_INT32", "{2,2,2,1}")
+i3 = Output("op3", "TENSOR_INT32", "{2,2,2,1}")
+model = model.Operation("SPLIT", i1, axis, num_out).To([i2, i3])
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]}
+
+output0 = {
+ i2: # output 0
+ [1, 3, 5, 7, 9, 11, 13, 15],
+ i3: # output 1
+ [2, 4, 6, 8, 10, 12, 14, 16]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/split_4D_int32_5_nnfw.mod.py b/tests/nnapi/specs/V1_2/split_4D_int32_5_nnfw.mod.py
new file mode 100755
index 000000000..4dd1e441c
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/split_4D_int32_5_nnfw.mod.py
@@ -0,0 +1,21 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_INT32", "{2,2,2,2}")
+axis = Int32Scalar("axis", -4) # Negative axis
+num_out = Int32Scalar("num_out", 2)
+i2 = Output("op2", "TENSOR_INT32", "{1,2,2,2}")
+i3 = Output("op3", "TENSOR_INT32", "{1,2,2,2}")
+model = model.Operation("SPLIT", i1, axis, num_out).To([i2, i3])
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]}
+
+output0 = {
+ i2: # output 0
+ [1, 2, 3, 4, 5, 6, 7, 8],
+ i3: # output 1
+ [9, 10, 11, 12, 13, 14, 15, 16]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/split_4D_quant8_nnfw.mod.py b/tests/nnapi/specs/V1_2/split_4D_quant8_nnfw.mod.py
new file mode 100755
index 000000000..062cd3722
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/split_4D_quant8_nnfw.mod.py
@@ -0,0 +1,21 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM", "{2,2,2,2}, 0.5f, 1")
+axis = Int32Scalar("axis", 0)
+num_out = Int32Scalar("num_out", 2)
+i2 = Output("op2", "TENSOR_QUANT8_ASYMM", "{1,2,2,2}, 0.5f, 1")
+i3 = Output("op3", "TENSOR_QUANT8_ASYMM", "{1,2,2,2}, 0.5f, 1")
+model = model.Operation("SPLIT", i1, axis, num_out).To([i2, i3])
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]}
+
+output0 = {
+ i2: # output 0
+ [1, 2, 3, 4, 5, 6, 7, 8],
+ i3: # output 1
+ [9, 10, 11, 12, 13, 14, 15, 16]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/tests/nnapi/specs/V1_2/split_float_1.mod.py b/tests/nnapi/specs/V1_2/split_float_1.mod.py
new file mode 100755
index 000000000..d1bdc4561
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/split_float_1.mod.py
@@ -0,0 +1,38 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# model
+input0 = Input("input0", "TENSOR_FLOAT32", "{6}")
+axis = Int32Scalar("axis", 0)
+num_splits = Int32Scalar("num_splits", 3)
+output0 = Output("output0", "TENSOR_FLOAT32", "{2}")
+output1 = Output("output1", "TENSOR_FLOAT32", "{2}")
+output2 = Output("output2", "TENSOR_FLOAT32", "{2}")
+
+model = Model().Operation("SPLIT", input0, axis, num_splits).To((output0, output1, output2))
+
+# Example 1.
+input_dict = {
+ input0: [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
+}
+output_dict = {
+ output0: [1.0, 2.0],
+ output1: [3.0, 4.0],
+ output2: [5.0, 6.0],
+}
+
+# Instantiate an example
+Example((input_dict, output_dict)).AddVariations("relaxed", "float16")
diff --git a/tests/nnapi/specs/V1_2/split_float_2.mod.py b/tests/nnapi/specs/V1_2/split_float_2.mod.py
new file mode 100755
index 000000000..a1610327e
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/split_float_2.mod.py
@@ -0,0 +1,37 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# model
+input0 = Input("input0", "TENSOR_FLOAT32", "{2, 3}")
+axis = Int32Scalar("axis", 0)
+num_splits = Int32Scalar("num_splits", 2)
+output0 = Output("output0", "TENSOR_FLOAT32", "{1, 3}")
+output1 = Output("output1", "TENSOR_FLOAT32", "{1, 3}")
+
+model = Model().Operation("SPLIT", input0, axis, num_splits).To((output0, output1))
+
+# Example 1.
+input_dict = {
+ input0: [1.0, 2.0, 3.0,
+ 4.0, 5.0, 6.0]
+}
+output_dict = {
+ output0: [1.0, 2.0, 3.0],
+ output1: [4.0, 5.0, 6.0],
+}
+
+# Instantiate an example
+Example((input_dict, output_dict)).AddVariations("relaxed", "float16")
diff --git a/tests/nnapi/specs/V1_2/split_float_3.mod.py b/tests/nnapi/specs/V1_2/split_float_3.mod.py
new file mode 100755
index 000000000..56b87e53c
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/split_float_3.mod.py
@@ -0,0 +1,39 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# model
+input0 = Input("input0", "TENSOR_FLOAT32", "{2, 3}")
+axis = Int32Scalar("axis", 1)
+num_splits = Int32Scalar("num_splits", 3)
+output0 = Output("output0", "TENSOR_FLOAT32", "{2, 1}")
+output1 = Output("output1", "TENSOR_FLOAT32", "{2, 1}")
+output2 = Output("output2", "TENSOR_FLOAT32", "{2, 1}")
+
+model = Model().Operation("SPLIT", input0, axis, num_splits).To((output0, output1, output2))
+
+# Example 1.
+input_dict = {
+ input0: [1.0, 2.0, 3.0,
+ 4.0, 5.0, 6.0]
+}
+output_dict = {
+ output0: [1.0, 4.0],
+ output1: [2.0, 5.0],
+ output2: [3.0, 6.0],
+}
+
+# Instantiate an example
+Example((input_dict, output_dict)).AddVariations("relaxed", "float16")
diff --git a/tests/nnapi/specs/V1_2/split_float_4.mod.py b/tests/nnapi/specs/V1_2/split_float_4.mod.py
new file mode 100755
index 000000000..a9bf5a70a
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/split_float_4.mod.py
@@ -0,0 +1,36 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# model
+input0 = Input("input0", "TENSOR_FLOAT32", "{2, 2, 2}")
+axis = Int32Scalar("axis", 1)
+num_splits = Int32Scalar("num_splits", 2)
+output0 = Output("output0", "TENSOR_FLOAT32", "{2, 1, 2}")
+output1 = Output("output1", "TENSOR_FLOAT32", "{2, 1, 2}")
+
+model = Model().Operation("SPLIT", input0, axis, num_splits).To((output0, output1))
+
+# Example 1.
+input_dict = {
+ input0: [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]
+}
+output_dict = {
+ output0: [1.0, 2.0, 5.0, 6.0],
+ output1: [3.0, 4.0, 7.0, 8.0],
+}
+
+# Instantiate an example
+Example((input_dict, output_dict)).AddVariations("relaxed", "float16")
diff --git a/tests/nnapi/specs/V1_2/split_float_5.mod.py b/tests/nnapi/specs/V1_2/split_float_5.mod.py
new file mode 100755
index 000000000..ad6621aeb
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/split_float_5.mod.py
@@ -0,0 +1,36 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# model
+input0 = Input("input0", "TENSOR_FLOAT32", "{2, 2, 2}")
+axis = Int32Scalar("axis", -2)
+num_splits = Int32Scalar("num_splits", 2)
+output0 = Output("output0", "TENSOR_FLOAT32", "{2, 1, 2}")
+output1 = Output("output1", "TENSOR_FLOAT32", "{2, 1, 2}")
+
+model = Model().Operation("SPLIT", input0, axis, num_splits).To((output0, output1))
+
+# Example 1.
+input_dict = {
+ input0: [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]
+}
+output_dict = {
+ output0: [1.0, 2.0, 5.0, 6.0],
+ output1: [3.0, 4.0, 7.0, 8.0],
+}
+
+# Instantiate an example
+Example((input_dict, output_dict)).AddVariations("relaxed", "float16")
diff --git a/tests/nnapi/specs/V1_2/split_int32_1.mod.py b/tests/nnapi/specs/V1_2/split_int32_1.mod.py
new file mode 100755
index 000000000..313505a9a
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/split_int32_1.mod.py
@@ -0,0 +1,38 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# model
+input0 = Input("input0", "TENSOR_INT32", "{6}")
+axis = Int32Scalar("axis", 0)
+num_splits = Int32Scalar("num_splits", 3)
+output0 = Output("output0", "TENSOR_INT32", "{2}")
+output1 = Output("output1", "TENSOR_INT32", "{2}")
+output2 = Output("output2", "TENSOR_INT32", "{2}")
+
+model = Model().Operation("SPLIT", input0, axis, num_splits).To((output0, output1, output2))
+
+# Example 1.
+input_dict = {
+ input0: [1, 2, 3, 4, 5, 6]
+}
+output_dict = {
+ output0: [1, 2],
+ output1: [3, 4],
+ output2: [5, 6],
+}
+
+# Instantiate an example
+Example((input_dict, output_dict)).AddRelaxed()
diff --git a/tests/nnapi/specs/V1_2/split_int32_2.mod.py b/tests/nnapi/specs/V1_2/split_int32_2.mod.py
new file mode 100755
index 000000000..4ad52d361
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/split_int32_2.mod.py
@@ -0,0 +1,37 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# model
+input0 = Input("input0", "TENSOR_INT32", "{2, 3}")
+axis = Int32Scalar("axis", 0)
+num_splits = Int32Scalar("num_splits", 2)
+output0 = Output("output0", "TENSOR_INT32", "{1, 3}")
+output1 = Output("output1", "TENSOR_INT32", "{1, 3}")
+
+model = Model().Operation("SPLIT", input0, axis, num_splits).To((output0, output1))
+
+# Example 1.
+input_dict = {
+ input0: [1, 2, 3,
+ 4, 5, 6]
+}
+output_dict = {
+ output0: [1, 2, 3],
+ output1: [4, 5, 6],
+}
+
+# Instantiate an example
+Example((input_dict, output_dict)).AddRelaxed()
diff --git a/tests/nnapi/specs/V1_2/split_int32_3.mod.py b/tests/nnapi/specs/V1_2/split_int32_3.mod.py
new file mode 100755
index 000000000..0e8acb63a
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/split_int32_3.mod.py
@@ -0,0 +1,39 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# model
+input0 = Input("input0", "TENSOR_INT32", "{2, 3}")
+axis = Int32Scalar("axis", 1)
+num_splits = Int32Scalar("num_splits", 3)
+output0 = Output("output0", "TENSOR_INT32", "{2, 1}")
+output1 = Output("output1", "TENSOR_INT32", "{2, 1}")
+output2 = Output("output2", "TENSOR_INT32", "{2, 1}")
+
+model = Model().Operation("SPLIT", input0, axis, num_splits).To((output0, output1, output2))
+
+# Example 1.
+input_dict = {
+ input0: [1, 2, 3,
+ 4, 5, 6]
+}
+output_dict = {
+ output0: [1, 4],
+ output1: [2, 5],
+ output2: [3, 6],
+}
+
+# Instantiate an example
+Example((input_dict, output_dict)).AddRelaxed()
diff --git a/tests/nnapi/specs/V1_2/split_int32_4.mod.py b/tests/nnapi/specs/V1_2/split_int32_4.mod.py
new file mode 100755
index 000000000..e84abcd84
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/split_int32_4.mod.py
@@ -0,0 +1,36 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# model
+input0 = Input("input0", "TENSOR_INT32", "{2, 2, 2}")
+axis = Int32Scalar("axis", 1)
+num_splits = Int32Scalar("num_splits", 2)
+output0 = Output("output0", "TENSOR_INT32", "{2, 1, 2}")
+output1 = Output("output1", "TENSOR_INT32", "{2, 1, 2}")
+
+model = Model().Operation("SPLIT", input0, axis, num_splits).To((output0, output1))
+
+# Example 1.
+input_dict = {
+ input0: [1, 2, 3, 4, 5, 6, 7, 8]
+}
+output_dict = {
+ output0: [1, 2, 5, 6],
+ output1: [3, 4, 7, 8],
+}
+
+# Instantiate an example
+Example((input_dict, output_dict)).AddRelaxed()
diff --git a/tests/nnapi/specs/V1_2/split_quant8_1.mod.py b/tests/nnapi/specs/V1_2/split_quant8_1.mod.py
new file mode 100755
index 000000000..0c4723775
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/split_quant8_1.mod.py
@@ -0,0 +1,38 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# model
+input0 = Input("input0", "TENSOR_QUANT8_ASYMM", "{6}, 1.0, 0")
+axis = Int32Scalar("axis", 0)
+num_splits = Int32Scalar("num_splits", 3)
+output0 = Output("output0", "TENSOR_QUANT8_ASYMM", "{2}, 1.0, 0")
+output1 = Output("output1", "TENSOR_QUANT8_ASYMM", "{2}, 1.0, 0")
+output2 = Output("output2", "TENSOR_QUANT8_ASYMM", "{2}, 1.0, 0")
+
+model = Model().Operation("SPLIT", input0, axis, num_splits).To((output0, output1, output2))
+
+# Example 1.
+input_dict = {
+ input0: [1, 2, 3, 4, 5, 6]
+}
+output_dict = {
+ output0: [1, 2],
+ output1: [3, 4],
+ output2: [5, 6],
+}
+
+# Instantiate an example
+Example((input_dict, output_dict)).AddRelaxed()
diff --git a/tests/nnapi/specs/V1_2/split_quant8_2.mod.py b/tests/nnapi/specs/V1_2/split_quant8_2.mod.py
new file mode 100755
index 000000000..4c24dace9
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/split_quant8_2.mod.py
@@ -0,0 +1,37 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# model
+input0 = Input("input0", "TENSOR_QUANT8_ASYMM", "{2, 3}, 2.0, 3")
+axis = Int32Scalar("axis", 0)
+num_splits = Int32Scalar("num_splits", 2)
+output0 = Output("output0", "TENSOR_QUANT8_ASYMM", "{1, 3}, 2.0, 3")
+output1 = Output("output1", "TENSOR_QUANT8_ASYMM", "{1, 3}, 2.0, 3")
+
+model = Model().Operation("SPLIT", input0, axis, num_splits).To((output0, output1))
+
+# Example 1.
+input_dict = {
+ input0: [1, 2, 3,
+ 4, 5, 6]
+}
+output_dict = {
+ output0: [1, 2, 3],
+ output1: [4, 5, 6],
+}
+
+# Instantiate an example
+Example((input_dict, output_dict)).AddRelaxed()
diff --git a/tests/nnapi/specs/V1_2/split_quant8_3.mod.py b/tests/nnapi/specs/V1_2/split_quant8_3.mod.py
new file mode 100755
index 000000000..813f1a332
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/split_quant8_3.mod.py
@@ -0,0 +1,39 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# model
+input0 = Input("input0", "TENSOR_QUANT8_ASYMM", "{2, 3}, 2.0, 3")
+axis = Int32Scalar("axis", 1)
+num_splits = Int32Scalar("num_splits", 3)
+output0 = Output("output0", "TENSOR_QUANT8_ASYMM", "{2, 1}, 2.0, 3")
+output1 = Output("output1", "TENSOR_QUANT8_ASYMM", "{2, 1}, 2.0, 3")
+output2 = Output("output2", "TENSOR_QUANT8_ASYMM", "{2, 1}, 2.0, 3")
+
+model = Model().Operation("SPLIT", input0, axis, num_splits).To((output0, output1, output2))
+
+# Example 1.
+input_dict = {
+ input0: [1, 2, 3,
+ 4, 5, 6]
+}
+output_dict = {
+ output0: [1, 4],
+ output1: [2, 5],
+ output2: [3, 6],
+}
+
+# Instantiate an example
+Example((input_dict, output_dict))
diff --git a/tests/nnapi/specs/V1_2/split_quant8_4.mod.py b/tests/nnapi/specs/V1_2/split_quant8_4.mod.py
new file mode 100755
index 000000000..51e5d5de2
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/split_quant8_4.mod.py
@@ -0,0 +1,36 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# model
+input0 = Input("input0", "TENSOR_QUANT8_ASYMM", "{2, 2, 2}, 1.0, 0")
+axis = Int32Scalar("axis", 1)
+num_splits = Int32Scalar("num_splits", 2)
+output0 = Output("output0", "TENSOR_QUANT8_ASYMM", "{2, 1, 2}, 1.0, 0")
+output1 = Output("output1", "TENSOR_QUANT8_ASYMM", "{2, 1, 2}, 1.0, 0")
+
+model = Model().Operation("SPLIT", input0, axis, num_splits).To((output0, output1))
+
+# Example 1.
+input_dict = {
+ input0: [1, 2, 3, 4, 5, 6, 7, 8]
+}
+output_dict = {
+ output0: [1, 2, 5, 6],
+ output1: [3, 4, 7, 8],
+}
+
+# Instantiate an example
+Example((input_dict, output_dict))
diff --git a/tests/nnapi/specs/V1_2/sqrt_.mod.py b/tests/nnapi/specs/V1_2/sqrt_.mod.py
index e934062b9..e934062b9 100644..100755
--- a/tests/nnapi/specs/V1_2/sqrt_.mod.py
+++ b/tests/nnapi/specs/V1_2/sqrt_.mod.py
diff --git a/tests/nnapi/specs/V1_2/sqrt_1D_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/sqrt_1D_float_nnfw.mod.py
index 3e5a660ff..3e5a660ff 100644..100755
--- a/tests/nnapi/specs/V1_2/sqrt_1D_float_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/sqrt_1D_float_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/sqrt_2D_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/sqrt_2D_float_nnfw.mod.py
index a61732dd3..a61732dd3 100644..100755
--- a/tests/nnapi/specs/V1_2/sqrt_2D_float_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/sqrt_2D_float_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/sqrt_3D_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/sqrt_3D_float_nnfw.mod.py
index 7fe3dc2a5..7fe3dc2a5 100644..100755
--- a/tests/nnapi/specs/V1_2/sqrt_3D_float_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/sqrt_3D_float_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/sqrt_4D_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/sqrt_4D_float_nnfw.mod.py
index f75a80720..f75a80720 100644..100755
--- a/tests/nnapi/specs/V1_2/sqrt_4D_float_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/sqrt_4D_float_nnfw.mod.py
diff --git a/tests/nnapi/specs/V1_2/sub_v1_2.mod.py b/tests/nnapi/specs/V1_2/sub_v1_2.mod.py
new file mode 100755
index 000000000..86299762d
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/sub_v1_2.mod.py
@@ -0,0 +1,99 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import random
+
+random.seed(0)
+
+# FLOAT32 and FLOAT16
+input0 = Input("input0", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+input1 = Input("input1", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+activation = Int32Scalar("act", 0)
+output0 = Output("output0", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+
+model = Model().Operation("SUB", input0, input1, activation).To(output0)
+
+Example({
+ input0: [2.0, -4.0, 8.0, -16.0],
+ input1: [2.0, -2.0, -4.0, 4.0],
+ output0: [0.0, -2.0, 12.0, -20.0],
+}).AddVariations("float16").AddAllActivations(output0, activation)
+
+
+# QUANT8_ASYMM
+shape = "{2, 4, 16, 2}, 0.5, 0"
+input0 = Input("input0", "TENSOR_QUANT8_ASYMM", shape)
+input1 = Input("input1", "TENSOR_QUANT8_ASYMM", shape)
+activation = 0
+output0 = Output("output0", "TENSOR_QUANT8_ASYMM", shape)
+
+model = Model("quant8").Operation("SUB", input0, input1, activation).To(output0)
+
+input0_values = list(range(256))
+input1_values = list(input0_values)
+random.shuffle(input1_values)
+output_values = [max(0, a - b) for a, b in zip(input0_values, input1_values)]
+
+Example({
+ input0: input0_values,
+ input1: input1_values,
+ output0: output_values,
+})
+
+# SUB of data type TENSOR_FLOAT32 is introduced in V1_1.
+Example.SetVersion("V1_1", "sub_v1_2_none", "sub_v1_2_relu", "sub_v1_2_relu1", "sub_v1_2_relu6")
+
+
+# SUB, zero-sized input
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+layout = BoolScalar("layout", False) # NHWC
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 2}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 2}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# SUB op with numBatches = 0.
+i2 = Parameter("op", "TENSOR_FLOAT32", "{1, 2, 2, 1}", [1, 2, 3, 4]) # weights
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 2, 2, 2}") # out
+model = model.Operation("SUB", zero_sized, i2, 0).To(o3)
+
+quant8 = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ zero_sized: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ i2: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ o3: ("TENSOR_QUANT8_ASYMM", 0.1, 128)
+})
+
+# Create test case with dummy values.
+Example({
+ i1: [1, 2],
+ o1: [0],
+ o2: [0],
+ o3: [0],
+}).AddVariations("relaxed", quant8, "float16")
diff --git a/tests/nnapi/specs/V1_2/sub_v1_2_broadcast.mod.py b/tests/nnapi/specs/V1_2/sub_v1_2_broadcast.mod.py
new file mode 100755
index 000000000..5a755117b
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/sub_v1_2_broadcast.mod.py
@@ -0,0 +1,60 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# FLOAT32 and FLOAT16
+input0 = Input("input0", "TENSOR_FLOAT32", "{1, 2}")
+input1 = Input("input1", "TENSOR_FLOAT32", "{2, 2}")
+activation = Int32Scalar("act", 0)
+output0 = Output("output0", "TENSOR_FLOAT32", "{2, 2}")
+
+model = Model().Operation("SUB", input0, input1, activation).To(output0)
+
+input0_values = [10, 20]
+input1_values = [0.1, 0.2,
+ 0.3, 0.4]
+output_values = [9.9, 19.8,
+ 9.7, 19.6]
+
+Example({
+ input0: input0_values,
+ input1: input1_values,
+ output0: output_values,
+}).AddVariations("float16").AddAllActivations(output0, activation)
+
+
+# QUANT8_ASYMM
+input0 = Input("input0", "TENSOR_QUANT8_ASYMM", "{1, 2}, 1.0, 0")
+input1 = Input("input1", "TENSOR_QUANT8_ASYMM", "{2, 2}, 1.0, 0")
+activation = 0
+output0 = Output("output0", "TENSOR_QUANT8_ASYMM", "{2, 2}, 1.0, 0")
+
+model = Model("quant8").Operation("SUB", input0, input1, activation).To(output0)
+
+input0_values = [100, 200]
+input1_values = [1, 2,
+ 3, 4]
+output_values = [99, 198,
+ 97, 196]
+
+Example({
+ input0: input0_values,
+ input1: input1_values,
+ output0: output_values,
+})
+
+# SUB of data type TENSOR_FLOAT32 is introduced in V1_1.
+Example.SetVersion("V1_1", "sub_v1_2_broadcast_none", "sub_v1_2_broadcast_relu", \
+ "sub_v1_2_broadcast_relu1", "sub_v1_2_broadcast_relu6")
diff --git a/tests/nnapi/specs/V1_2/tanh_v1_2.mod.py b/tests/nnapi/specs/V1_2/tanh_v1_2.mod.py
new file mode 100755
index 000000000..c65d09fdb
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/tanh_v1_2.mod.py
@@ -0,0 +1,89 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# TEST 1
+input0 = Input("input0", "TENSOR_FLOAT16", "{1, 2, 2, 1}")
+output0 = Output("output0", "TENSOR_FLOAT16", "{1, 2, 2, 1}")
+
+model = Model().Operation("TANH", input0).To(output0)
+
+Example({
+ input0: [-1, 0, 1, 10],
+ output0: [-.761594156, 0, .761594156, 0.999999996],
+})
+
+
+# TEST 2
+input_scale, input_offset = 0.05, 100
+output_scale, output_offset = 1.0 / 128, 128 # Required by the spec for quantized TANH output.
+
+def dequantize(x):
+ return (x - input_offset) * input_scale
+
+def quantize(x):
+ return max(0, min(255, int(round(x / output_scale)) + output_offset))
+
+input0 = Input("input0", "TENSOR_QUANT8_ASYMM", "{256}, %g, %d" % (input_scale, input_offset))
+output0 = Output("output0", "TENSOR_QUANT8_ASYMM", "{256}, %g, %d" % (output_scale, output_offset))
+model = Model().Operation("TANH", input0).To(output0)
+
+input_values = list(range(256))
+output_values = [quantize(math.tanh(dequantize(x))) for x in input_values]
+
+Example({
+ input0: input_values,
+ output0: output_values,
+})
+
+
+# TEST 3: zero-sized input
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+layout = BoolScalar("layout", False) # NHWC
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# TANH op with numBatches = 0.
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 2, 2, 1}") # out
+model = model.Operation("TANH", zero_sized).To(o3)
+
+quant8 = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ zero_sized: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ o3: ("TENSOR_QUANT8_ASYMM", 1.0 / 128, 128)
+})
+
+# Create test case with dummy values.
+Example({
+ i1: [1],
+ o1: [0],
+ o2: [0],
+ o3: [0],
+}).AddVariations("relaxed", quant8, "float16")
diff --git a/tests/nnapi/specs/V1_2/topk_v2.mod.py b/tests/nnapi/specs/V1_2/topk_v2.mod.py
index 189b9907d..189b9907d 100644..100755
--- a/tests/nnapi/specs/V1_2/topk_v2.mod.py
+++ b/tests/nnapi/specs/V1_2/topk_v2.mod.py
diff --git a/tests/nnapi/specs/V1_2/topk_v2_1D_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/topk_v2_1D_float_nnfw.mod.py
index 1e0ed21d6..5be9c49bf 100644..100755
--- a/tests/nnapi/specs/V1_2/topk_v2_1D_float_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/topk_v2_1D_float_nnfw.mod.py
@@ -4,7 +4,7 @@ i1 = Input("op1", "TENSOR_FLOAT32", "{4}") # a vector of input
k = Int32Scalar("k", 2)
i2 = Output("op2", "TENSOR_FLOAT32", "{2}") # values of output
i3 = Output("op3", "TENSOR_INT32", "{2}") # indexes of output
-model = model.Operation("TOPK_V2_EX", i1, k).To([i2, i3])
+model = model.Operation("TOPK_V2", i1, k).To([i2, i3])
# Example 1. Input in operand 0,
input0 = {i1: # input 0
diff --git a/tests/nnapi/specs/V1_2/topk_v2_1D_int32_nnfw.mod.py b/tests/nnapi/specs/V1_2/topk_v2_1D_int32_nnfw.mod.py
index d2bd39adf..8ee332761 100644..100755
--- a/tests/nnapi/specs/V1_2/topk_v2_1D_int32_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/topk_v2_1D_int32_nnfw.mod.py
@@ -4,7 +4,7 @@ i1 = Input("op1", "TENSOR_INT32", "{4}") # a vector of input
k = Int32Scalar("k", 2)
i2 = Output("op2", "TENSOR_INT32", "{2}") # values of output
i3 = Output("op3", "TENSOR_INT32", "{2}") # indexes of output
-model = model.Operation("TOPK_V2_EX", i1, k).To([i2, i3])
+model = model.Operation("TOPK_V2", i1, k).To([i2, i3])
# Example 1. Input in operand 0,
input0 = {i1: # input 0
diff --git a/tests/nnapi/specs/V1_2/topk_v2_1D_quant8_nnfw.mod.py b/tests/nnapi/specs/V1_2/topk_v2_1D_quant8_nnfw.mod.py
index 6f36ce41f..d270d8c77 100644..100755
--- a/tests/nnapi/specs/V1_2/topk_v2_1D_quant8_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/topk_v2_1D_quant8_nnfw.mod.py
@@ -4,7 +4,7 @@ i1 = Input("op1", "TENSOR_QUANT8_ASYMM", "{4}, 0.5f, 1") # a vector of input
k = Int32Scalar("k", 2)
i2 = Output("op2", "TENSOR_QUANT8_ASYMM", "{2}, 0.5f, 1") # values of output
i3 = Output("op3", "TENSOR_INT32", "{2}") # indexes of output
-model = model.Operation("TOPK_V2_EX", i1, k).To([i2, i3])
+model = model.Operation("TOPK_V2", i1, k).To([i2, i3])
# Example 1. Input in operand 0,
input0 = {i1: # input 0
diff --git a/tests/nnapi/specs/V1_2/topk_v2_2D_float_nnfw.mod.py b/tests/nnapi/specs/V1_2/topk_v2_2D_float_nnfw.mod.py
index 204bc143f..29113b901 100644..100755
--- a/tests/nnapi/specs/V1_2/topk_v2_2D_float_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/topk_v2_2D_float_nnfw.mod.py
@@ -4,7 +4,7 @@ i1 = Input("op1", "TENSOR_FLOAT32", "{3,4}") # a matirx of input
k = Int32Scalar("k", 2)
o1 = Output("op2", "TENSOR_FLOAT32", "{3,2}") # values of output
o2 = Output("op3", "TENSOR_INT32", "{3,2}") # indexes of output
-model = model.Operation("TOPK_V2_EX", i1, k).To([o1, o2])
+model = model.Operation("TOPK_V2", i1, k).To([o1, o2])
# Example 1. Input in operand 0,
input0 = {i1: # input 0
@@ -13,7 +13,7 @@ input0 = {i1: # input 0
2.123456789123456789, 18.123456789123456789, 19.123456789123456789, 11.123456789123456789]}
output0 = {o1: # output 1
- [6.123456789123456789, 5.123456789123456789,
+ [6.123456789123456789, 5.123456789123456789,
9.123456789123456789, 8.123456789123456789,
19.123456789123456789, 18.123456789123456789],
o2: # output 1
diff --git a/tests/nnapi/specs/V1_2/topk_v2_2D_int32_nnfw.mod.py b/tests/nnapi/specs/V1_2/topk_v2_2D_int32_nnfw.mod.py
index b90a35488..7a2965fd8 100644..100755
--- a/tests/nnapi/specs/V1_2/topk_v2_2D_int32_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/topk_v2_2D_int32_nnfw.mod.py
@@ -4,7 +4,7 @@ i1 = Input("op1", "TENSOR_INT32", "{3,4}") # a vector of input
k = Int32Scalar("k", 2)
i2 = Output("op2", "TENSOR_INT32", "{3,2}") # indexes of output
i3 = Output("op3", "TENSOR_INT32", "{3,2}") # values of output
-model = model.Operation("TOPK_V2_EX", i1, k).To([i2, i3])
+model = model.Operation("TOPK_V2", i1, k).To([i2, i3])
# Example 1. Input in operand 0,
input0 = {i1: # input 0
diff --git a/tests/nnapi/specs/V1_2/topk_v2_2D_quant8_nnfw.mod.py b/tests/nnapi/specs/V1_2/topk_v2_2D_quant8_nnfw.mod.py
index d8b5c6075..31c8ef9f2 100644..100755
--- a/tests/nnapi/specs/V1_2/topk_v2_2D_quant8_nnfw.mod.py
+++ b/tests/nnapi/specs/V1_2/topk_v2_2D_quant8_nnfw.mod.py
@@ -4,7 +4,7 @@ i1 = Input("op1", "TENSOR_QUANT8_ASYMM", "{3,4}, 0.5f, 1") # a vector of input
k = Int32Scalar("k", 2)
i2 = Output("op2", "TENSOR_QUANT8_ASYMM", "{3,2}, 0.5f, 1") # values of output
i3 = Output("op3", "TENSOR_INT32", "{3,2}") # indexes of output
-model = model.Operation("TOPK_V2_EX", i1, k).To([i2, i3])
+model = model.Operation("TOPK_V2", i1, k).To([i2, i3])
# Example 1. Input in operand 0,
input0 = {i1: # input 0
diff --git a/tests/nnapi/specs/V1_2/transpose_v1_2.mod.py b/tests/nnapi/specs/V1_2/transpose_v1_2.mod.py
new file mode 100755
index 000000000..9d0108e8f
--- /dev/null
+++ b/tests/nnapi/specs/V1_2/transpose_v1_2.mod.py
@@ -0,0 +1,81 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# model
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{2, 2}")
+perms = Input("perms", "TENSOR_INT32", "{0}")
+output = Output("output", "TENSOR_FLOAT32", "{2, 2}")
+
+model = model.Operation("TRANSPOSE", i1, perms).To(output)
+
+# Additional data type
+quant8 = DataTypeConverter().Identify({
+ i1: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
+ output: ("TENSOR_QUANT8_ASYMM", 0.5, 0)
+})
+
+# Instantiate an example
+Example({
+ i1: [1.0, 2.0,
+ 3.0, 4.0],
+ perms: [],
+ output: [1.0, 3.0,
+ 2.0, 4.0]
+}).AddVariations("relaxed", quant8)
+
+# TRANSPOSE of data types TENSOR_FLOAT32 and TENSOR_QUANT8_ASYMM is introduced in V1_1.
+Example.SetVersion("V1_1", "transpose_v1_2", "transpose_v1_2_quant8")
+
+
+# zero-sized input
+
+# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box coordinates.
+p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
+p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
+o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
+o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
+tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
+tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
+model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
+
+# Use ROI_ALIGN op to convert into zero-sized feature map.
+layout = BoolScalar("layout", False) # NHWC
+i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
+zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
+model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
+
+# TRANSPOSE op with numBatches = 0.
+o3 = Output("out", "TENSOR_FLOAT32", "{0, 1, 2, 2}") # out
+model = model.Operation("TRANSPOSE", zero_sized, [0, 3, 1, 2]).To(o3)
+
+quant8 = DataTypeConverter().Identify({
+ p1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ o1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
+ i1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ zero_sized: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
+ o3: ("TENSOR_QUANT8_ASYMM", 0.1, 128)
+})
+
+# Create test case with dummy values.
+Example({
+ i1: [1],
+ o1: [0],
+ o2: [0],
+ o3: [0],
+}).AddVariations("relaxed", quant8, "float16")