summaryrefslogtreecommitdiff
path: root/res
diff options
context:
space:
mode:
Diffstat (limited to 'res')
-rw-r--r--res/TensorFlowLiteRecipes/BroadcastTo_000/test.recipe24
-rw-r--r--res/TensorFlowLiteRecipes/ExpandDims_004/test.recipe30
-rw-r--r--res/TensorFlowLiteRecipes/FakeQuant_000/test.recipe25
-rw-r--r--res/TensorFlowLiteRecipes/FakeQuant_000/test.reverse0
-rw-r--r--res/TensorFlowLiteRecipes/Net_BroadcastTo_AddV2_000/test.recipe63
-rw-r--r--res/TensorFlowLiteRecipes/Net_BroadcastTo_AddV2_000/test.rule7
-rw-r--r--res/TensorFlowLiteRecipes/Net_BroadcastTo_AddV2_001/test.recipe63
-rw-r--r--res/TensorFlowLiteRecipes/Net_BroadcastTo_AddV2_001/test.rule7
-rw-r--r--res/TensorFlowLiteRecipes/Net_Conv_Add_Mul_000/test.recipe92
-rw-r--r--res/TensorFlowLiteRecipes/Net_Conv_Add_Mul_000/test.rule7
-rw-r--r--res/TensorFlowLiteRecipes/Net_Conv_Add_Mul_001/test.recipe92
-rw-r--r--res/TensorFlowLiteRecipes/Net_Conv_Add_Mul_001/test.rule7
-rw-r--r--res/TensorFlowLiteRecipes/Net_Conv_Add_Mul_002/test.recipe92
-rw-r--r--res/TensorFlowLiteRecipes/Net_Conv_Add_Mul_002/test.rule7
-rw-r--r--res/TensorFlowLiteRecipes/Net_Conv_Add_Mul_003/test.recipe92
-rw-r--r--res/TensorFlowLiteRecipes/Net_Conv_Add_Mul_003/test.rule7
-rw-r--r--res/TensorFlowLiteRecipes/Net_Conv_Min_Max_000/test.recipe121
-rw-r--r--res/TensorFlowLiteRecipes/Net_Conv_Min_Max_000/test.rule8
-rw-r--r--res/TensorFlowLiteRecipes/Net_Conv_Relu6_000/test.recipe85
-rw-r--r--res/TensorFlowLiteRecipes/Net_Conv_Relu6_000/test.rule6
-rw-r--r--res/TensorFlowLiteRecipes/Net_DwConv_BN_000/test.recipe91
-rw-r--r--res/TensorFlowLiteRecipes/Net_DwConv_BN_000/test.rule7
-rw-r--r--res/TensorFlowLiteRecipes/Net_DwConv_BN_001/test.recipe91
-rw-r--r--res/TensorFlowLiteRecipes/Net_DwConv_BN_001/test.rule7
-rw-r--r--res/TensorFlowLiteRecipes/Net_InstanceNorm_002/test.recipe39
-rw-r--r--res/TensorFlowLiteRecipes/Net_InstanceNorm_002/test.rule2
-rw-r--r--res/TensorFlowLiteRecipes/Net_Maximum_Minimum_000/test.recipe86
-rw-r--r--res/TensorFlowLiteRecipes/Net_Maximum_Minimum_000/test.rule7
-rw-r--r--res/TensorFlowLiteRecipes/Net_Preactivation_BN_000/test.recipe5
-rw-r--r--res/TensorFlowLiteRecipes/Net_Reshape_Neg_000/test.recipe35
-rw-r--r--res/TensorFlowLiteRecipes/Net_Reshape_Reshape_000/test.recipe42
-rw-r--r--res/TensorFlowLiteRecipes/Net_Reshape_Reshape_000/test.rule5
-rw-r--r--res/TensorFlowLiteRecipes/Net_Squeeze_Squeeze_000/test.recipe29
-rw-r--r--res/TensorFlowLiteRecipes/Net_Squeeze_Squeeze_000/test.rule6
-rw-r--r--res/TensorFlowLiteRecipes/Net_StridedSlice_StridedSlice_000/test.recipe77
-rw-r--r--res/TensorFlowLiteRecipes/Net_StridedSlice_StridedSlice_000/test.rule5
-rw-r--r--res/TensorFlowLiteRecipes/Net_TConv_BN_002/test.recipe156
-rw-r--r--res/TensorFlowLiteRecipes/Net_TConv_BN_002/test.rule8
-rw-r--r--res/TensorFlowLiteRecipes/Part_Add_Sqrt_000/test.recipe48
-rw-r--r--res/TensorFlowLiteRecipes/Part_Add_Sqrt_Rsqrt_000/test.recipe68
-rw-r--r--res/TensorFlowLiteRecipes/Part_Add_Sub_000/test.recipe67
-rw-r--r--res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_000/test.recipe27
-rw-r--r--res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_001/test.recipe47
-rw-r--r--res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_002/test.recipe47
-rw-r--r--res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_003/test.recipe47
-rw-r--r--res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_004/test.recipe38
-rw-r--r--res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_Add_000/test.recipe56
-rw-r--r--res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_Add_001/test.recipe61
-rw-r--r--res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_Add_002/test.recipe71
-rw-r--r--res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_Add_003/test.recipe47
-rw-r--r--res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_Add_004/test.recipe41
-rw-r--r--res/TensorFlowLiteRecipes/Slice_001/test.recipe37
-rw-r--r--res/TensorFlowLiteRecipes/Slice_001/test.reverse0
-rw-r--r--res/TensorFlowLiteRecipes/Squeeze_001/test.recipe18
-rw-r--r--res/TensorFlowLiteRecipes/Squeeze_001/test.reverse0
-rw-r--r--res/TensorFlowPythonExamples/examples/Bidirectional_LSTM/__init__.py6
-rw-r--r--res/TensorFlowPythonExamples/examples/fake_quant_with_min_max_vars/__init__.py27
-rw-r--r--res/TensorFlowPythonModels/examples/minimum-maximum/__init__.py15
-rwxr-xr-x[-rw-r--r--]res/TensorFlowPythonModels/tfpem.py2
59 files changed, 2264 insertions, 39 deletions
diff --git a/res/TensorFlowLiteRecipes/BroadcastTo_000/test.recipe b/res/TensorFlowLiteRecipes/BroadcastTo_000/test.recipe
new file mode 100644
index 000000000..015e40bc4
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/BroadcastTo_000/test.recipe
@@ -0,0 +1,24 @@
+operand {
+ name: "bc_input"
+ type: FLOAT32
+ shape { dim: 2 dim: 3 }
+}
+operand {
+ name: "bc_shape"
+ type: INT32
+ shape { dim: 3 }
+ filler { tag: "explicit" arg: "1" arg: "2" arg: "3" }
+}
+operand {
+ name: "bc_ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 2 dim: 3 }
+}
+operation {
+ type: "BroadcastTo"
+ input: "bc_input"
+ input: "bc_shape"
+ output: "bc_ofm"
+}
+input: "bc_input"
+output: "bc_ofm"
diff --git a/res/TensorFlowLiteRecipes/ExpandDims_004/test.recipe b/res/TensorFlowLiteRecipes/ExpandDims_004/test.recipe
new file mode 100644
index 000000000..20e6555f7
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/ExpandDims_004/test.recipe
@@ -0,0 +1,30 @@
+operand {
+ name: "ifm1"
+ type: FLOAT32
+ shape { dim: 3 dim: 3 }
+}
+
+operand {
+ name: "ifm2"
+ type: INT32
+ shape { }
+ filler {
+ tag: "constant"
+ arg: "-1"
+ }
+}
+
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 3 dim: 3 dim: 1 }
+}
+
+operation {
+ type: "ExpandDims"
+ input: "ifm1"
+ input: "ifm2"
+ output: "ofm"
+}
+input: "ifm1"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/FakeQuant_000/test.recipe b/res/TensorFlowLiteRecipes/FakeQuant_000/test.recipe
new file mode 100644
index 000000000..c96466f83
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/FakeQuant_000/test.recipe
@@ -0,0 +1,25 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+
+operation {
+ type: "FakeQuant"
+ fakequant_options {
+ min: 0.0
+ max: 1.0
+ num_bits: 8
+ narrow_range: false
+ }
+ input: "ifm"
+ output: "ofm"
+}
+
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/FakeQuant_000/test.reverse b/res/TensorFlowLiteRecipes/FakeQuant_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/FakeQuant_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/Net_BroadcastTo_AddV2_000/test.recipe b/res/TensorFlowLiteRecipes/Net_BroadcastTo_AddV2_000/test.recipe
new file mode 100644
index 000000000..5069aac09
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Net_BroadcastTo_AddV2_000/test.recipe
@@ -0,0 +1,63 @@
+operand {
+ name: "bc_input"
+ type: FLOAT32
+ shape { dim: 2 dim: 3 }
+}
+operand {
+ name: "bc_shape"
+ type: INT32
+ shape { dim: 3 }
+ filler { tag: "explicit" arg: "1" arg: "2" arg: "3" }
+}
+operand {
+ name: "bc_ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 2 dim: 3 }
+}
+operation {
+ type: "BroadcastTo"
+ input: "bc_input"
+ input: "bc_shape"
+ output: "bc_ofm"
+}
+operand {
+ name: "reshape_data"
+ type: FLOAT32
+ shape { dim: 2 dim: 3 }
+}
+operand {
+ name: "reshape_shape"
+ type: INT32
+ shape { dim: 3 }
+ filler { tag: "explicit" arg: "1" arg: "2" arg: "3" }
+}
+operand {
+ name: "reshape_ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 2 dim: 3 }
+}
+operation {
+ type: "Reshape"
+ reshape_options {
+ new_shape: 1
+ new_shape: 2
+ new_shape: 3
+ }
+ input: "reshape_data"
+ input: "reshape_shape"
+ output: "reshape_ofm"
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 2 dim: 3 }
+}
+operation {
+ type: "AddV2"
+ input: "bc_ofm"
+ input: "reshape_ofm"
+ output: "ofm"
+}
+input: "bc_input"
+input: "reshape_data"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Net_BroadcastTo_AddV2_000/test.rule b/res/TensorFlowLiteRecipes/Net_BroadcastTo_AddV2_000/test.rule
new file mode 100644
index 000000000..fdaa7904a
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Net_BroadcastTo_AddV2_000/test.rule
@@ -0,0 +1,7 @@
+# To check if BroadcastTo and AddV2 are fused to Add op
+
+RULE "VERIFY_FILE_FORMAT" $(verify_file_format) '=' 1
+
+RULE "ADD_EXIST" $(op_count ADD) '=' 1
+RULE "NO_BroadcastTo" $(op_count 'CUSTOM(BroadcastTo)') '=' 0
+RULE "NO_AddV2" $(op_count 'CUSTOM(AddV2)') '=' 0
diff --git a/res/TensorFlowLiteRecipes/Net_BroadcastTo_AddV2_001/test.recipe b/res/TensorFlowLiteRecipes/Net_BroadcastTo_AddV2_001/test.recipe
new file mode 100644
index 000000000..ca0ad8e03
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Net_BroadcastTo_AddV2_001/test.recipe
@@ -0,0 +1,63 @@
+operand {
+ name: "bc_input"
+ type: INT64
+ shape { dim: 2 dim: 3 }
+}
+operand {
+ name: "bc_shape"
+ type: INT32
+ shape { dim: 3 }
+ filler { tag: "explicit" arg: "1" arg: "2" arg: "3" }
+}
+operand {
+ name: "bc_ofm"
+ type: INT64
+ shape { dim: 1 dim: 2 dim: 3 }
+}
+operation {
+ type: "BroadcastTo"
+ input: "bc_input"
+ input: "bc_shape"
+ output: "bc_ofm"
+}
+operand {
+ name: "reshape_data"
+ type: INT64
+ shape { dim: 2 dim: 3 }
+}
+operand {
+ name: "reshape_shape"
+ type: INT32
+ shape { dim: 3 }
+ filler { tag: "explicit" arg: "1" arg: "2" arg: "3" }
+}
+operand {
+ name: "reshape_ofm"
+ type: INT64
+ shape { dim: 1 dim: 2 dim: 3 }
+}
+operation {
+ type: "Reshape"
+ reshape_options {
+ new_shape: 1
+ new_shape: 2
+ new_shape: 3
+ }
+ input: "reshape_data"
+ input: "reshape_shape"
+ output: "reshape_ofm"
+}
+operand {
+ name: "ofm"
+ type: INT64
+ shape { dim: 1 dim: 2 dim: 3 }
+}
+operation {
+ type: "AddV2"
+ input: "bc_ofm"
+ input: "reshape_ofm"
+ output: "ofm"
+}
+input: "bc_input"
+input: "reshape_data"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Net_BroadcastTo_AddV2_001/test.rule b/res/TensorFlowLiteRecipes/Net_BroadcastTo_AddV2_001/test.rule
new file mode 100644
index 000000000..d34458999
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Net_BroadcastTo_AddV2_001/test.rule
@@ -0,0 +1,7 @@
+# To check if BroadcastTo and AddV2 are not fused to Add op
+
+RULE "VERIFY_FILE_FORMAT" $(verify_file_format) '=' 1
+
+RULE "BroadcastTo_EXIST" $(op_count 'CUSTOM(BroadcastTo)') '=' 1
+RULE "AddV2_EXIST" $(op_count 'CUSTOM(AddV2)') '=' 1
+RULE "NO_ADD" $(op_count ADD) '=' 0
diff --git a/res/TensorFlowLiteRecipes/Net_Conv_Add_Mul_000/test.recipe b/res/TensorFlowLiteRecipes/Net_Conv_Add_Mul_000/test.recipe
new file mode 100644
index 000000000..5ee07b456
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Net_Conv_Add_Mul_000/test.recipe
@@ -0,0 +1,92 @@
+operand {
+ name: "ifm_conv"
+ type: FLOAT32
+ shape { dim: 1 dim: 64 dim: 64 dim: 32 }
+}
+operand {
+ name: "filter"
+ type: FLOAT32
+ shape { dim: 64 dim: 1 dim: 1 dim: 32 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "bias"
+ type: FLOAT32
+ shape { dim: 64 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "ofm_conv"
+ type: FLOAT32
+ shape { dim: 1 dim: 32 dim: 32 dim: 64 }
+}
+operand {
+ name: "mul_const"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 1 dim: 64 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "add_const"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 1 dim: 64 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "ofm_mul"
+ type: FLOAT32
+ shape { dim: 1 dim: 32 dim: 32 dim: 64 }
+}
+operand {
+ name: "ofm_add"
+ type: FLOAT32
+ shape { dim: 1 dim: 32 dim: 32 dim: 64 }
+}
+operation {
+ type: "Conv2D"
+ conv2d_options {
+ padding: VALID
+ stride_w: 2
+ stride_h: 2
+ }
+ input: "ifm_conv"
+ input: "filter"
+ input: "bias"
+ output: "ofm_conv"
+}
+operation {
+ type: "Mul"
+ input: "ofm_conv"
+ input: "mul_const"
+ output: "ofm_mul"
+ mul_options {
+ activation: NONE
+ }
+}
+operation {
+ type: "Add"
+ input: "ofm_mul"
+ input: "add_const"
+ output: "ofm_add"
+ add_options {
+ activation: NONE
+ }
+}
+input: "ifm_conv"
+output: "ofm_add"
diff --git a/res/TensorFlowLiteRecipes/Net_Conv_Add_Mul_000/test.rule b/res/TensorFlowLiteRecipes/Net_Conv_Add_Mul_000/test.rule
new file mode 100644
index 000000000..00a25dfd6
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Net_Conv_Add_Mul_000/test.rule
@@ -0,0 +1,7 @@
+# To check if Add and Mul are fused to Convolution op
+
+RULE "VERIFY_FILE_FORMAT" $(verify_file_format) '=' 1
+
+RULE "CONV_EXIST" $(op_count CONV_2D) '=' 1
+RULE "NO_MUL" $(op_count MUL) '=' 0
+RULE "NO_ADD" $(op_count ADD) '=' 0
diff --git a/res/TensorFlowLiteRecipes/Net_Conv_Add_Mul_001/test.recipe b/res/TensorFlowLiteRecipes/Net_Conv_Add_Mul_001/test.recipe
new file mode 100644
index 000000000..04bdd5ae0
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Net_Conv_Add_Mul_001/test.recipe
@@ -0,0 +1,92 @@
+operand {
+ name: "ifm_conv"
+ type: FLOAT32
+ shape { dim: 1 dim: 64 dim: 64 dim: 32 }
+}
+operand {
+ name: "filter"
+ type: FLOAT32
+ shape { dim: 64 dim: 1 dim: 1 dim: 32 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "bias"
+ type: FLOAT32
+ shape { dim: 64 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "ofm_conv"
+ type: FLOAT32
+ shape { dim: 1 dim: 32 dim: 32 dim: 64 }
+}
+operand {
+ name: "mul_const"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 1 dim: 64 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "add_const"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 1 dim: 64 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "ofm_mul"
+ type: FLOAT32
+ shape { dim: 1 dim: 32 dim: 32 dim: 64 }
+}
+operand {
+ name: "ofm_add"
+ type: FLOAT32
+ shape { dim: 1 dim: 32 dim: 32 dim: 64 }
+}
+operation {
+ type: "Conv2D"
+ conv2d_options {
+ padding: VALID
+ stride_w: 2
+ stride_h: 2
+ }
+ input: "ifm_conv"
+ input: "filter"
+ input: "bias"
+ output: "ofm_conv"
+}
+operation {
+ type: "Mul"
+ input: "ofm_conv"
+ input: "mul_const"
+ output: "ofm_mul"
+ mul_options {
+ activation: NONE
+ }
+}
+operation {
+ type: "Add"
+ input: "ofm_mul"
+ input: "add_const"
+ output: "ofm_add"
+ add_options {
+ activation: RELU
+ }
+}
+input: "ifm_conv"
+output: "ofm_add"
diff --git a/res/TensorFlowLiteRecipes/Net_Conv_Add_Mul_001/test.rule b/res/TensorFlowLiteRecipes/Net_Conv_Add_Mul_001/test.rule
new file mode 100644
index 000000000..7f3511a35
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Net_Conv_Add_Mul_001/test.rule
@@ -0,0 +1,7 @@
+# To check if Add(with RELU) and Mul are fused to Convolution op
+
+RULE "VERIFY_FILE_FORMAT" $(verify_file_format) '=' 1
+
+RULE "CONV_EXIST" $(op_count CONV_2D) '=' 1
+RULE "NO_MUL" $(op_count MUL) '=' 0
+RULE "NO_ADD" $(op_count ADD) '=' 0
diff --git a/res/TensorFlowLiteRecipes/Net_Conv_Add_Mul_002/test.recipe b/res/TensorFlowLiteRecipes/Net_Conv_Add_Mul_002/test.recipe
new file mode 100644
index 000000000..e3fe1e315
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Net_Conv_Add_Mul_002/test.recipe
@@ -0,0 +1,92 @@
+operand {
+ name: "ifm_conv"
+ type: FLOAT32
+ shape { dim: 1 dim: 64 dim: 64 dim: 32 }
+}
+operand {
+ name: "filter"
+ type: FLOAT32
+ shape { dim: 64 dim: 1 dim: 1 dim: 32 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "bias"
+ type: FLOAT32
+ shape { dim: 64 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "ofm_conv"
+ type: FLOAT32
+ shape { dim: 1 dim: 32 dim: 32 dim: 64 }
+}
+operand {
+ name: "mul_const"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 1 dim: 64 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "add_const"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 1 dim: 64 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "ofm_mul"
+ type: FLOAT32
+ shape { dim: 1 dim: 32 dim: 32 dim: 64 }
+}
+operand {
+ name: "ofm_add"
+ type: FLOAT32
+ shape { dim: 1 dim: 32 dim: 32 dim: 64 }
+}
+operation {
+ type: "Conv2D"
+ conv2d_options {
+ padding: VALID
+ stride_w: 2
+ stride_h: 2
+ }
+ input: "ifm_conv"
+ input: "filter"
+ input: "bias"
+ output: "ofm_conv"
+}
+operation {
+ type: "Mul"
+ input: "mul_const"
+ input: "ofm_conv"
+ output: "ofm_mul"
+ mul_options {
+ activation: NONE
+ }
+}
+operation {
+ type: "Add"
+ input: "add_const"
+ input: "ofm_mul"
+ output: "ofm_add"
+ add_options {
+ activation: NONE
+ }
+}
+input: "ifm_conv"
+output: "ofm_add"
diff --git a/res/TensorFlowLiteRecipes/Net_Conv_Add_Mul_002/test.rule b/res/TensorFlowLiteRecipes/Net_Conv_Add_Mul_002/test.rule
new file mode 100644
index 000000000..329d1752c
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Net_Conv_Add_Mul_002/test.rule
@@ -0,0 +1,7 @@
+# To check if Add and Mul with reverse input sequence are fused to Convolution op
+
+RULE "VERIFY_FILE_FORMAT" $(verify_file_format) '=' 1
+
+RULE "CONV_EXIST" $(op_count CONV_2D) '=' 1
+RULE "NO_MUL" $(op_count MUL) '=' 0
+RULE "NO_ADD" $(op_count ADD) '=' 0
diff --git a/res/TensorFlowLiteRecipes/Net_Conv_Add_Mul_003/test.recipe b/res/TensorFlowLiteRecipes/Net_Conv_Add_Mul_003/test.recipe
new file mode 100644
index 000000000..d7673169e
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Net_Conv_Add_Mul_003/test.recipe
@@ -0,0 +1,92 @@
+operand {
+ name: "ifm_conv"
+ type: FLOAT32
+ shape { dim: 1 dim: 64 dim: 64 dim: 32 }
+}
+operand {
+ name: "filter"
+ type: FLOAT32
+ shape { dim: 64 dim: 1 dim: 1 dim: 32 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "bias"
+ type: FLOAT32
+ shape { dim: 64 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "ofm_conv"
+ type: FLOAT32
+ shape { dim: 1 dim: 32 dim: 32 dim: 64 }
+}
+operand {
+ name: "mul_const"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 1 dim: 64 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "add_const"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 1 dim: 64 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "ofm_mul"
+ type: FLOAT32
+ shape { dim: 1 dim: 32 dim: 32 dim: 64 }
+}
+operand {
+ name: "ofm_add"
+ type: FLOAT32
+ shape { dim: 1 dim: 32 dim: 32 dim: 64 }
+}
+operation {
+ type: "Conv2D"
+ conv2d_options {
+ padding: VALID
+ stride_w: 2
+ stride_h: 2
+ }
+ input: "ifm_conv"
+ input: "filter"
+ input: "bias"
+ output: "ofm_conv"
+}
+operation {
+ type: "Mul"
+ input: "ofm_conv"
+ input: "mul_const"
+ output: "ofm_mul"
+ mul_options {
+ activation: RELU
+ }
+}
+operation {
+ type: "Add"
+ input: "ofm_mul"
+ input: "add_const"
+ output: "ofm_add"
+ add_options {
+ activation: NONE
+ }
+}
+input: "ifm_conv"
+output: "ofm_add"
diff --git a/res/TensorFlowLiteRecipes/Net_Conv_Add_Mul_003/test.rule b/res/TensorFlowLiteRecipes/Net_Conv_Add_Mul_003/test.rule
new file mode 100644
index 000000000..9e158e3d6
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Net_Conv_Add_Mul_003/test.rule
@@ -0,0 +1,7 @@
+# To check if Add and Mul are not fused to Convolution op
+
+RULE "VERIFY_FILE_FORMAT" $(verify_file_format) '=' 1
+
+RULE "CONV_EXIST" $(op_count CONV_2D) '=' 1
+RULE "MUL_EXIST" $(op_count MUL) '=' 1
+RULE "ADD_EXIST" $(op_count ADD) '=' 1
diff --git a/res/TensorFlowLiteRecipes/Net_Conv_Min_Max_000/test.recipe b/res/TensorFlowLiteRecipes/Net_Conv_Min_Max_000/test.recipe
new file mode 100644
index 000000000..6d166f0bf
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Net_Conv_Min_Max_000/test.recipe
@@ -0,0 +1,121 @@
+operand {
+ name: "Placeholder"
+ type: FLOAT32
+ shape { dim: 1 dim: 16 dim: 16 dim: 3 }
+}
+operand {
+ name: "Const_4"
+ type: FLOAT32
+ shape { }
+ filler { tag: "explicit" arg: "6" }
+}
+operand {
+ name: "Const_5"
+ type: FLOAT32
+ shape { }
+ filler { tag: "explicit" arg: "0" }
+}
+operand {
+ name: "Conv2D_1"
+ type: FLOAT32
+ shape { dim: 3 dim: 3 dim: 3 dim: 3 }
+ filler { tag: "gaussian" arg: "0.0" arg: "0.1" }
+}
+operand {
+ name: "Conv2D_2"
+ type: FLOAT32
+ shape { dim: 3 }
+ filler { tag: "gaussian" arg: "0.0" arg: "0.1" }
+}
+operand {
+ name: "Conv2D_21"
+ type: FLOAT32
+ shape { dim: 3 dim: 3 dim: 3 dim: 3 }
+ filler { tag: "gaussian" arg: "0.0" arg: "0.1" }
+}
+operand {
+ name: "Conv2D_11"
+ type: FLOAT32
+ shape { dim: 1 dim: 16 dim: 16 dim: 3 }
+}
+operand {
+ name: "Minimum"
+ type: FLOAT32
+ shape { dim: 1 dim: 16 dim: 16 dim: 3 }
+}
+operand {
+ name: "Maximum"
+ type: FLOAT32
+ shape { dim: 1 dim: 16 dim: 16 dim: 3 }
+}
+operand {
+ name: "Conv2D_22"
+ type: FLOAT32
+ shape { dim: 1 dim: 16 dim: 16 dim: 3 }
+}
+operand {
+ name: "Minimum_1"
+ type: FLOAT32
+ shape { dim: 1 dim: 16 dim: 16 dim: 3 }
+}
+operand {
+ name: "Maximum_1"
+ type: FLOAT32
+ shape { dim: 1 dim: 16 dim: 16 dim: 3 }
+}
+operation {
+ type: "Conv2D"
+ input: "Placeholder"
+ input: "Conv2D_1"
+ input: "Conv2D_2"
+ output: "Conv2D_11"
+ conv2d_options {
+ padding: SAME
+ stride_w: 1
+ stride_h: 1
+ activation: NONE
+ dilation_w_factor: 1
+ dilation_h_factor: 1
+ }
+}
+operation {
+ type: "Minimum"
+ input: "Conv2D_11"
+ input: "Const_4"
+ output: "Minimum"
+}
+operation {
+ type: "Maximum"
+ input: "Minimum"
+ input: "Const_5"
+ output: "Maximum"
+}
+operation {
+ type: "Conv2D"
+ input: "Maximum"
+ input: "Conv2D_21"
+ input: "Conv2D_2"
+ output: "Conv2D_22"
+ conv2d_options {
+ padding: SAME
+ stride_w: 1
+ stride_h: 1
+ activation: NONE
+ dilation_w_factor: 1
+ dilation_h_factor: 1
+ }
+}
+operation {
+ type: "Minimum"
+ input: "Conv2D_22"
+ input: "Const_4"
+ output: "Minimum_1"
+}
+operation {
+ type: "Maximum"
+ input: "Minimum_1"
+ input: "Const_5"
+ output: "Maximum_1"
+}
+input: "Placeholder"
+output: "Maximum_1"
diff --git a/res/TensorFlowLiteRecipes/Net_Conv_Min_Max_000/test.rule b/res/TensorFlowLiteRecipes/Net_Conv_Min_Max_000/test.rule
new file mode 100644
index 000000000..a67530afd
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Net_Conv_Min_Max_000/test.rule
@@ -0,0 +1,8 @@
+# To check if Minimum and Maximum are converted to Relu6 op
+
+RULE "VERIFY_FILE_FORMAT" $(verify_file_format) '=' 1
+
+RULE "CONV_EXIST" $(op_count CONV_2D) '=' 2
+RULE "RELU6_EXIST" $(op_count RELU6) '=' 2
+RULE "MIN_NOT_EXIST" $(op_count MINIMUM) '=' 0
+RULE "MAX_NOT_EXIST" $(op_count MAXIMUM) '=' 0
diff --git a/res/TensorFlowLiteRecipes/Net_Conv_Relu6_000/test.recipe b/res/TensorFlowLiteRecipes/Net_Conv_Relu6_000/test.recipe
new file mode 100644
index 000000000..f6be63f84
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Net_Conv_Relu6_000/test.recipe
@@ -0,0 +1,85 @@
+operand {
+ name: "Placeholder"
+ type: FLOAT32
+ shape { dim: 1 dim: 16 dim: 16 dim: 3 }
+}
+operand {
+ name: "Conv2D_1"
+ type: FLOAT32
+ shape { dim: 3 dim: 3 dim: 3 dim: 3 }
+ filler { tag: "gaussian" arg: "0.0" arg: "0.1" }
+}
+operand {
+ name: "Conv2D_2"
+ type: FLOAT32
+ shape { dim: 3 }
+ filler { tag: "gaussian" arg: "0.0" arg: "0.1" }
+}
+operand {
+ name: "Conv2D_21"
+ type: FLOAT32
+ shape { dim: 3 dim: 3 dim: 3 dim: 3 }
+ filler { tag: "gaussian" arg: "0.0" arg: "0.1" }
+}
+operand {
+ name: "Conv2D_11"
+ type: FLOAT32
+ shape { dim: 1 dim: 16 dim: 16 dim: 3 }
+}
+operand {
+ name: "ReLU6"
+ type: FLOAT32
+ shape { dim: 1 dim: 16 dim: 16 dim: 3 }
+}
+operand {
+ name: "Conv2D_22"
+ type: FLOAT32
+ shape { dim: 1 dim: 16 dim: 16 dim: 3 }
+}
+operand {
+ name: "ReLU6_1"
+ type: FLOAT32
+ shape { dim: 1 dim: 16 dim: 16 dim: 3 }
+}
+operation {
+ type: "Conv2D"
+ input: "Placeholder"
+ input: "Conv2D_1"
+ input: "Conv2D_2"
+ output: "Conv2D_11"
+ conv2d_options {
+ padding: SAME
+ stride_w: 1
+ stride_h: 1
+ activation: NONE
+ dilation_w_factor: 1
+ dilation_h_factor: 1
+ }
+}
+operation {
+ type: "ReLU6"
+ input: "Conv2D_11"
+ output: "ReLU6"
+}
+operation {
+ type: "Conv2D"
+ input: "ReLU6"
+ input: "Conv2D_21"
+ input: "Conv2D_2"
+ output: "Conv2D_22"
+ conv2d_options {
+ padding: SAME
+ stride_w: 1
+ stride_h: 1
+ activation: NONE
+ dilation_w_factor: 1
+ dilation_h_factor: 1
+ }
+}
+operation {
+ type: "ReLU6"
+ input: "Conv2D_22"
+ output: "ReLU6_1"
+}
+input: "Placeholder"
+output: "ReLU6_1"
diff --git a/res/TensorFlowLiteRecipes/Net_Conv_Relu6_000/test.rule b/res/TensorFlowLiteRecipes/Net_Conv_Relu6_000/test.rule
new file mode 100644
index 000000000..34d5d663d
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Net_Conv_Relu6_000/test.rule
@@ -0,0 +1,6 @@
+# To check if ReLU6 is fused to Convolution op
+
+RULE "VERIFY_FILE_FORMAT" $(verify_file_format) '=' 1
+
+RULE "CONV_EXIST" $(op_count CONV_2D) '=' 2
+RULE "RELU6_NOT_EXIST" $(op_count RELU6) '=' 0
diff --git a/res/TensorFlowLiteRecipes/Net_DwConv_BN_000/test.recipe b/res/TensorFlowLiteRecipes/Net_DwConv_BN_000/test.recipe
new file mode 100644
index 000000000..f9769273f
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Net_DwConv_BN_000/test.recipe
@@ -0,0 +1,91 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 64 dim: 64 dim: 8 }
+}
+operand {
+ name: "filter"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 8 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "bias"
+ type: FLOAT32
+ shape { dim: 8 }
+ filler {
+ tag: "constant"
+ arg: "1.1"
+ }
+}
+operand {
+ name: "scale"
+ type: FLOAT32
+ shape { dim: 8 }
+ filler {
+ tag: "constant"
+ arg: "1.1"
+ }
+}
+operand {
+ name: "shift"
+ type: FLOAT32
+ shape { dim: 8 }
+ filler {
+ tag: "constant"
+ arg: "1.1"
+ }
+}
+operand {
+ name: "dwout"
+ type: FLOAT32
+ shape { dim: 1 dim: 64 dim: 64 dim: 8 }
+}
+operand {
+ name: "mulout"
+ type: FLOAT32
+ shape { dim: 1 dim: 64 dim: 64 dim: 8 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 64 dim: 64 dim: 8 }
+}
+operation {
+ type: "DepthwiseConv2D"
+ depthwiseconv2d_options {
+ padding: SAME
+ stride_w: 1
+ stride_h: 1
+ depth_multiplier: 1
+ activation : NONE
+ }
+ input: "ifm"
+ input: "filter"
+ input: "bias"
+ output: "dwout"
+}
+operation {
+ type: "Mul"
+ input: "dwout"
+ input: "scale"
+ output: "mulout"
+ mul_options {
+ activation: NONE
+ }
+}
+operation {
+ type: "Add"
+ input: "mulout"
+ input: "shift"
+ output: "ofm"
+ add_options {
+ activation: RELU6
+ }
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Net_DwConv_BN_000/test.rule b/res/TensorFlowLiteRecipes/Net_DwConv_BN_000/test.rule
new file mode 100644
index 000000000..eb0cba835
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Net_DwConv_BN_000/test.rule
@@ -0,0 +1,7 @@
+# To check if BatchNorm op(mul + add) is fused to Depthwise Convolution op
+
+RULE "VERIFY_FILE_FORMAT" $(verify_file_format) '=' 1
+
+RULE "DWCONV_EXIST" $(op_count DEPTHWISE_CONV_2D) '=' 1
+RULE "NO_MUL" $(op_count MUL) '=' 0
+RULE "NO_ADD" $(op_count ADD) '=' 0
diff --git a/res/TensorFlowLiteRecipes/Net_DwConv_BN_001/test.recipe b/res/TensorFlowLiteRecipes/Net_DwConv_BN_001/test.recipe
new file mode 100644
index 000000000..4bbfd841c
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Net_DwConv_BN_001/test.recipe
@@ -0,0 +1,91 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 64 dim: 64 dim: 8 }
+}
+operand {
+ name: "filter"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 8 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "bias"
+ type: FLOAT32
+ shape { dim: 8 }
+ filler {
+ tag: "constant"
+ arg: "1.1"
+ }
+}
+operand {
+ name: "scale"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 1 dim: 8 }
+ filler {
+ tag: "constant"
+ arg: "1.1"
+ }
+}
+operand {
+ name: "shift"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 1 dim: 8 }
+ filler {
+ tag: "constant"
+ arg: "1.1"
+ }
+}
+operand {
+ name: "dwout"
+ type: FLOAT32
+ shape { dim: 1 dim: 64 dim: 64 dim: 8 }
+}
+operand {
+ name: "mulout"
+ type: FLOAT32
+ shape { dim: 1 dim: 64 dim: 64 dim: 8 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 64 dim: 64 dim: 8 }
+}
+operation {
+ type: "DepthwiseConv2D"
+ depthwiseconv2d_options {
+ padding: SAME
+ stride_w: 1
+ stride_h: 1
+ depth_multiplier: 1
+ activation : NONE
+ }
+ input: "ifm"
+ input: "filter"
+ input: "bias"
+ output: "dwout"
+}
+operation {
+ type: "Mul"
+ input: "dwout"
+ input: "scale"
+ output: "mulout"
+ mul_options {
+ activation: NONE
+ }
+}
+operation {
+ type: "Add"
+ input: "mulout"
+ input: "shift"
+ output: "ofm"
+ add_options {
+ activation: RELU6
+ }
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Net_DwConv_BN_001/test.rule b/res/TensorFlowLiteRecipes/Net_DwConv_BN_001/test.rule
new file mode 100644
index 000000000..eb0cba835
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Net_DwConv_BN_001/test.rule
@@ -0,0 +1,7 @@
+# To check if BatchNorm op(mul + add) is fused to Depthwise Convolution op
+
+RULE "VERIFY_FILE_FORMAT" $(verify_file_format) '=' 1
+
+RULE "DWCONV_EXIST" $(op_count DEPTHWISE_CONV_2D) '=' 1
+RULE "NO_MUL" $(op_count MUL) '=' 0
+RULE "NO_ADD" $(op_count ADD) '=' 0
diff --git a/res/TensorFlowLiteRecipes/Net_InstanceNorm_002/test.recipe b/res/TensorFlowLiteRecipes/Net_InstanceNorm_002/test.recipe
index 92087829c..a79517484 100644
--- a/res/TensorFlowLiteRecipes/Net_InstanceNorm_002/test.recipe
+++ b/res/TensorFlowLiteRecipes/Net_InstanceNorm_002/test.recipe
@@ -18,7 +18,7 @@ operand {
name: "sequential/instance_normalization/stack"
type: INT32
shape {
- dim: 5
+ dim: 4
}
filler {
tag: "explicit"
@@ -26,7 +26,6 @@ operand {
arg: "32"
arg: "32"
arg: "8"
- arg: "1"
}
}
operand {
@@ -51,7 +50,6 @@ operand {
dim: 1
dim: 1
dim: 8
- dim: 1
}
filler {
tag: "explicit"
@@ -73,7 +71,6 @@ operand {
dim: 1
dim: 1
dim: 8
- dim: 1
}
filler {
tag: "explicit"
@@ -101,13 +98,12 @@ operand {
name: "sequential/instance_normalization/moments/variance/reduction_indices"
type: INT32
shape {
- dim: 3
+ dim: 2
}
filler {
tag: "explicit"
arg: "1"
arg: "2"
- arg: "4"
}
}
operand {
@@ -118,7 +114,6 @@ operand {
dim: 32
dim: 32
dim: 8
- dim: 1
}
}
operand {
@@ -129,7 +124,6 @@ operand {
dim: 1
dim: 1
dim: 8
- dim: 1
}
}
operand {
@@ -140,7 +134,6 @@ operand {
dim: 32
dim: 32
dim: 8
- dim: 1
}
}
operand {
@@ -151,7 +144,6 @@ operand {
dim: 1
dim: 1
dim: 8
- dim: 1
}
}
operand {
@@ -162,7 +154,6 @@ operand {
dim: 1
dim: 1
dim: 8
- dim: 1
}
}
operand {
@@ -173,7 +164,6 @@ operand {
dim: 1
dim: 1
dim: 8
- dim: 1
}
}
operand {
@@ -184,7 +174,6 @@ operand {
dim: 1
dim: 1
dim: 8
- dim: 1
}
}
operand {
@@ -195,7 +184,6 @@ operand {
dim: 32
dim: 32
dim: 8
- dim: 1
}
}
operand {
@@ -206,7 +194,6 @@ operand {
dim: 1
dim: 1
dim: 8
- dim: 1
}
}
operand {
@@ -217,7 +204,6 @@ operand {
dim: 1
dim: 1
dim: 8
- dim: 1
}
}
operand {
@@ -228,7 +214,6 @@ operand {
dim: 32
dim: 32
dim: 8
- dim: 1
}
}
operand {
@@ -242,14 +227,8 @@ operand {
}
}
operation {
- type: "Reshape"
- input: "input_layer"
- input: "sequential/instance_normalization/stack"
- output: "sequential/instance_normalization/Reshape"
-}
-operation {
type: "Mean"
- input: "sequential/instance_normalization/Reshape"
+ input: "input_layer"
input: "sequential/instance_normalization/moments/variance/reduction_indices"
output: "sequential/instance_normalization/moments/mean"
mean_options {
@@ -258,7 +237,7 @@ operation {
}
operation {
type: "SquaredDifference"
- input: "sequential/instance_normalization/Reshape"
+ input: "input_layer"
input: "sequential/instance_normalization/moments/mean"
output: "sequential/instance_normalization/moments/SquaredDifference"
}
@@ -296,7 +275,7 @@ operation {
}
operation {
type: "Mul"
- input: "sequential/instance_normalization/Reshape"
+ input: "input_layer"
input: "sequential/instance_normalization/batchnorm/mul"
output: "sequential/instance_normalization/batchnorm/mul_1"
mul_options {
@@ -330,11 +309,5 @@ operation {
activation: NONE
}
}
-operation {
- type: "Reshape"
- input: "sequential/instance_normalization/batchnorm/add_1"
- input: "sequential/instance_normalization/Shape"
- output: "Identity"
-}
input: "input_layer"
-output: "Identity"
+output: "sequential/instance_normalization/batchnorm/add_1"
diff --git a/res/TensorFlowLiteRecipes/Net_InstanceNorm_002/test.rule b/res/TensorFlowLiteRecipes/Net_InstanceNorm_002/test.rule
index 650827f4e..d6e47712f 100644
--- a/res/TensorFlowLiteRecipes/Net_InstanceNorm_002/test.rule
+++ b/res/TensorFlowLiteRecipes/Net_InstanceNorm_002/test.rule
@@ -3,6 +3,6 @@
RULE "VERIFY_FILE_FORMAT" $(verify_file_format) '=' 1
RULE "INSTANCE_NORM_EXIST" $(op_count INSTANCE_NORM) '=' 1
-RULE "RESHAPE_EXIST" $(op_count RESHAPE) '=' 3
+RULE "RESHAPE_EXIST" $(op_count RESHAPE) '<=' 3
RULE "NO_ADD" $(op_count ADD) '=' 0
RULE "NO_MUL" $(op_count MUL) '=' 0
diff --git a/res/TensorFlowLiteRecipes/Net_Maximum_Minimum_000/test.recipe b/res/TensorFlowLiteRecipes/Net_Maximum_Minimum_000/test.recipe
new file mode 100644
index 000000000..e1d3c0a09
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Net_Maximum_Minimum_000/test.recipe
@@ -0,0 +1,86 @@
+operand {
+ name: "Const"
+ type: FLOAT32
+ shape {
+ }
+ filler {
+ tag: "explicit"
+ arg: "6"
+ }
+ quant {
+ quantized_dimension: 0
+ }
+ is_variable: false
+}
+operand {
+ name: "Const_1"
+ type: FLOAT32
+ shape {
+ }
+ filler {
+ tag: "explicit"
+ arg: "0"
+ }
+ quant {
+ quantized_dimension: 0
+ }
+ is_variable: false
+}
+operand {
+ name: "Hole"
+ type: FLOAT32
+ shape {
+ dim: 1
+ dim: 3
+ dim: 3
+ dim: 4
+ }
+ quant {
+ min: 0
+ max: 255
+ quantized_dimension: 0
+ }
+ is_variable: false
+}
+operand {
+ name: "Maximum"
+ type: FLOAT32
+ shape {
+ dim: 1
+ dim: 3
+ dim: 3
+ dim: 4
+ }
+ quant {
+ quantized_dimension: 0
+ }
+ is_variable: false
+}
+operand {
+ name: "Minimum"
+ type: FLOAT32
+ shape {
+ dim: 1
+ dim: 3
+ dim: 3
+ dim: 4
+ }
+ quant {
+ quantized_dimension: 0
+ }
+ is_variable: false
+}
+operation {
+ type: "Minimum"
+ input: "Hole"
+ input: "Const"
+ output: "Minimum"
+}
+operation {
+ type: "Maximum"
+ input: "Minimum"
+ input: "Const_1"
+ output: "Maximum"
+}
+input: "Hole"
+output: "Maximum"
diff --git a/res/TensorFlowLiteRecipes/Net_Maximum_Minimum_000/test.rule b/res/TensorFlowLiteRecipes/Net_Maximum_Minimum_000/test.rule
new file mode 100644
index 000000000..9d6340727
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Net_Maximum_Minimum_000/test.rule
@@ -0,0 +1,7 @@
+# To check if Maximum and Minimum are fused to Relu6.
+
+RULE "VERIFY_FILE_FORMAT" $(verify_file_format) '=' 1
+
+RULE "RELU6_EXIST" $(op_count RELU6) '=' 1
+RULE "NO_MAXIMUM" $(op_count MAXIMUM) '=' 0
+RULE "NO_MINIMUM" $(op_count MINIMUM) '=' 0
diff --git a/res/TensorFlowLiteRecipes/Net_Preactivation_BN_000/test.recipe b/res/TensorFlowLiteRecipes/Net_Preactivation_BN_000/test.recipe
index c12ce9d64..3658a2bff 100644
--- a/res/TensorFlowLiteRecipes/Net_Preactivation_BN_000/test.recipe
+++ b/res/TensorFlowLiteRecipes/Net_Preactivation_BN_000/test.recipe
@@ -7,11 +7,6 @@ operand {
dim: 4
dim: 16
}
- filler {
- tag: "gaussian"
- arg: "0.0"
- arg: "0.1"
- }
}
operand {
name: "Weights1"
diff --git a/res/TensorFlowLiteRecipes/Net_Reshape_Neg_000/test.recipe b/res/TensorFlowLiteRecipes/Net_Reshape_Neg_000/test.recipe
new file mode 100644
index 000000000..51cf3b4ca
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Net_Reshape_Neg_000/test.recipe
@@ -0,0 +1,35 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 2 dim: 3 dim: 6 }
+}
+operand {
+ name: "shape1"
+ type: INT32
+ shape { dim: 2 }
+ filler { tag: "explicit" arg: "6" arg: "6" }
+}
+operand {
+ name: "reshape_out"
+ type: FLOAT32
+ shape { dim: 6 dim: 6 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 6 dim: 6 }
+}
+operation {
+ type: "Reshape"
+ input: "ifm"
+ input: "shape1"
+ output: "reshape_out"
+}
+operation {
+ type: "Neg"
+ input: "reshape_out"
+ output: "ofm"
+}
+
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Net_Reshape_Reshape_000/test.recipe b/res/TensorFlowLiteRecipes/Net_Reshape_Reshape_000/test.recipe
new file mode 100644
index 000000000..2acb2e71b
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Net_Reshape_Reshape_000/test.recipe
@@ -0,0 +1,42 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 2 dim: 3 dim: 6 }
+}
+operand {
+ name: "shape1"
+ type: INT32
+ shape { dim: 2 }
+ filler { tag: "explicit" arg: "6" arg: "6" }
+}
+operand {
+ name: "shape2"
+ type: INT32
+ shape { dim: 3 }
+ filler { tag: "explicit" arg: "6" arg: "2" arg: "3" }
+}
+operand {
+ name: "reshape_out"
+ type: FLOAT32
+ shape { dim: 6 dim: 6 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 6 dim: 2 dim: 3 }
+}
+operation {
+ type: "Reshape"
+ input: "ifm"
+ input: "shape1"
+ output: "reshape_out"
+}
+operation {
+ type: "Reshape"
+ input: "reshape_out"
+ input: "shape2"
+ output: "ofm"
+}
+
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Net_Reshape_Reshape_000/test.rule b/res/TensorFlowLiteRecipes/Net_Reshape_Reshape_000/test.rule
new file mode 100644
index 000000000..9a70601c8
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Net_Reshape_Reshape_000/test.rule
@@ -0,0 +1,5 @@
+# To check if the redundant Reshape is removed.
+
+RULE "VERIFY_FILE_FORMAT" $(verify_file_format) '=' 1
+
+RULE "RESHAPE_EXIST" $(op_count RESHAPE) '=' 1
diff --git a/res/TensorFlowLiteRecipes/Net_Squeeze_Squeeze_000/test.recipe b/res/TensorFlowLiteRecipes/Net_Squeeze_Squeeze_000/test.recipe
new file mode 100644
index 000000000..b84058b0e
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Net_Squeeze_Squeeze_000/test.recipe
@@ -0,0 +1,29 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 16 dim: 1 dim: 1 }
+}
+operand {
+ name: "t1"
+ type: FLOAT32
+ shape { dim: 1 dim: 16 dim: 1 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 16 }
+}
+operation {
+ type: "Squeeze"
+ squeeze_options { squeeze_dim: 3 }
+ input: "ifm"
+ output: "t1"
+}
+operation {
+ type: "Squeeze"
+ squeeze_options { squeeze_dim: 2 }
+ input: "t1"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Net_Squeeze_Squeeze_000/test.rule b/res/TensorFlowLiteRecipes/Net_Squeeze_Squeeze_000/test.rule
new file mode 100644
index 000000000..66a105a73
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Net_Squeeze_Squeeze_000/test.rule
@@ -0,0 +1,6 @@
+# To check if Squeeze is substituted with a Reshape op
+
+RULE "VERIFY_FILE_FORMAT" $(verify_file_format) '=' 1
+
+RULE "SQUEEZE_COUNT" $(op_count SQUEEZE) '=' 0
+RULE "RESHAPE_COUNT" $(op_count RESHAPE) '=' 2
diff --git a/res/TensorFlowLiteRecipes/Net_StridedSlice_StridedSlice_000/test.recipe b/res/TensorFlowLiteRecipes/Net_StridedSlice_StridedSlice_000/test.recipe
new file mode 100644
index 000000000..04c0e9084
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Net_StridedSlice_StridedSlice_000/test.recipe
@@ -0,0 +1,77 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 2 dim: 4 }
+}
+operand {
+ name: "begin"
+ type: INT32
+ shape { dim: 3 }
+ filler { tag: "explicit" arg: "0" arg: "0" arg: "0" }
+}
+operand {
+ name: "end"
+ type: INT32
+ shape { dim: 3 }
+ filler { tag: "explicit" arg: "1" arg: "2" arg: "4" }
+}
+operand {
+ name: "strides"
+ type: INT32
+ shape { dim: 3 }
+ filler { tag: "explicit" arg: "1" arg: "1" arg: "1" }
+}
+operand {
+ name: "output_1"
+ type: FLOAT32
+ shape { dim: 1 dim: 2 dim: 4 }
+}
+operation {
+ type: "StridedSlice"
+ strided_slice_options {
+ begin_mask: 0
+ end_mask: 0
+ ellipsis_mask: 0
+ new_axis_mask: 0
+ shrink_axis_mask: 0
+ }
+ input: "ifm"
+ input: "begin"
+ input: "end"
+ input: "strides"
+ output: "output_1"
+}
+operand {
+ name: "begin_2"
+ type: INT32
+ shape { dim: 3 }
+ filler { tag: "explicit" arg: "0" arg: "0" arg: "0" }
+}
+operand {
+ name: "end_2"
+ type: INT32
+ shape { dim: 3 }
+ filler { tag: "explicit" arg: "0" arg: "1" arg: "0" }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim:1 dim: 4}
+}
+operation {
+ type: "StridedSlice"
+ strided_slice_options {
+ begin_mask: 5
+ end_mask: 5
+ ellipsis_mask: 0
+ new_axis_mask: 0
+ shrink_axis_mask: 2
+ }
+ input: "output_1"
+ input: "begin_2"
+ input: "end_2"
+ input: "strides"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Net_StridedSlice_StridedSlice_000/test.rule b/res/TensorFlowLiteRecipes/Net_StridedSlice_StridedSlice_000/test.rule
new file mode 100644
index 000000000..f1a660d19
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Net_StridedSlice_StridedSlice_000/test.rule
@@ -0,0 +1,5 @@
+# To check if the unnecessary StridedSlice is removed.
+
+RULE "VERIFY_FILE_FORMAT" $(verify_file_format) '=' 1
+
+RULE "STRIDEDSLICE_EXIST" $(op_count STRIDEDSLICE) '=' 1
diff --git a/res/TensorFlowLiteRecipes/Net_TConv_BN_002/test.recipe b/res/TensorFlowLiteRecipes/Net_TConv_BN_002/test.recipe
new file mode 100644
index 000000000..e40fe4f59
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Net_TConv_BN_002/test.recipe
@@ -0,0 +1,156 @@
+# Tconv with asymmetric filter + BN + Relu6
+operand {
+ name: "Hole"
+ type: FLOAT32
+ shape {
+ dim: 1
+ dim: 1
+ dim: 1
+ dim: 2
+ }
+ quant {
+ quantized_dimension: 0
+ }
+ is_variable: false
+}
+operand {
+ name: "conv2d_transpose/input_sizes"
+ type: INT32
+ shape {
+ dim: 4
+ }
+ filler {
+ tag: "explicit"
+ arg: "1"
+ arg: "5"
+ arg: "1"
+ arg: "2"
+ }
+ quant {
+ quantized_dimension: 0
+ }
+ is_variable: false
+}
+operand {
+ name: "FusedBatchNormV3"
+ type: FLOAT32
+ shape {
+ dim: 2
+ }
+ filler {
+ tag: "explicit"
+ arg: "-2.04724"
+ arg: "-7.80109"
+ }
+ quant {
+ quantized_dimension: 0
+ }
+ is_variable: false
+}
+operand {
+ name: "FusedBatchNormV3;conv2d_transpose;conv2d_transpose/input_sizes"
+ type: FLOAT32
+ shape {
+ dim: 2
+ dim: 5
+ dim: 1
+ dim: 2
+ }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "0.1"
+ }
+ quant {
+ quantized_dimension: 0
+ }
+ is_variable: false
+}
+operand {
+ name: "FusedBatchNormV3;conv2d_transpose;conv2d_transpose/input_sizes2"
+ type: FLOAT32
+ shape {
+ dim: 1
+ dim: 5
+ dim: 1
+ dim: 2
+ }
+ quant {
+ quantized_dimension: 0
+ }
+ is_variable: false
+}
+operand {
+ name: "FusedBatchNormV3_mul_0"
+ type: FLOAT32
+ shape {
+ dim: 1
+ dim: 5
+ dim: 1
+ dim: 2
+ }
+ quant {
+ quantized_dimension: 0
+ }
+}
+operand {
+ name: "FusedBatchNormV3_mul_0_param"
+ type: FLOAT32
+ shape {
+ dim: 2
+ }
+ filler {
+ tag: "explicit"
+ arg: "2.00834"
+ arg: "1.00344"
+ }
+ quant {
+ quantized_dimension: 0
+ }
+}
+operand {
+ name: "Relu6"
+ type: FLOAT32
+ shape {
+ dim: 1
+ dim: 5
+ dim: 1
+ dim: 2
+ }
+ quant {
+ quantized_dimension: 0
+ }
+ is_variable: false
+}
+operation {
+ type: "TransposeConv"
+ input: "conv2d_transpose/input_sizes"
+ input: "FusedBatchNormV3;conv2d_transpose;conv2d_transpose/input_sizes"
+ input: "Hole"
+ output: "FusedBatchNormV3;conv2d_transpose;conv2d_transpose/input_sizes2"
+ transpose_conv_options {
+ padding: VALID
+ stride_w: 1
+ stride_h: 1
+ }
+}
+operation {
+ type: "Mul"
+ input: "FusedBatchNormV3;conv2d_transpose;conv2d_transpose/input_sizes2"
+ input: "FusedBatchNormV3_mul_0_param"
+ output: "FusedBatchNormV3_mul_0"
+ mul_options {
+ activation: NONE
+ }
+}
+operation {
+ type: "Add"
+ input: "FusedBatchNormV3_mul_0"
+ input: "FusedBatchNormV3"
+ output: "Relu6"
+ add_options {
+ activation: RELU6
+ }
+}
+input: "Hole"
+output: "Relu6"
diff --git a/res/TensorFlowLiteRecipes/Net_TConv_BN_002/test.rule b/res/TensorFlowLiteRecipes/Net_TConv_BN_002/test.rule
new file mode 100644
index 000000000..dfc392758
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Net_TConv_BN_002/test.rule
@@ -0,0 +1,8 @@
+# To check if BatchNorm op(mul + add) is fused to Transposed Convolution op
+
+RULE "VERIFY_FILE_FORMAT" $(verify_file_format) '=' 1
+
+RULE "TCONV_EXIST" $(op_count TRANSPOSE_CONV) '=' 1
+RULE "RELU6_EXIST" $(op_count RELU6) '=' 1
+RULE "NO_MUL" $(op_count MUL) '=' 0
+RULE "NO_ADD" $(op_count ADD) '=' 0
diff --git a/res/TensorFlowLiteRecipes/Part_Add_Sqrt_000/test.recipe b/res/TensorFlowLiteRecipes/Part_Add_Sqrt_000/test.recipe
new file mode 100644
index 000000000..1125246d1
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Part_Add_Sqrt_000/test.recipe
@@ -0,0 +1,48 @@
+operand {
+ name: "ifm1"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ifm2"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "add"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ofm1"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ofm2"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operation {
+ type: "Add"
+ add_options {
+ activation: NONE
+ }
+ input: "ifm1"
+ input: "ifm2"
+ output: "add"
+}
+operation {
+ type: "Sqrt"
+ input: "add"
+ output: "ofm1"
+}
+operation {
+ type: "Sqrt"
+ input: "add"
+ output: "ofm2"
+}
+input: "ifm1"
+input: "ifm2"
+output: "ofm1"
+output: "ofm2"
diff --git a/res/TensorFlowLiteRecipes/Part_Add_Sqrt_Rsqrt_000/test.recipe b/res/TensorFlowLiteRecipes/Part_Add_Sqrt_Rsqrt_000/test.recipe
new file mode 100644
index 000000000..c9cee9960
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Part_Add_Sqrt_Rsqrt_000/test.recipe
@@ -0,0 +1,68 @@
+operand {
+ name: "ifm1"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ifm2"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "add"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "sqrt1"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "sqrt2"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ofm1"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ofm2"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operation {
+ type: "Add"
+ add_options {
+ activation: NONE
+ }
+ input: "ifm1"
+ input: "ifm2"
+ output: "add"
+}
+operation {
+ type: "Sqrt"
+ input: "add"
+ output: "sqrt1"
+}
+operation {
+ type: "Sqrt"
+ input: "add"
+ output: "sqrt2"
+}
+operation {
+ type: "Rsqrt"
+ input: "sqrt1"
+ output: "ofm1"
+}
+operation {
+ type: "Rsqrt"
+ input: "sqrt2"
+ output: "ofm2"
+}
+input: "ifm1"
+input: "ifm2"
+output: "ofm1"
+output: "ofm2"
diff --git a/res/TensorFlowLiteRecipes/Part_Add_Sub_000/test.recipe b/res/TensorFlowLiteRecipes/Part_Add_Sub_000/test.recipe
new file mode 100644
index 000000000..8cd878ac3
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Part_Add_Sub_000/test.recipe
@@ -0,0 +1,67 @@
+operand {
+ name: "ifm1"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ifm2"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ifm3"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ifm4"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "add1"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "add2"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operation {
+ type: "Add"
+ add_options {
+ activation: NONE
+ }
+ input: "ifm1"
+ input: "ifm2"
+ output: "add1"
+}
+operation {
+ type: "Add"
+ add_options {
+ activation: NONE
+ }
+ input: "add1"
+ input: "ifm3"
+ output: "add2"
+}
+operation {
+ type: "Sub"
+ sub_options {
+ activation: NONE
+ }
+ input: "add2"
+ input: "ifm4"
+ output: "ofm"
+}
+input: "ifm1"
+input: "ifm2"
+input: "ifm3"
+input: "ifm4"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_000/test.recipe b/res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_000/test.recipe
new file mode 100644
index 000000000..e0a6fe2aa
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_000/test.recipe
@@ -0,0 +1,27 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "sqrt"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operation {
+ type: "Sqrt"
+ input: "ifm"
+ output: "sqrt"
+}
+operation {
+ type: "Rsqrt"
+ input: "sqrt"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_001/test.recipe b/res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_001/test.recipe
new file mode 100644
index 000000000..89f74772e
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_001/test.recipe
@@ -0,0 +1,47 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "sqrt"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "sqrt2"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "rsqrt"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operation {
+ type: "Sqrt"
+ input: "ifm"
+ output: "sqrt"
+}
+operation {
+ type: "Sqrt"
+ input: "sqrt"
+ output: "sqrt2"
+}
+operation {
+ type: "Rsqrt"
+ input: "sqrt2"
+ output: "rsqrt"
+}
+operation {
+ type: "Rsqrt"
+ input: "rsqrt"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_002/test.recipe b/res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_002/test.recipe
new file mode 100644
index 000000000..2e7e13240
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_002/test.recipe
@@ -0,0 +1,47 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "sqrt"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "rsqrt"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "sqrt2"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operation {
+ type: "Sqrt"
+ input: "ifm"
+ output: "sqrt"
+}
+operation {
+ type: "Rsqrt"
+ input: "sqrt"
+ output: "rsqrt"
+}
+operation {
+ type: "Sqrt"
+ input: "rsqrt"
+ output: "sqrt2"
+}
+operation {
+ type: "Rsqrt"
+ input: "sqrt2"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_003/test.recipe b/res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_003/test.recipe
new file mode 100644
index 000000000..1cd57ae12
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_003/test.recipe
@@ -0,0 +1,47 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "sqrt"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "rsqrt"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "rsqrt2"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operation {
+ type: "Sqrt"
+ input: "ifm"
+ output: "sqrt"
+}
+operation {
+ type: "Rsqrt"
+ input: "sqrt"
+ output: "rsqrt"
+}
+operation {
+ type: "Rsqrt"
+ input: "rsqrt"
+ output: "rsqrt2"
+}
+operation {
+ type: "Sqrt"
+ input: "rsqrt2"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_004/test.recipe b/res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_004/test.recipe
new file mode 100644
index 000000000..3b4458480
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_004/test.recipe
@@ -0,0 +1,38 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "sqrt"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ofm1"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ofm2"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operation {
+ type: "Sqrt"
+ input: "ifm"
+ output: "sqrt"
+}
+operation {
+ type: "Rsqrt"
+ input: "sqrt"
+ output: "ofm1"
+}
+operation {
+ type: "Rsqrt"
+ input: "sqrt"
+ output: "ofm2"
+}
+input: "ifm"
+output: "ofm1"
+output: "ofm2"
diff --git a/res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_Add_000/test.recipe b/res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_Add_000/test.recipe
new file mode 100644
index 000000000..6618fff22
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_Add_000/test.recipe
@@ -0,0 +1,56 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "rsqrt"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "sqrt"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "rsqrt2"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "add"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operation {
+ type: "Rsqrt"
+ input: "ifm"
+ output: "rsqrt"
+}
+operation {
+ type: "Sqrt"
+ input: "rsqrt"
+ output: "sqrt"
+}
+operation {
+ type: "Rsqrt"
+ input: "rsqrt"
+ output: "rsqrt2"
+}
+operation {
+ type: "Add"
+ add_options {
+ activation: NONE
+ }
+ input: "sqrt"
+ input: "rsqrt2"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_Add_001/test.recipe b/res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_Add_001/test.recipe
new file mode 100644
index 000000000..dd3f69bea
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_Add_001/test.recipe
@@ -0,0 +1,61 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "rsqrt"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "sqrt"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "rsqrt2"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "rsqrt3"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operation {
+ type: "Rsqrt"
+ input: "ifm"
+ output: "rsqrt"
+}
+operation {
+ type: "Sqrt"
+ input: "rsqrt"
+ output: "sqrt"
+}
+operation {
+ type: "Rsqrt"
+ input: "rsqrt"
+ output: "rsqrt2"
+}
+operation {
+ type: "Rsqrt"
+ input: "rsqrt2"
+ output: "rsqrt3"
+}
+operation {
+ type: "Add"
+ add_options {
+ activation: NONE
+ }
+ input: "sqrt"
+ input: "rsqrt3"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_Add_002/test.recipe b/res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_Add_002/test.recipe
new file mode 100644
index 000000000..23b7458c9
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_Add_002/test.recipe
@@ -0,0 +1,71 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "rsqrt"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "sqrt"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "rsqrt2"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "rsqrt3"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "rsqrt4"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operation {
+ type: "Rsqrt"
+ input: "ifm"
+ output: "rsqrt"
+}
+operation {
+ type: "Sqrt"
+ input: "rsqrt"
+ output: "sqrt"
+}
+operation {
+ type: "Rsqrt"
+ input: "rsqrt"
+ output: "rsqrt2"
+}
+operation {
+ type: "Rsqrt"
+ input: "sqrt"
+ output: "rsqrt3"
+}
+operation {
+ type: "Rsqrt"
+ input: "rsqrt2"
+ output: "rsqrt4"
+}
+operation {
+ type: "Add"
+ add_options {
+ activation: NONE
+ }
+ input: "rsqrt3"
+ input: "rsqrt4"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_Add_003/test.recipe b/res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_Add_003/test.recipe
new file mode 100644
index 000000000..c2dae2e86
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_Add_003/test.recipe
@@ -0,0 +1,47 @@
+operand {
+ name: "ifm1"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ifm2"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "rsqrt"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "sqrt"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operation {
+ type: "Rsqrt"
+ input: "ifm1"
+ output: "rsqrt"
+}
+operation {
+ type: "Sqrt"
+ input: "ifm2"
+ output: "sqrt"
+}
+operation {
+ type: "Add"
+ add_options {
+ activation: NONE
+ }
+ input: "rsqrt"
+ input: "sqrt"
+ output: "ofm"
+}
+input: "ifm1"
+input: "ifm2"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_Add_004/test.recipe b/res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_Add_004/test.recipe
new file mode 100644
index 000000000..c1693f72e
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Part_Sqrt_Rsqrt_Add_004/test.recipe
@@ -0,0 +1,41 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "rsqrt"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "sqrt"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operation {
+ type: "Rsqrt"
+ input: "ifm"
+ output: "rsqrt"
+}
+operation {
+ type: "Sqrt"
+ input: "rsqrt"
+ output: "sqrt"
+}
+operation {
+ type: "Add"
+ add_options {
+ activation: NONE
+ }
+ input: "rsqrt"
+ input: "sqrt"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Slice_001/test.recipe b/res/TensorFlowLiteRecipes/Slice_001/test.recipe
new file mode 100644
index 000000000..20f1baab3
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Slice_001/test.recipe
@@ -0,0 +1,37 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 3 dim: 2 dim: 3 }
+}
+operand {
+ name: "begin"
+ type: INT32
+ shape { dim: 3 }
+ filler {
+ tag: "explicit"
+ arg: "-1" arg: "0" arg: "0"
+ }
+}
+operand {
+ name: "size"
+ type: INT32
+ shape { dim: 3 }
+ filler {
+ tag: "explicit"
+ arg: "1" arg: "1" arg: "3"
+ }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 3 }
+}
+operation {
+ type: "Slice"
+ input: "ifm"
+ input: "begin"
+ input: "size"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Slice_001/test.reverse b/res/TensorFlowLiteRecipes/Slice_001/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Slice_001/test.reverse
diff --git a/res/TensorFlowLiteRecipes/Squeeze_001/test.recipe b/res/TensorFlowLiteRecipes/Squeeze_001/test.recipe
new file mode 100644
index 000000000..9ac441574
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Squeeze_001/test.recipe
@@ -0,0 +1,18 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 4 dim: 5 dim: 1 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 4 dim: 5 }
+}
+operation {
+ type: "Squeeze"
+ squeeze_options { }
+ input: "ifm"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Squeeze_001/test.reverse b/res/TensorFlowLiteRecipes/Squeeze_001/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Squeeze_001/test.reverse
diff --git a/res/TensorFlowPythonExamples/examples/Bidirectional_LSTM/__init__.py b/res/TensorFlowPythonExamples/examples/Bidirectional_LSTM/__init__.py
new file mode 100644
index 000000000..d28034bf9
--- /dev/null
+++ b/res/TensorFlowPythonExamples/examples/Bidirectional_LSTM/__init__.py
@@ -0,0 +1,6 @@
+import tensorflow as tf
+
+in_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=[28, 28, 3], name="Hole")
+
+op_uni_ = tf.compat.v1.keras.layers.LSTM(1, time_major=False, return_sequences=True)
+op_bidi_ = tf.compat.v1.keras.layers.Bidirectional(op_uni_)(in_)
diff --git a/res/TensorFlowPythonExamples/examples/fake_quant_with_min_max_vars/__init__.py b/res/TensorFlowPythonExamples/examples/fake_quant_with_min_max_vars/__init__.py
new file mode 100644
index 000000000..c4c928466
--- /dev/null
+++ b/res/TensorFlowPythonExamples/examples/fake_quant_with_min_max_vars/__init__.py
@@ -0,0 +1,27 @@
+import tensorflow as tf
+import numpy as np
+
+tf.compat.v1.disable_eager_execution()
+
+in_ = tf.compat.v1.placeholder(tf.float32, shape=(1, 32, 32, 3), name="Hole")
+
+filters = np.random.uniform(low=-1., high=1, size=[5, 5, 3, 32]).astype(np.float32)
+strides = (1, 2, 2, 1)
+cv_ = tf.compat.v1.nn.conv2d(in_, filters, strides, "VALID", data_format="NHWC")
+
+op_ = tf.compat.v1.fake_quant_with_min_max_vars(cv_, 0.0, 1.0, 8, False)
+'''
+NOTE:
+'fake_quant_with_min_max_vars' is converted to QUANTIZE-DEQUANTIZE in tflite.
+To produce tflite with FAKE_QUANT Op, you need to change tf2tfliteV2.py with
+
+converter.experimental_new_converter = False
+
+and then run
+
+python3 ../../compiler/tf2tfliteV2/tf2tfliteV2.py --v2 --graph_def \
+-i ./fake_quant_with_min_max_vars.pbtxt \
+-o ./fake_quant_with_min_max_vars.tflite \
+-I Hole \
+-O FakeQuantWithMinMaxVars
+'''
diff --git a/res/TensorFlowPythonModels/examples/minimum-maximum/__init__.py b/res/TensorFlowPythonModels/examples/minimum-maximum/__init__.py
new file mode 100644
index 000000000..fe074b49c
--- /dev/null
+++ b/res/TensorFlowPythonModels/examples/minimum-maximum/__init__.py
@@ -0,0 +1,15 @@
+import tensorflow as tf
+
+in_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(1, 16, 160, 160), name="Hole")
+
+upper_ = tf.compat.v1.constant(6.)
+lower_ = tf.compat.v1.constant(0.)
+
+min_ = tf.compat.v1.minimum(in_, upper_)
+max_ = tf.compat.v1.maximum(min_, lower_)
+'''
+python ../../compiler/tf2tfliteV2/tf2tfliteV2.py --v1 \
+-i minimum-maximum.pbtxt \
+-o minimum-maximum.tflite \
+-I Hole -O Maximum
+'''
diff --git a/res/TensorFlowPythonModels/tfpem.py b/res/TensorFlowPythonModels/tfpem.py
index 01627eb99..542085bb6 100644..100755
--- a/res/TensorFlowPythonModels/tfpem.py
+++ b/res/TensorFlowPythonModels/tfpem.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python
+
# TensorFlow Python Example Manager
import tensorflow as tf