summaryrefslogtreecommitdiff
path: root/res/TensorFlowLiteRecipes
diff options
context:
space:
mode:
Diffstat (limited to 'res/TensorFlowLiteRecipes')
-rw-r--r--res/TensorFlowLiteRecipes/FullyConnected_006/test.recipe29
-rw-r--r--res/TensorFlowLiteRecipes/FullyConnected_006/test.reverse0
-rw-r--r--res/TensorFlowLiteRecipes/Gather_000/test.recipe1
-rw-r--r--res/TensorFlowLiteRecipes/Gather_001/test.recipe27
-rw-r--r--res/TensorFlowLiteRecipes/Gather_001/test.reverse0
-rw-r--r--res/TensorFlowLiteRecipes/Net_Conv_QuantDequant_000/test.recipe1
-rw-r--r--res/TensorFlowLiteRecipes/Net_Gather_SparseToDense_AddV2_000/test.recipe131
-rw-r--r--res/TensorFlowLiteRecipes/Part_Add_SVDF_000/test.recipe82
-rw-r--r--res/TensorFlowLiteRecipes/Part_Mul_Sqrt_FC_nobias_000/test.recipe63
-rw-r--r--res/TensorFlowLiteRecipes/Part_Split_Add_000/test.recipe47
-rw-r--r--res/TensorFlowLiteRecipes/Quant_Add_000/test.recipe36
-rw-r--r--res/TensorFlowLiteRecipes/Quant_Add_000/test.rule10
-rw-r--r--res/TensorFlowLiteRecipes/Quant_Conv_Mul_Add_000/test.qconf.json11
-rw-r--r--res/TensorFlowLiteRecipes/Quant_Conv_Mul_Add_000/test.recipe92
-rw-r--r--res/TensorFlowLiteRecipes/Quant_Conv_Mul_Add_000/test.rule11
-rw-r--r--res/TensorFlowLiteRecipes/Quant_Conv_Mul_Add_001/test.qconf.json16
-rw-r--r--res/TensorFlowLiteRecipes/Quant_Conv_Mul_Add_001/test.recipe92
-rw-r--r--res/TensorFlowLiteRecipes/Quant_Conv_Mul_Add_001/test.rule14
-rw-r--r--res/TensorFlowLiteRecipes/Quant_Conv_Mul_Add_002/test.qconf.json16
-rw-r--r--res/TensorFlowLiteRecipes/Quant_Conv_Mul_Add_002/test.recipe88
-rw-r--r--res/TensorFlowLiteRecipes/Quant_Conv_Mul_Add_002/test.rule14
-rw-r--r--res/TensorFlowLiteRecipes/Quant_Split_Add_000/test.qconf.json11
-rw-r--r--res/TensorFlowLiteRecipes/Quant_Split_Add_000/test.recipe47
-rw-r--r--res/TensorFlowLiteRecipes/Quant_Split_Add_000/test.rule11
-rw-r--r--res/TensorFlowLiteRecipes/Quant_Split_Add_001/test.qconf.json11
-rw-r--r--res/TensorFlowLiteRecipes/Quant_Split_Add_001/test.recipe47
-rw-r--r--res/TensorFlowLiteRecipes/Quant_Split_Add_001/test.rule11
-rw-r--r--res/TensorFlowLiteRecipes/Quantize_001/test.recipe66
-rw-r--r--res/TensorFlowLiteRecipes/Quantize_001/test.reverse0
-rw-r--r--res/TensorFlowLiteRecipes/SVDF_000/test.recipe62
-rw-r--r--res/TensorFlowLiteRecipes/SVDF_000/test.reverse0
-rw-r--r--res/TensorFlowLiteRecipes/SVDF_001/test.recipe52
-rw-r--r--res/TensorFlowLiteRecipes/SVDF_001/test.reverse0
-rw-r--r--res/TensorFlowLiteRecipes/SignatureDef_MultiOut_000/test.recipe3
-rw-r--r--res/TensorFlowLiteRecipes/SignatureDef_MultiOut_001/test.recipe81
-rw-r--r--res/TensorFlowLiteRecipes/Sqrt_000/test.recipe1
36 files changed, 1180 insertions, 4 deletions
diff --git a/res/TensorFlowLiteRecipes/FullyConnected_006/test.recipe b/res/TensorFlowLiteRecipes/FullyConnected_006/test.recipe
new file mode 100644
index 000000000..b5f329b57
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/FullyConnected_006/test.recipe
@@ -0,0 +1,29 @@
+operand {
+ name: "in"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 4 }
+}
+operand {
+ name: "weight"
+ type: FLOAT32
+ shape { dim: 2 dim: 4 }
+}
+operand {
+ name: "out"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 2 }
+}
+operation {
+ type: "FullyConnected"
+ fullyconnected_options {
+ activation: NONE
+ keep_num_dims: true
+ }
+ input: "in"
+ input: "weight"
+ input: ""
+ output: "out"
+}
+input: "in"
+input: "weight"
+output: "out"
diff --git a/res/TensorFlowLiteRecipes/FullyConnected_006/test.reverse b/res/TensorFlowLiteRecipes/FullyConnected_006/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/FullyConnected_006/test.reverse
diff --git a/res/TensorFlowLiteRecipes/Gather_000/test.recipe b/res/TensorFlowLiteRecipes/Gather_000/test.recipe
index 4c6c99da6..b9b2412cf 100644
--- a/res/TensorFlowLiteRecipes/Gather_000/test.recipe
+++ b/res/TensorFlowLiteRecipes/Gather_000/test.recipe
@@ -24,5 +24,4 @@ operation {
output: "ofm"
}
input: "param"
-input: "indices"
output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Gather_001/test.recipe b/res/TensorFlowLiteRecipes/Gather_001/test.recipe
new file mode 100644
index 000000000..cc23cf11d
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Gather_001/test.recipe
@@ -0,0 +1,27 @@
+operand {
+ name: "param"
+ type: FLOAT32
+ shape { dim: 1 dim: 2 dim: 3 dim: 4 }
+}
+operand {
+ name: "indices"
+ type: INT32
+ shape { dim: 4 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 2 dim: 3 dim: 4 }
+}
+operation {
+ type: "Gather"
+ gather_options {
+ axis: 3
+ }
+ input: "param"
+ input: "indices"
+ output: "ofm"
+}
+input: "param"
+input: "indices"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Gather_001/test.reverse b/res/TensorFlowLiteRecipes/Gather_001/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Gather_001/test.reverse
diff --git a/res/TensorFlowLiteRecipes/Net_Conv_QuantDequant_000/test.recipe b/res/TensorFlowLiteRecipes/Net_Conv_QuantDequant_000/test.recipe
index fa7fa7df7..c5d387293 100644
--- a/res/TensorFlowLiteRecipes/Net_Conv_QuantDequant_000/test.recipe
+++ b/res/TensorFlowLiteRecipes/Net_Conv_QuantDequant_000/test.recipe
@@ -32,6 +32,7 @@ operand {
name: "quantize"
type: UINT8
shape { dim: 1 dim: 16 dim: 16 dim: 8 }
+ quant { scale: 1 zero_point: 128 }
}
operand {
name: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Net_Gather_SparseToDense_AddV2_000/test.recipe b/res/TensorFlowLiteRecipes/Net_Gather_SparseToDense_AddV2_000/test.recipe
new file mode 100644
index 000000000..804d293fc
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Net_Gather_SparseToDense_AddV2_000/test.recipe
@@ -0,0 +1,131 @@
+operand {
+ name: "param_gather"
+ type: INT64
+ shape { dim: 3 }
+ filler { tag: "explicit" arg: "1" arg: "2" }
+}
+operand {
+ name: "indices_gather"
+ type: INT64
+ shape { dim: 1 }
+ filler { tag: "explicit" arg: "1" arg: "2" }
+}
+operand {
+ name: "ofm_gather"
+ type: INT64
+ shape { dim: 1 }
+}
+operand {
+ name: "shape_sparse"
+ type: INT64
+ shape { dim: 1 dim: 1 }
+ filler {
+ tag: "explicit"
+ arg: "3" arg: "5"
+ }
+}
+operand {
+ name: "values_sparse"
+ type: INT64
+ shape { dim: 1 }
+ filler { tag: "explicit" arg: "1" arg: "2" }
+}
+operand {
+ name: "defalut_value_sparse"
+ type: INT64
+ shape { }
+ filler { tag: "explicit" arg: "1" arg: "2" }
+}
+operand {
+ name: "ofm_sparse"
+ type: INT64
+ shape { dim: 3 }
+}
+operand {
+ name: "add_v2_2"
+ type: INT64
+ shape { dim: 3 }
+ filler { tag: "explicit" arg: "1" arg: "2" }
+}
+operand {
+ name: "ofm_add_v2"
+ type: INT64
+ shape { dim: 3 }
+}
+operand {
+ name: "ofm_cast"
+ type: INT32
+ shape { dim: 3 }
+}
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 2 dim: 5 }
+}
+operand {
+ name: "perm"
+ type: INT32
+ shape { dim: 3 }
+ filler { tag: "explicit" arg: "0" arg: "2" arg: "1" }
+}
+operand {
+ name: "ofm_trans"
+ type: FLOAT32
+ shape { dim: 1 dim: 5 dim: 2 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 10 }
+}
+operation {
+ type: "Gather"
+ gather_options {
+ axis: 0
+ }
+ input: "param_gather"
+ input: "indices_gather"
+ output: "ofm_gather"
+}
+operation {
+ type: "SparseToDense"
+ sparse_to_dense_options {
+ validate_indices: false
+ }
+ input: "shape_sparse"
+ input: "values_sparse"
+ input: "ofm_gather"
+ input: "defalut_value_sparse"
+ output: "ofm_sparse"
+}
+operation {
+ type: "AddV2"
+ input: "ofm_sparse"
+ input: "add_v2_2"
+ output: "ofm_add_v2"
+}
+operation {
+ type: "Cast"
+ cast_options {
+ in_data_type: INT64
+ out_data_type: INT32
+ }
+ input: "ofm_add_v2"
+ output: "ofm_cast"
+}
+operation {
+ type: "Transpose"
+ transpose_options {
+ }
+ input: "ifm"
+ input: "perm"
+ output: "ofm_trans"
+}
+operation {
+ type: "Reshape"
+ input: "ofm_trans"
+ input: "ofm_cast"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Part_Add_SVDF_000/test.recipe b/res/TensorFlowLiteRecipes/Part_Add_SVDF_000/test.recipe
new file mode 100644
index 000000000..d357a059f
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Part_Add_SVDF_000/test.recipe
@@ -0,0 +1,82 @@
+operand {
+ name: "ifm1"
+ type: FLOAT32
+ shape { dim: 1 dim: 16 }
+}
+operand {
+ name: "ifm2"
+ type: FLOAT32
+ shape { dim: 1 dim: 64 }
+}
+operand {
+ name: "weight_feature"
+ type: FLOAT32
+ shape { dim: 64 dim: 16 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "weight_time"
+ type: FLOAT32
+ shape { dim: 64 dim: 8 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "bias"
+ type: FLOAT32
+ shape { dim: 64 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "input_activation_state"
+ type: FLOAT32
+ is_variable: true
+ shape { dim: 1 dim: 512 }
+}
+operand {
+ name: "svdf"
+ type: FLOAT32
+ shape { dim: 1 dim: 64 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 64 }
+}
+operation {
+ type: "SVDF"
+ svdf_options {
+ rank: 1
+ activation: RELU
+ asymmetric_quantize_inputs: false
+ }
+ input: "ifm1"
+ input: "weight_feature"
+ input: "weight_time"
+ input: "bias"
+ input: "input_activation_state"
+ output: "svdf"
+}
+operation {
+ type: "Add"
+ add_options {
+ activation: NONE
+ }
+ input: "svdf"
+ input: "ifm2"
+ output: "ofm"
+}
+input: "ifm1"
+input: "ifm2"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Part_Mul_Sqrt_FC_nobias_000/test.recipe b/res/TensorFlowLiteRecipes/Part_Mul_Sqrt_FC_nobias_000/test.recipe
new file mode 100644
index 000000000..a712d2ac3
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Part_Mul_Sqrt_FC_nobias_000/test.recipe
@@ -0,0 +1,63 @@
+operand {
+ name: "in1"
+ type: FLOAT32
+ shape { dim: 2 dim: 4 }
+}
+operand {
+ name: "in2"
+ type: FLOAT32
+ shape { dim: 2 dim: 4 }
+}
+operand {
+ name: "mul"
+ type: FLOAT32
+ shape { dim: 2 dim: 4 }
+}
+operand {
+ name: "weight"
+ type: FLOAT32
+ shape { dim: 4 dim: 4 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "sqrtout"
+ type: FLOAT32
+ shape { dim: 2 dim: 4 }
+}
+operand {
+ name: "fcout"
+ type: FLOAT32
+ shape { dim: 2 dim: 4 }
+}
+operation {
+ type: "Mul"
+ input: "in1"
+ input: "in2"
+ output: "mul"
+ mul_options {
+ activation: NONE
+ }
+}
+operation {
+ type: "Sqrt"
+ input: "mul"
+ output: "sqrtout"
+}
+operation {
+ type: "FullyConnected"
+ fullyconnected_options {
+ activation: NONE
+ }
+ input: "mul"
+ input: "weight"
+ input: ""
+ output: "fcout"
+}
+input: "in1"
+input: "in2"
+output: "fcout"
+output: "sqrtout"
diff --git a/res/TensorFlowLiteRecipes/Part_Split_Add_000/test.recipe b/res/TensorFlowLiteRecipes/Part_Split_Add_000/test.recipe
new file mode 100644
index 000000000..1d20443c8
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Part_Split_Add_000/test.recipe
@@ -0,0 +1,47 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 6 dim: 1 dim: 2 }
+}
+operand {
+ name: "split_dim"
+ type: INT32
+ shape { }
+ filler { tag: "explicit" arg: "0" }
+}
+operand {
+ name: "split1"
+ type: FLOAT32
+ shape { dim: 3 dim: 1 dim: 2 }
+}
+operand {
+ name: "split2"
+ type: FLOAT32
+ shape { dim: 3 dim: 1 dim: 2 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 3 dim: 1 dim: 2 }
+}
+operation {
+ type: "Split"
+ split_options {
+ num_splits: 2
+ }
+ input: "split_dim"
+ input: "ifm"
+ output: "split1"
+ output: "split2"
+}
+operation {
+ type: "Add"
+ input: "split1"
+ input: "split2"
+ output: "ofm"
+ add_options {
+ activation: NONE
+ }
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Quant_Add_000/test.recipe b/res/TensorFlowLiteRecipes/Quant_Add_000/test.recipe
new file mode 100644
index 000000000..5c150922e
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Quant_Add_000/test.recipe
@@ -0,0 +1,36 @@
+operand {
+ name: "ifm"
+ type: UINT8
+ shape { dim: 1 dim: 4 dim: 4 dim: 4 }
+ quant { scale: 1.0 zero_point: 0 }
+}
+operand {
+ name: "add_const"
+ type: UINT8
+ shape { dim: 1 dim: 1 dim: 1 dim: 4 }
+ quant { scale: 1.0 zero_point: 0 }
+ filler {
+ tag: "explicit"
+ arg: "0"
+ arg: "1"
+ arg: "2"
+ arg: "3"
+ }
+}
+operand {
+ name: "ofm"
+ type: UINT8
+ shape { dim: 1 dim: 4 dim: 4 dim: 4 }
+ quant { scale: 1.0 zero_point: 0 }
+}
+operation {
+ type: "Add"
+ input: "ifm"
+ input: "add_const"
+ output: "ofm"
+ add_options {
+ activation: NONE
+ }
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Quant_Add_000/test.rule b/res/TensorFlowLiteRecipes/Quant_Add_000/test.rule
new file mode 100644
index 000000000..7bde66240
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Quant_Add_000/test.rule
@@ -0,0 +1,10 @@
+# To check fake quantization.
+# All Ops are float32. Quantize/Dequantize Ops are inserted at the beginning/end of the model.
+
+RULE "VERIFY_FILE_FORMAT" $(verify_file_format) '=' 1
+
+RULE "IFM_FP32" $(tensor_dtype ifm) '=' FLOAT32
+RULE "ADD_CONST_FP32" $(tensor_dtype add_const_DQ) '=' FLOAT32
+RULE "ADD_FP32" $(tensor_dtype ofm) '=' FLOAT32
+RULE "QUANTIZE_OP" $(op_count QUANTIZE) '=' 2
+RULE "DEQUANTIZE_OP" $(op_count DEQUANTIZE) '=' 2
diff --git a/res/TensorFlowLiteRecipes/Quant_Conv_Mul_Add_000/test.qconf.json b/res/TensorFlowLiteRecipes/Quant_Conv_Mul_Add_000/test.qconf.json
new file mode 100644
index 000000000..536fef232
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Quant_Conv_Mul_Add_000/test.qconf.json
@@ -0,0 +1,11 @@
+{
+ "default_quantization_dtype" : "uint8",
+ "default_granularity" : "channel",
+ "layers" : [
+ {
+ "name" : "ofm_conv",
+ "dtype" : "int16",
+ "granularity" : "channel"
+ }
+ ]
+}
diff --git a/res/TensorFlowLiteRecipes/Quant_Conv_Mul_Add_000/test.recipe b/res/TensorFlowLiteRecipes/Quant_Conv_Mul_Add_000/test.recipe
new file mode 100644
index 000000000..3a3dba47f
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Quant_Conv_Mul_Add_000/test.recipe
@@ -0,0 +1,92 @@
+operand {
+ name: "ifm_conv"
+ type: FLOAT32
+ shape { dim: 1 dim: 64 dim: 64 dim: 32 }
+}
+operand {
+ name: "filter"
+ type: FLOAT32
+ shape { dim: 64 dim: 1 dim: 1 dim: 32 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "bias"
+ type: FLOAT32
+ shape { dim: 64 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "ofm_conv"
+ type: FLOAT32
+ shape { dim: 1 dim: 32 dim: 32 dim: 64 }
+}
+operand {
+ name: "mul_const"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 1 dim: 64 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "add_const"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 1 dim: 64 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "ofm_mul"
+ type: FLOAT32
+ shape { dim: 1 dim: 32 dim: 32 dim: 64 }
+}
+operand {
+ name: "ofm_add"
+ type: FLOAT32
+ shape { dim: 1 dim: 32 dim: 32 dim: 64 }
+}
+operation {
+ type: "Conv2D"
+ conv2d_options {
+ padding: VALID
+ stride_w: 2
+ stride_h: 2
+ }
+ input: "ifm_conv"
+ input: "filter"
+ input: "bias"
+ output: "ofm_conv"
+}
+operation {
+ type: "Mul"
+ input: "ofm_conv"
+ input: "mul_const"
+ output: "ofm_mul"
+ mul_options {
+ activation: NONE
+ }
+}
+operation {
+ type: "Add"
+ input: "ofm_mul"
+ input: "add_const"
+ output: "ofm_add"
+ add_options {
+ activation: NONE
+ }
+}
+input: "ifm_conv"
+output: "ofm_add"
diff --git a/res/TensorFlowLiteRecipes/Quant_Conv_Mul_Add_000/test.rule b/res/TensorFlowLiteRecipes/Quant_Conv_Mul_Add_000/test.rule
new file mode 100644
index 000000000..912405507
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Quant_Conv_Mul_Add_000/test.rule
@@ -0,0 +1,11 @@
+# To check mixed-precision quantization.
+# Conv is int16, and others u8. Quantize Ops are inserted before/after Conv.
+
+RULE "VERIFY_FILE_FORMAT" $(verify_file_format) '=' 1
+
+RULE "CONV_INT16" $(tensor_dtype ofm_conv) '=' INT16
+RULE "WEIGHTS_INT16" $(tensor_dtype filter) '=' INT16
+RULE "BIAS_INT64" $(tensor_dtype bias) '=' INT64
+RULE "MUL_U8" $(tensor_dtype ofm_mul) '=' UINT8
+RULE "ADD_U8" $(tensor_dtype ofm_add) '=' UINT8
+RULE "QUANTIZE_OP" $(op_count QUANTIZE) '=' 2
diff --git a/res/TensorFlowLiteRecipes/Quant_Conv_Mul_Add_001/test.qconf.json b/res/TensorFlowLiteRecipes/Quant_Conv_Mul_Add_001/test.qconf.json
new file mode 100644
index 000000000..824f0791d
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Quant_Conv_Mul_Add_001/test.qconf.json
@@ -0,0 +1,16 @@
+{
+ "default_quantization_dtype" : "uint8",
+ "default_granularity" : "channel",
+ "layers" : [
+ {
+ "name" : "ofm_conv",
+ "dtype" : "int16",
+ "granularity" : "channel"
+ },
+ {
+ "name" : "ofm_mul",
+ "dtype" : "int16",
+ "granularity" : "channel"
+ }
+ ]
+}
diff --git a/res/TensorFlowLiteRecipes/Quant_Conv_Mul_Add_001/test.recipe b/res/TensorFlowLiteRecipes/Quant_Conv_Mul_Add_001/test.recipe
new file mode 100644
index 000000000..3a3dba47f
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Quant_Conv_Mul_Add_001/test.recipe
@@ -0,0 +1,92 @@
+operand {
+ name: "ifm_conv"
+ type: FLOAT32
+ shape { dim: 1 dim: 64 dim: 64 dim: 32 }
+}
+operand {
+ name: "filter"
+ type: FLOAT32
+ shape { dim: 64 dim: 1 dim: 1 dim: 32 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "bias"
+ type: FLOAT32
+ shape { dim: 64 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "ofm_conv"
+ type: FLOAT32
+ shape { dim: 1 dim: 32 dim: 32 dim: 64 }
+}
+operand {
+ name: "mul_const"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 1 dim: 64 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "add_const"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 1 dim: 64 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "ofm_mul"
+ type: FLOAT32
+ shape { dim: 1 dim: 32 dim: 32 dim: 64 }
+}
+operand {
+ name: "ofm_add"
+ type: FLOAT32
+ shape { dim: 1 dim: 32 dim: 32 dim: 64 }
+}
+operation {
+ type: "Conv2D"
+ conv2d_options {
+ padding: VALID
+ stride_w: 2
+ stride_h: 2
+ }
+ input: "ifm_conv"
+ input: "filter"
+ input: "bias"
+ output: "ofm_conv"
+}
+operation {
+ type: "Mul"
+ input: "ofm_conv"
+ input: "mul_const"
+ output: "ofm_mul"
+ mul_options {
+ activation: NONE
+ }
+}
+operation {
+ type: "Add"
+ input: "ofm_mul"
+ input: "add_const"
+ output: "ofm_add"
+ add_options {
+ activation: NONE
+ }
+}
+input: "ifm_conv"
+output: "ofm_add"
diff --git a/res/TensorFlowLiteRecipes/Quant_Conv_Mul_Add_001/test.rule b/res/TensorFlowLiteRecipes/Quant_Conv_Mul_Add_001/test.rule
new file mode 100644
index 000000000..7df910a40
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Quant_Conv_Mul_Add_001/test.rule
@@ -0,0 +1,14 @@
+# To check mixed-precision quantization.
+# Conv, Mul: int16, Add: u8
+# Quantize Ops are inserted before Conv and after Mul.
+
+RULE "VERIFY_FILE_FORMAT" $(verify_file_format) '=' 1
+
+RULE "CONV_INT16" $(tensor_dtype ofm_conv) '=' INT16
+RULE "WEIGHTS_INT16" $(tensor_dtype filter) '=' INT16
+RULE "BIAS_INT64" $(tensor_dtype bias) '=' INT64
+RULE "MUL_INT16" $(tensor_dtype ofm_mul) '=' INT16
+RULE "MUL_CONST_INT16" $(tensor_dtype mul_const) '=' INT16
+RULE "ADD_UINT8" $(tensor_dtype ofm_add) '=' UINT8
+RULE "ADD_CONST_UINT8" $(tensor_dtype add_const) '=' UINT8
+RULE "QUANTIZE_OP" $(op_count QUANTIZE) '=' 2
diff --git a/res/TensorFlowLiteRecipes/Quant_Conv_Mul_Add_002/test.qconf.json b/res/TensorFlowLiteRecipes/Quant_Conv_Mul_Add_002/test.qconf.json
new file mode 100644
index 000000000..824f0791d
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Quant_Conv_Mul_Add_002/test.qconf.json
@@ -0,0 +1,16 @@
+{
+ "default_quantization_dtype" : "uint8",
+ "default_granularity" : "channel",
+ "layers" : [
+ {
+ "name" : "ofm_conv",
+ "dtype" : "int16",
+ "granularity" : "channel"
+ },
+ {
+ "name" : "ofm_mul",
+ "dtype" : "int16",
+ "granularity" : "channel"
+ }
+ ]
+}
diff --git a/res/TensorFlowLiteRecipes/Quant_Conv_Mul_Add_002/test.recipe b/res/TensorFlowLiteRecipes/Quant_Conv_Mul_Add_002/test.recipe
new file mode 100644
index 000000000..9e114b33a
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Quant_Conv_Mul_Add_002/test.recipe
@@ -0,0 +1,88 @@
+operand {
+ name: "ifm_conv"
+ type: FLOAT32
+ shape { dim: 1 dim: 64 dim: 64 dim: 32 }
+}
+operand {
+ name: "filter"
+ type: FLOAT32
+ shape { dim: 64 dim: 1 dim: 1 dim: 32 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "bias"
+ type: FLOAT32
+ shape { dim: 64 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "ofm_conv"
+ type: FLOAT32
+ shape { dim: 1 dim: 32 dim: 32 dim: 64 }
+}
+operand {
+ name: "mul_non_const"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 1 dim: 64 }
+}
+operand {
+ name: "add_const"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 1 dim: 64 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "ofm_mul"
+ type: FLOAT32
+ shape { dim: 1 dim: 32 dim: 32 dim: 64 }
+}
+operand {
+ name: "ofm_add"
+ type: FLOAT32
+ shape { dim: 1 dim: 32 dim: 32 dim: 64 }
+}
+operation {
+ type: "Conv2D"
+ conv2d_options {
+ padding: VALID
+ stride_w: 2
+ stride_h: 2
+ }
+ input: "ifm_conv"
+ input: "filter"
+ input: "bias"
+ output: "ofm_conv"
+}
+operation {
+ type: "Mul"
+ input: "ofm_conv"
+ input: "mul_non_const"
+ output: "ofm_mul"
+ mul_options {
+ activation: NONE
+ }
+}
+operation {
+ type: "Add"
+ input: "ofm_mul"
+ input: "add_const"
+ output: "ofm_add"
+ add_options {
+ activation: NONE
+ }
+}
+input: "ifm_conv"
+input: "mul_non_const"
+output: "ofm_add"
diff --git a/res/TensorFlowLiteRecipes/Quant_Conv_Mul_Add_002/test.rule b/res/TensorFlowLiteRecipes/Quant_Conv_Mul_Add_002/test.rule
new file mode 100644
index 000000000..b539872fc
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Quant_Conv_Mul_Add_002/test.rule
@@ -0,0 +1,14 @@
+# To check mixed-precision quantization.
+# Conv, Mul: int16, Add: u8
+# Quantize Ops are inserted before Conv, after Mul, before Mul's non-const input.
+
+RULE "VERIFY_FILE_FORMAT" $(verify_file_format) '=' 1
+
+RULE "CONV_INT16" $(tensor_dtype ofm_conv) '=' INT16
+RULE "WEIGHTS_INT16" $(tensor_dtype filter) '=' INT16
+RULE "BIAS_INT64" $(tensor_dtype bias) '=' INT64
+RULE "MUL_INT16" $(tensor_dtype ofm_mul) '=' INT16
+RULE "MUL_NON_CONST_UINT8" $(tensor_dtype mul_non_const) '=' UINT8
+RULE "ADD_UINT8" $(tensor_dtype ofm_add) '=' UINT8
+RULE "ADD_CONST_UINT8" $(tensor_dtype add_const) '=' UINT8
+RULE "QUANTIZE_OP" $(op_count QUANTIZE) '=' 3
diff --git a/res/TensorFlowLiteRecipes/Quant_Split_Add_000/test.qconf.json b/res/TensorFlowLiteRecipes/Quant_Split_Add_000/test.qconf.json
new file mode 100644
index 000000000..102e05fc7
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Quant_Split_Add_000/test.qconf.json
@@ -0,0 +1,11 @@
+{
+ "default_quantization_dtype" : "uint8",
+ "default_granularity" : "channel",
+ "layers" : [
+ {
+ "name" : "ofm1",
+ "dtype" : "int16",
+ "granularity" : "channel"
+ }
+ ]
+}
diff --git a/res/TensorFlowLiteRecipes/Quant_Split_Add_000/test.recipe b/res/TensorFlowLiteRecipes/Quant_Split_Add_000/test.recipe
new file mode 100644
index 000000000..ef7908979
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Quant_Split_Add_000/test.recipe
@@ -0,0 +1,47 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 6 dim: 1 dim: 2 }
+}
+operand {
+ name: "split_dim"
+ type: INT32
+ shape { }
+ filler { tag: "explicit" arg: "0" }
+}
+operand {
+ name: "ofm1"
+ type: FLOAT32
+ shape { dim: 3 dim: 1 dim: 2 }
+}
+operand {
+ name: "ofm2"
+ type: FLOAT32
+ shape { dim: 3 dim: 1 dim: 2 }
+}
+operation {
+ type: "Split"
+ split_options {
+ num_splits: 2
+ }
+ input: "split_dim"
+ input: "ifm"
+ output: "ofm1"
+ output: "ofm2"
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 3 dim: 1 dim: 2 }
+}
+operation {
+ type: "Add"
+ input: "ofm1"
+ input: "ofm2"
+ output: "ofm"
+ add_options {
+ activation: NONE
+ }
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Quant_Split_Add_000/test.rule b/res/TensorFlowLiteRecipes/Quant_Split_Add_000/test.rule
new file mode 100644
index 000000000..dc1ed874e
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Quant_Split_Add_000/test.rule
@@ -0,0 +1,11 @@
+# To check mixed-precision quantization for multiple output node.
+# Split: int16, Add: u8
+# Quantize Ops are inserted before Split and after all Split output nodes.
+
+RULE "VERIFY_FILE_FORMAT" $(verify_file_format) '=' 1
+
+RULE "INPUT_UINT8" $(tensor_dtype ifm) '=' UINT8
+RULE "SPLIT_OUT_1_INT16" $(tensor_dtype ofm1) '=' INT16
+RULE "SPLIT_OUT_2_INT16" $(tensor_dtype ofm2) '=' INT16
+RULE "ADD_UINT8" $(tensor_dtype ofm) '=' UINT8
+RULE "QUANTIZE_OP" $(op_count QUANTIZE) '=' 3
diff --git a/res/TensorFlowLiteRecipes/Quant_Split_Add_001/test.qconf.json b/res/TensorFlowLiteRecipes/Quant_Split_Add_001/test.qconf.json
new file mode 100644
index 000000000..272081b27
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Quant_Split_Add_001/test.qconf.json
@@ -0,0 +1,11 @@
+{
+ "default_quantization_dtype" : "uint8",
+ "default_granularity" : "channel",
+ "layers" : [
+ {
+ "name" : "ofm2",
+ "dtype" : "int16",
+ "granularity" : "channel"
+ }
+ ]
+}
diff --git a/res/TensorFlowLiteRecipes/Quant_Split_Add_001/test.recipe b/res/TensorFlowLiteRecipes/Quant_Split_Add_001/test.recipe
new file mode 100644
index 000000000..ef7908979
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Quant_Split_Add_001/test.recipe
@@ -0,0 +1,47 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 6 dim: 1 dim: 2 }
+}
+operand {
+ name: "split_dim"
+ type: INT32
+ shape { }
+ filler { tag: "explicit" arg: "0" }
+}
+operand {
+ name: "ofm1"
+ type: FLOAT32
+ shape { dim: 3 dim: 1 dim: 2 }
+}
+operand {
+ name: "ofm2"
+ type: FLOAT32
+ shape { dim: 3 dim: 1 dim: 2 }
+}
+operation {
+ type: "Split"
+ split_options {
+ num_splits: 2
+ }
+ input: "split_dim"
+ input: "ifm"
+ output: "ofm1"
+ output: "ofm2"
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 3 dim: 1 dim: 2 }
+}
+operation {
+ type: "Add"
+ input: "ofm1"
+ input: "ofm2"
+ output: "ofm"
+ add_options {
+ activation: NONE
+ }
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Quant_Split_Add_001/test.rule b/res/TensorFlowLiteRecipes/Quant_Split_Add_001/test.rule
new file mode 100644
index 000000000..dc1ed874e
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Quant_Split_Add_001/test.rule
@@ -0,0 +1,11 @@
+# To check mixed-precision quantization for multiple output node.
+# Split: int16, Add: u8
+# Quantize Ops are inserted before Split and after all Split output nodes.
+
+RULE "VERIFY_FILE_FORMAT" $(verify_file_format) '=' 1
+
+RULE "INPUT_UINT8" $(tensor_dtype ifm) '=' UINT8
+RULE "SPLIT_OUT_1_INT16" $(tensor_dtype ofm1) '=' INT16
+RULE "SPLIT_OUT_2_INT16" $(tensor_dtype ofm2) '=' INT16
+RULE "ADD_UINT8" $(tensor_dtype ofm) '=' UINT8
+RULE "QUANTIZE_OP" $(op_count QUANTIZE) '=' 3
diff --git a/res/TensorFlowLiteRecipes/Quantize_001/test.recipe b/res/TensorFlowLiteRecipes/Quantize_001/test.recipe
new file mode 100644
index 000000000..943341be1
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Quantize_001/test.recipe
@@ -0,0 +1,66 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ker"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 1 dim: 2 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "bias"
+ type: FLOAT32
+ shape { dim: 1 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "ofm_c"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 1 }
+}
+operation {
+ type: "Conv2D"
+ conv2d_options {
+ padding: VALID
+ stride_w: 1
+ stride_h: 1
+ }
+ input: "ifm"
+ input: "ker"
+ input: "bias"
+ output: "ofm_c"
+}
+operand {
+ name: "ofm_q"
+ type: UINT8
+ shape { dim: 1 dim: 3 dim: 3 dim: 1 }
+ quant { min: 0 max: 255 scale: 1.0 zero_point: 0 }
+}
+operation {
+ type: "Quantize"
+ input: "ofm_c"
+ output: "ofm_q"
+}
+operand {
+ name: "ofm"
+ type: INT16
+ shape { dim: 1 dim: 3 dim: 3 dim: 1 }
+ quant { min: -255 max: 255 scale: 1.0 zero_point: 0 }
+}
+operation {
+ type: "Quantize"
+ input: "ofm_q"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Quantize_001/test.reverse b/res/TensorFlowLiteRecipes/Quantize_001/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Quantize_001/test.reverse
diff --git a/res/TensorFlowLiteRecipes/SVDF_000/test.recipe b/res/TensorFlowLiteRecipes/SVDF_000/test.recipe
new file mode 100644
index 000000000..cd45f1b56
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/SVDF_000/test.recipe
@@ -0,0 +1,62 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 16 }
+}
+operand {
+ name: "weight_feature"
+ type: FLOAT32
+ shape { dim: 64 dim: 16 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "weight_time"
+ type: FLOAT32
+ shape { dim: 64 dim: 8 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "bias"
+ type: FLOAT32
+ shape { dim: 64 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "input_activation_state"
+ type: FLOAT32
+ is_variable: true
+ shape { dim: 1 dim: 512 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 64 }
+}
+operation {
+ type: "SVDF"
+ svdf_options {
+ rank: 1
+ activation: RELU
+ asymmetric_quantize_inputs: false
+ }
+ input: "ifm"
+ input: "weight_feature"
+ input: "weight_time"
+ input: "bias"
+ input: "input_activation_state"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/SVDF_000/test.reverse b/res/TensorFlowLiteRecipes/SVDF_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/SVDF_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/SVDF_001/test.recipe b/res/TensorFlowLiteRecipes/SVDF_001/test.recipe
new file mode 100644
index 000000000..38b76c2a4
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/SVDF_001/test.recipe
@@ -0,0 +1,52 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 16 }
+}
+operand {
+ name: "weight_feature"
+ type: FLOAT32
+ shape { dim: 64 dim: 16 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "weight_time"
+ type: FLOAT32
+ shape { dim: 64 dim: 8 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "input_activation_state"
+ type: FLOAT32
+ is_variable: true
+ shape { dim: 1 dim: 512 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 64 }
+}
+operation {
+ type: "SVDF"
+ svdf_options {
+ rank: 1
+ activation: RELU
+ asymmetric_quantize_inputs: false
+ }
+ input: "ifm"
+ input: "weight_feature"
+ input: "weight_time"
+ input: ""
+ input: "input_activation_state"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/SVDF_001/test.reverse b/res/TensorFlowLiteRecipes/SVDF_001/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/SVDF_001/test.reverse
diff --git a/res/TensorFlowLiteRecipes/SignatureDef_MultiOut_000/test.recipe b/res/TensorFlowLiteRecipes/SignatureDef_MultiOut_000/test.recipe
index ae993e6d8..81e1e56e8 100644
--- a/res/TensorFlowLiteRecipes/SignatureDef_MultiOut_000/test.recipe
+++ b/res/TensorFlowLiteRecipes/SignatureDef_MultiOut_000/test.recipe
@@ -71,8 +71,7 @@ signature_def {
name: "ofm1"
tensor_index: 2
}
- method_name: "serving_default"
- key: "serv"
+ signature_key: "serving_default"
subgraph_index: 0
}
input: "ifm1"
diff --git a/res/TensorFlowLiteRecipes/SignatureDef_MultiOut_001/test.recipe b/res/TensorFlowLiteRecipes/SignatureDef_MultiOut_001/test.recipe
new file mode 100644
index 000000000..a1731f99e
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/SignatureDef_MultiOut_001/test.recipe
@@ -0,0 +1,81 @@
+operand {
+ name: "ifm1"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ifm2"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ofm1"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ofm2"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ofm3"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operation {
+ type: "Add"
+ input: "ifm1"
+ input: "ifm2"
+ output: "ofm1"
+ add_options {
+ activation: NONE
+ }
+}
+operation {
+ type: "Mul"
+ input: "ifm1"
+ input: "ifm2"
+ output: "ofm2"
+ mul_options {
+ activation: NONE
+ }
+}
+operation {
+ type: "Sub"
+ input: "ifm1"
+ input: "ifm2"
+ output: "ofm3"
+ sub_options {
+ activation: NONE
+ }
+}
+signature_def {
+ inputs: {
+ name: "ifm1"
+ tensor_index: 0
+ }
+ inputs: {
+ name: "ifm2"
+ tensor_index: 1
+ }
+ outputs {
+ name: "out3"
+ tensor_index: 3
+ }
+ outputs {
+ name: "out2"
+ tensor_index: 4
+ }
+ outputs {
+ name: "out1"
+ tensor_index: 2
+ }
+ signature_key: "serving_default"
+ subgraph_index: 0
+}
+input: "ifm1"
+input: "ifm2"
+output: "ofm3"
+output: "ofm1"
+output: "ofm2"
diff --git a/res/TensorFlowLiteRecipes/Sqrt_000/test.recipe b/res/TensorFlowLiteRecipes/Sqrt_000/test.recipe
index 1754f9a58..6d258e73f 100644
--- a/res/TensorFlowLiteRecipes/Sqrt_000/test.recipe
+++ b/res/TensorFlowLiteRecipes/Sqrt_000/test.recipe
@@ -2,7 +2,6 @@ operand {
name: "ifm"
type: FLOAT32
shape { dim: 1 dim: 3 dim: 3 dim: 2 }
- filler { tag: "constant" arg: "3.5" }
}
operand {
name: "ofm"