Diffstat (limited to 'res/TensorFlowLiteRecipes/Quant_Concatenation_000')
-rw-r--r--  res/TensorFlowLiteRecipes/Quant_Concatenation_000/test.qconf.json | 11
-rw-r--r--  res/TensorFlowLiteRecipes/Quant_Concatenation_000/test.recipe     | 28
-rw-r--r--  res/TensorFlowLiteRecipes/Quant_Concatenation_000/test.reverse    |  0
-rw-r--r--  res/TensorFlowLiteRecipes/Quant_Concatenation_000/test.rule       | 13
4 files changed, 52 insertions(+), 0 deletions(-)
diff --git a/res/TensorFlowLiteRecipes/Quant_Concatenation_000/test.qconf.json b/res/TensorFlowLiteRecipes/Quant_Concatenation_000/test.qconf.json
new file mode 100644
index 000000000..ab70bcc16
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Quant_Concatenation_000/test.qconf.json
@@ -0,0 +1,11 @@
+{
+ "default_quantization_dtype" : "uint8",
+ "default_granularity" : "channel",
+ "layers" : [
+ {
+ "name" : "ofm",
+ "dtype" : "int16",
+ "granularity" : "channel"
+ }
+ ]
+}
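
Note: the qconf.json above drives mixed quantization: every tensor falls back to the top-level defaults (uint8, channel granularity), while tensors listed under "layers" (here "ofm") are overridden to int16. A minimal sketch of how such a config can be resolved per tensor, assuming it is read as plain JSON; the helper resolve_layer_quantization is hypothetical and is not part of the quantizer itself:

    import json

    # Illustrative only: resolve the effective quantization settings for a
    # tensor name from a test.qconf.json-style config (hypothetical helper,
    # not the quantizer's actual implementation).
    def resolve_layer_quantization(qconf_path, tensor_name):
        with open(qconf_path) as f:
            qconf = json.load(f)

        # Start from the defaults declared at the top level of the config.
        dtype = qconf["default_quantization_dtype"]   # e.g. "uint8"
        granularity = qconf["default_granularity"]    # e.g. "channel"

        # Per-layer entries override the defaults for the named tensor.
        for layer in qconf.get("layers", []):
            if layer["name"] == tensor_name:
                dtype = layer.get("dtype", dtype)
                granularity = layer.get("granularity", granularity)
                break

        return dtype, granularity

    # With the config above: ("int16", "channel") for "ofm",
    # ("uint8", "channel") for any other tensor such as "ifm1".
    print(resolve_layer_quantization("test.qconf.json", "ofm"))
    print(resolve_layer_quantization("test.qconf.json", "ifm1"))
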
diff --git a/res/TensorFlowLiteRecipes/Quant_Concatenation_000/test.recipe b/res/TensorFlowLiteRecipes/Quant_Concatenation_000/test.recipe
new file mode 100644
index 000000000..35641bd07
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Quant_Concatenation_000/test.recipe
@@ -0,0 +1,28 @@
+operand {
+ name: "ifm1"
+ type: FLOAT32
+ shape { dim: 1 dim: 4 dim: 4 dim: 1 }
+}
+operand {
+ name: "ifm2"
+ type: FLOAT32
+ shape { dim: 1 dim: 4 dim: 4 dim: 2 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 4 dim: 4 dim: 3 }
+}
+operation {
+ type: "Concatenation"
+ concatenation_options {
+ axis: 3
+ activation: NONE
+ }
+ input: "ifm1"
+ input: "ifm2"
+ output: "ofm"
+}
+input: "ifm1"
+input: "ifm2"
+output: "ofm"
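
Note: the recipe describes a single Concatenation over the last axis: ifm1 (1x4x4x1) and ifm2 (1x4x4x2) joined along axis 3 yield ofm (1x4x4x3). A quick NumPy check of the same shape arithmetic, independent of the recipe tooling:

    import numpy as np

    # Same operand shapes as the recipe above.
    ifm1 = np.zeros((1, 4, 4, 1), dtype=np.float32)
    ifm2 = np.zeros((1, 4, 4, 2), dtype=np.float32)

    # Concatenation along axis 3 (the channel dimension), activation NONE.
    ofm = np.concatenate([ifm1, ifm2], axis=3)

    assert ofm.shape == (1, 4, 4, 3)  # matches the "ofm" operand shape
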
diff --git a/res/TensorFlowLiteRecipes/Quant_Concatenation_000/test.reverse b/res/TensorFlowLiteRecipes/Quant_Concatenation_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Quant_Concatenation_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/Quant_Concatenation_000/test.rule b/res/TensorFlowLiteRecipes/Quant_Concatenation_000/test.rule
new file mode 100644
index 000000000..e832ac526
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Quant_Concatenation_000/test.rule
@@ -0,0 +1,13 @@
+# To check mixed quantization.
+# Default dtype: U8, Target Op dtype: S16
+# Quantize Ops are inserted at the beginning/end of the model.
+
+RULE "VERIFY_FILE_FORMAT" $(verify_file_format) '=' 1
+
+RULE "IFM1_U8" $(tensor_dtype ifm1) '=' UINT8
+RULE "IFM1_QUANTIZE_S16" $(tensor_dtype ifm1_Quantize) '=' INT16
+RULE "IFM2_U8" $(tensor_dtype ifm2) '=' UINT8
+RULE "IFM2_QUANTIZE_S16" $(tensor_dtype ifm2_Quantize) '=' INT16
+RULE "TARGET_S16" $(tensor_dtype ofm) '=' INT16
+RULE "OUTPUT_S16" $(tensor_dtype ofm_Quantize) '=' UINT8
+RULE "QUANTIZE_OP" $(op_count QUANTIZE) '=' 3