diff options
Diffstat (limited to 'res/TensorFlowLiteRecipes/Quant_TransposeConv_001')
4 files changed, 79 insertions, 0 deletions
diff --git a/res/TensorFlowLiteRecipes/Quant_TransposeConv_001/test.qconf.json b/res/TensorFlowLiteRecipes/Quant_TransposeConv_001/test.qconf.json new file mode 100644 index 000000000..010fa65fd --- /dev/null +++ b/res/TensorFlowLiteRecipes/Quant_TransposeConv_001/test.qconf.json @@ -0,0 +1,11 @@ +{ + "default_quantization_dtype" : "int16", + "default_granularity" : "channel", + "layers" : [ + { + "name" : "ofm", + "dtype" : "uint8", + "granularity" : "channel" + } + ] +} diff --git a/res/TensorFlowLiteRecipes/Quant_TransposeConv_001/test.recipe b/res/TensorFlowLiteRecipes/Quant_TransposeConv_001/test.recipe new file mode 100644 index 000000000..9462e1351 --- /dev/null +++ b/res/TensorFlowLiteRecipes/Quant_TransposeConv_001/test.recipe @@ -0,0 +1,55 @@ +operand { + name: "out_shape" + type: INT32 + shape { dim: 4 } + filler { + tag: "explicit" + arg: "1" arg: "4" arg: "4" arg: "3" + } +} +operand { + name: "bias" + type: FLOAT32 + shape { dim: 3 } + filler { + tag: "explicit" + arg: "1" arg: "2" arg: "3" + } +} +operand { + name: "ker" + type: FLOAT32 + shape { dim: 3 dim: 1 dim: 1 dim: 3 } + filler { + tag: "gaussian" + arg: "0.0" + arg: "1.0" + } +} +operand { + name: "ifm" + type: FLOAT32 + shape { dim: 1 dim: 4 dim: 4 dim: 3 } +} +operand { + name: "ofm" + type: FLOAT32 + shape { dim: 1 dim: 4 dim: 4 dim: 3 } +} + +operation { + type: "TransposeConv" + transpose_conv_options { + padding: SAME + stride_w: 1 + stride_h: 1 + activation: NONE + } + input: "out_shape" + input: "ker" + input: "ifm" + input: "bias" + output: "ofm" +} +input: "ifm" +output: "ofm" diff --git a/res/TensorFlowLiteRecipes/Quant_TransposeConv_001/test.reverse b/res/TensorFlowLiteRecipes/Quant_TransposeConv_001/test.reverse new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/res/TensorFlowLiteRecipes/Quant_TransposeConv_001/test.reverse diff --git a/res/TensorFlowLiteRecipes/Quant_TransposeConv_001/test.rule b/res/TensorFlowLiteRecipes/Quant_TransposeConv_001/test.rule new 
file mode 100644 index 000000000..ffa3bc906 --- /dev/null +++ b/res/TensorFlowLiteRecipes/Quant_TransposeConv_001/test.rule @@ -0,0 +1,13 @@ +# To check mixed quantization. +# Default dtype: S16, Target Op dtype: U8 +# Quantize Ops are inserted at the beginning/end of the model. + +RULE "VERIFY_FILE_FORMAT" $(verify_file_format) '=' 1 + +RULE "IFM_S16" $(tensor_dtype ifm) '=' INT16 +RULE "IFM_QUANTIZE_U8" $(tensor_dtype ifm_Quantize) '=' UINT8 +RULE "KER_U8" $(tensor_dtype ker) '=' UINT8 +RULE "BIAS_S32" $(tensor_dtype bias) '=' INT32 +RULE "TARGET_U8" $(tensor_dtype ofm) '=' UINT8 +RULE "OUTPUT_S16" $(tensor_dtype ofm_Quantize) '=' INT16 +RULE "QUANTIZE_OP" $(op_count QUANTIZE) '=' 2