author     Chunseok Lee <chunseok.lee@samsung.com>  2020-04-23 14:45:49 +0900
committer  Chunseok Lee <chunseok.lee@samsung.com>  2020-04-23 14:45:49 +0900
commit     e2ef8438a24f7c56a0744eb579a6e293ee2fbf8e (patch)
tree       44a1a7951d168dd4370e13593ed03f4bc6d920c5 /res
parent     302e6564a7a76109e1178207e44e45a58631c477 (diff)
download   nnfw-e2ef8438a24f7c56a0744eb579a6e293ee2fbf8e.tar.gz
           nnfw-e2ef8438a24f7c56a0744eb579a6e293ee2fbf8e.tar.bz2
           nnfw-e2ef8438a24f7c56a0744eb579a6e293ee2fbf8e.zip

Imported Upstream version 1.4.0  (tags: upstream/1.4.0, submit/tizen/20200423.054851)
Diffstat (limited to 'res')
-rw-r--r--  res/ONNXTests/UNIT_Gemm_000/test.pbtxt  79
-rw-r--r--  res/ONNXTests/UNIT_Gemm_001/test.pbtxt  70
-rw-r--r--  res/TensorFlowLiteRecipes/Abs_000/test.recipe  17
-rw-r--r--  res/TensorFlowLiteRecipes/Abs_000/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/Add_000/test.recipe  27
-rw-r--r--  res/TensorFlowLiteRecipes/Add_000/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/Add_U8_000/test.recipe  30
-rw-r--r--  res/TensorFlowLiteRecipes/Add_U8_000/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/ArgMax_000/test.recipe  30
-rw-r--r--  res/TensorFlowLiteRecipes/ArgMax_000/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/ArgMax_001/test.recipe  30
-rw-r--r--  res/TensorFlowLiteRecipes/ArgMax_001/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/ArgMax_002/test.recipe  30
-rw-r--r--  res/TensorFlowLiteRecipes/ArgMax_002/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/ArgMax_003/test.recipe  30
-rw-r--r--  res/TensorFlowLiteRecipes/ArgMax_003/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/ArgMax_U8_000/test.recipe  31
-rw-r--r--  res/TensorFlowLiteRecipes/ArgMax_U8_000/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/ArgMax_U8_001/test.recipe  31
-rw-r--r--  res/TensorFlowLiteRecipes/ArgMax_U8_001/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/ArgMax_U8_002/test.recipe  31
-rw-r--r--  res/TensorFlowLiteRecipes/ArgMax_U8_002/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/ArgMax_U8_003/test.recipe  31
-rw-r--r--  res/TensorFlowLiteRecipes/ArgMax_U8_003/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/AveragePool2D_000/test.recipe  24
-rw-r--r--  res/TensorFlowLiteRecipes/AveragePool2D_000/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/BatchToSpaceND_000/test.recipe  38
-rw-r--r--  res/TensorFlowLiteRecipes/BatchToSpaceND_000/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/Concatenation_000/test.recipe  28
-rw-r--r--  res/TensorFlowLiteRecipes/Concatenation_000/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/Concatenation_U8_000/test.recipe  31
-rw-r--r--  res/TensorFlowLiteRecipes/Conv2D_000/test.recipe  44
-rw-r--r--  res/TensorFlowLiteRecipes/Conv2D_000/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/Conv2D_001/test.recipe  44
-rw-r--r--  res/TensorFlowLiteRecipes/Conv2D_001/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/Conv2D_002/test.recipe  45
-rw-r--r--  res/TensorFlowLiteRecipes/Conv2D_U8_000/test.recipe  48
-rw-r--r--  res/TensorFlowLiteRecipes/Cos_000/test.recipe  17
-rw-r--r--  res/TensorFlowLiteRecipes/Cos_000/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/DepthwiseConv2D_000/test.recipe  41
-rw-r--r--  res/TensorFlowLiteRecipes/DepthwiseConv2D_000/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/DepthwiseConv2D_U8_000/test.recipe  46
-rw-r--r--  res/TensorFlowLiteRecipes/DepthwiseConv2D_U8_000/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/Div_000/test.recipe  27
-rw-r--r--  res/TensorFlowLiteRecipes/Div_000/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/Equal_000/test.recipe  26
-rw-r--r--  res/TensorFlowLiteRecipes/Equal_000/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/Exp_000/test.recipe  17
-rw-r--r--  res/TensorFlowLiteRecipes/Exp_000/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/FullyConnected_000/test.recipe  34
-rw-r--r--  res/TensorFlowLiteRecipes/FullyConnected_000/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/FullyConnected_001/test.recipe  34
-rw-r--r--  res/TensorFlowLiteRecipes/FullyConnected_001/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/FullyConnected_U8_000/test.recipe  35
-rw-r--r--  res/TensorFlowLiteRecipes/FullyConnected_U8_000/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/LogicalNot_000/test.recipe  17
-rw-r--r--  res/TensorFlowLiteRecipes/LogicalNot_000/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/LogicalOr_000/test.recipe  24
-rw-r--r--  res/TensorFlowLiteRecipes/LogicalOr_000/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/MaxPool2D_000/test.recipe  24
-rw-r--r--  res/TensorFlowLiteRecipes/MaxPool2D_000/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/MaxPool2D_U8_000/test.recipe  26
-rw-r--r--  res/TensorFlowLiteRecipes/Mean_000/test.recipe  27
-rw-r--r--  res/TensorFlowLiteRecipes/Mean_000/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/Mul_000/test.recipe  27
-rw-r--r--  res/TensorFlowLiteRecipes/Mul_U8_000/test.recipe  30
-rw-r--r--  res/TensorFlowLiteRecipes/Pack_000/test.recipe  28
-rw-r--r--  res/TensorFlowLiteRecipes/Pack_000/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/Pack_U8_000/test.recipe  31
-rw-r--r--  res/TensorFlowLiteRecipes/Pack_U8_000/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/Pad_000/test.recipe  30
-rw-r--r--  res/TensorFlowLiteRecipes/Pad_000/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/Pad_U8_000/test.recipe  32
-rw-r--r--  res/TensorFlowLiteRecipes/Pad_U8_000/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/Quantization_000/test.recipe  46
-rw-r--r--  res/TensorFlowLiteRecipes/Quantization_000/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/ReLU6_000/test.recipe  17
-rw-r--r--  res/TensorFlowLiteRecipes/ReLU6_000/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/ReLU_000/test.recipe  17
-rw-r--r--  res/TensorFlowLiteRecipes/ReLU_000/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/Reshape_000/test.recipe  20
-rw-r--r--  res/TensorFlowLiteRecipes/Reshape_000/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/Reshape_001/test.recipe  28
-rw-r--r--  res/TensorFlowLiteRecipes/Reshape_001/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/Reshape_U8_000/test.recipe  22
-rw-r--r--  res/TensorFlowLiteRecipes/Reshape_U8_000/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/Rsqrt_000/test.recipe  17
-rw-r--r--  res/TensorFlowLiteRecipes/Rsqrt_000/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/Softmax_000/test.recipe  20
-rw-r--r--  res/TensorFlowLiteRecipes/Softmax_000/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/Softmax_U8_000/test.recipe  22
-rw-r--r--  res/TensorFlowLiteRecipes/Softmax_U8_000/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/Sqrt_000/test.recipe  18
-rw-r--r--  res/TensorFlowLiteRecipes/Sqrt_000/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/Sub_000/test.recipe  27
-rw-r--r--  res/TensorFlowLiteRecipes/Sub_000/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/Sub_001/test.recipe  42
-rw-r--r--  res/TensorFlowLiteRecipes/Sub_001/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/Sub_U8_000/test.recipe  30
-rw-r--r--  res/TensorFlowLiteRecipes/Sub_U8_000/test.reverse  0
-rw-r--r--  res/TensorFlowLiteRecipes/Transpose_000/test.recipe  27
-rw-r--r--  res/TensorFlowLiteRecipes/Transpose_000/test.reverse  0
-rw-r--r--  res/TensorFlowLiteSchema/1.13.1/schema.fbs  794
-rw-r--r--  res/TensorFlowLiteSchema/1.14.0/schema.fbs  873
-rw-r--r--  res/TensorFlowLiteSchema/1.15.2/schema.fbs  922
-rw-r--r--  res/TensorFlowLiteSchema/2.1.0/schema.fbs  940
-rw-r--r--  res/TensorFlowLiteSchema/README.md  7
-rw-r--r--  res/TensorFlowLiteSchema/SCHEMA.lst  5
-rwxr-xr-x  res/TensorFlowLiteSchema/download.sh  9
-rw-r--r--  res/TensorFlowPythonExamples/.gitignore  1
-rw-r--r--  res/TensorFlowPythonExamples/README.md  31
-rwxr-xr-x  res/TensorFlowPythonExamples/examples/abs/__init__.py  4
-rwxr-xr-x  res/TensorFlowPythonExamples/examples/add/__init__.py  5
-rwxr-xr-x  res/TensorFlowPythonExamples/examples/argmax/__init__.py  4
-rwxr-xr-x  res/TensorFlowPythonExamples/examples/biasadd/__init__.py  4
-rwxr-xr-x  res/TensorFlowPythonExamples/examples/cos/__init__.py  4
-rwxr-xr-x  res/TensorFlowPythonExamples/examples/div/__init__.py  5
-rwxr-xr-x  res/TensorFlowPythonExamples/examples/elu/__init__.py  4
-rw-r--r--  res/TensorFlowPythonExamples/examples/exp/__init__.py  4
-rwxr-xr-x  res/TensorFlowPythonExamples/examples/floor/__init__.py  4
-rwxr-xr-x  res/TensorFlowPythonExamples/examples/floordiv/__init__.py  5
-rwxr-xr-x  res/TensorFlowPythonExamples/examples/greater/__init__.py  5
-rwxr-xr-x  res/TensorFlowPythonExamples/examples/greater_equal/__init__.py  5
-rwxr-xr-x  res/TensorFlowPythonExamples/examples/leaky_relu/__init__.py  4
-rwxr-xr-x  res/TensorFlowPythonExamples/examples/less/__init__.py  5
-rwxr-xr-x  res/TensorFlowPythonExamples/examples/less_equal/__init__.py  5
-rwxr-xr-x  res/TensorFlowPythonExamples/examples/logical_not/__init__.py  4
-rwxr-xr-x  res/TensorFlowPythonExamples/examples/logical_or/__init__.py  5
-rwxr-xr-x  res/TensorFlowPythonExamples/examples/matmul/__init__.py  5
-rwxr-xr-x  res/TensorFlowPythonExamples/examples/multiply/__init__.py  5
-rwxr-xr-x  res/TensorFlowPythonExamples/examples/not_equal/__init__.py  5
-rwxr-xr-x  res/TensorFlowPythonExamples/examples/pack/__init__.py  5
-rwxr-xr-x  res/TensorFlowPythonExamples/examples/pad/__init__.py  5
-rwxr-xr-x  res/TensorFlowPythonExamples/examples/pow/__init__.py  5
-rwxr-xr-x  res/TensorFlowPythonExamples/examples/prelu/__init__.py  7
-rwxr-xr-x  res/TensorFlowPythonExamples/examples/relu/__init__.py  4
-rwxr-xr-x  res/TensorFlowPythonExamples/examples/relu6/__init__.py  4
-rw-r--r--  res/TensorFlowPythonExamples/examples/reshape/__init.py__  4
-rwxr-xr-x  res/TensorFlowPythonExamples/examples/resize_bilinear/__init__.py  4
-rwxr-xr-x  res/TensorFlowPythonExamples/examples/resize_nearest_neighbor/__init__.py  4
-rwxr-xr-x  res/TensorFlowPythonExamples/examples/rsqrt/__init__.py  4
-rwxr-xr-x  res/TensorFlowPythonExamples/examples/sigmoid/__init__.py  4
-rwxr-xr-x  res/TensorFlowPythonExamples/examples/softmax/__init__.py  4
-rwxr-xr-x  res/TensorFlowPythonExamples/examples/sqrt/__init__.py  4
-rwxr-xr-x  res/TensorFlowPythonExamples/examples/subtract/__init__.py  5
-rwxr-xr-x  res/TensorFlowPythonExamples/examples/tanh/__init__.py  4
-rwxr-xr-x  res/TensorFlowPythonExamples/examples/yuv_to_rgb/__init__.py  4
-rw-r--r--  res/TensorFlowPythonExamples/requirements.txt  18
-rwxr-xr-x  res/TensorFlowPythonExamples/tfpem.py  25
-rwxr-xr-x [-rw-r--r--]  res/TensorFlowTests/NET_0003/test.py  0
-rwxr-xr-x [-rw-r--r--]  res/TensorFlowTests/NET_0004/test.py  0
-rw-r--r--  res/TensorFlowTests/UNIT_Maximum_000/test.info  3
-rw-r--r--  res/TensorFlowTests/UNIT_Maximum_000/test.pbtxt  70
-rw-r--r--  res/TensorFlowTests/UNIT_Maximum_001/test.info  3
-rw-r--r--  res/TensorFlowTests/UNIT_Maximum_001/test.pbtxt  70
-rw-r--r--  res/TensorFlowTests/UNIT_Maximum_002/test.info  3
-rw-r--r--  res/TensorFlowTests/UNIT_Maximum_002/test.pbtxt  61
157 files changed, 5692 insertions, 0 deletions
diff --git a/res/ONNXTests/UNIT_Gemm_000/test.pbtxt b/res/ONNXTests/UNIT_Gemm_000/test.pbtxt
new file mode 100644
index 000000000..6fd497f89
--- /dev/null
+++ b/res/ONNXTests/UNIT_Gemm_000/test.pbtxt
@@ -0,0 +1,79 @@
+# This testcase is compatible with ONNX 1.4.1 or newer
+ir_version: 5
+
+opset_import {
+ version: 7
+}
+
+graph {
+ name: "Gemm_000"
+
+ node {
+ input: "input"
+ input: "weight"
+ input: "bias"
+ output: "output"
+ op_type: "Gemm"
+ attribute {
+ name: "alpha"
+ f: 1.5
+ type: FLOAT
+ }
+ attribute {
+ name: "beta"
+ f: 1.5
+ type: FLOAT
+ }
+ attribute {
+ name: "transA"
+ i: 1
+ type: INT
+ }
+ }
+
+# Initializers generated by python helper script:
+# a = np.ones((2,2), dtype = np.float32)
+# onnx.numpy_helper.from_array(a)
+#
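+# For reference, a minimal sketch of the full helper usage for the "weight"
+# initializer below (an assumption for illustration: the actual script is not
+# part of this commit; numpy and onnx are taken to be installed):
+#
+#   import numpy as np
+#   import onnx
+#
+#   w = np.ones((3, 2), dtype=np.float32)   # same dims as "weight"
+#   t = onnx.numpy_helper.from_array(w)     # TensorProto carrying raw_data
+#   # float32 1.0 is 3f800000, stored little-endian as "\000\000\200?"
+#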
+ initializer {
+ dims: 3
+ dims: 2
+ data_type: 1
+ name: "weight"
+ raw_data: "\000\000\200?\000\000\200?\000\000\200?\000\000\200?\000\000\200?\000\000\200?"
+ }
+
+ initializer {
+ dims: 2
+ dims: 2
+ data_type: 1
+ name: "bias"
+ raw_data: "\000\000\200?\000\000\200?\000\000\200?\000\000\200?"
+ }
+
+ input {
+ name: "input"
+ type {
+ tensor_type {
+ elem_type: 1 # FLOAT type
+ shape {
+ dim { dim_value: 3 }
+ dim { dim_value: 2 }
+ }
+ }
+ }
+ }
+
+ output {
+ name: "output"
+ type {
+ tensor_type {
+ elem_type: 1 # FLOAT type
+ shape {
+ dim { dim_value: 2 }
+ dim { dim_value: 2 }
+ }
+ }
+ }
+ }
+}
diff --git a/res/ONNXTests/UNIT_Gemm_001/test.pbtxt b/res/ONNXTests/UNIT_Gemm_001/test.pbtxt
new file mode 100644
index 000000000..0371fc250
--- /dev/null
+++ b/res/ONNXTests/UNIT_Gemm_001/test.pbtxt
@@ -0,0 +1,70 @@
+# This testcase is compatible with ONNX 1.4.1 or newer
+ir_version: 5
+
+opset_import {
+ version: 11
+}
+
+graph {
+ name: "Gemm_001"
+
+ node {
+ input: "input"
+ input: "weight"
+ output: "output"
+ op_type: "Gemm"
+ attribute {
+ name: "alpha"
+ f: 1.5
+ type: FLOAT
+ }
+ attribute {
+ name: "beta"
+ f: 1.5
+ type: FLOAT
+ }
+ attribute {
+ name: "transA"
+ i: 1
+ type: INT
+ }
+ }
+
+# Initializers generated by python helper script:
+# a = np.ones((2,2), dtype = np.float32)
+# onnx.numpy_helper.from_array(a)
+#
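+# (The same helper sketch as in UNIT_Gemm_000 applies, e.g. hypothetically
+#   onnx.numpy_helper.from_array(np.ones((3, 2), np.float32), name="weight")
+# Note that from opset 11 onward Gemm's "C" input is optional, which is why
+# this recipe feeds only "input" and "weight".)
+#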
+ initializer {
+ dims: 3
+ dims: 2
+ data_type: 1
+ name: "weight"
+ raw_data: "\000\000\200?\000\000\200?\000\000\200?\000\000\200?\000\000\200?\000\000\200?"
+ }
+
+ input {
+ name: "input"
+ type {
+ tensor_type {
+ elem_type: 1 # FLOAT type
+ shape {
+ dim { dim_value: 3 }
+ dim { dim_value: 2 }
+ }
+ }
+ }
+ }
+
+ output {
+ name: "output"
+ type {
+ tensor_type {
+ elem_type: 1 # FLOAT type
+ shape {
+ dim { dim_value: 2 }
+ dim { dim_value: 2 }
+ }
+ }
+ }
+ }
+}
diff --git a/res/TensorFlowLiteRecipes/Abs_000/test.recipe b/res/TensorFlowLiteRecipes/Abs_000/test.recipe
new file mode 100644
index 000000000..0603a43fd
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Abs_000/test.recipe
@@ -0,0 +1,17 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operation {
+ type: "Abs"
+ input: "ifm"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Abs_000/test.reverse b/res/TensorFlowLiteRecipes/Abs_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Abs_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/Add_000/test.recipe b/res/TensorFlowLiteRecipes/Add_000/test.recipe
new file mode 100644
index 000000000..54018446a
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Add_000/test.recipe
@@ -0,0 +1,27 @@
+operand {
+ name: "ifm1"
+ type: FLOAT32
+ shape { dim: 1 dim: 4 dim: 4 dim: 3 }
+}
+operand {
+ name: "ifm2"
+ type: FLOAT32
+ shape { dim: 1 dim: 4 dim: 4 dim: 3 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 4 dim: 4 dim: 3 }
+}
+operation {
+ type: "Add"
+ input: "ifm1"
+ input: "ifm2"
+ output: "ofm"
+ add_options {
+ activation: NONE
+ }
+}
+input: "ifm1"
+input: "ifm2"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Add_000/test.reverse b/res/TensorFlowLiteRecipes/Add_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Add_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/Add_U8_000/test.recipe b/res/TensorFlowLiteRecipes/Add_U8_000/test.recipe
new file mode 100644
index 000000000..f4ccc3cc8
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Add_U8_000/test.recipe
@@ -0,0 +1,30 @@
+operand {
+ name: "ifm1"
+ type: UINT8
+ shape { dim: 1 dim: 4 dim: 4 dim: 3 }
+ quant { min: 0 max: 255 scale: 1.0 zero_point: 0 }
+}
+operand {
+ name: "ifm2"
+ type: UINT8
+ shape { dim: 1 dim: 4 dim: 4 dim: 3 }
+ quant { min: 0 max: 255 scale: 1.0 zero_point: 0 }
+}
+operand {
+ name: "ofm"
+ type: UINT8
+ shape { dim: 1 dim: 4 dim: 4 dim: 3 }
+ quant { min: 0 max: 255 scale: 1.0 zero_point: 0 }
+}
+operation {
+ type: "Add"
+ input: "ifm1"
+ input: "ifm2"
+ output: "ofm"
+ add_options {
+ activation: NONE
+ }
+}
+input: "ifm1"
+input: "ifm2"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Add_U8_000/test.reverse b/res/TensorFlowLiteRecipes/Add_U8_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Add_U8_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/ArgMax_000/test.recipe b/res/TensorFlowLiteRecipes/ArgMax_000/test.recipe
new file mode 100644
index 000000000..2883e1853
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/ArgMax_000/test.recipe
@@ -0,0 +1,30 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 4 }
+}
+operand {
+ name: "ofm"
+ type: INT64
+ shape { }
+}
+operand {
+ name: "argmax/dim"
+ type: INT32
+ shape { }
+ filler {
+ tag: "explicit"
+ arg: "0"
+ }
+}
+operation {
+ type: "ArgMax"
+ argmax_options {
+ output_type: INT64
+ }
+ input: "ifm"
+ input: "argmax/dim"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/ArgMax_000/test.reverse b/res/TensorFlowLiteRecipes/ArgMax_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/ArgMax_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/ArgMax_001/test.recipe b/res/TensorFlowLiteRecipes/ArgMax_001/test.recipe
new file mode 100644
index 000000000..1f3961cae
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/ArgMax_001/test.recipe
@@ -0,0 +1,30 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 4 dim: 5 }
+}
+operand {
+ name: "ofm"
+ type: INT64
+ shape { dim: 5 }
+}
+operand {
+ name: "argmax/dim"
+ type: INT32
+ shape { }
+ filler {
+ tag: "explicit"
+ arg: "0"
+ }
+}
+operation {
+ type: "ArgMax"
+ argmax_options {
+ output_type: INT64
+ }
+ input: "ifm"
+ input: "argmax/dim"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/ArgMax_001/test.reverse b/res/TensorFlowLiteRecipes/ArgMax_001/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/ArgMax_001/test.reverse
diff --git a/res/TensorFlowLiteRecipes/ArgMax_002/test.recipe b/res/TensorFlowLiteRecipes/ArgMax_002/test.recipe
new file mode 100644
index 000000000..56d951f3d
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/ArgMax_002/test.recipe
@@ -0,0 +1,30 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 4 dim: 5 }
+}
+operand {
+ name: "ofm"
+ type: INT64
+ shape { dim: 4 }
+}
+operand {
+ name: "argmax/dim"
+ type: INT32
+ shape { }
+ filler {
+ tag: "explicit"
+ arg: "1"
+ }
+}
+operation {
+ type: "ArgMax"
+ argmax_options {
+ output_type: INT64
+ }
+ input: "ifm"
+ input: "argmax/dim"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/ArgMax_002/test.reverse b/res/TensorFlowLiteRecipes/ArgMax_002/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/ArgMax_002/test.reverse
diff --git a/res/TensorFlowLiteRecipes/ArgMax_003/test.recipe b/res/TensorFlowLiteRecipes/ArgMax_003/test.recipe
new file mode 100644
index 000000000..cb34e3824
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/ArgMax_003/test.recipe
@@ -0,0 +1,30 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 4 dim: 5 dim: 6 }
+}
+operand {
+ name: "ofm"
+ type: INT64
+ shape { dim: 4 dim: 6 }
+}
+operand {
+ name: "argmax/dim"
+ type: INT32
+ shape { }
+ filler {
+ tag: "explicit"
+ arg: "1"
+ }
+}
+operation {
+ type: "ArgMax"
+ argmax_options {
+ output_type: INT64
+ }
+ input: "ifm"
+ input: "argmax/dim"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/ArgMax_003/test.reverse b/res/TensorFlowLiteRecipes/ArgMax_003/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/ArgMax_003/test.reverse
diff --git a/res/TensorFlowLiteRecipes/ArgMax_U8_000/test.recipe b/res/TensorFlowLiteRecipes/ArgMax_U8_000/test.recipe
new file mode 100644
index 000000000..12e2b250b
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/ArgMax_U8_000/test.recipe
@@ -0,0 +1,31 @@
+operand {
+ name: "ifm"
+ type: UINT8
+ shape { dim: 4 }
+ quant { min: 0 max: 255 scale: 1.0 zero_point: 0 }
+}
+operand {
+ name: "ofm"
+ type: INT64
+ shape { }
+}
+operand {
+ name: "argmax/dim"
+ type: INT32
+ shape { }
+ filler {
+ tag: "explicit"
+ arg: "0"
+ }
+}
+operation {
+ type: "ArgMax"
+ argmax_options {
+ output_type: INT64
+ }
+ input: "ifm"
+ input: "argmax/dim"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/ArgMax_U8_000/test.reverse b/res/TensorFlowLiteRecipes/ArgMax_U8_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/ArgMax_U8_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/ArgMax_U8_001/test.recipe b/res/TensorFlowLiteRecipes/ArgMax_U8_001/test.recipe
new file mode 100644
index 000000000..78a519304
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/ArgMax_U8_001/test.recipe
@@ -0,0 +1,31 @@
+operand {
+ name: "ifm"
+ type: UINT8
+ shape { dim: 4 dim: 5 }
+ quant { min: 0 max: 255 scale: 1.0 zero_point: 0 }
+}
+operand {
+ name: "ofm"
+ type: INT64
+ shape { dim: 5 }
+}
+operand {
+ name: "argmax/dim"
+ type: INT32
+ shape { }
+ filler {
+ tag: "explicit"
+ arg: "0"
+ }
+}
+operation {
+ type: "ArgMax"
+ argmax_options {
+ output_type: INT64
+ }
+ input: "ifm"
+ input: "argmax/dim"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/ArgMax_U8_001/test.reverse b/res/TensorFlowLiteRecipes/ArgMax_U8_001/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/ArgMax_U8_001/test.reverse
diff --git a/res/TensorFlowLiteRecipes/ArgMax_U8_002/test.recipe b/res/TensorFlowLiteRecipes/ArgMax_U8_002/test.recipe
new file mode 100644
index 000000000..3f1e5ec53
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/ArgMax_U8_002/test.recipe
@@ -0,0 +1,31 @@
+operand {
+ name: "ifm"
+ type: UINT8
+ shape { dim: 4 dim: 5 }
+ quant { min: 0 max: 255 scale: 1.0 zero_point: 0 }
+}
+operand {
+ name: "ofm"
+ type: INT64
+ shape { dim: 4 }
+}
+operand {
+ name: "argmax/dim"
+ type: INT32
+ shape { }
+ filler {
+ tag: "explicit"
+ arg: "1"
+ }
+}
+operation {
+ type: "ArgMax"
+ argmax_options {
+ output_type: INT64
+ }
+ input: "ifm"
+ input: "argmax/dim"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/ArgMax_U8_002/test.reverse b/res/TensorFlowLiteRecipes/ArgMax_U8_002/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/ArgMax_U8_002/test.reverse
diff --git a/res/TensorFlowLiteRecipes/ArgMax_U8_003/test.recipe b/res/TensorFlowLiteRecipes/ArgMax_U8_003/test.recipe
new file mode 100644
index 000000000..2ef292045
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/ArgMax_U8_003/test.recipe
@@ -0,0 +1,31 @@
+operand {
+ name: "ifm"
+ type: UINT8
+ shape { dim: 4 dim: 5 dim: 6 }
+ quant { min: 0 max: 255 scale: 1.0 zero_point: 0 }
+}
+operand {
+ name: "ofm"
+ type: INT64
+ shape { dim: 4 dim: 6 }
+}
+operand {
+ name: "argmax/dim"
+ type: INT32
+ shape { }
+ filler {
+ tag: "explicit"
+ arg: "1"
+ }
+}
+operation {
+ type: "ArgMax"
+ argmax_options {
+ output_type: INT64
+ }
+ input: "ifm"
+ input: "argmax/dim"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/ArgMax_U8_003/test.reverse b/res/TensorFlowLiteRecipes/ArgMax_U8_003/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/ArgMax_U8_003/test.reverse
diff --git a/res/TensorFlowLiteRecipes/AveragePool2D_000/test.recipe b/res/TensorFlowLiteRecipes/AveragePool2D_000/test.recipe
new file mode 100644
index 000000000..746c34334
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/AveragePool2D_000/test.recipe
@@ -0,0 +1,24 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 8 dim: 8 dim: 1 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 7 dim: 7 dim: 1 }
+}
+operation {
+ type: "AveragePool2D"
+ averagepool2d_options {
+ padding: VALID
+ stride_w: 1
+ stride_h: 1
+ filter_width: 2
+ filter_height: 2
+ }
+ input: "ifm"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/AveragePool2D_000/test.reverse b/res/TensorFlowLiteRecipes/AveragePool2D_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/AveragePool2D_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/BatchToSpaceND_000/test.recipe b/res/TensorFlowLiteRecipes/BatchToSpaceND_000/test.recipe
new file mode 100644
index 000000000..3d7c28cb0
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/BatchToSpaceND_000/test.recipe
@@ -0,0 +1,38 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 4 dim: 1 dim: 1 dim: 1 }
+}
+operand {
+ name: "crops"
+ type: INT32
+ shape { dim: 2 dim: 2 }
+ filler {
+ tag: "explicit"
+ arg: "0" arg: "0"
+ arg: "0" arg: "0"
+ }
+}
+operand {
+ name: "block_shape"
+ type: INT32
+ shape { dim: 2 }
+ filler {
+ tag: "explicit"
+ arg: "2" arg: "2"
+ }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 2 dim: 2 dim: 1 }
+}
+operation {
+ type: "BatchToSpaceND"
+ input: "ifm"
+ input: "block_shape"
+ input: "crops"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/BatchToSpaceND_000/test.reverse b/res/TensorFlowLiteRecipes/BatchToSpaceND_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/BatchToSpaceND_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/Concatenation_000/test.recipe b/res/TensorFlowLiteRecipes/Concatenation_000/test.recipe
new file mode 100644
index 000000000..35641bd07
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Concatenation_000/test.recipe
@@ -0,0 +1,28 @@
+operand {
+ name: "ifm1"
+ type: FLOAT32
+ shape { dim: 1 dim: 4 dim: 4 dim: 1 }
+}
+operand {
+ name: "ifm2"
+ type: FLOAT32
+ shape { dim: 1 dim: 4 dim: 4 dim: 2 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 4 dim: 4 dim: 3 }
+}
+operation {
+ type: "Concatenation"
+ concatenation_options {
+ axis: 3
+ activation: NONE
+ }
+ input: "ifm1"
+ input: "ifm2"
+ output: "ofm"
+}
+input: "ifm1"
+input: "ifm2"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Concatenation_000/test.reverse b/res/TensorFlowLiteRecipes/Concatenation_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Concatenation_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/Concatenation_U8_000/test.recipe b/res/TensorFlowLiteRecipes/Concatenation_U8_000/test.recipe
new file mode 100644
index 000000000..3ae21e356
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Concatenation_U8_000/test.recipe
@@ -0,0 +1,31 @@
+operand {
+ name: "ifm1"
+ type: UINT8
+ shape { dim: 1 dim: 4 dim: 4 dim: 1 }
+ quant { min: 0 max: 255 scale: 1.0 zero_point: 0 }
+}
+operand {
+ name: "ifm2"
+ type: UINT8
+ shape { dim: 1 dim: 4 dim: 4 dim: 2 }
+ quant { min: 0 max: 255 scale: 1.0 zero_point: 0 }
+}
+operand {
+ name: "ofm"
+ type: UINT8
+ shape { dim: 1 dim: 4 dim: 4 dim: 3 }
+ quant { min: 0 max: 255 scale: 1.0 zero_point: 0 }
+}
+operation {
+ type: "Concatenation"
+ concatenation_options {
+ axis: 3
+ activation: NONE
+ }
+ input: "ifm1"
+ input: "ifm2"
+ output: "ofm"
+}
+input: "ifm1"
+input: "ifm2"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Conv2D_000/test.recipe b/res/TensorFlowLiteRecipes/Conv2D_000/test.recipe
new file mode 100644
index 000000000..9cf8a0f69
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Conv2D_000/test.recipe
@@ -0,0 +1,44 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ker"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 1 dim: 2 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "bias"
+ type: FLOAT32
+ shape { dim: 1 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 1 }
+}
+operation {
+ type: "Conv2D"
+ conv2d_options {
+ padding: VALID
+ stride_w: 1
+ stride_h: 1
+ }
+ input: "ifm"
+ input: "ker"
+ input: "bias"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Conv2D_000/test.reverse b/res/TensorFlowLiteRecipes/Conv2D_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Conv2D_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/Conv2D_001/test.recipe b/res/TensorFlowLiteRecipes/Conv2D_001/test.recipe
new file mode 100644
index 000000000..bc41a3fc0
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Conv2D_001/test.recipe
@@ -0,0 +1,44 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ker"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 1 dim: 2 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "bias"
+ type: FLOAT32
+ shape { dim: 1 }
+ filler {
+ tag: "constant"
+ arg: "1.1"
+ }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 1 }
+}
+operation {
+ type: "Conv2D"
+ conv2d_options {
+ padding: VALID
+ stride_w: 1
+ stride_h: 1
+ }
+ input: "ifm"
+ input: "ker"
+ input: "bias"
+ output: "ofm"
+}
+input: "ifm"
+input: "ker"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Conv2D_001/test.reverse b/res/TensorFlowLiteRecipes/Conv2D_001/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Conv2D_001/test.reverse
diff --git a/res/TensorFlowLiteRecipes/Conv2D_002/test.recipe b/res/TensorFlowLiteRecipes/Conv2D_002/test.recipe
new file mode 100644
index 000000000..1901ead3b
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Conv2D_002/test.recipe
@@ -0,0 +1,45 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 4 dim: 4 dim: 1 }
+}
+operand {
+ name: "ker"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 1 dim: 1 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "bias"
+ type: FLOAT32
+ shape { dim: 1 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 2 dim: 2 dim: 1 }
+}
+operation {
+ type: "Conv2D"
+ conv2d_options {
+ padding: SAME
+ stride_w: 2
+ stride_h: 2
+ activation: RELU
+ }
+ input: "ifm"
+ input: "ker"
+ input: "bias"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Conv2D_U8_000/test.recipe b/res/TensorFlowLiteRecipes/Conv2D_U8_000/test.recipe
new file mode 100644
index 000000000..9a8e47853
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Conv2D_U8_000/test.recipe
@@ -0,0 +1,48 @@
+operand {
+ name: "ifm"
+ type: UINT8
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+ quant { min: 0 max: 1 scale: 0.004 zero_point: 0 }
+}
+operand {
+ name: "ker"
+ type: UINT8
+ shape { dim: 1 dim: 1 dim: 1 dim: 2 }
+ filler {
+ tag: "gaussian"
+ arg: "102"
+ arg: "32"
+ }
+ quant { min: -4 max: 6 scale: 0.039215686 zero_point: 102 }
+}
+operand {
+ name: "bias"
+ type: INT32
+ shape { dim: 1 }
+ filler {
+ tag: "gaussian"
+ arg: "0"
+ arg: "1024"
+ }
+ quant { scale: 0.00015686276310589164 zero_point: 0 }
+}
+operand {
+ name: "ofm"
+ type: UINT8
+ shape { dim: 1 dim: 3 dim: 3 dim: 1 }
+ quant { min: -4 max: 6 scale: 0.039215686 zero_point: 102 }
+}
+operation {
+ type: "Conv2D"
+ conv2d_options {
+ padding: VALID
+ stride_w: 1
+ stride_h: 1
+ }
+ input: "ifm"
+ input: "ker"
+ input: "bias"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Cos_000/test.recipe b/res/TensorFlowLiteRecipes/Cos_000/test.recipe
new file mode 100644
index 000000000..6fa8ac9b8
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Cos_000/test.recipe
@@ -0,0 +1,17 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operation {
+ type: "Cos"
+ input: "ifm"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Cos_000/test.reverse b/res/TensorFlowLiteRecipes/Cos_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Cos_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/DepthwiseConv2D_000/test.recipe b/res/TensorFlowLiteRecipes/DepthwiseConv2D_000/test.recipe
new file mode 100644
index 000000000..17a3b06c7
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/DepthwiseConv2D_000/test.recipe
@@ -0,0 +1,41 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 64 dim: 64 dim: 8 }
+}
+operand {
+ name: "ker"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 8 }
+}
+operand {
+ name: "bias"
+ type: FLOAT32
+ shape { dim: 8 }
+ filler {
+ tag: "constant"
+ arg: "1.1"
+ }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 64 dim: 64 dim: 8 }
+}
+operation {
+ type: "DepthwiseConv2D"
+ depthwiseconv2d_options {
+ padding: SAME
+ stride_w: 1
+ stride_h: 1
+ depth_multiplier: 1
+    activation: RELU6
+ }
+ input: "ifm"
+ input: "ker"
+ input: "bias"
+ output: "ofm"
+}
+input: "ifm"
+input: "ker"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/DepthwiseConv2D_000/test.reverse b/res/TensorFlowLiteRecipes/DepthwiseConv2D_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/DepthwiseConv2D_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/DepthwiseConv2D_U8_000/test.recipe b/res/TensorFlowLiteRecipes/DepthwiseConv2D_U8_000/test.recipe
new file mode 100644
index 000000000..9a4ddf155
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/DepthwiseConv2D_U8_000/test.recipe
@@ -0,0 +1,46 @@
+operand {
+ name: "ifm"
+ type: UINT8
+ shape { dim: 1 dim: 64 dim: 64 dim: 8 }
+ quant { min: 0 max: 255 scale: 1.0 zero_point: 0 }
+}
+operand {
+ name: "ker"
+ type: UINT8
+ shape { dim: 1 dim: 3 dim: 3 dim: 8 }
+ quant { min: 0 max: 255 scale: 1.0 zero_point: 0 }
+}
+operand {
+ name: "bias"
+ type: INT32
+ shape { dim: 8 }
+ filler {
+ tag: "gaussian"
+ arg: "0"
+ arg: "1024"
+ }
+ quant { min: 0 max: 255 scale: 1.0 zero_point: 0 }
+}
+operand {
+ name: "ofm"
+ type: UINT8
+ shape { dim: 1 dim: 64 dim: 64 dim: 8 }
+ quant { min: 0 max: 255 scale: 1.0 zero_point: 0 }
+}
+operation {
+ type: "DepthwiseConv2D"
+ depthwiseconv2d_options {
+ padding: SAME
+ stride_w: 1
+ stride_h: 1
+ depth_multiplier: 1
+    activation: RELU6
+ }
+ input: "ifm"
+ input: "ker"
+ input: "bias"
+ output: "ofm"
+}
+input: "ifm"
+input: "ker"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/DepthwiseConv2D_U8_000/test.reverse b/res/TensorFlowLiteRecipes/DepthwiseConv2D_U8_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/DepthwiseConv2D_U8_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/Div_000/test.recipe b/res/TensorFlowLiteRecipes/Div_000/test.recipe
new file mode 100644
index 000000000..4fb76d467
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Div_000/test.recipe
@@ -0,0 +1,27 @@
+operand {
+ name: "ifm1"
+ type: FLOAT32
+ shape { dim: 1 dim: 4 dim: 4 dim: 3 }
+}
+operand {
+ name: "ifm2"
+ type: FLOAT32
+ shape { dim: 1 dim: 4 dim: 4 dim: 3 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 4 dim: 4 dim: 3 }
+}
+operation {
+ type: "Div"
+ input: "ifm1"
+ input: "ifm2"
+ output: "ofm"
+ div_options {
+ activation: NONE
+ }
+}
+input: "ifm1"
+input: "ifm2"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Div_000/test.reverse b/res/TensorFlowLiteRecipes/Div_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Div_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/Equal_000/test.recipe b/res/TensorFlowLiteRecipes/Equal_000/test.recipe
new file mode 100644
index 000000000..dcc81f9f5
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Equal_000/test.recipe
@@ -0,0 +1,26 @@
+operand {
+ name: "ifm1"
+ type: FLOAT32
+ shape { dim: 1 dim: 4 dim: 4 dim: 3 }
+}
+operand {
+ name: "ifm2"
+ type: FLOAT32
+ shape { dim: 1 dim: 4 dim: 4 dim: 3 }
+}
+operand {
+ name: "ofm"
+ type: BOOL
+ shape { dim: 1 dim: 4 dim: 4 dim: 3 }
+}
+operation {
+ type: "Equal"
+ equal_options {
+ }
+ input: "ifm1"
+ input: "ifm2"
+ output: "ofm"
+}
+input: "ifm1"
+input: "ifm2"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Equal_000/test.reverse b/res/TensorFlowLiteRecipes/Equal_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Equal_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/Exp_000/test.recipe b/res/TensorFlowLiteRecipes/Exp_000/test.recipe
new file mode 100644
index 000000000..06e0054db
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Exp_000/test.recipe
@@ -0,0 +1,17 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operation {
+ type: "Exp"
+ input: "ifm"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Exp_000/test.reverse b/res/TensorFlowLiteRecipes/Exp_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Exp_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/FullyConnected_000/test.recipe b/res/TensorFlowLiteRecipes/FullyConnected_000/test.recipe
new file mode 100644
index 000000000..dca4c09f0
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/FullyConnected_000/test.recipe
@@ -0,0 +1,34 @@
+operand {
+ name: "in"
+ type: FLOAT32
+ shape { dim: 1 dim: 64 }
+}
+operand {
+ name: "weight"
+ type: FLOAT32
+ shape { dim: 8 dim: 64 }
+}
+operand {
+ name: "bias"
+ type: FLOAT32
+ shape { dim: 8 }
+}
+operand {
+ name: "out"
+ type: FLOAT32
+ shape { dim: 1 dim: 8 }
+}
+operation {
+ type: "FullyConnected"
+ fullyconnected_options {
+ activation: NONE
+ }
+ input: "in"
+ input: "weight"
+ input: "bias"
+ output: "out"
+}
+input: "in"
+input: "weight"
+input: "bias"
+output: "out"
diff --git a/res/TensorFlowLiteRecipes/FullyConnected_000/test.reverse b/res/TensorFlowLiteRecipes/FullyConnected_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/FullyConnected_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/FullyConnected_001/test.recipe b/res/TensorFlowLiteRecipes/FullyConnected_001/test.recipe
new file mode 100644
index 000000000..e404f759f
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/FullyConnected_001/test.recipe
@@ -0,0 +1,34 @@
+operand {
+ name: "in"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 1 dim: 4 }
+}
+operand {
+ name: "weight"
+ type: FLOAT32
+ shape { dim: 2 dim: 4 }
+}
+operand {
+ name: "bias"
+ type: FLOAT32
+ shape { dim: 2 }
+}
+operand {
+ name: "out"
+ type: FLOAT32
+ shape { dim: 1 dim: 2 }
+}
+operation {
+ type: "FullyConnected"
+ fullyconnected_options {
+ activation: NONE
+ }
+ input: "in"
+ input: "weight"
+ input: "bias"
+ output: "out"
+}
+input: "in"
+input: "weight"
+input: "bias"
+output: "out"
diff --git a/res/TensorFlowLiteRecipes/FullyConnected_001/test.reverse b/res/TensorFlowLiteRecipes/FullyConnected_001/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/FullyConnected_001/test.reverse
diff --git a/res/TensorFlowLiteRecipes/FullyConnected_U8_000/test.recipe b/res/TensorFlowLiteRecipes/FullyConnected_U8_000/test.recipe
new file mode 100644
index 000000000..3c996218f
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/FullyConnected_U8_000/test.recipe
@@ -0,0 +1,35 @@
+operand {
+ name: "in"
+ type: FLOAT32
+ shape { dim: 1 dim: 64 }
+}
+operand {
+ name: "weight"
+ type: UINT8
+ shape { dim: 8 dim: 64 }
+ quant { min: 0 max: 1 scale: 0.000553869 zero_point: 0 }
+}
+operand {
+ name: "bias"
+ type: FLOAT32
+ shape { dim: 8 }
+}
+operand {
+ name: "out"
+ type: FLOAT32
+ shape { dim: 1 dim: 8 }
+}
+operation {
+ type: "FullyConnected"
+ fullyconnected_options {
+ activation: NONE
+ }
+ input: "in"
+ input: "weight"
+ input: "bias"
+ output: "out"
+}
+input: "in"
+input: "weight"
+input: "bias"
+output: "out"
diff --git a/res/TensorFlowLiteRecipes/FullyConnected_U8_000/test.reverse b/res/TensorFlowLiteRecipes/FullyConnected_U8_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/FullyConnected_U8_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/LogicalNot_000/test.recipe b/res/TensorFlowLiteRecipes/LogicalNot_000/test.recipe
new file mode 100644
index 000000000..da02bd9af
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/LogicalNot_000/test.recipe
@@ -0,0 +1,17 @@
+operand {
+ name: "ifm"
+ type: BOOL
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ofm"
+ type: BOOL
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operation {
+ type: "LogicalNot"
+ input: "ifm"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/LogicalNot_000/test.reverse b/res/TensorFlowLiteRecipes/LogicalNot_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/LogicalNot_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/LogicalOr_000/test.recipe b/res/TensorFlowLiteRecipes/LogicalOr_000/test.recipe
new file mode 100644
index 000000000..636c8b857
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/LogicalOr_000/test.recipe
@@ -0,0 +1,24 @@
+operand {
+ name: "ifm1"
+ type: BOOL
+ shape { dim: 1 dim: 4 dim: 4 dim: 3 }
+}
+operand {
+ name: "ifm2"
+ type: BOOL
+ shape { dim: 1 dim: 4 dim: 4 dim: 3 }
+}
+operand {
+ name: "ofm"
+ type: BOOL
+ shape { dim: 1 dim: 4 dim: 4 dim: 3 }
+}
+operation {
+ type: "LogicalOr"
+ input: "ifm1"
+ input: "ifm2"
+ output: "ofm"
+}
+input: "ifm1"
+input: "ifm2"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/LogicalOr_000/test.reverse b/res/TensorFlowLiteRecipes/LogicalOr_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/LogicalOr_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/MaxPool2D_000/test.recipe b/res/TensorFlowLiteRecipes/MaxPool2D_000/test.recipe
new file mode 100644
index 000000000..718630f08
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/MaxPool2D_000/test.recipe
@@ -0,0 +1,24 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 8 dim: 8 dim: 1 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 7 dim: 7 dim: 1 }
+}
+operation {
+ type: "MaxPool2D"
+ maxpool2d_options {
+ padding: VALID
+ stride_w: 1
+ stride_h: 1
+ filter_width: 2
+ filter_height: 2
+ }
+ input: "ifm"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/MaxPool2D_000/test.reverse b/res/TensorFlowLiteRecipes/MaxPool2D_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/MaxPool2D_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/MaxPool2D_U8_000/test.recipe b/res/TensorFlowLiteRecipes/MaxPool2D_U8_000/test.recipe
new file mode 100644
index 000000000..a736988e9
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/MaxPool2D_U8_000/test.recipe
@@ -0,0 +1,26 @@
+operand {
+ name: "ifm"
+ type: UINT8
+ shape { dim: 1 dim: 8 dim: 8 dim: 1 }
+ quant { min: 0 max: 1 scale: 0.004 zero_point: 0 }
+}
+operand {
+ name: "ofm"
+ type: UINT8
+ shape { dim: 1 dim: 7 dim: 7 dim: 1 }
+ quant { min: 0 max: 1 scale: 0.004 zero_point: 0 }
+}
+operation {
+ type: "MaxPool2D"
+ maxpool2d_options {
+ padding: VALID
+ stride_w: 1
+ stride_h: 1
+ filter_width: 2
+ filter_height: 2
+ }
+ input: "ifm"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Mean_000/test.recipe b/res/TensorFlowLiteRecipes/Mean_000/test.recipe
new file mode 100644
index 000000000..d383997d3
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Mean_000/test.recipe
@@ -0,0 +1,27 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 8 dim: 8 dim: 4 }
+}
+operand {
+ name: "reduction_indices"
+ type: INT32
+ shape { dim: 1 }
+ filler { tag: "explicit" arg: "-1" }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 8 dim: 8 dim: 1 }
+}
+operation {
+ type: "Mean"
+ mean_options {
+ keep_dims: true
+ }
+ input: "ifm"
+ input: "reduction_indices"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Mean_000/test.reverse b/res/TensorFlowLiteRecipes/Mean_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Mean_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/Mul_000/test.recipe b/res/TensorFlowLiteRecipes/Mul_000/test.recipe
new file mode 100644
index 000000000..43ca30dec
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Mul_000/test.recipe
@@ -0,0 +1,27 @@
+operand {
+ name: "ifm1"
+ type: FLOAT32
+ shape { dim: 1 dim: 4 dim: 4 dim: 3 }
+}
+operand {
+ name: "ifm2"
+ type: FLOAT32
+ shape { dim: 1 dim: 4 dim: 4 dim: 3 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 4 dim: 4 dim: 3 }
+}
+operation {
+ type: "Mul"
+ input: "ifm1"
+ input: "ifm2"
+ output: "ofm"
+ mul_options {
+ activation: NONE
+ }
+}
+input: "ifm1"
+input: "ifm2"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Mul_U8_000/test.recipe b/res/TensorFlowLiteRecipes/Mul_U8_000/test.recipe
new file mode 100644
index 000000000..2fbf96d29
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Mul_U8_000/test.recipe
@@ -0,0 +1,30 @@
+operand {
+ name: "ifm1"
+ type: UINT8
+ shape { dim: 1 dim: 4 dim: 4 dim: 3 }
+ quant { min: 0 max: 255 scale: 0.5 zero_point: 0 }
+}
+operand {
+ name: "ifm2"
+ type: UINT8
+ shape { dim: 1 dim: 4 dim: 4 dim: 3 }
+ quant { min: 0 max: 255 scale: 0.5 zero_point: 0 }
+}
+operand {
+ name: "ofm"
+ type: UINT8
+ shape { dim: 1 dim: 4 dim: 4 dim: 3 }
+ quant { min: 0 max: 255 scale: 0.5 zero_point: 0 }
+}
+operation {
+ type: "Mul"
+ input: "ifm1"
+ input: "ifm2"
+ output: "ofm"
+ mul_options {
+ activation: NONE
+ }
+}
+input: "ifm1"
+input: "ifm2"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Pack_000/test.recipe b/res/TensorFlowLiteRecipes/Pack_000/test.recipe
new file mode 100644
index 000000000..5d9141d9e
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Pack_000/test.recipe
@@ -0,0 +1,28 @@
+operand {
+ name: "input"
+ type: FLOAT32
+ shape { dim: 2 dim: 4 dim: 3 }
+}
+operand {
+ name: "input_1"
+ type: FLOAT32
+ shape { dim: 2 dim: 4 dim: 3 }
+}
+operand {
+ name: "stack_4d"
+ type: FLOAT32
+ shape { dim: 2 dim: 2 dim: 4 dim: 3 }
+}
+operation {
+ type: "Pack"
+ pack_options {
+ values_count: 2,
+ axis: 1
+ }
+ input: "input"
+ input: "input_1"
+ output: "stack_4d"
+}
+input: "input"
+input: "input_1"
+output: "stack_4d"
diff --git a/res/TensorFlowLiteRecipes/Pack_000/test.reverse b/res/TensorFlowLiteRecipes/Pack_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Pack_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/Pack_U8_000/test.recipe b/res/TensorFlowLiteRecipes/Pack_U8_000/test.recipe
new file mode 100644
index 000000000..f00199980
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Pack_U8_000/test.recipe
@@ -0,0 +1,31 @@
+operand {
+ name: "input"
+ type: UINT8
+ shape { dim: 2 dim: 4 dim: 3 }
+ quant { min: 0 max: 255 scale: 1.0 zero_point: 0 }
+}
+operand {
+ name: "input_1"
+ type: UINT8
+ shape { dim: 2 dim: 4 dim: 3 }
+ quant { min: 0 max: 255 scale: 1.0 zero_point: 0 }
+}
+operand {
+ name: "stack_4d"
+ type: UINT8
+ shape { dim: 2 dim: 2 dim: 4 dim: 3 }
+ quant { min: 0 max: 255 scale: 1.0 zero_point: 0 }
+}
+operation {
+ type: "Pack"
+ pack_options {
+ values_count: 2,
+ axis: 1
+ }
+ input: "input"
+ input: "input_1"
+ output: "stack_4d"
+}
+input: "input"
+input: "input_1"
+output: "stack_4d"
diff --git a/res/TensorFlowLiteRecipes/Pack_U8_000/test.reverse b/res/TensorFlowLiteRecipes/Pack_U8_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Pack_U8_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/Pad_000/test.recipe b/res/TensorFlowLiteRecipes/Pad_000/test.recipe
new file mode 100644
index 000000000..2cc980b9c
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Pad_000/test.recipe
@@ -0,0 +1,30 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "padding"
+ type: INT32
+ shape { dim: 4 dim: 2 }
+ filler {
+ tag: "explicit"
+ arg: "0" arg: "0"
+ arg: "1" arg: "1"
+ arg: "2" arg: "2"
+ arg: "0" arg: "0"
+ }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 5 dim: 7 dim: 2 }
+}
+operation {
+ type: "Pad"
+ input: "ifm"
+ input: "padding"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Pad_000/test.reverse b/res/TensorFlowLiteRecipes/Pad_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Pad_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/Pad_U8_000/test.recipe b/res/TensorFlowLiteRecipes/Pad_U8_000/test.recipe
new file mode 100644
index 000000000..7a835bdb8
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Pad_U8_000/test.recipe
@@ -0,0 +1,32 @@
+operand {
+ name: "ifm"
+ type: UINT8
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+ quant { min: 0 max: 255 scale: 1.0 zero_point: 0 }
+}
+operand {
+ name: "padding"
+ type: INT32
+ shape { dim: 4 dim: 2 }
+ filler {
+ tag: "explicit"
+ arg: "0" arg: "0"
+ arg: "1" arg: "1"
+ arg: "2" arg: "2"
+ arg: "0" arg: "0"
+ }
+}
+operand {
+ name: "ofm"
+ type: UINT8
+ shape { dim: 1 dim: 5 dim: 7 dim: 2 }
+ quant { min: 0 max: 255 scale: 1.0 zero_point: 0 }
+}
+operation {
+ type: "Pad"
+ input: "ifm"
+ input: "padding"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Pad_U8_000/test.reverse b/res/TensorFlowLiteRecipes/Pad_U8_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Pad_U8_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/Quantization_000/test.recipe b/res/TensorFlowLiteRecipes/Quantization_000/test.recipe
new file mode 100644
index 000000000..be5d222a2
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Quantization_000/test.recipe
@@ -0,0 +1,46 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+ quant { min: 0 max: 128 scale: 2 zero_point: 2 }
+}
+operand {
+ name: "ker"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 1 dim: 2 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "bias"
+ type: FLOAT32
+ shape { dim: 1 }
+ filler {
+ tag: "gaussian"
+ arg: "0.0"
+ arg: "1.0"
+ }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 1 }
+ quant { min: 0 max: 80 scale: 1.5 zero_point: 3 }
+}
+operation {
+ type: "Conv2D"
+ conv2d_options {
+ padding: VALID
+ stride_w: 1
+ stride_h: 1
+ }
+ input: "ifm"
+ input: "ker"
+ input: "bias"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Quantization_000/test.reverse b/res/TensorFlowLiteRecipes/Quantization_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Quantization_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/ReLU6_000/test.recipe b/res/TensorFlowLiteRecipes/ReLU6_000/test.recipe
new file mode 100644
index 000000000..226593593
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/ReLU6_000/test.recipe
@@ -0,0 +1,17 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operation {
+ type: "ReLU6"
+ input: "ifm"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/ReLU6_000/test.reverse b/res/TensorFlowLiteRecipes/ReLU6_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/ReLU6_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/ReLU_000/test.recipe b/res/TensorFlowLiteRecipes/ReLU_000/test.recipe
new file mode 100644
index 000000000..8eaa3602f
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/ReLU_000/test.recipe
@@ -0,0 +1,17 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operation {
+ type: "ReLU"
+ input: "ifm"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/ReLU_000/test.reverse b/res/TensorFlowLiteRecipes/ReLU_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/ReLU_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/Reshape_000/test.recipe b/res/TensorFlowLiteRecipes/Reshape_000/test.recipe
new file mode 100644
index 000000000..cdca58980
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Reshape_000/test.recipe
@@ -0,0 +1,20 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 1 dim: 10 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 10 }
+}
+operation {
+ type: "Reshape"
+ reshape_options {
+ new_shape: 10
+ }
+ input: "ifm"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Reshape_000/test.reverse b/res/TensorFlowLiteRecipes/Reshape_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Reshape_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/Reshape_001/test.recipe b/res/TensorFlowLiteRecipes/Reshape_001/test.recipe
new file mode 100644
index 000000000..bd5213f39
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Reshape_001/test.recipe
@@ -0,0 +1,28 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 1 dim: 1 dim: 10 }
+}
+operand {
+ name: "shape"
+ type: INT32
+ shape { dim: 2 }
+ filler { tag: "explicit" arg: "-1" arg: "10" }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 10 }
+}
+operation {
+ type: "Reshape"
+ reshape_options {
+ new_shape: -1
+ new_shape: 10
+ }
+ input: "ifm"
+ input: "shape"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Reshape_001/test.reverse b/res/TensorFlowLiteRecipes/Reshape_001/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Reshape_001/test.reverse
diff --git a/res/TensorFlowLiteRecipes/Reshape_U8_000/test.recipe b/res/TensorFlowLiteRecipes/Reshape_U8_000/test.recipe
new file mode 100644
index 000000000..5fe10e599
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Reshape_U8_000/test.recipe
@@ -0,0 +1,22 @@
+operand {
+ name: "ifm"
+ type: UINT8
+ shape { dim: 1 dim: 1 dim: 1 dim: 10 }
+ quant { min: 0 max: 255 scale: 1.0 zero_point: 0 }
+}
+operand {
+ name: "ofm"
+ type: UINT8
+ shape { dim: 10 }
+ quant { min: 0 max: 255 scale: 1.0 zero_point: 0 }
+}
+operation {
+ type: "Reshape"
+ reshape_options {
+ new_shape: 10
+ }
+ input: "ifm"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Reshape_U8_000/test.reverse b/res/TensorFlowLiteRecipes/Reshape_U8_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Reshape_U8_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/Rsqrt_000/test.recipe b/res/TensorFlowLiteRecipes/Rsqrt_000/test.recipe
new file mode 100644
index 000000000..ba16f2b6b
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Rsqrt_000/test.recipe
@@ -0,0 +1,17 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operation {
+ type: "Rsqrt"
+ input: "ifm"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Rsqrt_000/test.reverse b/res/TensorFlowLiteRecipes/Rsqrt_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Rsqrt_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/Softmax_000/test.recipe b/res/TensorFlowLiteRecipes/Softmax_000/test.recipe
new file mode 100644
index 000000000..ce9abf555
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Softmax_000/test.recipe
@@ -0,0 +1,20 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operation {
+ type: "Softmax"
+ softmax_options {
+ beta: 0.0
+ }
+ input: "ifm"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Softmax_000/test.reverse b/res/TensorFlowLiteRecipes/Softmax_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Softmax_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/Softmax_U8_000/test.recipe b/res/TensorFlowLiteRecipes/Softmax_U8_000/test.recipe
new file mode 100644
index 000000000..a753ca437
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Softmax_U8_000/test.recipe
@@ -0,0 +1,22 @@
+operand {
+ name: "ifm"
+ type: UINT8
+ shape { dim: 1 dim: 1001 }
+ quant { min: -6.02353 max: 5.97647 scale: 0.0470588 zero_point: 128 }
+}
+operand {
+ name: "ofm"
+ type: UINT8
+ shape { dim: 1 dim: 1001 }
+ quant { min: 0 max: 0.996094 scale: 0.00390625 zero_point: 0 }
+}
+operation {
+ type: "Softmax"
+ softmax_options {
+ beta: 1.0
+ }
+ input: "ifm"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Softmax_U8_000/test.reverse b/res/TensorFlowLiteRecipes/Softmax_U8_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Softmax_U8_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/Sqrt_000/test.recipe b/res/TensorFlowLiteRecipes/Sqrt_000/test.recipe
new file mode 100644
index 000000000..1754f9a58
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Sqrt_000/test.recipe
@@ -0,0 +1,18 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+ filler { tag: "constant" arg: "3.5" }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 3 dim: 3 dim: 2 }
+}
+operation {
+ type: "Sqrt"
+ input: "ifm"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Sqrt_000/test.reverse b/res/TensorFlowLiteRecipes/Sqrt_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Sqrt_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/Sub_000/test.recipe b/res/TensorFlowLiteRecipes/Sub_000/test.recipe
new file mode 100644
index 000000000..c934bb8aa
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Sub_000/test.recipe
@@ -0,0 +1,27 @@
+operand {
+ name: "ifm1"
+ type: FLOAT32
+ shape { dim: 1 dim: 5 dim: 2 dim: 3 }
+}
+operand {
+ name: "ifm2"
+ type: FLOAT32
+ shape { dim: 1 dim: 5 dim: 2 dim: 3 }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 1 dim: 5 dim: 2 dim: 3 }
+}
+operation {
+ type: "Sub"
+ sub_options {
+ activation: 0
+ }
+ input: "ifm1"
+ input: "ifm2"
+ output: "ofm"
+}
+input: "ifm1"
+input: "ifm2"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Sub_000/test.reverse b/res/TensorFlowLiteRecipes/Sub_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Sub_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/Sub_001/test.recipe b/res/TensorFlowLiteRecipes/Sub_001/test.recipe
new file mode 100644
index 000000000..09f46e4f6
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Sub_001/test.recipe
@@ -0,0 +1,42 @@
+operand {
+ name: "ifm1"
+ type: FLOAT32
+ shape { dim: 1 dim: 5 dim: 2 dim: 3 }
+}
+operand {
+ name: "ifm2"
+ type: FLOAT32
+ shape { dim: 1 dim: 5 dim: 2 dim: 3 }
+}
+operand {
+ name: "ofm1"
+ type: FLOAT32
+ shape { dim: 1 dim: 5 dim: 2 dim: 3 }
+}
+operand {
+ name: "ofm2"
+ type: FLOAT32
+ shape { dim: 1 dim: 5 dim: 2 dim: 3 }
+}
+operation {
+ type: "Sub"
+ sub_options {
+ activation: 0
+ }
+ input: "ifm1"
+ input: "ifm2"
+ output: "ofm1"
+}
+operation {
+ type: "Sub"
+ sub_options {
+ activation: 0
+ }
+ input: "ifm1"
+ input: "ifm2"
+ output: "ofm2"
+}
+input: "ifm1"
+input: "ifm2"
+output: "ofm1"
+output: "ofm2"
diff --git a/res/TensorFlowLiteRecipes/Sub_001/test.reverse b/res/TensorFlowLiteRecipes/Sub_001/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Sub_001/test.reverse
diff --git a/res/TensorFlowLiteRecipes/Sub_U8_000/test.recipe b/res/TensorFlowLiteRecipes/Sub_U8_000/test.recipe
new file mode 100644
index 000000000..eeeb8aacc
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Sub_U8_000/test.recipe
@@ -0,0 +1,30 @@
+operand {
+ name: "ifm1"
+ type: UINT8
+ shape { dim: 1 dim: 4 dim: 4 dim: 3 }
+ quant { min: 0 max: 255 scale: 1.0 zero_point: 0 }
+}
+operand {
+ name: "ifm2"
+ type: UINT8
+ shape { dim: 1 dim: 4 dim: 4 dim: 3 }
+ quant { min: 0 max: 255 scale: 1.0 zero_point: 0 }
+}
+operand {
+ name: "ofm"
+ type: UINT8
+ shape { dim: 1 dim: 4 dim: 4 dim: 3 }
+ quant { min: 0 max: 255 scale: 1.0 zero_point: 0 }
+}
+operation {
+ type: "Sub"
+ input: "ifm1"
+ input: "ifm2"
+ output: "ofm"
+ sub_options {
+ activation: NONE
+ }
+}
+input: "ifm1"
+input: "ifm2"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Sub_U8_000/test.reverse b/res/TensorFlowLiteRecipes/Sub_U8_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Sub_U8_000/test.reverse
diff --git a/res/TensorFlowLiteRecipes/Transpose_000/test.recipe b/res/TensorFlowLiteRecipes/Transpose_000/test.recipe
new file mode 100644
index 000000000..82a85c13b
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Transpose_000/test.recipe
@@ -0,0 +1,27 @@
+operand {
+ name: "ifm"
+ type: FLOAT32
+ shape { dim: 3 dim: 8 dim: 1 }
+}
+operand {
+ name: "perm"
+ type: INT32
+ shape { dim: 3 }
+ filler { tag: "explicit" arg: "1" arg: "2" arg: "0" }
+}
+operand {
+ name: "ofm"
+ type: FLOAT32
+ shape { dim: 8 dim: 1 dim: 3 }
+}
+
+operation {
+ type: "Transpose"
+ transpose_options {
+ }
+ input: "ifm"
+ input: "perm"
+ output: "ofm"
+}
+input: "ifm"
+output: "ofm"
diff --git a/res/TensorFlowLiteRecipes/Transpose_000/test.reverse b/res/TensorFlowLiteRecipes/Transpose_000/test.reverse
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/res/TensorFlowLiteRecipes/Transpose_000/test.reverse
diff --git a/res/TensorFlowLiteSchema/1.13.1/schema.fbs b/res/TensorFlowLiteSchema/1.13.1/schema.fbs
new file mode 100644
index 000000000..980f13b19
--- /dev/null
+++ b/res/TensorFlowLiteSchema/1.13.1/schema.fbs
@@ -0,0 +1,794 @@
+// Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Revision History
+// Version 0: Initial version.
+// Version 1: Add subgraphs to schema.
+// Version 2: Rename operators to conform to NN API.
+// Version 3: Move buffer data from Model.Subgraph.Tensors to Model.Buffers.
+
+namespace tflite;
+
+// This corresponds to the version.
+file_identifier "TFL3";
+// File extension of any written files.
+file_extension "tflite";
+
+// IMPORTANT: All new members of tables, enums and unions must be added at the
+// end to ensure backwards compatibility.
+
+// The type of data stored in a tensor.
+enum TensorType : byte {
+ FLOAT32 = 0,
+ FLOAT16 = 1,
+ INT32 = 2,
+ UINT8 = 3,
+ INT64 = 4,
+ STRING = 5,
+ BOOL = 6,
+ INT16 = 7,
+ COMPLEX64 = 8,
+ INT8 = 9,
+}
+
+// Custom quantization parameters for experimenting with new quantization
+// techniques.
+table CustomQuantization {
+ custom:[ubyte] (force_align: 16);
+}
+
+// Represents a specific quantization technique's parameters.
+union QuantizationDetails {
+ CustomQuantization,
+}
+
+// Parameters for converting a quantized tensor back to float.
+table QuantizationParameters {
+ // These four parameters are the asymmetric linear quantization parameters.
+ // Given a quantized value q, the corresponding float value f should be:
+ // f = scale * (q - zero_point)
+ // For other quantization types, the QuantizationDetails below is used.
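+ // Example: with scale = 0.5 and zero_point = 128, the quantized value
+ // q = 130 dequantizes to f = 0.5 * (130 - 128) = 1.0.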
+ min:[float]; // For importing back into tensorflow.
+ max:[float]; // For importing back into tensorflow.
+ scale:[float]; // For dequantizing the tensor's values.
+ zero_point:[long];
+
+ // If this is not none, the quantization parameters above are ignored and the
+ // value of the QuantizationDetails union below should be used.
+ details:QuantizationDetails;
+}
+
+table Tensor {
+ // The tensor shape. The meaning of each entry is operator-specific but
+ // builtin ops use: [batch size, height, width, number of channels] (That's
+ // TensorFlow's NHWC).
+ shape:[int];
+ type:TensorType;
+ // An index that refers to the buffers table at the root of the model. Or,
+ // if there is no data buffer associated (i.e. intermediate results), then
+ // this is 0 (which refers to an always-present empty buffer).
+ //
+ // The data_buffer itself is an opaque container, with the assumption that the
+ // target device is little-endian. In addition, all builtin operators assume
+ // the memory is ordered such that if `shape` is [4, 3, 2], then index
+ // [i, j, k] maps to data_buffer[i*3*2 + j*2 + k].
+ buffer:uint;
+ name:string; // For debugging and importing back into tensorflow.
+ quantization:QuantizationParameters; // Optional.
+
+ is_variable:bool = false;
+}
+
+// A list of builtin operators. Builtin operators are slightly faster than custom
+// ones, but not by much. Moreover, while custom operators accept an opaque
+// object containing configuration parameters, builtins have a predetermined
+// set of acceptable options.
+enum BuiltinOperator : byte {
+ ADD = 0,
+ AVERAGE_POOL_2D = 1,
+ CONCATENATION = 2,
+ CONV_2D = 3,
+ DEPTHWISE_CONV_2D = 4,
+ // DEPTH_TO_SPACE = 5,
+ DEQUANTIZE = 6,
+ EMBEDDING_LOOKUP = 7,
+ FLOOR = 8,
+ FULLY_CONNECTED = 9,
+ HASHTABLE_LOOKUP = 10,
+ L2_NORMALIZATION = 11,
+ L2_POOL_2D = 12,
+ LOCAL_RESPONSE_NORMALIZATION = 13,
+ LOGISTIC = 14,
+ LSH_PROJECTION = 15,
+ LSTM = 16,
+ MAX_POOL_2D = 17,
+ MUL = 18,
+ RELU = 19,
+ // NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed
+ // since different model developers use RELU1 in different ways. Never
+ // create another op called RELU1.
+ RELU_N1_TO_1 = 20,
+ RELU6 = 21,
+ RESHAPE = 22,
+ RESIZE_BILINEAR = 23,
+ RNN = 24,
+ SOFTMAX = 25,
+ SPACE_TO_DEPTH = 26,
+ SVDF = 27,
+ TANH = 28,
+ // TODO(aselle): Consider rename to CONCATENATE_EMBEDDINGS
+ CONCAT_EMBEDDINGS = 29,
+ SKIP_GRAM = 30,
+ CALL = 31,
+ CUSTOM = 32,
+ EMBEDDING_LOOKUP_SPARSE = 33,
+ PAD = 34,
+ UNIDIRECTIONAL_SEQUENCE_RNN = 35,
+ GATHER = 36,
+ BATCH_TO_SPACE_ND = 37,
+ SPACE_TO_BATCH_ND = 38,
+ TRANSPOSE = 39,
+ MEAN = 40,
+ SUB = 41,
+ DIV = 42,
+ SQUEEZE = 43,
+ UNIDIRECTIONAL_SEQUENCE_LSTM = 44,
+ STRIDED_SLICE = 45,
+ BIDIRECTIONAL_SEQUENCE_RNN = 46,
+ EXP = 47,
+ TOPK_V2 = 48,
+ SPLIT = 49,
+ LOG_SOFTMAX = 50,
+ // DELEGATE is a special op type for the operations which are delegated to
+ // other backends.
+ // WARNING: Experimental interface, subject to change
+ DELEGATE = 51,
+ BIDIRECTIONAL_SEQUENCE_LSTM = 52,
+ CAST = 53,
+ PRELU = 54,
+ MAXIMUM = 55,
+ ARG_MAX = 56,
+ MINIMUM = 57,
+ LESS = 58,
+ NEG = 59,
+ PADV2 = 60,
+ GREATER = 61,
+ GREATER_EQUAL = 62,
+ LESS_EQUAL = 63,
+ SELECT = 64,
+ SLICE = 65,
+ SIN = 66,
+ TRANSPOSE_CONV = 67,
+ SPARSE_TO_DENSE = 68,
+ TILE = 69,
+ EXPAND_DIMS = 70,
+ EQUAL = 71,
+ NOT_EQUAL = 72,
+ LOG = 73,
+ SUM = 74,
+ SQRT = 75,
+ RSQRT = 76,
+ SHAPE = 77,
+ POW = 78,
+ ARG_MIN = 79,
+ FAKE_QUANT = 80,
+ REDUCE_PROD = 81,
+ REDUCE_MAX = 82,
+ PACK = 83,
+ LOGICAL_OR = 84,
+ ONE_HOT = 85,
+ LOGICAL_AND = 86,
+ LOGICAL_NOT = 87,
+ UNPACK = 88,
+ REDUCE_MIN = 89,
+ FLOOR_DIV = 90,
+ REDUCE_ANY = 91,
+ SQUARE = 92,
+ ZEROS_LIKE = 93,
+ FILL = 94,
+ FLOOR_MOD = 95,
+ RANGE = 96,
+ RESIZE_NEAREST_NEIGHBOR = 97,
+ LEAKY_RELU = 98,
+ SQUARED_DIFFERENCE = 99,
+ MIRROR_PAD = 100,
+ ABS = 101,
+ SPLIT_V = 102,
+}
+
+// Options for the builtin operators.
+union BuiltinOptions {
+ Conv2DOptions,
+ DepthwiseConv2DOptions,
+ ConcatEmbeddingsOptions,
+ LSHProjectionOptions,
+ Pool2DOptions,
+ SVDFOptions,
+ RNNOptions,
+ FullyConnectedOptions,
+ SoftmaxOptions,
+ ConcatenationOptions,
+ AddOptions,
+ L2NormOptions,
+ LocalResponseNormalizationOptions,
+ LSTMOptions,
+ ResizeBilinearOptions,
+ CallOptions,
+ ReshapeOptions,
+ SkipGramOptions,
+ SpaceToDepthOptions,
+ EmbeddingLookupSparseOptions,
+ MulOptions,
+ PadOptions,
+ GatherOptions,
+ BatchToSpaceNDOptions,
+ SpaceToBatchNDOptions,
+ TransposeOptions,
+ ReducerOptions,
+ SubOptions,
+ DivOptions,
+ SqueezeOptions,
+ SequenceRNNOptions,
+ StridedSliceOptions,
+ ExpOptions,
+ TopKV2Options,
+ SplitOptions,
+ LogSoftmaxOptions,
+ CastOptions,
+ DequantizeOptions,
+ MaximumMinimumOptions,
+ ArgMaxOptions,
+ LessOptions,
+ NegOptions,
+ PadV2Options,
+ GreaterOptions,
+ GreaterEqualOptions,
+ LessEqualOptions,
+ SelectOptions,
+ SliceOptions,
+ TransposeConvOptions,
+ SparseToDenseOptions,
+ TileOptions,
+ ExpandDimsOptions,
+ EqualOptions,
+ NotEqualOptions,
+ ShapeOptions,
+ PowOptions,
+ ArgMinOptions,
+ FakeQuantOptions,
+ PackOptions,
+ LogicalOrOptions,
+ OneHotOptions,
+ LogicalAndOptions,
+ LogicalNotOptions,
+ UnpackOptions,
+ FloorDivOptions,
+ SquareOptions,
+ ZerosLikeOptions,
+ FillOptions,
+ BidirectionalSequenceLSTMOptions,
+ BidirectionalSequenceRNNOptions,
+ UnidirectionalSequenceLSTMOptions,
+ FloorModOptions,
+ RangeOptions,
+ ResizeNearestNeighborOptions,
+ LeakyReluOptions,
+ SquaredDifferenceOptions,
+ MirrorPadOptions,
+ AbsOptions,
+ SplitVOptions,
+}
+
+enum Padding : byte { SAME, VALID }
+
+enum ActivationFunctionType : byte {
+ NONE = 0,
+ RELU = 1,
+ RELU_N1_TO_1 = 2,
+ RELU6 = 3,
+ TANH = 4,
+ SIGN_BIT = 5,
+}
+
+table Conv2DOptions {
+ padding:Padding;
+ stride_w:int;
+ stride_h:int;
+ fused_activation_function:ActivationFunctionType;
+ dilation_w_factor:int = 1;
+ dilation_h_factor:int = 1;
+}
+
+table Pool2DOptions {
+ padding:Padding;
+ stride_w:int;
+ stride_h:int;
+ filter_width:int;
+ filter_height:int;
+ fused_activation_function:ActivationFunctionType;
+}
+
+table DepthwiseConv2DOptions {
+ // Parameters for DepthwiseConv version 1 or above.
+ padding:Padding;
+ stride_w:int;
+ stride_h:int;
+ depth_multiplier:int;
+ fused_activation_function:ActivationFunctionType;
+ // Parameters for DepthwiseConv version 2 or above.
+ dilation_w_factor:int = 1;
+ dilation_h_factor:int = 1;
+}
+
+table ConcatEmbeddingsOptions {
+ num_channels:int;
+ num_columns_per_channel:[int];
+ embedding_dim_per_channel:[int]; // This could be inferred from parameters.
+}
+
+enum LSHProjectionType: byte {
+ UNKNOWN = 0,
+ SPARSE = 1,
+ DENSE = 2,
+}
+
+table LSHProjectionOptions {
+ type: LSHProjectionType;
+}
+
+table SVDFOptions {
+ rank:int;
+ fused_activation_function:ActivationFunctionType;
+}
+
+// An implementation of TensorFlow RNNCell.
+table RNNOptions {
+ fused_activation_function:ActivationFunctionType;
+}
+
+// An implementation of TensorFlow dynamic_rnn with RNNCell.
+table SequenceRNNOptions {
+ time_major:bool;
+ fused_activation_function:ActivationFunctionType;
+}
+
+// An implementation of TensorFlow bidirectional_dynamic_rnn with RNNCell.
+table BidirectionalSequenceRNNOptions {
+ time_major:bool;
+ fused_activation_function:ActivationFunctionType;
+ merge_outputs: bool;
+}
+
+enum FullyConnectedOptionsWeightsFormat: byte {
+ DEFAULT = 0,
+ SHUFFLED4x16INT8 = 1,
+}
+
+// An implementation of TensorFlow fully_connected (a.k.a. Dense) layer.
+table FullyConnectedOptions {
+ // Parameters for FullyConnected version 1 or above.
+ fused_activation_function:ActivationFunctionType;
+
+ // Parameters for FullyConnected version 2 or above.
+ weights_format:FullyConnectedOptionsWeightsFormat = DEFAULT;
+}
+
+table SoftmaxOptions {
+ beta: float;
+}
+
+// An implementation of TensorFlow concat.
+table ConcatenationOptions {
+ axis:int;
+ fused_activation_function:ActivationFunctionType;
+}
+
+table AddOptions {
+ fused_activation_function:ActivationFunctionType;
+}
+
+table MulOptions {
+ fused_activation_function:ActivationFunctionType;
+}
+
+table L2NormOptions {
+ fused_activation_function:ActivationFunctionType;
+}
+
+table LocalResponseNormalizationOptions {
+ radius:int;
+ bias:float;
+ alpha:float;
+ beta:float;
+}
+
+enum LSTMKernelType : byte {
+ // Full LSTM kernel which supports peephole and projection.
+ FULL = 0,
+ // Basic LSTM kernels. Equivalent to TensorFlow BasicLSTMCell.
+ BASIC = 1,
+}
+
+// An implementation of TensorFlow LSTMCell and CoupledInputForgetGateLSTMCell
+table LSTMOptions {
+ // Parameters for LSTM version 1 or above.
+ fused_activation_function:ActivationFunctionType;
+ cell_clip: float; // Optional, 0.0 means no clipping
+ proj_clip: float; // Optional, 0.0 means no clipping
+
+ // Parameters for LSTM version 2 or above.
+ // Basic kernel is only supported in version 2 or above.
+ kernel_type: LSTMKernelType = FULL;
+}
+
+// An implementation of TensorFlow dynamic_rnn with LSTMCell.
+table UnidirectionalSequenceLSTMOptions {
+ fused_activation_function:ActivationFunctionType;
+ cell_clip: float; // Optional, 0.0 means no clipping
+ proj_clip: float; // Optional, 0.0 means no clipping
+
+ // If true then first dimension is sequence, otherwise batch.
+ time_major:bool;
+}
+
+table BidirectionalSequenceLSTMOptions {
+ fused_activation_function:ActivationFunctionType;
+ cell_clip: float; // Optional, 0.0 means no clipping
+ proj_clip: float; // Optional, 0.0 means no clipping
+
+ // If true, store the outputs of both directions into the first output.
+ merge_outputs: bool;
+}
+
+table ResizeBilinearOptions {
+ new_height: int (deprecated);
+ new_width: int (deprecated);
+ align_corners: bool;
+}
+
+table ResizeNearestNeighborOptions {
+ align_corners: bool;
+}
+
+// Options for a call operation.
+table CallOptions {
+ // The subgraph index that needs to be called.
+ subgraph:uint;
+}
+
+table PadOptions {
+}
+
+table PadV2Options {
+}
+
+table ReshapeOptions {
+ new_shape:[int];
+}
+
+table SpaceToBatchNDOptions {
+}
+
+table BatchToSpaceNDOptions {
+}
+
+table SkipGramOptions {
+ ngram_size: int;
+ max_skip_size: int;
+ include_all_ngrams: bool;
+}
+
+table SpaceToDepthOptions {
+ block_size: int;
+}
+
+table SubOptions {
+ fused_activation_function:ActivationFunctionType;
+}
+
+table DivOptions {
+ fused_activation_function:ActivationFunctionType;
+}
+
+table TopKV2Options {
+}
+
+enum CombinerType : byte {
+ SUM = 0,
+ MEAN = 1,
+ SQRTN = 2,
+}
+
+table EmbeddingLookupSparseOptions {
+ combiner:CombinerType;
+}
+
+table GatherOptions {
+ axis: int;
+}
+
+table TransposeOptions {
+}
+
+table ExpOptions {
+}
+
+table ReducerOptions {
+ keep_dims: bool;
+}
+
+table SqueezeOptions {
+ squeeze_dims:[int];
+}
+
+table SplitOptions {
+ num_splits: int;
+}
+
+table SplitVOptions {
+ num_splits: int;
+}
+
+table StridedSliceOptions {
+ begin_mask: int;
+ end_mask: int;
+ ellipsis_mask: int;
+ new_axis_mask: int;
+ shrink_axis_mask: int;
+}
+
+table LogSoftmaxOptions {
+}
+
+table CastOptions {
+ in_data_type: TensorType;
+ out_data_type: TensorType;
+}
+
+table DequantizeOptions {
+}
+
+table MaximumMinimumOptions {
+}
+
+table TileOptions {
+}
+
+table ArgMaxOptions {
+ output_type : TensorType;
+}
+
+table ArgMinOptions {
+ output_type : TensorType;
+}
+
+table GreaterOptions {
+}
+
+table GreaterEqualOptions {
+}
+
+table LessOptions {
+}
+
+table LessEqualOptions {
+}
+
+table NegOptions {
+}
+
+table SelectOptions {
+}
+
+table SliceOptions {
+}
+
+table TransposeConvOptions {
+ padding:Padding;
+ stride_w:int;
+ stride_h:int;
+}
+
+table ExpandDimsOptions {
+}
+
+table SparseToDenseOptions {
+ validate_indices:bool;
+}
+
+table EqualOptions {
+}
+
+table NotEqualOptions {
+}
+
+table ShapeOptions {
+ // Optional output type of the operation (int32 or int64). Defaults to int32.
+ out_type : TensorType;
+}
+
+table PowOptions {
+}
+
+table FakeQuantOptions {
+ // Parameters supported by version 1:
+ min:float;
+ max:float;
+ num_bits:int;
+
+ // Parameters supported by version 2:
+ narrow_range:bool;
+}
+
+table PackOptions {
+ values_count:int;
+ axis:int;
+}
+
+table LogicalOrOptions {
+}
+
+table OneHotOptions {
+ axis:int;
+}
+
+table AbsOptions {
+}
+
+
+table LogicalAndOptions {
+}
+
+table LogicalNotOptions {
+}
+
+table UnpackOptions {
+ num:int;
+ axis:int;
+}
+
+table FloorDivOptions {
+}
+
+table SquareOptions {
+}
+
+table ZerosLikeOptions {
+}
+
+table FillOptions {
+}
+
+table FloorModOptions {
+}
+
+table RangeOptions {
+}
+
+table LeakyReluOptions {
+ alpha:float;
+}
+
+table SquaredDifferenceOptions {
+}
+
+enum MirrorPadMode : byte {
+ // Doesn't include borders.
+ REFLECT = 0,
+ // Includes borders.
+ SYMMETRIC = 1,
+}
+
+table MirrorPadOptions {
+ mode:MirrorPadMode;
+}
+
+// An OperatorCode can be an enum value (BuiltinOperator) if the operator is a
+// builtin, or a string if the operator is custom.
+table OperatorCode {
+ builtin_code:BuiltinOperator;
+ custom_code:string;
+
+ // The version of the operator. The version needs to be bumped whenever new
+ // parameters are introduced into an op.
+ version:int = 1;
+}
+
+enum CustomOptionsFormat : byte {
+ FLEXBUFFERS = 0,
+}
+
+// An operator takes tensors as inputs and outputs. The type of operation being
+// performed is determined by an index into the list of valid OperatorCodes,
+// while the specifics of each operation are configured using builtin_options
+// or custom_options.
+table Operator {
+ // Index into the operator_codes array. Using an integer here avoids
+ // complicated map lookups.
+ opcode_index:uint;
+
+ // Optional input and output tensors are indicated by -1.
+ inputs:[int];
+ outputs:[int];
+
+ builtin_options:BuiltinOptions;
+ custom_options:[ubyte];
+ custom_options_format:CustomOptionsFormat;
+
+ // A list of booleans indicating the input tensors which are being mutated by
+ // this operator (e.g. used by RNN and LSTM).
+ // For example, if the "inputs" array refers to 5 tensors and the second and
+ // fifth are mutable variables, then this list will contain
+ // [false, true, false, false, true].
+ //
+ // If the list is empty, no variable is mutated in this operator.
+ // The list either has the same length as `inputs`, or is empty.
+ mutating_variable_inputs:[bool];
+}
+
+// The root type, defining a subgraph, which typically represents an entire
+// model.
+table SubGraph {
+ // A list of all tensors used in this subgraph.
+ tensors:[Tensor];
+
+ // Indices of the tensors that are inputs into this subgraph. Note this is
+ // the list of non-static tensors that feed into the subgraph for inference.
+ inputs:[int];
+
+ // Indices of the tensors that are outputs out of this subgraph. Note this is
+ // the list of output tensors that are considered the product of the
+ // subgraph's inference.
+ outputs:[int];
+
+ // All operators, in execution order.
+ operators:[Operator];
+
+ // Name of this subgraph (used for debugging).
+ name:string;
+}
+
+// Table of raw data buffers (used for constant tensors). Referenced by tensors
+// by index. The generous alignment accommodates mmap-friendly data structures.
+table Buffer {
+ data:[ubyte] (force_align: 16);
+}
+
+table Model {
+ // Version of the schema.
+ version:uint;
+
+ // A list of all operator codes used in this model. This is
+ // kept in order because operators carry an index into this
+ // vector.
+ operator_codes:[OperatorCode];
+
+ // All the subgraphs of the model. The 0th is assumed to be the main
+ // model.
+ subgraphs:[SubGraph];
+
+ // A description of the model.
+ description:string;
+
+ // Buffers of the model.
+ // Note the 0th entry of this array must be an empty buffer (sentinel).
+ // This is a convention so that tensors without a buffer can provide 0 as
+ // their buffer.
+ buffers:[Buffer];
+
+ // Metadata about the model. Indirects into the existing buffers list.
+ metadata_buffer:[int];
+}
+
+root_type Model;
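The Tensor table above pins down buffer layout: a little-endian, row-major (C-order) buffer where index [i, j, k] of a shape-[4, 3, 2] tensor maps to data_buffer[i*3*2 + j*2 + k]. The general rule is a running product over the trailing dimensions, sketched here (the helper name is illustrative):

    def flat_index(shape, index):
        # Row-major offset: the stride of axis d is the product of all
        # dimensions after d, folded into one accumulating pass.
        offset = 0
        for dim, idx in zip(shape, index):
            offset = offset * dim + idx
        return offset

    # Matches the schema comment for shape [4, 3, 2]: [i, j, k] -> i*6 + j*2 + k.
    assert flat_index([4, 3, 2], [1, 2, 1]) == 1*6 + 2*2 + 1  # 11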
diff --git a/res/TensorFlowLiteSchema/1.14.0/schema.fbs b/res/TensorFlowLiteSchema/1.14.0/schema.fbs
new file mode 100644
index 000000000..b5fc0f31b
--- /dev/null
+++ b/res/TensorFlowLiteSchema/1.14.0/schema.fbs
@@ -0,0 +1,873 @@
+// Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Revision History
+// Version 0: Initial version.
+// Version 1: Add subgraphs to schema.
+// Version 2: Rename operators to conform to NN API.
+// Version 3: Move buffer data from Model.Subgraph.Tensors to Model.Buffers.
+
+namespace tflite;
+
+// This corresponds to the version.
+file_identifier "TFL3";
+// File extension of any written files.
+file_extension "tflite";
+
+// IMPORTANT: All new members of tables, enums and unions must be added at the
+// end to ensure backwards compatibility.
+
+// The type of data stored in a tensor.
+enum TensorType : byte {
+ FLOAT32 = 0,
+ FLOAT16 = 1,
+ INT32 = 2,
+ UINT8 = 3,
+ INT64 = 4,
+ STRING = 5,
+ BOOL = 6,
+ INT16 = 7,
+ COMPLEX64 = 8,
+ INT8 = 9,
+}
+
+// Custom quantization parameters for experimenting with new quantization
+// techniques.
+table CustomQuantization {
+ custom:[ubyte] (force_align: 16);
+}
+
+// Represents a specific quantization technique's parameters.
+union QuantizationDetails {
+ CustomQuantization,
+}
+
+// Parameters for converting a quantized tensor back to float.
+table QuantizationParameters {
+ // These four parameters are the asymmetric linear quantization parameters.
+ // Given a quantized value q, the corresponding float value f should be:
+ // f = scale * (q - zero_point)
+ // For other quantization types, the QuantizationDetails below is used.
+ min:[float]; // For importing back into tensorflow.
+ max:[float]; // For importing back into tensorflow.
+ scale:[float]; // For dequantizing the tensor's values.
+ zero_point:[long];
+
+ // If this is not none, the other quantization parameters (i.e. min, max,
+ // scale, zero_point fields above) are ignored and the value of the
+ // QuantizationDetails union should be used.
+ details:QuantizationDetails;
+
+ // Specifies the dimension of the Tensor's shape that the scales and
+ // zero_points correspond to. For example, a tensor t, with dims=[4, 3, 2, 1]
+ // with quantization params:
+ // scale=[1.0, 2.0, 3.0], zero_point=[1, 2, 3], quantized_dimension=1
+ // will be quantized across the second dimension of t.
+ // t[:, 0, :, :] will have scale[0]=1.0, zero_point[0]=1
+ // t[:, 1, :, :] will have scale[1]=2.0, zero_point[1]=2
+ // t[:, 2, :, :] will have scale[2]=3.0, zero_point[2]=3
+ quantized_dimension:int;
+}
+
+table Tensor {
+ // The tensor shape. The meaning of each entry is operator-specific but
+ // builtin ops use: [batch size, height, width, number of channels] (That's
+ // TensorFlow's NHWC).
+ shape:[int];
+ type:TensorType;
+ // An index that refers to the buffers table at the root of the model. Or,
+ // if there is no data buffer associated (i.e. intermediate results), then
+ // this is 0 (which refers to an always-present empty buffer).
+ //
+ // The data_buffer itself is an opaque container, with the assumption that the
+ // target device is little-endian. In addition, all builtin operators assume
+ // the memory is ordered such that if `shape` is [4, 3, 2], then index
+ // [i, j, k] maps to data_buffer[i*3*2 + j*2 + k].
+ buffer:uint;
+ name:string; // For debugging and importing back into tensorflow.
+ quantization:QuantizationParameters; // Optional.
+
+ is_variable:bool = false;
+}
+
+// A list of builtin operators. Builtin operators are slightly faster than custom
+// ones, but not by much. Moreover, while custom operators accept an opaque
+// object containing configuration parameters, builtins have a predetermined
+// set of acceptable options.
+enum BuiltinOperator : byte {
+ ADD = 0,
+ AVERAGE_POOL_2D = 1,
+ CONCATENATION = 2,
+ CONV_2D = 3,
+ DEPTHWISE_CONV_2D = 4,
+ // DEPTH_TO_SPACE = 5,
+ DEQUANTIZE = 6,
+ EMBEDDING_LOOKUP = 7,
+ FLOOR = 8,
+ FULLY_CONNECTED = 9,
+ HASHTABLE_LOOKUP = 10,
+ L2_NORMALIZATION = 11,
+ L2_POOL_2D = 12,
+ LOCAL_RESPONSE_NORMALIZATION = 13,
+ LOGISTIC = 14,
+ LSH_PROJECTION = 15,
+ LSTM = 16,
+ MAX_POOL_2D = 17,
+ MUL = 18,
+ RELU = 19,
+ // NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed
+ // since different model developers use RELU1 in different ways. Never
+ // create another op called RELU1.
+ RELU_N1_TO_1 = 20,
+ RELU6 = 21,
+ RESHAPE = 22,
+ RESIZE_BILINEAR = 23,
+ RNN = 24,
+ SOFTMAX = 25,
+ SPACE_TO_DEPTH = 26,
+ SVDF = 27,
+ TANH = 28,
+ // TODO(aselle): Consider rename to CONCATENATE_EMBEDDINGS
+ CONCAT_EMBEDDINGS = 29,
+ SKIP_GRAM = 30,
+ CALL = 31,
+ CUSTOM = 32,
+ EMBEDDING_LOOKUP_SPARSE = 33,
+ PAD = 34,
+ UNIDIRECTIONAL_SEQUENCE_RNN = 35,
+ GATHER = 36,
+ BATCH_TO_SPACE_ND = 37,
+ SPACE_TO_BATCH_ND = 38,
+ TRANSPOSE = 39,
+ MEAN = 40,
+ SUB = 41,
+ DIV = 42,
+ SQUEEZE = 43,
+ UNIDIRECTIONAL_SEQUENCE_LSTM = 44,
+ STRIDED_SLICE = 45,
+ BIDIRECTIONAL_SEQUENCE_RNN = 46,
+ EXP = 47,
+ TOPK_V2 = 48,
+ SPLIT = 49,
+ LOG_SOFTMAX = 50,
+ // DELEGATE is a special op type for the operations which are delegated to
+ // other backends.
+ // WARNING: Experimental interface, subject to change
+ DELEGATE = 51,
+ BIDIRECTIONAL_SEQUENCE_LSTM = 52,
+ CAST = 53,
+ PRELU = 54,
+ MAXIMUM = 55,
+ ARG_MAX = 56,
+ MINIMUM = 57,
+ LESS = 58,
+ NEG = 59,
+ PADV2 = 60,
+ GREATER = 61,
+ GREATER_EQUAL = 62,
+ LESS_EQUAL = 63,
+ SELECT = 64,
+ SLICE = 65,
+ SIN = 66,
+ TRANSPOSE_CONV = 67,
+ SPARSE_TO_DENSE = 68,
+ TILE = 69,
+ EXPAND_DIMS = 70,
+ EQUAL = 71,
+ NOT_EQUAL = 72,
+ LOG = 73,
+ SUM = 74,
+ SQRT = 75,
+ RSQRT = 76,
+ SHAPE = 77,
+ POW = 78,
+ ARG_MIN = 79,
+ FAKE_QUANT = 80,
+ REDUCE_PROD = 81,
+ REDUCE_MAX = 82,
+ PACK = 83,
+ LOGICAL_OR = 84,
+ ONE_HOT = 85,
+ LOGICAL_AND = 86,
+ LOGICAL_NOT = 87,
+ UNPACK = 88,
+ REDUCE_MIN = 89,
+ FLOOR_DIV = 90,
+ REDUCE_ANY = 91,
+ SQUARE = 92,
+ ZEROS_LIKE = 93,
+ FILL = 94,
+ FLOOR_MOD = 95,
+ RANGE = 96,
+ RESIZE_NEAREST_NEIGHBOR = 97,
+ LEAKY_RELU = 98,
+ SQUARED_DIFFERENCE = 99,
+ MIRROR_PAD = 100,
+ ABS = 101,
+ SPLIT_V = 102,
+ UNIQUE = 103,
+ CEIL = 104,
+ REVERSE_V2 = 105,
+ ADD_N = 106,
+ GATHER_ND = 107,
+ COS = 108,
+ WHERE = 109,
+ RANK = 110,
+ ELU = 111,
+ REVERSE_SEQUENCE = 112,
+ MATRIX_DIAG = 113,
+ QUANTIZE = 114,
+ MATRIX_SET_DIAG = 115,
+ ROUND = 116,
+}
+
+// Options for the builtin operators.
+union BuiltinOptions {
+ Conv2DOptions,
+ DepthwiseConv2DOptions,
+ ConcatEmbeddingsOptions,
+ LSHProjectionOptions,
+ Pool2DOptions,
+ SVDFOptions,
+ RNNOptions,
+ FullyConnectedOptions,
+ SoftmaxOptions,
+ ConcatenationOptions,
+ AddOptions,
+ L2NormOptions,
+ LocalResponseNormalizationOptions,
+ LSTMOptions,
+ ResizeBilinearOptions,
+ CallOptions,
+ ReshapeOptions,
+ SkipGramOptions,
+ SpaceToDepthOptions,
+ EmbeddingLookupSparseOptions,
+ MulOptions,
+ PadOptions,
+ GatherOptions,
+ BatchToSpaceNDOptions,
+ SpaceToBatchNDOptions,
+ TransposeOptions,
+ ReducerOptions,
+ SubOptions,
+ DivOptions,
+ SqueezeOptions,
+ SequenceRNNOptions,
+ StridedSliceOptions,
+ ExpOptions,
+ TopKV2Options,
+ SplitOptions,
+ LogSoftmaxOptions,
+ CastOptions,
+ DequantizeOptions,
+ MaximumMinimumOptions,
+ ArgMaxOptions,
+ LessOptions,
+ NegOptions,
+ PadV2Options,
+ GreaterOptions,
+ GreaterEqualOptions,
+ LessEqualOptions,
+ SelectOptions,
+ SliceOptions,
+ TransposeConvOptions,
+ SparseToDenseOptions,
+ TileOptions,
+ ExpandDimsOptions,
+ EqualOptions,
+ NotEqualOptions,
+ ShapeOptions,
+ PowOptions,
+ ArgMinOptions,
+ FakeQuantOptions,
+ PackOptions,
+ LogicalOrOptions,
+ OneHotOptions,
+ LogicalAndOptions,
+ LogicalNotOptions,
+ UnpackOptions,
+ FloorDivOptions,
+ SquareOptions,
+ ZerosLikeOptions,
+ FillOptions,
+ BidirectionalSequenceLSTMOptions,
+ BidirectionalSequenceRNNOptions,
+ UnidirectionalSequenceLSTMOptions,
+ FloorModOptions,
+ RangeOptions,
+ ResizeNearestNeighborOptions,
+ LeakyReluOptions,
+ SquaredDifferenceOptions,
+ MirrorPadOptions,
+ AbsOptions,
+ SplitVOptions,
+ UniqueOptions,
+ ReverseV2Options,
+ AddNOptions,
+ GatherNdOptions,
+ CosOptions,
+ WhereOptions,
+ RankOptions,
+ ReverseSequenceOptions,
+ MatrixDiagOptions,
+ QuantizeOptions,
+ MatrixSetDiagOptions
+}
+
+enum Padding : byte { SAME, VALID }
+
+enum ActivationFunctionType : byte {
+ NONE = 0,
+ RELU = 1,
+ RELU_N1_TO_1 = 2,
+ RELU6 = 3,
+ TANH = 4,
+ SIGN_BIT = 5,
+}
+
+table Conv2DOptions {
+ padding:Padding;
+ stride_w:int;
+ stride_h:int;
+ fused_activation_function:ActivationFunctionType;
+ dilation_w_factor:int = 1;
+ dilation_h_factor:int = 1;
+}
+
+table Pool2DOptions {
+ padding:Padding;
+ stride_w:int;
+ stride_h:int;
+ filter_width:int;
+ filter_height:int;
+ fused_activation_function:ActivationFunctionType;
+}
+
+table DepthwiseConv2DOptions {
+ // Parameters for DepthwiseConv version 1 or above.
+ padding:Padding;
+ stride_w:int;
+ stride_h:int;
+ depth_multiplier:int;
+ fused_activation_function:ActivationFunctionType;
+ // Parameters for DepthwiseConv version 2 or above.
+ dilation_w_factor:int = 1;
+ dilation_h_factor:int = 1;
+}
+
+table ConcatEmbeddingsOptions {
+ num_channels:int;
+ num_columns_per_channel:[int];
+ embedding_dim_per_channel:[int]; // This could be inferred from parameters.
+}
+
+enum LSHProjectionType: byte {
+ UNKNOWN = 0,
+ SPARSE = 1,
+ DENSE = 2,
+}
+
+table LSHProjectionOptions {
+ type: LSHProjectionType;
+}
+
+table SVDFOptions {
+ rank:int;
+ fused_activation_function:ActivationFunctionType;
+}
+
+// An implementation of TensorFlow RNNCell.
+table RNNOptions {
+ fused_activation_function:ActivationFunctionType;
+}
+
+// An implementation of TensorFlow dynamic_rnn with RNNCell.
+table SequenceRNNOptions {
+ time_major:bool;
+ fused_activation_function:ActivationFunctionType;
+}
+
+// An implementation of TensorFlow bidirectional_dynamic_rnn with RNNCell.
+table BidirectionalSequenceRNNOptions {
+ time_major:bool;
+ fused_activation_function:ActivationFunctionType;
+ merge_outputs: bool;
+}
+
+enum FullyConnectedOptionsWeightsFormat: byte {
+ DEFAULT = 0,
+ SHUFFLED4x16INT8 = 1,
+}
+
+// An implementation of TensorFlow fully_connected (a.k.a. Dense) layer.
+table FullyConnectedOptions {
+ // Parameters for FullyConnected version 1 or above.
+ fused_activation_function:ActivationFunctionType;
+
+ // Parameters for FullyConnected version 2 or above.
+ weights_format:FullyConnectedOptionsWeightsFormat = DEFAULT;
+}
+
+table SoftmaxOptions {
+ beta: float;
+}
+
+// An implementation of TensorFlow concat.
+table ConcatenationOptions {
+ axis:int;
+ fused_activation_function:ActivationFunctionType;
+}
+
+table AddOptions {
+ fused_activation_function:ActivationFunctionType;
+}
+
+table MulOptions {
+ fused_activation_function:ActivationFunctionType;
+}
+
+table L2NormOptions {
+ fused_activation_function:ActivationFunctionType;
+}
+
+table LocalResponseNormalizationOptions {
+ radius:int;
+ bias:float;
+ alpha:float;
+ beta:float;
+}
+
+enum LSTMKernelType : byte {
+ // Full LSTM kernel which supports peephole and projection.
+ FULL = 0,
+ // Basic LSTM kernels. Equivalent to TensorFlow BasicLSTMCell.
+ BASIC = 1,
+}
+
+// An implementation of TensorFlow LSTMCell and CoupledInputForgetGateLSTMCell
+table LSTMOptions {
+ // Parameters for LSTM version 1 or above.
+ fused_activation_function:ActivationFunctionType;
+ cell_clip: float; // Optional, 0.0 means no clipping
+ proj_clip: float; // Optional, 0.0 means no clipping
+
+ // Parameters for LSTM version 2 or above.
+ // Basic kernel is only supported in version 2 or above.
+ kernel_type: LSTMKernelType = FULL;
+}
+
+// An implementation of TensorFlow dynamic_rnn with LSTMCell.
+table UnidirectionalSequenceLSTMOptions {
+ fused_activation_function:ActivationFunctionType;
+ cell_clip: float; // Optional, 0.0 means no clipping
+ proj_clip: float; // Optional, 0.0 means no clipping
+
+ // If true then first dimension is sequence, otherwise batch.
+ time_major:bool;
+}
+
+table BidirectionalSequenceLSTMOptions {
+ // Parameters supported by version 1:
+ fused_activation_function:ActivationFunctionType;
+ cell_clip: float; // Optional, 0.0 means no clipping
+ proj_clip: float; // Optional, 0.0 means no clipping
+
+ // If true, store the outputs of both directions into the first output.
+ merge_outputs: bool;
+
+ // Parameters supported by version 2:
+ // If true then first dimension is sequence, otherwise batch.
+ // Version 1 implementations assumed time_major to be true, so this default
+ // value should never change.
+ time_major: bool = true;
+}
+
+table ResizeBilinearOptions {
+ new_height: int (deprecated);
+ new_width: int (deprecated);
+ align_corners: bool;
+}
+
+table ResizeNearestNeighborOptions {
+ align_corners: bool;
+}
+
+// Options for a call operation.
+table CallOptions {
+ // The subgraph index that needs to be called.
+ subgraph:uint;
+}
+
+table PadOptions {
+}
+
+table PadV2Options {
+}
+
+table ReshapeOptions {
+ new_shape:[int];
+}
+
+table SpaceToBatchNDOptions {
+}
+
+table BatchToSpaceNDOptions {
+}
+
+table SkipGramOptions {
+ ngram_size: int;
+ max_skip_size: int;
+ include_all_ngrams: bool;
+}
+
+table SpaceToDepthOptions {
+ block_size: int;
+}
+
+table SubOptions {
+ fused_activation_function:ActivationFunctionType;
+}
+
+table DivOptions {
+ fused_activation_function:ActivationFunctionType;
+}
+
+table TopKV2Options {
+}
+
+enum CombinerType : byte {
+ SUM = 0,
+ MEAN = 1,
+ SQRTN = 2,
+}
+
+table EmbeddingLookupSparseOptions {
+ combiner:CombinerType;
+}
+
+table GatherOptions {
+ axis: int;
+}
+
+table TransposeOptions {
+}
+
+table ExpOptions {
+}
+
+table CosOptions {
+}
+
+table ReducerOptions {
+ keep_dims: bool;
+}
+
+table SqueezeOptions {
+ squeeze_dims:[int];
+}
+
+table SplitOptions {
+ num_splits: int;
+}
+
+table SplitVOptions {
+ num_splits: int;
+}
+
+table StridedSliceOptions {
+ begin_mask: int;
+ end_mask: int;
+ ellipsis_mask: int;
+ new_axis_mask: int;
+ shrink_axis_mask: int;
+}
+
+table LogSoftmaxOptions {
+}
+
+table CastOptions {
+ in_data_type: TensorType;
+ out_data_type: TensorType;
+}
+
+table DequantizeOptions {
+}
+
+table MaximumMinimumOptions {
+}
+
+table TileOptions {
+}
+
+table ArgMaxOptions {
+ output_type : TensorType;
+}
+
+table ArgMinOptions {
+ output_type : TensorType;
+}
+
+table GreaterOptions {
+}
+
+table GreaterEqualOptions {
+}
+
+table LessOptions {
+}
+
+table LessEqualOptions {
+}
+
+table NegOptions {
+}
+
+table SelectOptions {
+}
+
+table SliceOptions {
+}
+
+table TransposeConvOptions {
+ padding:Padding;
+ stride_w:int;
+ stride_h:int;
+}
+
+table ExpandDimsOptions {
+}
+
+table SparseToDenseOptions {
+ validate_indices:bool;
+}
+
+table EqualOptions {
+}
+
+table NotEqualOptions {
+}
+
+table ShapeOptions {
+ // Optional output type of the operation (int32 or int64). Defaults to int32.
+ out_type : TensorType;
+}
+
+table RankOptions {
+}
+
+table PowOptions {
+}
+
+table FakeQuantOptions {
+ // Parameters supported by version 1:
+ min:float;
+ max:float;
+ num_bits:int;
+
+ // Parameters supported by version 2:
+ narrow_range:bool;
+}
+
+table PackOptions {
+ values_count:int;
+ axis:int;
+}
+
+table LogicalOrOptions {
+}
+
+table OneHotOptions {
+ axis:int;
+}
+
+table AbsOptions {
+}
+
+
+table LogicalAndOptions {
+}
+
+table LogicalNotOptions {
+}
+
+table UnpackOptions {
+ num:int;
+ axis:int;
+}
+
+table FloorDivOptions {
+}
+
+table SquareOptions {
+}
+
+table ZerosLikeOptions {
+}
+
+table FillOptions {
+}
+
+table FloorModOptions {
+}
+
+table RangeOptions {
+}
+
+table LeakyReluOptions {
+ alpha:float;
+}
+
+table SquaredDifferenceOptions {
+}
+
+enum MirrorPadMode : byte {
+ // Doesn't include borders.
+ REFLECT = 0,
+ // Includes borders.
+ SYMMETRIC = 1,
+}
+
+table MirrorPadOptions {
+ mode:MirrorPadMode;
+}
+
+table UniqueOptions {
+ idx_out_type:TensorType = INT32;
+}
+
+table ReverseV2Options {
+}
+
+table AddNOptions {
+}
+
+table GatherNdOptions {
+}
+
+table WhereOptions {
+}
+
+table ReverseSequenceOptions {
+ seq_dim:int;
+ batch_dim:int = 0;
+}
+
+table MatrixDiagOptions {
+}
+
+table QuantizeOptions {
+}
+
+table MatrixSetDiagOptions {
+}
+
+// An OperatorCode can be an enum value (BuiltinOperator) if the operator is a
+// builtin, or a string if the operator is custom.
+table OperatorCode {
+ builtin_code:BuiltinOperator;
+ custom_code:string;
+
+ // The version of the operator. The version needs to be bumped whenever new
+ // parameters are introduced into an op.
+ version:int = 1;
+}
+
+enum CustomOptionsFormat : byte {
+ FLEXBUFFERS = 0,
+}
+
+// An operator takes tensors as inputs and outputs. The type of operation being
+// performed is determined by an index into the list of valid OperatorCodes,
+// while the specifics of each operation are configured using builtin_options
+// or custom_options.
+table Operator {
+ // Index into the operator_codes array. Using an integer here avoids
+ // complicated map lookups.
+ opcode_index:uint;
+
+ // Optional input and output tensors are indicated by -1.
+ inputs:[int];
+ outputs:[int];
+
+ builtin_options:BuiltinOptions;
+ custom_options:[ubyte];
+ custom_options_format:CustomOptionsFormat;
+
+ // A list of booleans indicating the input tensors which are being mutated by
+ // this operator (e.g. used by RNN and LSTM).
+ // For example, if the "inputs" array refers to 5 tensors and the second and
+ // fifth are mutable variables, then this list will contain
+ // [false, true, false, false, true].
+ //
+ // If the list is empty, no variable is mutated in this operator.
+ // The list either has the same length as `inputs`, or is empty.
+ mutating_variable_inputs:[bool];
+}
+
+// The root type, defining a subgraph, which typically represents an entire
+// model.
+table SubGraph {
+ // A list of all tensors used in this subgraph.
+ tensors:[Tensor];
+
+ // Indices of the tensors that are inputs into this subgraph. Note this is
+ // the list of non-static tensors that feed into the subgraph for inference.
+ inputs:[int];
+
+ // Indices of the tensors that are outputs out of this subgraph. Note this is
+ // the list of output tensors that are considered the product of the
+ // subgraph's inference.
+ outputs:[int];
+
+ // All operators, in execution order.
+ operators:[Operator];
+
+ // Name of this subgraph (used for debugging).
+ name:string;
+}
+
+// Table of raw data buffers (used for constant tensors). Referenced by tensors
+// by index. The generous alignment accommodates mmap-friendly data structures.
+table Buffer {
+ data:[ubyte] (force_align: 16);
+}
+
+table Model {
+ // Version of the schema.
+ version:uint;
+
+ // A list of all operator codes used in this model. This is
+ // kept in order because operators carry an index into this
+ // vector.
+ operator_codes:[OperatorCode];
+
+ // All the subgraphs of the model. The 0th is assumed to be the main
+ // model.
+ subgraphs:[SubGraph];
+
+ // A description of the model.
+ description:string;
+
+ // Buffers of the model.
+ // Note the 0th entry of this array must be an empty buffer (sentinel).
+ // This is a convention so that tensors without a buffer can provide 0 as
+ // their buffer.
+ buffers:[Buffer];
+
+ // Metadata about the model. Indirects into the existing buffers list.
+ metadata_buffer:[int];
+}
+
+root_type Model;
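New in this schema revision is `quantized_dimension`, which promotes scale and zero_point to per-channel vectors along one axis of the tensor. Dequantization then broadcasts the parameters along that axis; a sketch with NumPy, reusing the values from the QuantizationParameters comment above:

    import numpy as np

    def dequantize_per_axis(q, scale, zero_point, axis):
        # Reshape the per-channel parameters so they broadcast along `axis`.
        shape = [1] * q.ndim
        shape[axis] = -1
        scale = np.asarray(scale, dtype=np.float32).reshape(shape)
        zero_point = np.asarray(zero_point, dtype=np.int64).reshape(shape)
        return scale * (q.astype(np.int64) - zero_point)

    q = np.zeros((4, 3, 2, 1), dtype=np.int8)
    f = dequantize_per_axis(q, [1.0, 2.0, 3.0], [1, 2, 3], axis=1)
    # Every f[:, 1, :, :] entry is 2.0 * (0 - 2) = -4.0, per channel as documented.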
diff --git a/res/TensorFlowLiteSchema/1.15.2/schema.fbs b/res/TensorFlowLiteSchema/1.15.2/schema.fbs
new file mode 100644
index 000000000..d63386035
--- /dev/null
+++ b/res/TensorFlowLiteSchema/1.15.2/schema.fbs
@@ -0,0 +1,922 @@
+// Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Revision History
+// Version 0: Initial version.
+// Version 1: Add subgraphs to schema.
+// Version 2: Rename operators to conform to NN API.
+// Version 3: Move buffer data from Model.Subgraph.Tensors to Model.Buffers.
+
+namespace tflite;
+
+// This corresponds to the version.
+file_identifier "TFL3";
+// File extension of any written files.
+file_extension "tflite";
+
+// IMPORTANT: All new members of tables, enums and unions must be added at the
+// end to ensure backwards compatibility.
+
+// The type of data stored in a tensor.
+enum TensorType : byte {
+ FLOAT32 = 0,
+ FLOAT16 = 1,
+ INT32 = 2,
+ UINT8 = 3,
+ INT64 = 4,
+ STRING = 5,
+ BOOL = 6,
+ INT16 = 7,
+ COMPLEX64 = 8,
+ INT8 = 9,
+}
+
+// Custom quantization parameters for experimenting with new quantization
+// techniques.
+table CustomQuantization {
+ custom:[ubyte] (force_align: 16);
+}
+
+// Represents a specific quantization technique's parameters.
+union QuantizationDetails {
+ CustomQuantization,
+}
+
+// Parameters for converting a quantized tensor back to float.
+table QuantizationParameters {
+ // These four parameters are the asymmetric linear quantization parameters.
+ // Given a quantized value q, the corresponding float value f should be:
+ // f = scale * (q - zero_point)
+ // For other quantization types, the QuantizationDetails below is used.
+ min:[float]; // For importing back into tensorflow.
+ max:[float]; // For importing back into tensorflow.
+ scale:[float]; // For dequantizing the tensor's values.
+ zero_point:[long];
+
+ // If this is not none, the other quantization parameters (i.e. min, max,
+ // scale, zero_point fields above) are ignored and the value of the
+ // QuantizationDetails union should be used.
+ details:QuantizationDetails;
+
+ // Specifies the dimension of the Tensor's shape that the scales and
+ // zero_points correspond to. For example, a tensor t, with dims=[4, 3, 2, 1]
+ // with quantization params:
+ // scale=[1.0, 2.0, 3.0], zero_point=[1, 2, 3], quantized_dimension=1
+ // will be quantized across the second dimension of t.
+ // t[:, 0, :, :] will have scale[0]=1.0, zero_point[0]=1
+ // t[:, 1, :, :] will have scale[1]=2.0, zero_point[1]=2
+ // t[:, 2, :, :] will have scale[2]=3.0, zero_point[2]=3
+ quantized_dimension:int;
+}
+
+table Tensor {
+ // The tensor shape. The meaning of each entry is operator-specific but
+ // builtin ops use: [batch size, height, width, number of channels] (That's
+ // TensorFlow's NHWC).
+ shape:[int];
+ type:TensorType;
+ // An index that refers to the buffers table at the root of the model. Or,
+ // if there is no data buffer associated (i.e. intermediate results), then
+ // this is 0 (which refers to an always-present empty buffer).
+ //
+ // The data_buffer itself is an opaque container, with the assumption that the
+ // target device is little-endian. In addition, all builtin operators assume
+ // the memory is ordered such that if `shape` is [4, 3, 2], then index
+ // [i, j, k] maps to data_buffer[i*3*2 + j*2 + k].
+ buffer:uint;
+ name:string; // For debugging and importing back into tensorflow.
+ quantization:QuantizationParameters; // Optional.
+
+ is_variable:bool = false;
+}
+
+// A list of builtin operators. Builtin operators are slightly faster than custom
+// ones, but not by much. Moreover, while custom operators accept an opaque
+// object containing configuration parameters, builtins have a predetermined
+// set of acceptable options.
+enum BuiltinOperator : byte {
+ ADD = 0,
+ AVERAGE_POOL_2D = 1,
+ CONCATENATION = 2,
+ CONV_2D = 3,
+ DEPTHWISE_CONV_2D = 4,
+ DEPTH_TO_SPACE = 5,
+ DEQUANTIZE = 6,
+ EMBEDDING_LOOKUP = 7,
+ FLOOR = 8,
+ FULLY_CONNECTED = 9,
+ HASHTABLE_LOOKUP = 10,
+ L2_NORMALIZATION = 11,
+ L2_POOL_2D = 12,
+ LOCAL_RESPONSE_NORMALIZATION = 13,
+ LOGISTIC = 14,
+ LSH_PROJECTION = 15,
+ LSTM = 16,
+ MAX_POOL_2D = 17,
+ MUL = 18,
+ RELU = 19,
+ // NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed
+ // since different model developers use RELU1 in different ways. Never
+ // create another op called RELU1.
+ RELU_N1_TO_1 = 20,
+ RELU6 = 21,
+ RESHAPE = 22,
+ RESIZE_BILINEAR = 23,
+ RNN = 24,
+ SOFTMAX = 25,
+ SPACE_TO_DEPTH = 26,
+ SVDF = 27,
+ TANH = 28,
+ // TODO(aselle): Consider rename to CONCATENATE_EMBEDDINGS
+ CONCAT_EMBEDDINGS = 29,
+ SKIP_GRAM = 30,
+ CALL = 31,
+ CUSTOM = 32,
+ EMBEDDING_LOOKUP_SPARSE = 33,
+ PAD = 34,
+ UNIDIRECTIONAL_SEQUENCE_RNN = 35,
+ GATHER = 36,
+ BATCH_TO_SPACE_ND = 37,
+ SPACE_TO_BATCH_ND = 38,
+ TRANSPOSE = 39,
+ MEAN = 40,
+ SUB = 41,
+ DIV = 42,
+ SQUEEZE = 43,
+ UNIDIRECTIONAL_SEQUENCE_LSTM = 44,
+ STRIDED_SLICE = 45,
+ BIDIRECTIONAL_SEQUENCE_RNN = 46,
+ EXP = 47,
+ TOPK_V2 = 48,
+ SPLIT = 49,
+ LOG_SOFTMAX = 50,
+ // DELEGATE is a special op type for the operations which are delegated to
+ // other backends.
+ // WARNING: Experimental interface, subject to change
+ DELEGATE = 51,
+ BIDIRECTIONAL_SEQUENCE_LSTM = 52,
+ CAST = 53,
+ PRELU = 54,
+ MAXIMUM = 55,
+ ARG_MAX = 56,
+ MINIMUM = 57,
+ LESS = 58,
+ NEG = 59,
+ PADV2 = 60,
+ GREATER = 61,
+ GREATER_EQUAL = 62,
+ LESS_EQUAL = 63,
+ SELECT = 64,
+ SLICE = 65,
+ SIN = 66,
+ TRANSPOSE_CONV = 67,
+ SPARSE_TO_DENSE = 68,
+ TILE = 69,
+ EXPAND_DIMS = 70,
+ EQUAL = 71,
+ NOT_EQUAL = 72,
+ LOG = 73,
+ SUM = 74,
+ SQRT = 75,
+ RSQRT = 76,
+ SHAPE = 77,
+ POW = 78,
+ ARG_MIN = 79,
+ FAKE_QUANT = 80,
+ REDUCE_PROD = 81,
+ REDUCE_MAX = 82,
+ PACK = 83,
+ LOGICAL_OR = 84,
+ ONE_HOT = 85,
+ LOGICAL_AND = 86,
+ LOGICAL_NOT = 87,
+ UNPACK = 88,
+ REDUCE_MIN = 89,
+ FLOOR_DIV = 90,
+ REDUCE_ANY = 91,
+ SQUARE = 92,
+ ZEROS_LIKE = 93,
+ FILL = 94,
+ FLOOR_MOD = 95,
+ RANGE = 96,
+ RESIZE_NEAREST_NEIGHBOR = 97,
+ LEAKY_RELU = 98,
+ SQUARED_DIFFERENCE = 99,
+ MIRROR_PAD = 100,
+ ABS = 101,
+ SPLIT_V = 102,
+ UNIQUE = 103,
+ CEIL = 104,
+ REVERSE_V2 = 105,
+ ADD_N = 106,
+ GATHER_ND = 107,
+ COS = 108,
+ WHERE = 109,
+ RANK = 110,
+ ELU = 111,
+ REVERSE_SEQUENCE = 112,
+ MATRIX_DIAG = 113,
+ QUANTIZE = 114,
+ MATRIX_SET_DIAG = 115,
+ ROUND = 116,
+ HARD_SWISH = 117,
+ IF = 118,
+ WHILE = 119,
+}
+
+// Options for the builtin operators.
+union BuiltinOptions {
+ Conv2DOptions,
+ DepthwiseConv2DOptions,
+ ConcatEmbeddingsOptions,
+ LSHProjectionOptions,
+ Pool2DOptions,
+ SVDFOptions,
+ RNNOptions,
+ FullyConnectedOptions,
+ SoftmaxOptions,
+ ConcatenationOptions,
+ AddOptions,
+ L2NormOptions,
+ LocalResponseNormalizationOptions,
+ LSTMOptions,
+ ResizeBilinearOptions,
+ CallOptions,
+ ReshapeOptions,
+ SkipGramOptions,
+ SpaceToDepthOptions,
+ EmbeddingLookupSparseOptions,
+ MulOptions,
+ PadOptions,
+ GatherOptions,
+ BatchToSpaceNDOptions,
+ SpaceToBatchNDOptions,
+ TransposeOptions,
+ ReducerOptions,
+ SubOptions,
+ DivOptions,
+ SqueezeOptions,
+ SequenceRNNOptions,
+ StridedSliceOptions,
+ ExpOptions,
+ TopKV2Options,
+ SplitOptions,
+ LogSoftmaxOptions,
+ CastOptions,
+ DequantizeOptions,
+ MaximumMinimumOptions,
+ ArgMaxOptions,
+ LessOptions,
+ NegOptions,
+ PadV2Options,
+ GreaterOptions,
+ GreaterEqualOptions,
+ LessEqualOptions,
+ SelectOptions,
+ SliceOptions,
+ TransposeConvOptions,
+ SparseToDenseOptions,
+ TileOptions,
+ ExpandDimsOptions,
+ EqualOptions,
+ NotEqualOptions,
+ ShapeOptions,
+ PowOptions,
+ ArgMinOptions,
+ FakeQuantOptions,
+ PackOptions,
+ LogicalOrOptions,
+ OneHotOptions,
+ LogicalAndOptions,
+ LogicalNotOptions,
+ UnpackOptions,
+ FloorDivOptions,
+ SquareOptions,
+ ZerosLikeOptions,
+ FillOptions,
+ BidirectionalSequenceLSTMOptions,
+ BidirectionalSequenceRNNOptions,
+ UnidirectionalSequenceLSTMOptions,
+ FloorModOptions,
+ RangeOptions,
+ ResizeNearestNeighborOptions,
+ LeakyReluOptions,
+ SquaredDifferenceOptions,
+ MirrorPadOptions,
+ AbsOptions,
+ SplitVOptions,
+ UniqueOptions,
+ ReverseV2Options,
+ AddNOptions,
+ GatherNdOptions,
+ CosOptions,
+ WhereOptions,
+ RankOptions,
+ ReverseSequenceOptions,
+ MatrixDiagOptions,
+ QuantizeOptions,
+ MatrixSetDiagOptions,
+ HardSwishOptions,
+ IfOptions,
+ WhileOptions,
+ DepthToSpaceOptions
+}
+
+enum Padding : byte { SAME, VALID }
+
+enum ActivationFunctionType : byte {
+ NONE = 0,
+ RELU = 1,
+ RELU_N1_TO_1 = 2,
+ RELU6 = 3,
+ TANH = 4,
+ SIGN_BIT = 5,
+}
+
+table Conv2DOptions {
+ padding:Padding;
+ stride_w:int;
+ stride_h:int;
+ fused_activation_function:ActivationFunctionType;
+ dilation_w_factor:int = 1;
+ dilation_h_factor:int = 1;
+}
+
+table Pool2DOptions {
+ padding:Padding;
+ stride_w:int;
+ stride_h:int;
+ filter_width:int;
+ filter_height:int;
+ fused_activation_function:ActivationFunctionType;
+}
+
+table DepthwiseConv2DOptions {
+ // Parameters for DepthwiseConv version 1 or above.
+ padding:Padding;
+ stride_w:int;
+ stride_h:int;
+ depth_multiplier:int;
+ fused_activation_function:ActivationFunctionType;
+ // Parameters for DepthwiseConv version 2 or above.
+ dilation_w_factor:int = 1;
+ dilation_h_factor:int = 1;
+}
+
+table ConcatEmbeddingsOptions {
+ num_channels:int;
+ num_columns_per_channel:[int];
+ embedding_dim_per_channel:[int]; // This could be inferred from parameters.
+}
+
+enum LSHProjectionType: byte {
+ UNKNOWN = 0,
+ SPARSE = 1,
+ DENSE = 2,
+}
+
+table LSHProjectionOptions {
+ type: LSHProjectionType;
+}
+
+table SVDFOptions {
+ rank:int;
+ fused_activation_function:ActivationFunctionType;
+}
+
+// An implementation of TensorFlow RNNCell.
+table RNNOptions {
+ fused_activation_function:ActivationFunctionType;
+}
+
+// An implementation of TensorFlow dynamic_rnn with RNNCell.
+table SequenceRNNOptions {
+ time_major:bool;
+ fused_activation_function:ActivationFunctionType;
+}
+
+// An implementation of TensorFlow bidirectional_dynamic_rnn with RNNCell.
+table BidirectionalSequenceRNNOptions {
+ time_major:bool;
+ fused_activation_function:ActivationFunctionType;
+ merge_outputs: bool;
+}
+
+enum FullyConnectedOptionsWeightsFormat: byte {
+ DEFAULT = 0,
+ SHUFFLED4x16INT8 = 1,
+}
+
+// An implementation of TensorFlow fully_connected (a.k.a. Dense) layer.
+table FullyConnectedOptions {
+ // Parameters for FullyConnected version 1 or above.
+ fused_activation_function:ActivationFunctionType;
+
+ // Parameters for FullyConnected version 2 or above.
+ weights_format:FullyConnectedOptionsWeightsFormat = DEFAULT;
+
+ // Parameters for FullyConnected version 5 or above.
+ // If set to true, then the number of dimension is preserved. Furthermore,
+ // all but the last dimension of the input and output shapes will be equal.
+ keep_num_dims: bool;
+}
+
+table SoftmaxOptions {
+ beta: float;
+}
+
+// An implementation of TensorFlow concat.
+table ConcatenationOptions {
+ axis:int;
+ fused_activation_function:ActivationFunctionType;
+}
+
+table AddOptions {
+ fused_activation_function:ActivationFunctionType;
+}
+
+table MulOptions {
+ fused_activation_function:ActivationFunctionType;
+}
+
+table L2NormOptions {
+ fused_activation_function:ActivationFunctionType;
+}
+
+table LocalResponseNormalizationOptions {
+ radius:int;
+ bias:float;
+ alpha:float;
+ beta:float;
+}
+
+enum LSTMKernelType : byte {
+ // Full LSTM kernel which supports peephole and projection.
+ FULL = 0,
+ // Basic LSTM kernels. Equivalent to TensorFlow BasicLSTMCell.
+ BASIC = 1,
+}
+
+// An implementation of TensorFlow LSTMCell and CoupledInputForgetGateLSTMCell
+table LSTMOptions {
+ // Parameters for LSTM version 1 or above.
+ fused_activation_function:ActivationFunctionType;
+ cell_clip: float; // Optional, 0.0 means no clipping
+ proj_clip: float; // Optional, 0.0 means no clipping
+
+ // Parameters for LSTM version 2 or above.
+ // Basic kernel is only supported in version 2 or above.
+ kernel_type: LSTMKernelType = FULL;
+}
+
+// An implementation of TensorFlow dynamic_rnn with LSTMCell.
+table UnidirectionalSequenceLSTMOptions {
+ fused_activation_function:ActivationFunctionType;
+ cell_clip: float; // Optional, 0.0 means no clipping
+ proj_clip: float; // Optional, 0.0 means no clipping
+
+ // If true then first dimension is sequence, otherwise batch.
+ time_major:bool;
+}
+
+table BidirectionalSequenceLSTMOptions {
+ // Parameters supported by version 1:
+ fused_activation_function:ActivationFunctionType;
+ cell_clip: float; // Optional, 0.0 means no clipping
+ proj_clip: float; // Optional, 0.0 means no clipping
+
+ // If true, store the outputs of both directions into the first output.
+ merge_outputs: bool;
+
+ // Parameters supported by version 2:
+ // If true then first dimension is sequence, otherwise batch.
+ // Version 1 implementations assumed time_major to be true, so this default
+ // value should never change.
+ time_major: bool = true;
+}
+
+table ResizeBilinearOptions {
+ new_height: int (deprecated);
+ new_width: int (deprecated);
+ align_corners: bool;
+}
+
+table ResizeNearestNeighborOptions {
+ align_corners: bool;
+}
+
+// Options for a call operation.
+table CallOptions {
+ // The subgraph index that needs to be called.
+ subgraph:uint;
+}
+
+table PadOptions {
+}
+
+table PadV2Options {
+}
+
+table ReshapeOptions {
+ new_shape:[int];
+}
+
+table SpaceToBatchNDOptions {
+}
+
+table BatchToSpaceNDOptions {
+}
+
+table SkipGramOptions {
+ ngram_size: int;
+ max_skip_size: int;
+ include_all_ngrams: bool;
+}
+
+table SpaceToDepthOptions {
+ block_size: int;
+}
+
+table DepthToSpaceOptions {
+ block_size: int;
+}
+
+table SubOptions {
+ fused_activation_function:ActivationFunctionType;
+}
+
+table DivOptions {
+ fused_activation_function:ActivationFunctionType;
+}
+
+table TopKV2Options {
+}
+
+enum CombinerType : byte {
+ SUM = 0,
+ MEAN = 1,
+ SQRTN = 2,
+}
+
+table EmbeddingLookupSparseOptions {
+ combiner:CombinerType;
+}
+
+table GatherOptions {
+ axis: int;
+}
+
+table TransposeOptions {
+}
+
+table ExpOptions {
+}
+
+table CosOptions {
+}
+
+table ReducerOptions {
+ keep_dims: bool;
+}
+
+table SqueezeOptions {
+ squeeze_dims:[int];
+}
+
+table SplitOptions {
+ num_splits: int;
+}
+
+table SplitVOptions {
+ num_splits: int;
+}
+
+table StridedSliceOptions {
+ begin_mask: int;
+ end_mask: int;
+ ellipsis_mask: int;
+ new_axis_mask: int;
+ shrink_axis_mask: int;
+}
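Each of these masks is a bitfield in which bit `i` refers to dimension `i`, following the convention of TensorFlow's `strided_slice` (stated here from the TF documentation, not from this schema). A tiny decoder:

```
def mask_bits(mask, ndims):
    # Bit i set means the mask applies to dimension i.
    return [bool(mask & (1 << i)) for i in range(ndims)]

# begin_mask = 0b101: the begin index is ignored for dims 0 and 2.
print(mask_bits(0b101, 4))  # [True, False, True, False]
```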
+
+table LogSoftmaxOptions {
+}
+
+table CastOptions {
+ in_data_type: TensorType;
+ out_data_type: TensorType;
+}
+
+table DequantizeOptions {
+}
+
+table MaximumMinimumOptions {
+}
+
+table TileOptions {
+}
+
+table ArgMaxOptions {
+ output_type : TensorType;
+}
+
+table ArgMinOptions {
+ output_type : TensorType;
+}
+
+table GreaterOptions {
+}
+
+table GreaterEqualOptions {
+}
+
+table LessOptions {
+}
+
+table LessEqualOptions {
+}
+
+table NegOptions {
+}
+
+table SelectOptions {
+}
+
+table SliceOptions {
+}
+
+table TransposeConvOptions {
+ padding:Padding;
+ stride_w:int;
+ stride_h:int;
+}
+
+table ExpandDimsOptions {
+}
+
+table SparseToDenseOptions {
+ validate_indices:bool;
+}
+
+table EqualOptions {
+}
+
+table NotEqualOptions {
+}
+
+table ShapeOptions {
+ // Optional output type of the operation (int32 or int64). Defaults to int32.
+ out_type : TensorType;
+}
+
+table RankOptions {
+}
+
+table PowOptions {
+}
+
+table FakeQuantOptions {
+ // Parameters supported by version 1:
+ min:float;
+ max:float;
+ num_bits:int;
+
+ // Parameters supported by version 2:
+ narrow_range:bool;
+}
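FakeQuant is a quantize-then-dequantize round trip over `[min, max]` with `2^num_bits` levels, where `narrow_range` drops the lowest level. A simplified numpy sketch; the real op additionally nudges `min`/`max` so that zero is exactly representable, which is omitted here:

```
import numpy as np

def fake_quant(x, min_val, max_val, num_bits, narrow_range=False):
    qmin = 1 if narrow_range else 0
    qmax = 2 ** num_bits - 1
    scale = (max_val - min_val) / (qmax - qmin)
    # Quantize to the integer grid, then map back to float.
    q = np.clip(np.round((x - min_val) / scale) + qmin, qmin, qmax)
    return (q - qmin) * scale + min_val

print(fake_quant(np.linspace(-1.0, 1.0, 5), -1.0, 1.0, num_bits=8))
```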
+
+table PackOptions {
+ values_count:int;
+ axis:int;
+}
+
+table LogicalOrOptions {
+}
+
+table OneHotOptions {
+ axis:int;
+}
+
+table AbsOptions {
+}
+
+table HardSwishOptions {
+}
+
+table LogicalAndOptions {
+}
+
+table LogicalNotOptions {
+}
+
+table UnpackOptions {
+ num:int;
+ axis:int;
+}
+
+table FloorDivOptions {
+}
+
+table SquareOptions {
+}
+
+table ZerosLikeOptions {
+}
+
+table FillOptions {
+}
+
+table FloorModOptions {
+}
+
+table RangeOptions {
+}
+
+table LeakyReluOptions {
+ alpha:float;
+}
+
+table SquaredDifferenceOptions {
+}
+
+enum MirrorPadMode : byte {
+ // Doesn't include borders.
+ REFLECT = 0,
+ // Includes borders.
+ SYMMETRIC = 1,
+}
+
+table MirrorPadOptions {
+ mode:MirrorPadMode;
+}
+
+table UniqueOptions {
+ idx_out_type:TensorType = INT32;
+}
+
+table ReverseV2Options {
+}
+
+table AddNOptions {
+}
+
+table GatherNdOptions {
+}
+
+table WhereOptions {
+}
+
+table ReverseSequenceOptions {
+ seq_dim:int;
+ batch_dim:int = 0;
+}
+
+table MatrixDiagOptions {
+}
+
+table QuantizeOptions {
+}
+
+table MatrixSetDiagOptions {
+}
+
+table IfOptions {
+ then_subgraph_index:int;
+ else_subgraph_index:int;
+}
+
+table WhileOptions {
+ cond_subgraph_index:int;
+ body_subgraph_index:int;
+}
+
+// An OperatorCode can be an enum value (BuiltinOperator) if the operator is a
+// builtin, or a string if the operator is custom.
+table OperatorCode {
+ builtin_code:BuiltinOperator;
+ custom_code:string;
+
+ // The version of the operator. The version needs to be bumped whenever new
+ // parameters are introduced into an op.
+ version:int = 1;
+}
+
+enum CustomOptionsFormat : byte {
+ FLEXBUFFERS = 0,
+}
+
+// An operator takes tensors as inputs and outputs. The type of operation being
+// performed is determined by an index into the list of valid OperatorCodes,
+// while the specifics of each operation are configured using builtin_options
+// or custom_options.
+table Operator {
+ // Index into the operator_codes array. Using an integer here avoids
+ // complicated map lookups.
+ opcode_index:uint;
+
+ // Optional input and output tensors are indicated by -1.
+ inputs:[int];
+ outputs:[int];
+
+ builtin_options:BuiltinOptions;
+ custom_options:[ubyte];
+ custom_options_format:CustomOptionsFormat;
+
+ // A list of booleans indicating the input tensors which are being mutated by
+ // this operator (e.g. used by RNN and LSTM).
+ // For example, if the "inputs" array refers to 5 tensors and the second and
+ // fifth are mutable variables, then this list will contain
+ // [false, true, false, false, true].
+ //
+ // If the list is empty, no variable is mutated in this operator.
+ // The list either has the same length as `inputs`, or is empty.
+ mutating_variable_inputs:[bool];
+
+ // A list of indices to the subgraph's "tensors" that are internal to an Op.
+ // Internal tensors are those that do not flow in or out of the operation,
+ // but instead are part of internal computation. As such, the operation's
+ // implementation may manage its memory more efficiently. They are needed,
+ // however (i.e. not just an implementation detail), since they are part of the
+ // computation, which may require relevant metadata such as quantization
+ // parameters.
+ intermediates:[int];
+}
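Resolving an `Operator` is therefore a two-hop lookup: `opcode_index` selects an entry in `operator_codes`, whose `builtin_code` (or `custom_code`) names the operation. A sketch with plain dicts standing in for the decoded FlatBuffer tables:

```
operator_codes = [
    {"builtin_code": "CONV_2D", "custom_code": None, "version": 1},
    {"builtin_code": "CUSTOM", "custom_code": "MyOp", "version": 1},
]
op = {"opcode_index": 1, "inputs": [0, 1], "outputs": [2]}

code = operator_codes[op["opcode_index"]]
name = code["custom_code"] if code["builtin_code"] == "CUSTOM" else code["builtin_code"]
print(name)  # MyOp
```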
+
+// A subgraph, which typically represents an entire model. (The schema's
+// actual root type is Model; see `root_type Model;` at the end of this file.)
+table SubGraph {
+ // A list of all tensors used in this subgraph.
+ tensors:[Tensor];
+
+ // Indices of the tensors that are inputs into this subgraph. Note this is
+ // the list of non-static tensors that feed into the subgraph for inference.
+ inputs:[int];
+
+ // Indices of the tensors that are outputs out of this subgraph. Note this is
+ // the list of output tensors that are considered the product of the
+ // subgraph's inference.
+ outputs:[int];
+
+ // All operators, in execution order.
+ operators:[Operator];
+
+ // Name of this subgraph (used for debugging).
+ name:string;
+}
+
+// Table of raw data buffers (used for constant tensors). Referenced by tensors
+// by index. The generous alignment accommodates mmap-friendly data structures.
+table Buffer {
+ data:[ubyte] (force_align: 16);
+}
+
+table Metadata {
+ // A human-readable string to uniquely identify a Metadata.
+ name:string;
+ // An index to the buffers table.
+ buffer:uint;
+}
+
+table Model {
+ // Version of the schema.
+ version:uint;
+
+ // A list of all operator codes used in this model. This is
+ // kept in order because operators carry an index into this
+ // vector.
+ operator_codes:[OperatorCode];
+
+ // All the subgraphs of the model. The 0th is assumed to be the main
+ // model.
+ subgraphs:[SubGraph];
+
+ // A description of the model.
+ description:string;
+
+ // Buffers of the model.
+ // Note the 0th entry of this array must be an empty buffer (sentinel).
+ // This is a convention so that tensors without a buffer can provide 0 as
+ // their buffer.
+ buffers:[Buffer];
+
+ // Metadata about the model. Indirects into the existing buffers list.
+ // Deprecated; prefer to use the metadata field.
+ metadata_buffer:[int];
+
+ // Metadata about the model.
+ metadata:[Metadata];
+}
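Because the 0th buffer is a mandatory empty sentinel, "this tensor has no constant data" needs no special case: the tensor simply points at buffer 0. A dict-based sketch of the convention:

```
buffers = [b""]                      # index 0: the required empty buffer
buffers.append(b"\x01\x02\x03\x04")  # index 1: data for a constant tensor

weights = {"name": "weights", "buffer": 1}
relu_out = {"name": "relu_out", "buffer": 0}  # intermediate: no stored data

for t in (weights, relu_out):
    print(t["name"], "->", len(buffers[t["buffer"]]), "bytes of constant data")
```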
+
+root_type Model;
diff --git a/res/TensorFlowLiteSchema/2.1.0/schema.fbs b/res/TensorFlowLiteSchema/2.1.0/schema.fbs
new file mode 100644
index 000000000..f1fbfc655
--- /dev/null
+++ b/res/TensorFlowLiteSchema/2.1.0/schema.fbs
@@ -0,0 +1,940 @@
+// Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Revision History
+// Version 0: Initial version.
+// Version 1: Add subgraphs to schema.
+// Version 2: Rename operators to conform to NN API.
+// Version 3: Move buffer data from Model.Subgraph.Tensors to Model.Buffers.
+
+namespace tflite;
+
+// The "3" in the identifier corresponds to schema version 3 in the revision history above.
+file_identifier "TFL3";
+// File extension of any written files.
+file_extension "tflite";
+
+// IMPORTANT: All new members of tables, enums and unions must be added at the
+// end to ensure backwards compatibility.
+
+// The type of data stored in a tensor.
+enum TensorType : byte {
+ FLOAT32 = 0,
+ FLOAT16 = 1,
+ INT32 = 2,
+ UINT8 = 3,
+ INT64 = 4,
+ STRING = 5,
+ BOOL = 6,
+ INT16 = 7,
+ COMPLEX64 = 8,
+ INT8 = 9,
+}
+
+// Custom quantization parameters for experimenting with new quantization
+// techniques.
+table CustomQuantization {
+ custom:[ubyte] (force_align: 16);
+}
+
+// Represents a specific quantization technique's parameters.
+union QuantizationDetails {
+ CustomQuantization,
+}
+
+// Parameters for converting a quantized tensor back to float.
+table QuantizationParameters {
+ // These four parameters are the asymmetric linear quantization parameters.
+ // Given a quantized value q, the corresponding float value f should be:
+ // f = scale * (q - zero_point)
+ // For other quantization types, the QuantizationDetails below is used.
+ min:[float]; // For importing back into tensorflow.
+ max:[float]; // For importing back into tensorflow.
+ scale:[float]; // For dequantizing the tensor's values.
+ zero_point:[long];
+
+ // If this is not none, the other quantization parameters (i.e. min, max,
+ // scale, zero_point fields above) are ignored and the value of the
+ // QuantizationDetails union should be used.
+ details:QuantizationDetails;
+
+ // Specifies the dimension of the Tensor's shape that the scales and
+ // zero_points correspond to. For example, a tensor t, with dims=[4, 3, 2, 1]
+ // with quantization params:
+ // scale=[1.0, 2.0, 3.0], zero_point=[1, 2, 3], quantization_dimension=1
+ // will be quantized across the second dimension of t.
+ // t[:, 0, :, :] will have scale[0]=1.0, zero_point[0]=1
+ // t[:, 1, :, :] will have scale[1]=2.0, zero_point[1]=2
+ // t[:, 2, :, :] will have scale[2]=3.0, zero_point[2]=3
+ quantized_dimension:int;
+}
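A numpy sketch of the dequantization rule above, covering both the per-tensor and the per-axis case (the helper is ours, not a TFLite API):

```
import numpy as np

def dequantize(q, scale, zero_point, quantized_dimension=0):
    # f = scale * (q - zero_point); per-axis parameters broadcast along
    # quantized_dimension when scale/zero_point have length > 1.
    scale = np.asarray(scale, dtype=np.float32)
    zero_point = np.asarray(zero_point, dtype=np.int64)
    shape = [1] * q.ndim
    shape[quantized_dimension] = -1
    return scale.reshape(shape) * (q.astype(np.int64) - zero_point.reshape(shape))

q = np.array([[10, 20], [30, 40]], dtype=np.uint8)
print(dequantize(q, scale=[1.0, 2.0], zero_point=[1, 2], quantized_dimension=1))
```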
+
+table Tensor {
+ // The tensor shape. The meaning of each entry is operator-specific but
+ // builtin ops use: [batch size, height, width, number of channels] (That's
+ // TensorFlow's NHWC).
+ shape:[int];
+ type:TensorType;
+ // An index that refers to the buffers table at the root of the model. Or,
+ // if there is no data buffer associated (i.e. intermediate results), then
+ // this is 0 (which refers to an always existent empty buffer).
+ //
+ // The data_buffer itself is an opaque container, with the assumption that the
+ // target device is little-endian. In addition, all builtin operators assume
+ // the memory is ordered such that if `shape` is [4, 3, 2], then index
+ // [i, j, k] maps to data_buffer[i*3*2 + j*2 + k].
+ buffer:uint;
+ name:string; // For debugging and importing back into tensorflow.
+ quantization:QuantizationParameters; // Optional.
+
+ is_variable:bool = false;
+}
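The `[i, j, k] -> i*3*2 + j*2 + k` mapping in the comment above is ordinary row-major (C-order) layout, which numpy can confirm:

```
import numpy as np

shape = (4, 3, 2)
i, j, k = 2, 1, 1

flat = i * 3 * 2 + j * 2 + k  # the formula from the schema comment
assert flat == np.ravel_multi_index((i, j, k), shape)
print(flat)  # 15
```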
+
+// A list of builtin operators. Builtin operators are slightly faster than custom
+// ones, but not by much. Moreover, while custom operators accept an opaque
+// object containing configuration parameters, builtins have a predetermined
+// set of acceptable options.
+enum BuiltinOperator : byte {
+ ADD = 0,
+ AVERAGE_POOL_2D = 1,
+ CONCATENATION = 2,
+ CONV_2D = 3,
+ DEPTHWISE_CONV_2D = 4,
+ DEPTH_TO_SPACE = 5,
+ DEQUANTIZE = 6,
+ EMBEDDING_LOOKUP = 7,
+ FLOOR = 8,
+ FULLY_CONNECTED = 9,
+ HASHTABLE_LOOKUP = 10,
+ L2_NORMALIZATION = 11,
+ L2_POOL_2D = 12,
+ LOCAL_RESPONSE_NORMALIZATION = 13,
+ LOGISTIC = 14,
+ LSH_PROJECTION = 15,
+ LSTM = 16,
+ MAX_POOL_2D = 17,
+ MUL = 18,
+ RELU = 19,
+ // NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed
+ // since different model developers use RELU1 in different ways. Never
+ // create another op called RELU1.
+ RELU_N1_TO_1 = 20,
+ RELU6 = 21,
+ RESHAPE = 22,
+ RESIZE_BILINEAR = 23,
+ RNN = 24,
+ SOFTMAX = 25,
+ SPACE_TO_DEPTH = 26,
+ SVDF = 27,
+ TANH = 28,
+ // TODO(aselle): Consider rename to CONCATENATE_EMBEDDINGS
+ CONCAT_EMBEDDINGS = 29,
+ SKIP_GRAM = 30,
+ CALL = 31,
+ CUSTOM = 32,
+ EMBEDDING_LOOKUP_SPARSE = 33,
+ PAD = 34,
+ UNIDIRECTIONAL_SEQUENCE_RNN = 35,
+ GATHER = 36,
+ BATCH_TO_SPACE_ND = 37,
+ SPACE_TO_BATCH_ND = 38,
+ TRANSPOSE = 39,
+ MEAN = 40,
+ SUB = 41,
+ DIV = 42,
+ SQUEEZE = 43,
+ UNIDIRECTIONAL_SEQUENCE_LSTM = 44,
+ STRIDED_SLICE = 45,
+ BIDIRECTIONAL_SEQUENCE_RNN = 46,
+ EXP = 47,
+ TOPK_V2 = 48,
+ SPLIT = 49,
+ LOG_SOFTMAX = 50,
+ // DELEGATE is a special op type for the operations which are delegated to
+ // other backends.
+ // WARNING: Experimental interface, subject to change
+ DELEGATE = 51,
+ BIDIRECTIONAL_SEQUENCE_LSTM = 52,
+ CAST = 53,
+ PRELU = 54,
+ MAXIMUM = 55,
+ ARG_MAX = 56,
+ MINIMUM = 57,
+ LESS = 58,
+ NEG = 59,
+ PADV2 = 60,
+ GREATER = 61,
+ GREATER_EQUAL = 62,
+ LESS_EQUAL = 63,
+ SELECT = 64,
+ SLICE = 65,
+ SIN = 66,
+ TRANSPOSE_CONV = 67,
+ SPARSE_TO_DENSE = 68,
+ TILE = 69,
+ EXPAND_DIMS = 70,
+ EQUAL = 71,
+ NOT_EQUAL = 72,
+ LOG = 73,
+ SUM = 74,
+ SQRT = 75,
+ RSQRT = 76,
+ SHAPE = 77,
+ POW = 78,
+ ARG_MIN = 79,
+ FAKE_QUANT = 80,
+ REDUCE_PROD = 81,
+ REDUCE_MAX = 82,
+ PACK = 83,
+ LOGICAL_OR = 84,
+ ONE_HOT = 85,
+ LOGICAL_AND = 86,
+ LOGICAL_NOT = 87,
+ UNPACK = 88,
+ REDUCE_MIN = 89,
+ FLOOR_DIV = 90,
+ REDUCE_ANY = 91,
+ SQUARE = 92,
+ ZEROS_LIKE = 93,
+ FILL = 94,
+ FLOOR_MOD = 95,
+ RANGE = 96,
+ RESIZE_NEAREST_NEIGHBOR = 97,
+ LEAKY_RELU = 98,
+ SQUARED_DIFFERENCE = 99,
+ MIRROR_PAD = 100,
+ ABS = 101,
+ SPLIT_V = 102,
+ UNIQUE = 103,
+ CEIL = 104,
+ REVERSE_V2 = 105,
+ ADD_N = 106,
+ GATHER_ND = 107,
+ COS = 108,
+ WHERE = 109,
+ RANK = 110,
+ ELU = 111,
+ REVERSE_SEQUENCE = 112,
+ MATRIX_DIAG = 113,
+ QUANTIZE = 114,
+ MATRIX_SET_DIAG = 115,
+ ROUND = 116,
+ HARD_SWISH = 117,
+ IF = 118,
+ WHILE = 119,
+ NON_MAX_SUPPRESSION_V4 = 120,
+ NON_MAX_SUPPRESSION_V5 = 121,
+ SCATTER_ND = 122
+}
+
+// Options for the builtin operators.
+union BuiltinOptions {
+ Conv2DOptions,
+ DepthwiseConv2DOptions,
+ ConcatEmbeddingsOptions,
+ LSHProjectionOptions,
+ Pool2DOptions,
+ SVDFOptions,
+ RNNOptions,
+ FullyConnectedOptions,
+ SoftmaxOptions,
+ ConcatenationOptions,
+ AddOptions,
+ L2NormOptions,
+ LocalResponseNormalizationOptions,
+ LSTMOptions,
+ ResizeBilinearOptions,
+ CallOptions,
+ ReshapeOptions,
+ SkipGramOptions,
+ SpaceToDepthOptions,
+ EmbeddingLookupSparseOptions,
+ MulOptions,
+ PadOptions,
+ GatherOptions,
+ BatchToSpaceNDOptions,
+ SpaceToBatchNDOptions,
+ TransposeOptions,
+ ReducerOptions,
+ SubOptions,
+ DivOptions,
+ SqueezeOptions,
+ SequenceRNNOptions,
+ StridedSliceOptions,
+ ExpOptions,
+ TopKV2Options,
+ SplitOptions,
+ LogSoftmaxOptions,
+ CastOptions,
+ DequantizeOptions,
+ MaximumMinimumOptions,
+ ArgMaxOptions,
+ LessOptions,
+ NegOptions,
+ PadV2Options,
+ GreaterOptions,
+ GreaterEqualOptions,
+ LessEqualOptions,
+ SelectOptions,
+ SliceOptions,
+ TransposeConvOptions,
+ SparseToDenseOptions,
+ TileOptions,
+ ExpandDimsOptions,
+ EqualOptions,
+ NotEqualOptions,
+ ShapeOptions,
+ PowOptions,
+ ArgMinOptions,
+ FakeQuantOptions,
+ PackOptions,
+ LogicalOrOptions,
+ OneHotOptions,
+ LogicalAndOptions,
+ LogicalNotOptions,
+ UnpackOptions,
+ FloorDivOptions,
+ SquareOptions,
+ ZerosLikeOptions,
+ FillOptions,
+ BidirectionalSequenceLSTMOptions,
+ BidirectionalSequenceRNNOptions,
+ UnidirectionalSequenceLSTMOptions,
+ FloorModOptions,
+ RangeOptions,
+ ResizeNearestNeighborOptions,
+ LeakyReluOptions,
+ SquaredDifferenceOptions,
+ MirrorPadOptions,
+ AbsOptions,
+ SplitVOptions,
+ UniqueOptions,
+ ReverseV2Options,
+ AddNOptions,
+ GatherNdOptions,
+ CosOptions,
+ WhereOptions,
+ RankOptions,
+ ReverseSequenceOptions,
+ MatrixDiagOptions,
+ QuantizeOptions,
+ MatrixSetDiagOptions,
+ HardSwishOptions,
+ IfOptions,
+ WhileOptions,
+ DepthToSpaceOptions,
+ NonMaxSuppressionV4Options,
+ NonMaxSuppressionV5Options,
+ ScatterNdOptions
+}
+
+enum Padding : byte { SAME, VALID }
+
+enum ActivationFunctionType : byte {
+ NONE = 0,
+ RELU = 1,
+ RELU_N1_TO_1 = 2,
+ RELU6 = 3,
+ TANH = 4,
+ SIGN_BIT = 5,
+}
+
+table Conv2DOptions {
+ padding:Padding;
+ stride_w:int;
+ stride_h:int;
+ fused_activation_function:ActivationFunctionType;
+ dilation_w_factor:int = 1;
+ dilation_h_factor:int = 1;
+}
+
+table Pool2DOptions {
+ padding:Padding;
+ stride_w:int;
+ stride_h:int;
+ filter_width:int;
+ filter_height:int;
+ fused_activation_function:ActivationFunctionType;
+}
+
+table DepthwiseConv2DOptions {
+ // Parameters for DepthwiseConv version 1 or above.
+ padding:Padding;
+ stride_w:int;
+ stride_h:int;
+ // `depth_multiplier` is redundant. It's used by CPU kernels in
+ // TensorFlow 2.0 or below, but ignored in versions above.
+ // See comments in lite/c/builtin_op_data.h for more details.
+ depth_multiplier:int;
+ fused_activation_function:ActivationFunctionType;
+ // Parameters for DepthwiseConv version 2 or above.
+ dilation_w_factor:int = 1;
+ dilation_h_factor:int = 1;
+}
+
+table ConcatEmbeddingsOptions {
+ num_channels:int;
+ num_columns_per_channel:[int];
+ embedding_dim_per_channel:[int]; // This could be inferred from parameters.
+}
+
+enum LSHProjectionType: byte {
+ UNKNOWN = 0,
+ SPARSE = 1,
+ DENSE = 2,
+}
+
+table LSHProjectionOptions {
+ type: LSHProjectionType;
+}
+
+table SVDFOptions {
+ rank:int;
+ fused_activation_function:ActivationFunctionType;
+}
+
+// An implementation of TensorFlow RNNCell.
+table RNNOptions {
+ fused_activation_function:ActivationFunctionType;
+}
+
+// An implementation of TensorFlow dynamic_rnn with RNNCell.
+table SequenceRNNOptions {
+ time_major:bool;
+ fused_activation_function:ActivationFunctionType;
+}
+
+// An implementation of TensorFlow bidirectional_dynamic_rnn with RNNCell.
+table BidirectionalSequenceRNNOptions {
+ time_major:bool;
+ fused_activation_function:ActivationFunctionType;
+ merge_outputs: bool;
+}
+
+enum FullyConnectedOptionsWeightsFormat: byte {
+ DEFAULT = 0,
+ SHUFFLED4x16INT8 = 1,
+}
+
+// An implementation of TensorFlow fully_connected (a.k.a. Dense) layer.
+table FullyConnectedOptions {
+ // Parameters for FullyConnected version 1 or above.
+ fused_activation_function:ActivationFunctionType;
+
+ // Parameters for FullyConnected version 2 or above.
+ weights_format:FullyConnectedOptionsWeightsFormat = DEFAULT;
+
+ // Parameters for FullyConnected version 5 or above.
+ // If set to true, then the number of dimensions is preserved. Furthermore,
+ // all but the last dimension of the input and output shapes will be equal.
+ keep_num_dims: bool;
+}
+
+table SoftmaxOptions {
+ beta: float;
+}
+
+// An implementation of TensorFlow concat.
+table ConcatenationOptions {
+ axis:int;
+ fused_activation_function:ActivationFunctionType;
+}
+
+table AddOptions {
+ fused_activation_function:ActivationFunctionType;
+}
+
+table MulOptions {
+ fused_activation_function:ActivationFunctionType;
+}
+
+table L2NormOptions {
+ fused_activation_function:ActivationFunctionType;
+}
+
+table LocalResponseNormalizationOptions {
+ radius:int;
+ bias:float;
+ alpha:float;
+ beta:float;
+}
+
+enum LSTMKernelType : byte {
+ // Full LSTM kernel which supports peephole and projection.
+ FULL = 0,
+ // Basic LSTM kernels. Equivalent to TensorFlow BasicLSTMCell.
+ BASIC = 1,
+}
+
+// An implementation of TensorFlow LSTMCell and CoupledInputForgetGateLSTMCell
+table LSTMOptions {
+ // Parameters for LSTM version 1 or above.
+ fused_activation_function:ActivationFunctionType;
+ cell_clip: float; // Optional, 0.0 means no clipping
+ proj_clip: float; // Optional, 0.0 means no clipping
+
+ // Parameters for LSTM version 2 or above.
+ // Basic kernel is only supported in version 2 or above.
+ kernel_type: LSTMKernelType = FULL;
+}
+
+// An implementation of TensorFlow dynamic_rnn with LSTMCell.
+table UnidirectionalSequenceLSTMOptions {
+ fused_activation_function:ActivationFunctionType;
+ cell_clip: float; // Optional, 0.0 means no clipping
+ proj_clip: float; // Optional, 0.0 means no clipping
+
+ // If true, the first dimension is the sequence axis; otherwise it is the batch axis.
+ time_major:bool;
+}
+
+table BidirectionalSequenceLSTMOptions {
+ // Parameters supported by version 1:
+ fused_activation_function:ActivationFunctionType;
+ cell_clip: float; // Optional, 0.0 means no clipping
+ proj_clip: float; // Optional, 0.0 means no clipping
+
+ // If true, store the outputs of both directions into the first output.
+ merge_outputs: bool;
+
+ // Parameters supported by version 2:
+ // If true, the first dimension is the sequence axis; otherwise it is the batch axis.
+ // Version 1 implementations assumed time_major to be true, so this default
+ // value should never change.
+ time_major: bool = true;
+}
+
+table ResizeBilinearOptions {
+ new_height: int (deprecated);
+ new_width: int (deprecated);
+ align_corners: bool;
+}
+
+table ResizeNearestNeighborOptions {
+ align_corners: bool;
+}
+
+// Options for a call operation.
+table CallOptions {
+ // The subgraph index that needs to be called.
+ subgraph:uint;
+}
+
+table PadOptions {
+}
+
+table PadV2Options {
+}
+
+table ReshapeOptions {
+ new_shape:[int];
+}
+
+table SpaceToBatchNDOptions {
+}
+
+table BatchToSpaceNDOptions {
+}
+
+table SkipGramOptions {
+ ngram_size: int;
+ max_skip_size: int;
+ include_all_ngrams: bool;
+}
+
+table SpaceToDepthOptions {
+ block_size: int;
+}
+
+table DepthToSpaceOptions {
+ block_size: int;
+}
+
+table SubOptions {
+ fused_activation_function:ActivationFunctionType;
+}
+
+table DivOptions {
+ fused_activation_function:ActivationFunctionType;
+}
+
+table TopKV2Options {
+}
+
+enum CombinerType : byte {
+ SUM = 0,
+ MEAN = 1,
+ SQRTN = 2,
+}
+
+table EmbeddingLookupSparseOptions {
+ combiner:CombinerType;
+}
+
+table GatherOptions {
+ axis: int;
+}
+
+table TransposeOptions {
+}
+
+table ExpOptions {
+}
+
+table CosOptions {
+}
+
+table ReducerOptions {
+ keep_dims: bool;
+}
+
+table SqueezeOptions {
+ squeeze_dims:[int];
+}
+
+table SplitOptions {
+ num_splits: int;
+}
+
+table SplitVOptions {
+ num_splits: int;
+}
+
+table StridedSliceOptions {
+ begin_mask: int;
+ end_mask: int;
+ ellipsis_mask: int;
+ new_axis_mask: int;
+ shrink_axis_mask: int;
+}
+
+table LogSoftmaxOptions {
+}
+
+table CastOptions {
+ in_data_type: TensorType;
+ out_data_type: TensorType;
+}
+
+table DequantizeOptions {
+}
+
+table MaximumMinimumOptions {
+}
+
+table TileOptions {
+}
+
+table ArgMaxOptions {
+ output_type : TensorType;
+}
+
+table ArgMinOptions {
+ output_type : TensorType;
+}
+
+table GreaterOptions {
+}
+
+table GreaterEqualOptions {
+}
+
+table LessOptions {
+}
+
+table LessEqualOptions {
+}
+
+table NegOptions {
+}
+
+table SelectOptions {
+}
+
+table SliceOptions {
+}
+
+table TransposeConvOptions {
+ padding:Padding;
+ stride_w:int;
+ stride_h:int;
+}
+
+table ExpandDimsOptions {
+}
+
+table SparseToDenseOptions {
+ validate_indices:bool;
+}
+
+table EqualOptions {
+}
+
+table NotEqualOptions {
+}
+
+table ShapeOptions {
+ // Optional output type of the operation (int32 or int64). Defaults to int32.
+ out_type : TensorType;
+}
+
+table RankOptions {
+}
+
+table PowOptions {
+}
+
+table FakeQuantOptions {
+ // Parameters supported by version 1:
+ min:float;
+ max:float;
+ num_bits:int;
+
+ // Parameters supported by version 2:
+ narrow_range:bool;
+}
+
+table PackOptions {
+ values_count:int;
+ axis:int;
+}
+
+table LogicalOrOptions {
+}
+
+table OneHotOptions {
+ axis:int;
+}
+
+table AbsOptions {
+}
+
+table HardSwishOptions {
+}
+
+table LogicalAndOptions {
+}
+
+table LogicalNotOptions {
+}
+
+table UnpackOptions {
+ num:int;
+ axis:int;
+}
+
+table FloorDivOptions {
+}
+
+table SquareOptions {
+}
+
+table ZerosLikeOptions {
+}
+
+table FillOptions {
+}
+
+table FloorModOptions {
+}
+
+table RangeOptions {
+}
+
+table LeakyReluOptions {
+ alpha:float;
+}
+
+table SquaredDifferenceOptions {
+}
+
+enum MirrorPadMode : byte {
+ // Doesn't include borders.
+ REFLECT = 0,
+ // Includes borders.
+ SYMMETRIC = 1,
+}
+
+table MirrorPadOptions {
+ mode:MirrorPadMode;
+}
+
+table UniqueOptions {
+ idx_out_type:TensorType = INT32;
+}
+
+table ReverseV2Options {
+}
+
+table AddNOptions {
+}
+
+table GatherNdOptions {
+}
+
+table WhereOptions {
+}
+
+table ReverseSequenceOptions {
+ seq_dim:int;
+ batch_dim:int = 0;
+}
+
+table MatrixDiagOptions {
+}
+
+table QuantizeOptions {
+}
+
+table MatrixSetDiagOptions {
+}
+
+table IfOptions {
+ then_subgraph_index:int;
+ else_subgraph_index:int;
+}
+
+table WhileOptions {
+ cond_subgraph_index:int;
+ body_subgraph_index:int;
+}
+
+table NonMaxSuppressionV4Options {
+}
+
+table NonMaxSuppressionV5Options {
+}
+
+table ScatterNdOptions {
+}
+
+// An OperatorCode can be an enum value (BuiltinOperator) if the operator is a
+// builtin, or a string if the operator is custom.
+table OperatorCode {
+ builtin_code:BuiltinOperator;
+ custom_code:string;
+
+ // The version of the operator. The version needs to be bumped whenever new
+ // parameters are introduced into an op.
+ version:int = 1;
+}
+
+enum CustomOptionsFormat : byte {
+ FLEXBUFFERS = 0,
+}
+
+// An operator takes tensors as inputs and outputs. The type of operation being
+// performed is determined by an index into the list of valid OperatorCodes,
+// while the specifics of each operation are configured using builtin_options
+// or custom_options.
+table Operator {
+ // Index into the operator_codes array. Using an integer here avoids
+ // complicated map lookups.
+ opcode_index:uint;
+
+ // Optional input and output tensors are indicated by -1.
+ inputs:[int];
+ outputs:[int];
+
+ builtin_options:BuiltinOptions;
+ custom_options:[ubyte];
+ custom_options_format:CustomOptionsFormat;
+
+ // A list of booleans indicating the input tensors which are being mutated by
+ // this operator (e.g. used by RNN and LSTM).
+ // For example, if the "inputs" array refers to 5 tensors and the second and
+ // fifth are mutable variables, then this list will contain
+ // [false, true, false, false, true].
+ //
+ // If the list is empty, no variable is mutated in this operator.
+ // The list either has the same length as `inputs`, or is empty.
+ mutating_variable_inputs:[bool];
+
+ // A list of indices to the subgraph's "tensors" that are internal to an Op.
+ // Internal tensors are those that do not flow in or out of the operation,
+ // but instead are part of internal computation. As such, the operation's
+ // implementation may manage its memory more efficiently. They are needed,
+ // however (i.e. not just an implementation detail), since they are part of the
+ // computation, which may require relevant metadata such as quantization
+ // parameters.
+ intermediates:[int];
+}
+
+// A subgraph, which typically represents an entire model. (The schema's
+// actual root type is Model; see `root_type Model;` at the end of this file.)
+table SubGraph {
+ // A list of all tensors used in this subgraph.
+ tensors:[Tensor];
+
+ // Indices of the tensors that are inputs into this subgraph. Note this is
+ // the list of non-static tensors that feed into the subgraph for inference.
+ inputs:[int];
+
+ // Indices of the tensors that are outputs out of this subgraph. Note this is
+ // the list of output tensors that are considered the product of the
+ // subgraph's inference.
+ outputs:[int];
+
+ // All operators, in execution order.
+ operators:[Operator];
+
+ // Name of this subgraph (used for debugging).
+ name:string;
+}
+
+// Table of raw data buffers (used for constant tensors). Referenced by tensors
+// by index. The generous alignment accommodates mmap-friendly data structures.
+table Buffer {
+ data:[ubyte] (force_align: 16);
+}
+
+table Metadata {
+ // A human-readable string to uniquely identify a Metadata.
+ name:string;
+ // An index to the buffers table.
+ buffer:uint;
+}
+
+table Model {
+ // Version of the schema.
+ version:uint;
+
+ // A list of all operator codes used in this model. This is
+ // kept in order because operators carry an index into this
+ // vector.
+ operator_codes:[OperatorCode];
+
+ // All the subgraphs of the model. The 0th is assumed to be the main
+ // model.
+ subgraphs:[SubGraph];
+
+ // A description of the model.
+ description:string;
+
+ // Buffers of the model.
+ // Note the 0th entry of this array must be an empty buffer (sentinel).
+ // This is a convention so that tensors without a buffer can provide 0 as
+ // their buffer.
+ buffers:[Buffer];
+
+ // Metadata about the model. Indirects into the existing buffers list.
+ // Deprecated; prefer to use the metadata field.
+ metadata_buffer:[int];
+
+ // Metadata about the model.
+ metadata:[Metadata];
+}
+
+root_type Model;
diff --git a/res/TensorFlowLiteSchema/README.md b/res/TensorFlowLiteSchema/README.md
new file mode 100644
index 000000000..b99cfe4a6
--- /dev/null
+++ b/res/TensorFlowLiteSchema/README.md
@@ -0,0 +1,7 @@
+# TensorFlow Lite Schema
+
+A collection of TensorFlow Lite schema files (one per version).
+
+## How to add a new schema?
+
+Add a "VERSION,URL" entry to [SCHEMA.lst](SCHEMA.lst) and run "download.sh".
diff --git a/res/TensorFlowLiteSchema/SCHEMA.lst b/res/TensorFlowLiteSchema/SCHEMA.lst
new file mode 100644
index 000000000..f264b0c9f
--- /dev/null
+++ b/res/TensorFlowLiteSchema/SCHEMA.lst
@@ -0,0 +1,5 @@
+VERSION,URL
+1.13.1,https://raw.githubusercontent.com/tensorflow/tensorflow/v1.13.1/tensorflow/lite/schema/schema.fbs
+1.14.0,https://raw.githubusercontent.com/tensorflow/tensorflow/v1.14.0/tensorflow/lite/schema/schema.fbs
+1.15.2,https://raw.githubusercontent.com/tensorflow/tensorflow/v1.15.2/tensorflow/lite/schema/schema.fbs
+2.1.0,https://raw.githubusercontent.com/tensorflow/tensorflow/v2.1.0/tensorflow/lite/schema/schema.fbs
diff --git a/res/TensorFlowLiteSchema/download.sh b/res/TensorFlowLiteSchema/download.sh
new file mode 100755
index 000000000..9a946c3d9
--- /dev/null
+++ b/res/TensorFlowLiteSchema/download.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+while IFS=',' read -r VERSION URL
+do
+ echo "Download ${VERSION} from '${URL}'"
+ mkdir -p "${VERSION}"
+ wget -nv -O "${VERSION}/schema.fbs" "${URL}"
+ echo "Download ${VERSION} from '${URL}' - Done"
+done < <(tail -n +2 SCHEMA.lst) # tail skips the CSV header line
diff --git a/res/TensorFlowPythonExamples/.gitignore b/res/TensorFlowPythonExamples/.gitignore
new file mode 100644
index 000000000..bee8a64b7
--- /dev/null
+++ b/res/TensorFlowPythonExamples/.gitignore
@@ -0,0 +1 @@
+__pycache__
diff --git a/res/TensorFlowPythonExamples/README.md b/res/TensorFlowPythonExamples/README.md
new file mode 100644
index 000000000..63e249b93
--- /dev/null
+++ b/res/TensorFlowPythonExamples/README.md
@@ -0,0 +1,31 @@
+# TensorFlow Python Examples
+
+## Prerequisite
+
+- Python 3.X
+- TensorFlow 1.13.1
+
+## Directory Layout
+
+```
+tfpem.py <- TensorFlow Python Example Manager
+examples/
+ [EXAMPLE NAME]/
+ __init__.py
+```
+
+## HOWTO: Create a Python environment
+
+TBA
+
+## HOWTO: Generate a pbtxt from examples
+
+```
+$ /path/to/python -B <path/to/tfpem.py> [EXAMPLE NAME 1] [EXAMPLE NAME 2] ...
+```
+
+NOTE. Add "-B" option not to generate "__pycache__".
+
+## HOWTO: Add a new example
+
+TBA
diff --git a/res/TensorFlowPythonExamples/examples/abs/__init__.py b/res/TensorFlowPythonExamples/examples/abs/__init__.py
new file mode 100755
index 000000000..fd5515595
--- /dev/null
+++ b/res/TensorFlowPythonExamples/examples/abs/__init__.py
@@ -0,0 +1,4 @@
+import tensorflow as tf
+
+in_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(4, 4), name="Hole")
+abs_ = tf.compat.v1.abs(in_)
diff --git a/res/TensorFlowPythonExamples/examples/add/__init__.py b/res/TensorFlowPythonExamples/examples/add/__init__.py
new file mode 100755
index 000000000..7e283f35f
--- /dev/null
+++ b/res/TensorFlowPythonExamples/examples/add/__init__.py
@@ -0,0 +1,5 @@
+import tensorflow as tf
+
+lhs_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(4, 4), name="Hole")
+rhs_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(4, 4), name="Hole")
+op_ = tf.compat.v1.add(lhs_, rhs_)
diff --git a/res/TensorFlowPythonExamples/examples/argmax/__init__.py b/res/TensorFlowPythonExamples/examples/argmax/__init__.py
new file mode 100755
index 000000000..059df97f9
--- /dev/null
+++ b/res/TensorFlowPythonExamples/examples/argmax/__init__.py
@@ -0,0 +1,4 @@
+import tensorflow as tf
+
+in_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(1, 1), name="Hole")
+op_ = tf.compat.v1.math.argmax(in_)
diff --git a/res/TensorFlowPythonExamples/examples/biasadd/__init__.py b/res/TensorFlowPythonExamples/examples/biasadd/__init__.py
new file mode 100755
index 000000000..eb8a69bc3
--- /dev/null
+++ b/res/TensorFlowPythonExamples/examples/biasadd/__init__.py
@@ -0,0 +1,4 @@
+import tensorflow as tf
+
+in_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(1, 1, 2, 3), name="Hole")
+op_ = tf.nn.bias_add(in_, bias=[1.0, 1.0, -1.0], data_format="NHWC")
diff --git a/res/TensorFlowPythonExamples/examples/cos/__init__.py b/res/TensorFlowPythonExamples/examples/cos/__init__.py
new file mode 100755
index 000000000..cfce5d830
--- /dev/null
+++ b/res/TensorFlowPythonExamples/examples/cos/__init__.py
@@ -0,0 +1,4 @@
+import tensorflow as tf
+
+in_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(4, 4), name="Hole")
+op_ = tf.compat.v1.cos(in_)
diff --git a/res/TensorFlowPythonExamples/examples/div/__init__.py b/res/TensorFlowPythonExamples/examples/div/__init__.py
new file mode 100755
index 000000000..2887771ff
--- /dev/null
+++ b/res/TensorFlowPythonExamples/examples/div/__init__.py
@@ -0,0 +1,5 @@
+import tensorflow as tf
+
+lhs_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(4, 4), name="Hole")
+rhs_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(4, 4), name="Hole")
+op_ = tf.compat.v1.div(lhs_, rhs_)
diff --git a/res/TensorFlowPythonExamples/examples/elu/__init__.py b/res/TensorFlowPythonExamples/examples/elu/__init__.py
new file mode 100755
index 000000000..b41f65111
--- /dev/null
+++ b/res/TensorFlowPythonExamples/examples/elu/__init__.py
@@ -0,0 +1,4 @@
+import tensorflow as tf
+
+in_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(1, 1), name="Hole")
+elu_ = tf.compat.v1.nn.elu(in_)
diff --git a/res/TensorFlowPythonExamples/examples/exp/__init__.py b/res/TensorFlowPythonExamples/examples/exp/__init__.py
new file mode 100644
index 000000000..e83638436
--- /dev/null
+++ b/res/TensorFlowPythonExamples/examples/exp/__init__.py
@@ -0,0 +1,4 @@
+import tensorflow as tf
+
+in_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(4, 4), name="Hole")
+op_ = tf.compat.v1.exp(in_)
diff --git a/res/TensorFlowPythonExamples/examples/floor/__init__.py b/res/TensorFlowPythonExamples/examples/floor/__init__.py
new file mode 100755
index 000000000..3b3f5bfc3
--- /dev/null
+++ b/res/TensorFlowPythonExamples/examples/floor/__init__.py
@@ -0,0 +1,4 @@
+import tensorflow as tf
+
+in_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(1, 1), name="Hole")
+op_ = tf.compat.v1.floor(in_)
diff --git a/res/TensorFlowPythonExamples/examples/floordiv/__init__.py b/res/TensorFlowPythonExamples/examples/floordiv/__init__.py
new file mode 100755
index 000000000..34f413f2b
--- /dev/null
+++ b/res/TensorFlowPythonExamples/examples/floordiv/__init__.py
@@ -0,0 +1,5 @@
+import tensorflow as tf
+
+lhs_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(4, 4), name="Hole")
+rhs_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(4, 4), name="Hole")
+op_ = tf.compat.v1.floordiv(lhs_, rhs_)
diff --git a/res/TensorFlowPythonExamples/examples/greater/__init__.py b/res/TensorFlowPythonExamples/examples/greater/__init__.py
new file mode 100755
index 000000000..e88f57471
--- /dev/null
+++ b/res/TensorFlowPythonExamples/examples/greater/__init__.py
@@ -0,0 +1,5 @@
+import tensorflow as tf
+
+lhs_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(4, 4), name="Hole")
+rhs_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(4, 4), name="Hole")
+op_ = tf.compat.v1.greater(lhs_, rhs_)
diff --git a/res/TensorFlowPythonExamples/examples/greater_equal/__init__.py b/res/TensorFlowPythonExamples/examples/greater_equal/__init__.py
new file mode 100755
index 000000000..b15fbd324
--- /dev/null
+++ b/res/TensorFlowPythonExamples/examples/greater_equal/__init__.py
@@ -0,0 +1,5 @@
+import tensorflow as tf
+
+lhs_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(4, 4), name="Hole")
+rhs_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(4, 4), name="Hole")
+op_ = tf.compat.v1.greater_equal(lhs_, rhs_)
diff --git a/res/TensorFlowPythonExamples/examples/leaky_relu/__init__.py b/res/TensorFlowPythonExamples/examples/leaky_relu/__init__.py
new file mode 100755
index 000000000..d595edbd0
--- /dev/null
+++ b/res/TensorFlowPythonExamples/examples/leaky_relu/__init__.py
@@ -0,0 +1,4 @@
+import tensorflow as tf
+
+in_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(1, 1), name="Hole")
+op_ = tf.compat.v1.nn.leaky_relu(in_)
diff --git a/res/TensorFlowPythonExamples/examples/less/__init__.py b/res/TensorFlowPythonExamples/examples/less/__init__.py
new file mode 100755
index 000000000..41ba18c62
--- /dev/null
+++ b/res/TensorFlowPythonExamples/examples/less/__init__.py
@@ -0,0 +1,5 @@
+import tensorflow as tf
+
+lhs_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(4, 4), name="Hole")
+rhs_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(4, 4), name="Hole")
+op_ = tf.compat.v1.less(lhs_, rhs_)
diff --git a/res/TensorFlowPythonExamples/examples/less_equal/__init__.py b/res/TensorFlowPythonExamples/examples/less_equal/__init__.py
new file mode 100755
index 000000000..d60bf2a73
--- /dev/null
+++ b/res/TensorFlowPythonExamples/examples/less_equal/__init__.py
@@ -0,0 +1,5 @@
+import tensorflow as tf
+
+lhs_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(4, 4), name="Hole")
+rhs_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(4, 4), name="Hole")
+op_ = tf.compat.v1.less_equal(lhs_, rhs_)
diff --git a/res/TensorFlowPythonExamples/examples/logical_not/__init__.py b/res/TensorFlowPythonExamples/examples/logical_not/__init__.py
new file mode 100755
index 000000000..f1bcc2c8f
--- /dev/null
+++ b/res/TensorFlowPythonExamples/examples/logical_not/__init__.py
@@ -0,0 +1,4 @@
+import tensorflow as tf
+
+in_ = tf.compat.v1.placeholder(dtype=tf.bool, shape=(4, 4), name="Hole")
+op_ = tf.compat.v1.logical_not(in_)
diff --git a/res/TensorFlowPythonExamples/examples/logical_or/__init__.py b/res/TensorFlowPythonExamples/examples/logical_or/__init__.py
new file mode 100755
index 000000000..991d61ab9
--- /dev/null
+++ b/res/TensorFlowPythonExamples/examples/logical_or/__init__.py
@@ -0,0 +1,5 @@
+import tensorflow as tf
+
+lhs_ = tf.compat.v1.placeholder(dtype=tf.bool, shape=(4, 4), name="Hole")
+rhs_ = tf.compat.v1.placeholder(dtype=tf.bool, shape=(4, 4), name="Hole")
+op_ = tf.compat.v1.logical_or(lhs_, rhs_)
diff --git a/res/TensorFlowPythonExamples/examples/matmul/__init__.py b/res/TensorFlowPythonExamples/examples/matmul/__init__.py
new file mode 100755
index 000000000..760241de7
--- /dev/null
+++ b/res/TensorFlowPythonExamples/examples/matmul/__init__.py
@@ -0,0 +1,5 @@
+import tensorflow as tf
+
+lhs_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(3, 4), name="Hole")
+rhs_ = tf.compat.v1.constant(dtype=tf.float32, shape=(4, 4), name="Hole", value=1.0)
+op_ = tf.compat.v1.matmul(lhs_, rhs_)
diff --git a/res/TensorFlowPythonExamples/examples/multiply/__init__.py b/res/TensorFlowPythonExamples/examples/multiply/__init__.py
new file mode 100755
index 000000000..da8885660
--- /dev/null
+++ b/res/TensorFlowPythonExamples/examples/multiply/__init__.py
@@ -0,0 +1,5 @@
+import tensorflow as tf
+
+lhs_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(4, 4), name="Hole")
+rhs_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(4, 4), name="Hole")
+op_ = tf.compat.v1.multiply(lhs_, rhs_)
diff --git a/res/TensorFlowPythonExamples/examples/not_equal/__init__.py b/res/TensorFlowPythonExamples/examples/not_equal/__init__.py
new file mode 100755
index 000000000..95073fe4a
--- /dev/null
+++ b/res/TensorFlowPythonExamples/examples/not_equal/__init__.py
@@ -0,0 +1,5 @@
+import tensorflow as tf
+
+lhs_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(4, 4), name="Hole")
+rhs_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(4, 4), name="Hole")
+op_ = tf.compat.v1.not_equal(lhs_, rhs_)
diff --git a/res/TensorFlowPythonExamples/examples/pack/__init__.py b/res/TensorFlowPythonExamples/examples/pack/__init__.py
new file mode 100755
index 000000000..609bc9b76
--- /dev/null
+++ b/res/TensorFlowPythonExamples/examples/pack/__init__.py
@@ -0,0 +1,5 @@
+import tensorflow as tf
+
+in_1 = tf.compat.v1.placeholder(dtype=tf.float32, shape=(2, 3, 4), name="Hole")
+in_2 = tf.compat.v1.placeholder(dtype=tf.float32, shape=(2, 3, 4), name="Hole")
+op_ = tf.compat.v1.stack([in_1, in_2])
diff --git a/res/TensorFlowPythonExamples/examples/pad/__init__.py b/res/TensorFlowPythonExamples/examples/pad/__init__.py
new file mode 100755
index 000000000..ac5cf81fa
--- /dev/null
+++ b/res/TensorFlowPythonExamples/examples/pad/__init__.py
@@ -0,0 +1,5 @@
+import tensorflow as tf
+
+tensor_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(2, 3), name="Hole")
+paddings_ = tf.compat.v1.constant([[1, 1], [2, 2]], name="Hole")
+op_ = tf.compat.v1.pad(tensor_, paddings_)
diff --git a/res/TensorFlowPythonExamples/examples/pow/__init__.py b/res/TensorFlowPythonExamples/examples/pow/__init__.py
new file mode 100755
index 000000000..960032a84
--- /dev/null
+++ b/res/TensorFlowPythonExamples/examples/pow/__init__.py
@@ -0,0 +1,5 @@
+import tensorflow as tf
+
+lhs_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(4, 4), name="Hole")
+rhs_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(4, 4), name="Hole")
+op_ = tf.compat.v1.pow(lhs_, rhs_)
diff --git a/res/TensorFlowPythonExamples/examples/prelu/__init__.py b/res/TensorFlowPythonExamples/examples/prelu/__init__.py
new file mode 100755
index 000000000..2ab030265
--- /dev/null
+++ b/res/TensorFlowPythonExamples/examples/prelu/__init__.py
@@ -0,0 +1,7 @@
+import tensorflow as tf
+from tensorflow.compat.v1.keras import layers
+
+model = tf.compat.v1.keras.Sequential()
+model.add(layers.PReLU())
+# TODO Find a way to freeze Keras model for inference
+model.build((1, 1))
diff --git a/res/TensorFlowPythonExamples/examples/relu/__init__.py b/res/TensorFlowPythonExamples/examples/relu/__init__.py
new file mode 100755
index 000000000..a144a1212
--- /dev/null
+++ b/res/TensorFlowPythonExamples/examples/relu/__init__.py
@@ -0,0 +1,4 @@
+import tensorflow as tf
+
+in_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(1, 1), name="Hole")
+op_ = tf.compat.v1.nn.relu(in_)
diff --git a/res/TensorFlowPythonExamples/examples/relu6/__init__.py b/res/TensorFlowPythonExamples/examples/relu6/__init__.py
new file mode 100755
index 000000000..f58ae7c2c
--- /dev/null
+++ b/res/TensorFlowPythonExamples/examples/relu6/__init__.py
@@ -0,0 +1,4 @@
+import tensorflow as tf
+
+in_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(1, 1), name="Hole")
+op_ = tf.compat.v1.nn.relu6(in_)
diff --git a/res/TensorFlowPythonExamples/examples/reshape/__init.py__ b/res/TensorFlowPythonExamples/examples/reshape/__init.py__
new file mode 100644
index 000000000..3afe7efa9
--- /dev/null
+++ b/res/TensorFlowPythonExamples/examples/reshape/__init.py__
@@ -0,0 +1,4 @@
+import tensorflow as tf
+
+in_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(4, 4), name="Hole")
+op_ = tf.compat.v1.reshape(in_, shape=[2, 2, 2, 2])
diff --git a/res/TensorFlowPythonExamples/examples/resize_bilinear/__init__.py b/res/TensorFlowPythonExamples/examples/resize_bilinear/__init__.py
new file mode 100755
index 000000000..422bf1db5
--- /dev/null
+++ b/res/TensorFlowPythonExamples/examples/resize_bilinear/__init__.py
@@ -0,0 +1,4 @@
+import tensorflow as tf
+
+in_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(1, 8, 8, 3), name="Hole")
+op_ = tf.compat.v1.image.resize_bilinear(in_, [16, 16])
diff --git a/res/TensorFlowPythonExamples/examples/resize_nearest_neighbor/__init__.py b/res/TensorFlowPythonExamples/examples/resize_nearest_neighbor/__init__.py
new file mode 100755
index 000000000..a14022948
--- /dev/null
+++ b/res/TensorFlowPythonExamples/examples/resize_nearest_neighbor/__init__.py
@@ -0,0 +1,4 @@
+import tensorflow as tf
+
+in_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(1, 8, 8, 3), name="Hole")
+op_ = tf.compat.v1.image.resize_nearest_neighbor(in_, [16, 16])
diff --git a/res/TensorFlowPythonExamples/examples/rsqrt/__init__.py b/res/TensorFlowPythonExamples/examples/rsqrt/__init__.py
new file mode 100755
index 000000000..90500bd11
--- /dev/null
+++ b/res/TensorFlowPythonExamples/examples/rsqrt/__init__.py
@@ -0,0 +1,4 @@
+import tensorflow as tf
+
+in_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(1, 1), name="Hole")
+op_ = tf.compat.v1.rsqrt(in_)
diff --git a/res/TensorFlowPythonExamples/examples/sigmoid/__init__.py b/res/TensorFlowPythonExamples/examples/sigmoid/__init__.py
new file mode 100755
index 000000000..43328f2cb
--- /dev/null
+++ b/res/TensorFlowPythonExamples/examples/sigmoid/__init__.py
@@ -0,0 +1,4 @@
+import tensorflow as tf
+
+in_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(1, 1), name="Hole")
+op_ = tf.compat.v1.nn.sigmoid(in_)
diff --git a/res/TensorFlowPythonExamples/examples/softmax/__init__.py b/res/TensorFlowPythonExamples/examples/softmax/__init__.py
new file mode 100755
index 000000000..5b8d1cdfb
--- /dev/null
+++ b/res/TensorFlowPythonExamples/examples/softmax/__init__.py
@@ -0,0 +1,4 @@
+import tensorflow as tf
+
+in_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(1, 1), name="Hole")
+op_ = tf.compat.v1.nn.softmax(in_)
diff --git a/res/TensorFlowPythonExamples/examples/sqrt/__init__.py b/res/TensorFlowPythonExamples/examples/sqrt/__init__.py
new file mode 100755
index 000000000..4aab5da9c
--- /dev/null
+++ b/res/TensorFlowPythonExamples/examples/sqrt/__init__.py
@@ -0,0 +1,4 @@
+import tensorflow as tf
+
+in_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(1, 1), name="Hole")
+op_ = tf.compat.v1.sqrt(in_)
diff --git a/res/TensorFlowPythonExamples/examples/subtract/__init__.py b/res/TensorFlowPythonExamples/examples/subtract/__init__.py
new file mode 100755
index 000000000..feb11b12e
--- /dev/null
+++ b/res/TensorFlowPythonExamples/examples/subtract/__init__.py
@@ -0,0 +1,5 @@
+import tensorflow as tf
+
+lhs_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(4, 4), name="Hole")
+rhs_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(4, 4), name="Hole")
+op_ = tf.compat.v1.subtract(lhs_, rhs_)
diff --git a/res/TensorFlowPythonExamples/examples/tanh/__init__.py b/res/TensorFlowPythonExamples/examples/tanh/__init__.py
new file mode 100755
index 000000000..dd202a78d
--- /dev/null
+++ b/res/TensorFlowPythonExamples/examples/tanh/__init__.py
@@ -0,0 +1,4 @@
+import tensorflow as tf
+
+in_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(1, 1), name="Hole")
+op_ = tf.compat.v1.tanh(in_)
diff --git a/res/TensorFlowPythonExamples/examples/yuv_to_rgb/__init__.py b/res/TensorFlowPythonExamples/examples/yuv_to_rgb/__init__.py
new file mode 100755
index 000000000..5230bbac6
--- /dev/null
+++ b/res/TensorFlowPythonExamples/examples/yuv_to_rgb/__init__.py
@@ -0,0 +1,4 @@
+import tensorflow as tf
+
+in_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(1, 16, 16, 3), name="Hole")
+op_ = tf.compat.v1.image.yuv_to_rgb(in_)
diff --git a/res/TensorFlowPythonExamples/requirements.txt b/res/TensorFlowPythonExamples/requirements.txt
new file mode 100644
index 000000000..2a938941f
--- /dev/null
+++ b/res/TensorFlowPythonExamples/requirements.txt
@@ -0,0 +1,18 @@
+absl-py==0.9.0
+astor==0.8.1
+gast==0.3.3
+grpcio==1.27.2
+h5py==2.10.0
+Keras-Applications==1.0.8
+Keras-Preprocessing==1.1.0
+Markdown==3.2.1
+mock==4.0.2
+numpy==1.18.2
+pkg-resources==0.0.0
+protobuf==3.11.3
+six==1.14.0
+tensorboard==1.13.1
+tensorflow==1.13.1
+tensorflow-estimator==1.13.0
+termcolor==1.1.0
+Werkzeug==1.0.0
diff --git a/res/TensorFlowPythonExamples/tfpem.py b/res/TensorFlowPythonExamples/tfpem.py
new file mode 100755
index 000000000..514200c19
--- /dev/null
+++ b/res/TensorFlowPythonExamples/tfpem.py
@@ -0,0 +1,25 @@
+# TensorFlow Python Example Manager
+
+import tensorflow as tf
+import importlib
+import argparse
+
+parser = argparse.ArgumentParser(description='Process TensorFlow Python Examples')
+
+parser.add_argument('--mode', metavar='MODE', choices=['pbtxt'], default='pbtxt')
+parser.add_argument('examples', metavar='EXAMPLES', nargs='+')
+
+args = parser.parse_args()
+
+if args.mode == 'pbtxt':
+ for example in args.examples:
+ print("Generate '" + example + ".pbtxt'")
+
+ tf.compat.v1.reset_default_graph()
+ # https://stackoverflow.com/questions/37808866/proper-way-to-dynamically-import-a-module-with-relative-imports
+ importlib.import_module("examples." + example)
+
+ with open(example + ".pbtxt", "w") as f:
+ f.write(str(tf.compat.v1.get_default_graph().as_graph_def(add_shapes=True)))
+
+ print("Generate '" + example + ".pbtxt' - Done")
diff --git a/res/TensorFlowTests/NET_0003/test.py b/res/TensorFlowTests/NET_0003/test.py
index b5bad2dae..b5bad2dae 100644..100755
--- a/res/TensorFlowTests/NET_0003/test.py
+++ b/res/TensorFlowTests/NET_0003/test.py
diff --git a/res/TensorFlowTests/NET_0004/test.py b/res/TensorFlowTests/NET_0004/test.py
index a0c790d79..a0c790d79 100644..100755
--- a/res/TensorFlowTests/NET_0004/test.py
+++ b/res/TensorFlowTests/NET_0004/test.py
diff --git a/res/TensorFlowTests/UNIT_Maximum_000/test.info b/res/TensorFlowTests/UNIT_Maximum_000/test.info
new file mode 100644
index 000000000..f8f74e382
--- /dev/null
+++ b/res/TensorFlowTests/UNIT_Maximum_000/test.info
@@ -0,0 +1,3 @@
+input, input_01:0, TF_FLOAT, [1, 3, 3, 1]
+input, input_02:0, TF_FLOAT, [1, 3, 3, 1]
+output, maximum:0, TF_FLOAT, [1, 3, 3, 1]
diff --git a/res/TensorFlowTests/UNIT_Maximum_000/test.pbtxt b/res/TensorFlowTests/UNIT_Maximum_000/test.pbtxt
new file mode 100644
index 000000000..9ab81b52b
--- /dev/null
+++ b/res/TensorFlowTests/UNIT_Maximum_000/test.pbtxt
@@ -0,0 +1,70 @@
+node {
+ name: "input_01"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ dim {
+ size: 1
+ }
+ dim {
+ size: 3
+ }
+ dim {
+ size: 3
+ }
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+}
+node {
+ name: "input_02"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ dim {
+ size: 1
+ }
+ dim {
+ size: 3
+ }
+ dim {
+ size: 3
+ }
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+}
+node {
+ name: "maximum"
+ op: "Maximum"
+ input: "input_01"
+ input: "input_02"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+}
diff --git a/res/TensorFlowTests/UNIT_Maximum_001/test.info b/res/TensorFlowTests/UNIT_Maximum_001/test.info
new file mode 100644
index 000000000..24dc2be07
--- /dev/null
+++ b/res/TensorFlowTests/UNIT_Maximum_001/test.info
@@ -0,0 +1,3 @@
+input, input_01:0, TF_FLOAT, [1, 3, 3, 5]
+input, input_02:0, TF_FLOAT, [1, 1, 1, 5]
+output, maximum:0, TF_FLOAT, [1, 3, 3, 5]
diff --git a/res/TensorFlowTests/UNIT_Maximum_001/test.pbtxt b/res/TensorFlowTests/UNIT_Maximum_001/test.pbtxt
new file mode 100644
index 000000000..29e90ab2f
--- /dev/null
+++ b/res/TensorFlowTests/UNIT_Maximum_001/test.pbtxt
@@ -0,0 +1,70 @@
+node {
+ name: "input_01"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ dim {
+ size: 1
+ }
+ dim {
+ size: 3
+ }
+ dim {
+ size: 3
+ }
+ dim {
+ size: 5
+ }
+ }
+ }
+ }
+}
+node {
+ name: "input_02"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ dim {
+ size: 1
+ }
+ dim {
+ size: 1
+ }
+ dim {
+ size: 1
+ }
+ dim {
+ size: 5
+ }
+ }
+ }
+ }
+}
+node {
+ name: "maximum"
+ op: "Maximum"
+ input: "input_01"
+ input: "input_02"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+}
diff --git a/res/TensorFlowTests/UNIT_Maximum_002/test.info b/res/TensorFlowTests/UNIT_Maximum_002/test.info
new file mode 100644
index 000000000..44ddd085a
--- /dev/null
+++ b/res/TensorFlowTests/UNIT_Maximum_002/test.info
@@ -0,0 +1,3 @@
+input, input_01:0, TF_FLOAT, [1, 3, 3, 5]
+input, input_02:0, TF_FLOAT, [5]
+output, maximum:0, TF_FLOAT, [1, 3, 3, 5]
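The `[1, 3, 3, 5]` and `[5]` input shapes in this test exercise numpy-style broadcasting, which TensorFlow's `Maximum` op also supports: trailing dimensions are aligned, so the `[5]` input applies at every `1x3x3` position. A quick numpy check:

```
import numpy as np

a = np.zeros((1, 3, 3, 5), dtype=np.float32)
b = np.arange(5, dtype=np.float32)  # shape (5,)

print(np.maximum(a, b).shape)  # (1, 3, 3, 5)
```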
diff --git a/res/TensorFlowTests/UNIT_Maximum_002/test.pbtxt b/res/TensorFlowTests/UNIT_Maximum_002/test.pbtxt
new file mode 100644
index 000000000..ca4ae80de
--- /dev/null
+++ b/res/TensorFlowTests/UNIT_Maximum_002/test.pbtxt
@@ -0,0 +1,61 @@
+node {
+ name: "input_01"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ dim {
+ size: 1
+ }
+ dim {
+ size: 3
+ }
+ dim {
+ size: 3
+ }
+ dim {
+ size: 5
+ }
+ }
+ }
+ }
+}
+node {
+ name: "input_02"
+ op: "Placeholder"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ dim {
+ size: 5
+ }
+ }
+ }
+ }
+}
+node {
+ name: "maximum"
+ op: "Maximum"
+ input: "input_01"
+ input: "input_02"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+}