Diffstat (limited to 'nnpackage/schema/circle_schema.fbs')
-rw-r--r--  nnpackage/schema/circle_schema.fbs | 273 ++++++++++++++++++++++---
1 file changed, 263 insertions(+), 10 deletions(-)
diff --git a/nnpackage/schema/circle_schema.fbs b/nnpackage/schema/circle_schema.fbs
index 89e458c4f..cdc10361b 100644
--- a/nnpackage/schema/circle_schema.fbs
+++ b/nnpackage/schema/circle_schema.fbs
@@ -1,4 +1,4 @@
-// Copyright (c) 2019~2020 Samsung Electronics Co., Ltd. All Rights Reserved
+// Copyright (c) 2019~2023 Samsung Electronics Co., Ltd. All Rights Reserved
// Copyright 2017 The TensorFlow Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
@@ -27,6 +27,10 @@
// `BATCH_MATMUL` operator, `FLOAT64` tensor type,
// `asymmetric_quantize_inputs` for several operator options
// Version 0.2: BCQ_GATHER and BCQ_FULLY_CONNECTED are added.
+// Version 0.3: SHUFFLED16x1FLOAT32 is added.
+// Version 0.4: Base up to TensorFlow Lite v2.7.0 schema.
+// Version 0.5: Base up to TensorFlow Lite v2.10.1 schema.
+// Version 0.6: Base up to TensorFlow Lite v2.13.0 schema.
namespace circle;
@@ -51,6 +55,16 @@ enum TensorType : byte {
COMPLEX64 = 8,
INT8 = 9,
FLOAT64 = 10,
+ COMPLEX128 = 11,
+ UINT64 = 12,
+ // Experimental: Resource and variant types are experimental and subject to
+ // change. Do not implement custom kernels using resource & variant types
+ // yet.
+ RESOURCE = 13,
+ VARIANT = 14,
+ UINT32 = 15,
+ UINT16 = 16,
+ INT4 = 17,
}
// Custom quantization parameters for experimenting with new quantization
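The new entries extend TensorType without renumbering existing values, so older readers stay valid. As a rough illustration of what the widths imply for buffer layout, here is a hypothetical helper (not part of the schema or its generated API) mapping a few of the numeric values above to element bit widths; INT4 is the first sub-byte type, so widths are given in bits:

// Hypothetical helper: bit width per element for a subset of the TensorType
// values above. INT4 (17) is sub-byte: two elements pack into one byte.
int ElementBits(int tensor_type) {
  switch (tensor_type) {
    case 0:  return 32;   // FLOAT32
    case 1:  return 16;   // FLOAT16
    case 9:  return 8;    // INT8
    case 11: return 128;  // COMPLEX128 (two 64-bit floats)
    case 12: return 64;   // UINT64
    case 15: return 32;   // UINT32
    case 16: return 16;   // UINT16
    case 17: return 4;    // INT4
    default: return -1;   // RESOURCE, VARIANT, etc. have no fixed width
  }
}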
@@ -190,6 +204,16 @@ table SparsityParameters {
dim_metadata:[DimensionMetadata];
}
+// The nested tensor type for VARIANT type.
+table VariantSubType {
+ // The tensor shape.
+ shape:[int];
+ type:TensorType;
+ // If false, the rank (i.e. the number of tensor dimensions) is unknown,
+ // and "shape" must be [].
+ has_rank: bool = false;
+}
+
table Tensor {
// The tensor shape. The meaning of each entry is operator-specific but
// builtin ops use: [batch size, height, width, number of channels] (That's
@@ -217,14 +241,27 @@ table Tensor {
// Encodes `shape` with unknown dimensions. Unknown dimensions are
// represented with -1.
shape_signature:[int]; // Optional.
+
+ // If false, the rank (i.e. the number of tensor dimensions) is unknown,
+ // and "shape" must be [].
+ has_rank: bool = false;
+
+ // The nested Tensor types for VARIANT type. This is always empty for
+ // non-VARIANT types. It is optional because the nested type can be omitted.
+ // Currently only one subtype is supported. The field is defined as an array
+ // to leave room for supporting multiple subtypes in the future.
+ variant_tensors:[VariantSubType];
}
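Together, has_rank and shape let a reader distinguish a scalar (rank 0) from a tensor of unknown rank, which the previous schema revision could not express. A minimal sketch of the reading logic, assuming the C++ classes that flatc generates from this schema (the header name circle_schema_generated.h is an assumption):

#include <cstdio>

#include "circle_schema_generated.h"  // assumed flatc output for this schema

// Distinguish "scalar" from "rank unknown" using the new has_rank field.
void DescribeRank(const circle::Tensor *tensor) {
  if (!tensor->has_rank()) {
    // Rank unknown: the schema requires shape to be [] in this case.
    std::printf("rank unknown\n");
  } else if (tensor->shape() == nullptr || tensor->shape()->size() == 0) {
    std::printf("rank 0 (scalar)\n");
  } else {
    std::printf("rank %u\n", tensor->shape()->size());
  }
}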
// A list of builtin operators. Builtin operators are slightly faster than custom
// ones, but not by much. Moreover, while custom operators accept an opaque
// object containing configuration parameters, builtins have a predetermined
// set of acceptable options.
-
-enum BuiltinOperator : ubyte {
+// LINT.IfChange
+enum BuiltinOperator : int32 {
+ BCQ_GATHER = -4,
+ BCQ_FULLY_CONNECTED = -3,
+ INSTANCE_NORM = -2,
ADD = 0,
AVERAGE_POOL_2D = 1,
CONCATENATION = 2,
@@ -257,7 +294,6 @@ enum BuiltinOperator : ubyte {
SPACE_TO_DEPTH = 26,
SVDF = 27,
TANH = 28,
- // TODO(aselle): Consider rename to CONCATENATE_EMBEDDINGS
CONCAT_EMBEDDINGS = 29,
SKIP_GRAM = 30,
CALL = 31,
@@ -359,10 +395,43 @@ enum BuiltinOperator : ubyte {
DENSIFY = 124,
SEGMENT_SUM = 125,
BATCH_MATMUL = 126,
- BCQ_GATHER = 252,
- BCQ_FULLY_CONNECTED = 253,
- INSTANCE_NORM = 254,
-}
+ PLACEHOLDER_FOR_GREATER_OP_CODES = 127,
+ CUMSUM = 128,
+ CALL_ONCE = 129,
+ BROADCAST_TO = 130,
+ RFFT2D = 131,
+ CONV_3D = 132,
+ IMAG = 133,
+ REAL = 134,
+ COMPLEX_ABS = 135,
+ HASHTABLE = 136,
+ HASHTABLE_FIND = 137,
+ HASHTABLE_IMPORT = 138,
+ HASHTABLE_SIZE = 139,
+ REDUCE_ALL = 140,
+ CONV_3D_TRANSPOSE = 141,
+ VAR_HANDLE = 142,
+ READ_VARIABLE = 143,
+ ASSIGN_VARIABLE = 144,
+ BROADCAST_ARGS = 145,
+ RANDOM_STANDARD_NORMAL = 146,
+ BUCKETIZE = 147,
+ RANDOM_UNIFORM = 148,
+ MULTINOMIAL = 149,
+ GELU = 150,
+ DYNAMIC_UPDATE_SLICE = 151,
+ RELU_0_TO_1 = 152,
+ UNSORTED_SEGMENT_PROD = 153,
+ UNSORTED_SEGMENT_MAX = 154,
+ UNSORTED_SEGMENT_SUM = 155,
+ ATAN2 = 156,
+ UNSORTED_SEGMENT_MIN = 157,
+ SIGN = 158,
+ BITCAST = 159,
+ BITWISE_XOR = 160,
+ RIGHT_SHIFT = 161,
+}
+// LINT.ThenChange(nnapi_linter/linter.proto)
// Options for the builtin operators.
union BuiltinOptions {
@@ -467,6 +536,31 @@ union BuiltinOptions {
DensifyOptions,
SegmentSumOptions,
BatchMatMulOptions,
+ CumsumOptions,
+ CallOnceOptions,
+ BroadcastToOptions,
+ Rfft2dOptions,
+ Conv3DOptions,
+ HashtableOptions,
+ HashtableFindOptions,
+ HashtableImportOptions,
+ HashtableSizeOptions,
+ VarHandleOptions,
+ ReadVariableOptions,
+ AssignVariableOptions,
+ RandomOptions,
+ BucketizeOptions,
+ GeluOptions,
+ DynamicUpdateSliceOptions,
+ UnsortedSegmentProdOptions,
+ UnsortedSegmentMaxOptions,
+ UnsortedSegmentMinOptions,
+ UnsortedSegmentSumOptions,
+ ATan2Options,
+ SignOptions,
+ BitcastOptions,
+ BitwiseXorOptions,
+ RightShiftOptions,
BCQGatherOptions = 252,
BCQFullyConnectedOptions = 253,
InstanceNormOptions = 254,
@@ -492,6 +586,18 @@ table Conv2DOptions {
dilation_h_factor:int = 1;
}
+// Options for both Conv3D and Conv3DTranspose.
+table Conv3DOptions {
+ padding:Padding;
+ stride_d:int;
+ stride_w:int;
+ stride_h:int;
+ fused_activation_function:ActivationFunctionType;
+ dilation_d_factor:int = 1;
+ dilation_w_factor:int = 1;
+ dilation_h_factor:int = 1;
+}
+
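A sketch of serializing a Conv3DOptions table with the FlatBuffers C++ API; CreateConv3DOptions and the enum constants are the names flatc would generate from this schema (assumed here, not verified against a generated header). The positional arguments follow the field order declared above:

#include "circle_schema_generated.h"  // assumed flatc output for this schema
#include "flatbuffers/flatbuffers.h"

// Serialize a Conv3DOptions table: SAME padding, unit strides, default
// dilation factors.
flatbuffers::Offset<circle::Conv3DOptions> MakeConv3DOptions(
    flatbuffers::FlatBufferBuilder &fbb)
{
  return circle::CreateConv3DOptions(
      fbb, circle::Padding_SAME,
      /*stride_d=*/1, /*stride_w=*/1, /*stride_h=*/1,
      circle::ActivationFunctionType_NONE,
      /*dilation_d_factor=*/1, /*dilation_w_factor=*/1,
      /*dilation_h_factor=*/1);
}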
table Pool2DOptions {
padding:Padding;
stride_w:int;
@@ -564,6 +670,7 @@ table BidirectionalSequenceRNNOptions {
enum FullyConnectedOptionsWeightsFormat: byte {
DEFAULT = 0,
SHUFFLED4x16INT8 = 1,
+ SHUFFLED16x1FLOAT32 = 127
}
// An implementation of TensorFlow fully_connected (a.k.a Dense) layer.
@@ -597,6 +704,8 @@ table ConcatenationOptions {
table AddOptions {
fused_activation_function:ActivationFunctionType;
+ // Parameters supported by version 3.
+ pot_scale_int16:bool = true;
}
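pot_scale_int16 defaults to true, which preserves the original int16 behaviour: quantization scales restricted to powers of two, so kernels can rescale with bit shifts instead of fixed-point multiplies; false permits general scales (this reading of the flag follows TensorFlow Lite's int16 ADD support and is an assumption here). A hypothetical check for the restriction:

#include <cmath>

// True if `scale` is an exact power of two. Under pot_scale_int16 = true,
// int16 inputs/outputs are expected to carry such scales.
bool IsPowerOfTwoScale(float scale) {
  if (scale <= 0.0f) return false;
  int exponent = 0;
  // frexp decomposes scale as mantissa * 2^exponent, mantissa in [0.5, 1).
  return std::frexp(scale, &exponent) == 0.5f;
}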
table MulOptions {
@@ -604,6 +713,7 @@ table MulOptions {
}
table L2NormOptions {
+ // This field is currently ignored in the L2 Norm Op.
fused_activation_function:ActivationFunctionType;
}
@@ -645,8 +755,11 @@ table UnidirectionalSequenceLSTMOptions {
// If true then first dimension is sequence, otherwise batch.
time_major:bool;
- // Parameter for Unidirectional Sequence LSTM version 4.
+ // Parameter for Unidirectional Sequence LSTM version 3.
asymmetric_quantize_inputs:bool;
+
+ // Parameter for Unidirectional Sequence LSTM version 4.
+ diagonal_recurrent_tensors:bool;
}
table BidirectionalSequenceLSTMOptions {
@@ -677,6 +790,7 @@ table ResizeBilinearOptions {
table ResizeNearestNeighborOptions {
align_corners: bool;
+ half_pixel_centers: bool;
}
// A call operation options
@@ -717,6 +831,8 @@ table DepthToSpaceOptions {
table SubOptions {
fused_activation_function:ActivationFunctionType;
+ // Parameters supported by version 5.
+ pot_scale_int16:bool = true;
}
table DivOptions {
@@ -738,6 +854,8 @@ table EmbeddingLookupSparseOptions {
table GatherOptions {
axis: int;
+ // Parameters for Gather version 5 or above.
+ batch_dims: int = 0;
}
table TransposeOptions {
@@ -820,9 +938,13 @@ table SliceOptions {
}
table TransposeConvOptions {
+ // Parameters supported by version 1, 2, 3:
padding:Padding;
stride_w:int;
stride_h:int;
+
+ // Parameters supported by version 4:
+ fused_activation_function:ActivationFunctionType = NONE;
}
table ExpandDimsOptions {
@@ -960,6 +1082,10 @@ table IfOptions {
else_subgraph_index:int;
}
+table CallOnceOptions {
+ init_subgraph_index:int;
+}
+
table WhileOptions {
cond_subgraph_index:int;
body_subgraph_index:int;
@@ -986,6 +1112,92 @@ table SegmentSumOptions {
table BatchMatMulOptions {
adjoint_lhs:bool;
adjoint_rhs:bool;
+ // Parameters for BatchMatMul version 4 or above.
+ // If set to true, then a weights-only op will use asymmetric quantization
+ // for inputs.
+ asymmetric_quantize_inputs: bool;
+}
+
+table CumsumOptions {
+ exclusive:bool;
+ reverse:bool;
+}
+
+table BroadcastToOptions {
+}
+
+table Rfft2dOptions {
+}
+
+table HashtableOptions {
+ // The identity of hash tables. This identity will be used across different
+ // subgraphs in the same interpreter instance.
+ table_id:int;
+ key_dtype:TensorType;
+ value_dtype:TensorType;
+}
+
+table HashtableFindOptions {
+}
+
+table HashtableImportOptions {
+}
+
+table HashtableSizeOptions {
+}
+
+table VarHandleOptions {
+ container:string;
+ shared_name:string;
+}
+
+table ReadVariableOptions {
+}
+
+table AssignVariableOptions {
+}
+
+table RandomOptions {
+ seed: long;
+ seed2: long;
+}
+
+table BucketizeOptions {
+ boundaries: [float]; // The bucket boundaries.
+}
+
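The schema only carries the boundaries; the bucketing rule itself follows TensorFlow's Bucketize convention (an assumption here): each output element is the number of boundaries less than or equal to the input value. A self-contained sketch:

#include <algorithm>
#include <cstdio>
#include <vector>

// Bucket index per element: the number of boundaries <= value, so values
// equal to a boundary fall into the upper bucket.
std::vector<int> Bucketize(const std::vector<float> &values,
                           const std::vector<float> &boundaries)
{
  std::vector<int> out;
  out.reserve(values.size());
  for (float v : values) {
    auto it = std::upper_bound(boundaries.begin(), boundaries.end(), v);
    out.push_back(static_cast<int>(it - boundaries.begin()));
  }
  return out;
}

int main() {
  // With boundaries [0, 10, 100]: -5 -> bucket 0, 10 -> 2, 150 -> 3.
  for (int b : Bucketize({-5.f, 10.f, 150.f}, {0.f, 10.f, 100.f}))
    std::printf("%d ", b);
}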
+table GeluOptions {
+ approximate: bool;
+}
+
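When approximate is false the op computes exact GELU, x * Phi(x) with Phi the standard normal CDF; when true it uses the usual tanh approximation from the GELU paper (the exact/approximate split mirrors TensorFlow's convention and is an assumption here). Both variants for reference:

#include <cmath>

// Exact GELU: x * Phi(x), where Phi is the standard normal CDF.
float GeluExact(float x) {
  return 0.5f * x * (1.0f + std::erf(x * 0.70710678f));  // x / sqrt(2)
}

// Tanh approximation, expected when `approximate` is true:
// 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
float GeluApproximate(float x) {
  const float kSqrt2OverPi = 0.79788456f;
  return 0.5f * x *
         (1.0f + std::tanh(kSqrt2OverPi * (x + 0.044715f * x * x * x)));
}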
+table DynamicUpdateSliceOptions {
+}
+
+table UnsortedSegmentProdOptions {
+}
+
+table UnsortedSegmentMaxOptions {
+}
+
+table UnsortedSegmentSumOptions {
+}
+
+table ATan2Options {
+}
+
+table UnsortedSegmentMinOptions {
+}
+
+table SignOptions {
+}
+
+table BitcastOptions {
+}
+
+table BitwiseXorOptions {
+}
+
+table RightShiftOptions {
}
table BCQGatherOptions {
@@ -1006,12 +1218,21 @@ table InstanceNormOptions {
// An OperatorCode can be an enum value (BuiltinOperator) if the operator is a
// builtin, or a string if the operator is custom.
table OperatorCode {
- builtin_code:BuiltinOperator;
+ // This field is for backward compatibility. It is used when the value of
+ // the extended builtin_code field is less than
+ // BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES.
+ deprecated_builtin_code:byte;
custom_code:string;
// The version of the operator. The version needs to be bumped whenever new
// parameters are introduced into an op.
version:int = 1;
+
+ // This field was introduced to resolve the op builtin code shortage problem
+ // (the original BuiltinOperator enum field was represented as a byte).
+ // It is used when the value of the extended builtin_code field is greater
+ // than BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES.
+ builtin_code:BuiltinOperator;
}
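A sketch of resolving the effective op code under the rule stated in the comments above, assuming the flatc-generated accessors; TensorFlow Lite ships a similar helper (GetBuiltinCode) in its schema utilities:

#include "circle_schema_generated.h"  // assumed flatc output for this schema

// Resolve the effective op code. Codes below the placeholder still live in
// the byte-sized deprecated_builtin_code (including circle's negative BCQ /
// INSTANCE_NORM values); larger codes only fit in the int32 builtin_code.
circle::BuiltinOperator GetBuiltinOperator(const circle::OperatorCode *opcode)
{
  if (opcode->builtin_code() >=
      circle::BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES)
  {
    return opcode->builtin_code();
  }
  return static_cast<circle::BuiltinOperator>(opcode->deprecated_builtin_code());
}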
enum CustomOptionsFormat : byte {
@@ -1102,6 +1323,35 @@ table Metadata {
buffer:uint;
}
+// Maps an alias name of a tensor to the tensor index in the graph.
+// This is used in SignatureDef.
+table TensorMap {
+ // Represents the alias to use for this tensor.
+ name:string;
+
+ // The actual tensor index in the primary graph that 'name' corresponds to.
+ tensor_index:uint;
+}
+
+// This corresponds to SignatureDef in TensorFlow SavedModel.
+// The SignatureDef will be part of the SavedModel provided for conversion.
+table SignatureDef {
+ // Named inputs for this signature.
+ inputs:[TensorMap];
+
+ // Named outputs for this signature.
+ outputs:[TensorMap];
+
+ // Key value which was in the TensorFlow SavedModel SignatureDef map.
+ signature_key:string;
+
+ // Model tag, deprecated.
+ deprecated_tag:string (deprecated);
+
+ // Index of the subgraph that corresponds to the exported method.
+ subgraph_index:uint;
+}
+
table Model {
// Version of the schema.
version:uint;
@@ -1130,6 +1380,9 @@ table Model {
// Metadata about the model.
metadata:[Metadata];
+
+ // Optional SignatureDefs for the model.
+ signature_defs:[SignatureDef];
}
root_type Model;
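To close the loop, a sketch of walking the new signature_defs from a loaded model. circle::GetModel follows from the root_type declaration above; the remaining accessor names are assumed to match what flatc generates:

#include <cstring>

#include "circle_schema_generated.h"  // assumed flatc output for this schema

// Look up the tensor index that a signature exposes under `input_name`.
// Returns -1 if the signature key or the alias is absent.
int FindSignatureInput(const circle::Model *model, const char *signature_key,
                       const char *input_name)
{
  if (model->signature_defs() == nullptr) return -1;
  for (const circle::SignatureDef *sig : *model->signature_defs()) {
    if (sig->signature_key() == nullptr ||
        std::strcmp(sig->signature_key()->c_str(), signature_key) != 0)
      continue;
    if (sig->inputs() == nullptr) continue;
    for (const circle::TensorMap *entry : *sig->inputs()) {
      if (entry->name() != nullptr &&
          std::strcmp(entry->name()->c_str(), input_name) == 0)
        return static_cast<int>(entry->tensor_index());
    }
  }
  return -1;
}

The model pointer would come from circle::GetModel(buffer) once the .circle file is mapped into memory.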