diff options
Diffstat (limited to 'runtime/onert/frontend')
34 files changed, 27333 insertions, 0 deletions
diff --git a/runtime/onert/frontend/CMakeLists.txt b/runtime/onert/frontend/CMakeLists.txt new file mode 100644 index 000000000..5ea6cdadd --- /dev/null +++ b/runtime/onert/frontend/CMakeLists.txt @@ -0,0 +1 @@ +add_subdirectories() diff --git a/runtime/onert/frontend/base_loader/CMakeLists.txt b/runtime/onert/frontend/base_loader/CMakeLists.txt new file mode 100644 index 000000000..921206c31 --- /dev/null +++ b/runtime/onert/frontend/base_loader/CMakeLists.txt @@ -0,0 +1,7 @@ +if(NOT BUILD_TFLITE_LOADER AND NOT BUILD_CIRCLE_LOADER) + return() +endif(NOT BUILD_TFLITE_LOADER AND NOT BUILD_CIRCLE_LOADER) + +add_library(base_loader INTERFACE) +target_include_directories(base_loader INTERFACE ${CMAKE_CURRENT_SOURCE_DIR}/include) +target_link_libraries(base_loader INTERFACE onert_core) diff --git a/runtime/onert/frontend/base_loader/include/base_loader.h b/runtime/onert/frontend/base_loader/include/base_loader.h new file mode 100644 index 000000000..f87c6ea77 --- /dev/null +++ b/runtime/onert/frontend/base_loader/include/base_loader.h @@ -0,0 +1,1362 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __BASE_LOADER_BASE_LOADER_H__ +#define __BASE_LOADER_BASE_LOADER_H__ + +#include "ir/Graph.h" +#include "ir/Operations.Include.h" + +#include <map> +#include <memory> +#include <fstream> +#include <limits> + +namespace onert +{ +namespace base_loader +{ + +template <typename LoaderDomain, typename SpecificLoader> class BaseLoader +{ + using Verifier = typename LoaderDomain::Verifier; + using ActivationFunctionType = typename LoaderDomain::ActivationFunctionType; + using Buffer = typename LoaderDomain::Buffer; + using BuiltinOperator = typename LoaderDomain::BuiltinOperator; + using CustomOptionsFormat = typename LoaderDomain::CustomOptionsFormat; + using Model = typename LoaderDomain::Model; + using Operator = typename LoaderDomain::Operator; + using Padding = typename LoaderDomain::Padding; + using Pool2DOptions = typename LoaderDomain::Pool2DOptions; + using SubGraph = typename LoaderDomain::SubGraph; + using Tensor = typename LoaderDomain::Tensor; + using TensorType = typename LoaderDomain::TensorType; + +public: + /** + * @brief Construct a new Loader object + * + * @param graph reference on primary subgraph + */ + explicit BaseLoader(std::unique_ptr<ir::Graph> &graph) : _primary_subgraph(graph), _model{nullptr} + { + } + + /** + * @brief Load a model from file + * + * @param file_path + */ + void loadFromFile(const char *file_path); + +protected: + ~BaseLoader() = default; + + void loadModel(); + + // Helper functions + ir::Activation convertActivation(ActivationFunctionType type); + ir::DataType tensorTypeToDataType(TensorType type); + + // Create operands form tflite::Tensor + ir::OperandIndex loadOperand(const Tensor *tensor, ir::Graph &subg); + void loadOperationIO(const Operator *op, ir::OperandIndexSequence &inputs, + ir::OperandIndexSequence &outputs); + // Create operations from Operator + void loadOperation(const Operator *op, ir::Graph &subg); + // Load Strides and Paddings from options to param + template <typename Param, typename 
OptionsType> + void loadStridesAndPaddings(Param ¶m, const OptionsType *options); + // Load Pool2D param + template <typename Param> void loadPool2D(Param ¶m, const Pool2DOptions *options); + + // Operations + void loadConv2D(const Operator *op, ir::Graph &subg); + void loadDepthwiseConv2D(const Operator *op, ir::Graph &subg); + void loadTransposeConv(const Operator *op, ir::Graph &subg); + void loadAvgPool2D(const Operator *op, ir::Graph &subg); + void loadReshape(const Operator *op, ir::Graph &subg); + void loadSoftmax(const Operator *op, ir::Graph &subg); + void loadMaxPool2D(const Operator *op, ir::Graph &subg); + void loadConcatenation(const Operator *op, ir::Graph &subg); + void loadInstanceNorm(const Operator *op, ir::Graph &subg); + void loadFC(const Operator *op, ir::Graph &subg); + void loadAdd(const Operator *op, ir::Graph &subg); + void loadSub(const Operator *op, ir::Graph &subg); + void loadMul(const Operator *op, ir::Graph &subg); + void loadDiv(const Operator *op, ir::Graph &subg); + void loadPack(const Operator *op, ir::Graph &subg); + void loadRelu(const Operator *op, ir::Graph &subg); + void loadRelu6(const Operator *op, ir::Graph &subg); + void loadResizeBilinear(const Operator *op, ir::Graph &subg); + void loadRsqrt(const Operator *op, ir::Graph &subg); + void loadSqrt(const Operator *op, ir::Graph &subg); + void loadSquaredDifference(const Operator *op, ir::Graph &subg); + void loadTanh(const Operator *op, ir::Graph &subg); + void loadTranspose(const Operator *op, ir::Graph &subg); + void loadMean(const Operator *op, ir::Graph &subg); + void loadReduceMax(const Operator *op, ir::Graph &subg); + void loadPad(const Operator *op, ir::Graph &subg); + void loadLogistic(const Operator *op, ir::Graph &subg); + void loadExp(const Operator *op, ir::Graph &subg); + void loadGather(const Operator *op, ir::Graph &subg); + void loadCustom(const Operator *op, ir::Graph &subg); + void loadSpaceToBatchND(const Operator *op, ir::Graph &subg); + void 
loadBatchToSpaceND(const Operator *op, ir::Graph &subg); + void loadReduceSum(const Operator *op, ir::Graph &subg); + void loadSqueeze(const Operator *op, ir::Graph &subg); + void loadPrelu(const Operator *op, ir::Graph &subg); + void loadSplit(const Operator *op, ir::Graph &subg); + void loadSlice(const Operator *op, ir::Graph &subg); + void loadStridedSlice(const Operator *op, ir::Graph &subg); + void loadUnpack(const Operator *op, ir::Graph &subg); + void loadMinimum(const Operator *op, ir::Graph &subg); + void loadMaximum(const Operator *op, ir::Graph &subg); + void loadCast(const Operator *op, ir::Graph &subg); + void loadComparison(const Operator *op, ir::Graph &subg); + void loadOneHot(const Operator *op, ir::Graph &subg); + void loadAbs(const Operator *op, ir::Graph &subg); + void loadSin(const Operator *op, ir::Graph &subg); + void loadShape(const Operator *op, ir::Graph &subg); + +protected: + // Buffer for loading (if needed) + std::vector<char> _buffer; + // Reference on loadable primary subgraph + std::unique_ptr<ir::Graph> &_primary_subgraph; + const Model *_model; + // Maps Tensor indices to onert Operands. 
+ std::vector<ir::OperandIndex> _tensor_to_operand; + // Verifier + std::unique_ptr<Verifier> _verifier; +}; + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::BaseLoader::loadFromFile(const char *file_path) +{ + std::ifstream stream(file_path, std::fstream::in | std::fstream::binary); + + if (!stream) + { + std::string msg = "Failed to open file `"; + msg += file_path; + msg += "`"; + throw std::runtime_error{msg}; + } + + stream.seekg(0, stream.end); + auto size = stream.tellg(); + stream.seekg(0, stream.beg); + + _buffer.resize(size); + stream.read(_buffer.data(), size); + + stream.close(); + + // Prepare verifier + _verifier = std::make_unique<Verifier>(reinterpret_cast<const std::uint8_t *>(_buffer.data()), + _buffer.size()); + + loadModel(); +} + +template <typename LoaderDomain, typename SpecificLoader> +ir::Activation BaseLoader<LoaderDomain, SpecificLoader>::BaseLoader::convertActivation( + const ActivationFunctionType type) +{ + switch (type) + { + case ActivationFunctionType::ActivationFunctionType_NONE: + return ir::Activation::NONE; + case ActivationFunctionType::ActivationFunctionType_RELU: + return ir::Activation::RELU; + case ActivationFunctionType::ActivationFunctionType_RELU_N1_TO_1: + return ir::Activation::RELU1; + case ActivationFunctionType::ActivationFunctionType_RELU6: + return ir::Activation::RELU6; + case ActivationFunctionType::ActivationFunctionType_TANH: + return ir::Activation::TANH; + default: + throw std::runtime_error(std::string("Unsupported activation type: ") + .append(EnumNameActivationFunctionType(type))); + } +} + +template <typename LoaderDomain, typename SpecificLoader> +ir::DataType +BaseLoader<LoaderDomain, SpecificLoader>::BaseLoader::tensorTypeToDataType(const TensorType type) +{ + switch (type) + { + case TensorType::TensorType_FLOAT32: + return ir::DataType::FLOAT32; + case TensorType::TensorType_INT32: + return ir::DataType::INT32; + case 
TensorType::TensorType_BOOL: + return ir::DataType::BOOL8; + case TensorType::TensorType_UINT8: + return ir::DataType::QUANT8_ASYMM; + default: + throw std::runtime_error( + std::string("Unsupported tensor type: ").append(EnumNameTensorType(type))); + } +} + +template <typename LoaderDomain, typename SpecificLoader> +ir::OperandIndex BaseLoader<LoaderDomain, SpecificLoader>::loadOperand(const Tensor *tensor, + ir::Graph &subg) +{ + ir::Shape shape; + // Shape + const auto *tensor_shape = tensor->shape(); + if (tensor_shape != nullptr) + { + for (const auto &dim : *tensor_shape) + { + shape.append(dim); + } + } + // Type + ir::DataType data_type = tensorTypeToDataType(tensor->type()); + // Quantization + auto q_params = tensor->quantization(); + float scale = 0.0; + long zero_point = 0; + if (q_params != nullptr) + { + if (q_params->scale()) + { + if (q_params->scale()->size() != 1) + { + throw std::runtime_error("Only 1 scale for a tensor is supported."); + } + scale = q_params->scale()->Get(0); + } + + if (q_params->zero_point()) + { + if (q_params->zero_point()->size() != 1) + { + throw std::runtime_error("Only 1 zero_point value for a tensor is supported."); + } + zero_point = q_params->zero_point()->Get(0); + // zero_point is long while TypeInfo.zero_point is defined as int32_t. + assert(zero_point >= std::numeric_limits<int32_t>::min()); + assert(zero_point <= std::numeric_limits<int32_t>::max()); + } + auto details = q_params->details_as_CustomQuantization(); + if (details != nullptr) + throw std::runtime_error("Custom Quantization is not supported"); + } + // Create TypeInfo + ir::TypeInfo type_info(data_type, scale, zero_point); + // Create operand + const auto operand_index = subg.addOperand(shape, type_info); + + // Constant tensors are indicated by non-empty data. 
+ const auto *data = _model->buffers()->Get(tensor->buffer())->data(); + if (data != nullptr) + { + auto ptr = std::make_unique<ir::CachedData>(data->data(), data->size()); + subg.setOperandValue(operand_index, std::move(ptr)); + } + + // Name unused + // auto name = tensor->name(); + // Variablie + if (tensor->is_variable()) + throw std::runtime_error("Variable tensor not supported!"); + + return operand_index; +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadOperationIO(const Operator *op, + ir::OperandIndexSequence &inputs, + ir::OperandIndexSequence &outputs) +{ + for (const std::int32_t idx : *op->inputs()) + { + inputs.append(_tensor_to_operand[idx]); + } + + for (const std::int32_t idx : *op->outputs()) + { + outputs.append(_tensor_to_operand[idx]); + } +} + +template <typename LoaderDomain, typename SpecificLoader> +template <typename Param, typename OptionsType> +void BaseLoader<LoaderDomain, SpecificLoader>::loadStridesAndPaddings(Param ¶m, + const OptionsType *options) +{ + // Strides + param.stride.vertical = options->stride_w(); + param.stride.horizontal = options->stride_h(); + // Paddings + if (options->padding() == Padding::Padding_SAME) + param.padding.type = ir::PaddingType::SAME; + if (options->padding() == Padding::Padding_VALID) + param.padding.type = ir::PaddingType::VALID; + // param paddings indexes unused +} + +template <typename LoaderDomain, typename SpecificLoader> +template <typename Param> +void BaseLoader<LoaderDomain, SpecificLoader>::loadPool2D(Param ¶m, + const Pool2DOptions *options) +{ + // Strides and Paddings + loadStridesAndPaddings(param, options); + // Filter width and height + // Strides + param.kw = options->filter_width(); + param.kh = options->filter_height(); + // Activation + param.activation = convertActivation(options->fused_activation_function()); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, 
SpecificLoader>::loadConv2D(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + ir::operation::Conv2D::Param param; + const auto *options = op->builtin_options_as_Conv2DOptions(); + param.activation = convertActivation(options->fused_activation_function()); + loadStridesAndPaddings(param, options); + // Dilation h/w factor unused + std::unique_ptr<ir::Operation> new_op(new ir::operation::Conv2D(inputs, outputs, param)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadDepthwiseConv2D(const Operator *op, + ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + ir::operation::DepthwiseConv2D::Param param; + const auto *options = op->builtin_options_as_DepthwiseConv2DOptions(); + param.activation = convertActivation(options->fused_activation_function()); + loadStridesAndPaddings(param, options); + // Multiplier + param.multiplier = options->depth_multiplier(); + // Dilation h/w factor unused + std::unique_ptr<ir::Operation> new_op(new ir::operation::DepthwiseConv2D(inputs, outputs, param)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadTransposeConv(const Operator *op, + ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + ir::operation::TransposeConv::Param param; + const auto *options = op->builtin_options_as_TransposeConvOptions(); + loadStridesAndPaddings(param, options); + std::unique_ptr<ir::Operation> new_op(new ir::operation::TransposeConv(inputs, outputs, param)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void 
BaseLoader<LoaderDomain, SpecificLoader>::loadAvgPool2D(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + ir::operation::AvgPool2D::Param param; + const auto *options = op->builtin_options_as_Pool2DOptions(); + + loadPool2D(param, options); + + std::unique_ptr<ir::Operation> new_op(new ir::operation::AvgPool2D(inputs, outputs, param)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadReshape(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + // const auto *options = op->builtin_options_as_ReshapeOptions(); + // No params + + std::unique_ptr<ir::Operation> new_op(new ir::operation::Reshape(inputs, outputs)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadSoftmax(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + ir::operation::Softmax::Param param; + const auto *options = op->builtin_options_as_SoftmaxOptions(); + // Beta + param.beta = options->beta(); + + std::unique_ptr<ir::Operation> new_op(new ir::operation::Softmax(inputs, outputs, param)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadMaxPool2D(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + ir::operation::MaxPool2D::Param param; + const auto *options = op->builtin_options_as_Pool2DOptions(); + + loadPool2D(param, options); + + std::unique_ptr<ir::Operation> new_op(new 
ir::operation::MaxPool2D(inputs, outputs, param)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadConcatenation(const Operator *op, + ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + ir::operation::Concat::Param param; + const auto *options = op->builtin_options_as_ConcatenationOptions(); + // Axis + param.axis = options->axis(); + param.rank = subg.operands().at(outputs.at(0)).shape().rank(); + // activation unused + + std::unique_ptr<ir::Operation> new_op(new ir::operation::Concat(inputs, outputs, param)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadInstanceNorm(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + ir::operation::InstanceNorm::Param param; + const auto *options = op->builtin_options_as_InstanceNormOptions(); + + param.activation = convertActivation(options->fused_activation_function()); + // Use default value 1e-5 if value of epsilon is zero + param.epsilon = options->epsilon() == 0.f ? 
1e-5 : options->epsilon(); + + std::unique_ptr<ir::Operation> new_op(new ir::operation::InstanceNorm(inputs, outputs, param)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadFC(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + const auto &input_operand = subg.operands().at(inputs.at(ir::operation::FullyConnected::INPUT)); + auto &weights_operand = subg.operands().at(inputs.at(ir::operation::FullyConnected::WEIGHT)); + if (input_operand.typeInfo().type() == ir::DataType::FLOAT32 && + weights_operand.typeInfo().type() == ir::DataType::QUANT8_ASYMM) + { + weights_operand.type(ir::DataType::QUANT8_SYMM); + } + + ir::operation::FullyConnected::Param param; + const auto *options = op->builtin_options_as_FullyConnectedOptions(); + + param.activation = convertActivation(options->fused_activation_function()); + // weights_format unused + + std::unique_ptr<ir::Operation> new_op(new ir::operation::FullyConnected(inputs, outputs, param)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadAdd(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + ir::operation::Add::Param param; + const auto *options = op->builtin_options_as_AddOptions(); + + param.activation = convertActivation(options->fused_activation_function()); + + std::unique_ptr<ir::Operation> new_op(new ir::operation::Add(inputs, outputs, param)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadSub(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + 
ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + ir::operation::Sub::Param param; + const auto *options = op->builtin_options_as_SubOptions(); + + param.activation = convertActivation(options->fused_activation_function()); + + std::unique_ptr<ir::Operation> new_op(new ir::operation::Sub(inputs, outputs, param)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadMul(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + ir::operation::Mul::Param param; + const auto *options = op->builtin_options_as_MulOptions(); + + param.activation = convertActivation(options->fused_activation_function()); + + std::unique_ptr<ir::Operation> new_op(new ir::operation::Mul(inputs, outputs, param)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadDiv(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + ir::operation::Div::Param param; + const auto *options = op->builtin_options_as_DivOptions(); + + param.activation = convertActivation(options->fused_activation_function()); + + std::unique_ptr<ir::Operation> new_op(new ir::operation::Div(inputs, outputs, param)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadPack(const Operator *op, ir::Graph &subg) +{ + // This runtime_error will be removed if the one of backend supports this operation + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + ir::operation::Pack::Param param; + const auto *options = 
op->builtin_options_as_PackOptions(); + param.num = options->values_count(); + param.axis = options->axis(); + param.rank = subg.operands().at(outputs.at(0)).shape().rank(); + + std::unique_ptr<ir::Operation> new_op(new ir::operation::Pack(inputs, outputs, param)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadRelu(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + std::unique_ptr<ir::Operation> new_op(new ir::operation::ReLU(inputs, outputs)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadRelu6(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + std::unique_ptr<ir::Operation> new_op(new ir::operation::ReLU6(inputs, outputs)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadResizeBilinear(const Operator *op, + ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + auto input = inputs.at(0); + auto size = inputs.at(1); + + // FIXME Handle ResizeBilinearOptions. 
+ if (!subg.operands().at(size).isConstant()) + throw std::runtime_error("ResizeBilinear: non-constant 'size' is not supported."); + + std::vector<std::int32_t> size_v = subg.operands().at(size).template asVector<std::int32_t>(); + + ir::operation::ResizeBilinear::Param param; + param.height_out = size_v[0]; + param.width_out = size_v[1]; + + std::unique_ptr<ir::Operation> new_op(new ir::operation::ResizeBilinear({input}, outputs, param)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadRsqrt(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + std::unique_ptr<ir::Operation> new_op(new ir::operation::RSQRT(inputs, outputs)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadSqrt(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + std::unique_ptr<ir::Operation> new_op(new ir::operation::SQRT(inputs, outputs)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadSquaredDifference(const Operator *op, + ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + std::unique_ptr<ir::Operation> new_op(new ir::operation::SquaredDifference(inputs, outputs)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadTanh(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + 
std::unique_ptr<ir::Operation> new_op(new ir::operation::Tanh(inputs, outputs)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadTranspose(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + auto input = inputs.at(0); + auto perm = inputs.at(1); + + if (!subg.operands().at(perm).isConstant()) + throw std::runtime_error("Transpose: non-constant 'perm' is not supported."); + + ir::operation::Transpose::Param param; + param.perm = subg.operands().at(perm).template asVector<int>(); + param.rank = subg.operands().at(inputs.at(0)).shape().rank(); + + std::unique_ptr<ir::Operation> new_op(new ir::operation::Transpose({input}, outputs, param)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadMean(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + auto input = inputs.at(0); + auto axes = inputs.at(1); + + if (!subg.operands().at(axes).isConstant()) + throw std::runtime_error("Mean: non-constant 'axes' is not supported."); + + ir::operation::Mean::Param param; + param.axes = subg.operands().at(axes).template asVector<int>(); + param.keep_dims = op->builtin_options_as_ReducerOptions()->keep_dims(); + param.rank = subg.operands().at(inputs.at(0)).shape().rank(); + + std::unique_ptr<ir::Operation> new_op(new ir::operation::Mean({input}, outputs, param)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadReduceMax(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, 
outputs); + auto input = inputs.at(0); + auto axes = inputs.at(1); + + // FIXME Handle ReducerOptions. + if (!subg.operands().at(axes).isConstant()) + throw std::runtime_error("ReduceSum: non-constant 'axes' is not supported."); + + ir::operation::ReduceMax::Param param; + param.axes = subg.operands().at(axes).template asVector<int>(); + param.keep_dims = op->builtin_options_as_ReducerOptions()->keep_dims(); + param.rank = subg.operands().at(inputs.at(0)).shape().rank(); + + std::unique_ptr<ir::Operation> new_op(new ir::operation::ReduceMax({input}, outputs, param)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadPad(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + ir::operation::Pad::Param param; + param.rank = subg.operands().at(inputs.at(0)).shape().rank(); + + std::unique_ptr<ir::Operation> new_op(new ir::operation::Pad(inputs, outputs, param)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadLogistic(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + std::unique_ptr<ir::Operation> new_op(new ir::operation::Logistic(inputs, outputs)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadExp(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + std::unique_ptr<ir::Operation> new_op(new ir::operation::Exp(inputs, outputs)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void 
BaseLoader<LoaderDomain, SpecificLoader>::loadGather(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + ir::operation::Gather::Param param; + param.axis = op->builtin_options_as_GatherOptions()->axis(); + param.rank = subg.operands().at(inputs.at(0)).shape().rank(); + + std::unique_ptr<ir::Operation> new_op(new ir::operation::Gather(inputs, outputs, param)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadSpaceToBatchND(const Operator *op, + ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + std::unique_ptr<ir::Operation> new_op{new ir::operation::SpaceToBatchND{inputs, outputs}}; + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadBatchToSpaceND(const Operator *op, + ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + auto input = inputs.at(0); + auto block_shape = inputs.at(1); + auto crops = inputs.at(2); + + if (!subg.operands().at(crops).isConstant()) + throw std::runtime_error("BatchToSpaceND: non-constant 'crops' is not supported."); + + std::vector<std::int32_t> crops_v = subg.operands().at(crops).template asVector<std::int32_t>(); + assert(crops_v.size() == 4); + if (crops_v != std::vector<std::int32_t>{0, 0, 0, 0}) + throw std::runtime_error("BatchToSpaceND: 'crops' other than {0, 0, 0, 0} is not supported."); + + std::unique_ptr<ir::Operation> new_op{ + new ir::operation::BatchToSpaceND{{input, block_shape}, outputs}}; + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, 
SpecificLoader>::loadReduceSum(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + auto input = inputs.at(0); + auto axes = inputs.at(1); + + // FIXME Handle ReducerOptions. + if (!subg.operands().at(axes).isConstant()) + throw std::runtime_error("ReduceSum: non-constant 'axes' is not supported."); + + ir::operation::ReduceSum::Param param; + param.axes = subg.operands().at(axes).template asVector<int>(); + param.keep_dims = op->builtin_options_as_ReducerOptions()->keep_dims(); + param.rank = subg.operands().at(inputs.at(0)).shape().rank(); + + std::unique_ptr<ir::Operation> new_op{new ir::operation::ReduceSum{{input}, outputs, param}}; + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadCustom(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + auto *op_code = _model->operator_codes()->Get(op->opcode_index()); + auto custom_op_id = op_code->custom_code()->str(); + + auto constraint = ir::OperandConstraint::createExact(inputs.size()); + + assert(op->custom_options_format() == CustomOptionsFormat::CustomOptionsFormat_FLEXBUFFERS && + "Unsupported custom operation options format"); + + size_t custom_op_data_size = op->custom_options()->size(); + auto custom_op_data = new char[custom_op_data_size]; + std::copy(op->custom_options()->begin(), op->custom_options()->end(), custom_op_data); + + ir::operation::Custom::Userdata userdata{}; + userdata.data = custom_op_data; + userdata.size = custom_op_data_size; + + auto new_op = + std::make_unique<ir::operation::Custom>(constraint, inputs, outputs, custom_op_id, userdata); + + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, 
SpecificLoader>::loadSqueeze(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + ir::operation::Squeeze::Param param{}; + const auto *options = op->builtin_options_as_SqueezeOptions(); + const auto *dims = options->squeeze_dims(); + if (dims) + { + if (dims->Length() > sizeof(param.dims) / sizeof(param.dims[0])) + throw std::runtime_error("Squeeze: 'param.ndims' is out of range."); + param.ndim = dims->Length(); + for (int i = 0; i < param.ndim; ++i) + param.dims[i] = dims->Get(i); + } + + std::unique_ptr<ir::Operation> new_op(new ir::operation::Squeeze(inputs, outputs, param)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadPrelu(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + std::unique_ptr<ir::Operation> new_op(new ir::operation::PReLU(inputs, outputs)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadSplit(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + // Notice : input order is strange for tflite split + auto input = inputs.at(1); + auto axis = inputs.at(0); + + // FIXME Handle SplitOptions. 
+ if (!subg.operands().at(axis).isConstant()) + throw std::runtime_error("Split: non-constant 'axis' is not supported."); + + ir::operation::Split::Param param{}; + param.axis = subg.operands().at(axis).template asScalar<int>(); + const auto *options = op->builtin_options_as_SplitOptions(); + param.num_splits = options->num_splits(); + param.rank = subg.operands().at(input).shape().rank(); + + std::unique_ptr<ir::Operation> new_op(new ir::operation::Split({input}, outputs, param)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadSlice(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + ir::operation::Slice::Param param; + param.rank = subg.operands().at(inputs.at(0)).shape().rank(); + + std::unique_ptr<ir::Operation> new_op{new ir::operation::Slice{inputs, outputs, param}}; + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadStridedSlice(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + ir::operation::StridedSlice::Param param; + + const auto *options = op->builtin_options_as_StridedSliceOptions(); + param.begin_mask = options->begin_mask(); + param.end_mask = options->end_mask(); + param.shrink_axis_mask = options->shrink_axis_mask(); + param.rank = subg.operands().at(inputs.at(0)).shape().rank(); + + std::unique_ptr<ir::Operation> new_op{new ir::operation::StridedSlice{inputs, outputs, param}}; + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadUnpack(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + 
ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + ir::operation::Unpack::Param param; + const auto *options = op->builtin_options_as_UnpackOptions(); + param.num = options->num(); + param.axis = options->axis(); + param.rank = subg.operands().at(inputs.at(0)).shape().rank(); + + std::unique_ptr<ir::Operation> new_op(new ir::operation::Unpack(inputs, outputs, param)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadMinimum(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + std::unique_ptr<ir::Operation> new_op(new ir::operation::Min(inputs, outputs)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadMaximum(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + std::unique_ptr<ir::Operation> new_op(new ir::operation::Max(inputs, outputs)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadCast(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + auto qasymm8ToUint8 = [](ir::Operand &operand) { + if (operand.typeInfo().type() == ir::DataType::QUANT8_ASYMM) + { + operand.type(ir::DataType::UINT8); + } + }; + qasymm8ToUint8(subg.operands().at(inputs.at(ir::operation::Cast::Input::INPUT))); + qasymm8ToUint8(subg.operands().at(outputs.at(0))); + + std::unique_ptr<ir::Operation> new_op(new ir::operation::Cast(inputs, outputs)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename 
SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadComparison(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + ir::operation::Comparison::Param param; + + const auto builtin_op = _model->operator_codes()->Get(op->opcode_index())->builtin_code(); + + switch (builtin_op) + { + case BuiltinOperator::BuiltinOperator_EQUAL: + param.comparison_type = ir::operation::Comparison::ComparisonType::Equal; + break; + case BuiltinOperator::BuiltinOperator_NOT_EQUAL: + param.comparison_type = ir::operation::Comparison::ComparisonType::NotEqual; + break; + case BuiltinOperator::BuiltinOperator_GREATER_EQUAL: + param.comparison_type = ir::operation::Comparison::ComparisonType::GreaterEqual; + break; + case BuiltinOperator::BuiltinOperator_GREATER: + param.comparison_type = ir::operation::Comparison::ComparisonType::Greater; + break; + case BuiltinOperator::BuiltinOperator_LESS_EQUAL: + param.comparison_type = ir::operation::Comparison::ComparisonType::LessEqual; + break; + case BuiltinOperator::BuiltinOperator_LESS: + param.comparison_type = ir::operation::Comparison::ComparisonType::Less; + break; + default: + throw std::runtime_error( + std::string("Unsupported operation: ").append(EnumNameBuiltinOperator(builtin_op))); + } + + std::unique_ptr<ir::Operation> new_op(new ir::operation::Comparison(inputs, outputs, param)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadOneHot(const Operator *op, ir::Graph &subg) +{ + if (op->inputs()->size() != 4 || op->outputs()->size() != 1) + throw std::runtime_error("OneHot Op has wrong number of input or output tensors."); + + enum + { + INDICES = 0, + DEPTH = 1, + ON_VALUE = 2, + OFF_VALUE = 3, + }; + + // Set input and output tensors + ir::OperandIndexSequence inputs, outputs; + 
inputs.append(_tensor_to_operand[op->inputs()->Get(INDICES)]); + outputs.append(_tensor_to_operand[op->outputs()->Get(0)]); + + // Set parameters + // depth, on_value and off_value are scalar though it is passed as inputs + auto depth_opidx = _tensor_to_operand[op->inputs()->Get(DEPTH)]; + auto on_value_opidx = _tensor_to_operand[op->inputs()->Get(ON_VALUE)]; + auto off_value_opidx = _tensor_to_operand[op->inputs()->Get(OFF_VALUE)]; + const auto depth = subg.operands().at(depth_opidx).template asScalar<int>(); + const auto on_value = subg.operands().at(on_value_opidx).template asScalar<float>(); + const auto off_value = subg.operands().at(off_value_opidx).template asScalar<float>(); + const auto axis = op->builtin_options_as_OneHotOptions()->axis(); + std::unique_ptr<ir::Operation> new_op( + new ir::operation::OneHot(inputs, outputs, {depth, on_value, off_value, axis})); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadAbs(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + std::unique_ptr<ir::Operation> new_op(new ir::operation::Abs(inputs, outputs)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadSin(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + std::unique_ptr<ir::Operation> new_op(new ir::operation::Sin(inputs, outputs)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadShape(const Operator *op, ir::Graph &subg) +{ + ir::OperandIndexSequence inputs; + ir::OperandIndexSequence outputs; + + loadOperationIO(op, inputs, outputs); + + // 
ir::operation::Shape::Param param; + // const auto *options = op->builtin_options_as_ShapeOptions(); + // param.out_type = tensorTypeToDataType(options->out_type()); + + std::unique_ptr<ir::Operation> new_op(new ir::operation::Shape(inputs, outputs /*, param*/)); + subg.addOperation(std::move(new_op)); +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadOperation(const Operator *op, ir::Graph &subg) +{ + const auto builtin_op = _model->operator_codes()->Get(op->opcode_index())->builtin_code(); + + switch (builtin_op) + { + case BuiltinOperator::BuiltinOperator_CONV_2D: + loadConv2D(op, subg); + return; + case BuiltinOperator::BuiltinOperator_AVERAGE_POOL_2D: + loadAvgPool2D(op, subg); + return; + case BuiltinOperator::BuiltinOperator_DEPTHWISE_CONV_2D: + loadDepthwiseConv2D(op, subg); + return; + case BuiltinOperator::BuiltinOperator_TRANSPOSE_CONV: + loadTransposeConv(op, subg); + return; + case BuiltinOperator::BuiltinOperator_RESHAPE: + loadReshape(op, subg); + return; + case BuiltinOperator::BuiltinOperator_SOFTMAX: + loadSoftmax(op, subg); + return; + case BuiltinOperator::BuiltinOperator_MAX_POOL_2D: + loadMaxPool2D(op, subg); + return; + case BuiltinOperator::BuiltinOperator_CONCATENATION: + loadConcatenation(op, subg); + return; + case BuiltinOperator::BuiltinOperator_FULLY_CONNECTED: + loadFC(op, subg); + return; + case BuiltinOperator::BuiltinOperator_ADD: + loadAdd(op, subg); + return; + case BuiltinOperator::BuiltinOperator_SUB: + loadSub(op, subg); + return; + case BuiltinOperator::BuiltinOperator_MUL: + loadMul(op, subg); + return; + case BuiltinOperator::BuiltinOperator_DIV: + loadDiv(op, subg); + return; + case BuiltinOperator::BuiltinOperator_PACK: + loadPack(op, subg); + return; + case BuiltinOperator::BuiltinOperator_RELU: + loadRelu(op, subg); + return; + case BuiltinOperator::BuiltinOperator_RELU6: + loadRelu6(op, subg); + return; + case 
BuiltinOperator::BuiltinOperator_RESIZE_BILINEAR: + loadResizeBilinear(op, subg); + return; + case BuiltinOperator::BuiltinOperator_RSQRT: + loadRsqrt(op, subg); + return; + case BuiltinOperator::BuiltinOperator_SQRT: + loadSqrt(op, subg); + return; + case BuiltinOperator::BuiltinOperator_SQUARED_DIFFERENCE: + loadSquaredDifference(op, subg); + return; + case BuiltinOperator::BuiltinOperator_TANH: + loadTanh(op, subg); + return; + case BuiltinOperator::BuiltinOperator_TRANSPOSE: + loadTranspose(op, subg); + return; + case BuiltinOperator::BuiltinOperator_MEAN: + loadMean(op, subg); + return; + case BuiltinOperator::BuiltinOperator_REDUCE_MAX: + loadReduceMax(op, subg); + return; + case BuiltinOperator::BuiltinOperator_PAD: + loadPad(op, subg); + return; + case BuiltinOperator::BuiltinOperator_LOGISTIC: + loadLogistic(op, subg); + return; + case BuiltinOperator::BuiltinOperator_EXP: + loadExp(op, subg); + return; + case BuiltinOperator::BuiltinOperator_GATHER: + loadGather(op, subg); + return; + case BuiltinOperator::BuiltinOperator_SPACE_TO_BATCH_ND: + loadSpaceToBatchND(op, subg); + return; + case BuiltinOperator::BuiltinOperator_BATCH_TO_SPACE_ND: + loadBatchToSpaceND(op, subg); + return; + case BuiltinOperator::BuiltinOperator_SUM: + loadReduceSum(op, subg); + return; + case BuiltinOperator::BuiltinOperator_CUSTOM: + loadCustom(op, subg); + return; + case BuiltinOperator::BuiltinOperator_SQUEEZE: + loadSqueeze(op, subg); + return; + case BuiltinOperator::BuiltinOperator_PRELU: + loadPrelu(op, subg); + return; + case BuiltinOperator::BuiltinOperator_SPLIT: + loadSplit(op, subg); + return; + case BuiltinOperator::BuiltinOperator_SLICE: + loadSlice(op, subg); + return; + case BuiltinOperator::BuiltinOperator_STRIDED_SLICE: + loadStridedSlice(op, subg); + return; + case BuiltinOperator::BuiltinOperator_UNPACK: + loadUnpack(op, subg); + return; + case BuiltinOperator::BuiltinOperator_MINIMUM: + loadMinimum(op, subg); + return; + case 
BuiltinOperator::BuiltinOperator_MAXIMUM: + loadMaximum(op, subg); + return; + case BuiltinOperator::BuiltinOperator_CAST: + loadCast(op, subg); + return; + case BuiltinOperator::BuiltinOperator_EQUAL: + case BuiltinOperator::BuiltinOperator_NOT_EQUAL: + case BuiltinOperator::BuiltinOperator_GREATER_EQUAL: + case BuiltinOperator::BuiltinOperator_GREATER: + case BuiltinOperator::BuiltinOperator_LESS_EQUAL: + case BuiltinOperator::BuiltinOperator_LESS: + loadComparison(op, subg); + return; + case BuiltinOperator::BuiltinOperator_ONE_HOT: + loadOneHot(op, subg); + return; + case BuiltinOperator::BuiltinOperator_ABS: + loadAbs(op, subg); + return; + case BuiltinOperator::BuiltinOperator_SIN: + loadSin(op, subg); + return; + case BuiltinOperator::BuiltinOperator_SHAPE: + loadShape(op, subg); + return; + // TODO Implement loading subgraphs of conftrol flow ops + default: + throw std::runtime_error( + std::string("Unsupported operation: ").append(EnumNameBuiltinOperator(builtin_op))); + } +} + +template <typename LoaderDomain, typename SpecificLoader> +void BaseLoader<LoaderDomain, SpecificLoader>::loadModel() +{ + LoaderDomain::VerifyModelBuffer(*_verifier.get()); + _model = LoaderDomain::GetModel(_buffer.data()); + // Version unused + // const auto version = _model->version(); + // Description unused + // const auto *description = _model->description(); + // Metabuffer unsued + // const auto *metadata_buffer = _model->metadata_buffer(); + // Load subgraphs recursively from primary subgraph and map operations on subgraph + const auto domain_subgraph = (*_model->subgraphs())[0]; + _primary_subgraph = static_cast<SpecificLoader *>(this)->loadSubgraph(domain_subgraph); +} + +} // namespace base_loader +} // namespace onert + +#endif //__BASE_LOADER_BASE_LOADER_H__ diff --git a/runtime/onert/frontend/circle/CMakeLists.txt b/runtime/onert/frontend/circle/CMakeLists.txt new file mode 100644 index 000000000..b446e694a --- /dev/null +++ 
b/runtime/onert/frontend/circle/CMakeLists.txt
@@ -0,0 +1,17 @@
# Build the circle model loader only when explicitly enabled.
if(NOT BUILD_CIRCLE_LOADER)
  return()
endif()

nnfw_find_package(FlatBuffersSource REQUIRED)

set(CIRCLE_LOADER_SOURCES src/circle_loader.cc)

add_library(circle_loader SHARED ${CIRCLE_LOADER_SOURCES})

# Public headers for consumers; flatbuffers headers stay an implementation detail.
target_include_directories(circle_loader PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include)
target_include_directories(circle_loader PRIVATE ${FlatBuffersSource_DIR}/include)

target_link_libraries(circle_loader PUBLIC onert_core)
target_link_libraries(circle_loader PRIVATE base_loader nnfw_common nnfw_coverage)

install(TARGETS circle_loader DESTINATION lib)
diff --git a/runtime/onert/frontend/circle/include/circle_loader.h b/runtime/onert/frontend/circle/include/circle_loader.h
new file mode 100644
index 000000000..718bc0b65
--- /dev/null
+++ b/runtime/onert/frontend/circle/include/circle_loader.h
@@ -0,0 +1,32 @@
/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
+ */ + +#ifndef __CIRCLE_CIRCLE_LOADER_H__ +#define __CIRCLE_CIRCLE_LOADER_H__ + +#include "ir/Graph.h" + +#include <memory> + +namespace onert +{ +namespace circle_loader +{ +std::unique_ptr<ir::Graph> loadModel(const char *filename); +} // namespace circle_loader +} // namespace onert + +#endif // __CIRCLE_CIRCLE_LOADER_H__ diff --git a/runtime/onert/frontend/circle/src/circle_loader.cc b/runtime/onert/frontend/circle/src/circle_loader.cc new file mode 100644 index 000000000..49aaccc4c --- /dev/null +++ b/runtime/onert/frontend/circle/src/circle_loader.cc @@ -0,0 +1,134 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "circle_loader.h" +#include "base_loader.h" +#include "circle_schema_generated.h" + +namespace onert +{ +namespace circle_loader +{ + +namespace +{ + +ir::Layout convertDataFormat(circle::DataFormat data_format) +{ + switch (data_format) + { + case circle::DataFormat::DataFormat_CHANNELS_FIRST: + return ir::Layout::NCHW; + case circle::DataFormat::DataFormat_CHANNELS_LAST: + return ir::Layout::NHWC; + default: + throw std::runtime_error("Unsupported DataFormat"); + } +} + +struct LoaderDomain +{ + using Verifier = flatbuffers::Verifier; + using ActivationFunctionType = circle::ActivationFunctionType; + using Buffer = circle::Buffer; + using BuiltinOperator = circle::BuiltinOperator; + using CustomOptionsFormat = circle::CustomOptionsFormat; + using Model = circle::Model; + using Operator = circle::Operator; + using Padding = circle::Padding; + using Pool2DOptions = circle::Pool2DOptions; + using Tensor = circle::Tensor; + using TensorType = circle::TensorType; + using SubGraph = circle::SubGraph; + + static const char *EnumNameBuiltinOperator(BuiltinOperator e) + { + return circle::EnumNameBuiltinOperator(e); + } + static const char *EnumNameActivationFunctionType(ActivationFunctionType e) + { + return circle::EnumNameActivationFunctionType(e); + } + static const char *EnumNameTensorType(TensorType e) { return circle::EnumNameTensorType(e); } + static const Model *GetModel(const void *buf) { return circle::GetModel(buf); } + static bool VerifyModelBuffer(Verifier &verifier) { return circle::VerifyModelBuffer(verifier); } +}; + +class CircleLoader final : public base_loader::BaseLoader<LoaderDomain, CircleLoader> +{ +public: + using BaseLoader::BaseLoader; + + std::unique_ptr<ir::Graph> loadSubgraph(const circle::SubGraph *circle_subg) + { + auto subg = std::make_unique<ir::Graph>(); + // Load tensors + _tensor_to_operand.resize(circle_subg->tensors()->size()); + for (flatbuffers::uoffset_t i = 0; i < circle_subg->tensors()->size(); ++i) + { + 
_tensor_to_operand[i] = loadOperand(circle_subg->tensors()->Get(i), *subg); + } + // Set inputs + for (const std::int32_t input_ind : *circle_subg->inputs()) + { + subg->addInput(_tensor_to_operand[input_ind]); + } + // Set outputs + for (const std::int32_t output_ind : *circle_subg->outputs()) + { + subg->addOutput(_tensor_to_operand[output_ind]); + } + // Create operations + for (const auto *op : *circle_subg->operators()) + { + CircleLoader::loadOperation(op, *subg); + } + + subg->setLayout(convertDataFormat(circle_subg->data_format())); + + subg->finishBuilding(); + + return subg; + } + + void loadOperation(const circle::Operator *op, ir::Graph &subg) + { + const auto builtin_op = _model->operator_codes()->Get(op->opcode_index())->builtin_code(); + + switch (builtin_op) + { + case circle::BuiltinOperator::BuiltinOperator_INSTANCE_NORM: + loadInstanceNorm(op, subg); + return; + default: + BaseLoader::loadOperation(op, subg); + return; + } + } +}; + +} // namespace + +std::unique_ptr<ir::Graph> loadModel(const char *filename) +{ + auto primary_subgraph = std::make_unique<ir::Graph>(); + CircleLoader loader(primary_subgraph); + loader.loadFromFile(filename); + return primary_subgraph; +} + +} // namespace circle_loader +} // namespace onert diff --git a/runtime/onert/frontend/circle/src/circle_schema_generated.h b/runtime/onert/frontend/circle/src/circle_schema_generated.h new file mode 100644 index 000000000..b1337f20d --- /dev/null +++ b/runtime/onert/frontend/circle/src/circle_schema_generated.h @@ -0,0 +1,9952 @@ +/* + * Copyright (c) 2019-2020 Samsung Electronics Co., Ltd. All Rights Reserved + * Copyright 2018 The TensorFlow Authors. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// automatically generated by the FlatBuffers compiler, do not modify + +#ifndef FLATBUFFERS_GENERATED_CIRCLESCHEMA_CIRCLE_H_ +#define FLATBUFFERS_GENERATED_CIRCLESCHEMA_CIRCLE_H_ + +#include "flatbuffers/flatbuffers.h" + +namespace circle +{ + +struct CustomQuantization; + +struct QuantizationParameters; + +struct Int32Vector; + +struct Uint16Vector; + +struct Uint8Vector; + +struct DimensionMetadata; + +struct SparsityParameters; + +struct Tensor; + +struct Conv2DOptions; + +struct Pool2DOptions; + +struct DepthwiseConv2DOptions; + +struct ConcatEmbeddingsOptions; + +struct LSHProjectionOptions; + +struct SVDFOptions; + +struct RNNOptions; + +struct SequenceRNNOptions; + +struct BidirectionalSequenceRNNOptions; + +struct FullyConnectedOptions; + +struct SoftmaxOptions; + +struct ConcatenationOptions; + +struct AddOptions; + +struct MulOptions; + +struct L2NormOptions; + +struct LocalResponseNormalizationOptions; + +struct LSTMOptions; + +struct UnidirectionalSequenceLSTMOptions; + +struct BidirectionalSequenceLSTMOptions; + +struct ResizeBilinearOptions; + +struct ResizeNearestNeighborOptions; + +struct CallOptions; + +struct PadOptions; + +struct PadV2Options; + +struct ReshapeOptions; + +struct SpaceToBatchNDOptions; + +struct BatchToSpaceNDOptions; + +struct SkipGramOptions; + +struct SpaceToDepthOptions; + +struct DepthToSpaceOptions; + +struct SubOptions; + +struct DivOptions; + +struct TopKV2Options; + +struct EmbeddingLookupSparseOptions; + +struct GatherOptions; + +struct TransposeOptions; + +struct ExpOptions; + +struct CosOptions; 
+ +struct ReducerOptions; + +struct SqueezeOptions; + +struct SplitOptions; + +struct SplitVOptions; + +struct StridedSliceOptions; + +struct LogSoftmaxOptions; + +struct CastOptions; + +struct DequantizeOptions; + +struct MaximumMinimumOptions; + +struct TileOptions; + +struct ArgMaxOptions; + +struct ArgMinOptions; + +struct GreaterOptions; + +struct GreaterEqualOptions; + +struct LessOptions; + +struct LessEqualOptions; + +struct NegOptions; + +struct SelectOptions; + +struct SliceOptions; + +struct TransposeConvOptions; + +struct ExpandDimsOptions; + +struct SparseToDenseOptions; + +struct EqualOptions; + +struct NotEqualOptions; + +struct ShapeOptions; + +struct RankOptions; + +struct PowOptions; + +struct FakeQuantOptions; + +struct PackOptions; + +struct LogicalOrOptions; + +struct OneHotOptions; + +struct AbsOptions; + +struct HardSwishOptions; + +struct LogicalAndOptions; + +struct LogicalNotOptions; + +struct UnpackOptions; + +struct FloorDivOptions; + +struct SquareOptions; + +struct ZerosLikeOptions; + +struct FillOptions; + +struct FloorModOptions; + +struct RangeOptions; + +struct LeakyReluOptions; + +struct SquaredDifferenceOptions; + +struct MirrorPadOptions; + +struct UniqueOptions; + +struct ReverseV2Options; + +struct AddNOptions; + +struct GatherNdOptions; + +struct WhereOptions; + +struct ReverseSequenceOptions; + +struct MatrixDiagOptions; + +struct QuantizeOptions; + +struct MatrixSetDiagOptions; + +struct IfOptions; + +struct WhileOptions; + +struct NonMaxSuppressionV4Options; + +struct NonMaxSuppressionV5Options; + +struct ScatterNdOptions; + +struct SelectV2Options; + +struct DensifyOptions; + +struct SegmentSumOptions; + +struct BatchMatMulOptions; + +struct InstanceNormOptions; + +struct OperatorCode; + +struct Operator; + +struct SubGraph; + +struct Buffer; + +struct Metadata; + +struct Model; + +enum TensorType +{ + TensorType_FLOAT32 = 0, + TensorType_FLOAT16 = 1, + TensorType_INT32 = 2, + TensorType_UINT8 = 3, + TensorType_INT64 = 4, 
+ TensorType_STRING = 5, + TensorType_BOOL = 6, + TensorType_INT16 = 7, + TensorType_COMPLEX64 = 8, + TensorType_INT8 = 9, + TensorType_FLOAT64 = 10, + TensorType_MIN = TensorType_FLOAT32, + TensorType_MAX = TensorType_FLOAT64 +}; + +inline const TensorType (&EnumValuesTensorType())[11] +{ + static const TensorType values[] = {TensorType_FLOAT32, TensorType_FLOAT16, TensorType_INT32, + TensorType_UINT8, TensorType_INT64, TensorType_STRING, + TensorType_BOOL, TensorType_INT16, TensorType_COMPLEX64, + TensorType_INT8, TensorType_FLOAT64}; + return values; +} + +inline const char *const *EnumNamesTensorType() +{ + static const char *const names[] = {"FLOAT32", "FLOAT16", "INT32", "UINT8", + "INT64", "STRING", "BOOL", "INT16", + "COMPLEX64", "INT8", "FLOAT64", nullptr}; + return names; +} + +inline const char *EnumNameTensorType(TensorType e) +{ + const size_t index = static_cast<int>(e); + return EnumNamesTensorType()[index]; +} + +enum QuantizationDetails +{ + QuantizationDetails_NONE = 0, + QuantizationDetails_CustomQuantization = 1, + QuantizationDetails_MIN = QuantizationDetails_NONE, + QuantizationDetails_MAX = QuantizationDetails_CustomQuantization +}; + +inline const QuantizationDetails (&EnumValuesQuantizationDetails())[2] +{ + static const QuantizationDetails values[] = {QuantizationDetails_NONE, + QuantizationDetails_CustomQuantization}; + return values; +} + +inline const char *const *EnumNamesQuantizationDetails() +{ + static const char *const names[] = {"NONE", "CustomQuantization", nullptr}; + return names; +} + +inline const char *EnumNameQuantizationDetails(QuantizationDetails e) +{ + const size_t index = static_cast<int>(e); + return EnumNamesQuantizationDetails()[index]; +} + +template <typename T> struct QuantizationDetailsTraits +{ + static const QuantizationDetails enum_value = QuantizationDetails_NONE; +}; + +template <> struct QuantizationDetailsTraits<CustomQuantization> +{ + static const QuantizationDetails enum_value = 
QuantizationDetails_CustomQuantization; +}; + +bool VerifyQuantizationDetails(flatbuffers::Verifier &verifier, const void *obj, + QuantizationDetails type); +bool VerifyQuantizationDetailsVector(flatbuffers::Verifier &verifier, + const flatbuffers::Vector<flatbuffers::Offset<void>> *values, + const flatbuffers::Vector<uint8_t> *types); + +enum DimensionType +{ + DimensionType_DENSE = 0, + DimensionType_SPARSE_CSR = 1, + DimensionType_MIN = DimensionType_DENSE, + DimensionType_MAX = DimensionType_SPARSE_CSR +}; + +inline const DimensionType (&EnumValuesDimensionType())[2] +{ + static const DimensionType values[] = {DimensionType_DENSE, DimensionType_SPARSE_CSR}; + return values; +} + +inline const char *const *EnumNamesDimensionType() +{ + static const char *const names[] = {"DENSE", "SPARSE_CSR", nullptr}; + return names; +} + +inline const char *EnumNameDimensionType(DimensionType e) +{ + const size_t index = static_cast<int>(e); + return EnumNamesDimensionType()[index]; +} + +enum SparseIndexVector +{ + SparseIndexVector_NONE = 0, + SparseIndexVector_Int32Vector = 1, + SparseIndexVector_Uint16Vector = 2, + SparseIndexVector_Uint8Vector = 3, + SparseIndexVector_MIN = SparseIndexVector_NONE, + SparseIndexVector_MAX = SparseIndexVector_Uint8Vector +}; + +inline const SparseIndexVector (&EnumValuesSparseIndexVector())[4] +{ + static const SparseIndexVector values[] = {SparseIndexVector_NONE, SparseIndexVector_Int32Vector, + SparseIndexVector_Uint16Vector, + SparseIndexVector_Uint8Vector}; + return values; +} + +inline const char *const *EnumNamesSparseIndexVector() +{ + static const char *const names[] = {"NONE", "Int32Vector", "Uint16Vector", "Uint8Vector", + nullptr}; + return names; +} + +inline const char *EnumNameSparseIndexVector(SparseIndexVector e) +{ + const size_t index = static_cast<int>(e); + return EnumNamesSparseIndexVector()[index]; +} + +template <typename T> struct SparseIndexVectorTraits +{ + static const SparseIndexVector enum_value = 
SparseIndexVector_NONE; +}; + +template <> struct SparseIndexVectorTraits<Int32Vector> +{ + static const SparseIndexVector enum_value = SparseIndexVector_Int32Vector; +}; + +template <> struct SparseIndexVectorTraits<Uint16Vector> +{ + static const SparseIndexVector enum_value = SparseIndexVector_Uint16Vector; +}; + +template <> struct SparseIndexVectorTraits<Uint8Vector> +{ + static const SparseIndexVector enum_value = SparseIndexVector_Uint8Vector; +}; + +bool VerifySparseIndexVector(flatbuffers::Verifier &verifier, const void *obj, + SparseIndexVector type); +bool VerifySparseIndexVectorVector(flatbuffers::Verifier &verifier, + const flatbuffers::Vector<flatbuffers::Offset<void>> *values, + const flatbuffers::Vector<uint8_t> *types); + +enum BuiltinOperator +{ + BuiltinOperator_ADD = 0, + BuiltinOperator_AVERAGE_POOL_2D = 1, + BuiltinOperator_CONCATENATION = 2, + BuiltinOperator_CONV_2D = 3, + BuiltinOperator_DEPTHWISE_CONV_2D = 4, + BuiltinOperator_DEPTH_TO_SPACE = 5, + BuiltinOperator_DEQUANTIZE = 6, + BuiltinOperator_EMBEDDING_LOOKUP = 7, + BuiltinOperator_FLOOR = 8, + BuiltinOperator_FULLY_CONNECTED = 9, + BuiltinOperator_HASHTABLE_LOOKUP = 10, + BuiltinOperator_L2_NORMALIZATION = 11, + BuiltinOperator_L2_POOL_2D = 12, + BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION = 13, + BuiltinOperator_LOGISTIC = 14, + BuiltinOperator_LSH_PROJECTION = 15, + BuiltinOperator_LSTM = 16, + BuiltinOperator_MAX_POOL_2D = 17, + BuiltinOperator_MUL = 18, + BuiltinOperator_RELU = 19, + BuiltinOperator_RELU_N1_TO_1 = 20, + BuiltinOperator_RELU6 = 21, + BuiltinOperator_RESHAPE = 22, + BuiltinOperator_RESIZE_BILINEAR = 23, + BuiltinOperator_RNN = 24, + BuiltinOperator_SOFTMAX = 25, + BuiltinOperator_SPACE_TO_DEPTH = 26, + BuiltinOperator_SVDF = 27, + BuiltinOperator_TANH = 28, + BuiltinOperator_CONCAT_EMBEDDINGS = 29, + BuiltinOperator_SKIP_GRAM = 30, + BuiltinOperator_CALL = 31, + BuiltinOperator_CUSTOM = 32, + BuiltinOperator_EMBEDDING_LOOKUP_SPARSE = 33, + BuiltinOperator_PAD = 
// ---- Tail of the generated `enum BuiltinOperator` ----
// The enum opener and values 0..33 sit on earlier lines; this fragment begins with the
// value of the `BuiltinOperator_PAD =` assignment cut at the line boundary.
34,
  BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN = 35,
  BuiltinOperator_GATHER = 36,
  BuiltinOperator_BATCH_TO_SPACE_ND = 37,
  BuiltinOperator_SPACE_TO_BATCH_ND = 38,
  BuiltinOperator_TRANSPOSE = 39,
  BuiltinOperator_MEAN = 40,
  BuiltinOperator_SUB = 41,
  BuiltinOperator_DIV = 42,
  BuiltinOperator_SQUEEZE = 43,
  BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM = 44,
  BuiltinOperator_STRIDED_SLICE = 45,
  BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN = 46,
  BuiltinOperator_EXP = 47,
  BuiltinOperator_TOPK_V2 = 48,
  BuiltinOperator_SPLIT = 49,
  BuiltinOperator_LOG_SOFTMAX = 50,
  BuiltinOperator_DELEGATE = 51,
  BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM = 52,
  BuiltinOperator_CAST = 53,
  BuiltinOperator_PRELU = 54,
  BuiltinOperator_MAXIMUM = 55,
  BuiltinOperator_ARG_MAX = 56,
  BuiltinOperator_MINIMUM = 57,
  BuiltinOperator_LESS = 58,
  BuiltinOperator_NEG = 59,
  BuiltinOperator_PADV2 = 60,
  BuiltinOperator_GREATER = 61,
  BuiltinOperator_GREATER_EQUAL = 62,
  BuiltinOperator_LESS_EQUAL = 63,
  BuiltinOperator_SELECT = 64,
  BuiltinOperator_SLICE = 65,
  BuiltinOperator_SIN = 66,
  BuiltinOperator_TRANSPOSE_CONV = 67,
  BuiltinOperator_SPARSE_TO_DENSE = 68,
  BuiltinOperator_TILE = 69,
  BuiltinOperator_EXPAND_DIMS = 70,
  BuiltinOperator_EQUAL = 71,
  BuiltinOperator_NOT_EQUAL = 72,
  BuiltinOperator_LOG = 73,
  BuiltinOperator_SUM = 74,
  BuiltinOperator_SQRT = 75,
  BuiltinOperator_RSQRT = 76,
  BuiltinOperator_SHAPE = 77,
  BuiltinOperator_POW = 78,
  BuiltinOperator_ARG_MIN = 79,
  BuiltinOperator_FAKE_QUANT = 80,
  BuiltinOperator_REDUCE_PROD = 81,
  BuiltinOperator_REDUCE_MAX = 82,
  BuiltinOperator_PACK = 83,
  BuiltinOperator_LOGICAL_OR = 84,
  BuiltinOperator_ONE_HOT = 85,
  BuiltinOperator_LOGICAL_AND = 86,
  BuiltinOperator_LOGICAL_NOT = 87,
  BuiltinOperator_UNPACK = 88,
  BuiltinOperator_REDUCE_MIN = 89,
  BuiltinOperator_FLOOR_DIV = 90,
  BuiltinOperator_REDUCE_ANY = 91,
  BuiltinOperator_SQUARE = 92,
  BuiltinOperator_ZEROS_LIKE = 93,
  BuiltinOperator_FILL = 94,
  BuiltinOperator_FLOOR_MOD = 95,
  BuiltinOperator_RANGE = 96,
  BuiltinOperator_RESIZE_NEAREST_NEIGHBOR = 97,
  BuiltinOperator_LEAKY_RELU = 98,
  BuiltinOperator_SQUARED_DIFFERENCE = 99,
  BuiltinOperator_MIRROR_PAD = 100,
  BuiltinOperator_ABS = 101,
  BuiltinOperator_SPLIT_V = 102,
  BuiltinOperator_UNIQUE = 103,
  BuiltinOperator_CEIL = 104,
  BuiltinOperator_REVERSE_V2 = 105,
  BuiltinOperator_ADD_N = 106,
  BuiltinOperator_GATHER_ND = 107,
  BuiltinOperator_COS = 108,
  BuiltinOperator_WHERE = 109,
  BuiltinOperator_RANK = 110,
  BuiltinOperator_ELU = 111,
  BuiltinOperator_REVERSE_SEQUENCE = 112,
  BuiltinOperator_MATRIX_DIAG = 113,
  BuiltinOperator_QUANTIZE = 114,
  BuiltinOperator_MATRIX_SET_DIAG = 115,
  BuiltinOperator_ROUND = 116,
  BuiltinOperator_HARD_SWISH = 117,
  BuiltinOperator_IF = 118,
  BuiltinOperator_WHILE = 119,
  BuiltinOperator_NON_MAX_SUPPRESSION_V4 = 120,
  BuiltinOperator_NON_MAX_SUPPRESSION_V5 = 121,
  BuiltinOperator_SCATTER_ND = 122,
  BuiltinOperator_SELECT_V2 = 123,
  BuiltinOperator_DENSIFY = 124,
  BuiltinOperator_SEGMENT_SUM = 125,
  BuiltinOperator_BATCH_MATMUL = 126,
  // Values 127..253 are deliberately left unassigned; INSTANCE_NORM is parked apart at
  // 254 (presumably a circle-specific extension -- TODO confirm against the schema) and
  // therefore also defines BuiltinOperator_MAX below.
  BuiltinOperator_INSTANCE_NORM = 254,
  BuiltinOperator_MIN = BuiltinOperator_ADD,
  BuiltinOperator_MAX = BuiltinOperator_INSTANCE_NORM
};

// Generated reflection helper: table of the 128 *defined* BuiltinOperator values in
// declaration order (the unassigned gap 127..253 is not represented here).
// NOTE: the initializer continues on the following source lines.
inline const BuiltinOperator (&EnumValuesBuiltinOperator())[128]
{
  static const BuiltinOperator values[] = {
      BuiltinOperator_ADD, BuiltinOperator_AVERAGE_POOL_2D, BuiltinOperator_CONCATENATION,
      BuiltinOperator_CONV_2D, BuiltinOperator_DEPTHWISE_CONV_2D, BuiltinOperator_DEPTH_TO_SPACE,
      BuiltinOperator_DEQUANTIZE, BuiltinOperator_EMBEDDING_LOOKUP, BuiltinOperator_FLOOR,
      BuiltinOperator_FULLY_CONNECTED, BuiltinOperator_HASHTABLE_LOOKUP,
      BuiltinOperator_L2_NORMALIZATION, BuiltinOperator_L2_POOL_2D,
      BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION, BuiltinOperator_LOGISTIC,
      BuiltinOperator_LSH_PROJECTION, BuiltinOperator_LSTM, BuiltinOperator_MAX_POOL_2D,
// (continuation of the EnumValuesBuiltinOperator() initializer started on the
// preceding source lines)
BuiltinOperator_MUL, BuiltinOperator_RELU, BuiltinOperator_RELU_N1_TO_1,
      BuiltinOperator_RELU6, BuiltinOperator_RESHAPE, BuiltinOperator_RESIZE_BILINEAR,
      BuiltinOperator_RNN, BuiltinOperator_SOFTMAX, BuiltinOperator_SPACE_TO_DEPTH,
      BuiltinOperator_SVDF, BuiltinOperator_TANH, BuiltinOperator_CONCAT_EMBEDDINGS,
      BuiltinOperator_SKIP_GRAM, BuiltinOperator_CALL, BuiltinOperator_CUSTOM,
      BuiltinOperator_EMBEDDING_LOOKUP_SPARSE, BuiltinOperator_PAD,
      BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN, BuiltinOperator_GATHER,
      BuiltinOperator_BATCH_TO_SPACE_ND, BuiltinOperator_SPACE_TO_BATCH_ND,
      BuiltinOperator_TRANSPOSE, BuiltinOperator_MEAN, BuiltinOperator_SUB,
      BuiltinOperator_DIV, BuiltinOperator_SQUEEZE,
      BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM, BuiltinOperator_STRIDED_SLICE,
      BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN, BuiltinOperator_EXP,
      BuiltinOperator_TOPK_V2, BuiltinOperator_SPLIT, BuiltinOperator_LOG_SOFTMAX,
      BuiltinOperator_DELEGATE, BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM,
      BuiltinOperator_CAST, BuiltinOperator_PRELU, BuiltinOperator_MAXIMUM,
      BuiltinOperator_ARG_MAX, BuiltinOperator_MINIMUM, BuiltinOperator_LESS,
      BuiltinOperator_NEG, BuiltinOperator_PADV2, BuiltinOperator_GREATER,
      BuiltinOperator_GREATER_EQUAL, BuiltinOperator_LESS_EQUAL, BuiltinOperator_SELECT,
      BuiltinOperator_SLICE, BuiltinOperator_SIN, BuiltinOperator_TRANSPOSE_CONV,
      BuiltinOperator_SPARSE_TO_DENSE, BuiltinOperator_TILE, BuiltinOperator_EXPAND_DIMS,
      BuiltinOperator_EQUAL, BuiltinOperator_NOT_EQUAL, BuiltinOperator_LOG,
      BuiltinOperator_SUM, BuiltinOperator_SQRT, BuiltinOperator_RSQRT,
      BuiltinOperator_SHAPE, BuiltinOperator_POW, BuiltinOperator_ARG_MIN,
      BuiltinOperator_FAKE_QUANT, BuiltinOperator_REDUCE_PROD, BuiltinOperator_REDUCE_MAX,
      BuiltinOperator_PACK, BuiltinOperator_LOGICAL_OR, BuiltinOperator_ONE_HOT,
      BuiltinOperator_LOGICAL_AND, BuiltinOperator_LOGICAL_NOT, BuiltinOperator_UNPACK,
      BuiltinOperator_REDUCE_MIN, BuiltinOperator_FLOOR_DIV, BuiltinOperator_REDUCE_ANY,
      BuiltinOperator_SQUARE, BuiltinOperator_ZEROS_LIKE, BuiltinOperator_FILL,
      BuiltinOperator_FLOOR_MOD, BuiltinOperator_RANGE,
      BuiltinOperator_RESIZE_NEAREST_NEIGHBOR, BuiltinOperator_LEAKY_RELU,
      BuiltinOperator_SQUARED_DIFFERENCE, BuiltinOperator_MIRROR_PAD, BuiltinOperator_ABS,
      BuiltinOperator_SPLIT_V, BuiltinOperator_UNIQUE, BuiltinOperator_CEIL,
      BuiltinOperator_REVERSE_V2, BuiltinOperator_ADD_N, BuiltinOperator_GATHER_ND,
      BuiltinOperator_COS, BuiltinOperator_WHERE, BuiltinOperator_RANK,
      BuiltinOperator_ELU, BuiltinOperator_REVERSE_SEQUENCE, BuiltinOperator_MATRIX_DIAG,
      BuiltinOperator_QUANTIZE, BuiltinOperator_MATRIX_SET_DIAG, BuiltinOperator_ROUND,
      BuiltinOperator_HARD_SWISH, BuiltinOperator_IF, BuiltinOperator_WHILE,
      BuiltinOperator_NON_MAX_SUPPRESSION_V4, BuiltinOperator_NON_MAX_SUPPRESSION_V5,
      BuiltinOperator_SCATTER_ND, BuiltinOperator_SELECT_V2, BuiltinOperator_DENSIFY,
      BuiltinOperator_SEGMENT_SUM, BuiltinOperator_BATCH_MATMUL,
      BuiltinOperator_INSTANCE_NORM};
  return values;
}

// Generated reflection helper: maps a BuiltinOperator value (used directly as an array
// index) to its schema name.  The table spans indices 0..254 plus a nullptr terminator;
// the unassigned gap 127..253 maps to "".
// NOTE: the initializer continues on the following source lines.
inline const char *const *EnumNamesBuiltinOperator()
{
  static const char *const names[] = {
      "ADD", "AVERAGE_POOL_2D", "CONCATENATION", "CONV_2D", "DEPTHWISE_CONV_2D",
      "DEPTH_TO_SPACE", "DEQUANTIZE", "EMBEDDING_LOOKUP", "FLOOR", "FULLY_CONNECTED",
      "HASHTABLE_LOOKUP", "L2_NORMALIZATION", "L2_POOL_2D", "LOCAL_RESPONSE_NORMALIZATION",
      "LOGISTIC", "LSH_PROJECTION", "LSTM", "MAX_POOL_2D", "MUL", "RELU", "RELU_N1_TO_1",
      "RELU6", "RESHAPE", "RESIZE_BILINEAR", "RNN", "SOFTMAX", "SPACE_TO_DEPTH", "SVDF",
      "TANH", "CONCAT_EMBEDDINGS", "SKIP_GRAM", "CALL", "CUSTOM",
      "EMBEDDING_LOOKUP_SPARSE", "PAD", "UNIDIRECTIONAL_SEQUENCE_RNN", "GATHER",
      "BATCH_TO_SPACE_ND", "SPACE_TO_BATCH_ND", "TRANSPOSE", "MEAN",
+ "SUB", + "DIV", + "SQUEEZE", + "UNIDIRECTIONAL_SEQUENCE_LSTM", + "STRIDED_SLICE", + "BIDIRECTIONAL_SEQUENCE_RNN", + "EXP", + "TOPK_V2", + "SPLIT", + "LOG_SOFTMAX", + "DELEGATE", + "BIDIRECTIONAL_SEQUENCE_LSTM", + "CAST", + "PRELU", + "MAXIMUM", + "ARG_MAX", + "MINIMUM", + "LESS", + "NEG", + "PADV2", + "GREATER", + "GREATER_EQUAL", + "LESS_EQUAL", + "SELECT", + "SLICE", + "SIN", + "TRANSPOSE_CONV", + "SPARSE_TO_DENSE", + "TILE", + "EXPAND_DIMS", + "EQUAL", + "NOT_EQUAL", + "LOG", + "SUM", + "SQRT", + "RSQRT", + "SHAPE", + "POW", + "ARG_MIN", + "FAKE_QUANT", + "REDUCE_PROD", + "REDUCE_MAX", + "PACK", + "LOGICAL_OR", + "ONE_HOT", + "LOGICAL_AND", + "LOGICAL_NOT", + "UNPACK", + "REDUCE_MIN", + "FLOOR_DIV", + "REDUCE_ANY", + "SQUARE", + "ZEROS_LIKE", + "FILL", + "FLOOR_MOD", + "RANGE", + "RESIZE_NEAREST_NEIGHBOR", + "LEAKY_RELU", + "SQUARED_DIFFERENCE", + "MIRROR_PAD", + "ABS", + "SPLIT_V", + "UNIQUE", + "CEIL", + "REVERSE_V2", + "ADD_N", + "GATHER_ND", + "COS", + "WHERE", + "RANK", + "ELU", + "REVERSE_SEQUENCE", + "MATRIX_DIAG", + "QUANTIZE", + "MATRIX_SET_DIAG", + "ROUND", + "HARD_SWISH", + "IF", + "WHILE", + "NON_MAX_SUPPRESSION_V4", + "NON_MAX_SUPPRESSION_V5", + "SCATTER_ND", + "SELECT_V2", + "DENSIFY", + "SEGMENT_SUM", + "BATCH_MATMUL", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + 
"", + "", + "", + "", + "INSTANCE_NORM", + nullptr}; + return names; +} + +inline const char *EnumNameBuiltinOperator(BuiltinOperator e) +{ + const size_t index = static_cast<int>(e); + return EnumNamesBuiltinOperator()[index]; +} + +enum BuiltinOptions +{ + BuiltinOptions_NONE = 0, + BuiltinOptions_Conv2DOptions = 1, + BuiltinOptions_DepthwiseConv2DOptions = 2, + BuiltinOptions_ConcatEmbeddingsOptions = 3, + BuiltinOptions_LSHProjectionOptions = 4, + BuiltinOptions_Pool2DOptions = 5, + BuiltinOptions_SVDFOptions = 6, + BuiltinOptions_RNNOptions = 7, + BuiltinOptions_FullyConnectedOptions = 8, + BuiltinOptions_SoftmaxOptions = 9, + BuiltinOptions_ConcatenationOptions = 10, + BuiltinOptions_AddOptions = 11, + BuiltinOptions_L2NormOptions = 12, + BuiltinOptions_LocalResponseNormalizationOptions = 13, + BuiltinOptions_LSTMOptions = 14, + BuiltinOptions_ResizeBilinearOptions = 15, + BuiltinOptions_CallOptions = 16, + BuiltinOptions_ReshapeOptions = 17, + BuiltinOptions_SkipGramOptions = 18, + BuiltinOptions_SpaceToDepthOptions = 19, + BuiltinOptions_EmbeddingLookupSparseOptions = 20, + BuiltinOptions_MulOptions = 21, + BuiltinOptions_PadOptions = 22, + BuiltinOptions_GatherOptions = 23, + BuiltinOptions_BatchToSpaceNDOptions = 24, + BuiltinOptions_SpaceToBatchNDOptions = 25, + BuiltinOptions_TransposeOptions = 26, + BuiltinOptions_ReducerOptions = 27, + BuiltinOptions_SubOptions = 28, + BuiltinOptions_DivOptions = 29, + BuiltinOptions_SqueezeOptions = 30, + BuiltinOptions_SequenceRNNOptions = 31, + BuiltinOptions_StridedSliceOptions = 32, + BuiltinOptions_ExpOptions = 33, + BuiltinOptions_TopKV2Options = 34, + BuiltinOptions_SplitOptions = 35, + BuiltinOptions_LogSoftmaxOptions = 36, + BuiltinOptions_CastOptions = 37, + BuiltinOptions_DequantizeOptions = 38, + BuiltinOptions_MaximumMinimumOptions = 39, + BuiltinOptions_ArgMaxOptions = 40, + BuiltinOptions_LessOptions = 41, + BuiltinOptions_NegOptions = 42, + BuiltinOptions_PadV2Options = 43, + 
// (continuation of the `enum BuiltinOptions` union-tag list started on the preceding
// source lines)
BuiltinOptions_GreaterOptions = 44,
  BuiltinOptions_GreaterEqualOptions = 45,
  BuiltinOptions_LessEqualOptions = 46,
  BuiltinOptions_SelectOptions = 47,
  BuiltinOptions_SliceOptions = 48,
  BuiltinOptions_TransposeConvOptions = 49,
  BuiltinOptions_SparseToDenseOptions = 50,
  BuiltinOptions_TileOptions = 51,
  BuiltinOptions_ExpandDimsOptions = 52,
  BuiltinOptions_EqualOptions = 53,
  BuiltinOptions_NotEqualOptions = 54,
  BuiltinOptions_ShapeOptions = 55,
  BuiltinOptions_PowOptions = 56,
  BuiltinOptions_ArgMinOptions = 57,
  BuiltinOptions_FakeQuantOptions = 58,
  BuiltinOptions_PackOptions = 59,
  BuiltinOptions_LogicalOrOptions = 60,
  BuiltinOptions_OneHotOptions = 61,
  BuiltinOptions_LogicalAndOptions = 62,
  BuiltinOptions_LogicalNotOptions = 63,
  BuiltinOptions_UnpackOptions = 64,
  BuiltinOptions_FloorDivOptions = 65,
  BuiltinOptions_SquareOptions = 66,
  BuiltinOptions_ZerosLikeOptions = 67,
  BuiltinOptions_FillOptions = 68,
  BuiltinOptions_BidirectionalSequenceLSTMOptions = 69,
  BuiltinOptions_BidirectionalSequenceRNNOptions = 70,
  BuiltinOptions_UnidirectionalSequenceLSTMOptions = 71,
  BuiltinOptions_FloorModOptions = 72,
  BuiltinOptions_RangeOptions = 73,
  BuiltinOptions_ResizeNearestNeighborOptions = 74,
  BuiltinOptions_LeakyReluOptions = 75,
  BuiltinOptions_SquaredDifferenceOptions = 76,
  BuiltinOptions_MirrorPadOptions = 77,
  BuiltinOptions_AbsOptions = 78,
  BuiltinOptions_SplitVOptions = 79,
  BuiltinOptions_UniqueOptions = 80,
  BuiltinOptions_ReverseV2Options = 81,
  BuiltinOptions_AddNOptions = 82,
  BuiltinOptions_GatherNdOptions = 83,
  BuiltinOptions_CosOptions = 84,
  BuiltinOptions_WhereOptions = 85,
  BuiltinOptions_RankOptions = 86,
  BuiltinOptions_ReverseSequenceOptions = 87,
  BuiltinOptions_MatrixDiagOptions = 88,
  BuiltinOptions_QuantizeOptions = 89,
  BuiltinOptions_MatrixSetDiagOptions = 90,
  BuiltinOptions_HardSwishOptions = 91,
  BuiltinOptions_IfOptions = 92,
  BuiltinOptions_WhileOptions = 93,
  BuiltinOptions_DepthToSpaceOptions = 94,
  BuiltinOptions_NonMaxSuppressionV4Options = 95,
  BuiltinOptions_NonMaxSuppressionV5Options = 96,
  BuiltinOptions_ScatterNdOptions = 97,
  BuiltinOptions_SelectV2Options = 98,
  BuiltinOptions_DensifyOptions = 99,
  BuiltinOptions_SegmentSumOptions = 100,
  BuiltinOptions_BatchMatMulOptions = 101,
  // Values 102..253 are left unassigned; InstanceNormOptions sits apart at 254 and
  // therefore also defines BuiltinOptions_MAX below.
  BuiltinOptions_InstanceNormOptions = 254,
  BuiltinOptions_MIN = BuiltinOptions_NONE,
  BuiltinOptions_MAX = BuiltinOptions_InstanceNormOptions
};

// Generated reflection helper: table of the 103 *defined* BuiltinOptions values in
// declaration order.  NOTE: the initializer continues on the following source lines.
inline const BuiltinOptions (&EnumValuesBuiltinOptions())[103]
{
  static const BuiltinOptions values[] = {
      BuiltinOptions_NONE, BuiltinOptions_Conv2DOptions, BuiltinOptions_DepthwiseConv2DOptions,
      BuiltinOptions_ConcatEmbeddingsOptions, BuiltinOptions_LSHProjectionOptions,
      BuiltinOptions_Pool2DOptions, BuiltinOptions_SVDFOptions, BuiltinOptions_RNNOptions,
      BuiltinOptions_FullyConnectedOptions, BuiltinOptions_SoftmaxOptions,
      BuiltinOptions_ConcatenationOptions, BuiltinOptions_AddOptions,
      BuiltinOptions_L2NormOptions, BuiltinOptions_LocalResponseNormalizationOptions,
      BuiltinOptions_LSTMOptions, BuiltinOptions_ResizeBilinearOptions,
      BuiltinOptions_CallOptions, BuiltinOptions_ReshapeOptions,
      BuiltinOptions_SkipGramOptions, BuiltinOptions_SpaceToDepthOptions,
      BuiltinOptions_EmbeddingLookupSparseOptions, BuiltinOptions_MulOptions,
      BuiltinOptions_PadOptions, BuiltinOptions_GatherOptions,
      BuiltinOptions_BatchToSpaceNDOptions, BuiltinOptions_SpaceToBatchNDOptions,
      BuiltinOptions_TransposeOptions, BuiltinOptions_ReducerOptions,
      BuiltinOptions_SubOptions, BuiltinOptions_DivOptions, BuiltinOptions_SqueezeOptions,
      BuiltinOptions_SequenceRNNOptions, BuiltinOptions_StridedSliceOptions,
      BuiltinOptions_ExpOptions, BuiltinOptions_TopKV2Options, BuiltinOptions_SplitOptions,
      BuiltinOptions_LogSoftmaxOptions, BuiltinOptions_CastOptions,
      BuiltinOptions_DequantizeOptions, BuiltinOptions_MaximumMinimumOptions,
// (continuation of the EnumValuesBuiltinOptions() initializer started on the preceding
// source lines)
BuiltinOptions_ArgMaxOptions, BuiltinOptions_LessOptions, BuiltinOptions_NegOptions,
      BuiltinOptions_PadV2Options, BuiltinOptions_GreaterOptions,
      BuiltinOptions_GreaterEqualOptions, BuiltinOptions_LessEqualOptions,
      BuiltinOptions_SelectOptions, BuiltinOptions_SliceOptions,
      BuiltinOptions_TransposeConvOptions, BuiltinOptions_SparseToDenseOptions,
      BuiltinOptions_TileOptions, BuiltinOptions_ExpandDimsOptions,
      BuiltinOptions_EqualOptions, BuiltinOptions_NotEqualOptions,
      BuiltinOptions_ShapeOptions, BuiltinOptions_PowOptions, BuiltinOptions_ArgMinOptions,
      BuiltinOptions_FakeQuantOptions, BuiltinOptions_PackOptions,
      BuiltinOptions_LogicalOrOptions, BuiltinOptions_OneHotOptions,
      BuiltinOptions_LogicalAndOptions, BuiltinOptions_LogicalNotOptions,
      BuiltinOptions_UnpackOptions, BuiltinOptions_FloorDivOptions,
      BuiltinOptions_SquareOptions, BuiltinOptions_ZerosLikeOptions,
      BuiltinOptions_FillOptions, BuiltinOptions_BidirectionalSequenceLSTMOptions,
      BuiltinOptions_BidirectionalSequenceRNNOptions,
      BuiltinOptions_UnidirectionalSequenceLSTMOptions, BuiltinOptions_FloorModOptions,
      BuiltinOptions_RangeOptions, BuiltinOptions_ResizeNearestNeighborOptions,
      BuiltinOptions_LeakyReluOptions, BuiltinOptions_SquaredDifferenceOptions,
      BuiltinOptions_MirrorPadOptions, BuiltinOptions_AbsOptions,
      BuiltinOptions_SplitVOptions, BuiltinOptions_UniqueOptions,
      BuiltinOptions_ReverseV2Options, BuiltinOptions_AddNOptions,
      BuiltinOptions_GatherNdOptions, BuiltinOptions_CosOptions,
      BuiltinOptions_WhereOptions, BuiltinOptions_RankOptions,
      BuiltinOptions_ReverseSequenceOptions, BuiltinOptions_MatrixDiagOptions,
      BuiltinOptions_QuantizeOptions, BuiltinOptions_MatrixSetDiagOptions,
      BuiltinOptions_HardSwishOptions, BuiltinOptions_IfOptions,
      BuiltinOptions_WhileOptions, BuiltinOptions_DepthToSpaceOptions,
      BuiltinOptions_NonMaxSuppressionV4Options, BuiltinOptions_NonMaxSuppressionV5Options,
      BuiltinOptions_ScatterNdOptions, BuiltinOptions_SelectV2Options,
      BuiltinOptions_DensifyOptions, BuiltinOptions_SegmentSumOptions,
      BuiltinOptions_BatchMatMulOptions, BuiltinOptions_InstanceNormOptions};
  return values;
}

// Generated reflection helper: maps a BuiltinOptions value (used directly as an array
// index) to its schema name.  The table spans indices 0..254 plus a nullptr terminator;
// the unassigned gap 102..253 maps to "".
// NOTE: the initializer continues on the following source lines.
inline const char *const *EnumNamesBuiltinOptions()
{
  static const char *const names[] = {
      "NONE", "Conv2DOptions", "DepthwiseConv2DOptions", "ConcatEmbeddingsOptions",
      "LSHProjectionOptions", "Pool2DOptions", "SVDFOptions", "RNNOptions",
      "FullyConnectedOptions", "SoftmaxOptions", "ConcatenationOptions", "AddOptions",
      "L2NormOptions", "LocalResponseNormalizationOptions", "LSTMOptions",
      "ResizeBilinearOptions", "CallOptions", "ReshapeOptions", "SkipGramOptions",
      "SpaceToDepthOptions", "EmbeddingLookupSparseOptions", "MulOptions", "PadOptions",
      "GatherOptions", "BatchToSpaceNDOptions", "SpaceToBatchNDOptions",
      "TransposeOptions", "ReducerOptions", "SubOptions", "DivOptions", "SqueezeOptions",
      "SequenceRNNOptions", "StridedSliceOptions", "ExpOptions", "TopKV2Options",
      "SplitOptions", "LogSoftmaxOptions", "CastOptions", "DequantizeOptions",
      "MaximumMinimumOptions", "ArgMaxOptions", "LessOptions", "NegOptions",
      "PadV2Options", "GreaterOptions", "GreaterEqualOptions", "LessEqualOptions",
      "SelectOptions", "SliceOptions", "TransposeConvOptions", "SparseToDenseOptions",
      "TileOptions", "ExpandDimsOptions", "EqualOptions", "NotEqualOptions",
      "ShapeOptions", "PowOptions", "ArgMinOptions", "FakeQuantOptions", "PackOptions",
      "LogicalOrOptions", "OneHotOptions", "LogicalAndOptions", "LogicalNotOptions",
      "UnpackOptions", "FloorDivOptions", "SquareOptions", "ZerosLikeOptions",
      "FillOptions", "BidirectionalSequenceLSTMOptions", "BidirectionalSequenceRNNOptions",
      "UnidirectionalSequenceLSTMOptions", "FloorModOptions", "RangeOptions",
      "ResizeNearestNeighborOptions", "LeakyReluOptions",
"SquaredDifferenceOptions", + "MirrorPadOptions", + "AbsOptions", + "SplitVOptions", + "UniqueOptions", + "ReverseV2Options", + "AddNOptions", + "GatherNdOptions", + "CosOptions", + "WhereOptions", + "RankOptions", + "ReverseSequenceOptions", + "MatrixDiagOptions", + "QuantizeOptions", + "MatrixSetDiagOptions", + "HardSwishOptions", + "IfOptions", + "WhileOptions", + "DepthToSpaceOptions", + "NonMaxSuppressionV4Options", + "NonMaxSuppressionV5Options", + "ScatterNdOptions", + "SelectV2Options", + "DensifyOptions", + "SegmentSumOptions", + "BatchMatMulOptions", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "InstanceNormOptions", + nullptr}; + return names; +} + +inline const char *EnumNameBuiltinOptions(BuiltinOptions e) +{ + const size_t index = static_cast<int>(e); + return EnumNamesBuiltinOptions()[index]; +} + +template <typename T> struct BuiltinOptionsTraits +{ + static const BuiltinOptions enum_value = BuiltinOptions_NONE; +}; + +template <> struct BuiltinOptionsTraits<Conv2DOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_Conv2DOptions; +}; + +template <> struct 
// Generated BuiltinOptionsTraits specializations (continued): each one exposes the
// BuiltinOptions union tag for its option table type.  This fragment completes the
// `template <> struct` header cut at the preceding line boundary and itself ends
// mid-specialization (ArgMinOptions) at the next boundary.
BuiltinOptionsTraits<DepthwiseConv2DOptions>
{
  static const BuiltinOptions enum_value = BuiltinOptions_DepthwiseConv2DOptions;
};
template <> struct BuiltinOptionsTraits<ConcatEmbeddingsOptions> { static const BuiltinOptions enum_value = BuiltinOptions_ConcatEmbeddingsOptions; };
template <> struct BuiltinOptionsTraits<LSHProjectionOptions> { static const BuiltinOptions enum_value = BuiltinOptions_LSHProjectionOptions; };
template <> struct BuiltinOptionsTraits<Pool2DOptions> { static const BuiltinOptions enum_value = BuiltinOptions_Pool2DOptions; };
template <> struct BuiltinOptionsTraits<SVDFOptions> { static const BuiltinOptions enum_value = BuiltinOptions_SVDFOptions; };
template <> struct BuiltinOptionsTraits<RNNOptions> { static const BuiltinOptions enum_value = BuiltinOptions_RNNOptions; };
template <> struct BuiltinOptionsTraits<FullyConnectedOptions> { static const BuiltinOptions enum_value = BuiltinOptions_FullyConnectedOptions; };
template <> struct BuiltinOptionsTraits<SoftmaxOptions> { static const BuiltinOptions enum_value = BuiltinOptions_SoftmaxOptions; };
template <> struct BuiltinOptionsTraits<ConcatenationOptions> { static const BuiltinOptions enum_value = BuiltinOptions_ConcatenationOptions; };
template <> struct BuiltinOptionsTraits<AddOptions> { static const BuiltinOptions enum_value = BuiltinOptions_AddOptions; };
template <> struct BuiltinOptionsTraits<L2NormOptions> { static const BuiltinOptions enum_value = BuiltinOptions_L2NormOptions; };
template <> struct BuiltinOptionsTraits<LocalResponseNormalizationOptions> { static const BuiltinOptions enum_value = BuiltinOptions_LocalResponseNormalizationOptions; };
template <> struct BuiltinOptionsTraits<LSTMOptions> { static const BuiltinOptions enum_value = BuiltinOptions_LSTMOptions; };
template <> struct BuiltinOptionsTraits<ResizeBilinearOptions> { static const BuiltinOptions enum_value = BuiltinOptions_ResizeBilinearOptions; };
template <> struct BuiltinOptionsTraits<CallOptions> { static const BuiltinOptions enum_value = BuiltinOptions_CallOptions; };
template <> struct BuiltinOptionsTraits<ReshapeOptions> { static const BuiltinOptions enum_value = BuiltinOptions_ReshapeOptions; };
template <> struct BuiltinOptionsTraits<SkipGramOptions> { static const BuiltinOptions enum_value = BuiltinOptions_SkipGramOptions; };
template <> struct BuiltinOptionsTraits<SpaceToDepthOptions> { static const BuiltinOptions enum_value = BuiltinOptions_SpaceToDepthOptions; };
template <> struct BuiltinOptionsTraits<EmbeddingLookupSparseOptions> { static const BuiltinOptions enum_value = BuiltinOptions_EmbeddingLookupSparseOptions; };
template <> struct BuiltinOptionsTraits<MulOptions> { static const BuiltinOptions enum_value = BuiltinOptions_MulOptions; };
template <> struct BuiltinOptionsTraits<PadOptions> { static const BuiltinOptions enum_value = BuiltinOptions_PadOptions; };
template <> struct BuiltinOptionsTraits<GatherOptions> { static const BuiltinOptions enum_value = BuiltinOptions_GatherOptions; };
template <> struct BuiltinOptionsTraits<BatchToSpaceNDOptions> { static const BuiltinOptions enum_value = BuiltinOptions_BatchToSpaceNDOptions; };
template <> struct BuiltinOptionsTraits<SpaceToBatchNDOptions> { static const BuiltinOptions enum_value = BuiltinOptions_SpaceToBatchNDOptions; };
template <> struct BuiltinOptionsTraits<TransposeOptions> { static const BuiltinOptions enum_value = BuiltinOptions_TransposeOptions; };
template <> struct BuiltinOptionsTraits<ReducerOptions> { static const BuiltinOptions enum_value = BuiltinOptions_ReducerOptions; };
template <> struct BuiltinOptionsTraits<SubOptions> { static const BuiltinOptions enum_value = BuiltinOptions_SubOptions; };
template <> struct BuiltinOptionsTraits<DivOptions> { static const BuiltinOptions enum_value = BuiltinOptions_DivOptions; };
template <> struct BuiltinOptionsTraits<SqueezeOptions> { static const BuiltinOptions enum_value = BuiltinOptions_SqueezeOptions; };
template <> struct BuiltinOptionsTraits<SequenceRNNOptions> { static const BuiltinOptions enum_value = BuiltinOptions_SequenceRNNOptions; };
template <> struct BuiltinOptionsTraits<StridedSliceOptions> { static const BuiltinOptions enum_value = BuiltinOptions_StridedSliceOptions; };
template <> struct BuiltinOptionsTraits<ExpOptions> { static const BuiltinOptions enum_value = BuiltinOptions_ExpOptions; };
template <> struct BuiltinOptionsTraits<TopKV2Options> { static const BuiltinOptions enum_value = BuiltinOptions_TopKV2Options; };
template <> struct BuiltinOptionsTraits<SplitOptions> { static const BuiltinOptions enum_value = BuiltinOptions_SplitOptions; };
template <> struct BuiltinOptionsTraits<LogSoftmaxOptions> { static const BuiltinOptions enum_value = BuiltinOptions_LogSoftmaxOptions; };
template <> struct BuiltinOptionsTraits<CastOptions> { static const BuiltinOptions enum_value = BuiltinOptions_CastOptions; };
template <> struct BuiltinOptionsTraits<DequantizeOptions> { static const BuiltinOptions enum_value = BuiltinOptions_DequantizeOptions; };
template <> struct BuiltinOptionsTraits<MaximumMinimumOptions> { static const BuiltinOptions enum_value = BuiltinOptions_MaximumMinimumOptions; };
template <> struct BuiltinOptionsTraits<ArgMaxOptions> { static const BuiltinOptions enum_value = BuiltinOptions_ArgMaxOptions; };
template <> struct BuiltinOptionsTraits<LessOptions> { static const BuiltinOptions enum_value = BuiltinOptions_LessOptions; };
template <> struct BuiltinOptionsTraits<NegOptions> { static const BuiltinOptions enum_value = BuiltinOptions_NegOptions; };
template <> struct BuiltinOptionsTraits<PadV2Options> { static const BuiltinOptions enum_value = BuiltinOptions_PadV2Options; };
template <> struct BuiltinOptionsTraits<GreaterOptions> { static const BuiltinOptions enum_value = BuiltinOptions_GreaterOptions; };
template <> struct BuiltinOptionsTraits<GreaterEqualOptions> { static const BuiltinOptions enum_value = BuiltinOptions_GreaterEqualOptions; };
template <> struct BuiltinOptionsTraits<LessEqualOptions> { static const BuiltinOptions enum_value = BuiltinOptions_LessEqualOptions; };
template <> struct BuiltinOptionsTraits<SelectOptions> { static const BuiltinOptions enum_value = BuiltinOptions_SelectOptions; };
template <> struct BuiltinOptionsTraits<SliceOptions> { static const BuiltinOptions enum_value = BuiltinOptions_SliceOptions; };
template <> struct BuiltinOptionsTraits<TransposeConvOptions> { static const BuiltinOptions enum_value = BuiltinOptions_TransposeConvOptions; };
template <> struct BuiltinOptionsTraits<SparseToDenseOptions> { static const BuiltinOptions enum_value = BuiltinOptions_SparseToDenseOptions; };
template <> struct BuiltinOptionsTraits<TileOptions> { static const BuiltinOptions enum_value = BuiltinOptions_TileOptions; };
template <> struct BuiltinOptionsTraits<ExpandDimsOptions> { static const BuiltinOptions enum_value = BuiltinOptions_ExpandDimsOptions; };
template <> struct BuiltinOptionsTraits<EqualOptions> { static const BuiltinOptions enum_value = BuiltinOptions_EqualOptions; };
template <> struct BuiltinOptionsTraits<NotEqualOptions> { static const BuiltinOptions enum_value = BuiltinOptions_NotEqualOptions; };
template <> struct BuiltinOptionsTraits<ShapeOptions> { static const BuiltinOptions enum_value = BuiltinOptions_ShapeOptions; };
template <> struct BuiltinOptionsTraits<PowOptions> { static const BuiltinOptions enum_value = BuiltinOptions_PowOptions; };
template <> struct BuiltinOptionsTraits<ArgMinOptions>
{
  static const BuiltinOptions enum_value =
// (completes the BuiltinOptionsTraits<ArgMinOptions> specialization cut at the
// preceding line boundary; the remaining generated union-tag specializations follow)
BuiltinOptions_ArgMinOptions;
};
template <> struct BuiltinOptionsTraits<FakeQuantOptions> { static const BuiltinOptions enum_value = BuiltinOptions_FakeQuantOptions; };
template <> struct BuiltinOptionsTraits<PackOptions> { static const BuiltinOptions enum_value = BuiltinOptions_PackOptions; };
template <> struct BuiltinOptionsTraits<LogicalOrOptions> { static const BuiltinOptions enum_value = BuiltinOptions_LogicalOrOptions; };
template <> struct BuiltinOptionsTraits<OneHotOptions> { static const BuiltinOptions enum_value = BuiltinOptions_OneHotOptions; };
template <> struct BuiltinOptionsTraits<LogicalAndOptions> { static const BuiltinOptions enum_value = BuiltinOptions_LogicalAndOptions; };
template <> struct BuiltinOptionsTraits<LogicalNotOptions> { static const BuiltinOptions enum_value = BuiltinOptions_LogicalNotOptions; };
template <> struct BuiltinOptionsTraits<UnpackOptions> { static const BuiltinOptions enum_value = BuiltinOptions_UnpackOptions; };
template <> struct BuiltinOptionsTraits<FloorDivOptions> { static const BuiltinOptions enum_value = BuiltinOptions_FloorDivOptions; };
template <> struct BuiltinOptionsTraits<SquareOptions> { static const BuiltinOptions enum_value = BuiltinOptions_SquareOptions; };
template <> struct BuiltinOptionsTraits<ZerosLikeOptions> { static const BuiltinOptions enum_value = BuiltinOptions_ZerosLikeOptions; };
template <> struct BuiltinOptionsTraits<FillOptions> { static const BuiltinOptions enum_value = BuiltinOptions_FillOptions; };
template <> struct BuiltinOptionsTraits<BidirectionalSequenceLSTMOptions> { static const BuiltinOptions enum_value = BuiltinOptions_BidirectionalSequenceLSTMOptions; };
template <> struct BuiltinOptionsTraits<BidirectionalSequenceRNNOptions> { static const BuiltinOptions enum_value = BuiltinOptions_BidirectionalSequenceRNNOptions; };
template <> struct BuiltinOptionsTraits<UnidirectionalSequenceLSTMOptions> { static const BuiltinOptions enum_value = BuiltinOptions_UnidirectionalSequenceLSTMOptions; };
template <> struct BuiltinOptionsTraits<FloorModOptions> { static const BuiltinOptions enum_value = BuiltinOptions_FloorModOptions; };
template <> struct BuiltinOptionsTraits<RangeOptions> { static const BuiltinOptions enum_value = BuiltinOptions_RangeOptions; };
template <> struct BuiltinOptionsTraits<ResizeNearestNeighborOptions> { static const BuiltinOptions enum_value = BuiltinOptions_ResizeNearestNeighborOptions; };
template <> struct BuiltinOptionsTraits<LeakyReluOptions> { static const BuiltinOptions enum_value = BuiltinOptions_LeakyReluOptions; };
template <> struct BuiltinOptionsTraits<SquaredDifferenceOptions> { static const BuiltinOptions enum_value = BuiltinOptions_SquaredDifferenceOptions; };
template <> struct BuiltinOptionsTraits<MirrorPadOptions> { static const BuiltinOptions enum_value = BuiltinOptions_MirrorPadOptions; };
template <> struct BuiltinOptionsTraits<AbsOptions> { static const BuiltinOptions enum_value = BuiltinOptions_AbsOptions; };
template <> struct BuiltinOptionsTraits<SplitVOptions> { static const BuiltinOptions enum_value = BuiltinOptions_SplitVOptions; };
template <> struct BuiltinOptionsTraits<UniqueOptions> { static const BuiltinOptions enum_value = BuiltinOptions_UniqueOptions; };
template <> struct BuiltinOptionsTraits<ReverseV2Options> { static const BuiltinOptions enum_value = BuiltinOptions_ReverseV2Options; };
template <> struct BuiltinOptionsTraits<AddNOptions> { static const BuiltinOptions enum_value = BuiltinOptions_AddNOptions; };
template <> struct BuiltinOptionsTraits<GatherNdOptions> { static const BuiltinOptions enum_value = BuiltinOptions_GatherNdOptions; };
template <> struct BuiltinOptionsTraits<CosOptions> { static const BuiltinOptions enum_value = BuiltinOptions_CosOptions; };
template <> struct BuiltinOptionsTraits<WhereOptions> { static const BuiltinOptions enum_value = BuiltinOptions_WhereOptions; };
template <> struct BuiltinOptionsTraits<RankOptions> { static const BuiltinOptions enum_value = BuiltinOptions_RankOptions; };
template <> struct BuiltinOptionsTraits<ReverseSequenceOptions> { static const BuiltinOptions enum_value = BuiltinOptions_ReverseSequenceOptions; };
template <> struct BuiltinOptionsTraits<MatrixDiagOptions> { static const BuiltinOptions enum_value = BuiltinOptions_MatrixDiagOptions; };
template <> struct BuiltinOptionsTraits<QuantizeOptions> { static const BuiltinOptions enum_value = BuiltinOptions_QuantizeOptions; };
template <> struct BuiltinOptionsTraits<MatrixSetDiagOptions> { static const BuiltinOptions enum_value = BuiltinOptions_MatrixSetDiagOptions; };
template <> struct BuiltinOptionsTraits<HardSwishOptions> { static const BuiltinOptions enum_value = BuiltinOptions_HardSwishOptions; };
template <> struct BuiltinOptionsTraits<IfOptions> { static const BuiltinOptions enum_value = BuiltinOptions_IfOptions; };
template <> struct BuiltinOptionsTraits<WhileOptions> { static const BuiltinOptions enum_value = BuiltinOptions_WhileOptions; };
template <> struct BuiltinOptionsTraits<DepthToSpaceOptions> { static const BuiltinOptions enum_value = BuiltinOptions_DepthToSpaceOptions; };
template <> struct BuiltinOptionsTraits<NonMaxSuppressionV4Options> { static const BuiltinOptions enum_value = BuiltinOptions_NonMaxSuppressionV4Options; };
template <> struct BuiltinOptionsTraits<NonMaxSuppressionV5Options> { static const BuiltinOptions enum_value = BuiltinOptions_NonMaxSuppressionV5Options; };
template <> struct BuiltinOptionsTraits<ScatterNdOptions> { static const BuiltinOptions enum_value = BuiltinOptions_ScatterNdOptions; };
// (the SelectV2Options specialization body continues on the next source line)
template <> struct BuiltinOptionsTraits<SelectV2Options>
{
static const BuiltinOptions enum_value = BuiltinOptions_SelectV2Options; +}; + +template <> struct BuiltinOptionsTraits<DensifyOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_DensifyOptions; +}; + +template <> struct BuiltinOptionsTraits<SegmentSumOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_SegmentSumOptions; +}; + +template <> struct BuiltinOptionsTraits<BatchMatMulOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_BatchMatMulOptions; +}; + +template <> struct BuiltinOptionsTraits<InstanceNormOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_InstanceNormOptions; +}; + +bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions type); +bool VerifyBuiltinOptionsVector(flatbuffers::Verifier &verifier, + const flatbuffers::Vector<flatbuffers::Offset<void>> *values, + const flatbuffers::Vector<uint8_t> *types); + +enum Padding +{ + Padding_SAME = 0, + Padding_VALID = 1, + Padding_MIN = Padding_SAME, + Padding_MAX = Padding_VALID +}; + +inline const Padding (&EnumValuesPadding())[2] +{ + static const Padding values[] = {Padding_SAME, Padding_VALID}; + return values; +} + +inline const char *const *EnumNamesPadding() +{ + static const char *const names[] = {"SAME", "VALID", nullptr}; + return names; +} + +inline const char *EnumNamePadding(Padding e) +{ + const size_t index = static_cast<int>(e); + return EnumNamesPadding()[index]; +} + +enum ActivationFunctionType +{ + ActivationFunctionType_NONE = 0, + ActivationFunctionType_RELU = 1, + ActivationFunctionType_RELU_N1_TO_1 = 2, + ActivationFunctionType_RELU6 = 3, + ActivationFunctionType_TANH = 4, + ActivationFunctionType_SIGN_BIT = 5, + ActivationFunctionType_MIN = ActivationFunctionType_NONE, + ActivationFunctionType_MAX = ActivationFunctionType_SIGN_BIT +}; + +inline const ActivationFunctionType (&EnumValuesActivationFunctionType())[6] +{ + static const ActivationFunctionType values[] = { + 
ActivationFunctionType_NONE, ActivationFunctionType_RELU, + ActivationFunctionType_RELU_N1_TO_1, ActivationFunctionType_RELU6, + ActivationFunctionType_TANH, ActivationFunctionType_SIGN_BIT}; + return values; +} + +inline const char *const *EnumNamesActivationFunctionType() +{ + static const char *const names[] = {"NONE", "RELU", "RELU_N1_TO_1", "RELU6", + "TANH", "SIGN_BIT", nullptr}; + return names; +} + +inline const char *EnumNameActivationFunctionType(ActivationFunctionType e) +{ + const size_t index = static_cast<int>(e); + return EnumNamesActivationFunctionType()[index]; +} + +enum LSHProjectionType +{ + LSHProjectionType_UNKNOWN = 0, + LSHProjectionType_SPARSE = 1, + LSHProjectionType_DENSE = 2, + LSHProjectionType_MIN = LSHProjectionType_UNKNOWN, + LSHProjectionType_MAX = LSHProjectionType_DENSE +}; + +inline const LSHProjectionType (&EnumValuesLSHProjectionType())[3] +{ + static const LSHProjectionType values[] = {LSHProjectionType_UNKNOWN, LSHProjectionType_SPARSE, + LSHProjectionType_DENSE}; + return values; +} + +inline const char *const *EnumNamesLSHProjectionType() +{ + static const char *const names[] = {"UNKNOWN", "SPARSE", "DENSE", nullptr}; + return names; +} + +inline const char *EnumNameLSHProjectionType(LSHProjectionType e) +{ + const size_t index = static_cast<int>(e); + return EnumNamesLSHProjectionType()[index]; +} + +enum FullyConnectedOptionsWeightsFormat +{ + FullyConnectedOptionsWeightsFormat_DEFAULT = 0, + FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8 = 1, + FullyConnectedOptionsWeightsFormat_MIN = FullyConnectedOptionsWeightsFormat_DEFAULT, + FullyConnectedOptionsWeightsFormat_MAX = FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8 +}; + +inline const FullyConnectedOptionsWeightsFormat (&EnumValuesFullyConnectedOptionsWeightsFormat())[2] +{ + static const FullyConnectedOptionsWeightsFormat values[] = { + FullyConnectedOptionsWeightsFormat_DEFAULT, + FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8}; + return values; +} + 
+inline const char *const *EnumNamesFullyConnectedOptionsWeightsFormat() +{ + static const char *const names[] = {"DEFAULT", "SHUFFLED4x16INT8", nullptr}; + return names; +} + +inline const char *EnumNameFullyConnectedOptionsWeightsFormat(FullyConnectedOptionsWeightsFormat e) +{ + const size_t index = static_cast<int>(e); + return EnumNamesFullyConnectedOptionsWeightsFormat()[index]; +} + +enum LSTMKernelType +{ + LSTMKernelType_FULL = 0, + LSTMKernelType_BASIC = 1, + LSTMKernelType_MIN = LSTMKernelType_FULL, + LSTMKernelType_MAX = LSTMKernelType_BASIC +}; + +inline const LSTMKernelType (&EnumValuesLSTMKernelType())[2] +{ + static const LSTMKernelType values[] = {LSTMKernelType_FULL, LSTMKernelType_BASIC}; + return values; +} + +inline const char *const *EnumNamesLSTMKernelType() +{ + static const char *const names[] = {"FULL", "BASIC", nullptr}; + return names; +} + +inline const char *EnumNameLSTMKernelType(LSTMKernelType e) +{ + const size_t index = static_cast<int>(e); + return EnumNamesLSTMKernelType()[index]; +} + +enum CombinerType +{ + CombinerType_SUM = 0, + CombinerType_MEAN = 1, + CombinerType_SQRTN = 2, + CombinerType_MIN = CombinerType_SUM, + CombinerType_MAX = CombinerType_SQRTN +}; + +inline const CombinerType (&EnumValuesCombinerType())[3] +{ + static const CombinerType values[] = {CombinerType_SUM, CombinerType_MEAN, CombinerType_SQRTN}; + return values; +} + +inline const char *const *EnumNamesCombinerType() +{ + static const char *const names[] = {"SUM", "MEAN", "SQRTN", nullptr}; + return names; +} + +inline const char *EnumNameCombinerType(CombinerType e) +{ + const size_t index = static_cast<int>(e); + return EnumNamesCombinerType()[index]; +} + +enum MirrorPadMode +{ + MirrorPadMode_REFLECT = 0, + MirrorPadMode_SYMMETRIC = 1, + MirrorPadMode_MIN = MirrorPadMode_REFLECT, + MirrorPadMode_MAX = MirrorPadMode_SYMMETRIC +}; + +inline const MirrorPadMode (&EnumValuesMirrorPadMode())[2] +{ + static const MirrorPadMode values[] = 
{MirrorPadMode_REFLECT, MirrorPadMode_SYMMETRIC}; + return values; +} + +inline const char *const *EnumNamesMirrorPadMode() +{ + static const char *const names[] = {"REFLECT", "SYMMETRIC", nullptr}; + return names; +} + +inline const char *EnumNameMirrorPadMode(MirrorPadMode e) +{ + const size_t index = static_cast<int>(e); + return EnumNamesMirrorPadMode()[index]; +} + +enum CustomOptionsFormat +{ + CustomOptionsFormat_FLEXBUFFERS = 0, + CustomOptionsFormat_MIN = CustomOptionsFormat_FLEXBUFFERS, + CustomOptionsFormat_MAX = CustomOptionsFormat_FLEXBUFFERS +}; + +inline const CustomOptionsFormat (&EnumValuesCustomOptionsFormat())[1] +{ + static const CustomOptionsFormat values[] = {CustomOptionsFormat_FLEXBUFFERS}; + return values; +} + +inline const char *const *EnumNamesCustomOptionsFormat() +{ + static const char *const names[] = {"FLEXBUFFERS", nullptr}; + return names; +} + +inline const char *EnumNameCustomOptionsFormat(CustomOptionsFormat e) +{ + const size_t index = static_cast<int>(e); + return EnumNamesCustomOptionsFormat()[index]; +} + +enum DataFormat +{ + DataFormat_CHANNELS_LAST = 0, + DataFormat_CHANNELS_FIRST = 1, + DataFormat_MIN = DataFormat_CHANNELS_LAST, + DataFormat_MAX = DataFormat_CHANNELS_FIRST +}; + +inline const DataFormat (&EnumValuesDataFormat())[2] +{ + static const DataFormat values[] = {DataFormat_CHANNELS_LAST, DataFormat_CHANNELS_FIRST}; + return values; +} + +inline const char *const *EnumNamesDataFormat() +{ + static const char *const names[] = {"CHANNELS_LAST", "CHANNELS_FIRST", nullptr}; + return names; +} + +inline const char *EnumNameDataFormat(DataFormat e) +{ + const size_t index = static_cast<int>(e); + return EnumNamesDataFormat()[index]; +} + +struct CustomQuantization FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_CUSTOM = 4 + }; + const flatbuffers::Vector<uint8_t> *custom() const + { + return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_CUSTOM); + } + bool Verify(flatbuffers::Verifier 
&verifier) const + { + return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_CUSTOM) && + verifier.VerifyVector(custom()) && verifier.EndTable(); + } +}; + +struct CustomQuantizationBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_custom(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom) + { + fbb_.AddOffset(CustomQuantization::VT_CUSTOM, custom); + } + explicit CustomQuantizationBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + CustomQuantizationBuilder &operator=(const CustomQuantizationBuilder &); + flatbuffers::Offset<CustomQuantization> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<CustomQuantization>(end); + return o; + } +}; + +inline flatbuffers::Offset<CustomQuantization> +CreateCustomQuantization(flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom = 0) +{ + CustomQuantizationBuilder builder_(_fbb); + builder_.add_custom(custom); + return builder_.Finish(); +} + +inline flatbuffers::Offset<CustomQuantization> +CreateCustomQuantizationDirect(flatbuffers::FlatBufferBuilder &_fbb, + const std::vector<uint8_t> *custom = nullptr) +{ + return circle::CreateCustomQuantization(_fbb, custom ? 
_fbb.CreateVector<uint8_t>(*custom) : 0); +} + +struct QuantizationParameters FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_MIN = 4, + VT_MAX = 6, + VT_SCALE = 8, + VT_ZERO_POINT = 10, + VT_DETAILS_TYPE = 12, + VT_DETAILS = 14, + VT_QUANTIZED_DIMENSION = 16 + }; + const flatbuffers::Vector<float> *min() const + { + return GetPointer<const flatbuffers::Vector<float> *>(VT_MIN); + } + const flatbuffers::Vector<float> *max() const + { + return GetPointer<const flatbuffers::Vector<float> *>(VT_MAX); + } + const flatbuffers::Vector<float> *scale() const + { + return GetPointer<const flatbuffers::Vector<float> *>(VT_SCALE); + } + const flatbuffers::Vector<int64_t> *zero_point() const + { + return GetPointer<const flatbuffers::Vector<int64_t> *>(VT_ZERO_POINT); + } + QuantizationDetails details_type() const + { + return static_cast<QuantizationDetails>(GetField<uint8_t>(VT_DETAILS_TYPE, 0)); + } + const void *details() const { return GetPointer<const void *>(VT_DETAILS); } + template <typename T> const T *details_as() const; + const CustomQuantization *details_as_CustomQuantization() const + { + return details_type() == QuantizationDetails_CustomQuantization + ? 
static_cast<const CustomQuantization *>(details()) + : nullptr; + } + int32_t quantized_dimension() const { return GetField<int32_t>(VT_QUANTIZED_DIMENSION, 0); } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_MIN) && + verifier.VerifyVector(min()) && VerifyOffset(verifier, VT_MAX) && + verifier.VerifyVector(max()) && VerifyOffset(verifier, VT_SCALE) && + verifier.VerifyVector(scale()) && VerifyOffset(verifier, VT_ZERO_POINT) && + verifier.VerifyVector(zero_point()) && VerifyField<uint8_t>(verifier, VT_DETAILS_TYPE) && + VerifyOffset(verifier, VT_DETAILS) && + VerifyQuantizationDetails(verifier, details(), details_type()) && + VerifyField<int32_t>(verifier, VT_QUANTIZED_DIMENSION) && verifier.EndTable(); + } +}; + +template <> +inline const CustomQuantization *QuantizationParameters::details_as<CustomQuantization>() const +{ + return details_as_CustomQuantization(); +} + +struct QuantizationParametersBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_min(flatbuffers::Offset<flatbuffers::Vector<float>> min) + { + fbb_.AddOffset(QuantizationParameters::VT_MIN, min); + } + void add_max(flatbuffers::Offset<flatbuffers::Vector<float>> max) + { + fbb_.AddOffset(QuantizationParameters::VT_MAX, max); + } + void add_scale(flatbuffers::Offset<flatbuffers::Vector<float>> scale) + { + fbb_.AddOffset(QuantizationParameters::VT_SCALE, scale); + } + void add_zero_point(flatbuffers::Offset<flatbuffers::Vector<int64_t>> zero_point) + { + fbb_.AddOffset(QuantizationParameters::VT_ZERO_POINT, zero_point); + } + void add_details_type(QuantizationDetails details_type) + { + fbb_.AddElement<uint8_t>(QuantizationParameters::VT_DETAILS_TYPE, + static_cast<uint8_t>(details_type), 0); + } + void add_details(flatbuffers::Offset<void> details) + { + fbb_.AddOffset(QuantizationParameters::VT_DETAILS, details); + } + void add_quantized_dimension(int32_t quantized_dimension) + { 
+ fbb_.AddElement<int32_t>(QuantizationParameters::VT_QUANTIZED_DIMENSION, quantized_dimension, + 0); + } + explicit QuantizationParametersBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + QuantizationParametersBuilder &operator=(const QuantizationParametersBuilder &); + flatbuffers::Offset<QuantizationParameters> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<QuantizationParameters>(end); + return o; + } +}; + +inline flatbuffers::Offset<QuantizationParameters> +CreateQuantizationParameters(flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset<flatbuffers::Vector<float>> min = 0, + flatbuffers::Offset<flatbuffers::Vector<float>> max = 0, + flatbuffers::Offset<flatbuffers::Vector<float>> scale = 0, + flatbuffers::Offset<flatbuffers::Vector<int64_t>> zero_point = 0, + QuantizationDetails details_type = QuantizationDetails_NONE, + flatbuffers::Offset<void> details = 0, int32_t quantized_dimension = 0) +{ + QuantizationParametersBuilder builder_(_fbb); + builder_.add_quantized_dimension(quantized_dimension); + builder_.add_details(details); + builder_.add_zero_point(zero_point); + builder_.add_scale(scale); + builder_.add_max(max); + builder_.add_min(min); + builder_.add_details_type(details_type); + return builder_.Finish(); +} + +inline flatbuffers::Offset<QuantizationParameters> CreateQuantizationParametersDirect( + flatbuffers::FlatBufferBuilder &_fbb, const std::vector<float> *min = nullptr, + const std::vector<float> *max = nullptr, const std::vector<float> *scale = nullptr, + const std::vector<int64_t> *zero_point = nullptr, + QuantizationDetails details_type = QuantizationDetails_NONE, + flatbuffers::Offset<void> details = 0, int32_t quantized_dimension = 0) +{ + return circle::CreateQuantizationParameters( + _fbb, min ? _fbb.CreateVector<float>(*min) : 0, max ? _fbb.CreateVector<float>(*max) : 0, + scale ? _fbb.CreateVector<float>(*scale) : 0, + zero_point ? 
_fbb.CreateVector<int64_t>(*zero_point) : 0, details_type, details, + quantized_dimension); +} + +struct Int32Vector FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_VALUES = 4 + }; + const flatbuffers::Vector<int32_t> *values() const + { + return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_VALUES); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_VALUES) && + verifier.VerifyVector(values()) && verifier.EndTable(); + } +}; + +struct Int32VectorBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_values(flatbuffers::Offset<flatbuffers::Vector<int32_t>> values) + { + fbb_.AddOffset(Int32Vector::VT_VALUES, values); + } + explicit Int32VectorBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + Int32VectorBuilder &operator=(const Int32VectorBuilder &); + flatbuffers::Offset<Int32Vector> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<Int32Vector>(end); + return o; + } +}; + +inline flatbuffers::Offset<Int32Vector> +CreateInt32Vector(flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset<flatbuffers::Vector<int32_t>> values = 0) +{ + Int32VectorBuilder builder_(_fbb); + builder_.add_values(values); + return builder_.Finish(); +} + +inline flatbuffers::Offset<Int32Vector> +CreateInt32VectorDirect(flatbuffers::FlatBufferBuilder &_fbb, + const std::vector<int32_t> *values = nullptr) +{ + return circle::CreateInt32Vector(_fbb, values ? 
_fbb.CreateVector<int32_t>(*values) : 0); +} + +struct Uint16Vector FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_VALUES = 4 + }; + const flatbuffers::Vector<uint16_t> *values() const + { + return GetPointer<const flatbuffers::Vector<uint16_t> *>(VT_VALUES); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_VALUES) && + verifier.VerifyVector(values()) && verifier.EndTable(); + } +}; + +struct Uint16VectorBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_values(flatbuffers::Offset<flatbuffers::Vector<uint16_t>> values) + { + fbb_.AddOffset(Uint16Vector::VT_VALUES, values); + } + explicit Uint16VectorBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + Uint16VectorBuilder &operator=(const Uint16VectorBuilder &); + flatbuffers::Offset<Uint16Vector> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<Uint16Vector>(end); + return o; + } +}; + +inline flatbuffers::Offset<Uint16Vector> +CreateUint16Vector(flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset<flatbuffers::Vector<uint16_t>> values = 0) +{ + Uint16VectorBuilder builder_(_fbb); + builder_.add_values(values); + return builder_.Finish(); +} + +inline flatbuffers::Offset<Uint16Vector> +CreateUint16VectorDirect(flatbuffers::FlatBufferBuilder &_fbb, + const std::vector<uint16_t> *values = nullptr) +{ + return circle::CreateUint16Vector(_fbb, values ? 
_fbb.CreateVector<uint16_t>(*values) : 0); +} + +struct Uint8Vector FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_VALUES = 4 + }; + const flatbuffers::Vector<uint8_t> *values() const + { + return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_VALUES); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_VALUES) && + verifier.VerifyVector(values()) && verifier.EndTable(); + } +}; + +struct Uint8VectorBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_values(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> values) + { + fbb_.AddOffset(Uint8Vector::VT_VALUES, values); + } + explicit Uint8VectorBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + Uint8VectorBuilder &operator=(const Uint8VectorBuilder &); + flatbuffers::Offset<Uint8Vector> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<Uint8Vector>(end); + return o; + } +}; + +inline flatbuffers::Offset<Uint8Vector> +CreateUint8Vector(flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset<flatbuffers::Vector<uint8_t>> values = 0) +{ + Uint8VectorBuilder builder_(_fbb); + builder_.add_values(values); + return builder_.Finish(); +} + +inline flatbuffers::Offset<Uint8Vector> +CreateUint8VectorDirect(flatbuffers::FlatBufferBuilder &_fbb, + const std::vector<uint8_t> *values = nullptr) +{ + return circle::CreateUint8Vector(_fbb, values ? 
_fbb.CreateVector<uint8_t>(*values) : 0); +} + +struct DimensionMetadata FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_FORMAT = 4, + VT_DENSE_SIZE = 6, + VT_ARRAY_SEGMENTS_TYPE = 8, + VT_ARRAY_SEGMENTS = 10, + VT_ARRAY_INDICES_TYPE = 12, + VT_ARRAY_INDICES = 14 + }; + DimensionType format() const + { + return static_cast<DimensionType>(GetField<int8_t>(VT_FORMAT, 0)); + } + int32_t dense_size() const { return GetField<int32_t>(VT_DENSE_SIZE, 0); } + SparseIndexVector array_segments_type() const + { + return static_cast<SparseIndexVector>(GetField<uint8_t>(VT_ARRAY_SEGMENTS_TYPE, 0)); + } + const void *array_segments() const { return GetPointer<const void *>(VT_ARRAY_SEGMENTS); } + template <typename T> const T *array_segments_as() const; + const Int32Vector *array_segments_as_Int32Vector() const + { + return array_segments_type() == SparseIndexVector_Int32Vector + ? static_cast<const Int32Vector *>(array_segments()) + : nullptr; + } + const Uint16Vector *array_segments_as_Uint16Vector() const + { + return array_segments_type() == SparseIndexVector_Uint16Vector + ? static_cast<const Uint16Vector *>(array_segments()) + : nullptr; + } + const Uint8Vector *array_segments_as_Uint8Vector() const + { + return array_segments_type() == SparseIndexVector_Uint8Vector + ? static_cast<const Uint8Vector *>(array_segments()) + : nullptr; + } + SparseIndexVector array_indices_type() const + { + return static_cast<SparseIndexVector>(GetField<uint8_t>(VT_ARRAY_INDICES_TYPE, 0)); + } + const void *array_indices() const { return GetPointer<const void *>(VT_ARRAY_INDICES); } + template <typename T> const T *array_indices_as() const; + const Int32Vector *array_indices_as_Int32Vector() const + { + return array_indices_type() == SparseIndexVector_Int32Vector + ? static_cast<const Int32Vector *>(array_indices()) + : nullptr; + } + const Uint16Vector *array_indices_as_Uint16Vector() const + { + return array_indices_type() == SparseIndexVector_Uint16Vector + ? 
static_cast<const Uint16Vector *>(array_indices()) + : nullptr; + } + const Uint8Vector *array_indices_as_Uint8Vector() const + { + return array_indices_type() == SparseIndexVector_Uint8Vector + ? static_cast<const Uint8Vector *>(array_indices()) + : nullptr; + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_FORMAT) && + VerifyField<int32_t>(verifier, VT_DENSE_SIZE) && + VerifyField<uint8_t>(verifier, VT_ARRAY_SEGMENTS_TYPE) && + VerifyOffset(verifier, VT_ARRAY_SEGMENTS) && + VerifySparseIndexVector(verifier, array_segments(), array_segments_type()) && + VerifyField<uint8_t>(verifier, VT_ARRAY_INDICES_TYPE) && + VerifyOffset(verifier, VT_ARRAY_INDICES) && + VerifySparseIndexVector(verifier, array_indices(), array_indices_type()) && + verifier.EndTable(); + } +}; + +template <> inline const Int32Vector *DimensionMetadata::array_segments_as<Int32Vector>() const +{ + return array_segments_as_Int32Vector(); +} + +template <> inline const Uint16Vector *DimensionMetadata::array_segments_as<Uint16Vector>() const +{ + return array_segments_as_Uint16Vector(); +} + +template <> inline const Uint8Vector *DimensionMetadata::array_segments_as<Uint8Vector>() const +{ + return array_segments_as_Uint8Vector(); +} + +template <> inline const Int32Vector *DimensionMetadata::array_indices_as<Int32Vector>() const +{ + return array_indices_as_Int32Vector(); +} + +template <> inline const Uint16Vector *DimensionMetadata::array_indices_as<Uint16Vector>() const +{ + return array_indices_as_Uint16Vector(); +} + +template <> inline const Uint8Vector *DimensionMetadata::array_indices_as<Uint8Vector>() const +{ + return array_indices_as_Uint8Vector(); +} + +struct DimensionMetadataBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_format(DimensionType format) + { + fbb_.AddElement<int8_t>(DimensionMetadata::VT_FORMAT, static_cast<int8_t>(format), 0); + } + void 
add_dense_size(int32_t dense_size) + { + fbb_.AddElement<int32_t>(DimensionMetadata::VT_DENSE_SIZE, dense_size, 0); + } + void add_array_segments_type(SparseIndexVector array_segments_type) + { + fbb_.AddElement<uint8_t>(DimensionMetadata::VT_ARRAY_SEGMENTS_TYPE, + static_cast<uint8_t>(array_segments_type), 0); + } + void add_array_segments(flatbuffers::Offset<void> array_segments) + { + fbb_.AddOffset(DimensionMetadata::VT_ARRAY_SEGMENTS, array_segments); + } + void add_array_indices_type(SparseIndexVector array_indices_type) + { + fbb_.AddElement<uint8_t>(DimensionMetadata::VT_ARRAY_INDICES_TYPE, + static_cast<uint8_t>(array_indices_type), 0); + } + void add_array_indices(flatbuffers::Offset<void> array_indices) + { + fbb_.AddOffset(DimensionMetadata::VT_ARRAY_INDICES, array_indices); + } + explicit DimensionMetadataBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + DimensionMetadataBuilder &operator=(const DimensionMetadataBuilder &); + flatbuffers::Offset<DimensionMetadata> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<DimensionMetadata>(end); + return o; + } +}; + +inline flatbuffers::Offset<DimensionMetadata> +CreateDimensionMetadata(flatbuffers::FlatBufferBuilder &_fbb, + DimensionType format = DimensionType_DENSE, int32_t dense_size = 0, + SparseIndexVector array_segments_type = SparseIndexVector_NONE, + flatbuffers::Offset<void> array_segments = 0, + SparseIndexVector array_indices_type = SparseIndexVector_NONE, + flatbuffers::Offset<void> array_indices = 0) +{ + DimensionMetadataBuilder builder_(_fbb); + builder_.add_array_indices(array_indices); + builder_.add_array_segments(array_segments); + builder_.add_dense_size(dense_size); + builder_.add_array_indices_type(array_indices_type); + builder_.add_array_segments_type(array_segments_type); + builder_.add_format(format); + return builder_.Finish(); +} + +struct SparsityParameters FLATBUFFERS_FINAL_CLASS : private 
flatbuffers::Table +{ + enum + { + VT_TRAVERSAL_ORDER = 4, + VT_BLOCK_MAP = 6, + VT_DIM_METADATA = 8 + }; + const flatbuffers::Vector<int32_t> *traversal_order() const + { + return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_TRAVERSAL_ORDER); + } + const flatbuffers::Vector<int32_t> *block_map() const + { + return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_BLOCK_MAP); + } + const flatbuffers::Vector<flatbuffers::Offset<DimensionMetadata>> *dim_metadata() const + { + return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<DimensionMetadata>> *>( + VT_DIM_METADATA); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_TRAVERSAL_ORDER) && + verifier.VerifyVector(traversal_order()) && VerifyOffset(verifier, VT_BLOCK_MAP) && + verifier.VerifyVector(block_map()) && VerifyOffset(verifier, VT_DIM_METADATA) && + verifier.VerifyVector(dim_metadata()) && verifier.VerifyVectorOfTables(dim_metadata()) && + verifier.EndTable(); + } +}; + +struct SparsityParametersBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_traversal_order(flatbuffers::Offset<flatbuffers::Vector<int32_t>> traversal_order) + { + fbb_.AddOffset(SparsityParameters::VT_TRAVERSAL_ORDER, traversal_order); + } + void add_block_map(flatbuffers::Offset<flatbuffers::Vector<int32_t>> block_map) + { + fbb_.AddOffset(SparsityParameters::VT_BLOCK_MAP, block_map); + } + void add_dim_metadata( + flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<DimensionMetadata>>> dim_metadata) + { + fbb_.AddOffset(SparsityParameters::VT_DIM_METADATA, dim_metadata); + } + explicit SparsityParametersBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + SparsityParametersBuilder &operator=(const SparsityParametersBuilder &); + flatbuffers::Offset<SparsityParameters> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = 
flatbuffers::Offset<SparsityParameters>(end); + return o; + } +}; + +inline flatbuffers::Offset<SparsityParameters> CreateSparsityParameters( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset<flatbuffers::Vector<int32_t>> traversal_order = 0, + flatbuffers::Offset<flatbuffers::Vector<int32_t>> block_map = 0, + flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<DimensionMetadata>>> dim_metadata = + 0) +{ + SparsityParametersBuilder builder_(_fbb); + builder_.add_dim_metadata(dim_metadata); + builder_.add_block_map(block_map); + builder_.add_traversal_order(traversal_order); + return builder_.Finish(); +} + +inline flatbuffers::Offset<SparsityParameters> CreateSparsityParametersDirect( + flatbuffers::FlatBufferBuilder &_fbb, const std::vector<int32_t> *traversal_order = nullptr, + const std::vector<int32_t> *block_map = nullptr, + const std::vector<flatbuffers::Offset<DimensionMetadata>> *dim_metadata = nullptr) +{ + return circle::CreateSparsityParameters( + _fbb, traversal_order ? _fbb.CreateVector<int32_t>(*traversal_order) : 0, + block_map ? _fbb.CreateVector<int32_t>(*block_map) : 0, + dim_metadata ? 
_fbb.CreateVector<flatbuffers::Offset<DimensionMetadata>>(*dim_metadata) : 0); +} + +struct Tensor FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_SHAPE = 4, + VT_TYPE = 6, + VT_BUFFER = 8, + VT_NAME = 10, + VT_QUANTIZATION = 12, + VT_IS_VARIABLE = 14, + VT_SPARSITY = 16, + VT_SHAPE_SIGNATURE = 18 + }; + const flatbuffers::Vector<int32_t> *shape() const + { + return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_SHAPE); + } + TensorType type() const { return static_cast<TensorType>(GetField<int8_t>(VT_TYPE, 0)); } + uint32_t buffer() const { return GetField<uint32_t>(VT_BUFFER, 0); } + const flatbuffers::String *name() const + { + return GetPointer<const flatbuffers::String *>(VT_NAME); + } + const QuantizationParameters *quantization() const + { + return GetPointer<const QuantizationParameters *>(VT_QUANTIZATION); + } + bool is_variable() const { return GetField<uint8_t>(VT_IS_VARIABLE, 0) != 0; } + const SparsityParameters *sparsity() const + { + return GetPointer<const SparsityParameters *>(VT_SPARSITY); + } + const flatbuffers::Vector<int32_t> *shape_signature() const + { + return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_SHAPE_SIGNATURE); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_SHAPE) && + verifier.VerifyVector(shape()) && VerifyField<int8_t>(verifier, VT_TYPE) && + VerifyField<uint32_t>(verifier, VT_BUFFER) && VerifyOffset(verifier, VT_NAME) && + verifier.VerifyString(name()) && VerifyOffset(verifier, VT_QUANTIZATION) && + verifier.VerifyTable(quantization()) && VerifyField<uint8_t>(verifier, VT_IS_VARIABLE) && + VerifyOffset(verifier, VT_SPARSITY) && verifier.VerifyTable(sparsity()) && + VerifyOffset(verifier, VT_SHAPE_SIGNATURE) && verifier.VerifyVector(shape_signature()) && + verifier.EndTable(); + } +}; + +struct TensorBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void 
add_shape(flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape) + { + fbb_.AddOffset(Tensor::VT_SHAPE, shape); + } + void add_type(TensorType type) + { + fbb_.AddElement<int8_t>(Tensor::VT_TYPE, static_cast<int8_t>(type), 0); + } + void add_buffer(uint32_t buffer) { fbb_.AddElement<uint32_t>(Tensor::VT_BUFFER, buffer, 0); } + void add_name(flatbuffers::Offset<flatbuffers::String> name) + { + fbb_.AddOffset(Tensor::VT_NAME, name); + } + void add_quantization(flatbuffers::Offset<QuantizationParameters> quantization) + { + fbb_.AddOffset(Tensor::VT_QUANTIZATION, quantization); + } + void add_is_variable(bool is_variable) + { + fbb_.AddElement<uint8_t>(Tensor::VT_IS_VARIABLE, static_cast<uint8_t>(is_variable), 0); + } + void add_sparsity(flatbuffers::Offset<SparsityParameters> sparsity) + { + fbb_.AddOffset(Tensor::VT_SPARSITY, sparsity); + } + void add_shape_signature(flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape_signature) + { + fbb_.AddOffset(Tensor::VT_SHAPE_SIGNATURE, shape_signature); + } + explicit TensorBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + TensorBuilder &operator=(const TensorBuilder &); + flatbuffers::Offset<Tensor> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<Tensor>(end); + return o; + } +}; + +inline flatbuffers::Offset<Tensor> +CreateTensor(flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape = 0, + TensorType type = TensorType_FLOAT32, uint32_t buffer = 0, + flatbuffers::Offset<flatbuffers::String> name = 0, + flatbuffers::Offset<QuantizationParameters> quantization = 0, bool is_variable = false, + flatbuffers::Offset<SparsityParameters> sparsity = 0, + flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape_signature = 0) +{ + TensorBuilder builder_(_fbb); + builder_.add_shape_signature(shape_signature); + builder_.add_sparsity(sparsity); + builder_.add_quantization(quantization); + 
builder_.add_name(name); + builder_.add_buffer(buffer); + builder_.add_shape(shape); + builder_.add_is_variable(is_variable); + builder_.add_type(type); + return builder_.Finish(); +} + +inline flatbuffers::Offset<Tensor> CreateTensorDirect( + flatbuffers::FlatBufferBuilder &_fbb, const std::vector<int32_t> *shape = nullptr, + TensorType type = TensorType_FLOAT32, uint32_t buffer = 0, const char *name = nullptr, + flatbuffers::Offset<QuantizationParameters> quantization = 0, bool is_variable = false, + flatbuffers::Offset<SparsityParameters> sparsity = 0, + const std::vector<int32_t> *shape_signature = nullptr) +{ + return circle::CreateTensor(_fbb, shape ? _fbb.CreateVector<int32_t>(*shape) : 0, type, buffer, + name ? _fbb.CreateString(name) : 0, quantization, is_variable, + sparsity, + shape_signature ? _fbb.CreateVector<int32_t>(*shape_signature) : 0); +} + +struct Conv2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_PADDING = 4, + VT_STRIDE_W = 6, + VT_STRIDE_H = 8, + VT_FUSED_ACTIVATION_FUNCTION = 10, + VT_DILATION_W_FACTOR = 12, + VT_DILATION_H_FACTOR = 14 + }; + Padding padding() const { return static_cast<Padding>(GetField<int8_t>(VT_PADDING, 0)); } + int32_t stride_w() const { return GetField<int32_t>(VT_STRIDE_W, 0); } + int32_t stride_h() const { return GetField<int32_t>(VT_STRIDE_H, 0); } + ActivationFunctionType fused_activation_function() const + { + return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + int32_t dilation_w_factor() const { return GetField<int32_t>(VT_DILATION_W_FACTOR, 1); } + int32_t dilation_h_factor() const { return GetField<int32_t>(VT_DILATION_H_FACTOR, 1); } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_PADDING) && + VerifyField<int32_t>(verifier, VT_STRIDE_W) && + VerifyField<int32_t>(verifier, VT_STRIDE_H) && + VerifyField<int8_t>(verifier, 
VT_FUSED_ACTIVATION_FUNCTION) && + VerifyField<int32_t>(verifier, VT_DILATION_W_FACTOR) && + VerifyField<int32_t>(verifier, VT_DILATION_H_FACTOR) && verifier.EndTable(); + } +}; + +struct Conv2DOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_padding(Padding padding) + { + fbb_.AddElement<int8_t>(Conv2DOptions::VT_PADDING, static_cast<int8_t>(padding), 0); + } + void add_stride_w(int32_t stride_w) + { + fbb_.AddElement<int32_t>(Conv2DOptions::VT_STRIDE_W, stride_w, 0); + } + void add_stride_h(int32_t stride_h) + { + fbb_.AddElement<int32_t>(Conv2DOptions::VT_STRIDE_H, stride_h, 0); + } + void add_fused_activation_function(ActivationFunctionType fused_activation_function) + { + fbb_.AddElement<int8_t>(Conv2DOptions::VT_FUSED_ACTIVATION_FUNCTION, + static_cast<int8_t>(fused_activation_function), 0); + } + void add_dilation_w_factor(int32_t dilation_w_factor) + { + fbb_.AddElement<int32_t>(Conv2DOptions::VT_DILATION_W_FACTOR, dilation_w_factor, 1); + } + void add_dilation_h_factor(int32_t dilation_h_factor) + { + fbb_.AddElement<int32_t>(Conv2DOptions::VT_DILATION_H_FACTOR, dilation_h_factor, 1); + } + explicit Conv2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + Conv2DOptionsBuilder &operator=(const Conv2DOptionsBuilder &); + flatbuffers::Offset<Conv2DOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<Conv2DOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<Conv2DOptions> +CreateConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb, Padding padding = Padding_SAME, + int32_t stride_w = 0, int32_t stride_h = 0, + ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE, + int32_t dilation_w_factor = 1, int32_t dilation_h_factor = 1) +{ + Conv2DOptionsBuilder builder_(_fbb); + builder_.add_dilation_h_factor(dilation_h_factor); + builder_.add_dilation_w_factor(dilation_w_factor); + 
builder_.add_stride_h(stride_h); + builder_.add_stride_w(stride_w); + builder_.add_fused_activation_function(fused_activation_function); + builder_.add_padding(padding); + return builder_.Finish(); +} + +struct Pool2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_PADDING = 4, + VT_STRIDE_W = 6, + VT_STRIDE_H = 8, + VT_FILTER_WIDTH = 10, + VT_FILTER_HEIGHT = 12, + VT_FUSED_ACTIVATION_FUNCTION = 14 + }; + Padding padding() const { return static_cast<Padding>(GetField<int8_t>(VT_PADDING, 0)); } + int32_t stride_w() const { return GetField<int32_t>(VT_STRIDE_W, 0); } + int32_t stride_h() const { return GetField<int32_t>(VT_STRIDE_H, 0); } + int32_t filter_width() const { return GetField<int32_t>(VT_FILTER_WIDTH, 0); } + int32_t filter_height() const { return GetField<int32_t>(VT_FILTER_HEIGHT, 0); } + ActivationFunctionType fused_activation_function() const + { + return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_PADDING) && + VerifyField<int32_t>(verifier, VT_STRIDE_W) && + VerifyField<int32_t>(verifier, VT_STRIDE_H) && + VerifyField<int32_t>(verifier, VT_FILTER_WIDTH) && + VerifyField<int32_t>(verifier, VT_FILTER_HEIGHT) && + VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable(); + } +}; + +struct Pool2DOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_padding(Padding padding) + { + fbb_.AddElement<int8_t>(Pool2DOptions::VT_PADDING, static_cast<int8_t>(padding), 0); + } + void add_stride_w(int32_t stride_w) + { + fbb_.AddElement<int32_t>(Pool2DOptions::VT_STRIDE_W, stride_w, 0); + } + void add_stride_h(int32_t stride_h) + { + fbb_.AddElement<int32_t>(Pool2DOptions::VT_STRIDE_H, stride_h, 0); + } + void add_filter_width(int32_t filter_width) + { + 
fbb_.AddElement<int32_t>(Pool2DOptions::VT_FILTER_WIDTH, filter_width, 0); + } + void add_filter_height(int32_t filter_height) + { + fbb_.AddElement<int32_t>(Pool2DOptions::VT_FILTER_HEIGHT, filter_height, 0); + } + void add_fused_activation_function(ActivationFunctionType fused_activation_function) + { + fbb_.AddElement<int8_t>(Pool2DOptions::VT_FUSED_ACTIVATION_FUNCTION, + static_cast<int8_t>(fused_activation_function), 0); + } + explicit Pool2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + Pool2DOptionsBuilder &operator=(const Pool2DOptionsBuilder &); + flatbuffers::Offset<Pool2DOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<Pool2DOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<Pool2DOptions> +CreatePool2DOptions(flatbuffers::FlatBufferBuilder &_fbb, Padding padding = Padding_SAME, + int32_t stride_w = 0, int32_t stride_h = 0, int32_t filter_width = 0, + int32_t filter_height = 0, + ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE) +{ + Pool2DOptionsBuilder builder_(_fbb); + builder_.add_filter_height(filter_height); + builder_.add_filter_width(filter_width); + builder_.add_stride_h(stride_h); + builder_.add_stride_w(stride_w); + builder_.add_fused_activation_function(fused_activation_function); + builder_.add_padding(padding); + return builder_.Finish(); +} + +struct DepthwiseConv2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_PADDING = 4, + VT_STRIDE_W = 6, + VT_STRIDE_H = 8, + VT_DEPTH_MULTIPLIER = 10, + VT_FUSED_ACTIVATION_FUNCTION = 12, + VT_DILATION_W_FACTOR = 14, + VT_DILATION_H_FACTOR = 16 + }; + Padding padding() const { return static_cast<Padding>(GetField<int8_t>(VT_PADDING, 0)); } + int32_t stride_w() const { return GetField<int32_t>(VT_STRIDE_W, 0); } + int32_t stride_h() const { return GetField<int32_t>(VT_STRIDE_H, 0); } + int32_t depth_multiplier() const { 
return GetField<int32_t>(VT_DEPTH_MULTIPLIER, 0); } + ActivationFunctionType fused_activation_function() const + { + return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + int32_t dilation_w_factor() const { return GetField<int32_t>(VT_DILATION_W_FACTOR, 1); } + int32_t dilation_h_factor() const { return GetField<int32_t>(VT_DILATION_H_FACTOR, 1); } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_PADDING) && + VerifyField<int32_t>(verifier, VT_STRIDE_W) && + VerifyField<int32_t>(verifier, VT_STRIDE_H) && + VerifyField<int32_t>(verifier, VT_DEPTH_MULTIPLIER) && + VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && + VerifyField<int32_t>(verifier, VT_DILATION_W_FACTOR) && + VerifyField<int32_t>(verifier, VT_DILATION_H_FACTOR) && verifier.EndTable(); + } +}; + +struct DepthwiseConv2DOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_padding(Padding padding) + { + fbb_.AddElement<int8_t>(DepthwiseConv2DOptions::VT_PADDING, static_cast<int8_t>(padding), 0); + } + void add_stride_w(int32_t stride_w) + { + fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_STRIDE_W, stride_w, 0); + } + void add_stride_h(int32_t stride_h) + { + fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_STRIDE_H, stride_h, 0); + } + void add_depth_multiplier(int32_t depth_multiplier) + { + fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_DEPTH_MULTIPLIER, depth_multiplier, 0); + } + void add_fused_activation_function(ActivationFunctionType fused_activation_function) + { + fbb_.AddElement<int8_t>(DepthwiseConv2DOptions::VT_FUSED_ACTIVATION_FUNCTION, + static_cast<int8_t>(fused_activation_function), 0); + } + void add_dilation_w_factor(int32_t dilation_w_factor) + { + fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_DILATION_W_FACTOR, dilation_w_factor, 1); + } + void add_dilation_h_factor(int32_t 
dilation_h_factor) + { + fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_DILATION_H_FACTOR, dilation_h_factor, 1); + } + explicit DepthwiseConv2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + DepthwiseConv2DOptionsBuilder &operator=(const DepthwiseConv2DOptionsBuilder &); + flatbuffers::Offset<DepthwiseConv2DOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<DepthwiseConv2DOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<DepthwiseConv2DOptions> CreateDepthwiseConv2DOptions( + flatbuffers::FlatBufferBuilder &_fbb, Padding padding = Padding_SAME, int32_t stride_w = 0, + int32_t stride_h = 0, int32_t depth_multiplier = 0, + ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE, + int32_t dilation_w_factor = 1, int32_t dilation_h_factor = 1) +{ + DepthwiseConv2DOptionsBuilder builder_(_fbb); + builder_.add_dilation_h_factor(dilation_h_factor); + builder_.add_dilation_w_factor(dilation_w_factor); + builder_.add_depth_multiplier(depth_multiplier); + builder_.add_stride_h(stride_h); + builder_.add_stride_w(stride_w); + builder_.add_fused_activation_function(fused_activation_function); + builder_.add_padding(padding); + return builder_.Finish(); +} + +struct ConcatEmbeddingsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_NUM_CHANNELS = 4, + VT_NUM_COLUMNS_PER_CHANNEL = 6, + VT_EMBEDDING_DIM_PER_CHANNEL = 8 + }; + int32_t num_channels() const { return GetField<int32_t>(VT_NUM_CHANNELS, 0); } + const flatbuffers::Vector<int32_t> *num_columns_per_channel() const + { + return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_NUM_COLUMNS_PER_CHANNEL); + } + const flatbuffers::Vector<int32_t> *embedding_dim_per_channel() const + { + return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_EMBEDDING_DIM_PER_CHANNEL); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return 
VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_NUM_CHANNELS) && + VerifyOffset(verifier, VT_NUM_COLUMNS_PER_CHANNEL) && + verifier.VerifyVector(num_columns_per_channel()) && + VerifyOffset(verifier, VT_EMBEDDING_DIM_PER_CHANNEL) && + verifier.VerifyVector(embedding_dim_per_channel()) && verifier.EndTable(); + } +}; + +struct ConcatEmbeddingsOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_num_channels(int32_t num_channels) + { + fbb_.AddElement<int32_t>(ConcatEmbeddingsOptions::VT_NUM_CHANNELS, num_channels, 0); + } + void add_num_columns_per_channel( + flatbuffers::Offset<flatbuffers::Vector<int32_t>> num_columns_per_channel) + { + fbb_.AddOffset(ConcatEmbeddingsOptions::VT_NUM_COLUMNS_PER_CHANNEL, num_columns_per_channel); + } + void add_embedding_dim_per_channel( + flatbuffers::Offset<flatbuffers::Vector<int32_t>> embedding_dim_per_channel) + { + fbb_.AddOffset(ConcatEmbeddingsOptions::VT_EMBEDDING_DIM_PER_CHANNEL, + embedding_dim_per_channel); + } + explicit ConcatEmbeddingsOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ConcatEmbeddingsOptionsBuilder &operator=(const ConcatEmbeddingsOptionsBuilder &); + flatbuffers::Offset<ConcatEmbeddingsOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<ConcatEmbeddingsOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<ConcatEmbeddingsOptions> CreateConcatEmbeddingsOptions( + flatbuffers::FlatBufferBuilder &_fbb, int32_t num_channels = 0, + flatbuffers::Offset<flatbuffers::Vector<int32_t>> num_columns_per_channel = 0, + flatbuffers::Offset<flatbuffers::Vector<int32_t>> embedding_dim_per_channel = 0) +{ + ConcatEmbeddingsOptionsBuilder builder_(_fbb); + builder_.add_embedding_dim_per_channel(embedding_dim_per_channel); + builder_.add_num_columns_per_channel(num_columns_per_channel); + builder_.add_num_channels(num_channels); + return 
builder_.Finish(); +} + +inline flatbuffers::Offset<ConcatEmbeddingsOptions> +CreateConcatEmbeddingsOptionsDirect(flatbuffers::FlatBufferBuilder &_fbb, int32_t num_channels = 0, + const std::vector<int32_t> *num_columns_per_channel = nullptr, + const std::vector<int32_t> *embedding_dim_per_channel = nullptr) +{ + return circle::CreateConcatEmbeddingsOptions( + _fbb, num_channels, + num_columns_per_channel ? _fbb.CreateVector<int32_t>(*num_columns_per_channel) : 0, + embedding_dim_per_channel ? _fbb.CreateVector<int32_t>(*embedding_dim_per_channel) : 0); +} + +struct LSHProjectionOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_TYPE = 4 + }; + LSHProjectionType type() const + { + return static_cast<LSHProjectionType>(GetField<int8_t>(VT_TYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_TYPE) && + verifier.EndTable(); + } +}; + +struct LSHProjectionOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_type(LSHProjectionType type) + { + fbb_.AddElement<int8_t>(LSHProjectionOptions::VT_TYPE, static_cast<int8_t>(type), 0); + } + explicit LSHProjectionOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + LSHProjectionOptionsBuilder &operator=(const LSHProjectionOptionsBuilder &); + flatbuffers::Offset<LSHProjectionOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<LSHProjectionOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<LSHProjectionOptions> +CreateLSHProjectionOptions(flatbuffers::FlatBufferBuilder &_fbb, + LSHProjectionType type = LSHProjectionType_UNKNOWN) +{ + LSHProjectionOptionsBuilder builder_(_fbb); + builder_.add_type(type); + return builder_.Finish(); +} + +struct SVDFOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_RANK = 4, + 
VT_FUSED_ACTIVATION_FUNCTION = 6, + VT_ASYMMETRIC_QUANTIZE_INPUTS = 8 + }; + int32_t rank() const { return GetField<int32_t>(VT_RANK, 0); } + ActivationFunctionType fused_activation_function() const + { + return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool asymmetric_quantize_inputs() const + { + return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_RANK) && + VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && + VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable(); + } +}; + +struct SVDFOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_rank(int32_t rank) { fbb_.AddElement<int32_t>(SVDFOptions::VT_RANK, rank, 0); } + void add_fused_activation_function(ActivationFunctionType fused_activation_function) + { + fbb_.AddElement<int8_t>(SVDFOptions::VT_FUSED_ACTIVATION_FUNCTION, + static_cast<int8_t>(fused_activation_function), 0); + } + void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) + { + fbb_.AddElement<uint8_t>(SVDFOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, + static_cast<uint8_t>(asymmetric_quantize_inputs), 0); + } + explicit SVDFOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + SVDFOptionsBuilder &operator=(const SVDFOptionsBuilder &); + flatbuffers::Offset<SVDFOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<SVDFOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<SVDFOptions> +CreateSVDFOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t rank = 0, + ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE, + bool asymmetric_quantize_inputs = false) +{ + SVDFOptionsBuilder builder_(_fbb); + builder_.add_rank(rank); + 
builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +struct RNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_FUSED_ACTIVATION_FUNCTION = 4, + VT_ASYMMETRIC_QUANTIZE_INPUTS = 6 + }; + ActivationFunctionType fused_activation_function() const + { + return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool asymmetric_quantize_inputs() const + { + return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && + VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && + VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable(); + } +}; + +struct RNNOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_fused_activation_function(ActivationFunctionType fused_activation_function) + { + fbb_.AddElement<int8_t>(RNNOptions::VT_FUSED_ACTIVATION_FUNCTION, + static_cast<int8_t>(fused_activation_function), 0); + } + void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) + { + fbb_.AddElement<uint8_t>(RNNOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, + static_cast<uint8_t>(asymmetric_quantize_inputs), 0); + } + explicit RNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + RNNOptionsBuilder &operator=(const RNNOptionsBuilder &); + flatbuffers::Offset<RNNOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<RNNOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<RNNOptions> +CreateRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, + ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE, + bool asymmetric_quantize_inputs = false) +{ + RNNOptionsBuilder builder_(_fbb); + 
builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +struct SequenceRNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_TIME_MAJOR = 4, + VT_FUSED_ACTIVATION_FUNCTION = 6, + VT_ASYMMETRIC_QUANTIZE_INPUTS = 8 + }; + bool time_major() const { return GetField<uint8_t>(VT_TIME_MAJOR, 0) != 0; } + ActivationFunctionType fused_activation_function() const + { + return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool asymmetric_quantize_inputs() const + { + return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_TIME_MAJOR) && + VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && + VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable(); + } +}; + +struct SequenceRNNOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_time_major(bool time_major) + { + fbb_.AddElement<uint8_t>(SequenceRNNOptions::VT_TIME_MAJOR, static_cast<uint8_t>(time_major), + 0); + } + void add_fused_activation_function(ActivationFunctionType fused_activation_function) + { + fbb_.AddElement<int8_t>(SequenceRNNOptions::VT_FUSED_ACTIVATION_FUNCTION, + static_cast<int8_t>(fused_activation_function), 0); + } + void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) + { + fbb_.AddElement<uint8_t>(SequenceRNNOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, + static_cast<uint8_t>(asymmetric_quantize_inputs), 0); + } + explicit SequenceRNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + SequenceRNNOptionsBuilder &operator=(const SequenceRNNOptionsBuilder &); + flatbuffers::Offset<SequenceRNNOptions> Finish() + { + const auto 
end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<SequenceRNNOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<SequenceRNNOptions> CreateSequenceRNNOptions( + flatbuffers::FlatBufferBuilder &_fbb, bool time_major = false, + ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE, + bool asymmetric_quantize_inputs = false) +{ + SequenceRNNOptionsBuilder builder_(_fbb); + builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); + builder_.add_fused_activation_function(fused_activation_function); + builder_.add_time_major(time_major); + return builder_.Finish(); +} + +struct BidirectionalSequenceRNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_TIME_MAJOR = 4, + VT_FUSED_ACTIVATION_FUNCTION = 6, + VT_MERGE_OUTPUTS = 8, + VT_ASYMMETRIC_QUANTIZE_INPUTS = 10 + }; + bool time_major() const { return GetField<uint8_t>(VT_TIME_MAJOR, 0) != 0; } + ActivationFunctionType fused_activation_function() const + { + return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool merge_outputs() const { return GetField<uint8_t>(VT_MERGE_OUTPUTS, 0) != 0; } + bool asymmetric_quantize_inputs() const + { + return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_TIME_MAJOR) && + VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && + VerifyField<uint8_t>(verifier, VT_MERGE_OUTPUTS) && + VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable(); + } +}; + +struct BidirectionalSequenceRNNOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_time_major(bool time_major) + { + fbb_.AddElement<uint8_t>(BidirectionalSequenceRNNOptions::VT_TIME_MAJOR, + static_cast<uint8_t>(time_major), 0); + } + void 
add_fused_activation_function(ActivationFunctionType fused_activation_function) + { + fbb_.AddElement<int8_t>(BidirectionalSequenceRNNOptions::VT_FUSED_ACTIVATION_FUNCTION, + static_cast<int8_t>(fused_activation_function), 0); + } + void add_merge_outputs(bool merge_outputs) + { + fbb_.AddElement<uint8_t>(BidirectionalSequenceRNNOptions::VT_MERGE_OUTPUTS, + static_cast<uint8_t>(merge_outputs), 0); + } + void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) + { + fbb_.AddElement<uint8_t>(BidirectionalSequenceRNNOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, + static_cast<uint8_t>(asymmetric_quantize_inputs), 0); + } + explicit BidirectionalSequenceRNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + BidirectionalSequenceRNNOptionsBuilder &operator=(const BidirectionalSequenceRNNOptionsBuilder &); + flatbuffers::Offset<BidirectionalSequenceRNNOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<BidirectionalSequenceRNNOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<BidirectionalSequenceRNNOptions> CreateBidirectionalSequenceRNNOptions( + flatbuffers::FlatBufferBuilder &_fbb, bool time_major = false, + ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE, + bool merge_outputs = false, bool asymmetric_quantize_inputs = false) +{ + BidirectionalSequenceRNNOptionsBuilder builder_(_fbb); + builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); + builder_.add_merge_outputs(merge_outputs); + builder_.add_fused_activation_function(fused_activation_function); + builder_.add_time_major(time_major); + return builder_.Finish(); +} + +struct FullyConnectedOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_FUSED_ACTIVATION_FUNCTION = 4, + VT_WEIGHTS_FORMAT = 6, + VT_KEEP_NUM_DIMS = 8, + VT_ASYMMETRIC_QUANTIZE_INPUTS = 10 + }; + ActivationFunctionType fused_activation_function() const + { + 
return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + FullyConnectedOptionsWeightsFormat weights_format() const + { + return static_cast<FullyConnectedOptionsWeightsFormat>(GetField<int8_t>(VT_WEIGHTS_FORMAT, 0)); + } + bool keep_num_dims() const { return GetField<uint8_t>(VT_KEEP_NUM_DIMS, 0) != 0; } + bool asymmetric_quantize_inputs() const + { + return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && + VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && + VerifyField<int8_t>(verifier, VT_WEIGHTS_FORMAT) && + VerifyField<uint8_t>(verifier, VT_KEEP_NUM_DIMS) && + VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable(); + } +}; + +struct FullyConnectedOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_fused_activation_function(ActivationFunctionType fused_activation_function) + { + fbb_.AddElement<int8_t>(FullyConnectedOptions::VT_FUSED_ACTIVATION_FUNCTION, + static_cast<int8_t>(fused_activation_function), 0); + } + void add_weights_format(FullyConnectedOptionsWeightsFormat weights_format) + { + fbb_.AddElement<int8_t>(FullyConnectedOptions::VT_WEIGHTS_FORMAT, + static_cast<int8_t>(weights_format), 0); + } + void add_keep_num_dims(bool keep_num_dims) + { + fbb_.AddElement<uint8_t>(FullyConnectedOptions::VT_KEEP_NUM_DIMS, + static_cast<uint8_t>(keep_num_dims), 0); + } + void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) + { + fbb_.AddElement<uint8_t>(FullyConnectedOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, + static_cast<uint8_t>(asymmetric_quantize_inputs), 0); + } + explicit FullyConnectedOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + FullyConnectedOptionsBuilder &operator=(const FullyConnectedOptionsBuilder &); + 
flatbuffers::Offset<FullyConnectedOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<FullyConnectedOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<FullyConnectedOptions> CreateFullyConnectedOptions( + flatbuffers::FlatBufferBuilder &_fbb, + ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE, + FullyConnectedOptionsWeightsFormat weights_format = FullyConnectedOptionsWeightsFormat_DEFAULT, + bool keep_num_dims = false, bool asymmetric_quantize_inputs = false) +{ + FullyConnectedOptionsBuilder builder_(_fbb); + builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); + builder_.add_keep_num_dims(keep_num_dims); + builder_.add_weights_format(weights_format); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +struct SoftmaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_BETA = 4 + }; + float beta() const { return GetField<float>(VT_BETA, 0.0f); } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<float>(verifier, VT_BETA) && + verifier.EndTable(); + } +}; + +struct SoftmaxOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_beta(float beta) { fbb_.AddElement<float>(SoftmaxOptions::VT_BETA, beta, 0.0f); } + explicit SoftmaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + SoftmaxOptionsBuilder &operator=(const SoftmaxOptionsBuilder &); + flatbuffers::Offset<SoftmaxOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<SoftmaxOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<SoftmaxOptions> +CreateSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, float beta = 0.0f) +{ + SoftmaxOptionsBuilder builder_(_fbb); + builder_.add_beta(beta); + return builder_.Finish(); +} + +struct 
ConcatenationOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_AXIS = 4, + VT_FUSED_ACTIVATION_FUNCTION = 6 + }; + int32_t axis() const { return GetField<int32_t>(VT_AXIS, 0); } + ActivationFunctionType fused_activation_function() const + { + return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_AXIS) && + VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable(); + } +}; + +struct ConcatenationOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(int32_t axis) { fbb_.AddElement<int32_t>(ConcatenationOptions::VT_AXIS, axis, 0); } + void add_fused_activation_function(ActivationFunctionType fused_activation_function) + { + fbb_.AddElement<int8_t>(ConcatenationOptions::VT_FUSED_ACTIVATION_FUNCTION, + static_cast<int8_t>(fused_activation_function), 0); + } + explicit ConcatenationOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ConcatenationOptionsBuilder &operator=(const ConcatenationOptionsBuilder &); + flatbuffers::Offset<ConcatenationOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<ConcatenationOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<ConcatenationOptions> CreateConcatenationOptions( + flatbuffers::FlatBufferBuilder &_fbb, int32_t axis = 0, + ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE) +{ + ConcatenationOptionsBuilder builder_(_fbb); + builder_.add_axis(axis); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +struct AddOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_FUSED_ACTIVATION_FUNCTION = 4 + }; + ActivationFunctionType 
fused_activation_function() const + { + return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && + VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable(); + } +}; + +struct AddOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_fused_activation_function(ActivationFunctionType fused_activation_function) + { + fbb_.AddElement<int8_t>(AddOptions::VT_FUSED_ACTIVATION_FUNCTION, + static_cast<int8_t>(fused_activation_function), 0); + } + explicit AddOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + AddOptionsBuilder &operator=(const AddOptionsBuilder &); + flatbuffers::Offset<AddOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<AddOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<AddOptions> +CreateAddOptions(flatbuffers::FlatBufferBuilder &_fbb, + ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE) +{ + AddOptionsBuilder builder_(_fbb); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +struct MulOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_FUSED_ACTIVATION_FUNCTION = 4 + }; + ActivationFunctionType fused_activation_function() const + { + return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && + VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable(); + } +}; + +struct MulOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_fused_activation_function(ActivationFunctionType fused_activation_function) + { + 
fbb_.AddElement<int8_t>(MulOptions::VT_FUSED_ACTIVATION_FUNCTION, + static_cast<int8_t>(fused_activation_function), 0); + } + explicit MulOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + MulOptionsBuilder &operator=(const MulOptionsBuilder &); + flatbuffers::Offset<MulOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<MulOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<MulOptions> +CreateMulOptions(flatbuffers::FlatBufferBuilder &_fbb, + ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE) +{ + MulOptionsBuilder builder_(_fbb); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +struct L2NormOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_FUSED_ACTIVATION_FUNCTION = 4 + }; + ActivationFunctionType fused_activation_function() const + { + return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && + VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable(); + } +}; + +struct L2NormOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_fused_activation_function(ActivationFunctionType fused_activation_function) + { + fbb_.AddElement<int8_t>(L2NormOptions::VT_FUSED_ACTIVATION_FUNCTION, + static_cast<int8_t>(fused_activation_function), 0); + } + explicit L2NormOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + L2NormOptionsBuilder &operator=(const L2NormOptionsBuilder &); + flatbuffers::Offset<L2NormOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<L2NormOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<L2NormOptions> 
+CreateL2NormOptions(flatbuffers::FlatBufferBuilder &_fbb, + ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE) +{ + L2NormOptionsBuilder builder_(_fbb); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +struct LocalResponseNormalizationOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_RADIUS = 4, + VT_BIAS = 6, + VT_ALPHA = 8, + VT_BETA = 10 + }; + int32_t radius() const { return GetField<int32_t>(VT_RADIUS, 0); } + float bias() const { return GetField<float>(VT_BIAS, 0.0f); } + float alpha() const { return GetField<float>(VT_ALPHA, 0.0f); } + float beta() const { return GetField<float>(VT_BETA, 0.0f); } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_RADIUS) && + VerifyField<float>(verifier, VT_BIAS) && VerifyField<float>(verifier, VT_ALPHA) && + VerifyField<float>(verifier, VT_BETA) && verifier.EndTable(); + } +}; + +struct LocalResponseNormalizationOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_radius(int32_t radius) + { + fbb_.AddElement<int32_t>(LocalResponseNormalizationOptions::VT_RADIUS, radius, 0); + } + void add_bias(float bias) + { + fbb_.AddElement<float>(LocalResponseNormalizationOptions::VT_BIAS, bias, 0.0f); + } + void add_alpha(float alpha) + { + fbb_.AddElement<float>(LocalResponseNormalizationOptions::VT_ALPHA, alpha, 0.0f); + } + void add_beta(float beta) + { + fbb_.AddElement<float>(LocalResponseNormalizationOptions::VT_BETA, beta, 0.0f); + } + explicit LocalResponseNormalizationOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + LocalResponseNormalizationOptionsBuilder & + operator=(const LocalResponseNormalizationOptionsBuilder &); + flatbuffers::Offset<LocalResponseNormalizationOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + 
auto o = flatbuffers::Offset<LocalResponseNormalizationOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<LocalResponseNormalizationOptions> +CreateLocalResponseNormalizationOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t radius = 0, + float bias = 0.0f, float alpha = 0.0f, float beta = 0.0f) +{ + LocalResponseNormalizationOptionsBuilder builder_(_fbb); + builder_.add_beta(beta); + builder_.add_alpha(alpha); + builder_.add_bias(bias); + builder_.add_radius(radius); + return builder_.Finish(); +} + +struct LSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_FUSED_ACTIVATION_FUNCTION = 4, + VT_CELL_CLIP = 6, + VT_PROJ_CLIP = 8, + VT_KERNEL_TYPE = 10, + VT_ASYMMETRIC_QUANTIZE_INPUTS = 12 + }; + ActivationFunctionType fused_activation_function() const + { + return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + float cell_clip() const { return GetField<float>(VT_CELL_CLIP, 0.0f); } + float proj_clip() const { return GetField<float>(VT_PROJ_CLIP, 0.0f); } + LSTMKernelType kernel_type() const + { + return static_cast<LSTMKernelType>(GetField<int8_t>(VT_KERNEL_TYPE, 0)); + } + bool asymmetric_quantize_inputs() const + { + return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && + VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && + VerifyField<float>(verifier, VT_CELL_CLIP) && + VerifyField<float>(verifier, VT_PROJ_CLIP) && + VerifyField<int8_t>(verifier, VT_KERNEL_TYPE) && + VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable(); + } +}; + +struct LSTMOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_fused_activation_function(ActivationFunctionType fused_activation_function) + { + fbb_.AddElement<int8_t>(LSTMOptions::VT_FUSED_ACTIVATION_FUNCTION, + 
static_cast<int8_t>(fused_activation_function), 0); + } + void add_cell_clip(float cell_clip) + { + fbb_.AddElement<float>(LSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f); + } + void add_proj_clip(float proj_clip) + { + fbb_.AddElement<float>(LSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f); + } + void add_kernel_type(LSTMKernelType kernel_type) + { + fbb_.AddElement<int8_t>(LSTMOptions::VT_KERNEL_TYPE, static_cast<int8_t>(kernel_type), 0); + } + void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) + { + fbb_.AddElement<uint8_t>(LSTMOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, + static_cast<uint8_t>(asymmetric_quantize_inputs), 0); + } + explicit LSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + LSTMOptionsBuilder &operator=(const LSTMOptionsBuilder &); + flatbuffers::Offset<LSTMOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<LSTMOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<LSTMOptions> +CreateLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, + ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE, + float cell_clip = 0.0f, float proj_clip = 0.0f, + LSTMKernelType kernel_type = LSTMKernelType_FULL, + bool asymmetric_quantize_inputs = false) +{ + LSTMOptionsBuilder builder_(_fbb); + builder_.add_proj_clip(proj_clip); + builder_.add_cell_clip(cell_clip); + builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); + builder_.add_kernel_type(kernel_type); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +struct UnidirectionalSequenceLSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_FUSED_ACTIVATION_FUNCTION = 4, + VT_CELL_CLIP = 6, + VT_PROJ_CLIP = 8, + VT_TIME_MAJOR = 10, + VT_ASYMMETRIC_QUANTIZE_INPUTS = 12 + }; + ActivationFunctionType fused_activation_function() const + { + return 
static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + float cell_clip() const { return GetField<float>(VT_CELL_CLIP, 0.0f); } + float proj_clip() const { return GetField<float>(VT_PROJ_CLIP, 0.0f); } + bool time_major() const { return GetField<uint8_t>(VT_TIME_MAJOR, 0) != 0; } + bool asymmetric_quantize_inputs() const + { + return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && + VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && + VerifyField<float>(verifier, VT_CELL_CLIP) && + VerifyField<float>(verifier, VT_PROJ_CLIP) && + VerifyField<uint8_t>(verifier, VT_TIME_MAJOR) && + VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable(); + } +}; + +struct UnidirectionalSequenceLSTMOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_fused_activation_function(ActivationFunctionType fused_activation_function) + { + fbb_.AddElement<int8_t>(UnidirectionalSequenceLSTMOptions::VT_FUSED_ACTIVATION_FUNCTION, + static_cast<int8_t>(fused_activation_function), 0); + } + void add_cell_clip(float cell_clip) + { + fbb_.AddElement<float>(UnidirectionalSequenceLSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f); + } + void add_proj_clip(float proj_clip) + { + fbb_.AddElement<float>(UnidirectionalSequenceLSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f); + } + void add_time_major(bool time_major) + { + fbb_.AddElement<uint8_t>(UnidirectionalSequenceLSTMOptions::VT_TIME_MAJOR, + static_cast<uint8_t>(time_major), 0); + } + void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) + { + fbb_.AddElement<uint8_t>(UnidirectionalSequenceLSTMOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, + static_cast<uint8_t>(asymmetric_quantize_inputs), 0); + } + explicit UnidirectionalSequenceLSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) + { + start_ = 
fbb_.StartTable(); + } + UnidirectionalSequenceLSTMOptionsBuilder & + operator=(const UnidirectionalSequenceLSTMOptionsBuilder &); + flatbuffers::Offset<UnidirectionalSequenceLSTMOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<UnidirectionalSequenceLSTMOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<UnidirectionalSequenceLSTMOptions> +CreateUnidirectionalSequenceLSTMOptions( + flatbuffers::FlatBufferBuilder &_fbb, + ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE, + float cell_clip = 0.0f, float proj_clip = 0.0f, bool time_major = false, + bool asymmetric_quantize_inputs = false) +{ + UnidirectionalSequenceLSTMOptionsBuilder builder_(_fbb); + builder_.add_proj_clip(proj_clip); + builder_.add_cell_clip(cell_clip); + builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); + builder_.add_time_major(time_major); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +struct BidirectionalSequenceLSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_FUSED_ACTIVATION_FUNCTION = 4, + VT_CELL_CLIP = 6, + VT_PROJ_CLIP = 8, + VT_MERGE_OUTPUTS = 10, + VT_TIME_MAJOR = 12, + VT_ASYMMETRIC_QUANTIZE_INPUTS = 14 + }; + ActivationFunctionType fused_activation_function() const + { + return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + float cell_clip() const { return GetField<float>(VT_CELL_CLIP, 0.0f); } + float proj_clip() const { return GetField<float>(VT_PROJ_CLIP, 0.0f); } + bool merge_outputs() const { return GetField<uint8_t>(VT_MERGE_OUTPUTS, 0) != 0; } + bool time_major() const { return GetField<uint8_t>(VT_TIME_MAJOR, 1) != 0; } + bool asymmetric_quantize_inputs() const + { + return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && + 
VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && + VerifyField<float>(verifier, VT_CELL_CLIP) && + VerifyField<float>(verifier, VT_PROJ_CLIP) && + VerifyField<uint8_t>(verifier, VT_MERGE_OUTPUTS) && + VerifyField<uint8_t>(verifier, VT_TIME_MAJOR) && + VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable(); + } +}; + +struct BidirectionalSequenceLSTMOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_fused_activation_function(ActivationFunctionType fused_activation_function) + { + fbb_.AddElement<int8_t>(BidirectionalSequenceLSTMOptions::VT_FUSED_ACTIVATION_FUNCTION, + static_cast<int8_t>(fused_activation_function), 0); + } + void add_cell_clip(float cell_clip) + { + fbb_.AddElement<float>(BidirectionalSequenceLSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f); + } + void add_proj_clip(float proj_clip) + { + fbb_.AddElement<float>(BidirectionalSequenceLSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f); + } + void add_merge_outputs(bool merge_outputs) + { + fbb_.AddElement<uint8_t>(BidirectionalSequenceLSTMOptions::VT_MERGE_OUTPUTS, + static_cast<uint8_t>(merge_outputs), 0); + } + void add_time_major(bool time_major) + { + fbb_.AddElement<uint8_t>(BidirectionalSequenceLSTMOptions::VT_TIME_MAJOR, + static_cast<uint8_t>(time_major), 1); + } + void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) + { + fbb_.AddElement<uint8_t>(BidirectionalSequenceLSTMOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, + static_cast<uint8_t>(asymmetric_quantize_inputs), 0); + } + explicit BidirectionalSequenceLSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + BidirectionalSequenceLSTMOptionsBuilder & + operator=(const BidirectionalSequenceLSTMOptionsBuilder &); + flatbuffers::Offset<BidirectionalSequenceLSTMOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<BidirectionalSequenceLSTMOptions>(end); + 
return o; + } +}; + +inline flatbuffers::Offset<BidirectionalSequenceLSTMOptions> CreateBidirectionalSequenceLSTMOptions( + flatbuffers::FlatBufferBuilder &_fbb, + ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE, + float cell_clip = 0.0f, float proj_clip = 0.0f, bool merge_outputs = false, + bool time_major = true, bool asymmetric_quantize_inputs = false) +{ + BidirectionalSequenceLSTMOptionsBuilder builder_(_fbb); + builder_.add_proj_clip(proj_clip); + builder_.add_cell_clip(cell_clip); + builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); + builder_.add_time_major(time_major); + builder_.add_merge_outputs(merge_outputs); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +struct ResizeBilinearOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_ALIGN_CORNERS = 8, + VT_HALF_PIXEL_CENTERS = 10 + }; + bool align_corners() const { return GetField<uint8_t>(VT_ALIGN_CORNERS, 0) != 0; } + bool half_pixel_centers() const { return GetField<uint8_t>(VT_HALF_PIXEL_CENTERS, 0) != 0; } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_ALIGN_CORNERS) && + VerifyField<uint8_t>(verifier, VT_HALF_PIXEL_CENTERS) && verifier.EndTable(); + } +}; + +struct ResizeBilinearOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_align_corners(bool align_corners) + { + fbb_.AddElement<uint8_t>(ResizeBilinearOptions::VT_ALIGN_CORNERS, + static_cast<uint8_t>(align_corners), 0); + } + void add_half_pixel_centers(bool half_pixel_centers) + { + fbb_.AddElement<uint8_t>(ResizeBilinearOptions::VT_HALF_PIXEL_CENTERS, + static_cast<uint8_t>(half_pixel_centers), 0); + } + explicit ResizeBilinearOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ResizeBilinearOptionsBuilder &operator=(const 
ResizeBilinearOptionsBuilder &); + flatbuffers::Offset<ResizeBilinearOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<ResizeBilinearOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<ResizeBilinearOptions> +CreateResizeBilinearOptions(flatbuffers::FlatBufferBuilder &_fbb, bool align_corners = false, + bool half_pixel_centers = false) +{ + ResizeBilinearOptionsBuilder builder_(_fbb); + builder_.add_half_pixel_centers(half_pixel_centers); + builder_.add_align_corners(align_corners); + return builder_.Finish(); +} + +struct ResizeNearestNeighborOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_ALIGN_CORNERS = 4 + }; + bool align_corners() const { return GetField<uint8_t>(VT_ALIGN_CORNERS, 0) != 0; } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_ALIGN_CORNERS) && + verifier.EndTable(); + } +}; + +struct ResizeNearestNeighborOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_align_corners(bool align_corners) + { + fbb_.AddElement<uint8_t>(ResizeNearestNeighborOptions::VT_ALIGN_CORNERS, + static_cast<uint8_t>(align_corners), 0); + } + explicit ResizeNearestNeighborOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ResizeNearestNeighborOptionsBuilder &operator=(const ResizeNearestNeighborOptionsBuilder &); + flatbuffers::Offset<ResizeNearestNeighborOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<ResizeNearestNeighborOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<ResizeNearestNeighborOptions> +CreateResizeNearestNeighborOptions(flatbuffers::FlatBufferBuilder &_fbb, bool align_corners = false) +{ + ResizeNearestNeighborOptionsBuilder builder_(_fbb); + builder_.add_align_corners(align_corners); + return builder_.Finish(); +} + +struct 
CallOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+  enum
+  {
+    // vtable slot of the `subgraph` field (FlatBuffers field offsets start at 4).
+    VT_SUBGRAPH = 4
+  };
+  // Reads the stored subgraph index; returns 0 when the field is absent from the table.
+  // NOTE(review): presumably the index of the callee subgraph in the model — confirm against the schema.
+  uint32_t subgraph() const { return GetField<uint32_t>(VT_SUBGRAPH, 0); }
+  // Structurally validates this table (start marker, field bounds, end marker) against the buffer.
+  bool Verify(flatbuffers::Verifier &verifier) const
+  {
+    return VerifyTableStart(verifier) && VerifyField<uint32_t>(verifier, VT_SUBGRAPH) &&
+           verifier.EndTable();
+  }
+};
+
+// Incremental writer for a CallOptions table: StartTable() in the ctor, optional
+// field adders, then Finish() to close the table and obtain its offset.
+struct CallOptionsBuilder
+{
+  flatbuffers::FlatBufferBuilder &fbb_;   // underlying buffer being written
+  flatbuffers::uoffset_t start_;          // table start returned by StartTable()
+  // Writes the VT_SUBGRAPH field; a value equal to the default (0) is elided by AddElement.
+  void add_subgraph(uint32_t subgraph)
+  {
+    fbb_.AddElement<uint32_t>(CallOptions::VT_SUBGRAPH, subgraph, 0);
+  }
+  explicit CallOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+  {
+    start_ = fbb_.StartTable();
+  }
+  // Declared without a definition — NOTE(review): presumably the usual flatc idiom to disable assignment.
+  CallOptionsBuilder &operator=(const CallOptionsBuilder &);
+  // Ends the table and returns its offset within the buffer.
+  flatbuffers::Offset<CallOptions> Finish()
+  {
+    const auto end = fbb_.EndTable(start_);
+    auto o = flatbuffers::Offset<CallOptions>(end);
+    return o;
+  }
+};
+
+// Convenience one-shot constructor for a CallOptions table.
+inline flatbuffers::Offset<CallOptions> CreateCallOptions(flatbuffers::FlatBufferBuilder &_fbb,
+                                                          uint32_t subgraph = 0)
+{
+  CallOptionsBuilder builder_(_fbb);
+  builder_.add_subgraph(subgraph);
+  return builder_.Finish();
+}
+
+// PadOptions carries no fields; Verify() only checks table framing.
+struct PadOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+  bool Verify(flatbuffers::Verifier &verifier) const
+  {
+    return VerifyTableStart(verifier) && verifier.EndTable();
+  }
+};
+
+// Builder for the field-less PadOptions table.
+struct PadOptionsBuilder
+{
+  flatbuffers::FlatBufferBuilder &fbb_;
+  flatbuffers::uoffset_t start_;
+  explicit PadOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+  {
+    start_ = fbb_.StartTable();
+  }
+  // Declared without a definition — NOTE(review): presumably the usual flatc idiom to disable assignment.
+  PadOptionsBuilder &operator=(const PadOptionsBuilder &);
+  flatbuffers::Offset<PadOptions> Finish()
+  {
+    const auto end = fbb_.EndTable(start_);
+    auto o = flatbuffers::Offset<PadOptions>(end);
+    return o;
+  }
+};
+
+// Creates an empty PadOptions table.
+inline flatbuffers::Offset<PadOptions> CreatePadOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+  PadOptionsBuilder builder_(_fbb);
+  return builder_.Finish();
+}
+
+struct PadV2Options FLATBUFFERS_FINAL_CLASS :
private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct PadV2OptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit PadV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + PadV2OptionsBuilder &operator=(const PadV2OptionsBuilder &); + flatbuffers::Offset<PadV2Options> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<PadV2Options>(end); + return o; + } +}; + +inline flatbuffers::Offset<PadV2Options> CreatePadV2Options(flatbuffers::FlatBufferBuilder &_fbb) +{ + PadV2OptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct ReshapeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_NEW_SHAPE = 4 + }; + const flatbuffers::Vector<int32_t> *new_shape() const + { + return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_NEW_SHAPE); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_NEW_SHAPE) && + verifier.VerifyVector(new_shape()) && verifier.EndTable(); + } +}; + +struct ReshapeOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_new_shape(flatbuffers::Offset<flatbuffers::Vector<int32_t>> new_shape) + { + fbb_.AddOffset(ReshapeOptions::VT_NEW_SHAPE, new_shape); + } + explicit ReshapeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ReshapeOptionsBuilder &operator=(const ReshapeOptionsBuilder &); + flatbuffers::Offset<ReshapeOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<ReshapeOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<ReshapeOptions> +CreateReshapeOptions(flatbuffers::FlatBufferBuilder &_fbb, + 
flatbuffers::Offset<flatbuffers::Vector<int32_t>> new_shape = 0) +{ + ReshapeOptionsBuilder builder_(_fbb); + builder_.add_new_shape(new_shape); + return builder_.Finish(); +} + +inline flatbuffers::Offset<ReshapeOptions> +CreateReshapeOptionsDirect(flatbuffers::FlatBufferBuilder &_fbb, + const std::vector<int32_t> *new_shape = nullptr) +{ + return circle::CreateReshapeOptions(_fbb, new_shape ? _fbb.CreateVector<int32_t>(*new_shape) : 0); +} + +struct SpaceToBatchNDOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct SpaceToBatchNDOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit SpaceToBatchNDOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + SpaceToBatchNDOptionsBuilder &operator=(const SpaceToBatchNDOptionsBuilder &); + flatbuffers::Offset<SpaceToBatchNDOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<SpaceToBatchNDOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<SpaceToBatchNDOptions> +CreateSpaceToBatchNDOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + SpaceToBatchNDOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct BatchToSpaceNDOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct BatchToSpaceNDOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit BatchToSpaceNDOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + BatchToSpaceNDOptionsBuilder &operator=(const BatchToSpaceNDOptionsBuilder &); + flatbuffers::Offset<BatchToSpaceNDOptions> Finish() + { + const auto end = 
fbb_.EndTable(start_); + auto o = flatbuffers::Offset<BatchToSpaceNDOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<BatchToSpaceNDOptions> +CreateBatchToSpaceNDOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + BatchToSpaceNDOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct SkipGramOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_NGRAM_SIZE = 4, + VT_MAX_SKIP_SIZE = 6, + VT_INCLUDE_ALL_NGRAMS = 8 + }; + int32_t ngram_size() const { return GetField<int32_t>(VT_NGRAM_SIZE, 0); } + int32_t max_skip_size() const { return GetField<int32_t>(VT_MAX_SKIP_SIZE, 0); } + bool include_all_ngrams() const { return GetField<uint8_t>(VT_INCLUDE_ALL_NGRAMS, 0) != 0; } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_NGRAM_SIZE) && + VerifyField<int32_t>(verifier, VT_MAX_SKIP_SIZE) && + VerifyField<uint8_t>(verifier, VT_INCLUDE_ALL_NGRAMS) && verifier.EndTable(); + } +}; + +struct SkipGramOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_ngram_size(int32_t ngram_size) + { + fbb_.AddElement<int32_t>(SkipGramOptions::VT_NGRAM_SIZE, ngram_size, 0); + } + void add_max_skip_size(int32_t max_skip_size) + { + fbb_.AddElement<int32_t>(SkipGramOptions::VT_MAX_SKIP_SIZE, max_skip_size, 0); + } + void add_include_all_ngrams(bool include_all_ngrams) + { + fbb_.AddElement<uint8_t>(SkipGramOptions::VT_INCLUDE_ALL_NGRAMS, + static_cast<uint8_t>(include_all_ngrams), 0); + } + explicit SkipGramOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + SkipGramOptionsBuilder &operator=(const SkipGramOptionsBuilder &); + flatbuffers::Offset<SkipGramOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<SkipGramOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<SkipGramOptions> 
+CreateSkipGramOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t ngram_size = 0, + int32_t max_skip_size = 0, bool include_all_ngrams = false) +{ + SkipGramOptionsBuilder builder_(_fbb); + builder_.add_max_skip_size(max_skip_size); + builder_.add_ngram_size(ngram_size); + builder_.add_include_all_ngrams(include_all_ngrams); + return builder_.Finish(); +} + +struct SpaceToDepthOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_BLOCK_SIZE = 4 + }; + int32_t block_size() const { return GetField<int32_t>(VT_BLOCK_SIZE, 0); } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_BLOCK_SIZE) && + verifier.EndTable(); + } +}; + +struct SpaceToDepthOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_block_size(int32_t block_size) + { + fbb_.AddElement<int32_t>(SpaceToDepthOptions::VT_BLOCK_SIZE, block_size, 0); + } + explicit SpaceToDepthOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + SpaceToDepthOptionsBuilder &operator=(const SpaceToDepthOptionsBuilder &); + flatbuffers::Offset<SpaceToDepthOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<SpaceToDepthOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<SpaceToDepthOptions> +CreateSpaceToDepthOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t block_size = 0) +{ + SpaceToDepthOptionsBuilder builder_(_fbb); + builder_.add_block_size(block_size); + return builder_.Finish(); +} + +struct DepthToSpaceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_BLOCK_SIZE = 4 + }; + int32_t block_size() const { return GetField<int32_t>(VT_BLOCK_SIZE, 0); } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_BLOCK_SIZE) && + verifier.EndTable(); + } +}; + 
+struct DepthToSpaceOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_block_size(int32_t block_size) + { + fbb_.AddElement<int32_t>(DepthToSpaceOptions::VT_BLOCK_SIZE, block_size, 0); + } + explicit DepthToSpaceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + DepthToSpaceOptionsBuilder &operator=(const DepthToSpaceOptionsBuilder &); + flatbuffers::Offset<DepthToSpaceOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<DepthToSpaceOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<DepthToSpaceOptions> +CreateDepthToSpaceOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t block_size = 0) +{ + DepthToSpaceOptionsBuilder builder_(_fbb); + builder_.add_block_size(block_size); + return builder_.Finish(); +} + +struct SubOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_FUSED_ACTIVATION_FUNCTION = 4 + }; + ActivationFunctionType fused_activation_function() const + { + return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && + VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable(); + } +}; + +struct SubOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_fused_activation_function(ActivationFunctionType fused_activation_function) + { + fbb_.AddElement<int8_t>(SubOptions::VT_FUSED_ACTIVATION_FUNCTION, + static_cast<int8_t>(fused_activation_function), 0); + } + explicit SubOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + SubOptionsBuilder &operator=(const SubOptionsBuilder &); + flatbuffers::Offset<SubOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<SubOptions>(end); + 
return o; + } +}; + +inline flatbuffers::Offset<SubOptions> +CreateSubOptions(flatbuffers::FlatBufferBuilder &_fbb, + ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE) +{ + SubOptionsBuilder builder_(_fbb); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +struct DivOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_FUSED_ACTIVATION_FUNCTION = 4 + }; + ActivationFunctionType fused_activation_function() const + { + return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && + VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable(); + } +}; + +struct DivOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_fused_activation_function(ActivationFunctionType fused_activation_function) + { + fbb_.AddElement<int8_t>(DivOptions::VT_FUSED_ACTIVATION_FUNCTION, + static_cast<int8_t>(fused_activation_function), 0); + } + explicit DivOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + DivOptionsBuilder &operator=(const DivOptionsBuilder &); + flatbuffers::Offset<DivOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<DivOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<DivOptions> +CreateDivOptions(flatbuffers::FlatBufferBuilder &_fbb, + ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE) +{ + DivOptionsBuilder builder_(_fbb); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +struct TopKV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; 
+
+// Builder for the field-less TopKV2Options table.
+struct TopKV2OptionsBuilder
+{
+  flatbuffers::FlatBufferBuilder &fbb_;   // underlying buffer being written
+  flatbuffers::uoffset_t start_;          // table start returned by StartTable()
+  explicit TopKV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+  {
+    start_ = fbb_.StartTable();
+  }
+  // Declared without a definition — NOTE(review): presumably the usual flatc idiom to disable assignment.
+  TopKV2OptionsBuilder &operator=(const TopKV2OptionsBuilder &);
+  // Ends the table and returns its offset within the buffer.
+  flatbuffers::Offset<TopKV2Options> Finish()
+  {
+    const auto end = fbb_.EndTable(start_);
+    auto o = flatbuffers::Offset<TopKV2Options>(end);
+    return o;
+  }
+};
+
+// Creates an empty TopKV2Options table.
+inline flatbuffers::Offset<TopKV2Options> CreateTopKV2Options(flatbuffers::FlatBufferBuilder &_fbb)
+{
+  TopKV2OptionsBuilder builder_(_fbb);
+  return builder_.Finish();
+}
+
+// Deserialized view over an EmbeddingLookupSparseOptions table.
+struct EmbeddingLookupSparseOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+  enum
+  {
+    // vtable slot of the `combiner` field (FlatBuffers field offsets start at 4).
+    VT_COMBINER = 4
+  };
+  // Reads the combiner enum stored as int8; the 0 default maps to the enum's zero value.
+  CombinerType combiner() const
+  {
+    return static_cast<CombinerType>(GetField<int8_t>(VT_COMBINER, 0));
+  }
+  // Structurally validates this table (start marker, field bounds, end marker) against the buffer.
+  bool Verify(flatbuffers::Verifier &verifier) const
+  {
+    return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_COMBINER) &&
+           verifier.EndTable();
+  }
+};
+
+// Incremental writer for an EmbeddingLookupSparseOptions table.
+struct EmbeddingLookupSparseOptionsBuilder
+{
+  flatbuffers::FlatBufferBuilder &fbb_;
+  flatbuffers::uoffset_t start_;
+  // Writes the combiner enum as int8; a value equal to the default (0) is elided by AddElement.
+  void add_combiner(CombinerType combiner)
+  {
+    fbb_.AddElement<int8_t>(EmbeddingLookupSparseOptions::VT_COMBINER,
+                            static_cast<int8_t>(combiner), 0);
+  }
+  explicit EmbeddingLookupSparseOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+  {
+    start_ = fbb_.StartTable();
+  }
+  // Declared without a definition — NOTE(review): presumably the usual flatc idiom to disable assignment.
+  EmbeddingLookupSparseOptionsBuilder &operator=(const EmbeddingLookupSparseOptionsBuilder &);
+  flatbuffers::Offset<EmbeddingLookupSparseOptions> Finish()
+  {
+    const auto end = fbb_.EndTable(start_);
+    auto o = flatbuffers::Offset<EmbeddingLookupSparseOptions>(end);
+    return o;
+  }
+};
+
+// Convenience one-shot constructor for an EmbeddingLookupSparseOptions table.
+inline flatbuffers::Offset<EmbeddingLookupSparseOptions>
+CreateEmbeddingLookupSparseOptions(flatbuffers::FlatBufferBuilder &_fbb,
+                                   CombinerType combiner = CombinerType_SUM)
+{
+  EmbeddingLookupSparseOptionsBuilder builder_(_fbb);
+
builder_.add_combiner(combiner); + return builder_.Finish(); +} + +struct GatherOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_AXIS = 4 + }; + int32_t axis() const { return GetField<int32_t>(VT_AXIS, 0); } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_AXIS) && + verifier.EndTable(); + } +}; + +struct GatherOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(int32_t axis) { fbb_.AddElement<int32_t>(GatherOptions::VT_AXIS, axis, 0); } + explicit GatherOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + GatherOptionsBuilder &operator=(const GatherOptionsBuilder &); + flatbuffers::Offset<GatherOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<GatherOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<GatherOptions> CreateGatherOptions(flatbuffers::FlatBufferBuilder &_fbb, + int32_t axis = 0) +{ + GatherOptionsBuilder builder_(_fbb); + builder_.add_axis(axis); + return builder_.Finish(); +} + +struct TransposeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct TransposeOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit TransposeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + TransposeOptionsBuilder &operator=(const TransposeOptionsBuilder &); + flatbuffers::Offset<TransposeOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<TransposeOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<TransposeOptions> +CreateTransposeOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + 
TransposeOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct ExpOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct ExpOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit ExpOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ExpOptionsBuilder &operator=(const ExpOptionsBuilder &); + flatbuffers::Offset<ExpOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<ExpOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<ExpOptions> CreateExpOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + ExpOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct CosOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct CosOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit CosOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + CosOptionsBuilder &operator=(const CosOptionsBuilder &); + flatbuffers::Offset<CosOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<CosOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<CosOptions> CreateCosOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + CosOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct ReducerOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_KEEP_DIMS = 4 + }; + bool keep_dims() const { return GetField<uint8_t>(VT_KEEP_DIMS, 0) != 0; } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && 
VerifyField<uint8_t>(verifier, VT_KEEP_DIMS) && + verifier.EndTable(); + } +}; + +struct ReducerOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_keep_dims(bool keep_dims) + { + fbb_.AddElement<uint8_t>(ReducerOptions::VT_KEEP_DIMS, static_cast<uint8_t>(keep_dims), 0); + } + explicit ReducerOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ReducerOptionsBuilder &operator=(const ReducerOptionsBuilder &); + flatbuffers::Offset<ReducerOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<ReducerOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<ReducerOptions> +CreateReducerOptions(flatbuffers::FlatBufferBuilder &_fbb, bool keep_dims = false) +{ + ReducerOptionsBuilder builder_(_fbb); + builder_.add_keep_dims(keep_dims); + return builder_.Finish(); +} + +struct SqueezeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_SQUEEZE_DIMS = 4 + }; + const flatbuffers::Vector<int32_t> *squeeze_dims() const + { + return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_SQUEEZE_DIMS); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_SQUEEZE_DIMS) && + verifier.VerifyVector(squeeze_dims()) && verifier.EndTable(); + } +}; + +struct SqueezeOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_squeeze_dims(flatbuffers::Offset<flatbuffers::Vector<int32_t>> squeeze_dims) + { + fbb_.AddOffset(SqueezeOptions::VT_SQUEEZE_DIMS, squeeze_dims); + } + explicit SqueezeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + SqueezeOptionsBuilder &operator=(const SqueezeOptionsBuilder &); + flatbuffers::Offset<SqueezeOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<SqueezeOptions>(end); 
+ return o; + } +}; + +inline flatbuffers::Offset<SqueezeOptions> +CreateSqueezeOptions(flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset<flatbuffers::Vector<int32_t>> squeeze_dims = 0) +{ + SqueezeOptionsBuilder builder_(_fbb); + builder_.add_squeeze_dims(squeeze_dims); + return builder_.Finish(); +} + +inline flatbuffers::Offset<SqueezeOptions> +CreateSqueezeOptionsDirect(flatbuffers::FlatBufferBuilder &_fbb, + const std::vector<int32_t> *squeeze_dims = nullptr) +{ + return circle::CreateSqueezeOptions(_fbb, + squeeze_dims ? _fbb.CreateVector<int32_t>(*squeeze_dims) : 0); +} + +struct SplitOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_NUM_SPLITS = 4 + }; + int32_t num_splits() const { return GetField<int32_t>(VT_NUM_SPLITS, 0); } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_NUM_SPLITS) && + verifier.EndTable(); + } +}; + +struct SplitOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_num_splits(int32_t num_splits) + { + fbb_.AddElement<int32_t>(SplitOptions::VT_NUM_SPLITS, num_splits, 0); + } + explicit SplitOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + SplitOptionsBuilder &operator=(const SplitOptionsBuilder &); + flatbuffers::Offset<SplitOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<SplitOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<SplitOptions> CreateSplitOptions(flatbuffers::FlatBufferBuilder &_fbb, + int32_t num_splits = 0) +{ + SplitOptionsBuilder builder_(_fbb); + builder_.add_num_splits(num_splits); + return builder_.Finish(); +} + +struct SplitVOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_NUM_SPLITS = 4 + }; + int32_t num_splits() const { return GetField<int32_t>(VT_NUM_SPLITS, 0); } + bool 
Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_NUM_SPLITS) && + verifier.EndTable(); + } +}; + +struct SplitVOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_num_splits(int32_t num_splits) + { + fbb_.AddElement<int32_t>(SplitVOptions::VT_NUM_SPLITS, num_splits, 0); + } + explicit SplitVOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + SplitVOptionsBuilder &operator=(const SplitVOptionsBuilder &); + flatbuffers::Offset<SplitVOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<SplitVOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<SplitVOptions> CreateSplitVOptions(flatbuffers::FlatBufferBuilder &_fbb, + int32_t num_splits = 0) +{ + SplitVOptionsBuilder builder_(_fbb); + builder_.add_num_splits(num_splits); + return builder_.Finish(); +} + +struct StridedSliceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_BEGIN_MASK = 4, + VT_END_MASK = 6, + VT_ELLIPSIS_MASK = 8, + VT_NEW_AXIS_MASK = 10, + VT_SHRINK_AXIS_MASK = 12 + }; + int32_t begin_mask() const { return GetField<int32_t>(VT_BEGIN_MASK, 0); } + int32_t end_mask() const { return GetField<int32_t>(VT_END_MASK, 0); } + int32_t ellipsis_mask() const { return GetField<int32_t>(VT_ELLIPSIS_MASK, 0); } + int32_t new_axis_mask() const { return GetField<int32_t>(VT_NEW_AXIS_MASK, 0); } + int32_t shrink_axis_mask() const { return GetField<int32_t>(VT_SHRINK_AXIS_MASK, 0); } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_BEGIN_MASK) && + VerifyField<int32_t>(verifier, VT_END_MASK) && + VerifyField<int32_t>(verifier, VT_ELLIPSIS_MASK) && + VerifyField<int32_t>(verifier, VT_NEW_AXIS_MASK) && + VerifyField<int32_t>(verifier, VT_SHRINK_AXIS_MASK) && verifier.EndTable(); + } 
+}; + +struct StridedSliceOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_begin_mask(int32_t begin_mask) + { + fbb_.AddElement<int32_t>(StridedSliceOptions::VT_BEGIN_MASK, begin_mask, 0); + } + void add_end_mask(int32_t end_mask) + { + fbb_.AddElement<int32_t>(StridedSliceOptions::VT_END_MASK, end_mask, 0); + } + void add_ellipsis_mask(int32_t ellipsis_mask) + { + fbb_.AddElement<int32_t>(StridedSliceOptions::VT_ELLIPSIS_MASK, ellipsis_mask, 0); + } + void add_new_axis_mask(int32_t new_axis_mask) + { + fbb_.AddElement<int32_t>(StridedSliceOptions::VT_NEW_AXIS_MASK, new_axis_mask, 0); + } + void add_shrink_axis_mask(int32_t shrink_axis_mask) + { + fbb_.AddElement<int32_t>(StridedSliceOptions::VT_SHRINK_AXIS_MASK, shrink_axis_mask, 0); + } + explicit StridedSliceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + StridedSliceOptionsBuilder &operator=(const StridedSliceOptionsBuilder &); + flatbuffers::Offset<StridedSliceOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<StridedSliceOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<StridedSliceOptions> +CreateStridedSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t begin_mask = 0, + int32_t end_mask = 0, int32_t ellipsis_mask = 0, + int32_t new_axis_mask = 0, int32_t shrink_axis_mask = 0) +{ + StridedSliceOptionsBuilder builder_(_fbb); + builder_.add_shrink_axis_mask(shrink_axis_mask); + builder_.add_new_axis_mask(new_axis_mask); + builder_.add_ellipsis_mask(ellipsis_mask); + builder_.add_end_mask(end_mask); + builder_.add_begin_mask(begin_mask); + return builder_.Finish(); +} + +struct LogSoftmaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct LogSoftmaxOptionsBuilder +{ + 
flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit LogSoftmaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + LogSoftmaxOptionsBuilder &operator=(const LogSoftmaxOptionsBuilder &); + flatbuffers::Offset<LogSoftmaxOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<LogSoftmaxOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<LogSoftmaxOptions> +CreateLogSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + LogSoftmaxOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct CastOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_IN_DATA_TYPE = 4, + VT_OUT_DATA_TYPE = 6 + }; + TensorType in_data_type() const + { + return static_cast<TensorType>(GetField<int8_t>(VT_IN_DATA_TYPE, 0)); + } + TensorType out_data_type() const + { + return static_cast<TensorType>(GetField<int8_t>(VT_OUT_DATA_TYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_IN_DATA_TYPE) && + VerifyField<int8_t>(verifier, VT_OUT_DATA_TYPE) && verifier.EndTable(); + } +}; + +struct CastOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_in_data_type(TensorType in_data_type) + { + fbb_.AddElement<int8_t>(CastOptions::VT_IN_DATA_TYPE, static_cast<int8_t>(in_data_type), 0); + } + void add_out_data_type(TensorType out_data_type) + { + fbb_.AddElement<int8_t>(CastOptions::VT_OUT_DATA_TYPE, static_cast<int8_t>(out_data_type), 0); + } + explicit CastOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + CastOptionsBuilder &operator=(const CastOptionsBuilder &); + flatbuffers::Offset<CastOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<CastOptions>(end); + return o; + } +}; + +inline 
flatbuffers::Offset<CastOptions> +CreateCastOptions(flatbuffers::FlatBufferBuilder &_fbb, + TensorType in_data_type = TensorType_FLOAT32, + TensorType out_data_type = TensorType_FLOAT32) +{ + CastOptionsBuilder builder_(_fbb); + builder_.add_out_data_type(out_data_type); + builder_.add_in_data_type(in_data_type); + return builder_.Finish(); +} + +struct DequantizeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct DequantizeOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit DequantizeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + DequantizeOptionsBuilder &operator=(const DequantizeOptionsBuilder &); + flatbuffers::Offset<DequantizeOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<DequantizeOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<DequantizeOptions> +CreateDequantizeOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + DequantizeOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct MaximumMinimumOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct MaximumMinimumOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit MaximumMinimumOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + MaximumMinimumOptionsBuilder &operator=(const MaximumMinimumOptionsBuilder &); + flatbuffers::Offset<MaximumMinimumOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<MaximumMinimumOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<MaximumMinimumOptions> 
+CreateMaximumMinimumOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + MaximumMinimumOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct TileOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct TileOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit TileOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + TileOptionsBuilder &operator=(const TileOptionsBuilder &); + flatbuffers::Offset<TileOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<TileOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<TileOptions> CreateTileOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + TileOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct ArgMaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_OUTPUT_TYPE = 4 + }; + TensorType output_type() const + { + return static_cast<TensorType>(GetField<int8_t>(VT_OUTPUT_TYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_OUTPUT_TYPE) && + verifier.EndTable(); + } +}; + +struct ArgMaxOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_output_type(TensorType output_type) + { + fbb_.AddElement<int8_t>(ArgMaxOptions::VT_OUTPUT_TYPE, static_cast<int8_t>(output_type), 0); + } + explicit ArgMaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ArgMaxOptionsBuilder &operator=(const ArgMaxOptionsBuilder &); + flatbuffers::Offset<ArgMaxOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<ArgMaxOptions>(end); + return o; + } +}; + +inline 
flatbuffers::Offset<ArgMaxOptions> +CreateArgMaxOptions(flatbuffers::FlatBufferBuilder &_fbb, + TensorType output_type = TensorType_FLOAT32) +{ + ArgMaxOptionsBuilder builder_(_fbb); + builder_.add_output_type(output_type); + return builder_.Finish(); +} + +struct ArgMinOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_OUTPUT_TYPE = 4 + }; + TensorType output_type() const + { + return static_cast<TensorType>(GetField<int8_t>(VT_OUTPUT_TYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_OUTPUT_TYPE) && + verifier.EndTable(); + } +}; + +struct ArgMinOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_output_type(TensorType output_type) + { + fbb_.AddElement<int8_t>(ArgMinOptions::VT_OUTPUT_TYPE, static_cast<int8_t>(output_type), 0); + } + explicit ArgMinOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ArgMinOptionsBuilder &operator=(const ArgMinOptionsBuilder &); + flatbuffers::Offset<ArgMinOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<ArgMinOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<ArgMinOptions> +CreateArgMinOptions(flatbuffers::FlatBufferBuilder &_fbb, + TensorType output_type = TensorType_FLOAT32) +{ + ArgMinOptionsBuilder builder_(_fbb); + builder_.add_output_type(output_type); + return builder_.Finish(); +} + +struct GreaterOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct GreaterOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit GreaterOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + GreaterOptionsBuilder 
&operator=(const GreaterOptionsBuilder &); + flatbuffers::Offset<GreaterOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<GreaterOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<GreaterOptions> +CreateGreaterOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + GreaterOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct GreaterEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct GreaterEqualOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit GreaterEqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + GreaterEqualOptionsBuilder &operator=(const GreaterEqualOptionsBuilder &); + flatbuffers::Offset<GreaterEqualOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<GreaterEqualOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<GreaterEqualOptions> +CreateGreaterEqualOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + GreaterEqualOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct LessOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct LessOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit LessOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + LessOptionsBuilder &operator=(const LessOptionsBuilder &); + flatbuffers::Offset<LessOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<LessOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<LessOptions> 
CreateLessOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + LessOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct LessEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct LessEqualOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit LessEqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + LessEqualOptionsBuilder &operator=(const LessEqualOptionsBuilder &); + flatbuffers::Offset<LessEqualOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<LessEqualOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<LessEqualOptions> +CreateLessEqualOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + LessEqualOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct NegOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct NegOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit NegOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + NegOptionsBuilder &operator=(const NegOptionsBuilder &); + flatbuffers::Offset<NegOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<NegOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<NegOptions> CreateNegOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + NegOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct SelectOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && 
verifier.EndTable(); + } +}; + +struct SelectOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit SelectOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + SelectOptionsBuilder &operator=(const SelectOptionsBuilder &); + flatbuffers::Offset<SelectOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<SelectOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<SelectOptions> CreateSelectOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + SelectOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct SliceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct SliceOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit SliceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + SliceOptionsBuilder &operator=(const SliceOptionsBuilder &); + flatbuffers::Offset<SliceOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<SliceOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<SliceOptions> CreateSliceOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + SliceOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct TransposeConvOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_PADDING = 4, + VT_STRIDE_W = 6, + VT_STRIDE_H = 8 + }; + Padding padding() const { return static_cast<Padding>(GetField<int8_t>(VT_PADDING, 0)); } + int32_t stride_w() const { return GetField<int32_t>(VT_STRIDE_W, 0); } + int32_t stride_h() const { return GetField<int32_t>(VT_STRIDE_H, 0); } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && 
VerifyField<int8_t>(verifier, VT_PADDING) && + VerifyField<int32_t>(verifier, VT_STRIDE_W) && + VerifyField<int32_t>(verifier, VT_STRIDE_H) && verifier.EndTable(); + } +}; + +struct TransposeConvOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_padding(Padding padding) + { + fbb_.AddElement<int8_t>(TransposeConvOptions::VT_PADDING, static_cast<int8_t>(padding), 0); + } + void add_stride_w(int32_t stride_w) + { + fbb_.AddElement<int32_t>(TransposeConvOptions::VT_STRIDE_W, stride_w, 0); + } + void add_stride_h(int32_t stride_h) + { + fbb_.AddElement<int32_t>(TransposeConvOptions::VT_STRIDE_H, stride_h, 0); + } + explicit TransposeConvOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + TransposeConvOptionsBuilder &operator=(const TransposeConvOptionsBuilder &); + flatbuffers::Offset<TransposeConvOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<TransposeConvOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<TransposeConvOptions> +CreateTransposeConvOptions(flatbuffers::FlatBufferBuilder &_fbb, Padding padding = Padding_SAME, + int32_t stride_w = 0, int32_t stride_h = 0) +{ + TransposeConvOptionsBuilder builder_(_fbb); + builder_.add_stride_h(stride_h); + builder_.add_stride_w(stride_w); + builder_.add_padding(padding); + return builder_.Finish(); +} + +struct ExpandDimsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct ExpandDimsOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit ExpandDimsOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ExpandDimsOptionsBuilder &operator=(const ExpandDimsOptionsBuilder &); + flatbuffers::Offset<ExpandDimsOptions> Finish() 
+ { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<ExpandDimsOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<ExpandDimsOptions> +CreateExpandDimsOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + ExpandDimsOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct SparseToDenseOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_VALIDATE_INDICES = 4 + }; + bool validate_indices() const { return GetField<uint8_t>(VT_VALIDATE_INDICES, 0) != 0; } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_VALIDATE_INDICES) && + verifier.EndTable(); + } +}; + +struct SparseToDenseOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_validate_indices(bool validate_indices) + { + fbb_.AddElement<uint8_t>(SparseToDenseOptions::VT_VALIDATE_INDICES, + static_cast<uint8_t>(validate_indices), 0); + } + explicit SparseToDenseOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + SparseToDenseOptionsBuilder &operator=(const SparseToDenseOptionsBuilder &); + flatbuffers::Offset<SparseToDenseOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<SparseToDenseOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<SparseToDenseOptions> +CreateSparseToDenseOptions(flatbuffers::FlatBufferBuilder &_fbb, bool validate_indices = false) +{ + SparseToDenseOptionsBuilder builder_(_fbb); + builder_.add_validate_indices(validate_indices); + return builder_.Finish(); +} + +struct EqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct EqualOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit 
EqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + EqualOptionsBuilder &operator=(const EqualOptionsBuilder &); + flatbuffers::Offset<EqualOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<EqualOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<EqualOptions> CreateEqualOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + EqualOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct NotEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct NotEqualOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit NotEqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + NotEqualOptionsBuilder &operator=(const NotEqualOptionsBuilder &); + flatbuffers::Offset<NotEqualOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<NotEqualOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<NotEqualOptions> +CreateNotEqualOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + NotEqualOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct ShapeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_OUT_TYPE = 4 + }; + TensorType out_type() const { return static_cast<TensorType>(GetField<int8_t>(VT_OUT_TYPE, 0)); } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_OUT_TYPE) && + verifier.EndTable(); + } +}; + +struct ShapeOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_out_type(TensorType out_type) + { + fbb_.AddElement<int8_t>(ShapeOptions::VT_OUT_TYPE, static_cast<int8_t>(out_type), 0); + } + 
explicit ShapeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ShapeOptionsBuilder &operator=(const ShapeOptionsBuilder &); + flatbuffers::Offset<ShapeOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<ShapeOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<ShapeOptions> +CreateShapeOptions(flatbuffers::FlatBufferBuilder &_fbb, TensorType out_type = TensorType_FLOAT32) +{ + ShapeOptionsBuilder builder_(_fbb); + builder_.add_out_type(out_type); + return builder_.Finish(); +} + +struct RankOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct RankOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit RankOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + RankOptionsBuilder &operator=(const RankOptionsBuilder &); + flatbuffers::Offset<RankOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<RankOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<RankOptions> CreateRankOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + RankOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct PowOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct PowOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit PowOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + PowOptionsBuilder &operator=(const PowOptionsBuilder &); + flatbuffers::Offset<PowOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = 
flatbuffers::Offset<PowOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<PowOptions> CreatePowOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + PowOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct FakeQuantOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_MIN = 4, + VT_MAX = 6, + VT_NUM_BITS = 8, + VT_NARROW_RANGE = 10 + }; + float min() const { return GetField<float>(VT_MIN, 0.0f); } + float max() const { return GetField<float>(VT_MAX, 0.0f); } + int32_t num_bits() const { return GetField<int32_t>(VT_NUM_BITS, 0); } + bool narrow_range() const { return GetField<uint8_t>(VT_NARROW_RANGE, 0) != 0; } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<float>(verifier, VT_MIN) && + VerifyField<float>(verifier, VT_MAX) && VerifyField<int32_t>(verifier, VT_NUM_BITS) && + VerifyField<uint8_t>(verifier, VT_NARROW_RANGE) && verifier.EndTable(); + } +}; + +struct FakeQuantOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_min(float min) { fbb_.AddElement<float>(FakeQuantOptions::VT_MIN, min, 0.0f); } + void add_max(float max) { fbb_.AddElement<float>(FakeQuantOptions::VT_MAX, max, 0.0f); } + void add_num_bits(int32_t num_bits) + { + fbb_.AddElement<int32_t>(FakeQuantOptions::VT_NUM_BITS, num_bits, 0); + } + void add_narrow_range(bool narrow_range) + { + fbb_.AddElement<uint8_t>(FakeQuantOptions::VT_NARROW_RANGE, static_cast<uint8_t>(narrow_range), + 0); + } + explicit FakeQuantOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + FakeQuantOptionsBuilder &operator=(const FakeQuantOptionsBuilder &); + flatbuffers::Offset<FakeQuantOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<FakeQuantOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<FakeQuantOptions> 
+CreateFakeQuantOptions(flatbuffers::FlatBufferBuilder &_fbb, float min = 0.0f, float max = 0.0f, + int32_t num_bits = 0, bool narrow_range = false) +{ + FakeQuantOptionsBuilder builder_(_fbb); + builder_.add_num_bits(num_bits); + builder_.add_max(max); + builder_.add_min(min); + builder_.add_narrow_range(narrow_range); + return builder_.Finish(); +} + +struct PackOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_VALUES_COUNT = 4, + VT_AXIS = 6 + }; + int32_t values_count() const { return GetField<int32_t>(VT_VALUES_COUNT, 0); } + int32_t axis() const { return GetField<int32_t>(VT_AXIS, 0); } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_VALUES_COUNT) && + VerifyField<int32_t>(verifier, VT_AXIS) && verifier.EndTable(); + } +}; + +struct PackOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_values_count(int32_t values_count) + { + fbb_.AddElement<int32_t>(PackOptions::VT_VALUES_COUNT, values_count, 0); + } + void add_axis(int32_t axis) { fbb_.AddElement<int32_t>(PackOptions::VT_AXIS, axis, 0); } + explicit PackOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + PackOptionsBuilder &operator=(const PackOptionsBuilder &); + flatbuffers::Offset<PackOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<PackOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<PackOptions> +CreatePackOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t values_count = 0, int32_t axis = 0) +{ + PackOptionsBuilder builder_(_fbb); + builder_.add_axis(axis); + builder_.add_values_count(values_count); + return builder_.Finish(); +} + +struct LogicalOrOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && 
verifier.EndTable(); + } +}; + +struct LogicalOrOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit LogicalOrOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + LogicalOrOptionsBuilder &operator=(const LogicalOrOptionsBuilder &); + flatbuffers::Offset<LogicalOrOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<LogicalOrOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<LogicalOrOptions> +CreateLogicalOrOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + LogicalOrOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct OneHotOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_AXIS = 4 + }; + int32_t axis() const { return GetField<int32_t>(VT_AXIS, 0); } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_AXIS) && + verifier.EndTable(); + } +}; + +struct OneHotOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(int32_t axis) { fbb_.AddElement<int32_t>(OneHotOptions::VT_AXIS, axis, 0); } + explicit OneHotOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + OneHotOptionsBuilder &operator=(const OneHotOptionsBuilder &); + flatbuffers::Offset<OneHotOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<OneHotOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<OneHotOptions> CreateOneHotOptions(flatbuffers::FlatBufferBuilder &_fbb, + int32_t axis = 0) +{ + OneHotOptionsBuilder builder_(_fbb); + builder_.add_axis(axis); + return builder_.Finish(); +} + +struct AbsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && 
verifier.EndTable(); + } +}; + +struct AbsOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit AbsOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + AbsOptionsBuilder &operator=(const AbsOptionsBuilder &); + flatbuffers::Offset<AbsOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<AbsOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<AbsOptions> CreateAbsOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + AbsOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct HardSwishOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct HardSwishOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit HardSwishOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + HardSwishOptionsBuilder &operator=(const HardSwishOptionsBuilder &); + flatbuffers::Offset<HardSwishOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<HardSwishOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<HardSwishOptions> +CreateHardSwishOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + HardSwishOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct LogicalAndOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct LogicalAndOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit LogicalAndOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + LogicalAndOptionsBuilder &operator=(const 
LogicalAndOptionsBuilder &); + flatbuffers::Offset<LogicalAndOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<LogicalAndOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<LogicalAndOptions> +CreateLogicalAndOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + LogicalAndOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct LogicalNotOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct LogicalNotOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit LogicalNotOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + LogicalNotOptionsBuilder &operator=(const LogicalNotOptionsBuilder &); + flatbuffers::Offset<LogicalNotOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<LogicalNotOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<LogicalNotOptions> +CreateLogicalNotOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + LogicalNotOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct UnpackOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_NUM = 4, + VT_AXIS = 6 + }; + int32_t num() const { return GetField<int32_t>(VT_NUM, 0); } + int32_t axis() const { return GetField<int32_t>(VT_AXIS, 0); } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_NUM) && + VerifyField<int32_t>(verifier, VT_AXIS) && verifier.EndTable(); + } +}; + +struct UnpackOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_num(int32_t num) { fbb_.AddElement<int32_t>(UnpackOptions::VT_NUM, num, 0); } + void add_axis(int32_t axis) { 
fbb_.AddElement<int32_t>(UnpackOptions::VT_AXIS, axis, 0); } + explicit UnpackOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + UnpackOptionsBuilder &operator=(const UnpackOptionsBuilder &); + flatbuffers::Offset<UnpackOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<UnpackOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<UnpackOptions> CreateUnpackOptions(flatbuffers::FlatBufferBuilder &_fbb, + int32_t num = 0, int32_t axis = 0) +{ + UnpackOptionsBuilder builder_(_fbb); + builder_.add_axis(axis); + builder_.add_num(num); + return builder_.Finish(); +} + +struct FloorDivOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct FloorDivOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit FloorDivOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + FloorDivOptionsBuilder &operator=(const FloorDivOptionsBuilder &); + flatbuffers::Offset<FloorDivOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<FloorDivOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<FloorDivOptions> +CreateFloorDivOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + FloorDivOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct SquareOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct SquareOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit SquareOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + SquareOptionsBuilder 
&operator=(const SquareOptionsBuilder &); + flatbuffers::Offset<SquareOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<SquareOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<SquareOptions> CreateSquareOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + SquareOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct ZerosLikeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct ZerosLikeOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit ZerosLikeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ZerosLikeOptionsBuilder &operator=(const ZerosLikeOptionsBuilder &); + flatbuffers::Offset<ZerosLikeOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<ZerosLikeOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<ZerosLikeOptions> +CreateZerosLikeOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + ZerosLikeOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct FillOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct FillOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit FillOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + FillOptionsBuilder &operator=(const FillOptionsBuilder &); + flatbuffers::Offset<FillOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<FillOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<FillOptions> CreateFillOptions(flatbuffers::FlatBufferBuilder &_fbb) 
+{ + FillOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct FloorModOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct FloorModOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit FloorModOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + FloorModOptionsBuilder &operator=(const FloorModOptionsBuilder &); + flatbuffers::Offset<FloorModOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<FloorModOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<FloorModOptions> +CreateFloorModOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + FloorModOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct RangeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct RangeOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit RangeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + RangeOptionsBuilder &operator=(const RangeOptionsBuilder &); + flatbuffers::Offset<RangeOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<RangeOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<RangeOptions> CreateRangeOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + RangeOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct LeakyReluOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_ALPHA = 4 + }; + float alpha() const { return GetField<float>(VT_ALPHA, 0.0f); } + bool Verify(flatbuffers::Verifier &verifier) const + { + 
return VerifyTableStart(verifier) && VerifyField<float>(verifier, VT_ALPHA) && + verifier.EndTable(); + } +}; + +struct LeakyReluOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_alpha(float alpha) { fbb_.AddElement<float>(LeakyReluOptions::VT_ALPHA, alpha, 0.0f); } + explicit LeakyReluOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + LeakyReluOptionsBuilder &operator=(const LeakyReluOptionsBuilder &); + flatbuffers::Offset<LeakyReluOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<LeakyReluOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<LeakyReluOptions> +CreateLeakyReluOptions(flatbuffers::FlatBufferBuilder &_fbb, float alpha = 0.0f) +{ + LeakyReluOptionsBuilder builder_(_fbb); + builder_.add_alpha(alpha); + return builder_.Finish(); +} + +struct SquaredDifferenceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct SquaredDifferenceOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit SquaredDifferenceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + SquaredDifferenceOptionsBuilder &operator=(const SquaredDifferenceOptionsBuilder &); + flatbuffers::Offset<SquaredDifferenceOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<SquaredDifferenceOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<SquaredDifferenceOptions> +CreateSquaredDifferenceOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + SquaredDifferenceOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct MirrorPadOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_MODE = 4 + }; + MirrorPadMode 
mode() const { return static_cast<MirrorPadMode>(GetField<int8_t>(VT_MODE, 0)); } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_MODE) && + verifier.EndTable(); + } +}; + +struct MirrorPadOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_mode(MirrorPadMode mode) + { + fbb_.AddElement<int8_t>(MirrorPadOptions::VT_MODE, static_cast<int8_t>(mode), 0); + } + explicit MirrorPadOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + MirrorPadOptionsBuilder &operator=(const MirrorPadOptionsBuilder &); + flatbuffers::Offset<MirrorPadOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<MirrorPadOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<MirrorPadOptions> +CreateMirrorPadOptions(flatbuffers::FlatBufferBuilder &_fbb, + MirrorPadMode mode = MirrorPadMode_REFLECT) +{ + MirrorPadOptionsBuilder builder_(_fbb); + builder_.add_mode(mode); + return builder_.Finish(); +} + +struct UniqueOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_IDX_OUT_TYPE = 4 + }; + TensorType idx_out_type() const + { + return static_cast<TensorType>(GetField<int8_t>(VT_IDX_OUT_TYPE, 2)); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_IDX_OUT_TYPE) && + verifier.EndTable(); + } +}; + +struct UniqueOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_idx_out_type(TensorType idx_out_type) + { + fbb_.AddElement<int8_t>(UniqueOptions::VT_IDX_OUT_TYPE, static_cast<int8_t>(idx_out_type), 2); + } + explicit UniqueOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + UniqueOptionsBuilder &operator=(const UniqueOptionsBuilder &); + 
flatbuffers::Offset<UniqueOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<UniqueOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<UniqueOptions> +CreateUniqueOptions(flatbuffers::FlatBufferBuilder &_fbb, + TensorType idx_out_type = TensorType_INT32) +{ + UniqueOptionsBuilder builder_(_fbb); + builder_.add_idx_out_type(idx_out_type); + return builder_.Finish(); +} + +struct ReverseV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct ReverseV2OptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit ReverseV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ReverseV2OptionsBuilder &operator=(const ReverseV2OptionsBuilder &); + flatbuffers::Offset<ReverseV2Options> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<ReverseV2Options>(end); + return o; + } +}; + +inline flatbuffers::Offset<ReverseV2Options> +CreateReverseV2Options(flatbuffers::FlatBufferBuilder &_fbb) +{ + ReverseV2OptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct AddNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct AddNOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit AddNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + AddNOptionsBuilder &operator=(const AddNOptionsBuilder &); + flatbuffers::Offset<AddNOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<AddNOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<AddNOptions> 
CreateAddNOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + AddNOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct GatherNdOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct GatherNdOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit GatherNdOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + GatherNdOptionsBuilder &operator=(const GatherNdOptionsBuilder &); + flatbuffers::Offset<GatherNdOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<GatherNdOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<GatherNdOptions> +CreateGatherNdOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + GatherNdOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct WhereOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct WhereOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit WhereOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + WhereOptionsBuilder &operator=(const WhereOptionsBuilder &); + flatbuffers::Offset<WhereOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<WhereOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<WhereOptions> CreateWhereOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + WhereOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct ReverseSequenceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_SEQ_DIM = 4, + VT_BATCH_DIM = 6 + }; + int32_t seq_dim() const { return 
GetField<int32_t>(VT_SEQ_DIM, 0); } + int32_t batch_dim() const { return GetField<int32_t>(VT_BATCH_DIM, 0); } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_SEQ_DIM) && + VerifyField<int32_t>(verifier, VT_BATCH_DIM) && verifier.EndTable(); + } +}; + +struct ReverseSequenceOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_seq_dim(int32_t seq_dim) + { + fbb_.AddElement<int32_t>(ReverseSequenceOptions::VT_SEQ_DIM, seq_dim, 0); + } + void add_batch_dim(int32_t batch_dim) + { + fbb_.AddElement<int32_t>(ReverseSequenceOptions::VT_BATCH_DIM, batch_dim, 0); + } + explicit ReverseSequenceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ReverseSequenceOptionsBuilder &operator=(const ReverseSequenceOptionsBuilder &); + flatbuffers::Offset<ReverseSequenceOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<ReverseSequenceOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<ReverseSequenceOptions> +CreateReverseSequenceOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t seq_dim = 0, + int32_t batch_dim = 0) +{ + ReverseSequenceOptionsBuilder builder_(_fbb); + builder_.add_batch_dim(batch_dim); + builder_.add_seq_dim(seq_dim); + return builder_.Finish(); +} + +struct MatrixDiagOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct MatrixDiagOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit MatrixDiagOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + MatrixDiagOptionsBuilder &operator=(const MatrixDiagOptionsBuilder &); + flatbuffers::Offset<MatrixDiagOptions> Finish() + { + const auto end 
= fbb_.EndTable(start_); + auto o = flatbuffers::Offset<MatrixDiagOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<MatrixDiagOptions> +CreateMatrixDiagOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + MatrixDiagOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct QuantizeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct QuantizeOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit QuantizeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + QuantizeOptionsBuilder &operator=(const QuantizeOptionsBuilder &); + flatbuffers::Offset<QuantizeOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<QuantizeOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<QuantizeOptions> +CreateQuantizeOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + QuantizeOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct MatrixSetDiagOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct MatrixSetDiagOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit MatrixSetDiagOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + MatrixSetDiagOptionsBuilder &operator=(const MatrixSetDiagOptionsBuilder &); + flatbuffers::Offset<MatrixSetDiagOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<MatrixSetDiagOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<MatrixSetDiagOptions> +CreateMatrixSetDiagOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + 
MatrixSetDiagOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct IfOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_THEN_SUBGRAPH_INDEX = 4, + VT_ELSE_SUBGRAPH_INDEX = 6 + }; + int32_t then_subgraph_index() const { return GetField<int32_t>(VT_THEN_SUBGRAPH_INDEX, 0); } + int32_t else_subgraph_index() const { return GetField<int32_t>(VT_ELSE_SUBGRAPH_INDEX, 0); } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_THEN_SUBGRAPH_INDEX) && + VerifyField<int32_t>(verifier, VT_ELSE_SUBGRAPH_INDEX) && verifier.EndTable(); + } +}; + +struct IfOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_then_subgraph_index(int32_t then_subgraph_index) + { + fbb_.AddElement<int32_t>(IfOptions::VT_THEN_SUBGRAPH_INDEX, then_subgraph_index, 0); + } + void add_else_subgraph_index(int32_t else_subgraph_index) + { + fbb_.AddElement<int32_t>(IfOptions::VT_ELSE_SUBGRAPH_INDEX, else_subgraph_index, 0); + } + explicit IfOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + IfOptionsBuilder &operator=(const IfOptionsBuilder &); + flatbuffers::Offset<IfOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<IfOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<IfOptions> CreateIfOptions(flatbuffers::FlatBufferBuilder &_fbb, + int32_t then_subgraph_index = 0, + int32_t else_subgraph_index = 0) +{ + IfOptionsBuilder builder_(_fbb); + builder_.add_else_subgraph_index(else_subgraph_index); + builder_.add_then_subgraph_index(then_subgraph_index); + return builder_.Finish(); +} + +struct WhileOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_COND_SUBGRAPH_INDEX = 4, + VT_BODY_SUBGRAPH_INDEX = 6 + }; + int32_t cond_subgraph_index() const { return GetField<int32_t>(VT_COND_SUBGRAPH_INDEX, 0); } 
// NOTE(review): automatically generated FlatBuffers code (circle schema).
// Do not edit by hand -- regenerate from the .fbs schema with flatc.
// Comments are the only additions in this chunk; all code tokens are unchanged.

// Tail of table WhileOptions (the table declaration starts above this chunk):
// second subgraph-index field accessor plus the Verify() hook called by
// flatbuffers::Verifier when validating a serialized buffer.
  int32_t body_subgraph_index() const { return GetField<int32_t>(VT_BODY_SUBGRAPH_INDEX, 0); }
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_COND_SUBGRAPH_INDEX) &&
           VerifyField<int32_t>(verifier, VT_BODY_SUBGRAPH_INDEX) && verifier.EndTable();
  }
};

// Builder companion for WhileOptions: appends both subgraph-index fields to a
// FlatBufferBuilder; Finish() seals the table and returns its offset.
struct WhileOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_cond_subgraph_index(int32_t cond_subgraph_index)
  {
    fbb_.AddElement<int32_t>(WhileOptions::VT_COND_SUBGRAPH_INDEX, cond_subgraph_index, 0);
  }
  void add_body_subgraph_index(int32_t body_subgraph_index)
  {
    fbb_.AddElement<int32_t>(WhileOptions::VT_BODY_SUBGRAPH_INDEX, body_subgraph_index, 0);
  }
  explicit WhileOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Copy-assignment declared but never defined: builders are non-assignable.
  WhileOptionsBuilder &operator=(const WhileOptionsBuilder &);
  flatbuffers::Offset<WhileOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<WhileOptions>(end);
    return o;
  }
};

// One-call factory for a WhileOptions table (both indices default to 0).
inline flatbuffers::Offset<WhileOptions> CreateWhileOptions(flatbuffers::FlatBufferBuilder &_fbb,
                                                            int32_t cond_subgraph_index = 0,
                                                            int32_t body_subgraph_index = 0)
{
  WhileOptionsBuilder builder_(_fbb);
  builder_.add_body_subgraph_index(body_subgraph_index);
  builder_.add_cond_subgraph_index(cond_subgraph_index);
  return builder_.Finish();
}

// NonMaxSuppressionV4Options: field-less options table -- Verify() only checks
// table framing. The same table/builder/factory triple repeats for every
// field-less options type below.
struct NonMaxSuppressionV4Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && verifier.EndTable();
  }
};

struct NonMaxSuppressionV4OptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  explicit NonMaxSuppressionV4OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  NonMaxSuppressionV4OptionsBuilder &operator=(const NonMaxSuppressionV4OptionsBuilder &);
  flatbuffers::Offset<NonMaxSuppressionV4Options> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<NonMaxSuppressionV4Options>(end);
    return o;
  }
};

inline flatbuffers::Offset<NonMaxSuppressionV4Options>
CreateNonMaxSuppressionV4Options(flatbuffers::FlatBufferBuilder &_fbb)
{
  NonMaxSuppressionV4OptionsBuilder builder_(_fbb);
  return builder_.Finish();
}

// NonMaxSuppressionV5Options: field-less options table.
struct NonMaxSuppressionV5Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && verifier.EndTable();
  }
};

struct NonMaxSuppressionV5OptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  explicit NonMaxSuppressionV5OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  NonMaxSuppressionV5OptionsBuilder &operator=(const NonMaxSuppressionV5OptionsBuilder &);
  flatbuffers::Offset<NonMaxSuppressionV5Options> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<NonMaxSuppressionV5Options>(end);
    return o;
  }
};

inline flatbuffers::Offset<NonMaxSuppressionV5Options>
CreateNonMaxSuppressionV5Options(flatbuffers::FlatBufferBuilder &_fbb)
{
  NonMaxSuppressionV5OptionsBuilder builder_(_fbb);
  return builder_.Finish();
}

// ScatterNdOptions: field-less options table.
struct ScatterNdOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && verifier.EndTable();
  }
};

struct ScatterNdOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  explicit ScatterNdOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  ScatterNdOptionsBuilder &operator=(const ScatterNdOptionsBuilder &);
  flatbuffers::Offset<ScatterNdOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<ScatterNdOptions>(end);
    return o;
  }
};

inline flatbuffers::Offset<ScatterNdOptions>
CreateScatterNdOptions(flatbuffers::FlatBufferBuilder &_fbb)
{
  ScatterNdOptionsBuilder builder_(_fbb);
  return builder_.Finish();
}

// SelectV2Options: field-less options table.
struct SelectV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && verifier.EndTable();
  }
};

struct SelectV2OptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  explicit SelectV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  SelectV2OptionsBuilder &operator=(const SelectV2OptionsBuilder &);
  flatbuffers::Offset<SelectV2Options> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<SelectV2Options>(end);
    return o;
  }
};

inline flatbuffers::Offset<SelectV2Options>
CreateSelectV2Options(flatbuffers::FlatBufferBuilder &_fbb)
{
  SelectV2OptionsBuilder builder_(_fbb);
  return builder_.Finish();
}

// DensifyOptions: field-less options table.
struct DensifyOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && verifier.EndTable();
  }
};

struct DensifyOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  explicit DensifyOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  DensifyOptionsBuilder &operator=(const DensifyOptionsBuilder &);
  flatbuffers::Offset<DensifyOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<DensifyOptions>(end);
    return o;
  }
};

inline flatbuffers::Offset<DensifyOptions>
CreateDensifyOptions(flatbuffers::FlatBufferBuilder &_fbb)
{
  DensifyOptionsBuilder builder_(_fbb);
  return builder_.Finish();
}

// NOTE(review): automatically generated FlatBuffers code (circle schema).
// Do not edit by hand -- regenerate from the .fbs schema with flatc.
// Comments are the only additions in this chunk; all code tokens are unchanged.

// SegmentSumOptions: field-less options table; Verify() only checks table framing.
struct SegmentSumOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && verifier.EndTable();
  }
};

struct SegmentSumOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  explicit SegmentSumOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  SegmentSumOptionsBuilder &operator=(const SegmentSumOptionsBuilder &);
  flatbuffers::Offset<SegmentSumOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<SegmentSumOptions>(end);
    return o;
  }
};

inline flatbuffers::Offset<SegmentSumOptions>
CreateSegmentSumOptions(flatbuffers::FlatBufferBuilder &_fbb)
{
  SegmentSumOptionsBuilder builder_(_fbb);
  return builder_.Finish();
}

// BatchMatMulOptions: two bool flags, each stored as a uint8_t vtable field
// (flatbuffers has no native bool storage; 0 is the default/absent value).
struct BatchMatMulOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  enum
  {
    VT_ADJOINT_LHS = 4,
    VT_ADJOINT_RHS = 6
  };
  bool adjoint_lhs() const { return GetField<uint8_t>(VT_ADJOINT_LHS, 0) != 0; }
  bool adjoint_rhs() const { return GetField<uint8_t>(VT_ADJOINT_RHS, 0) != 0; }
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_ADJOINT_LHS) &&
           VerifyField<uint8_t>(verifier, VT_ADJOINT_RHS) && verifier.EndTable();
  }
};

struct BatchMatMulOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_adjoint_lhs(bool adjoint_lhs)
  {
    fbb_.AddElement<uint8_t>(BatchMatMulOptions::VT_ADJOINT_LHS, static_cast<uint8_t>(adjoint_lhs),
                             0);
  }
  void add_adjoint_rhs(bool adjoint_rhs)
  {
    fbb_.AddElement<uint8_t>(BatchMatMulOptions::VT_ADJOINT_RHS, static_cast<uint8_t>(adjoint_rhs),
                             0);
  }
  explicit BatchMatMulOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  BatchMatMulOptionsBuilder &operator=(const BatchMatMulOptionsBuilder &);
  flatbuffers::Offset<BatchMatMulOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<BatchMatMulOptions>(end);
    return o;
  }
};

inline flatbuffers::Offset<BatchMatMulOptions>
CreateBatchMatMulOptions(flatbuffers::FlatBufferBuilder &_fbb, bool adjoint_lhs = false,
                         bool adjoint_rhs = false)
{
  BatchMatMulOptionsBuilder builder_(_fbb);
  builder_.add_adjoint_rhs(adjoint_rhs);
  builder_.add_adjoint_lhs(adjoint_lhs);
  return builder_.Finish();
}

// InstanceNormOptions: float epsilon plus a fused activation function stored
// as an int8_t-backed enum.
struct InstanceNormOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  enum
  {
    VT_EPSILON = 4,
    VT_FUSED_ACTIVATION_FUNCTION = 6
  };
  float epsilon() const { return GetField<float>(VT_EPSILON, 0.0f); }
  ActivationFunctionType fused_activation_function() const
  {
    return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
  }
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && VerifyField<float>(verifier, VT_EPSILON) &&
           VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
  }
};

struct InstanceNormOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_epsilon(float epsilon)
  {
    fbb_.AddElement<float>(InstanceNormOptions::VT_EPSILON, epsilon, 0.0f);
  }
  void add_fused_activation_function(ActivationFunctionType fused_activation_function)
  {
    fbb_.AddElement<int8_t>(InstanceNormOptions::VT_FUSED_ACTIVATION_FUNCTION,
                            static_cast<int8_t>(fused_activation_function), 0);
  }
  explicit InstanceNormOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  InstanceNormOptionsBuilder &operator=(const InstanceNormOptionsBuilder &);
  flatbuffers::Offset<InstanceNormOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<InstanceNormOptions>(end);
    return o;
  }
};

inline flatbuffers::Offset<InstanceNormOptions> CreateInstanceNormOptions(
    flatbuffers::FlatBufferBuilder &_fbb, float epsilon = 0.0f,
    ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE)
{
  InstanceNormOptionsBuilder builder_(_fbb);
  builder_.add_epsilon(epsilon);
  builder_.add_fused_activation_function(fused_activation_function);
  return builder_.Finish();
}

// OperatorCode: identifies an operator kind -- a builtin code (uint8_t-backed
// enum), an optional custom-code string, and a schema version defaulting to 1.
struct OperatorCode FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  enum
  {
    VT_BUILTIN_CODE = 4,
    VT_CUSTOM_CODE = 6,
    VT_VERSION = 8
  };
  BuiltinOperator builtin_code() const
  {
    return static_cast<BuiltinOperator>(GetField<uint8_t>(VT_BUILTIN_CODE, 0));
  }
  const flatbuffers::String *custom_code() const
  {
    return GetPointer<const flatbuffers::String *>(VT_CUSTOM_CODE);
  }
  int32_t version() const { return GetField<int32_t>(VT_VERSION, 1); }
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_BUILTIN_CODE) &&
           VerifyOffset(verifier, VT_CUSTOM_CODE) && verifier.VerifyString(custom_code()) &&
           VerifyField<int32_t>(verifier, VT_VERSION) && verifier.EndTable();
  }
};

struct OperatorCodeBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_builtin_code(BuiltinOperator builtin_code)
  {
    fbb_.AddElement<uint8_t>(OperatorCode::VT_BUILTIN_CODE, static_cast<uint8_t>(builtin_code), 0);
  }
  void add_custom_code(flatbuffers::Offset<flatbuffers::String> custom_code)
  {
    fbb_.AddOffset(OperatorCode::VT_CUSTOM_CODE, custom_code);
  }
  void add_version(int32_t version)
  {
    fbb_.AddElement<int32_t>(OperatorCode::VT_VERSION, version, 1);
  }
  explicit OperatorCodeBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  OperatorCodeBuilder &operator=(const OperatorCodeBuilder &);
  flatbuffers::Offset<OperatorCode> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o =
// (continuation of OperatorCodeBuilder::Finish, split across the chunk boundary)
flatbuffers::Offset<OperatorCode>(end);
    return o;
  }
};

// One-call factory for an OperatorCode table.
inline flatbuffers::Offset<OperatorCode>
CreateOperatorCode(flatbuffers::FlatBufferBuilder &_fbb,
                   BuiltinOperator builtin_code = BuiltinOperator_ADD,
                   flatbuffers::Offset<flatbuffers::String> custom_code = 0, int32_t version = 1)
{
  OperatorCodeBuilder builder_(_fbb);
  builder_.add_version(version);
  builder_.add_custom_code(custom_code);
  builder_.add_builtin_code(builtin_code);
  return builder_.Finish();
}

// Convenience overload taking a C string for custom_code (0 offset when null).
inline flatbuffers::Offset<OperatorCode>
CreateOperatorCodeDirect(flatbuffers::FlatBufferBuilder &_fbb,
                         BuiltinOperator builtin_code = BuiltinOperator_ADD,
                         const char *custom_code = nullptr, int32_t version = 1)
{
  return circle::CreateOperatorCode(_fbb, builtin_code,
                                    custom_code ? _fbb.CreateString(custom_code) : 0, version);
}

// Operator: one node of the model graph -- an index into the operator-code
// table, input/output tensor indices, and a BuiltinOptions union carrying the
// per-operator parameters. The builtin_options_as_* accessors below perform a
// checked downcast of the union payload: each returns nullptr unless
// builtin_options_type() matches the requested options type.
struct Operator FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  enum
  {
    VT_OPCODE_INDEX = 4,
    VT_INPUTS = 6,
    VT_OUTPUTS = 8,
    VT_BUILTIN_OPTIONS_TYPE = 10,
    VT_BUILTIN_OPTIONS = 12,
    VT_CUSTOM_OPTIONS = 14,
    VT_CUSTOM_OPTIONS_FORMAT = 16,
    VT_MUTATING_VARIABLE_INPUTS = 18,
    VT_INTERMEDIATES = 20
  };
  uint32_t opcode_index() const { return GetField<uint32_t>(VT_OPCODE_INDEX, 0); }
  const flatbuffers::Vector<int32_t> *inputs() const
  {
    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_INPUTS);
  }
  const flatbuffers::Vector<int32_t> *outputs() const
  {
    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_OUTPUTS);
  }
  BuiltinOptions builtin_options_type() const
  {
    return static_cast<BuiltinOptions>(GetField<uint8_t>(VT_BUILTIN_OPTIONS_TYPE, 0));
  }
  const void *builtin_options() const { return GetPointer<const void *>(VT_BUILTIN_OPTIONS); }
  template <typename T> const T *builtin_options_as() const;
  const Conv2DOptions *builtin_options_as_Conv2DOptions() const
  {
    return builtin_options_type() == BuiltinOptions_Conv2DOptions
               ? static_cast<const Conv2DOptions *>(builtin_options())
               : nullptr;
  }
  const DepthwiseConv2DOptions *builtin_options_as_DepthwiseConv2DOptions() const
  {
    return builtin_options_type() == BuiltinOptions_DepthwiseConv2DOptions
               ? static_cast<const DepthwiseConv2DOptions *>(builtin_options())
               : nullptr;
  }
  const ConcatEmbeddingsOptions *builtin_options_as_ConcatEmbeddingsOptions() const
  {
    return builtin_options_type() == BuiltinOptions_ConcatEmbeddingsOptions
               ? static_cast<const ConcatEmbeddingsOptions *>(builtin_options())
               : nullptr;
  }
  const LSHProjectionOptions *builtin_options_as_LSHProjectionOptions() const
  {
    return builtin_options_type() == BuiltinOptions_LSHProjectionOptions
               ? static_cast<const LSHProjectionOptions *>(builtin_options())
               : nullptr;
  }
  const Pool2DOptions *builtin_options_as_Pool2DOptions() const
  {
    return builtin_options_type() == BuiltinOptions_Pool2DOptions
               ? static_cast<const Pool2DOptions *>(builtin_options())
               : nullptr;
  }
  const SVDFOptions *builtin_options_as_SVDFOptions() const
  {
    return builtin_options_type() == BuiltinOptions_SVDFOptions
               ? static_cast<const SVDFOptions *>(builtin_options())
               : nullptr;
  }
  const RNNOptions *builtin_options_as_RNNOptions() const
  {
    return builtin_options_type() == BuiltinOptions_RNNOptions
               ? static_cast<const RNNOptions *>(builtin_options())
               : nullptr;
  }
  const FullyConnectedOptions *builtin_options_as_FullyConnectedOptions() const
  {
    return builtin_options_type() == BuiltinOptions_FullyConnectedOptions
               ? static_cast<const FullyConnectedOptions *>(builtin_options())
               : nullptr;
  }
  const SoftmaxOptions *builtin_options_as_SoftmaxOptions() const
  {
    return builtin_options_type() == BuiltinOptions_SoftmaxOptions
               ? static_cast<const SoftmaxOptions *>(builtin_options())
               : nullptr;
  }
  const ConcatenationOptions *builtin_options_as_ConcatenationOptions() const
  {
    return builtin_options_type() == BuiltinOptions_ConcatenationOptions
               ? static_cast<const ConcatenationOptions *>(builtin_options())
               : nullptr;
  }
  const AddOptions *builtin_options_as_AddOptions() const
  {
    return builtin_options_type() == BuiltinOptions_AddOptions
               ? static_cast<const AddOptions *>(builtin_options())
               : nullptr;
  }
  const L2NormOptions *builtin_options_as_L2NormOptions() const
  {
    return builtin_options_type() == BuiltinOptions_L2NormOptions
               ? static_cast<const L2NormOptions *>(builtin_options())
               : nullptr;
  }
  const LocalResponseNormalizationOptions *
  builtin_options_as_LocalResponseNormalizationOptions() const
  {
    return builtin_options_type() == BuiltinOptions_LocalResponseNormalizationOptions
               ? static_cast<const LocalResponseNormalizationOptions *>(builtin_options())
               : nullptr;
  }
  const LSTMOptions *builtin_options_as_LSTMOptions() const
  {
    return builtin_options_type() == BuiltinOptions_LSTMOptions
               ? static_cast<const LSTMOptions *>(builtin_options())
               : nullptr;
  }
  const ResizeBilinearOptions *builtin_options_as_ResizeBilinearOptions() const
  {
    return builtin_options_type() == BuiltinOptions_ResizeBilinearOptions
               ? static_cast<const ResizeBilinearOptions *>(builtin_options())
               : nullptr;
  }
  const CallOptions *builtin_options_as_CallOptions() const
  {
    return builtin_options_type() == BuiltinOptions_CallOptions
               ? static_cast<const CallOptions *>(builtin_options())
               : nullptr;
  }
  const ReshapeOptions *builtin_options_as_ReshapeOptions() const
  {
    return builtin_options_type() == BuiltinOptions_ReshapeOptions
               ? static_cast<const ReshapeOptions *>(builtin_options())
               : nullptr;
  }
  const SkipGramOptions *builtin_options_as_SkipGramOptions() const
  {
    return builtin_options_type() == BuiltinOptions_SkipGramOptions
               ? static_cast<const SkipGramOptions *>(builtin_options())
               : nullptr;
  }
  const SpaceToDepthOptions *builtin_options_as_SpaceToDepthOptions() const
  {
    return builtin_options_type() == BuiltinOptions_SpaceToDepthOptions
               ? static_cast<const SpaceToDepthOptions *>(builtin_options())
               : nullptr;
  }
  const EmbeddingLookupSparseOptions *builtin_options_as_EmbeddingLookupSparseOptions() const
  {
    return builtin_options_type() == BuiltinOptions_EmbeddingLookupSparseOptions
               ? static_cast<const EmbeddingLookupSparseOptions *>(builtin_options())
               : nullptr;
  }
  const MulOptions *builtin_options_as_MulOptions() const
  {
    return builtin_options_type() == BuiltinOptions_MulOptions
               ? static_cast<const MulOptions *>(builtin_options())
               : nullptr;
  }
  const PadOptions *builtin_options_as_PadOptions() const
  {
    return builtin_options_type() == BuiltinOptions_PadOptions
               ? static_cast<const PadOptions *>(builtin_options())
               : nullptr;
  }
  const GatherOptions *builtin_options_as_GatherOptions() const
  {
    return builtin_options_type() == BuiltinOptions_GatherOptions
               ? static_cast<const GatherOptions *>(builtin_options())
               : nullptr;
  }
  const BatchToSpaceNDOptions *builtin_options_as_BatchToSpaceNDOptions() const
  {
    return builtin_options_type() == BuiltinOptions_BatchToSpaceNDOptions
               ? static_cast<const BatchToSpaceNDOptions *>(builtin_options())
               : nullptr;
  }
  const SpaceToBatchNDOptions *builtin_options_as_SpaceToBatchNDOptions() const
  {
    return builtin_options_type() == BuiltinOptions_SpaceToBatchNDOptions
               ? static_cast<const SpaceToBatchNDOptions *>(builtin_options())
               : nullptr;
  }
  const TransposeOptions *builtin_options_as_TransposeOptions() const
  {
    return builtin_options_type() == BuiltinOptions_TransposeOptions
               ? static_cast<const TransposeOptions *>(builtin_options())
               : nullptr;
  }
  const ReducerOptions *builtin_options_as_ReducerOptions() const
  {
    return builtin_options_type() == BuiltinOptions_ReducerOptions
               ? static_cast<const ReducerOptions *>(builtin_options())
               : nullptr;
  }
  const SubOptions *builtin_options_as_SubOptions() const
  {
    return builtin_options_type() == BuiltinOptions_SubOptions
               ? static_cast<const SubOptions *>(builtin_options())
               : nullptr;
  }
  const DivOptions *builtin_options_as_DivOptions() const
  {
    return builtin_options_type() == BuiltinOptions_DivOptions
               ? static_cast<const DivOptions *>(builtin_options())
               : nullptr;
  }
  const SqueezeOptions *builtin_options_as_SqueezeOptions() const
  {
    return builtin_options_type() == BuiltinOptions_SqueezeOptions
               ? static_cast<const SqueezeOptions *>(builtin_options())
               : nullptr;
  }
  const SequenceRNNOptions *builtin_options_as_SequenceRNNOptions() const
  {
    return builtin_options_type() == BuiltinOptions_SequenceRNNOptions
               ? static_cast<const SequenceRNNOptions *>(builtin_options())
               : nullptr;
  }
  const StridedSliceOptions *builtin_options_as_StridedSliceOptions() const
  {
    return builtin_options_type() == BuiltinOptions_StridedSliceOptions
               ? static_cast<const StridedSliceOptions *>(builtin_options())
               : nullptr;
  }
  const ExpOptions *builtin_options_as_ExpOptions() const
  {
    return builtin_options_type() == BuiltinOptions_ExpOptions
               ? static_cast<const ExpOptions *>(builtin_options())
               : nullptr;
  }
  const TopKV2Options *builtin_options_as_TopKV2Options() const
  {
    return builtin_options_type() == BuiltinOptions_TopKV2Options
               ? static_cast<const TopKV2Options *>(builtin_options())
               : nullptr;
  }
  const SplitOptions *builtin_options_as_SplitOptions() const
  {
    return builtin_options_type() == BuiltinOptions_SplitOptions
               ? static_cast<const SplitOptions *>(builtin_options())
               : nullptr;
  }
  const LogSoftmaxOptions *builtin_options_as_LogSoftmaxOptions() const
  {
    return builtin_options_type() == BuiltinOptions_LogSoftmaxOptions
               ? static_cast<const LogSoftmaxOptions *>(builtin_options())
               : nullptr;
  }
  const CastOptions *builtin_options_as_CastOptions() const
  {
    return builtin_options_type() == BuiltinOptions_CastOptions
               ? static_cast<const CastOptions *>(builtin_options())
               : nullptr;
  }
  const DequantizeOptions *builtin_options_as_DequantizeOptions() const
  {
    return builtin_options_type() == BuiltinOptions_DequantizeOptions
               ? static_cast<const DequantizeOptions *>(builtin_options())
               : nullptr;
  }
  const MaximumMinimumOptions *builtin_options_as_MaximumMinimumOptions() const
  {
    return builtin_options_type() == BuiltinOptions_MaximumMinimumOptions
               ? static_cast<const MaximumMinimumOptions *>(builtin_options())
               : nullptr;
  }
  const ArgMaxOptions *builtin_options_as_ArgMaxOptions() const
  {
    return builtin_options_type() == BuiltinOptions_ArgMaxOptions
               ? static_cast<const ArgMaxOptions *>(builtin_options())
               : nullptr;
  }
  const LessOptions *builtin_options_as_LessOptions() const
  {
    return builtin_options_type() == BuiltinOptions_LessOptions
               ? static_cast<const LessOptions *>(builtin_options())
               : nullptr;
  }
  const NegOptions *builtin_options_as_NegOptions() const
  {
    return builtin_options_type() == BuiltinOptions_NegOptions
               ? static_cast<const NegOptions *>(builtin_options())
               : nullptr;
  }
  const PadV2Options *builtin_options_as_PadV2Options() const
  {
    return builtin_options_type() == BuiltinOptions_PadV2Options
               ? static_cast<const PadV2Options *>(builtin_options())
               : nullptr;
  }
  const GreaterOptions *builtin_options_as_GreaterOptions() const
  {
    return builtin_options_type() == BuiltinOptions_GreaterOptions
               ? static_cast<const GreaterOptions *>(builtin_options())
               : nullptr;
  }
  const GreaterEqualOptions *builtin_options_as_GreaterEqualOptions() const
  {
    return builtin_options_type() == BuiltinOptions_GreaterEqualOptions
               ? static_cast<const GreaterEqualOptions *>(builtin_options())
               : nullptr;
  }
  const LessEqualOptions *builtin_options_as_LessEqualOptions() const
  {
    return builtin_options_type() == BuiltinOptions_LessEqualOptions
               ? static_cast<const LessEqualOptions *>(builtin_options())
               : nullptr;
  }
  const SelectOptions *builtin_options_as_SelectOptions() const
  {
    return builtin_options_type() == BuiltinOptions_SelectOptions
               ? static_cast<const SelectOptions *>(builtin_options())
               : nullptr;
  }
  const SliceOptions *builtin_options_as_SliceOptions() const
  {
    return builtin_options_type() == BuiltinOptions_SliceOptions
               ? static_cast<const SliceOptions *>(builtin_options())
               : nullptr;
  }
  const TransposeConvOptions *builtin_options_as_TransposeConvOptions() const
  {
    return builtin_options_type() == BuiltinOptions_TransposeConvOptions
               ? static_cast<const TransposeConvOptions *>(builtin_options())
               : nullptr;
  }
  const SparseToDenseOptions *builtin_options_as_SparseToDenseOptions() const
  {
    return builtin_options_type() == BuiltinOptions_SparseToDenseOptions
               ? static_cast<const SparseToDenseOptions *>(builtin_options())
               : nullptr;
  }
  const TileOptions *builtin_options_as_TileOptions() const
  {
    return builtin_options_type() == BuiltinOptions_TileOptions
               ? static_cast<const TileOptions *>(builtin_options())
               : nullptr;
  }
  const ExpandDimsOptions *builtin_options_as_ExpandDimsOptions() const
  {
    return builtin_options_type() == BuiltinOptions_ExpandDimsOptions
               ? static_cast<const ExpandDimsOptions *>(builtin_options())
               : nullptr;
  }
  const EqualOptions *builtin_options_as_EqualOptions() const
  {
    return builtin_options_type() == BuiltinOptions_EqualOptions
               ? static_cast<const EqualOptions *>(builtin_options())
               : nullptr;
  }
  const NotEqualOptions *builtin_options_as_NotEqualOptions() const
  {
    return builtin_options_type() == BuiltinOptions_NotEqualOptions
               ? static_cast<const NotEqualOptions *>(builtin_options())
               : nullptr;
  }
  const ShapeOptions *builtin_options_as_ShapeOptions() const
  {
    return builtin_options_type() == BuiltinOptions_ShapeOptions
               ? static_cast<const ShapeOptions *>(builtin_options())
               : nullptr;
  }
  const PowOptions *builtin_options_as_PowOptions() const
  {
    return builtin_options_type() == BuiltinOptions_PowOptions
               ? static_cast<const PowOptions *>(builtin_options())
               : nullptr;
  }
  const ArgMinOptions *builtin_options_as_ArgMinOptions() const
  {
    return builtin_options_type() == BuiltinOptions_ArgMinOptions
               ? static_cast<const ArgMinOptions *>(builtin_options())
               : nullptr;
  }
  const FakeQuantOptions *builtin_options_as_FakeQuantOptions() const
  {
    return builtin_options_type() == BuiltinOptions_FakeQuantOptions
               ? static_cast<const FakeQuantOptions *>(builtin_options())
               : nullptr;
  }
  const PackOptions *builtin_options_as_PackOptions() const
  {
    return builtin_options_type() == BuiltinOptions_PackOptions
               ? static_cast<const PackOptions *>(builtin_options())
               : nullptr;
  }
  const LogicalOrOptions *builtin_options_as_LogicalOrOptions() const
  {
    return builtin_options_type() == BuiltinOptions_LogicalOrOptions
               ? static_cast<const LogicalOrOptions *>(builtin_options())
               : nullptr;
  }
  const OneHotOptions *builtin_options_as_OneHotOptions() const
  {
    return builtin_options_type() == BuiltinOptions_OneHotOptions
               ? static_cast<const OneHotOptions *>(builtin_options())
               : nullptr;
  }
  const LogicalAndOptions *builtin_options_as_LogicalAndOptions() const
  {
    return builtin_options_type() == BuiltinOptions_LogicalAndOptions
               ? static_cast<const LogicalAndOptions *>(builtin_options())
               : nullptr;
  }
  const LogicalNotOptions *builtin_options_as_LogicalNotOptions() const
  {
    return builtin_options_type() == BuiltinOptions_LogicalNotOptions
               ? static_cast<const LogicalNotOptions *>(builtin_options())
               : nullptr;
  }
  const UnpackOptions *builtin_options_as_UnpackOptions() const
  {
    return builtin_options_type() == BuiltinOptions_UnpackOptions
               ? static_cast<const UnpackOptions *>(builtin_options())
               : nullptr;
  }
  const FloorDivOptions *builtin_options_as_FloorDivOptions() const
  {
    return builtin_options_type() == BuiltinOptions_FloorDivOptions
               ? static_cast<const FloorDivOptions *>(builtin_options())
               : nullptr;
  }
  const SquareOptions *builtin_options_as_SquareOptions() const
  {
    return builtin_options_type() == BuiltinOptions_SquareOptions
               ? static_cast<const SquareOptions *>(builtin_options())
               : nullptr;
  }
  const ZerosLikeOptions *builtin_options_as_ZerosLikeOptions() const
  {
    return builtin_options_type() == BuiltinOptions_ZerosLikeOptions
               ? static_cast<const ZerosLikeOptions *>(builtin_options())
               : nullptr;
  }
  const FillOptions *builtin_options_as_FillOptions() const
  {
    return builtin_options_type() == BuiltinOptions_FillOptions
               ? static_cast<const FillOptions *>(builtin_options())
               : nullptr;
  }
  const BidirectionalSequenceLSTMOptions *
  builtin_options_as_BidirectionalSequenceLSTMOptions() const
  {
    return builtin_options_type() == BuiltinOptions_BidirectionalSequenceLSTMOptions
               ? static_cast<const BidirectionalSequenceLSTMOptions *>(builtin_options())
               : nullptr;
  }
  const BidirectionalSequenceRNNOptions *builtin_options_as_BidirectionalSequenceRNNOptions() const
  {
    return builtin_options_type() == BuiltinOptions_BidirectionalSequenceRNNOptions
               ? static_cast<const BidirectionalSequenceRNNOptions *>(builtin_options())
               : nullptr;
  }
  const UnidirectionalSequenceLSTMOptions *
  builtin_options_as_UnidirectionalSequenceLSTMOptions() const
  {
    return builtin_options_type() == BuiltinOptions_UnidirectionalSequenceLSTMOptions
               ? static_cast<const UnidirectionalSequenceLSTMOptions *>(builtin_options())
               : nullptr;
  }
  const FloorModOptions *builtin_options_as_FloorModOptions() const
  {
    return builtin_options_type() == BuiltinOptions_FloorModOptions
               ? static_cast<const FloorModOptions *>(builtin_options())
               : nullptr;
  }
  const RangeOptions *builtin_options_as_RangeOptions() const
  {
    return builtin_options_type() == BuiltinOptions_RangeOptions
               ? static_cast<const RangeOptions *>(builtin_options())
               : nullptr;
  }
  const ResizeNearestNeighborOptions *builtin_options_as_ResizeNearestNeighborOptions() const
  {
    return builtin_options_type() == BuiltinOptions_ResizeNearestNeighborOptions
               ? static_cast<const ResizeNearestNeighborOptions *>(builtin_options())
               : nullptr;
  }
  const LeakyReluOptions *builtin_options_as_LeakyReluOptions() const
  {
    return builtin_options_type() == BuiltinOptions_LeakyReluOptions
               ? static_cast<const LeakyReluOptions *>(builtin_options())
               : nullptr;
  }
  const SquaredDifferenceOptions *builtin_options_as_SquaredDifferenceOptions() const
  {
    return builtin_options_type() == BuiltinOptions_SquaredDifferenceOptions
               ? static_cast<const SquaredDifferenceOptions *>(builtin_options())
               : nullptr;
  }
  const MirrorPadOptions *builtin_options_as_MirrorPadOptions() const
  {
    return builtin_options_type() == BuiltinOptions_MirrorPadOptions
               ? static_cast<const MirrorPadOptions *>(builtin_options())
               : nullptr;
  }
  const AbsOptions *builtin_options_as_AbsOptions() const
  {
    return builtin_options_type() == BuiltinOptions_AbsOptions
               ? static_cast<const AbsOptions *>(builtin_options())
               : nullptr;
  }
  const SplitVOptions *builtin_options_as_SplitVOptions() const
  {
    return builtin_options_type() == BuiltinOptions_SplitVOptions
               ? static_cast<const SplitVOptions *>(builtin_options())
               : nullptr;
  }
  const UniqueOptions *builtin_options_as_UniqueOptions() const
  {
    return builtin_options_type() == BuiltinOptions_UniqueOptions
               ? static_cast<const UniqueOptions *>(builtin_options())
               : nullptr;
  }
  const ReverseV2Options *builtin_options_as_ReverseV2Options() const
  {
    return builtin_options_type() == BuiltinOptions_ReverseV2Options
               ? static_cast<const ReverseV2Options *>(builtin_options())
               : nullptr;
  }
  const AddNOptions *builtin_options_as_AddNOptions() const
  {
    return builtin_options_type() == BuiltinOptions_AddNOptions
               ? static_cast<const AddNOptions *>(builtin_options())
               : nullptr;
  }
  const GatherNdOptions *builtin_options_as_GatherNdOptions() const
  {
    return builtin_options_type() == BuiltinOptions_GatherNdOptions
               ? static_cast<const GatherNdOptions *>(builtin_options())
               : nullptr;
  }
  const CosOptions *builtin_options_as_CosOptions() const
  {
    return builtin_options_type() == BuiltinOptions_CosOptions
               ? static_cast<const CosOptions *>(builtin_options())
               : nullptr;
  }
  const WhereOptions *builtin_options_as_WhereOptions() const
  {
    return builtin_options_type() == BuiltinOptions_WhereOptions
               ? static_cast<const WhereOptions *>(builtin_options())
               : nullptr;
  }
  const RankOptions *builtin_options_as_RankOptions() const
  {
    return builtin_options_type() == BuiltinOptions_RankOptions
               ? static_cast<const RankOptions *>(builtin_options())
               : nullptr;
  }
  const ReverseSequenceOptions *builtin_options_as_ReverseSequenceOptions() const
  {
    return builtin_options_type() == BuiltinOptions_ReverseSequenceOptions
               ? static_cast<const ReverseSequenceOptions *>(builtin_options())
               : nullptr;
  }
  const MatrixDiagOptions *builtin_options_as_MatrixDiagOptions() const
  {
    return builtin_options_type() == BuiltinOptions_MatrixDiagOptions
               ? static_cast<const MatrixDiagOptions *>(builtin_options())
               : nullptr;
  }
  const QuantizeOptions *builtin_options_as_QuantizeOptions() const
  {
    return builtin_options_type() == BuiltinOptions_QuantizeOptions
               ? static_cast<const QuantizeOptions *>(builtin_options())
               : nullptr;
  }
  const MatrixSetDiagOptions *builtin_options_as_MatrixSetDiagOptions() const
  {
    return builtin_options_type() == BuiltinOptions_MatrixSetDiagOptions
               ? static_cast<const MatrixSetDiagOptions *>(builtin_options())
               : nullptr;
  }
  const HardSwishOptions *builtin_options_as_HardSwishOptions() const
  {
    return builtin_options_type() == BuiltinOptions_HardSwishOptions
               ? static_cast<const HardSwishOptions *>(builtin_options())
               : nullptr;
  }
  const IfOptions *builtin_options_as_IfOptions() const
  {
    return builtin_options_type() == BuiltinOptions_IfOptions
               ? static_cast<const IfOptions *>(builtin_options())
               : nullptr;
  }
  const WhileOptions *builtin_options_as_WhileOptions() const
  {
    return builtin_options_type() == BuiltinOptions_WhileOptions
               ? static_cast<const WhileOptions *>(builtin_options())
               : nullptr;
  }
  const DepthToSpaceOptions *builtin_options_as_DepthToSpaceOptions() const
  {
    return builtin_options_type() == BuiltinOptions_DepthToSpaceOptions
               ? static_cast<const DepthToSpaceOptions *>(builtin_options())
               : nullptr;
  }
  const NonMaxSuppressionV4Options *builtin_options_as_NonMaxSuppressionV4Options() const
  {
    return builtin_options_type() == BuiltinOptions_NonMaxSuppressionV4Options
               ? static_cast<const NonMaxSuppressionV4Options *>(builtin_options())
               : nullptr;
  }
  const NonMaxSuppressionV5Options *builtin_options_as_NonMaxSuppressionV5Options() const
  {
    return builtin_options_type() == BuiltinOptions_NonMaxSuppressionV5Options
               ? static_cast<const NonMaxSuppressionV5Options *>(builtin_options())
               : nullptr;
  }
  const ScatterNdOptions *builtin_options_as_ScatterNdOptions() const
  {
    return builtin_options_type() == BuiltinOptions_ScatterNdOptions
               ? static_cast<const ScatterNdOptions *>(builtin_options())
               : nullptr;
  }
  const SelectV2Options *builtin_options_as_SelectV2Options() const
  {
    return builtin_options_type() == BuiltinOptions_SelectV2Options
               ? static_cast<const SelectV2Options *>(builtin_options())
               : nullptr;
  }
  const DensifyOptions *builtin_options_as_DensifyOptions() const
  {
    return builtin_options_type() == BuiltinOptions_DensifyOptions
               ? static_cast<const DensifyOptions *>(builtin_options())
               : nullptr;
  }
  const SegmentSumOptions *builtin_options_as_SegmentSumOptions() const
  {
    return builtin_options_type() == BuiltinOptions_SegmentSumOptions
               ? static_cast<const SegmentSumOptions *>(builtin_options())
               : nullptr;
  }
  const BatchMatMulOptions *builtin_options_as_BatchMatMulOptions() const
  {
    return builtin_options_type() == BuiltinOptions_BatchMatMulOptions
               ? static_cast<const BatchMatMulOptions *>(builtin_options())
               : nullptr;
  }
  const InstanceNormOptions *builtin_options_as_InstanceNormOptions() const
  {
    return builtin_options_type() == BuiltinOptions_InstanceNormOptions
               ? static_cast<const InstanceNormOptions *>(builtin_options())
               : nullptr;
  }
  // Raw custom-operator payload, interpreted per custom_options_format().
  const flatbuffers::Vector<uint8_t> *custom_options() const
  {
    return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_CUSTOM_OPTIONS);
  }
  CustomOptionsFormat custom_options_format() const
  {
    return static_cast<CustomOptionsFormat>(GetField<int8_t>(VT_CUSTOM_OPTIONS_FORMAT, 0));
  }
  const flatbuffers::Vector<uint8_t> *mutating_variable_inputs() const
  {
    return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_MUTATING_VARIABLE_INPUTS);
  }
  const flatbuffers::Vector<int32_t> *intermediates() const
  {
    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_INTERMEDIATES);
  }
  // Full structural validation of the operator, including the union payload
  // (dispatched through VerifyBuiltinOptions on the stored discriminator).
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && VerifyField<uint32_t>(verifier, VT_OPCODE_INDEX) &&
           VerifyOffset(verifier, VT_INPUTS) && verifier.VerifyVector(inputs()) &&
           VerifyOffset(verifier, VT_OUTPUTS) && verifier.VerifyVector(outputs()) &&
           VerifyField<uint8_t>(verifier, VT_BUILTIN_OPTIONS_TYPE) &&
           VerifyOffset(verifier, VT_BUILTIN_OPTIONS) &&
           VerifyBuiltinOptions(verifier, builtin_options(), builtin_options_type()) &&
           VerifyOffset(verifier, VT_CUSTOM_OPTIONS) && verifier.VerifyVector(custom_options()) &&
           VerifyField<int8_t>(verifier, VT_CUSTOM_OPTIONS_FORMAT) &&
           VerifyOffset(verifier, VT_MUTATING_VARIABLE_INPUTS) &&
           verifier.VerifyVector(mutating_variable_inputs()) &&
           VerifyOffset(verifier, VT_INTERMEDIATES) && verifier.VerifyVector(intermediates()) &&
           verifier.EndTable();
  }
};

// Explicit specializations routing the generic builtin_options_as<T>() to the
// corresponding checked accessor; the list continues past this chunk.
template <> inline const Conv2DOptions *Operator::builtin_options_as<Conv2DOptions>() const
{
  return builtin_options_as_Conv2DOptions();
}

template <>
inline const DepthwiseConv2DOptions *Operator::builtin_options_as<DepthwiseConv2DOptions>() const
{
  return builtin_options_as_DepthwiseConv2DOptions();
}

template <>
inline const ConcatEmbeddingsOptions *Operator::builtin_options_as<ConcatEmbeddingsOptions>() const
{
return builtin_options_as_ConcatEmbeddingsOptions(); +} + +template <> +inline const LSHProjectionOptions *Operator::builtin_options_as<LSHProjectionOptions>() const +{ + return builtin_options_as_LSHProjectionOptions(); +} + +template <> inline const Pool2DOptions *Operator::builtin_options_as<Pool2DOptions>() const +{ + return builtin_options_as_Pool2DOptions(); +} + +template <> inline const SVDFOptions *Operator::builtin_options_as<SVDFOptions>() const +{ + return builtin_options_as_SVDFOptions(); +} + +template <> inline const RNNOptions *Operator::builtin_options_as<RNNOptions>() const +{ + return builtin_options_as_RNNOptions(); +} + +template <> +inline const FullyConnectedOptions *Operator::builtin_options_as<FullyConnectedOptions>() const +{ + return builtin_options_as_FullyConnectedOptions(); +} + +template <> inline const SoftmaxOptions *Operator::builtin_options_as<SoftmaxOptions>() const +{ + return builtin_options_as_SoftmaxOptions(); +} + +template <> +inline const ConcatenationOptions *Operator::builtin_options_as<ConcatenationOptions>() const +{ + return builtin_options_as_ConcatenationOptions(); +} + +template <> inline const AddOptions *Operator::builtin_options_as<AddOptions>() const +{ + return builtin_options_as_AddOptions(); +} + +template <> inline const L2NormOptions *Operator::builtin_options_as<L2NormOptions>() const +{ + return builtin_options_as_L2NormOptions(); +} + +template <> +inline const LocalResponseNormalizationOptions * +Operator::builtin_options_as<LocalResponseNormalizationOptions>() const +{ + return builtin_options_as_LocalResponseNormalizationOptions(); +} + +template <> inline const LSTMOptions *Operator::builtin_options_as<LSTMOptions>() const +{ + return builtin_options_as_LSTMOptions(); +} + +template <> +inline const ResizeBilinearOptions *Operator::builtin_options_as<ResizeBilinearOptions>() const +{ + return builtin_options_as_ResizeBilinearOptions(); +} + +template <> inline const CallOptions 
*Operator::builtin_options_as<CallOptions>() const +{ + return builtin_options_as_CallOptions(); +} + +template <> inline const ReshapeOptions *Operator::builtin_options_as<ReshapeOptions>() const +{ + return builtin_options_as_ReshapeOptions(); +} + +template <> inline const SkipGramOptions *Operator::builtin_options_as<SkipGramOptions>() const +{ + return builtin_options_as_SkipGramOptions(); +} + +template <> +inline const SpaceToDepthOptions *Operator::builtin_options_as<SpaceToDepthOptions>() const +{ + return builtin_options_as_SpaceToDepthOptions(); +} + +template <> +inline const EmbeddingLookupSparseOptions * +Operator::builtin_options_as<EmbeddingLookupSparseOptions>() const +{ + return builtin_options_as_EmbeddingLookupSparseOptions(); +} + +template <> inline const MulOptions *Operator::builtin_options_as<MulOptions>() const +{ + return builtin_options_as_MulOptions(); +} + +template <> inline const PadOptions *Operator::builtin_options_as<PadOptions>() const +{ + return builtin_options_as_PadOptions(); +} + +template <> inline const GatherOptions *Operator::builtin_options_as<GatherOptions>() const +{ + return builtin_options_as_GatherOptions(); +} + +template <> +inline const BatchToSpaceNDOptions *Operator::builtin_options_as<BatchToSpaceNDOptions>() const +{ + return builtin_options_as_BatchToSpaceNDOptions(); +} + +template <> +inline const SpaceToBatchNDOptions *Operator::builtin_options_as<SpaceToBatchNDOptions>() const +{ + return builtin_options_as_SpaceToBatchNDOptions(); +} + +template <> inline const TransposeOptions *Operator::builtin_options_as<TransposeOptions>() const +{ + return builtin_options_as_TransposeOptions(); +} + +template <> inline const ReducerOptions *Operator::builtin_options_as<ReducerOptions>() const +{ + return builtin_options_as_ReducerOptions(); +} + +template <> inline const SubOptions *Operator::builtin_options_as<SubOptions>() const +{ + return builtin_options_as_SubOptions(); +} + +template <> inline const 
DivOptions *Operator::builtin_options_as<DivOptions>() const +{ + return builtin_options_as_DivOptions(); +} + +template <> inline const SqueezeOptions *Operator::builtin_options_as<SqueezeOptions>() const +{ + return builtin_options_as_SqueezeOptions(); +} + +template <> +inline const SequenceRNNOptions *Operator::builtin_options_as<SequenceRNNOptions>() const +{ + return builtin_options_as_SequenceRNNOptions(); +} + +template <> +inline const StridedSliceOptions *Operator::builtin_options_as<StridedSliceOptions>() const +{ + return builtin_options_as_StridedSliceOptions(); +} + +template <> inline const ExpOptions *Operator::builtin_options_as<ExpOptions>() const +{ + return builtin_options_as_ExpOptions(); +} + +template <> inline const TopKV2Options *Operator::builtin_options_as<TopKV2Options>() const +{ + return builtin_options_as_TopKV2Options(); +} + +template <> inline const SplitOptions *Operator::builtin_options_as<SplitOptions>() const +{ + return builtin_options_as_SplitOptions(); +} + +template <> inline const LogSoftmaxOptions *Operator::builtin_options_as<LogSoftmaxOptions>() const +{ + return builtin_options_as_LogSoftmaxOptions(); +} + +template <> inline const CastOptions *Operator::builtin_options_as<CastOptions>() const +{ + return builtin_options_as_CastOptions(); +} + +template <> inline const DequantizeOptions *Operator::builtin_options_as<DequantizeOptions>() const +{ + return builtin_options_as_DequantizeOptions(); +} + +template <> +inline const MaximumMinimumOptions *Operator::builtin_options_as<MaximumMinimumOptions>() const +{ + return builtin_options_as_MaximumMinimumOptions(); +} + +template <> inline const ArgMaxOptions *Operator::builtin_options_as<ArgMaxOptions>() const +{ + return builtin_options_as_ArgMaxOptions(); +} + +template <> inline const LessOptions *Operator::builtin_options_as<LessOptions>() const +{ + return builtin_options_as_LessOptions(); +} + +template <> inline const NegOptions 
*Operator::builtin_options_as<NegOptions>() const +{ + return builtin_options_as_NegOptions(); +} + +template <> inline const PadV2Options *Operator::builtin_options_as<PadV2Options>() const +{ + return builtin_options_as_PadV2Options(); +} + +template <> inline const GreaterOptions *Operator::builtin_options_as<GreaterOptions>() const +{ + return builtin_options_as_GreaterOptions(); +} + +template <> +inline const GreaterEqualOptions *Operator::builtin_options_as<GreaterEqualOptions>() const +{ + return builtin_options_as_GreaterEqualOptions(); +} + +template <> inline const LessEqualOptions *Operator::builtin_options_as<LessEqualOptions>() const +{ + return builtin_options_as_LessEqualOptions(); +} + +template <> inline const SelectOptions *Operator::builtin_options_as<SelectOptions>() const +{ + return builtin_options_as_SelectOptions(); +} + +template <> inline const SliceOptions *Operator::builtin_options_as<SliceOptions>() const +{ + return builtin_options_as_SliceOptions(); +} + +template <> +inline const TransposeConvOptions *Operator::builtin_options_as<TransposeConvOptions>() const +{ + return builtin_options_as_TransposeConvOptions(); +} + +template <> +inline const SparseToDenseOptions *Operator::builtin_options_as<SparseToDenseOptions>() const +{ + return builtin_options_as_SparseToDenseOptions(); +} + +template <> inline const TileOptions *Operator::builtin_options_as<TileOptions>() const +{ + return builtin_options_as_TileOptions(); +} + +template <> inline const ExpandDimsOptions *Operator::builtin_options_as<ExpandDimsOptions>() const +{ + return builtin_options_as_ExpandDimsOptions(); +} + +template <> inline const EqualOptions *Operator::builtin_options_as<EqualOptions>() const +{ + return builtin_options_as_EqualOptions(); +} + +template <> inline const NotEqualOptions *Operator::builtin_options_as<NotEqualOptions>() const +{ + return builtin_options_as_NotEqualOptions(); +} + +template <> inline const ShapeOptions 
*Operator::builtin_options_as<ShapeOptions>() const +{ + return builtin_options_as_ShapeOptions(); +} + +template <> inline const PowOptions *Operator::builtin_options_as<PowOptions>() const +{ + return builtin_options_as_PowOptions(); +} + +template <> inline const ArgMinOptions *Operator::builtin_options_as<ArgMinOptions>() const +{ + return builtin_options_as_ArgMinOptions(); +} + +template <> inline const FakeQuantOptions *Operator::builtin_options_as<FakeQuantOptions>() const +{ + return builtin_options_as_FakeQuantOptions(); +} + +template <> inline const PackOptions *Operator::builtin_options_as<PackOptions>() const +{ + return builtin_options_as_PackOptions(); +} + +template <> inline const LogicalOrOptions *Operator::builtin_options_as<LogicalOrOptions>() const +{ + return builtin_options_as_LogicalOrOptions(); +} + +template <> inline const OneHotOptions *Operator::builtin_options_as<OneHotOptions>() const +{ + return builtin_options_as_OneHotOptions(); +} + +template <> inline const LogicalAndOptions *Operator::builtin_options_as<LogicalAndOptions>() const +{ + return builtin_options_as_LogicalAndOptions(); +} + +template <> inline const LogicalNotOptions *Operator::builtin_options_as<LogicalNotOptions>() const +{ + return builtin_options_as_LogicalNotOptions(); +} + +template <> inline const UnpackOptions *Operator::builtin_options_as<UnpackOptions>() const +{ + return builtin_options_as_UnpackOptions(); +} + +template <> inline const FloorDivOptions *Operator::builtin_options_as<FloorDivOptions>() const +{ + return builtin_options_as_FloorDivOptions(); +} + +template <> inline const SquareOptions *Operator::builtin_options_as<SquareOptions>() const +{ + return builtin_options_as_SquareOptions(); +} + +template <> inline const ZerosLikeOptions *Operator::builtin_options_as<ZerosLikeOptions>() const +{ + return builtin_options_as_ZerosLikeOptions(); +} + +template <> inline const FillOptions *Operator::builtin_options_as<FillOptions>() const +{ + return 
builtin_options_as_FillOptions(); +} + +template <> +inline const BidirectionalSequenceLSTMOptions * +Operator::builtin_options_as<BidirectionalSequenceLSTMOptions>() const +{ + return builtin_options_as_BidirectionalSequenceLSTMOptions(); +} + +template <> +inline const BidirectionalSequenceRNNOptions * +Operator::builtin_options_as<BidirectionalSequenceRNNOptions>() const +{ + return builtin_options_as_BidirectionalSequenceRNNOptions(); +} + +template <> +inline const UnidirectionalSequenceLSTMOptions * +Operator::builtin_options_as<UnidirectionalSequenceLSTMOptions>() const +{ + return builtin_options_as_UnidirectionalSequenceLSTMOptions(); +} + +template <> inline const FloorModOptions *Operator::builtin_options_as<FloorModOptions>() const +{ + return builtin_options_as_FloorModOptions(); +} + +template <> inline const RangeOptions *Operator::builtin_options_as<RangeOptions>() const +{ + return builtin_options_as_RangeOptions(); +} + +template <> +inline const ResizeNearestNeighborOptions * +Operator::builtin_options_as<ResizeNearestNeighborOptions>() const +{ + return builtin_options_as_ResizeNearestNeighborOptions(); +} + +template <> inline const LeakyReluOptions *Operator::builtin_options_as<LeakyReluOptions>() const +{ + return builtin_options_as_LeakyReluOptions(); +} + +template <> +inline const SquaredDifferenceOptions * +Operator::builtin_options_as<SquaredDifferenceOptions>() const +{ + return builtin_options_as_SquaredDifferenceOptions(); +} + +template <> inline const MirrorPadOptions *Operator::builtin_options_as<MirrorPadOptions>() const +{ + return builtin_options_as_MirrorPadOptions(); +} + +template <> inline const AbsOptions *Operator::builtin_options_as<AbsOptions>() const +{ + return builtin_options_as_AbsOptions(); +} + +template <> inline const SplitVOptions *Operator::builtin_options_as<SplitVOptions>() const +{ + return builtin_options_as_SplitVOptions(); +} + +template <> inline const UniqueOptions 
*Operator::builtin_options_as<UniqueOptions>() const +{ + return builtin_options_as_UniqueOptions(); +} + +template <> inline const ReverseV2Options *Operator::builtin_options_as<ReverseV2Options>() const +{ + return builtin_options_as_ReverseV2Options(); +} + +template <> inline const AddNOptions *Operator::builtin_options_as<AddNOptions>() const +{ + return builtin_options_as_AddNOptions(); +} + +template <> inline const GatherNdOptions *Operator::builtin_options_as<GatherNdOptions>() const +{ + return builtin_options_as_GatherNdOptions(); +} + +template <> inline const CosOptions *Operator::builtin_options_as<CosOptions>() const +{ + return builtin_options_as_CosOptions(); +} + +template <> inline const WhereOptions *Operator::builtin_options_as<WhereOptions>() const +{ + return builtin_options_as_WhereOptions(); +} + +template <> inline const RankOptions *Operator::builtin_options_as<RankOptions>() const +{ + return builtin_options_as_RankOptions(); +} + +template <> +inline const ReverseSequenceOptions *Operator::builtin_options_as<ReverseSequenceOptions>() const +{ + return builtin_options_as_ReverseSequenceOptions(); +} + +template <> inline const MatrixDiagOptions *Operator::builtin_options_as<MatrixDiagOptions>() const +{ + return builtin_options_as_MatrixDiagOptions(); +} + +template <> inline const QuantizeOptions *Operator::builtin_options_as<QuantizeOptions>() const +{ + return builtin_options_as_QuantizeOptions(); +} + +template <> +inline const MatrixSetDiagOptions *Operator::builtin_options_as<MatrixSetDiagOptions>() const +{ + return builtin_options_as_MatrixSetDiagOptions(); +} + +template <> inline const HardSwishOptions *Operator::builtin_options_as<HardSwishOptions>() const +{ + return builtin_options_as_HardSwishOptions(); +} + +template <> inline const IfOptions *Operator::builtin_options_as<IfOptions>() const +{ + return builtin_options_as_IfOptions(); +} + +template <> inline const WhileOptions *Operator::builtin_options_as<WhileOptions>() 
const
{
  return builtin_options_as_WhileOptions();
}

// NOTE(review): this file appears to be flatc-generated (FLATBUFFERS_FINAL_CLASS, VT_* vtable
// offsets, Builder/Create* pattern). Prefer editing the .fbs schema and regenerating over
// hand-patching this header.

// Typed builtin_options_as<T>() specializations: each forwards to the corresponding
// builtin_options_as_T() accessor, which returns nullptr unless builtin_options_type()
// selects that union member.
template <>
inline const DepthToSpaceOptions *Operator::builtin_options_as<DepthToSpaceOptions>() const
{
  return builtin_options_as_DepthToSpaceOptions();
}

template <>
inline const NonMaxSuppressionV4Options *
Operator::builtin_options_as<NonMaxSuppressionV4Options>() const
{
  return builtin_options_as_NonMaxSuppressionV4Options();
}

template <>
inline const NonMaxSuppressionV5Options *
Operator::builtin_options_as<NonMaxSuppressionV5Options>() const
{
  return builtin_options_as_NonMaxSuppressionV5Options();
}

template <> inline const ScatterNdOptions *Operator::builtin_options_as<ScatterNdOptions>() const
{
  return builtin_options_as_ScatterNdOptions();
}

template <> inline const SelectV2Options *Operator::builtin_options_as<SelectV2Options>() const
{
  return builtin_options_as_SelectV2Options();
}

template <> inline const DensifyOptions *Operator::builtin_options_as<DensifyOptions>() const
{
  return builtin_options_as_DensifyOptions();
}

template <> inline const SegmentSumOptions *Operator::builtin_options_as<SegmentSumOptions>() const
{
  return builtin_options_as_SegmentSumOptions();
}

template <>
inline const BatchMatMulOptions *Operator::builtin_options_as<BatchMatMulOptions>() const
{
  return builtin_options_as_BatchMatMulOptions();
}

template <>
inline const InstanceNormOptions *Operator::builtin_options_as<InstanceNormOptions>() const
{
  return builtin_options_as_InstanceNormOptions();
}

// Incremental builder for an Operator table. Each add_* writes one field at its
// VT_* vtable slot; normally used via CreateOperator() below rather than directly.
struct OperatorBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_; // builder owning the buffer being written
  flatbuffers::uoffset_t start_;        // table start, recorded by StartTable()
  void add_opcode_index(uint32_t opcode_index)
  {
    fbb_.AddElement<uint32_t>(Operator::VT_OPCODE_INDEX, opcode_index, 0);
  }
  void add_inputs(flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputs)
  {
    fbb_.AddOffset(Operator::VT_INPUTS, inputs);
  }
  void add_outputs(flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputs)
  {
    fbb_.AddOffset(Operator::VT_OUTPUTS, outputs);
  }
  // Union discriminant is stored as a uint8_t scalar field.
  void add_builtin_options_type(BuiltinOptions builtin_options_type)
  {
    fbb_.AddElement<uint8_t>(Operator::VT_BUILTIN_OPTIONS_TYPE,
                             static_cast<uint8_t>(builtin_options_type), 0);
  }
  void add_builtin_options(flatbuffers::Offset<void> builtin_options)
  {
    fbb_.AddOffset(Operator::VT_BUILTIN_OPTIONS, builtin_options);
  }
  void add_custom_options(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom_options)
  {
    fbb_.AddOffset(Operator::VT_CUSTOM_OPTIONS, custom_options);
  }
  void add_custom_options_format(CustomOptionsFormat custom_options_format)
  {
    fbb_.AddElement<int8_t>(Operator::VT_CUSTOM_OPTIONS_FORMAT,
                            static_cast<int8_t>(custom_options_format), 0);
  }
  void add_mutating_variable_inputs(
      flatbuffers::Offset<flatbuffers::Vector<uint8_t>> mutating_variable_inputs)
  {
    fbb_.AddOffset(Operator::VT_MUTATING_VARIABLE_INPUTS, mutating_variable_inputs);
  }
  void add_intermediates(flatbuffers::Offset<flatbuffers::Vector<int32_t>> intermediates)
  {
    fbb_.AddOffset(Operator::VT_INTERMEDIATES, intermediates);
  }
  explicit OperatorBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Declared but intentionally not defined: assignment of a builder is unsupported.
  OperatorBuilder &operator=(const OperatorBuilder &);
  // Closes the table and returns its offset in the buffer.
  flatbuffers::Offset<Operator> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<Operator>(end);
    return o;
  }
};

// Builds a complete Operator table in one call. Offset-typed fields are added
// before the smaller scalar fields (order chosen by the schema compiler).
inline flatbuffers::Offset<Operator>
CreateOperator(flatbuffers::FlatBufferBuilder &_fbb, uint32_t opcode_index = 0,
               flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputs = 0,
               flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputs = 0,
               BuiltinOptions builtin_options_type = BuiltinOptions_NONE,
               flatbuffers::Offset<void> builtin_options = 0,
               flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom_options = 0,
               CustomOptionsFormat custom_options_format = CustomOptionsFormat_FLEXBUFFERS,
               flatbuffers::Offset<flatbuffers::Vector<uint8_t>>
                   mutating_variable_inputs = 0,
               flatbuffers::Offset<flatbuffers::Vector<int32_t>> intermediates = 0)
{
  OperatorBuilder builder_(_fbb);
  builder_.add_intermediates(intermediates);
  builder_.add_mutating_variable_inputs(mutating_variable_inputs);
  builder_.add_custom_options(custom_options);
  builder_.add_builtin_options(builtin_options);
  builder_.add_outputs(outputs);
  builder_.add_inputs(inputs);
  builder_.add_opcode_index(opcode_index);
  builder_.add_custom_options_format(custom_options_format);
  builder_.add_builtin_options_type(builtin_options_type);
  return builder_.Finish();
}

// Convenience variant taking std::vector / raw pointers; serializes each non-null
// argument into the buffer (CreateVector) before delegating to CreateOperator().
inline flatbuffers::Offset<Operator>
CreateOperatorDirect(flatbuffers::FlatBufferBuilder &_fbb, uint32_t opcode_index = 0,
                     const std::vector<int32_t> *inputs = nullptr,
                     const std::vector<int32_t> *outputs = nullptr,
                     BuiltinOptions builtin_options_type = BuiltinOptions_NONE,
                     flatbuffers::Offset<void> builtin_options = 0,
                     const std::vector<uint8_t> *custom_options = nullptr,
                     CustomOptionsFormat custom_options_format = CustomOptionsFormat_FLEXBUFFERS,
                     const std::vector<uint8_t> *mutating_variable_inputs = nullptr,
                     const std::vector<int32_t> *intermediates = nullptr)
{
  return circle::CreateOperator(
      _fbb, opcode_index, inputs ? _fbb.CreateVector<int32_t>(*inputs) : 0,
      outputs ? _fbb.CreateVector<int32_t>(*outputs) : 0, builtin_options_type, builtin_options,
      custom_options ? _fbb.CreateVector<uint8_t>(*custom_options) : 0, custom_options_format,
      mutating_variable_inputs ? _fbb.CreateVector<uint8_t>(*mutating_variable_inputs) : 0,
      intermediates ?
_fbb.CreateVector<int32_t>(*intermediates) : 0);
}

// SubGraph table: one graph of the model — its tensors, the operators that
// connect them, the tensor indices forming the graph inputs/outputs, an optional
// name, and the data format (circle extension; defaults to CHANNELS_LAST).
struct SubGraph FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  // vtable slot offsets for each field
  enum
  {
    VT_TENSORS = 4,
    VT_INPUTS = 6,
    VT_OUTPUTS = 8,
    VT_OPERATORS = 10,
    VT_NAME = 12,
    VT_DATA_FORMAT = 14
  };
  const flatbuffers::Vector<flatbuffers::Offset<Tensor>> *tensors() const
  {
    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<Tensor>> *>(VT_TENSORS);
  }
  // Indices into tensors() naming the graph inputs.
  const flatbuffers::Vector<int32_t> *inputs() const
  {
    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_INPUTS);
  }
  // Indices into tensors() naming the graph outputs.
  const flatbuffers::Vector<int32_t> *outputs() const
  {
    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_OUTPUTS);
  }
  const flatbuffers::Vector<flatbuffers::Offset<Operator>> *operators() const
  {
    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<Operator>> *>(VT_OPERATORS);
  }
  const flatbuffers::String *name() const
  {
    return GetPointer<const flatbuffers::String *>(VT_NAME);
  }
  DataFormat data_format() const
  {
    return static_cast<DataFormat>(GetField<int8_t>(VT_DATA_FORMAT, 0));
  }
  // Structural verification of every field: vectors, nested tables, string name.
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_TENSORS) &&
           verifier.VerifyVector(tensors()) && verifier.VerifyVectorOfTables(tensors()) &&
           VerifyOffset(verifier, VT_INPUTS) && verifier.VerifyVector(inputs()) &&
           VerifyOffset(verifier, VT_OUTPUTS) && verifier.VerifyVector(outputs()) &&
           VerifyOffset(verifier, VT_OPERATORS) && verifier.VerifyVector(operators()) &&
           verifier.VerifyVectorOfTables(operators()) && VerifyOffset(verifier, VT_NAME) &&
           verifier.VerifyString(name()) && VerifyField<int8_t>(verifier, VT_DATA_FORMAT) &&
           verifier.EndTable();
  }
};

// Incremental builder for a SubGraph table; normally used via CreateSubGraph().
struct SubGraphBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_; // builder owning the buffer being written
  flatbuffers::uoffset_t start_;        // table start, recorded by StartTable()
  void add_tensors(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Tensor>>> tensors)
  {
    fbb_.AddOffset(SubGraph::VT_TENSORS, tensors);
  }
  void add_inputs(flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputs)
  {
    fbb_.AddOffset(SubGraph::VT_INPUTS, inputs);
  }
  void add_outputs(flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputs)
  {
    fbb_.AddOffset(SubGraph::VT_OUTPUTS, outputs);
  }
  void
  add_operators(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Operator>>> operators)
  {
    fbb_.AddOffset(SubGraph::VT_OPERATORS, operators);
  }
  void add_name(flatbuffers::Offset<flatbuffers::String> name)
  {
    fbb_.AddOffset(SubGraph::VT_NAME, name);
  }
  void add_data_format(DataFormat data_format)
  {
    fbb_.AddElement<int8_t>(SubGraph::VT_DATA_FORMAT, static_cast<int8_t>(data_format), 0);
  }
  explicit SubGraphBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Declared but intentionally not defined: assignment of a builder is unsupported.
  SubGraphBuilder &operator=(const SubGraphBuilder &);
  flatbuffers::Offset<SubGraph> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<SubGraph>(end);
    return o;
  }
};

// Builds a complete SubGraph table in one call (offset fields first, then the
// int8 data_format scalar).
inline flatbuffers::Offset<SubGraph> CreateSubGraph(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Tensor>>> tensors = 0,
    flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputs = 0,
    flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputs = 0,
    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Operator>>> operators = 0,
    flatbuffers::Offset<flatbuffers::String> name = 0,
    DataFormat data_format = DataFormat_CHANNELS_LAST)
{
  SubGraphBuilder builder_(_fbb);
  builder_.add_name(name);
  builder_.add_operators(operators);
  builder_.add_outputs(outputs);
  builder_.add_inputs(inputs);
  builder_.add_tensors(tensors);
  builder_.add_data_format(data_format);
  return builder_.Finish();
}

// Convenience variant taking std::vector / C-string arguments; serializes each
// non-null argument into the buffer before delegating to CreateSubGraph().
inline flatbuffers::Offset<SubGraph>
CreateSubGraphDirect(flatbuffers::FlatBufferBuilder &_fbb,
                     const std::vector<flatbuffers::Offset<Tensor>> *tensors = nullptr,
                     const std::vector<int32_t> *inputs =
nullptr,
                     const std::vector<int32_t> *outputs = nullptr,
                     const std::vector<flatbuffers::Offset<Operator>> *operators = nullptr,
                     const char *name = nullptr, DataFormat data_format = DataFormat_CHANNELS_LAST)
{
  return circle::CreateSubGraph(
      _fbb, tensors ? _fbb.CreateVector<flatbuffers::Offset<Tensor>>(*tensors) : 0,
      inputs ? _fbb.CreateVector<int32_t>(*inputs) : 0,
      outputs ? _fbb.CreateVector<int32_t>(*outputs) : 0,
      operators ? _fbb.CreateVector<flatbuffers::Offset<Operator>>(*operators) : 0,
      name ? _fbb.CreateString(name) : 0, data_format);
}

// Buffer table: a single optional byte vector (raw tensor data).
struct Buffer FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  // vtable slot offset for the lone field
  enum
  {
    VT_DATA = 4
  };
  const flatbuffers::Vector<uint8_t> *data() const
  {
    return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_DATA);
  }
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_DATA) &&
           verifier.VerifyVector(data()) && verifier.EndTable();
  }
};

// Incremental builder for a Buffer table; normally used via CreateBuffer().
struct BufferBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_; // builder owning the buffer being written
  flatbuffers::uoffset_t start_;        // table start, recorded by StartTable()
  void add_data(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data)
  {
    fbb_.AddOffset(Buffer::VT_DATA, data);
  }
  explicit BufferBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Declared but intentionally not defined: assignment of a builder is unsupported.
  BufferBuilder &operator=(const BufferBuilder &);
  flatbuffers::Offset<Buffer> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<Buffer>(end);
    return o;
  }
};

// Builds a complete Buffer table in one call.
inline flatbuffers::Offset<Buffer>
CreateBuffer(flatbuffers::FlatBufferBuilder &_fbb,
             flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data = 0)
{
  BufferBuilder builder_(_fbb);
  builder_.add_data(data);
  return builder_.Finish();
}

// Convenience variant: serializes the std::vector (if non-null) then delegates.
inline flatbuffers::Offset<Buffer> CreateBufferDirect(flatbuffers::FlatBufferBuilder &_fbb,
                                                      const std::vector<uint8_t> *data = nullptr)
{
  return circle::CreateBuffer(_fbb, data ?
_fbb.CreateVector<uint8_t>(*data) : 0);
}

// Metadata table: a name plus the index of the Buffer that holds the metadata bytes.
struct Metadata FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  // vtable slot offsets
  enum
  {
    VT_NAME = 4,
    VT_BUFFER = 6
  };
  const flatbuffers::String *name() const
  {
    return GetPointer<const flatbuffers::String *>(VT_NAME);
  }
  // Index into the model's buffers vector; defaults to 0.
  uint32_t buffer() const { return GetField<uint32_t>(VT_BUFFER, 0); }
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_NAME) &&
           verifier.VerifyString(name()) && VerifyField<uint32_t>(verifier, VT_BUFFER) &&
           verifier.EndTable();
  }
};

// Incremental builder for a Metadata table; normally used via CreateMetadata().
struct MetadataBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_; // builder owning the buffer being written
  flatbuffers::uoffset_t start_;        // table start, recorded by StartTable()
  void add_name(flatbuffers::Offset<flatbuffers::String> name)
  {
    fbb_.AddOffset(Metadata::VT_NAME, name);
  }
  void add_buffer(uint32_t buffer) { fbb_.AddElement<uint32_t>(Metadata::VT_BUFFER, buffer, 0); }
  explicit MetadataBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Declared but intentionally not defined: assignment of a builder is unsupported.
  MetadataBuilder &operator=(const MetadataBuilder &);
  flatbuffers::Offset<Metadata> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<Metadata>(end);
    return o;
  }
};

// Builds a complete Metadata table in one call.
inline flatbuffers::Offset<Metadata>
CreateMetadata(flatbuffers::FlatBufferBuilder &_fbb,
               flatbuffers::Offset<flatbuffers::String> name = 0, uint32_t buffer = 0)
{
  MetadataBuilder builder_(_fbb);
  builder_.add_buffer(buffer);
  builder_.add_name(name);
  return builder_.Finish();
}

// Convenience variant taking a C-string name; serializes it then delegates.
inline flatbuffers::Offset<Metadata> CreateMetadataDirect(flatbuffers::FlatBufferBuilder &_fbb,
                                                          const char *name = nullptr,
                                                          uint32_t buffer = 0)
{
  return circle::CreateMetadata(_fbb, name ?
_fbb.CreateString(name) : 0, buffer);
}

// Model table: schema version, the operator codes referenced by the graphs, the
// subgraphs themselves, a free-form description, the raw data buffers, plus the
// (legacy) metadata_buffer index vector and the Metadata table vector.
struct Model FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  // vtable slot offsets
  enum
  {
    VT_VERSION = 4,
    VT_OPERATOR_CODES = 6,
    VT_SUBGRAPHS = 8,
    VT_DESCRIPTION = 10,
    VT_BUFFERS = 12,
    VT_METADATA_BUFFER = 14,
    VT_METADATA = 16
  };
  uint32_t version() const { return GetField<uint32_t>(VT_VERSION, 0); }
  const flatbuffers::Vector<flatbuffers::Offset<OperatorCode>> *operator_codes() const
  {
    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<OperatorCode>> *>(
        VT_OPERATOR_CODES);
  }
  const flatbuffers::Vector<flatbuffers::Offset<SubGraph>> *subgraphs() const
  {
    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<SubGraph>> *>(VT_SUBGRAPHS);
  }
  const flatbuffers::String *description() const
  {
    return GetPointer<const flatbuffers::String *>(VT_DESCRIPTION);
  }
  const flatbuffers::Vector<flatbuffers::Offset<Buffer>> *buffers() const
  {
    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<Buffer>> *>(VT_BUFFERS);
  }
  const flatbuffers::Vector<int32_t> *metadata_buffer() const
  {
    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_METADATA_BUFFER);
  }
  const flatbuffers::Vector<flatbuffers::Offset<Metadata>> *metadata() const
  {
    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<Metadata>> *>(VT_METADATA);
  }
  // Structural verification of every field, including nested table vectors.
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && VerifyField<uint32_t>(verifier, VT_VERSION) &&
           VerifyOffset(verifier, VT_OPERATOR_CODES) && verifier.VerifyVector(operator_codes()) &&
           verifier.VerifyVectorOfTables(operator_codes()) &&
           VerifyOffset(verifier, VT_SUBGRAPHS) && verifier.VerifyVector(subgraphs()) &&
           verifier.VerifyVectorOfTables(subgraphs()) && VerifyOffset(verifier, VT_DESCRIPTION) &&
           verifier.VerifyString(description()) && VerifyOffset(verifier, VT_BUFFERS) &&
           verifier.VerifyVector(buffers()) && verifier.VerifyVectorOfTables(buffers()) &&
           VerifyOffset(verifier, VT_METADATA_BUFFER) && verifier.VerifyVector(metadata_buffer()) &&
           VerifyOffset(verifier, VT_METADATA) && verifier.VerifyVector(metadata()) &&
           verifier.VerifyVectorOfTables(metadata()) && verifier.EndTable();
  }
};

// Incremental builder for a Model table; normally used via CreateModel().
struct ModelBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_; // builder owning the buffer being written
  flatbuffers::uoffset_t start_;        // table start, recorded by StartTable()
  void add_version(uint32_t version) { fbb_.AddElement<uint32_t>(Model::VT_VERSION, version, 0); }
  void add_operator_codes(
      flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<OperatorCode>>> operator_codes)
  {
    fbb_.AddOffset(Model::VT_OPERATOR_CODES, operator_codes);
  }
  void
  add_subgraphs(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<SubGraph>>> subgraphs)
  {
    fbb_.AddOffset(Model::VT_SUBGRAPHS, subgraphs);
  }
  void add_description(flatbuffers::Offset<flatbuffers::String> description)
  {
    fbb_.AddOffset(Model::VT_DESCRIPTION, description);
  }
  void add_buffers(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Buffer>>> buffers)
  {
    fbb_.AddOffset(Model::VT_BUFFERS, buffers);
  }
  void add_metadata_buffer(flatbuffers::Offset<flatbuffers::Vector<int32_t>> metadata_buffer)
  {
    fbb_.AddOffset(Model::VT_METADATA_BUFFER, metadata_buffer);
  }
  void
  add_metadata(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Metadata>>> metadata)
  {
    fbb_.AddOffset(Model::VT_METADATA, metadata);
  }
  explicit ModelBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Declared but intentionally not defined: assignment of a builder is unsupported.
  ModelBuilder &operator=(const ModelBuilder &);
  flatbuffers::Offset<Model> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<Model>(end);
    return o;
  }
};

// Builds a complete Model table in one call (offset fields first, then version).
inline flatbuffers::Offset<Model> CreateModel(
    flatbuffers::FlatBufferBuilder &_fbb, uint32_t version = 0,
    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<OperatorCode>>> operator_codes = 0,
    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<SubGraph>>> subgraphs = 0,
    flatbuffers::Offset<flatbuffers::String> description = 0,
    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Buffer>>> buffers = 0,
    flatbuffers::Offset<flatbuffers::Vector<int32_t>> metadata_buffer = 0,
    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Metadata>>> metadata = 0)
{
  ModelBuilder builder_(_fbb);
  builder_.add_metadata(metadata);
  builder_.add_metadata_buffer(metadata_buffer);
  builder_.add_buffers(buffers);
  builder_.add_description(description);
  builder_.add_subgraphs(subgraphs);
  builder_.add_operator_codes(operator_codes);
  builder_.add_version(version);
  return builder_.Finish();
}

// Convenience variant taking std::vector / C-string arguments; serializes each
// non-null argument into the buffer before delegating to CreateModel().
inline flatbuffers::Offset<Model>
CreateModelDirect(flatbuffers::FlatBufferBuilder &_fbb, uint32_t version = 0,
                  const std::vector<flatbuffers::Offset<OperatorCode>> *operator_codes = nullptr,
                  const std::vector<flatbuffers::Offset<SubGraph>> *subgraphs = nullptr,
                  const char *description = nullptr,
                  const std::vector<flatbuffers::Offset<Buffer>> *buffers = nullptr,
                  const std::vector<int32_t> *metadata_buffer = nullptr,
                  const std::vector<flatbuffers::Offset<Metadata>> *metadata = nullptr)
{
  return circle::CreateModel(
      _fbb, version,
      operator_codes ? _fbb.CreateVector<flatbuffers::Offset<OperatorCode>>(*operator_codes) : 0,
      subgraphs ? _fbb.CreateVector<flatbuffers::Offset<SubGraph>>(*subgraphs) : 0,
      description ? _fbb.CreateString(description) : 0,
      buffers ? _fbb.CreateVector<flatbuffers::Offset<Buffer>>(*buffers) : 0,
      metadata_buffer ? _fbb.CreateVector<int32_t>(*metadata_buffer) : 0,
      metadata ?
_fbb.CreateVector<flatbuffers::Offset<Metadata>>(*metadata) : 0); +} + +inline bool VerifyQuantizationDetails(flatbuffers::Verifier &verifier, const void *obj, + QuantizationDetails type) +{ + switch (type) + { + case QuantizationDetails_NONE: + { + return true; + } + case QuantizationDetails_CustomQuantization: + { + auto ptr = reinterpret_cast<const CustomQuantization *>(obj); + return verifier.VerifyTable(ptr); + } + default: + return false; + } +} + +inline bool +VerifyQuantizationDetailsVector(flatbuffers::Verifier &verifier, + const flatbuffers::Vector<flatbuffers::Offset<void>> *values, + const flatbuffers::Vector<uint8_t> *types) +{ + if (!values || !types) + return !values && !types; + if (values->size() != types->size()) + return false; + for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) + { + if (!VerifyQuantizationDetails(verifier, values->Get(i), + types->GetEnum<QuantizationDetails>(i))) + { + return false; + } + } + return true; +} + +inline bool VerifySparseIndexVector(flatbuffers::Verifier &verifier, const void *obj, + SparseIndexVector type) +{ + switch (type) + { + case SparseIndexVector_NONE: + { + return true; + } + case SparseIndexVector_Int32Vector: + { + auto ptr = reinterpret_cast<const Int32Vector *>(obj); + return verifier.VerifyTable(ptr); + } + case SparseIndexVector_Uint16Vector: + { + auto ptr = reinterpret_cast<const Uint16Vector *>(obj); + return verifier.VerifyTable(ptr); + } + case SparseIndexVector_Uint8Vector: + { + auto ptr = reinterpret_cast<const Uint8Vector *>(obj); + return verifier.VerifyTable(ptr); + } + default: + return false; + } +} + +inline bool +VerifySparseIndexVectorVector(flatbuffers::Verifier &verifier, + const flatbuffers::Vector<flatbuffers::Offset<void>> *values, + const flatbuffers::Vector<uint8_t> *types) +{ + if (!values || !types) + return !values && !types; + if (values->size() != types->size()) + return false; + for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) + { + if 
(!VerifySparseIndexVector(verifier, values->Get(i), types->GetEnum<SparseIndexVector>(i))) + { + return false; + } + } + return true; +} + +inline bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj, + BuiltinOptions type) +{ + switch (type) + { + case BuiltinOptions_NONE: + { + return true; + } + case BuiltinOptions_Conv2DOptions: + { + auto ptr = reinterpret_cast<const Conv2DOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_DepthwiseConv2DOptions: + { + auto ptr = reinterpret_cast<const DepthwiseConv2DOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ConcatEmbeddingsOptions: + { + auto ptr = reinterpret_cast<const ConcatEmbeddingsOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LSHProjectionOptions: + { + auto ptr = reinterpret_cast<const LSHProjectionOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_Pool2DOptions: + { + auto ptr = reinterpret_cast<const Pool2DOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SVDFOptions: + { + auto ptr = reinterpret_cast<const SVDFOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_RNNOptions: + { + auto ptr = reinterpret_cast<const RNNOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_FullyConnectedOptions: + { + auto ptr = reinterpret_cast<const FullyConnectedOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SoftmaxOptions: + { + auto ptr = reinterpret_cast<const SoftmaxOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ConcatenationOptions: + { + auto ptr = reinterpret_cast<const ConcatenationOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_AddOptions: + { + auto ptr = reinterpret_cast<const AddOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_L2NormOptions: + { + auto ptr = 
reinterpret_cast<const L2NormOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LocalResponseNormalizationOptions: + { + auto ptr = reinterpret_cast<const LocalResponseNormalizationOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LSTMOptions: + { + auto ptr = reinterpret_cast<const LSTMOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ResizeBilinearOptions: + { + auto ptr = reinterpret_cast<const ResizeBilinearOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_CallOptions: + { + auto ptr = reinterpret_cast<const CallOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ReshapeOptions: + { + auto ptr = reinterpret_cast<const ReshapeOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SkipGramOptions: + { + auto ptr = reinterpret_cast<const SkipGramOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SpaceToDepthOptions: + { + auto ptr = reinterpret_cast<const SpaceToDepthOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_EmbeddingLookupSparseOptions: + { + auto ptr = reinterpret_cast<const EmbeddingLookupSparseOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_MulOptions: + { + auto ptr = reinterpret_cast<const MulOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_PadOptions: + { + auto ptr = reinterpret_cast<const PadOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_GatherOptions: + { + auto ptr = reinterpret_cast<const GatherOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_BatchToSpaceNDOptions: + { + auto ptr = reinterpret_cast<const BatchToSpaceNDOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SpaceToBatchNDOptions: + { + auto ptr = reinterpret_cast<const SpaceToBatchNDOptions *>(obj); + return 
verifier.VerifyTable(ptr); + } + case BuiltinOptions_TransposeOptions: + { + auto ptr = reinterpret_cast<const TransposeOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ReducerOptions: + { + auto ptr = reinterpret_cast<const ReducerOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SubOptions: + { + auto ptr = reinterpret_cast<const SubOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_DivOptions: + { + auto ptr = reinterpret_cast<const DivOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SqueezeOptions: + { + auto ptr = reinterpret_cast<const SqueezeOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SequenceRNNOptions: + { + auto ptr = reinterpret_cast<const SequenceRNNOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_StridedSliceOptions: + { + auto ptr = reinterpret_cast<const StridedSliceOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ExpOptions: + { + auto ptr = reinterpret_cast<const ExpOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_TopKV2Options: + { + auto ptr = reinterpret_cast<const TopKV2Options *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SplitOptions: + { + auto ptr = reinterpret_cast<const SplitOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LogSoftmaxOptions: + { + auto ptr = reinterpret_cast<const LogSoftmaxOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_CastOptions: + { + auto ptr = reinterpret_cast<const CastOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_DequantizeOptions: + { + auto ptr = reinterpret_cast<const DequantizeOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_MaximumMinimumOptions: + { + auto ptr = reinterpret_cast<const MaximumMinimumOptions *>(obj); + return 
verifier.VerifyTable(ptr); + } + case BuiltinOptions_ArgMaxOptions: + { + auto ptr = reinterpret_cast<const ArgMaxOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LessOptions: + { + auto ptr = reinterpret_cast<const LessOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_NegOptions: + { + auto ptr = reinterpret_cast<const NegOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_PadV2Options: + { + auto ptr = reinterpret_cast<const PadV2Options *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_GreaterOptions: + { + auto ptr = reinterpret_cast<const GreaterOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_GreaterEqualOptions: + { + auto ptr = reinterpret_cast<const GreaterEqualOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LessEqualOptions: + { + auto ptr = reinterpret_cast<const LessEqualOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SelectOptions: + { + auto ptr = reinterpret_cast<const SelectOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SliceOptions: + { + auto ptr = reinterpret_cast<const SliceOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_TransposeConvOptions: + { + auto ptr = reinterpret_cast<const TransposeConvOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SparseToDenseOptions: + { + auto ptr = reinterpret_cast<const SparseToDenseOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_TileOptions: + { + auto ptr = reinterpret_cast<const TileOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ExpandDimsOptions: + { + auto ptr = reinterpret_cast<const ExpandDimsOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_EqualOptions: + { + auto ptr = reinterpret_cast<const EqualOptions *>(obj); + return 
verifier.VerifyTable(ptr); + } + case BuiltinOptions_NotEqualOptions: + { + auto ptr = reinterpret_cast<const NotEqualOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ShapeOptions: + { + auto ptr = reinterpret_cast<const ShapeOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_PowOptions: + { + auto ptr = reinterpret_cast<const PowOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ArgMinOptions: + { + auto ptr = reinterpret_cast<const ArgMinOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_FakeQuantOptions: + { + auto ptr = reinterpret_cast<const FakeQuantOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_PackOptions: + { + auto ptr = reinterpret_cast<const PackOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LogicalOrOptions: + { + auto ptr = reinterpret_cast<const LogicalOrOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_OneHotOptions: + { + auto ptr = reinterpret_cast<const OneHotOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LogicalAndOptions: + { + auto ptr = reinterpret_cast<const LogicalAndOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LogicalNotOptions: + { + auto ptr = reinterpret_cast<const LogicalNotOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_UnpackOptions: + { + auto ptr = reinterpret_cast<const UnpackOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_FloorDivOptions: + { + auto ptr = reinterpret_cast<const FloorDivOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SquareOptions: + { + auto ptr = reinterpret_cast<const SquareOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ZerosLikeOptions: + { + auto ptr = reinterpret_cast<const ZerosLikeOptions *>(obj); + return 
verifier.VerifyTable(ptr); + } + case BuiltinOptions_FillOptions: + { + auto ptr = reinterpret_cast<const FillOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_BidirectionalSequenceLSTMOptions: + { + auto ptr = reinterpret_cast<const BidirectionalSequenceLSTMOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_BidirectionalSequenceRNNOptions: + { + auto ptr = reinterpret_cast<const BidirectionalSequenceRNNOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_UnidirectionalSequenceLSTMOptions: + { + auto ptr = reinterpret_cast<const UnidirectionalSequenceLSTMOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_FloorModOptions: + { + auto ptr = reinterpret_cast<const FloorModOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_RangeOptions: + { + auto ptr = reinterpret_cast<const RangeOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ResizeNearestNeighborOptions: + { + auto ptr = reinterpret_cast<const ResizeNearestNeighborOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LeakyReluOptions: + { + auto ptr = reinterpret_cast<const LeakyReluOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SquaredDifferenceOptions: + { + auto ptr = reinterpret_cast<const SquaredDifferenceOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_MirrorPadOptions: + { + auto ptr = reinterpret_cast<const MirrorPadOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_AbsOptions: + { + auto ptr = reinterpret_cast<const AbsOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SplitVOptions: + { + auto ptr = reinterpret_cast<const SplitVOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_UniqueOptions: + { + auto ptr = reinterpret_cast<const UniqueOptions *>(obj); + return 
verifier.VerifyTable(ptr); + } + case BuiltinOptions_ReverseV2Options: + { + auto ptr = reinterpret_cast<const ReverseV2Options *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_AddNOptions: + { + auto ptr = reinterpret_cast<const AddNOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_GatherNdOptions: + { + auto ptr = reinterpret_cast<const GatherNdOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_CosOptions: + { + auto ptr = reinterpret_cast<const CosOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_WhereOptions: + { + auto ptr = reinterpret_cast<const WhereOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_RankOptions: + { + auto ptr = reinterpret_cast<const RankOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ReverseSequenceOptions: + { + auto ptr = reinterpret_cast<const ReverseSequenceOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_MatrixDiagOptions: + { + auto ptr = reinterpret_cast<const MatrixDiagOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_QuantizeOptions: + { + auto ptr = reinterpret_cast<const QuantizeOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_MatrixSetDiagOptions: + { + auto ptr = reinterpret_cast<const MatrixSetDiagOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_HardSwishOptions: + { + auto ptr = reinterpret_cast<const HardSwishOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_IfOptions: + { + auto ptr = reinterpret_cast<const IfOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_WhileOptions: + { + auto ptr = reinterpret_cast<const WhileOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_DepthToSpaceOptions: + { + auto ptr = reinterpret_cast<const DepthToSpaceOptions *>(obj); + 
return verifier.VerifyTable(ptr); + } + case BuiltinOptions_NonMaxSuppressionV4Options: + { + auto ptr = reinterpret_cast<const NonMaxSuppressionV4Options *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_NonMaxSuppressionV5Options: + { + auto ptr = reinterpret_cast<const NonMaxSuppressionV5Options *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ScatterNdOptions: + { + auto ptr = reinterpret_cast<const ScatterNdOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SelectV2Options: + { + auto ptr = reinterpret_cast<const SelectV2Options *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_DensifyOptions: + { + auto ptr = reinterpret_cast<const DensifyOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SegmentSumOptions: + { + auto ptr = reinterpret_cast<const SegmentSumOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_BatchMatMulOptions: + { + auto ptr = reinterpret_cast<const BatchMatMulOptions *>(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_InstanceNormOptions: + { + auto ptr = reinterpret_cast<const InstanceNormOptions *>(obj); + return verifier.VerifyTable(ptr); + } + default: + return false; + } +} + +inline bool VerifyBuiltinOptionsVector(flatbuffers::Verifier &verifier, + const flatbuffers::Vector<flatbuffers::Offset<void>> *values, + const flatbuffers::Vector<uint8_t> *types) +{ + if (!values || !types) + return !values && !types; + if (values->size() != types->size()) + return false; + for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) + { + if (!VerifyBuiltinOptions(verifier, values->Get(i), types->GetEnum<BuiltinOptions>(i))) + { + return false; + } + } + return true; +} + +inline const circle::Model *GetModel(const void *buf) +{ + return flatbuffers::GetRoot<circle::Model>(buf); +} + +inline const circle::Model *GetSizePrefixedModel(const void *buf) +{ + return 
flatbuffers::GetSizePrefixedRoot<circle::Model>(buf); +} + +inline const char *ModelIdentifier() { return "CIR0"; } + +inline bool ModelBufferHasIdentifier(const void *buf) +{ + return flatbuffers::BufferHasIdentifier(buf, ModelIdentifier()); +} + +inline bool VerifyModelBuffer(flatbuffers::Verifier &verifier) +{ + return verifier.VerifyBuffer<circle::Model>(ModelIdentifier()); +} + +inline bool VerifySizePrefixedModelBuffer(flatbuffers::Verifier &verifier) +{ + return verifier.VerifySizePrefixedBuffer<circle::Model>(ModelIdentifier()); +} + +inline const char *ModelExtension() { return "circle"; } + +inline void FinishModelBuffer(flatbuffers::FlatBufferBuilder &fbb, + flatbuffers::Offset<circle::Model> root) +{ + fbb.Finish(root, ModelIdentifier()); +} + +inline void FinishSizePrefixedModelBuffer(flatbuffers::FlatBufferBuilder &fbb, + flatbuffers::Offset<circle::Model> root) +{ + fbb.FinishSizePrefixed(root, ModelIdentifier()); +} + +} // namespace circle + +#endif // FLATBUFFERS_GENERATED_CIRCLESCHEMA_CIRCLE_H_ diff --git a/runtime/onert/frontend/nnapi/ANeuralNetworksModel.test.cc b/runtime/onert/frontend/nnapi/ANeuralNetworksModel.test.cc new file mode 100644 index 000000000..15a279a7e --- /dev/null +++ b/runtime/onert/frontend/nnapi/ANeuralNetworksModel.test.cc @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <gtest/gtest.h> + +#include "wrapper/ANeuralNetworksModel.h" + +TEST(MODEL, model_build) +{ + ANeuralNetworksModel model; + ASSERT_EQ(model.isFinished(), false); +} diff --git a/runtime/onert/frontend/nnapi/CMakeLists.txt b/runtime/onert/frontend/nnapi/CMakeLists.txt new file mode 100644 index 000000000..b66b32e89 --- /dev/null +++ b/runtime/onert/frontend/nnapi/CMakeLists.txt @@ -0,0 +1,27 @@ +file(GLOB_RECURSE SOURCES_FRONTEND "*.cc") +file(GLOB_RECURSE TESTS_FRONTEND "*.test.cc") +list(REMOVE_ITEM SOURCES_FRONTEND ${TESTS_FRONTEND}) + +set(LIB_ONERT onert) + +add_library(${LIB_ONERT} SHARED ${SOURCES_FRONTEND}) +target_link_libraries(${LIB_ONERT} PUBLIC nnfw-nnapi-header) +target_link_libraries(${LIB_ONERT} PUBLIC onert_core) # TODO Link PRIVATE onert_core +target_link_libraries(${LIB_ONERT} PRIVATE nnfw_common) +target_link_libraries(${LIB_ONERT} PRIVATE nnfw_coverage) + +set_target_properties(${LIB_ONERT} PROPERTIES OUTPUT_NAME neuralnetworks) + +install(TARGETS ${LIB_ONERT} DESTINATION lib) + +if(NOT ENABLE_TEST) + return() +endif(NOT ENABLE_TEST) + +add_executable(test_onert_frontend_nnapi ${TESTS_FRONTEND}) + +target_link_libraries(test_onert_frontend_nnapi PRIVATE ${LIB_ONERT} dl) +target_link_libraries(test_onert_frontend_nnapi PRIVATE gtest) +target_link_libraries(test_onert_frontend_nnapi PRIVATE gtest_main) + +install(TARGETS test_onert_frontend_nnapi DESTINATION unittest) diff --git a/runtime/onert/frontend/nnapi/compilation.cc b/runtime/onert/frontend/nnapi/compilation.cc new file mode 100644 index 000000000..0823cb456 --- /dev/null +++ b/runtime/onert/frontend/nnapi/compilation.cc @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <NeuralNetworks.h> + +#include <new> + +#include "wrapper/ANeuralNetworksModel.h" +#include "wrapper/ANeuralNetworksCompilation.h" +#include "util/logging.h" + +// +// NNAPI Implementation +// +int ANeuralNetworksCompilation_create(ANeuralNetworksModel *model, + ANeuralNetworksCompilation **compilation) +{ + if ((model == nullptr) || (compilation == nullptr)) + { + VERBOSE(NNAPI::Compilation) << "create: Incorrect null pointer parameter(s)" << std::endl; + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + + if (!model->isFinished()) + { + VERBOSE(NNAPI::Compilation) << "create: Model define is not finished" << std::endl; + return ANEURALNETWORKS_BAD_STATE; + } + + std::shared_ptr<onert::ir::Graph> internal; + + model->release(internal); + + *compilation = new (std::nothrow) ANeuralNetworksCompilation(internal); + if (*compilation == nullptr) + { + VERBOSE(NNAPI::Compilation) << "create: Fail to create compilation object" << std::endl; + return ANEURALNETWORKS_OUT_OF_MEMORY; + } + + return ANEURALNETWORKS_NO_ERROR; +} + +int ANeuralNetworksCompilation_finish(ANeuralNetworksCompilation *compilation) +{ + if (compilation == nullptr) + { + VERBOSE(NNAPI::Compilation) << "finish: Incorrect null pointer parameter" << std::endl; + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + + if (compilation->state() != ::onert::compiler::State::CREATED) + { + VERBOSE(NNAPI::Compilation) << "finish: Already finished" << std::endl; + return ANEURALNETWORKS_BAD_STATE; + } + + if (!compilation->finish()) + { + VERBOSE(NNAPI::Compilation) << "finish: Fail to 
compile" << std::endl; + return ANEURALNETWORKS_BAD_STATE; + } + + return ANEURALNETWORKS_NO_ERROR; +} + +void ANeuralNetworksCompilation_free(ANeuralNetworksCompilation *compilation) +{ + delete compilation; +} + +int ANeuralNetworksCompilation_setPreference(ANeuralNetworksCompilation *compilation, + int32_t preference) +{ + if (compilation == nullptr) + { + VERBOSE(NNAPI::Compilation) << "setPreference: Incorrect null pointer parameter" << std::endl; + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + + if (compilation->state() != ::onert::compiler::State::CREATED) + { + VERBOSE(NNAPI::Compilation) << "setPreference: Already finished" << std::endl; + return ANEURALNETWORKS_BAD_STATE; + } + + const PreferenceCode FIRST_PREFERENCE_CODE = ANEURALNETWORKS_PREFER_LOW_POWER; + const PreferenceCode LAST_PREFERENCE_CODE = ANEURALNETWORKS_PREFER_SUSTAINED_SPEED; + if ((preference < FIRST_PREFERENCE_CODE) || (preference > LAST_PREFERENCE_CODE)) + { + VERBOSE(NNAPI::Compilation) << "setPreference: Incorrect preference code" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + // NYI: nothing to set + return ANEURALNETWORKS_NO_ERROR; +} diff --git a/runtime/onert/frontend/nnapi/event.cc b/runtime/onert/frontend/nnapi/event.cc new file mode 100644 index 000000000..593b74e90 --- /dev/null +++ b/runtime/onert/frontend/nnapi/event.cc @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <NeuralNetworks.h> + +#include "wrapper/ANeuralNetworksEvent.h" + +int ANeuralNetworksEvent_wait(ANeuralNetworksEvent *event) +{ + if (event == nullptr) + { + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + + if (!event->waitFinish()) + { + return ANEURALNETWORKS_BAD_STATE; + } + + return ANEURALNETWORKS_NO_ERROR; +} + +void ANeuralNetworksEvent_free(ANeuralNetworksEvent *event) { delete event; } diff --git a/runtime/onert/frontend/nnapi/execution.cc b/runtime/onert/frontend/nnapi/execution.cc new file mode 100644 index 000000000..6aaca1b4c --- /dev/null +++ b/runtime/onert/frontend/nnapi/execution.cc @@ -0,0 +1,480 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
 */

#include <NeuralNetworks.h>

#include <new>

#include "wrapper/ANeuralNetworksCompilation.h"
#include "wrapper/ANeuralNetworksExecution.h"
#include "wrapper/ANeuralNetworksMemory.h"
#include "wrapper/ANeuralNetworksEvent.h"
#include "wrapper/NNAPIConvert.h"
#include "util/logging.h"

//
// NNAPI Implementation
//
// Creates an execution object from an already-compiled compilation.
// Fails with BAD_STATE when the compilation was never finished
// (publish() yields a null executor in that case).
int ANeuralNetworksExecution_create(ANeuralNetworksCompilation *compilation,
                                    ANeuralNetworksExecution **execution)
{
  if ((compilation == nullptr) || (execution == nullptr))
  {
    VERBOSE(NNAPI::Execution) << "create: Incorrect null pointer parameter(s)" << std::endl;
    return ANEURALNETWORKS_UNEXPECTED_NULL;
  }

  std::shared_ptr<onert::exec::IExecutor> executor;

  compilation->publish(executor);

  if (executor == nullptr)
  {
    VERBOSE(NNAPI::Execution) << "create: Never compiled yet" << std::endl;
    return ANEURALNETWORKS_BAD_STATE;
  }

  *execution = new (std::nothrow) ANeuralNetworksExecution{executor};
  if (*execution == nullptr)
  {
    VERBOSE(NNAPI::Execution) << "create: Fail to create execution object" << std::endl;
    return ANEURALNETWORKS_OUT_OF_MEMORY;
  }

  return ANEURALNETWORKS_NO_ERROR;
}

// NOTE Handle optional input
//      Unspecified shape on model build
//        Optional and omitted input on execution: skip input setting (workaround for LSTM)
//        Optional but not omitted input on execution: cannot handle
//        Normal input on execution: cannot handle
//      Fully specified shape on model build
//        Optional input on execution: cannot handle
//        Normal input: handle normally
// Binds a caller-provided read-only buffer to the index-th model input.
int ANeuralNetworksExecution_setInput(ANeuralNetworksExecution *execution, int32_t index,
                                      const ANeuralNetworksOperandType *type, const void *buffer,
                                      size_t length)
{
  // Don't check type
  // Comment about ANeuralNetworksOperandType in NeuralNetworks.h:
  // If the input or output is optional and omitted then it need not have a fully specified tensor
  // operand type
  if ((execution == nullptr) || ((buffer == nullptr) && (length != 0)))
  {
    VERBOSE(NNAPI::Execution) << "setInput: Incorrect null pointer parameter(s)" << std::endl;
    return ANEURALNETWORKS_UNEXPECTED_NULL;
  }

  if ((buffer != nullptr) && (length == 0))
  {
    VERBOSE(NNAPI::Execution) << "setInput: Zero length input" << std::endl;
    return ANEURALNETWORKS_BAD_DATA;
  }

  const auto operand_index = execution->getInputOperandIndex(index);
  if (!operand_index.valid())
  {
    VERBOSE(NNAPI::Execution) << "setInput: Invalid input index" << std::endl;
    return ANEURALNETWORKS_BAD_DATA;
  }

  // Omitted optional input
  // LSTM operation's some inputs can be optional input
  if ((buffer == nullptr) && (length == 0))
  {
    if (execution->haveUnspecifiedDims(operand_index))
    {
      return ANEURALNETWORKS_NO_ERROR;
    }
    else
    {
      VERBOSE(NNAPI::Execution) << "setInput: Cannot handle fully-specified shape on model build "
                                   "but omitted input on execution"
                                << std::endl;
      return ANEURALNETWORKS_BAD_DATA;
    }
  }

  if (type != nullptr)
  {
    // Caller supplied a type: it must agree with the model operand exactly.
    if (!execution->compareDataType(type, operand_index))
    {
      VERBOSE(NNAPI::Execution) << "setInput: Data type mismatch" << std::endl;
      return ANEURALNETWORKS_BAD_DATA;
    }

    if (!execution->compareShape(type, operand_index))
    {
      VERBOSE(NNAPI::Execution) << "setInput: Shape mismatch" << std::endl;
      return ANEURALNETWORKS_BAD_DATA;
    }

    if (NNAPIConvert::calculateSizeFromType(type) != length)
    {
      VERBOSE(NNAPI::Execution) << "setInput: Invalid length" << std::endl;
      return ANEURALNETWORKS_BAD_DATA;
    }
  }
  else
  {
    // No type given: fall back to the model-build shape, which must then
    // be fully specified so the byte length can be checked.
    if (execution->haveUnspecifiedDims(operand_index))
    {
      VERBOSE(NNAPI::Execution) << "setInput: Unspecified dimension value" << std::endl;
      return ANEURALNETWORKS_BAD_DATA;
    }

    if (execution->getOperandSize(operand_index) != length)
    {
      VERBOSE(NNAPI::Execution) << "setInput: Invalid length" << std::endl;
      return ANEURALNETWORKS_BAD_DATA;
    }
  }

  if (!execution->setInput(index, type, buffer, length))
  {
    VERBOSE(NNAPI::Execution) << "setInput: Fail to set input" << std::endl;
    return ANEURALNETWORKS_BAD_DATA;
  }

  return ANEURALNETWORKS_NO_ERROR;
}

// Binds a caller-provided writable buffer to the index-th model output.
// A null buffer with zero length marks the output as omitted (optional).
int ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution *execution, int32_t index,
                                       const ANeuralNetworksOperandType *type, void *buffer,
                                       size_t length)
{
  // Don't check type
  // Comment about ANeuralNetworksOperandType in NeuralNetworks.h:
  // If the input or output is optional and omitted then it need not have a fully specified tensor
  // operand type
  if ((execution == nullptr) || ((buffer == nullptr) && (length != 0)))
  {
    VERBOSE(NNAPI::Execution) << "setOutput: Incorrect null pointer parameter(s)" << std::endl;
    return ANEURALNETWORKS_UNEXPECTED_NULL;
  }

  if ((buffer != nullptr) && (length == 0))
  {
    VERBOSE(NNAPI::Execution) << "setOutput: Zero length output" << std::endl;
    return ANEURALNETWORKS_BAD_DATA;
  }

  // Handle optional output
  if (buffer == nullptr)
  {
    return ANEURALNETWORKS_NO_ERROR;
  }

  const auto operand_index = execution->getOutputOperandIndex(index);
  if (!operand_index.valid())
  {
    VERBOSE(NNAPI::Execution) << "setOutput: Invalid output index" << std::endl;
    return ANEURALNETWORKS_BAD_DATA;
  }

  if (type != nullptr)
  {
    if (!execution->compareDataType(type, operand_index))
    {
      VERBOSE(NNAPI::Execution) << "setOutput: Data type mismatch" << std::endl;
      return ANEURALNETWORKS_BAD_DATA;
    }

    if (!execution->compareShape(type, operand_index))
    {
      VERBOSE(NNAPI::Execution) << "setOutput: Shape mismatch" << std::endl;
      return ANEURALNETWORKS_BAD_DATA;
    }

    if (NNAPIConvert::calculateSizeFromType(type) != length)
    {
      VERBOSE(NNAPI::Execution) << "setOutput: Invalid length" << std::endl;
      return ANEURALNETWORKS_BAD_DATA;
    }
  }
  else
  {
    if (execution->haveUnspecifiedDims(operand_index))
    {
      VERBOSE(NNAPI::Execution) << "setOutput: Unspecified dimension value" << std::endl;
      return ANEURALNETWORKS_BAD_DATA;
    }

    if (execution->getOperandSize(operand_index) !=
length) + { + VERBOSE(NNAPI::Execution) << "setInput: Invalid length" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + } + + if (!execution->setOutput(index, type, buffer, length)) + { + VERBOSE(NNAPI::Execution) << "setOutput: Fail to set output" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + return ANEURALNETWORKS_NO_ERROR; +} + +int ANeuralNetworksExecution_startCompute(ANeuralNetworksExecution *execution, + ANeuralNetworksEvent **event) +{ + if ((execution == nullptr) || (event == nullptr)) + { + VERBOSE(NNAPI::Execution) << "startCompute: Incorrect null pointer parameter(s)" << std::endl; + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + + // TODO: Handle event + auto instance = execution->instance(); + *event = new (std::nothrow) ANeuralNetworksEvent{instance}; + if (*event == nullptr) + { + VERBOSE(NNAPI::Execution) << "startCompute: Fail to create event" << std::endl; + return ANEURALNETWORKS_OUT_OF_MEMORY; + } + + if (!execution->startExecute()) + { + VERBOSE(NNAPI::Execution) << "startCompute: Fail to start execution" << std::endl; + return ANEURALNETWORKS_BAD_STATE; + } + + return ANEURALNETWORKS_NO_ERROR; +} + +int ANeuralNetworksExecution_compute(ANeuralNetworksExecution *execution) +{ + if (execution == nullptr) + { + VERBOSE(NNAPI::Execution) << "Compute: Incorrect null pointer parameter" << std::endl; + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + + if (!execution->execute()) + { + VERBOSE(NNAPI::Execution) << "Compute: Fail to execution" << std::endl; + return ANEURALNETWORKS_BAD_STATE; + } + + return ANEURALNETWORKS_NO_ERROR; +} + +void ANeuralNetworksExecution_free(ANeuralNetworksExecution *execution) { delete execution; } + +int ANeuralNetworksExecution_setInputFromMemory(ANeuralNetworksExecution *execution, int32_t index, + const ANeuralNetworksOperandType *type, + const ANeuralNetworksMemory *memory, size_t offset, + size_t length) +{ + if ((execution == nullptr) || (memory == nullptr)) + { + VERBOSE(NNAPI::Execution) << 
"setInputFromMemory: Incorrect null pointer parameter(s)" + << std::endl; + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + + if (length == 0) + { + VERBOSE(NNAPI::Execution) << "setInputFromMemory: Zero length input" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + const auto operand_index = execution->getInputOperandIndex(index); + if (!operand_index.valid()) + { + VERBOSE(NNAPI::Execution) << "setInputFromMemory: Invalid input index" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if (type != nullptr) + { + if (!execution->compareDataType(type, operand_index)) + { + VERBOSE(NNAPI::Execution) << "setInputFromMemory: Data type mismatch" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if (!execution->compareShape(type, operand_index)) + { + VERBOSE(NNAPI::Execution) << "setInputFromMemory: Shape mismatch" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if (NNAPIConvert::calculateSizeFromType(type) != length) + { + VERBOSE(NNAPI::Execution) << "setInputFromMemory: Invalid length" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + } + else + { + if (execution->haveUnspecifiedDims(operand_index)) + { + VERBOSE(NNAPI::Execution) << "setInputFromMemory: Unspecified dimension value" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if (execution->getOperandSize(operand_index) != length) + { + VERBOSE(NNAPI::Execution) << "setInputFromMemory: Invalid length" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + } + + if (!memory->vaildAccess(offset, length)) + { + VERBOSE(NNAPI::Execution) << "setInputFromMemory: Invalid memory access" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if (!execution->setInput(index, type, reinterpret_cast<const void *>(memory->base() + offset), + length)) + { + VERBOSE(NNAPI::Execution) << "setInputFromMemory: Fail to set input" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + return ANEURALNETWORKS_NO_ERROR; +} + +int 
ANeuralNetworksExecution_setOutputFromMemory(ANeuralNetworksExecution *execution, int32_t index, + const ANeuralNetworksOperandType *type, + const ANeuralNetworksMemory *memory, size_t offset, + size_t length) +{ + if ((execution == nullptr) || (memory == nullptr)) + { + VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Incorrect null pointer parameter(s)" + << std::endl; + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + + if (length == 0) + { + VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Zero length input" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + const auto operand_index = execution->getOutputOperandIndex(index); + if (!operand_index.valid()) + { + VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Invalid output index" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if (type != nullptr) + { + if (!execution->compareDataType(type, operand_index)) + { + VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Data type mismatch" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if (!execution->compareShape(type, operand_index)) + { + VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Shape mismatch" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if (NNAPIConvert::calculateSizeFromType(type) != length) + { + VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Invalid length" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + } + else + { + if (execution->haveUnspecifiedDims(operand_index)) + { + VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Unspecified dimension value" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if (execution->getOperandSize(operand_index) != length) + { + VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Invalid length" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + } + + if (!memory->vaildAccess(offset, length)) + { + VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Invalid memory access" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if 
(!execution->setOutput(index, type, reinterpret_cast<void *>(memory->base() + offset), length)) + { + VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Fail to set input" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + return ANEURALNETWORKS_NO_ERROR; +} + +int ANeuralNetworksExecution_getOutputOperandRank(ANeuralNetworksExecution *execution, + int32_t index, uint32_t *rank) +{ + if ((execution == nullptr) || (rank == nullptr)) + { + VERBOSE(NNAPI::Execution) << "getOutputOperandRank: Incorrect null pointer parameter(s)" + << std::endl; + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + + const auto operand_index = execution->getOutputOperandIndex(index); + if (!operand_index.valid()) + { + VERBOSE(NNAPI::Execution) << "getOutputOperandRank: Invalid output index" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if (!execution->getOutputOperandRank(index, rank)) + { + VERBOSE(NNAPI::Execution) << "getOutputOperandRank: Fail to get rank" << std::endl; + return ANEURALNETWORKS_BAD_STATE; + } + + return ANEURALNETWORKS_NO_ERROR; +} + +int ANeuralNetworksExecution_getOutputOperandDimensions(ANeuralNetworksExecution *execution, + int32_t index, uint32_t *dimensions) +{ + if ((execution == nullptr) || (dimensions == nullptr)) + { + VERBOSE(NNAPI::Execution) << "getOutputOperandDimensions: Incorrect null pointer parameter(s)" + << std::endl; + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + + const auto operand_index = execution->getOutputOperandIndex(index); + if (!operand_index.valid()) + { + VERBOSE(NNAPI::Execution) << "getOutputOperandDimensions: Invalid output index" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if (!execution->getOutputOperandDimensions(index, dimensions)) + { + VERBOSE(NNAPI::Execution) << "getOutputOperandDimensions: Fail to get rank" << std::endl; + return ANEURALNETWORKS_BAD_STATE; + } + + return ANEURALNETWORKS_NO_ERROR; +} diff --git a/runtime/onert/frontend/nnapi/memory.cc b/runtime/onert/frontend/nnapi/memory.cc new 
file mode 100644 index 000000000..6e568a926 --- /dev/null +++ b/runtime/onert/frontend/nnapi/memory.cc @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <NeuralNetworks.h> +#include <sys/mman.h> +#include <new> +#include <memory> + +#include <memory> +#include "wrapper/ANeuralNetworksMemory.h" + +int ANeuralNetworksMemory_createFromFd(size_t size, int protect, int fd, size_t offset, + ANeuralNetworksMemory **memory) +{ + if (memory == nullptr) + { + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + + *memory = new (std::nothrow) ANeuralNetworksMemory{size, protect, fd, offset}; + if (*memory == nullptr) + { + return ANEURALNETWORKS_OUT_OF_MEMORY; + } + + return ANEURALNETWORKS_NO_ERROR; +} + +void ANeuralNetworksMemory_free(ANeuralNetworksMemory *memory) { delete memory; } diff --git a/runtime/onert/frontend/nnapi/model.cc b/runtime/onert/frontend/nnapi/model.cc new file mode 100644 index 000000000..e201a6753 --- /dev/null +++ b/runtime/onert/frontend/nnapi/model.cc @@ -0,0 +1,411 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <NeuralNetworks.h>
#include <NeuralNetworksEx.h>

#include <new>

#include "wrapper/ANeuralNetworksModel.h"
#include "wrapper/ANeuralNetworksMemory.h"
#include "util/logging.h"

// Allocates an empty model handle.
int ANeuralNetworksModel_create(ANeuralNetworksModel **model)
{
  if (model == nullptr)
  {
    VERBOSE(NNAPI::Model) << "create: Incorrect null pointer parameter" << std::endl;
    return ANEURALNETWORKS_UNEXPECTED_NULL;
  }

  *model = new (std::nothrow) ANeuralNetworksModel{};
  if (*model == nullptr)
  {
    VERBOSE(NNAPI::Model) << "create: Fail to create model object" << std::endl;
    return ANEURALNETWORKS_OUT_OF_MEMORY;
  }

  return ANEURALNETWORKS_NO_ERROR;
}

void ANeuralNetworksModel_free(ANeuralNetworksModel *model) { delete model; }

// Appends one operand (tensor or scalar) to the model; rejects calls after
// the model has been finished and validates quantization parameters.
int ANeuralNetworksModel_addOperand(ANeuralNetworksModel *model,
                                    const ANeuralNetworksOperandType *type)
{
  if ((model == nullptr) || (type == nullptr))
  {
    VERBOSE(NNAPI::Model) << "addOperand: Incorrect null pointer parameter(s)" << std::endl;
    return ANEURALNETWORKS_UNEXPECTED_NULL;
  }

  if (model->isFinished())
  {
    VERBOSE(NNAPI::Model) << "addOperand: Already finished" << std::endl;
    return ANEURALNETWORKS_BAD_STATE;
  }

  // scale and zeroPoint should be zero for scalars and non-fixed point tensors
  // Quantized:
  //  scale: a 32 bit floating point value greater than zero
  //  zeroPoint: a 32 bit integer, in range [0, 255]
  if (type->type == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM)
  {
    if (!(type->scale > 0.0f))
    {
      VERBOSE(NNAPI::Model) << "addOperand: Incorrect scale value for quantization" << std::endl;
      return ANEURALNETWORKS_BAD_DATA;
    }

    if ((type->zeroPoint < 0) || (type->zeroPoint > 255))
    {
      VERBOSE(NNAPI::Model) << "addOperand: Incorrect zeroPoint value for quantization"
                            << std::endl;
      return ANEURALNETWORKS_BAD_DATA;
    }
  }
  // NOTE Validation of scale and zeroPoint would be skipped for a while.
  //      We do not know whether scalar type can have scale and zeroPoint.
  //      To pass ValidationTest and GeneratedTest, this validation code
  //      would not be implemented until we can define this issue clearly.
  //
  // scale and zeroPoint should be zero for scalars and non-fixed point tensors
  // else if ((type->scale != 0.0f) || (type->zeroPoint != 0))
  // {
  //   return ANEURALNETWORKS_BAD_DATA;
  // }

  // dimensionCount should be zero for scalars
  if ((type->dimensionCount != 0) &&
      ((type->type == ANEURALNETWORKS_FLOAT32) || (type->type == ANEURALNETWORKS_INT32) ||
       (type->type == ANEURALNETWORKS_UINT32)))
  {
    VERBOSE(NNAPI::Model) << "addOperand: Incorrect data type" << std::endl;
    return ANEURALNETWORKS_BAD_DATA;
  }

  if (!model->addOperand(type))
  {
    VERBOSE(NNAPI::Model) << "addOperand: Fail to add operand" << std::endl;
    return ANEURALNETWORKS_BAD_DATA;
  }

  return ANEURALNETWORKS_NO_ERROR;
}

// Sets a constant value for an operand; a null buffer with zero length marks
// the operand as an omitted optional.
int ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel *model, int32_t index,
                                         const void *buffer, size_t length)
{
  const bool optional_operand = ((buffer == nullptr) && (length == 0));

  if ((model == nullptr) || ((buffer == nullptr) && (length != 0)))
  {
    VERBOSE(NNAPI::Model) << "setOperandValue: Incorrect null pointer parameter(s)" << std::endl;
    return ANEURALNETWORKS_UNEXPECTED_NULL;
  }

  if (model->isFinished())
  {
    VERBOSE(NNAPI::Model) << "setOperandValue: Already finished" << std::endl;
    return ANEURALNETWORKS_BAD_STATE;
  }

  // Negative index value is not allowed
  if (index < 0)
  {
    VERBOSE(NNAPI::Model) << "setOperandValue: Invalid index value (negative)" << std::endl;
    return ANEURALNETWORKS_BAD_DATA;
  }
  // NOTE OperandIndex uses uint32_t as its underlying type as various NNAPI
  //      functions such as ANeuralNetworksModel_addOperation use uint32_t to represent operand
  //      index
  //      ANeuralNetworksModel_setOperandValue, however, uses int32_t to represent operand index.
  //
  //      Below, static_cast<uint32_t>(...) is introduced to eliminate compiler warning.
  uint32_t ind = static_cast<uint32_t>(index);

  if (!model->isExistOperand(ind))
  {
    VERBOSE(NNAPI::Model) << "setOperandValue: Invalid index value (not exist)" << std::endl;
    return ANEURALNETWORKS_BAD_DATA;
  }

  if (!optional_operand && (model->operandSize(ind) != length))
  {
    VERBOSE(NNAPI::Model) << "setOperandValue: Invalid data length" << std::endl;
    return ANEURALNETWORKS_BAD_DATA;
  }

  if (model->isUsageSet(ind))
  {
    VERBOSE(NNAPI::Model) << "setOperandValue: Already set operand" << std::endl;
    return ANEURALNETWORKS_BAD_DATA;
  }

  // NNAPI spec in NeuralNetworks.h
  // For values of length greater than ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES,
  // the application is responsible for not changing the content of this region
  // until all executions using this model have completed
  bool copy_value = false;
  if (length <= ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES)
  {
    copy_value = true;
  }

  if (!model->setOperandValue(ind, buffer, length, optional_operand, copy_value))
  {
    VERBOSE(NNAPI::Model) << "setOperandValue: Fail to set operand value" << std::endl;
    return ANEURALNETWORKS_BAD_DATA;
  }

  return ANEURALNETWORKS_NO_ERROR;
}

// Sets a constant value for an operand from a region of a memory object.
int ANeuralNetworksModel_setOperandValueFromMemory(ANeuralNetworksModel *model, int32_t index,
                                                   const ANeuralNetworksMemory *memory,
                                                   size_t offset, size_t length)
{
  if ((model == nullptr) || (memory == nullptr))
  {
    VERBOSE(NNAPI::Model) << "setOperandValueFromMemory: Incorrect null pointer parameter(s)"
                          << std::endl;
    return ANEURALNETWORKS_UNEXPECTED_NULL;
  }

  if (model->isFinished())
  {
    VERBOSE(NNAPI::Model) << "setOperandValueFromMemory: Already finished" << std::endl;
    return ANEURALNETWORKS_BAD_STATE;
  }

  // Negative index value is not allowed
  if (index < 0)
  {
    VERBOSE(NNAPI::Model) << "setOperandValueFromMemory: Invalid index value (negative)"
                          << std::endl;
    return ANEURALNETWORKS_BAD_DATA;
  }
  // NOTE OperandIndex uses uint32_t as its underlying type as various NNAPI
  //      functions such as ANeuralNetworksModel_addOperation use uint32_t to represent operand
  //      index
  //      ANeuralNetworksModel_setOperandValue, however, uses int32_t to represent operand index.
  //
  //      Below, static_cast<uint32_t>(...) is introduced to eliminate compiler warning.
  uint32_t ind = static_cast<uint32_t>(index);

  if (!model->isExistOperand(ind))
  {
    VERBOSE(NNAPI::Model) << "setOperandValueFromMemory: Invalid index value (not exist)"
                          << std::endl;
    return ANEURALNETWORKS_BAD_DATA;
  }

  // Also verifies the [offset, offset+length) window fits inside the memory
  if ((model->operandSize(ind) != length) || (memory->size() < (offset + length)))
  {
    VERBOSE(NNAPI::Model) << "setOperandValueFromMemory: Invalid data length" << std::endl;
    return ANEURALNETWORKS_BAD_DATA;
  }

  if (model->isUsageSet(ind))
  {
    VERBOSE(NNAPI::Model) << "setOperandValueFromMemory: Already set operand" << std::endl;
    return ANEURALNETWORKS_BAD_DATA;
  }

  if (!model->setOperandValue(ind, memory->base() + offset, length))
  {
    VERBOSE(NNAPI::Model) << "setOperandValueFromMemory: Fail to set operand value" << std::endl;
    return ANEURALNETWORKS_BAD_DATA;
  }

  return ANEURALNETWORKS_NO_ERROR;
}

// Appends one built-in operation to the model.
int ANeuralNetworksModel_addOperation(ANeuralNetworksModel *model,
                                      ANeuralNetworksOperationType type, uint32_t inputCount,
                                      const uint32_t *inputs, uint32_t outputCount,
                                      const uint32_t *outputs)
{
  if ((model == nullptr) || (inputs == nullptr) || (outputs == nullptr))
  {
    VERBOSE(NNAPI::Model) << "addOperation: Incorrect null pointer parameter(s)" << std::endl;
    return
ANEURALNETWORKS_UNEXPECTED_NULL; + } + + if (model->isFinished()) + { + VERBOSE(NNAPI::Model) << "addOperation: Already finished" << std::endl; + return ANEURALNETWORKS_BAD_STATE; + } + + const ANeuralNetworksOperationType FIRST_OPERATION = ANEURALNETWORKS_ADD; + const ANeuralNetworksOperationType LAST_OPERATION = ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR; + if ((type < FIRST_OPERATION) || (type > LAST_OPERATION)) + { + return ANEURALNETWORKS_BAD_DATA; + } + + for (uint32_t i = 0; i < outputCount; i++) + { + if (model->isUsageSet(outputs[i])) + { + VERBOSE(NNAPI::Model) << "addOperation: Already set output operand" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + } + + if (!model->addOperation(type, inputCount, inputs, outputCount, outputs)) + { + VERBOSE(NNAPI::Model) << "addOperation: Fail to add operation" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + return ANEURALNETWORKS_NO_ERROR; +} + +int ANeuralNetworksModel_addOperationEx(ANeuralNetworksModel *model, + ANeuralNetworksOperationTypeEx type, uint32_t inputCount, + const uint32_t *inputs, uint32_t outputCount, + const uint32_t *outputs) +{ + if ((model == nullptr) || (inputs == nullptr) || (outputs == nullptr)) + { + VERBOSE(NNAPI::Model) << "addOperation: Incorrect null pointer parameter(s)" << std::endl; + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + + if (model->isFinished()) + { + VERBOSE(NNAPI::Model) << "addOperation: Already finished" << std::endl; + return ANEURALNETWORKS_BAD_STATE; + } + + const ANeuralNetworksOperationTypeEx FIRST_OPERATION = ANEURALNETWORKS_CAST_EX; + const ANeuralNetworksOperationTypeEx LAST_OPERATION = ANEURALNETWORKS_SHAPE_EX; + if ((type < FIRST_OPERATION) || (type > LAST_OPERATION)) + { + VERBOSE(NNAPI::Model) << "addOperation: Invalid operation type" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + for (uint32_t i = 0; i < outputCount; i++) + { + if (model->isUsageSet(outputs[i])) + { + VERBOSE(NNAPI::Model) << "addOperation: Already set output 
operand" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + } + + if (!model->addOperationEx(type, inputCount, inputs, outputCount, outputs)) + { + VERBOSE(NNAPI::Model) << "addOperation: Fail to add operation" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + return ANEURALNETWORKS_NO_ERROR; +} + +int ANeuralNetworksModel_identifyInputsAndOutputs(ANeuralNetworksModel *model, uint32_t inputCount, + const uint32_t *inputs, uint32_t outputCount, + const uint32_t *outputs) +{ + if ((model == nullptr) || (inputs == nullptr) || (outputs == nullptr)) + { + VERBOSE(NNAPI::Model) << "identifyInputsAndOutputs: Incorrect null pointer parameter(s)" + << std::endl; + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + + if (model->isFinished()) + { + VERBOSE(NNAPI::Model) << "identifyInputsAndOutputs: Already finished" << std::endl; + return ANEURALNETWORKS_BAD_STATE; + } + + for (uint32_t n = 0; n < inputCount; ++n) + { + uint32_t ind = inputs[n]; + if (model->isUsageSet(ind)) + { + VERBOSE(NNAPI::Model) << "identifyInputsAndOutputs: Already set input operand" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if (!model->addModelInput(ind)) + { + VERBOSE(NNAPI::Model) << "identifyInputsAndOutputs: Fail to add input" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + } + + for (uint32_t n = 0; n < outputCount; ++n) + { + uint32_t ind = outputs[n]; + + if (!model->isOperationOutput(ind)) + { + VERBOSE(NNAPI::Model) << "identifyInputsAndOutputs: Need to set output operand" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + + if (!model->addModelOutput(ind)) + { + VERBOSE(NNAPI::Model) << "identifyInputsAndOutputs: Fail to add output" << std::endl; + return ANEURALNETWORKS_BAD_DATA; + } + } + + return ANEURALNETWORKS_NO_ERROR; +} + +int ANeuralNetworksModel_finish(ANeuralNetworksModel *model) +{ + if (model == nullptr) + { + VERBOSE(NNAPI::Model) << "finish: Incorrect null pointer parameter" << std::endl; + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + + if 
(model->isFinished()) + { + VERBOSE(NNAPI::Model) << "finish: Already finished" << std::endl; + return ANEURALNETWORKS_BAD_STATE; + } + + if (!model->finish()) + { + VERBOSE(NNAPI::Model) << "finish: Fail to generate internal graph" << std::endl; + return ANEURALNETWORKS_BAD_STATE; + } + + return ANEURALNETWORKS_NO_ERROR; +} + +int ANeuralNetworksModel_relaxComputationFloat32toFloat16(ANeuralNetworksModel *model, bool) +{ + if (model == nullptr) + { + VERBOSE(NNAPI::Model) << "relaxComputationFloat32toFloat16: Incorrect null pointer parameter" + << std::endl; + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + + // NYI: nothing to set + VERBOSE(NNAPI::Model) << "relaxComputationFloat32toFloat16: Do nothing yet" << std::endl; + + return ANEURALNETWORKS_NO_ERROR; +} diff --git a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksCompilation.cc b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksCompilation.cc new file mode 100644 index 000000000..03518a88a --- /dev/null +++ b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksCompilation.cc @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
 */

#include "ANeuralNetworksCompilation.h"

#include "util/logging.h"

// Wraps an onert Compiler around the given graph; compilation itself is
// deferred until finish() is called.
ANeuralNetworksCompilation::ANeuralNetworksCompilation(
    const std::shared_ptr<onert::ir::Graph> &model) noexcept
    : _compiler{new onert::compiler::Compiler{model}}
{
  // DO NOTHING
}

// Runs the compiler; any exception is logged and converted into a 'false'
// return so the C API boundary never propagates it.
bool ANeuralNetworksCompilation::finish() noexcept
{
  try
  {
    _compiler->compile();
  }
  catch (const std::exception &e)
  {
    VERBOSE(EXCEPTION) << e.what() << std::endl;

    return false;
  }

  return true;
}
diff --git a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksCompilation.h b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksCompilation.h
new file mode 100644
index 000000000..8d72441b2
--- /dev/null
+++ b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksCompilation.h
@@ -0,0 +1,42 @@
/*
 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __COMPILATION_H__
#define __COMPILATION_H__

#include "compiler/Compiler.h"
#include "ir/Graph.h"
#include "exec/IExecutor.h"

// C-API handle for a compilation; owns the underlying onert Compiler.
struct ANeuralNetworksCompilation
{
public:
  ANeuralNetworksCompilation(const std::shared_ptr<onert::ir::Graph> &graph) noexcept;

public:
  // Compiles the graph; returns false on failure (exceptions are swallowed).
  bool finish() noexcept;

  onert::compiler::State state(void) noexcept { return _compiler->state(); }
  // Hands the compiled executor to the caller (used by Execution_create).
  void publish(std::shared_ptr<onert::exec::IExecutor> &executor) noexcept
  {
    _compiler->release(executor);
  }

private:
  std::shared_ptr<onert::compiler::Compiler> _compiler;
};

#endif
diff --git a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksEvent.cc b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksEvent.cc
new file mode 100644
index 000000000..2bea729be
--- /dev/null
+++ b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksEvent.cc
@@ -0,0 +1,42 @@
/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
+ */ + +#include "ANeuralNetworksEvent.h" + +#include "exec/Execution.h" +#include "util/logging.h" + +ANeuralNetworksEvent::ANeuralNetworksEvent(const std::shared_ptr<onert::exec::Execution> &execution) + : _execution{execution} +{ + // DO NOTHING +} + +bool ANeuralNetworksEvent::waitFinish(void) noexcept +{ + try + { + _execution->waitFinish(); + } + catch (const std::exception &e) + { + VERBOSE(EXCEPTION) << e.what() << std::endl; + + return false; + } + + return true; +} diff --git a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksEvent.h b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksEvent.h new file mode 100644 index 000000000..7b462d3d6 --- /dev/null +++ b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksEvent.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
 */

#ifndef __EVENT_H__
#define __EVENT_H__

#include <NeuralNetworks.h>

#include <memory>

namespace onert
{
namespace exec
{
class Execution;
} // namespace exec
} // namespace onert

// C-API handle for an event; shares ownership of the underlying Execution.
struct ANeuralNetworksEvent
{
public:
  ANeuralNetworksEvent(const std::shared_ptr<onert::exec::Execution> &execution);

public:
  bool waitFinish(void) noexcept;

private:
  const std::shared_ptr<onert::exec::Execution> _execution;
};

#endif
diff --git a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc
new file mode 100644
index 000000000..15eb088c6
--- /dev/null
+++ b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc
@@ -0,0 +1,288 @@
/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ANeuralNetworksExecution.h"
#include "NNAPIConvert.h"
#include "util/logging.h"

// Maps the NNAPI input position 'index' to the graph's operand index.
// Returns an invalid OperandIndex for negative or out-of-range positions.
const onert::ir::OperandIndex ANeuralNetworksExecution::getInputOperandIndex(int32_t index) noexcept
{
  if (index < 0)
  {
    // Negative index: return invalid index
    return onert::ir::OperandIndex{};
  }

  uint32_t cast_index = static_cast<uint32_t>(index);
  if (cast_index >= _execution->graph().getInputs().size())
  {
    // Return invalid index
    return onert::ir::OperandIndex{};
  }

  onert::ir::IOIndex input_index{cast_index};
  const auto operand_index = _execution->graph().getInputs().at(input_index);
  return operand_index;
}

// Maps the NNAPI output position 'index' to the graph's operand index.
// Returns an invalid OperandIndex for negative or out-of-range positions.
const onert::ir::OperandIndex
ANeuralNetworksExecution::getOutputOperandIndex(int32_t index) noexcept
{
  if (index < 0)
  {
    // Negative index: return invalid index
    return onert::ir::OperandIndex{};
  }

  uint32_t cast_index = static_cast<uint32_t>(index);
  if (cast_index >= _execution->graph().getOutputs().size())
  {
    // Return invalid index
    return onert::ir::OperandIndex{};
  }

  onert::ir::IOIndex output_index{cast_index};
  const auto operand_index = _execution->graph().getOutputs().at(output_index);
  return operand_index;
}

// True when the NNAPI-provided type converts to the same TypeInfo as the
// model operand's; lookup/convert failures are logged and count as mismatch.
bool ANeuralNetworksExecution::compareDataType(const ANeuralNetworksOperandType *type,
                                               const onert::ir::OperandIndex index) noexcept
{
  try
  {
    const auto operand_type = _execution->graph().operands().at(index).typeInfo();
    const auto typeInfo = NNAPIConvert::getTypeInfo(type);

    if (operand_type != typeInfo)
    {
      // Data type mismatch
      return false;
    }
  }
  catch (const std::exception &e)
  {
    VERBOSE(EXCEPTION) << e.what() << std::endl;

    return false;
  }

  return true;
}

// True when the NNAPI-provided shape equals the model operand's shape;
// an operand with unspecified dims never compares equal.
bool ANeuralNetworksExecution::compareShape(const ANeuralNetworksOperandType *type,
                                            const onert::ir::OperandIndex index) noexcept
{
  // Passed shape should be specified
  if (haveUnspecifiedDims(index))
  {
    return false;
  }

  const auto &operand_shape =
_execution->graph().operands().at(index).shape(); + const auto &shape_from_type = NNAPIConvert::getShape(type); + + return operand_shape == shape_from_type; +} + +bool ANeuralNetworksExecution::haveUnspecifiedDims(const onert::ir::OperandIndex index) noexcept +{ + const auto operand_shape = _execution->graph().operands().at(index).shape(); + + return operand_shape.num_elements() == 0; +} + +size_t ANeuralNetworksExecution::getOperandSize(const onert::ir::OperandIndex index) noexcept +{ + try + { + return _execution->graph().operands().at(index).operandSize(); + } + catch (const std::exception &e) + { + VERBOSE(EXCEPTION) << e.what() << std::endl; + + return 0; + } +} + +bool ANeuralNetworksExecution::setInput(uint32_t index, const ANeuralNetworksOperandType *type, + const void *buffer, size_t length) noexcept +{ + try + { + onert::ir::IOIndex input_index{index}; + const auto operand_index = getInputOperandIndex(index); + + const auto type_info = _execution->graph().operands().at(operand_index).typeInfo(); + const auto shape = (type != nullptr) ? NNAPIConvert::getShape(type) + : _execution->graph().operands().at(operand_index).shape(); + + // NOTE The nnapi does not provide setting io_layout and not support changing layout. In other + // words, we can assume that io_layout from nnapi always is the same as layout of the used + // model. + // TODO Set layout of model + _execution->setInput(input_index, type_info, shape, buffer, length, onert::ir::Layout::NHWC); + } + catch (const std::exception &e) + { + VERBOSE(EXCEPTION) << e.what() << std::endl; + + return false; + } + + return true; +} + +bool ANeuralNetworksExecution::setOutput(uint32_t index, const ANeuralNetworksOperandType *type, + void *buffer, size_t length) noexcept +{ + try + { + onert::ir::IOIndex output_index{index}; + const auto operand_index = getOutputOperandIndex(index); + + const auto type_info = _execution->graph().operands().at(operand_index).typeInfo(); + const auto shape = (type != nullptr) ? 
NNAPIConvert::getShape(type) + : _execution->graph().operands().at(operand_index).shape(); + + // NOTE The nnapi does not provide setting io_layout and not support changing layout. In other + // words, we can assume that io_layout from nnapi always is the same as layout of the used + // model. + // TODO Set layout of model + _execution->setOutput(output_index, type_info, shape, buffer, length, onert::ir::Layout::NHWC); + } + catch (const std::exception &e) + { + VERBOSE(EXCEPTION) << e.what() << std::endl; + + return false; + } + + return true; +} + +bool ANeuralNetworksExecution::startExecute(void) noexcept +{ + try + { + _execution->startExecute(); + } + catch (const std::exception &e) + { + VERBOSE(EXCEPTION) << e.what() << std::endl; + + return false; + } + + return true; +} + +bool ANeuralNetworksExecution::execute(void) noexcept +{ + try + { + _execution->execute(); + } + catch (const std::exception &e) + { + VERBOSE(EXCEPTION) << e.what() << std::endl; + + return false; + } + + return true; +} + +const std::shared_ptr<onert::exec::Execution> ANeuralNetworksExecution::instance(void) noexcept +{ + return _execution; +} + +bool ANeuralNetworksExecution::getOutputOperandRank(uint32_t index, uint32_t *rank) noexcept +{ + try + { + onert::ir::IOIndex output_index{index}; + const auto operand_index = getOutputOperandIndex(index); + bool unspecified = haveUnspecifiedDims(operand_index); + + // TODO Get unspecified output operand's rank + if (unspecified) + { + throw std::runtime_error{"Unsupport feature"}; + } + + // Check execution is finished + // Output rank and shape may be decided after execution if output is unspecified operand + if (!_execution->isFinished()) + { + return false; + } + + *rank = _execution->graph().operands().at(operand_index).shape().rank(); + } + catch (const std::exception &e) + { + VERBOSE(EXCEPTION) << e.what() << std::endl; + + return false; + } + + return true; +} + +bool ANeuralNetworksExecution::getOutputOperandDimensions(uint32_t 
index, uint32_t *dimensions) +{ + try + { + onert::ir::IOIndex output_index{index}; + const auto operand_index = getOutputOperandIndex(index); + bool unspecified = haveUnspecifiedDims(operand_index); + if (unspecified) + { + throw std::runtime_error{"NYI: Models with unspecified output dimensions"}; + } + + // Check execution is finished + // Output rank and shape may be decided after execution if output is unspecified operand + if (!_execution->isFinished()) + { + return false; + } + + auto shape = _execution->graph().operands().at(operand_index).shape(); + for (int i = 0; i < shape.rank(); i++) + { + auto dim = shape.dim(i); + + if (dim <= 0) + { + throw std::runtime_error{"Invalid dimension value"}; + } + + dimensions[i] = static_cast<uint32_t>(dim); + } + } + catch (const std::exception &e) + { + VERBOSE(EXCEPTION) << e.what() << std::endl; + + return false; + } + + return true; +} diff --git a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.h b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.h new file mode 100644 index 000000000..af2465a81 --- /dev/null +++ b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.h @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __EXECUTION_H__ +#define __EXECUTION_H__ + +#include <NeuralNetworks.h> + +#include <memory> + +#include "exec/Execution.h" + +struct ANeuralNetworksExecution +{ +public: + ANeuralNetworksExecution(const std::shared_ptr<onert::exec::IExecutor> &executor) + : _execution{std::make_shared<onert::exec::Execution>(executor)} + { + // DO NOTHING + } + +public: + bool setInput(uint32_t index, const ANeuralNetworksOperandType *type, const void *buffer, + size_t length) noexcept; + bool setOutput(uint32_t index, const ANeuralNetworksOperandType *type, void *buffer, + size_t length) noexcept; + bool startExecute(void) noexcept; + bool execute(void) noexcept; + + const onert::ir::OperandIndex getInputOperandIndex(int32_t index) noexcept; + const onert::ir::OperandIndex getOutputOperandIndex(int32_t index) noexcept; + bool compareDataType(const ANeuralNetworksOperandType *type, + const onert::ir::OperandIndex index) noexcept; + bool compareShape(const ANeuralNetworksOperandType *type, + const onert::ir::OperandIndex index) noexcept; + bool haveUnspecifiedDims(const onert::ir::OperandIndex index) noexcept; + size_t getOperandSize(const onert::ir::OperandIndex index) noexcept; + const std::shared_ptr<onert::exec::Execution> instance(void) noexcept; + + /** + * @brief Get output operand's rank + * @param[in] index Output index + * @param[out] rank Output operand's rank + * @return @c true if success to get rank, otherwise @c false + */ + bool getOutputOperandRank(uint32_t index, uint32_t *rank) noexcept; + /** + * @brief Get dimensions of the output operand + * @param[in] index Output index + * @param[out] dimensions Output operand's dimensions + * @return @c true if success to get rank, otherwise @c false + * @note This must be called after execution is finished to get resolved output shape + * unspecified in model + */ + bool getOutputOperandDimensions(uint32_t index, uint32_t *dimensions); + +private: + std::shared_ptr<onert::exec::Execution> _execution; +}; + 
+#endif diff --git a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksMemory.cc b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksMemory.cc new file mode 100644 index 000000000..9cc100585 --- /dev/null +++ b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksMemory.cc @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <NeuralNetworks.h> +#include <sys/mman.h> + +#include "ANeuralNetworksMemory.h" + +// +// ANeuralNetworksMemory +// +ANeuralNetworksMemory::ANeuralNetworksMemory(size_t size, int protect, int fd, size_t offset) +{ + _base = reinterpret_cast<uint8_t *>(mmap(nullptr, size, protect, MAP_PRIVATE, fd, offset)); + _size = size; +} + +ANeuralNetworksMemory::~ANeuralNetworksMemory() { munmap(reinterpret_cast<void *>(_base), _size); } + +bool ANeuralNetworksMemory::vaildAccess(size_t offset, size_t length) const +{ + if ((offset >= _size) || (length > _size)) + { + return false; + } + + if ((offset + length) >= _size) + { + return false; + } + + return true; +} diff --git a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksMemory.h b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksMemory.h new file mode 100644 index 000000000..48a1bc5fc --- /dev/null +++ b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksMemory.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. 
All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __MEMORY_H__ +#define __MEMORY_H__ + +#include <cstdint> + +struct ANeuralNetworksMemory +{ +public: + ANeuralNetworksMemory(size_t size, int protect, int fd, size_t offset); + ~ANeuralNetworksMemory(); + +public: + size_t size(void) const { return _size; } + uint8_t *base(void) { return _base; } + uint8_t *base(void) const { return _base; } + bool vaildAccess(size_t offset, size_t length) const; + +private: + size_t _size; + uint8_t *_base; +}; + +#endif // __MEMORY_H__ diff --git a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksModel.cc b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksModel.cc new file mode 100644 index 000000000..d2d699ae1 --- /dev/null +++ b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksModel.cc @@ -0,0 +1,267 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ANeuralNetworksModel.h" +#include "OperationFactory.h" +#include "NNAPIConvert.h" + +#include "ir/Operations.Include.h" +#include "util/logging.h" + +#include <memory> + +// +// ANeuralNetworksModel +// +ANeuralNetworksModel::ANeuralNetworksModel() noexcept : _optional_operands{}, _operand_usages{} +{ + _graph = std::make_shared<onert::ir::Graph>(); +} + +bool ANeuralNetworksModel::addOperand(const ANeuralNetworksOperandType *type) noexcept +{ + try + { + const auto shape = NNAPIConvert::getShape(type); + const auto typeInfo = NNAPIConvert::getTypeInfo(type); + _graph->addOperand(shape, typeInfo); + _operand_usages.emplace_back(OperandUsage::NOT_DEFINED); + } + catch (const std::exception &e) + { + VERBOSE(EXCEPTION) << e.what() << std::endl; + + return false; + } + + return true; +} + +bool ANeuralNetworksModel::setOperandValue(uint32_t index, const void *buffer, size_t length, + bool optional, bool copy) noexcept +{ + const onert::ir::OperandIndex ind{index}; + + try + { + _operand_usages[index] = OperandUsage::CONSTANT; + + // Remain operands.at(ind).data()->base() as nullptr for optional operand + // This will be filled when model finished + if (optional) + { + setOptionalOperand(ind); + } + + using onert::ir::CachedData; + using onert::ir::ExternalData; + if (copy) + { + _graph->operands().at(ind).data( + std::make_unique<CachedData>(reinterpret_cast<const uint8_t *>(buffer), length)); + } + else + { + _graph->operands().at(ind).data( + std::make_unique<ExternalData>(reinterpret_cast<const uint8_t *>(buffer), length)); + } + } + catch (const std::exception &e) + { + VERBOSE(EXCEPTION) << e.what() << std::endl; + + return false; + } + + return true; +} + +bool ANeuralNetworksModel::addOperation(ANeuralNetworksOperationType type, uint32_t inputCount, + const uint32_t *inputs, uint32_t outputCount, + const uint32_t *outputs) noexcept +{ + 
try + { + for (uint32_t i = 0; i < outputCount; i++) + { + _operand_usages[outputs[i]] = OperandUsage::OPERATION_OUTPUT; + } + + auto &factory = OperationFactory::get(); + OperationFactory::Param param{inputCount, inputs, outputCount, outputs}; + + auto node = factory.create(type, param, _graph->operands()); + _graph->addOperation(std::unique_ptr<onert::ir::Operation>{node}); + + // TODO Move these codes to delegate.cpp + if (type == ANEURALNETWORKS_FULLY_CONNECTED) + { + const auto &input_operand = + _graph->operands().at(node->getInputs().at(onert::ir::operation::FullyConnected::INPUT)); + auto &weights_operand = + _graph->operands().at(node->getInputs().at(onert::ir::operation::FullyConnected::WEIGHT)); + if (input_operand.typeInfo().type() == onert::ir::DataType::FLOAT32 && + weights_operand.typeInfo().type() == onert::ir::DataType::QUANT8_ASYMM) + { + weights_operand.type(onert::ir::DataType::QUANT8_SYMM); + } + } + } + catch (const std::exception &e) + { + VERBOSE(EXCEPTION) << e.what() << std::endl; + + return false; + } + + return true; +} + +bool ANeuralNetworksModel::addOperationEx(ANeuralNetworksOperationTypeEx type, uint32_t inputCount, + const uint32_t *inputs, uint32_t outputCount, + const uint32_t *outputs) noexcept +{ + try + { + for (uint32_t i = 0; i < outputCount; i++) + { + _operand_usages[outputs[i]] = OperandUsage::OPERATION_OUTPUT; + } + + auto &factory = OperationFactory::get(); + OperationFactory::Param param{inputCount, inputs, outputCount, outputs}; + + auto node = factory.create(type, param, _graph->operands()); + _graph->addOperation(std::unique_ptr<onert::ir::Operation>{node}); + } + catch (const std::exception &e) + { + return false; + } + return true; +} + +bool ANeuralNetworksModel::addModelInput(uint32_t index) noexcept +{ + try + { + _operand_usages[index] = OperandUsage::MODEL_INPUT; + + const onert::ir::OperandIndex ind{index}; + _graph->addInput(ind); + } + catch (const std::exception &e) + { + VERBOSE(EXCEPTION) << e.what() << 
std::endl; + + return false; + } + + return true; +} +bool ANeuralNetworksModel::addModelOutput(uint32_t index) noexcept +{ + try + { + const onert::ir::OperandIndex ind{index}; + + // Duplicated output is not allowed + if (_graph->getOutputs().contains(ind)) + { + return false; + } + + _graph->addOutput(ind); + } + catch (const std::exception &e) + { + VERBOSE(EXCEPTION) << e.what() << std::endl; + + return false; + } + + return true; +} + +bool ANeuralNetworksModel::finish() noexcept +{ + try + { + fillOptionalOperand(); + + _graph->finishBuilding(); + + _operand_usages.clear(); + } + catch (const std::exception &e) + { + VERBOSE(EXCEPTION) << e.what() << '\n'; + + return false; + } + + return true; +} + +bool ANeuralNetworksModel::isFinished() noexcept { return !_graph->isBuildingPhase(); } + +bool ANeuralNetworksModel::isExistOperand(uint32_t index) noexcept +{ + return _graph->operands().exist(onert::ir::OperandIndex{index}); +} + +size_t ANeuralNetworksModel::operandSize(uint32_t index) noexcept +{ + try + { + return _graph->operands().at(onert::ir::OperandIndex{index}).operandSize(); + } + catch (const std::exception &e) + { + VERBOSE(EXCEPTION) << e.what() << '\n'; + + return 0; + } +} + +bool ANeuralNetworksModel::isUsageSet(uint32_t index) noexcept +{ + return (_operand_usages[index] != OperandUsage::NOT_DEFINED); +} + +bool ANeuralNetworksModel::isOperationOutput(uint32_t index) noexcept +{ + return (_operand_usages[index] == OperandUsage::OPERATION_OUTPUT); +} + +void ANeuralNetworksModel::setOptionalOperand(const onert::ir::OperandIndex idx) +{ + _optional_operands.insert(idx); +} + +void ANeuralNetworksModel::fillOptionalOperand(void) +{ + _graph->operations().iterate([&](const onert::ir::OperationIndex &, onert::ir::Operation &node) { + for (auto input : node.getInputs()) + { + // TODO fill default value for optional operands + if (_optional_operands.find(input) != _optional_operands.end()) + { + throw std::runtime_error{"Optional operand is not 
supported yet"}; + } + } + }); +} diff --git a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksModel.h b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksModel.h new file mode 100644 index 000000000..3ccd941c7 --- /dev/null +++ b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksModel.h @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __MODEL_H__ +#define __MODEL_H__ + +#include <unordered_set> +#include <NeuralNetworks.h> +#include <NeuralNetworksEx.h> + +#include "ir/Graph.h" + +struct ANeuralNetworksModel +{ +public: + enum class OperandUsage + { + NOT_DEFINED = 0, + MODEL_INPUT, + CONSTANT, + OPERATION_OUTPUT, + }; + +public: + ANeuralNetworksModel() noexcept; + +public: + bool addOperand(const ANeuralNetworksOperandType *type) noexcept; + bool setOperandValue(uint32_t index, const void *buffer, size_t length, bool optional = false, + bool copy = false) noexcept; + bool addOperation(ANeuralNetworksOperationType type, uint32_t inputCount, const uint32_t *inputs, + uint32_t outputCount, const uint32_t *outputs) noexcept; + bool addOperationEx(ANeuralNetworksOperationTypeEx type, uint32_t inputCount, + const uint32_t *inputs, uint32_t outputCount, + const uint32_t *outputs) noexcept; + bool addModelInput(uint32_t index) noexcept; + bool addModelOutput(uint32_t index) noexcept; + bool finish() noexcept; + + onert::ir::Graph &deref(void) { 
return *_graph; } + bool isFinished() noexcept; + bool isExistOperand(uint32_t index) noexcept; + size_t operandSize(uint32_t index) noexcept; + bool isUsageSet(uint32_t index) noexcept; + bool isOperationOutput(uint32_t index) noexcept; + void release(std::shared_ptr<onert::ir::Graph> &graph) { graph = _graph; } + +private: + void setOptionalOperand(const onert::ir::OperandIndex idx); + void fillOptionalOperand(void); + +private: + std::shared_ptr<onert::ir::Graph> _graph; + std::unordered_set<onert::ir::OperandIndex> _optional_operands; + std::vector<OperandUsage> _operand_usages; +}; + +#endif // __MODEL_H__ diff --git a/runtime/onert/frontend/nnapi/wrapper/NNAPIConvert.cc b/runtime/onert/frontend/nnapi/wrapper/NNAPIConvert.cc new file mode 100644 index 000000000..e07297241 --- /dev/null +++ b/runtime/onert/frontend/nnapi/wrapper/NNAPIConvert.cc @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "NNAPIConvert.h" + +#include <numeric> + +using namespace onert::ir; + +DataType NNAPIConvert::getDataType(OperandCode type) +{ + switch (type) + { + case ANEURALNETWORKS_FLOAT32: + case ANEURALNETWORKS_TENSOR_FLOAT32: + return DataType::FLOAT32; + case ANEURALNETWORKS_INT32: + case ANEURALNETWORKS_TENSOR_INT32: + return DataType::INT32; + case ANEURALNETWORKS_UINT32: + return DataType::UINT32; + case ANEURALNETWORKS_TENSOR_QUANT8_ASYMM: + return DataType::QUANT8_ASYMM; + case ANEURALNETWORKS_TENSOR_QUANT8_SYMM: + return DataType::QUANT8_SYMM; + case ANEURALNETWORKS_BOOL: + case ANEURALNETWORKS_TENSOR_BOOL8: + return DataType::BOOL8; + default: + throw std::runtime_error("Unsupported type"); + } +} + +TypeInfo NNAPIConvert::getTypeInfo(const ANeuralNetworksOperandType *type) +{ + return TypeInfo(getDataType((OperandCode)(type->type)), type->scale, type->zeroPoint); +} + +Shape NNAPIConvert::getShape(const ANeuralNetworksOperandType *type) +{ + Shape shape(type->dimensionCount); + + for (uint32_t axis = 0; axis < type->dimensionCount; ++axis) + { + shape.dim(axis) = type->dimensions[axis]; + } + + return shape; +} + +size_t NNAPIConvert::calculateSizeFromType(const ANeuralNetworksOperandType *type) +{ + auto shape = getShape(type); + auto data_type = getDataType((OperandCode)(type->type)); + + return shape.num_elements() * sizeOfDataType(data_type); +} + +Activation NNAPIConvert::getFusedActivation(FuseCode act) +{ + switch (act) + { + case ANEURALNETWORKS_FUSED_NONE: + return Activation::NONE; + case ANEURALNETWORKS_FUSED_RELU: + return Activation::RELU; + case ANEURALNETWORKS_FUSED_RELU1: + return Activation::RELU1; + case ANEURALNETWORKS_FUSED_RELU6: + return Activation::RELU6; + default: + throw std::runtime_error("Unsupported activation type"); + } +} + +PaddingType NNAPIConvert::getPaddingType(PaddingCode type) +{ + switch (type) + { + case ANEURALNETWORKS_PADDING_SAME: + return PaddingType::SAME; + case ANEURALNETWORKS_PADDING_VALID: + return 
PaddingType::VALID; + default: + throw std::runtime_error("Unsupported type"); + } +} diff --git a/runtime/onert/frontend/nnapi/wrapper/NNAPIConvert.h b/runtime/onert/frontend/nnapi/wrapper/NNAPIConvert.h new file mode 100644 index 000000000..4fd985e6e --- /dev/null +++ b/runtime/onert/frontend/nnapi/wrapper/NNAPIConvert.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file NNAPIConvert.h + * @brief This file contains convereter(s)\n + * from NNAPI frontend's struct to onert's internal struct + */ +#ifndef __ONERT_NNAPI_CONVERT_H__ +#define __ONERT_NNAPI_CONVERT_H__ + +#include <NeuralNetworks.h> + +#include <ir/TypeInfo.h> +#include <ir/Shape.h> +#include <ir/Padding.h> +#include <ir/InternalType.h> + +class NNAPIConvert +{ + +public: + /** + * @brief Convert data type from NNAPI to internal data type + * @param[in] type NNAPI's data type + * @return onert's internal data type + */ + static onert::ir::DataType getDataType(OperandCode type); + + /** + * @brief Convert operand type info from NNAPI to interanl operand type info + * @param[in] type NNAPI's operand type + * @return onert's internal operand type info + */ + static onert::ir::TypeInfo getTypeInfo(const ANeuralNetworksOperandType *type); + + /** + * @brief Convert operand shape info from NNAPI to internal operand shape + * @param[in] type NNAPI's operand type + * @return onert's internal 
operand shape + */ + static onert::ir::Shape getShape(const ANeuralNetworksOperandType *type); + + /** + * @brief Calcaulate operand size from NNAPI type + * @param[in] type NNAPI's operand type + * @return Operand size + */ + static size_t calculateSizeFromType(const ANeuralNetworksOperandType *type); + + /** + * @brief Convert NNAPI FuseCode to internal activation type + * @param[in] act NNAPI's FuseCode type + * @return onert's internal activation type + */ + static onert::ir::Activation getFusedActivation(FuseCode act); + + /** + * @brief Convert NNAPI PaddingCode to internal padding type + * @param[in] type NNAPI's PaddingCode type + * @return onert's internal padding type + */ + static onert::ir::PaddingType getPaddingType(PaddingCode type); +}; + +#endif // __ONERT_NNAPI_CONVERT_H__ diff --git a/runtime/onert/frontend/nnapi/wrapper/OperationFactory.cc b/runtime/onert/frontend/nnapi/wrapper/OperationFactory.cc new file mode 100644 index 000000000..10e7c0341 --- /dev/null +++ b/runtime/onert/frontend/nnapi/wrapper/OperationFactory.cc @@ -0,0 +1,1899 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "OperationFactory.h" +#include "NNAPIConvert.h" + +#include <ir/Operations.Include.h> +#include <string.h> + +namespace +{ +using namespace onert::ir; + +void replaceDataType(Operands &operands, const OperandIndex &index, const DataType type) +{ + assert(operands.exist(index)); + operands.at(index).type(type); +} + +ExplicitPadding makeExplicitPadding(Operands &operands, const OperandIndex &left_index, + const OperandIndex &right_index, const OperandIndex &top_index, + const OperandIndex &bottom_index) +{ + auto left = operands.at(left_index).asScalar<int32_t>(); + auto right = operands.at(right_index).asScalar<int32_t>(); + auto top = operands.at(top_index).asScalar<int32_t>(); + auto bottom = operands.at(bottom_index).asScalar<int32_t>(); + + if (left < 0 || right < 0 || top < 0 || bottom < 0) + { + throw std::runtime_error{"Cannot handle negative explicit padding value"}; + } + + ExplicitPadding param; + param.left = static_cast<uint32_t>(left); + param.right = static_cast<uint32_t>(right); + param.top = static_cast<uint32_t>(top); + param.bottom = static_cast<uint32_t>(bottom); + + return param; +} + +Stride makeStride(Operands &operands, const OperandIndex &horizontal_index, + const OperandIndex &vertical_index) +{ + auto horizontal = operands.at(horizontal_index).asScalar<int32_t>(); + auto vertical = operands.at(vertical_index).asScalar<int32_t>(); + + if (vertical < 0 || horizontal < 0) + { + throw std::runtime_error{"Cannot handle negative stride value"}; + } + + Stride stride; + stride.horizontal = static_cast<uint32_t>(horizontal); + stride.vertical = static_cast<uint32_t>(vertical); + + return stride; +} + +uint32_t getUint32Scalar(Operands &operands, const OperandIndex index) +{ + auto int32_value = operands.at(index).asScalar<int32_t>(); + if (int32_value < 0) + { + throw std::runtime_error{"Cannot handle negative value"}; + } + + return static_cast<uint32_t>(int32_value); +} + +} // namespace + +OperationFactory 
&OperationFactory::get() +{ + static OperationFactory factory; + return factory; +} + +OperationFactory::OperationFactory() +{ + _map[ANEURALNETWORKS_BATCH_TO_SPACE_ND] = [](const OperationFactory::Param &init_param, + Operands &) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> Input Tensor Index + // 1 -> Block size Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + return new operation::BatchToSpaceND{inputs, outputs}; + }; + + _map[ANEURALNETWORKS_DEPTHWISE_CONV_2D] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert((init_param.input_count == 8 || init_param.input_count == 11) && + init_param.output_count == 1); + + // In common + // 0 -> IFM Tensor Index + // 1 -> Kernel Tensor Index + // 2 -> Bias Tensor Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]}; + OperandIndexSequence outputs{init_param.outputs[0]}; + + operation::DepthwiseConv2D::Param param; + if (init_param.input_count == 8) + { + // Imlicit Padding case + // Each input should be interpreted as follows: + // + // 3 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index + // 4 -> Stride (width) Index + // 5 -> Stride (height) INdex + // 6 -> Depthwise multiplier + // 7 -> Activation Index + + const auto padding_index = OperandIndex{init_param.inputs[3]}; + const auto hstride_index = OperandIndex{init_param.inputs[4]}; + const auto vstride_index = OperandIndex{init_param.inputs[5]}; + const auto multiplier_index = OperandIndex{init_param.inputs[6]}; + const auto activation_index = OperandIndex{init_param.inputs[7]}; + + param.padding.type = + NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>()); + param.stride = makeStride(operands, hstride_index, vstride_index); + param.multiplier 
= getUint32Scalar(operands, multiplier_index); + param.activation = + NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); + } + else + { + // Explicit Padding case + // Each input should be interpreted as follows: + // + // 3 -> Padding On the Left + // 4 -> Padding On the Right + // 5 -> Padding On the Top + // 6 -> Padding On the Bottom + // 7 -> Stride (width) Index + // 8 -> Stride (height) Index + // 9 -> Depthwise multiplier + // 10-> Activation Index + + const auto padding_left_index = OperandIndex{init_param.inputs[3]}; + const auto padding_right_index = OperandIndex{init_param.inputs[4]}; + const auto padding_top_index = OperandIndex{init_param.inputs[5]}; + const auto padding_bottom_index = OperandIndex{init_param.inputs[6]}; + const auto hstride_index = OperandIndex{init_param.inputs[7]}; + const auto vstride_index = OperandIndex{init_param.inputs[8]}; + const auto multiplier_index = OperandIndex{init_param.inputs[9]}; + const auto activation_index = OperandIndex{init_param.inputs[10]}; + + param.padding.type = PaddingType::EXPLICIT; + param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index, + padding_top_index, padding_bottom_index); + param.stride = makeStride(operands, hstride_index, vstride_index); + param.multiplier = getUint32Scalar(operands, multiplier_index); + param.activation = + NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); + } + + return new operation::DepthwiseConv2D{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_MAX_POOL_2D] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 7 || init_param.input_count == 10); + assert(init_param.output_count == 1); + + // In common + // 0 -> IFM Tensor Index + OperandIndexSequence inputs{init_param.inputs[0]}; + OperandIndexSequence outputs{init_param.outputs[0]}; + + operation::MaxPool2D::Param param; + if (init_param.input_count == 
7) // support implicit padding + { + // Each input should be interpreted as follows: + // + // 1 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index + // 2 -> Horizontal (over width) Stride Index + // 3 -> Vertial (over height) Stride Index + // 4 -> Filter Width Index + // 5 -> Filter Height Index + // 6 -> FuseCode (activation) Index + + const auto padding_index = OperandIndex{init_param.inputs[1]}; + const auto hstride_index = OperandIndex{init_param.inputs[2]}; + const auto vstride_index = OperandIndex{init_param.inputs[3]}; + const auto kw_index = OperandIndex{init_param.inputs[4]}; + const auto kh_index = OperandIndex{init_param.inputs[5]}; + const auto activation_index = OperandIndex{init_param.inputs[6]}; + + param.padding.type = + NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>()); + param.stride = makeStride(operands, hstride_index, vstride_index); + param.kw = getUint32Scalar(operands, kw_index); + param.kh = operands.at(kh_index).asScalar<uint32_t>(); + param.activation = + NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); + } + else if (init_param.input_count == 10) // support explicit padding + { + // Each input should be interpreted as follows: + // + // 1 -> Padding_left index + // 2 -> Padding_right index + // 3 -> Padding_top index + // 4 -> Padding_bottom index + // 5 -> Horizontal (over width) Stride Index + // 6 -> Vertial (over height) Stride Index + // 7 -> Filter Width Index + // 8 -> Filter Height Index + // 9 -> FuseCode (activation) Index + + const auto padding_left_index = OperandIndex{init_param.inputs[1]}; + const auto padding_right_index = OperandIndex{init_param.inputs[2]}; + const auto padding_top_index = OperandIndex{init_param.inputs[3]}; + const auto padding_bottom_index = OperandIndex{init_param.inputs[4]}; + const auto hstride_index = OperandIndex{init_param.inputs[5]}; + const auto vstride_index = 
OperandIndex{init_param.inputs[6]}; + const auto kw_index = OperandIndex{init_param.inputs[7]}; + const auto kh_index = OperandIndex{init_param.inputs[8]}; + const auto activation_index = OperandIndex{init_param.inputs[9]}; + + param.padding.type = PaddingType::EXPLICIT; + param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index, + padding_top_index, padding_bottom_index); + param.stride = makeStride(operands, hstride_index, vstride_index); + param.kw = getUint32Scalar(operands, kw_index); + param.kh = getUint32Scalar(operands, kh_index); + param.activation = + NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); + } + return new operation::MaxPool2D{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_AVERAGE_POOL_2D] = [](const OperationFactory::Param &init_param, + Operands &operands) { + // TODO We may reuse code here for MAX_POOL_2D. Seems like these two are identical + assert(init_param.input_count == 7 || init_param.input_count == 10); + assert(init_param.output_count == 1); + + // In common + // 0 -> IFM Tensor Index + OperandIndexSequence inputs{init_param.inputs[0]}; + OperandIndexSequence outputs{init_param.outputs[0]}; + + operation::AvgPool2D::Param param; + if (init_param.input_count == 7) // support implicit padding + { + // Each input should be interpreted as follows: + // + // 1 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index + // 2 -> Horizontal (over width) Stride Index + // 3 -> Vertial (over height) Stride Index + // 4 -> Filter Width Index + // 5 -> Filter Height Index + // 6 -> FuseCode (activation) Index + + const auto padding_index = OperandIndex{init_param.inputs[1]}; + const auto hstride_index = OperandIndex{init_param.inputs[2]}; + const auto vstride_index = OperandIndex{init_param.inputs[3]}; + const auto kw_index = OperandIndex{init_param.inputs[4]}; + const auto kh_index = OperandIndex{init_param.inputs[5]}; + const auto 
activation_index = OperandIndex{init_param.inputs[6]}; + + param.padding.type = + NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>()); + param.stride = makeStride(operands, hstride_index, vstride_index); + param.kw = getUint32Scalar(operands, kw_index); + param.kh = getUint32Scalar(operands, kh_index); + param.activation = + NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); + } + else if (init_param.input_count == 10) // support explicit padding + { + // Each input should be interpreted as follows: + // + // 1 -> Padding_left index + // 2 -> Padding_right index + // 3 -> Padding_top index + // 4 -> Padding_bottom index + // 5 -> Horizontal (over width) Stride Index + // 6 -> Vertial (over height) Stride Index + // 7 -> Filter Width Index + // 8 -> Filter Height Index + // 9 -> FuseCode (activation) Index + + const auto padding_left_index = OperandIndex{init_param.inputs[1]}; + const auto padding_right_index = OperandIndex{init_param.inputs[2]}; + const auto padding_top_index = OperandIndex{init_param.inputs[3]}; + const auto padding_bottom_index = OperandIndex{init_param.inputs[4]}; + const auto hstride_index = OperandIndex{init_param.inputs[5]}; + const auto vstride_index = OperandIndex{init_param.inputs[6]}; + const auto kw_index = OperandIndex{init_param.inputs[7]}; + const auto kh_index = OperandIndex{init_param.inputs[8]}; + const auto activation_index = OperandIndex{init_param.inputs[9]}; + + param.padding.type = PaddingType::EXPLICIT; + param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index, + padding_top_index, padding_bottom_index); + param.stride = makeStride(operands, hstride_index, vstride_index); + param.kw = getUint32Scalar(operands, kw_index); + param.kh = getUint32Scalar(operands, kh_index); + param.activation = + NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); + } + + return new operation::AvgPool2D{inputs, 
outputs, param}; + }; + + _map[ANEURALNETWORKS_CONCATENATION] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count >= 2); // At least one one input tensor and axis + assert(init_param.output_count == 1); + + // When there are N + 1 inputs, each input should be interpreted as follows: + // + // [0, N) -> Input tensors + // N -> Axis + // + + OperandIndexSequence inputs; + for (uint32_t n = 0; n < init_param.input_count - 1; ++n) + { + inputs.append(OperandIndex{init_param.inputs[n]}); + } + OperandIndexSequence outputs{init_param.outputs[0]}; + + operation::Concat::Param param; + const OperandIndex axis_index{init_param.inputs[init_param.input_count - 1]}; + param.axis = operands.at(axis_index).asScalar<int32_t>(); + param.rank = operands.at(outputs.at(0)).shape().rank(); + + return new operation::Concat{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_RESHAPE] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + // Each input should be interpreted as follows: + // + // 0 -> A tensor, specifying the tensor to be reshaped. + // 1 -> A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32, defining the shape of the output + // tensor + + // TODO Second input should be shape tensor (init_param.inputs[1]) + // Currently unused since assume that it is same with output tensor size + OperandIndexSequence inputs{init_param.inputs[0] /* , init_param.inputs[1] */}; + OperandIndexSequence outputs{init_param.outputs[0]}; + + return new operation::Reshape{inputs, outputs}; + }; + + _map[ANEURALNETWORKS_FULLY_CONNECTED] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 4 && init_param.output_count == 1); + + // Each input should be interpreted as follows: + // + // 0 -> A tensor, specifying the input. 
+ // 1 -> A 2-D tensor, specifying the weights + // 2 -> A 1-D tensor, specifying the bias + // 3 -> An INT32 value, and has to be one of the FuseCode values + + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]}; + OperandIndexSequence outputs{init_param.outputs[0]}; + + operation::FullyConnected::Param param; + const auto activation_index = OperandIndex{init_param.inputs[3]}; + param.activation = + NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); + + return new operation::FullyConnected{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_SOFTMAX] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + // Each input should be interpreted as follows: + // + // 0 -> A 2-D or 4-D tensor, specifying the tensor to be reshaped. + // 1 -> FLOAT32 value, specifying the positive scaling factor for the exponent, beta. + + OperandIndexSequence inputs{init_param.inputs[0]}; + OperandIndexSequence outputs{init_param.outputs[0]}; + + const auto beta_index = OperandIndex{init_param.inputs[1]}; + + operation::Softmax::Param param; + param.beta = operands.at(beta_index).asScalar<float>(); + + return new operation::Softmax{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_CAST] = [](const OperationFactory::Param &init_param, Operands &operands) { + assert(init_param.input_count == 1 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // 0 -> input Tensor Index + OperandIndexSequence inputs{init_param.inputs[0]}; + + // NNAPI uses QUANT8_ASYMM to represent UINT8 type for ANEURALNETWORKS_CAST's input/output + if (operands.at(inputs.at(0)).typeInfo().type() == DataType::QUANT8_ASYMM) + { + replaceDataType(operands, inputs.at(0), DataType::UINT8); + } + if (operands.at(outputs.at(0)).typeInfo().type() == 
DataType::QUANT8_ASYMM) + { + replaceDataType(operands, outputs.at(0), DataType::UINT8); + } + + return new operation::Cast{inputs, outputs}; + }; + + // ANEURALNETWORKS_CAST_EX is deprecated + // TODO Remove ANEURALNETWORKS_CAST_EX + _map[ANEURALNETWORKS_CAST_EX] = _map[ANEURALNETWORKS_CAST]; + + _map[ANEURALNETWORKS_CONV_2D] = [](const OperationFactory::Param &init_param, + Operands &operands) { + using operation::Conv2D; + + // inputCount is either 7 or 10 acccording to NN API specification. + // - Padding is implicit when inputCount is 7 + // - Padding is explicit when inputCount is 10 + assert(init_param.input_count == 7 || init_param.input_count == 10); + assert(init_param.output_count == 1); + + // 0 -> IFM Tensor Index + // 1 -> Kernel Tensor Index + // 2 -> Bias Tensor Index + + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]}; + OperandIndexSequence outputs{init_param.outputs[0]}; + + Conv2D::Param param; + + if (init_param.input_count == 7) // support implicit padding + { + // Each input should be interpreted as follows: + // + // 3 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index + // 4 -> Stride (width) Index + // 5 -> Stride (height) INdex + // 6 -> Activation Index + + const auto padding_index = OperandIndex{init_param.inputs[3]}; + const auto hstride_index = OperandIndex{init_param.inputs[4]}; + const auto vstride_index = OperandIndex{init_param.inputs[5]}; + const auto activation_index = OperandIndex{init_param.inputs[6]}; + + param.padding.type = + NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>()); + param.stride = makeStride(operands, hstride_index, vstride_index); + param.activation = + NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); + } + else if (init_param.input_count == 10) // support explicit padding + { + // Each input should be interpreted as follows: + // + // 3 -> Padding_left index + // 
4 -> Padding_right index + // 5 -> Padding_top index + // 6 -> Padding_bottom index + // 7 -> Stride (width) Index + // 8 -> Stride (height) INdex + // 9 -> Activation Index + + const auto padding_left_index = OperandIndex{init_param.inputs[3]}; + const auto padding_right_index = OperandIndex{init_param.inputs[4]}; + const auto padding_top_index = OperandIndex{init_param.inputs[5]}; + const auto padding_bottom_index = OperandIndex{init_param.inputs[6]}; + const auto hstride_index = OperandIndex{init_param.inputs[7]}; + const auto vstride_index = OperandIndex{init_param.inputs[8]}; + const auto activation_index = OperandIndex{init_param.inputs[9]}; + + param.padding.type = PaddingType::EXPLICIT; + param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index, + padding_top_index, padding_bottom_index); + param.stride = makeStride(operands, hstride_index, vstride_index); + param.activation = + NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); + } + + return new Conv2D{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_ADD] = [](const OperationFactory::Param &init_param, Operands &operands) { + assert(init_param.input_count == 3); + assert(init_param.output_count == 1); + + // Each input should be interpreted as follows: + // + // 0 -> Lefthand side operand + // 1 -> Righthand side operand + + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + OperandIndexSequence outputs{init_param.outputs[0]}; + + operation::Add::Param param; + + const auto activation_index = OperandIndex{init_param.inputs[2]}; + param.activation = + NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); + + return new operation::Add{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_REDUCE_SUM] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 3); + assert(init_param.output_count == 1); + + // Each input should be 
interpreted as follows: + // + // 0 -> Input Tensor Index + // 1 -> Reduced Axes Tensor Index + // 2 -> keep_dims Index + + OperandIndexSequence inputs{init_param.inputs[0]}; + OperandIndexSequence outputs{init_param.outputs[0]}; + std::vector<std::int32_t> axes = + operands.at(OperandIndex{init_param.inputs[1]}).asVector<std::int32_t>(); + + operation::ReduceSum::Param param; + param.axes.assign(axes.cbegin(), axes.cend()); + param.keep_dims = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<int8_t>() != 0; + param.rank = operands.at(inputs.at(0)).shape().rank(); + + return new operation::ReduceSum{inputs, outputs, param}; + }; + + // ANEURALNETWORKS_REDUCE_SUM_EX is deprecated + // TODO Remove ANEURALNETWORKS_REDUCE_SUM_EX + _map[ANEURALNETWORKS_REDUCE_SUM_EX] = _map[ANEURALNETWORKS_REDUCE_SUM]; + + _map[ANEURALNETWORKS_SUB] = [](const OperationFactory::Param &init_param, Operands &operands) { + assert(init_param.input_count == 3); + assert(init_param.output_count == 1); + + // Each input should be interpreted as follows: + // + // 0 -> Lefthand side operand + // 1 -> Righthand side operand + + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + OperandIndexSequence outputs{init_param.outputs[0]}; + + operation::Sub::Param param; + + const auto activation_index = OperandIndex{init_param.inputs[2]}; + param.activation = + NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); + + return new operation::Sub{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_SLICE] = [](const OperationFactory::Param &init_param, Operands &operands) { + assert(init_param.input_count == 3 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> Input Tensor Index + // 1 -> Begins Tensor Index + // 2 -> Sizes Tensor Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]}; + + 
operation::Slice::Param param; + param.rank = operands.at(inputs.at(0)).shape().rank(); + + return new operation::Slice{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_STRIDED_SLICE] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 7 && init_param.output_count == 1); + + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2], + init_param.inputs[3]}; + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 1 -> A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the starts of + // the dimensions of the input tensor to be sliced. The length must be + // of rank(input0). + // 2 -> A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the ends of + // the dimensions of the input tensor to be sliced. The length must be + // of rank(input0). + // 3 -> A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the strides of + // the dimensions of the input tensor to be sliced. The length must be + // of rank(input0). + // 4 -> An {@link ANEURALNETWORKS_INT32} scalar, begin_mask. If the ith bit + // of begin_mask is set, begin[i] is ignored and the fullest possible + // range in that dimension is used instead. + // 5 -> An {@link ANEURALNETWORKS_INT32} scalar, end_mask. If the ith bit of + // end_mask is set, end[i] is ignored and the fullest possible range in + // that dimension is used instead. + // 6 -> An {@link ANEURALNETWORKS_INT32} scalar, shrink_axis_mask. An int32 + // mask. If the ith bit of shrink_axis_mask is set, it implies that the + // ith specification shrinks the dimensionality by 1. A slice of size 1 + // starting from begin[i] in the dimension must be preserved. 
+ + operation::StridedSlice::Param param; + + param.begin_mask = operands.at(OperandIndex{init_param.inputs[4]}).asScalar<std::int32_t>(); + param.end_mask = operands.at(OperandIndex{init_param.inputs[5]}).asScalar<std::int32_t>(); + param.shrink_axis_mask = + operands.at(OperandIndex{init_param.inputs[6]}).asScalar<std::int32_t>(); + param.rank = operands.at(inputs.at(0)).shape().rank(); + + return new operation::StridedSlice{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_TRANSPOSE] = [](const OperationFactory::Param &init_param, + Operands &operands) { + // TODO make this work with init_param.input_count == 1 (when permutation vector is optional) + + // Inputs + // 0: An n-D tensor, specifying the tensor to be transposed. + // 1: An optional 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, + // the permutation of the dimensions of the input tensor. + // The returned tensor's dimension i corresponds to the input dimension + // perm[i]. If perm is not given, it is set to (n-1...0), where n is the + // rank of the input tensor. Hence by default, this operation performs a + // regular matrix transpose on 2-D input Tensors. 
+ assert(init_param.input_count == 2); + assert(init_param.output_count == 1); + + OperandIndexSequence inputs{init_param.inputs[0]}; + OperandIndexSequence outputs{init_param.outputs[0]}; + std::vector<std::int32_t> perm = + operands.at(OperandIndex{init_param.inputs[1]}).asVector<std::int32_t>(); + + operation::Transpose::Param param; + param.perm.assign(perm.cbegin(), perm.cend()); + param.rank = operands.at(inputs.at(0)).shape().rank(); + + return new operation::Transpose{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_MUL] = [](const OperationFactory::Param &init_param, Operands &operands) { + assert(init_param.input_count == 3 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> LHS Tensor Index + // 1 -> RHS Tensor Index + // 2 -> Activation Index + + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + operation::Mul::Param param; + + const auto activation_index = OperandIndex{init_param.inputs[2]}; + param.activation = + NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); + + return new operation::Mul{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_SQUEEZE] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 1 || init_param.input_count == 2); + assert(init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> An n-D tensor, the tensor to be squeezed. + // 1 -> An optional 1-D tensor of ANEURALNETWORKS_TENSOR_INT32. The dimensions to squeeze. + // If specified only squeezes the dimensions listed. Otherwise, squeezes all dimensions. + // The dimension index starts at 0. An error must be reported if squeezing a dimension that + // is not 1. 
+ + // Add mandatory input index + OperandIndexSequence inputs{init_param.inputs[0]}; + + // Add dims index if specified + operation::Squeeze::Param param{}; + if (init_param.input_count == 2) + { + auto squeeze_dims_idx = OperandIndex{init_param.inputs[1]}; + assert(operands.at(squeeze_dims_idx).shape().rank() == 1); + assert(operands.at(squeeze_dims_idx).shape().dim(0) >= 0); + assert(static_cast<uint32_t>(operands.at(squeeze_dims_idx).shape().dim(0)) <= + sizeof(param.dims)); + param.ndim = operands.at(squeeze_dims_idx).shape().dim(0); + if (param.ndim > 0) + { + assert(operands.at(squeeze_dims_idx).data()); + memcpy(param.dims, operands.at(squeeze_dims_idx).data()->base(), + param.ndim * sizeof(param.dims[0])); + } + } + + return new operation::Squeeze{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_TANH] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 1 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> Input Tensor Index + OperandIndexSequence inputs{init_param.inputs[0]}; + + return new operation::Tanh{inputs, outputs}; + }; + + _map[ANEURALNETWORKS_LOGISTIC] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 1 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> Input Tensor Index + OperandIndexSequence inputs{init_param.inputs[0]}; + + return new operation::Logistic{inputs, outputs}; + }; + + _map[ANEURALNETWORKS_DIV] = [](const OperationFactory::Param &init_param, Operands &operands) { + assert(init_param.input_count == 3 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> LHS Tensor Index + // 1 -> RHS Tensor Index + // 2 -> Activation Index 
+ OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + operation::Div::Param param; + + const auto activation_index = OperandIndex{init_param.inputs[2]}; + param.activation = + NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); + + return new operation::Div{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_EXP] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 1 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> Input Tensor Index + OperandIndexSequence inputs{init_param.inputs[0]}; + + return new operation::Exp{inputs, outputs}; + }; + + // ANEURALNETWORKS_EXP_EX is deprecated + // TODO Remove ANEURALNETWORKS_EXP_EX + _map[ANEURALNETWORKS_EXP_EX] = _map[ANEURALNETWORKS_EXP]; + + _map[ANEURALNETWORKS_GREATER] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input0 Tensor Index + // 1 -> input1 Tensor Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + operation::Comparison::Param param; + param.comparison_type = operation::Comparison::ComparisonType::Greater; + + return new operation::Comparison{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_GREATER_EQUAL] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input0 Tensor Index + // 1 -> input1 Tensor Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + operation::Comparison::Param param; + param.comparison_type = 
operation::Comparison::ComparisonType::GreaterEqual; + + return new operation::Comparison{inputs, outputs, param}; + }; + + // ANEURALNETWORKS_GREATER_EQUAL_EX is deprecated + // TODO Remove ANEURALNETWORKS_GREATER_EQUAL_EX + _map[ANEURALNETWORKS_GREATER_EQUAL_EX] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input0 Tensor Index + // 1 -> input1 Tensor Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + operation::Comparison::Param param; + param.comparison_type = operation::Comparison::ComparisonType::GreaterEqual; + + // Output operand type must be boolean + replaceDataType(operands, outputs.at(0), DataType::BOOL8); + + return new operation::Comparison{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_LESS] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input0 Tensor Index + // 1 -> input1 Tensor Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + operation::Comparison::Param param; + param.comparison_type = operation::Comparison::ComparisonType::Less; + + return new operation::Comparison{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_LESS_EQUAL] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input0 Tensor Index + // 1 -> input1 Tensor Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + operation::Comparison::Param param; + 
param.comparison_type = operation::Comparison::ComparisonType::LessEqual; + + return new operation::Comparison{inputs, outputs, param}; + }; + + // ANEURALNETWORKS_LESS_EX is deprecated + // TODO Remove ANEURALNETWORKS_LESS_EX + _map[ANEURALNETWORKS_LESS_EX] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input0 Tensor Index + // 1 -> input1 Tensor Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + operation::Comparison::Param param; + param.comparison_type = operation::Comparison::ComparisonType::Less; + + // Output operand type must be boolean + replaceDataType(operands, outputs.at(0), DataType::BOOL8); + + return new operation::Comparison{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_REDUCE_MAX] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 3 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> Input Tensor Index + // 1 -> Axis Tensor Index + // 2 -> keep_dims Index + OperandIndexSequence inputs{init_param.inputs[0]}; + std::vector<std::int32_t> axes = + operands.at(OperandIndex{init_param.inputs[1]}).asVector<std::int32_t>(); + + operation::ReduceMax::Param param; + param.axes.assign(axes.cbegin(), axes.cend()); + param.keep_dims = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<int8_t>() != 0; + param.rank = operands.at(inputs.at(0)).shape().rank(); + + return new operation::ReduceMax{inputs, outputs, param}; + }; + + // ANEURALNETWORKS_REDUCE_MAX_EX is deprecated + // TODO Remove ANEURALNETWORKS_REDUCE_MAX_EX + _map[ANEURALNETWORKS_REDUCE_MAX_EX] = _map[ANEURALNETWORKS_REDUCE_MAX]; + + _map[ANEURALNETWORKS_NOT_EQUAL] = [](const 
OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input1 Tensor Index + // 1 -> input2 Tensor Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + operation::Comparison::Param param; + param.comparison_type = operation::Comparison::ComparisonType::NotEqual; + + return new operation::Comparison{inputs, outputs, param}; + }; + + // ANEURALNETWORKS_NOT_EQUAL_EX is deprecated + // TODO Remove ANEURALNETWORKS_NOT_EQUAL_EX + _map[ANEURALNETWORKS_NOT_EQUAL_EX] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input1 Tensor Index + // 1 -> input2 Tensor Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + operation::Comparison::Param param; + param.comparison_type = operation::Comparison::ComparisonType::NotEqual; + + // Output operand type must be boolean + replaceDataType(operands, outputs.at(0), DataType::BOOL8); + + return new operation::Comparison{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_LOGICAL_AND] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input0 Tensor Index + // 1 -> input1 Tensor Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + return new operation::LogicalAnd{inputs, outputs}; + }; + + // ANEURALNETWORKS_LOGICAL_AND_EX is deprecated + // TODO Remove ANEURALNETWORKS_LOGICAL_AND_EX + _map[ANEURALNETWORKS_LOGICAL_AND_EX] = [](const 
OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input0 Tensor Index + // 1 -> input1 Tensor Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + // This operation's operands must be boolean type. + replaceDataType(operands, inputs.at(0), DataType::BOOL8); + replaceDataType(operands, inputs.at(1), DataType::BOOL8); + replaceDataType(operands, outputs.at(0), DataType::BOOL8); + + return new operation::LogicalAnd{inputs, outputs}; + }; + + _map[ANEURALNETWORKS_RSQRT] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 1 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> Input Tensor Index + OperandIndexSequence inputs{init_param.inputs[0]}; + + return new operation::RSQRT{inputs, outputs}; + }; + + // ANEURALNETWORKS_RSQRT_EX is deprecated + // TODO Remove ANEURALNETWORKS_RSQRT_EX + _map[ANEURALNETWORKS_RSQRT_EX] = _map[ANEURALNETWORKS_RSQRT]; + + _map[ANEURALNETWORKS_RELU] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 1 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> Input Tensor Index + OperandIndexSequence inputs{init_param.inputs[0]}; + + return new operation::ReLU{inputs, outputs}; + }; + + _map[ANEURALNETWORKS_RESIZE_BILINEAR] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 3 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> IFM Index + // 1 -> Height 
Index + // 2 -> Width Index + // NOTE(review): the NNAPI spec documents RESIZE_BILINEAR input 1 as the output *width* and input 2 as the output *height*; here inputs[1] feeds height_out and inputs[2] feeds width_out -- confirm the intended ordering against NeuralNetworks.h. + OperandIndexSequence inputs{init_param.inputs[0]}; + + operation::ResizeBilinear::Param param; + param.height_out = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<int32_t>(); + param.width_out = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<int32_t>(); + + return new operation::ResizeBilinear{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_RELU1] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 1 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input Tensor Index + OperandIndexSequence inputs{init_param.inputs[0]}; + + return new operation::ReLU1{inputs, outputs}; + }; + + _map[ANEURALNETWORKS_RELU6] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 1 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input Tensor Index + OperandIndexSequence inputs{init_param.inputs[0]}; + + return new operation::ReLU6{inputs, outputs}; + }; + + _map[ANEURALNETWORKS_RNN] = [](const OperationFactory::Param &init_param, Operands &operands) { + assert(init_param.input_count == 6 && init_param.output_count == 2); + + // Each input should be interpreted as follows: + // + // 0 -> Input Tensor Index + // 1 -> Weights Tensor Index + // 2 -> Recurrent Weights Tensor Index + // 3 -> Bias Tensor Index + // 4 -> Hidden state (in) Index + // 5 -> Activation Index + + OperandIndexSequence inputs; + for (uint32_t n = 0; n < init_param.input_count - 1; ++n) + { + inputs.append(OperandIndex{init_param.inputs[n]}); + } + OperandIndexSequence outputs; + for (uint32_t n = 0; n < init_param.output_count; ++n) + { + outputs.append(OperandIndex{init_param.outputs[n]}); + } + + operation::RNN::Param param; + const auto activation_index =
OperandIndex{init_param.inputs[5]}; + param.activation = + NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); + + return new operation::RNN{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_FLOOR] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 1 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // 0 -> input Tensor Index + OperandIndexSequence inputs{init_param.inputs[0]}; + + return new operation::Floor{inputs, outputs}; + }; + + _map[ANEURALNETWORKS_SPACE_TO_BATCH_ND] = [](const OperationFactory::Param &init_param, + Operands &) { + assert(init_param.input_count == 3 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> Input Tensor Index + // 1 -> Block size Index + // 2 -> Paddings Index + OperandIndexSequence inputs; + for (uint32_t n = 0; n < init_param.input_count; ++n) + { + inputs.append(OperandIndex{init_param.inputs[n]}); + } + + return new operation::SpaceToBatchND{inputs, outputs}; + }; + + _map[ANEURALNETWORKS_SPACE_TO_DEPTH] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> Input Tensor Index + // 1 -> Block size Index + OperandIndexSequence inputs{init_param.inputs[0]}; + + operation::SpaceToDepth::Param param; + param.block_size = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>(); + + return new operation::SpaceToDepth{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_L2_POOL_2D] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 10 || init_param.input_count == 7); + 
assert(init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> IFM Tensor Index + OperandIndexSequence inputs{init_param.inputs[0]}; + + operation::L2Pool2D::Param param; + + if (init_param.input_count == 7) // Implicit Padding case + { + // 1 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index + // 2 -> Horizontal (over width) Stride Index + // 3 -> Vertical (over height) Stride Index + // 4 -> Filter Width Index + // 5 -> Filter Height Index + // 6 -> FuseCode (activation) Index + const auto padding_index = OperandIndex{init_param.inputs[1]}; + const auto hstride_index = OperandIndex{init_param.inputs[2]}; + const auto vstride_index = OperandIndex{init_param.inputs[3]}; + const auto kw_index = OperandIndex{init_param.inputs[4]}; + const auto kh_index = OperandIndex{init_param.inputs[5]}; + const auto activation_index = OperandIndex{init_param.inputs[6]}; + + param.padding.type = + NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>()); + param.stride = makeStride(operands, hstride_index, vstride_index); + param.kw = getUint32Scalar(operands, kw_index); + param.kh = getUint32Scalar(operands, kh_index); + param.activation = + NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); + } + else // Explicit Padding case + { + // 1 -> Padding_left index + // 2 -> Padding_right index + // 3 -> Padding_top index + // 4 -> Padding_bottom index + // 5 -> Horizontal (over width) Stride Index + // 6 -> Vertical (over height) Stride Index + // 7 -> Filter Width Index + // 8 -> Filter Height Index + // 9 -> FuseCode (activation) Index + const auto padding_left_index = OperandIndex{init_param.inputs[1]}; + const auto padding_right_index = OperandIndex{init_param.inputs[2]}; + const auto padding_top_index = OperandIndex{init_param.inputs[3]}; + const auto padding_bottom_index =
OperandIndex{init_param.inputs[4]}; + const auto hstride_index = OperandIndex{init_param.inputs[5]}; + const auto vstride_index = OperandIndex{init_param.inputs[6]}; + const auto kw_index = OperandIndex{init_param.inputs[7]}; + const auto kh_index = OperandIndex{init_param.inputs[8]}; + const auto activation_index = OperandIndex{init_param.inputs[9]}; + + param.padding.type = PaddingType::EXPLICIT; + param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index, + padding_top_index, padding_bottom_index); + param.stride = makeStride(operands, hstride_index, vstride_index); + param.kw = getUint32Scalar(operands, kw_index); + param.kh = getUint32Scalar(operands, kh_index); + param.activation = + NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>()); + } + + return new operation::L2Pool2D{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_EMBEDDING_LOOKUP] = [](const OperationFactory::Param &init_param, + Operands &) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> Lookups Index + // 1 -> Values Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + return new operation::EmbeddingLookup{inputs, outputs}; + }; + + _map[ANEURALNETWORKS_L2_NORMALIZATION] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 1 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // 0 -> input Tensor Index + OperandIndexSequence inputs{init_param.inputs[0]}; + + operation::L2Normalization::Param param; + param.rank = operands.at(inputs.at(0)).shape().rank(); + + return new operation::L2Normalization{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_HASHTABLE_LOOKUP] = [](const OperationFactory::Param 
&init_param, + Operands &) { + assert(init_param.input_count == 3 && init_param.output_count == 2); + + // Each output should be interpreted as follows: + // + // 0 -> Output Index + // 1 -> Hits Index + OperandIndexSequence outputs{init_param.outputs[0], init_param.outputs[1]}; + + // Each input should be interpreted as follows: + // + // 0 -> Lookups Index + // 1 -> Keys Index + // 2 -> Values Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]}; + + return new operation::HashtableLookup{inputs, outputs}; + }; + + _map[ANEURALNETWORKS_PRELU] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input Tensor Index + // 1 -> alpha Tensor Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + return new operation::PReLU{inputs, outputs}; + }; + + // ANEURALNETWORKS_PRELU_EX is deprecated + // TODO Remove ANEURALNETWORKS_PRELU_EX + _map[ANEURALNETWORKS_PRELU_EX] = _map[ANEURALNETWORKS_PRELU]; + + _map[ANEURALNETWORKS_TRANSPOSE_CONV_EX] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 6 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> Output Shape Index + // 1 -> Weights Index + // 2 -> Input Tensor Index + // 3 -> Padding Type + // 4 -> Stride width + // 5 -> Stride height + + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]}; + + operation::TransposeConv::Param param; + + const auto padding_index = OperandIndex{init_param.inputs[3]}; + const auto hstride_index = OperandIndex{init_param.inputs[4]}; + const auto vstride_index = OperandIndex{init_param.inputs[5]}; + + 
param.padding.type = + NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>()); + param.stride = makeStride(operands, hstride_index, vstride_index); + + return new operation::TransposeConv{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_SQRT] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 1 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // 0 -> input Tensor Index + + OperandIndexSequence inputs{init_param.inputs[0]}; + return new operation::SQRT{inputs, outputs}; + }; + + // ANEURALNETWORKS_SQRT_EX is deprecated + // TODO Remove ANEURALNETWORKS_SQRT_EX + _map[ANEURALNETWORKS_SQRT_EX] = _map[ANEURALNETWORKS_SQRT]; + + _map[ANEURALNETWORKS_LOGICAL_OR] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input0 Tensor Index + // 1 -> input1 Tensor Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + return new operation::LogicalOr{inputs, outputs}; + }; + + // ANEURALNETWORKS_LOGICAL_OR_EX is deprecated + // TODO Remove ANEURALNETWORKS_LOGICAL_OR_EX + _map[ANEURALNETWORKS_LOGICAL_OR_EX] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input0 Tensor Index + // 1 -> input1 Tensor Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + // This operation's operands must be boolean type. 
+ replaceDataType(operands, inputs.at(0), DataType::BOOL8); + replaceDataType(operands, inputs.at(1), DataType::BOOL8); + replaceDataType(operands, outputs.at(0), DataType::BOOL8); + + return new operation::LogicalOr{inputs, outputs}; + }; + + _map[ANEURALNETWORKS_LOGICAL_NOT] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 1 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input Tensor Index + OperandIndexSequence inputs{init_param.inputs[0]}; + + return new operation::LogicalNot{inputs, outputs}; + }; + + // ANEURALNETWORKS_LOGICAL_NOT_EX is deprecated + // TODO Remove ANEURALNETWORKS_LOGICAL_NOT_EX + _map[ANEURALNETWORKS_LOGICAL_NOT_EX] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 1 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input Tensor Index + OperandIndexSequence inputs{init_param.inputs[0]}; + + // This operation's operands must be boolean type. 
+ replaceDataType(operands, inputs.at(0), DataType::BOOL8); + replaceDataType(operands, outputs.at(0), DataType::BOOL8); + + return new operation::LogicalNot{inputs, outputs}; + }; + + _map[ANEURALNETWORKS_LSTM] = [](const OperationFactory::Param &init_param, Operands &operands) { + assert(init_param.input_count == 23 && init_param.output_count == 4); + + // Each input should be interpreted as follows: + // + // 0 -> Input Tensor Index + // 1 -> Input to Input Tensor Index + // 2 -> Input to Forget Tensor Index + // 3 -> Input to Cell Tensor Index + // 4 -> Input to Output Tensor Index + // 5 -> Recurrent to Input Weights Tensor Index + // 6 -> Recurrent to Forget Weights Tensor Index + // 7 -> Recurrent to Cell Weights Tensor Index + // 8 -> Recurrent to Output Weights Tensor Index + // 9 -> Cell to Input Weights Tensor Index + // 10 -> Cell to Forget Weights Tensor Index + // 11 -> Cell to Output Weights Tensor Index + // 12 -> Input Gate Bias Tensor Index + // 13 -> Forget Gate Bias Tensor Index + // 14 -> Cell Bias Tensor Index + // 15 -> Output Gate Bias Tensor Index + // 16 -> Projection Weights Tensor Index + // 17 -> Projection Bias Tensor Index + // 18 -> Output State In Tensor Index + // 19 -> Cell State In Tensor Index + OperandIndexSequence inputs; + for (uint32_t n = 0; n < init_param.input_count - 3; ++n) + { + inputs.append(OperandIndex{init_param.inputs[n]}); + } + + // Each output should be interpreted as follows: + // + // 0 -> Scratch Buffer Tensor Index + // 1 -> Output State Out Tensor Index + // 2 -> Cell State Out Tensor Index + // 3 -> Output Tensor Index + OperandIndexSequence outputs; + for (uint32_t n = 0; n < init_param.output_count; ++n) + { + outputs.append(OperandIndex{init_param.outputs[n]}); + } + + operation::LSTM::Param param; + const auto activation_index = OperandIndex{init_param.inputs[20]}; + switch (operands.at(activation_index).asScalar<int32_t>()) + { + case 0: + param.activation = Activation::NONE; + break; + case 1: + 
param.activation = Activation::RELU; + break; + case 2: + param.activation = Activation::RELU1; + break; + case 3: + param.activation = Activation::RELU6; + break; + case 4: + param.activation = Activation::TANH; + break; + case 6: + param.activation = Activation::SIGMOID; + break; + default: + throw std::runtime_error("Unsupported activation type"); + break; + } + param.cell_threshold = operands.at(OperandIndex{init_param.inputs[21]}).asScalar<float>(); + param.projection_threshold = operands.at(OperandIndex{init_param.inputs[22]}).asScalar<float>(); + + return new operation::LSTM{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_EQUAL] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input0 Tensor Index + // 1 -> input1 Tensor Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + operation::Comparison::Param param; + param.comparison_type = operation::Comparison::ComparisonType::Equal; + + return new operation::Comparison{inputs, outputs, param}; + }; + + // ANEURALNETWORKS_EQUAL_EX is deprecated + // TODO Remove ANEURALNETWORKS_EQUAL_EX + _map[ANEURALNETWORKS_EQUAL_EX] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input0 Tensor Index + // 1 -> input1 Tensor Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + operation::Comparison::Param param; + param.comparison_type = operation::Comparison::ComparisonType::Equal; + + // Output operand type must be boolean + replaceDataType(operands, outputs.at(0), DataType::BOOL8); + + return new operation::Comparison{inputs, outputs, 
param}; + }; + + _map[ANEURALNETWORKS_SQUARED_DIFFERENCE_EX] = [](const OperationFactory::Param &init_param, + Operands &) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> LHS Tensor Index + // 1 -> RHS Tensor Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + + return new operation::SquaredDifference{inputs, outputs}; + }; + + _map[ANEURALNETWORKS_TOPK_V2] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 2 && init_param.output_count == 2); + + // Each output should be interpreted as follows: + // + // 0 -> Index for Output Values + // 1 -> Index for Output Indices + OperandIndexSequence outputs{init_param.outputs[0], init_param.outputs[1]}; + + // Each input should be interpreted as follows: + // + // 0 -> Index for Input Data + // 1 -> Index for K + OperandIndexSequence inputs{init_param.inputs[0]}; + + operation::TopKV2::Param param; + param.k = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>(); + + return new operation::TopKV2{inputs, outputs, param}; + }; + + // ANEURALNETWORKS_TOPK_V2_EX is deprecated + // TODO Remove ANEURALNETWORKS_TOPK_V2_EX + _map[ANEURALNETWORKS_TOPK_V2_EX] = _map[ANEURALNETWORKS_TOPK_V2]; + + _map[ANEURALNETWORKS_GATHER] = [](const OperationFactory::Param &init_param, Operands &operands) { + assert(init_param.input_count == 3 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> input Tensor Index + // 1 -> axis Index + // 2 -> indices Tensor Index + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[2]}; + + operation::Gather::Param param; + param.axis = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<int32_t>(); + param.rank =
operands.at(inputs.at(0)).shape().rank(); + + return new operation::Gather{inputs, outputs, param}; + }; + + // ANEURALNETWORKS_GATHER_EX is deprecated + // TODO Remove ANEURALNETWORKS_GATHER_EX + _map[ANEURALNETWORKS_GATHER_EX] = _map[ANEURALNETWORKS_GATHER]; + + _map[ANEURALNETWORKS_NEG] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 1 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> Input Tensor Index + OperandIndexSequence inputs{init_param.inputs[0]}; + + return new operation::Neg{inputs, outputs}; + }; + + // ANEURALNETWORKS_NEG_EX is deprecated + // TODO Remove ANEURALNETWORKS_NEG_EX + _map[ANEURALNETWORKS_NEG_EX] = _map[ANEURALNETWORKS_NEG]; + + _map[ANEURALNETWORKS_ABS] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 1 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> Input Tensor Index + OperandIndexSequence inputs{init_param.inputs[0]}; + + return new operation::Abs{inputs, outputs}; + }; + + // ANEURALNETWORKS_ABS_EX is deprecated + // TODO Remove ANEURALNETWORKS_ABS_EX + _map[ANEURALNETWORKS_ABS_EX] = _map[ANEURALNETWORKS_ABS]; + + _map[ANEURALNETWORKS_ARGMAX] = [](const OperationFactory::Param &init_param, Operands &operands) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> Input Tensor Index + // 1 -> Axis Tensor Index + OperandIndexSequence inputs{init_param.inputs[0]}; + + operation::ArgMax::Param param; + param.axis = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>(); + param.rank = operands.at(inputs.at(0)).shape().rank(); + + return new operation::ArgMax{inputs, 
outputs, param}; + }; + + // ANEURALNETWORKS_ARGMAX_EX is deprecated + // TODO Remove ANEURALNETWORKS_ARGMAX_EX + _map[ANEURALNETWORKS_ARGMAX_EX] = _map[ANEURALNETWORKS_ARGMAX]; + + _map[ANEURALNETWORKS_DEQUANTIZE] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 1 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> Input Tensor Index + OperandIndexSequence inputs{init_param.inputs[0]}; + + return new operation::Dequantize{inputs, outputs}; + }; + + _map[ANEURALNETWORKS_MEAN] = [](const OperationFactory::Param &init_param, Operands &operands) { + assert(init_param.input_count == 3 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> ifm Tensor Index + // 1 -> axis Tensor Index + // 2 -> keep_dims Index + OperandIndexSequence inputs{init_param.inputs[0]}; + std::vector<std::int32_t> axes = + operands.at(OperandIndex{init_param.inputs[1]}).asVector<std::int32_t>(); + + operation::Mean::Param param; + param.axes.assign(axes.cbegin(), axes.cend()); + param.keep_dims = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<int32_t>() != 0; + param.rank = operands.at(inputs.at(0)).shape().rank(); + + return new operation::Mean{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 5 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + OperandIndexSequence inputs{init_param.inputs[0]}; + + operation::LocalResponseNormalization::Param param; + param.radius = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>(); + param.bias = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<float>(); + param.alpha = 
operands.at(OperandIndex{init_param.inputs[3]}).asScalar<float>(); + param.beta = operands.at(OperandIndex{init_param.inputs[4]}).asScalar<float>(); + + return new operation::LocalResponseNormalization{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_DEPTH_TO_SPACE] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> Input Tensor Index + // 1 -> Block size Index + OperandIndexSequence inputs{init_param.inputs[0]}; + + operation::DepthToSpace::Param param; + param.block_size = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>(); + + return new operation::DepthToSpace{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_PACK_EX] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count >= 3 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + OperandIndexSequence inputs; + for (uint32_t n = 0; n < init_param.input_count - 2; ++n) + { + inputs.append(OperandIndex{init_param.inputs[n]}); + } + + operation::Pack::Param param; + const auto num_index = OperandIndex{init_param.inputs[init_param.input_count - 2]}; + const auto axis_index = OperandIndex{init_param.inputs[init_param.input_count - 1]}; + param.num = operands.at(num_index).asScalar<int32_t>(); + param.axis = operands.at(axis_index).asScalar<int32_t>(); + param.rank = operands.at(outputs.at(0)).shape().rank(); + + return new operation::Pack{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_REDUCE_MIN] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 3 && init_param.output_count == 1); + + OperandIndexSequence outputs{init_param.outputs[0]}; + + // Each input should be interpreted as follows: + // + // 0 -> Input Tensor Index + 
// 1 -> Axis Tensor Index + // 2 -> keep_dims Index + OperandIndexSequence inputs{init_param.inputs[0]}; + std::vector<std::int32_t> axes = + operands.at(OperandIndex{init_param.inputs[1]}).asVector<std::int32_t>(); + + operation::ReduceMin::Param param; + param.axes.assign(axes.cbegin(), axes.cend()); + // NOTE(review): keep_dims is decoded as int8_t here while ANEURALNETWORKS_MEAN decodes the same flag as int32_t -- presumably the NNAPI BOOL (1-byte) vs INT32 operand types; confirm the operand type of input 2. + param.keep_dims = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<int8_t>() != 0; + param.rank = operands.at(inputs.at(0)).shape().rank(); + + return new operation::ReduceMin{inputs, outputs, param}; + }; + + // ANEURALNETWORKS_REDUCE_MIN_EX is deprecated + // TODO Remove ANEURALNETWORKS_REDUCE_MIN_EX + _map[ANEURALNETWORKS_REDUCE_MIN_EX] = _map[ANEURALNETWORKS_REDUCE_MIN]; + + _map[ANEURALNETWORKS_SPLIT] = [](const OperationFactory::Param &init_param, Operands &operands) { + assert(init_param.input_count == 3); + assert(init_param.output_count >= 1); // At least one output tensor and axis + + OperandIndexSequence inputs{init_param.inputs[0]}; + OperandIndexSequence outputs; + for (uint32_t n = 0; n < init_param.output_count; ++n) + { + outputs.append(OperandIndex{init_param.outputs[n]}); + } + + operation::Split::Param param; + param.axis = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>(); + param.num_splits = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<std::int32_t>(); + param.rank = operands.at(inputs.at(0)).shape().rank(); + + return new operation::Split{inputs, outputs, param}; + }; + + // ANEURALNETWORKS_SPLIT_EX is deprecated + // TODO Remove ANEURALNETWORKS_SPLIT_EX + _map[ANEURALNETWORKS_SPLIT_EX] = _map[ANEURALNETWORKS_SPLIT]; + + _map[ANEURALNETWORKS_UNPACK_EX] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 3 && init_param.output_count >= 1); + + OperandIndexSequence inputs{init_param.inputs[0]}; + OperandIndexSequence outputs; + for (uint32_t n = 0; n < init_param.output_count; ++n) + { + outputs.append(OperandIndex{init_param.outputs[n]}); + } + +
operation::Unpack::Param param; + const auto num_index = OperandIndex{init_param.inputs[1]}; + const auto axis_index = OperandIndex{init_param.inputs[2]}; + param.num = operands.at(num_index).asScalar<int32_t>(); + param.axis = operands.at(axis_index).asScalar<int32_t>(); + param.rank = operands.at(inputs.at(0)).shape().rank(); + + return new operation::Unpack{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_PAD] = [](const OperationFactory::Param &init_param, Operands &operands) { + assert(init_param.input_count == 2 && init_param.output_count >= 1); + + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + OperandIndexSequence outputs{init_param.outputs[0]}; + + operation::Pad::Param param; + param.rank = operands.at(inputs.at(0)).shape().rank(); + + return new operation::Pad{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_MINIMUM] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + OperandIndexSequence outputs{init_param.outputs[0]}; + + return new operation::Min{inputs, outputs}; + }; + + _map[ANEURALNETWORKS_MAXIMUM] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 2 && init_param.output_count == 1); + + OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]}; + OperandIndexSequence outputs{init_param.outputs[0]}; + + return new operation::Max{inputs, outputs}; + }; + + _map[ANEURALNETWORKS_ONE_HOT_EX] = [](const OperationFactory::Param &init_param, + Operands &operands) { + assert(init_param.input_count == 5); + assert(init_param.output_count == 1); + // Each input should be interpreted as follows: + // + // 0 -> indices tensor + // 1 -> depth scalar + // 2 -> on_value scalar + // 3 -> off_value scalar + // 4 -> axis scalar + OperandIndexSequence inputs{init_param.inputs[0]}; + OperandIndexSequence 
outputs{init_param.outputs[0]}; + + operation::OneHot::Param param; + param.depth = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>(); + param.on_value = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<float>(); + param.off_value = operands.at(OperandIndex{init_param.inputs[3]}).asScalar<float>(); + param.axis = operands.at(OperandIndex{init_param.inputs[4]}).asScalar<std::int32_t>(); + + return new operation::OneHot{inputs, outputs, param}; + }; + + _map[ANEURALNETWORKS_SIN] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 1 && init_param.output_count == 1); + + OperandIndexSequence inputs{init_param.inputs[0]}; + OperandIndexSequence outputs{init_param.outputs[0]}; + + return new operation::Sin{inputs, outputs}; + }; + + _map[ANEURALNETWORKS_SHAPE_EX] = [](const OperationFactory::Param &init_param, Operands &) { + assert(init_param.input_count == 1 && init_param.output_count == 1); + + OperandIndexSequence inputs{init_param.inputs[0]}; + OperandIndexSequence outputs{init_param.outputs[0]}; + + return new operation::Shape{inputs, outputs}; + }; +} + +Operation *OperationFactory::create(ANeuralNetworksOperationType type, + const OperationFactory::Param ¶m, Operands &operands) +{ + auto it = _map.find(type); + if (it == _map.end()) + { + throw std::runtime_error("Unsupported operation type: " + std::to_string(type)); + } + return it->second(param, operands); +} diff --git a/runtime/onert/frontend/nnapi/wrapper/OperationFactory.h b/runtime/onert/frontend/nnapi/wrapper/OperationFactory.h new file mode 100644 index 000000000..367cf74db --- /dev/null +++ b/runtime/onert/frontend/nnapi/wrapper/OperationFactory.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __OPERATION_FACTORY_H__ +#define __OPERATION_FACTORY_H__ + +#include <unordered_map> + +#include "ir/Operands.h" +#include "ir/Operation.h" +#include "NeuralNetworks.h" +#include "NeuralNetworksEx.h" + +/** + * @brief A class to create a onert operation object from NN API input parameters + */ +class OperationFactory +{ +public: + struct Param + { + uint32_t input_count; + const uint32_t *inputs; + uint32_t output_count; + const uint32_t *outputs; + }; + +public: + using Generator = + std::function<onert::ir::Operation *(const OperationFactory::Param &, onert::ir::Operands &)>; + +public: + static OperationFactory &get(); + +private: + OperationFactory(); + +public: + onert::ir::Operation *create(ANeuralNetworksOperationType, const OperationFactory::Param &param, + onert::ir::Operands &operands); + // TODO add "register" method for separating registration, possibly supporting custom-ops + +private: + std::unordered_map<ANeuralNetworksOperationType, Generator> _map; +}; + +#endif // __OPERATION_FACTORY_H__ diff --git a/runtime/onert/frontend/tflite/CMakeLists.txt b/runtime/onert/frontend/tflite/CMakeLists.txt new file mode 100644 index 000000000..229f04f32 --- /dev/null +++ b/runtime/onert/frontend/tflite/CMakeLists.txt @@ -0,0 +1,17 @@ +if(NOT BUILD_TFLITE_LOADER) + return() +endif(NOT BUILD_TFLITE_LOADER) + +nnfw_find_package(FlatBuffersSource REQUIRED) + +set(TFLITE_LOADER_SOURCES src/tflite_loader.cc) + +add_library(tflite_loader SHARED ${TFLITE_LOADER_SOURCES}) + +target_include_directories(tflite_loader PUBLIC
${CMAKE_CURRENT_SOURCE_DIR}/include)
target_include_directories(tflite_loader PRIVATE ${FlatBuffersSource_DIR}/include)

target_link_libraries(tflite_loader PUBLIC onert_core)
target_link_libraries(tflite_loader PRIVATE base_loader nnfw_common nnfw_coverage)

install(TARGETS tflite_loader DESTINATION lib)
/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __TFLITE_TFLITE_LOADER_H__
#define __TFLITE_TFLITE_LOADER_H__

#include "ir/Graph.h"

#include <memory>

namespace onert
{
namespace tflite_loader
{

/**
 * @brief Load a TensorFlow Lite flatbuffer model file into an onert graph.
 *
 * @param filename Path to the .tflite model file
 * @return unique_ptr owning the primary subgraph built from the model
 * @note Presumably throws on read/verification failure via the underlying
 *       BaseLoader — TODO confirm against base_loader.h before relying on it.
 */
std::unique_ptr<ir::Graph> loadModel(const char *filename);

} // namespace tflite_loader
} // namespace onert

#endif // __TFLITE_TFLITE_LOADER_H__
/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd.
All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "tflite_loader.h"
#include "base_loader.h"
#include "tflite_schema_generated.h"

namespace onert
{
namespace tflite_loader
{

namespace
{

// Trait bundle that instantiates the shared base_loader::BaseLoader template
// for the TFLite flatbuffer schema: each alias maps a name the template
// expects onto the corresponding onert_tflite generated type, and each static
// function forwards to the generated free function of the same name.
struct LoaderDomain
{
  using Verifier = flatbuffers::Verifier;
  using ActivationFunctionType = onert_tflite::ActivationFunctionType;
  using Buffer = onert_tflite::Buffer;
  using BuiltinOperator = onert_tflite::BuiltinOperator;
  using CustomOptionsFormat = onert_tflite::CustomOptionsFormat;
  using Model = onert_tflite::Model;
  using Operator = onert_tflite::Operator;
  using Padding = onert_tflite::Padding;
  using Pool2DOptions = onert_tflite::Pool2DOptions;
  using Tensor = onert_tflite::Tensor;
  using TensorType = onert_tflite::TensorType;
  using SubGraph = onert_tflite::SubGraph;

  // Enum pretty-printers forwarded so BaseLoader can name schema enums
  // (presumably in diagnostics — BaseLoader's use sites are not visible here).
  static const char *EnumNameBuiltinOperator(BuiltinOperator e)
  {
    return onert_tflite::EnumNameBuiltinOperator(e);
  }
  static const char *EnumNameActivationFunctionType(ActivationFunctionType e)
  {
    return onert_tflite::EnumNameActivationFunctionType(e);
  }
  static const char *EnumNameTensorType(TensorType e)
  {
    return onert_tflite::EnumNameTensorType(e);
  }
  // Root accessor / flatbuffer verification entry points for the model buffer.
  static const Model *GetModel(const void *buf) { return onert_tflite::GetModel(buf); }
  static bool VerifyModelBuffer(Verifier &verifier)
  {
    return onert_tflite::VerifyModelBuffer(verifier);
  }
};

class TFLiteLoader final : public
base_loader::BaseLoader<LoaderDomain, TFLiteLoader> +{ +public: + using BaseLoader::BaseLoader; + + std::unique_ptr<ir::Graph> loadSubgraph(const onert_tflite::SubGraph *tflite_subg) + { + auto subg = std::make_unique<ir::Graph>(); + // Load tensors + _tensor_to_operand.resize(tflite_subg->tensors()->size()); + for (flatbuffers::uoffset_t i = 0; i < tflite_subg->tensors()->size(); ++i) + { + _tensor_to_operand[i] = loadOperand(tflite_subg->tensors()->Get(i), *subg); + } + // Set inputs + for (const std::int32_t input_ind : *tflite_subg->inputs()) + { + subg->addInput(_tensor_to_operand[input_ind]); + } + // Set outputs + for (const std::int32_t output_ind : *tflite_subg->outputs()) + { + subg->addOutput(_tensor_to_operand[output_ind]); + } + // Create operations + for (const auto *op : *tflite_subg->operators()) + { + loadOperation(op, *subg); + } + + subg->finishBuilding(); + + return subg; + } +}; + +} // namespace + +std::unique_ptr<ir::Graph> loadModel(const char *filename) +{ + auto primary_subgraph = std::make_unique<ir::Graph>(); + TFLiteLoader loader(primary_subgraph); + loader.loadFromFile(filename); + return primary_subgraph; +} + +} // namespace tflite_loader +} // namespace onert diff --git a/runtime/onert/frontend/tflite/src/tflite_schema_generated.h b/runtime/onert/frontend/tflite/src/tflite_schema_generated.h new file mode 100644 index 000000000..c6e9147cd --- /dev/null +++ b/runtime/onert/frontend/tflite/src/tflite_schema_generated.h @@ -0,0 +1,9553 @@ +/* + * Copyright (c) 2019-2020 Samsung Electronics Co., Ltd. All Rights Reserved + * Copyright 2018 The TensorFlow Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// automatically generated by the FlatBuffers compiler, do not modify + +#ifndef FLATBUFFERS_GENERATED_TFLITESCHEMA_ONERT_TFLITE_H_ +#define FLATBUFFERS_GENERATED_TFLITESCHEMA_ONERT_TFLITE_H_ + +#include "flatbuffers/flatbuffers.h" + +namespace onert_tflite +{ + +struct CustomQuantization; + +struct QuantizationParameters; + +struct Int32Vector; + +struct Uint16Vector; + +struct Uint8Vector; + +struct DimensionMetadata; + +struct SparsityParameters; + +struct Tensor; + +struct Conv2DOptions; + +struct Pool2DOptions; + +struct DepthwiseConv2DOptions; + +struct ConcatEmbeddingsOptions; + +struct LSHProjectionOptions; + +struct SVDFOptions; + +struct RNNOptions; + +struct SequenceRNNOptions; + +struct BidirectionalSequenceRNNOptions; + +struct FullyConnectedOptions; + +struct SoftmaxOptions; + +struct ConcatenationOptions; + +struct AddOptions; + +struct MulOptions; + +struct L2NormOptions; + +struct LocalResponseNormalizationOptions; + +struct LSTMOptions; + +struct UnidirectionalSequenceLSTMOptions; + +struct BidirectionalSequenceLSTMOptions; + +struct ResizeBilinearOptions; + +struct ResizeNearestNeighborOptions; + +struct CallOptions; + +struct PadOptions; + +struct PadV2Options; + +struct ReshapeOptions; + +struct SpaceToBatchNDOptions; + +struct BatchToSpaceNDOptions; + +struct SkipGramOptions; + +struct SpaceToDepthOptions; + +struct DepthToSpaceOptions; + +struct SubOptions; + +struct DivOptions; + +struct TopKV2Options; + +struct EmbeddingLookupSparseOptions; + +struct GatherOptions; + +struct TransposeOptions; + +struct ExpOptions; + 
+struct CosOptions; + +struct ReducerOptions; + +struct SqueezeOptions; + +struct SplitOptions; + +struct SplitVOptions; + +struct StridedSliceOptions; + +struct LogSoftmaxOptions; + +struct CastOptions; + +struct DequantizeOptions; + +struct MaximumMinimumOptions; + +struct TileOptions; + +struct ArgMaxOptions; + +struct ArgMinOptions; + +struct GreaterOptions; + +struct GreaterEqualOptions; + +struct LessOptions; + +struct LessEqualOptions; + +struct NegOptions; + +struct SelectOptions; + +struct SliceOptions; + +struct TransposeConvOptions; + +struct ExpandDimsOptions; + +struct SparseToDenseOptions; + +struct EqualOptions; + +struct NotEqualOptions; + +struct ShapeOptions; + +struct RankOptions; + +struct PowOptions; + +struct FakeQuantOptions; + +struct PackOptions; + +struct LogicalOrOptions; + +struct OneHotOptions; + +struct AbsOptions; + +struct HardSwishOptions; + +struct LogicalAndOptions; + +struct LogicalNotOptions; + +struct UnpackOptions; + +struct FloorDivOptions; + +struct SquareOptions; + +struct ZerosLikeOptions; + +struct FillOptions; + +struct FloorModOptions; + +struct RangeOptions; + +struct LeakyReluOptions; + +struct SquaredDifferenceOptions; + +struct MirrorPadOptions; + +struct UniqueOptions; + +struct ReverseV2Options; + +struct AddNOptions; + +struct GatherNdOptions; + +struct WhereOptions; + +struct ReverseSequenceOptions; + +struct MatrixDiagOptions; + +struct QuantizeOptions; + +struct MatrixSetDiagOptions; + +struct IfOptions; + +struct WhileOptions; + +struct NonMaxSuppressionV4Options; + +struct NonMaxSuppressionV5Options; + +struct ScatterNdOptions; + +struct SelectV2Options; + +struct DensifyOptions; + +struct SegmentSumOptions; + +struct BatchMatMulOptions; + +struct OperatorCode; + +struct Operator; + +struct SubGraph; + +struct Buffer; + +struct Metadata; + +struct Model; + +enum TensorType +{ + TensorType_FLOAT32 = 0, + TensorType_FLOAT16 = 1, + TensorType_INT32 = 2, + TensorType_UINT8 = 3, + TensorType_INT64 = 4, + 
TensorType_STRING = 5, + TensorType_BOOL = 6, + TensorType_INT16 = 7, + TensorType_COMPLEX64 = 8, + TensorType_INT8 = 9, + TensorType_FLOAT64 = 10, + TensorType_MIN = TensorType_FLOAT32, + TensorType_MAX = TensorType_FLOAT64 +}; + +inline const TensorType (&EnumValuesTensorType())[11] +{ + static const TensorType values[] = {TensorType_FLOAT32, TensorType_FLOAT16, TensorType_INT32, + TensorType_UINT8, TensorType_INT64, TensorType_STRING, + TensorType_BOOL, TensorType_INT16, TensorType_COMPLEX64, + TensorType_INT8, TensorType_FLOAT64}; + return values; +} + +inline const char *const *EnumNamesTensorType() +{ + static const char *const names[] = {"FLOAT32", "FLOAT16", "INT32", "UINT8", + "INT64", "STRING", "BOOL", "INT16", + "COMPLEX64", "INT8", "FLOAT64", nullptr}; + return names; +} + +inline const char *EnumNameTensorType(TensorType e) +{ + const size_t index = static_cast<int>(e); + return EnumNamesTensorType()[index]; +} + +enum QuantizationDetails +{ + QuantizationDetails_NONE = 0, + QuantizationDetails_CustomQuantization = 1, + QuantizationDetails_MIN = QuantizationDetails_NONE, + QuantizationDetails_MAX = QuantizationDetails_CustomQuantization +}; + +inline const QuantizationDetails (&EnumValuesQuantizationDetails())[2] +{ + static const QuantizationDetails values[] = {QuantizationDetails_NONE, + QuantizationDetails_CustomQuantization}; + return values; +} + +inline const char *const *EnumNamesQuantizationDetails() +{ + static const char *const names[] = {"NONE", "CustomQuantization", nullptr}; + return names; +} + +inline const char *EnumNameQuantizationDetails(QuantizationDetails e) +{ + const size_t index = static_cast<int>(e); + return EnumNamesQuantizationDetails()[index]; +} + +template <typename T> struct QuantizationDetailsTraits +{ + static const QuantizationDetails enum_value = QuantizationDetails_NONE; +}; + +template <> struct QuantizationDetailsTraits<CustomQuantization> +{ + static const QuantizationDetails enum_value = 
QuantizationDetails_CustomQuantization; +}; + +bool VerifyQuantizationDetails(flatbuffers::Verifier &verifier, const void *obj, + QuantizationDetails type); +bool VerifyQuantizationDetailsVector(flatbuffers::Verifier &verifier, + const flatbuffers::Vector<flatbuffers::Offset<void>> *values, + const flatbuffers::Vector<uint8_t> *types); + +enum DimensionType +{ + DimensionType_DENSE = 0, + DimensionType_SPARSE_CSR = 1, + DimensionType_MIN = DimensionType_DENSE, + DimensionType_MAX = DimensionType_SPARSE_CSR +}; + +inline const DimensionType (&EnumValuesDimensionType())[2] +{ + static const DimensionType values[] = {DimensionType_DENSE, DimensionType_SPARSE_CSR}; + return values; +} + +inline const char *const *EnumNamesDimensionType() +{ + static const char *const names[] = {"DENSE", "SPARSE_CSR", nullptr}; + return names; +} + +inline const char *EnumNameDimensionType(DimensionType e) +{ + const size_t index = static_cast<int>(e); + return EnumNamesDimensionType()[index]; +} + +enum SparseIndexVector +{ + SparseIndexVector_NONE = 0, + SparseIndexVector_Int32Vector = 1, + SparseIndexVector_Uint16Vector = 2, + SparseIndexVector_Uint8Vector = 3, + SparseIndexVector_MIN = SparseIndexVector_NONE, + SparseIndexVector_MAX = SparseIndexVector_Uint8Vector +}; + +inline const SparseIndexVector (&EnumValuesSparseIndexVector())[4] +{ + static const SparseIndexVector values[] = {SparseIndexVector_NONE, SparseIndexVector_Int32Vector, + SparseIndexVector_Uint16Vector, + SparseIndexVector_Uint8Vector}; + return values; +} + +inline const char *const *EnumNamesSparseIndexVector() +{ + static const char *const names[] = {"NONE", "Int32Vector", "Uint16Vector", "Uint8Vector", + nullptr}; + return names; +} + +inline const char *EnumNameSparseIndexVector(SparseIndexVector e) +{ + const size_t index = static_cast<int>(e); + return EnumNamesSparseIndexVector()[index]; +} + +template <typename T> struct SparseIndexVectorTraits +{ + static const SparseIndexVector enum_value = 
SparseIndexVector_NONE; +}; + +template <> struct SparseIndexVectorTraits<Int32Vector> +{ + static const SparseIndexVector enum_value = SparseIndexVector_Int32Vector; +}; + +template <> struct SparseIndexVectorTraits<Uint16Vector> +{ + static const SparseIndexVector enum_value = SparseIndexVector_Uint16Vector; +}; + +template <> struct SparseIndexVectorTraits<Uint8Vector> +{ + static const SparseIndexVector enum_value = SparseIndexVector_Uint8Vector; +}; + +bool VerifySparseIndexVector(flatbuffers::Verifier &verifier, const void *obj, + SparseIndexVector type); +bool VerifySparseIndexVectorVector(flatbuffers::Verifier &verifier, + const flatbuffers::Vector<flatbuffers::Offset<void>> *values, + const flatbuffers::Vector<uint8_t> *types); + +enum BuiltinOperator +{ + BuiltinOperator_ADD = 0, + BuiltinOperator_AVERAGE_POOL_2D = 1, + BuiltinOperator_CONCATENATION = 2, + BuiltinOperator_CONV_2D = 3, + BuiltinOperator_DEPTHWISE_CONV_2D = 4, + BuiltinOperator_DEPTH_TO_SPACE = 5, + BuiltinOperator_DEQUANTIZE = 6, + BuiltinOperator_EMBEDDING_LOOKUP = 7, + BuiltinOperator_FLOOR = 8, + BuiltinOperator_FULLY_CONNECTED = 9, + BuiltinOperator_HASHTABLE_LOOKUP = 10, + BuiltinOperator_L2_NORMALIZATION = 11, + BuiltinOperator_L2_POOL_2D = 12, + BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION = 13, + BuiltinOperator_LOGISTIC = 14, + BuiltinOperator_LSH_PROJECTION = 15, + BuiltinOperator_LSTM = 16, + BuiltinOperator_MAX_POOL_2D = 17, + BuiltinOperator_MUL = 18, + BuiltinOperator_RELU = 19, + BuiltinOperator_RELU_N1_TO_1 = 20, + BuiltinOperator_RELU6 = 21, + BuiltinOperator_RESHAPE = 22, + BuiltinOperator_RESIZE_BILINEAR = 23, + BuiltinOperator_RNN = 24, + BuiltinOperator_SOFTMAX = 25, + BuiltinOperator_SPACE_TO_DEPTH = 26, + BuiltinOperator_SVDF = 27, + BuiltinOperator_TANH = 28, + BuiltinOperator_CONCAT_EMBEDDINGS = 29, + BuiltinOperator_SKIP_GRAM = 30, + BuiltinOperator_CALL = 31, + BuiltinOperator_CUSTOM = 32, + BuiltinOperator_EMBEDDING_LOOKUP_SPARSE = 33, + BuiltinOperator_PAD = 
34, + BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN = 35, + BuiltinOperator_GATHER = 36, + BuiltinOperator_BATCH_TO_SPACE_ND = 37, + BuiltinOperator_SPACE_TO_BATCH_ND = 38, + BuiltinOperator_TRANSPOSE = 39, + BuiltinOperator_MEAN = 40, + BuiltinOperator_SUB = 41, + BuiltinOperator_DIV = 42, + BuiltinOperator_SQUEEZE = 43, + BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM = 44, + BuiltinOperator_STRIDED_SLICE = 45, + BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN = 46, + BuiltinOperator_EXP = 47, + BuiltinOperator_TOPK_V2 = 48, + BuiltinOperator_SPLIT = 49, + BuiltinOperator_LOG_SOFTMAX = 50, + BuiltinOperator_DELEGATE = 51, + BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM = 52, + BuiltinOperator_CAST = 53, + BuiltinOperator_PRELU = 54, + BuiltinOperator_MAXIMUM = 55, + BuiltinOperator_ARG_MAX = 56, + BuiltinOperator_MINIMUM = 57, + BuiltinOperator_LESS = 58, + BuiltinOperator_NEG = 59, + BuiltinOperator_PADV2 = 60, + BuiltinOperator_GREATER = 61, + BuiltinOperator_GREATER_EQUAL = 62, + BuiltinOperator_LESS_EQUAL = 63, + BuiltinOperator_SELECT = 64, + BuiltinOperator_SLICE = 65, + BuiltinOperator_SIN = 66, + BuiltinOperator_TRANSPOSE_CONV = 67, + BuiltinOperator_SPARSE_TO_DENSE = 68, + BuiltinOperator_TILE = 69, + BuiltinOperator_EXPAND_DIMS = 70, + BuiltinOperator_EQUAL = 71, + BuiltinOperator_NOT_EQUAL = 72, + BuiltinOperator_LOG = 73, + BuiltinOperator_SUM = 74, + BuiltinOperator_SQRT = 75, + BuiltinOperator_RSQRT = 76, + BuiltinOperator_SHAPE = 77, + BuiltinOperator_POW = 78, + BuiltinOperator_ARG_MIN = 79, + BuiltinOperator_FAKE_QUANT = 80, + BuiltinOperator_REDUCE_PROD = 81, + BuiltinOperator_REDUCE_MAX = 82, + BuiltinOperator_PACK = 83, + BuiltinOperator_LOGICAL_OR = 84, + BuiltinOperator_ONE_HOT = 85, + BuiltinOperator_LOGICAL_AND = 86, + BuiltinOperator_LOGICAL_NOT = 87, + BuiltinOperator_UNPACK = 88, + BuiltinOperator_REDUCE_MIN = 89, + BuiltinOperator_FLOOR_DIV = 90, + BuiltinOperator_REDUCE_ANY = 91, + BuiltinOperator_SQUARE = 92, + BuiltinOperator_ZEROS_LIKE = 93, 
+ BuiltinOperator_FILL = 94, + BuiltinOperator_FLOOR_MOD = 95, + BuiltinOperator_RANGE = 96, + BuiltinOperator_RESIZE_NEAREST_NEIGHBOR = 97, + BuiltinOperator_LEAKY_RELU = 98, + BuiltinOperator_SQUARED_DIFFERENCE = 99, + BuiltinOperator_MIRROR_PAD = 100, + BuiltinOperator_ABS = 101, + BuiltinOperator_SPLIT_V = 102, + BuiltinOperator_UNIQUE = 103, + BuiltinOperator_CEIL = 104, + BuiltinOperator_REVERSE_V2 = 105, + BuiltinOperator_ADD_N = 106, + BuiltinOperator_GATHER_ND = 107, + BuiltinOperator_COS = 108, + BuiltinOperator_WHERE = 109, + BuiltinOperator_RANK = 110, + BuiltinOperator_ELU = 111, + BuiltinOperator_REVERSE_SEQUENCE = 112, + BuiltinOperator_MATRIX_DIAG = 113, + BuiltinOperator_QUANTIZE = 114, + BuiltinOperator_MATRIX_SET_DIAG = 115, + BuiltinOperator_ROUND = 116, + BuiltinOperator_HARD_SWISH = 117, + BuiltinOperator_IF = 118, + BuiltinOperator_WHILE = 119, + BuiltinOperator_NON_MAX_SUPPRESSION_V4 = 120, + BuiltinOperator_NON_MAX_SUPPRESSION_V5 = 121, + BuiltinOperator_SCATTER_ND = 122, + BuiltinOperator_SELECT_V2 = 123, + BuiltinOperator_DENSIFY = 124, + BuiltinOperator_SEGMENT_SUM = 125, + BuiltinOperator_BATCH_MATMUL = 126, + BuiltinOperator_MIN = BuiltinOperator_ADD, + BuiltinOperator_MAX = BuiltinOperator_BATCH_MATMUL +}; + +inline const BuiltinOperator (&EnumValuesBuiltinOperator())[127] +{ + static const BuiltinOperator values[] = {BuiltinOperator_ADD, + BuiltinOperator_AVERAGE_POOL_2D, + BuiltinOperator_CONCATENATION, + BuiltinOperator_CONV_2D, + BuiltinOperator_DEPTHWISE_CONV_2D, + BuiltinOperator_DEPTH_TO_SPACE, + BuiltinOperator_DEQUANTIZE, + BuiltinOperator_EMBEDDING_LOOKUP, + BuiltinOperator_FLOOR, + BuiltinOperator_FULLY_CONNECTED, + BuiltinOperator_HASHTABLE_LOOKUP, + BuiltinOperator_L2_NORMALIZATION, + BuiltinOperator_L2_POOL_2D, + BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION, + BuiltinOperator_LOGISTIC, + BuiltinOperator_LSH_PROJECTION, + BuiltinOperator_LSTM, + BuiltinOperator_MAX_POOL_2D, + BuiltinOperator_MUL, + BuiltinOperator_RELU, + 
BuiltinOperator_RELU_N1_TO_1, + BuiltinOperator_RELU6, + BuiltinOperator_RESHAPE, + BuiltinOperator_RESIZE_BILINEAR, + BuiltinOperator_RNN, + BuiltinOperator_SOFTMAX, + BuiltinOperator_SPACE_TO_DEPTH, + BuiltinOperator_SVDF, + BuiltinOperator_TANH, + BuiltinOperator_CONCAT_EMBEDDINGS, + BuiltinOperator_SKIP_GRAM, + BuiltinOperator_CALL, + BuiltinOperator_CUSTOM, + BuiltinOperator_EMBEDDING_LOOKUP_SPARSE, + BuiltinOperator_PAD, + BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN, + BuiltinOperator_GATHER, + BuiltinOperator_BATCH_TO_SPACE_ND, + BuiltinOperator_SPACE_TO_BATCH_ND, + BuiltinOperator_TRANSPOSE, + BuiltinOperator_MEAN, + BuiltinOperator_SUB, + BuiltinOperator_DIV, + BuiltinOperator_SQUEEZE, + BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM, + BuiltinOperator_STRIDED_SLICE, + BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN, + BuiltinOperator_EXP, + BuiltinOperator_TOPK_V2, + BuiltinOperator_SPLIT, + BuiltinOperator_LOG_SOFTMAX, + BuiltinOperator_DELEGATE, + BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM, + BuiltinOperator_CAST, + BuiltinOperator_PRELU, + BuiltinOperator_MAXIMUM, + BuiltinOperator_ARG_MAX, + BuiltinOperator_MINIMUM, + BuiltinOperator_LESS, + BuiltinOperator_NEG, + BuiltinOperator_PADV2, + BuiltinOperator_GREATER, + BuiltinOperator_GREATER_EQUAL, + BuiltinOperator_LESS_EQUAL, + BuiltinOperator_SELECT, + BuiltinOperator_SLICE, + BuiltinOperator_SIN, + BuiltinOperator_TRANSPOSE_CONV, + BuiltinOperator_SPARSE_TO_DENSE, + BuiltinOperator_TILE, + BuiltinOperator_EXPAND_DIMS, + BuiltinOperator_EQUAL, + BuiltinOperator_NOT_EQUAL, + BuiltinOperator_LOG, + BuiltinOperator_SUM, + BuiltinOperator_SQRT, + BuiltinOperator_RSQRT, + BuiltinOperator_SHAPE, + BuiltinOperator_POW, + BuiltinOperator_ARG_MIN, + BuiltinOperator_FAKE_QUANT, + BuiltinOperator_REDUCE_PROD, + BuiltinOperator_REDUCE_MAX, + BuiltinOperator_PACK, + BuiltinOperator_LOGICAL_OR, + BuiltinOperator_ONE_HOT, + BuiltinOperator_LOGICAL_AND, + BuiltinOperator_LOGICAL_NOT, + BuiltinOperator_UNPACK, + 
BuiltinOperator_REDUCE_MIN, + BuiltinOperator_FLOOR_DIV, + BuiltinOperator_REDUCE_ANY, + BuiltinOperator_SQUARE, + BuiltinOperator_ZEROS_LIKE, + BuiltinOperator_FILL, + BuiltinOperator_FLOOR_MOD, + BuiltinOperator_RANGE, + BuiltinOperator_RESIZE_NEAREST_NEIGHBOR, + BuiltinOperator_LEAKY_RELU, + BuiltinOperator_SQUARED_DIFFERENCE, + BuiltinOperator_MIRROR_PAD, + BuiltinOperator_ABS, + BuiltinOperator_SPLIT_V, + BuiltinOperator_UNIQUE, + BuiltinOperator_CEIL, + BuiltinOperator_REVERSE_V2, + BuiltinOperator_ADD_N, + BuiltinOperator_GATHER_ND, + BuiltinOperator_COS, + BuiltinOperator_WHERE, + BuiltinOperator_RANK, + BuiltinOperator_ELU, + BuiltinOperator_REVERSE_SEQUENCE, + BuiltinOperator_MATRIX_DIAG, + BuiltinOperator_QUANTIZE, + BuiltinOperator_MATRIX_SET_DIAG, + BuiltinOperator_ROUND, + BuiltinOperator_HARD_SWISH, + BuiltinOperator_IF, + BuiltinOperator_WHILE, + BuiltinOperator_NON_MAX_SUPPRESSION_V4, + BuiltinOperator_NON_MAX_SUPPRESSION_V5, + BuiltinOperator_SCATTER_ND, + BuiltinOperator_SELECT_V2, + BuiltinOperator_DENSIFY, + BuiltinOperator_SEGMENT_SUM, + BuiltinOperator_BATCH_MATMUL}; + return values; +} + +inline const char *const *EnumNamesBuiltinOperator() +{ + static const char *const names[] = {"ADD", + "AVERAGE_POOL_2D", + "CONCATENATION", + "CONV_2D", + "DEPTHWISE_CONV_2D", + "DEPTH_TO_SPACE", + "DEQUANTIZE", + "EMBEDDING_LOOKUP", + "FLOOR", + "FULLY_CONNECTED", + "HASHTABLE_LOOKUP", + "L2_NORMALIZATION", + "L2_POOL_2D", + "LOCAL_RESPONSE_NORMALIZATION", + "LOGISTIC", + "LSH_PROJECTION", + "LSTM", + "MAX_POOL_2D", + "MUL", + "RELU", + "RELU_N1_TO_1", + "RELU6", + "RESHAPE", + "RESIZE_BILINEAR", + "RNN", + "SOFTMAX", + "SPACE_TO_DEPTH", + "SVDF", + "TANH", + "CONCAT_EMBEDDINGS", + "SKIP_GRAM", + "CALL", + "CUSTOM", + "EMBEDDING_LOOKUP_SPARSE", + "PAD", + "UNIDIRECTIONAL_SEQUENCE_RNN", + "GATHER", + "BATCH_TO_SPACE_ND", + "SPACE_TO_BATCH_ND", + "TRANSPOSE", + "MEAN", + "SUB", + "DIV", + "SQUEEZE", + "UNIDIRECTIONAL_SEQUENCE_LSTM", + "STRIDED_SLICE", + 
"BIDIRECTIONAL_SEQUENCE_RNN", + "EXP", + "TOPK_V2", + "SPLIT", + "LOG_SOFTMAX", + "DELEGATE", + "BIDIRECTIONAL_SEQUENCE_LSTM", + "CAST", + "PRELU", + "MAXIMUM", + "ARG_MAX", + "MINIMUM", + "LESS", + "NEG", + "PADV2", + "GREATER", + "GREATER_EQUAL", + "LESS_EQUAL", + "SELECT", + "SLICE", + "SIN", + "TRANSPOSE_CONV", + "SPARSE_TO_DENSE", + "TILE", + "EXPAND_DIMS", + "EQUAL", + "NOT_EQUAL", + "LOG", + "SUM", + "SQRT", + "RSQRT", + "SHAPE", + "POW", + "ARG_MIN", + "FAKE_QUANT", + "REDUCE_PROD", + "REDUCE_MAX", + "PACK", + "LOGICAL_OR", + "ONE_HOT", + "LOGICAL_AND", + "LOGICAL_NOT", + "UNPACK", + "REDUCE_MIN", + "FLOOR_DIV", + "REDUCE_ANY", + "SQUARE", + "ZEROS_LIKE", + "FILL", + "FLOOR_MOD", + "RANGE", + "RESIZE_NEAREST_NEIGHBOR", + "LEAKY_RELU", + "SQUARED_DIFFERENCE", + "MIRROR_PAD", + "ABS", + "SPLIT_V", + "UNIQUE", + "CEIL", + "REVERSE_V2", + "ADD_N", + "GATHER_ND", + "COS", + "WHERE", + "RANK", + "ELU", + "REVERSE_SEQUENCE", + "MATRIX_DIAG", + "QUANTIZE", + "MATRIX_SET_DIAG", + "ROUND", + "HARD_SWISH", + "IF", + "WHILE", + "NON_MAX_SUPPRESSION_V4", + "NON_MAX_SUPPRESSION_V5", + "SCATTER_ND", + "SELECT_V2", + "DENSIFY", + "SEGMENT_SUM", + "BATCH_MATMUL", + nullptr}; + return names; +} + +inline const char *EnumNameBuiltinOperator(BuiltinOperator e) +{ + const size_t index = static_cast<int>(e); + return EnumNamesBuiltinOperator()[index]; +} + +enum BuiltinOptions +{ + BuiltinOptions_NONE = 0, + BuiltinOptions_Conv2DOptions = 1, + BuiltinOptions_DepthwiseConv2DOptions = 2, + BuiltinOptions_ConcatEmbeddingsOptions = 3, + BuiltinOptions_LSHProjectionOptions = 4, + BuiltinOptions_Pool2DOptions = 5, + BuiltinOptions_SVDFOptions = 6, + BuiltinOptions_RNNOptions = 7, + BuiltinOptions_FullyConnectedOptions = 8, + BuiltinOptions_SoftmaxOptions = 9, + BuiltinOptions_ConcatenationOptions = 10, + BuiltinOptions_AddOptions = 11, + BuiltinOptions_L2NormOptions = 12, + BuiltinOptions_LocalResponseNormalizationOptions = 13, + BuiltinOptions_LSTMOptions = 14, + 
BuiltinOptions_ResizeBilinearOptions = 15, + BuiltinOptions_CallOptions = 16, + BuiltinOptions_ReshapeOptions = 17, + BuiltinOptions_SkipGramOptions = 18, + BuiltinOptions_SpaceToDepthOptions = 19, + BuiltinOptions_EmbeddingLookupSparseOptions = 20, + BuiltinOptions_MulOptions = 21, + BuiltinOptions_PadOptions = 22, + BuiltinOptions_GatherOptions = 23, + BuiltinOptions_BatchToSpaceNDOptions = 24, + BuiltinOptions_SpaceToBatchNDOptions = 25, + BuiltinOptions_TransposeOptions = 26, + BuiltinOptions_ReducerOptions = 27, + BuiltinOptions_SubOptions = 28, + BuiltinOptions_DivOptions = 29, + BuiltinOptions_SqueezeOptions = 30, + BuiltinOptions_SequenceRNNOptions = 31, + BuiltinOptions_StridedSliceOptions = 32, + BuiltinOptions_ExpOptions = 33, + BuiltinOptions_TopKV2Options = 34, + BuiltinOptions_SplitOptions = 35, + BuiltinOptions_LogSoftmaxOptions = 36, + BuiltinOptions_CastOptions = 37, + BuiltinOptions_DequantizeOptions = 38, + BuiltinOptions_MaximumMinimumOptions = 39, + BuiltinOptions_ArgMaxOptions = 40, + BuiltinOptions_LessOptions = 41, + BuiltinOptions_NegOptions = 42, + BuiltinOptions_PadV2Options = 43, + BuiltinOptions_GreaterOptions = 44, + BuiltinOptions_GreaterEqualOptions = 45, + BuiltinOptions_LessEqualOptions = 46, + BuiltinOptions_SelectOptions = 47, + BuiltinOptions_SliceOptions = 48, + BuiltinOptions_TransposeConvOptions = 49, + BuiltinOptions_SparseToDenseOptions = 50, + BuiltinOptions_TileOptions = 51, + BuiltinOptions_ExpandDimsOptions = 52, + BuiltinOptions_EqualOptions = 53, + BuiltinOptions_NotEqualOptions = 54, + BuiltinOptions_ShapeOptions = 55, + BuiltinOptions_PowOptions = 56, + BuiltinOptions_ArgMinOptions = 57, + BuiltinOptions_FakeQuantOptions = 58, + BuiltinOptions_PackOptions = 59, + BuiltinOptions_LogicalOrOptions = 60, + BuiltinOptions_OneHotOptions = 61, + BuiltinOptions_LogicalAndOptions = 62, + BuiltinOptions_LogicalNotOptions = 63, + BuiltinOptions_UnpackOptions = 64, + BuiltinOptions_FloorDivOptions = 65, + 
BuiltinOptions_SquareOptions = 66, + BuiltinOptions_ZerosLikeOptions = 67, + BuiltinOptions_FillOptions = 68, + BuiltinOptions_BidirectionalSequenceLSTMOptions = 69, + BuiltinOptions_BidirectionalSequenceRNNOptions = 70, + BuiltinOptions_UnidirectionalSequenceLSTMOptions = 71, + BuiltinOptions_FloorModOptions = 72, + BuiltinOptions_RangeOptions = 73, + BuiltinOptions_ResizeNearestNeighborOptions = 74, + BuiltinOptions_LeakyReluOptions = 75, + BuiltinOptions_SquaredDifferenceOptions = 76, + BuiltinOptions_MirrorPadOptions = 77, + BuiltinOptions_AbsOptions = 78, + BuiltinOptions_SplitVOptions = 79, + BuiltinOptions_UniqueOptions = 80, + BuiltinOptions_ReverseV2Options = 81, + BuiltinOptions_AddNOptions = 82, + BuiltinOptions_GatherNdOptions = 83, + BuiltinOptions_CosOptions = 84, + BuiltinOptions_WhereOptions = 85, + BuiltinOptions_RankOptions = 86, + BuiltinOptions_ReverseSequenceOptions = 87, + BuiltinOptions_MatrixDiagOptions = 88, + BuiltinOptions_QuantizeOptions = 89, + BuiltinOptions_MatrixSetDiagOptions = 90, + BuiltinOptions_HardSwishOptions = 91, + BuiltinOptions_IfOptions = 92, + BuiltinOptions_WhileOptions = 93, + BuiltinOptions_DepthToSpaceOptions = 94, + BuiltinOptions_NonMaxSuppressionV4Options = 95, + BuiltinOptions_NonMaxSuppressionV5Options = 96, + BuiltinOptions_ScatterNdOptions = 97, + BuiltinOptions_SelectV2Options = 98, + BuiltinOptions_DensifyOptions = 99, + BuiltinOptions_SegmentSumOptions = 100, + BuiltinOptions_BatchMatMulOptions = 101, + BuiltinOptions_MIN = BuiltinOptions_NONE, + BuiltinOptions_MAX = BuiltinOptions_BatchMatMulOptions +}; + +inline const BuiltinOptions (&EnumValuesBuiltinOptions())[102] +{ + static const BuiltinOptions values[] = {BuiltinOptions_NONE, + BuiltinOptions_Conv2DOptions, + BuiltinOptions_DepthwiseConv2DOptions, + BuiltinOptions_ConcatEmbeddingsOptions, + BuiltinOptions_LSHProjectionOptions, + BuiltinOptions_Pool2DOptions, + BuiltinOptions_SVDFOptions, + BuiltinOptions_RNNOptions, + 
BuiltinOptions_FullyConnectedOptions, + BuiltinOptions_SoftmaxOptions, + BuiltinOptions_ConcatenationOptions, + BuiltinOptions_AddOptions, + BuiltinOptions_L2NormOptions, + BuiltinOptions_LocalResponseNormalizationOptions, + BuiltinOptions_LSTMOptions, + BuiltinOptions_ResizeBilinearOptions, + BuiltinOptions_CallOptions, + BuiltinOptions_ReshapeOptions, + BuiltinOptions_SkipGramOptions, + BuiltinOptions_SpaceToDepthOptions, + BuiltinOptions_EmbeddingLookupSparseOptions, + BuiltinOptions_MulOptions, + BuiltinOptions_PadOptions, + BuiltinOptions_GatherOptions, + BuiltinOptions_BatchToSpaceNDOptions, + BuiltinOptions_SpaceToBatchNDOptions, + BuiltinOptions_TransposeOptions, + BuiltinOptions_ReducerOptions, + BuiltinOptions_SubOptions, + BuiltinOptions_DivOptions, + BuiltinOptions_SqueezeOptions, + BuiltinOptions_SequenceRNNOptions, + BuiltinOptions_StridedSliceOptions, + BuiltinOptions_ExpOptions, + BuiltinOptions_TopKV2Options, + BuiltinOptions_SplitOptions, + BuiltinOptions_LogSoftmaxOptions, + BuiltinOptions_CastOptions, + BuiltinOptions_DequantizeOptions, + BuiltinOptions_MaximumMinimumOptions, + BuiltinOptions_ArgMaxOptions, + BuiltinOptions_LessOptions, + BuiltinOptions_NegOptions, + BuiltinOptions_PadV2Options, + BuiltinOptions_GreaterOptions, + BuiltinOptions_GreaterEqualOptions, + BuiltinOptions_LessEqualOptions, + BuiltinOptions_SelectOptions, + BuiltinOptions_SliceOptions, + BuiltinOptions_TransposeConvOptions, + BuiltinOptions_SparseToDenseOptions, + BuiltinOptions_TileOptions, + BuiltinOptions_ExpandDimsOptions, + BuiltinOptions_EqualOptions, + BuiltinOptions_NotEqualOptions, + BuiltinOptions_ShapeOptions, + BuiltinOptions_PowOptions, + BuiltinOptions_ArgMinOptions, + BuiltinOptions_FakeQuantOptions, + BuiltinOptions_PackOptions, + BuiltinOptions_LogicalOrOptions, + BuiltinOptions_OneHotOptions, + BuiltinOptions_LogicalAndOptions, + BuiltinOptions_LogicalNotOptions, + BuiltinOptions_UnpackOptions, + BuiltinOptions_FloorDivOptions, + 
BuiltinOptions_SquareOptions, + BuiltinOptions_ZerosLikeOptions, + BuiltinOptions_FillOptions, + BuiltinOptions_BidirectionalSequenceLSTMOptions, + BuiltinOptions_BidirectionalSequenceRNNOptions, + BuiltinOptions_UnidirectionalSequenceLSTMOptions, + BuiltinOptions_FloorModOptions, + BuiltinOptions_RangeOptions, + BuiltinOptions_ResizeNearestNeighborOptions, + BuiltinOptions_LeakyReluOptions, + BuiltinOptions_SquaredDifferenceOptions, + BuiltinOptions_MirrorPadOptions, + BuiltinOptions_AbsOptions, + BuiltinOptions_SplitVOptions, + BuiltinOptions_UniqueOptions, + BuiltinOptions_ReverseV2Options, + BuiltinOptions_AddNOptions, + BuiltinOptions_GatherNdOptions, + BuiltinOptions_CosOptions, + BuiltinOptions_WhereOptions, + BuiltinOptions_RankOptions, + BuiltinOptions_ReverseSequenceOptions, + BuiltinOptions_MatrixDiagOptions, + BuiltinOptions_QuantizeOptions, + BuiltinOptions_MatrixSetDiagOptions, + BuiltinOptions_HardSwishOptions, + BuiltinOptions_IfOptions, + BuiltinOptions_WhileOptions, + BuiltinOptions_DepthToSpaceOptions, + BuiltinOptions_NonMaxSuppressionV4Options, + BuiltinOptions_NonMaxSuppressionV5Options, + BuiltinOptions_ScatterNdOptions, + BuiltinOptions_SelectV2Options, + BuiltinOptions_DensifyOptions, + BuiltinOptions_SegmentSumOptions, + BuiltinOptions_BatchMatMulOptions}; + return values; +} + +inline const char *const *EnumNamesBuiltinOptions() +{ + static const char *const names[] = {"NONE", + "Conv2DOptions", + "DepthwiseConv2DOptions", + "ConcatEmbeddingsOptions", + "LSHProjectionOptions", + "Pool2DOptions", + "SVDFOptions", + "RNNOptions", + "FullyConnectedOptions", + "SoftmaxOptions", + "ConcatenationOptions", + "AddOptions", + "L2NormOptions", + "LocalResponseNormalizationOptions", + "LSTMOptions", + "ResizeBilinearOptions", + "CallOptions", + "ReshapeOptions", + "SkipGramOptions", + "SpaceToDepthOptions", + "EmbeddingLookupSparseOptions", + "MulOptions", + "PadOptions", + "GatherOptions", + "BatchToSpaceNDOptions", + "SpaceToBatchNDOptions", + 
"TransposeOptions", + "ReducerOptions", + "SubOptions", + "DivOptions", + "SqueezeOptions", + "SequenceRNNOptions", + "StridedSliceOptions", + "ExpOptions", + "TopKV2Options", + "SplitOptions", + "LogSoftmaxOptions", + "CastOptions", + "DequantizeOptions", + "MaximumMinimumOptions", + "ArgMaxOptions", + "LessOptions", + "NegOptions", + "PadV2Options", + "GreaterOptions", + "GreaterEqualOptions", + "LessEqualOptions", + "SelectOptions", + "SliceOptions", + "TransposeConvOptions", + "SparseToDenseOptions", + "TileOptions", + "ExpandDimsOptions", + "EqualOptions", + "NotEqualOptions", + "ShapeOptions", + "PowOptions", + "ArgMinOptions", + "FakeQuantOptions", + "PackOptions", + "LogicalOrOptions", + "OneHotOptions", + "LogicalAndOptions", + "LogicalNotOptions", + "UnpackOptions", + "FloorDivOptions", + "SquareOptions", + "ZerosLikeOptions", + "FillOptions", + "BidirectionalSequenceLSTMOptions", + "BidirectionalSequenceRNNOptions", + "UnidirectionalSequenceLSTMOptions", + "FloorModOptions", + "RangeOptions", + "ResizeNearestNeighborOptions", + "LeakyReluOptions", + "SquaredDifferenceOptions", + "MirrorPadOptions", + "AbsOptions", + "SplitVOptions", + "UniqueOptions", + "ReverseV2Options", + "AddNOptions", + "GatherNdOptions", + "CosOptions", + "WhereOptions", + "RankOptions", + "ReverseSequenceOptions", + "MatrixDiagOptions", + "QuantizeOptions", + "MatrixSetDiagOptions", + "HardSwishOptions", + "IfOptions", + "WhileOptions", + "DepthToSpaceOptions", + "NonMaxSuppressionV4Options", + "NonMaxSuppressionV5Options", + "ScatterNdOptions", + "SelectV2Options", + "DensifyOptions", + "SegmentSumOptions", + "BatchMatMulOptions", + nullptr}; + return names; +} + +inline const char *EnumNameBuiltinOptions(BuiltinOptions e) +{ + const size_t index = static_cast<int>(e); + return EnumNamesBuiltinOptions()[index]; +} + +template <typename T> struct BuiltinOptionsTraits +{ + static const BuiltinOptions enum_value = BuiltinOptions_NONE; +}; + +template <> struct 
BuiltinOptionsTraits<Conv2DOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_Conv2DOptions; +}; + +template <> struct BuiltinOptionsTraits<DepthwiseConv2DOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_DepthwiseConv2DOptions; +}; + +template <> struct BuiltinOptionsTraits<ConcatEmbeddingsOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_ConcatEmbeddingsOptions; +}; + +template <> struct BuiltinOptionsTraits<LSHProjectionOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_LSHProjectionOptions; +}; + +template <> struct BuiltinOptionsTraits<Pool2DOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_Pool2DOptions; +}; + +template <> struct BuiltinOptionsTraits<SVDFOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_SVDFOptions; +}; + +template <> struct BuiltinOptionsTraits<RNNOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_RNNOptions; +}; + +template <> struct BuiltinOptionsTraits<FullyConnectedOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_FullyConnectedOptions; +}; + +template <> struct BuiltinOptionsTraits<SoftmaxOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_SoftmaxOptions; +}; + +template <> struct BuiltinOptionsTraits<ConcatenationOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_ConcatenationOptions; +}; + +template <> struct BuiltinOptionsTraits<AddOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_AddOptions; +}; + +template <> struct BuiltinOptionsTraits<L2NormOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_L2NormOptions; +}; + +template <> struct BuiltinOptionsTraits<LocalResponseNormalizationOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_LocalResponseNormalizationOptions; +}; + +template <> struct BuiltinOptionsTraits<LSTMOptions> +{ + static const BuiltinOptions enum_value = 
BuiltinOptions_LSTMOptions; +}; + +template <> struct BuiltinOptionsTraits<ResizeBilinearOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_ResizeBilinearOptions; +}; + +template <> struct BuiltinOptionsTraits<CallOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_CallOptions; +}; + +template <> struct BuiltinOptionsTraits<ReshapeOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_ReshapeOptions; +}; + +template <> struct BuiltinOptionsTraits<SkipGramOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_SkipGramOptions; +}; + +template <> struct BuiltinOptionsTraits<SpaceToDepthOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_SpaceToDepthOptions; +}; + +template <> struct BuiltinOptionsTraits<EmbeddingLookupSparseOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_EmbeddingLookupSparseOptions; +}; + +template <> struct BuiltinOptionsTraits<MulOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_MulOptions; +}; + +template <> struct BuiltinOptionsTraits<PadOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_PadOptions; +}; + +template <> struct BuiltinOptionsTraits<GatherOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_GatherOptions; +}; + +template <> struct BuiltinOptionsTraits<BatchToSpaceNDOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_BatchToSpaceNDOptions; +}; + +template <> struct BuiltinOptionsTraits<SpaceToBatchNDOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_SpaceToBatchNDOptions; +}; + +template <> struct BuiltinOptionsTraits<TransposeOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_TransposeOptions; +}; + +template <> struct BuiltinOptionsTraits<ReducerOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_ReducerOptions; +}; + +template <> struct BuiltinOptionsTraits<SubOptions> +{ + static const 
BuiltinOptions enum_value = BuiltinOptions_SubOptions; +}; + +template <> struct BuiltinOptionsTraits<DivOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_DivOptions; +}; + +template <> struct BuiltinOptionsTraits<SqueezeOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_SqueezeOptions; +}; + +template <> struct BuiltinOptionsTraits<SequenceRNNOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_SequenceRNNOptions; +}; + +template <> struct BuiltinOptionsTraits<StridedSliceOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_StridedSliceOptions; +}; + +template <> struct BuiltinOptionsTraits<ExpOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_ExpOptions; +}; + +template <> struct BuiltinOptionsTraits<TopKV2Options> +{ + static const BuiltinOptions enum_value = BuiltinOptions_TopKV2Options; +}; + +template <> struct BuiltinOptionsTraits<SplitOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_SplitOptions; +}; + +template <> struct BuiltinOptionsTraits<LogSoftmaxOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_LogSoftmaxOptions; +}; + +template <> struct BuiltinOptionsTraits<CastOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_CastOptions; +}; + +template <> struct BuiltinOptionsTraits<DequantizeOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_DequantizeOptions; +}; + +template <> struct BuiltinOptionsTraits<MaximumMinimumOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_MaximumMinimumOptions; +}; + +template <> struct BuiltinOptionsTraits<ArgMaxOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_ArgMaxOptions; +}; + +template <> struct BuiltinOptionsTraits<LessOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_LessOptions; +}; + +template <> struct BuiltinOptionsTraits<NegOptions> +{ + static const BuiltinOptions enum_value = 
BuiltinOptions_NegOptions; +}; + +template <> struct BuiltinOptionsTraits<PadV2Options> +{ + static const BuiltinOptions enum_value = BuiltinOptions_PadV2Options; +}; + +template <> struct BuiltinOptionsTraits<GreaterOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_GreaterOptions; +}; + +template <> struct BuiltinOptionsTraits<GreaterEqualOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_GreaterEqualOptions; +}; + +template <> struct BuiltinOptionsTraits<LessEqualOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_LessEqualOptions; +}; + +template <> struct BuiltinOptionsTraits<SelectOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_SelectOptions; +}; + +template <> struct BuiltinOptionsTraits<SliceOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_SliceOptions; +}; + +template <> struct BuiltinOptionsTraits<TransposeConvOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_TransposeConvOptions; +}; + +template <> struct BuiltinOptionsTraits<SparseToDenseOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_SparseToDenseOptions; +}; + +template <> struct BuiltinOptionsTraits<TileOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_TileOptions; +}; + +template <> struct BuiltinOptionsTraits<ExpandDimsOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_ExpandDimsOptions; +}; + +template <> struct BuiltinOptionsTraits<EqualOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_EqualOptions; +}; + +template <> struct BuiltinOptionsTraits<NotEqualOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_NotEqualOptions; +}; + +template <> struct BuiltinOptionsTraits<ShapeOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_ShapeOptions; +}; + +template <> struct BuiltinOptionsTraits<PowOptions> +{ + static const BuiltinOptions enum_value = 
BuiltinOptions_PowOptions; +}; + +template <> struct BuiltinOptionsTraits<ArgMinOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_ArgMinOptions; +}; + +template <> struct BuiltinOptionsTraits<FakeQuantOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_FakeQuantOptions; +}; + +template <> struct BuiltinOptionsTraits<PackOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_PackOptions; +}; + +template <> struct BuiltinOptionsTraits<LogicalOrOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_LogicalOrOptions; +}; + +template <> struct BuiltinOptionsTraits<OneHotOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_OneHotOptions; +}; + +template <> struct BuiltinOptionsTraits<LogicalAndOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_LogicalAndOptions; +}; + +template <> struct BuiltinOptionsTraits<LogicalNotOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_LogicalNotOptions; +}; + +template <> struct BuiltinOptionsTraits<UnpackOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_UnpackOptions; +}; + +template <> struct BuiltinOptionsTraits<FloorDivOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_FloorDivOptions; +}; + +template <> struct BuiltinOptionsTraits<SquareOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_SquareOptions; +}; + +template <> struct BuiltinOptionsTraits<ZerosLikeOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_ZerosLikeOptions; +}; + +template <> struct BuiltinOptionsTraits<FillOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_FillOptions; +}; + +template <> struct BuiltinOptionsTraits<BidirectionalSequenceLSTMOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_BidirectionalSequenceLSTMOptions; +}; + +template <> struct BuiltinOptionsTraits<BidirectionalSequenceRNNOptions> +{ + static const 
BuiltinOptions enum_value = BuiltinOptions_BidirectionalSequenceRNNOptions; +}; + +template <> struct BuiltinOptionsTraits<UnidirectionalSequenceLSTMOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_UnidirectionalSequenceLSTMOptions; +}; + +template <> struct BuiltinOptionsTraits<FloorModOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_FloorModOptions; +}; + +template <> struct BuiltinOptionsTraits<RangeOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_RangeOptions; +}; + +template <> struct BuiltinOptionsTraits<ResizeNearestNeighborOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_ResizeNearestNeighborOptions; +}; + +template <> struct BuiltinOptionsTraits<LeakyReluOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_LeakyReluOptions; +}; + +template <> struct BuiltinOptionsTraits<SquaredDifferenceOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_SquaredDifferenceOptions; +}; + +template <> struct BuiltinOptionsTraits<MirrorPadOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_MirrorPadOptions; +}; + +template <> struct BuiltinOptionsTraits<AbsOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_AbsOptions; +}; + +template <> struct BuiltinOptionsTraits<SplitVOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_SplitVOptions; +}; + +template <> struct BuiltinOptionsTraits<UniqueOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_UniqueOptions; +}; + +template <> struct BuiltinOptionsTraits<ReverseV2Options> +{ + static const BuiltinOptions enum_value = BuiltinOptions_ReverseV2Options; +}; + +template <> struct BuiltinOptionsTraits<AddNOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_AddNOptions; +}; + +template <> struct BuiltinOptionsTraits<GatherNdOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_GatherNdOptions; +}; + +template <> 
struct BuiltinOptionsTraits<CosOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_CosOptions; +}; + +template <> struct BuiltinOptionsTraits<WhereOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_WhereOptions; +}; + +template <> struct BuiltinOptionsTraits<RankOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_RankOptions; +}; + +template <> struct BuiltinOptionsTraits<ReverseSequenceOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_ReverseSequenceOptions; +}; + +template <> struct BuiltinOptionsTraits<MatrixDiagOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_MatrixDiagOptions; +}; + +template <> struct BuiltinOptionsTraits<QuantizeOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_QuantizeOptions; +}; + +template <> struct BuiltinOptionsTraits<MatrixSetDiagOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_MatrixSetDiagOptions; +}; + +template <> struct BuiltinOptionsTraits<HardSwishOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_HardSwishOptions; +}; + +template <> struct BuiltinOptionsTraits<IfOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_IfOptions; +}; + +template <> struct BuiltinOptionsTraits<WhileOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_WhileOptions; +}; + +template <> struct BuiltinOptionsTraits<DepthToSpaceOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_DepthToSpaceOptions; +}; + +template <> struct BuiltinOptionsTraits<NonMaxSuppressionV4Options> +{ + static const BuiltinOptions enum_value = BuiltinOptions_NonMaxSuppressionV4Options; +}; + +template <> struct BuiltinOptionsTraits<NonMaxSuppressionV5Options> +{ + static const BuiltinOptions enum_value = BuiltinOptions_NonMaxSuppressionV5Options; +}; + +template <> struct BuiltinOptionsTraits<ScatterNdOptions> +{ + static const BuiltinOptions enum_value = 
BuiltinOptions_ScatterNdOptions; +}; + +template <> struct BuiltinOptionsTraits<SelectV2Options> +{ + static const BuiltinOptions enum_value = BuiltinOptions_SelectV2Options; +}; + +template <> struct BuiltinOptionsTraits<DensifyOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_DensifyOptions; +}; + +template <> struct BuiltinOptionsTraits<SegmentSumOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_SegmentSumOptions; +}; + +template <> struct BuiltinOptionsTraits<BatchMatMulOptions> +{ + static const BuiltinOptions enum_value = BuiltinOptions_BatchMatMulOptions; +}; + +bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions type); +bool VerifyBuiltinOptionsVector(flatbuffers::Verifier &verifier, + const flatbuffers::Vector<flatbuffers::Offset<void>> *values, + const flatbuffers::Vector<uint8_t> *types); + +enum Padding +{ + Padding_SAME = 0, + Padding_VALID = 1, + Padding_MIN = Padding_SAME, + Padding_MAX = Padding_VALID +}; + +inline const Padding (&EnumValuesPadding())[2] +{ + static const Padding values[] = {Padding_SAME, Padding_VALID}; + return values; +} + +inline const char *const *EnumNamesPadding() +{ + static const char *const names[] = {"SAME", "VALID", nullptr}; + return names; +} + +inline const char *EnumNamePadding(Padding e) +{ + const size_t index = static_cast<int>(e); + return EnumNamesPadding()[index]; +} + +enum ActivationFunctionType +{ + ActivationFunctionType_NONE = 0, + ActivationFunctionType_RELU = 1, + ActivationFunctionType_RELU_N1_TO_1 = 2, + ActivationFunctionType_RELU6 = 3, + ActivationFunctionType_TANH = 4, + ActivationFunctionType_SIGN_BIT = 5, + ActivationFunctionType_MIN = ActivationFunctionType_NONE, + ActivationFunctionType_MAX = ActivationFunctionType_SIGN_BIT +}; + +inline const ActivationFunctionType (&EnumValuesActivationFunctionType())[6] +{ + static const ActivationFunctionType values[] = { + ActivationFunctionType_NONE, ActivationFunctionType_RELU, 
+ ActivationFunctionType_RELU_N1_TO_1, ActivationFunctionType_RELU6, + ActivationFunctionType_TANH, ActivationFunctionType_SIGN_BIT}; + return values; +} + +inline const char *const *EnumNamesActivationFunctionType() +{ + static const char *const names[] = {"NONE", "RELU", "RELU_N1_TO_1", "RELU6", + "TANH", "SIGN_BIT", nullptr}; + return names; +} + +inline const char *EnumNameActivationFunctionType(ActivationFunctionType e) +{ + const size_t index = static_cast<int>(e); + return EnumNamesActivationFunctionType()[index]; +} + +enum LSHProjectionType +{ + LSHProjectionType_UNKNOWN = 0, + LSHProjectionType_SPARSE = 1, + LSHProjectionType_DENSE = 2, + LSHProjectionType_MIN = LSHProjectionType_UNKNOWN, + LSHProjectionType_MAX = LSHProjectionType_DENSE +}; + +inline const LSHProjectionType (&EnumValuesLSHProjectionType())[3] +{ + static const LSHProjectionType values[] = {LSHProjectionType_UNKNOWN, LSHProjectionType_SPARSE, + LSHProjectionType_DENSE}; + return values; +} + +inline const char *const *EnumNamesLSHProjectionType() +{ + static const char *const names[] = {"UNKNOWN", "SPARSE", "DENSE", nullptr}; + return names; +} + +inline const char *EnumNameLSHProjectionType(LSHProjectionType e) +{ + const size_t index = static_cast<int>(e); + return EnumNamesLSHProjectionType()[index]; +} + +enum FullyConnectedOptionsWeightsFormat +{ + FullyConnectedOptionsWeightsFormat_DEFAULT = 0, + FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8 = 1, + FullyConnectedOptionsWeightsFormat_MIN = FullyConnectedOptionsWeightsFormat_DEFAULT, + FullyConnectedOptionsWeightsFormat_MAX = FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8 +}; + +inline const FullyConnectedOptionsWeightsFormat (&EnumValuesFullyConnectedOptionsWeightsFormat())[2] +{ + static const FullyConnectedOptionsWeightsFormat values[] = { + FullyConnectedOptionsWeightsFormat_DEFAULT, + FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8}; + return values; +} + +inline const char *const 
*EnumNamesFullyConnectedOptionsWeightsFormat() +{ + static const char *const names[] = {"DEFAULT", "SHUFFLED4x16INT8", nullptr}; + return names; +} + +inline const char *EnumNameFullyConnectedOptionsWeightsFormat(FullyConnectedOptionsWeightsFormat e) +{ + const size_t index = static_cast<int>(e); + return EnumNamesFullyConnectedOptionsWeightsFormat()[index]; +} + +enum LSTMKernelType +{ + LSTMKernelType_FULL = 0, + LSTMKernelType_BASIC = 1, + LSTMKernelType_MIN = LSTMKernelType_FULL, + LSTMKernelType_MAX = LSTMKernelType_BASIC +}; + +inline const LSTMKernelType (&EnumValuesLSTMKernelType())[2] +{ + static const LSTMKernelType values[] = {LSTMKernelType_FULL, LSTMKernelType_BASIC}; + return values; +} + +inline const char *const *EnumNamesLSTMKernelType() +{ + static const char *const names[] = {"FULL", "BASIC", nullptr}; + return names; +} + +inline const char *EnumNameLSTMKernelType(LSTMKernelType e) +{ + const size_t index = static_cast<int>(e); + return EnumNamesLSTMKernelType()[index]; +} + +enum CombinerType +{ + CombinerType_SUM = 0, + CombinerType_MEAN = 1, + CombinerType_SQRTN = 2, + CombinerType_MIN = CombinerType_SUM, + CombinerType_MAX = CombinerType_SQRTN +}; + +inline const CombinerType (&EnumValuesCombinerType())[3] +{ + static const CombinerType values[] = {CombinerType_SUM, CombinerType_MEAN, CombinerType_SQRTN}; + return values; +} + +inline const char *const *EnumNamesCombinerType() +{ + static const char *const names[] = {"SUM", "MEAN", "SQRTN", nullptr}; + return names; +} + +inline const char *EnumNameCombinerType(CombinerType e) +{ + const size_t index = static_cast<int>(e); + return EnumNamesCombinerType()[index]; +} + +enum MirrorPadMode +{ + MirrorPadMode_REFLECT = 0, + MirrorPadMode_SYMMETRIC = 1, + MirrorPadMode_MIN = MirrorPadMode_REFLECT, + MirrorPadMode_MAX = MirrorPadMode_SYMMETRIC +}; + +inline const MirrorPadMode (&EnumValuesMirrorPadMode())[2] +{ + static const MirrorPadMode values[] = {MirrorPadMode_REFLECT, 
MirrorPadMode_SYMMETRIC}; + return values; +} + +inline const char *const *EnumNamesMirrorPadMode() +{ + static const char *const names[] = {"REFLECT", "SYMMETRIC", nullptr}; + return names; +} + +inline const char *EnumNameMirrorPadMode(MirrorPadMode e) +{ + const size_t index = static_cast<int>(e); + return EnumNamesMirrorPadMode()[index]; +} + +enum CustomOptionsFormat +{ + CustomOptionsFormat_FLEXBUFFERS = 0, + CustomOptionsFormat_MIN = CustomOptionsFormat_FLEXBUFFERS, + CustomOptionsFormat_MAX = CustomOptionsFormat_FLEXBUFFERS +}; + +inline const CustomOptionsFormat (&EnumValuesCustomOptionsFormat())[1] +{ + static const CustomOptionsFormat values[] = {CustomOptionsFormat_FLEXBUFFERS}; + return values; +} + +inline const char *const *EnumNamesCustomOptionsFormat() +{ + static const char *const names[] = {"FLEXBUFFERS", nullptr}; + return names; +} + +inline const char *EnumNameCustomOptionsFormat(CustomOptionsFormat e) +{ + const size_t index = static_cast<int>(e); + return EnumNamesCustomOptionsFormat()[index]; +} + +struct CustomQuantization FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_CUSTOM = 4 + }; + const flatbuffers::Vector<uint8_t> *custom() const + { + return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_CUSTOM); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_CUSTOM) && + verifier.VerifyVector(custom()) && verifier.EndTable(); + } +}; + +struct CustomQuantizationBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_custom(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom) + { + fbb_.AddOffset(CustomQuantization::VT_CUSTOM, custom); + } + explicit CustomQuantizationBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + CustomQuantizationBuilder &operator=(const CustomQuantizationBuilder &); + flatbuffers::Offset<CustomQuantization> Finish() + { + const 
auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<CustomQuantization>(end); + return o; + } +}; + +inline flatbuffers::Offset<CustomQuantization> +CreateCustomQuantization(flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom = 0) +{ + CustomQuantizationBuilder builder_(_fbb); + builder_.add_custom(custom); + return builder_.Finish(); +} + +inline flatbuffers::Offset<CustomQuantization> +CreateCustomQuantizationDirect(flatbuffers::FlatBufferBuilder &_fbb, + const std::vector<uint8_t> *custom = nullptr) +{ + return onert_tflite::CreateCustomQuantization(_fbb, + custom ? _fbb.CreateVector<uint8_t>(*custom) : 0); +} + +struct QuantizationParameters FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_MIN = 4, + VT_MAX = 6, + VT_SCALE = 8, + VT_ZERO_POINT = 10, + VT_DETAILS_TYPE = 12, + VT_DETAILS = 14, + VT_QUANTIZED_DIMENSION = 16 + }; + const flatbuffers::Vector<float> *min() const + { + return GetPointer<const flatbuffers::Vector<float> *>(VT_MIN); + } + const flatbuffers::Vector<float> *max() const + { + return GetPointer<const flatbuffers::Vector<float> *>(VT_MAX); + } + const flatbuffers::Vector<float> *scale() const + { + return GetPointer<const flatbuffers::Vector<float> *>(VT_SCALE); + } + const flatbuffers::Vector<int64_t> *zero_point() const + { + return GetPointer<const flatbuffers::Vector<int64_t> *>(VT_ZERO_POINT); + } + QuantizationDetails details_type() const + { + return static_cast<QuantizationDetails>(GetField<uint8_t>(VT_DETAILS_TYPE, 0)); + } + const void *details() const { return GetPointer<const void *>(VT_DETAILS); } + template <typename T> const T *details_as() const; + const CustomQuantization *details_as_CustomQuantization() const + { + return details_type() == QuantizationDetails_CustomQuantization + ? 
static_cast<const CustomQuantization *>(details()) + : nullptr; + } + int32_t quantized_dimension() const { return GetField<int32_t>(VT_QUANTIZED_DIMENSION, 0); } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_MIN) && + verifier.VerifyVector(min()) && VerifyOffset(verifier, VT_MAX) && + verifier.VerifyVector(max()) && VerifyOffset(verifier, VT_SCALE) && + verifier.VerifyVector(scale()) && VerifyOffset(verifier, VT_ZERO_POINT) && + verifier.VerifyVector(zero_point()) && VerifyField<uint8_t>(verifier, VT_DETAILS_TYPE) && + VerifyOffset(verifier, VT_DETAILS) && + VerifyQuantizationDetails(verifier, details(), details_type()) && + VerifyField<int32_t>(verifier, VT_QUANTIZED_DIMENSION) && verifier.EndTable(); + } +}; + +template <> +inline const CustomQuantization *QuantizationParameters::details_as<CustomQuantization>() const +{ + return details_as_CustomQuantization(); +} + +struct QuantizationParametersBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_min(flatbuffers::Offset<flatbuffers::Vector<float>> min) + { + fbb_.AddOffset(QuantizationParameters::VT_MIN, min); + } + void add_max(flatbuffers::Offset<flatbuffers::Vector<float>> max) + { + fbb_.AddOffset(QuantizationParameters::VT_MAX, max); + } + void add_scale(flatbuffers::Offset<flatbuffers::Vector<float>> scale) + { + fbb_.AddOffset(QuantizationParameters::VT_SCALE, scale); + } + void add_zero_point(flatbuffers::Offset<flatbuffers::Vector<int64_t>> zero_point) + { + fbb_.AddOffset(QuantizationParameters::VT_ZERO_POINT, zero_point); + } + void add_details_type(QuantizationDetails details_type) + { + fbb_.AddElement<uint8_t>(QuantizationParameters::VT_DETAILS_TYPE, + static_cast<uint8_t>(details_type), 0); + } + void add_details(flatbuffers::Offset<void> details) + { + fbb_.AddOffset(QuantizationParameters::VT_DETAILS, details); + } + void add_quantized_dimension(int32_t quantized_dimension) + { 
+ fbb_.AddElement<int32_t>(QuantizationParameters::VT_QUANTIZED_DIMENSION, quantized_dimension, + 0); + } + explicit QuantizationParametersBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + QuantizationParametersBuilder &operator=(const QuantizationParametersBuilder &); + flatbuffers::Offset<QuantizationParameters> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<QuantizationParameters>(end); + return o; + } +}; + +inline flatbuffers::Offset<QuantizationParameters> +CreateQuantizationParameters(flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset<flatbuffers::Vector<float>> min = 0, + flatbuffers::Offset<flatbuffers::Vector<float>> max = 0, + flatbuffers::Offset<flatbuffers::Vector<float>> scale = 0, + flatbuffers::Offset<flatbuffers::Vector<int64_t>> zero_point = 0, + QuantizationDetails details_type = QuantizationDetails_NONE, + flatbuffers::Offset<void> details = 0, int32_t quantized_dimension = 0) +{ + QuantizationParametersBuilder builder_(_fbb); + builder_.add_quantized_dimension(quantized_dimension); + builder_.add_details(details); + builder_.add_zero_point(zero_point); + builder_.add_scale(scale); + builder_.add_max(max); + builder_.add_min(min); + builder_.add_details_type(details_type); + return builder_.Finish(); +} + +inline flatbuffers::Offset<QuantizationParameters> CreateQuantizationParametersDirect( + flatbuffers::FlatBufferBuilder &_fbb, const std::vector<float> *min = nullptr, + const std::vector<float> *max = nullptr, const std::vector<float> *scale = nullptr, + const std::vector<int64_t> *zero_point = nullptr, + QuantizationDetails details_type = QuantizationDetails_NONE, + flatbuffers::Offset<void> details = 0, int32_t quantized_dimension = 0) +{ + return onert_tflite::CreateQuantizationParameters( + _fbb, min ? _fbb.CreateVector<float>(*min) : 0, max ? _fbb.CreateVector<float>(*max) : 0, + scale ? _fbb.CreateVector<float>(*scale) : 0, + zero_point ? 
_fbb.CreateVector<int64_t>(*zero_point) : 0, details_type, details, + quantized_dimension); +} + +struct Int32Vector FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_VALUES = 4 + }; + const flatbuffers::Vector<int32_t> *values() const + { + return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_VALUES); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_VALUES) && + verifier.VerifyVector(values()) && verifier.EndTable(); + } +}; + +struct Int32VectorBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_values(flatbuffers::Offset<flatbuffers::Vector<int32_t>> values) + { + fbb_.AddOffset(Int32Vector::VT_VALUES, values); + } + explicit Int32VectorBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + Int32VectorBuilder &operator=(const Int32VectorBuilder &); + flatbuffers::Offset<Int32Vector> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<Int32Vector>(end); + return o; + } +}; + +inline flatbuffers::Offset<Int32Vector> +CreateInt32Vector(flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset<flatbuffers::Vector<int32_t>> values = 0) +{ + Int32VectorBuilder builder_(_fbb); + builder_.add_values(values); + return builder_.Finish(); +} + +inline flatbuffers::Offset<Int32Vector> +CreateInt32VectorDirect(flatbuffers::FlatBufferBuilder &_fbb, + const std::vector<int32_t> *values = nullptr) +{ + return onert_tflite::CreateInt32Vector(_fbb, values ? 
_fbb.CreateVector<int32_t>(*values) : 0); +} + +struct Uint16Vector FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_VALUES = 4 + }; + const flatbuffers::Vector<uint16_t> *values() const + { + return GetPointer<const flatbuffers::Vector<uint16_t> *>(VT_VALUES); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_VALUES) && + verifier.VerifyVector(values()) && verifier.EndTable(); + } +}; + +struct Uint16VectorBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_values(flatbuffers::Offset<flatbuffers::Vector<uint16_t>> values) + { + fbb_.AddOffset(Uint16Vector::VT_VALUES, values); + } + explicit Uint16VectorBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + Uint16VectorBuilder &operator=(const Uint16VectorBuilder &); + flatbuffers::Offset<Uint16Vector> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<Uint16Vector>(end); + return o; + } +}; + +inline flatbuffers::Offset<Uint16Vector> +CreateUint16Vector(flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset<flatbuffers::Vector<uint16_t>> values = 0) +{ + Uint16VectorBuilder builder_(_fbb); + builder_.add_values(values); + return builder_.Finish(); +} + +inline flatbuffers::Offset<Uint16Vector> +CreateUint16VectorDirect(flatbuffers::FlatBufferBuilder &_fbb, + const std::vector<uint16_t> *values = nullptr) +{ + return onert_tflite::CreateUint16Vector(_fbb, values ? 
_fbb.CreateVector<uint16_t>(*values) : 0); +} + +struct Uint8Vector FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_VALUES = 4 + }; + const flatbuffers::Vector<uint8_t> *values() const + { + return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_VALUES); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_VALUES) && + verifier.VerifyVector(values()) && verifier.EndTable(); + } +}; + +struct Uint8VectorBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_values(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> values) + { + fbb_.AddOffset(Uint8Vector::VT_VALUES, values); + } + explicit Uint8VectorBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + Uint8VectorBuilder &operator=(const Uint8VectorBuilder &); + flatbuffers::Offset<Uint8Vector> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<Uint8Vector>(end); + return o; + } +}; + +inline flatbuffers::Offset<Uint8Vector> +CreateUint8Vector(flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset<flatbuffers::Vector<uint8_t>> values = 0) +{ + Uint8VectorBuilder builder_(_fbb); + builder_.add_values(values); + return builder_.Finish(); +} + +inline flatbuffers::Offset<Uint8Vector> +CreateUint8VectorDirect(flatbuffers::FlatBufferBuilder &_fbb, + const std::vector<uint8_t> *values = nullptr) +{ + return onert_tflite::CreateUint8Vector(_fbb, values ? 
_fbb.CreateVector<uint8_t>(*values) : 0); +} + +struct DimensionMetadata FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_FORMAT = 4, + VT_DENSE_SIZE = 6, + VT_ARRAY_SEGMENTS_TYPE = 8, + VT_ARRAY_SEGMENTS = 10, + VT_ARRAY_INDICES_TYPE = 12, + VT_ARRAY_INDICES = 14 + }; + DimensionType format() const + { + return static_cast<DimensionType>(GetField<int8_t>(VT_FORMAT, 0)); + } + int32_t dense_size() const { return GetField<int32_t>(VT_DENSE_SIZE, 0); } + SparseIndexVector array_segments_type() const + { + return static_cast<SparseIndexVector>(GetField<uint8_t>(VT_ARRAY_SEGMENTS_TYPE, 0)); + } + const void *array_segments() const { return GetPointer<const void *>(VT_ARRAY_SEGMENTS); } + template <typename T> const T *array_segments_as() const; + const Int32Vector *array_segments_as_Int32Vector() const + { + return array_segments_type() == SparseIndexVector_Int32Vector + ? static_cast<const Int32Vector *>(array_segments()) + : nullptr; + } + const Uint16Vector *array_segments_as_Uint16Vector() const + { + return array_segments_type() == SparseIndexVector_Uint16Vector + ? static_cast<const Uint16Vector *>(array_segments()) + : nullptr; + } + const Uint8Vector *array_segments_as_Uint8Vector() const + { + return array_segments_type() == SparseIndexVector_Uint8Vector + ? static_cast<const Uint8Vector *>(array_segments()) + : nullptr; + } + SparseIndexVector array_indices_type() const + { + return static_cast<SparseIndexVector>(GetField<uint8_t>(VT_ARRAY_INDICES_TYPE, 0)); + } + const void *array_indices() const { return GetPointer<const void *>(VT_ARRAY_INDICES); } + template <typename T> const T *array_indices_as() const; + const Int32Vector *array_indices_as_Int32Vector() const + { + return array_indices_type() == SparseIndexVector_Int32Vector + ? static_cast<const Int32Vector *>(array_indices()) + : nullptr; + } + const Uint16Vector *array_indices_as_Uint16Vector() const + { + return array_indices_type() == SparseIndexVector_Uint16Vector + ? 
static_cast<const Uint16Vector *>(array_indices()) + : nullptr; + } + const Uint8Vector *array_indices_as_Uint8Vector() const + { + return array_indices_type() == SparseIndexVector_Uint8Vector + ? static_cast<const Uint8Vector *>(array_indices()) + : nullptr; + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_FORMAT) && + VerifyField<int32_t>(verifier, VT_DENSE_SIZE) && + VerifyField<uint8_t>(verifier, VT_ARRAY_SEGMENTS_TYPE) && + VerifyOffset(verifier, VT_ARRAY_SEGMENTS) && + VerifySparseIndexVector(verifier, array_segments(), array_segments_type()) && + VerifyField<uint8_t>(verifier, VT_ARRAY_INDICES_TYPE) && + VerifyOffset(verifier, VT_ARRAY_INDICES) && + VerifySparseIndexVector(verifier, array_indices(), array_indices_type()) && + verifier.EndTable(); + } +}; + +template <> inline const Int32Vector *DimensionMetadata::array_segments_as<Int32Vector>() const +{ + return array_segments_as_Int32Vector(); +} + +template <> inline const Uint16Vector *DimensionMetadata::array_segments_as<Uint16Vector>() const +{ + return array_segments_as_Uint16Vector(); +} + +template <> inline const Uint8Vector *DimensionMetadata::array_segments_as<Uint8Vector>() const +{ + return array_segments_as_Uint8Vector(); +} + +template <> inline const Int32Vector *DimensionMetadata::array_indices_as<Int32Vector>() const +{ + return array_indices_as_Int32Vector(); +} + +template <> inline const Uint16Vector *DimensionMetadata::array_indices_as<Uint16Vector>() const +{ + return array_indices_as_Uint16Vector(); +} + +template <> inline const Uint8Vector *DimensionMetadata::array_indices_as<Uint8Vector>() const +{ + return array_indices_as_Uint8Vector(); +} + +struct DimensionMetadataBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_format(DimensionType format) + { + fbb_.AddElement<int8_t>(DimensionMetadata::VT_FORMAT, static_cast<int8_t>(format), 0); + } + void 
add_dense_size(int32_t dense_size) + { + fbb_.AddElement<int32_t>(DimensionMetadata::VT_DENSE_SIZE, dense_size, 0); + } + void add_array_segments_type(SparseIndexVector array_segments_type) + { + fbb_.AddElement<uint8_t>(DimensionMetadata::VT_ARRAY_SEGMENTS_TYPE, + static_cast<uint8_t>(array_segments_type), 0); + } + void add_array_segments(flatbuffers::Offset<void> array_segments) + { + fbb_.AddOffset(DimensionMetadata::VT_ARRAY_SEGMENTS, array_segments); + } + void add_array_indices_type(SparseIndexVector array_indices_type) + { + fbb_.AddElement<uint8_t>(DimensionMetadata::VT_ARRAY_INDICES_TYPE, + static_cast<uint8_t>(array_indices_type), 0); + } + void add_array_indices(flatbuffers::Offset<void> array_indices) + { + fbb_.AddOffset(DimensionMetadata::VT_ARRAY_INDICES, array_indices); + } + explicit DimensionMetadataBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + DimensionMetadataBuilder &operator=(const DimensionMetadataBuilder &); + flatbuffers::Offset<DimensionMetadata> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<DimensionMetadata>(end); + return o; + } +}; + +inline flatbuffers::Offset<DimensionMetadata> +CreateDimensionMetadata(flatbuffers::FlatBufferBuilder &_fbb, + DimensionType format = DimensionType_DENSE, int32_t dense_size = 0, + SparseIndexVector array_segments_type = SparseIndexVector_NONE, + flatbuffers::Offset<void> array_segments = 0, + SparseIndexVector array_indices_type = SparseIndexVector_NONE, + flatbuffers::Offset<void> array_indices = 0) +{ + DimensionMetadataBuilder builder_(_fbb); + builder_.add_array_indices(array_indices); + builder_.add_array_segments(array_segments); + builder_.add_dense_size(dense_size); + builder_.add_array_indices_type(array_indices_type); + builder_.add_array_segments_type(array_segments_type); + builder_.add_format(format); + return builder_.Finish(); +} + +struct SparsityParameters FLATBUFFERS_FINAL_CLASS : private 
flatbuffers::Table +{ + enum + { + VT_TRAVERSAL_ORDER = 4, + VT_BLOCK_MAP = 6, + VT_DIM_METADATA = 8 + }; + const flatbuffers::Vector<int32_t> *traversal_order() const + { + return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_TRAVERSAL_ORDER); + } + const flatbuffers::Vector<int32_t> *block_map() const + { + return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_BLOCK_MAP); + } + const flatbuffers::Vector<flatbuffers::Offset<DimensionMetadata>> *dim_metadata() const + { + return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<DimensionMetadata>> *>( + VT_DIM_METADATA); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_TRAVERSAL_ORDER) && + verifier.VerifyVector(traversal_order()) && VerifyOffset(verifier, VT_BLOCK_MAP) && + verifier.VerifyVector(block_map()) && VerifyOffset(verifier, VT_DIM_METADATA) && + verifier.VerifyVector(dim_metadata()) && verifier.VerifyVectorOfTables(dim_metadata()) && + verifier.EndTable(); + } +}; + +struct SparsityParametersBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_traversal_order(flatbuffers::Offset<flatbuffers::Vector<int32_t>> traversal_order) + { + fbb_.AddOffset(SparsityParameters::VT_TRAVERSAL_ORDER, traversal_order); + } + void add_block_map(flatbuffers::Offset<flatbuffers::Vector<int32_t>> block_map) + { + fbb_.AddOffset(SparsityParameters::VT_BLOCK_MAP, block_map); + } + void add_dim_metadata( + flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<DimensionMetadata>>> dim_metadata) + { + fbb_.AddOffset(SparsityParameters::VT_DIM_METADATA, dim_metadata); + } + explicit SparsityParametersBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + SparsityParametersBuilder &operator=(const SparsityParametersBuilder &); + flatbuffers::Offset<SparsityParameters> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = 
flatbuffers::Offset<SparsityParameters>(end); + return o; + } +}; + +inline flatbuffers::Offset<SparsityParameters> CreateSparsityParameters( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset<flatbuffers::Vector<int32_t>> traversal_order = 0, + flatbuffers::Offset<flatbuffers::Vector<int32_t>> block_map = 0, + flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<DimensionMetadata>>> dim_metadata = + 0) +{ + SparsityParametersBuilder builder_(_fbb); + builder_.add_dim_metadata(dim_metadata); + builder_.add_block_map(block_map); + builder_.add_traversal_order(traversal_order); + return builder_.Finish(); +} + +inline flatbuffers::Offset<SparsityParameters> CreateSparsityParametersDirect( + flatbuffers::FlatBufferBuilder &_fbb, const std::vector<int32_t> *traversal_order = nullptr, + const std::vector<int32_t> *block_map = nullptr, + const std::vector<flatbuffers::Offset<DimensionMetadata>> *dim_metadata = nullptr) +{ + return onert_tflite::CreateSparsityParameters( + _fbb, traversal_order ? _fbb.CreateVector<int32_t>(*traversal_order) : 0, + block_map ? _fbb.CreateVector<int32_t>(*block_map) : 0, + dim_metadata ? 
_fbb.CreateVector<flatbuffers::Offset<DimensionMetadata>>(*dim_metadata) : 0); +} + +struct Tensor FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_SHAPE = 4, + VT_TYPE = 6, + VT_BUFFER = 8, + VT_NAME = 10, + VT_QUANTIZATION = 12, + VT_IS_VARIABLE = 14, + VT_SPARSITY = 16, + VT_SHAPE_SIGNATURE = 18 + }; + const flatbuffers::Vector<int32_t> *shape() const + { + return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_SHAPE); + } + TensorType type() const { return static_cast<TensorType>(GetField<int8_t>(VT_TYPE, 0)); } + uint32_t buffer() const { return GetField<uint32_t>(VT_BUFFER, 0); } + const flatbuffers::String *name() const + { + return GetPointer<const flatbuffers::String *>(VT_NAME); + } + const QuantizationParameters *quantization() const + { + return GetPointer<const QuantizationParameters *>(VT_QUANTIZATION); + } + bool is_variable() const { return GetField<uint8_t>(VT_IS_VARIABLE, 0) != 0; } + const SparsityParameters *sparsity() const + { + return GetPointer<const SparsityParameters *>(VT_SPARSITY); + } + const flatbuffers::Vector<int32_t> *shape_signature() const + { + return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_SHAPE_SIGNATURE); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_SHAPE) && + verifier.VerifyVector(shape()) && VerifyField<int8_t>(verifier, VT_TYPE) && + VerifyField<uint32_t>(verifier, VT_BUFFER) && VerifyOffset(verifier, VT_NAME) && + verifier.VerifyString(name()) && VerifyOffset(verifier, VT_QUANTIZATION) && + verifier.VerifyTable(quantization()) && VerifyField<uint8_t>(verifier, VT_IS_VARIABLE) && + VerifyOffset(verifier, VT_SPARSITY) && verifier.VerifyTable(sparsity()) && + VerifyOffset(verifier, VT_SHAPE_SIGNATURE) && verifier.VerifyVector(shape_signature()) && + verifier.EndTable(); + } +}; + +struct TensorBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void 
add_shape(flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape) + { + fbb_.AddOffset(Tensor::VT_SHAPE, shape); + } + void add_type(TensorType type) + { + fbb_.AddElement<int8_t>(Tensor::VT_TYPE, static_cast<int8_t>(type), 0); + } + void add_buffer(uint32_t buffer) { fbb_.AddElement<uint32_t>(Tensor::VT_BUFFER, buffer, 0); } + void add_name(flatbuffers::Offset<flatbuffers::String> name) + { + fbb_.AddOffset(Tensor::VT_NAME, name); + } + void add_quantization(flatbuffers::Offset<QuantizationParameters> quantization) + { + fbb_.AddOffset(Tensor::VT_QUANTIZATION, quantization); + } + void add_is_variable(bool is_variable) + { + fbb_.AddElement<uint8_t>(Tensor::VT_IS_VARIABLE, static_cast<uint8_t>(is_variable), 0); + } + void add_sparsity(flatbuffers::Offset<SparsityParameters> sparsity) + { + fbb_.AddOffset(Tensor::VT_SPARSITY, sparsity); + } + void add_shape_signature(flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape_signature) + { + fbb_.AddOffset(Tensor::VT_SHAPE_SIGNATURE, shape_signature); + } + explicit TensorBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + TensorBuilder &operator=(const TensorBuilder &); + flatbuffers::Offset<Tensor> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<Tensor>(end); + return o; + } +}; + +inline flatbuffers::Offset<Tensor> +CreateTensor(flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape = 0, + TensorType type = TensorType_FLOAT32, uint32_t buffer = 0, + flatbuffers::Offset<flatbuffers::String> name = 0, + flatbuffers::Offset<QuantizationParameters> quantization = 0, bool is_variable = false, + flatbuffers::Offset<SparsityParameters> sparsity = 0, + flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape_signature = 0) +{ + TensorBuilder builder_(_fbb); + builder_.add_shape_signature(shape_signature); + builder_.add_sparsity(sparsity); + builder_.add_quantization(quantization); + 
builder_.add_name(name); + builder_.add_buffer(buffer); + builder_.add_shape(shape); + builder_.add_is_variable(is_variable); + builder_.add_type(type); + return builder_.Finish(); +} + +inline flatbuffers::Offset<Tensor> CreateTensorDirect( + flatbuffers::FlatBufferBuilder &_fbb, const std::vector<int32_t> *shape = nullptr, + TensorType type = TensorType_FLOAT32, uint32_t buffer = 0, const char *name = nullptr, + flatbuffers::Offset<QuantizationParameters> quantization = 0, bool is_variable = false, + flatbuffers::Offset<SparsityParameters> sparsity = 0, + const std::vector<int32_t> *shape_signature = nullptr) +{ + return onert_tflite::CreateTensor( + _fbb, shape ? _fbb.CreateVector<int32_t>(*shape) : 0, type, buffer, + name ? _fbb.CreateString(name) : 0, quantization, is_variable, sparsity, + shape_signature ? _fbb.CreateVector<int32_t>(*shape_signature) : 0); +} + +struct Conv2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_PADDING = 4, + VT_STRIDE_W = 6, + VT_STRIDE_H = 8, + VT_FUSED_ACTIVATION_FUNCTION = 10, + VT_DILATION_W_FACTOR = 12, + VT_DILATION_H_FACTOR = 14 + }; + Padding padding() const { return static_cast<Padding>(GetField<int8_t>(VT_PADDING, 0)); } + int32_t stride_w() const { return GetField<int32_t>(VT_STRIDE_W, 0); } + int32_t stride_h() const { return GetField<int32_t>(VT_STRIDE_H, 0); } + ActivationFunctionType fused_activation_function() const + { + return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + int32_t dilation_w_factor() const { return GetField<int32_t>(VT_DILATION_W_FACTOR, 1); } + int32_t dilation_h_factor() const { return GetField<int32_t>(VT_DILATION_H_FACTOR, 1); } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_PADDING) && + VerifyField<int32_t>(verifier, VT_STRIDE_W) && + VerifyField<int32_t>(verifier, VT_STRIDE_H) && + VerifyField<int8_t>(verifier, 
VT_FUSED_ACTIVATION_FUNCTION) && + VerifyField<int32_t>(verifier, VT_DILATION_W_FACTOR) && + VerifyField<int32_t>(verifier, VT_DILATION_H_FACTOR) && verifier.EndTable(); + } +}; + +struct Conv2DOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_padding(Padding padding) + { + fbb_.AddElement<int8_t>(Conv2DOptions::VT_PADDING, static_cast<int8_t>(padding), 0); + } + void add_stride_w(int32_t stride_w) + { + fbb_.AddElement<int32_t>(Conv2DOptions::VT_STRIDE_W, stride_w, 0); + } + void add_stride_h(int32_t stride_h) + { + fbb_.AddElement<int32_t>(Conv2DOptions::VT_STRIDE_H, stride_h, 0); + } + void add_fused_activation_function(ActivationFunctionType fused_activation_function) + { + fbb_.AddElement<int8_t>(Conv2DOptions::VT_FUSED_ACTIVATION_FUNCTION, + static_cast<int8_t>(fused_activation_function), 0); + } + void add_dilation_w_factor(int32_t dilation_w_factor) + { + fbb_.AddElement<int32_t>(Conv2DOptions::VT_DILATION_W_FACTOR, dilation_w_factor, 1); + } + void add_dilation_h_factor(int32_t dilation_h_factor) + { + fbb_.AddElement<int32_t>(Conv2DOptions::VT_DILATION_H_FACTOR, dilation_h_factor, 1); + } + explicit Conv2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + Conv2DOptionsBuilder &operator=(const Conv2DOptionsBuilder &); + flatbuffers::Offset<Conv2DOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<Conv2DOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<Conv2DOptions> +CreateConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb, Padding padding = Padding_SAME, + int32_t stride_w = 0, int32_t stride_h = 0, + ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE, + int32_t dilation_w_factor = 1, int32_t dilation_h_factor = 1) +{ + Conv2DOptionsBuilder builder_(_fbb); + builder_.add_dilation_h_factor(dilation_h_factor); + builder_.add_dilation_w_factor(dilation_w_factor); + 
builder_.add_stride_h(stride_h); + builder_.add_stride_w(stride_w); + builder_.add_fused_activation_function(fused_activation_function); + builder_.add_padding(padding); + return builder_.Finish(); +} + +struct Pool2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_PADDING = 4, + VT_STRIDE_W = 6, + VT_STRIDE_H = 8, + VT_FILTER_WIDTH = 10, + VT_FILTER_HEIGHT = 12, + VT_FUSED_ACTIVATION_FUNCTION = 14 + }; + Padding padding() const { return static_cast<Padding>(GetField<int8_t>(VT_PADDING, 0)); } + int32_t stride_w() const { return GetField<int32_t>(VT_STRIDE_W, 0); } + int32_t stride_h() const { return GetField<int32_t>(VT_STRIDE_H, 0); } + int32_t filter_width() const { return GetField<int32_t>(VT_FILTER_WIDTH, 0); } + int32_t filter_height() const { return GetField<int32_t>(VT_FILTER_HEIGHT, 0); } + ActivationFunctionType fused_activation_function() const + { + return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_PADDING) && + VerifyField<int32_t>(verifier, VT_STRIDE_W) && + VerifyField<int32_t>(verifier, VT_STRIDE_H) && + VerifyField<int32_t>(verifier, VT_FILTER_WIDTH) && + VerifyField<int32_t>(verifier, VT_FILTER_HEIGHT) && + VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable(); + } +}; + +struct Pool2DOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_padding(Padding padding) + { + fbb_.AddElement<int8_t>(Pool2DOptions::VT_PADDING, static_cast<int8_t>(padding), 0); + } + void add_stride_w(int32_t stride_w) + { + fbb_.AddElement<int32_t>(Pool2DOptions::VT_STRIDE_W, stride_w, 0); + } + void add_stride_h(int32_t stride_h) + { + fbb_.AddElement<int32_t>(Pool2DOptions::VT_STRIDE_H, stride_h, 0); + } + void add_filter_width(int32_t filter_width) + { + 
fbb_.AddElement<int32_t>(Pool2DOptions::VT_FILTER_WIDTH, filter_width, 0); + } + void add_filter_height(int32_t filter_height) + { + fbb_.AddElement<int32_t>(Pool2DOptions::VT_FILTER_HEIGHT, filter_height, 0); + } + void add_fused_activation_function(ActivationFunctionType fused_activation_function) + { + fbb_.AddElement<int8_t>(Pool2DOptions::VT_FUSED_ACTIVATION_FUNCTION, + static_cast<int8_t>(fused_activation_function), 0); + } + explicit Pool2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + Pool2DOptionsBuilder &operator=(const Pool2DOptionsBuilder &); + flatbuffers::Offset<Pool2DOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<Pool2DOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<Pool2DOptions> +CreatePool2DOptions(flatbuffers::FlatBufferBuilder &_fbb, Padding padding = Padding_SAME, + int32_t stride_w = 0, int32_t stride_h = 0, int32_t filter_width = 0, + int32_t filter_height = 0, + ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE) +{ + Pool2DOptionsBuilder builder_(_fbb); + builder_.add_filter_height(filter_height); + builder_.add_filter_width(filter_width); + builder_.add_stride_h(stride_h); + builder_.add_stride_w(stride_w); + builder_.add_fused_activation_function(fused_activation_function); + builder_.add_padding(padding); + return builder_.Finish(); +} + +struct DepthwiseConv2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_PADDING = 4, + VT_STRIDE_W = 6, + VT_STRIDE_H = 8, + VT_DEPTH_MULTIPLIER = 10, + VT_FUSED_ACTIVATION_FUNCTION = 12, + VT_DILATION_W_FACTOR = 14, + VT_DILATION_H_FACTOR = 16 + }; + Padding padding() const { return static_cast<Padding>(GetField<int8_t>(VT_PADDING, 0)); } + int32_t stride_w() const { return GetField<int32_t>(VT_STRIDE_W, 0); } + int32_t stride_h() const { return GetField<int32_t>(VT_STRIDE_H, 0); } + int32_t depth_multiplier() const { 
return GetField<int32_t>(VT_DEPTH_MULTIPLIER, 0); } + ActivationFunctionType fused_activation_function() const + { + return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + int32_t dilation_w_factor() const { return GetField<int32_t>(VT_DILATION_W_FACTOR, 1); } + int32_t dilation_h_factor() const { return GetField<int32_t>(VT_DILATION_H_FACTOR, 1); } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_PADDING) && + VerifyField<int32_t>(verifier, VT_STRIDE_W) && + VerifyField<int32_t>(verifier, VT_STRIDE_H) && + VerifyField<int32_t>(verifier, VT_DEPTH_MULTIPLIER) && + VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && + VerifyField<int32_t>(verifier, VT_DILATION_W_FACTOR) && + VerifyField<int32_t>(verifier, VT_DILATION_H_FACTOR) && verifier.EndTable(); + } +}; + +struct DepthwiseConv2DOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_padding(Padding padding) + { + fbb_.AddElement<int8_t>(DepthwiseConv2DOptions::VT_PADDING, static_cast<int8_t>(padding), 0); + } + void add_stride_w(int32_t stride_w) + { + fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_STRIDE_W, stride_w, 0); + } + void add_stride_h(int32_t stride_h) + { + fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_STRIDE_H, stride_h, 0); + } + void add_depth_multiplier(int32_t depth_multiplier) + { + fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_DEPTH_MULTIPLIER, depth_multiplier, 0); + } + void add_fused_activation_function(ActivationFunctionType fused_activation_function) + { + fbb_.AddElement<int8_t>(DepthwiseConv2DOptions::VT_FUSED_ACTIVATION_FUNCTION, + static_cast<int8_t>(fused_activation_function), 0); + } + void add_dilation_w_factor(int32_t dilation_w_factor) + { + fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_DILATION_W_FACTOR, dilation_w_factor, 1); + } + void add_dilation_h_factor(int32_t 
dilation_h_factor) + { + fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_DILATION_H_FACTOR, dilation_h_factor, 1); + } + explicit DepthwiseConv2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + DepthwiseConv2DOptionsBuilder &operator=(const DepthwiseConv2DOptionsBuilder &); + flatbuffers::Offset<DepthwiseConv2DOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<DepthwiseConv2DOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<DepthwiseConv2DOptions> CreateDepthwiseConv2DOptions( + flatbuffers::FlatBufferBuilder &_fbb, Padding padding = Padding_SAME, int32_t stride_w = 0, + int32_t stride_h = 0, int32_t depth_multiplier = 0, + ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE, + int32_t dilation_w_factor = 1, int32_t dilation_h_factor = 1) +{ + DepthwiseConv2DOptionsBuilder builder_(_fbb); + builder_.add_dilation_h_factor(dilation_h_factor); + builder_.add_dilation_w_factor(dilation_w_factor); + builder_.add_depth_multiplier(depth_multiplier); + builder_.add_stride_h(stride_h); + builder_.add_stride_w(stride_w); + builder_.add_fused_activation_function(fused_activation_function); + builder_.add_padding(padding); + return builder_.Finish(); +} + +struct ConcatEmbeddingsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_NUM_CHANNELS = 4, + VT_NUM_COLUMNS_PER_CHANNEL = 6, + VT_EMBEDDING_DIM_PER_CHANNEL = 8 + }; + int32_t num_channels() const { return GetField<int32_t>(VT_NUM_CHANNELS, 0); } + const flatbuffers::Vector<int32_t> *num_columns_per_channel() const + { + return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_NUM_COLUMNS_PER_CHANNEL); + } + const flatbuffers::Vector<int32_t> *embedding_dim_per_channel() const + { + return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_EMBEDDING_DIM_PER_CHANNEL); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return 
VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_NUM_CHANNELS) && + VerifyOffset(verifier, VT_NUM_COLUMNS_PER_CHANNEL) && + verifier.VerifyVector(num_columns_per_channel()) && + VerifyOffset(verifier, VT_EMBEDDING_DIM_PER_CHANNEL) && + verifier.VerifyVector(embedding_dim_per_channel()) && verifier.EndTable(); + } +}; + +struct ConcatEmbeddingsOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_num_channels(int32_t num_channels) + { + fbb_.AddElement<int32_t>(ConcatEmbeddingsOptions::VT_NUM_CHANNELS, num_channels, 0); + } + void add_num_columns_per_channel( + flatbuffers::Offset<flatbuffers::Vector<int32_t>> num_columns_per_channel) + { + fbb_.AddOffset(ConcatEmbeddingsOptions::VT_NUM_COLUMNS_PER_CHANNEL, num_columns_per_channel); + } + void add_embedding_dim_per_channel( + flatbuffers::Offset<flatbuffers::Vector<int32_t>> embedding_dim_per_channel) + { + fbb_.AddOffset(ConcatEmbeddingsOptions::VT_EMBEDDING_DIM_PER_CHANNEL, + embedding_dim_per_channel); + } + explicit ConcatEmbeddingsOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ConcatEmbeddingsOptionsBuilder &operator=(const ConcatEmbeddingsOptionsBuilder &); + flatbuffers::Offset<ConcatEmbeddingsOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<ConcatEmbeddingsOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<ConcatEmbeddingsOptions> CreateConcatEmbeddingsOptions( + flatbuffers::FlatBufferBuilder &_fbb, int32_t num_channels = 0, + flatbuffers::Offset<flatbuffers::Vector<int32_t>> num_columns_per_channel = 0, + flatbuffers::Offset<flatbuffers::Vector<int32_t>> embedding_dim_per_channel = 0) +{ + ConcatEmbeddingsOptionsBuilder builder_(_fbb); + builder_.add_embedding_dim_per_channel(embedding_dim_per_channel); + builder_.add_num_columns_per_channel(num_columns_per_channel); + builder_.add_num_channels(num_channels); + return 
builder_.Finish(); +} + +inline flatbuffers::Offset<ConcatEmbeddingsOptions> +CreateConcatEmbeddingsOptionsDirect(flatbuffers::FlatBufferBuilder &_fbb, int32_t num_channels = 0, + const std::vector<int32_t> *num_columns_per_channel = nullptr, + const std::vector<int32_t> *embedding_dim_per_channel = nullptr) +{ + return onert_tflite::CreateConcatEmbeddingsOptions( + _fbb, num_channels, + num_columns_per_channel ? _fbb.CreateVector<int32_t>(*num_columns_per_channel) : 0, + embedding_dim_per_channel ? _fbb.CreateVector<int32_t>(*embedding_dim_per_channel) : 0); +} + +struct LSHProjectionOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_TYPE = 4 + }; + LSHProjectionType type() const + { + return static_cast<LSHProjectionType>(GetField<int8_t>(VT_TYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_TYPE) && + verifier.EndTable(); + } +}; + +struct LSHProjectionOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_type(LSHProjectionType type) + { + fbb_.AddElement<int8_t>(LSHProjectionOptions::VT_TYPE, static_cast<int8_t>(type), 0); + } + explicit LSHProjectionOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + LSHProjectionOptionsBuilder &operator=(const LSHProjectionOptionsBuilder &); + flatbuffers::Offset<LSHProjectionOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<LSHProjectionOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<LSHProjectionOptions> +CreateLSHProjectionOptions(flatbuffers::FlatBufferBuilder &_fbb, + LSHProjectionType type = LSHProjectionType_UNKNOWN) +{ + LSHProjectionOptionsBuilder builder_(_fbb); + builder_.add_type(type); + return builder_.Finish(); +} + +struct SVDFOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_RANK = 4, + 
VT_FUSED_ACTIVATION_FUNCTION = 6, + VT_ASYMMETRIC_QUANTIZE_INPUTS = 8 + }; + int32_t rank() const { return GetField<int32_t>(VT_RANK, 0); } + ActivationFunctionType fused_activation_function() const + { + return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool asymmetric_quantize_inputs() const + { + return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_RANK) && + VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && + VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable(); + } +}; + +struct SVDFOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_rank(int32_t rank) { fbb_.AddElement<int32_t>(SVDFOptions::VT_RANK, rank, 0); } + void add_fused_activation_function(ActivationFunctionType fused_activation_function) + { + fbb_.AddElement<int8_t>(SVDFOptions::VT_FUSED_ACTIVATION_FUNCTION, + static_cast<int8_t>(fused_activation_function), 0); + } + void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) + { + fbb_.AddElement<uint8_t>(SVDFOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, + static_cast<uint8_t>(asymmetric_quantize_inputs), 0); + } + explicit SVDFOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + SVDFOptionsBuilder &operator=(const SVDFOptionsBuilder &); + flatbuffers::Offset<SVDFOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<SVDFOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<SVDFOptions> +CreateSVDFOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t rank = 0, + ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE, + bool asymmetric_quantize_inputs = false) +{ + SVDFOptionsBuilder builder_(_fbb); + builder_.add_rank(rank); + 
builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +struct RNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_FUSED_ACTIVATION_FUNCTION = 4, + VT_ASYMMETRIC_QUANTIZE_INPUTS = 6 + }; + ActivationFunctionType fused_activation_function() const + { + return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool asymmetric_quantize_inputs() const + { + return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && + VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && + VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable(); + } +}; + +struct RNNOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_fused_activation_function(ActivationFunctionType fused_activation_function) + { + fbb_.AddElement<int8_t>(RNNOptions::VT_FUSED_ACTIVATION_FUNCTION, + static_cast<int8_t>(fused_activation_function), 0); + } + void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) + { + fbb_.AddElement<uint8_t>(RNNOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, + static_cast<uint8_t>(asymmetric_quantize_inputs), 0); + } + explicit RNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + RNNOptionsBuilder &operator=(const RNNOptionsBuilder &); + flatbuffers::Offset<RNNOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<RNNOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<RNNOptions> +CreateRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, + ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE, + bool asymmetric_quantize_inputs = false) +{ + RNNOptionsBuilder builder_(_fbb); + 
builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +struct SequenceRNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_TIME_MAJOR = 4, + VT_FUSED_ACTIVATION_FUNCTION = 6, + VT_ASYMMETRIC_QUANTIZE_INPUTS = 8 + }; + bool time_major() const { return GetField<uint8_t>(VT_TIME_MAJOR, 0) != 0; } + ActivationFunctionType fused_activation_function() const + { + return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool asymmetric_quantize_inputs() const + { + return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_TIME_MAJOR) && + VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && + VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable(); + } +}; + +struct SequenceRNNOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_time_major(bool time_major) + { + fbb_.AddElement<uint8_t>(SequenceRNNOptions::VT_TIME_MAJOR, static_cast<uint8_t>(time_major), + 0); + } + void add_fused_activation_function(ActivationFunctionType fused_activation_function) + { + fbb_.AddElement<int8_t>(SequenceRNNOptions::VT_FUSED_ACTIVATION_FUNCTION, + static_cast<int8_t>(fused_activation_function), 0); + } + void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) + { + fbb_.AddElement<uint8_t>(SequenceRNNOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, + static_cast<uint8_t>(asymmetric_quantize_inputs), 0); + } + explicit SequenceRNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + SequenceRNNOptionsBuilder &operator=(const SequenceRNNOptionsBuilder &); + flatbuffers::Offset<SequenceRNNOptions> Finish() + { + const auto 
end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<SequenceRNNOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<SequenceRNNOptions> CreateSequenceRNNOptions( + flatbuffers::FlatBufferBuilder &_fbb, bool time_major = false, + ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE, + bool asymmetric_quantize_inputs = false) +{ + SequenceRNNOptionsBuilder builder_(_fbb); + builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); + builder_.add_fused_activation_function(fused_activation_function); + builder_.add_time_major(time_major); + return builder_.Finish(); +} + +struct BidirectionalSequenceRNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_TIME_MAJOR = 4, + VT_FUSED_ACTIVATION_FUNCTION = 6, + VT_MERGE_OUTPUTS = 8, + VT_ASYMMETRIC_QUANTIZE_INPUTS = 10 + }; + bool time_major() const { return GetField<uint8_t>(VT_TIME_MAJOR, 0) != 0; } + ActivationFunctionType fused_activation_function() const + { + return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool merge_outputs() const { return GetField<uint8_t>(VT_MERGE_OUTPUTS, 0) != 0; } + bool asymmetric_quantize_inputs() const + { + return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_TIME_MAJOR) && + VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && + VerifyField<uint8_t>(verifier, VT_MERGE_OUTPUTS) && + VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable(); + } +}; + +struct BidirectionalSequenceRNNOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_time_major(bool time_major) + { + fbb_.AddElement<uint8_t>(BidirectionalSequenceRNNOptions::VT_TIME_MAJOR, + static_cast<uint8_t>(time_major), 0); + } + void 
add_fused_activation_function(ActivationFunctionType fused_activation_function) + { + fbb_.AddElement<int8_t>(BidirectionalSequenceRNNOptions::VT_FUSED_ACTIVATION_FUNCTION, + static_cast<int8_t>(fused_activation_function), 0); + } + void add_merge_outputs(bool merge_outputs) + { + fbb_.AddElement<uint8_t>(BidirectionalSequenceRNNOptions::VT_MERGE_OUTPUTS, + static_cast<uint8_t>(merge_outputs), 0); + } + void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) + { + fbb_.AddElement<uint8_t>(BidirectionalSequenceRNNOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, + static_cast<uint8_t>(asymmetric_quantize_inputs), 0); + } + explicit BidirectionalSequenceRNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + BidirectionalSequenceRNNOptionsBuilder &operator=(const BidirectionalSequenceRNNOptionsBuilder &); + flatbuffers::Offset<BidirectionalSequenceRNNOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<BidirectionalSequenceRNNOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<BidirectionalSequenceRNNOptions> CreateBidirectionalSequenceRNNOptions( + flatbuffers::FlatBufferBuilder &_fbb, bool time_major = false, + ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE, + bool merge_outputs = false, bool asymmetric_quantize_inputs = false) +{ + BidirectionalSequenceRNNOptionsBuilder builder_(_fbb); + builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); + builder_.add_merge_outputs(merge_outputs); + builder_.add_fused_activation_function(fused_activation_function); + builder_.add_time_major(time_major); + return builder_.Finish(); +} + +struct FullyConnectedOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_FUSED_ACTIVATION_FUNCTION = 4, + VT_WEIGHTS_FORMAT = 6, + VT_KEEP_NUM_DIMS = 8, + VT_ASYMMETRIC_QUANTIZE_INPUTS = 10 + }; + ActivationFunctionType fused_activation_function() const + { + 
return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + FullyConnectedOptionsWeightsFormat weights_format() const + { + return static_cast<FullyConnectedOptionsWeightsFormat>(GetField<int8_t>(VT_WEIGHTS_FORMAT, 0)); + } + bool keep_num_dims() const { return GetField<uint8_t>(VT_KEEP_NUM_DIMS, 0) != 0; } + bool asymmetric_quantize_inputs() const + { + return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && + VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && + VerifyField<int8_t>(verifier, VT_WEIGHTS_FORMAT) && + VerifyField<uint8_t>(verifier, VT_KEEP_NUM_DIMS) && + VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable(); + } +}; + +struct FullyConnectedOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_fused_activation_function(ActivationFunctionType fused_activation_function) + { + fbb_.AddElement<int8_t>(FullyConnectedOptions::VT_FUSED_ACTIVATION_FUNCTION, + static_cast<int8_t>(fused_activation_function), 0); + } + void add_weights_format(FullyConnectedOptionsWeightsFormat weights_format) + { + fbb_.AddElement<int8_t>(FullyConnectedOptions::VT_WEIGHTS_FORMAT, + static_cast<int8_t>(weights_format), 0); + } + void add_keep_num_dims(bool keep_num_dims) + { + fbb_.AddElement<uint8_t>(FullyConnectedOptions::VT_KEEP_NUM_DIMS, + static_cast<uint8_t>(keep_num_dims), 0); + } + void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) + { + fbb_.AddElement<uint8_t>(FullyConnectedOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, + static_cast<uint8_t>(asymmetric_quantize_inputs), 0); + } + explicit FullyConnectedOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + FullyConnectedOptionsBuilder &operator=(const FullyConnectedOptionsBuilder &); + 
flatbuffers::Offset<FullyConnectedOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<FullyConnectedOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<FullyConnectedOptions> CreateFullyConnectedOptions( + flatbuffers::FlatBufferBuilder &_fbb, + ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE, + FullyConnectedOptionsWeightsFormat weights_format = FullyConnectedOptionsWeightsFormat_DEFAULT, + bool keep_num_dims = false, bool asymmetric_quantize_inputs = false) +{ + FullyConnectedOptionsBuilder builder_(_fbb); + builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); + builder_.add_keep_num_dims(keep_num_dims); + builder_.add_weights_format(weights_format); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +struct SoftmaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_BETA = 4 + }; + float beta() const { return GetField<float>(VT_BETA, 0.0f); } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<float>(verifier, VT_BETA) && + verifier.EndTable(); + } +}; + +struct SoftmaxOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_beta(float beta) { fbb_.AddElement<float>(SoftmaxOptions::VT_BETA, beta, 0.0f); } + explicit SoftmaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + SoftmaxOptionsBuilder &operator=(const SoftmaxOptionsBuilder &); + flatbuffers::Offset<SoftmaxOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<SoftmaxOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<SoftmaxOptions> +CreateSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, float beta = 0.0f) +{ + SoftmaxOptionsBuilder builder_(_fbb); + builder_.add_beta(beta); + return builder_.Finish(); +} + +struct 
ConcatenationOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_AXIS = 4, + VT_FUSED_ACTIVATION_FUNCTION = 6 + }; + int32_t axis() const { return GetField<int32_t>(VT_AXIS, 0); } + ActivationFunctionType fused_activation_function() const + { + return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_AXIS) && + VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable(); + } +}; + +struct ConcatenationOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(int32_t axis) { fbb_.AddElement<int32_t>(ConcatenationOptions::VT_AXIS, axis, 0); } + void add_fused_activation_function(ActivationFunctionType fused_activation_function) + { + fbb_.AddElement<int8_t>(ConcatenationOptions::VT_FUSED_ACTIVATION_FUNCTION, + static_cast<int8_t>(fused_activation_function), 0); + } + explicit ConcatenationOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ConcatenationOptionsBuilder &operator=(const ConcatenationOptionsBuilder &); + flatbuffers::Offset<ConcatenationOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<ConcatenationOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<ConcatenationOptions> CreateConcatenationOptions( + flatbuffers::FlatBufferBuilder &_fbb, int32_t axis = 0, + ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE) +{ + ConcatenationOptionsBuilder builder_(_fbb); + builder_.add_axis(axis); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +struct AddOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_FUSED_ACTIVATION_FUNCTION = 4 + }; + ActivationFunctionType 
fused_activation_function() const + { + return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && + VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable(); + } +}; + +struct AddOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_fused_activation_function(ActivationFunctionType fused_activation_function) + { + fbb_.AddElement<int8_t>(AddOptions::VT_FUSED_ACTIVATION_FUNCTION, + static_cast<int8_t>(fused_activation_function), 0); + } + explicit AddOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + AddOptionsBuilder &operator=(const AddOptionsBuilder &); + flatbuffers::Offset<AddOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<AddOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<AddOptions> +CreateAddOptions(flatbuffers::FlatBufferBuilder &_fbb, + ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE) +{ + AddOptionsBuilder builder_(_fbb); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +struct MulOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_FUSED_ACTIVATION_FUNCTION = 4 + }; + ActivationFunctionType fused_activation_function() const + { + return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && + VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable(); + } +}; + +struct MulOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_fused_activation_function(ActivationFunctionType fused_activation_function) + { + 
fbb_.AddElement<int8_t>(MulOptions::VT_FUSED_ACTIVATION_FUNCTION, + static_cast<int8_t>(fused_activation_function), 0); + } + explicit MulOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + MulOptionsBuilder &operator=(const MulOptionsBuilder &); + flatbuffers::Offset<MulOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<MulOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<MulOptions> +CreateMulOptions(flatbuffers::FlatBufferBuilder &_fbb, + ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE) +{ + MulOptionsBuilder builder_(_fbb); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +struct L2NormOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_FUSED_ACTIVATION_FUNCTION = 4 + }; + ActivationFunctionType fused_activation_function() const + { + return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && + VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable(); + } +}; + +struct L2NormOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_fused_activation_function(ActivationFunctionType fused_activation_function) + { + fbb_.AddElement<int8_t>(L2NormOptions::VT_FUSED_ACTIVATION_FUNCTION, + static_cast<int8_t>(fused_activation_function), 0); + } + explicit L2NormOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + L2NormOptionsBuilder &operator=(const L2NormOptionsBuilder &); + flatbuffers::Offset<L2NormOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<L2NormOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<L2NormOptions> 
+CreateL2NormOptions(flatbuffers::FlatBufferBuilder &_fbb, + ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE) +{ + L2NormOptionsBuilder builder_(_fbb); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +struct LocalResponseNormalizationOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_RADIUS = 4, + VT_BIAS = 6, + VT_ALPHA = 8, + VT_BETA = 10 + }; + int32_t radius() const { return GetField<int32_t>(VT_RADIUS, 0); } + float bias() const { return GetField<float>(VT_BIAS, 0.0f); } + float alpha() const { return GetField<float>(VT_ALPHA, 0.0f); } + float beta() const { return GetField<float>(VT_BETA, 0.0f); } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_RADIUS) && + VerifyField<float>(verifier, VT_BIAS) && VerifyField<float>(verifier, VT_ALPHA) && + VerifyField<float>(verifier, VT_BETA) && verifier.EndTable(); + } +}; + +struct LocalResponseNormalizationOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_radius(int32_t radius) + { + fbb_.AddElement<int32_t>(LocalResponseNormalizationOptions::VT_RADIUS, radius, 0); + } + void add_bias(float bias) + { + fbb_.AddElement<float>(LocalResponseNormalizationOptions::VT_BIAS, bias, 0.0f); + } + void add_alpha(float alpha) + { + fbb_.AddElement<float>(LocalResponseNormalizationOptions::VT_ALPHA, alpha, 0.0f); + } + void add_beta(float beta) + { + fbb_.AddElement<float>(LocalResponseNormalizationOptions::VT_BETA, beta, 0.0f); + } + explicit LocalResponseNormalizationOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + LocalResponseNormalizationOptionsBuilder & + operator=(const LocalResponseNormalizationOptionsBuilder &); + flatbuffers::Offset<LocalResponseNormalizationOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + 
auto o = flatbuffers::Offset<LocalResponseNormalizationOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<LocalResponseNormalizationOptions> +CreateLocalResponseNormalizationOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t radius = 0, + float bias = 0.0f, float alpha = 0.0f, float beta = 0.0f) +{ + LocalResponseNormalizationOptionsBuilder builder_(_fbb); + builder_.add_beta(beta); + builder_.add_alpha(alpha); + builder_.add_bias(bias); + builder_.add_radius(radius); + return builder_.Finish(); +} + +struct LSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_FUSED_ACTIVATION_FUNCTION = 4, + VT_CELL_CLIP = 6, + VT_PROJ_CLIP = 8, + VT_KERNEL_TYPE = 10, + VT_ASYMMETRIC_QUANTIZE_INPUTS = 12 + }; + ActivationFunctionType fused_activation_function() const + { + return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + float cell_clip() const { return GetField<float>(VT_CELL_CLIP, 0.0f); } + float proj_clip() const { return GetField<float>(VT_PROJ_CLIP, 0.0f); } + LSTMKernelType kernel_type() const + { + return static_cast<LSTMKernelType>(GetField<int8_t>(VT_KERNEL_TYPE, 0)); + } + bool asymmetric_quantize_inputs() const + { + return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && + VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && + VerifyField<float>(verifier, VT_CELL_CLIP) && + VerifyField<float>(verifier, VT_PROJ_CLIP) && + VerifyField<int8_t>(verifier, VT_KERNEL_TYPE) && + VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable(); + } +}; + +struct LSTMOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_fused_activation_function(ActivationFunctionType fused_activation_function) + { + fbb_.AddElement<int8_t>(LSTMOptions::VT_FUSED_ACTIVATION_FUNCTION, + 
static_cast<int8_t>(fused_activation_function), 0); + } + void add_cell_clip(float cell_clip) + { + fbb_.AddElement<float>(LSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f); + } + void add_proj_clip(float proj_clip) + { + fbb_.AddElement<float>(LSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f); + } + void add_kernel_type(LSTMKernelType kernel_type) + { + fbb_.AddElement<int8_t>(LSTMOptions::VT_KERNEL_TYPE, static_cast<int8_t>(kernel_type), 0); + } + void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) + { + fbb_.AddElement<uint8_t>(LSTMOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, + static_cast<uint8_t>(asymmetric_quantize_inputs), 0); + } + explicit LSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + LSTMOptionsBuilder &operator=(const LSTMOptionsBuilder &); + flatbuffers::Offset<LSTMOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<LSTMOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<LSTMOptions> +CreateLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, + ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE, + float cell_clip = 0.0f, float proj_clip = 0.0f, + LSTMKernelType kernel_type = LSTMKernelType_FULL, + bool asymmetric_quantize_inputs = false) +{ + LSTMOptionsBuilder builder_(_fbb); + builder_.add_proj_clip(proj_clip); + builder_.add_cell_clip(cell_clip); + builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); + builder_.add_kernel_type(kernel_type); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +struct UnidirectionalSequenceLSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_FUSED_ACTIVATION_FUNCTION = 4, + VT_CELL_CLIP = 6, + VT_PROJ_CLIP = 8, + VT_TIME_MAJOR = 10, + VT_ASYMMETRIC_QUANTIZE_INPUTS = 12 + }; + ActivationFunctionType fused_activation_function() const + { + return 
static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + float cell_clip() const { return GetField<float>(VT_CELL_CLIP, 0.0f); } + float proj_clip() const { return GetField<float>(VT_PROJ_CLIP, 0.0f); } + bool time_major() const { return GetField<uint8_t>(VT_TIME_MAJOR, 0) != 0; } + bool asymmetric_quantize_inputs() const + { + return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && + VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && + VerifyField<float>(verifier, VT_CELL_CLIP) && + VerifyField<float>(verifier, VT_PROJ_CLIP) && + VerifyField<uint8_t>(verifier, VT_TIME_MAJOR) && + VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable(); + } +}; + +struct UnidirectionalSequenceLSTMOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_fused_activation_function(ActivationFunctionType fused_activation_function) + { + fbb_.AddElement<int8_t>(UnidirectionalSequenceLSTMOptions::VT_FUSED_ACTIVATION_FUNCTION, + static_cast<int8_t>(fused_activation_function), 0); + } + void add_cell_clip(float cell_clip) + { + fbb_.AddElement<float>(UnidirectionalSequenceLSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f); + } + void add_proj_clip(float proj_clip) + { + fbb_.AddElement<float>(UnidirectionalSequenceLSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f); + } + void add_time_major(bool time_major) + { + fbb_.AddElement<uint8_t>(UnidirectionalSequenceLSTMOptions::VT_TIME_MAJOR, + static_cast<uint8_t>(time_major), 0); + } + void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) + { + fbb_.AddElement<uint8_t>(UnidirectionalSequenceLSTMOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, + static_cast<uint8_t>(asymmetric_quantize_inputs), 0); + } + explicit UnidirectionalSequenceLSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) + { + start_ = 
fbb_.StartTable(); + } + UnidirectionalSequenceLSTMOptionsBuilder & + operator=(const UnidirectionalSequenceLSTMOptionsBuilder &); + flatbuffers::Offset<UnidirectionalSequenceLSTMOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<UnidirectionalSequenceLSTMOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<UnidirectionalSequenceLSTMOptions> +CreateUnidirectionalSequenceLSTMOptions( + flatbuffers::FlatBufferBuilder &_fbb, + ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE, + float cell_clip = 0.0f, float proj_clip = 0.0f, bool time_major = false, + bool asymmetric_quantize_inputs = false) +{ + UnidirectionalSequenceLSTMOptionsBuilder builder_(_fbb); + builder_.add_proj_clip(proj_clip); + builder_.add_cell_clip(cell_clip); + builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); + builder_.add_time_major(time_major); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +struct BidirectionalSequenceLSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_FUSED_ACTIVATION_FUNCTION = 4, + VT_CELL_CLIP = 6, + VT_PROJ_CLIP = 8, + VT_MERGE_OUTPUTS = 10, + VT_TIME_MAJOR = 12, + VT_ASYMMETRIC_QUANTIZE_INPUTS = 14 + }; + ActivationFunctionType fused_activation_function() const + { + return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + float cell_clip() const { return GetField<float>(VT_CELL_CLIP, 0.0f); } + float proj_clip() const { return GetField<float>(VT_PROJ_CLIP, 0.0f); } + bool merge_outputs() const { return GetField<uint8_t>(VT_MERGE_OUTPUTS, 0) != 0; } + bool time_major() const { return GetField<uint8_t>(VT_TIME_MAJOR, 1) != 0; } + bool asymmetric_quantize_inputs() const + { + return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && + 
VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && + VerifyField<float>(verifier, VT_CELL_CLIP) && + VerifyField<float>(verifier, VT_PROJ_CLIP) && + VerifyField<uint8_t>(verifier, VT_MERGE_OUTPUTS) && + VerifyField<uint8_t>(verifier, VT_TIME_MAJOR) && + VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable(); + } +}; + +struct BidirectionalSequenceLSTMOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_fused_activation_function(ActivationFunctionType fused_activation_function) + { + fbb_.AddElement<int8_t>(BidirectionalSequenceLSTMOptions::VT_FUSED_ACTIVATION_FUNCTION, + static_cast<int8_t>(fused_activation_function), 0); + } + void add_cell_clip(float cell_clip) + { + fbb_.AddElement<float>(BidirectionalSequenceLSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f); + } + void add_proj_clip(float proj_clip) + { + fbb_.AddElement<float>(BidirectionalSequenceLSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f); + } + void add_merge_outputs(bool merge_outputs) + { + fbb_.AddElement<uint8_t>(BidirectionalSequenceLSTMOptions::VT_MERGE_OUTPUTS, + static_cast<uint8_t>(merge_outputs), 0); + } + void add_time_major(bool time_major) + { + fbb_.AddElement<uint8_t>(BidirectionalSequenceLSTMOptions::VT_TIME_MAJOR, + static_cast<uint8_t>(time_major), 1); + } + void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) + { + fbb_.AddElement<uint8_t>(BidirectionalSequenceLSTMOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, + static_cast<uint8_t>(asymmetric_quantize_inputs), 0); + } + explicit BidirectionalSequenceLSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + BidirectionalSequenceLSTMOptionsBuilder & + operator=(const BidirectionalSequenceLSTMOptionsBuilder &); + flatbuffers::Offset<BidirectionalSequenceLSTMOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<BidirectionalSequenceLSTMOptions>(end); + 
return o; + } +}; + +inline flatbuffers::Offset<BidirectionalSequenceLSTMOptions> CreateBidirectionalSequenceLSTMOptions( + flatbuffers::FlatBufferBuilder &_fbb, + ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE, + float cell_clip = 0.0f, float proj_clip = 0.0f, bool merge_outputs = false, + bool time_major = true, bool asymmetric_quantize_inputs = false) +{ + BidirectionalSequenceLSTMOptionsBuilder builder_(_fbb); + builder_.add_proj_clip(proj_clip); + builder_.add_cell_clip(cell_clip); + builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); + builder_.add_time_major(time_major); + builder_.add_merge_outputs(merge_outputs); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +struct ResizeBilinearOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_ALIGN_CORNERS = 8, + VT_HALF_PIXEL_CENTERS = 10 + }; + bool align_corners() const { return GetField<uint8_t>(VT_ALIGN_CORNERS, 0) != 0; } + bool half_pixel_centers() const { return GetField<uint8_t>(VT_HALF_PIXEL_CENTERS, 0) != 0; } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_ALIGN_CORNERS) && + VerifyField<uint8_t>(verifier, VT_HALF_PIXEL_CENTERS) && verifier.EndTable(); + } +}; + +struct ResizeBilinearOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_align_corners(bool align_corners) + { + fbb_.AddElement<uint8_t>(ResizeBilinearOptions::VT_ALIGN_CORNERS, + static_cast<uint8_t>(align_corners), 0); + } + void add_half_pixel_centers(bool half_pixel_centers) + { + fbb_.AddElement<uint8_t>(ResizeBilinearOptions::VT_HALF_PIXEL_CENTERS, + static_cast<uint8_t>(half_pixel_centers), 0); + } + explicit ResizeBilinearOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ResizeBilinearOptionsBuilder &operator=(const 
ResizeBilinearOptionsBuilder &); + flatbuffers::Offset<ResizeBilinearOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<ResizeBilinearOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<ResizeBilinearOptions> +CreateResizeBilinearOptions(flatbuffers::FlatBufferBuilder &_fbb, bool align_corners = false, + bool half_pixel_centers = false) +{ + ResizeBilinearOptionsBuilder builder_(_fbb); + builder_.add_half_pixel_centers(half_pixel_centers); + builder_.add_align_corners(align_corners); + return builder_.Finish(); +} + +struct ResizeNearestNeighborOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_ALIGN_CORNERS = 4 + }; + bool align_corners() const { return GetField<uint8_t>(VT_ALIGN_CORNERS, 0) != 0; } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_ALIGN_CORNERS) && + verifier.EndTable(); + } +}; + +struct ResizeNearestNeighborOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_align_corners(bool align_corners) + { + fbb_.AddElement<uint8_t>(ResizeNearestNeighborOptions::VT_ALIGN_CORNERS, + static_cast<uint8_t>(align_corners), 0); + } + explicit ResizeNearestNeighborOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ResizeNearestNeighborOptionsBuilder &operator=(const ResizeNearestNeighborOptionsBuilder &); + flatbuffers::Offset<ResizeNearestNeighborOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<ResizeNearestNeighborOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<ResizeNearestNeighborOptions> +CreateResizeNearestNeighborOptions(flatbuffers::FlatBufferBuilder &_fbb, bool align_corners = false) +{ + ResizeNearestNeighborOptionsBuilder builder_(_fbb); + builder_.add_align_corners(align_corners); + return builder_.Finish(); +} + +struct 
CallOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_SUBGRAPH = 4 + }; + uint32_t subgraph() const { return GetField<uint32_t>(VT_SUBGRAPH, 0); } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<uint32_t>(verifier, VT_SUBGRAPH) && + verifier.EndTable(); + } +}; + +struct CallOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_subgraph(uint32_t subgraph) + { + fbb_.AddElement<uint32_t>(CallOptions::VT_SUBGRAPH, subgraph, 0); + } + explicit CallOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + CallOptionsBuilder &operator=(const CallOptionsBuilder &); + flatbuffers::Offset<CallOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<CallOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<CallOptions> CreateCallOptions(flatbuffers::FlatBufferBuilder &_fbb, + uint32_t subgraph = 0) +{ + CallOptionsBuilder builder_(_fbb); + builder_.add_subgraph(subgraph); + return builder_.Finish(); +} + +struct PadOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct PadOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit PadOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + PadOptionsBuilder &operator=(const PadOptionsBuilder &); + flatbuffers::Offset<PadOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<PadOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<PadOptions> CreatePadOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + PadOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct PadV2Options FLATBUFFERS_FINAL_CLASS : 
private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct PadV2OptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit PadV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + PadV2OptionsBuilder &operator=(const PadV2OptionsBuilder &); + flatbuffers::Offset<PadV2Options> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<PadV2Options>(end); + return o; + } +}; + +inline flatbuffers::Offset<PadV2Options> CreatePadV2Options(flatbuffers::FlatBufferBuilder &_fbb) +{ + PadV2OptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct ReshapeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_NEW_SHAPE = 4 + }; + const flatbuffers::Vector<int32_t> *new_shape() const + { + return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_NEW_SHAPE); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_NEW_SHAPE) && + verifier.VerifyVector(new_shape()) && verifier.EndTable(); + } +}; + +struct ReshapeOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_new_shape(flatbuffers::Offset<flatbuffers::Vector<int32_t>> new_shape) + { + fbb_.AddOffset(ReshapeOptions::VT_NEW_SHAPE, new_shape); + } + explicit ReshapeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ReshapeOptionsBuilder &operator=(const ReshapeOptionsBuilder &); + flatbuffers::Offset<ReshapeOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<ReshapeOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<ReshapeOptions> +CreateReshapeOptions(flatbuffers::FlatBufferBuilder &_fbb, + 
flatbuffers::Offset<flatbuffers::Vector<int32_t>> new_shape = 0) +{ + ReshapeOptionsBuilder builder_(_fbb); + builder_.add_new_shape(new_shape); + return builder_.Finish(); +} + +inline flatbuffers::Offset<ReshapeOptions> +CreateReshapeOptionsDirect(flatbuffers::FlatBufferBuilder &_fbb, + const std::vector<int32_t> *new_shape = nullptr) +{ + return onert_tflite::CreateReshapeOptions(_fbb, + new_shape ? _fbb.CreateVector<int32_t>(*new_shape) : 0); +} + +struct SpaceToBatchNDOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct SpaceToBatchNDOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit SpaceToBatchNDOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + SpaceToBatchNDOptionsBuilder &operator=(const SpaceToBatchNDOptionsBuilder &); + flatbuffers::Offset<SpaceToBatchNDOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<SpaceToBatchNDOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<SpaceToBatchNDOptions> +CreateSpaceToBatchNDOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + SpaceToBatchNDOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct BatchToSpaceNDOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct BatchToSpaceNDOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit BatchToSpaceNDOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + BatchToSpaceNDOptionsBuilder &operator=(const BatchToSpaceNDOptionsBuilder &); + flatbuffers::Offset<BatchToSpaceNDOptions> Finish() + { + const auto end = 
fbb_.EndTable(start_); + auto o = flatbuffers::Offset<BatchToSpaceNDOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<BatchToSpaceNDOptions> +CreateBatchToSpaceNDOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + BatchToSpaceNDOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct SkipGramOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_NGRAM_SIZE = 4, + VT_MAX_SKIP_SIZE = 6, + VT_INCLUDE_ALL_NGRAMS = 8 + }; + int32_t ngram_size() const { return GetField<int32_t>(VT_NGRAM_SIZE, 0); } + int32_t max_skip_size() const { return GetField<int32_t>(VT_MAX_SKIP_SIZE, 0); } + bool include_all_ngrams() const { return GetField<uint8_t>(VT_INCLUDE_ALL_NGRAMS, 0) != 0; } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_NGRAM_SIZE) && + VerifyField<int32_t>(verifier, VT_MAX_SKIP_SIZE) && + VerifyField<uint8_t>(verifier, VT_INCLUDE_ALL_NGRAMS) && verifier.EndTable(); + } +}; + +struct SkipGramOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_ngram_size(int32_t ngram_size) + { + fbb_.AddElement<int32_t>(SkipGramOptions::VT_NGRAM_SIZE, ngram_size, 0); + } + void add_max_skip_size(int32_t max_skip_size) + { + fbb_.AddElement<int32_t>(SkipGramOptions::VT_MAX_SKIP_SIZE, max_skip_size, 0); + } + void add_include_all_ngrams(bool include_all_ngrams) + { + fbb_.AddElement<uint8_t>(SkipGramOptions::VT_INCLUDE_ALL_NGRAMS, + static_cast<uint8_t>(include_all_ngrams), 0); + } + explicit SkipGramOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + SkipGramOptionsBuilder &operator=(const SkipGramOptionsBuilder &); + flatbuffers::Offset<SkipGramOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<SkipGramOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<SkipGramOptions> 
+CreateSkipGramOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t ngram_size = 0, + int32_t max_skip_size = 0, bool include_all_ngrams = false) +{ + SkipGramOptionsBuilder builder_(_fbb); + builder_.add_max_skip_size(max_skip_size); + builder_.add_ngram_size(ngram_size); + builder_.add_include_all_ngrams(include_all_ngrams); + return builder_.Finish(); +} + +struct SpaceToDepthOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_BLOCK_SIZE = 4 + }; + int32_t block_size() const { return GetField<int32_t>(VT_BLOCK_SIZE, 0); } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_BLOCK_SIZE) && + verifier.EndTable(); + } +}; + +struct SpaceToDepthOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_block_size(int32_t block_size) + { + fbb_.AddElement<int32_t>(SpaceToDepthOptions::VT_BLOCK_SIZE, block_size, 0); + } + explicit SpaceToDepthOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + SpaceToDepthOptionsBuilder &operator=(const SpaceToDepthOptionsBuilder &); + flatbuffers::Offset<SpaceToDepthOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<SpaceToDepthOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<SpaceToDepthOptions> +CreateSpaceToDepthOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t block_size = 0) +{ + SpaceToDepthOptionsBuilder builder_(_fbb); + builder_.add_block_size(block_size); + return builder_.Finish(); +} + +struct DepthToSpaceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_BLOCK_SIZE = 4 + }; + int32_t block_size() const { return GetField<int32_t>(VT_BLOCK_SIZE, 0); } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_BLOCK_SIZE) && + verifier.EndTable(); + } +}; + 
+struct DepthToSpaceOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_block_size(int32_t block_size) + { + fbb_.AddElement<int32_t>(DepthToSpaceOptions::VT_BLOCK_SIZE, block_size, 0); + } + explicit DepthToSpaceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + DepthToSpaceOptionsBuilder &operator=(const DepthToSpaceOptionsBuilder &); + flatbuffers::Offset<DepthToSpaceOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<DepthToSpaceOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<DepthToSpaceOptions> +CreateDepthToSpaceOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t block_size = 0) +{ + DepthToSpaceOptionsBuilder builder_(_fbb); + builder_.add_block_size(block_size); + return builder_.Finish(); +} + +struct SubOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_FUSED_ACTIVATION_FUNCTION = 4 + }; + ActivationFunctionType fused_activation_function() const + { + return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && + VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable(); + } +}; + +struct SubOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_fused_activation_function(ActivationFunctionType fused_activation_function) + { + fbb_.AddElement<int8_t>(SubOptions::VT_FUSED_ACTIVATION_FUNCTION, + static_cast<int8_t>(fused_activation_function), 0); + } + explicit SubOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + SubOptionsBuilder &operator=(const SubOptionsBuilder &); + flatbuffers::Offset<SubOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<SubOptions>(end); + 
return o; + } +}; + +inline flatbuffers::Offset<SubOptions> +CreateSubOptions(flatbuffers::FlatBufferBuilder &_fbb, + ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE) +{ + SubOptionsBuilder builder_(_fbb); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +struct DivOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_FUSED_ACTIVATION_FUNCTION = 4 + }; + ActivationFunctionType fused_activation_function() const + { + return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && + VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable(); + } +}; + +struct DivOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_fused_activation_function(ActivationFunctionType fused_activation_function) + { + fbb_.AddElement<int8_t>(DivOptions::VT_FUSED_ACTIVATION_FUNCTION, + static_cast<int8_t>(fused_activation_function), 0); + } + explicit DivOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + DivOptionsBuilder &operator=(const DivOptionsBuilder &); + flatbuffers::Offset<DivOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<DivOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<DivOptions> +CreateDivOptions(flatbuffers::FlatBufferBuilder &_fbb, + ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE) +{ + DivOptionsBuilder builder_(_fbb); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +struct TopKV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; 
+ +struct TopKV2OptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit TopKV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + TopKV2OptionsBuilder &operator=(const TopKV2OptionsBuilder &); + flatbuffers::Offset<TopKV2Options> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<TopKV2Options>(end); + return o; + } +}; + +inline flatbuffers::Offset<TopKV2Options> CreateTopKV2Options(flatbuffers::FlatBufferBuilder &_fbb) +{ + TopKV2OptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct EmbeddingLookupSparseOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_COMBINER = 4 + }; + CombinerType combiner() const + { + return static_cast<CombinerType>(GetField<int8_t>(VT_COMBINER, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_COMBINER) && + verifier.EndTable(); + } +}; + +struct EmbeddingLookupSparseOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_combiner(CombinerType combiner) + { + fbb_.AddElement<int8_t>(EmbeddingLookupSparseOptions::VT_COMBINER, + static_cast<int8_t>(combiner), 0); + } + explicit EmbeddingLookupSparseOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + EmbeddingLookupSparseOptionsBuilder &operator=(const EmbeddingLookupSparseOptionsBuilder &); + flatbuffers::Offset<EmbeddingLookupSparseOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<EmbeddingLookupSparseOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<EmbeddingLookupSparseOptions> +CreateEmbeddingLookupSparseOptions(flatbuffers::FlatBufferBuilder &_fbb, + CombinerType combiner = CombinerType_SUM) +{ + EmbeddingLookupSparseOptionsBuilder builder_(_fbb); + 
builder_.add_combiner(combiner); + return builder_.Finish(); +} + +struct GatherOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_AXIS = 4 + }; + int32_t axis() const { return GetField<int32_t>(VT_AXIS, 0); } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_AXIS) && + verifier.EndTable(); + } +}; + +struct GatherOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(int32_t axis) { fbb_.AddElement<int32_t>(GatherOptions::VT_AXIS, axis, 0); } + explicit GatherOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + GatherOptionsBuilder &operator=(const GatherOptionsBuilder &); + flatbuffers::Offset<GatherOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<GatherOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<GatherOptions> CreateGatherOptions(flatbuffers::FlatBufferBuilder &_fbb, + int32_t axis = 0) +{ + GatherOptionsBuilder builder_(_fbb); + builder_.add_axis(axis); + return builder_.Finish(); +} + +struct TransposeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct TransposeOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit TransposeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + TransposeOptionsBuilder &operator=(const TransposeOptionsBuilder &); + flatbuffers::Offset<TransposeOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<TransposeOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<TransposeOptions> +CreateTransposeOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + 
TransposeOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct ExpOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct ExpOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit ExpOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ExpOptionsBuilder &operator=(const ExpOptionsBuilder &); + flatbuffers::Offset<ExpOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<ExpOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<ExpOptions> CreateExpOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + ExpOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct CosOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct CosOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit CosOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + CosOptionsBuilder &operator=(const CosOptionsBuilder &); + flatbuffers::Offset<CosOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<CosOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<CosOptions> CreateCosOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + CosOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct ReducerOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_KEEP_DIMS = 4 + }; + bool keep_dims() const { return GetField<uint8_t>(VT_KEEP_DIMS, 0) != 0; } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && 
VerifyField<uint8_t>(verifier, VT_KEEP_DIMS) && + verifier.EndTable(); + } +}; + +struct ReducerOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_keep_dims(bool keep_dims) + { + fbb_.AddElement<uint8_t>(ReducerOptions::VT_KEEP_DIMS, static_cast<uint8_t>(keep_dims), 0); + } + explicit ReducerOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ReducerOptionsBuilder &operator=(const ReducerOptionsBuilder &); + flatbuffers::Offset<ReducerOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<ReducerOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<ReducerOptions> +CreateReducerOptions(flatbuffers::FlatBufferBuilder &_fbb, bool keep_dims = false) +{ + ReducerOptionsBuilder builder_(_fbb); + builder_.add_keep_dims(keep_dims); + return builder_.Finish(); +} + +struct SqueezeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_SQUEEZE_DIMS = 4 + }; + const flatbuffers::Vector<int32_t> *squeeze_dims() const + { + return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_SQUEEZE_DIMS); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_SQUEEZE_DIMS) && + verifier.VerifyVector(squeeze_dims()) && verifier.EndTable(); + } +}; + +struct SqueezeOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_squeeze_dims(flatbuffers::Offset<flatbuffers::Vector<int32_t>> squeeze_dims) + { + fbb_.AddOffset(SqueezeOptions::VT_SQUEEZE_DIMS, squeeze_dims); + } + explicit SqueezeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + SqueezeOptionsBuilder &operator=(const SqueezeOptionsBuilder &); + flatbuffers::Offset<SqueezeOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<SqueezeOptions>(end); 
+ return o; + } +}; + +inline flatbuffers::Offset<SqueezeOptions> +CreateSqueezeOptions(flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset<flatbuffers::Vector<int32_t>> squeeze_dims = 0) +{ + SqueezeOptionsBuilder builder_(_fbb); + builder_.add_squeeze_dims(squeeze_dims); + return builder_.Finish(); +} + +inline flatbuffers::Offset<SqueezeOptions> +CreateSqueezeOptionsDirect(flatbuffers::FlatBufferBuilder &_fbb, + const std::vector<int32_t> *squeeze_dims = nullptr) +{ + return onert_tflite::CreateSqueezeOptions( + _fbb, squeeze_dims ? _fbb.CreateVector<int32_t>(*squeeze_dims) : 0); +} + +struct SplitOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_NUM_SPLITS = 4 + }; + int32_t num_splits() const { return GetField<int32_t>(VT_NUM_SPLITS, 0); } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_NUM_SPLITS) && + verifier.EndTable(); + } +}; + +struct SplitOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_num_splits(int32_t num_splits) + { + fbb_.AddElement<int32_t>(SplitOptions::VT_NUM_SPLITS, num_splits, 0); + } + explicit SplitOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + SplitOptionsBuilder &operator=(const SplitOptionsBuilder &); + flatbuffers::Offset<SplitOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<SplitOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<SplitOptions> CreateSplitOptions(flatbuffers::FlatBufferBuilder &_fbb, + int32_t num_splits = 0) +{ + SplitOptionsBuilder builder_(_fbb); + builder_.add_num_splits(num_splits); + return builder_.Finish(); +} + +struct SplitVOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_NUM_SPLITS = 4 + }; + int32_t num_splits() const { return GetField<int32_t>(VT_NUM_SPLITS, 0); } + bool 
Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_NUM_SPLITS) && + verifier.EndTable(); + } +}; + +struct SplitVOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_num_splits(int32_t num_splits) + { + fbb_.AddElement<int32_t>(SplitVOptions::VT_NUM_SPLITS, num_splits, 0); + } + explicit SplitVOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + SplitVOptionsBuilder &operator=(const SplitVOptionsBuilder &); + flatbuffers::Offset<SplitVOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<SplitVOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<SplitVOptions> CreateSplitVOptions(flatbuffers::FlatBufferBuilder &_fbb, + int32_t num_splits = 0) +{ + SplitVOptionsBuilder builder_(_fbb); + builder_.add_num_splits(num_splits); + return builder_.Finish(); +} + +struct StridedSliceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_BEGIN_MASK = 4, + VT_END_MASK = 6, + VT_ELLIPSIS_MASK = 8, + VT_NEW_AXIS_MASK = 10, + VT_SHRINK_AXIS_MASK = 12 + }; + int32_t begin_mask() const { return GetField<int32_t>(VT_BEGIN_MASK, 0); } + int32_t end_mask() const { return GetField<int32_t>(VT_END_MASK, 0); } + int32_t ellipsis_mask() const { return GetField<int32_t>(VT_ELLIPSIS_MASK, 0); } + int32_t new_axis_mask() const { return GetField<int32_t>(VT_NEW_AXIS_MASK, 0); } + int32_t shrink_axis_mask() const { return GetField<int32_t>(VT_SHRINK_AXIS_MASK, 0); } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_BEGIN_MASK) && + VerifyField<int32_t>(verifier, VT_END_MASK) && + VerifyField<int32_t>(verifier, VT_ELLIPSIS_MASK) && + VerifyField<int32_t>(verifier, VT_NEW_AXIS_MASK) && + VerifyField<int32_t>(verifier, VT_SHRINK_AXIS_MASK) && verifier.EndTable(); + } 
+}; + +struct StridedSliceOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_begin_mask(int32_t begin_mask) + { + fbb_.AddElement<int32_t>(StridedSliceOptions::VT_BEGIN_MASK, begin_mask, 0); + } + void add_end_mask(int32_t end_mask) + { + fbb_.AddElement<int32_t>(StridedSliceOptions::VT_END_MASK, end_mask, 0); + } + void add_ellipsis_mask(int32_t ellipsis_mask) + { + fbb_.AddElement<int32_t>(StridedSliceOptions::VT_ELLIPSIS_MASK, ellipsis_mask, 0); + } + void add_new_axis_mask(int32_t new_axis_mask) + { + fbb_.AddElement<int32_t>(StridedSliceOptions::VT_NEW_AXIS_MASK, new_axis_mask, 0); + } + void add_shrink_axis_mask(int32_t shrink_axis_mask) + { + fbb_.AddElement<int32_t>(StridedSliceOptions::VT_SHRINK_AXIS_MASK, shrink_axis_mask, 0); + } + explicit StridedSliceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + StridedSliceOptionsBuilder &operator=(const StridedSliceOptionsBuilder &); + flatbuffers::Offset<StridedSliceOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<StridedSliceOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<StridedSliceOptions> +CreateStridedSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t begin_mask = 0, + int32_t end_mask = 0, int32_t ellipsis_mask = 0, + int32_t new_axis_mask = 0, int32_t shrink_axis_mask = 0) +{ + StridedSliceOptionsBuilder builder_(_fbb); + builder_.add_shrink_axis_mask(shrink_axis_mask); + builder_.add_new_axis_mask(new_axis_mask); + builder_.add_ellipsis_mask(ellipsis_mask); + builder_.add_end_mask(end_mask); + builder_.add_begin_mask(begin_mask); + return builder_.Finish(); +} + +struct LogSoftmaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct LogSoftmaxOptionsBuilder +{ + 
flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit LogSoftmaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + LogSoftmaxOptionsBuilder &operator=(const LogSoftmaxOptionsBuilder &); + flatbuffers::Offset<LogSoftmaxOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<LogSoftmaxOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<LogSoftmaxOptions> +CreateLogSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + LogSoftmaxOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct CastOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_IN_DATA_TYPE = 4, + VT_OUT_DATA_TYPE = 6 + }; + TensorType in_data_type() const + { + return static_cast<TensorType>(GetField<int8_t>(VT_IN_DATA_TYPE, 0)); + } + TensorType out_data_type() const + { + return static_cast<TensorType>(GetField<int8_t>(VT_OUT_DATA_TYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_IN_DATA_TYPE) && + VerifyField<int8_t>(verifier, VT_OUT_DATA_TYPE) && verifier.EndTable(); + } +}; + +struct CastOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_in_data_type(TensorType in_data_type) + { + fbb_.AddElement<int8_t>(CastOptions::VT_IN_DATA_TYPE, static_cast<int8_t>(in_data_type), 0); + } + void add_out_data_type(TensorType out_data_type) + { + fbb_.AddElement<int8_t>(CastOptions::VT_OUT_DATA_TYPE, static_cast<int8_t>(out_data_type), 0); + } + explicit CastOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + CastOptionsBuilder &operator=(const CastOptionsBuilder &); + flatbuffers::Offset<CastOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<CastOptions>(end); + return o; + } +}; + +inline 
flatbuffers::Offset<CastOptions> +CreateCastOptions(flatbuffers::FlatBufferBuilder &_fbb, + TensorType in_data_type = TensorType_FLOAT32, + TensorType out_data_type = TensorType_FLOAT32) +{ + CastOptionsBuilder builder_(_fbb); + builder_.add_out_data_type(out_data_type); + builder_.add_in_data_type(in_data_type); + return builder_.Finish(); +} + +struct DequantizeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct DequantizeOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit DequantizeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + DequantizeOptionsBuilder &operator=(const DequantizeOptionsBuilder &); + flatbuffers::Offset<DequantizeOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<DequantizeOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<DequantizeOptions> +CreateDequantizeOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + DequantizeOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct MaximumMinimumOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct MaximumMinimumOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit MaximumMinimumOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + MaximumMinimumOptionsBuilder &operator=(const MaximumMinimumOptionsBuilder &); + flatbuffers::Offset<MaximumMinimumOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<MaximumMinimumOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<MaximumMinimumOptions> 
+CreateMaximumMinimumOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + MaximumMinimumOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct TileOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct TileOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit TileOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + TileOptionsBuilder &operator=(const TileOptionsBuilder &); + flatbuffers::Offset<TileOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<TileOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<TileOptions> CreateTileOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + TileOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct ArgMaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_OUTPUT_TYPE = 4 + }; + TensorType output_type() const + { + return static_cast<TensorType>(GetField<int8_t>(VT_OUTPUT_TYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_OUTPUT_TYPE) && + verifier.EndTable(); + } +}; + +struct ArgMaxOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_output_type(TensorType output_type) + { + fbb_.AddElement<int8_t>(ArgMaxOptions::VT_OUTPUT_TYPE, static_cast<int8_t>(output_type), 0); + } + explicit ArgMaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ArgMaxOptionsBuilder &operator=(const ArgMaxOptionsBuilder &); + flatbuffers::Offset<ArgMaxOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<ArgMaxOptions>(end); + return o; + } +}; + +inline 
flatbuffers::Offset<ArgMaxOptions> +CreateArgMaxOptions(flatbuffers::FlatBufferBuilder &_fbb, + TensorType output_type = TensorType_FLOAT32) +{ + ArgMaxOptionsBuilder builder_(_fbb); + builder_.add_output_type(output_type); + return builder_.Finish(); +} + +struct ArgMinOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_OUTPUT_TYPE = 4 + }; + TensorType output_type() const + { + return static_cast<TensorType>(GetField<int8_t>(VT_OUTPUT_TYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_OUTPUT_TYPE) && + verifier.EndTable(); + } +}; + +struct ArgMinOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_output_type(TensorType output_type) + { + fbb_.AddElement<int8_t>(ArgMinOptions::VT_OUTPUT_TYPE, static_cast<int8_t>(output_type), 0); + } + explicit ArgMinOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ArgMinOptionsBuilder &operator=(const ArgMinOptionsBuilder &); + flatbuffers::Offset<ArgMinOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<ArgMinOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<ArgMinOptions> +CreateArgMinOptions(flatbuffers::FlatBufferBuilder &_fbb, + TensorType output_type = TensorType_FLOAT32) +{ + ArgMinOptionsBuilder builder_(_fbb); + builder_.add_output_type(output_type); + return builder_.Finish(); +} + +struct GreaterOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct GreaterOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit GreaterOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + GreaterOptionsBuilder 
&operator=(const GreaterOptionsBuilder &); + flatbuffers::Offset<GreaterOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<GreaterOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<GreaterOptions> +CreateGreaterOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + GreaterOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct GreaterEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct GreaterEqualOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit GreaterEqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + GreaterEqualOptionsBuilder &operator=(const GreaterEqualOptionsBuilder &); + flatbuffers::Offset<GreaterEqualOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<GreaterEqualOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<GreaterEqualOptions> +CreateGreaterEqualOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + GreaterEqualOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct LessOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct LessOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit LessOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + LessOptionsBuilder &operator=(const LessOptionsBuilder &); + flatbuffers::Offset<LessOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<LessOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<LessOptions> 
CreateLessOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + LessOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct LessEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct LessEqualOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit LessEqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + LessEqualOptionsBuilder &operator=(const LessEqualOptionsBuilder &); + flatbuffers::Offset<LessEqualOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<LessEqualOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<LessEqualOptions> +CreateLessEqualOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + LessEqualOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct NegOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct NegOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit NegOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + NegOptionsBuilder &operator=(const NegOptionsBuilder &); + flatbuffers::Offset<NegOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<NegOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<NegOptions> CreateNegOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + NegOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct SelectOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && 
verifier.EndTable(); + } +}; + +struct SelectOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit SelectOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + SelectOptionsBuilder &operator=(const SelectOptionsBuilder &); + flatbuffers::Offset<SelectOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<SelectOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<SelectOptions> CreateSelectOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + SelectOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct SliceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct SliceOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit SliceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + SliceOptionsBuilder &operator=(const SliceOptionsBuilder &); + flatbuffers::Offset<SliceOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<SliceOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<SliceOptions> CreateSliceOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + SliceOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct TransposeConvOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_PADDING = 4, + VT_STRIDE_W = 6, + VT_STRIDE_H = 8 + }; + Padding padding() const { return static_cast<Padding>(GetField<int8_t>(VT_PADDING, 0)); } + int32_t stride_w() const { return GetField<int32_t>(VT_STRIDE_W, 0); } + int32_t stride_h() const { return GetField<int32_t>(VT_STRIDE_H, 0); } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && 
VerifyField<int8_t>(verifier, VT_PADDING) && + VerifyField<int32_t>(verifier, VT_STRIDE_W) && + VerifyField<int32_t>(verifier, VT_STRIDE_H) && verifier.EndTable(); + } +}; + +struct TransposeConvOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_padding(Padding padding) + { + fbb_.AddElement<int8_t>(TransposeConvOptions::VT_PADDING, static_cast<int8_t>(padding), 0); + } + void add_stride_w(int32_t stride_w) + { + fbb_.AddElement<int32_t>(TransposeConvOptions::VT_STRIDE_W, stride_w, 0); + } + void add_stride_h(int32_t stride_h) + { + fbb_.AddElement<int32_t>(TransposeConvOptions::VT_STRIDE_H, stride_h, 0); + } + explicit TransposeConvOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + TransposeConvOptionsBuilder &operator=(const TransposeConvOptionsBuilder &); + flatbuffers::Offset<TransposeConvOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<TransposeConvOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<TransposeConvOptions> +CreateTransposeConvOptions(flatbuffers::FlatBufferBuilder &_fbb, Padding padding = Padding_SAME, + int32_t stride_w = 0, int32_t stride_h = 0) +{ + TransposeConvOptionsBuilder builder_(_fbb); + builder_.add_stride_h(stride_h); + builder_.add_stride_w(stride_w); + builder_.add_padding(padding); + return builder_.Finish(); +} + +struct ExpandDimsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct ExpandDimsOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit ExpandDimsOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ExpandDimsOptionsBuilder &operator=(const ExpandDimsOptionsBuilder &); + flatbuffers::Offset<ExpandDimsOptions> Finish() 
+ { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<ExpandDimsOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<ExpandDimsOptions> +CreateExpandDimsOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + ExpandDimsOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct SparseToDenseOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_VALIDATE_INDICES = 4 + }; + bool validate_indices() const { return GetField<uint8_t>(VT_VALIDATE_INDICES, 0) != 0; } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_VALIDATE_INDICES) && + verifier.EndTable(); + } +}; + +struct SparseToDenseOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_validate_indices(bool validate_indices) + { + fbb_.AddElement<uint8_t>(SparseToDenseOptions::VT_VALIDATE_INDICES, + static_cast<uint8_t>(validate_indices), 0); + } + explicit SparseToDenseOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + SparseToDenseOptionsBuilder &operator=(const SparseToDenseOptionsBuilder &); + flatbuffers::Offset<SparseToDenseOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<SparseToDenseOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<SparseToDenseOptions> +CreateSparseToDenseOptions(flatbuffers::FlatBufferBuilder &_fbb, bool validate_indices = false) +{ + SparseToDenseOptionsBuilder builder_(_fbb); + builder_.add_validate_indices(validate_indices); + return builder_.Finish(); +} + +struct EqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct EqualOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit 
EqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + EqualOptionsBuilder &operator=(const EqualOptionsBuilder &); + flatbuffers::Offset<EqualOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<EqualOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<EqualOptions> CreateEqualOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + EqualOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct NotEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct NotEqualOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit NotEqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + NotEqualOptionsBuilder &operator=(const NotEqualOptionsBuilder &); + flatbuffers::Offset<NotEqualOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<NotEqualOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<NotEqualOptions> +CreateNotEqualOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + NotEqualOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct ShapeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_OUT_TYPE = 4 + }; + TensorType out_type() const { return static_cast<TensorType>(GetField<int8_t>(VT_OUT_TYPE, 0)); } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_OUT_TYPE) && + verifier.EndTable(); + } +}; + +struct ShapeOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_out_type(TensorType out_type) + { + fbb_.AddElement<int8_t>(ShapeOptions::VT_OUT_TYPE, static_cast<int8_t>(out_type), 0); + } + 
explicit ShapeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ShapeOptionsBuilder &operator=(const ShapeOptionsBuilder &); + flatbuffers::Offset<ShapeOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<ShapeOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<ShapeOptions> +CreateShapeOptions(flatbuffers::FlatBufferBuilder &_fbb, TensorType out_type = TensorType_FLOAT32) +{ + ShapeOptionsBuilder builder_(_fbb); + builder_.add_out_type(out_type); + return builder_.Finish(); +} + +struct RankOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct RankOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit RankOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + RankOptionsBuilder &operator=(const RankOptionsBuilder &); + flatbuffers::Offset<RankOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<RankOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<RankOptions> CreateRankOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + RankOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct PowOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct PowOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit PowOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + PowOptionsBuilder &operator=(const PowOptionsBuilder &); + flatbuffers::Offset<PowOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = 
flatbuffers::Offset<PowOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<PowOptions> CreatePowOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + PowOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct FakeQuantOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_MIN = 4, + VT_MAX = 6, + VT_NUM_BITS = 8, + VT_NARROW_RANGE = 10 + }; + float min() const { return GetField<float>(VT_MIN, 0.0f); } + float max() const { return GetField<float>(VT_MAX, 0.0f); } + int32_t num_bits() const { return GetField<int32_t>(VT_NUM_BITS, 0); } + bool narrow_range() const { return GetField<uint8_t>(VT_NARROW_RANGE, 0) != 0; } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<float>(verifier, VT_MIN) && + VerifyField<float>(verifier, VT_MAX) && VerifyField<int32_t>(verifier, VT_NUM_BITS) && + VerifyField<uint8_t>(verifier, VT_NARROW_RANGE) && verifier.EndTable(); + } +}; + +struct FakeQuantOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_min(float min) { fbb_.AddElement<float>(FakeQuantOptions::VT_MIN, min, 0.0f); } + void add_max(float max) { fbb_.AddElement<float>(FakeQuantOptions::VT_MAX, max, 0.0f); } + void add_num_bits(int32_t num_bits) + { + fbb_.AddElement<int32_t>(FakeQuantOptions::VT_NUM_BITS, num_bits, 0); + } + void add_narrow_range(bool narrow_range) + { + fbb_.AddElement<uint8_t>(FakeQuantOptions::VT_NARROW_RANGE, static_cast<uint8_t>(narrow_range), + 0); + } + explicit FakeQuantOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + FakeQuantOptionsBuilder &operator=(const FakeQuantOptionsBuilder &); + flatbuffers::Offset<FakeQuantOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<FakeQuantOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<FakeQuantOptions> 
+CreateFakeQuantOptions(flatbuffers::FlatBufferBuilder &_fbb, float min = 0.0f, float max = 0.0f, + int32_t num_bits = 0, bool narrow_range = false) +{ + FakeQuantOptionsBuilder builder_(_fbb); + builder_.add_num_bits(num_bits); + builder_.add_max(max); + builder_.add_min(min); + builder_.add_narrow_range(narrow_range); + return builder_.Finish(); +} + +struct PackOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_VALUES_COUNT = 4, + VT_AXIS = 6 + }; + int32_t values_count() const { return GetField<int32_t>(VT_VALUES_COUNT, 0); } + int32_t axis() const { return GetField<int32_t>(VT_AXIS, 0); } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_VALUES_COUNT) && + VerifyField<int32_t>(verifier, VT_AXIS) && verifier.EndTable(); + } +}; + +struct PackOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_values_count(int32_t values_count) + { + fbb_.AddElement<int32_t>(PackOptions::VT_VALUES_COUNT, values_count, 0); + } + void add_axis(int32_t axis) { fbb_.AddElement<int32_t>(PackOptions::VT_AXIS, axis, 0); } + explicit PackOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + PackOptionsBuilder &operator=(const PackOptionsBuilder &); + flatbuffers::Offset<PackOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<PackOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<PackOptions> +CreatePackOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t values_count = 0, int32_t axis = 0) +{ + PackOptionsBuilder builder_(_fbb); + builder_.add_axis(axis); + builder_.add_values_count(values_count); + return builder_.Finish(); +} + +struct LogicalOrOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && 
verifier.EndTable(); + } +}; + +struct LogicalOrOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit LogicalOrOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + LogicalOrOptionsBuilder &operator=(const LogicalOrOptionsBuilder &); + flatbuffers::Offset<LogicalOrOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<LogicalOrOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<LogicalOrOptions> +CreateLogicalOrOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + LogicalOrOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct OneHotOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_AXIS = 4 + }; + int32_t axis() const { return GetField<int32_t>(VT_AXIS, 0); } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_AXIS) && + verifier.EndTable(); + } +}; + +struct OneHotOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(int32_t axis) { fbb_.AddElement<int32_t>(OneHotOptions::VT_AXIS, axis, 0); } + explicit OneHotOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + OneHotOptionsBuilder &operator=(const OneHotOptionsBuilder &); + flatbuffers::Offset<OneHotOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<OneHotOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<OneHotOptions> CreateOneHotOptions(flatbuffers::FlatBufferBuilder &_fbb, + int32_t axis = 0) +{ + OneHotOptionsBuilder builder_(_fbb); + builder_.add_axis(axis); + return builder_.Finish(); +} + +struct AbsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && 
verifier.EndTable(); + } +}; + +struct AbsOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit AbsOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + AbsOptionsBuilder &operator=(const AbsOptionsBuilder &); + flatbuffers::Offset<AbsOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<AbsOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<AbsOptions> CreateAbsOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + AbsOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct HardSwishOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct HardSwishOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit HardSwishOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + HardSwishOptionsBuilder &operator=(const HardSwishOptionsBuilder &); + flatbuffers::Offset<HardSwishOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<HardSwishOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<HardSwishOptions> +CreateHardSwishOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + HardSwishOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct LogicalAndOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct LogicalAndOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit LogicalAndOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + LogicalAndOptionsBuilder &operator=(const 
LogicalAndOptionsBuilder &); + flatbuffers::Offset<LogicalAndOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<LogicalAndOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<LogicalAndOptions> +CreateLogicalAndOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + LogicalAndOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct LogicalNotOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct LogicalNotOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit LogicalNotOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + LogicalNotOptionsBuilder &operator=(const LogicalNotOptionsBuilder &); + flatbuffers::Offset<LogicalNotOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<LogicalNotOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<LogicalNotOptions> +CreateLogicalNotOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + LogicalNotOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct UnpackOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_NUM = 4, + VT_AXIS = 6 + }; + int32_t num() const { return GetField<int32_t>(VT_NUM, 0); } + int32_t axis() const { return GetField<int32_t>(VT_AXIS, 0); } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_NUM) && + VerifyField<int32_t>(verifier, VT_AXIS) && verifier.EndTable(); + } +}; + +struct UnpackOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_num(int32_t num) { fbb_.AddElement<int32_t>(UnpackOptions::VT_NUM, num, 0); } + void add_axis(int32_t axis) { 
fbb_.AddElement<int32_t>(UnpackOptions::VT_AXIS, axis, 0); } + explicit UnpackOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + UnpackOptionsBuilder &operator=(const UnpackOptionsBuilder &); + flatbuffers::Offset<UnpackOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<UnpackOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<UnpackOptions> CreateUnpackOptions(flatbuffers::FlatBufferBuilder &_fbb, + int32_t num = 0, int32_t axis = 0) +{ + UnpackOptionsBuilder builder_(_fbb); + builder_.add_axis(axis); + builder_.add_num(num); + return builder_.Finish(); +} + +struct FloorDivOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct FloorDivOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit FloorDivOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + FloorDivOptionsBuilder &operator=(const FloorDivOptionsBuilder &); + flatbuffers::Offset<FloorDivOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<FloorDivOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<FloorDivOptions> +CreateFloorDivOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + FloorDivOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct SquareOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct SquareOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit SquareOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + SquareOptionsBuilder 
&operator=(const SquareOptionsBuilder &); + flatbuffers::Offset<SquareOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<SquareOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<SquareOptions> CreateSquareOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + SquareOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct ZerosLikeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct ZerosLikeOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit ZerosLikeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + ZerosLikeOptionsBuilder &operator=(const ZerosLikeOptionsBuilder &); + flatbuffers::Offset<ZerosLikeOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<ZerosLikeOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<ZerosLikeOptions> +CreateZerosLikeOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + ZerosLikeOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct FillOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct FillOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit FillOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + FillOptionsBuilder &operator=(const FillOptionsBuilder &); + flatbuffers::Offset<FillOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<FillOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<FillOptions> CreateFillOptions(flatbuffers::FlatBufferBuilder &_fbb) 
+{ + FillOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct FloorModOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct FloorModOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit FloorModOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + FloorModOptionsBuilder &operator=(const FloorModOptionsBuilder &); + flatbuffers::Offset<FloorModOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<FloorModOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<FloorModOptions> +CreateFloorModOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + FloorModOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct RangeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && verifier.EndTable(); + } +}; + +struct RangeOptionsBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit RangeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + RangeOptionsBuilder &operator=(const RangeOptionsBuilder &); + flatbuffers::Offset<RangeOptions> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<RangeOptions>(end); + return o; + } +}; + +inline flatbuffers::Offset<RangeOptions> CreateRangeOptions(flatbuffers::FlatBufferBuilder &_fbb) +{ + RangeOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +struct LeakyReluOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table +{ + enum + { + VT_ALPHA = 4 + }; + float alpha() const { return GetField<float>(VT_ALPHA, 0.0f); } + bool Verify(flatbuffers::Verifier &verifier) const + { + 
    // Tail of LeakyReluOptions::Verify (table head is above this chunk):
    // bounds-check the vtable, the ALPHA scalar slot, then the table end.
    return VerifyTableStart(verifier) && VerifyField<float>(verifier, VT_ALPHA) &&
           verifier.EndTable();
  }
};

// NOTE(review): flatc-generated FlatBuffers code (TFLite schema). Comments only —
// do not hand-edit the logic; regenerate from the schema instead.
// Each option follows the generated triple: read-only table view (above),
// incremental Builder, and a one-shot Create* helper.

// Incremental builder for LeakyReluOptions. Copy-assignment is declared but never
// defined: the generated builders are intentionally non-assignable (pre-C++11
// generated-code idiom for "= delete").
struct LeakyReluOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  // Writes alpha into its vtable slot; 0.0f is the schema default (defaulted
  // fields are omitted from the buffer).
  void add_alpha(float alpha) { fbb_.AddElement<float>(LeakyReluOptions::VT_ALPHA, alpha, 0.0f); }
  explicit LeakyReluOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  LeakyReluOptionsBuilder &operator=(const LeakyReluOptionsBuilder &);
  // Closes the table and returns its offset within the buffer.
  flatbuffers::Offset<LeakyReluOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<LeakyReluOptions>(end);
    return o;
  }
};

// One-shot creator for LeakyReluOptions.
inline flatbuffers::Offset<LeakyReluOptions>
CreateLeakyReluOptions(flatbuffers::FlatBufferBuilder &_fbb, float alpha = 0.0f)
{
  LeakyReluOptionsBuilder builder_(_fbb);
  builder_.add_alpha(alpha);
  return builder_.Finish();
}

// Field-less options table: Verify only checks table framing.
struct SquaredDifferenceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && verifier.EndTable();
  }
};

struct SquaredDifferenceOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  explicit SquaredDifferenceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  SquaredDifferenceOptionsBuilder &operator=(const SquaredDifferenceOptionsBuilder &);
  flatbuffers::Offset<SquaredDifferenceOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<SquaredDifferenceOptions>(end);
    return o;
  }
};

inline flatbuffers::Offset<SquaredDifferenceOptions>
CreateSquaredDifferenceOptions(flatbuffers::FlatBufferBuilder &_fbb)
{
  SquaredDifferenceOptionsBuilder builder_(_fbb);
  return builder_.Finish();
}

// MirrorPadOptions: single enum field `mode`, stored as int8 (raw default 0;
// CreateMirrorPadOptions defaults to MirrorPadMode_REFLECT, so presumably
// REFLECT == 0 — verify against the MirrorPadMode enum declared earlier).
struct MirrorPadOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  enum
  {
    VT_MODE = 4
  };
  MirrorPadMode mode() const { return static_cast<MirrorPadMode>(GetField<int8_t>(VT_MODE, 0)); }
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_MODE) &&
           verifier.EndTable();
  }
};

struct MirrorPadOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_mode(MirrorPadMode mode)
  {
    fbb_.AddElement<int8_t>(MirrorPadOptions::VT_MODE, static_cast<int8_t>(mode), 0);
  }
  explicit MirrorPadOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  MirrorPadOptionsBuilder &operator=(const MirrorPadOptionsBuilder &);
  flatbuffers::Offset<MirrorPadOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<MirrorPadOptions>(end);
    return o;
  }
};

inline flatbuffers::Offset<MirrorPadOptions>
CreateMirrorPadOptions(flatbuffers::FlatBufferBuilder &_fbb,
                       MirrorPadMode mode = MirrorPadMode_REFLECT)
{
  MirrorPadOptionsBuilder builder_(_fbb);
  builder_.add_mode(mode);
  return builder_.Finish();
}

// UniqueOptions: output-index tensor type. Raw schema default is 2; the
// creator's default is TensorType_INT32, so presumably INT32 == 2 — confirm
// against the TensorType enum if this is ever touched.
struct UniqueOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  enum
  {
    VT_IDX_OUT_TYPE = 4
  };
  TensorType idx_out_type() const
  {
    return static_cast<TensorType>(GetField<int8_t>(VT_IDX_OUT_TYPE, 2));
  }
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_IDX_OUT_TYPE) &&
           verifier.EndTable();
  }
};

struct UniqueOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_idx_out_type(TensorType idx_out_type)
  {
    fbb_.AddElement<int8_t>(UniqueOptions::VT_IDX_OUT_TYPE, static_cast<int8_t>(idx_out_type), 2);
  }
  explicit UniqueOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  UniqueOptionsBuilder &operator=(const UniqueOptionsBuilder &);
  flatbuffers::Offset<UniqueOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<UniqueOptions>(end);
    return o;
  }
};

inline flatbuffers::Offset<UniqueOptions>
CreateUniqueOptions(flatbuffers::FlatBufferBuilder &_fbb,
                    TensorType idx_out_type = TensorType_INT32)
{
  UniqueOptionsBuilder builder_(_fbb);
  builder_.add_idx_out_type(idx_out_type);
  return builder_.Finish();
}

// Field-less options table for REVERSE_V2.
struct ReverseV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && verifier.EndTable();
  }
};

struct ReverseV2OptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  explicit ReverseV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  ReverseV2OptionsBuilder &operator=(const ReverseV2OptionsBuilder &);
  flatbuffers::Offset<ReverseV2Options> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<ReverseV2Options>(end);
    return o;
  }
};

inline flatbuffers::Offset<ReverseV2Options>
CreateReverseV2Options(flatbuffers::FlatBufferBuilder &_fbb)
{
  ReverseV2OptionsBuilder builder_(_fbb);
  return builder_.Finish();
}

// Field-less options table for ADD_N.
struct AddNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && verifier.EndTable();
  }
};

struct AddNOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  explicit AddNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  AddNOptionsBuilder &operator=(const AddNOptionsBuilder &);
  flatbuffers::Offset<AddNOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<AddNOptions>(end);
    return o;
  }
};

inline flatbuffers::Offset<AddNOptions>
// NOTE(review): flatc-generated FlatBuffers code (TFLite schema); comments only.
// One-shot creator for AddNOptions (return type is on the preceding line).
CreateAddNOptions(flatbuffers::FlatBufferBuilder &_fbb)
{
  AddNOptionsBuilder builder_(_fbb);
  return builder_.Finish();
}

// Field-less options table for GATHER_ND.
struct GatherNdOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && verifier.EndTable();
  }
};

struct GatherNdOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  explicit GatherNdOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Declared-but-undefined copy-assignment: builders are non-assignable.
  GatherNdOptionsBuilder &operator=(const GatherNdOptionsBuilder &);
  flatbuffers::Offset<GatherNdOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<GatherNdOptions>(end);
    return o;
  }
};

inline flatbuffers::Offset<GatherNdOptions>
CreateGatherNdOptions(flatbuffers::FlatBufferBuilder &_fbb)
{
  GatherNdOptionsBuilder builder_(_fbb);
  return builder_.Finish();
}

// Field-less options table for WHERE.
struct WhereOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && verifier.EndTable();
  }
};

struct WhereOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  explicit WhereOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  WhereOptionsBuilder &operator=(const WhereOptionsBuilder &);
  flatbuffers::Offset<WhereOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<WhereOptions>(end);
    return o;
  }
};

inline flatbuffers::Offset<WhereOptions> CreateWhereOptions(flatbuffers::FlatBufferBuilder &_fbb)
{
  WhereOptionsBuilder builder_(_fbb);
  return builder_.Finish();
}

// ReverseSequenceOptions: two int32 fields (seq_dim, batch_dim), schema default 0.
struct ReverseSequenceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  enum
  {
    VT_SEQ_DIM = 4,
    VT_BATCH_DIM = 6
  };
  int32_t seq_dim() const { return GetField<int32_t>(VT_SEQ_DIM, 0); }
  int32_t batch_dim() const { return GetField<int32_t>(VT_BATCH_DIM, 0); }
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_SEQ_DIM) &&
           VerifyField<int32_t>(verifier, VT_BATCH_DIM) && verifier.EndTable();
  }
};

struct ReverseSequenceOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_seq_dim(int32_t seq_dim)
  {
    fbb_.AddElement<int32_t>(ReverseSequenceOptions::VT_SEQ_DIM, seq_dim, 0);
  }
  void add_batch_dim(int32_t batch_dim)
  {
    fbb_.AddElement<int32_t>(ReverseSequenceOptions::VT_BATCH_DIM, batch_dim, 0);
  }
  explicit ReverseSequenceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  ReverseSequenceOptionsBuilder &operator=(const ReverseSequenceOptionsBuilder &);
  flatbuffers::Offset<ReverseSequenceOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<ReverseSequenceOptions>(end);
    return o;
  }
};

// Fields are added in reverse vtable order (generated convention); the
// resulting table is identical either way.
inline flatbuffers::Offset<ReverseSequenceOptions>
CreateReverseSequenceOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t seq_dim = 0,
                             int32_t batch_dim = 0)
{
  ReverseSequenceOptionsBuilder builder_(_fbb);
  builder_.add_batch_dim(batch_dim);
  builder_.add_seq_dim(seq_dim);
  return builder_.Finish();
}

// Field-less options table for MATRIX_DIAG.
struct MatrixDiagOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && verifier.EndTable();
  }
};

struct MatrixDiagOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  explicit MatrixDiagOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  MatrixDiagOptionsBuilder &operator=(const MatrixDiagOptionsBuilder &);
  flatbuffers::Offset<MatrixDiagOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<MatrixDiagOptions>(end);
    return o;
  }
};

inline flatbuffers::Offset<MatrixDiagOptions>
CreateMatrixDiagOptions(flatbuffers::FlatBufferBuilder &_fbb)
{
  MatrixDiagOptionsBuilder builder_(_fbb);
  return builder_.Finish();
}

// Field-less options table for QUANTIZE.
struct QuantizeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && verifier.EndTable();
  }
};

struct QuantizeOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  explicit QuantizeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  QuantizeOptionsBuilder &operator=(const QuantizeOptionsBuilder &);
  flatbuffers::Offset<QuantizeOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<QuantizeOptions>(end);
    return o;
  }
};

inline flatbuffers::Offset<QuantizeOptions>
CreateQuantizeOptions(flatbuffers::FlatBufferBuilder &_fbb)
{
  QuantizeOptionsBuilder builder_(_fbb);
  return builder_.Finish();
}

// Field-less options table for MATRIX_SET_DIAG.
struct MatrixSetDiagOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && verifier.EndTable();
  }
};

struct MatrixSetDiagOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  explicit MatrixSetDiagOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  MatrixSetDiagOptionsBuilder &operator=(const MatrixSetDiagOptionsBuilder &);
  flatbuffers::Offset<MatrixSetDiagOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<MatrixSetDiagOptions>(end);
    return o;
  }
};

inline flatbuffers::Offset<MatrixSetDiagOptions>
CreateMatrixSetDiagOptions(flatbuffers::FlatBufferBuilder &_fbb)
{
  // NOTE(review): flatc-generated FlatBuffers code (TFLite schema); comments only.
  // Body of CreateMatrixSetDiagOptions (signature opens on the preceding line).
  MatrixSetDiagOptionsBuilder builder_(_fbb);
  return builder_.Finish();
}

// IfOptions: indices of the then/else subgraphs within the model's subgraph list.
struct IfOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  enum
  {
    VT_THEN_SUBGRAPH_INDEX = 4,
    VT_ELSE_SUBGRAPH_INDEX = 6
  };
  int32_t then_subgraph_index() const { return GetField<int32_t>(VT_THEN_SUBGRAPH_INDEX, 0); }
  int32_t else_subgraph_index() const { return GetField<int32_t>(VT_ELSE_SUBGRAPH_INDEX, 0); }
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_THEN_SUBGRAPH_INDEX) &&
           VerifyField<int32_t>(verifier, VT_ELSE_SUBGRAPH_INDEX) && verifier.EndTable();
  }
};

struct IfOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_then_subgraph_index(int32_t then_subgraph_index)
  {
    fbb_.AddElement<int32_t>(IfOptions::VT_THEN_SUBGRAPH_INDEX, then_subgraph_index, 0);
  }
  void add_else_subgraph_index(int32_t else_subgraph_index)
  {
    fbb_.AddElement<int32_t>(IfOptions::VT_ELSE_SUBGRAPH_INDEX, else_subgraph_index, 0);
  }
  explicit IfOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Declared-but-undefined copy-assignment: builders are non-assignable.
  IfOptionsBuilder &operator=(const IfOptionsBuilder &);
  flatbuffers::Offset<IfOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<IfOptions>(end);
    return o;
  }
};

inline flatbuffers::Offset<IfOptions> CreateIfOptions(flatbuffers::FlatBufferBuilder &_fbb,
                                                      int32_t then_subgraph_index = 0,
                                                      int32_t else_subgraph_index = 0)
{
  IfOptionsBuilder builder_(_fbb);
  // Reverse vtable order (generated convention).
  builder_.add_else_subgraph_index(else_subgraph_index);
  builder_.add_then_subgraph_index(then_subgraph_index);
  return builder_.Finish();
}

// WhileOptions: indices of the condition/body subgraphs.
struct WhileOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  enum
  {
    VT_COND_SUBGRAPH_INDEX = 4,
    VT_BODY_SUBGRAPH_INDEX = 6
  };
  int32_t cond_subgraph_index() const { return GetField<int32_t>(VT_COND_SUBGRAPH_INDEX, 0); }
  int32_t body_subgraph_index() const { return GetField<int32_t>(VT_BODY_SUBGRAPH_INDEX, 0); }
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_COND_SUBGRAPH_INDEX) &&
           VerifyField<int32_t>(verifier, VT_BODY_SUBGRAPH_INDEX) && verifier.EndTable();
  }
};

struct WhileOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_cond_subgraph_index(int32_t cond_subgraph_index)
  {
    fbb_.AddElement<int32_t>(WhileOptions::VT_COND_SUBGRAPH_INDEX, cond_subgraph_index, 0);
  }
  void add_body_subgraph_index(int32_t body_subgraph_index)
  {
    fbb_.AddElement<int32_t>(WhileOptions::VT_BODY_SUBGRAPH_INDEX, body_subgraph_index, 0);
  }
  explicit WhileOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  WhileOptionsBuilder &operator=(const WhileOptionsBuilder &);
  flatbuffers::Offset<WhileOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<WhileOptions>(end);
    return o;
  }
};

inline flatbuffers::Offset<WhileOptions> CreateWhileOptions(flatbuffers::FlatBufferBuilder &_fbb,
                                                            int32_t cond_subgraph_index = 0,
                                                            int32_t body_subgraph_index = 0)
{
  WhileOptionsBuilder builder_(_fbb);
  builder_.add_body_subgraph_index(body_subgraph_index);
  builder_.add_cond_subgraph_index(cond_subgraph_index);
  return builder_.Finish();
}

// Field-less options table for NON_MAX_SUPPRESSION_V4.
struct NonMaxSuppressionV4Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && verifier.EndTable();
  }
};

struct NonMaxSuppressionV4OptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  explicit NonMaxSuppressionV4OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  NonMaxSuppressionV4OptionsBuilder &operator=(const NonMaxSuppressionV4OptionsBuilder &);
  flatbuffers::Offset<NonMaxSuppressionV4Options> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<NonMaxSuppressionV4Options>(end);
    return o;
  }
};

inline flatbuffers::Offset<NonMaxSuppressionV4Options>
CreateNonMaxSuppressionV4Options(flatbuffers::FlatBufferBuilder &_fbb)
{
  NonMaxSuppressionV4OptionsBuilder builder_(_fbb);
  return builder_.Finish();
}

// Field-less options table for NON_MAX_SUPPRESSION_V5.
struct NonMaxSuppressionV5Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && verifier.EndTable();
  }
};

struct NonMaxSuppressionV5OptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  explicit NonMaxSuppressionV5OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  NonMaxSuppressionV5OptionsBuilder &operator=(const NonMaxSuppressionV5OptionsBuilder &);
  flatbuffers::Offset<NonMaxSuppressionV5Options> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<NonMaxSuppressionV5Options>(end);
    return o;
  }
};

inline flatbuffers::Offset<NonMaxSuppressionV5Options>
CreateNonMaxSuppressionV5Options(flatbuffers::FlatBufferBuilder &_fbb)
{
  NonMaxSuppressionV5OptionsBuilder builder_(_fbb);
  return builder_.Finish();
}

// Field-less options table for SCATTER_ND.
struct ScatterNdOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && verifier.EndTable();
  }
};

struct ScatterNdOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  explicit ScatterNdOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  ScatterNdOptionsBuilder &operator=(const ScatterNdOptionsBuilder &);
  flatbuffers::Offset<ScatterNdOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<ScatterNdOptions>(end);
    return o;
  }
};

inline flatbuffers::Offset<ScatterNdOptions>
CreateScatterNdOptions(flatbuffers::FlatBufferBuilder &_fbb)
{
  ScatterNdOptionsBuilder builder_(_fbb);
  return builder_.Finish();
}

// Field-less options table for SELECT_V2.
struct SelectV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && verifier.EndTable();
  }
};

struct SelectV2OptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  explicit SelectV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  SelectV2OptionsBuilder &operator=(const SelectV2OptionsBuilder &);
  flatbuffers::Offset<SelectV2Options> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<SelectV2Options>(end);
    return o;
  }
};

inline flatbuffers::Offset<SelectV2Options>
CreateSelectV2Options(flatbuffers::FlatBufferBuilder &_fbb)
{
  SelectV2OptionsBuilder builder_(_fbb);
  return builder_.Finish();
}

// Field-less options table for DENSIFY.
struct DensifyOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && verifier.EndTable();
  }
};

struct DensifyOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  explicit DensifyOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  DensifyOptionsBuilder &operator=(const DensifyOptionsBuilder &);
  flatbuffers::Offset<DensifyOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<DensifyOptions>(end);
    return o;
  }
};

inline flatbuffers::Offset<DensifyOptions>
CreateDensifyOptions(flatbuffers::FlatBufferBuilder &_fbb)
{
  DensifyOptionsBuilder builder_(_fbb);
  return builder_.Finish();
}
// NOTE(review): flatc-generated FlatBuffers code (TFLite schema); comments only.

// Field-less options table for SEGMENT_SUM.
struct SegmentSumOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && verifier.EndTable();
  }
};

struct SegmentSumOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  explicit SegmentSumOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Declared-but-undefined copy-assignment: builders are non-assignable.
  SegmentSumOptionsBuilder &operator=(const SegmentSumOptionsBuilder &);
  flatbuffers::Offset<SegmentSumOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<SegmentSumOptions>(end);
    return o;
  }
};

inline flatbuffers::Offset<SegmentSumOptions>
CreateSegmentSumOptions(flatbuffers::FlatBufferBuilder &_fbb)
{
  SegmentSumOptionsBuilder builder_(_fbb);
  return builder_.Finish();
}

// BatchMatMulOptions: two bool flags stored as uint8 (default false).
struct BatchMatMulOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  enum
  {
    VT_ADJOINT_LHS = 4,
    VT_ADJOINT_RHS = 6
  };
  bool adjoint_lhs() const { return GetField<uint8_t>(VT_ADJOINT_LHS, 0) != 0; }
  bool adjoint_rhs() const { return GetField<uint8_t>(VT_ADJOINT_RHS, 0) != 0; }
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_ADJOINT_LHS) &&
           VerifyField<uint8_t>(verifier, VT_ADJOINT_RHS) && verifier.EndTable();
  }
};

struct BatchMatMulOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_adjoint_lhs(bool adjoint_lhs)
  {
    fbb_.AddElement<uint8_t>(BatchMatMulOptions::VT_ADJOINT_LHS, static_cast<uint8_t>(adjoint_lhs),
                             0);
  }
  void add_adjoint_rhs(bool adjoint_rhs)
  {
    fbb_.AddElement<uint8_t>(BatchMatMulOptions::VT_ADJOINT_RHS, static_cast<uint8_t>(adjoint_rhs),
                             0);
  }
  explicit BatchMatMulOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  BatchMatMulOptionsBuilder &operator=(const BatchMatMulOptionsBuilder &);
  flatbuffers::Offset<BatchMatMulOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<BatchMatMulOptions>(end);
    return o;
  }
};

inline flatbuffers::Offset<BatchMatMulOptions>
CreateBatchMatMulOptions(flatbuffers::FlatBufferBuilder &_fbb, bool adjoint_lhs = false,
                         bool adjoint_rhs = false)
{
  BatchMatMulOptionsBuilder builder_(_fbb);
  // Reverse vtable order (generated convention).
  builder_.add_adjoint_rhs(adjoint_rhs);
  builder_.add_adjoint_lhs(adjoint_lhs);
  return builder_.Finish();
}

// OperatorCode: identifies an op. builtin_code selects the builtin kind
// (raw default 0; CreateOperatorCode defaults to BuiltinOperator_ADD, so
// presumably ADD == 0); custom_code names a custom op; version defaults to 1.
struct OperatorCode FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  enum
  {
    VT_BUILTIN_CODE = 4,
    VT_CUSTOM_CODE = 6,
    VT_VERSION = 8
  };
  BuiltinOperator builtin_code() const
  {
    return static_cast<BuiltinOperator>(GetField<int8_t>(VT_BUILTIN_CODE, 0));
  }
  const flatbuffers::String *custom_code() const
  {
    return GetPointer<const flatbuffers::String *>(VT_CUSTOM_CODE);
  }
  int32_t version() const { return GetField<int32_t>(VT_VERSION, 1); }
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_BUILTIN_CODE) &&
           VerifyOffset(verifier, VT_CUSTOM_CODE) && verifier.VerifyString(custom_code()) &&
           VerifyField<int32_t>(verifier, VT_VERSION) && verifier.EndTable();
  }
};

struct OperatorCodeBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_builtin_code(BuiltinOperator builtin_code)
  {
    fbb_.AddElement<int8_t>(OperatorCode::VT_BUILTIN_CODE, static_cast<int8_t>(builtin_code), 0);
  }
  void add_custom_code(flatbuffers::Offset<flatbuffers::String> custom_code)
  {
    fbb_.AddOffset(OperatorCode::VT_CUSTOM_CODE, custom_code);
  }
  void add_version(int32_t version)
  {
    fbb_.AddElement<int32_t>(OperatorCode::VT_VERSION, version, 1);
  }
  explicit OperatorCodeBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  OperatorCodeBuilder &operator=(const OperatorCodeBuilder &);
  flatbuffers::Offset<OperatorCode> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<OperatorCode>(end);
    return o;
  }
};

inline flatbuffers::Offset<OperatorCode>
CreateOperatorCode(flatbuffers::FlatBufferBuilder &_fbb,
                   BuiltinOperator builtin_code = BuiltinOperator_ADD,
                   flatbuffers::Offset<flatbuffers::String> custom_code = 0, int32_t version = 1)
{
  OperatorCodeBuilder builder_(_fbb);
  builder_.add_version(version);
  builder_.add_custom_code(custom_code);
  builder_.add_builtin_code(builtin_code);
  return builder_.Finish();
}

// Convenience overload taking a C string; interns it (or passes null offset).
inline flatbuffers::Offset<OperatorCode>
CreateOperatorCodeDirect(flatbuffers::FlatBufferBuilder &_fbb,
                         BuiltinOperator builtin_code = BuiltinOperator_ADD,
                         const char *custom_code = nullptr, int32_t version = 1)
{
  return onert_tflite::CreateOperatorCode(
      _fbb, builtin_code, custom_code ? _fbb.CreateString(custom_code) : 0, version);
}

// Operator table: one op instance in a subgraph — opcode index, input/output
// tensor indices, and a BuiltinOptions union with one typed accessor per
// member. The definition continues past this chunk.
struct Operator FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  enum
  {
    VT_OPCODE_INDEX = 4,
    VT_INPUTS = 6,
    VT_OUTPUTS = 8,
    VT_BUILTIN_OPTIONS_TYPE = 10,
    VT_BUILTIN_OPTIONS = 12,
    VT_CUSTOM_OPTIONS = 14,
    VT_CUSTOM_OPTIONS_FORMAT = 16,
    VT_MUTATING_VARIABLE_INPUTS = 18,
    VT_INTERMEDIATES = 20
  };
  uint32_t opcode_index() const { return GetField<uint32_t>(VT_OPCODE_INDEX, 0); }
  const flatbuffers::Vector<int32_t> *inputs() const
  {
    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_INPUTS);
  }
  const flatbuffers::Vector<int32_t> *outputs() const
  {
    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_OUTPUTS);
  }
  BuiltinOptions builtin_options_type() const
  {
    return static_cast<BuiltinOptions>(GetField<uint8_t>(VT_BUILTIN_OPTIONS_TYPE, 0));
  }
  const void *builtin_options() const { return GetPointer<const void *>(VT_BUILTIN_OPTIONS); }
  template <typename T> const T *builtin_options_as() const;
  // Union accessors: return the typed options pointer iff the union tag
  // matches, else nullptr.
  const Conv2DOptions *builtin_options_as_Conv2DOptions() const
  {
+ return builtin_options_type() == BuiltinOptions_Conv2DOptions + ? static_cast<const Conv2DOptions *>(builtin_options()) + : nullptr; + } + const DepthwiseConv2DOptions *builtin_options_as_DepthwiseConv2DOptions() const + { + return builtin_options_type() == BuiltinOptions_DepthwiseConv2DOptions + ? static_cast<const DepthwiseConv2DOptions *>(builtin_options()) + : nullptr; + } + const ConcatEmbeddingsOptions *builtin_options_as_ConcatEmbeddingsOptions() const + { + return builtin_options_type() == BuiltinOptions_ConcatEmbeddingsOptions + ? static_cast<const ConcatEmbeddingsOptions *>(builtin_options()) + : nullptr; + } + const LSHProjectionOptions *builtin_options_as_LSHProjectionOptions() const + { + return builtin_options_type() == BuiltinOptions_LSHProjectionOptions + ? static_cast<const LSHProjectionOptions *>(builtin_options()) + : nullptr; + } + const Pool2DOptions *builtin_options_as_Pool2DOptions() const + { + return builtin_options_type() == BuiltinOptions_Pool2DOptions + ? static_cast<const Pool2DOptions *>(builtin_options()) + : nullptr; + } + const SVDFOptions *builtin_options_as_SVDFOptions() const + { + return builtin_options_type() == BuiltinOptions_SVDFOptions + ? static_cast<const SVDFOptions *>(builtin_options()) + : nullptr; + } + const RNNOptions *builtin_options_as_RNNOptions() const + { + return builtin_options_type() == BuiltinOptions_RNNOptions + ? static_cast<const RNNOptions *>(builtin_options()) + : nullptr; + } + const FullyConnectedOptions *builtin_options_as_FullyConnectedOptions() const + { + return builtin_options_type() == BuiltinOptions_FullyConnectedOptions + ? static_cast<const FullyConnectedOptions *>(builtin_options()) + : nullptr; + } + const SoftmaxOptions *builtin_options_as_SoftmaxOptions() const + { + return builtin_options_type() == BuiltinOptions_SoftmaxOptions + ? 
static_cast<const SoftmaxOptions *>(builtin_options()) + : nullptr; + } + const ConcatenationOptions *builtin_options_as_ConcatenationOptions() const + { + return builtin_options_type() == BuiltinOptions_ConcatenationOptions + ? static_cast<const ConcatenationOptions *>(builtin_options()) + : nullptr; + } + const AddOptions *builtin_options_as_AddOptions() const + { + return builtin_options_type() == BuiltinOptions_AddOptions + ? static_cast<const AddOptions *>(builtin_options()) + : nullptr; + } + const L2NormOptions *builtin_options_as_L2NormOptions() const + { + return builtin_options_type() == BuiltinOptions_L2NormOptions + ? static_cast<const L2NormOptions *>(builtin_options()) + : nullptr; + } + const LocalResponseNormalizationOptions * + builtin_options_as_LocalResponseNormalizationOptions() const + { + return builtin_options_type() == BuiltinOptions_LocalResponseNormalizationOptions + ? static_cast<const LocalResponseNormalizationOptions *>(builtin_options()) + : nullptr; + } + const LSTMOptions *builtin_options_as_LSTMOptions() const + { + return builtin_options_type() == BuiltinOptions_LSTMOptions + ? static_cast<const LSTMOptions *>(builtin_options()) + : nullptr; + } + const ResizeBilinearOptions *builtin_options_as_ResizeBilinearOptions() const + { + return builtin_options_type() == BuiltinOptions_ResizeBilinearOptions + ? static_cast<const ResizeBilinearOptions *>(builtin_options()) + : nullptr; + } + const CallOptions *builtin_options_as_CallOptions() const + { + return builtin_options_type() == BuiltinOptions_CallOptions + ? static_cast<const CallOptions *>(builtin_options()) + : nullptr; + } + const ReshapeOptions *builtin_options_as_ReshapeOptions() const + { + return builtin_options_type() == BuiltinOptions_ReshapeOptions + ? static_cast<const ReshapeOptions *>(builtin_options()) + : nullptr; + } + const SkipGramOptions *builtin_options_as_SkipGramOptions() const + { + return builtin_options_type() == BuiltinOptions_SkipGramOptions + ? 
static_cast<const SkipGramOptions *>(builtin_options()) + : nullptr; + } + const SpaceToDepthOptions *builtin_options_as_SpaceToDepthOptions() const + { + return builtin_options_type() == BuiltinOptions_SpaceToDepthOptions + ? static_cast<const SpaceToDepthOptions *>(builtin_options()) + : nullptr; + } + const EmbeddingLookupSparseOptions *builtin_options_as_EmbeddingLookupSparseOptions() const + { + return builtin_options_type() == BuiltinOptions_EmbeddingLookupSparseOptions + ? static_cast<const EmbeddingLookupSparseOptions *>(builtin_options()) + : nullptr; + } + const MulOptions *builtin_options_as_MulOptions() const + { + return builtin_options_type() == BuiltinOptions_MulOptions + ? static_cast<const MulOptions *>(builtin_options()) + : nullptr; + } + const PadOptions *builtin_options_as_PadOptions() const + { + return builtin_options_type() == BuiltinOptions_PadOptions + ? static_cast<const PadOptions *>(builtin_options()) + : nullptr; + } + const GatherOptions *builtin_options_as_GatherOptions() const + { + return builtin_options_type() == BuiltinOptions_GatherOptions + ? static_cast<const GatherOptions *>(builtin_options()) + : nullptr; + } + const BatchToSpaceNDOptions *builtin_options_as_BatchToSpaceNDOptions() const + { + return builtin_options_type() == BuiltinOptions_BatchToSpaceNDOptions + ? static_cast<const BatchToSpaceNDOptions *>(builtin_options()) + : nullptr; + } + const SpaceToBatchNDOptions *builtin_options_as_SpaceToBatchNDOptions() const + { + return builtin_options_type() == BuiltinOptions_SpaceToBatchNDOptions + ? static_cast<const SpaceToBatchNDOptions *>(builtin_options()) + : nullptr; + } + const TransposeOptions *builtin_options_as_TransposeOptions() const + { + return builtin_options_type() == BuiltinOptions_TransposeOptions + ? 
static_cast<const TransposeOptions *>(builtin_options()) + : nullptr; + } + const ReducerOptions *builtin_options_as_ReducerOptions() const + { + return builtin_options_type() == BuiltinOptions_ReducerOptions + ? static_cast<const ReducerOptions *>(builtin_options()) + : nullptr; + } + const SubOptions *builtin_options_as_SubOptions() const + { + return builtin_options_type() == BuiltinOptions_SubOptions + ? static_cast<const SubOptions *>(builtin_options()) + : nullptr; + } + const DivOptions *builtin_options_as_DivOptions() const + { + return builtin_options_type() == BuiltinOptions_DivOptions + ? static_cast<const DivOptions *>(builtin_options()) + : nullptr; + } + const SqueezeOptions *builtin_options_as_SqueezeOptions() const + { + return builtin_options_type() == BuiltinOptions_SqueezeOptions + ? static_cast<const SqueezeOptions *>(builtin_options()) + : nullptr; + } + const SequenceRNNOptions *builtin_options_as_SequenceRNNOptions() const + { + return builtin_options_type() == BuiltinOptions_SequenceRNNOptions + ? static_cast<const SequenceRNNOptions *>(builtin_options()) + : nullptr; + } + const StridedSliceOptions *builtin_options_as_StridedSliceOptions() const + { + return builtin_options_type() == BuiltinOptions_StridedSliceOptions + ? static_cast<const StridedSliceOptions *>(builtin_options()) + : nullptr; + } + const ExpOptions *builtin_options_as_ExpOptions() const + { + return builtin_options_type() == BuiltinOptions_ExpOptions + ? static_cast<const ExpOptions *>(builtin_options()) + : nullptr; + } + const TopKV2Options *builtin_options_as_TopKV2Options() const + { + return builtin_options_type() == BuiltinOptions_TopKV2Options + ? static_cast<const TopKV2Options *>(builtin_options()) + : nullptr; + } + const SplitOptions *builtin_options_as_SplitOptions() const + { + return builtin_options_type() == BuiltinOptions_SplitOptions + ? 
static_cast<const SplitOptions *>(builtin_options()) + : nullptr; + } + const LogSoftmaxOptions *builtin_options_as_LogSoftmaxOptions() const + { + return builtin_options_type() == BuiltinOptions_LogSoftmaxOptions + ? static_cast<const LogSoftmaxOptions *>(builtin_options()) + : nullptr; + } + const CastOptions *builtin_options_as_CastOptions() const + { + return builtin_options_type() == BuiltinOptions_CastOptions + ? static_cast<const CastOptions *>(builtin_options()) + : nullptr; + } + const DequantizeOptions *builtin_options_as_DequantizeOptions() const + { + return builtin_options_type() == BuiltinOptions_DequantizeOptions + ? static_cast<const DequantizeOptions *>(builtin_options()) + : nullptr; + } + const MaximumMinimumOptions *builtin_options_as_MaximumMinimumOptions() const + { + return builtin_options_type() == BuiltinOptions_MaximumMinimumOptions + ? static_cast<const MaximumMinimumOptions *>(builtin_options()) + : nullptr; + } + const ArgMaxOptions *builtin_options_as_ArgMaxOptions() const + { + return builtin_options_type() == BuiltinOptions_ArgMaxOptions + ? static_cast<const ArgMaxOptions *>(builtin_options()) + : nullptr; + } + const LessOptions *builtin_options_as_LessOptions() const + { + return builtin_options_type() == BuiltinOptions_LessOptions + ? static_cast<const LessOptions *>(builtin_options()) + : nullptr; + } + const NegOptions *builtin_options_as_NegOptions() const + { + return builtin_options_type() == BuiltinOptions_NegOptions + ? static_cast<const NegOptions *>(builtin_options()) + : nullptr; + } + const PadV2Options *builtin_options_as_PadV2Options() const + { + return builtin_options_type() == BuiltinOptions_PadV2Options + ? static_cast<const PadV2Options *>(builtin_options()) + : nullptr; + } + const GreaterOptions *builtin_options_as_GreaterOptions() const + { + return builtin_options_type() == BuiltinOptions_GreaterOptions + ? 
static_cast<const GreaterOptions *>(builtin_options()) + : nullptr; + } + const GreaterEqualOptions *builtin_options_as_GreaterEqualOptions() const + { + return builtin_options_type() == BuiltinOptions_GreaterEqualOptions + ? static_cast<const GreaterEqualOptions *>(builtin_options()) + : nullptr; + } + const LessEqualOptions *builtin_options_as_LessEqualOptions() const + { + return builtin_options_type() == BuiltinOptions_LessEqualOptions + ? static_cast<const LessEqualOptions *>(builtin_options()) + : nullptr; + } + const SelectOptions *builtin_options_as_SelectOptions() const + { + return builtin_options_type() == BuiltinOptions_SelectOptions + ? static_cast<const SelectOptions *>(builtin_options()) + : nullptr; + } + const SliceOptions *builtin_options_as_SliceOptions() const + { + return builtin_options_type() == BuiltinOptions_SliceOptions + ? static_cast<const SliceOptions *>(builtin_options()) + : nullptr; + } + const TransposeConvOptions *builtin_options_as_TransposeConvOptions() const + { + return builtin_options_type() == BuiltinOptions_TransposeConvOptions + ? static_cast<const TransposeConvOptions *>(builtin_options()) + : nullptr; + } + const SparseToDenseOptions *builtin_options_as_SparseToDenseOptions() const + { + return builtin_options_type() == BuiltinOptions_SparseToDenseOptions + ? static_cast<const SparseToDenseOptions *>(builtin_options()) + : nullptr; + } + const TileOptions *builtin_options_as_TileOptions() const + { + return builtin_options_type() == BuiltinOptions_TileOptions + ? static_cast<const TileOptions *>(builtin_options()) + : nullptr; + } + const ExpandDimsOptions *builtin_options_as_ExpandDimsOptions() const + { + return builtin_options_type() == BuiltinOptions_ExpandDimsOptions + ? static_cast<const ExpandDimsOptions *>(builtin_options()) + : nullptr; + } + const EqualOptions *builtin_options_as_EqualOptions() const + { + return builtin_options_type() == BuiltinOptions_EqualOptions + ? 
static_cast<const EqualOptions *>(builtin_options()) + : nullptr; + } + const NotEqualOptions *builtin_options_as_NotEqualOptions() const + { + return builtin_options_type() == BuiltinOptions_NotEqualOptions + ? static_cast<const NotEqualOptions *>(builtin_options()) + : nullptr; + } + const ShapeOptions *builtin_options_as_ShapeOptions() const + { + return builtin_options_type() == BuiltinOptions_ShapeOptions + ? static_cast<const ShapeOptions *>(builtin_options()) + : nullptr; + } + const PowOptions *builtin_options_as_PowOptions() const + { + return builtin_options_type() == BuiltinOptions_PowOptions + ? static_cast<const PowOptions *>(builtin_options()) + : nullptr; + } + const ArgMinOptions *builtin_options_as_ArgMinOptions() const + { + return builtin_options_type() == BuiltinOptions_ArgMinOptions + ? static_cast<const ArgMinOptions *>(builtin_options()) + : nullptr; + } + const FakeQuantOptions *builtin_options_as_FakeQuantOptions() const + { + return builtin_options_type() == BuiltinOptions_FakeQuantOptions + ? static_cast<const FakeQuantOptions *>(builtin_options()) + : nullptr; + } + const PackOptions *builtin_options_as_PackOptions() const + { + return builtin_options_type() == BuiltinOptions_PackOptions + ? static_cast<const PackOptions *>(builtin_options()) + : nullptr; + } + const LogicalOrOptions *builtin_options_as_LogicalOrOptions() const + { + return builtin_options_type() == BuiltinOptions_LogicalOrOptions + ? static_cast<const LogicalOrOptions *>(builtin_options()) + : nullptr; + } + const OneHotOptions *builtin_options_as_OneHotOptions() const + { + return builtin_options_type() == BuiltinOptions_OneHotOptions + ? static_cast<const OneHotOptions *>(builtin_options()) + : nullptr; + } + const LogicalAndOptions *builtin_options_as_LogicalAndOptions() const + { + return builtin_options_type() == BuiltinOptions_LogicalAndOptions + ? 
static_cast<const LogicalAndOptions *>(builtin_options()) + : nullptr; + } + const LogicalNotOptions *builtin_options_as_LogicalNotOptions() const + { + return builtin_options_type() == BuiltinOptions_LogicalNotOptions + ? static_cast<const LogicalNotOptions *>(builtin_options()) + : nullptr; + } + const UnpackOptions *builtin_options_as_UnpackOptions() const + { + return builtin_options_type() == BuiltinOptions_UnpackOptions + ? static_cast<const UnpackOptions *>(builtin_options()) + : nullptr; + } + const FloorDivOptions *builtin_options_as_FloorDivOptions() const + { + return builtin_options_type() == BuiltinOptions_FloorDivOptions + ? static_cast<const FloorDivOptions *>(builtin_options()) + : nullptr; + } + const SquareOptions *builtin_options_as_SquareOptions() const + { + return builtin_options_type() == BuiltinOptions_SquareOptions + ? static_cast<const SquareOptions *>(builtin_options()) + : nullptr; + } + const ZerosLikeOptions *builtin_options_as_ZerosLikeOptions() const + { + return builtin_options_type() == BuiltinOptions_ZerosLikeOptions + ? static_cast<const ZerosLikeOptions *>(builtin_options()) + : nullptr; + } + const FillOptions *builtin_options_as_FillOptions() const + { + return builtin_options_type() == BuiltinOptions_FillOptions + ? static_cast<const FillOptions *>(builtin_options()) + : nullptr; + } + const BidirectionalSequenceLSTMOptions * + builtin_options_as_BidirectionalSequenceLSTMOptions() const + { + return builtin_options_type() == BuiltinOptions_BidirectionalSequenceLSTMOptions + ? static_cast<const BidirectionalSequenceLSTMOptions *>(builtin_options()) + : nullptr; + } + const BidirectionalSequenceRNNOptions *builtin_options_as_BidirectionalSequenceRNNOptions() const + { + return builtin_options_type() == BuiltinOptions_BidirectionalSequenceRNNOptions + ? 
static_cast<const BidirectionalSequenceRNNOptions *>(builtin_options()) + : nullptr; + } + const UnidirectionalSequenceLSTMOptions * + builtin_options_as_UnidirectionalSequenceLSTMOptions() const + { + return builtin_options_type() == BuiltinOptions_UnidirectionalSequenceLSTMOptions + ? static_cast<const UnidirectionalSequenceLSTMOptions *>(builtin_options()) + : nullptr; + } + const FloorModOptions *builtin_options_as_FloorModOptions() const + { + return builtin_options_type() == BuiltinOptions_FloorModOptions + ? static_cast<const FloorModOptions *>(builtin_options()) + : nullptr; + } + const RangeOptions *builtin_options_as_RangeOptions() const + { + return builtin_options_type() == BuiltinOptions_RangeOptions + ? static_cast<const RangeOptions *>(builtin_options()) + : nullptr; + } + const ResizeNearestNeighborOptions *builtin_options_as_ResizeNearestNeighborOptions() const + { + return builtin_options_type() == BuiltinOptions_ResizeNearestNeighborOptions + ? static_cast<const ResizeNearestNeighborOptions *>(builtin_options()) + : nullptr; + } + const LeakyReluOptions *builtin_options_as_LeakyReluOptions() const + { + return builtin_options_type() == BuiltinOptions_LeakyReluOptions + ? static_cast<const LeakyReluOptions *>(builtin_options()) + : nullptr; + } + const SquaredDifferenceOptions *builtin_options_as_SquaredDifferenceOptions() const + { + return builtin_options_type() == BuiltinOptions_SquaredDifferenceOptions + ? static_cast<const SquaredDifferenceOptions *>(builtin_options()) + : nullptr; + } + const MirrorPadOptions *builtin_options_as_MirrorPadOptions() const + { + return builtin_options_type() == BuiltinOptions_MirrorPadOptions + ? static_cast<const MirrorPadOptions *>(builtin_options()) + : nullptr; + } + const AbsOptions *builtin_options_as_AbsOptions() const + { + return builtin_options_type() == BuiltinOptions_AbsOptions + ? 
static_cast<const AbsOptions *>(builtin_options()) + : nullptr; + } + const SplitVOptions *builtin_options_as_SplitVOptions() const + { + return builtin_options_type() == BuiltinOptions_SplitVOptions + ? static_cast<const SplitVOptions *>(builtin_options()) + : nullptr; + } + const UniqueOptions *builtin_options_as_UniqueOptions() const + { + return builtin_options_type() == BuiltinOptions_UniqueOptions + ? static_cast<const UniqueOptions *>(builtin_options()) + : nullptr; + } + const ReverseV2Options *builtin_options_as_ReverseV2Options() const + { + return builtin_options_type() == BuiltinOptions_ReverseV2Options + ? static_cast<const ReverseV2Options *>(builtin_options()) + : nullptr; + } + const AddNOptions *builtin_options_as_AddNOptions() const + { + return builtin_options_type() == BuiltinOptions_AddNOptions + ? static_cast<const AddNOptions *>(builtin_options()) + : nullptr; + } + const GatherNdOptions *builtin_options_as_GatherNdOptions() const + { + return builtin_options_type() == BuiltinOptions_GatherNdOptions + ? static_cast<const GatherNdOptions *>(builtin_options()) + : nullptr; + } + const CosOptions *builtin_options_as_CosOptions() const + { + return builtin_options_type() == BuiltinOptions_CosOptions + ? static_cast<const CosOptions *>(builtin_options()) + : nullptr; + } + const WhereOptions *builtin_options_as_WhereOptions() const + { + return builtin_options_type() == BuiltinOptions_WhereOptions + ? static_cast<const WhereOptions *>(builtin_options()) + : nullptr; + } + const RankOptions *builtin_options_as_RankOptions() const + { + return builtin_options_type() == BuiltinOptions_RankOptions + ? static_cast<const RankOptions *>(builtin_options()) + : nullptr; + } + const ReverseSequenceOptions *builtin_options_as_ReverseSequenceOptions() const + { + return builtin_options_type() == BuiltinOptions_ReverseSequenceOptions + ? 
static_cast<const ReverseSequenceOptions *>(builtin_options()) + : nullptr; + } + const MatrixDiagOptions *builtin_options_as_MatrixDiagOptions() const + { + return builtin_options_type() == BuiltinOptions_MatrixDiagOptions + ? static_cast<const MatrixDiagOptions *>(builtin_options()) + : nullptr; + } + const QuantizeOptions *builtin_options_as_QuantizeOptions() const + { + return builtin_options_type() == BuiltinOptions_QuantizeOptions + ? static_cast<const QuantizeOptions *>(builtin_options()) + : nullptr; + } + const MatrixSetDiagOptions *builtin_options_as_MatrixSetDiagOptions() const + { + return builtin_options_type() == BuiltinOptions_MatrixSetDiagOptions + ? static_cast<const MatrixSetDiagOptions *>(builtin_options()) + : nullptr; + } + const HardSwishOptions *builtin_options_as_HardSwishOptions() const + { + return builtin_options_type() == BuiltinOptions_HardSwishOptions + ? static_cast<const HardSwishOptions *>(builtin_options()) + : nullptr; + } + const IfOptions *builtin_options_as_IfOptions() const + { + return builtin_options_type() == BuiltinOptions_IfOptions + ? static_cast<const IfOptions *>(builtin_options()) + : nullptr; + } + const WhileOptions *builtin_options_as_WhileOptions() const + { + return builtin_options_type() == BuiltinOptions_WhileOptions + ? static_cast<const WhileOptions *>(builtin_options()) + : nullptr; + } + const DepthToSpaceOptions *builtin_options_as_DepthToSpaceOptions() const + { + return builtin_options_type() == BuiltinOptions_DepthToSpaceOptions + ? static_cast<const DepthToSpaceOptions *>(builtin_options()) + : nullptr; + } + const NonMaxSuppressionV4Options *builtin_options_as_NonMaxSuppressionV4Options() const + { + return builtin_options_type() == BuiltinOptions_NonMaxSuppressionV4Options + ? 
static_cast<const NonMaxSuppressionV4Options *>(builtin_options()) + : nullptr; + } + const NonMaxSuppressionV5Options *builtin_options_as_NonMaxSuppressionV5Options() const + { + return builtin_options_type() == BuiltinOptions_NonMaxSuppressionV5Options + ? static_cast<const NonMaxSuppressionV5Options *>(builtin_options()) + : nullptr; + } + const ScatterNdOptions *builtin_options_as_ScatterNdOptions() const + { + return builtin_options_type() == BuiltinOptions_ScatterNdOptions + ? static_cast<const ScatterNdOptions *>(builtin_options()) + : nullptr; + } + const SelectV2Options *builtin_options_as_SelectV2Options() const + { + return builtin_options_type() == BuiltinOptions_SelectV2Options + ? static_cast<const SelectV2Options *>(builtin_options()) + : nullptr; + } + const DensifyOptions *builtin_options_as_DensifyOptions() const + { + return builtin_options_type() == BuiltinOptions_DensifyOptions + ? static_cast<const DensifyOptions *>(builtin_options()) + : nullptr; + } + const SegmentSumOptions *builtin_options_as_SegmentSumOptions() const + { + return builtin_options_type() == BuiltinOptions_SegmentSumOptions + ? static_cast<const SegmentSumOptions *>(builtin_options()) + : nullptr; + } + const BatchMatMulOptions *builtin_options_as_BatchMatMulOptions() const + { + return builtin_options_type() == BuiltinOptions_BatchMatMulOptions + ? 
static_cast<const BatchMatMulOptions *>(builtin_options()) + : nullptr; + } + const flatbuffers::Vector<uint8_t> *custom_options() const + { + return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_CUSTOM_OPTIONS); + } + CustomOptionsFormat custom_options_format() const + { + return static_cast<CustomOptionsFormat>(GetField<int8_t>(VT_CUSTOM_OPTIONS_FORMAT, 0)); + } + const flatbuffers::Vector<uint8_t> *mutating_variable_inputs() const + { + return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_MUTATING_VARIABLE_INPUTS); + } + const flatbuffers::Vector<int32_t> *intermediates() const + { + return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_INTERMEDIATES); + } + bool Verify(flatbuffers::Verifier &verifier) const + { + return VerifyTableStart(verifier) && VerifyField<uint32_t>(verifier, VT_OPCODE_INDEX) && + VerifyOffset(verifier, VT_INPUTS) && verifier.VerifyVector(inputs()) && + VerifyOffset(verifier, VT_OUTPUTS) && verifier.VerifyVector(outputs()) && + VerifyField<uint8_t>(verifier, VT_BUILTIN_OPTIONS_TYPE) && + VerifyOffset(verifier, VT_BUILTIN_OPTIONS) && + VerifyBuiltinOptions(verifier, builtin_options(), builtin_options_type()) && + VerifyOffset(verifier, VT_CUSTOM_OPTIONS) && verifier.VerifyVector(custom_options()) && + VerifyField<int8_t>(verifier, VT_CUSTOM_OPTIONS_FORMAT) && + VerifyOffset(verifier, VT_MUTATING_VARIABLE_INPUTS) && + verifier.VerifyVector(mutating_variable_inputs()) && + VerifyOffset(verifier, VT_INTERMEDIATES) && verifier.VerifyVector(intermediates()) && + verifier.EndTable(); + } +}; + +template <> inline const Conv2DOptions *Operator::builtin_options_as<Conv2DOptions>() const +{ + return builtin_options_as_Conv2DOptions(); +} + +template <> +inline const DepthwiseConv2DOptions *Operator::builtin_options_as<DepthwiseConv2DOptions>() const +{ + return builtin_options_as_DepthwiseConv2DOptions(); +} + +template <> +inline const ConcatEmbeddingsOptions *Operator::builtin_options_as<ConcatEmbeddingsOptions>() const +{ + 
return builtin_options_as_ConcatEmbeddingsOptions(); +} + +template <> +inline const LSHProjectionOptions *Operator::builtin_options_as<LSHProjectionOptions>() const +{ + return builtin_options_as_LSHProjectionOptions(); +} + +template <> inline const Pool2DOptions *Operator::builtin_options_as<Pool2DOptions>() const +{ + return builtin_options_as_Pool2DOptions(); +} + +template <> inline const SVDFOptions *Operator::builtin_options_as<SVDFOptions>() const +{ + return builtin_options_as_SVDFOptions(); +} + +template <> inline const RNNOptions *Operator::builtin_options_as<RNNOptions>() const +{ + return builtin_options_as_RNNOptions(); +} + +template <> +inline const FullyConnectedOptions *Operator::builtin_options_as<FullyConnectedOptions>() const +{ + return builtin_options_as_FullyConnectedOptions(); +} + +template <> inline const SoftmaxOptions *Operator::builtin_options_as<SoftmaxOptions>() const +{ + return builtin_options_as_SoftmaxOptions(); +} + +template <> +inline const ConcatenationOptions *Operator::builtin_options_as<ConcatenationOptions>() const +{ + return builtin_options_as_ConcatenationOptions(); +} + +template <> inline const AddOptions *Operator::builtin_options_as<AddOptions>() const +{ + return builtin_options_as_AddOptions(); +} + +template <> inline const L2NormOptions *Operator::builtin_options_as<L2NormOptions>() const +{ + return builtin_options_as_L2NormOptions(); +} + +template <> +inline const LocalResponseNormalizationOptions * +Operator::builtin_options_as<LocalResponseNormalizationOptions>() const +{ + return builtin_options_as_LocalResponseNormalizationOptions(); +} + +template <> inline const LSTMOptions *Operator::builtin_options_as<LSTMOptions>() const +{ + return builtin_options_as_LSTMOptions(); +} + +template <> +inline const ResizeBilinearOptions *Operator::builtin_options_as<ResizeBilinearOptions>() const +{ + return builtin_options_as_ResizeBilinearOptions(); +} + +template <> inline const CallOptions 
*Operator::builtin_options_as<CallOptions>() const +{ + return builtin_options_as_CallOptions(); +} + +template <> inline const ReshapeOptions *Operator::builtin_options_as<ReshapeOptions>() const +{ + return builtin_options_as_ReshapeOptions(); +} + +template <> inline const SkipGramOptions *Operator::builtin_options_as<SkipGramOptions>() const +{ + return builtin_options_as_SkipGramOptions(); +} + +template <> +inline const SpaceToDepthOptions *Operator::builtin_options_as<SpaceToDepthOptions>() const +{ + return builtin_options_as_SpaceToDepthOptions(); +} + +template <> +inline const EmbeddingLookupSparseOptions * +Operator::builtin_options_as<EmbeddingLookupSparseOptions>() const +{ + return builtin_options_as_EmbeddingLookupSparseOptions(); +} + +template <> inline const MulOptions *Operator::builtin_options_as<MulOptions>() const +{ + return builtin_options_as_MulOptions(); +} + +template <> inline const PadOptions *Operator::builtin_options_as<PadOptions>() const +{ + return builtin_options_as_PadOptions(); +} + +template <> inline const GatherOptions *Operator::builtin_options_as<GatherOptions>() const +{ + return builtin_options_as_GatherOptions(); +} + +template <> +inline const BatchToSpaceNDOptions *Operator::builtin_options_as<BatchToSpaceNDOptions>() const +{ + return builtin_options_as_BatchToSpaceNDOptions(); +} + +template <> +inline const SpaceToBatchNDOptions *Operator::builtin_options_as<SpaceToBatchNDOptions>() const +{ + return builtin_options_as_SpaceToBatchNDOptions(); +} + +template <> inline const TransposeOptions *Operator::builtin_options_as<TransposeOptions>() const +{ + return builtin_options_as_TransposeOptions(); +} + +template <> inline const ReducerOptions *Operator::builtin_options_as<ReducerOptions>() const +{ + return builtin_options_as_ReducerOptions(); +} + +template <> inline const SubOptions *Operator::builtin_options_as<SubOptions>() const +{ + return builtin_options_as_SubOptions(); +} + +template <> inline const 
DivOptions *Operator::builtin_options_as<DivOptions>() const +{ + return builtin_options_as_DivOptions(); +} + +template <> inline const SqueezeOptions *Operator::builtin_options_as<SqueezeOptions>() const +{ + return builtin_options_as_SqueezeOptions(); +} + +template <> +inline const SequenceRNNOptions *Operator::builtin_options_as<SequenceRNNOptions>() const +{ + return builtin_options_as_SequenceRNNOptions(); +} + +template <> +inline const StridedSliceOptions *Operator::builtin_options_as<StridedSliceOptions>() const +{ + return builtin_options_as_StridedSliceOptions(); +} + +template <> inline const ExpOptions *Operator::builtin_options_as<ExpOptions>() const +{ + return builtin_options_as_ExpOptions(); +} + +template <> inline const TopKV2Options *Operator::builtin_options_as<TopKV2Options>() const +{ + return builtin_options_as_TopKV2Options(); +} + +template <> inline const SplitOptions *Operator::builtin_options_as<SplitOptions>() const +{ + return builtin_options_as_SplitOptions(); +} + +template <> inline const LogSoftmaxOptions *Operator::builtin_options_as<LogSoftmaxOptions>() const +{ + return builtin_options_as_LogSoftmaxOptions(); +} + +template <> inline const CastOptions *Operator::builtin_options_as<CastOptions>() const +{ + return builtin_options_as_CastOptions(); +} + +template <> inline const DequantizeOptions *Operator::builtin_options_as<DequantizeOptions>() const +{ + return builtin_options_as_DequantizeOptions(); +} + +template <> +inline const MaximumMinimumOptions *Operator::builtin_options_as<MaximumMinimumOptions>() const +{ + return builtin_options_as_MaximumMinimumOptions(); +} + +template <> inline const ArgMaxOptions *Operator::builtin_options_as<ArgMaxOptions>() const +{ + return builtin_options_as_ArgMaxOptions(); +} + +template <> inline const LessOptions *Operator::builtin_options_as<LessOptions>() const +{ + return builtin_options_as_LessOptions(); +} + +template <> inline const NegOptions 
*Operator::builtin_options_as<NegOptions>() const +{ + return builtin_options_as_NegOptions(); +} + +template <> inline const PadV2Options *Operator::builtin_options_as<PadV2Options>() const +{ + return builtin_options_as_PadV2Options(); +} + +template <> inline const GreaterOptions *Operator::builtin_options_as<GreaterOptions>() const +{ + return builtin_options_as_GreaterOptions(); +} + +template <> +inline const GreaterEqualOptions *Operator::builtin_options_as<GreaterEqualOptions>() const +{ + return builtin_options_as_GreaterEqualOptions(); +} + +template <> inline const LessEqualOptions *Operator::builtin_options_as<LessEqualOptions>() const +{ + return builtin_options_as_LessEqualOptions(); +} + +template <> inline const SelectOptions *Operator::builtin_options_as<SelectOptions>() const +{ + return builtin_options_as_SelectOptions(); +} + +template <> inline const SliceOptions *Operator::builtin_options_as<SliceOptions>() const +{ + return builtin_options_as_SliceOptions(); +} + +template <> +inline const TransposeConvOptions *Operator::builtin_options_as<TransposeConvOptions>() const +{ + return builtin_options_as_TransposeConvOptions(); +} + +template <> +inline const SparseToDenseOptions *Operator::builtin_options_as<SparseToDenseOptions>() const +{ + return builtin_options_as_SparseToDenseOptions(); +} + +template <> inline const TileOptions *Operator::builtin_options_as<TileOptions>() const +{ + return builtin_options_as_TileOptions(); +} + +template <> inline const ExpandDimsOptions *Operator::builtin_options_as<ExpandDimsOptions>() const +{ + return builtin_options_as_ExpandDimsOptions(); +} + +template <> inline const EqualOptions *Operator::builtin_options_as<EqualOptions>() const +{ + return builtin_options_as_EqualOptions(); +} + +template <> inline const NotEqualOptions *Operator::builtin_options_as<NotEqualOptions>() const +{ + return builtin_options_as_NotEqualOptions(); +} + +template <> inline const ShapeOptions 
*Operator::builtin_options_as<ShapeOptions>() const +{ + return builtin_options_as_ShapeOptions(); +} + +template <> inline const PowOptions *Operator::builtin_options_as<PowOptions>() const +{ + return builtin_options_as_PowOptions(); +} + +template <> inline const ArgMinOptions *Operator::builtin_options_as<ArgMinOptions>() const +{ + return builtin_options_as_ArgMinOptions(); +} + +template <> inline const FakeQuantOptions *Operator::builtin_options_as<FakeQuantOptions>() const +{ + return builtin_options_as_FakeQuantOptions(); +} + +template <> inline const PackOptions *Operator::builtin_options_as<PackOptions>() const +{ + return builtin_options_as_PackOptions(); +} + +template <> inline const LogicalOrOptions *Operator::builtin_options_as<LogicalOrOptions>() const +{ + return builtin_options_as_LogicalOrOptions(); +} + +template <> inline const OneHotOptions *Operator::builtin_options_as<OneHotOptions>() const +{ + return builtin_options_as_OneHotOptions(); +} + +template <> inline const LogicalAndOptions *Operator::builtin_options_as<LogicalAndOptions>() const +{ + return builtin_options_as_LogicalAndOptions(); +} + +template <> inline const LogicalNotOptions *Operator::builtin_options_as<LogicalNotOptions>() const +{ + return builtin_options_as_LogicalNotOptions(); +} + +template <> inline const UnpackOptions *Operator::builtin_options_as<UnpackOptions>() const +{ + return builtin_options_as_UnpackOptions(); +} + +template <> inline const FloorDivOptions *Operator::builtin_options_as<FloorDivOptions>() const +{ + return builtin_options_as_FloorDivOptions(); +} + +template <> inline const SquareOptions *Operator::builtin_options_as<SquareOptions>() const +{ + return builtin_options_as_SquareOptions(); +} + +template <> inline const ZerosLikeOptions *Operator::builtin_options_as<ZerosLikeOptions>() const +{ + return builtin_options_as_ZerosLikeOptions(); +} + +template <> inline const FillOptions *Operator::builtin_options_as<FillOptions>() const +{ + return 
builtin_options_as_FillOptions(); +} + +template <> +inline const BidirectionalSequenceLSTMOptions * +Operator::builtin_options_as<BidirectionalSequenceLSTMOptions>() const +{ + return builtin_options_as_BidirectionalSequenceLSTMOptions(); +} + +template <> +inline const BidirectionalSequenceRNNOptions * +Operator::builtin_options_as<BidirectionalSequenceRNNOptions>() const +{ + return builtin_options_as_BidirectionalSequenceRNNOptions(); +} + +template <> +inline const UnidirectionalSequenceLSTMOptions * +Operator::builtin_options_as<UnidirectionalSequenceLSTMOptions>() const +{ + return builtin_options_as_UnidirectionalSequenceLSTMOptions(); +} + +template <> inline const FloorModOptions *Operator::builtin_options_as<FloorModOptions>() const +{ + return builtin_options_as_FloorModOptions(); +} + +template <> inline const RangeOptions *Operator::builtin_options_as<RangeOptions>() const +{ + return builtin_options_as_RangeOptions(); +} + +template <> +inline const ResizeNearestNeighborOptions * +Operator::builtin_options_as<ResizeNearestNeighborOptions>() const +{ + return builtin_options_as_ResizeNearestNeighborOptions(); +} + +template <> inline const LeakyReluOptions *Operator::builtin_options_as<LeakyReluOptions>() const +{ + return builtin_options_as_LeakyReluOptions(); +} + +template <> +inline const SquaredDifferenceOptions * +Operator::builtin_options_as<SquaredDifferenceOptions>() const +{ + return builtin_options_as_SquaredDifferenceOptions(); +} + +template <> inline const MirrorPadOptions *Operator::builtin_options_as<MirrorPadOptions>() const +{ + return builtin_options_as_MirrorPadOptions(); +} + +template <> inline const AbsOptions *Operator::builtin_options_as<AbsOptions>() const +{ + return builtin_options_as_AbsOptions(); +} + +template <> inline const SplitVOptions *Operator::builtin_options_as<SplitVOptions>() const +{ + return builtin_options_as_SplitVOptions(); +} + +template <> inline const UniqueOptions 
*Operator::builtin_options_as<UniqueOptions>() const +{ + return builtin_options_as_UniqueOptions(); +} + +template <> inline const ReverseV2Options *Operator::builtin_options_as<ReverseV2Options>() const +{ + return builtin_options_as_ReverseV2Options(); +} + +template <> inline const AddNOptions *Operator::builtin_options_as<AddNOptions>() const +{ + return builtin_options_as_AddNOptions(); +} + +template <> inline const GatherNdOptions *Operator::builtin_options_as<GatherNdOptions>() const +{ + return builtin_options_as_GatherNdOptions(); +} + +template <> inline const CosOptions *Operator::builtin_options_as<CosOptions>() const +{ + return builtin_options_as_CosOptions(); +} + +template <> inline const WhereOptions *Operator::builtin_options_as<WhereOptions>() const +{ + return builtin_options_as_WhereOptions(); +} + +template <> inline const RankOptions *Operator::builtin_options_as<RankOptions>() const +{ + return builtin_options_as_RankOptions(); +} + +template <> +inline const ReverseSequenceOptions *Operator::builtin_options_as<ReverseSequenceOptions>() const +{ + return builtin_options_as_ReverseSequenceOptions(); +} + +template <> inline const MatrixDiagOptions *Operator::builtin_options_as<MatrixDiagOptions>() const +{ + return builtin_options_as_MatrixDiagOptions(); +} + +template <> inline const QuantizeOptions *Operator::builtin_options_as<QuantizeOptions>() const +{ + return builtin_options_as_QuantizeOptions(); +} + +template <> +inline const MatrixSetDiagOptions *Operator::builtin_options_as<MatrixSetDiagOptions>() const +{ + return builtin_options_as_MatrixSetDiagOptions(); +} + +template <> inline const HardSwishOptions *Operator::builtin_options_as<HardSwishOptions>() const +{ + return builtin_options_as_HardSwishOptions(); +} + +template <> inline const IfOptions *Operator::builtin_options_as<IfOptions>() const +{ + return builtin_options_as_IfOptions(); +} + +template <> inline const WhileOptions *Operator::builtin_options_as<WhileOptions>() 
const +{ + return builtin_options_as_WhileOptions(); +} + +template <> +inline const DepthToSpaceOptions *Operator::builtin_options_as<DepthToSpaceOptions>() const +{ + return builtin_options_as_DepthToSpaceOptions(); +} + +template <> +inline const NonMaxSuppressionV4Options * +Operator::builtin_options_as<NonMaxSuppressionV4Options>() const +{ + return builtin_options_as_NonMaxSuppressionV4Options(); +} + +template <> +inline const NonMaxSuppressionV5Options * +Operator::builtin_options_as<NonMaxSuppressionV5Options>() const +{ + return builtin_options_as_NonMaxSuppressionV5Options(); +} + +template <> inline const ScatterNdOptions *Operator::builtin_options_as<ScatterNdOptions>() const +{ + return builtin_options_as_ScatterNdOptions(); +} + +template <> inline const SelectV2Options *Operator::builtin_options_as<SelectV2Options>() const +{ + return builtin_options_as_SelectV2Options(); +} + +template <> inline const DensifyOptions *Operator::builtin_options_as<DensifyOptions>() const +{ + return builtin_options_as_DensifyOptions(); +} + +template <> inline const SegmentSumOptions *Operator::builtin_options_as<SegmentSumOptions>() const +{ + return builtin_options_as_SegmentSumOptions(); +} + +template <> +inline const BatchMatMulOptions *Operator::builtin_options_as<BatchMatMulOptions>() const +{ + return builtin_options_as_BatchMatMulOptions(); +} + +struct OperatorBuilder +{ + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_opcode_index(uint32_t opcode_index) + { + fbb_.AddElement<uint32_t>(Operator::VT_OPCODE_INDEX, opcode_index, 0); + } + void add_inputs(flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputs) + { + fbb_.AddOffset(Operator::VT_INPUTS, inputs); + } + void add_outputs(flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputs) + { + fbb_.AddOffset(Operator::VT_OUTPUTS, outputs); + } + void add_builtin_options_type(BuiltinOptions builtin_options_type) + { + 
fbb_.AddElement<uint8_t>(Operator::VT_BUILTIN_OPTIONS_TYPE, + static_cast<uint8_t>(builtin_options_type), 0); + } + void add_builtin_options(flatbuffers::Offset<void> builtin_options) + { + fbb_.AddOffset(Operator::VT_BUILTIN_OPTIONS, builtin_options); + } + void add_custom_options(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom_options) + { + fbb_.AddOffset(Operator::VT_CUSTOM_OPTIONS, custom_options); + } + void add_custom_options_format(CustomOptionsFormat custom_options_format) + { + fbb_.AddElement<int8_t>(Operator::VT_CUSTOM_OPTIONS_FORMAT, + static_cast<int8_t>(custom_options_format), 0); + } + void add_mutating_variable_inputs( + flatbuffers::Offset<flatbuffers::Vector<uint8_t>> mutating_variable_inputs) + { + fbb_.AddOffset(Operator::VT_MUTATING_VARIABLE_INPUTS, mutating_variable_inputs); + } + void add_intermediates(flatbuffers::Offset<flatbuffers::Vector<int32_t>> intermediates) + { + fbb_.AddOffset(Operator::VT_INTERMEDIATES, intermediates); + } + explicit OperatorBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) + { + start_ = fbb_.StartTable(); + } + OperatorBuilder &operator=(const OperatorBuilder &); + flatbuffers::Offset<Operator> Finish() + { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset<Operator>(end); + return o; + } +}; + +inline flatbuffers::Offset<Operator> +CreateOperator(flatbuffers::FlatBufferBuilder &_fbb, uint32_t opcode_index = 0, + flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputs = 0, + flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputs = 0, + BuiltinOptions builtin_options_type = BuiltinOptions_NONE, + flatbuffers::Offset<void> builtin_options = 0, + flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom_options = 0, + CustomOptionsFormat custom_options_format = CustomOptionsFormat_FLEXBUFFERS, + flatbuffers::Offset<flatbuffers::Vector<uint8_t>> mutating_variable_inputs = 0, + flatbuffers::Offset<flatbuffers::Vector<int32_t>> intermediates = 0) +{ + OperatorBuilder 
builder_(_fbb); + builder_.add_intermediates(intermediates); + builder_.add_mutating_variable_inputs(mutating_variable_inputs); + builder_.add_custom_options(custom_options); + builder_.add_builtin_options(builtin_options); + builder_.add_outputs(outputs); + builder_.add_inputs(inputs); + builder_.add_opcode_index(opcode_index); + builder_.add_custom_options_format(custom_options_format); + builder_.add_builtin_options_type(builtin_options_type); + return builder_.Finish(); +} + +inline flatbuffers::Offset<Operator> +CreateOperatorDirect(flatbuffers::FlatBufferBuilder &_fbb, uint32_t opcode_index = 0, + const std::vector<int32_t> *inputs = nullptr, + const std::vector<int32_t> *outputs = nullptr, + BuiltinOptions builtin_options_type = BuiltinOptions_NONE, + flatbuffers::Offset<void> builtin_options = 0, + const std::vector<uint8_t> *custom_options = nullptr, + CustomOptionsFormat custom_options_format = CustomOptionsFormat_FLEXBUFFERS, + const std::vector<uint8_t> *mutating_variable_inputs = nullptr, + const std::vector<int32_t> *intermediates = nullptr) +{ + return onert_tflite::CreateOperator( + _fbb, opcode_index, inputs ? _fbb.CreateVector<int32_t>(*inputs) : 0, + outputs ? _fbb.CreateVector<int32_t>(*outputs) : 0, builtin_options_type, builtin_options, + custom_options ? _fbb.CreateVector<uint8_t>(*custom_options) : 0, custom_options_format, + mutating_variable_inputs ? _fbb.CreateVector<uint8_t>(*mutating_variable_inputs) : 0, + intermediates ? 
_fbb.CreateVector<int32_t>(*intermediates) : 0);
}

// Read-only accessor for a SubGraph table (flatc-generated): tensors,
// inputs()/outputs() (int32 index vectors; per the TFLite schema these index
// into tensors() — confirm against the schema), operators, and optional name.
struct SubGraph FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  // vtable slots for this table's fields
  enum
  {
    VT_TENSORS = 4,
    VT_INPUTS = 6,
    VT_OUTPUTS = 8,
    VT_OPERATORS = 10,
    VT_NAME = 12
  };
  const flatbuffers::Vector<flatbuffers::Offset<Tensor>> *tensors() const
  {
    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<Tensor>> *>(VT_TENSORS);
  }
  const flatbuffers::Vector<int32_t> *inputs() const
  {
    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_INPUTS);
  }
  const flatbuffers::Vector<int32_t> *outputs() const
  {
    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_OUTPUTS);
  }
  const flatbuffers::Vector<flatbuffers::Offset<Operator>> *operators() const
  {
    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<Operator>> *>(VT_OPERATORS);
  }
  const flatbuffers::String *name() const
  {
    return GetPointer<const flatbuffers::String *>(VT_NAME);
  }
  // Structural verification of this table against the raw buffer: checks each
  // field offset, the vectors, the contained tables, and the name string.
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_TENSORS) &&
           verifier.VerifyVector(tensors()) && verifier.VerifyVectorOfTables(tensors()) &&
           VerifyOffset(verifier, VT_INPUTS) && verifier.VerifyVector(inputs()) &&
           VerifyOffset(verifier, VT_OUTPUTS) && verifier.VerifyVector(outputs()) &&
           VerifyOffset(verifier, VT_OPERATORS) && verifier.VerifyVector(operators()) &&
           verifier.VerifyVectorOfTables(operators()) && VerifyOffset(verifier, VT_NAME) &&
           verifier.VerifyString(name()) && verifier.EndTable();
  }
};

// Incremental builder for a SubGraph table; same usage pattern as the other
// flatc-generated builders (add_*() then Finish()).
struct SubGraphBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_; // builder that owns the buffer being written
  flatbuffers::uoffset_t start_;        // table start, recorded by StartTable() in the ctor
  void add_tensors(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Tensor>>> tensors)
  {
    fbb_.AddOffset(SubGraph::VT_TENSORS, tensors);
  }
  void add_inputs(flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputs)
  {
    fbb_.AddOffset(SubGraph::VT_INPUTS, inputs);
  }
  void add_outputs(flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputs)
  {
    fbb_.AddOffset(SubGraph::VT_OUTPUTS, outputs);
  }
  void
  add_operators(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Operator>>> operators)
  {
    fbb_.AddOffset(SubGraph::VT_OPERATORS, operators);
  }
  void add_name(flatbuffers::Offset<flatbuffers::String> name)
  {
    fbb_.AddOffset(SubGraph::VT_NAME, name);
  }
  explicit SubGraphBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Intentionally declared but not defined: builders are non-assignable.
  SubGraphBuilder &operator=(const SubGraphBuilder &);
  // Ends the table and returns its offset within the buffer.
  flatbuffers::Offset<SubGraph> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<SubGraph>(end);
    return o;
  }
};

// Convenience wrapper that builds a complete SubGraph table in one call.
inline flatbuffers::Offset<SubGraph> CreateSubGraph(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Tensor>>> tensors = 0,
    flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputs = 0,
    flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputs = 0,
    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Operator>>> operators = 0,
    flatbuffers::Offset<flatbuffers::String> name = 0)
{
  SubGraphBuilder builder_(_fbb);
  builder_.add_name(name);
  builder_.add_operators(operators);
  builder_.add_outputs(outputs);
  builder_.add_inputs(inputs);
  builder_.add_tensors(tensors);
  return builder_.Finish();
}

// Variant taking std::vectors / C strings: serializes each non-null argument
// into the builder first; a null pointer becomes an absent (0) field.
inline flatbuffers::Offset<SubGraph>
CreateSubGraphDirect(flatbuffers::FlatBufferBuilder &_fbb,
                     const std::vector<flatbuffers::Offset<Tensor>> *tensors = nullptr,
                     const std::vector<int32_t> *inputs = nullptr,
                     const std::vector<int32_t> *outputs = nullptr,
                     const std::vector<flatbuffers::Offset<Operator>> *operators = nullptr,
                     const char *name = nullptr)
{
  return onert_tflite::CreateSubGraph(
      _fbb, tensors ? _fbb.CreateVector<flatbuffers::Offset<Tensor>>(*tensors) : 0,
      inputs ? _fbb.CreateVector<int32_t>(*inputs) : 0,
      outputs ?
      _fbb.CreateVector<int32_t>(*outputs) : 0,
      operators ? _fbb.CreateVector<flatbuffers::Offset<Operator>>(*operators) : 0,
      name ? _fbb.CreateString(name) : 0);
}

// Generated table holding one raw byte buffer.  Per the schema, tensors
// refer to entries of Model.buffers by index, with index 0 reserved for an
// always-present empty buffer.
struct Buffer FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  enum
  {
    VT_DATA = 4
  };
  const flatbuffers::Vector<uint8_t> *data() const
  {
    return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_DATA);
  }
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_DATA) &&
           verifier.VerifyVector(data()) && verifier.EndTable();
  }
};

// Incremental builder for Buffer tables.
struct BufferBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_; // table start recorded by StartTable()
  void add_data(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data)
  {
    fbb_.AddOffset(Buffer::VT_DATA, data);
  }
  explicit BufferBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Copy-assignment declared but never defined: generated code disables it.
  BufferBuilder &operator=(const BufferBuilder &);
  flatbuffers::Offset<Buffer> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<Buffer>(end);
    return o;
  }
};

// Builds a Buffer table from an already-serialized byte vector.
inline flatbuffers::Offset<Buffer>
CreateBuffer(flatbuffers::FlatBufferBuilder &_fbb,
             flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data = 0)
{
  BufferBuilder builder_(_fbb);
  builder_.add_data(data);
  return builder_.Finish();
}

// Convenience overload taking a std::vector; nullptr yields an absent field.
inline flatbuffers::Offset<Buffer> CreateBufferDirect(flatbuffers::FlatBufferBuilder &_fbb,
                                                      const std::vector<uint8_t> *data = nullptr)
{
  return onert_tflite::CreateBuffer(_fbb, data ? _fbb.CreateVector<uint8_t>(*data) : 0);
}

// Generated table for a named metadata entry.  `buffer` is presumably an
// index into Model.buffers — schema comment not visible here; confirm
// against the .fbs.
struct Metadata FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  enum
  {
    VT_NAME = 4,
    VT_BUFFER = 6
  };
  const flatbuffers::String *name() const
  {
    return GetPointer<const flatbuffers::String *>(VT_NAME);
  }
  // Scalar field; defaults to 0 when absent.
  uint32_t buffer() const { return GetField<uint32_t>(VT_BUFFER, 0); }
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_NAME) &&
           verifier.VerifyString(name()) && VerifyField<uint32_t>(verifier, VT_BUFFER) &&
           verifier.EndTable();
  }
};

// Incremental builder for Metadata tables.
struct MetadataBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_; // table start recorded by StartTable()
  void add_name(flatbuffers::Offset<flatbuffers::String> name)
  {
    fbb_.AddOffset(Metadata::VT_NAME, name);
  }
  void add_buffer(uint32_t buffer) { fbb_.AddElement<uint32_t>(Metadata::VT_BUFFER, buffer, 0); }
  explicit MetadataBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Copy-assignment declared but never defined: generated code disables it.
  MetadataBuilder &operator=(const MetadataBuilder &);
  flatbuffers::Offset<Metadata> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<Metadata>(end);
    return o;
  }
};

// Builds a Metadata table from an already-serialized name offset.
inline flatbuffers::Offset<Metadata>
CreateMetadata(flatbuffers::FlatBufferBuilder &_fbb,
               flatbuffers::Offset<flatbuffers::String> name = 0, uint32_t buffer = 0)
{
  MetadataBuilder builder_(_fbb);
  builder_.add_buffer(buffer);
  builder_.add_name(name);
  return builder_.Finish();
}

// Convenience overload taking a C string; nullptr yields an absent field.
inline flatbuffers::Offset<Metadata> CreateMetadataDirect(flatbuffers::FlatBufferBuilder &_fbb,
                                                          const char *name = nullptr,
                                                          uint32_t buffer = 0)
{
  return onert_tflite::CreateMetadata(_fbb, name ?
      _fbb.CreateString(name) : 0, buffer);
}

// Generated root table of a TFLite flatbuffer: schema version, the
// operator-code dictionary, subgraphs, constant data buffers and optional
// metadata.  (flatc-generated code — do not hand-edit.)
struct Model FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  // Vtable byte offsets of each field within the table.
  enum
  {
    VT_VERSION = 4,
    VT_OPERATOR_CODES = 6,
    VT_SUBGRAPHS = 8,
    VT_DESCRIPTION = 10,
    VT_BUFFERS = 12,
    VT_METADATA_BUFFER = 14,
    VT_METADATA = 16
  };
  // Schema version; defaults to 0 when absent.
  uint32_t version() const { return GetField<uint32_t>(VT_VERSION, 0); }
  // Operator codes referenced by Operator.opcode_index.
  const flatbuffers::Vector<flatbuffers::Offset<OperatorCode>> *operator_codes() const
  {
    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<OperatorCode>> *>(
        VT_OPERATOR_CODES);
  }
  const flatbuffers::Vector<flatbuffers::Offset<SubGraph>> *subgraphs() const
  {
    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<SubGraph>> *>(VT_SUBGRAPHS);
  }
  // Optional description string.
  const flatbuffers::String *description() const
  {
    return GetPointer<const flatbuffers::String *>(VT_DESCRIPTION);
  }
  // Constant data buffers; Tensor.buffer indexes into this vector.
  const flatbuffers::Vector<flatbuffers::Offset<Buffer>> *buffers() const
  {
    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<Buffer>> *>(VT_BUFFERS);
  }
  const flatbuffers::Vector<int32_t> *metadata_buffer() const
  {
    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_METADATA_BUFFER);
  }
  const flatbuffers::Vector<flatbuffers::Offset<Metadata>> *metadata() const
  {
    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<Metadata>> *>(VT_METADATA);
  }
  // Structurally validates the whole root table against the raw buffer.
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && VerifyField<uint32_t>(verifier, VT_VERSION) &&
           VerifyOffset(verifier, VT_OPERATOR_CODES) && verifier.VerifyVector(operator_codes()) &&
           verifier.VerifyVectorOfTables(operator_codes()) &&
           VerifyOffset(verifier, VT_SUBGRAPHS) && verifier.VerifyVector(subgraphs()) &&
           verifier.VerifyVectorOfTables(subgraphs()) && VerifyOffset(verifier, VT_DESCRIPTION) &&
           verifier.VerifyString(description()) && VerifyOffset(verifier, VT_BUFFERS) &&
           verifier.VerifyVector(buffers()) && verifier.VerifyVectorOfTables(buffers()) &&
           VerifyOffset(verifier, VT_METADATA_BUFFER) && verifier.VerifyVector(metadata_buffer()) &&
           VerifyOffset(verifier, VT_METADATA) && verifier.VerifyVector(metadata()) &&
           verifier.VerifyVectorOfTables(metadata()) && verifier.EndTable();
  }
};

// Incremental builder for Model tables: stage fields with add_*, then call
// Finish() exactly once.
struct ModelBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_; // table start recorded by StartTable()
  void add_version(uint32_t version) { fbb_.AddElement<uint32_t>(Model::VT_VERSION, version, 0); }
  void add_operator_codes(
      flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<OperatorCode>>> operator_codes)
  {
    fbb_.AddOffset(Model::VT_OPERATOR_CODES, operator_codes);
  }
  void
  add_subgraphs(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<SubGraph>>> subgraphs)
  {
    fbb_.AddOffset(Model::VT_SUBGRAPHS, subgraphs);
  }
  void add_description(flatbuffers::Offset<flatbuffers::String> description)
  {
    fbb_.AddOffset(Model::VT_DESCRIPTION, description);
  }
  void add_buffers(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Buffer>>> buffers)
  {
    fbb_.AddOffset(Model::VT_BUFFERS, buffers);
  }
  void add_metadata_buffer(flatbuffers::Offset<flatbuffers::Vector<int32_t>> metadata_buffer)
  {
    fbb_.AddOffset(Model::VT_METADATA_BUFFER, metadata_buffer);
  }
  void
  add_metadata(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Metadata>>> metadata)
  {
    fbb_.AddOffset(Model::VT_METADATA, metadata);
  }
  explicit ModelBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Copy-assignment declared but never defined: generated code disables it.
  ModelBuilder &operator=(const ModelBuilder &);
  flatbuffers::Offset<Model> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<Model>(end);
    return o;
  }
};

// Builds a complete Model table from already-serialized field offsets.
inline flatbuffers::Offset<Model> CreateModel(
    flatbuffers::FlatBufferBuilder &_fbb, uint32_t version = 0,
    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<OperatorCode>>> operator_codes = 0,
    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<SubGraph>>> subgraphs = 0,
    flatbuffers::Offset<flatbuffers::String> description = 0,
    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Buffer>>> buffers = 0,
    flatbuffers::Offset<flatbuffers::Vector<int32_t>> metadata_buffer = 0,
    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Metadata>>> metadata = 0)
{
  ModelBuilder builder_(_fbb);
  // Fields are added in reverse vtable order (generated-code convention).
  builder_.add_metadata(metadata);
  builder_.add_metadata_buffer(metadata_buffer);
  builder_.add_buffers(buffers);
  builder_.add_description(description);
  builder_.add_subgraphs(subgraphs);
  builder_.add_operator_codes(operator_codes);
  builder_.add_version(version);
  return builder_.Finish();
}

// Convenience overload: serializes plain std::vector / C-string arguments
// first, then delegates to CreateModel.  Null arguments become absent
// fields.
inline flatbuffers::Offset<Model>
CreateModelDirect(flatbuffers::FlatBufferBuilder &_fbb, uint32_t version = 0,
                  const std::vector<flatbuffers::Offset<OperatorCode>> *operator_codes = nullptr,
                  const std::vector<flatbuffers::Offset<SubGraph>> *subgraphs = nullptr,
                  const char *description = nullptr,
                  const std::vector<flatbuffers::Offset<Buffer>> *buffers = nullptr,
                  const std::vector<int32_t> *metadata_buffer = nullptr,
                  const std::vector<flatbuffers::Offset<Metadata>> *metadata = nullptr)
{
  return onert_tflite::CreateModel(
      _fbb, version,
      operator_codes ? _fbb.CreateVector<flatbuffers::Offset<OperatorCode>>(*operator_codes) : 0,
      subgraphs ? _fbb.CreateVector<flatbuffers::Offset<SubGraph>>(*subgraphs) : 0,
      description ? _fbb.CreateString(description) : 0,
      buffers ? _fbb.CreateVector<flatbuffers::Offset<Buffer>>(*buffers) : 0,
      metadata_buffer ? _fbb.CreateVector<int32_t>(*metadata_buffer) : 0,
      metadata ?
_fbb.CreateVector<flatbuffers::Offset<Metadata>>(*metadata) : 0); +} + +inline bool VerifyQuantizationDetails(flatbuffers::Verifier &verifier, const void *obj, + QuantizationDetails type) +{ + switch (type) + { + case QuantizationDetails_NONE: + { + return true; + } + case QuantizationDetails_CustomQuantization: + { + auto ptr = reinterpret_cast<const CustomQuantization *>(obj); + return verifier.VerifyTable(ptr); + } + default: + return false; + } +} + +inline bool +VerifyQuantizationDetailsVector(flatbuffers::Verifier &verifier, + const flatbuffers::Vector<flatbuffers::Offset<void>> *values, + const flatbuffers::Vector<uint8_t> *types) +{ + if (!values || !types) + return !values && !types; + if (values->size() != types->size()) + return false; + for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) + { + if (!VerifyQuantizationDetails(verifier, values->Get(i), + types->GetEnum<QuantizationDetails>(i))) + { + return false; + } + } + return true; +} + +inline bool VerifySparseIndexVector(flatbuffers::Verifier &verifier, const void *obj, + SparseIndexVector type) +{ + switch (type) + { + case SparseIndexVector_NONE: + { + return true; + } + case SparseIndexVector_Int32Vector: + { + auto ptr = reinterpret_cast<const Int32Vector *>(obj); + return verifier.VerifyTable(ptr); + } + case SparseIndexVector_Uint16Vector: + { + auto ptr = reinterpret_cast<const Uint16Vector *>(obj); + return verifier.VerifyTable(ptr); + } + case SparseIndexVector_Uint8Vector: + { + auto ptr = reinterpret_cast<const Uint8Vector *>(obj); + return verifier.VerifyTable(ptr); + } + default: + return false; + } +} + +inline bool +VerifySparseIndexVectorVector(flatbuffers::Verifier &verifier, + const flatbuffers::Vector<flatbuffers::Offset<void>> *values, + const flatbuffers::Vector<uint8_t> *types) +{ + if (!values || !types) + return !values && !types; + if (values->size() != types->size()) + return false; + for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) + { + if 
 (!VerifySparseIndexVector(verifier, values->Get(i), types->GetEnum<SparseIndexVector>(i)))
    {
      return false;
    }
  }
  return true;
}

// Verifies one BuiltinOptions union member against its runtime type tag:
// casts `obj` to the options table named by `type` and verifies that table.
// NONE is trivially valid; unknown tags fail.
inline bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj,
                                 BuiltinOptions type)
{
  switch (type)
  {
    case BuiltinOptions_NONE:
    {
      return true;
    }
    case BuiltinOptions_Conv2DOptions:
    {
      auto ptr = reinterpret_cast<const Conv2DOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_DepthwiseConv2DOptions:
    {
      auto ptr = reinterpret_cast<const DepthwiseConv2DOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_ConcatEmbeddingsOptions:
    {
      auto ptr = reinterpret_cast<const ConcatEmbeddingsOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_LSHProjectionOptions:
    {
      auto ptr = reinterpret_cast<const LSHProjectionOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_Pool2DOptions:
    {
      auto ptr = reinterpret_cast<const Pool2DOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_SVDFOptions:
    {
      auto ptr = reinterpret_cast<const SVDFOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_RNNOptions:
    {
      auto ptr = reinterpret_cast<const RNNOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_FullyConnectedOptions:
    {
      auto ptr = reinterpret_cast<const FullyConnectedOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_SoftmaxOptions:
    {
      auto ptr = reinterpret_cast<const SoftmaxOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_ConcatenationOptions:
    {
      auto ptr = reinterpret_cast<const ConcatenationOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_AddOptions:
    {
      auto ptr = reinterpret_cast<const AddOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_L2NormOptions:
    {
      auto ptr = reinterpret_cast<const L2NormOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_LocalResponseNormalizationOptions:
    {
      auto ptr = reinterpret_cast<const LocalResponseNormalizationOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_LSTMOptions:
    {
      auto ptr = reinterpret_cast<const LSTMOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_ResizeBilinearOptions:
    {
      auto ptr = reinterpret_cast<const ResizeBilinearOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_CallOptions:
    {
      auto ptr = reinterpret_cast<const CallOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_ReshapeOptions:
    {
      auto ptr = reinterpret_cast<const ReshapeOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_SkipGramOptions:
    {
      auto ptr = reinterpret_cast<const SkipGramOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_SpaceToDepthOptions:
    {
      auto ptr = reinterpret_cast<const SpaceToDepthOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_EmbeddingLookupSparseOptions:
    {
      auto ptr = reinterpret_cast<const EmbeddingLookupSparseOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_MulOptions:
    {
      auto ptr = reinterpret_cast<const MulOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_PadOptions:
    {
      auto ptr = reinterpret_cast<const PadOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_GatherOptions:
    {
      auto ptr = reinterpret_cast<const GatherOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_BatchToSpaceNDOptions:
    {
      auto ptr = reinterpret_cast<const BatchToSpaceNDOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_SpaceToBatchNDOptions:
    {
      auto ptr = reinterpret_cast<const SpaceToBatchNDOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_TransposeOptions:
    {
      auto ptr = reinterpret_cast<const TransposeOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_ReducerOptions:
    {
      auto ptr = reinterpret_cast<const ReducerOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_SubOptions:
    {
      auto ptr = reinterpret_cast<const SubOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_DivOptions:
    {
      auto ptr = reinterpret_cast<const DivOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_SqueezeOptions:
    {
      auto ptr = reinterpret_cast<const SqueezeOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_SequenceRNNOptions:
    {
      auto ptr = reinterpret_cast<const SequenceRNNOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_StridedSliceOptions:
    {
      auto ptr = reinterpret_cast<const StridedSliceOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_ExpOptions:
    {
      auto ptr = reinterpret_cast<const ExpOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_TopKV2Options:
    {
      auto ptr = reinterpret_cast<const TopKV2Options *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_SplitOptions:
    {
      auto ptr = reinterpret_cast<const SplitOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_LogSoftmaxOptions:
    {
      auto ptr = reinterpret_cast<const LogSoftmaxOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_CastOptions:
    {
      auto ptr = reinterpret_cast<const CastOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_DequantizeOptions:
    {
      auto ptr = reinterpret_cast<const DequantizeOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_MaximumMinimumOptions:
    {
      auto ptr = reinterpret_cast<const MaximumMinimumOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_ArgMaxOptions:
    {
      auto ptr = reinterpret_cast<const ArgMaxOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_LessOptions:
    {
      auto ptr = reinterpret_cast<const LessOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_NegOptions:
    {
      auto ptr = reinterpret_cast<const NegOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_PadV2Options:
    {
      auto ptr = reinterpret_cast<const PadV2Options *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_GreaterOptions:
    {
      auto ptr = reinterpret_cast<const GreaterOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_GreaterEqualOptions:
    {
      auto ptr = reinterpret_cast<const GreaterEqualOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_LessEqualOptions:
    {
      auto ptr = reinterpret_cast<const LessEqualOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_SelectOptions:
    {
      auto ptr = reinterpret_cast<const SelectOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_SliceOptions:
    {
      auto ptr = reinterpret_cast<const SliceOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_TransposeConvOptions:
    {
      auto ptr = reinterpret_cast<const TransposeConvOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_SparseToDenseOptions:
    {
      auto ptr = reinterpret_cast<const SparseToDenseOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_TileOptions:
    {
      auto ptr = reinterpret_cast<const TileOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_ExpandDimsOptions:
    {
      auto ptr = reinterpret_cast<const ExpandDimsOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_EqualOptions:
    {
      auto ptr = reinterpret_cast<const EqualOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_NotEqualOptions:
    {
      auto ptr = reinterpret_cast<const NotEqualOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_ShapeOptions:
    {
      auto ptr = reinterpret_cast<const ShapeOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_PowOptions:
    {
      auto ptr = reinterpret_cast<const PowOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_ArgMinOptions:
    {
      auto ptr = reinterpret_cast<const ArgMinOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_FakeQuantOptions:
    {
      auto ptr = reinterpret_cast<const FakeQuantOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_PackOptions:
    {
      auto ptr = reinterpret_cast<const PackOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_LogicalOrOptions:
    {
      auto ptr = reinterpret_cast<const LogicalOrOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_OneHotOptions:
    {
      auto ptr = reinterpret_cast<const OneHotOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_LogicalAndOptions:
    {
      auto ptr = reinterpret_cast<const LogicalAndOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_LogicalNotOptions:
    {
      auto ptr = reinterpret_cast<const LogicalNotOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_UnpackOptions:
    {
      auto ptr = reinterpret_cast<const UnpackOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_FloorDivOptions:
    {
      auto ptr = reinterpret_cast<const FloorDivOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_SquareOptions:
    {
      auto ptr = reinterpret_cast<const SquareOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_ZerosLikeOptions:
    {
      auto ptr = reinterpret_cast<const ZerosLikeOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_FillOptions:
    {
      auto ptr = reinterpret_cast<const FillOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_BidirectionalSequenceLSTMOptions:
    {
      auto ptr = reinterpret_cast<const BidirectionalSequenceLSTMOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_BidirectionalSequenceRNNOptions:
    {
      auto ptr = reinterpret_cast<const BidirectionalSequenceRNNOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_UnidirectionalSequenceLSTMOptions:
    {
      auto ptr = reinterpret_cast<const UnidirectionalSequenceLSTMOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_FloorModOptions:
    {
      auto ptr = reinterpret_cast<const FloorModOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_RangeOptions:
    {
      auto ptr = reinterpret_cast<const RangeOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_ResizeNearestNeighborOptions:
    {
      auto ptr = reinterpret_cast<const ResizeNearestNeighborOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_LeakyReluOptions:
    {
      auto ptr = reinterpret_cast<const LeakyReluOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_SquaredDifferenceOptions:
    {
      auto ptr = reinterpret_cast<const SquaredDifferenceOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_MirrorPadOptions:
    {
      auto ptr = reinterpret_cast<const MirrorPadOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_AbsOptions:
    {
      auto ptr = reinterpret_cast<const AbsOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_SplitVOptions:
    {
      auto ptr = reinterpret_cast<const SplitVOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_UniqueOptions:
    {
      auto ptr = reinterpret_cast<const UniqueOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_ReverseV2Options:
    {
      auto ptr = reinterpret_cast<const ReverseV2Options *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_AddNOptions:
    {
      auto ptr = reinterpret_cast<const AddNOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_GatherNdOptions:
    {
      auto ptr = reinterpret_cast<const GatherNdOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_CosOptions:
    {
      auto ptr = reinterpret_cast<const CosOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_WhereOptions:
    {
      auto ptr = reinterpret_cast<const WhereOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_RankOptions:
    {
      auto ptr = reinterpret_cast<const RankOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_ReverseSequenceOptions:
    {
      auto ptr = reinterpret_cast<const ReverseSequenceOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_MatrixDiagOptions:
    {
      auto ptr = reinterpret_cast<const MatrixDiagOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_QuantizeOptions:
    {
      auto ptr = reinterpret_cast<const QuantizeOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_MatrixSetDiagOptions:
    {
      auto ptr = reinterpret_cast<const MatrixSetDiagOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_HardSwishOptions:
    {
      auto ptr = reinterpret_cast<const HardSwishOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_IfOptions:
    {
      auto ptr = reinterpret_cast<const IfOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_WhileOptions:
    {
      auto ptr = reinterpret_cast<const WhileOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_DepthToSpaceOptions:
    {
      auto ptr = reinterpret_cast<const DepthToSpaceOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_NonMaxSuppressionV4Options:
    {
      auto ptr = reinterpret_cast<const NonMaxSuppressionV4Options *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_NonMaxSuppressionV5Options:
    {
      auto ptr = reinterpret_cast<const NonMaxSuppressionV5Options *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_ScatterNdOptions:
    {
      auto ptr = reinterpret_cast<const ScatterNdOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_SelectV2Options:
    {
      auto ptr = reinterpret_cast<const SelectV2Options *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_DensifyOptions:
    {
      auto ptr = reinterpret_cast<const DensifyOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_SegmentSumOptions:
    {
      auto ptr = reinterpret_cast<const SegmentSumOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case BuiltinOptions_BatchMatMulOptions:
    {
      auto ptr = reinterpret_cast<const BatchMatMulOptions *>(obj);
      return verifier.VerifyTable(ptr);
    }
    default:
      return false;
  }
}

// Verifies a vector of BuiltinOptions unions; `values` and `types` must both
// be null or have equal length, element i of `types` tags element i of
// `values`.
inline bool VerifyBuiltinOptionsVector(flatbuffers::Verifier &verifier,
                                       const flatbuffers::Vector<flatbuffers::Offset<void>> *values,
                                       const flatbuffers::Vector<uint8_t> *types)
{
  if (!values || !types)
    return !values && !types;
  if (values->size() != types->size())
    return false;
  for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i)
  {
    if (!VerifyBuiltinOptions(verifier, values->Get(i), types->GetEnum<BuiltinOptions>(i)))
    {
      return false;
    }
  }
  return true;
}

// Returns the root Model table of a raw buffer (no verification performed).
inline const onert_tflite::Model *GetModel(const void *buf)
{
  return flatbuffers::GetRoot<onert_tflite::Model>(buf);
}

// Same, for a size-prefixed buffer.
inline const onert_tflite::Model *GetSizePrefixedModel(const void *buf)
{
  return flatbuffers::GetSizePrefixedRoot<onert_tflite::Model>(buf);
}

// File identifier embedded in .tflite buffers.
inline const char *ModelIdentifier() { return "TFL3"; }

inline bool
ModelBufferHasIdentifier(const void *buf) +{ + return flatbuffers::BufferHasIdentifier(buf, ModelIdentifier()); +} + +inline bool VerifyModelBuffer(flatbuffers::Verifier &verifier) +{ + return verifier.VerifyBuffer<onert_tflite::Model>(ModelIdentifier()); +} + +inline bool VerifySizePrefixedModelBuffer(flatbuffers::Verifier &verifier) +{ + return verifier.VerifySizePrefixedBuffer<onert_tflite::Model>(ModelIdentifier()); +} + +inline const char *ModelExtension() { return "tflite"; } + +inline void FinishModelBuffer(flatbuffers::FlatBufferBuilder &fbb, + flatbuffers::Offset<onert_tflite::Model> root) +{ + fbb.Finish(root, ModelIdentifier()); +} + +inline void FinishSizePrefixedModelBuffer(flatbuffers::FlatBufferBuilder &fbb, + flatbuffers::Offset<onert_tflite::Model> root) +{ + fbb.FinishSizePrefixed(root, ModelIdentifier()); +} + +} // namespace onert_tflite + +#endif // FLATBUFFERS_GENERATED_TFLITESCHEMA_ONERT_TFLITE_H_ diff --git a/runtime/onert/frontend/tflite/tflite_schema-1.13.1.fbs b/runtime/onert/frontend/tflite/tflite_schema-1.13.1.fbs new file mode 100644 index 000000000..ae6b5230f --- /dev/null +++ b/runtime/onert/frontend/tflite/tflite_schema-1.13.1.fbs @@ -0,0 +1,795 @@ +// Copyright 2017 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Revision History +// Version 0: Initial version. +// Version 1: Add subgraphs to schema. +// Version 2: Rename operators to conform to NN API. 
// Version 3: Move buffer data from Model.Subgraph.Tensors to Model.Buffers.

// Change namespace to onert_tflite
namespace onert_tflite;

// This corresponds to the version.
file_identifier "TFL3";
// File extension of any written files.
file_extension "tflite";

// IMPORTANT: All new members of tables, enums and unions must be added at the
// end to ensure backwards compatibility.

// The type of data stored in a tensor.
enum TensorType : byte {
  FLOAT32 = 0,
  FLOAT16 = 1,
  INT32 = 2,
  UINT8 = 3,
  INT64 = 4,
  STRING = 5,
  BOOL = 6,
  INT16 = 7,
  COMPLEX64 = 8,
  INT8 = 9,
}

// Custom quantization parameters for experimenting with new quantization
// techniques.
table CustomQuantization {
  custom:[ubyte] (force_align: 16);
}

// Represents a specific quantization technique's parameters.
union QuantizationDetails {
  CustomQuantization,
}

// Parameters for converting a quantized tensor back to float.
table QuantizationParameters {
  // These four parameters are the asymmetric linear quantization parameters.
  // Given a quantized value q, the corresponding float value f should be:
  //   f = scale * (q - zero_point)
  // For other quantization types, the QuantizationDetails below is used.
  min:[float];  // For importing back into tensorflow.
  max:[float];  // For importing back into tensorflow.
  scale:[float];  // For dequantizing the tensor's values.
  zero_point:[long];

  // If this is not none, the quantization parameters above are ignored and the
  // value of the QuantizationDetails union below should be used.
  details:QuantizationDetails;
}

table Tensor {
  // The tensor shape. The meaning of each entry is operator-specific but
  // builtin ops use: [batch size, height, width, number of channels] (That's
  // Tensorflow's NHWC).
  shape:[int];
  type:TensorType;
  // An index that refers to the buffers table at the root of the model. Or,
  // if there is no data buffer associated (i.e. intermediate results), then
  // this is 0 (which refers to an always existent empty buffer).
  //
  // The data_buffer itself is an opaque container, with the assumption that the
  // target device is little-endian. In addition, all builtin operators assume
  // the memory is ordered such that if `shape` is [4, 3, 2], then index
  // [i, j, k] maps to data_buffer[i*3*2 + j*2 + k].
  buffer:uint;
  name:string;  // For debugging and importing back into tensorflow.
  quantization:QuantizationParameters;  // Optional.

  is_variable:bool = false;
}

// A list of builtin operators. Builtin operators are slightly faster than custom
// ones, but not by much. Moreover, while custom operators accept an opaque
// object containing configuration parameters, builtins have a predetermined
// set of acceptable options.
enum BuiltinOperator : byte {
  ADD = 0,
  AVERAGE_POOL_2D = 1,
  CONCATENATION = 2,
  CONV_2D = 3,
  DEPTHWISE_CONV_2D = 4,
  // DEPTH_TO_SPACE = 5,
  DEQUANTIZE = 6,
  EMBEDDING_LOOKUP = 7,
  FLOOR = 8,
  FULLY_CONNECTED = 9,
  HASHTABLE_LOOKUP = 10,
  L2_NORMALIZATION = 11,
  L2_POOL_2D = 12,
  LOCAL_RESPONSE_NORMALIZATION = 13,
  LOGISTIC = 14,
  LSH_PROJECTION = 15,
  LSTM = 16,
  MAX_POOL_2D = 17,
  MUL = 18,
  RELU = 19,
  // NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed
  // since different model developers use RELU1 in different ways. Never
  // create another op called RELU1.
+ RELU_N1_TO_1 = 20, + RELU6 = 21, + RESHAPE = 22, + RESIZE_BILINEAR = 23, + RNN = 24, + SOFTMAX = 25, + SPACE_TO_DEPTH = 26, + SVDF = 27, + TANH = 28, + // TODO(aselle): Consider rename to CONCATENATE_EMBEDDINGS + CONCAT_EMBEDDINGS = 29, + SKIP_GRAM = 30, + CALL = 31, + CUSTOM = 32, + EMBEDDING_LOOKUP_SPARSE = 33, + PAD = 34, + UNIDIRECTIONAL_SEQUENCE_RNN = 35, + GATHER = 36, + BATCH_TO_SPACE_ND = 37, + SPACE_TO_BATCH_ND = 38, + TRANSPOSE = 39, + MEAN = 40, + SUB = 41, + DIV = 42, + SQUEEZE = 43, + UNIDIRECTIONAL_SEQUENCE_LSTM = 44, + STRIDED_SLICE = 45, + BIDIRECTIONAL_SEQUENCE_RNN = 46, + EXP = 47, + TOPK_V2 = 48, + SPLIT = 49, + LOG_SOFTMAX = 50, + // DELEGATE is a special op type for the operations which are delegated to + // other backends. + // WARNING: Experimental interface, subject to change + DELEGATE = 51, + BIDIRECTIONAL_SEQUENCE_LSTM = 52, + CAST = 53, + PRELU = 54, + MAXIMUM = 55, + ARG_MAX = 56, + MINIMUM = 57, + LESS = 58, + NEG = 59, + PADV2 = 60, + GREATER = 61, + GREATER_EQUAL = 62, + LESS_EQUAL = 63, + SELECT = 64, + SLICE = 65, + SIN = 66, + TRANSPOSE_CONV = 67, + SPARSE_TO_DENSE = 68, + TILE = 69, + EXPAND_DIMS = 70, + EQUAL = 71, + NOT_EQUAL = 72, + LOG = 73, + SUM = 74, + SQRT = 75, + RSQRT = 76, + SHAPE = 77, + POW = 78, + ARG_MIN = 79, + FAKE_QUANT = 80, + REDUCE_PROD = 81, + REDUCE_MAX = 82, + PACK = 83, + LOGICAL_OR = 84, + ONE_HOT = 85, + LOGICAL_AND = 86, + LOGICAL_NOT = 87, + UNPACK = 88, + REDUCE_MIN = 89, + FLOOR_DIV = 90, + REDUCE_ANY = 91, + SQUARE = 92, + ZEROS_LIKE = 93, + FILL = 94, + FLOOR_MOD = 95, + RANGE = 96, + RESIZE_NEAREST_NEIGHBOR = 97, + LEAKY_RELU = 98, + SQUARED_DIFFERENCE = 99, + MIRROR_PAD = 100, + ABS = 101, + SPLIT_V = 102, +} + +// Options for the builtin operators. 
+union BuiltinOptions { + Conv2DOptions, + DepthwiseConv2DOptions, + ConcatEmbeddingsOptions, + LSHProjectionOptions, + Pool2DOptions, + SVDFOptions, + RNNOptions, + FullyConnectedOptions, + SoftmaxOptions, + ConcatenationOptions, + AddOptions, + L2NormOptions, + LocalResponseNormalizationOptions, + LSTMOptions, + ResizeBilinearOptions, + CallOptions, + ReshapeOptions, + SkipGramOptions, + SpaceToDepthOptions, + EmbeddingLookupSparseOptions, + MulOptions, + PadOptions, + GatherOptions, + BatchToSpaceNDOptions, + SpaceToBatchNDOptions, + TransposeOptions, + ReducerOptions, + SubOptions, + DivOptions, + SqueezeOptions, + SequenceRNNOptions, + StridedSliceOptions, + ExpOptions, + TopKV2Options, + SplitOptions, + LogSoftmaxOptions, + CastOptions, + DequantizeOptions, + MaximumMinimumOptions, + ArgMaxOptions, + LessOptions, + NegOptions, + PadV2Options, + GreaterOptions, + GreaterEqualOptions, + LessEqualOptions, + SelectOptions, + SliceOptions, + TransposeConvOptions, + SparseToDenseOptions, + TileOptions, + ExpandDimsOptions, + EqualOptions, + NotEqualOptions, + ShapeOptions, + PowOptions, + ArgMinOptions, + FakeQuantOptions, + PackOptions, + LogicalOrOptions, + OneHotOptions, + LogicalAndOptions, + LogicalNotOptions, + UnpackOptions, + FloorDivOptions, + SquareOptions, + ZerosLikeOptions, + FillOptions, + BidirectionalSequenceLSTMOptions, + BidirectionalSequenceRNNOptions, + UnidirectionalSequenceLSTMOptions, + FloorModOptions, + RangeOptions, + ResizeNearestNeighborOptions, + LeakyReluOptions, + SquaredDifferenceOptions, + MirrorPadOptions, + AbsOptions, + SplitVOptions, +} + +enum Padding : byte { SAME, VALID } + +enum ActivationFunctionType : byte { + NONE = 0, + RELU = 1, + RELU_N1_TO_1 = 2, + RELU6 = 3, + TANH = 4, + SIGN_BIT = 5, +} + +table Conv2DOptions { + padding:Padding; + stride_w:int; + stride_h:int; + fused_activation_function:ActivationFunctionType; + dilation_w_factor:int = 1; + dilation_h_factor:int = 1; +} + +table Pool2DOptions { + padding:Padding; 
+ stride_w:int; + stride_h:int; + filter_width:int; + filter_height:int; + fused_activation_function:ActivationFunctionType; +} + +table DepthwiseConv2DOptions { + // Parameters for DepthwiseConv version 1 or above. + padding:Padding; + stride_w:int; + stride_h:int; + depth_multiplier:int; + fused_activation_function:ActivationFunctionType; + // Parameters for DepthwiseConv version 2 or above. + dilation_w_factor:int = 1; + dilation_h_factor:int = 1; +} + +table ConcatEmbeddingsOptions { + num_channels:int; + num_columns_per_channel:[int]; + embedding_dim_per_channel:[int]; // This could be inferred from parameters. +} + +enum LSHProjectionType: byte { + UNKNOWN = 0, + SPARSE = 1, + DENSE = 2, +} + +table LSHProjectionOptions { + type: LSHProjectionType; +} + +table SVDFOptions { + rank:int; + fused_activation_function:ActivationFunctionType; +} + +// An implementation of TensorFlow RNNCell. +table RNNOptions { + fused_activation_function:ActivationFunctionType; +} + +// An implementation of TensorFlow dynamic_rnn with RNNCell. +table SequenceRNNOptions { + time_major:bool; + fused_activation_function:ActivationFunctionType; +} + +// An implementation of TensorFlow bidrectional_dynamic_rnn with RNNCell. +table BidirectionalSequenceRNNOptions { + time_major:bool; + fused_activation_function:ActivationFunctionType; + merge_outputs: bool; +} + +enum FullyConnectedOptionsWeightsFormat: byte { + DEFAULT = 0, + SHUFFLED4x16INT8 = 1, +} + +// An implementation of TensorFlow fully_connected (a.k.a Dense) layer. +table FullyConnectedOptions { + // Parameters for FullyConnected version 1 or above. + fused_activation_function:ActivationFunctionType; + + // Parameters for FullyConnected version 2 or above. + weights_format:FullyConnectedOptionsWeightsFormat = DEFAULT; +} + +table SoftmaxOptions { + beta: float; +} + +// An implementation of TensorFlow concat. 
+table ConcatenationOptions { + axis:int; + fused_activation_function:ActivationFunctionType; +} + +table AddOptions { + fused_activation_function:ActivationFunctionType; +} + +table MulOptions { + fused_activation_function:ActivationFunctionType; +} + +table L2NormOptions { + fused_activation_function:ActivationFunctionType; +} + +table LocalResponseNormalizationOptions { + radius:int; + bias:float; + alpha:float; + beta:float; +} + +enum LSTMKernelType : byte { + // Full LSTM kernel which supports peephole and projection. + FULL = 0, + // Basic LSTM kernels. Equivalent to TensorFlow BasicLSTMCell. + BASIC = 1, +} + +// An implementation of TensorFlow LSTMCell and CoupledInputForgetGateLSTMCell +table LSTMOptions { + // Parameters for LSTM version 1 or above. + fused_activation_function:ActivationFunctionType; + cell_clip: float; // Optional, 0.0 means no clipping + proj_clip: float; // Optional, 0.0 means no clipping + + // Parameters for LSTM version 2 or above. + // Basic kernel is only supported in version 2 or above. + kernel_type: LSTMKernelType = FULL; +} + +// An implementation of TensorFlow dynamic_rnn with LSTMCell. +table UnidirectionalSequenceLSTMOptions { + fused_activation_function:ActivationFunctionType; + cell_clip: float; // Optional, 0.0 means no clipping + proj_clip: float; // Optional, 0.0 means no clipping + + // If true then first dimension is sequence, otherwise batch. + time_major:bool; +} + +table BidirectionalSequenceLSTMOptions { + fused_activation_function:ActivationFunctionType; + cell_clip: float; // Optional, 0.0 means no clipping + proj_clip: float; // Optional, 0.0 means no clipping + + // If true, store the outputs of both directions into the first output. 
+ merge_outputs: bool; +} + +table ResizeBilinearOptions { + new_height: int (deprecated); + new_width: int (deprecated); + align_corners: bool; +} + +table ResizeNearestNeighborOptions { + align_corners: bool; +} + +// A call operation options +table CallOptions { + // The subgraph index that needs to be called. + subgraph:uint; +} + +table PadOptions { +} + +table PadV2Options { +} + +table ReshapeOptions { + new_shape:[int]; +} + +table SpaceToBatchNDOptions { +} + +table BatchToSpaceNDOptions { +} + +table SkipGramOptions { + ngram_size: int; + max_skip_size: int; + include_all_ngrams: bool; +} + +table SpaceToDepthOptions { + block_size: int; +} + +table SubOptions { + fused_activation_function:ActivationFunctionType; +} + +table DivOptions { + fused_activation_function:ActivationFunctionType; +} + +table TopKV2Options { +} + +enum CombinerType : byte { + SUM = 0, + MEAN = 1, + SQRTN = 2, +} + +table EmbeddingLookupSparseOptions { + combiner:CombinerType; +} + +table GatherOptions { + axis: int; +} + +table TransposeOptions { +} + +table ExpOptions { +} + +table ReducerOptions { + keep_dims: bool; +} + +table SqueezeOptions { + squeeze_dims:[int]; +} + +table SplitOptions { + num_splits: int; +} + +table SplitVOptions { + num_splits: int; +} + +table StridedSliceOptions { + begin_mask: int; + end_mask: int; + ellipsis_mask: int; + new_axis_mask: int; + shrink_axis_mask: int; +} + +table LogSoftmaxOptions { +} + +table CastOptions { + in_data_type: TensorType; + out_data_type: TensorType; +} + +table DequantizeOptions { +} + +table MaximumMinimumOptions { +} + +table TileOptions { +} + +table ArgMaxOptions { + output_type : TensorType; +} + +table ArgMinOptions { + output_type : TensorType; +} + +table GreaterOptions { +} + +table GreaterEqualOptions { +} + +table LessOptions { +} + +table LessEqualOptions { +} + +table NegOptions { +} + +table SelectOptions { +} + +table SliceOptions { +} + +table TransposeConvOptions { + padding:Padding; + stride_w:int; + 
stride_h:int; +} + +table ExpandDimsOptions { +} + +table SparseToDenseOptions { + validate_indices:bool; +} + +table EqualOptions { +} + +table NotEqualOptions { +} + +table ShapeOptions { + // Optional output type of the operation (int32 or int64). Defaults to int32. + out_type : TensorType; +} + +table PowOptions { +} + +table FakeQuantOptions { + // Parameters supported by version 1: + min:float; + max:float; + num_bits:int; + + // Parameters supported by version 2: + narrow_range:bool; +} + +table PackOptions { + values_count:int; + axis:int; +} + +table LogicalOrOptions { +} + +table OneHotOptions { + axis:int; +} + +table AbsOptions { +} + + +table LogicalAndOptions { +} + +table LogicalNotOptions { +} + +table UnpackOptions { + num:int; + axis:int; +} + +table FloorDivOptions { +} + +table SquareOptions { +} + +table ZerosLikeOptions { +} + +table FillOptions { +} + +table FloorModOptions { +} + +table RangeOptions { +} + +table LeakyReluOptions { + alpha:float; +} + +table SquaredDifferenceOptions { +} + +enum MirrorPadMode : byte { + // Doesn't include borders. + REFLECT = 0, + // Includes borders. + SYMMETRIC = 1, +} + +table MirrorPadOptions { + mode:MirrorPadMode; +} + +// An OperatorCode can be an enum value (BuiltinOperator) if the operator is a +// builtin, or a string if the operator is custom. +table OperatorCode { + builtin_code:BuiltinOperator; + custom_code:string; + + // The version of the operator. The version need to be bumped whenever new + // parameters are introduced into an op. + version:int = 1; +} + +enum CustomOptionsFormat : byte { + FLEXBUFFERS = 0, +} + +// An operator takes tensors as inputs and outputs. The type of operation being +// performed is determined by an index into the list of valid OperatorCodes, +// while the specifics of each operations is configured using builtin_options +// or custom_options. +table Operator { + // Index into the operator_codes array. Using an integer here avoids + // complicate map lookups. 
+ opcode_index:uint; + + // Optional input and output tensors are indicated by -1. + inputs:[int]; + outputs:[int]; + + builtin_options:BuiltinOptions; + custom_options:[ubyte]; + custom_options_format:CustomOptionsFormat; + + // A list of booleans indicating the input tensors which are being mutated by + // this operator.(e.g. used by RNN and LSTM). + // For example, if the "inputs" array refers to 5 tensors and the second and + // fifth are mutable variables, then this list will contain + // [false, true, false, false, true]. + // + // If the list is empty, no variable is mutated in this operator. + // The list either has the same length as `inputs`, or is empty. + mutating_variable_inputs:[bool]; +} + +// The root type, defining a subgraph, which typically represents an entire +// model. +table SubGraph { + // A list of all tensors used in this subgraph. + tensors:[Tensor]; + + // Indices of the tensors that are inputs into this subgraph. Note this is + // the list of non-static tensors that feed into the subgraph for inference. + inputs:[int]; + + // Indices of the tensors that are outputs out of this subgraph. Note this is + // the list of output tensors that are considered the product of the + // subgraph's inference. + outputs:[int]; + + // All operators, in execution order. + operators:[Operator]; + + // Name of this subgraph (used for debugging). + name:string; +} + +// Table of raw data buffers (used for constant tensors). Referenced by tensors +// by index. The generous alignment accommodates mmap-friendly data structures. +table Buffer { + data:[ubyte] (force_align: 16); +} + +table Model { + // Version of the schema. + version:uint; + + // A list of all operator codes used in this model. This is + // kept in order because operators carry an index into this + // vector. + operator_codes:[OperatorCode]; + + // All the subgraphs of the model. The 0th is assumed to be the main + // model. + subgraphs:[SubGraph]; + + // A description of the model. 
+ description:string; + + // Buffers of the model. + // Note the 0th entry of this array must be an empty buffer (sentinel). + // This is a convention so that tensors without a buffer can provide 0 as + // their buffer. + buffers:[Buffer]; + + // Metadata about the model. Indirects into the existing buffers list. + metadata_buffer:[int]; +} + +root_type Model; diff --git a/runtime/onert/frontend/tflite/tflite_schema.fbs b/runtime/onert/frontend/tflite/tflite_schema.fbs new file mode 100644 index 000000000..9bffb4f3c --- /dev/null +++ b/runtime/onert/frontend/tflite/tflite_schema.fbs @@ -0,0 +1,1095 @@ +// Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved +// Copyright 2017 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Revision History +// Version 0: Initial version. +// Version 1: Add subgraphs to schema. +// Version 2: Rename operators to conform to NN API. +// Version 3: Move buffer data from Model.Subgraph.Tensors to Model.Buffers. + +// Change namespace to onert_tflite +namespace onert_tflite; + +// This corresponds to the version. +file_identifier "TFL3"; +// File extension of any written files. +file_extension "tflite"; + +// IMPORTANT: All new members of tables, enums and unions must be added at the +// end to ensure backwards compatibility. + +// The type of data stored in a tensor. 
+enum TensorType : byte { + FLOAT32 = 0, + FLOAT16 = 1, + INT32 = 2, + UINT8 = 3, + INT64 = 4, + STRING = 5, + BOOL = 6, + INT16 = 7, + COMPLEX64 = 8, + INT8 = 9, + FLOAT64 = 10, +} + +// Custom quantization parameters for experimenting with new quantization +// techniques. +table CustomQuantization { + custom:[ubyte] (force_align: 16); +} + +// Represents a specific quantization technique's parameters. +union QuantizationDetails { + CustomQuantization, +} + +// Parameters for converting a quantized tensor back to float. +table QuantizationParameters { + // These four parameters are the asymmetric linear quantization parameters. + // Given a quantized value q, the corresponding float value f should be: + // f = scale * (q - zero_point) + // For other quantization types, the QuantizationDetails below is used. + min:[float]; // For importing back into tensorflow. + max:[float]; // For importing back into tensorflow. + scale:[float]; // For dequantizing the tensor's values. + zero_point:[long]; + + // If this is not none, the other quantization parameters (i.e. min, max, + // scale, zero_point fields above) are ignored and the value of the + // QuantizationDetails union should be used. + details:QuantizationDetails; + + // Specifies the dimension of the Tensor's shape that the scales and + // zero_points correspond to. For example, a tensor t, with dims=[4, 3, 2, 1] + // with quantization params: + // scale=[1.0, 2.0, 3.0], zero_point=[1, 2, 3], quantization_dimension=1 + // will be quantized across the second dimension of t. + // t[:, 0, :, :] will have scale[0]=1.0, zero_point[0]=1 + // t[:, 1, :, :] will have scale[1]=2.0, zero_point[0]=2 + // t[:, 2, :, :] will have scale[2]=3.0, zero_point[0]=3 + quantized_dimension:int; +} + +// Sparse tensors. +// We use a modification of the TACO format. 
+// Reference: http://tensor-compiler.org/kjolstad-oopsla17-tensor-compiler.pdf +// +// To encode a conceptual n-dimensional dense tensor with dims (d0, ..., dn-1), +// potentially with a k-dimensional block (0 <= k <= n) with dims +// (dn, ..., dn+k-1), the format needs to specify: +// 1. In what order to traverse these dimensions. For example, to store a 2-D +// matrix in row major order, the traversal order would be (d0, d1), +// whereas to store it in column major order, the traversal order would be +// (d1, d0). If the 2-D matrix has a 2-D inner block, the traversal order +// could be (d0, d1, d2, d3). +// 2. How each block dimension in (dn, ..., dn+k-1) maps to the original +// tensor dimension in (d0, ..., dn-1). +// 3. In the traversal order defined above, the format (dense vs. sparse) and +// index metadata for each dimension. For a dense dimension, this is just +// the size of that dimension. For a sparse dimension, it's the same as +// the compressed index defined in the Compressed Sparse Row (CSR) format. +// (http://scipy-lectures.org/advanced/scipy_sparse/csr_matrix.html) + +// The storage type for a dimension. Currently we support: +// 1. DENSE: each coordinate in this dimension is stored implicitly. +// 2. SPARSE_CSR: only the coordinates with non-zero elements are stored. The +// compression technique is the same what CSR uses. +// More types like a sparse dimension with a different compression technique +// could be added to the list in the future. +enum DimensionType : byte { + DENSE = 0, + SPARSE_CSR = 1, +} + +table Int32Vector { + values:[int]; +} + +table Uint16Vector { + values:[ushort] (force_align: 4); +} + +table Uint8Vector { + values:[ubyte] (force_align: 4); +} + +// Variable-typed buffer to store the index metadata for a sparse dimension. +// The widest type is Int32 instead of UInt32 because tensor's shape is a int32 +// vector. We don't want the per-dimensional index to overflow that range. 
+union SparseIndexVector { + Int32Vector, + Uint16Vector, + Uint8Vector +} + +table DimensionMetadata { + // Whether a dimension is dense or sparse. + format:DimensionType; + // Index metadata used for a dimension. + // - If format is DimensionType.DENSE then we use the dense_size field to + // store the size of that dimension. Each index in that dimension is + // stored implicitly. + // - If format is DimensionType.SPARSE_CSR then we use array_segments and + // array_indices to encode that dimension. array_segments represents how + // to segment the indices array, each segment corresponds to one element + // in the previous dimension. array_indices represents the index of the + // non-zero elements within this dimension (as those in the CSR matrix + // format, where the first array is row pointers and the second array is + // column indices). + dense_size:int; + array_segments:SparseIndexVector; + array_indices:SparseIndexVector; +} + +// Parameters to encode a sparse TfLite tensor. +table SparsityParameters { + // The traversal order of the dimensions defined in the `shape` field of the + // conceptual dense tensor. For a n-dimensional tensors with dims (d0, d1, + // ..., dn-1), + // - if not block sparse, the traversal_order is just a permutation of (d0, + // ..., dn-1). For example, a 2-D matrix stored in row-major order would + // have traversal_order = (d0, d1). + // - if block sparse with a k-dimensional block (0 <= k <= n), the + // traversal_order has n + k elements. The first n elements are still a + // permutation of (d0, ..., dn-1). The last k elements are a permutation + // of (dn, ..., dn+k-1), defining how to traverse a block internally. For + // example, a 2-D matrix with 2-D blocks, both stored in row-major order + // would have traversal_order = (d0, d1, d2, d3). 
+ traversal_order:[int]; + // For an n-dimensional tensor with a k-dimensional block (0 <= k <= n), + // stores how a block dimension in (dn, ..., dn+k-1) maps to the original + // tensor dimension in (d0, ..., dn). + // It's stored in the order of (dn, ..., dn+k-1). + // If not block-sparse, this field is NULL. + block_map:[int]; + // In the traversal order defined above, the metadata needed for + // each dimension to locate the non-zero values in the original dense tensor. + // The size of the dim_metadata array = the size of the traversal_order array + // = n + k. + dim_metadata:[DimensionMetadata]; +} + +table Tensor { + // The tensor shape. The meaning of each entry is operator-specific but + // builtin ops use: [batch size, height, width, number of channels] (That's + // Tensorflow's NHWC). + shape:[int]; + type:TensorType; + // An index that refers to the buffers table at the root of the model. Or, + // if there is no data buffer associated (i.e. intermediate results), then + // this is 0 (which refers to an always existent empty buffer). + // + // The data_buffer itself is an opaque container, with the assumption that the + // target device is little-endian. In addition, all builtin operators assume + // the memory is ordered such that if `shape` is [4, 3, 2], then index + // [i, j, k] maps to data_buffer[i*3*2 + j*2 + k]. + buffer:uint; + name:string; // For debugging and importing back into tensorflow. + quantization:QuantizationParameters; // Optional. + + is_variable:bool = false; + + // Parameters to encode a sparse tensor. See the example in + // tensorflow/lite/testdata/sparse_tensor.json. + sparsity:SparsityParameters; // Optional. + + // Encodes `shape` with unknown dimensions. Unknown dimensions are + // represented with -1. + shape_signature:[int]; // Optional. +} + +// A list of builtin operators. Builtin operators are slightly faster than custom +// ones, but not by much. 
Moreover, while custom operators accept an opaque +// object containing configuration parameters, builtins have a predetermined +// set of acceptable options. + +enum BuiltinOperator : byte { + ADD = 0, + AVERAGE_POOL_2D = 1, + CONCATENATION = 2, + CONV_2D = 3, + DEPTHWISE_CONV_2D = 4, + DEPTH_TO_SPACE = 5, + DEQUANTIZE = 6, + EMBEDDING_LOOKUP = 7, + FLOOR = 8, + FULLY_CONNECTED = 9, + HASHTABLE_LOOKUP = 10, + L2_NORMALIZATION = 11, + L2_POOL_2D = 12, + LOCAL_RESPONSE_NORMALIZATION = 13, + LOGISTIC = 14, + LSH_PROJECTION = 15, + LSTM = 16, + MAX_POOL_2D = 17, + MUL = 18, + RELU = 19, + // NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed + // since different model developers use RELU1 in different ways. Never + // create another op called RELU1. + RELU_N1_TO_1 = 20, + RELU6 = 21, + RESHAPE = 22, + RESIZE_BILINEAR = 23, + RNN = 24, + SOFTMAX = 25, + SPACE_TO_DEPTH = 26, + SVDF = 27, + TANH = 28, + // TODO(aselle): Consider rename to CONCATENATE_EMBEDDINGS + CONCAT_EMBEDDINGS = 29, + SKIP_GRAM = 30, + CALL = 31, + CUSTOM = 32, + EMBEDDING_LOOKUP_SPARSE = 33, + PAD = 34, + UNIDIRECTIONAL_SEQUENCE_RNN = 35, + GATHER = 36, + BATCH_TO_SPACE_ND = 37, + SPACE_TO_BATCH_ND = 38, + TRANSPOSE = 39, + MEAN = 40, + SUB = 41, + DIV = 42, + SQUEEZE = 43, + UNIDIRECTIONAL_SEQUENCE_LSTM = 44, + STRIDED_SLICE = 45, + BIDIRECTIONAL_SEQUENCE_RNN = 46, + EXP = 47, + TOPK_V2 = 48, + SPLIT = 49, + LOG_SOFTMAX = 50, + // DELEGATE is a special op type for the operations which are delegated to + // other backends. 
+ // WARNING: Experimental interface, subject to change + DELEGATE = 51, + BIDIRECTIONAL_SEQUENCE_LSTM = 52, + CAST = 53, + PRELU = 54, + MAXIMUM = 55, + ARG_MAX = 56, + MINIMUM = 57, + LESS = 58, + NEG = 59, + PADV2 = 60, + GREATER = 61, + GREATER_EQUAL = 62, + LESS_EQUAL = 63, + SELECT = 64, + SLICE = 65, + SIN = 66, + TRANSPOSE_CONV = 67, + SPARSE_TO_DENSE = 68, + TILE = 69, + EXPAND_DIMS = 70, + EQUAL = 71, + NOT_EQUAL = 72, + LOG = 73, + SUM = 74, + SQRT = 75, + RSQRT = 76, + SHAPE = 77, + POW = 78, + ARG_MIN = 79, + FAKE_QUANT = 80, + REDUCE_PROD = 81, + REDUCE_MAX = 82, + PACK = 83, + LOGICAL_OR = 84, + ONE_HOT = 85, + LOGICAL_AND = 86, + LOGICAL_NOT = 87, + UNPACK = 88, + REDUCE_MIN = 89, + FLOOR_DIV = 90, + REDUCE_ANY = 91, + SQUARE = 92, + ZEROS_LIKE = 93, + FILL = 94, + FLOOR_MOD = 95, + RANGE = 96, + RESIZE_NEAREST_NEIGHBOR = 97, + LEAKY_RELU = 98, + SQUARED_DIFFERENCE = 99, + MIRROR_PAD = 100, + ABS = 101, + SPLIT_V = 102, + UNIQUE = 103, + CEIL = 104, + REVERSE_V2 = 105, + ADD_N = 106, + GATHER_ND = 107, + COS = 108, + WHERE = 109, + RANK = 110, + ELU = 111, + REVERSE_SEQUENCE = 112, + MATRIX_DIAG = 113, + QUANTIZE = 114, + MATRIX_SET_DIAG = 115, + ROUND = 116, + HARD_SWISH = 117, + IF = 118, + WHILE = 119, + NON_MAX_SUPPRESSION_V4 = 120, + NON_MAX_SUPPRESSION_V5 = 121, + SCATTER_ND = 122, + SELECT_V2 = 123, + DENSIFY = 124, + SEGMENT_SUM = 125, + BATCH_MATMUL = 126 +} + + +// Options for the builtin operators. 
+union BuiltinOptions { + Conv2DOptions, + DepthwiseConv2DOptions, + ConcatEmbeddingsOptions, + LSHProjectionOptions, + Pool2DOptions, + SVDFOptions, + RNNOptions, + FullyConnectedOptions, + SoftmaxOptions, + ConcatenationOptions, + AddOptions, + L2NormOptions, + LocalResponseNormalizationOptions, + LSTMOptions, + ResizeBilinearOptions, + CallOptions, + ReshapeOptions, + SkipGramOptions, + SpaceToDepthOptions, + EmbeddingLookupSparseOptions, + MulOptions, + PadOptions, + GatherOptions, + BatchToSpaceNDOptions, + SpaceToBatchNDOptions, + TransposeOptions, + ReducerOptions, + SubOptions, + DivOptions, + SqueezeOptions, + SequenceRNNOptions, + StridedSliceOptions, + ExpOptions, + TopKV2Options, + SplitOptions, + LogSoftmaxOptions, + CastOptions, + DequantizeOptions, + MaximumMinimumOptions, + ArgMaxOptions, + LessOptions, + NegOptions, + PadV2Options, + GreaterOptions, + GreaterEqualOptions, + LessEqualOptions, + SelectOptions, + SliceOptions, + TransposeConvOptions, + SparseToDenseOptions, + TileOptions, + ExpandDimsOptions, + EqualOptions, + NotEqualOptions, + ShapeOptions, + PowOptions, + ArgMinOptions, + FakeQuantOptions, + PackOptions, + LogicalOrOptions, + OneHotOptions, + LogicalAndOptions, + LogicalNotOptions, + UnpackOptions, + FloorDivOptions, + SquareOptions, + ZerosLikeOptions, + FillOptions, + BidirectionalSequenceLSTMOptions, + BidirectionalSequenceRNNOptions, + UnidirectionalSequenceLSTMOptions, + FloorModOptions, + RangeOptions, + ResizeNearestNeighborOptions, + LeakyReluOptions, + SquaredDifferenceOptions, + MirrorPadOptions, + AbsOptions, + SplitVOptions, + UniqueOptions, + ReverseV2Options, + AddNOptions, + GatherNdOptions, + CosOptions, + WhereOptions, + RankOptions, + ReverseSequenceOptions, + MatrixDiagOptions, + QuantizeOptions, + MatrixSetDiagOptions, + HardSwishOptions, + IfOptions, + WhileOptions, + DepthToSpaceOptions, + NonMaxSuppressionV4Options, + NonMaxSuppressionV5Options, + ScatterNdOptions, + SelectV2Options, + DensifyOptions, + 
SegmentSumOptions, + BatchMatMulOptions +} + +enum Padding : byte { SAME, VALID } + +enum ActivationFunctionType : byte { + NONE = 0, + RELU = 1, + RELU_N1_TO_1 = 2, + RELU6 = 3, + TANH = 4, + SIGN_BIT = 5, +} + +table Conv2DOptions { + padding:Padding; + stride_w:int; + stride_h:int; + fused_activation_function:ActivationFunctionType; + dilation_w_factor:int = 1; + dilation_h_factor:int = 1; +} + +table Pool2DOptions { + padding:Padding; + stride_w:int; + stride_h:int; + filter_width:int; + filter_height:int; + fused_activation_function:ActivationFunctionType; +} + +table DepthwiseConv2DOptions { + // Parameters for DepthwiseConv version 1 or above. + padding:Padding; + stride_w:int; + stride_h:int; + // `depth_multiplier` is redundant. It's used by CPU kernels in + // TensorFlow 2.0 or below, but ignored in versions above. + // See comments in lite/c/builtin_op_data.h for more details. + depth_multiplier:int; + fused_activation_function:ActivationFunctionType; + // Parameters for DepthwiseConv version 2 or above. + dilation_w_factor:int = 1; + dilation_h_factor:int = 1; +} + +table ConcatEmbeddingsOptions { + num_channels:int; + num_columns_per_channel:[int]; + embedding_dim_per_channel:[int]; // This could be inferred from parameters. +} + +enum LSHProjectionType: byte { + UNKNOWN = 0, + SPARSE = 1, + DENSE = 2, +} + +table LSHProjectionOptions { + type: LSHProjectionType; +} + +table SVDFOptions { + rank:int; + fused_activation_function:ActivationFunctionType; + // For weights-only quantization, use asymmetric quantization for non + // constant inputs at evaluation time. + asymmetric_quantize_inputs:bool; +} + +// An implementation of TensorFlow RNNCell. +table RNNOptions { + fused_activation_function:ActivationFunctionType; + asymmetric_quantize_inputs:bool; +} + +// An implementation of TensorFlow dynamic_rnn with RNNCell. 
+table SequenceRNNOptions { + time_major:bool; + fused_activation_function:ActivationFunctionType; + asymmetric_quantize_inputs:bool; +} + +// An implementation of TensorFlow bidirectional_dynamic_rnn with RNNCell. +table BidirectionalSequenceRNNOptions { + time_major:bool; + fused_activation_function:ActivationFunctionType; + merge_outputs: bool; + asymmetric_quantize_inputs:bool; +} + +enum FullyConnectedOptionsWeightsFormat: byte { + DEFAULT = 0, + SHUFFLED4x16INT8 = 1, +} + +// An implementation of TensorFlow fully_connected (a.k.a Dense) layer. +table FullyConnectedOptions { + // Parameters for FullyConnected version 1 or above. + fused_activation_function:ActivationFunctionType; + + // Parameters for FullyConnected version 2 or above. + weights_format:FullyConnectedOptionsWeightsFormat = DEFAULT; + + // Parameters for FullyConnected version 5 or above. + // If set to true, then the number of dimension is preserved. Furthermore, + // all but the last dimension of the input and output shapes will be equal. + keep_num_dims: bool; + + // Parameters for FullyConnected version 7 or above. + // If set to true, then weights-only op will use asymmetric quantization for + // inputs. + asymmetric_quantize_inputs: bool; +} + +table SoftmaxOptions { + beta: float; +} + +// An implementation of TensorFlow concat. +table ConcatenationOptions { + axis:int; + fused_activation_function:ActivationFunctionType; +} + +table AddOptions { + fused_activation_function:ActivationFunctionType; +} + +table MulOptions { + fused_activation_function:ActivationFunctionType; +} + +table L2NormOptions { + fused_activation_function:ActivationFunctionType; +} + +table LocalResponseNormalizationOptions { + radius:int; + bias:float; + alpha:float; + beta:float; +} + +enum LSTMKernelType : byte { + // Full LSTM kernel which supports peephole and projection. + FULL = 0, + // Basic LSTM kernels. Equivalent to TensorFlow BasicLSTMCell. 
+ BASIC = 1, +} + +// An implementation of TensorFlow LSTMCell and CoupledInputForgetGateLSTMCell +table LSTMOptions { + // Parameters for LSTM version 1 or above. + fused_activation_function:ActivationFunctionType; + cell_clip: float; // Optional, 0.0 means no clipping + proj_clip: float; // Optional, 0.0 means no clipping + + // Parameters for LSTM version 2 or above. + // Basic kernel is only supported in version 2 or above. + kernel_type: LSTMKernelType = FULL; + + // Parameters for LSTM version 4 or above. + asymmetric_quantize_inputs: bool; +} + +// An implementation of TensorFlow dynamic_rnn with LSTMCell. +table UnidirectionalSequenceLSTMOptions { + fused_activation_function:ActivationFunctionType; + cell_clip: float; // Optional, 0.0 means no clipping + proj_clip: float; // Optional, 0.0 means no clipping + + // If true then first dimension is sequence, otherwise batch. + time_major:bool; + + // Parameter for Unidirectional Sequence LSTM version 4. + asymmetric_quantize_inputs:bool; +} + +table BidirectionalSequenceLSTMOptions { + // Parameters supported by version 1: + fused_activation_function:ActivationFunctionType; + cell_clip: float; // Optional, 0.0 means no clipping + proj_clip: float; // Optional, 0.0 means no clipping + + // If true, store the outputs of both directions into the first output. + merge_outputs: bool; + + // Parameters supported by version 2: + // If true then first dimension is sequence, otherwise batch. + // Version 1 implementations assumed time_major to be true, so this default + // value should never change. + time_major: bool = true; + + // Parameters for version 3 or above. + asymmetric_quantize_inputs:bool; +} + +table ResizeBilinearOptions { + new_height: int (deprecated); + new_width: int (deprecated); + align_corners: bool; + half_pixel_centers: bool; +} + +table ResizeNearestNeighborOptions { + align_corners: bool; +} + +// A call operation options +table CallOptions { + // The subgraph index that needs to be called. 
+ subgraph:uint; +} + +table PadOptions { +} + +table PadV2Options { +} + +table ReshapeOptions { + new_shape:[int]; +} + +table SpaceToBatchNDOptions { +} + +table BatchToSpaceNDOptions { +} + +table SkipGramOptions { + ngram_size: int; + max_skip_size: int; + include_all_ngrams: bool; +} + +table SpaceToDepthOptions { + block_size: int; +} + +table DepthToSpaceOptions { + block_size: int; +} + +table SubOptions { + fused_activation_function:ActivationFunctionType; +} + +table DivOptions { + fused_activation_function:ActivationFunctionType; +} + +table TopKV2Options { +} + +enum CombinerType : byte { + SUM = 0, + MEAN = 1, + SQRTN = 2, +} + +table EmbeddingLookupSparseOptions { + combiner:CombinerType; +} + +table GatherOptions { + axis: int; +} + +table TransposeOptions { +} + +table ExpOptions { +} + +table CosOptions { +} + +table ReducerOptions { + keep_dims: bool; +} + +table SqueezeOptions { + squeeze_dims:[int]; +} + +table SplitOptions { + num_splits: int; +} + +table SplitVOptions { + num_splits: int; +} + +table StridedSliceOptions { + begin_mask: int; + end_mask: int; + ellipsis_mask: int; + new_axis_mask: int; + shrink_axis_mask: int; +} + +table LogSoftmaxOptions { +} + +table CastOptions { + in_data_type: TensorType; + out_data_type: TensorType; +} + +table DequantizeOptions { +} + +table MaximumMinimumOptions { +} + +table TileOptions { +} + +table ArgMaxOptions { + output_type : TensorType; +} + +table ArgMinOptions { + output_type : TensorType; +} + +table GreaterOptions { +} + +table GreaterEqualOptions { +} + +table LessOptions { +} + +table LessEqualOptions { +} + +table NegOptions { +} + +table SelectOptions { +} + +table SliceOptions { +} + +table TransposeConvOptions { + padding:Padding; + stride_w:int; + stride_h:int; +} + +table ExpandDimsOptions { +} + +table SparseToDenseOptions { + validate_indices:bool; +} + +table EqualOptions { +} + +table NotEqualOptions { +} + +table ShapeOptions { + // Optional output type of the operation (int32 
or int64). Defaults to int32. + out_type : TensorType; +} + +table RankOptions { +} + +table PowOptions { +} + +table FakeQuantOptions { + // Parameters supported by version 1: + min:float; + max:float; + num_bits:int; + + // Parameters supported by version 2: + narrow_range:bool; +} + +table PackOptions { + values_count:int; + axis:int; +} + +table LogicalOrOptions { +} + +table OneHotOptions { + axis:int; +} + +table AbsOptions { +} + + +table HardSwishOptions { +} + +table LogicalAndOptions { +} + +table LogicalNotOptions { +} + +table UnpackOptions { + num:int; + axis:int; +} + +table FloorDivOptions { +} + +table SquareOptions { +} + +table ZerosLikeOptions { +} + +table FillOptions { +} + +table FloorModOptions { +} + +table RangeOptions { +} + +table LeakyReluOptions { + alpha:float; +} + +table SquaredDifferenceOptions { +} + +enum MirrorPadMode : byte { + // Doesn't include borders. + REFLECT = 0, + // Includes borders. + SYMMETRIC = 1, +} + +table MirrorPadOptions { + mode:MirrorPadMode; +} + +table UniqueOptions { + idx_out_type:TensorType = INT32; +} + +table ReverseV2Options { +} + +table AddNOptions { +} + +table GatherNdOptions { +} + +table WhereOptions { +} + +table ReverseSequenceOptions { + seq_dim:int; + batch_dim:int = 0; +} + +table MatrixDiagOptions { +} + +table QuantizeOptions { +} + +table MatrixSetDiagOptions { +} + +table IfOptions { + then_subgraph_index:int; + else_subgraph_index:int; +} + +table WhileOptions { + cond_subgraph_index:int; + body_subgraph_index:int; +} + +table NonMaxSuppressionV4Options { +} + +table NonMaxSuppressionV5Options { +} + +table ScatterNdOptions { +} + +table SelectV2Options { +} + +table DensifyOptions { +} + +table SegmentSumOptions { +} + +table BatchMatMulOptions { + adjoint_lhs:bool; + adjoint_rhs:bool; +} + +// An OperatorCode can be an enum value (BuiltinOperator) if the operator is a +// builtin, or a string if the operator is custom. 
+table OperatorCode { + builtin_code:BuiltinOperator; + custom_code:string; + + // The version of the operator. The version need to be bumped whenever new + // parameters are introduced into an op. + version:int = 1; +} + +enum CustomOptionsFormat : byte { + FLEXBUFFERS = 0, +} + +// An operator takes tensors as inputs and outputs. The type of operation being +// performed is determined by an index into the list of valid OperatorCodes, +// while the specifics of each operations is configured using builtin_options +// or custom_options. +table Operator { + // Index into the operator_codes array. Using an integer here avoids + // complicate map lookups. + opcode_index:uint; + + // Optional input are indicated by -1. + inputs:[int]; + outputs:[int]; + + builtin_options:BuiltinOptions; + custom_options:[ubyte]; + custom_options_format:CustomOptionsFormat; + + // A list of booleans indicating the input tensors which are being mutated by + // this operator.(e.g. used by RNN and LSTM). + // For example, if the "inputs" array refers to 5 tensors and the second and + // fifth are mutable variables, then this list will contain + // [false, true, false, false, true]. + // + // If the list is empty, no variable is mutated in this operator. + // The list either has the same length as `inputs`, or is empty. + mutating_variable_inputs:[bool]; + + // A list of indices to the subgraph's "tensors" that are internal to an Op. + // Internal tensors are those that do not flow in or out of the operation, + // but instead are part of internal computation. As such, the operation's + // implementation may manage its memory more efficiently. They are needed + // however (i.e. not just an implementation detail) since they are part of the + // computation, which may require relevant metadata such as quantization + // parameters. + intermediates:[int]; +} + +// The root type, defining a subgraph, which typically represents an entire +// model. 
+table SubGraph { + // A list of all tensors used in this subgraph. + tensors:[Tensor]; + + // Indices of the tensors that are inputs into this subgraph. Note this is + // the list of non-static tensors that feed into the subgraph for inference. + inputs:[int]; + + // Indices of the tensors that are outputs out of this subgraph. Note this is + // the list of output tensors that are considered the product of the + // subgraph's inference. + outputs:[int]; + + // All operators, in execution order. + operators:[Operator]; + + // Name of this subgraph (used for debugging). + name:string; +} + +// Table of raw data buffers (used for constant tensors). Referenced by tensors +// by index. The generous alignment accommodates mmap-friendly data structures. +table Buffer { + data:[ubyte] (force_align: 16); +} + +table Metadata { + // A human readable string to uniquely identify a Metadata. + name:string; + // An index to the buffers table. + buffer:uint; +} + +table Model { + // Version of the schema. + version:uint; + + // A list of all operator codes used in this model. This is + // kept in order because operators carry an index into this + // vector. + operator_codes:[OperatorCode]; + + // All the subgraphs of the model. The 0th is assumed to be the main + // model. + subgraphs:[SubGraph]; + + // A description of the model. + description:string; + + // Buffers of the model. + // Note the 0th entry of this array must be an empty buffer (sentinel). + // This is a convention so that tensors without a buffer can provide 0 as + // their buffer. + buffers:[Buffer]; + + // Metadata about the model. Indirects into the existings buffers list. + // Deprecated, prefer to use metadata field. + metadata_buffer:[int]; + + // Metadata about the model. + metadata:[Metadata]; +} + +root_type Model; |