summaryrefslogtreecommitdiff
path: root/runtime/neurun/frontend
diff options
context:
space:
mode:
authorChunseok Lee <chunseok.lee@samsung.com>2020-04-23 14:45:49 +0900
committerChunseok Lee <chunseok.lee@samsung.com>2020-04-23 14:45:49 +0900
commite2ef8438a24f7c56a0744eb579a6e293ee2fbf8e (patch)
tree44a1a7951d168dd4370e13593ed03f4bc6d920c5 /runtime/neurun/frontend
parent302e6564a7a76109e1178207e44e45a58631c477 (diff)
downloadnnfw-e2ef8438a24f7c56a0744eb579a6e293ee2fbf8e.tar.gz
nnfw-e2ef8438a24f7c56a0744eb579a6e293ee2fbf8e.tar.bz2
nnfw-e2ef8438a24f7c56a0744eb579a6e293ee2fbf8e.zip
Imported Upstream version 1.4.0upstream/1.4.0submit/tizen/20200423.054851
Diffstat (limited to 'runtime/neurun/frontend')
-rw-r--r--runtime/neurun/frontend/CMakeLists.txt1
-rw-r--r--runtime/neurun/frontend/base_loader/CMakeLists.txt7
-rw-r--r--runtime/neurun/frontend/base_loader/include/base_loader.h1278
-rw-r--r--runtime/neurun/frontend/circle/CMakeLists.txt17
-rw-r--r--runtime/neurun/frontend/circle/include/circle_loader.h32
-rw-r--r--runtime/neurun/frontend/circle/src/circle_loader.cc116
-rw-r--r--runtime/neurun/frontend/circle/src/circle_schema_generated.h7546
-rw-r--r--runtime/neurun/frontend/nnapi/ANeuralNetworksModel.test.cc25
-rw-r--r--runtime/neurun/frontend/nnapi/CMakeLists.txt23
-rw-r--r--runtime/neurun/frontend/nnapi/compilation.cc110
-rw-r--r--runtime/neurun/frontend/nnapi/event.cc36
-rw-r--r--runtime/neurun/frontend/nnapi/execution.cc480
-rw-r--r--runtime/neurun/frontend/nnapi/memory.cc42
-rw-r--r--runtime/neurun/frontend/nnapi/model.cc411
-rw-r--r--runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksCompilation.cc42
-rw-r--r--runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksCompilation.h42
-rw-r--r--runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksEvent.cc43
-rw-r--r--runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksEvent.h44
-rw-r--r--runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc289
-rw-r--r--runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksExecution.h74
-rw-r--r--runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksMemory.cc46
-rw-r--r--runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksMemory.h39
-rw-r--r--runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksModel.cc268
-rw-r--r--runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksModel.h71
-rw-r--r--runtime/neurun/frontend/nnapi/wrapper/NNAPIConvert.cc100
-rw-r--r--runtime/neurun/frontend/nnapi/wrapper/NNAPIConvert.h78
-rw-r--r--runtime/neurun/frontend/nnapi/wrapper/OperationFactory.cc1680
-rw-r--r--runtime/neurun/frontend/nnapi/wrapper/OperationFactory.h60
-rw-r--r--runtime/neurun/frontend/tflite/CMakeLists.txt17
-rw-r--r--runtime/neurun/frontend/tflite/include/tflite_loader.h34
-rw-r--r--runtime/neurun/frontend/tflite/src/tflite_loader.cc105
-rw-r--r--runtime/neurun/frontend/tflite/src/tflite_schema_generated.h7275
-rw-r--r--runtime/neurun/frontend/tflite/tflite_schema.fbs795
33 files changed, 0 insertions, 21226 deletions
diff --git a/runtime/neurun/frontend/CMakeLists.txt b/runtime/neurun/frontend/CMakeLists.txt
deleted file mode 100644
index 5ea6cdadd..000000000
--- a/runtime/neurun/frontend/CMakeLists.txt
+++ /dev/null
@@ -1 +0,0 @@
-add_subdirectories()
diff --git a/runtime/neurun/frontend/base_loader/CMakeLists.txt b/runtime/neurun/frontend/base_loader/CMakeLists.txt
deleted file mode 100644
index 358fc2646..000000000
--- a/runtime/neurun/frontend/base_loader/CMakeLists.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-if(NOT BUILD_TFLITE_LOADER AND NOT BUILD_CIRCLE_LOADER)
- return()
-endif(NOT BUILD_TFLITE_LOADER AND NOT BUILD_CIRCLE_LOADER)
-
-add_library(base_loader INTERFACE)
-target_include_directories(base_loader INTERFACE ${CMAKE_CURRENT_SOURCE_DIR}/include)
-target_link_libraries(base_loader INTERFACE neurun_core nnfw_lib_cpp14)
diff --git a/runtime/neurun/frontend/base_loader/include/base_loader.h b/runtime/neurun/frontend/base_loader/include/base_loader.h
deleted file mode 100644
index ae1562f6c..000000000
--- a/runtime/neurun/frontend/base_loader/include/base_loader.h
+++ /dev/null
@@ -1,1278 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __BASE_LOADER_BASE_LOADER_H__
-#define __BASE_LOADER_BASE_LOADER_H__
-
-#include "ir/Graph.h"
-#include "ir/Operations.Include.h"
-
-#include <map>
-#include <cpp14/memory.h>
-#include <fstream>
-#include <limits>
-
-namespace neurun
-{
-namespace base_loader
-{
-
-template <typename LoaderDomain, typename SpecificLoader> class BaseLoader
-{
- using Verifier = typename LoaderDomain::Verifier;
- using ActivationFunctionType = typename LoaderDomain::ActivationFunctionType;
- using Buffer = typename LoaderDomain::Buffer;
- using BuiltinOperator = typename LoaderDomain::BuiltinOperator;
- using CustomOptionsFormat = typename LoaderDomain::CustomOptionsFormat;
- using Model = typename LoaderDomain::Model;
- using Operator = typename LoaderDomain::Operator;
- using Padding = typename LoaderDomain::Padding;
- using Pool2DOptions = typename LoaderDomain::Pool2DOptions;
- using SubGraph = typename LoaderDomain::SubGraph;
- using Tensor = typename LoaderDomain::Tensor;
- using TensorType = typename LoaderDomain::TensorType;
-
-public:
- /**
- * @brief Construct a new Loader object
- *
- * @param graph reference on graph
- */
- explicit BaseLoader(ir::Graph &graph) : _graph(graph), _model{nullptr} {}
-
- /**
- * @brief Load a model from file
- *
- * @param file_path
- */
- void loadFromFile(const char *file_path);
-
-protected:
- ~BaseLoader() = default;
-
- void loadModel();
-
- // Helper functions
- ir::Activation convertActivation(ActivationFunctionType type);
- ir::DataType tensorTypeToDataType(TensorType type);
-
- // Create operands form tflite::Tensor
- ir::OperandIndex loadOperand(const Tensor *tensor);
- void loadOperationIO(const Operator *op, ir::OperandIndexSequence &inputs,
- ir::OperandIndexSequence &outputs);
- // Create operations from Operator
- void loadOperation(const Operator *op);
- // Load Strides and Paddings from options to param
- template <typename Param, typename OptionsType>
- void loadStridesAndPaddings(Param &param, const OptionsType *options);
- // Load Pool2D param
- template <typename Param> void loadPool2D(Param &param, const Pool2DOptions *options);
-
- // Operations
- void loadConv2D(const Operator *op);
- void loadDepthwiseConv2D(const Operator *op);
- void loadTransposeConv(const Operator *op);
- void loadAvgPool2D(const Operator *op);
- void loadReshape(const Operator *op);
- void loadSoftmax(const Operator *op);
- void loadMaxPool2D(const Operator *op);
- void loadConcatenation(const Operator *op);
- void loadInstanceNorm(const Operator *op);
- void loadFC(const Operator *op);
- void loadAdd(const Operator *op);
- void loadSub(const Operator *op);
- void loadMul(const Operator *op);
- void loadDiv(const Operator *op);
- void loadPack(const Operator *op);
- void loadRelu(const Operator *op);
- void loadRelu6(const Operator *op);
- void loadResizeBilinear(const Operator *op);
- void loadRsqrt(const Operator *op);
- void loadSqrt(const Operator *op);
- void loadSquaredDifference(const Operator *op);
- void loadTanh(const Operator *op);
- void loadTranspose(const Operator *op);
- void loadMean(const Operator *op);
- void loadReduceMax(const Operator *op);
- void loadPad(const Operator *op);
- void loadLogistic(const Operator *op);
- void loadExp(const Operator *op);
- void loadGather(const Operator *op);
- void loadCustom(const Operator *op);
- void loadSpaceToBatchND(const Operator *op);
- void loadBatchToSpaceND(const Operator *op);
- void loadReduceSum(const Operator *op);
- void loadSqueeze(const Operator *op);
- void loadPrelu(const Operator *op);
- void loadSplit(const Operator *op);
- void loadSlice(const Operator *op);
- void loadStridedSlice(const Operator *op);
- void loadUnpack(const Operator *op);
- void loadMinimum(const Operator *op);
- void loadMaximum(const Operator *op);
- void loadCast(const Operator *op);
- void loadComparison(const Operator *op);
- void loadOneHot(const Operator *op);
-
-protected:
- // Buffer for loading (if needed)
- std::vector<char> _buffer;
- // Reference on loadable Graph
- ir::Graph &_graph;
- const Model *_model;
- // Maps Tensor indices to neurun Operands.
- std::vector<ir::OperandIndex> _tensor_to_operand;
- // Verifier
- std::unique_ptr<Verifier> _verifier;
-};
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::BaseLoader::loadFromFile(const char *file_path)
-{
- std::ifstream stream(file_path, std::fstream::in | std::fstream::binary);
-
- if (!stream)
- {
- std::string msg = "Failed to open file `";
- msg += file_path;
- msg += "`";
- throw std::runtime_error{msg};
- }
-
- stream.seekg(0, stream.end);
- auto size = stream.tellg();
- stream.seekg(0, stream.beg);
-
- _buffer.resize(size);
- stream.read(_buffer.data(), size);
-
- stream.close();
-
- // Prepare verifier
- _verifier = nnfw::cpp14::make_unique<Verifier>(
- reinterpret_cast<const std::uint8_t *>(_buffer.data()), _buffer.size());
-
- loadModel();
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-ir::Activation BaseLoader<LoaderDomain, SpecificLoader>::BaseLoader::convertActivation(
- const ActivationFunctionType type)
-{
- switch (type)
- {
- case ActivationFunctionType::ActivationFunctionType_NONE:
- return ir::Activation::NONE;
- case ActivationFunctionType::ActivationFunctionType_RELU:
- return ir::Activation::RELU;
- case ActivationFunctionType::ActivationFunctionType_RELU_N1_TO_1:
- return ir::Activation::RELU1;
- case ActivationFunctionType::ActivationFunctionType_RELU6:
- return ir::Activation::RELU6;
- case ActivationFunctionType::ActivationFunctionType_TANH:
- return ir::Activation::TANH;
- default:
- throw std::runtime_error(std::string("Unsupported activation type: ")
- .append(EnumNameActivationFunctionType(type)));
- }
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-ir::DataType
-BaseLoader<LoaderDomain, SpecificLoader>::BaseLoader::tensorTypeToDataType(const TensorType type)
-{
- switch (type)
- {
- case TensorType::TensorType_FLOAT32:
- return ir::DataType::FLOAT32;
- case TensorType::TensorType_INT32:
- return ir::DataType::INT32;
- case TensorType::TensorType_BOOL:
- return ir::DataType::BOOL8;
- case TensorType::TensorType_UINT8:
- return ir::DataType::UINT8;
- default:
- throw std::runtime_error(
- std::string("Unsupported tensor type: ").append(EnumNameTensorType(type)));
- }
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-ir::OperandIndex BaseLoader<LoaderDomain, SpecificLoader>::loadOperand(const Tensor *tensor)
-{
- ir::Shape shape;
- // Shape
- const auto *tensor_shape = tensor->shape();
- for (const auto &dim : *tensor_shape)
- {
- shape.append(dim);
- }
- // Type
- ir::DataType data_type = tensorTypeToDataType(tensor->type());
- // Quantization
- auto q_params = tensor->quantization();
- float scale = 0.0;
- long zero_point = 0;
- if (q_params != nullptr)
- {
- if (q_params->scale())
- {
- if (q_params->scale()->size() != 1)
- {
- throw std::runtime_error("Only 1 scale for a tensor is supported.");
- }
- scale = q_params->scale()->Get(0);
- }
-
- if (q_params->zero_point())
- {
- if (q_params->zero_point()->size() != 1)
- {
- throw std::runtime_error("Only 1 zero_point value for a tensor is supported.");
- }
- zero_point = q_params->zero_point()->Get(0);
- // zero_point is long while TypeInfo.zero_point is defined as int32_t.
- assert(zero_point >= std::numeric_limits<int32_t>::min());
- assert(zero_point <= std::numeric_limits<int32_t>::max());
- }
- auto details = q_params->details_as_CustomQuantization();
- if (details != nullptr)
- throw std::runtime_error("Custom Quantization is not supported");
-
- if (q_params->scale() && q_params->zero_point())
- {
- data_type = ir::DataType::QUANT8_ASYMM;
- }
- }
- // Create TypeInfo
- ir::TypeInfo type_info(data_type, scale, zero_point);
- // Create operand
- const auto operand_index = _graph.addOperand(shape, type_info);
-
- // Constant tensors are indicated by non-empty data.
- const auto *data = _model->buffers()->Get(tensor->buffer())->data();
- if (data != nullptr)
- {
- auto ptr = nnfw::cpp14::make_unique<ir::CachedData>(data->data(), data->size());
- _graph.setOperandValue(operand_index, std::move(ptr));
- }
-
- // Name unused
- // auto name = tensor->name();
- // Variablie
- if (tensor->is_variable())
- throw std::runtime_error("Variable tensor not supported!");
-
- return operand_index;
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadOperationIO(const Operator *op,
- ir::OperandIndexSequence &inputs,
- ir::OperandIndexSequence &outputs)
-{
- for (const std::int32_t idx : *op->inputs())
- {
- inputs.append(_tensor_to_operand[idx]);
- }
-
- for (const std::int32_t idx : *op->outputs())
- {
- outputs.append(_tensor_to_operand[idx]);
- }
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-template <typename Param, typename OptionsType>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadStridesAndPaddings(Param &param,
- const OptionsType *options)
-{
- // Strides
- param.stride.vertical = options->stride_w();
- param.stride.horizontal = options->stride_h();
- // Paddings
- if (options->padding() == Padding::Padding_SAME)
- param.padding.type = ir::PaddingType::SAME;
- if (options->padding() == Padding::Padding_VALID)
- param.padding.type = ir::PaddingType::VALID;
- // param paddings indexes unused
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-template <typename Param>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadPool2D(Param &param,
- const Pool2DOptions *options)
-{
- // Strides and Paddings
- loadStridesAndPaddings(param, options);
- // Filter width and height
- // Strides
- param.kw = options->filter_width();
- param.kh = options->filter_height();
- // Activation
- param.activation = convertActivation(options->fused_activation_function());
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadConv2D(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- ir::operation::Conv2D::Param param;
- const auto *options = op->builtin_options_as_Conv2DOptions();
- param.activation = convertActivation(options->fused_activation_function());
- loadStridesAndPaddings(param, options);
- // Dilation h/w factor unused
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Conv2D(inputs, outputs, param));
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadDepthwiseConv2D(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- ir::operation::DepthwiseConv2D::Param param;
- const auto *options = op->builtin_options_as_DepthwiseConv2DOptions();
- param.activation = convertActivation(options->fused_activation_function());
- loadStridesAndPaddings(param, options);
- // Multiplier
- param.multiplier = options->depth_multiplier();
- // Dilation h/w factor unused
- std::unique_ptr<ir::Operation> new_op(new ir::operation::DepthwiseConv2D(inputs, outputs, param));
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadTransposeConv(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- ir::operation::TransposeConv::Param param;
- const auto *options = op->builtin_options_as_TransposeConvOptions();
- loadStridesAndPaddings(param, options);
- std::unique_ptr<ir::Operation> new_op(new ir::operation::TransposeConv(inputs, outputs, param));
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadAvgPool2D(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- ir::operation::AvgPool2D::Param param;
- const auto *options = op->builtin_options_as_Pool2DOptions();
-
- loadPool2D(param, options);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::AvgPool2D(inputs, outputs, param));
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadReshape(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- // const auto *options = op->builtin_options_as_ReshapeOptions();
- // No params
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Reshape(inputs, outputs));
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadSoftmax(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- ir::operation::Softmax::Param param;
- const auto *options = op->builtin_options_as_SoftmaxOptions();
- // Beta
- param.beta = options->beta();
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Softmax(inputs, outputs, param));
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadMaxPool2D(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- ir::operation::MaxPool2D::Param param;
- const auto *options = op->builtin_options_as_Pool2DOptions();
-
- loadPool2D(param, options);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::MaxPool2D(inputs, outputs, param));
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadConcatenation(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- ir::operation::Concat::Param param;
- const auto *options = op->builtin_options_as_ConcatenationOptions();
- // Axis
- param.axis = options->axis();
- param.rank = _graph.operands().at(outputs.at(0)).shape().rank();
- // activation unused
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Concat(inputs, outputs, param));
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadInstanceNorm(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- ir::operation::InstanceNorm::Param param;
- const auto *options = op->builtin_options_as_InstanceNormOptions();
-
- param.activation = convertActivation(options->fused_activation_function());
- // Use default value 1e-5 if value of epsilon is zero
- param.epsilon = options->epsilon() == 0.f ? 1e-5 : options->epsilon();
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::InstanceNorm(inputs, outputs, param));
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadFC(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- const auto &input_operand = _graph.operands().at(inputs.at(ir::operation::FullyConnected::INPUT));
- auto &weights_operand = _graph.operands().at(inputs.at(ir::operation::FullyConnected::WEIGHT));
- if (input_operand.typeInfo().type() == ir::DataType::FLOAT32 &&
- weights_operand.typeInfo().type() == ir::DataType::QUANT8_ASYMM)
- {
- weights_operand.type(ir::DataType::QUANT8_SYMM);
- }
-
- ir::operation::FullyConnected::Param param;
- const auto *options = op->builtin_options_as_FullyConnectedOptions();
-
- param.activation = convertActivation(options->fused_activation_function());
- // weights_format unused
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::FullyConnected(inputs, outputs, param));
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadAdd(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- ir::operation::Add::Param param;
- const auto *options = op->builtin_options_as_AddOptions();
-
- param.activation = convertActivation(options->fused_activation_function());
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Add(inputs, outputs, param));
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadSub(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- ir::operation::Sub::Param param;
- const auto *options = op->builtin_options_as_SubOptions();
-
- param.activation = convertActivation(options->fused_activation_function());
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Sub(inputs, outputs, param));
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadMul(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- ir::operation::Mul::Param param;
- const auto *options = op->builtin_options_as_MulOptions();
-
- param.activation = convertActivation(options->fused_activation_function());
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Mul(inputs, outputs, param));
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadDiv(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- ir::operation::Div::Param param;
- const auto *options = op->builtin_options_as_DivOptions();
-
- param.activation = convertActivation(options->fused_activation_function());
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Div(inputs, outputs, param));
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadPack(const Operator *op)
-{
- // This runtime_error will be removed if the one of backend supports this operation
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- ir::operation::Pack::Param param;
- const auto *options = op->builtin_options_as_PackOptions();
- param.num = options->values_count();
- param.axis = options->axis();
- param.rank = _graph.operands().at(outputs.at(0)).shape().rank();
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Pack(inputs, outputs, param));
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadRelu(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::ReLU(inputs, outputs));
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadRelu6(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::ReLU6(inputs, outputs));
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadResizeBilinear(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
- auto input = inputs.at(0);
- auto size = inputs.at(1);
-
- // FIXME Handle ResizeBilinearOptions.
- if (!_graph.operands().at(size).isConstant())
- throw std::runtime_error("ResizeBilinear: non-constant 'size' is not supported.");
-
- std::vector<std::int32_t> size_v = _graph.operands().at(size).template asVector<std::int32_t>();
-
- ir::operation::ResizeBilinear::Param param;
- param.height_out = size_v[0];
- param.width_out = size_v[1];
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::ResizeBilinear({input}, outputs, param));
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadRsqrt(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::RSQRT(inputs, outputs));
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadSqrt(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::SQRT(inputs, outputs));
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadSquaredDifference(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::SquaredDifference(inputs, outputs));
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadTanh(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Tanh(inputs, outputs));
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadTranspose(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
- auto input = inputs.at(0);
- auto perm = inputs.at(1);
-
- if (!_graph.operands().at(perm).isConstant())
- throw std::runtime_error("Transpose: non-constant 'perm' is not supported.");
-
- ir::operation::Transpose::Param param;
- param.perm = _graph.operands().at(perm).template asVector<int>();
- param.rank = _graph.operands().at(inputs.at(0)).shape().rank();
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Transpose({input}, outputs, param));
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadMean(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
- auto input = inputs.at(0);
- auto axes = inputs.at(1);
-
- if (!_graph.operands().at(axes).isConstant())
- throw std::runtime_error("Mean: non-constant 'axes' is not supported.");
-
- ir::operation::Mean::Param param;
- param.axes = _graph.operands().at(axes).template asVector<int>();
- param.keep_dims = op->builtin_options_as_ReducerOptions()->keep_dims();
- param.rank = _graph.operands().at(inputs.at(0)).shape().rank();
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Mean({input}, outputs, param));
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadReduceMax(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
- auto input = inputs.at(0);
- auto axes = inputs.at(1);
-
- // FIXME Handle ReducerOptions.
- if (!_graph.operands().at(axes).isConstant())
- throw std::runtime_error("ReduceSum: non-constant 'axes' is not supported.");
-
- ir::operation::ReduceMax::Param param;
- param.axes = _graph.operands().at(axes).template asVector<int>();
- param.keep_dims = op->builtin_options_as_ReducerOptions()->keep_dims();
- param.rank = _graph.operands().at(inputs.at(0)).shape().rank();
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::ReduceMax({input}, outputs, param));
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadPad(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- ir::operation::Pad::Param param;
- param.rank = _graph.operands().at(inputs.at(0)).shape().rank();
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Pad(inputs, outputs, param));
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadLogistic(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Logistic(inputs, outputs));
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadExp(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Exp(inputs, outputs));
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadGather(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
- ir::operation::Gather::Param param;
- param.axis = op->builtin_options_as_GatherOptions()->axis();
- param.rank = _graph.operands().at(inputs.at(0)).shape().rank();
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Gather(inputs, outputs, param));
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadSpaceToBatchND(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op{new ir::operation::SpaceToBatchND{inputs, outputs}};
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadBatchToSpaceND(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
- auto input = inputs.at(0);
- auto block_shape = inputs.at(1);
- auto crops = inputs.at(2);
-
- if (!_graph.operands().at(crops).isConstant())
- throw std::runtime_error("BatchToSpaceND: non-constant 'crops' is not supported.");
-
- std::vector<std::int32_t> crops_v = _graph.operands().at(crops).template asVector<std::int32_t>();
- assert(crops_v.size() == 4);
- if (crops_v != std::vector<std::int32_t>{0, 0, 0, 0})
- throw std::runtime_error("BatchToSpaceND: 'crops' other than {0, 0, 0, 0} is not supported.");
-
- std::unique_ptr<ir::Operation> new_op{
- new ir::operation::BatchToSpaceND{{input, block_shape}, outputs}};
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadReduceSum(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
- auto input = inputs.at(0);
- auto axes = inputs.at(1);
-
- // FIXME Handle ReducerOptions.
- if (!_graph.operands().at(axes).isConstant())
- throw std::runtime_error("ReduceSum: non-constant 'axes' is not supported.");
-
- ir::operation::ReduceSum::Param param;
- param.axes = _graph.operands().at(axes).template asVector<int>();
- param.keep_dims = op->builtin_options_as_ReducerOptions()->keep_dims();
- param.rank = _graph.operands().at(inputs.at(0)).shape().rank();
-
- std::unique_ptr<ir::Operation> new_op{new ir::operation::ReduceSum{{input}, outputs, param}};
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadCustom(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- auto *op_code = _model->operator_codes()->Get(op->opcode_index());
- auto custom_op_id = op_code->custom_code()->str();
-
- auto constraint = ir::OperandConstraint::createExact(inputs.size());
-
- assert(op->custom_options_format() == CustomOptionsFormat::CustomOptionsFormat_FLEXBUFFERS &&
- "Unsupported custom operation options format");
-
- size_t custom_op_data_size = op->custom_options()->size();
- auto custom_op_data = new char[custom_op_data_size];
- std::copy(op->custom_options()->begin(), op->custom_options()->end(), custom_op_data);
-
- ir::operation::Custom::Userdata userdata{};
- userdata.data = custom_op_data;
- userdata.size = custom_op_data_size;
-
- auto new_op = nnfw::cpp14::make_unique<ir::operation::Custom>(constraint, inputs, outputs,
- custom_op_id, userdata);
-
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadSqueeze(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- ir::operation::Squeeze::Param param{};
- const auto *options = op->builtin_options_as_SqueezeOptions();
- const auto *dims = options->squeeze_dims();
- if (dims)
- {
- if (dims->Length() > sizeof(param.dims) / sizeof(param.dims[0]))
- throw std::runtime_error("Squeeze: 'param.ndims' is out of range.");
- param.ndim = dims->Length();
- for (int i = 0; i < param.ndim; ++i)
- param.dims[i] = dims->Get(i);
- }
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Squeeze(inputs, outputs, param));
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadPrelu(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::PReLU(inputs, outputs));
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadSplit(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
- // Notice : input order is strange for tflite split
- auto input = inputs.at(1);
- auto axis = inputs.at(0);
-
- // FIXME Handle SplitOptions.
- if (!_graph.operands().at(axis).isConstant())
- throw std::runtime_error("Split: non-constant 'axis' is not supported.");
-
- ir::operation::Split::Param param{};
- param.axis = _graph.operands().at(axis).template asScalar<int>();
- const auto *options = op->builtin_options_as_SplitOptions();
- param.num_splits = options->num_splits();
- param.rank = _graph.operands().at(inputs.at(0)).shape().rank();
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Split({input}, outputs, param));
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadSlice(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- ir::operation::Slice::Param param;
- param.rank = _graph.operands().at(inputs.at(0)).shape().rank();
-
- std::unique_ptr<ir::Operation> new_op{new ir::operation::Slice{inputs, outputs, param}};
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadStridedSlice(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- ir::operation::StridedSlice::Param param;
-
- const auto *options = op->builtin_options_as_StridedSliceOptions();
- param.begin_mask = options->begin_mask();
- param.end_mask = options->end_mask();
- param.shrink_axis_mask = options->shrink_axis_mask();
- param.rank = _graph.operands().at(inputs.at(0)).shape().rank();
-
- std::unique_ptr<ir::Operation> new_op{new ir::operation::StridedSlice{inputs, outputs, param}};
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadUnpack(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- ir::operation::Unpack::Param param;
- const auto *options = op->builtin_options_as_UnpackOptions();
- param.num = options->num();
- param.axis = options->axis();
- param.rank = _graph.operands().at(inputs.at(0)).shape().rank();
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Unpack(inputs, outputs, param));
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadMinimum(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Min(inputs, outputs));
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadMaximum(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Max(inputs, outputs));
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadCast(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Cast(inputs, outputs));
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadComparison(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- ir::operation::Comparison::Param param;
-
- const auto builtin_op = _model->operator_codes()->Get(op->opcode_index())->builtin_code();
-
- switch (builtin_op)
- {
- case BuiltinOperator::BuiltinOperator_EQUAL:
- param.comparison_type = ir::operation::Comparison::ComparisonType::Equal;
- break;
- case BuiltinOperator::BuiltinOperator_GREATER_EQUAL:
- param.comparison_type = ir::operation::Comparison::ComparisonType::GreaterEqual;
- break;
- case BuiltinOperator::BuiltinOperator_GREATER:
- param.comparison_type = ir::operation::Comparison::ComparisonType::Greater;
- break;
- case BuiltinOperator::BuiltinOperator_LESS_EQUAL:
- param.comparison_type = ir::operation::Comparison::ComparisonType::LessEqual;
- break;
- case BuiltinOperator::BuiltinOperator_LESS:
- param.comparison_type = ir::operation::Comparison::ComparisonType::Less;
- break;
- default:
- throw std::runtime_error(
- std::string("Unsupported operation: ").append(EnumNameBuiltinOperator(builtin_op)));
- }
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::Comparison(inputs, outputs, param));
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadOneHot(const Operator *op)
-{
- ir::OperandIndexSequence inputs;
- ir::OperandIndexSequence outputs;
-
- loadOperationIO(op, inputs, outputs);
-
- ir::operation::OneHot::Param param{};
- const auto *options = op->builtin_options_as_OneHotOptions();
- const auto axis = options->axis();
- const auto &indices = _graph.operands().at(inputs.at(ir::operation::OneHot::INDICES));
- auto indices_dims = indices.shape().rank();
- param.axis = (axis == -1) ? indices_dims : axis;
-
- std::unique_ptr<ir::Operation> new_op(new ir::operation::OneHot(inputs, outputs, param));
- _graph.addOperation(std::move(new_op));
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadOperation(const Operator *op)
-{
- const auto builtin_op = _model->operator_codes()->Get(op->opcode_index())->builtin_code();
-
- switch (builtin_op)
- {
- case BuiltinOperator::BuiltinOperator_CONV_2D:
- loadConv2D(op);
- return;
- case BuiltinOperator::BuiltinOperator_AVERAGE_POOL_2D:
- loadAvgPool2D(op);
- return;
- case BuiltinOperator::BuiltinOperator_DEPTHWISE_CONV_2D:
- loadDepthwiseConv2D(op);
- return;
- case BuiltinOperator::BuiltinOperator_TRANSPOSE_CONV:
- loadTransposeConv(op);
- return;
- case BuiltinOperator::BuiltinOperator_RESHAPE:
- loadReshape(op);
- return;
- case BuiltinOperator::BuiltinOperator_SOFTMAX:
- loadSoftmax(op);
- return;
- case BuiltinOperator::BuiltinOperator_MAX_POOL_2D:
- loadMaxPool2D(op);
- return;
- case BuiltinOperator::BuiltinOperator_CONCATENATION:
- loadConcatenation(op);
- return;
- case BuiltinOperator::BuiltinOperator_FULLY_CONNECTED:
- loadFC(op);
- return;
- case BuiltinOperator::BuiltinOperator_ADD:
- loadAdd(op);
- return;
- case BuiltinOperator::BuiltinOperator_SUB:
- loadSub(op);
- return;
- case BuiltinOperator::BuiltinOperator_MUL:
- loadMul(op);
- return;
- case BuiltinOperator::BuiltinOperator_DIV:
- loadDiv(op);
- return;
- case BuiltinOperator::BuiltinOperator_PACK:
- loadPack(op);
- return;
- case BuiltinOperator::BuiltinOperator_RELU:
- loadRelu(op);
- return;
- case BuiltinOperator::BuiltinOperator_RELU6:
- loadRelu6(op);
- return;
- case BuiltinOperator::BuiltinOperator_RESIZE_BILINEAR:
- loadResizeBilinear(op);
- return;
- case BuiltinOperator::BuiltinOperator_RSQRT:
- loadRsqrt(op);
- return;
- case BuiltinOperator::BuiltinOperator_SQRT:
- loadSqrt(op);
- return;
- case BuiltinOperator::BuiltinOperator_SQUARED_DIFFERENCE:
- loadSquaredDifference(op);
- return;
- case BuiltinOperator::BuiltinOperator_TANH:
- loadTanh(op);
- return;
- case BuiltinOperator::BuiltinOperator_TRANSPOSE:
- loadTranspose(op);
- return;
- case BuiltinOperator::BuiltinOperator_MEAN:
- loadMean(op);
- return;
- case BuiltinOperator::BuiltinOperator_REDUCE_MAX:
- loadReduceMax(op);
- return;
- case BuiltinOperator::BuiltinOperator_PAD:
- loadPad(op);
- return;
- case BuiltinOperator::BuiltinOperator_LOGISTIC:
- loadLogistic(op);
- return;
- case BuiltinOperator::BuiltinOperator_EXP:
- loadExp(op);
- return;
- case BuiltinOperator::BuiltinOperator_GATHER:
- loadGather(op);
- return;
- case BuiltinOperator::BuiltinOperator_SPACE_TO_BATCH_ND:
- loadSpaceToBatchND(op);
- return;
- case BuiltinOperator::BuiltinOperator_BATCH_TO_SPACE_ND:
- loadBatchToSpaceND(op);
- return;
- case BuiltinOperator::BuiltinOperator_SUM:
- loadReduceSum(op);
- return;
- case BuiltinOperator::BuiltinOperator_CUSTOM:
- loadCustom(op);
- return;
- case BuiltinOperator::BuiltinOperator_SQUEEZE:
- loadSqueeze(op);
- return;
- case BuiltinOperator::BuiltinOperator_PRELU:
- loadPrelu(op);
- return;
- case BuiltinOperator::BuiltinOperator_SPLIT:
- loadSplit(op);
- return;
- case BuiltinOperator::BuiltinOperator_SLICE:
- loadSlice(op);
- return;
- case BuiltinOperator::BuiltinOperator_STRIDED_SLICE:
- loadStridedSlice(op);
- return;
- case BuiltinOperator::BuiltinOperator_UNPACK:
- loadUnpack(op);
- return;
- case BuiltinOperator::BuiltinOperator_MINIMUM:
- loadMinimum(op);
- return;
- case BuiltinOperator::BuiltinOperator_MAXIMUM:
- loadMaximum(op);
- return;
- case BuiltinOperator::BuiltinOperator_CAST:
- loadCast(op);
- return;
- case BuiltinOperator::BuiltinOperator_EQUAL:
- case BuiltinOperator::BuiltinOperator_GREATER_EQUAL:
- case BuiltinOperator::BuiltinOperator_GREATER:
- case BuiltinOperator::BuiltinOperator_LESS_EQUAL:
- case BuiltinOperator::BuiltinOperator_LESS:
- loadComparison(op);
- return;
- case BuiltinOperator::BuiltinOperator_ONE_HOT:
- loadOneHot(op);
- return;
- default:
- throw std::runtime_error(
- std::string("Unsupported operation: ").append(EnumNameBuiltinOperator(builtin_op)));
- }
-}
-
-template <typename LoaderDomain, typename SpecificLoader>
-void BaseLoader<LoaderDomain, SpecificLoader>::loadModel()
-{
- LoaderDomain::VerifyModelBuffer(*_verifier.get());
- _model = LoaderDomain::GetModel(_buffer.data());
- // Version unused
- // const auto version = _model->version();
- // Description unused
- // const auto *description = _model->description();
- // Metabuffer unsued
- // const auto *metadata_buffer = _model->metadata_buffer();
- // Load subgraphs and mapping from op to subgraph
- for (const auto *subgraph : *_model->subgraphs())
- {
- static_cast<SpecificLoader *>(this)->loadSubgraph(subgraph);
- }
-
- _graph.finishBuilding();
-}
-
-} // namespace base_loader
-} // namespace neurun
-
-#endif //__BASE_LOADER_BASE_LOADER_H__
diff --git a/runtime/neurun/frontend/circle/CMakeLists.txt b/runtime/neurun/frontend/circle/CMakeLists.txt
deleted file mode 100644
index fea9725c2..000000000
--- a/runtime/neurun/frontend/circle/CMakeLists.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-if (NOT BUILD_CIRCLE_LOADER)
- return()
-endif ()
-
-nnfw_find_package(FlatBuffersSource REQUIRED)
-
-set(CIRCLE_LOADER_SOURCES src/circle_loader.cc)
-
-add_library(circle_loader SHARED ${CIRCLE_LOADER_SOURCES})
-
-target_include_directories(circle_loader PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include)
-target_include_directories(circle_loader PRIVATE ${FlatBuffersSource_DIR}/include)
-
-target_link_libraries(circle_loader PUBLIC neurun_core)
-target_link_libraries(circle_loader PRIVATE base_loader nnfw_lib_cpp14 nnfw_common nnfw_coverage)
-
-install(TARGETS circle_loader DESTINATION lib)
diff --git a/runtime/neurun/frontend/circle/include/circle_loader.h b/runtime/neurun/frontend/circle/include/circle_loader.h
deleted file mode 100644
index 898bd32b1..000000000
--- a/runtime/neurun/frontend/circle/include/circle_loader.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __CIRCLE_CIRCLE_LOADER_H__
-#define __CIRCLE_CIRCLE_LOADER_H__
-
-#include "ir/Graph.h"
-
-#include <memory>
-
-namespace neurun
-{
-namespace circle_loader
-{
-std::unique_ptr<ir::Graph> loadModel(const char *filename);
-} // namespace circle_loader
-} // namespace neurun
-
-#endif // __CIRCLE_CIRCLE_LOADER_H__
diff --git a/runtime/neurun/frontend/circle/src/circle_loader.cc b/runtime/neurun/frontend/circle/src/circle_loader.cc
deleted file mode 100644
index cc48a793d..000000000
--- a/runtime/neurun/frontend/circle/src/circle_loader.cc
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "circle_loader.h"
-#include "base_loader.h"
-#include "circle_schema_generated.h"
-
-namespace neurun
-{
-namespace circle_loader
-{
-
-namespace
-{
-
-struct LoaderDomain
-{
- using Verifier = flatbuffers::Verifier;
- using ActivationFunctionType = circle::ActivationFunctionType;
- using Buffer = circle::Buffer;
- using BuiltinOperator = circle::BuiltinOperator;
- using CustomOptionsFormat = circle::CustomOptionsFormat;
- using Model = circle::Model;
- using Operator = circle::Operator;
- using Padding = circle::Padding;
- using Pool2DOptions = circle::Pool2DOptions;
- using Tensor = circle::Tensor;
- using TensorType = circle::TensorType;
- using SubGraph = circle::SubGraph;
-
- static const char *EnumNameBuiltinOperator(BuiltinOperator e)
- {
- return circle::EnumNameBuiltinOperator(e);
- }
- static const char *EnumNameActivationFunctionType(ActivationFunctionType e)
- {
- return circle::EnumNameActivationFunctionType(e);
- }
- static const char *EnumNameTensorType(TensorType e) { return circle::EnumNameTensorType(e); }
- static const Model *GetModel(const void *buf) { return circle::GetModel(buf); }
- static bool VerifyModelBuffer(Verifier &verifier) { return circle::VerifyModelBuffer(verifier); }
-};
-
-class CircleLoader final : public base_loader::BaseLoader<LoaderDomain, CircleLoader>
-{
-public:
- using BaseLoader::BaseLoader;
-
- void loadSubgraph(const circle::SubGraph *subgraph)
- {
- // Load tensors
- _tensor_to_operand.resize(subgraph->tensors()->size());
- for (flatbuffers::uoffset_t i = 0; i < subgraph->tensors()->size(); ++i)
- {
- _tensor_to_operand[i] = loadOperand(subgraph->tensors()->Get(i));
- }
- // Set inputs
- for (const std::int32_t input_ind : *subgraph->inputs())
- {
- _graph.addInput(_tensor_to_operand[input_ind]);
- }
- // Set outputs
- for (const std::int32_t output_ind : *subgraph->outputs())
- {
- _graph.addOutput(_tensor_to_operand[output_ind]);
- }
- // Create operations
- for (const auto *op : *subgraph->operators())
- {
- CircleLoader::loadOperation(op);
- }
-
- (void)subgraph->data_format();
- }
-
- void loadOperation(const circle::Operator *op)
- {
- const auto builtin_op = _model->operator_codes()->Get(op->opcode_index())->builtin_code();
-
- switch (builtin_op)
- {
- case circle::BuiltinOperator::BuiltinOperator_INSTANCE_NORM:
- loadInstanceNorm(op);
- return;
- default:
- BaseLoader::loadOperation(op);
- return;
- }
- }
-};
-
-} // namespace
-
-std::unique_ptr<ir::Graph> loadModel(const char *filename)
-{
- auto graph = nnfw::cpp14::make_unique<ir::Graph>();
- CircleLoader loader(*graph);
- loader.loadFromFile(filename);
- return graph;
-}
-
-} // namespace circle_loader
-} // namespace neurun
diff --git a/runtime/neurun/frontend/circle/src/circle_schema_generated.h b/runtime/neurun/frontend/circle/src/circle_schema_generated.h
deleted file mode 100644
index 5f7aad462..000000000
--- a/runtime/neurun/frontend/circle/src/circle_schema_generated.h
+++ /dev/null
@@ -1,7546 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-// automatically generated by the FlatBuffers compiler, do not modify
-
-#ifndef FLATBUFFERS_GENERATED_CIRCLESCHEMA_CIRCLE_H_
-#define FLATBUFFERS_GENERATED_CIRCLESCHEMA_CIRCLE_H_
-
-#include "flatbuffers/flatbuffers.h"
-
-namespace circle
-{
-
-struct CustomQuantization;
-
-struct QuantizationParameters;
-
-struct Tensor;
-
-struct Conv2DOptions;
-
-struct Pool2DOptions;
-
-struct DepthwiseConv2DOptions;
-
-struct ConcatEmbeddingsOptions;
-
-struct LSHProjectionOptions;
-
-struct SVDFOptions;
-
-struct RNNOptions;
-
-struct SequenceRNNOptions;
-
-struct BidirectionalSequenceRNNOptions;
-
-struct FullyConnectedOptions;
-
-struct SoftmaxOptions;
-
-struct ConcatenationOptions;
-
-struct AddOptions;
-
-struct MulOptions;
-
-struct L2NormOptions;
-
-struct LocalResponseNormalizationOptions;
-
-struct LSTMOptions;
-
-struct UnidirectionalSequenceLSTMOptions;
-
-struct BidirectionalSequenceLSTMOptions;
-
-struct ResizeBilinearOptions;
-
-struct ResizeNearestNeighborOptions;
-
-struct CallOptions;
-
-struct PadOptions;
-
-struct PadV2Options;
-
-struct ReshapeOptions;
-
-struct SpaceToBatchNDOptions;
-
-struct BatchToSpaceNDOptions;
-
-struct SkipGramOptions;
-
-struct SpaceToDepthOptions;
-
-struct SubOptions;
-
-struct DivOptions;
-
-struct TopKV2Options;
-
-struct EmbeddingLookupSparseOptions;
-
-struct GatherOptions;
-
-struct TransposeOptions;
-
-struct ExpOptions;
-
-struct ReducerOptions;
-
-struct SqueezeOptions;
-
-struct SplitOptions;
-
-struct SplitVOptions;
-
-struct StridedSliceOptions;
-
-struct LogSoftmaxOptions;
-
-struct CastOptions;
-
-struct DequantizeOptions;
-
-struct MaximumMinimumOptions;
-
-struct TileOptions;
-
-struct ArgMaxOptions;
-
-struct ArgMinOptions;
-
-struct GreaterOptions;
-
-struct GreaterEqualOptions;
-
-struct LessOptions;
-
-struct LessEqualOptions;
-
-struct NegOptions;
-
-struct SelectOptions;
-
-struct SliceOptions;
-
-struct TransposeConvOptions;
-
-struct ExpandDimsOptions;
-
-struct SparseToDenseOptions;
-
-struct EqualOptions;
-
-struct NotEqualOptions;
-
-struct ShapeOptions;
-
-struct PowOptions;
-
-struct FakeQuantOptions;
-
-struct PackOptions;
-
-struct LogicalOrOptions;
-
-struct OneHotOptions;
-
-struct AbsOptions;
-
-struct LogicalAndOptions;
-
-struct LogicalNotOptions;
-
-struct UnpackOptions;
-
-struct FloorDivOptions;
-
-struct SquareOptions;
-
-struct ZerosLikeOptions;
-
-struct FillOptions;
-
-struct FloorModOptions;
-
-struct RangeOptions;
-
-struct LeakyReluOptions;
-
-struct SquaredDifferenceOptions;
-
-struct MirrorPadOptions;
-
-struct InstanceNormOptions;
-
-struct OperatorCode;
-
-struct Operator;
-
-struct SubGraph;
-
-struct Buffer;
-
-struct Model;
-
-enum TensorType
-{
- TensorType_FLOAT32 = 0,
- TensorType_FLOAT16 = 1,
- TensorType_INT32 = 2,
- TensorType_UINT8 = 3,
- TensorType_INT64 = 4,
- TensorType_STRING = 5,
- TensorType_BOOL = 6,
- TensorType_INT16 = 7,
- TensorType_COMPLEX64 = 8,
- TensorType_INT8 = 9,
- TensorType_MIN = TensorType_FLOAT32,
- TensorType_MAX = TensorType_INT8
-};
-
-inline const TensorType (&EnumValuesTensorType())[10]
-{
- static const TensorType values[] = {TensorType_FLOAT32, TensorType_FLOAT16, TensorType_INT32,
- TensorType_UINT8, TensorType_INT64, TensorType_STRING,
- TensorType_BOOL, TensorType_INT16, TensorType_COMPLEX64,
- TensorType_INT8};
- return values;
-}
-
-inline const char *const *EnumNamesTensorType()
-{
- static const char *const names[] = {"FLOAT32", "FLOAT16", "INT32", "UINT8", "INT64", "STRING",
- "BOOL", "INT16", "COMPLEX64", "INT8", nullptr};
- return names;
-}
-
-inline const char *EnumNameTensorType(TensorType e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesTensorType()[index];
-}
-
-enum QuantizationDetails
-{
- QuantizationDetails_NONE = 0,
- QuantizationDetails_CustomQuantization = 1,
- QuantizationDetails_MIN = QuantizationDetails_NONE,
- QuantizationDetails_MAX = QuantizationDetails_CustomQuantization
-};
-
-inline const QuantizationDetails (&EnumValuesQuantizationDetails())[2]
-{
- static const QuantizationDetails values[] = {QuantizationDetails_NONE,
- QuantizationDetails_CustomQuantization};
- return values;
-}
-
-inline const char *const *EnumNamesQuantizationDetails()
-{
- static const char *const names[] = {"NONE", "CustomQuantization", nullptr};
- return names;
-}
-
-inline const char *EnumNameQuantizationDetails(QuantizationDetails e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesQuantizationDetails()[index];
-}
-
-template <typename T> struct QuantizationDetailsTraits
-{
- static const QuantizationDetails enum_value = QuantizationDetails_NONE;
-};
-
-template <> struct QuantizationDetailsTraits<CustomQuantization>
-{
- static const QuantizationDetails enum_value = QuantizationDetails_CustomQuantization;
-};
-
-bool VerifyQuantizationDetails(flatbuffers::Verifier &verifier, const void *obj,
- QuantizationDetails type);
-bool VerifyQuantizationDetailsVector(flatbuffers::Verifier &verifier,
- const flatbuffers::Vector<flatbuffers::Offset<void>> *values,
- const flatbuffers::Vector<uint8_t> *types);
-
-enum BuiltinOperator
-{
- BuiltinOperator_ADD = 0,
- BuiltinOperator_AVERAGE_POOL_2D = 1,
- BuiltinOperator_CONCATENATION = 2,
- BuiltinOperator_CONV_2D = 3,
- BuiltinOperator_DEPTHWISE_CONV_2D = 4,
- BuiltinOperator_DEQUANTIZE = 6,
- BuiltinOperator_EMBEDDING_LOOKUP = 7,
- BuiltinOperator_FLOOR = 8,
- BuiltinOperator_FULLY_CONNECTED = 9,
- BuiltinOperator_HASHTABLE_LOOKUP = 10,
- BuiltinOperator_L2_NORMALIZATION = 11,
- BuiltinOperator_L2_POOL_2D = 12,
- BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION = 13,
- BuiltinOperator_LOGISTIC = 14,
- BuiltinOperator_LSH_PROJECTION = 15,
- BuiltinOperator_LSTM = 16,
- BuiltinOperator_MAX_POOL_2D = 17,
- BuiltinOperator_MUL = 18,
- BuiltinOperator_RELU = 19,
- BuiltinOperator_RELU_N1_TO_1 = 20,
- BuiltinOperator_RELU6 = 21,
- BuiltinOperator_RESHAPE = 22,
- BuiltinOperator_RESIZE_BILINEAR = 23,
- BuiltinOperator_RNN = 24,
- BuiltinOperator_SOFTMAX = 25,
- BuiltinOperator_SPACE_TO_DEPTH = 26,
- BuiltinOperator_SVDF = 27,
- BuiltinOperator_TANH = 28,
- BuiltinOperator_CONCAT_EMBEDDINGS = 29,
- BuiltinOperator_SKIP_GRAM = 30,
- BuiltinOperator_CALL = 31,
- BuiltinOperator_CUSTOM = 32,
- BuiltinOperator_EMBEDDING_LOOKUP_SPARSE = 33,
- BuiltinOperator_PAD = 34,
- BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN = 35,
- BuiltinOperator_GATHER = 36,
- BuiltinOperator_BATCH_TO_SPACE_ND = 37,
- BuiltinOperator_SPACE_TO_BATCH_ND = 38,
- BuiltinOperator_TRANSPOSE = 39,
- BuiltinOperator_MEAN = 40,
- BuiltinOperator_SUB = 41,
- BuiltinOperator_DIV = 42,
- BuiltinOperator_SQUEEZE = 43,
- BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM = 44,
- BuiltinOperator_STRIDED_SLICE = 45,
- BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN = 46,
- BuiltinOperator_EXP = 47,
- BuiltinOperator_TOPK_V2 = 48,
- BuiltinOperator_SPLIT = 49,
- BuiltinOperator_LOG_SOFTMAX = 50,
- BuiltinOperator_DELEGATE = 51,
- BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM = 52,
- BuiltinOperator_CAST = 53,
- BuiltinOperator_PRELU = 54,
- BuiltinOperator_MAXIMUM = 55,
- BuiltinOperator_ARG_MAX = 56,
- BuiltinOperator_MINIMUM = 57,
- BuiltinOperator_LESS = 58,
- BuiltinOperator_NEG = 59,
- BuiltinOperator_PADV2 = 60,
- BuiltinOperator_GREATER = 61,
- BuiltinOperator_GREATER_EQUAL = 62,
- BuiltinOperator_LESS_EQUAL = 63,
- BuiltinOperator_SELECT = 64,
- BuiltinOperator_SLICE = 65,
- BuiltinOperator_SIN = 66,
- BuiltinOperator_TRANSPOSE_CONV = 67,
- BuiltinOperator_SPARSE_TO_DENSE = 68,
- BuiltinOperator_TILE = 69,
- BuiltinOperator_EXPAND_DIMS = 70,
- BuiltinOperator_EQUAL = 71,
- BuiltinOperator_NOT_EQUAL = 72,
- BuiltinOperator_LOG = 73,
- BuiltinOperator_SUM = 74,
- BuiltinOperator_SQRT = 75,
- BuiltinOperator_RSQRT = 76,
- BuiltinOperator_SHAPE = 77,
- BuiltinOperator_POW = 78,
- BuiltinOperator_ARG_MIN = 79,
- BuiltinOperator_FAKE_QUANT = 80,
- BuiltinOperator_REDUCE_PROD = 81,
- BuiltinOperator_REDUCE_MAX = 82,
- BuiltinOperator_PACK = 83,
- BuiltinOperator_LOGICAL_OR = 84,
- BuiltinOperator_ONE_HOT = 85,
- BuiltinOperator_LOGICAL_AND = 86,
- BuiltinOperator_LOGICAL_NOT = 87,
- BuiltinOperator_UNPACK = 88,
- BuiltinOperator_REDUCE_MIN = 89,
- BuiltinOperator_FLOOR_DIV = 90,
- BuiltinOperator_REDUCE_ANY = 91,
- BuiltinOperator_SQUARE = 92,
- BuiltinOperator_ZEROS_LIKE = 93,
- BuiltinOperator_FILL = 94,
- BuiltinOperator_FLOOR_MOD = 95,
- BuiltinOperator_RANGE = 96,
- BuiltinOperator_RESIZE_NEAREST_NEIGHBOR = 97,
- BuiltinOperator_LEAKY_RELU = 98,
- BuiltinOperator_SQUARED_DIFFERENCE = 99,
- BuiltinOperator_MIRROR_PAD = 100,
- BuiltinOperator_ABS = 101,
- BuiltinOperator_SPLIT_V = 102,
- BuiltinOperator_INSTANCE_NORM = 254,
- BuiltinOperator_MIN = BuiltinOperator_ADD,
- BuiltinOperator_MAX = BuiltinOperator_INSTANCE_NORM
-};
-
-inline const BuiltinOperator (&EnumValuesBuiltinOperator())[103]
-{
- static const BuiltinOperator values[] = {BuiltinOperator_ADD,
- BuiltinOperator_AVERAGE_POOL_2D,
- BuiltinOperator_CONCATENATION,
- BuiltinOperator_CONV_2D,
- BuiltinOperator_DEPTHWISE_CONV_2D,
- BuiltinOperator_DEQUANTIZE,
- BuiltinOperator_EMBEDDING_LOOKUP,
- BuiltinOperator_FLOOR,
- BuiltinOperator_FULLY_CONNECTED,
- BuiltinOperator_HASHTABLE_LOOKUP,
- BuiltinOperator_L2_NORMALIZATION,
- BuiltinOperator_L2_POOL_2D,
- BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION,
- BuiltinOperator_LOGISTIC,
- BuiltinOperator_LSH_PROJECTION,
- BuiltinOperator_LSTM,
- BuiltinOperator_MAX_POOL_2D,
- BuiltinOperator_MUL,
- BuiltinOperator_RELU,
- BuiltinOperator_RELU_N1_TO_1,
- BuiltinOperator_RELU6,
- BuiltinOperator_RESHAPE,
- BuiltinOperator_RESIZE_BILINEAR,
- BuiltinOperator_RNN,
- BuiltinOperator_SOFTMAX,
- BuiltinOperator_SPACE_TO_DEPTH,
- BuiltinOperator_SVDF,
- BuiltinOperator_TANH,
- BuiltinOperator_CONCAT_EMBEDDINGS,
- BuiltinOperator_SKIP_GRAM,
- BuiltinOperator_CALL,
- BuiltinOperator_CUSTOM,
- BuiltinOperator_EMBEDDING_LOOKUP_SPARSE,
- BuiltinOperator_PAD,
- BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN,
- BuiltinOperator_GATHER,
- BuiltinOperator_BATCH_TO_SPACE_ND,
- BuiltinOperator_SPACE_TO_BATCH_ND,
- BuiltinOperator_TRANSPOSE,
- BuiltinOperator_MEAN,
- BuiltinOperator_SUB,
- BuiltinOperator_DIV,
- BuiltinOperator_SQUEEZE,
- BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM,
- BuiltinOperator_STRIDED_SLICE,
- BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN,
- BuiltinOperator_EXP,
- BuiltinOperator_TOPK_V2,
- BuiltinOperator_SPLIT,
- BuiltinOperator_LOG_SOFTMAX,
- BuiltinOperator_DELEGATE,
- BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM,
- BuiltinOperator_CAST,
- BuiltinOperator_PRELU,
- BuiltinOperator_MAXIMUM,
- BuiltinOperator_ARG_MAX,
- BuiltinOperator_MINIMUM,
- BuiltinOperator_LESS,
- BuiltinOperator_NEG,
- BuiltinOperator_PADV2,
- BuiltinOperator_GREATER,
- BuiltinOperator_GREATER_EQUAL,
- BuiltinOperator_LESS_EQUAL,
- BuiltinOperator_SELECT,
- BuiltinOperator_SLICE,
- BuiltinOperator_SIN,
- BuiltinOperator_TRANSPOSE_CONV,
- BuiltinOperator_SPARSE_TO_DENSE,
- BuiltinOperator_TILE,
- BuiltinOperator_EXPAND_DIMS,
- BuiltinOperator_EQUAL,
- BuiltinOperator_NOT_EQUAL,
- BuiltinOperator_LOG,
- BuiltinOperator_SUM,
- BuiltinOperator_SQRT,
- BuiltinOperator_RSQRT,
- BuiltinOperator_SHAPE,
- BuiltinOperator_POW,
- BuiltinOperator_ARG_MIN,
- BuiltinOperator_FAKE_QUANT,
- BuiltinOperator_REDUCE_PROD,
- BuiltinOperator_REDUCE_MAX,
- BuiltinOperator_PACK,
- BuiltinOperator_LOGICAL_OR,
- BuiltinOperator_ONE_HOT,
- BuiltinOperator_LOGICAL_AND,
- BuiltinOperator_LOGICAL_NOT,
- BuiltinOperator_UNPACK,
- BuiltinOperator_REDUCE_MIN,
- BuiltinOperator_FLOOR_DIV,
- BuiltinOperator_REDUCE_ANY,
- BuiltinOperator_SQUARE,
- BuiltinOperator_ZEROS_LIKE,
- BuiltinOperator_FILL,
- BuiltinOperator_FLOOR_MOD,
- BuiltinOperator_RANGE,
- BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
- BuiltinOperator_LEAKY_RELU,
- BuiltinOperator_SQUARED_DIFFERENCE,
- BuiltinOperator_MIRROR_PAD,
- BuiltinOperator_ABS,
- BuiltinOperator_SPLIT_V,
- BuiltinOperator_INSTANCE_NORM};
- return values;
-}
-
-inline const char *const *EnumNamesBuiltinOperator()
-{
- static const char *const names[] = {"ADD",
- "AVERAGE_POOL_2D",
- "CONCATENATION",
- "CONV_2D",
- "DEPTHWISE_CONV_2D",
- "",
- "DEQUANTIZE",
- "EMBEDDING_LOOKUP",
- "FLOOR",
- "FULLY_CONNECTED",
- "HASHTABLE_LOOKUP",
- "L2_NORMALIZATION",
- "L2_POOL_2D",
- "LOCAL_RESPONSE_NORMALIZATION",
- "LOGISTIC",
- "LSH_PROJECTION",
- "LSTM",
- "MAX_POOL_2D",
- "MUL",
- "RELU",
- "RELU_N1_TO_1",
- "RELU6",
- "RESHAPE",
- "RESIZE_BILINEAR",
- "RNN",
- "SOFTMAX",
- "SPACE_TO_DEPTH",
- "SVDF",
- "TANH",
- "CONCAT_EMBEDDINGS",
- "SKIP_GRAM",
- "CALL",
- "CUSTOM",
- "EMBEDDING_LOOKUP_SPARSE",
- "PAD",
- "UNIDIRECTIONAL_SEQUENCE_RNN",
- "GATHER",
- "BATCH_TO_SPACE_ND",
- "SPACE_TO_BATCH_ND",
- "TRANSPOSE",
- "MEAN",
- "SUB",
- "DIV",
- "SQUEEZE",
- "UNIDIRECTIONAL_SEQUENCE_LSTM",
- "STRIDED_SLICE",
- "BIDIRECTIONAL_SEQUENCE_RNN",
- "EXP",
- "TOPK_V2",
- "SPLIT",
- "LOG_SOFTMAX",
- "DELEGATE",
- "BIDIRECTIONAL_SEQUENCE_LSTM",
- "CAST",
- "PRELU",
- "MAXIMUM",
- "ARG_MAX",
- "MINIMUM",
- "LESS",
- "NEG",
- "PADV2",
- "GREATER",
- "GREATER_EQUAL",
- "LESS_EQUAL",
- "SELECT",
- "SLICE",
- "SIN",
- "TRANSPOSE_CONV",
- "SPARSE_TO_DENSE",
- "TILE",
- "EXPAND_DIMS",
- "EQUAL",
- "NOT_EQUAL",
- "LOG",
- "SUM",
- "SQRT",
- "RSQRT",
- "SHAPE",
- "POW",
- "ARG_MIN",
- "FAKE_QUANT",
- "REDUCE_PROD",
- "REDUCE_MAX",
- "PACK",
- "LOGICAL_OR",
- "ONE_HOT",
- "LOGICAL_AND",
- "LOGICAL_NOT",
- "UNPACK",
- "REDUCE_MIN",
- "FLOOR_DIV",
- "REDUCE_ANY",
- "SQUARE",
- "ZEROS_LIKE",
- "FILL",
- "FLOOR_MOD",
- "RANGE",
- "RESIZE_NEAREST_NEIGHBOR",
- "LEAKY_RELU",
- "SQUARED_DIFFERENCE",
- "MIRROR_PAD",
- "ABS",
- "SPLIT_V",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "",
- "INSTANCE_NORM",
- nullptr};
- return names;
-}
-
-inline const char *EnumNameBuiltinOperator(BuiltinOperator e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesBuiltinOperator()[index];
-}
-
-enum BuiltinOptions
-{
- BuiltinOptions_NONE = 0,
- BuiltinOptions_Conv2DOptions = 1,
- BuiltinOptions_DepthwiseConv2DOptions = 2,
- BuiltinOptions_ConcatEmbeddingsOptions = 3,
- BuiltinOptions_LSHProjectionOptions = 4,
- BuiltinOptions_Pool2DOptions = 5,
- BuiltinOptions_SVDFOptions = 6,
- BuiltinOptions_RNNOptions = 7,
- BuiltinOptions_FullyConnectedOptions = 8,
- BuiltinOptions_SoftmaxOptions = 9,
- BuiltinOptions_ConcatenationOptions = 10,
- BuiltinOptions_AddOptions = 11,
- BuiltinOptions_L2NormOptions = 12,
- BuiltinOptions_LocalResponseNormalizationOptions = 13,
- BuiltinOptions_LSTMOptions = 14,
- BuiltinOptions_ResizeBilinearOptions = 15,
- BuiltinOptions_CallOptions = 16,
- BuiltinOptions_ReshapeOptions = 17,
- BuiltinOptions_SkipGramOptions = 18,
- BuiltinOptions_SpaceToDepthOptions = 19,
- BuiltinOptions_EmbeddingLookupSparseOptions = 20,
- BuiltinOptions_MulOptions = 21,
- BuiltinOptions_PadOptions = 22,
- BuiltinOptions_GatherOptions = 23,
- BuiltinOptions_BatchToSpaceNDOptions = 24,
- BuiltinOptions_SpaceToBatchNDOptions = 25,
- BuiltinOptions_TransposeOptions = 26,
- BuiltinOptions_ReducerOptions = 27,
- BuiltinOptions_SubOptions = 28,
- BuiltinOptions_DivOptions = 29,
- BuiltinOptions_SqueezeOptions = 30,
- BuiltinOptions_SequenceRNNOptions = 31,
- BuiltinOptions_StridedSliceOptions = 32,
- BuiltinOptions_ExpOptions = 33,
- BuiltinOptions_TopKV2Options = 34,
- BuiltinOptions_SplitOptions = 35,
- BuiltinOptions_LogSoftmaxOptions = 36,
- BuiltinOptions_CastOptions = 37,
- BuiltinOptions_DequantizeOptions = 38,
- BuiltinOptions_MaximumMinimumOptions = 39,
- BuiltinOptions_ArgMaxOptions = 40,
- BuiltinOptions_LessOptions = 41,
- BuiltinOptions_NegOptions = 42,
- BuiltinOptions_PadV2Options = 43,
- BuiltinOptions_GreaterOptions = 44,
- BuiltinOptions_GreaterEqualOptions = 45,
- BuiltinOptions_LessEqualOptions = 46,
- BuiltinOptions_SelectOptions = 47,
- BuiltinOptions_SliceOptions = 48,
- BuiltinOptions_TransposeConvOptions = 49,
- BuiltinOptions_SparseToDenseOptions = 50,
- BuiltinOptions_TileOptions = 51,
- BuiltinOptions_ExpandDimsOptions = 52,
- BuiltinOptions_EqualOptions = 53,
- BuiltinOptions_NotEqualOptions = 54,
- BuiltinOptions_ShapeOptions = 55,
- BuiltinOptions_PowOptions = 56,
- BuiltinOptions_ArgMinOptions = 57,
- BuiltinOptions_FakeQuantOptions = 58,
- BuiltinOptions_PackOptions = 59,
- BuiltinOptions_LogicalOrOptions = 60,
- BuiltinOptions_OneHotOptions = 61,
- BuiltinOptions_LogicalAndOptions = 62,
- BuiltinOptions_LogicalNotOptions = 63,
- BuiltinOptions_UnpackOptions = 64,
- BuiltinOptions_FloorDivOptions = 65,
- BuiltinOptions_SquareOptions = 66,
- BuiltinOptions_ZerosLikeOptions = 67,
- BuiltinOptions_FillOptions = 68,
- BuiltinOptions_BidirectionalSequenceLSTMOptions = 69,
- BuiltinOptions_BidirectionalSequenceRNNOptions = 70,
- BuiltinOptions_UnidirectionalSequenceLSTMOptions = 71,
- BuiltinOptions_FloorModOptions = 72,
- BuiltinOptions_RangeOptions = 73,
- BuiltinOptions_ResizeNearestNeighborOptions = 74,
- BuiltinOptions_LeakyReluOptions = 75,
- BuiltinOptions_SquaredDifferenceOptions = 76,
- BuiltinOptions_MirrorPadOptions = 77,
- BuiltinOptions_AbsOptions = 78,
- BuiltinOptions_SplitVOptions = 79,
- BuiltinOptions_InstanceNormOptions = 80,
- BuiltinOptions_MIN = BuiltinOptions_NONE,
- BuiltinOptions_MAX = BuiltinOptions_InstanceNormOptions
-};
-
-inline const BuiltinOptions (&EnumValuesBuiltinOptions())[81]
-{
- static const BuiltinOptions values[] = {BuiltinOptions_NONE,
- BuiltinOptions_Conv2DOptions,
- BuiltinOptions_DepthwiseConv2DOptions,
- BuiltinOptions_ConcatEmbeddingsOptions,
- BuiltinOptions_LSHProjectionOptions,
- BuiltinOptions_Pool2DOptions,
- BuiltinOptions_SVDFOptions,
- BuiltinOptions_RNNOptions,
- BuiltinOptions_FullyConnectedOptions,
- BuiltinOptions_SoftmaxOptions,
- BuiltinOptions_ConcatenationOptions,
- BuiltinOptions_AddOptions,
- BuiltinOptions_L2NormOptions,
- BuiltinOptions_LocalResponseNormalizationOptions,
- BuiltinOptions_LSTMOptions,
- BuiltinOptions_ResizeBilinearOptions,
- BuiltinOptions_CallOptions,
- BuiltinOptions_ReshapeOptions,
- BuiltinOptions_SkipGramOptions,
- BuiltinOptions_SpaceToDepthOptions,
- BuiltinOptions_EmbeddingLookupSparseOptions,
- BuiltinOptions_MulOptions,
- BuiltinOptions_PadOptions,
- BuiltinOptions_GatherOptions,
- BuiltinOptions_BatchToSpaceNDOptions,
- BuiltinOptions_SpaceToBatchNDOptions,
- BuiltinOptions_TransposeOptions,
- BuiltinOptions_ReducerOptions,
- BuiltinOptions_SubOptions,
- BuiltinOptions_DivOptions,
- BuiltinOptions_SqueezeOptions,
- BuiltinOptions_SequenceRNNOptions,
- BuiltinOptions_StridedSliceOptions,
- BuiltinOptions_ExpOptions,
- BuiltinOptions_TopKV2Options,
- BuiltinOptions_SplitOptions,
- BuiltinOptions_LogSoftmaxOptions,
- BuiltinOptions_CastOptions,
- BuiltinOptions_DequantizeOptions,
- BuiltinOptions_MaximumMinimumOptions,
- BuiltinOptions_ArgMaxOptions,
- BuiltinOptions_LessOptions,
- BuiltinOptions_NegOptions,
- BuiltinOptions_PadV2Options,
- BuiltinOptions_GreaterOptions,
- BuiltinOptions_GreaterEqualOptions,
- BuiltinOptions_LessEqualOptions,
- BuiltinOptions_SelectOptions,
- BuiltinOptions_SliceOptions,
- BuiltinOptions_TransposeConvOptions,
- BuiltinOptions_SparseToDenseOptions,
- BuiltinOptions_TileOptions,
- BuiltinOptions_ExpandDimsOptions,
- BuiltinOptions_EqualOptions,
- BuiltinOptions_NotEqualOptions,
- BuiltinOptions_ShapeOptions,
- BuiltinOptions_PowOptions,
- BuiltinOptions_ArgMinOptions,
- BuiltinOptions_FakeQuantOptions,
- BuiltinOptions_PackOptions,
- BuiltinOptions_LogicalOrOptions,
- BuiltinOptions_OneHotOptions,
- BuiltinOptions_LogicalAndOptions,
- BuiltinOptions_LogicalNotOptions,
- BuiltinOptions_UnpackOptions,
- BuiltinOptions_FloorDivOptions,
- BuiltinOptions_SquareOptions,
- BuiltinOptions_ZerosLikeOptions,
- BuiltinOptions_FillOptions,
- BuiltinOptions_BidirectionalSequenceLSTMOptions,
- BuiltinOptions_BidirectionalSequenceRNNOptions,
- BuiltinOptions_UnidirectionalSequenceLSTMOptions,
- BuiltinOptions_FloorModOptions,
- BuiltinOptions_RangeOptions,
- BuiltinOptions_ResizeNearestNeighborOptions,
- BuiltinOptions_LeakyReluOptions,
- BuiltinOptions_SquaredDifferenceOptions,
- BuiltinOptions_MirrorPadOptions,
- BuiltinOptions_AbsOptions,
- BuiltinOptions_SplitVOptions,
- BuiltinOptions_InstanceNormOptions};
- return values;
-}
-
-inline const char *const *EnumNamesBuiltinOptions()
-{
- static const char *const names[] = {"NONE",
- "Conv2DOptions",
- "DepthwiseConv2DOptions",
- "ConcatEmbeddingsOptions",
- "LSHProjectionOptions",
- "Pool2DOptions",
- "SVDFOptions",
- "RNNOptions",
- "FullyConnectedOptions",
- "SoftmaxOptions",
- "ConcatenationOptions",
- "AddOptions",
- "L2NormOptions",
- "LocalResponseNormalizationOptions",
- "LSTMOptions",
- "ResizeBilinearOptions",
- "CallOptions",
- "ReshapeOptions",
- "SkipGramOptions",
- "SpaceToDepthOptions",
- "EmbeddingLookupSparseOptions",
- "MulOptions",
- "PadOptions",
- "GatherOptions",
- "BatchToSpaceNDOptions",
- "SpaceToBatchNDOptions",
- "TransposeOptions",
- "ReducerOptions",
- "SubOptions",
- "DivOptions",
- "SqueezeOptions",
- "SequenceRNNOptions",
- "StridedSliceOptions",
- "ExpOptions",
- "TopKV2Options",
- "SplitOptions",
- "LogSoftmaxOptions",
- "CastOptions",
- "DequantizeOptions",
- "MaximumMinimumOptions",
- "ArgMaxOptions",
- "LessOptions",
- "NegOptions",
- "PadV2Options",
- "GreaterOptions",
- "GreaterEqualOptions",
- "LessEqualOptions",
- "SelectOptions",
- "SliceOptions",
- "TransposeConvOptions",
- "SparseToDenseOptions",
- "TileOptions",
- "ExpandDimsOptions",
- "EqualOptions",
- "NotEqualOptions",
- "ShapeOptions",
- "PowOptions",
- "ArgMinOptions",
- "FakeQuantOptions",
- "PackOptions",
- "LogicalOrOptions",
- "OneHotOptions",
- "LogicalAndOptions",
- "LogicalNotOptions",
- "UnpackOptions",
- "FloorDivOptions",
- "SquareOptions",
- "ZerosLikeOptions",
- "FillOptions",
- "BidirectionalSequenceLSTMOptions",
- "BidirectionalSequenceRNNOptions",
- "UnidirectionalSequenceLSTMOptions",
- "FloorModOptions",
- "RangeOptions",
- "ResizeNearestNeighborOptions",
- "LeakyReluOptions",
- "SquaredDifferenceOptions",
- "MirrorPadOptions",
- "AbsOptions",
- "SplitVOptions",
- "InstanceNormOptions",
- nullptr};
- return names;
-}
-
-inline const char *EnumNameBuiltinOptions(BuiltinOptions e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesBuiltinOptions()[index];
-}
-
-template <typename T> struct BuiltinOptionsTraits
-{
- static const BuiltinOptions enum_value = BuiltinOptions_NONE;
-};
-
-template <> struct BuiltinOptionsTraits<Conv2DOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_Conv2DOptions;
-};
-
-template <> struct BuiltinOptionsTraits<DepthwiseConv2DOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_DepthwiseConv2DOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ConcatEmbeddingsOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ConcatEmbeddingsOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LSHProjectionOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LSHProjectionOptions;
-};
-
-template <> struct BuiltinOptionsTraits<Pool2DOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_Pool2DOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SVDFOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SVDFOptions;
-};
-
-template <> struct BuiltinOptionsTraits<RNNOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_RNNOptions;
-};
-
-template <> struct BuiltinOptionsTraits<FullyConnectedOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_FullyConnectedOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SoftmaxOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SoftmaxOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ConcatenationOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ConcatenationOptions;
-};
-
-template <> struct BuiltinOptionsTraits<AddOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_AddOptions;
-};
-
-template <> struct BuiltinOptionsTraits<L2NormOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_L2NormOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LocalResponseNormalizationOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LocalResponseNormalizationOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LSTMOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LSTMOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ResizeBilinearOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ResizeBilinearOptions;
-};
-
-template <> struct BuiltinOptionsTraits<CallOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_CallOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ReshapeOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ReshapeOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SkipGramOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SkipGramOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SpaceToDepthOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SpaceToDepthOptions;
-};
-
-template <> struct BuiltinOptionsTraits<EmbeddingLookupSparseOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_EmbeddingLookupSparseOptions;
-};
-
-template <> struct BuiltinOptionsTraits<MulOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_MulOptions;
-};
-
-template <> struct BuiltinOptionsTraits<PadOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_PadOptions;
-};
-
-template <> struct BuiltinOptionsTraits<GatherOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_GatherOptions;
-};
-
-template <> struct BuiltinOptionsTraits<BatchToSpaceNDOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_BatchToSpaceNDOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SpaceToBatchNDOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SpaceToBatchNDOptions;
-};
-
-template <> struct BuiltinOptionsTraits<TransposeOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_TransposeOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ReducerOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ReducerOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SubOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SubOptions;
-};
-
-template <> struct BuiltinOptionsTraits<DivOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_DivOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SqueezeOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SqueezeOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SequenceRNNOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SequenceRNNOptions;
-};
-
-template <> struct BuiltinOptionsTraits<StridedSliceOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_StridedSliceOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ExpOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ExpOptions;
-};
-
-template <> struct BuiltinOptionsTraits<TopKV2Options>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_TopKV2Options;
-};
-
-template <> struct BuiltinOptionsTraits<SplitOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SplitOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LogSoftmaxOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LogSoftmaxOptions;
-};
-
-template <> struct BuiltinOptionsTraits<CastOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_CastOptions;
-};
-
-template <> struct BuiltinOptionsTraits<DequantizeOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_DequantizeOptions;
-};
-
-template <> struct BuiltinOptionsTraits<MaximumMinimumOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_MaximumMinimumOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ArgMaxOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ArgMaxOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LessOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LessOptions;
-};
-
-template <> struct BuiltinOptionsTraits<NegOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_NegOptions;
-};
-
-template <> struct BuiltinOptionsTraits<PadV2Options>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_PadV2Options;
-};
-
-template <> struct BuiltinOptionsTraits<GreaterOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_GreaterOptions;
-};
-
-template <> struct BuiltinOptionsTraits<GreaterEqualOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_GreaterEqualOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LessEqualOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LessEqualOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SelectOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SelectOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SliceOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SliceOptions;
-};
-
-template <> struct BuiltinOptionsTraits<TransposeConvOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_TransposeConvOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SparseToDenseOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SparseToDenseOptions;
-};
-
-template <> struct BuiltinOptionsTraits<TileOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_TileOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ExpandDimsOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ExpandDimsOptions;
-};
-
-template <> struct BuiltinOptionsTraits<EqualOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_EqualOptions;
-};
-
-template <> struct BuiltinOptionsTraits<NotEqualOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_NotEqualOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ShapeOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ShapeOptions;
-};
-
-template <> struct BuiltinOptionsTraits<PowOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_PowOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ArgMinOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ArgMinOptions;
-};
-
-template <> struct BuiltinOptionsTraits<FakeQuantOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_FakeQuantOptions;
-};
-
-template <> struct BuiltinOptionsTraits<PackOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_PackOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LogicalOrOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LogicalOrOptions;
-};
-
-template <> struct BuiltinOptionsTraits<OneHotOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_OneHotOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LogicalAndOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LogicalAndOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LogicalNotOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LogicalNotOptions;
-};
-
-template <> struct BuiltinOptionsTraits<UnpackOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_UnpackOptions;
-};
-
-template <> struct BuiltinOptionsTraits<FloorDivOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_FloorDivOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SquareOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SquareOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ZerosLikeOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ZerosLikeOptions;
-};
-
-template <> struct BuiltinOptionsTraits<FillOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_FillOptions;
-};
-
-template <> struct BuiltinOptionsTraits<BidirectionalSequenceLSTMOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_BidirectionalSequenceLSTMOptions;
-};
-
-template <> struct BuiltinOptionsTraits<BidirectionalSequenceRNNOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_BidirectionalSequenceRNNOptions;
-};
-
-template <> struct BuiltinOptionsTraits<UnidirectionalSequenceLSTMOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_UnidirectionalSequenceLSTMOptions;
-};
-
-template <> struct BuiltinOptionsTraits<FloorModOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_FloorModOptions;
-};
-
-template <> struct BuiltinOptionsTraits<RangeOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_RangeOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ResizeNearestNeighborOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ResizeNearestNeighborOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LeakyReluOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LeakyReluOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SquaredDifferenceOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SquaredDifferenceOptions;
-};
-
-template <> struct BuiltinOptionsTraits<MirrorPadOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_MirrorPadOptions;
-};
-
-template <> struct BuiltinOptionsTraits<AbsOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_AbsOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SplitVOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SplitVOptions;
-};
-
-template <> struct BuiltinOptionsTraits<InstanceNormOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_InstanceNormOptions;
-};
-
-bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions type);
-bool VerifyBuiltinOptionsVector(flatbuffers::Verifier &verifier,
- const flatbuffers::Vector<flatbuffers::Offset<void>> *values,
- const flatbuffers::Vector<uint8_t> *types);
-
-enum Padding
-{
- Padding_SAME = 0,
- Padding_VALID = 1,
- Padding_MIN = Padding_SAME,
- Padding_MAX = Padding_VALID
-};
-
-inline const Padding (&EnumValuesPadding())[2]
-{
- static const Padding values[] = {Padding_SAME, Padding_VALID};
- return values;
-}
-
-inline const char *const *EnumNamesPadding()
-{
- static const char *const names[] = {"SAME", "VALID", nullptr};
- return names;
-}
-
-inline const char *EnumNamePadding(Padding e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesPadding()[index];
-}
-
-enum ActivationFunctionType
-{
- ActivationFunctionType_NONE = 0,
- ActivationFunctionType_RELU = 1,
- ActivationFunctionType_RELU_N1_TO_1 = 2,
- ActivationFunctionType_RELU6 = 3,
- ActivationFunctionType_TANH = 4,
- ActivationFunctionType_SIGN_BIT = 5,
- ActivationFunctionType_MIN = ActivationFunctionType_NONE,
- ActivationFunctionType_MAX = ActivationFunctionType_SIGN_BIT
-};
-
-inline const ActivationFunctionType (&EnumValuesActivationFunctionType())[6]
-{
- static const ActivationFunctionType values[] = {
- ActivationFunctionType_NONE, ActivationFunctionType_RELU,
- ActivationFunctionType_RELU_N1_TO_1, ActivationFunctionType_RELU6,
- ActivationFunctionType_TANH, ActivationFunctionType_SIGN_BIT};
- return values;
-}
-
-inline const char *const *EnumNamesActivationFunctionType()
-{
- static const char *const names[] = {"NONE", "RELU", "RELU_N1_TO_1", "RELU6",
- "TANH", "SIGN_BIT", nullptr};
- return names;
-}
-
-inline const char *EnumNameActivationFunctionType(ActivationFunctionType e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesActivationFunctionType()[index];
-}
-
-enum LSHProjectionType
-{
- LSHProjectionType_UNKNOWN = 0,
- LSHProjectionType_SPARSE = 1,
- LSHProjectionType_DENSE = 2,
- LSHProjectionType_MIN = LSHProjectionType_UNKNOWN,
- LSHProjectionType_MAX = LSHProjectionType_DENSE
-};
-
-inline const LSHProjectionType (&EnumValuesLSHProjectionType())[3]
-{
- static const LSHProjectionType values[] = {LSHProjectionType_UNKNOWN, LSHProjectionType_SPARSE,
- LSHProjectionType_DENSE};
- return values;
-}
-
-inline const char *const *EnumNamesLSHProjectionType()
-{
- static const char *const names[] = {"UNKNOWN", "SPARSE", "DENSE", nullptr};
- return names;
-}
-
-inline const char *EnumNameLSHProjectionType(LSHProjectionType e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesLSHProjectionType()[index];
-}
-
-enum FullyConnectedOptionsWeightsFormat
-{
- FullyConnectedOptionsWeightsFormat_DEFAULT = 0,
- FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8 = 1,
- FullyConnectedOptionsWeightsFormat_MIN = FullyConnectedOptionsWeightsFormat_DEFAULT,
- FullyConnectedOptionsWeightsFormat_MAX = FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8
-};
-
-inline const FullyConnectedOptionsWeightsFormat (&EnumValuesFullyConnectedOptionsWeightsFormat())[2]
-{
- static const FullyConnectedOptionsWeightsFormat values[] = {
- FullyConnectedOptionsWeightsFormat_DEFAULT,
- FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8};
- return values;
-}
-
-inline const char *const *EnumNamesFullyConnectedOptionsWeightsFormat()
-{
- static const char *const names[] = {"DEFAULT", "SHUFFLED4x16INT8", nullptr};
- return names;
-}
-
-inline const char *EnumNameFullyConnectedOptionsWeightsFormat(FullyConnectedOptionsWeightsFormat e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesFullyConnectedOptionsWeightsFormat()[index];
-}
-
-enum LSTMKernelType
-{
- LSTMKernelType_FULL = 0,
- LSTMKernelType_BASIC = 1,
- LSTMKernelType_MIN = LSTMKernelType_FULL,
- LSTMKernelType_MAX = LSTMKernelType_BASIC
-};
-
-inline const LSTMKernelType (&EnumValuesLSTMKernelType())[2]
-{
- static const LSTMKernelType values[] = {LSTMKernelType_FULL, LSTMKernelType_BASIC};
- return values;
-}
-
-inline const char *const *EnumNamesLSTMKernelType()
-{
- static const char *const names[] = {"FULL", "BASIC", nullptr};
- return names;
-}
-
-inline const char *EnumNameLSTMKernelType(LSTMKernelType e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesLSTMKernelType()[index];
-}
-
-enum CombinerType
-{
- CombinerType_SUM = 0,
- CombinerType_MEAN = 1,
- CombinerType_SQRTN = 2,
- CombinerType_MIN = CombinerType_SUM,
- CombinerType_MAX = CombinerType_SQRTN
-};
-
-inline const CombinerType (&EnumValuesCombinerType())[3]
-{
- static const CombinerType values[] = {CombinerType_SUM, CombinerType_MEAN, CombinerType_SQRTN};
- return values;
-}
-
-inline const char *const *EnumNamesCombinerType()
-{
- static const char *const names[] = {"SUM", "MEAN", "SQRTN", nullptr};
- return names;
-}
-
-inline const char *EnumNameCombinerType(CombinerType e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesCombinerType()[index];
-}
-
-enum MirrorPadMode
-{
- MirrorPadMode_REFLECT = 0,
- MirrorPadMode_SYMMETRIC = 1,
- MirrorPadMode_MIN = MirrorPadMode_REFLECT,
- MirrorPadMode_MAX = MirrorPadMode_SYMMETRIC
-};
-
-inline const MirrorPadMode (&EnumValuesMirrorPadMode())[2]
-{
- static const MirrorPadMode values[] = {MirrorPadMode_REFLECT, MirrorPadMode_SYMMETRIC};
- return values;
-}
-
-inline const char *const *EnumNamesMirrorPadMode()
-{
- static const char *const names[] = {"REFLECT", "SYMMETRIC", nullptr};
- return names;
-}
-
-inline const char *EnumNameMirrorPadMode(MirrorPadMode e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesMirrorPadMode()[index];
-}
-
-enum CustomOptionsFormat
-{
- CustomOptionsFormat_FLEXBUFFERS = 0,
- CustomOptionsFormat_MIN = CustomOptionsFormat_FLEXBUFFERS,
- CustomOptionsFormat_MAX = CustomOptionsFormat_FLEXBUFFERS
-};
-
-inline const CustomOptionsFormat (&EnumValuesCustomOptionsFormat())[1]
-{
- static const CustomOptionsFormat values[] = {CustomOptionsFormat_FLEXBUFFERS};
- return values;
-}
-
-inline const char *const *EnumNamesCustomOptionsFormat()
-{
- static const char *const names[] = {"FLEXBUFFERS", nullptr};
- return names;
-}
-
-inline const char *EnumNameCustomOptionsFormat(CustomOptionsFormat e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesCustomOptionsFormat()[index];
-}
-
-enum DataFormat
-{
- DataFormat_CHANNELS_LAST = 0,
- DataFormat_CHANNELS_FIRST = 1,
- DataFormat_MIN = DataFormat_CHANNELS_LAST,
- DataFormat_MAX = DataFormat_CHANNELS_FIRST
-};
-
-inline const DataFormat (&EnumValuesDataFormat())[2]
-{
- static const DataFormat values[] = {DataFormat_CHANNELS_LAST, DataFormat_CHANNELS_FIRST};
- return values;
-}
-
-inline const char *const *EnumNamesDataFormat()
-{
- static const char *const names[] = {"CHANNELS_LAST", "CHANNELS_FIRST", nullptr};
- return names;
-}
-
-inline const char *EnumNameDataFormat(DataFormat e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesDataFormat()[index];
-}
-
-struct CustomQuantization FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_CUSTOM = 4
- };
- const flatbuffers::Vector<uint8_t> *custom() const
- {
- return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_CUSTOM);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_CUSTOM) &&
- verifier.VerifyVector(custom()) && verifier.EndTable();
- }
-};
-
-struct CustomQuantizationBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_custom(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom)
- {
- fbb_.AddOffset(CustomQuantization::VT_CUSTOM, custom);
- }
- explicit CustomQuantizationBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- CustomQuantizationBuilder &operator=(const CustomQuantizationBuilder &);
- flatbuffers::Offset<CustomQuantization> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<CustomQuantization>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<CustomQuantization>
-CreateCustomQuantization(flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom = 0)
-{
- CustomQuantizationBuilder builder_(_fbb);
- builder_.add_custom(custom);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<CustomQuantization>
-CreateCustomQuantizationDirect(flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<uint8_t> *custom = nullptr)
-{
- return circle::CreateCustomQuantization(_fbb, custom ? _fbb.CreateVector<uint8_t>(*custom) : 0);
-}
-
-struct QuantizationParameters FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_MIN = 4,
- VT_MAX = 6,
- VT_SCALE = 8,
- VT_ZERO_POINT = 10,
- VT_DETAILS_TYPE = 12,
- VT_DETAILS = 14
- };
- const flatbuffers::Vector<float> *min() const
- {
- return GetPointer<const flatbuffers::Vector<float> *>(VT_MIN);
- }
- const flatbuffers::Vector<float> *max() const
- {
- return GetPointer<const flatbuffers::Vector<float> *>(VT_MAX);
- }
- const flatbuffers::Vector<float> *scale() const
- {
- return GetPointer<const flatbuffers::Vector<float> *>(VT_SCALE);
- }
- const flatbuffers::Vector<int64_t> *zero_point() const
- {
- return GetPointer<const flatbuffers::Vector<int64_t> *>(VT_ZERO_POINT);
- }
- QuantizationDetails details_type() const
- {
- return static_cast<QuantizationDetails>(GetField<uint8_t>(VT_DETAILS_TYPE, 0));
- }
- const void *details() const { return GetPointer<const void *>(VT_DETAILS); }
- template <typename T> const T *details_as() const;
- const CustomQuantization *details_as_CustomQuantization() const
- {
- return details_type() == QuantizationDetails_CustomQuantization
- ? static_cast<const CustomQuantization *>(details())
- : nullptr;
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_MIN) &&
- verifier.VerifyVector(min()) && VerifyOffset(verifier, VT_MAX) &&
- verifier.VerifyVector(max()) && VerifyOffset(verifier, VT_SCALE) &&
- verifier.VerifyVector(scale()) && VerifyOffset(verifier, VT_ZERO_POINT) &&
- verifier.VerifyVector(zero_point()) && VerifyField<uint8_t>(verifier, VT_DETAILS_TYPE) &&
- VerifyOffset(verifier, VT_DETAILS) &&
- VerifyQuantizationDetails(verifier, details(), details_type()) && verifier.EndTable();
- }
-};
-
-template <>
-inline const CustomQuantization *QuantizationParameters::details_as<CustomQuantization>() const
-{
- return details_as_CustomQuantization();
-}
-
-struct QuantizationParametersBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_min(flatbuffers::Offset<flatbuffers::Vector<float>> min)
- {
- fbb_.AddOffset(QuantizationParameters::VT_MIN, min);
- }
- void add_max(flatbuffers::Offset<flatbuffers::Vector<float>> max)
- {
- fbb_.AddOffset(QuantizationParameters::VT_MAX, max);
- }
- void add_scale(flatbuffers::Offset<flatbuffers::Vector<float>> scale)
- {
- fbb_.AddOffset(QuantizationParameters::VT_SCALE, scale);
- }
- void add_zero_point(flatbuffers::Offset<flatbuffers::Vector<int64_t>> zero_point)
- {
- fbb_.AddOffset(QuantizationParameters::VT_ZERO_POINT, zero_point);
- }
- void add_details_type(QuantizationDetails details_type)
- {
- fbb_.AddElement<uint8_t>(QuantizationParameters::VT_DETAILS_TYPE,
- static_cast<uint8_t>(details_type), 0);
- }
- void add_details(flatbuffers::Offset<void> details)
- {
- fbb_.AddOffset(QuantizationParameters::VT_DETAILS, details);
- }
- explicit QuantizationParametersBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- QuantizationParametersBuilder &operator=(const QuantizationParametersBuilder &);
- flatbuffers::Offset<QuantizationParameters> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<QuantizationParameters>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<QuantizationParameters>
-CreateQuantizationParameters(flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<float>> min = 0,
- flatbuffers::Offset<flatbuffers::Vector<float>> max = 0,
- flatbuffers::Offset<flatbuffers::Vector<float>> scale = 0,
- flatbuffers::Offset<flatbuffers::Vector<int64_t>> zero_point = 0,
- QuantizationDetails details_type = QuantizationDetails_NONE,
- flatbuffers::Offset<void> details = 0)
-{
- QuantizationParametersBuilder builder_(_fbb);
- builder_.add_details(details);
- builder_.add_zero_point(zero_point);
- builder_.add_scale(scale);
- builder_.add_max(max);
- builder_.add_min(min);
- builder_.add_details_type(details_type);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<QuantizationParameters> CreateQuantizationParametersDirect(
- flatbuffers::FlatBufferBuilder &_fbb, const std::vector<float> *min = nullptr,
- const std::vector<float> *max = nullptr, const std::vector<float> *scale = nullptr,
- const std::vector<int64_t> *zero_point = nullptr,
- QuantizationDetails details_type = QuantizationDetails_NONE,
- flatbuffers::Offset<void> details = 0)
-{
- return circle::CreateQuantizationParameters(
- _fbb, min ? _fbb.CreateVector<float>(*min) : 0, max ? _fbb.CreateVector<float>(*max) : 0,
- scale ? _fbb.CreateVector<float>(*scale) : 0,
- zero_point ? _fbb.CreateVector<int64_t>(*zero_point) : 0, details_type, details);
-}
-
-struct Tensor FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_SHAPE = 4,
- VT_TYPE = 6,
- VT_BUFFER = 8,
- VT_NAME = 10,
- VT_QUANTIZATION = 12,
- VT_IS_VARIABLE = 14
- };
- const flatbuffers::Vector<int32_t> *shape() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_SHAPE);
- }
- TensorType type() const { return static_cast<TensorType>(GetField<int8_t>(VT_TYPE, 0)); }
- uint32_t buffer() const { return GetField<uint32_t>(VT_BUFFER, 0); }
- const flatbuffers::String *name() const
- {
- return GetPointer<const flatbuffers::String *>(VT_NAME);
- }
- const QuantizationParameters *quantization() const
- {
- return GetPointer<const QuantizationParameters *>(VT_QUANTIZATION);
- }
- bool is_variable() const { return GetField<uint8_t>(VT_IS_VARIABLE, 0) != 0; }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_SHAPE) &&
- verifier.VerifyVector(shape()) && VerifyField<int8_t>(verifier, VT_TYPE) &&
- VerifyField<uint32_t>(verifier, VT_BUFFER) && VerifyOffset(verifier, VT_NAME) &&
- verifier.VerifyString(name()) && VerifyOffset(verifier, VT_QUANTIZATION) &&
- verifier.VerifyTable(quantization()) && VerifyField<uint8_t>(verifier, VT_IS_VARIABLE) &&
- verifier.EndTable();
- }
-};
-
-struct TensorBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_shape(flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape)
- {
- fbb_.AddOffset(Tensor::VT_SHAPE, shape);
- }
- void add_type(TensorType type)
- {
- fbb_.AddElement<int8_t>(Tensor::VT_TYPE, static_cast<int8_t>(type), 0);
- }
- void add_buffer(uint32_t buffer) { fbb_.AddElement<uint32_t>(Tensor::VT_BUFFER, buffer, 0); }
- void add_name(flatbuffers::Offset<flatbuffers::String> name)
- {
- fbb_.AddOffset(Tensor::VT_NAME, name);
- }
- void add_quantization(flatbuffers::Offset<QuantizationParameters> quantization)
- {
- fbb_.AddOffset(Tensor::VT_QUANTIZATION, quantization);
- }
- void add_is_variable(bool is_variable)
- {
- fbb_.AddElement<uint8_t>(Tensor::VT_IS_VARIABLE, static_cast<uint8_t>(is_variable), 0);
- }
- explicit TensorBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- TensorBuilder &operator=(const TensorBuilder &);
- flatbuffers::Offset<Tensor> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<Tensor>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<Tensor>
-CreateTensor(flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape = 0,
- TensorType type = TensorType_FLOAT32, uint32_t buffer = 0,
- flatbuffers::Offset<flatbuffers::String> name = 0,
- flatbuffers::Offset<QuantizationParameters> quantization = 0, bool is_variable = false)
-{
- TensorBuilder builder_(_fbb);
- builder_.add_quantization(quantization);
- builder_.add_name(name);
- builder_.add_buffer(buffer);
- builder_.add_shape(shape);
- builder_.add_is_variable(is_variable);
- builder_.add_type(type);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<Tensor> CreateTensorDirect(
- flatbuffers::FlatBufferBuilder &_fbb, const std::vector<int32_t> *shape = nullptr,
- TensorType type = TensorType_FLOAT32, uint32_t buffer = 0, const char *name = nullptr,
- flatbuffers::Offset<QuantizationParameters> quantization = 0, bool is_variable = false)
-{
- return circle::CreateTensor(_fbb, shape ? _fbb.CreateVector<int32_t>(*shape) : 0, type, buffer,
- name ? _fbb.CreateString(name) : 0, quantization, is_variable);
-}
-
-struct Conv2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_PADDING = 4,
- VT_STRIDE_W = 6,
- VT_STRIDE_H = 8,
- VT_FUSED_ACTIVATION_FUNCTION = 10,
- VT_DILATION_W_FACTOR = 12,
- VT_DILATION_H_FACTOR = 14
- };
- Padding padding() const { return static_cast<Padding>(GetField<int8_t>(VT_PADDING, 0)); }
- int32_t stride_w() const { return GetField<int32_t>(VT_STRIDE_W, 0); }
- int32_t stride_h() const { return GetField<int32_t>(VT_STRIDE_H, 0); }
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- int32_t dilation_w_factor() const { return GetField<int32_t>(VT_DILATION_W_FACTOR, 1); }
- int32_t dilation_h_factor() const { return GetField<int32_t>(VT_DILATION_H_FACTOR, 1); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_PADDING) &&
- VerifyField<int32_t>(verifier, VT_STRIDE_W) &&
- VerifyField<int32_t>(verifier, VT_STRIDE_H) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
- VerifyField<int32_t>(verifier, VT_DILATION_W_FACTOR) &&
- VerifyField<int32_t>(verifier, VT_DILATION_H_FACTOR) && verifier.EndTable();
- }
-};
-
-struct Conv2DOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_padding(Padding padding)
- {
- fbb_.AddElement<int8_t>(Conv2DOptions::VT_PADDING, static_cast<int8_t>(padding), 0);
- }
- void add_stride_w(int32_t stride_w)
- {
- fbb_.AddElement<int32_t>(Conv2DOptions::VT_STRIDE_W, stride_w, 0);
- }
- void add_stride_h(int32_t stride_h)
- {
- fbb_.AddElement<int32_t>(Conv2DOptions::VT_STRIDE_H, stride_h, 0);
- }
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(Conv2DOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- void add_dilation_w_factor(int32_t dilation_w_factor)
- {
- fbb_.AddElement<int32_t>(Conv2DOptions::VT_DILATION_W_FACTOR, dilation_w_factor, 1);
- }
- void add_dilation_h_factor(int32_t dilation_h_factor)
- {
- fbb_.AddElement<int32_t>(Conv2DOptions::VT_DILATION_H_FACTOR, dilation_h_factor, 1);
- }
- explicit Conv2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- Conv2DOptionsBuilder &operator=(const Conv2DOptionsBuilder &);
- flatbuffers::Offset<Conv2DOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<Conv2DOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<Conv2DOptions>
-CreateConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb, Padding padding = Padding_SAME,
- int32_t stride_w = 0, int32_t stride_h = 0,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE,
- int32_t dilation_w_factor = 1, int32_t dilation_h_factor = 1)
-{
- Conv2DOptionsBuilder builder_(_fbb);
- builder_.add_dilation_h_factor(dilation_h_factor);
- builder_.add_dilation_w_factor(dilation_w_factor);
- builder_.add_stride_h(stride_h);
- builder_.add_stride_w(stride_w);
- builder_.add_fused_activation_function(fused_activation_function);
- builder_.add_padding(padding);
- return builder_.Finish();
-}
-
-struct Pool2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_PADDING = 4,
- VT_STRIDE_W = 6,
- VT_STRIDE_H = 8,
- VT_FILTER_WIDTH = 10,
- VT_FILTER_HEIGHT = 12,
- VT_FUSED_ACTIVATION_FUNCTION = 14
- };
- Padding padding() const { return static_cast<Padding>(GetField<int8_t>(VT_PADDING, 0)); }
- int32_t stride_w() const { return GetField<int32_t>(VT_STRIDE_W, 0); }
- int32_t stride_h() const { return GetField<int32_t>(VT_STRIDE_H, 0); }
- int32_t filter_width() const { return GetField<int32_t>(VT_FILTER_WIDTH, 0); }
- int32_t filter_height() const { return GetField<int32_t>(VT_FILTER_HEIGHT, 0); }
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_PADDING) &&
- VerifyField<int32_t>(verifier, VT_STRIDE_W) &&
- VerifyField<int32_t>(verifier, VT_STRIDE_H) &&
- VerifyField<int32_t>(verifier, VT_FILTER_WIDTH) &&
- VerifyField<int32_t>(verifier, VT_FILTER_HEIGHT) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
- }
-};
-
-struct Pool2DOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_padding(Padding padding)
- {
- fbb_.AddElement<int8_t>(Pool2DOptions::VT_PADDING, static_cast<int8_t>(padding), 0);
- }
- void add_stride_w(int32_t stride_w)
- {
- fbb_.AddElement<int32_t>(Pool2DOptions::VT_STRIDE_W, stride_w, 0);
- }
- void add_stride_h(int32_t stride_h)
- {
- fbb_.AddElement<int32_t>(Pool2DOptions::VT_STRIDE_H, stride_h, 0);
- }
- void add_filter_width(int32_t filter_width)
- {
- fbb_.AddElement<int32_t>(Pool2DOptions::VT_FILTER_WIDTH, filter_width, 0);
- }
- void add_filter_height(int32_t filter_height)
- {
- fbb_.AddElement<int32_t>(Pool2DOptions::VT_FILTER_HEIGHT, filter_height, 0);
- }
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(Pool2DOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- explicit Pool2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- Pool2DOptionsBuilder &operator=(const Pool2DOptionsBuilder &);
- flatbuffers::Offset<Pool2DOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<Pool2DOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<Pool2DOptions>
-CreatePool2DOptions(flatbuffers::FlatBufferBuilder &_fbb, Padding padding = Padding_SAME,
- int32_t stride_w = 0, int32_t stride_h = 0, int32_t filter_width = 0,
- int32_t filter_height = 0,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE)
-{
- Pool2DOptionsBuilder builder_(_fbb);
- builder_.add_filter_height(filter_height);
- builder_.add_filter_width(filter_width);
- builder_.add_stride_h(stride_h);
- builder_.add_stride_w(stride_w);
- builder_.add_fused_activation_function(fused_activation_function);
- builder_.add_padding(padding);
- return builder_.Finish();
-}
-
-struct DepthwiseConv2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_PADDING = 4,
- VT_STRIDE_W = 6,
- VT_STRIDE_H = 8,
- VT_DEPTH_MULTIPLIER = 10,
- VT_FUSED_ACTIVATION_FUNCTION = 12,
- VT_DILATION_W_FACTOR = 14,
- VT_DILATION_H_FACTOR = 16
- };
- Padding padding() const { return static_cast<Padding>(GetField<int8_t>(VT_PADDING, 0)); }
- int32_t stride_w() const { return GetField<int32_t>(VT_STRIDE_W, 0); }
- int32_t stride_h() const { return GetField<int32_t>(VT_STRIDE_H, 0); }
- int32_t depth_multiplier() const { return GetField<int32_t>(VT_DEPTH_MULTIPLIER, 0); }
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- int32_t dilation_w_factor() const { return GetField<int32_t>(VT_DILATION_W_FACTOR, 1); }
- int32_t dilation_h_factor() const { return GetField<int32_t>(VT_DILATION_H_FACTOR, 1); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_PADDING) &&
- VerifyField<int32_t>(verifier, VT_STRIDE_W) &&
- VerifyField<int32_t>(verifier, VT_STRIDE_H) &&
- VerifyField<int32_t>(verifier, VT_DEPTH_MULTIPLIER) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
- VerifyField<int32_t>(verifier, VT_DILATION_W_FACTOR) &&
- VerifyField<int32_t>(verifier, VT_DILATION_H_FACTOR) && verifier.EndTable();
- }
-};
-
-struct DepthwiseConv2DOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_padding(Padding padding)
- {
- fbb_.AddElement<int8_t>(DepthwiseConv2DOptions::VT_PADDING, static_cast<int8_t>(padding), 0);
- }
- void add_stride_w(int32_t stride_w)
- {
- fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_STRIDE_W, stride_w, 0);
- }
- void add_stride_h(int32_t stride_h)
- {
- fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_STRIDE_H, stride_h, 0);
- }
- void add_depth_multiplier(int32_t depth_multiplier)
- {
- fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_DEPTH_MULTIPLIER, depth_multiplier, 0);
- }
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(DepthwiseConv2DOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- void add_dilation_w_factor(int32_t dilation_w_factor)
- {
- fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_DILATION_W_FACTOR, dilation_w_factor, 1);
- }
- void add_dilation_h_factor(int32_t dilation_h_factor)
- {
- fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_DILATION_H_FACTOR, dilation_h_factor, 1);
- }
- explicit DepthwiseConv2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- DepthwiseConv2DOptionsBuilder &operator=(const DepthwiseConv2DOptionsBuilder &);
- flatbuffers::Offset<DepthwiseConv2DOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<DepthwiseConv2DOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<DepthwiseConv2DOptions> CreateDepthwiseConv2DOptions(
- flatbuffers::FlatBufferBuilder &_fbb, Padding padding = Padding_SAME, int32_t stride_w = 0,
- int32_t stride_h = 0, int32_t depth_multiplier = 0,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE,
- int32_t dilation_w_factor = 1, int32_t dilation_h_factor = 1)
-{
- DepthwiseConv2DOptionsBuilder builder_(_fbb);
- builder_.add_dilation_h_factor(dilation_h_factor);
- builder_.add_dilation_w_factor(dilation_w_factor);
- builder_.add_depth_multiplier(depth_multiplier);
- builder_.add_stride_h(stride_h);
- builder_.add_stride_w(stride_w);
- builder_.add_fused_activation_function(fused_activation_function);
- builder_.add_padding(padding);
- return builder_.Finish();
-}
-
-struct ConcatEmbeddingsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_NUM_CHANNELS = 4,
- VT_NUM_COLUMNS_PER_CHANNEL = 6,
- VT_EMBEDDING_DIM_PER_CHANNEL = 8
- };
- int32_t num_channels() const { return GetField<int32_t>(VT_NUM_CHANNELS, 0); }
- const flatbuffers::Vector<int32_t> *num_columns_per_channel() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_NUM_COLUMNS_PER_CHANNEL);
- }
- const flatbuffers::Vector<int32_t> *embedding_dim_per_channel() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_EMBEDDING_DIM_PER_CHANNEL);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_NUM_CHANNELS) &&
- VerifyOffset(verifier, VT_NUM_COLUMNS_PER_CHANNEL) &&
- verifier.VerifyVector(num_columns_per_channel()) &&
- VerifyOffset(verifier, VT_EMBEDDING_DIM_PER_CHANNEL) &&
- verifier.VerifyVector(embedding_dim_per_channel()) && verifier.EndTable();
- }
-};
-
-struct ConcatEmbeddingsOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_num_channels(int32_t num_channels)
- {
- fbb_.AddElement<int32_t>(ConcatEmbeddingsOptions::VT_NUM_CHANNELS, num_channels, 0);
- }
- void add_num_columns_per_channel(
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> num_columns_per_channel)
- {
- fbb_.AddOffset(ConcatEmbeddingsOptions::VT_NUM_COLUMNS_PER_CHANNEL, num_columns_per_channel);
- }
- void add_embedding_dim_per_channel(
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> embedding_dim_per_channel)
- {
- fbb_.AddOffset(ConcatEmbeddingsOptions::VT_EMBEDDING_DIM_PER_CHANNEL,
- embedding_dim_per_channel);
- }
- explicit ConcatEmbeddingsOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ConcatEmbeddingsOptionsBuilder &operator=(const ConcatEmbeddingsOptionsBuilder &);
- flatbuffers::Offset<ConcatEmbeddingsOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ConcatEmbeddingsOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ConcatEmbeddingsOptions> CreateConcatEmbeddingsOptions(
- flatbuffers::FlatBufferBuilder &_fbb, int32_t num_channels = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> num_columns_per_channel = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> embedding_dim_per_channel = 0)
-{
- ConcatEmbeddingsOptionsBuilder builder_(_fbb);
- builder_.add_embedding_dim_per_channel(embedding_dim_per_channel);
- builder_.add_num_columns_per_channel(num_columns_per_channel);
- builder_.add_num_channels(num_channels);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<ConcatEmbeddingsOptions>
-CreateConcatEmbeddingsOptionsDirect(flatbuffers::FlatBufferBuilder &_fbb, int32_t num_channels = 0,
- const std::vector<int32_t> *num_columns_per_channel = nullptr,
- const std::vector<int32_t> *embedding_dim_per_channel = nullptr)
-{
- return circle::CreateConcatEmbeddingsOptions(
- _fbb, num_channels,
- num_columns_per_channel ? _fbb.CreateVector<int32_t>(*num_columns_per_channel) : 0,
- embedding_dim_per_channel ? _fbb.CreateVector<int32_t>(*embedding_dim_per_channel) : 0);
-}
-
-struct LSHProjectionOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_TYPE = 4
- };
- LSHProjectionType type() const
- {
- return static_cast<LSHProjectionType>(GetField<int8_t>(VT_TYPE, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_TYPE) &&
- verifier.EndTable();
- }
-};
-
-struct LSHProjectionOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_type(LSHProjectionType type)
- {
- fbb_.AddElement<int8_t>(LSHProjectionOptions::VT_TYPE, static_cast<int8_t>(type), 0);
- }
- explicit LSHProjectionOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- LSHProjectionOptionsBuilder &operator=(const LSHProjectionOptionsBuilder &);
- flatbuffers::Offset<LSHProjectionOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<LSHProjectionOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<LSHProjectionOptions>
-CreateLSHProjectionOptions(flatbuffers::FlatBufferBuilder &_fbb,
- LSHProjectionType type = LSHProjectionType_UNKNOWN)
-{
- LSHProjectionOptionsBuilder builder_(_fbb);
- builder_.add_type(type);
- return builder_.Finish();
-}
-
-struct SVDFOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_RANK = 4,
- VT_FUSED_ACTIVATION_FUNCTION = 6
- };
- int32_t rank() const { return GetField<int32_t>(VT_RANK, 0); }
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_RANK) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
- }
-};
-
-struct SVDFOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_rank(int32_t rank) { fbb_.AddElement<int32_t>(SVDFOptions::VT_RANK, rank, 0); }
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(SVDFOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- explicit SVDFOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SVDFOptionsBuilder &operator=(const SVDFOptionsBuilder &);
- flatbuffers::Offset<SVDFOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SVDFOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SVDFOptions>
-CreateSVDFOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t rank = 0,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE)
-{
- SVDFOptionsBuilder builder_(_fbb);
- builder_.add_rank(rank);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct RNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FUSED_ACTIVATION_FUNCTION = 4
- };
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
- }
-};
-
-struct RNNOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(RNNOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- explicit RNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- RNNOptionsBuilder &operator=(const RNNOptionsBuilder &);
- flatbuffers::Offset<RNNOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<RNNOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<RNNOptions>
-CreateRNNOptions(flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE)
-{
- RNNOptionsBuilder builder_(_fbb);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct SequenceRNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_TIME_MAJOR = 4,
- VT_FUSED_ACTIVATION_FUNCTION = 6
- };
- bool time_major() const { return GetField<uint8_t>(VT_TIME_MAJOR, 0) != 0; }
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_TIME_MAJOR) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
- }
-};
-
-struct SequenceRNNOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_time_major(bool time_major)
- {
- fbb_.AddElement<uint8_t>(SequenceRNNOptions::VT_TIME_MAJOR, static_cast<uint8_t>(time_major),
- 0);
- }
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(SequenceRNNOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- explicit SequenceRNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SequenceRNNOptionsBuilder &operator=(const SequenceRNNOptionsBuilder &);
- flatbuffers::Offset<SequenceRNNOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SequenceRNNOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SequenceRNNOptions> CreateSequenceRNNOptions(
- flatbuffers::FlatBufferBuilder &_fbb, bool time_major = false,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE)
-{
- SequenceRNNOptionsBuilder builder_(_fbb);
- builder_.add_fused_activation_function(fused_activation_function);
- builder_.add_time_major(time_major);
- return builder_.Finish();
-}
-
-struct BidirectionalSequenceRNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_TIME_MAJOR = 4,
- VT_FUSED_ACTIVATION_FUNCTION = 6,
- VT_MERGE_OUTPUTS = 8
- };
- bool time_major() const { return GetField<uint8_t>(VT_TIME_MAJOR, 0) != 0; }
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool merge_outputs() const { return GetField<uint8_t>(VT_MERGE_OUTPUTS, 0) != 0; }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_TIME_MAJOR) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
- VerifyField<uint8_t>(verifier, VT_MERGE_OUTPUTS) && verifier.EndTable();
- }
-};
-
-struct BidirectionalSequenceRNNOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_time_major(bool time_major)
- {
- fbb_.AddElement<uint8_t>(BidirectionalSequenceRNNOptions::VT_TIME_MAJOR,
- static_cast<uint8_t>(time_major), 0);
- }
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(BidirectionalSequenceRNNOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- void add_merge_outputs(bool merge_outputs)
- {
- fbb_.AddElement<uint8_t>(BidirectionalSequenceRNNOptions::VT_MERGE_OUTPUTS,
- static_cast<uint8_t>(merge_outputs), 0);
- }
- explicit BidirectionalSequenceRNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- BidirectionalSequenceRNNOptionsBuilder &operator=(const BidirectionalSequenceRNNOptionsBuilder &);
- flatbuffers::Offset<BidirectionalSequenceRNNOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<BidirectionalSequenceRNNOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<BidirectionalSequenceRNNOptions> CreateBidirectionalSequenceRNNOptions(
- flatbuffers::FlatBufferBuilder &_fbb, bool time_major = false,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE,
- bool merge_outputs = false)
-{
- BidirectionalSequenceRNNOptionsBuilder builder_(_fbb);
- builder_.add_merge_outputs(merge_outputs);
- builder_.add_fused_activation_function(fused_activation_function);
- builder_.add_time_major(time_major);
- return builder_.Finish();
-}
-
-struct FullyConnectedOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FUSED_ACTIVATION_FUNCTION = 4,
- VT_WEIGHTS_FORMAT = 6
- };
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- FullyConnectedOptionsWeightsFormat weights_format() const
- {
- return static_cast<FullyConnectedOptionsWeightsFormat>(GetField<int8_t>(VT_WEIGHTS_FORMAT, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
- VerifyField<int8_t>(verifier, VT_WEIGHTS_FORMAT) && verifier.EndTable();
- }
-};
-
-struct FullyConnectedOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(FullyConnectedOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- void add_weights_format(FullyConnectedOptionsWeightsFormat weights_format)
- {
- fbb_.AddElement<int8_t>(FullyConnectedOptions::VT_WEIGHTS_FORMAT,
- static_cast<int8_t>(weights_format), 0);
- }
- explicit FullyConnectedOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- FullyConnectedOptionsBuilder &operator=(const FullyConnectedOptionsBuilder &);
- flatbuffers::Offset<FullyConnectedOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<FullyConnectedOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<FullyConnectedOptions> CreateFullyConnectedOptions(
- flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE,
- FullyConnectedOptionsWeightsFormat weights_format = FullyConnectedOptionsWeightsFormat_DEFAULT)
-{
- FullyConnectedOptionsBuilder builder_(_fbb);
- builder_.add_weights_format(weights_format);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct SoftmaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_BETA = 4
- };
- float beta() const { return GetField<float>(VT_BETA, 0.0f); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<float>(verifier, VT_BETA) &&
- verifier.EndTable();
- }
-};
-
-struct SoftmaxOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_beta(float beta) { fbb_.AddElement<float>(SoftmaxOptions::VT_BETA, beta, 0.0f); }
- explicit SoftmaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SoftmaxOptionsBuilder &operator=(const SoftmaxOptionsBuilder &);
- flatbuffers::Offset<SoftmaxOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SoftmaxOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SoftmaxOptions>
-CreateSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, float beta = 0.0f)
-{
- SoftmaxOptionsBuilder builder_(_fbb);
- builder_.add_beta(beta);
- return builder_.Finish();
-}
-
-struct ConcatenationOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_AXIS = 4,
- VT_FUSED_ACTIVATION_FUNCTION = 6
- };
- int32_t axis() const { return GetField<int32_t>(VT_AXIS, 0); }
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_AXIS) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
- }
-};
-
-struct ConcatenationOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_axis(int32_t axis) { fbb_.AddElement<int32_t>(ConcatenationOptions::VT_AXIS, axis, 0); }
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(ConcatenationOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- explicit ConcatenationOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ConcatenationOptionsBuilder &operator=(const ConcatenationOptionsBuilder &);
- flatbuffers::Offset<ConcatenationOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ConcatenationOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ConcatenationOptions> CreateConcatenationOptions(
- flatbuffers::FlatBufferBuilder &_fbb, int32_t axis = 0,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE)
-{
- ConcatenationOptionsBuilder builder_(_fbb);
- builder_.add_axis(axis);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct AddOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FUSED_ACTIVATION_FUNCTION = 4
- };
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
- }
-};
-
-struct AddOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(AddOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- explicit AddOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- AddOptionsBuilder &operator=(const AddOptionsBuilder &);
- flatbuffers::Offset<AddOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<AddOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<AddOptions>
-CreateAddOptions(flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE)
-{
- AddOptionsBuilder builder_(_fbb);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct MulOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FUSED_ACTIVATION_FUNCTION = 4
- };
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
- }
-};
-
-struct MulOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(MulOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- explicit MulOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- MulOptionsBuilder &operator=(const MulOptionsBuilder &);
- flatbuffers::Offset<MulOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<MulOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<MulOptions>
-CreateMulOptions(flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE)
-{
- MulOptionsBuilder builder_(_fbb);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct L2NormOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FUSED_ACTIVATION_FUNCTION = 4
- };
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
- }
-};
-
-struct L2NormOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(L2NormOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- explicit L2NormOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- L2NormOptionsBuilder &operator=(const L2NormOptionsBuilder &);
- flatbuffers::Offset<L2NormOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<L2NormOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<L2NormOptions>
-CreateL2NormOptions(flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE)
-{
- L2NormOptionsBuilder builder_(_fbb);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct LocalResponseNormalizationOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_RADIUS = 4,
- VT_BIAS = 6,
- VT_ALPHA = 8,
- VT_BETA = 10
- };
- int32_t radius() const { return GetField<int32_t>(VT_RADIUS, 0); }
- float bias() const { return GetField<float>(VT_BIAS, 0.0f); }
- float alpha() const { return GetField<float>(VT_ALPHA, 0.0f); }
- float beta() const { return GetField<float>(VT_BETA, 0.0f); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_RADIUS) &&
- VerifyField<float>(verifier, VT_BIAS) && VerifyField<float>(verifier, VT_ALPHA) &&
- VerifyField<float>(verifier, VT_BETA) && verifier.EndTable();
- }
-};
-
-struct LocalResponseNormalizationOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_radius(int32_t radius)
- {
- fbb_.AddElement<int32_t>(LocalResponseNormalizationOptions::VT_RADIUS, radius, 0);
- }
- void add_bias(float bias)
- {
- fbb_.AddElement<float>(LocalResponseNormalizationOptions::VT_BIAS, bias, 0.0f);
- }
- void add_alpha(float alpha)
- {
- fbb_.AddElement<float>(LocalResponseNormalizationOptions::VT_ALPHA, alpha, 0.0f);
- }
- void add_beta(float beta)
- {
- fbb_.AddElement<float>(LocalResponseNormalizationOptions::VT_BETA, beta, 0.0f);
- }
- explicit LocalResponseNormalizationOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- LocalResponseNormalizationOptionsBuilder &
- operator=(const LocalResponseNormalizationOptionsBuilder &);
- flatbuffers::Offset<LocalResponseNormalizationOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<LocalResponseNormalizationOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<LocalResponseNormalizationOptions>
-CreateLocalResponseNormalizationOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t radius = 0,
- float bias = 0.0f, float alpha = 0.0f, float beta = 0.0f)
-{
- LocalResponseNormalizationOptionsBuilder builder_(_fbb);
- builder_.add_beta(beta);
- builder_.add_alpha(alpha);
- builder_.add_bias(bias);
- builder_.add_radius(radius);
- return builder_.Finish();
-}
-
-struct LSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FUSED_ACTIVATION_FUNCTION = 4,
- VT_CELL_CLIP = 6,
- VT_PROJ_CLIP = 8,
- VT_KERNEL_TYPE = 10
- };
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- float cell_clip() const { return GetField<float>(VT_CELL_CLIP, 0.0f); }
- float proj_clip() const { return GetField<float>(VT_PROJ_CLIP, 0.0f); }
- LSTMKernelType kernel_type() const
- {
- return static_cast<LSTMKernelType>(GetField<int8_t>(VT_KERNEL_TYPE, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
- VerifyField<float>(verifier, VT_CELL_CLIP) &&
- VerifyField<float>(verifier, VT_PROJ_CLIP) &&
- VerifyField<int8_t>(verifier, VT_KERNEL_TYPE) && verifier.EndTable();
- }
-};
-
-struct LSTMOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(LSTMOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- void add_cell_clip(float cell_clip)
- {
- fbb_.AddElement<float>(LSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f);
- }
- void add_proj_clip(float proj_clip)
- {
- fbb_.AddElement<float>(LSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f);
- }
- void add_kernel_type(LSTMKernelType kernel_type)
- {
- fbb_.AddElement<int8_t>(LSTMOptions::VT_KERNEL_TYPE, static_cast<int8_t>(kernel_type), 0);
- }
- explicit LSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- LSTMOptionsBuilder &operator=(const LSTMOptionsBuilder &);
- flatbuffers::Offset<LSTMOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<LSTMOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<LSTMOptions>
-CreateLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE,
- float cell_clip = 0.0f, float proj_clip = 0.0f,
- LSTMKernelType kernel_type = LSTMKernelType_FULL)
-{
- LSTMOptionsBuilder builder_(_fbb);
- builder_.add_proj_clip(proj_clip);
- builder_.add_cell_clip(cell_clip);
- builder_.add_kernel_type(kernel_type);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct UnidirectionalSequenceLSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FUSED_ACTIVATION_FUNCTION = 4,
- VT_CELL_CLIP = 6,
- VT_PROJ_CLIP = 8,
- VT_TIME_MAJOR = 10
- };
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- float cell_clip() const { return GetField<float>(VT_CELL_CLIP, 0.0f); }
- float proj_clip() const { return GetField<float>(VT_PROJ_CLIP, 0.0f); }
- bool time_major() const { return GetField<uint8_t>(VT_TIME_MAJOR, 0) != 0; }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
- VerifyField<float>(verifier, VT_CELL_CLIP) &&
- VerifyField<float>(verifier, VT_PROJ_CLIP) &&
- VerifyField<uint8_t>(verifier, VT_TIME_MAJOR) && verifier.EndTable();
- }
-};
-
-struct UnidirectionalSequenceLSTMOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(UnidirectionalSequenceLSTMOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- void add_cell_clip(float cell_clip)
- {
- fbb_.AddElement<float>(UnidirectionalSequenceLSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f);
- }
- void add_proj_clip(float proj_clip)
- {
- fbb_.AddElement<float>(UnidirectionalSequenceLSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f);
- }
- void add_time_major(bool time_major)
- {
- fbb_.AddElement<uint8_t>(UnidirectionalSequenceLSTMOptions::VT_TIME_MAJOR,
- static_cast<uint8_t>(time_major), 0);
- }
- explicit UnidirectionalSequenceLSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- UnidirectionalSequenceLSTMOptionsBuilder &
- operator=(const UnidirectionalSequenceLSTMOptionsBuilder &);
- flatbuffers::Offset<UnidirectionalSequenceLSTMOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<UnidirectionalSequenceLSTMOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<UnidirectionalSequenceLSTMOptions>
-CreateUnidirectionalSequenceLSTMOptions(
- flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE,
- float cell_clip = 0.0f, float proj_clip = 0.0f, bool time_major = false)
-{
- UnidirectionalSequenceLSTMOptionsBuilder builder_(_fbb);
- builder_.add_proj_clip(proj_clip);
- builder_.add_cell_clip(cell_clip);
- builder_.add_time_major(time_major);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct BidirectionalSequenceLSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FUSED_ACTIVATION_FUNCTION = 4,
- VT_CELL_CLIP = 6,
- VT_PROJ_CLIP = 8,
- VT_MERGE_OUTPUTS = 10
- };
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- float cell_clip() const { return GetField<float>(VT_CELL_CLIP, 0.0f); }
- float proj_clip() const { return GetField<float>(VT_PROJ_CLIP, 0.0f); }
- bool merge_outputs() const { return GetField<uint8_t>(VT_MERGE_OUTPUTS, 0) != 0; }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
- VerifyField<float>(verifier, VT_CELL_CLIP) &&
- VerifyField<float>(verifier, VT_PROJ_CLIP) &&
- VerifyField<uint8_t>(verifier, VT_MERGE_OUTPUTS) && verifier.EndTable();
- }
-};
-
-struct BidirectionalSequenceLSTMOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(BidirectionalSequenceLSTMOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- void add_cell_clip(float cell_clip)
- {
- fbb_.AddElement<float>(BidirectionalSequenceLSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f);
- }
- void add_proj_clip(float proj_clip)
- {
- fbb_.AddElement<float>(BidirectionalSequenceLSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f);
- }
- void add_merge_outputs(bool merge_outputs)
- {
- fbb_.AddElement<uint8_t>(BidirectionalSequenceLSTMOptions::VT_MERGE_OUTPUTS,
- static_cast<uint8_t>(merge_outputs), 0);
- }
- explicit BidirectionalSequenceLSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- BidirectionalSequenceLSTMOptionsBuilder &
- operator=(const BidirectionalSequenceLSTMOptionsBuilder &);
- flatbuffers::Offset<BidirectionalSequenceLSTMOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<BidirectionalSequenceLSTMOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<BidirectionalSequenceLSTMOptions> CreateBidirectionalSequenceLSTMOptions(
- flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE,
- float cell_clip = 0.0f, float proj_clip = 0.0f, bool merge_outputs = false)
-{
- BidirectionalSequenceLSTMOptionsBuilder builder_(_fbb);
- builder_.add_proj_clip(proj_clip);
- builder_.add_cell_clip(cell_clip);
- builder_.add_merge_outputs(merge_outputs);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct ResizeBilinearOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_ALIGN_CORNERS = 8
- };
- bool align_corners() const { return GetField<uint8_t>(VT_ALIGN_CORNERS, 0) != 0; }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_ALIGN_CORNERS) &&
- verifier.EndTable();
- }
-};
-
-struct ResizeBilinearOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_align_corners(bool align_corners)
- {
- fbb_.AddElement<uint8_t>(ResizeBilinearOptions::VT_ALIGN_CORNERS,
- static_cast<uint8_t>(align_corners), 0);
- }
- explicit ResizeBilinearOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ResizeBilinearOptionsBuilder &operator=(const ResizeBilinearOptionsBuilder &);
- flatbuffers::Offset<ResizeBilinearOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ResizeBilinearOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ResizeBilinearOptions>
-CreateResizeBilinearOptions(flatbuffers::FlatBufferBuilder &_fbb, bool align_corners = false)
-{
- ResizeBilinearOptionsBuilder builder_(_fbb);
- builder_.add_align_corners(align_corners);
- return builder_.Finish();
-}
-
-struct ResizeNearestNeighborOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_ALIGN_CORNERS = 4
- };
- bool align_corners() const { return GetField<uint8_t>(VT_ALIGN_CORNERS, 0) != 0; }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_ALIGN_CORNERS) &&
- verifier.EndTable();
- }
-};
-
-struct ResizeNearestNeighborOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_align_corners(bool align_corners)
- {
- fbb_.AddElement<uint8_t>(ResizeNearestNeighborOptions::VT_ALIGN_CORNERS,
- static_cast<uint8_t>(align_corners), 0);
- }
- explicit ResizeNearestNeighborOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ResizeNearestNeighborOptionsBuilder &operator=(const ResizeNearestNeighborOptionsBuilder &);
- flatbuffers::Offset<ResizeNearestNeighborOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ResizeNearestNeighborOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ResizeNearestNeighborOptions>
-CreateResizeNearestNeighborOptions(flatbuffers::FlatBufferBuilder &_fbb, bool align_corners = false)
-{
- ResizeNearestNeighborOptionsBuilder builder_(_fbb);
- builder_.add_align_corners(align_corners);
- return builder_.Finish();
-}
-
-struct CallOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_SUBGRAPH = 4
- };
- uint32_t subgraph() const { return GetField<uint32_t>(VT_SUBGRAPH, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint32_t>(verifier, VT_SUBGRAPH) &&
- verifier.EndTable();
- }
-};
-
-struct CallOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_subgraph(uint32_t subgraph)
- {
- fbb_.AddElement<uint32_t>(CallOptions::VT_SUBGRAPH, subgraph, 0);
- }
- explicit CallOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- CallOptionsBuilder &operator=(const CallOptionsBuilder &);
- flatbuffers::Offset<CallOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<CallOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<CallOptions> CreateCallOptions(flatbuffers::FlatBufferBuilder &_fbb,
- uint32_t subgraph = 0)
-{
- CallOptionsBuilder builder_(_fbb);
- builder_.add_subgraph(subgraph);
- return builder_.Finish();
-}
-
-struct PadOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct PadOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit PadOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- PadOptionsBuilder &operator=(const PadOptionsBuilder &);
- flatbuffers::Offset<PadOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<PadOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<PadOptions> CreatePadOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- PadOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct PadV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct PadV2OptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit PadV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- PadV2OptionsBuilder &operator=(const PadV2OptionsBuilder &);
- flatbuffers::Offset<PadV2Options> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<PadV2Options>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<PadV2Options> CreatePadV2Options(flatbuffers::FlatBufferBuilder &_fbb)
-{
- PadV2OptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct ReshapeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_NEW_SHAPE = 4
- };
- const flatbuffers::Vector<int32_t> *new_shape() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_NEW_SHAPE);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_NEW_SHAPE) &&
- verifier.VerifyVector(new_shape()) && verifier.EndTable();
- }
-};
-
-struct ReshapeOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_new_shape(flatbuffers::Offset<flatbuffers::Vector<int32_t>> new_shape)
- {
- fbb_.AddOffset(ReshapeOptions::VT_NEW_SHAPE, new_shape);
- }
- explicit ReshapeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ReshapeOptionsBuilder &operator=(const ReshapeOptionsBuilder &);
- flatbuffers::Offset<ReshapeOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ReshapeOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ReshapeOptions>
-CreateReshapeOptions(flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> new_shape = 0)
-{
- ReshapeOptionsBuilder builder_(_fbb);
- builder_.add_new_shape(new_shape);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<ReshapeOptions>
-CreateReshapeOptionsDirect(flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<int32_t> *new_shape = nullptr)
-{
- return circle::CreateReshapeOptions(_fbb, new_shape ? _fbb.CreateVector<int32_t>(*new_shape) : 0);
-}
-
-struct SpaceToBatchNDOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct SpaceToBatchNDOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit SpaceToBatchNDOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SpaceToBatchNDOptionsBuilder &operator=(const SpaceToBatchNDOptionsBuilder &);
- flatbuffers::Offset<SpaceToBatchNDOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SpaceToBatchNDOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SpaceToBatchNDOptions>
-CreateSpaceToBatchNDOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- SpaceToBatchNDOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct BatchToSpaceNDOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct BatchToSpaceNDOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit BatchToSpaceNDOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- BatchToSpaceNDOptionsBuilder &operator=(const BatchToSpaceNDOptionsBuilder &);
- flatbuffers::Offset<BatchToSpaceNDOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<BatchToSpaceNDOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<BatchToSpaceNDOptions>
-CreateBatchToSpaceNDOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- BatchToSpaceNDOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct SkipGramOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_NGRAM_SIZE = 4,
- VT_MAX_SKIP_SIZE = 6,
- VT_INCLUDE_ALL_NGRAMS = 8
- };
- int32_t ngram_size() const { return GetField<int32_t>(VT_NGRAM_SIZE, 0); }
- int32_t max_skip_size() const { return GetField<int32_t>(VT_MAX_SKIP_SIZE, 0); }
- bool include_all_ngrams() const { return GetField<uint8_t>(VT_INCLUDE_ALL_NGRAMS, 0) != 0; }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_NGRAM_SIZE) &&
- VerifyField<int32_t>(verifier, VT_MAX_SKIP_SIZE) &&
- VerifyField<uint8_t>(verifier, VT_INCLUDE_ALL_NGRAMS) && verifier.EndTable();
- }
-};
-
-struct SkipGramOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_ngram_size(int32_t ngram_size)
- {
- fbb_.AddElement<int32_t>(SkipGramOptions::VT_NGRAM_SIZE, ngram_size, 0);
- }
- void add_max_skip_size(int32_t max_skip_size)
- {
- fbb_.AddElement<int32_t>(SkipGramOptions::VT_MAX_SKIP_SIZE, max_skip_size, 0);
- }
- void add_include_all_ngrams(bool include_all_ngrams)
- {
- fbb_.AddElement<uint8_t>(SkipGramOptions::VT_INCLUDE_ALL_NGRAMS,
- static_cast<uint8_t>(include_all_ngrams), 0);
- }
- explicit SkipGramOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SkipGramOptionsBuilder &operator=(const SkipGramOptionsBuilder &);
- flatbuffers::Offset<SkipGramOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SkipGramOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SkipGramOptions>
-CreateSkipGramOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t ngram_size = 0,
- int32_t max_skip_size = 0, bool include_all_ngrams = false)
-{
- SkipGramOptionsBuilder builder_(_fbb);
- builder_.add_max_skip_size(max_skip_size);
- builder_.add_ngram_size(ngram_size);
- builder_.add_include_all_ngrams(include_all_ngrams);
- return builder_.Finish();
-}
-
-struct SpaceToDepthOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_BLOCK_SIZE = 4
- };
- int32_t block_size() const { return GetField<int32_t>(VT_BLOCK_SIZE, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_BLOCK_SIZE) &&
- verifier.EndTable();
- }
-};
-
-struct SpaceToDepthOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_block_size(int32_t block_size)
- {
- fbb_.AddElement<int32_t>(SpaceToDepthOptions::VT_BLOCK_SIZE, block_size, 0);
- }
- explicit SpaceToDepthOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SpaceToDepthOptionsBuilder &operator=(const SpaceToDepthOptionsBuilder &);
- flatbuffers::Offset<SpaceToDepthOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SpaceToDepthOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SpaceToDepthOptions>
-CreateSpaceToDepthOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t block_size = 0)
-{
- SpaceToDepthOptionsBuilder builder_(_fbb);
- builder_.add_block_size(block_size);
- return builder_.Finish();
-}
-
// Schema-generated accessor table for Sub (elementwise subtraction) options.
struct SubOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  enum
  {
    VT_FUSED_ACTIVATION_FUNCTION = 4 // vtable offset of the fused activation field
  };
  // Stored as int8_t in the buffer; re-widened to the enum on read.
  ActivationFunctionType fused_activation_function() const
  {
    return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
  }
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) &&
           VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
  }
};

// Incrementally writes SubOptions fields into an under-construction buffer.
struct SubOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_fused_activation_function(ActivationFunctionType fused_activation_function)
  {
    fbb_.AddElement<int8_t>(SubOptions::VT_FUSED_ACTIVATION_FUNCTION,
                            static_cast<int8_t>(fused_activation_function), 0);
  }
  explicit SubOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: assignment of builders is intentionally disabled.
  SubOptionsBuilder &operator=(const SubOptionsBuilder &);
  flatbuffers::Offset<SubOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<SubOptions>(end);
    return o;
  }
};

// Convenience helper: builds a complete SubOptions table in one call.
inline flatbuffers::Offset<SubOptions>
CreateSubOptions(flatbuffers::FlatBufferBuilder &_fbb,
                 ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE)
{
  SubOptionsBuilder builder_(_fbb);
  builder_.add_fused_activation_function(fused_activation_function);
  return builder_.Finish();
}
-
// Schema-generated accessor table for Div (elementwise division) options.
struct DivOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  enum
  {
    VT_FUSED_ACTIVATION_FUNCTION = 4 // vtable offset of the fused activation field
  };
  // Stored as int8_t in the buffer; re-widened to the enum on read.
  ActivationFunctionType fused_activation_function() const
  {
    return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
  }
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) &&
           VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
  }
};

// Incrementally writes DivOptions fields into an under-construction buffer.
struct DivOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_fused_activation_function(ActivationFunctionType fused_activation_function)
  {
    fbb_.AddElement<int8_t>(DivOptions::VT_FUSED_ACTIVATION_FUNCTION,
                            static_cast<int8_t>(fused_activation_function), 0);
  }
  explicit DivOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: assignment of builders is intentionally disabled.
  DivOptionsBuilder &operator=(const DivOptionsBuilder &);
  flatbuffers::Offset<DivOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<DivOptions>(end);
    return o;
  }
};

// Convenience helper: builds a complete DivOptions table in one call.
inline flatbuffers::Offset<DivOptions>
CreateDivOptions(flatbuffers::FlatBufferBuilder &_fbb,
                 ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE)
{
  DivOptionsBuilder builder_(_fbb);
  builder_.add_fused_activation_function(fused_activation_function);
  return builder_.Finish();
}
-
// Empty options table for the TopKV2 operator (no fields; Verify only checks table bounds).
struct TopKV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && verifier.EndTable();
  }
};

// Builder for the (field-less) TopKV2Options table.
struct TopKV2OptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  explicit TopKV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: assignment of builders is intentionally disabled.
  TopKV2OptionsBuilder &operator=(const TopKV2OptionsBuilder &);
  flatbuffers::Offset<TopKV2Options> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<TopKV2Options>(end);
    return o;
  }
};

// Convenience helper: builds an empty TopKV2Options table.
inline flatbuffers::Offset<TopKV2Options> CreateTopKV2Options(flatbuffers::FlatBufferBuilder &_fbb)
{
  TopKV2OptionsBuilder builder_(_fbb);
  return builder_.Finish();
}
-
// Schema-generated accessor table for EmbeddingLookupSparse operator options.
struct EmbeddingLookupSparseOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  enum
  {
    VT_COMBINER = 4 // vtable offset of the combiner field
  };
  // Stored as int8_t in the buffer; re-widened to the enum on read.
  CombinerType combiner() const
  {
    return static_cast<CombinerType>(GetField<int8_t>(VT_COMBINER, 0));
  }
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_COMBINER) &&
           verifier.EndTable();
  }
};

// Incrementally writes EmbeddingLookupSparseOptions fields into an under-construction buffer.
struct EmbeddingLookupSparseOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_combiner(CombinerType combiner)
  {
    fbb_.AddElement<int8_t>(EmbeddingLookupSparseOptions::VT_COMBINER,
                            static_cast<int8_t>(combiner), 0);
  }
  explicit EmbeddingLookupSparseOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: assignment of builders is intentionally disabled.
  EmbeddingLookupSparseOptionsBuilder &operator=(const EmbeddingLookupSparseOptionsBuilder &);
  flatbuffers::Offset<EmbeddingLookupSparseOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<EmbeddingLookupSparseOptions>(end);
    return o;
  }
};

// Convenience helper: builds a complete EmbeddingLookupSparseOptions table in one call.
inline flatbuffers::Offset<EmbeddingLookupSparseOptions>
CreateEmbeddingLookupSparseOptions(flatbuffers::FlatBufferBuilder &_fbb,
                                   CombinerType combiner = CombinerType_SUM)
{
  EmbeddingLookupSparseOptionsBuilder builder_(_fbb);
  builder_.add_combiner(combiner);
  return builder_.Finish();
}
-
// Schema-generated accessor table for Gather operator options.
struct GatherOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  enum
  {
    VT_AXIS = 4 // vtable offset of the axis field
  };
  int32_t axis() const { return GetField<int32_t>(VT_AXIS, 0); }
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_AXIS) &&
           verifier.EndTable();
  }
};

// Incrementally writes GatherOptions fields into an under-construction buffer.
struct GatherOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_axis(int32_t axis) { fbb_.AddElement<int32_t>(GatherOptions::VT_AXIS, axis, 0); }
  explicit GatherOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: assignment of builders is intentionally disabled.
  GatherOptionsBuilder &operator=(const GatherOptionsBuilder &);
  flatbuffers::Offset<GatherOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<GatherOptions>(end);
    return o;
  }
};

// Convenience helper: builds a complete GatherOptions table in one call.
inline flatbuffers::Offset<GatherOptions> CreateGatherOptions(flatbuffers::FlatBufferBuilder &_fbb,
                                                              int32_t axis = 0)
{
  GatherOptionsBuilder builder_(_fbb);
  builder_.add_axis(axis);
  return builder_.Finish();
}
-
// Empty options table for the Transpose operator (no fields; Verify only checks table bounds).
struct TransposeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && verifier.EndTable();
  }
};

// Builder for the (field-less) TransposeOptions table.
struct TransposeOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  explicit TransposeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: assignment of builders is intentionally disabled.
  TransposeOptionsBuilder &operator=(const TransposeOptionsBuilder &);
  flatbuffers::Offset<TransposeOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<TransposeOptions>(end);
    return o;
  }
};

// Convenience helper: builds an empty TransposeOptions table.
inline flatbuffers::Offset<TransposeOptions>
CreateTransposeOptions(flatbuffers::FlatBufferBuilder &_fbb)
{
  TransposeOptionsBuilder builder_(_fbb);
  return builder_.Finish();
}
-
// Empty options table for the Exp operator (no fields; Verify only checks table bounds).
struct ExpOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && verifier.EndTable();
  }
};

// Builder for the (field-less) ExpOptions table.
struct ExpOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  explicit ExpOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: assignment of builders is intentionally disabled.
  ExpOptionsBuilder &operator=(const ExpOptionsBuilder &);
  flatbuffers::Offset<ExpOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<ExpOptions>(end);
    return o;
  }
};

// Convenience helper: builds an empty ExpOptions table.
inline flatbuffers::Offset<ExpOptions> CreateExpOptions(flatbuffers::FlatBufferBuilder &_fbb)
{
  ExpOptionsBuilder builder_(_fbb);
  return builder_.Finish();
}
-
// Schema-generated accessor table shared by reduction operators (keep_dims flag).
struct ReducerOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  enum
  {
    VT_KEEP_DIMS = 4 // vtable offset of the keep_dims field
  };
  // Booleans are stored as uint8_t; any non-zero byte reads as true.
  bool keep_dims() const { return GetField<uint8_t>(VT_KEEP_DIMS, 0) != 0; }
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_KEEP_DIMS) &&
           verifier.EndTable();
  }
};

// Incrementally writes ReducerOptions fields into an under-construction buffer.
struct ReducerOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_keep_dims(bool keep_dims)
  {
    fbb_.AddElement<uint8_t>(ReducerOptions::VT_KEEP_DIMS, static_cast<uint8_t>(keep_dims), 0);
  }
  explicit ReducerOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: assignment of builders is intentionally disabled.
  ReducerOptionsBuilder &operator=(const ReducerOptionsBuilder &);
  flatbuffers::Offset<ReducerOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<ReducerOptions>(end);
    return o;
  }
};

// Convenience helper: builds a complete ReducerOptions table in one call.
inline flatbuffers::Offset<ReducerOptions>
CreateReducerOptions(flatbuffers::FlatBufferBuilder &_fbb, bool keep_dims = false)
{
  ReducerOptionsBuilder builder_(_fbb);
  builder_.add_keep_dims(keep_dims);
  return builder_.Finish();
}
-
// Schema-generated accessor table for Squeeze operator options (vector of axes to drop).
struct SqueezeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  enum
  {
    VT_SQUEEZE_DIMS = 4 // vtable offset of the squeeze_dims vector
  };
  // May return nullptr when the field is absent from the buffer.
  const flatbuffers::Vector<int32_t> *squeeze_dims() const
  {
    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_SQUEEZE_DIMS);
  }
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_SQUEEZE_DIMS) &&
           verifier.VerifyVector(squeeze_dims()) && verifier.EndTable();
  }
};

// Incrementally writes SqueezeOptions fields into an under-construction buffer.
struct SqueezeOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_squeeze_dims(flatbuffers::Offset<flatbuffers::Vector<int32_t>> squeeze_dims)
  {
    fbb_.AddOffset(SqueezeOptions::VT_SQUEEZE_DIMS, squeeze_dims);
  }
  explicit SqueezeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: assignment of builders is intentionally disabled.
  SqueezeOptionsBuilder &operator=(const SqueezeOptionsBuilder &);
  flatbuffers::Offset<SqueezeOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<SqueezeOptions>(end);
    return o;
  }
};

// Convenience helper: builds a SqueezeOptions table from an already-serialized vector.
inline flatbuffers::Offset<SqueezeOptions>
CreateSqueezeOptions(flatbuffers::FlatBufferBuilder &_fbb,
                     flatbuffers::Offset<flatbuffers::Vector<int32_t>> squeeze_dims = 0)
{
  SqueezeOptionsBuilder builder_(_fbb);
  builder_.add_squeeze_dims(squeeze_dims);
  return builder_.Finish();
}

// "Direct" variant: serializes the std::vector into the buffer first (nullptr => field omitted).
inline flatbuffers::Offset<SqueezeOptions>
CreateSqueezeOptionsDirect(flatbuffers::FlatBufferBuilder &_fbb,
                           const std::vector<int32_t> *squeeze_dims = nullptr)
{
  return circle::CreateSqueezeOptions(_fbb,
                                      squeeze_dims ? _fbb.CreateVector<int32_t>(*squeeze_dims) : 0);
}
-
// Schema-generated accessor table for Split operator options.
struct SplitOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  enum
  {
    VT_NUM_SPLITS = 4 // vtable offset of the num_splits field
  };
  int32_t num_splits() const { return GetField<int32_t>(VT_NUM_SPLITS, 0); }
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_NUM_SPLITS) &&
           verifier.EndTable();
  }
};

// Incrementally writes SplitOptions fields into an under-construction buffer.
struct SplitOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_num_splits(int32_t num_splits)
  {
    fbb_.AddElement<int32_t>(SplitOptions::VT_NUM_SPLITS, num_splits, 0);
  }
  explicit SplitOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: assignment of builders is intentionally disabled.
  SplitOptionsBuilder &operator=(const SplitOptionsBuilder &);
  flatbuffers::Offset<SplitOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<SplitOptions>(end);
    return o;
  }
};

// Convenience helper: builds a complete SplitOptions table in one call.
inline flatbuffers::Offset<SplitOptions> CreateSplitOptions(flatbuffers::FlatBufferBuilder &_fbb,
                                                            int32_t num_splits = 0)
{
  SplitOptionsBuilder builder_(_fbb);
  builder_.add_num_splits(num_splits);
  return builder_.Finish();
}
-
// Schema-generated accessor table for SplitV operator options.
struct SplitVOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  enum
  {
    VT_NUM_SPLITS = 4 // vtable offset of the num_splits field
  };
  int32_t num_splits() const { return GetField<int32_t>(VT_NUM_SPLITS, 0); }
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_NUM_SPLITS) &&
           verifier.EndTable();
  }
};

// Incrementally writes SplitVOptions fields into an under-construction buffer.
struct SplitVOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_num_splits(int32_t num_splits)
  {
    fbb_.AddElement<int32_t>(SplitVOptions::VT_NUM_SPLITS, num_splits, 0);
  }
  explicit SplitVOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: assignment of builders is intentionally disabled.
  SplitVOptionsBuilder &operator=(const SplitVOptionsBuilder &);
  flatbuffers::Offset<SplitVOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<SplitVOptions>(end);
    return o;
  }
};

// Convenience helper: builds a complete SplitVOptions table in one call.
inline flatbuffers::Offset<SplitVOptions> CreateSplitVOptions(flatbuffers::FlatBufferBuilder &_fbb,
                                                              int32_t num_splits = 0)
{
  SplitVOptionsBuilder builder_(_fbb);
  builder_.add_num_splits(num_splits);
  return builder_.Finish();
}
-
// Schema-generated accessor table for StridedSlice operator options (five bitmask fields).
struct StridedSliceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  enum
  {
    VT_BEGIN_MASK = 4,
    VT_END_MASK = 6,
    VT_ELLIPSIS_MASK = 8,
    VT_NEW_AXIS_MASK = 10,
    VT_SHRINK_AXIS_MASK = 12
  };
  int32_t begin_mask() const { return GetField<int32_t>(VT_BEGIN_MASK, 0); }
  int32_t end_mask() const { return GetField<int32_t>(VT_END_MASK, 0); }
  int32_t ellipsis_mask() const { return GetField<int32_t>(VT_ELLIPSIS_MASK, 0); }
  int32_t new_axis_mask() const { return GetField<int32_t>(VT_NEW_AXIS_MASK, 0); }
  int32_t shrink_axis_mask() const { return GetField<int32_t>(VT_SHRINK_AXIS_MASK, 0); }
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_BEGIN_MASK) &&
           VerifyField<int32_t>(verifier, VT_END_MASK) &&
           VerifyField<int32_t>(verifier, VT_ELLIPSIS_MASK) &&
           VerifyField<int32_t>(verifier, VT_NEW_AXIS_MASK) &&
           VerifyField<int32_t>(verifier, VT_SHRINK_AXIS_MASK) && verifier.EndTable();
  }
};

// Incrementally writes StridedSliceOptions fields into an under-construction buffer.
struct StridedSliceOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_begin_mask(int32_t begin_mask)
  {
    fbb_.AddElement<int32_t>(StridedSliceOptions::VT_BEGIN_MASK, begin_mask, 0);
  }
  void add_end_mask(int32_t end_mask)
  {
    fbb_.AddElement<int32_t>(StridedSliceOptions::VT_END_MASK, end_mask, 0);
  }
  void add_ellipsis_mask(int32_t ellipsis_mask)
  {
    fbb_.AddElement<int32_t>(StridedSliceOptions::VT_ELLIPSIS_MASK, ellipsis_mask, 0);
  }
  void add_new_axis_mask(int32_t new_axis_mask)
  {
    fbb_.AddElement<int32_t>(StridedSliceOptions::VT_NEW_AXIS_MASK, new_axis_mask, 0);
  }
  void add_shrink_axis_mask(int32_t shrink_axis_mask)
  {
    fbb_.AddElement<int32_t>(StridedSliceOptions::VT_SHRINK_AXIS_MASK, shrink_axis_mask, 0);
  }
  explicit StridedSliceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: assignment of builders is intentionally disabled.
  StridedSliceOptionsBuilder &operator=(const StridedSliceOptionsBuilder &);
  flatbuffers::Offset<StridedSliceOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<StridedSliceOptions>(end);
    return o;
  }
};

// Convenience helper: builds a complete StridedSliceOptions table in one call.
// Fields are added in reverse declaration order, as flatc conventionally emits.
inline flatbuffers::Offset<StridedSliceOptions>
CreateStridedSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t begin_mask = 0,
                          int32_t end_mask = 0, int32_t ellipsis_mask = 0,
                          int32_t new_axis_mask = 0, int32_t shrink_axis_mask = 0)
{
  StridedSliceOptionsBuilder builder_(_fbb);
  builder_.add_shrink_axis_mask(shrink_axis_mask);
  builder_.add_new_axis_mask(new_axis_mask);
  builder_.add_ellipsis_mask(ellipsis_mask);
  builder_.add_end_mask(end_mask);
  builder_.add_begin_mask(begin_mask);
  return builder_.Finish();
}
-
// Empty options table for the LogSoftmax operator (no fields; Verify only checks table bounds).
struct LogSoftmaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && verifier.EndTable();
  }
};

// Builder for the (field-less) LogSoftmaxOptions table.
struct LogSoftmaxOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  explicit LogSoftmaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: assignment of builders is intentionally disabled.
  LogSoftmaxOptionsBuilder &operator=(const LogSoftmaxOptionsBuilder &);
  flatbuffers::Offset<LogSoftmaxOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<LogSoftmaxOptions>(end);
    return o;
  }
};

// Convenience helper: builds an empty LogSoftmaxOptions table.
inline flatbuffers::Offset<LogSoftmaxOptions>
CreateLogSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb)
{
  LogSoftmaxOptionsBuilder builder_(_fbb);
  return builder_.Finish();
}
-
// Schema-generated accessor table for Cast operator options (input/output tensor types).
struct CastOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  enum
  {
    VT_IN_DATA_TYPE = 4,
    VT_OUT_DATA_TYPE = 6
  };
  // Both type fields are stored as int8_t and re-widened to TensorType on read.
  TensorType in_data_type() const
  {
    return static_cast<TensorType>(GetField<int8_t>(VT_IN_DATA_TYPE, 0));
  }
  TensorType out_data_type() const
  {
    return static_cast<TensorType>(GetField<int8_t>(VT_OUT_DATA_TYPE, 0));
  }
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_IN_DATA_TYPE) &&
           VerifyField<int8_t>(verifier, VT_OUT_DATA_TYPE) && verifier.EndTable();
  }
};

// Incrementally writes CastOptions fields into an under-construction buffer.
struct CastOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_in_data_type(TensorType in_data_type)
  {
    fbb_.AddElement<int8_t>(CastOptions::VT_IN_DATA_TYPE, static_cast<int8_t>(in_data_type), 0);
  }
  void add_out_data_type(TensorType out_data_type)
  {
    fbb_.AddElement<int8_t>(CastOptions::VT_OUT_DATA_TYPE, static_cast<int8_t>(out_data_type), 0);
  }
  explicit CastOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: assignment of builders is intentionally disabled.
  CastOptionsBuilder &operator=(const CastOptionsBuilder &);
  flatbuffers::Offset<CastOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<CastOptions>(end);
    return o;
  }
};

// Convenience helper: builds a complete CastOptions table in one call.
inline flatbuffers::Offset<CastOptions>
CreateCastOptions(flatbuffers::FlatBufferBuilder &_fbb,
                  TensorType in_data_type = TensorType_FLOAT32,
                  TensorType out_data_type = TensorType_FLOAT32)
{
  CastOptionsBuilder builder_(_fbb);
  builder_.add_out_data_type(out_data_type);
  builder_.add_in_data_type(in_data_type);
  return builder_.Finish();
}
-
// Empty options table for the Dequantize operator (no fields; Verify only checks table bounds).
struct DequantizeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && verifier.EndTable();
  }
};

// Builder for the (field-less) DequantizeOptions table.
struct DequantizeOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  explicit DequantizeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: assignment of builders is intentionally disabled.
  DequantizeOptionsBuilder &operator=(const DequantizeOptionsBuilder &);
  flatbuffers::Offset<DequantizeOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<DequantizeOptions>(end);
    return o;
  }
};

// Convenience helper: builds an empty DequantizeOptions table.
inline flatbuffers::Offset<DequantizeOptions>
CreateDequantizeOptions(flatbuffers::FlatBufferBuilder &_fbb)
{
  DequantizeOptionsBuilder builder_(_fbb);
  return builder_.Finish();
}
-
// Empty options table shared by Maximum/Minimum operators (no fields).
struct MaximumMinimumOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && verifier.EndTable();
  }
};

// Builder for the (field-less) MaximumMinimumOptions table.
struct MaximumMinimumOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  explicit MaximumMinimumOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: assignment of builders is intentionally disabled.
  MaximumMinimumOptionsBuilder &operator=(const MaximumMinimumOptionsBuilder &);
  flatbuffers::Offset<MaximumMinimumOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<MaximumMinimumOptions>(end);
    return o;
  }
};

// Convenience helper: builds an empty MaximumMinimumOptions table.
inline flatbuffers::Offset<MaximumMinimumOptions>
CreateMaximumMinimumOptions(flatbuffers::FlatBufferBuilder &_fbb)
{
  MaximumMinimumOptionsBuilder builder_(_fbb);
  return builder_.Finish();
}
-
// Empty options table for the Tile operator (no fields; Verify only checks table bounds).
struct TileOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && verifier.EndTable();
  }
};

// Builder for the (field-less) TileOptions table.
struct TileOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  explicit TileOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: assignment of builders is intentionally disabled.
  TileOptionsBuilder &operator=(const TileOptionsBuilder &);
  flatbuffers::Offset<TileOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<TileOptions>(end);
    return o;
  }
};

// Convenience helper: builds an empty TileOptions table.
inline flatbuffers::Offset<TileOptions> CreateTileOptions(flatbuffers::FlatBufferBuilder &_fbb)
{
  TileOptionsBuilder builder_(_fbb);
  return builder_.Finish();
}
-
// Schema-generated accessor table for ArgMax operator options (index output type).
struct ArgMaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  enum
  {
    VT_OUTPUT_TYPE = 4 // vtable offset of the output_type field
  };
  // Stored as int8_t in the buffer; re-widened to TensorType on read.
  TensorType output_type() const
  {
    return static_cast<TensorType>(GetField<int8_t>(VT_OUTPUT_TYPE, 0));
  }
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_OUTPUT_TYPE) &&
           verifier.EndTable();
  }
};

// Incrementally writes ArgMaxOptions fields into an under-construction buffer.
struct ArgMaxOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_output_type(TensorType output_type)
  {
    fbb_.AddElement<int8_t>(ArgMaxOptions::VT_OUTPUT_TYPE, static_cast<int8_t>(output_type), 0);
  }
  explicit ArgMaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: assignment of builders is intentionally disabled.
  ArgMaxOptionsBuilder &operator=(const ArgMaxOptionsBuilder &);
  flatbuffers::Offset<ArgMaxOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<ArgMaxOptions>(end);
    return o;
  }
};

// Convenience helper: builds a complete ArgMaxOptions table in one call.
inline flatbuffers::Offset<ArgMaxOptions>
CreateArgMaxOptions(flatbuffers::FlatBufferBuilder &_fbb,
                    TensorType output_type = TensorType_FLOAT32)
{
  ArgMaxOptionsBuilder builder_(_fbb);
  builder_.add_output_type(output_type);
  return builder_.Finish();
}
-
// Schema-generated accessor table for ArgMin operator options (index output type).
struct ArgMinOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  enum
  {
    VT_OUTPUT_TYPE = 4 // vtable offset of the output_type field
  };
  // Stored as int8_t in the buffer; re-widened to TensorType on read.
  TensorType output_type() const
  {
    return static_cast<TensorType>(GetField<int8_t>(VT_OUTPUT_TYPE, 0));
  }
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_OUTPUT_TYPE) &&
           verifier.EndTable();
  }
};

// Incrementally writes ArgMinOptions fields into an under-construction buffer.
struct ArgMinOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_output_type(TensorType output_type)
  {
    fbb_.AddElement<int8_t>(ArgMinOptions::VT_OUTPUT_TYPE, static_cast<int8_t>(output_type), 0);
  }
  explicit ArgMinOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: assignment of builders is intentionally disabled.
  ArgMinOptionsBuilder &operator=(const ArgMinOptionsBuilder &);
  flatbuffers::Offset<ArgMinOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<ArgMinOptions>(end);
    return o;
  }
};

// Convenience helper: builds a complete ArgMinOptions table in one call.
inline flatbuffers::Offset<ArgMinOptions>
CreateArgMinOptions(flatbuffers::FlatBufferBuilder &_fbb,
                    TensorType output_type = TensorType_FLOAT32)
{
  ArgMinOptionsBuilder builder_(_fbb);
  builder_.add_output_type(output_type);
  return builder_.Finish();
}
-
// Empty options table for the Greater comparison operator (no fields).
struct GreaterOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && verifier.EndTable();
  }
};

// Builder for the (field-less) GreaterOptions table.
struct GreaterOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  explicit GreaterOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: assignment of builders is intentionally disabled.
  GreaterOptionsBuilder &operator=(const GreaterOptionsBuilder &);
  flatbuffers::Offset<GreaterOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<GreaterOptions>(end);
    return o;
  }
};

// Convenience helper: builds an empty GreaterOptions table.
inline flatbuffers::Offset<GreaterOptions>
CreateGreaterOptions(flatbuffers::FlatBufferBuilder &_fbb)
{
  GreaterOptionsBuilder builder_(_fbb);
  return builder_.Finish();
}
-
// Empty options table for the GreaterEqual comparison operator (no fields).
struct GreaterEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && verifier.EndTable();
  }
};

// Builder for the (field-less) GreaterEqualOptions table.
struct GreaterEqualOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  explicit GreaterEqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: assignment of builders is intentionally disabled.
  GreaterEqualOptionsBuilder &operator=(const GreaterEqualOptionsBuilder &);
  flatbuffers::Offset<GreaterEqualOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<GreaterEqualOptions>(end);
    return o;
  }
};

// Convenience helper: builds an empty GreaterEqualOptions table.
inline flatbuffers::Offset<GreaterEqualOptions>
CreateGreaterEqualOptions(flatbuffers::FlatBufferBuilder &_fbb)
{
  GreaterEqualOptionsBuilder builder_(_fbb);
  return builder_.Finish();
}
-
// Empty options table for the Less comparison operator (no fields).
struct LessOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && verifier.EndTable();
  }
};

// Builder for the (field-less) LessOptions table.
struct LessOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  explicit LessOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: assignment of builders is intentionally disabled.
  LessOptionsBuilder &operator=(const LessOptionsBuilder &);
  flatbuffers::Offset<LessOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<LessOptions>(end);
    return o;
  }
};

// Convenience helper: builds an empty LessOptions table.
inline flatbuffers::Offset<LessOptions> CreateLessOptions(flatbuffers::FlatBufferBuilder &_fbb)
{
  LessOptionsBuilder builder_(_fbb);
  return builder_.Finish();
}
-
// Empty options table for the LessEqual comparison operator (no fields).
struct LessEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && verifier.EndTable();
  }
};

// Builder for the (field-less) LessEqualOptions table.
struct LessEqualOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  explicit LessEqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: assignment of builders is intentionally disabled.
  LessEqualOptionsBuilder &operator=(const LessEqualOptionsBuilder &);
  flatbuffers::Offset<LessEqualOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<LessEqualOptions>(end);
    return o;
  }
};

// Convenience helper: builds an empty LessEqualOptions table.
inline flatbuffers::Offset<LessEqualOptions>
CreateLessEqualOptions(flatbuffers::FlatBufferBuilder &_fbb)
{
  LessEqualOptionsBuilder builder_(_fbb);
  return builder_.Finish();
}
-
// Empty options table for the Neg operator (no fields; Verify only checks table bounds).
struct NegOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && verifier.EndTable();
  }
};

// Builder for the (field-less) NegOptions table.
struct NegOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  explicit NegOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: assignment of builders is intentionally disabled.
  NegOptionsBuilder &operator=(const NegOptionsBuilder &);
  flatbuffers::Offset<NegOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<NegOptions>(end);
    return o;
  }
};

// Convenience helper: builds an empty NegOptions table.
inline flatbuffers::Offset<NegOptions> CreateNegOptions(flatbuffers::FlatBufferBuilder &_fbb)
{
  NegOptionsBuilder builder_(_fbb);
  return builder_.Finish();
}
-
// Empty options table for the Select operator (no fields; Verify only checks table bounds).
struct SelectOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && verifier.EndTable();
  }
};

// Builder for the (field-less) SelectOptions table.
struct SelectOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  explicit SelectOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: assignment of builders is intentionally disabled.
  SelectOptionsBuilder &operator=(const SelectOptionsBuilder &);
  flatbuffers::Offset<SelectOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<SelectOptions>(end);
    return o;
  }
};

// Convenience helper: builds an empty SelectOptions table.
inline flatbuffers::Offset<SelectOptions> CreateSelectOptions(flatbuffers::FlatBufferBuilder &_fbb)
{
  SelectOptionsBuilder builder_(_fbb);
  return builder_.Finish();
}
-
// Empty options table for the Slice operator (no fields; Verify only checks table bounds).
struct SliceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && verifier.EndTable();
  }
};

// Builder for the (field-less) SliceOptions table.
struct SliceOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  explicit SliceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: assignment of builders is intentionally disabled.
  SliceOptionsBuilder &operator=(const SliceOptionsBuilder &);
  flatbuffers::Offset<SliceOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<SliceOptions>(end);
    return o;
  }
};

// Convenience helper: builds an empty SliceOptions table.
inline flatbuffers::Offset<SliceOptions> CreateSliceOptions(flatbuffers::FlatBufferBuilder &_fbb)
{
  SliceOptionsBuilder builder_(_fbb);
  return builder_.Finish();
}
-
// Schema-generated accessor table for TransposeConv operator options.
struct TransposeConvOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  enum
  {
    VT_PADDING = 4,
    VT_STRIDE_W = 6,
    VT_STRIDE_H = 8
  };
  // Padding is stored as int8_t in the buffer; re-widened to the enum on read.
  Padding padding() const { return static_cast<Padding>(GetField<int8_t>(VT_PADDING, 0)); }
  int32_t stride_w() const { return GetField<int32_t>(VT_STRIDE_W, 0); }
  int32_t stride_h() const { return GetField<int32_t>(VT_STRIDE_H, 0); }
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_PADDING) &&
           VerifyField<int32_t>(verifier, VT_STRIDE_W) &&
           VerifyField<int32_t>(verifier, VT_STRIDE_H) && verifier.EndTable();
  }
};

// Incrementally writes TransposeConvOptions fields into an under-construction buffer.
struct TransposeConvOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_padding(Padding padding)
  {
    fbb_.AddElement<int8_t>(TransposeConvOptions::VT_PADDING, static_cast<int8_t>(padding), 0);
  }
  void add_stride_w(int32_t stride_w)
  {
    fbb_.AddElement<int32_t>(TransposeConvOptions::VT_STRIDE_W, stride_w, 0);
  }
  void add_stride_h(int32_t stride_h)
  {
    fbb_.AddElement<int32_t>(TransposeConvOptions::VT_STRIDE_H, stride_h, 0);
  }
  explicit TransposeConvOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: assignment of builders is intentionally disabled.
  TransposeConvOptionsBuilder &operator=(const TransposeConvOptionsBuilder &);
  flatbuffers::Offset<TransposeConvOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<TransposeConvOptions>(end);
    return o;
  }
};

// Convenience helper: builds a complete TransposeConvOptions table in one call.
// Fields are added in reverse declaration order, as flatc conventionally emits.
inline flatbuffers::Offset<TransposeConvOptions>
CreateTransposeConvOptions(flatbuffers::FlatBufferBuilder &_fbb, Padding padding = Padding_SAME,
                           int32_t stride_w = 0, int32_t stride_h = 0)
{
  TransposeConvOptionsBuilder builder_(_fbb);
  builder_.add_stride_h(stride_h);
  builder_.add_stride_w(stride_w);
  builder_.add_padding(padding);
  return builder_.Finish();
}
-
// Empty options table for the ExpandDims operator (no fields; Verify only checks table bounds).
struct ExpandDimsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
{
  bool Verify(flatbuffers::Verifier &verifier) const
  {
    return VerifyTableStart(verifier) && verifier.EndTable();
  }
};

// Builder for the (field-less) ExpandDimsOptions table.
struct ExpandDimsOptionsBuilder
{
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  explicit ExpandDimsOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
  {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: assignment of builders is intentionally disabled.
  ExpandDimsOptionsBuilder &operator=(const ExpandDimsOptionsBuilder &);
  flatbuffers::Offset<ExpandDimsOptions> Finish()
  {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<ExpandDimsOptions>(end);
    return o;
  }
};

// Convenience helper: builds an empty ExpandDimsOptions table.
inline flatbuffers::Offset<ExpandDimsOptions>
CreateExpandDimsOptions(flatbuffers::FlatBufferBuilder &_fbb)
{
  ExpandDimsOptionsBuilder builder_(_fbb);
  return builder_.Finish();
}
-
-struct SparseToDenseOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_VALIDATE_INDICES = 4
- };
- bool validate_indices() const { return GetField<uint8_t>(VT_VALIDATE_INDICES, 0) != 0; }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_VALIDATE_INDICES) &&
- verifier.EndTable();
- }
-};
-
-struct SparseToDenseOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_validate_indices(bool validate_indices)
- {
- fbb_.AddElement<uint8_t>(SparseToDenseOptions::VT_VALIDATE_INDICES,
- static_cast<uint8_t>(validate_indices), 0);
- }
- explicit SparseToDenseOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SparseToDenseOptionsBuilder &operator=(const SparseToDenseOptionsBuilder &);
- flatbuffers::Offset<SparseToDenseOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SparseToDenseOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SparseToDenseOptions>
-CreateSparseToDenseOptions(flatbuffers::FlatBufferBuilder &_fbb, bool validate_indices = false)
-{
- SparseToDenseOptionsBuilder builder_(_fbb);
- builder_.add_validate_indices(validate_indices);
- return builder_.Finish();
-}
-
-struct EqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct EqualOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit EqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- EqualOptionsBuilder &operator=(const EqualOptionsBuilder &);
- flatbuffers::Offset<EqualOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<EqualOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<EqualOptions> CreateEqualOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- EqualOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct NotEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct NotEqualOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit NotEqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- NotEqualOptionsBuilder &operator=(const NotEqualOptionsBuilder &);
- flatbuffers::Offset<NotEqualOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<NotEqualOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<NotEqualOptions>
-CreateNotEqualOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- NotEqualOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct ShapeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_OUT_TYPE = 4
- };
- TensorType out_type() const { return static_cast<TensorType>(GetField<int8_t>(VT_OUT_TYPE, 0)); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_OUT_TYPE) &&
- verifier.EndTable();
- }
-};
-
-struct ShapeOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_out_type(TensorType out_type)
- {
- fbb_.AddElement<int8_t>(ShapeOptions::VT_OUT_TYPE, static_cast<int8_t>(out_type), 0);
- }
- explicit ShapeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ShapeOptionsBuilder &operator=(const ShapeOptionsBuilder &);
- flatbuffers::Offset<ShapeOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ShapeOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ShapeOptions>
-CreateShapeOptions(flatbuffers::FlatBufferBuilder &_fbb, TensorType out_type = TensorType_FLOAT32)
-{
- ShapeOptionsBuilder builder_(_fbb);
- builder_.add_out_type(out_type);
- return builder_.Finish();
-}
-
-struct PowOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct PowOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit PowOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- PowOptionsBuilder &operator=(const PowOptionsBuilder &);
- flatbuffers::Offset<PowOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<PowOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<PowOptions> CreatePowOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- PowOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct FakeQuantOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_MIN = 4,
- VT_MAX = 6,
- VT_NUM_BITS = 8,
- VT_NARROW_RANGE = 10
- };
- float min() const { return GetField<float>(VT_MIN, 0.0f); }
- float max() const { return GetField<float>(VT_MAX, 0.0f); }
- int32_t num_bits() const { return GetField<int32_t>(VT_NUM_BITS, 0); }
- bool narrow_range() const { return GetField<uint8_t>(VT_NARROW_RANGE, 0) != 0; }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<float>(verifier, VT_MIN) &&
- VerifyField<float>(verifier, VT_MAX) && VerifyField<int32_t>(verifier, VT_NUM_BITS) &&
- VerifyField<uint8_t>(verifier, VT_NARROW_RANGE) && verifier.EndTable();
- }
-};
-
-struct FakeQuantOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_min(float min) { fbb_.AddElement<float>(FakeQuantOptions::VT_MIN, min, 0.0f); }
- void add_max(float max) { fbb_.AddElement<float>(FakeQuantOptions::VT_MAX, max, 0.0f); }
- void add_num_bits(int32_t num_bits)
- {
- fbb_.AddElement<int32_t>(FakeQuantOptions::VT_NUM_BITS, num_bits, 0);
- }
- void add_narrow_range(bool narrow_range)
- {
- fbb_.AddElement<uint8_t>(FakeQuantOptions::VT_NARROW_RANGE, static_cast<uint8_t>(narrow_range),
- 0);
- }
- explicit FakeQuantOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- FakeQuantOptionsBuilder &operator=(const FakeQuantOptionsBuilder &);
- flatbuffers::Offset<FakeQuantOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<FakeQuantOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<FakeQuantOptions>
-CreateFakeQuantOptions(flatbuffers::FlatBufferBuilder &_fbb, float min = 0.0f, float max = 0.0f,
- int32_t num_bits = 0, bool narrow_range = false)
-{
- FakeQuantOptionsBuilder builder_(_fbb);
- builder_.add_num_bits(num_bits);
- builder_.add_max(max);
- builder_.add_min(min);
- builder_.add_narrow_range(narrow_range);
- return builder_.Finish();
-}
-
-struct PackOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_VALUES_COUNT = 4,
- VT_AXIS = 6
- };
- int32_t values_count() const { return GetField<int32_t>(VT_VALUES_COUNT, 0); }
- int32_t axis() const { return GetField<int32_t>(VT_AXIS, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_VALUES_COUNT) &&
- VerifyField<int32_t>(verifier, VT_AXIS) && verifier.EndTable();
- }
-};
-
-struct PackOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_values_count(int32_t values_count)
- {
- fbb_.AddElement<int32_t>(PackOptions::VT_VALUES_COUNT, values_count, 0);
- }
- void add_axis(int32_t axis) { fbb_.AddElement<int32_t>(PackOptions::VT_AXIS, axis, 0); }
- explicit PackOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- PackOptionsBuilder &operator=(const PackOptionsBuilder &);
- flatbuffers::Offset<PackOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<PackOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<PackOptions>
-CreatePackOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t values_count = 0, int32_t axis = 0)
-{
- PackOptionsBuilder builder_(_fbb);
- builder_.add_axis(axis);
- builder_.add_values_count(values_count);
- return builder_.Finish();
-}
-
-struct LogicalOrOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct LogicalOrOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit LogicalOrOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- LogicalOrOptionsBuilder &operator=(const LogicalOrOptionsBuilder &);
- flatbuffers::Offset<LogicalOrOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<LogicalOrOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<LogicalOrOptions>
-CreateLogicalOrOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- LogicalOrOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct OneHotOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_AXIS = 4
- };
- int32_t axis() const { return GetField<int32_t>(VT_AXIS, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_AXIS) &&
- verifier.EndTable();
- }
-};
-
-struct OneHotOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_axis(int32_t axis) { fbb_.AddElement<int32_t>(OneHotOptions::VT_AXIS, axis, 0); }
- explicit OneHotOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- OneHotOptionsBuilder &operator=(const OneHotOptionsBuilder &);
- flatbuffers::Offset<OneHotOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<OneHotOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<OneHotOptions> CreateOneHotOptions(flatbuffers::FlatBufferBuilder &_fbb,
- int32_t axis = 0)
-{
- OneHotOptionsBuilder builder_(_fbb);
- builder_.add_axis(axis);
- return builder_.Finish();
-}
-
-struct AbsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct AbsOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit AbsOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- AbsOptionsBuilder &operator=(const AbsOptionsBuilder &);
- flatbuffers::Offset<AbsOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<AbsOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<AbsOptions> CreateAbsOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- AbsOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct LogicalAndOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct LogicalAndOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit LogicalAndOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- LogicalAndOptionsBuilder &operator=(const LogicalAndOptionsBuilder &);
- flatbuffers::Offset<LogicalAndOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<LogicalAndOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<LogicalAndOptions>
-CreateLogicalAndOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- LogicalAndOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct LogicalNotOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct LogicalNotOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit LogicalNotOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- LogicalNotOptionsBuilder &operator=(const LogicalNotOptionsBuilder &);
- flatbuffers::Offset<LogicalNotOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<LogicalNotOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<LogicalNotOptions>
-CreateLogicalNotOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- LogicalNotOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct UnpackOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_NUM = 4,
- VT_AXIS = 6
- };
- int32_t num() const { return GetField<int32_t>(VT_NUM, 0); }
- int32_t axis() const { return GetField<int32_t>(VT_AXIS, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_NUM) &&
- VerifyField<int32_t>(verifier, VT_AXIS) && verifier.EndTable();
- }
-};
-
-struct UnpackOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_num(int32_t num) { fbb_.AddElement<int32_t>(UnpackOptions::VT_NUM, num, 0); }
- void add_axis(int32_t axis) { fbb_.AddElement<int32_t>(UnpackOptions::VT_AXIS, axis, 0); }
- explicit UnpackOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- UnpackOptionsBuilder &operator=(const UnpackOptionsBuilder &);
- flatbuffers::Offset<UnpackOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<UnpackOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<UnpackOptions> CreateUnpackOptions(flatbuffers::FlatBufferBuilder &_fbb,
- int32_t num = 0, int32_t axis = 0)
-{
- UnpackOptionsBuilder builder_(_fbb);
- builder_.add_axis(axis);
- builder_.add_num(num);
- return builder_.Finish();
-}
-
-struct FloorDivOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct FloorDivOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit FloorDivOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- FloorDivOptionsBuilder &operator=(const FloorDivOptionsBuilder &);
- flatbuffers::Offset<FloorDivOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<FloorDivOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<FloorDivOptions>
-CreateFloorDivOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- FloorDivOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct SquareOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct SquareOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit SquareOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SquareOptionsBuilder &operator=(const SquareOptionsBuilder &);
- flatbuffers::Offset<SquareOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SquareOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SquareOptions> CreateSquareOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- SquareOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct ZerosLikeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct ZerosLikeOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit ZerosLikeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ZerosLikeOptionsBuilder &operator=(const ZerosLikeOptionsBuilder &);
- flatbuffers::Offset<ZerosLikeOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ZerosLikeOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ZerosLikeOptions>
-CreateZerosLikeOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- ZerosLikeOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct FillOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct FillOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit FillOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- FillOptionsBuilder &operator=(const FillOptionsBuilder &);
- flatbuffers::Offset<FillOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<FillOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<FillOptions> CreateFillOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- FillOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct FloorModOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct FloorModOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit FloorModOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- FloorModOptionsBuilder &operator=(const FloorModOptionsBuilder &);
- flatbuffers::Offset<FloorModOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<FloorModOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<FloorModOptions>
-CreateFloorModOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- FloorModOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct RangeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct RangeOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit RangeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- RangeOptionsBuilder &operator=(const RangeOptionsBuilder &);
- flatbuffers::Offset<RangeOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<RangeOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<RangeOptions> CreateRangeOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- RangeOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct LeakyReluOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_ALPHA = 4
- };
- float alpha() const { return GetField<float>(VT_ALPHA, 0.0f); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<float>(verifier, VT_ALPHA) &&
- verifier.EndTable();
- }
-};
-
-struct LeakyReluOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_alpha(float alpha) { fbb_.AddElement<float>(LeakyReluOptions::VT_ALPHA, alpha, 0.0f); }
- explicit LeakyReluOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- LeakyReluOptionsBuilder &operator=(const LeakyReluOptionsBuilder &);
- flatbuffers::Offset<LeakyReluOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<LeakyReluOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<LeakyReluOptions>
-CreateLeakyReluOptions(flatbuffers::FlatBufferBuilder &_fbb, float alpha = 0.0f)
-{
- LeakyReluOptionsBuilder builder_(_fbb);
- builder_.add_alpha(alpha);
- return builder_.Finish();
-}
-
-struct SquaredDifferenceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct SquaredDifferenceOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit SquaredDifferenceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SquaredDifferenceOptionsBuilder &operator=(const SquaredDifferenceOptionsBuilder &);
- flatbuffers::Offset<SquaredDifferenceOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SquaredDifferenceOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SquaredDifferenceOptions>
-CreateSquaredDifferenceOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- SquaredDifferenceOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct MirrorPadOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_MODE = 4
- };
- MirrorPadMode mode() const { return static_cast<MirrorPadMode>(GetField<int8_t>(VT_MODE, 0)); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_MODE) &&
- verifier.EndTable();
- }
-};
-
-struct MirrorPadOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_mode(MirrorPadMode mode)
- {
- fbb_.AddElement<int8_t>(MirrorPadOptions::VT_MODE, static_cast<int8_t>(mode), 0);
- }
- explicit MirrorPadOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- MirrorPadOptionsBuilder &operator=(const MirrorPadOptionsBuilder &);
- flatbuffers::Offset<MirrorPadOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<MirrorPadOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<MirrorPadOptions>
-CreateMirrorPadOptions(flatbuffers::FlatBufferBuilder &_fbb,
- MirrorPadMode mode = MirrorPadMode_REFLECT)
-{
- MirrorPadOptionsBuilder builder_(_fbb);
- builder_.add_mode(mode);
- return builder_.Finish();
-}
-
-struct InstanceNormOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_EPSILON = 4,
- VT_FUSED_ACTIVATION_FUNCTION = 6
- };
- float epsilon() const { return GetField<float>(VT_EPSILON, 0.0f); }
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<float>(verifier, VT_EPSILON) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
- }
-};
-
-struct InstanceNormOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_epsilon(float epsilon)
- {
- fbb_.AddElement<float>(InstanceNormOptions::VT_EPSILON, epsilon, 0.0f);
- }
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(InstanceNormOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- explicit InstanceNormOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- InstanceNormOptionsBuilder &operator=(const InstanceNormOptionsBuilder &);
- flatbuffers::Offset<InstanceNormOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<InstanceNormOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<InstanceNormOptions> CreateInstanceNormOptions(
- flatbuffers::FlatBufferBuilder &_fbb, float epsilon = 0.0f,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE)
-{
- InstanceNormOptionsBuilder builder_(_fbb);
- builder_.add_epsilon(epsilon);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct OperatorCode FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_BUILTIN_CODE = 4,
- VT_CUSTOM_CODE = 6,
- VT_VERSION = 8
- };
- BuiltinOperator builtin_code() const
- {
- return static_cast<BuiltinOperator>(GetField<uint8_t>(VT_BUILTIN_CODE, 0));
- }
- const flatbuffers::String *custom_code() const
- {
- return GetPointer<const flatbuffers::String *>(VT_CUSTOM_CODE);
- }
- int32_t version() const { return GetField<int32_t>(VT_VERSION, 1); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_BUILTIN_CODE) &&
- VerifyOffset(verifier, VT_CUSTOM_CODE) && verifier.VerifyString(custom_code()) &&
- VerifyField<int32_t>(verifier, VT_VERSION) && verifier.EndTable();
- }
-};
-
-struct OperatorCodeBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_builtin_code(BuiltinOperator builtin_code)
- {
- fbb_.AddElement<uint8_t>(OperatorCode::VT_BUILTIN_CODE, static_cast<uint8_t>(builtin_code), 0);
- }
- void add_custom_code(flatbuffers::Offset<flatbuffers::String> custom_code)
- {
- fbb_.AddOffset(OperatorCode::VT_CUSTOM_CODE, custom_code);
- }
- void add_version(int32_t version)
- {
- fbb_.AddElement<int32_t>(OperatorCode::VT_VERSION, version, 1);
- }
- explicit OperatorCodeBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- OperatorCodeBuilder &operator=(const OperatorCodeBuilder &);
- flatbuffers::Offset<OperatorCode> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<OperatorCode>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<OperatorCode>
-CreateOperatorCode(flatbuffers::FlatBufferBuilder &_fbb,
- BuiltinOperator builtin_code = BuiltinOperator_ADD,
- flatbuffers::Offset<flatbuffers::String> custom_code = 0, int32_t version = 1)
-{
- OperatorCodeBuilder builder_(_fbb);
- builder_.add_version(version);
- builder_.add_custom_code(custom_code);
- builder_.add_builtin_code(builtin_code);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<OperatorCode>
-CreateOperatorCodeDirect(flatbuffers::FlatBufferBuilder &_fbb,
- BuiltinOperator builtin_code = BuiltinOperator_ADD,
- const char *custom_code = nullptr, int32_t version = 1)
-{
- return circle::CreateOperatorCode(_fbb, builtin_code,
- custom_code ? _fbb.CreateString(custom_code) : 0, version);
-}
-
-struct Operator FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_OPCODE_INDEX = 4,
- VT_INPUTS = 6,
- VT_OUTPUTS = 8,
- VT_BUILTIN_OPTIONS_TYPE = 10,
- VT_BUILTIN_OPTIONS = 12,
- VT_CUSTOM_OPTIONS = 14,
- VT_CUSTOM_OPTIONS_FORMAT = 16,
- VT_MUTATING_VARIABLE_INPUTS = 18
- };
- uint32_t opcode_index() const { return GetField<uint32_t>(VT_OPCODE_INDEX, 0); }
- const flatbuffers::Vector<int32_t> *inputs() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_INPUTS);
- }
- const flatbuffers::Vector<int32_t> *outputs() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_OUTPUTS);
- }
- BuiltinOptions builtin_options_type() const
- {
- return static_cast<BuiltinOptions>(GetField<uint8_t>(VT_BUILTIN_OPTIONS_TYPE, 0));
- }
- const void *builtin_options() const { return GetPointer<const void *>(VT_BUILTIN_OPTIONS); }
- template <typename T> const T *builtin_options_as() const;
- const Conv2DOptions *builtin_options_as_Conv2DOptions() const
- {
- return builtin_options_type() == BuiltinOptions_Conv2DOptions
- ? static_cast<const Conv2DOptions *>(builtin_options())
- : nullptr;
- }
- const DepthwiseConv2DOptions *builtin_options_as_DepthwiseConv2DOptions() const
- {
- return builtin_options_type() == BuiltinOptions_DepthwiseConv2DOptions
- ? static_cast<const DepthwiseConv2DOptions *>(builtin_options())
- : nullptr;
- }
- const ConcatEmbeddingsOptions *builtin_options_as_ConcatEmbeddingsOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ConcatEmbeddingsOptions
- ? static_cast<const ConcatEmbeddingsOptions *>(builtin_options())
- : nullptr;
- }
- const LSHProjectionOptions *builtin_options_as_LSHProjectionOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LSHProjectionOptions
- ? static_cast<const LSHProjectionOptions *>(builtin_options())
- : nullptr;
- }
- const Pool2DOptions *builtin_options_as_Pool2DOptions() const
- {
- return builtin_options_type() == BuiltinOptions_Pool2DOptions
- ? static_cast<const Pool2DOptions *>(builtin_options())
- : nullptr;
- }
- const SVDFOptions *builtin_options_as_SVDFOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SVDFOptions
- ? static_cast<const SVDFOptions *>(builtin_options())
- : nullptr;
- }
- const RNNOptions *builtin_options_as_RNNOptions() const
- {
- return builtin_options_type() == BuiltinOptions_RNNOptions
- ? static_cast<const RNNOptions *>(builtin_options())
- : nullptr;
- }
- const FullyConnectedOptions *builtin_options_as_FullyConnectedOptions() const
- {
- return builtin_options_type() == BuiltinOptions_FullyConnectedOptions
- ? static_cast<const FullyConnectedOptions *>(builtin_options())
- : nullptr;
- }
- const SoftmaxOptions *builtin_options_as_SoftmaxOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SoftmaxOptions
- ? static_cast<const SoftmaxOptions *>(builtin_options())
- : nullptr;
- }
- const ConcatenationOptions *builtin_options_as_ConcatenationOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ConcatenationOptions
- ? static_cast<const ConcatenationOptions *>(builtin_options())
- : nullptr;
- }
- const AddOptions *builtin_options_as_AddOptions() const
- {
- return builtin_options_type() == BuiltinOptions_AddOptions
- ? static_cast<const AddOptions *>(builtin_options())
- : nullptr;
- }
- const L2NormOptions *builtin_options_as_L2NormOptions() const
- {
- return builtin_options_type() == BuiltinOptions_L2NormOptions
- ? static_cast<const L2NormOptions *>(builtin_options())
- : nullptr;
- }
- const LocalResponseNormalizationOptions *
- builtin_options_as_LocalResponseNormalizationOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LocalResponseNormalizationOptions
- ? static_cast<const LocalResponseNormalizationOptions *>(builtin_options())
- : nullptr;
- }
- const LSTMOptions *builtin_options_as_LSTMOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LSTMOptions
- ? static_cast<const LSTMOptions *>(builtin_options())
- : nullptr;
- }
- const ResizeBilinearOptions *builtin_options_as_ResizeBilinearOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ResizeBilinearOptions
- ? static_cast<const ResizeBilinearOptions *>(builtin_options())
- : nullptr;
- }
- const CallOptions *builtin_options_as_CallOptions() const
- {
- return builtin_options_type() == BuiltinOptions_CallOptions
- ? static_cast<const CallOptions *>(builtin_options())
- : nullptr;
- }
- const ReshapeOptions *builtin_options_as_ReshapeOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ReshapeOptions
- ? static_cast<const ReshapeOptions *>(builtin_options())
- : nullptr;
- }
- const SkipGramOptions *builtin_options_as_SkipGramOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SkipGramOptions
- ? static_cast<const SkipGramOptions *>(builtin_options())
- : nullptr;
- }
- const SpaceToDepthOptions *builtin_options_as_SpaceToDepthOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SpaceToDepthOptions
- ? static_cast<const SpaceToDepthOptions *>(builtin_options())
- : nullptr;
- }
- const EmbeddingLookupSparseOptions *builtin_options_as_EmbeddingLookupSparseOptions() const
- {
- return builtin_options_type() == BuiltinOptions_EmbeddingLookupSparseOptions
- ? static_cast<const EmbeddingLookupSparseOptions *>(builtin_options())
- : nullptr;
- }
- const MulOptions *builtin_options_as_MulOptions() const
- {
- return builtin_options_type() == BuiltinOptions_MulOptions
- ? static_cast<const MulOptions *>(builtin_options())
- : nullptr;
- }
- const PadOptions *builtin_options_as_PadOptions() const
- {
- return builtin_options_type() == BuiltinOptions_PadOptions
- ? static_cast<const PadOptions *>(builtin_options())
- : nullptr;
- }
- const GatherOptions *builtin_options_as_GatherOptions() const
- {
- return builtin_options_type() == BuiltinOptions_GatherOptions
- ? static_cast<const GatherOptions *>(builtin_options())
- : nullptr;
- }
- const BatchToSpaceNDOptions *builtin_options_as_BatchToSpaceNDOptions() const
- {
- return builtin_options_type() == BuiltinOptions_BatchToSpaceNDOptions
- ? static_cast<const BatchToSpaceNDOptions *>(builtin_options())
- : nullptr;
- }
- const SpaceToBatchNDOptions *builtin_options_as_SpaceToBatchNDOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SpaceToBatchNDOptions
- ? static_cast<const SpaceToBatchNDOptions *>(builtin_options())
- : nullptr;
- }
- const TransposeOptions *builtin_options_as_TransposeOptions() const
- {
- return builtin_options_type() == BuiltinOptions_TransposeOptions
- ? static_cast<const TransposeOptions *>(builtin_options())
- : nullptr;
- }
- const ReducerOptions *builtin_options_as_ReducerOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ReducerOptions
- ? static_cast<const ReducerOptions *>(builtin_options())
- : nullptr;
- }
- const SubOptions *builtin_options_as_SubOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SubOptions
- ? static_cast<const SubOptions *>(builtin_options())
- : nullptr;
- }
- const DivOptions *builtin_options_as_DivOptions() const
- {
- return builtin_options_type() == BuiltinOptions_DivOptions
- ? static_cast<const DivOptions *>(builtin_options())
- : nullptr;
- }
- const SqueezeOptions *builtin_options_as_SqueezeOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SqueezeOptions
- ? static_cast<const SqueezeOptions *>(builtin_options())
- : nullptr;
- }
- const SequenceRNNOptions *builtin_options_as_SequenceRNNOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SequenceRNNOptions
- ? static_cast<const SequenceRNNOptions *>(builtin_options())
- : nullptr;
- }
- const StridedSliceOptions *builtin_options_as_StridedSliceOptions() const
- {
- return builtin_options_type() == BuiltinOptions_StridedSliceOptions
- ? static_cast<const StridedSliceOptions *>(builtin_options())
- : nullptr;
- }
- const ExpOptions *builtin_options_as_ExpOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ExpOptions
- ? static_cast<const ExpOptions *>(builtin_options())
- : nullptr;
- }
- const TopKV2Options *builtin_options_as_TopKV2Options() const
- {
- return builtin_options_type() == BuiltinOptions_TopKV2Options
- ? static_cast<const TopKV2Options *>(builtin_options())
- : nullptr;
- }
- const SplitOptions *builtin_options_as_SplitOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SplitOptions
- ? static_cast<const SplitOptions *>(builtin_options())
- : nullptr;
- }
- const LogSoftmaxOptions *builtin_options_as_LogSoftmaxOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LogSoftmaxOptions
- ? static_cast<const LogSoftmaxOptions *>(builtin_options())
- : nullptr;
- }
- const CastOptions *builtin_options_as_CastOptions() const
- {
- return builtin_options_type() == BuiltinOptions_CastOptions
- ? static_cast<const CastOptions *>(builtin_options())
- : nullptr;
- }
- const DequantizeOptions *builtin_options_as_DequantizeOptions() const
- {
- return builtin_options_type() == BuiltinOptions_DequantizeOptions
- ? static_cast<const DequantizeOptions *>(builtin_options())
- : nullptr;
- }
- const MaximumMinimumOptions *builtin_options_as_MaximumMinimumOptions() const
- {
- return builtin_options_type() == BuiltinOptions_MaximumMinimumOptions
- ? static_cast<const MaximumMinimumOptions *>(builtin_options())
- : nullptr;
- }
- const ArgMaxOptions *builtin_options_as_ArgMaxOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ArgMaxOptions
- ? static_cast<const ArgMaxOptions *>(builtin_options())
- : nullptr;
- }
- const LessOptions *builtin_options_as_LessOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LessOptions
- ? static_cast<const LessOptions *>(builtin_options())
- : nullptr;
- }
- const NegOptions *builtin_options_as_NegOptions() const
- {
- return builtin_options_type() == BuiltinOptions_NegOptions
- ? static_cast<const NegOptions *>(builtin_options())
- : nullptr;
- }
- const PadV2Options *builtin_options_as_PadV2Options() const
- {
- return builtin_options_type() == BuiltinOptions_PadV2Options
- ? static_cast<const PadV2Options *>(builtin_options())
- : nullptr;
- }
- const GreaterOptions *builtin_options_as_GreaterOptions() const
- {
- return builtin_options_type() == BuiltinOptions_GreaterOptions
- ? static_cast<const GreaterOptions *>(builtin_options())
- : nullptr;
- }
- const GreaterEqualOptions *builtin_options_as_GreaterEqualOptions() const
- {
- return builtin_options_type() == BuiltinOptions_GreaterEqualOptions
- ? static_cast<const GreaterEqualOptions *>(builtin_options())
- : nullptr;
- }
- const LessEqualOptions *builtin_options_as_LessEqualOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LessEqualOptions
- ? static_cast<const LessEqualOptions *>(builtin_options())
- : nullptr;
- }
- const SelectOptions *builtin_options_as_SelectOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SelectOptions
- ? static_cast<const SelectOptions *>(builtin_options())
- : nullptr;
- }
- const SliceOptions *builtin_options_as_SliceOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SliceOptions
- ? static_cast<const SliceOptions *>(builtin_options())
- : nullptr;
- }
- const TransposeConvOptions *builtin_options_as_TransposeConvOptions() const
- {
- return builtin_options_type() == BuiltinOptions_TransposeConvOptions
- ? static_cast<const TransposeConvOptions *>(builtin_options())
- : nullptr;
- }
- const SparseToDenseOptions *builtin_options_as_SparseToDenseOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SparseToDenseOptions
- ? static_cast<const SparseToDenseOptions *>(builtin_options())
- : nullptr;
- }
- const TileOptions *builtin_options_as_TileOptions() const
- {
- return builtin_options_type() == BuiltinOptions_TileOptions
- ? static_cast<const TileOptions *>(builtin_options())
- : nullptr;
- }
- const ExpandDimsOptions *builtin_options_as_ExpandDimsOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ExpandDimsOptions
- ? static_cast<const ExpandDimsOptions *>(builtin_options())
- : nullptr;
- }
- const EqualOptions *builtin_options_as_EqualOptions() const
- {
- return builtin_options_type() == BuiltinOptions_EqualOptions
- ? static_cast<const EqualOptions *>(builtin_options())
- : nullptr;
- }
- const NotEqualOptions *builtin_options_as_NotEqualOptions() const
- {
- return builtin_options_type() == BuiltinOptions_NotEqualOptions
- ? static_cast<const NotEqualOptions *>(builtin_options())
- : nullptr;
- }
- const ShapeOptions *builtin_options_as_ShapeOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ShapeOptions
- ? static_cast<const ShapeOptions *>(builtin_options())
- : nullptr;
- }
- const PowOptions *builtin_options_as_PowOptions() const
- {
- return builtin_options_type() == BuiltinOptions_PowOptions
- ? static_cast<const PowOptions *>(builtin_options())
- : nullptr;
- }
- const ArgMinOptions *builtin_options_as_ArgMinOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ArgMinOptions
- ? static_cast<const ArgMinOptions *>(builtin_options())
- : nullptr;
- }
- const FakeQuantOptions *builtin_options_as_FakeQuantOptions() const
- {
- return builtin_options_type() == BuiltinOptions_FakeQuantOptions
- ? static_cast<const FakeQuantOptions *>(builtin_options())
- : nullptr;
- }
- const PackOptions *builtin_options_as_PackOptions() const
- {
- return builtin_options_type() == BuiltinOptions_PackOptions
- ? static_cast<const PackOptions *>(builtin_options())
- : nullptr;
- }
- const LogicalOrOptions *builtin_options_as_LogicalOrOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LogicalOrOptions
- ? static_cast<const LogicalOrOptions *>(builtin_options())
- : nullptr;
- }
- const OneHotOptions *builtin_options_as_OneHotOptions() const
- {
- return builtin_options_type() == BuiltinOptions_OneHotOptions
- ? static_cast<const OneHotOptions *>(builtin_options())
- : nullptr;
- }
- const LogicalAndOptions *builtin_options_as_LogicalAndOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LogicalAndOptions
- ? static_cast<const LogicalAndOptions *>(builtin_options())
- : nullptr;
- }
- const LogicalNotOptions *builtin_options_as_LogicalNotOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LogicalNotOptions
- ? static_cast<const LogicalNotOptions *>(builtin_options())
- : nullptr;
- }
- const UnpackOptions *builtin_options_as_UnpackOptions() const
- {
- return builtin_options_type() == BuiltinOptions_UnpackOptions
- ? static_cast<const UnpackOptions *>(builtin_options())
- : nullptr;
- }
- const FloorDivOptions *builtin_options_as_FloorDivOptions() const
- {
- return builtin_options_type() == BuiltinOptions_FloorDivOptions
- ? static_cast<const FloorDivOptions *>(builtin_options())
- : nullptr;
- }
- const SquareOptions *builtin_options_as_SquareOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SquareOptions
- ? static_cast<const SquareOptions *>(builtin_options())
- : nullptr;
- }
- const ZerosLikeOptions *builtin_options_as_ZerosLikeOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ZerosLikeOptions
- ? static_cast<const ZerosLikeOptions *>(builtin_options())
- : nullptr;
- }
- const FillOptions *builtin_options_as_FillOptions() const
- {
- return builtin_options_type() == BuiltinOptions_FillOptions
- ? static_cast<const FillOptions *>(builtin_options())
- : nullptr;
- }
- const BidirectionalSequenceLSTMOptions *
- builtin_options_as_BidirectionalSequenceLSTMOptions() const
- {
- return builtin_options_type() == BuiltinOptions_BidirectionalSequenceLSTMOptions
- ? static_cast<const BidirectionalSequenceLSTMOptions *>(builtin_options())
- : nullptr;
- }
- const BidirectionalSequenceRNNOptions *builtin_options_as_BidirectionalSequenceRNNOptions() const
- {
- return builtin_options_type() == BuiltinOptions_BidirectionalSequenceRNNOptions
- ? static_cast<const BidirectionalSequenceRNNOptions *>(builtin_options())
- : nullptr;
- }
- const UnidirectionalSequenceLSTMOptions *
- builtin_options_as_UnidirectionalSequenceLSTMOptions() const
- {
- return builtin_options_type() == BuiltinOptions_UnidirectionalSequenceLSTMOptions
- ? static_cast<const UnidirectionalSequenceLSTMOptions *>(builtin_options())
- : nullptr;
- }
- const FloorModOptions *builtin_options_as_FloorModOptions() const
- {
- return builtin_options_type() == BuiltinOptions_FloorModOptions
- ? static_cast<const FloorModOptions *>(builtin_options())
- : nullptr;
- }
- const RangeOptions *builtin_options_as_RangeOptions() const
- {
- return builtin_options_type() == BuiltinOptions_RangeOptions
- ? static_cast<const RangeOptions *>(builtin_options())
- : nullptr;
- }
- const ResizeNearestNeighborOptions *builtin_options_as_ResizeNearestNeighborOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ResizeNearestNeighborOptions
- ? static_cast<const ResizeNearestNeighborOptions *>(builtin_options())
- : nullptr;
- }
- const LeakyReluOptions *builtin_options_as_LeakyReluOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LeakyReluOptions
- ? static_cast<const LeakyReluOptions *>(builtin_options())
- : nullptr;
- }
- const SquaredDifferenceOptions *builtin_options_as_SquaredDifferenceOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SquaredDifferenceOptions
- ? static_cast<const SquaredDifferenceOptions *>(builtin_options())
- : nullptr;
- }
- const MirrorPadOptions *builtin_options_as_MirrorPadOptions() const
- {
- return builtin_options_type() == BuiltinOptions_MirrorPadOptions
- ? static_cast<const MirrorPadOptions *>(builtin_options())
- : nullptr;
- }
- const AbsOptions *builtin_options_as_AbsOptions() const
- {
- return builtin_options_type() == BuiltinOptions_AbsOptions
- ? static_cast<const AbsOptions *>(builtin_options())
- : nullptr;
- }
- const SplitVOptions *builtin_options_as_SplitVOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SplitVOptions
- ? static_cast<const SplitVOptions *>(builtin_options())
- : nullptr;
- }
- const InstanceNormOptions *builtin_options_as_InstanceNormOptions() const
- {
- return builtin_options_type() == BuiltinOptions_InstanceNormOptions
- ? static_cast<const InstanceNormOptions *>(builtin_options())
- : nullptr;
- }
- const flatbuffers::Vector<uint8_t> *custom_options() const
- {
- return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_CUSTOM_OPTIONS);
- }
- CustomOptionsFormat custom_options_format() const
- {
- return static_cast<CustomOptionsFormat>(GetField<int8_t>(VT_CUSTOM_OPTIONS_FORMAT, 0));
- }
- const flatbuffers::Vector<uint8_t> *mutating_variable_inputs() const
- {
- return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_MUTATING_VARIABLE_INPUTS);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint32_t>(verifier, VT_OPCODE_INDEX) &&
- VerifyOffset(verifier, VT_INPUTS) && verifier.VerifyVector(inputs()) &&
- VerifyOffset(verifier, VT_OUTPUTS) && verifier.VerifyVector(outputs()) &&
- VerifyField<uint8_t>(verifier, VT_BUILTIN_OPTIONS_TYPE) &&
- VerifyOffset(verifier, VT_BUILTIN_OPTIONS) &&
- VerifyBuiltinOptions(verifier, builtin_options(), builtin_options_type()) &&
- VerifyOffset(verifier, VT_CUSTOM_OPTIONS) && verifier.VerifyVector(custom_options()) &&
- VerifyField<int8_t>(verifier, VT_CUSTOM_OPTIONS_FORMAT) &&
- VerifyOffset(verifier, VT_MUTATING_VARIABLE_INPUTS) &&
- verifier.VerifyVector(mutating_variable_inputs()) && verifier.EndTable();
- }
-};
-
-template <> inline const Conv2DOptions *Operator::builtin_options_as<Conv2DOptions>() const
-{
- return builtin_options_as_Conv2DOptions();
-}
-
-template <>
-inline const DepthwiseConv2DOptions *Operator::builtin_options_as<DepthwiseConv2DOptions>() const
-{
- return builtin_options_as_DepthwiseConv2DOptions();
-}
-
-template <>
-inline const ConcatEmbeddingsOptions *Operator::builtin_options_as<ConcatEmbeddingsOptions>() const
-{
- return builtin_options_as_ConcatEmbeddingsOptions();
-}
-
-template <>
-inline const LSHProjectionOptions *Operator::builtin_options_as<LSHProjectionOptions>() const
-{
- return builtin_options_as_LSHProjectionOptions();
-}
-
-template <> inline const Pool2DOptions *Operator::builtin_options_as<Pool2DOptions>() const
-{
- return builtin_options_as_Pool2DOptions();
-}
-
-template <> inline const SVDFOptions *Operator::builtin_options_as<SVDFOptions>() const
-{
- return builtin_options_as_SVDFOptions();
-}
-
-template <> inline const RNNOptions *Operator::builtin_options_as<RNNOptions>() const
-{
- return builtin_options_as_RNNOptions();
-}
-
-template <>
-inline const FullyConnectedOptions *Operator::builtin_options_as<FullyConnectedOptions>() const
-{
- return builtin_options_as_FullyConnectedOptions();
-}
-
-template <> inline const SoftmaxOptions *Operator::builtin_options_as<SoftmaxOptions>() const
-{
- return builtin_options_as_SoftmaxOptions();
-}
-
-template <>
-inline const ConcatenationOptions *Operator::builtin_options_as<ConcatenationOptions>() const
-{
- return builtin_options_as_ConcatenationOptions();
-}
-
-template <> inline const AddOptions *Operator::builtin_options_as<AddOptions>() const
-{
- return builtin_options_as_AddOptions();
-}
-
-template <> inline const L2NormOptions *Operator::builtin_options_as<L2NormOptions>() const
-{
- return builtin_options_as_L2NormOptions();
-}
-
-template <>
-inline const LocalResponseNormalizationOptions *
-Operator::builtin_options_as<LocalResponseNormalizationOptions>() const
-{
- return builtin_options_as_LocalResponseNormalizationOptions();
-}
-
-template <> inline const LSTMOptions *Operator::builtin_options_as<LSTMOptions>() const
-{
- return builtin_options_as_LSTMOptions();
-}
-
-template <>
-inline const ResizeBilinearOptions *Operator::builtin_options_as<ResizeBilinearOptions>() const
-{
- return builtin_options_as_ResizeBilinearOptions();
-}
-
-template <> inline const CallOptions *Operator::builtin_options_as<CallOptions>() const
-{
- return builtin_options_as_CallOptions();
-}
-
-template <> inline const ReshapeOptions *Operator::builtin_options_as<ReshapeOptions>() const
-{
- return builtin_options_as_ReshapeOptions();
-}
-
-template <> inline const SkipGramOptions *Operator::builtin_options_as<SkipGramOptions>() const
-{
- return builtin_options_as_SkipGramOptions();
-}
-
-template <>
-inline const SpaceToDepthOptions *Operator::builtin_options_as<SpaceToDepthOptions>() const
-{
- return builtin_options_as_SpaceToDepthOptions();
-}
-
-template <>
-inline const EmbeddingLookupSparseOptions *
-Operator::builtin_options_as<EmbeddingLookupSparseOptions>() const
-{
- return builtin_options_as_EmbeddingLookupSparseOptions();
-}
-
-template <> inline const MulOptions *Operator::builtin_options_as<MulOptions>() const
-{
- return builtin_options_as_MulOptions();
-}
-
-template <> inline const PadOptions *Operator::builtin_options_as<PadOptions>() const
-{
- return builtin_options_as_PadOptions();
-}
-
-template <> inline const GatherOptions *Operator::builtin_options_as<GatherOptions>() const
-{
- return builtin_options_as_GatherOptions();
-}
-
-template <>
-inline const BatchToSpaceNDOptions *Operator::builtin_options_as<BatchToSpaceNDOptions>() const
-{
- return builtin_options_as_BatchToSpaceNDOptions();
-}
-
-template <>
-inline const SpaceToBatchNDOptions *Operator::builtin_options_as<SpaceToBatchNDOptions>() const
-{
- return builtin_options_as_SpaceToBatchNDOptions();
-}
-
-template <> inline const TransposeOptions *Operator::builtin_options_as<TransposeOptions>() const
-{
- return builtin_options_as_TransposeOptions();
-}
-
-template <> inline const ReducerOptions *Operator::builtin_options_as<ReducerOptions>() const
-{
- return builtin_options_as_ReducerOptions();
-}
-
-template <> inline const SubOptions *Operator::builtin_options_as<SubOptions>() const
-{
- return builtin_options_as_SubOptions();
-}
-
-template <> inline const DivOptions *Operator::builtin_options_as<DivOptions>() const
-{
- return builtin_options_as_DivOptions();
-}
-
-template <> inline const SqueezeOptions *Operator::builtin_options_as<SqueezeOptions>() const
-{
- return builtin_options_as_SqueezeOptions();
-}
-
-template <>
-inline const SequenceRNNOptions *Operator::builtin_options_as<SequenceRNNOptions>() const
-{
- return builtin_options_as_SequenceRNNOptions();
-}
-
-template <>
-inline const StridedSliceOptions *Operator::builtin_options_as<StridedSliceOptions>() const
-{
- return builtin_options_as_StridedSliceOptions();
-}
-
-template <> inline const ExpOptions *Operator::builtin_options_as<ExpOptions>() const
-{
- return builtin_options_as_ExpOptions();
-}
-
-template <> inline const TopKV2Options *Operator::builtin_options_as<TopKV2Options>() const
-{
- return builtin_options_as_TopKV2Options();
-}
-
-template <> inline const SplitOptions *Operator::builtin_options_as<SplitOptions>() const
-{
- return builtin_options_as_SplitOptions();
-}
-
-template <> inline const LogSoftmaxOptions *Operator::builtin_options_as<LogSoftmaxOptions>() const
-{
- return builtin_options_as_LogSoftmaxOptions();
-}
-
-template <> inline const CastOptions *Operator::builtin_options_as<CastOptions>() const
-{
- return builtin_options_as_CastOptions();
-}
-
-template <> inline const DequantizeOptions *Operator::builtin_options_as<DequantizeOptions>() const
-{
- return builtin_options_as_DequantizeOptions();
-}
-
-template <>
-inline const MaximumMinimumOptions *Operator::builtin_options_as<MaximumMinimumOptions>() const
-{
- return builtin_options_as_MaximumMinimumOptions();
-}
-
-template <> inline const ArgMaxOptions *Operator::builtin_options_as<ArgMaxOptions>() const
-{
- return builtin_options_as_ArgMaxOptions();
-}
-
-template <> inline const LessOptions *Operator::builtin_options_as<LessOptions>() const
-{
- return builtin_options_as_LessOptions();
-}
-
-template <> inline const NegOptions *Operator::builtin_options_as<NegOptions>() const
-{
- return builtin_options_as_NegOptions();
-}
-
-template <> inline const PadV2Options *Operator::builtin_options_as<PadV2Options>() const
-{
- return builtin_options_as_PadV2Options();
-}
-
-template <> inline const GreaterOptions *Operator::builtin_options_as<GreaterOptions>() const
-{
- return builtin_options_as_GreaterOptions();
-}
-
-template <>
-inline const GreaterEqualOptions *Operator::builtin_options_as<GreaterEqualOptions>() const
-{
- return builtin_options_as_GreaterEqualOptions();
-}
-
-template <> inline const LessEqualOptions *Operator::builtin_options_as<LessEqualOptions>() const
-{
- return builtin_options_as_LessEqualOptions();
-}
-
-template <> inline const SelectOptions *Operator::builtin_options_as<SelectOptions>() const
-{
- return builtin_options_as_SelectOptions();
-}
-
-template <> inline const SliceOptions *Operator::builtin_options_as<SliceOptions>() const
-{
- return builtin_options_as_SliceOptions();
-}
-
-template <>
-inline const TransposeConvOptions *Operator::builtin_options_as<TransposeConvOptions>() const
-{
- return builtin_options_as_TransposeConvOptions();
-}
-
-template <>
-inline const SparseToDenseOptions *Operator::builtin_options_as<SparseToDenseOptions>() const
-{
- return builtin_options_as_SparseToDenseOptions();
-}
-
-template <> inline const TileOptions *Operator::builtin_options_as<TileOptions>() const
-{
- return builtin_options_as_TileOptions();
-}
-
-template <> inline const ExpandDimsOptions *Operator::builtin_options_as<ExpandDimsOptions>() const
-{
- return builtin_options_as_ExpandDimsOptions();
-}
-
-template <> inline const EqualOptions *Operator::builtin_options_as<EqualOptions>() const
-{
- return builtin_options_as_EqualOptions();
-}
-
-template <> inline const NotEqualOptions *Operator::builtin_options_as<NotEqualOptions>() const
-{
- return builtin_options_as_NotEqualOptions();
-}
-
-template <> inline const ShapeOptions *Operator::builtin_options_as<ShapeOptions>() const
-{
- return builtin_options_as_ShapeOptions();
-}
-
-template <> inline const PowOptions *Operator::builtin_options_as<PowOptions>() const
-{
- return builtin_options_as_PowOptions();
-}
-
-template <> inline const ArgMinOptions *Operator::builtin_options_as<ArgMinOptions>() const
-{
- return builtin_options_as_ArgMinOptions();
-}
-
-template <> inline const FakeQuantOptions *Operator::builtin_options_as<FakeQuantOptions>() const
-{
- return builtin_options_as_FakeQuantOptions();
-}
-
-template <> inline const PackOptions *Operator::builtin_options_as<PackOptions>() const
-{
- return builtin_options_as_PackOptions();
-}
-
-template <> inline const LogicalOrOptions *Operator::builtin_options_as<LogicalOrOptions>() const
-{
- return builtin_options_as_LogicalOrOptions();
-}
-
-template <> inline const OneHotOptions *Operator::builtin_options_as<OneHotOptions>() const
-{
- return builtin_options_as_OneHotOptions();
-}
-
-template <> inline const LogicalAndOptions *Operator::builtin_options_as<LogicalAndOptions>() const
-{
- return builtin_options_as_LogicalAndOptions();
-}
-
-template <> inline const LogicalNotOptions *Operator::builtin_options_as<LogicalNotOptions>() const
-{
- return builtin_options_as_LogicalNotOptions();
-}
-
-template <> inline const UnpackOptions *Operator::builtin_options_as<UnpackOptions>() const
-{
- return builtin_options_as_UnpackOptions();
-}
-
-template <> inline const FloorDivOptions *Operator::builtin_options_as<FloorDivOptions>() const
-{
- return builtin_options_as_FloorDivOptions();
-}
-
-template <> inline const SquareOptions *Operator::builtin_options_as<SquareOptions>() const
-{
- return builtin_options_as_SquareOptions();
-}
-
-template <> inline const ZerosLikeOptions *Operator::builtin_options_as<ZerosLikeOptions>() const
-{
- return builtin_options_as_ZerosLikeOptions();
-}
-
-template <> inline const FillOptions *Operator::builtin_options_as<FillOptions>() const
-{
- return builtin_options_as_FillOptions();
-}
-
-template <>
-inline const BidirectionalSequenceLSTMOptions *
-Operator::builtin_options_as<BidirectionalSequenceLSTMOptions>() const
-{
- return builtin_options_as_BidirectionalSequenceLSTMOptions();
-}
-
-template <>
-inline const BidirectionalSequenceRNNOptions *
-Operator::builtin_options_as<BidirectionalSequenceRNNOptions>() const
-{
- return builtin_options_as_BidirectionalSequenceRNNOptions();
-}
-
-template <>
-inline const UnidirectionalSequenceLSTMOptions *
-Operator::builtin_options_as<UnidirectionalSequenceLSTMOptions>() const
-{
- return builtin_options_as_UnidirectionalSequenceLSTMOptions();
-}
-
-template <> inline const FloorModOptions *Operator::builtin_options_as<FloorModOptions>() const
-{
- return builtin_options_as_FloorModOptions();
-}
-
-template <> inline const RangeOptions *Operator::builtin_options_as<RangeOptions>() const
-{
- return builtin_options_as_RangeOptions();
-}
-
-template <>
-inline const ResizeNearestNeighborOptions *
-Operator::builtin_options_as<ResizeNearestNeighborOptions>() const
-{
- return builtin_options_as_ResizeNearestNeighborOptions();
-}
-
-template <> inline const LeakyReluOptions *Operator::builtin_options_as<LeakyReluOptions>() const
-{
- return builtin_options_as_LeakyReluOptions();
-}
-
-template <>
-inline const SquaredDifferenceOptions *
-Operator::builtin_options_as<SquaredDifferenceOptions>() const
-{
- return builtin_options_as_SquaredDifferenceOptions();
-}
-
-template <> inline const MirrorPadOptions *Operator::builtin_options_as<MirrorPadOptions>() const
-{
- return builtin_options_as_MirrorPadOptions();
-}
-
-template <> inline const AbsOptions *Operator::builtin_options_as<AbsOptions>() const
-{
- return builtin_options_as_AbsOptions();
-}
-
-template <> inline const SplitVOptions *Operator::builtin_options_as<SplitVOptions>() const
-{
- return builtin_options_as_SplitVOptions();
-}
-
-template <>
-inline const InstanceNormOptions *Operator::builtin_options_as<InstanceNormOptions>() const
-{
- return builtin_options_as_InstanceNormOptions();
-}
-
-struct OperatorBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_opcode_index(uint32_t opcode_index)
- {
- fbb_.AddElement<uint32_t>(Operator::VT_OPCODE_INDEX, opcode_index, 0);
- }
- void add_inputs(flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputs)
- {
- fbb_.AddOffset(Operator::VT_INPUTS, inputs);
- }
- void add_outputs(flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputs)
- {
- fbb_.AddOffset(Operator::VT_OUTPUTS, outputs);
- }
- void add_builtin_options_type(BuiltinOptions builtin_options_type)
- {
- fbb_.AddElement<uint8_t>(Operator::VT_BUILTIN_OPTIONS_TYPE,
- static_cast<uint8_t>(builtin_options_type), 0);
- }
- void add_builtin_options(flatbuffers::Offset<void> builtin_options)
- {
- fbb_.AddOffset(Operator::VT_BUILTIN_OPTIONS, builtin_options);
- }
- void add_custom_options(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom_options)
- {
- fbb_.AddOffset(Operator::VT_CUSTOM_OPTIONS, custom_options);
- }
- void add_custom_options_format(CustomOptionsFormat custom_options_format)
- {
- fbb_.AddElement<int8_t>(Operator::VT_CUSTOM_OPTIONS_FORMAT,
- static_cast<int8_t>(custom_options_format), 0);
- }
- void add_mutating_variable_inputs(
- flatbuffers::Offset<flatbuffers::Vector<uint8_t>> mutating_variable_inputs)
- {
- fbb_.AddOffset(Operator::VT_MUTATING_VARIABLE_INPUTS, mutating_variable_inputs);
- }
- explicit OperatorBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- OperatorBuilder &operator=(const OperatorBuilder &);
- flatbuffers::Offset<Operator> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<Operator>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<Operator>
-CreateOperator(flatbuffers::FlatBufferBuilder &_fbb, uint32_t opcode_index = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputs = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputs = 0,
- BuiltinOptions builtin_options_type = BuiltinOptions_NONE,
- flatbuffers::Offset<void> builtin_options = 0,
- flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom_options = 0,
- CustomOptionsFormat custom_options_format = CustomOptionsFormat_FLEXBUFFERS,
- flatbuffers::Offset<flatbuffers::Vector<uint8_t>> mutating_variable_inputs = 0)
-{
- OperatorBuilder builder_(_fbb);
- builder_.add_mutating_variable_inputs(mutating_variable_inputs);
- builder_.add_custom_options(custom_options);
- builder_.add_builtin_options(builtin_options);
- builder_.add_outputs(outputs);
- builder_.add_inputs(inputs);
- builder_.add_opcode_index(opcode_index);
- builder_.add_custom_options_format(custom_options_format);
- builder_.add_builtin_options_type(builtin_options_type);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<Operator>
-CreateOperatorDirect(flatbuffers::FlatBufferBuilder &_fbb, uint32_t opcode_index = 0,
- const std::vector<int32_t> *inputs = nullptr,
- const std::vector<int32_t> *outputs = nullptr,
- BuiltinOptions builtin_options_type = BuiltinOptions_NONE,
- flatbuffers::Offset<void> builtin_options = 0,
- const std::vector<uint8_t> *custom_options = nullptr,
- CustomOptionsFormat custom_options_format = CustomOptionsFormat_FLEXBUFFERS,
- const std::vector<uint8_t> *mutating_variable_inputs = nullptr)
-{
- return circle::CreateOperator(
- _fbb, opcode_index, inputs ? _fbb.CreateVector<int32_t>(*inputs) : 0,
- outputs ? _fbb.CreateVector<int32_t>(*outputs) : 0, builtin_options_type, builtin_options,
- custom_options ? _fbb.CreateVector<uint8_t>(*custom_options) : 0, custom_options_format,
- mutating_variable_inputs ? _fbb.CreateVector<uint8_t>(*mutating_variable_inputs) : 0);
-}
-
-struct SubGraph FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_TENSORS = 4,
- VT_INPUTS = 6,
- VT_OUTPUTS = 8,
- VT_OPERATORS = 10,
- VT_NAME = 12,
- VT_DATA_FORMAT = 14
- };
- const flatbuffers::Vector<flatbuffers::Offset<Tensor>> *tensors() const
- {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<Tensor>> *>(VT_TENSORS);
- }
- const flatbuffers::Vector<int32_t> *inputs() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_INPUTS);
- }
- const flatbuffers::Vector<int32_t> *outputs() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_OUTPUTS);
- }
- const flatbuffers::Vector<flatbuffers::Offset<Operator>> *operators() const
- {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<Operator>> *>(VT_OPERATORS);
- }
- const flatbuffers::String *name() const
- {
- return GetPointer<const flatbuffers::String *>(VT_NAME);
- }
- DataFormat data_format() const
- {
- return static_cast<DataFormat>(GetField<int8_t>(VT_DATA_FORMAT, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_TENSORS) &&
- verifier.VerifyVector(tensors()) && verifier.VerifyVectorOfTables(tensors()) &&
- VerifyOffset(verifier, VT_INPUTS) && verifier.VerifyVector(inputs()) &&
- VerifyOffset(verifier, VT_OUTPUTS) && verifier.VerifyVector(outputs()) &&
- VerifyOffset(verifier, VT_OPERATORS) && verifier.VerifyVector(operators()) &&
- verifier.VerifyVectorOfTables(operators()) && VerifyOffset(verifier, VT_NAME) &&
- verifier.VerifyString(name()) && VerifyField<int8_t>(verifier, VT_DATA_FORMAT) &&
- verifier.EndTable();
- }
-};
-
-struct SubGraphBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_tensors(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Tensor>>> tensors)
- {
- fbb_.AddOffset(SubGraph::VT_TENSORS, tensors);
- }
- void add_inputs(flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputs)
- {
- fbb_.AddOffset(SubGraph::VT_INPUTS, inputs);
- }
- void add_outputs(flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputs)
- {
- fbb_.AddOffset(SubGraph::VT_OUTPUTS, outputs);
- }
- void
- add_operators(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Operator>>> operators)
- {
- fbb_.AddOffset(SubGraph::VT_OPERATORS, operators);
- }
- void add_name(flatbuffers::Offset<flatbuffers::String> name)
- {
- fbb_.AddOffset(SubGraph::VT_NAME, name);
- }
- void add_data_format(DataFormat data_format)
- {
- fbb_.AddElement<int8_t>(SubGraph::VT_DATA_FORMAT, static_cast<int8_t>(data_format), 0);
- }
- explicit SubGraphBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SubGraphBuilder &operator=(const SubGraphBuilder &);
- flatbuffers::Offset<SubGraph> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SubGraph>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SubGraph> CreateSubGraph(
- flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Tensor>>> tensors = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputs = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputs = 0,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Operator>>> operators = 0,
- flatbuffers::Offset<flatbuffers::String> name = 0,
- DataFormat data_format = DataFormat_CHANNELS_LAST)
-{
- SubGraphBuilder builder_(_fbb);
- builder_.add_name(name);
- builder_.add_operators(operators);
- builder_.add_outputs(outputs);
- builder_.add_inputs(inputs);
- builder_.add_tensors(tensors);
- builder_.add_data_format(data_format);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<SubGraph>
-CreateSubGraphDirect(flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<flatbuffers::Offset<Tensor>> *tensors = nullptr,
- const std::vector<int32_t> *inputs = nullptr,
- const std::vector<int32_t> *outputs = nullptr,
- const std::vector<flatbuffers::Offset<Operator>> *operators = nullptr,
- const char *name = nullptr, DataFormat data_format = DataFormat_CHANNELS_LAST)
-{
- return circle::CreateSubGraph(
- _fbb, tensors ? _fbb.CreateVector<flatbuffers::Offset<Tensor>>(*tensors) : 0,
- inputs ? _fbb.CreateVector<int32_t>(*inputs) : 0,
- outputs ? _fbb.CreateVector<int32_t>(*outputs) : 0,
- operators ? _fbb.CreateVector<flatbuffers::Offset<Operator>>(*operators) : 0,
- name ? _fbb.CreateString(name) : 0, data_format);
-}
-
-struct Buffer FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_DATA = 4
- };
- const flatbuffers::Vector<uint8_t> *data() const
- {
- return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_DATA);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_DATA) &&
- verifier.VerifyVector(data()) && verifier.EndTable();
- }
-};
-
-struct BufferBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_data(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data)
- {
- fbb_.AddOffset(Buffer::VT_DATA, data);
- }
- explicit BufferBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- BufferBuilder &operator=(const BufferBuilder &);
- flatbuffers::Offset<Buffer> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<Buffer>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<Buffer>
-CreateBuffer(flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data = 0)
-{
- BufferBuilder builder_(_fbb);
- builder_.add_data(data);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<Buffer> CreateBufferDirect(flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<uint8_t> *data = nullptr)
-{
- return circle::CreateBuffer(_fbb, data ? _fbb.CreateVector<uint8_t>(*data) : 0);
-}
-
-struct Model FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_VERSION = 4,
- VT_OPERATOR_CODES = 6,
- VT_SUBGRAPHS = 8,
- VT_DESCRIPTION = 10,
- VT_BUFFERS = 12,
- VT_METADATA_BUFFER = 14
- };
- uint32_t version() const { return GetField<uint32_t>(VT_VERSION, 0); }
- const flatbuffers::Vector<flatbuffers::Offset<OperatorCode>> *operator_codes() const
- {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<OperatorCode>> *>(
- VT_OPERATOR_CODES);
- }
- const flatbuffers::Vector<flatbuffers::Offset<SubGraph>> *subgraphs() const
- {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<SubGraph>> *>(VT_SUBGRAPHS);
- }
- const flatbuffers::String *description() const
- {
- return GetPointer<const flatbuffers::String *>(VT_DESCRIPTION);
- }
- const flatbuffers::Vector<flatbuffers::Offset<Buffer>> *buffers() const
- {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<Buffer>> *>(VT_BUFFERS);
- }
- const flatbuffers::Vector<int32_t> *metadata_buffer() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_METADATA_BUFFER);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint32_t>(verifier, VT_VERSION) &&
- VerifyOffset(verifier, VT_OPERATOR_CODES) && verifier.VerifyVector(operator_codes()) &&
- verifier.VerifyVectorOfTables(operator_codes()) &&
- VerifyOffset(verifier, VT_SUBGRAPHS) && verifier.VerifyVector(subgraphs()) &&
- verifier.VerifyVectorOfTables(subgraphs()) && VerifyOffset(verifier, VT_DESCRIPTION) &&
- verifier.VerifyString(description()) && VerifyOffset(verifier, VT_BUFFERS) &&
- verifier.VerifyVector(buffers()) && verifier.VerifyVectorOfTables(buffers()) &&
- VerifyOffset(verifier, VT_METADATA_BUFFER) && verifier.VerifyVector(metadata_buffer()) &&
- verifier.EndTable();
- }
-};
-
-struct ModelBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_version(uint32_t version) { fbb_.AddElement<uint32_t>(Model::VT_VERSION, version, 0); }
- void add_operator_codes(
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<OperatorCode>>> operator_codes)
- {
- fbb_.AddOffset(Model::VT_OPERATOR_CODES, operator_codes);
- }
- void
- add_subgraphs(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<SubGraph>>> subgraphs)
- {
- fbb_.AddOffset(Model::VT_SUBGRAPHS, subgraphs);
- }
- void add_description(flatbuffers::Offset<flatbuffers::String> description)
- {
- fbb_.AddOffset(Model::VT_DESCRIPTION, description);
- }
- void add_buffers(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Buffer>>> buffers)
- {
- fbb_.AddOffset(Model::VT_BUFFERS, buffers);
- }
- void add_metadata_buffer(flatbuffers::Offset<flatbuffers::Vector<int32_t>> metadata_buffer)
- {
- fbb_.AddOffset(Model::VT_METADATA_BUFFER, metadata_buffer);
- }
- explicit ModelBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ModelBuilder &operator=(const ModelBuilder &);
- flatbuffers::Offset<Model> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<Model>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<Model> CreateModel(
- flatbuffers::FlatBufferBuilder &_fbb, uint32_t version = 0,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<OperatorCode>>> operator_codes = 0,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<SubGraph>>> subgraphs = 0,
- flatbuffers::Offset<flatbuffers::String> description = 0,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Buffer>>> buffers = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> metadata_buffer = 0)
-{
- ModelBuilder builder_(_fbb);
- builder_.add_metadata_buffer(metadata_buffer);
- builder_.add_buffers(buffers);
- builder_.add_description(description);
- builder_.add_subgraphs(subgraphs);
- builder_.add_operator_codes(operator_codes);
- builder_.add_version(version);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<Model>
-CreateModelDirect(flatbuffers::FlatBufferBuilder &_fbb, uint32_t version = 0,
- const std::vector<flatbuffers::Offset<OperatorCode>> *operator_codes = nullptr,
- const std::vector<flatbuffers::Offset<SubGraph>> *subgraphs = nullptr,
- const char *description = nullptr,
- const std::vector<flatbuffers::Offset<Buffer>> *buffers = nullptr,
- const std::vector<int32_t> *metadata_buffer = nullptr)
-{
- return circle::CreateModel(
- _fbb, version,
- operator_codes ? _fbb.CreateVector<flatbuffers::Offset<OperatorCode>>(*operator_codes) : 0,
- subgraphs ? _fbb.CreateVector<flatbuffers::Offset<SubGraph>>(*subgraphs) : 0,
- description ? _fbb.CreateString(description) : 0,
- buffers ? _fbb.CreateVector<flatbuffers::Offset<Buffer>>(*buffers) : 0,
- metadata_buffer ? _fbb.CreateVector<int32_t>(*metadata_buffer) : 0);
-}
-
-inline bool VerifyQuantizationDetails(flatbuffers::Verifier &verifier, const void *obj,
- QuantizationDetails type)
-{
- switch (type)
- {
- case QuantizationDetails_NONE:
- {
- return true;
- }
- case QuantizationDetails_CustomQuantization:
- {
- auto ptr = reinterpret_cast<const CustomQuantization *>(obj);
- return verifier.VerifyTable(ptr);
- }
- default:
- return false;
- }
-}
-
-inline bool
-VerifyQuantizationDetailsVector(flatbuffers::Verifier &verifier,
- const flatbuffers::Vector<flatbuffers::Offset<void>> *values,
- const flatbuffers::Vector<uint8_t> *types)
-{
- if (!values || !types)
- return !values && !types;
- if (values->size() != types->size())
- return false;
- for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i)
- {
- if (!VerifyQuantizationDetails(verifier, values->Get(i),
- types->GetEnum<QuantizationDetails>(i)))
- {
- return false;
- }
- }
- return true;
-}
-
-inline bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj,
- BuiltinOptions type)
-{
- switch (type)
- {
- case BuiltinOptions_NONE:
- {
- return true;
- }
- case BuiltinOptions_Conv2DOptions:
- {
- auto ptr = reinterpret_cast<const Conv2DOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_DepthwiseConv2DOptions:
- {
- auto ptr = reinterpret_cast<const DepthwiseConv2DOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ConcatEmbeddingsOptions:
- {
- auto ptr = reinterpret_cast<const ConcatEmbeddingsOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LSHProjectionOptions:
- {
- auto ptr = reinterpret_cast<const LSHProjectionOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_Pool2DOptions:
- {
- auto ptr = reinterpret_cast<const Pool2DOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SVDFOptions:
- {
- auto ptr = reinterpret_cast<const SVDFOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_RNNOptions:
- {
- auto ptr = reinterpret_cast<const RNNOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_FullyConnectedOptions:
- {
- auto ptr = reinterpret_cast<const FullyConnectedOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SoftmaxOptions:
- {
- auto ptr = reinterpret_cast<const SoftmaxOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ConcatenationOptions:
- {
- auto ptr = reinterpret_cast<const ConcatenationOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_AddOptions:
- {
- auto ptr = reinterpret_cast<const AddOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_L2NormOptions:
- {
- auto ptr = reinterpret_cast<const L2NormOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LocalResponseNormalizationOptions:
- {
- auto ptr = reinterpret_cast<const LocalResponseNormalizationOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LSTMOptions:
- {
- auto ptr = reinterpret_cast<const LSTMOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ResizeBilinearOptions:
- {
- auto ptr = reinterpret_cast<const ResizeBilinearOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_CallOptions:
- {
- auto ptr = reinterpret_cast<const CallOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ReshapeOptions:
- {
- auto ptr = reinterpret_cast<const ReshapeOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SkipGramOptions:
- {
- auto ptr = reinterpret_cast<const SkipGramOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SpaceToDepthOptions:
- {
- auto ptr = reinterpret_cast<const SpaceToDepthOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_EmbeddingLookupSparseOptions:
- {
- auto ptr = reinterpret_cast<const EmbeddingLookupSparseOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_MulOptions:
- {
- auto ptr = reinterpret_cast<const MulOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_PadOptions:
- {
- auto ptr = reinterpret_cast<const PadOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_GatherOptions:
- {
- auto ptr = reinterpret_cast<const GatherOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_BatchToSpaceNDOptions:
- {
- auto ptr = reinterpret_cast<const BatchToSpaceNDOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SpaceToBatchNDOptions:
- {
- auto ptr = reinterpret_cast<const SpaceToBatchNDOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_TransposeOptions:
- {
- auto ptr = reinterpret_cast<const TransposeOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ReducerOptions:
- {
- auto ptr = reinterpret_cast<const ReducerOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SubOptions:
- {
- auto ptr = reinterpret_cast<const SubOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_DivOptions:
- {
- auto ptr = reinterpret_cast<const DivOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SqueezeOptions:
- {
- auto ptr = reinterpret_cast<const SqueezeOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SequenceRNNOptions:
- {
- auto ptr = reinterpret_cast<const SequenceRNNOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_StridedSliceOptions:
- {
- auto ptr = reinterpret_cast<const StridedSliceOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ExpOptions:
- {
- auto ptr = reinterpret_cast<const ExpOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_TopKV2Options:
- {
- auto ptr = reinterpret_cast<const TopKV2Options *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SplitOptions:
- {
- auto ptr = reinterpret_cast<const SplitOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LogSoftmaxOptions:
- {
- auto ptr = reinterpret_cast<const LogSoftmaxOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_CastOptions:
- {
- auto ptr = reinterpret_cast<const CastOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_DequantizeOptions:
- {
- auto ptr = reinterpret_cast<const DequantizeOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_MaximumMinimumOptions:
- {
- auto ptr = reinterpret_cast<const MaximumMinimumOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ArgMaxOptions:
- {
- auto ptr = reinterpret_cast<const ArgMaxOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LessOptions:
- {
- auto ptr = reinterpret_cast<const LessOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_NegOptions:
- {
- auto ptr = reinterpret_cast<const NegOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_PadV2Options:
- {
- auto ptr = reinterpret_cast<const PadV2Options *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_GreaterOptions:
- {
- auto ptr = reinterpret_cast<const GreaterOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_GreaterEqualOptions:
- {
- auto ptr = reinterpret_cast<const GreaterEqualOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LessEqualOptions:
- {
- auto ptr = reinterpret_cast<const LessEqualOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SelectOptions:
- {
- auto ptr = reinterpret_cast<const SelectOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SliceOptions:
- {
- auto ptr = reinterpret_cast<const SliceOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_TransposeConvOptions:
- {
- auto ptr = reinterpret_cast<const TransposeConvOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SparseToDenseOptions:
- {
- auto ptr = reinterpret_cast<const SparseToDenseOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_TileOptions:
- {
- auto ptr = reinterpret_cast<const TileOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ExpandDimsOptions:
- {
- auto ptr = reinterpret_cast<const ExpandDimsOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_EqualOptions:
- {
- auto ptr = reinterpret_cast<const EqualOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_NotEqualOptions:
- {
- auto ptr = reinterpret_cast<const NotEqualOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ShapeOptions:
- {
- auto ptr = reinterpret_cast<const ShapeOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_PowOptions:
- {
- auto ptr = reinterpret_cast<const PowOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ArgMinOptions:
- {
- auto ptr = reinterpret_cast<const ArgMinOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_FakeQuantOptions:
- {
- auto ptr = reinterpret_cast<const FakeQuantOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_PackOptions:
- {
- auto ptr = reinterpret_cast<const PackOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LogicalOrOptions:
- {
- auto ptr = reinterpret_cast<const LogicalOrOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_OneHotOptions:
- {
- auto ptr = reinterpret_cast<const OneHotOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LogicalAndOptions:
- {
- auto ptr = reinterpret_cast<const LogicalAndOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LogicalNotOptions:
- {
- auto ptr = reinterpret_cast<const LogicalNotOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_UnpackOptions:
- {
- auto ptr = reinterpret_cast<const UnpackOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_FloorDivOptions:
- {
- auto ptr = reinterpret_cast<const FloorDivOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SquareOptions:
- {
- auto ptr = reinterpret_cast<const SquareOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ZerosLikeOptions:
- {
- auto ptr = reinterpret_cast<const ZerosLikeOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_FillOptions:
- {
- auto ptr = reinterpret_cast<const FillOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_BidirectionalSequenceLSTMOptions:
- {
- auto ptr = reinterpret_cast<const BidirectionalSequenceLSTMOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_BidirectionalSequenceRNNOptions:
- {
- auto ptr = reinterpret_cast<const BidirectionalSequenceRNNOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_UnidirectionalSequenceLSTMOptions:
- {
- auto ptr = reinterpret_cast<const UnidirectionalSequenceLSTMOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_FloorModOptions:
- {
- auto ptr = reinterpret_cast<const FloorModOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_RangeOptions:
- {
- auto ptr = reinterpret_cast<const RangeOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ResizeNearestNeighborOptions:
- {
- auto ptr = reinterpret_cast<const ResizeNearestNeighborOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LeakyReluOptions:
- {
- auto ptr = reinterpret_cast<const LeakyReluOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SquaredDifferenceOptions:
- {
- auto ptr = reinterpret_cast<const SquaredDifferenceOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_MirrorPadOptions:
- {
- auto ptr = reinterpret_cast<const MirrorPadOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_AbsOptions:
- {
- auto ptr = reinterpret_cast<const AbsOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SplitVOptions:
- {
- auto ptr = reinterpret_cast<const SplitVOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_InstanceNormOptions:
- {
- auto ptr = reinterpret_cast<const InstanceNormOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- default:
- return false;
- }
-}
-
-inline bool VerifyBuiltinOptionsVector(flatbuffers::Verifier &verifier,
- const flatbuffers::Vector<flatbuffers::Offset<void>> *values,
- const flatbuffers::Vector<uint8_t> *types)
-{
- if (!values || !types)
- return !values && !types;
- if (values->size() != types->size())
- return false;
- for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i)
- {
- if (!VerifyBuiltinOptions(verifier, values->Get(i), types->GetEnum<BuiltinOptions>(i)))
- {
- return false;
- }
- }
- return true;
-}
-
-inline const circle::Model *GetModel(const void *buf)
-{
- return flatbuffers::GetRoot<circle::Model>(buf);
-}
-
-inline const circle::Model *GetSizePrefixedModel(const void *buf)
-{
- return flatbuffers::GetSizePrefixedRoot<circle::Model>(buf);
-}
-
-inline const char *ModelIdentifier() { return "CIR0"; }
-
-inline bool ModelBufferHasIdentifier(const void *buf)
-{
- return flatbuffers::BufferHasIdentifier(buf, ModelIdentifier());
-}
-
-inline bool VerifyModelBuffer(flatbuffers::Verifier &verifier)
-{
- return verifier.VerifyBuffer<circle::Model>(ModelIdentifier());
-}
-
-inline bool VerifySizePrefixedModelBuffer(flatbuffers::Verifier &verifier)
-{
- return verifier.VerifySizePrefixedBuffer<circle::Model>(ModelIdentifier());
-}
-
-inline const char *ModelExtension() { return "circle"; }
-
-inline void FinishModelBuffer(flatbuffers::FlatBufferBuilder &fbb,
- flatbuffers::Offset<circle::Model> root)
-{
- fbb.Finish(root, ModelIdentifier());
-}
-
-inline void FinishSizePrefixedModelBuffer(flatbuffers::FlatBufferBuilder &fbb,
- flatbuffers::Offset<circle::Model> root)
-{
- fbb.FinishSizePrefixed(root, ModelIdentifier());
-}
-
-} // namespace circle
-
-#endif // FLATBUFFERS_GENERATED_CIRCLESCHEMA_CIRCLE_H_
diff --git a/runtime/neurun/frontend/nnapi/ANeuralNetworksModel.test.cc b/runtime/neurun/frontend/nnapi/ANeuralNetworksModel.test.cc
deleted file mode 100644
index 15a279a7e..000000000
--- a/runtime/neurun/frontend/nnapi/ANeuralNetworksModel.test.cc
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <gtest/gtest.h>
-
-#include "wrapper/ANeuralNetworksModel.h"
-
-TEST(MODEL, model_build)
-{
- ANeuralNetworksModel model;
- ASSERT_EQ(model.isFinished(), false);
-}
diff --git a/runtime/neurun/frontend/nnapi/CMakeLists.txt b/runtime/neurun/frontend/nnapi/CMakeLists.txt
deleted file mode 100644
index 3c3411e05..000000000
--- a/runtime/neurun/frontend/nnapi/CMakeLists.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-file(GLOB_RECURSE SOURCES_FRONTEND "*.cc")
-file(GLOB_RECURSE TESTS_FRONTEND "*.test.cc")
-list(REMOVE_ITEM SOURCES_FRONTEND ${TESTS_FRONTEND})
-
-set(LIB_NEURUN neurun)
-
-add_library(${LIB_NEURUN} SHARED ${SOURCES_FRONTEND})
-target_link_libraries(${LIB_NEURUN} PUBLIC nnfw-nnapi-header)
-target_link_libraries(${LIB_NEURUN} PUBLIC neurun_core) # TODO Link PRIVATE neurun_core
-target_link_libraries(${LIB_NEURUN} PRIVATE nnfw_common)
-target_link_libraries(${LIB_NEURUN} PRIVATE nnfw_coverage)
-
-set_target_properties(${LIB_NEURUN} PROPERTIES OUTPUT_NAME neuralnetworks)
-
-install(TARGETS ${LIB_NEURUN} DESTINATION lib)
-
-add_executable(test_neurun_frontend_nnapi ${TESTS_FRONTEND})
-
-target_link_libraries(test_neurun_frontend_nnapi PRIVATE ${LIB_NEURUN} dl)
-target_link_libraries(test_neurun_frontend_nnapi PRIVATE gtest)
-target_link_libraries(test_neurun_frontend_nnapi PRIVATE gtest_main)
-
-install(TARGETS test_neurun_frontend_nnapi DESTINATION unittest)
diff --git a/runtime/neurun/frontend/nnapi/compilation.cc b/runtime/neurun/frontend/nnapi/compilation.cc
deleted file mode 100644
index 164158f19..000000000
--- a/runtime/neurun/frontend/nnapi/compilation.cc
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <NeuralNetworks.h>
-
-#include <new>
-
-#include "wrapper/ANeuralNetworksModel.h"
-#include "wrapper/ANeuralNetworksCompilation.h"
-#include "util/logging.h"
-
-//
-// NNAPI Implementation
-//
-int ANeuralNetworksCompilation_create(ANeuralNetworksModel *model,
- ANeuralNetworksCompilation **compilation)
-{
- if ((model == nullptr) || (compilation == nullptr))
- {
- VERBOSE(NNAPI::Compilation) << "create: Incorrect null pointer parameter(s)" << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- if (!model->isFinished())
- {
- VERBOSE(NNAPI::Compilation) << "create: Model define is not finished" << std::endl;
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- std::shared_ptr<neurun::ir::Graph> internal;
-
- model->release(internal);
-
- *compilation = new (std::nothrow) ANeuralNetworksCompilation(internal);
- if (*compilation == nullptr)
- {
- VERBOSE(NNAPI::Compilation) << "create: ail to create compilation object" << std::endl;
- return ANEURALNETWORKS_OUT_OF_MEMORY;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksCompilation_finish(ANeuralNetworksCompilation *compilation)
-{
- if (compilation == nullptr)
- {
- VERBOSE(NNAPI::Compilation) << "finish: Incorrect null pointer parameter" << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- if (compilation->state() != ::neurun::compiler::State::CREATED)
- {
- VERBOSE(NNAPI::Compilation) << "finish: Already finished" << std::endl;
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- if (!compilation->finish())
- {
- VERBOSE(NNAPI::Compilation) << "finish: Fail to compile" << std::endl;
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-void ANeuralNetworksCompilation_free(ANeuralNetworksCompilation *compilation)
-{
- delete compilation;
-}
-
-int ANeuralNetworksCompilation_setPreference(ANeuralNetworksCompilation *compilation,
- int32_t preference)
-{
- if (compilation == nullptr)
- {
- VERBOSE(NNAPI::Compilation) << "setPreference: Incorrect null pointer parameter" << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- if (compilation->state() != ::neurun::compiler::State::CREATED)
- {
- VERBOSE(NNAPI::Compilation) << "setPreference: Already finished" << std::endl;
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- const PreferenceCode FIRST_PREFERENCE_CODE = ANEURALNETWORKS_PREFER_LOW_POWER;
- const PreferenceCode LAST_PREFERENCE_CODE = ANEURALNETWORKS_PREFER_SUSTAINED_SPEED;
- if ((preference < FIRST_PREFERENCE_CODE) || (preference > LAST_PREFERENCE_CODE))
- {
- VERBOSE(NNAPI::Compilation) << "setPreference: Incorrect preference code" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- // NYI: nothing to set
- return ANEURALNETWORKS_NO_ERROR;
-}
diff --git a/runtime/neurun/frontend/nnapi/event.cc b/runtime/neurun/frontend/nnapi/event.cc
deleted file mode 100644
index 593b74e90..000000000
--- a/runtime/neurun/frontend/nnapi/event.cc
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <NeuralNetworks.h>
-
-#include "wrapper/ANeuralNetworksEvent.h"
-
-int ANeuralNetworksEvent_wait(ANeuralNetworksEvent *event)
-{
- if (event == nullptr)
- {
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- if (!event->waitFinish())
- {
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-void ANeuralNetworksEvent_free(ANeuralNetworksEvent *event) { delete event; }
diff --git a/runtime/neurun/frontend/nnapi/execution.cc b/runtime/neurun/frontend/nnapi/execution.cc
deleted file mode 100644
index 08f2df4c2..000000000
--- a/runtime/neurun/frontend/nnapi/execution.cc
+++ /dev/null
@@ -1,480 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <NeuralNetworks.h>
-
-#include <new>
-
-#include "wrapper/ANeuralNetworksCompilation.h"
-#include "wrapper/ANeuralNetworksExecution.h"
-#include "wrapper/ANeuralNetworksMemory.h"
-#include "wrapper/ANeuralNetworksEvent.h"
-#include "wrapper/NNAPIConvert.h"
-#include "util/logging.h"
-
-//
-// NNAPI Implementation
-//
-int ANeuralNetworksExecution_create(ANeuralNetworksCompilation *compilation,
- ANeuralNetworksExecution **execution)
-{
- if ((compilation == nullptr) || (execution == nullptr))
- {
- VERBOSE(NNAPI::Execution) << "create: Incorrect null pointer parameter(s)" << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- std::shared_ptr<neurun::exec::IExecutor> executor;
-
- compilation->publish(executor);
-
- if (executor == nullptr)
- {
- VERBOSE(NNAPI::Execution) << "create: Never compiled yet" << std::endl;
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- *execution = new (std::nothrow) ANeuralNetworksExecution{executor};
- if (*execution == nullptr)
- {
- VERBOSE(NNAPI::Execution) << "create: Fail to create execution object" << std::endl;
- return ANEURALNETWORKS_OUT_OF_MEMORY;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-// NOTE Handle optional input
-// Unspecified shape on model build
-// Optional and omitted input on execution: skip input setting (workaround for LSTM)
-// Optional but not omitted input on execution: cannot handle
-// Normal input on execution: cannot handle
-// Fully specified shape on model build
-// Optional input on execution: cannot handle
-// Normal input: handle normally
-int ANeuralNetworksExecution_setInput(ANeuralNetworksExecution *execution, int32_t index,
- const ANeuralNetworksOperandType *type, const void *buffer,
- size_t length)
-{
- // Don't check type
- // Comment about ANeuralNetworksOperandType in NeuralNetworks.h:
- // If the input or output is optional and omitted then it need not have a fully specified tensor
- // operand type
- if ((execution == nullptr) || ((buffer == nullptr) && (length != 0)))
- {
- VERBOSE(NNAPI::Execution) << "setInput: Incorrect null pointer parameter(s)" << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- if ((buffer != nullptr) && (length == 0))
- {
- VERBOSE(NNAPI::Execution) << "setInput: Zero length input" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- const auto operand_index = execution->getInputOperandIndex(index);
- if (!operand_index.valid())
- {
- VERBOSE(NNAPI::Execution) << "setInput: Invalid input index" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- // Omitted optional input
- // LSTM operation's some inputs can be optional input
- if ((buffer == nullptr) && (length == 0))
- {
- if (execution->haveUnspecifiedDims(operand_index))
- {
- return ANEURALNETWORKS_NO_ERROR;
- }
- else
- {
- VERBOSE(NNAPI::Execution) << "setInput: Cannot handle fully-specified shape on model build "
- "but omitted input on execution"
- << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
- }
-
- if (type != nullptr)
- {
- if (!execution->compareDataType(type, operand_index))
- {
- VERBOSE(NNAPI::Execution) << "setInput: Data type mismatch" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (!execution->compareShape(type, operand_index))
- {
- VERBOSE(NNAPI::Execution) << "setInput: Shape mismatch" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (NNAPIConvert::calculateSizeFromType(type) != length)
- {
- VERBOSE(NNAPI::Execution) << "setInput: Invalid length" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
- }
- else
- {
- if (execution->haveUnspecifiedDims(operand_index))
- {
- VERBOSE(NNAPI::Execution) << "setInput: Unspecified dimension value" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (execution->getOperandSize(operand_index) != length)
- {
- VERBOSE(NNAPI::Execution) << "setInput: Invalid length" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
- }
-
- if (!execution->setInput(index, type, buffer, length))
- {
- VERBOSE(NNAPI::Execution) << "setInput: Fail to set input" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution *execution, int32_t index,
- const ANeuralNetworksOperandType *type, void *buffer,
- size_t length)
-{
- // Don't check type
- // Comment about ANeuralNetworksOperandType in NeuralNetworks.h:
- // If the input or output is optional and omitted then it need not have a fully specified tensor
- // operand type
- if ((execution == nullptr) || ((buffer == nullptr) && (length != 0)))
- {
- VERBOSE(NNAPI::Execution) << "setOutput: Incorrect null pointer parameter(s)" << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- if ((buffer != nullptr) && (length == 0))
- {
- VERBOSE(NNAPI::Execution) << "setOutput: Zero length output" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- // Handle optional output
- if (buffer == nullptr)
- {
- return ANEURALNETWORKS_NO_ERROR;
- }
-
- const auto operand_index = execution->getOutputOperandIndex(index);
- if (!operand_index.valid())
- {
- VERBOSE(NNAPI::Execution) << "setOutput: Invalid output index" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (type != nullptr)
- {
- if (!execution->compareDataType(type, operand_index))
- {
- VERBOSE(NNAPI::Execution) << "setOutput: Data type mismatch" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (!execution->compareShape(type, operand_index))
- {
- VERBOSE(NNAPI::Execution) << "setOutput: Shape mismatch" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (NNAPIConvert::calculateSizeFromType(type) != length)
- {
- VERBOSE(NNAPI::Execution) << "setOutput: Invalid length" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
- }
- else
- {
- if (execution->haveUnspecifiedDims(operand_index))
- {
- VERBOSE(NNAPI::Execution) << "setOutput: Unspecified dimension value" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (execution->getOperandSize(operand_index) != length)
- {
- VERBOSE(NNAPI::Execution) << "setInput: Invalid length" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
- }
-
- if (!execution->setOutput(index, type, buffer, length))
- {
- VERBOSE(NNAPI::Execution) << "setOutput: Fail to set output" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksExecution_startCompute(ANeuralNetworksExecution *execution,
- ANeuralNetworksEvent **event)
-{
- if ((execution == nullptr) || (event == nullptr))
- {
- VERBOSE(NNAPI::Execution) << "startCompute: Incorrect null pointer parameter(s)" << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- // TODO: Handle event
- auto instance = execution->instance();
- *event = new (std::nothrow) ANeuralNetworksEvent{instance};
- if (*event == nullptr)
- {
- VERBOSE(NNAPI::Execution) << "startCompute: Fail to create event" << std::endl;
- return ANEURALNETWORKS_OUT_OF_MEMORY;
- }
-
- if (!execution->startExecute())
- {
- VERBOSE(NNAPI::Execution) << "startCompute: Fail to start execution" << std::endl;
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksExecution_compute(ANeuralNetworksExecution *execution)
-{
- if (execution == nullptr)
- {
- VERBOSE(NNAPI::Execution) << "Compute: Incorrect null pointer parameter" << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- if (!execution->execute())
- {
- VERBOSE(NNAPI::Execution) << "Compute: Fail to execution" << std::endl;
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-void ANeuralNetworksExecution_free(ANeuralNetworksExecution *execution) { delete execution; }
-
-int ANeuralNetworksExecution_setInputFromMemory(ANeuralNetworksExecution *execution, int32_t index,
- const ANeuralNetworksOperandType *type,
- const ANeuralNetworksMemory *memory, size_t offset,
- size_t length)
-{
- if ((execution == nullptr) || (memory == nullptr))
- {
- VERBOSE(NNAPI::Execution) << "setInputFromMemory: Incorrect null pointer parameter(s)"
- << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- if (length == 0)
- {
- VERBOSE(NNAPI::Execution) << "setInputFromMemory: Zero length input" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- const auto operand_index = execution->getInputOperandIndex(index);
- if (!operand_index.valid())
- {
- VERBOSE(NNAPI::Execution) << "setInputFromMemory: Invalid input index" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (type != nullptr)
- {
- if (!execution->compareDataType(type, operand_index))
- {
- VERBOSE(NNAPI::Execution) << "setInputFromMemory: Data type mismatch" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (!execution->compareShape(type, operand_index))
- {
- VERBOSE(NNAPI::Execution) << "setInputFromMemory: Shape mismatch" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (NNAPIConvert::calculateSizeFromType(type) != length)
- {
- VERBOSE(NNAPI::Execution) << "setInputFromMemory: Invalid length" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
- }
- else
- {
- if (execution->haveUnspecifiedDims(operand_index))
- {
- VERBOSE(NNAPI::Execution) << "setInputFromMemory: Unspecified dimension value" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (execution->getOperandSize(operand_index) != length)
- {
- VERBOSE(NNAPI::Execution) << "setInputFromMemory: Invalid length" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
- }
-
- if (!memory->vaildAccess(offset, length))
- {
- VERBOSE(NNAPI::Execution) << "setInputFromMemory: Invalid memory access" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (!execution->setInput(index, type, reinterpret_cast<const void *>(memory->base() + offset),
- length))
- {
- VERBOSE(NNAPI::Execution) << "setInputFromMemory: Fail to set input" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksExecution_setOutputFromMemory(ANeuralNetworksExecution *execution, int32_t index,
- const ANeuralNetworksOperandType *type,
- const ANeuralNetworksMemory *memory, size_t offset,
- size_t length)
-{
- if ((execution == nullptr) || (memory == nullptr))
- {
- VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Incorrect null pointer parameter(s)"
- << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- if (length == 0)
- {
- VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Zero length input" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- const auto operand_index = execution->getOutputOperandIndex(index);
- if (!operand_index.valid())
- {
- VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Invalid output index" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (type != nullptr)
- {
- if (!execution->compareDataType(type, operand_index))
- {
- VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Data type mismatch" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (!execution->compareShape(type, operand_index))
- {
- VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Shape mismatch" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (NNAPIConvert::calculateSizeFromType(type) != length)
- {
- VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Invalid length" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
- }
- else
- {
- if (execution->haveUnspecifiedDims(operand_index))
- {
- VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Unspecified dimension value" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (execution->getOperandSize(operand_index) != length)
- {
- VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Invalid length" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
- }
-
- if (!memory->vaildAccess(offset, length))
- {
- VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Invalid memory access" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (!execution->setOutput(index, type, reinterpret_cast<void *>(memory->base() + offset), length))
- {
- VERBOSE(NNAPI::Execution) << "setOutputFromMemory: Fail to set input" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksExecution_getOutputOperandRank(ANeuralNetworksExecution *execution,
- int32_t index, uint32_t *rank)
-{
- if ((execution == nullptr) || (rank == nullptr))
- {
- VERBOSE(NNAPI::Execution) << "getOutputOperandRank: Incorrect null pointer parameter(s)"
- << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- const auto operand_index = execution->getOutputOperandIndex(index);
- if (!operand_index.valid())
- {
- VERBOSE(NNAPI::Execution) << "getOutputOperandRank: Invalid output index" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (!execution->getOutputOperandRank(index, rank))
- {
- VERBOSE(NNAPI::Execution) << "getOutputOperandRank: Fail to get rank" << std::endl;
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksExecution_getOutputOperandDimensions(ANeuralNetworksExecution *execution,
- int32_t index, uint32_t *dimensions)
-{
- if ((execution == nullptr) || (dimensions == nullptr))
- {
- VERBOSE(NNAPI::Execution) << "getOutputOperandDimensions: Incorrect null pointer parameter(s)"
- << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- const auto operand_index = execution->getOutputOperandIndex(index);
- if (!operand_index.valid())
- {
- VERBOSE(NNAPI::Execution) << "getOutputOperandDimensions: Invalid output index" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (!execution->getOutputOperandDimensions(index, dimensions))
- {
- VERBOSE(NNAPI::Execution) << "getOutputOperandDimensions: Fail to get rank" << std::endl;
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
diff --git a/runtime/neurun/frontend/nnapi/memory.cc b/runtime/neurun/frontend/nnapi/memory.cc
deleted file mode 100644
index fbe1a48e8..000000000
--- a/runtime/neurun/frontend/nnapi/memory.cc
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <NeuralNetworks.h>
-#include <sys/mman.h>
-#include <new>
-#include <memory>
-
-#include "cpp14/memory.h"
-#include "wrapper/ANeuralNetworksMemory.h"
-
-int ANeuralNetworksMemory_createFromFd(size_t size, int protect, int fd, size_t offset,
- ANeuralNetworksMemory **memory)
-{
- if (memory == nullptr)
- {
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- *memory = new (std::nothrow) ANeuralNetworksMemory{size, protect, fd, offset};
- if (*memory == nullptr)
- {
- return ANEURALNETWORKS_OUT_OF_MEMORY;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-void ANeuralNetworksMemory_free(ANeuralNetworksMemory *memory) { delete memory; }
diff --git a/runtime/neurun/frontend/nnapi/model.cc b/runtime/neurun/frontend/nnapi/model.cc
deleted file mode 100644
index 72a66e630..000000000
--- a/runtime/neurun/frontend/nnapi/model.cc
+++ /dev/null
@@ -1,411 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <NeuralNetworks.h>
-#include <NeuralNetworksEx.h>
-
-#include <new>
-
-#include "wrapper/ANeuralNetworksModel.h"
-#include "wrapper/ANeuralNetworksMemory.h"
-#include "util/logging.h"
-
-int ANeuralNetworksModel_create(ANeuralNetworksModel **model)
-{
- if (model == nullptr)
- {
- VERBOSE(NNAPI::Model) << "create: Incorrect null pointer parameter" << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- *model = new (std::nothrow) ANeuralNetworksModel{};
- if (*model == nullptr)
- {
- VERBOSE(NNAPI::Model) << "create: Fail to create model object" << std::endl;
- return ANEURALNETWORKS_OUT_OF_MEMORY;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-void ANeuralNetworksModel_free(ANeuralNetworksModel *model) { delete model; }
-
-int ANeuralNetworksModel_addOperand(ANeuralNetworksModel *model,
- const ANeuralNetworksOperandType *type)
-{
- if ((model == nullptr) || (type == nullptr))
- {
- VERBOSE(NNAPI::Model) << "addOperand: Incorrect null pointer parameter(s)" << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- if (model->isFinished())
- {
- VERBOSE(NNAPI::Model) << "addOperand: Already finished" << std::endl;
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- // scale and zeroPoint should be zero for scalars and non-fixed point tensors
- // Quantized:
- // scale: a 32 bit floating point value greater than zero
- // zeroPoint: a 32 bit integer, in range [0, 255]
- if (type->type == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM)
- {
- if (!(type->scale > 0.0f))
- {
- VERBOSE(NNAPI::Model) << "addOperand: Incorrect scale value for quantization" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if ((type->zeroPoint < 0) || (type->zeroPoint > 255))
- {
- VERBOSE(NNAPI::Model) << "addOperand: Incorrect zeroPoint value for quantization"
- << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
- }
- // NOTE Validation of scale and zeroPoint would be skipped for a while.
- // We do not know whether scalar type can have scale and zeroPoint.
- // To pass ValidationTest and GeneratedTest, this validation code
- // would not be implemented until we can define this issue clearly.
- //
- // scale and zeroPoint should be zero for scalars and non-fixed point tensors
- // else if ((type->scale != 0.0f) || (type->zeroPoint != 0))
- // {
- // return ANEURALNETWORKS_BAD_DATA;
- // }
-
- // dimensionCount should be zero for scalars
- if ((type->dimensionCount != 0) &&
- ((type->type == ANEURALNETWORKS_FLOAT32) || (type->type == ANEURALNETWORKS_INT32) ||
- (type->type == ANEURALNETWORKS_UINT32)))
- {
- VERBOSE(NNAPI::Model) << "addOperand: Incorrect data type" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (!model->addOperand(type))
- {
- VERBOSE(NNAPI::Model) << "addOperand: Fail to add operand" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel *model, int32_t index,
- const void *buffer, size_t length)
-{
- const bool optional_operand = ((buffer == nullptr) && (length == 0));
-
- if ((model == nullptr) || ((buffer == nullptr) && (length != 0)))
- {
- VERBOSE(NNAPI::Model) << "setOperandValue: Incorrect null pointer parameter(s)" << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- if (model->isFinished())
- {
- VERBOSE(NNAPI::Model) << "setOperandValue: Already finished" << std::endl;
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- // Negative index value is not allowed
- if (index < 0)
- {
- VERBOSE(NNAPI::Model) << "setOperandValue: Invalid index value (negative)" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
- // NOTE OperandIndex uses uint32_t as its underlying type as various NNAPI
- // functions such as ANeuralNetworksModel_addOperation use uint32_t to represent operand
- // index
- // ANeuralNetworksModel_setOperandValue, however, uses int32_t to represent operand index.
- //
- // Below, static_cast<uint32_t>(...) is introduced to eliminate compiler warning.
- uint32_t ind = static_cast<uint32_t>(index);
-
- if (!model->isExistOperand(ind))
- {
- VERBOSE(NNAPI::Model) << "setOperandValue: Invalid index value (not exist)" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (!optional_operand && (model->operandSize(ind) != length))
- {
- VERBOSE(NNAPI::Model) << "setOperandValue: Invalid data length" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (model->isUsageSet(ind))
- {
- VERBOSE(NNAPI::Model) << "setOperandValue: Already set operand" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- // NNAPI spec in NeuralNetworks.h
- // For values of length greater than ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES,
- // the application is responsible for not changing the content of this region
- // until all executions using this model have completed
- bool copy_value = false;
- if (length <= ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES)
- {
- copy_value = true;
- }
-
- if (!model->setOperandValue(ind, buffer, length, optional_operand, copy_value))
- {
- VERBOSE(NNAPI::Model) << "setOperandValue: Fail to set operand value" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksModel_setOperandValueFromMemory(ANeuralNetworksModel *model, int32_t index,
- const ANeuralNetworksMemory *memory,
- size_t offset, size_t length)
-{
- if ((model == nullptr) || (memory == nullptr))
- {
- VERBOSE(NNAPI::Model) << "setOperandValueFromMemory: Incorrect null pointer parameter(s)"
- << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- if (model->isFinished())
- {
- VERBOSE(NNAPI::Model) << "setOperandValueFromMemory: Already finished" << std::endl;
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- // Negative index value is not allowed
- if (index < 0)
- {
- VERBOSE(NNAPI::Model) << "setOperandValueFromMemory: Invalid index value (negative)"
- << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
- // NOTE OperandIndex uses uint32_t as its underlying type as various NNAPI
- // functions such as ANeuralNetworksModel_addOperation use uint32_t to represent operand
- // index
- // ANeuralNetworksModel_setOperandValue, however, uses int32_t to represent operand index.
- //
- // Below, static_cast<uint32_t>(...) is introduced to eliminate compiler warning.
- uint32_t ind = static_cast<uint32_t>(index);
-
- if (!model->isExistOperand(ind))
- {
- VERBOSE(NNAPI::Model) << "setOperandValueFromMemory: Invalid index value (not exist)"
- << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if ((model->operandSize(ind) != length) || (memory->size() < (offset + length)))
- {
- VERBOSE(NNAPI::Model) << "setOperandValueFromMemory: Invalid data length" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (model->isUsageSet(ind))
- {
- VERBOSE(NNAPI::Model) << "setOperandValueFromMemory: Already set operand" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (!model->setOperandValue(ind, memory->base() + offset, length))
- {
- VERBOSE(NNAPI::Model) << "setOperandValueFromMemory: Fail to set operand value" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksModel_addOperation(ANeuralNetworksModel *model,
- ANeuralNetworksOperationType type, uint32_t inputCount,
- const uint32_t *inputs, uint32_t outputCount,
- const uint32_t *outputs)
-{
- if ((model == nullptr) || (inputs == nullptr) || (outputs == nullptr))
- {
- VERBOSE(NNAPI::Model) << "addOperation: Incorrect null pointer parameter(s)" << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- if (model->isFinished())
- {
- VERBOSE(NNAPI::Model) << "addOperation: Already finished" << std::endl;
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- const ANeuralNetworksOperationType FIRST_OPERATION = ANEURALNETWORKS_ADD;
- const ANeuralNetworksOperationType LAST_OPERATION = ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR;
- if ((type < FIRST_OPERATION) || (type > LAST_OPERATION))
- {
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- for (uint32_t i = 0; i < outputCount; i++)
- {
- if (model->isUsageSet(outputs[i]))
- {
- VERBOSE(NNAPI::Model) << "addOperation: Already set output operand" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
- }
-
- if (!model->addOperation(type, inputCount, inputs, outputCount, outputs))
- {
- VERBOSE(NNAPI::Model) << "addOperation: Fail to add operation" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksModel_addOperationEx(ANeuralNetworksModel *model,
- ANeuralNetworksOperationTypeEx type, uint32_t inputCount,
- const uint32_t *inputs, uint32_t outputCount,
- const uint32_t *outputs)
-{
- if ((model == nullptr) || (inputs == nullptr) || (outputs == nullptr))
- {
- VERBOSE(NNAPI::Model) << "addOperation: Incorrect null pointer parameter(s)" << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- if (model->isFinished())
- {
- VERBOSE(NNAPI::Model) << "addOperation: Already finished" << std::endl;
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- const ANeuralNetworksOperationTypeEx FIRST_OPERATION = ANEURALNETWORKS_CAST_EX;
- const ANeuralNetworksOperationTypeEx LAST_OPERATION = ANEURALNETWORKS_LESS_EX;
- if ((type < FIRST_OPERATION) || (type > LAST_OPERATION))
- {
- VERBOSE(NNAPI::Model) << "addOperation: Invalid operation type" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- for (uint32_t i = 0; i < outputCount; i++)
- {
- if (model->isUsageSet(outputs[i]))
- {
- VERBOSE(NNAPI::Model) << "addOperation: Already set output operand" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
- }
-
- if (!model->addOperationEx(type, inputCount, inputs, outputCount, outputs))
- {
- VERBOSE(NNAPI::Model) << "addOperation: Fail to add operation" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksModel_identifyInputsAndOutputs(ANeuralNetworksModel *model, uint32_t inputCount,
- const uint32_t *inputs, uint32_t outputCount,
- const uint32_t *outputs)
-{
- if ((model == nullptr) || (inputs == nullptr) || (outputs == nullptr))
- {
- VERBOSE(NNAPI::Model) << "identifyInputsAndOutputs: Incorrect null pointer parameter(s)"
- << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- if (model->isFinished())
- {
- VERBOSE(NNAPI::Model) << "identifyInputsAndOutputs: Already finished" << std::endl;
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- for (uint32_t n = 0; n < inputCount; ++n)
- {
- uint32_t ind = inputs[n];
- if (model->isUsageSet(ind))
- {
- VERBOSE(NNAPI::Model) << "identifyInputsAndOutputs: Already set input operand" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (!model->addModelInput(ind))
- {
- VERBOSE(NNAPI::Model) << "identifyInputsAndOutputs: Fail to add input" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
- }
-
- for (uint32_t n = 0; n < outputCount; ++n)
- {
- uint32_t ind = outputs[n];
-
- if (!model->isOperationOutput(ind))
- {
- VERBOSE(NNAPI::Model) << "identifyInputsAndOutputs: Need to set output operand" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
-
- if (!model->addModelOutput(ind))
- {
- VERBOSE(NNAPI::Model) << "identifyInputsAndOutputs: Fail to add output" << std::endl;
- return ANEURALNETWORKS_BAD_DATA;
- }
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksModel_finish(ANeuralNetworksModel *model)
-{
- if (model == nullptr)
- {
- VERBOSE(NNAPI::Model) << "finish: Incorrect null pointer parameter" << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- if (model->isFinished())
- {
- VERBOSE(NNAPI::Model) << "finish: Already finished" << std::endl;
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- if (!model->finish())
- {
- VERBOSE(NNAPI::Model) << "finish: Fail to generate internal graph" << std::endl;
- return ANEURALNETWORKS_BAD_STATE;
- }
-
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksModel_relaxComputationFloat32toFloat16(ANeuralNetworksModel *model, bool)
-{
- if (model == nullptr)
- {
- VERBOSE(NNAPI::Model) << "relaxComputationFloat32toFloat16: Incorrect null pointer parameter"
- << std::endl;
- return ANEURALNETWORKS_UNEXPECTED_NULL;
- }
-
- // NYI: nothing to set
- VERBOSE(NNAPI::Model) << "relaxComputationFloat32toFloat16: Do nothing yet" << std::endl;
-
- return ANEURALNETWORKS_NO_ERROR;
-}
diff --git a/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksCompilation.cc b/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksCompilation.cc
deleted file mode 100644
index 1aa1583aa..000000000
--- a/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksCompilation.cc
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ANeuralNetworksCompilation.h"
-
-#include "util/logging.h"
-
-ANeuralNetworksCompilation::ANeuralNetworksCompilation(
- const std::shared_ptr<neurun::ir::Graph> &model) noexcept
- : _compiler{new neurun::compiler::Compiler{model}}
-{
- // DO NOTHING
-}
-
-bool ANeuralNetworksCompilation::finish() noexcept
-{
- try
- {
- _compiler->compile();
- }
- catch (const std::exception &e)
- {
- VERBOSE(EXCEPTION) << e.what() << std::endl;
-
- return false;
- }
-
- return true;
-}
diff --git a/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksCompilation.h b/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksCompilation.h
deleted file mode 100644
index 56b402d16..000000000
--- a/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksCompilation.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __COMPILATION_H__
-#define __COMPILATION_H__
-
-#include "compiler/Compiler.h"
-#include "ir/Graph.h"
-#include "exec/IExecutor.h"
-
-struct ANeuralNetworksCompilation
-{
-public:
- ANeuralNetworksCompilation(const std::shared_ptr<neurun::ir::Graph> &graph) noexcept;
-
-public:
- bool finish() noexcept;
-
- neurun::compiler::State state(void) noexcept { return _compiler->state(); }
- void publish(std::shared_ptr<neurun::exec::IExecutor> &executor) noexcept
- {
- _compiler->release(executor);
- }
-
-private:
- std::shared_ptr<neurun::compiler::Compiler> _compiler;
-};
-
-#endif
diff --git a/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksEvent.cc b/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksEvent.cc
deleted file mode 100644
index b09f9abe6..000000000
--- a/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksEvent.cc
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ANeuralNetworksEvent.h"
-
-#include "exec/Execution.h"
-#include "util/logging.h"
-
-ANeuralNetworksEvent::ANeuralNetworksEvent(
- const std::shared_ptr<neurun::exec::Execution> &execution)
- : _execution{execution}
-{
- // DO NOTHING
-}
-
-bool ANeuralNetworksEvent::waitFinish(void) noexcept
-{
- try
- {
- _execution->waitFinish();
- }
- catch (const std::exception &e)
- {
- VERBOSE(EXCEPTION) << e.what() << std::endl;
-
- return false;
- }
-
- return true;
-}
diff --git a/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksEvent.h b/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksEvent.h
deleted file mode 100644
index e499bab77..000000000
--- a/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksEvent.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __EVENT_H__
-#define __EVENT_H__
-
-#include <NeuralNetworks.h>
-
-#include <memory>
-
-namespace neurun
-{
-namespace exec
-{
-class Execution;
-} // namespace exec
-} // namespace neurun
-
-struct ANeuralNetworksEvent
-{
-public:
- ANeuralNetworksEvent(const std::shared_ptr<neurun::exec::Execution> &execution);
-
-public:
- bool waitFinish(void) noexcept;
-
-private:
- const std::shared_ptr<neurun::exec::Execution> _execution;
-};
-
-#endif
diff --git a/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc b/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc
deleted file mode 100644
index b8e43a691..000000000
--- a/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc
+++ /dev/null
@@ -1,289 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ANeuralNetworksExecution.h"
-#include "NNAPIConvert.h"
-#include "util/logging.h"
-
-const neurun::ir::OperandIndex
-ANeuralNetworksExecution::getInputOperandIndex(int32_t index) noexcept
-{
- if (index < 0)
- {
- // Negative index: return invalid index
- return neurun::ir::OperandIndex{};
- }
-
- uint32_t cast_index = static_cast<uint32_t>(index);
- if (cast_index >= _execution->graph().getInputs().size())
- {
- // Return invalid index
- return neurun::ir::OperandIndex{};
- }
-
- neurun::ir::IOIndex input_index{cast_index};
- const auto operand_index = _execution->graph().getInputs().at(input_index);
- return operand_index;
-}
-
-const neurun::ir::OperandIndex
-ANeuralNetworksExecution::getOutputOperandIndex(int32_t index) noexcept
-{
- if (index < 0)
- {
- // Negative index: return invalid index
- return neurun::ir::OperandIndex{};
- }
-
- uint32_t cast_index = static_cast<uint32_t>(index);
- if (cast_index >= _execution->graph().getOutputs().size())
- {
- // Return invalid index
- return neurun::ir::OperandIndex{};
- }
-
- neurun::ir::IOIndex output_index{cast_index};
- const auto operand_index = _execution->graph().getOutputs().at(output_index);
- return operand_index;
-}
-
-bool ANeuralNetworksExecution::compareDataType(const ANeuralNetworksOperandType *type,
- const neurun::ir::OperandIndex index) noexcept
-{
- try
- {
- const auto operand_type = _execution->graph().operands().at(index).typeInfo();
- const auto typeInfo = NNAPIConvert::getTypeInfo(type);
-
- if (operand_type != typeInfo)
- {
- // Data type mismatch
- return false;
- }
- }
- catch (const std::exception &e)
- {
- VERBOSE(EXCEPTION) << e.what() << std::endl;
-
- return false;
- }
-
- return true;
-}
-
-bool ANeuralNetworksExecution::compareShape(const ANeuralNetworksOperandType *type,
- const neurun::ir::OperandIndex index) noexcept
-{
- // Passed shape should be specified
- if (haveUnspecifiedDims(index))
- {
- return false;
- }
-
- const auto &operand_shape = _execution->graph().operands().at(index).shape();
- const auto &shape_from_type = NNAPIConvert::getShape(type);
-
- return operand_shape == shape_from_type;
-}
-
-bool ANeuralNetworksExecution::haveUnspecifiedDims(const neurun::ir::OperandIndex index) noexcept
-{
- const auto operand_shape = _execution->graph().operands().at(index).shape();
-
- return operand_shape.num_elements() == 0;
-}
-
-size_t ANeuralNetworksExecution::getOperandSize(const neurun::ir::OperandIndex index) noexcept
-{
- try
- {
- return _execution->graph().operands().at(index).operandSize();
- }
- catch (const std::exception &e)
- {
- VERBOSE(EXCEPTION) << e.what() << std::endl;
-
- return 0;
- }
-}
-
-bool ANeuralNetworksExecution::setInput(uint32_t index, const ANeuralNetworksOperandType *type,
- const void *buffer, size_t length) noexcept
-{
- try
- {
- neurun::ir::IOIndex input_index{index};
- const auto operand_index = getInputOperandIndex(index);
-
- const auto type_info = _execution->graph().operands().at(operand_index).typeInfo();
- const auto shape = (type != nullptr) ? NNAPIConvert::getShape(type)
- : _execution->graph().operands().at(operand_index).shape();
-
- // NOTE The nnapi does not provide setting io_layout and not support changing layout. In other
- // words, we can assume that io_layout from nnapi always is the same as layout of the used
- // model.
- // TODO Set layout of model
- _execution->setInput(input_index, type_info, shape, buffer, length, neurun::ir::Layout::NHWC);
- }
- catch (const std::exception &e)
- {
- VERBOSE(EXCEPTION) << e.what() << std::endl;
-
- return false;
- }
-
- return true;
-}
-
-bool ANeuralNetworksExecution::setOutput(uint32_t index, const ANeuralNetworksOperandType *type,
- void *buffer, size_t length) noexcept
-{
- try
- {
- neurun::ir::IOIndex output_index{index};
- const auto operand_index = getOutputOperandIndex(index);
-
- const auto type_info = _execution->graph().operands().at(operand_index).typeInfo();
- const auto shape = (type != nullptr) ? NNAPIConvert::getShape(type)
- : _execution->graph().operands().at(operand_index).shape();
-
- // NOTE The nnapi does not provide setting io_layout and not support changing layout. In other
- // words, we can assume that io_layout from nnapi always is the same as layout of the used
- // model.
- // TODO Set layout of model
- _execution->setOutput(output_index, type_info, shape, buffer, length, neurun::ir::Layout::NHWC);
- }
- catch (const std::exception &e)
- {
- VERBOSE(EXCEPTION) << e.what() << std::endl;
-
- return false;
- }
-
- return true;
-}
-
-bool ANeuralNetworksExecution::startExecute(void) noexcept
-{
- try
- {
- _execution->startExecute();
- }
- catch (const std::exception &e)
- {
- VERBOSE(EXCEPTION) << e.what() << std::endl;
-
- return false;
- }
-
- return true;
-}
-
-bool ANeuralNetworksExecution::execute(void) noexcept
-{
- try
- {
- _execution->execute();
- }
- catch (const std::exception &e)
- {
- VERBOSE(EXCEPTION) << e.what() << std::endl;
-
- return false;
- }
-
- return true;
-}
-
-const std::shared_ptr<neurun::exec::Execution> ANeuralNetworksExecution::instance(void) noexcept
-{
- return _execution;
-}
-
-bool ANeuralNetworksExecution::getOutputOperandRank(uint32_t index, uint32_t *rank) noexcept
-{
- try
- {
- neurun::ir::IOIndex output_index{index};
- const auto operand_index = getOutputOperandIndex(index);
- bool unspecified = haveUnspecifiedDims(operand_index);
-
- // TODO Get unspecified output operand's rank
- if (unspecified)
- {
- throw std::runtime_error{"Unsupport feature"};
- }
-
- // Check execution is finished
- // Output rank and shape may be decided after execution if output is unspecified operand
- if (!_execution->isFinished())
- {
- return false;
- }
-
- *rank = _execution->graph().operands().at(operand_index).shape().rank();
- }
- catch (const std::exception &e)
- {
- VERBOSE(EXCEPTION) << e.what() << std::endl;
-
- return false;
- }
-
- return true;
-}
-
-bool ANeuralNetworksExecution::getOutputOperandDimensions(uint32_t index, uint32_t *dimensions)
-{
- try
- {
- neurun::ir::IOIndex output_index{index};
- const auto operand_index = getOutputOperandIndex(index);
- bool unspecified = haveUnspecifiedDims(operand_index);
- if (unspecified)
- {
- throw std::runtime_error{"NYI: Models with unspecified output dimensions"};
- }
-
- // Check execution is finished
- // Output rank and shape may be decided after execution if output is unspecified operand
- if (!_execution->isFinished())
- {
- return false;
- }
-
- auto shape = _execution->graph().operands().at(operand_index).shape();
- for (int i = 0; i < shape.rank(); i++)
- {
- auto dim = shape.dim(i);
-
- if (dim <= 0)
- {
- throw std::runtime_error{"Invalid dimension value"};
- }
-
- dimensions[i] = static_cast<uint32_t>(dim);
- }
- }
- catch (const std::exception &e)
- {
- VERBOSE(EXCEPTION) << e.what() << std::endl;
-
- return false;
- }
-
- return true;
-}
diff --git a/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksExecution.h b/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksExecution.h
deleted file mode 100644
index ecffedc0a..000000000
--- a/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksExecution.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __EXECUTION_H__
-#define __EXECUTION_H__
-
-#include <NeuralNetworks.h>
-
-#include <memory>
-
-#include "exec/Execution.h"
-
-struct ANeuralNetworksExecution
-{
-public:
- ANeuralNetworksExecution(const std::shared_ptr<neurun::exec::IExecutor> &executor)
- : _execution{std::make_shared<neurun::exec::Execution>(executor)}
- {
- // DO NOTHING
- }
-
-public:
- bool setInput(uint32_t index, const ANeuralNetworksOperandType *type, const void *buffer,
- size_t length) noexcept;
- bool setOutput(uint32_t index, const ANeuralNetworksOperandType *type, void *buffer,
- size_t length) noexcept;
- bool startExecute(void) noexcept;
- bool execute(void) noexcept;
-
- const neurun::ir::OperandIndex getInputOperandIndex(int32_t index) noexcept;
- const neurun::ir::OperandIndex getOutputOperandIndex(int32_t index) noexcept;
- bool compareDataType(const ANeuralNetworksOperandType *type,
- const neurun::ir::OperandIndex index) noexcept;
- bool compareShape(const ANeuralNetworksOperandType *type,
- const neurun::ir::OperandIndex index) noexcept;
- bool haveUnspecifiedDims(const neurun::ir::OperandIndex index) noexcept;
- size_t getOperandSize(const neurun::ir::OperandIndex index) noexcept;
- const std::shared_ptr<neurun::exec::Execution> instance(void) noexcept;
-
- /**
- * @brief Get output operand's rank
- * @param[in] index Output index
- * @param[out] rank Output operand's rank
- * @return @c true if success to get rank, otherwise @c false
- */
- bool getOutputOperandRank(uint32_t index, uint32_t *rank) noexcept;
- /**
- * @brief Get dimensions of the output operand
- * @param[in] index Output index
- * @param[out] dimensions Output operand's dimensions
- * @return @c true if success to get rank, otherwise @c false
- * @note This must be called after execution is finished to get resolved output shape
- * unspecified in model
- */
- bool getOutputOperandDimensions(uint32_t index, uint32_t *dimensions);
-
-private:
- std::shared_ptr<neurun::exec::Execution> _execution;
-};
-
-#endif
diff --git a/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksMemory.cc b/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksMemory.cc
deleted file mode 100644
index 9cc100585..000000000
--- a/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksMemory.cc
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <NeuralNetworks.h>
-#include <sys/mman.h>
-
-#include "ANeuralNetworksMemory.h"
-
-//
-// ANeuralNetworksMemory
-//
-ANeuralNetworksMemory::ANeuralNetworksMemory(size_t size, int protect, int fd, size_t offset)
-{
- _base = reinterpret_cast<uint8_t *>(mmap(nullptr, size, protect, MAP_PRIVATE, fd, offset));
- _size = size;
-}
-
-ANeuralNetworksMemory::~ANeuralNetworksMemory() { munmap(reinterpret_cast<void *>(_base), _size); }
-
-bool ANeuralNetworksMemory::vaildAccess(size_t offset, size_t length) const
-{
- if ((offset >= _size) || (length > _size))
- {
- return false;
- }
-
- if ((offset + length) >= _size)
- {
- return false;
- }
-
- return true;
-}
diff --git a/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksMemory.h b/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksMemory.h
deleted file mode 100644
index 48a1bc5fc..000000000
--- a/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksMemory.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __MEMORY_H__
-#define __MEMORY_H__
-
-#include <cstdint>
-
-struct ANeuralNetworksMemory
-{
-public:
- ANeuralNetworksMemory(size_t size, int protect, int fd, size_t offset);
- ~ANeuralNetworksMemory();
-
-public:
- size_t size(void) const { return _size; }
- uint8_t *base(void) { return _base; }
- uint8_t *base(void) const { return _base; }
- bool vaildAccess(size_t offset, size_t length) const;
-
-private:
- size_t _size;
- uint8_t *_base;
-};
-
-#endif // __MEMORY_H__
diff --git a/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksModel.cc b/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksModel.cc
deleted file mode 100644
index 5542a2e83..000000000
--- a/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksModel.cc
+++ /dev/null
@@ -1,268 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ANeuralNetworksModel.h"
-#include "OperationFactory.h"
-#include "NNAPIConvert.h"
-
-#include "ir/Operations.Include.h"
-#include "util/logging.h"
-
-#include "cpp14/memory.h"
-
-//
-// ANeuralNetworksModel
-//
-ANeuralNetworksModel::ANeuralNetworksModel() noexcept : _optional_operands{}, _operand_usages{}
-{
- _graph = std::make_shared<neurun::ir::Graph>();
-}
-
-bool ANeuralNetworksModel::addOperand(const ANeuralNetworksOperandType *type) noexcept
-{
- try
- {
- const auto shape = NNAPIConvert::getShape(type);
- const auto typeInfo = NNAPIConvert::getTypeInfo(type);
- _graph->addOperand(shape, typeInfo);
- _operand_usages.emplace_back(OperandUsage::NOT_DEFINED);
- }
- catch (const std::exception &e)
- {
- VERBOSE(EXCEPTION) << e.what() << std::endl;
-
- return false;
- }
-
- return true;
-}
-
-bool ANeuralNetworksModel::setOperandValue(uint32_t index, const void *buffer, size_t length,
- bool optional, bool copy) noexcept
-{
- const neurun::ir::OperandIndex ind{index};
-
- try
- {
- _operand_usages[index] = OperandUsage::CONSTANT;
-
- // Remain operands.at(ind).data()->base() as nullptr for optional operand
- // This will be filled when model finished
- if (optional)
- {
- setOptionalOperand(ind);
- }
-
- using neurun::ir::CachedData;
- using neurun::ir::ExternalData;
- if (copy)
- {
- _graph->operands().at(ind).data(
- nnfw::cpp14::make_unique<CachedData>(reinterpret_cast<const uint8_t *>(buffer), length));
- }
- else
- {
- _graph->operands().at(ind).data(nnfw::cpp14::make_unique<ExternalData>(
- reinterpret_cast<const uint8_t *>(buffer), length));
- }
- }
- catch (const std::exception &e)
- {
- VERBOSE(EXCEPTION) << e.what() << std::endl;
-
- return false;
- }
-
- return true;
-}
-
-bool ANeuralNetworksModel::addOperation(ANeuralNetworksOperationType type, uint32_t inputCount,
- const uint32_t *inputs, uint32_t outputCount,
- const uint32_t *outputs) noexcept
-{
- try
- {
- for (uint32_t i = 0; i < outputCount; i++)
- {
- _operand_usages[outputs[i]] = OperandUsage::OPERATION_OUTPUT;
- }
-
- auto &factory = OperationFactory::get();
- OperationFactory::Param param{inputCount, inputs, outputCount, outputs};
-
- auto node = factory.create(type, param, _graph->operands());
- _graph->addOperation(std::unique_ptr<neurun::ir::Operation>{node});
-
- // TODO Move these codes to delegate.cpp
- if (type == ANEURALNETWORKS_FULLY_CONNECTED)
- {
- const auto &input_operand =
- _graph->operands().at(node->getInputs().at(neurun::ir::operation::FullyConnected::INPUT));
- auto &weights_operand = _graph->operands().at(
- node->getInputs().at(neurun::ir::operation::FullyConnected::WEIGHT));
- if (input_operand.typeInfo().type() == neurun::ir::DataType::FLOAT32 &&
- weights_operand.typeInfo().type() == neurun::ir::DataType::QUANT8_ASYMM)
- {
- weights_operand.type(neurun::ir::DataType::QUANT8_SYMM);
- }
- }
- }
- catch (const std::exception &e)
- {
- VERBOSE(EXCEPTION) << e.what() << std::endl;
-
- return false;
- }
-
- return true;
-}
-
-bool ANeuralNetworksModel::addOperationEx(ANeuralNetworksOperationTypeEx type, uint32_t inputCount,
- const uint32_t *inputs, uint32_t outputCount,
- const uint32_t *outputs) noexcept
-{
- try
- {
- for (uint32_t i = 0; i < outputCount; i++)
- {
- _operand_usages[outputs[i]] = OperandUsage::OPERATION_OUTPUT;
- }
-
- auto &factory = OperationFactory::get();
- OperationFactory::Param param{inputCount, inputs, outputCount, outputs};
-
- auto node = factory.create(type, param, _graph->operands());
- _graph->addOperation(std::unique_ptr<neurun::ir::Operation>{node});
- }
- catch (const std::exception &e)
- {
- return false;
- }
- return true;
-}
-
-bool ANeuralNetworksModel::addModelInput(uint32_t index) noexcept
-{
- try
- {
- _operand_usages[index] = OperandUsage::MODEL_INPUT;
-
- const neurun::ir::OperandIndex ind{index};
- _graph->addInput(ind);
- }
- catch (const std::exception &e)
- {
- VERBOSE(EXCEPTION) << e.what() << std::endl;
-
- return false;
- }
-
- return true;
-}
-bool ANeuralNetworksModel::addModelOutput(uint32_t index) noexcept
-{
- try
- {
- const neurun::ir::OperandIndex ind{index};
-
- // Duplicated output is not allowed
- if (_graph->getOutputs().contains(ind))
- {
- return false;
- }
-
- _graph->addOutput(ind);
- }
- catch (const std::exception &e)
- {
- VERBOSE(EXCEPTION) << e.what() << std::endl;
-
- return false;
- }
-
- return true;
-}
-
-bool ANeuralNetworksModel::finish() noexcept
-{
- try
- {
- fillOptionalOperand();
-
- _graph->finishBuilding();
-
- _operand_usages.clear();
- }
- catch (const std::exception &e)
- {
- VERBOSE(EXCEPTION) << e.what() << '\n';
-
- return false;
- }
-
- return true;
-}
-
-bool ANeuralNetworksModel::isFinished() noexcept { return !_graph->isBuildingPhase(); }
-
-bool ANeuralNetworksModel::isExistOperand(uint32_t index) noexcept
-{
- return _graph->operands().exist(neurun::ir::OperandIndex{index});
-}
-
-size_t ANeuralNetworksModel::operandSize(uint32_t index) noexcept
-{
- try
- {
- return _graph->operands().at(neurun::ir::OperandIndex{index}).operandSize();
- }
- catch (const std::exception &e)
- {
- VERBOSE(EXCEPTION) << e.what() << '\n';
-
- return 0;
- }
-}
-
-bool ANeuralNetworksModel::isUsageSet(uint32_t index) noexcept
-{
- return (_operand_usages[index] != OperandUsage::NOT_DEFINED);
-}
-
-bool ANeuralNetworksModel::isOperationOutput(uint32_t index) noexcept
-{
- return (_operand_usages[index] == OperandUsage::OPERATION_OUTPUT);
-}
-
-void ANeuralNetworksModel::setOptionalOperand(const neurun::ir::OperandIndex idx)
-{
- _optional_operands.insert(idx);
-}
-
-void ANeuralNetworksModel::fillOptionalOperand(void)
-{
- _graph->operations().iterate(
- [&](const neurun::ir::OperationIndex &, neurun::ir::Operation &node) {
- for (auto input : node.getInputs())
- {
- // TODO fill default value for optional operands
- if (_optional_operands.find(input) != _optional_operands.end())
- {
- throw std::runtime_error{"Optional operand is not supported yet"};
- }
- }
- });
-}
diff --git a/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksModel.h b/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksModel.h
deleted file mode 100644
index d364ee39e..000000000
--- a/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksModel.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __MODEL_H__
-#define __MODEL_H__
-
-#include <unordered_set>
-#include <NeuralNetworks.h>
-#include <NeuralNetworksEx.h>
-
-#include "ir/Graph.h"
-
-struct ANeuralNetworksModel
-{
-public:
- enum class OperandUsage
- {
- NOT_DEFINED = 0,
- MODEL_INPUT,
- CONSTANT,
- OPERATION_OUTPUT,
- };
-
-public:
- ANeuralNetworksModel() noexcept;
-
-public:
- bool addOperand(const ANeuralNetworksOperandType *type) noexcept;
- bool setOperandValue(uint32_t index, const void *buffer, size_t length, bool optional = false,
- bool copy = false) noexcept;
- bool addOperation(ANeuralNetworksOperationType type, uint32_t inputCount, const uint32_t *inputs,
- uint32_t outputCount, const uint32_t *outputs) noexcept;
- bool addOperationEx(ANeuralNetworksOperationTypeEx type, uint32_t inputCount,
- const uint32_t *inputs, uint32_t outputCount,
- const uint32_t *outputs) noexcept;
- bool addModelInput(uint32_t index) noexcept;
- bool addModelOutput(uint32_t index) noexcept;
- bool finish() noexcept;
-
- neurun::ir::Graph &deref(void) { return *_graph; }
- bool isFinished() noexcept;
- bool isExistOperand(uint32_t index) noexcept;
- size_t operandSize(uint32_t index) noexcept;
- bool isUsageSet(uint32_t index) noexcept;
- bool isOperationOutput(uint32_t index) noexcept;
- void release(std::shared_ptr<neurun::ir::Graph> &graph) { graph = _graph; }
-
-private:
- void setOptionalOperand(const neurun::ir::OperandIndex idx);
- void fillOptionalOperand(void);
-
-private:
- std::shared_ptr<neurun::ir::Graph> _graph;
- std::unordered_set<neurun::ir::OperandIndex> _optional_operands;
- std::vector<OperandUsage> _operand_usages;
-};
-
-#endif // __MODEL_H__
diff --git a/runtime/neurun/frontend/nnapi/wrapper/NNAPIConvert.cc b/runtime/neurun/frontend/nnapi/wrapper/NNAPIConvert.cc
deleted file mode 100644
index 79589be75..000000000
--- a/runtime/neurun/frontend/nnapi/wrapper/NNAPIConvert.cc
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "NNAPIConvert.h"
-
-#include <numeric>
-
-using namespace neurun::ir;
-
-DataType NNAPIConvert::getDataType(OperandCode type)
-{
- switch (type)
- {
- case ANEURALNETWORKS_FLOAT32:
- case ANEURALNETWORKS_TENSOR_FLOAT32:
- return DataType::FLOAT32;
- case ANEURALNETWORKS_INT32:
- case ANEURALNETWORKS_TENSOR_INT32:
- return DataType::INT32;
- case ANEURALNETWORKS_UINT32:
- return DataType::UINT32;
- case ANEURALNETWORKS_TENSOR_QUANT8_ASYMM:
- return DataType::QUANT8_ASYMM;
- case ANEURALNETWORKS_TENSOR_QUANT8_SYMM:
- return DataType::QUANT8_SYMM;
- case ANEURALNETWORKS_BOOL:
- case ANEURALNETWORKS_TENSOR_BOOL8:
- return DataType::BOOL8;
- default:
- throw std::runtime_error("Unsupported type");
- }
-}
-
-TypeInfo NNAPIConvert::getTypeInfo(const ANeuralNetworksOperandType *type)
-{
- return TypeInfo(getDataType((OperandCode)(type->type)), type->scale, type->zeroPoint);
-}
-
-Shape NNAPIConvert::getShape(const ANeuralNetworksOperandType *type)
-{
- Shape shape(type->dimensionCount);
-
- for (uint32_t axis = 0; axis < type->dimensionCount; ++axis)
- {
- shape.dim(axis) = type->dimensions[axis];
- }
-
- return shape;
-}
-
-size_t NNAPIConvert::calculateSizeFromType(const ANeuralNetworksOperandType *type)
-{
- auto shape = getShape(type);
- auto data_type = getDataType((OperandCode)(type->type));
-
- return shape.num_elements() * sizeOfDataType(data_type);
-}
-
-Activation NNAPIConvert::getFusedActivation(FuseCode act)
-{
- switch (act)
- {
- case ANEURALNETWORKS_FUSED_NONE:
- return Activation::NONE;
- case ANEURALNETWORKS_FUSED_RELU:
- return Activation::RELU;
- case ANEURALNETWORKS_FUSED_RELU1:
- return Activation::RELU1;
- case ANEURALNETWORKS_FUSED_RELU6:
- return Activation::RELU6;
- default:
- throw std::runtime_error("Unsupported activation type");
- }
-}
-
-PaddingType NNAPIConvert::getPaddingType(PaddingCode type)
-{
- switch (type)
- {
- case ANEURALNETWORKS_PADDING_SAME:
- return PaddingType::SAME;
- case ANEURALNETWORKS_PADDING_VALID:
- return PaddingType::VALID;
- default:
- throw std::runtime_error("Unsupported type");
- }
-}
diff --git a/runtime/neurun/frontend/nnapi/wrapper/NNAPIConvert.h b/runtime/neurun/frontend/nnapi/wrapper/NNAPIConvert.h
deleted file mode 100644
index 91f84b983..000000000
--- a/runtime/neurun/frontend/nnapi/wrapper/NNAPIConvert.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file NNAPIConvert.h
- * @brief This file contains convereter(s)\n
- * from NNAPI frontend's struct to neurun's internal struct
- */
-#ifndef __NEURUN_NNAPI_CONVERT_H__
-#define __NEURUN_NNAPI_CONVERT_H__
-
-#include <NeuralNetworks.h>
-
-#include <ir/TypeInfo.h>
-#include <ir/Shape.h>
-#include <ir/InternalType.h>
-
-class NNAPIConvert
-{
-
-public:
- /**
- * @brief Convert data type from NNAPI to internal data type
- * @param[in] type NNAPI's data type
- * @return neurun's internal data type
- */
- static neurun::ir::DataType getDataType(OperandCode type);
-
- /**
- * @brief Convert operand type info from NNAPI to interanl operand type info
- * @param[in] type NNAPI's operand type
- * @return neurun's internal operand type info
- */
- static neurun::ir::TypeInfo getTypeInfo(const ANeuralNetworksOperandType *type);
-
- /**
- * @brief Convert operand shape info from NNAPI to internal operand shape
- * @param[in] type NNAPI's operand type
- * @return neurun's internal operand shape
- */
- static neurun::ir::Shape getShape(const ANeuralNetworksOperandType *type);
-
- /**
- * @brief Calcaulate operand size from NNAPI type
- * @param[in] type NNAPI's operand type
- * @return Operand size
- */
- static size_t calculateSizeFromType(const ANeuralNetworksOperandType *type);
-
- /**
- * @brief Convert NNAPI FuseCode to internal activation type
- * @param[in] act NNAPI's FuseCode type
- * @return neurun's internal activation type
- */
- static neurun::ir::Activation getFusedActivation(FuseCode act);
-
- /**
- * @brief Convert NNAPI PaddingCode to internal padding type
- * @param[in] type NNAPI's PaddingCode type
- * @return neurun's internal padding type
- */
- static neurun::ir::PaddingType getPaddingType(PaddingCode type);
-};
-
-#endif // __NEURUN_NNAPI_CONVERT_H__
diff --git a/runtime/neurun/frontend/nnapi/wrapper/OperationFactory.cc b/runtime/neurun/frontend/nnapi/wrapper/OperationFactory.cc
deleted file mode 100644
index 84f876e86..000000000
--- a/runtime/neurun/frontend/nnapi/wrapper/OperationFactory.cc
+++ /dev/null
@@ -1,1680 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "OperationFactory.h"
-#include "NNAPIConvert.h"
-
-#include <ir/Operations.Include.h>
-#include <string.h>
-
-namespace
-{
-using namespace neurun::ir;
-
-void replaceDataType(Operands &operands, const OperandIndex &index, const DataType type)
-{
- assert(operands.exist(index));
- operands.at(index).type(type);
-}
-
-ExplicitPadding makeExplicitPadding(Operands &operands, const OperandIndex &left_index,
- const OperandIndex &right_index, const OperandIndex &top_index,
- const OperandIndex &bottom_index)
-{
- auto left = operands.at(left_index).asScalar<int32_t>();
- auto right = operands.at(right_index).asScalar<int32_t>();
- auto top = operands.at(top_index).asScalar<int32_t>();
- auto bottom = operands.at(bottom_index).asScalar<int32_t>();
-
- if (left < 0 || right < 0 || top < 0 || bottom < 0)
- {
- throw std::runtime_error{"Cannot handle negative explicit padding value"};
- }
-
- ExplicitPadding param;
- param.left = static_cast<uint32_t>(left);
- param.right = static_cast<uint32_t>(right);
- param.top = static_cast<uint32_t>(top);
- param.bottom = static_cast<uint32_t>(bottom);
-
- return param;
-}
-
-Stride makeStride(Operands &operands, const OperandIndex &horizontal_index,
- const OperandIndex &vertical_index)
-{
- auto horizontal = operands.at(horizontal_index).asScalar<int32_t>();
- auto vertical = operands.at(vertical_index).asScalar<int32_t>();
-
- if (vertical < 0 || horizontal < 0)
- {
- throw std::runtime_error{"Cannot handle negative stride value"};
- }
-
- Stride stride;
- stride.horizontal = static_cast<uint32_t>(horizontal);
- stride.vertical = static_cast<uint32_t>(vertical);
-
- return stride;
-}
-
-uint32_t getUint32Scalar(Operands &operands, const OperandIndex index)
-{
- auto int32_value = operands.at(index).asScalar<int32_t>();
- if (int32_value < 0)
- {
- throw std::runtime_error{"Cannot handle negative value"};
- }
-
- return static_cast<uint32_t>(int32_value);
-}
-
-} // namespace
-
-OperationFactory &OperationFactory::get()
-{
- static OperationFactory factory;
- return factory;
-}
-
-OperationFactory::OperationFactory()
-{
- _map[ANEURALNETWORKS_BATCH_TO_SPACE_ND] = [](const OperationFactory::Param &init_param,
- Operands &) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Input Tensor Index
- // 1 -> Block size Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
-
- return new operation::BatchToSpaceND{inputs, outputs};
- };
-
- _map[ANEURALNETWORKS_DEPTHWISE_CONV_2D] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert((init_param.input_count == 8 || init_param.input_count == 11) &&
- init_param.output_count == 1);
-
- // In common
- // 0 -> IFM Tensor Index
- // 1 -> Kernel Tensor Index
- // 2 -> Bias Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- operation::DepthwiseConv2D::Param param;
- if (init_param.input_count == 8)
- {
- // Imlicit Padding case
- // Each input should be interpreted as follows:
- //
- // 3 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
- // 4 -> Stride (width) Index
- // 5 -> Stride (height) INdex
- // 6 -> Depthwise multiplier
- // 7 -> Activation Index
-
- const auto padding_index = OperandIndex{init_param.inputs[3]};
- const auto hstride_index = OperandIndex{init_param.inputs[4]};
- const auto vstride_index = OperandIndex{init_param.inputs[5]};
- const auto multiplier_index = OperandIndex{init_param.inputs[6]};
- const auto activation_index = OperandIndex{init_param.inputs[7]};
-
- param.padding.type =
- NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>());
- param.stride = makeStride(operands, hstride_index, vstride_index);
- param.multiplier = getUint32Scalar(operands, multiplier_index);
- param.activation =
- NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
- }
- else
- {
- // Explicit Padding case
- // Each input should be interpreted as follows:
- //
- // 3 -> Padding On the Left
- // 4 -> Padding On the Right
- // 5 -> Padding On the Top
- // 6 -> Padding On the Bottom
- // 7 -> Stride (width) Index
- // 8 -> Stride (height) Index
- // 9 -> Depthwise multiplier
- // 10-> Activation Index
-
- const auto padding_left_index = OperandIndex{init_param.inputs[3]};
- const auto padding_right_index = OperandIndex{init_param.inputs[4]};
- const auto padding_top_index = OperandIndex{init_param.inputs[5]};
- const auto padding_bottom_index = OperandIndex{init_param.inputs[6]};
- const auto hstride_index = OperandIndex{init_param.inputs[7]};
- const auto vstride_index = OperandIndex{init_param.inputs[8]};
- const auto multiplier_index = OperandIndex{init_param.inputs[9]};
- const auto activation_index = OperandIndex{init_param.inputs[10]};
-
- param.padding.type = PaddingType::EXPLICIT;
- param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index,
- padding_top_index, padding_bottom_index);
- param.stride = makeStride(operands, hstride_index, vstride_index);
- param.multiplier = getUint32Scalar(operands, multiplier_index);
- param.activation =
- NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
- }
-
- return new operation::DepthwiseConv2D{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_MAX_POOL_2D] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 7 || init_param.input_count == 10);
- assert(init_param.output_count == 1);
-
- // In common
- // 0 -> IFM Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0]};
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- operation::MaxPool2D::Param param;
- if (init_param.input_count == 7) // support implicit padding
- {
- // Each input should be interpreted as follows:
- //
- // 1 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
- // 2 -> Horizontal (over width) Stride Index
- // 3 -> Vertial (over height) Stride Index
- // 4 -> Filter Width Index
- // 5 -> Filter Height Index
- // 6 -> FuseCode (activation) Index
-
- const auto padding_index = OperandIndex{init_param.inputs[1]};
- const auto hstride_index = OperandIndex{init_param.inputs[2]};
- const auto vstride_index = OperandIndex{init_param.inputs[3]};
- const auto kw_index = OperandIndex{init_param.inputs[4]};
- const auto kh_index = OperandIndex{init_param.inputs[5]};
- const auto activation_index = OperandIndex{init_param.inputs[6]};
-
- param.padding.type =
- NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>());
- param.stride = makeStride(operands, hstride_index, vstride_index);
- param.kw = getUint32Scalar(operands, kw_index);
- param.kh = operands.at(kh_index).asScalar<uint32_t>();
- param.activation =
- NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
- }
- else if (init_param.input_count == 10) // support explicit padding
- {
- // Each input should be interpreted as follows:
- //
- // 1 -> Padding_left index
- // 2 -> Padding_right index
- // 3 -> Padding_top index
- // 4 -> Padding_bottom index
- // 5 -> Horizontal (over width) Stride Index
- // 6 -> Vertial (over height) Stride Index
- // 7 -> Filter Width Index
- // 8 -> Filter Height Index
- // 9 -> FuseCode (activation) Index
-
- const auto padding_left_index = OperandIndex{init_param.inputs[1]};
- const auto padding_right_index = OperandIndex{init_param.inputs[2]};
- const auto padding_top_index = OperandIndex{init_param.inputs[3]};
- const auto padding_bottom_index = OperandIndex{init_param.inputs[4]};
- const auto hstride_index = OperandIndex{init_param.inputs[5]};
- const auto vstride_index = OperandIndex{init_param.inputs[6]};
- const auto kw_index = OperandIndex{init_param.inputs[7]};
- const auto kh_index = OperandIndex{init_param.inputs[8]};
- const auto activation_index = OperandIndex{init_param.inputs[9]};
-
- param.padding.type = PaddingType::EXPLICIT;
- param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index,
- padding_top_index, padding_bottom_index);
- param.stride = makeStride(operands, hstride_index, vstride_index);
- param.kw = getUint32Scalar(operands, kw_index);
- param.kh = getUint32Scalar(operands, kh_index);
- param.activation =
- NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
- }
- return new operation::MaxPool2D{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_AVERAGE_POOL_2D] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- // TODO We may reuse code here for MAX_POOL_2D. Seems like these two are identical
- assert(init_param.input_count == 7 || init_param.input_count == 10);
- assert(init_param.output_count == 1);
-
- // In common
- // 0 -> IFM Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0]};
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- operation::AvgPool2D::Param param;
- if (init_param.input_count == 7) // support implicit padding
- {
- // Each input should be interpreted as follows:
- //
- // 1 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
- // 2 -> Horizontal (over width) Stride Index
- // 3 -> Vertial (over height) Stride Index
- // 4 -> Filter Width Index
- // 5 -> Filter Height Index
- // 6 -> FuseCode (activation) Index
-
- const auto padding_index = OperandIndex{init_param.inputs[1]};
- const auto hstride_index = OperandIndex{init_param.inputs[2]};
- const auto vstride_index = OperandIndex{init_param.inputs[3]};
- const auto kw_index = OperandIndex{init_param.inputs[4]};
- const auto kh_index = OperandIndex{init_param.inputs[5]};
- const auto activation_index = OperandIndex{init_param.inputs[6]};
-
- param.padding.type =
- NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>());
- param.stride = makeStride(operands, hstride_index, vstride_index);
- param.kw = getUint32Scalar(operands, kw_index);
- param.kh = getUint32Scalar(operands, kh_index);
- param.activation =
- NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
- }
- else if (init_param.input_count == 10) // support explicit padding
- {
- // Each input should be interpreted as follows:
- //
- // 1 -> Padding_left index
- // 2 -> Padding_right index
- // 3 -> Padding_top index
- // 4 -> Padding_bottom index
- // 5 -> Horizontal (over width) Stride Index
- // 6 -> Vertial (over height) Stride Index
- // 7 -> Filter Width Index
- // 8 -> Filter Height Index
- // 9 -> FuseCode (activation) Index
-
- const auto padding_left_index = OperandIndex{init_param.inputs[1]};
- const auto padding_right_index = OperandIndex{init_param.inputs[2]};
- const auto padding_top_index = OperandIndex{init_param.inputs[3]};
- const auto padding_bottom_index = OperandIndex{init_param.inputs[4]};
- const auto hstride_index = OperandIndex{init_param.inputs[5]};
- const auto vstride_index = OperandIndex{init_param.inputs[6]};
- const auto kw_index = OperandIndex{init_param.inputs[7]};
- const auto kh_index = OperandIndex{init_param.inputs[8]};
- const auto activation_index = OperandIndex{init_param.inputs[9]};
-
- param.padding.type = PaddingType::EXPLICIT;
- param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index,
- padding_top_index, padding_bottom_index);
- param.stride = makeStride(operands, hstride_index, vstride_index);
- param.kw = getUint32Scalar(operands, kw_index);
- param.kh = getUint32Scalar(operands, kh_index);
- param.activation =
- NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
- }
-
- return new operation::AvgPool2D{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_CONCATENATION] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count >= 2); // At least one one input tensor and axis
- assert(init_param.output_count == 1);
-
- // When there are N + 1 inputs, each input should be interpreted as follows:
- //
- // [0, N) -> Input tensors
- // N -> Axis
- //
-
- OperandIndexSequence inputs;
- for (uint32_t n = 0; n < init_param.input_count - 1; ++n)
- {
- inputs.append(OperandIndex{init_param.inputs[n]});
- }
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- operation::Concat::Param param;
- const OperandIndex axis_index{init_param.inputs[init_param.input_count - 1]};
- param.axis = operands.at(axis_index).asScalar<int32_t>();
- param.rank = operands.at(outputs.at(0)).shape().rank();
-
- return new operation::Concat{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_RESHAPE] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- // Each input should be interpreted as follows:
- //
- // 0 -> A tensor, specifying the tensor to be reshaped.
- // 1 -> A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32, defining the shape of the output
- // tensor
-
- // TODO Second input should be shape tensor (init_param.inputs[1])
- // Currently unused since assume that it is same with output tensor size
- OperandIndexSequence inputs{init_param.inputs[0] /* , init_param.inputs[1] */};
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- return new operation::Reshape{inputs, outputs};
- };
-
- _map[ANEURALNETWORKS_FULLY_CONNECTED] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 4 && init_param.output_count == 1);
-
- // Each input should be interpreted as follows:
- //
- // 0 -> A tensor, specifying the input.
- // 1 -> A 2-D tensor, specifying the weights
- // 2 -> A 1-D tensor, specifying the bias
- // 3 -> An INT32 value, and has to be one of the FuseCode values
-
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- operation::FullyConnected::Param param;
- const auto activation_index = OperandIndex{init_param.inputs[3]};
- param.activation =
- NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
-
- return new operation::FullyConnected{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_SOFTMAX] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- // Each input should be interpreted as follows:
- //
- // 0 -> A 2-D or 4-D tensor, specifying the tensor to be reshaped.
- // 1 -> FLOAT32 value, specifying the positive scaling factor for the exponent, beta.
-
- OperandIndexSequence inputs{init_param.inputs[0]};
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- const auto beta_index = OperandIndex{init_param.inputs[1]};
-
- operation::Softmax::Param param;
- param.beta = operands.at(beta_index).asScalar<float>();
-
- return new operation::Softmax{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_CAST] = [](const OperationFactory::Param &init_param, Operands &operands) {
- assert(init_param.input_count == 1 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- // 0 -> input Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0]};
-
- // NNAPI uses QUANT8_ASYMM to represent UINT8 type for ANEURALNETWORKS_CAST's input/output
- if (operands.at(inputs.at(0)).typeInfo().type() == DataType::QUANT8_ASYMM)
- {
- replaceDataType(operands, inputs.at(0), DataType::UINT8);
- }
- if (operands.at(outputs.at(0)).typeInfo().type() == DataType::QUANT8_ASYMM)
- {
- replaceDataType(operands, outputs.at(0), DataType::UINT8);
- }
-
- return new operation::Cast{inputs, outputs};
- };
-
- // ANEURALNETWORKS_CAST_EX is deprecated
- // TODO Remove ANEURALNETWORKS_CAST_EX
- _map[ANEURALNETWORKS_CAST_EX] = _map[ANEURALNETWORKS_CAST];
-
- _map[ANEURALNETWORKS_CONV_2D] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- using operation::Conv2D;
-
- // inputCount is either 7 or 10 acccording to NN API specification.
- // - Padding is implicit when inputCount is 7
- // - Padding is explicit when inputCount is 10
- assert(init_param.input_count == 7 || init_param.input_count == 10);
- assert(init_param.output_count == 1);
-
- // 0 -> IFM Tensor Index
- // 1 -> Kernel Tensor Index
- // 2 -> Bias Tensor Index
-
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- Conv2D::Param param;
-
- if (init_param.input_count == 7) // support implicit padding
- {
- // Each input should be interpreted as follows:
- //
- // 3 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
- // 4 -> Stride (width) Index
- // 5 -> Stride (height) INdex
- // 6 -> Activation Index
-
- const auto padding_index = OperandIndex{init_param.inputs[3]};
- const auto hstride_index = OperandIndex{init_param.inputs[4]};
- const auto vstride_index = OperandIndex{init_param.inputs[5]};
- const auto activation_index = OperandIndex{init_param.inputs[6]};
-
- param.padding.type =
- NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>());
- param.stride = makeStride(operands, hstride_index, vstride_index);
- param.activation =
- NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
- }
- else if (init_param.input_count == 10) // support explicit padding
- {
- // Each input should be interpreted as follows:
- //
- // 3 -> Padding_left index
- // 4 -> Padding_right index
- // 5 -> Padding_top index
- // 6 -> Padding_bottom index
- // 7 -> Stride (width) Index
- // 8 -> Stride (height) INdex
- // 9 -> Activation Index
-
- const auto padding_left_index = OperandIndex{init_param.inputs[3]};
- const auto padding_right_index = OperandIndex{init_param.inputs[4]};
- const auto padding_top_index = OperandIndex{init_param.inputs[5]};
- const auto padding_bottom_index = OperandIndex{init_param.inputs[6]};
- const auto hstride_index = OperandIndex{init_param.inputs[7]};
- const auto vstride_index = OperandIndex{init_param.inputs[8]};
- const auto activation_index = OperandIndex{init_param.inputs[9]};
-
- param.padding.type = PaddingType::EXPLICIT;
- param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index,
- padding_top_index, padding_bottom_index);
- param.stride = makeStride(operands, hstride_index, vstride_index);
- param.activation =
- NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
- }
-
- return new Conv2D{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_ADD] = [](const OperationFactory::Param &init_param, Operands &operands) {
- assert(init_param.input_count == 3);
- assert(init_param.output_count == 1);
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Lefthand side operand
- // 1 -> Righthand side operand
-
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- operation::Add::Param param;
-
- const auto activation_index = OperandIndex{init_param.inputs[2]};
- param.activation =
- NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
-
- return new operation::Add{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_REDUCE_SUM_EX] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 3);
- assert(init_param.output_count == 1);
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Input Tensor Index
- // 1 -> Reduced Axes Tensor Index
- // 2 -> keep_dims Index
-
- OperandIndexSequence inputs{init_param.inputs[0]};
- OperandIndexSequence outputs{init_param.outputs[0]};
- std::vector<std::int32_t> axes =
- operands.at(OperandIndex{init_param.inputs[1]}).asVector<std::int32_t>();
-
- operation::ReduceSum::Param param;
- param.axes.assign(axes.cbegin(), axes.cend());
- param.keep_dims = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<int32_t>() != 0;
- param.rank = operands.at(inputs.at(0)).shape().rank();
-
- return new operation::ReduceSum{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_SUB] = [](const OperationFactory::Param &init_param, Operands &operands) {
- assert(init_param.input_count == 3);
- assert(init_param.output_count == 1);
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Lefthand side operand
- // 1 -> Righthand side operand
-
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- operation::Sub::Param param;
-
- const auto activation_index = OperandIndex{init_param.inputs[2]};
- param.activation =
- NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
-
- return new operation::Sub{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_SLICE] = [](const OperationFactory::Param &init_param, Operands &operands) {
- assert(init_param.input_count == 3 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Input Tensor Index
- // 1 -> Begins Tensor Index
- // 2 -> Sizes Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
-
- operation::Slice::Param param;
- param.rank = operands.at(inputs.at(0)).shape().rank();
-
- return new operation::Slice{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_STRIDED_SLICE] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 7 && init_param.output_count == 1);
-
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2],
- init_param.inputs[3]};
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 1 -> A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the starts of
- // the dimensions of the input tensor to be sliced. The length must be
- // of rank(input0).
- // 2 -> A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the ends of
- // the dimensions of the input tensor to be sliced. The length must be
- // of rank(input0).
- // 3 -> A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the strides of
- // the dimensions of the input tensor to be sliced. The length must be
- // of rank(input0).
- // 4 -> An {@link ANEURALNETWORKS_INT32} scalar, begin_mask. If the ith bit
- // of begin_mask is set, begin[i] is ignored and the fullest possible
- // range in that dimension is used instead.
- // 5 -> An {@link ANEURALNETWORKS_INT32} scalar, end_mask. If the ith bit of
- // end_mask is set, end[i] is ignored and the fullest possible range in
- // that dimension is used instead.
- // 6 -> An {@link ANEURALNETWORKS_INT32} scalar, shrink_axis_mask. An int32
- // mask. If the ith bit of shrink_axis_mask is set, it implies that the
- // ith specification shrinks the dimensionality by 1. A slice of size 1
- // starting from begin[i] in the dimension must be preserved.
-
- operation::StridedSlice::Param param;
-
- param.begin_mask = operands.at(OperandIndex{init_param.inputs[4]}).asScalar<std::int32_t>();
- param.end_mask = operands.at(OperandIndex{init_param.inputs[5]}).asScalar<std::int32_t>();
- param.shrink_axis_mask =
- operands.at(OperandIndex{init_param.inputs[6]}).asScalar<std::int32_t>();
- param.rank = operands.at(inputs.at(0)).shape().rank();
-
- return new operation::StridedSlice{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_TRANSPOSE] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- // TODO make this work with init_param.input_count == 1 (when permutation vector is optional)
-
- // Inputs
- // 0: An n-D tensor, specifying the tensor to be transposed.
- // 1: An optional 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32},
- // the permutation of the dimensions of the input tensor.
- // The returned tensor's dimension i corresponds to the input dimension
- // perm[i]. If perm is not given, it is set to (n-1...0), where n is the
- // rank of the input tensor. Hence by default, this operation performs a
- // regular matrix transpose on 2-D input Tensors.
- assert(init_param.input_count == 2);
- assert(init_param.output_count == 1);
-
- OperandIndexSequence inputs{init_param.inputs[0]};
- OperandIndexSequence outputs{init_param.outputs[0]};
- std::vector<std::int32_t> perm =
- operands.at(OperandIndex{init_param.inputs[1]}).asVector<std::int32_t>();
-
- operation::Transpose::Param param;
- param.perm.assign(perm.cbegin(), perm.cend());
- param.rank = operands.at(inputs.at(0)).shape().rank();
-
- return new operation::Transpose{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_MUL] = [](const OperationFactory::Param &init_param, Operands &operands) {
- assert(init_param.input_count == 3 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> LHS Tensor Index
- // 1 -> RHS Tensor Index
- // 2 -> Activation Index
-
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
-
- operation::Mul::Param param;
-
- const auto activation_index = OperandIndex{init_param.inputs[2]};
- param.activation =
- NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
-
- return new operation::Mul{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_SQUEEZE] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 1 || init_param.input_count == 2);
- assert(init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> An n-D tensor, the tensor to be squeezed.
- // 1 -> An optional 1-D tensor of ANEURALNETWORKS_TENSOR_INT32. The dimensions to squeeze.
- // If specified only squeezes the dimensions listed. Otherwise, squeezes all dimensions.
- // The dimension index starts at 0. An error must be reported if squeezing a dimension that
- // is not 1.
-
- // Add mandatory input index
- OperandIndexSequence inputs{init_param.inputs[0]};
-
- // Add dims index if specified
- operation::Squeeze::Param param{};
- if (init_param.input_count == 2)
- {
- auto squeeze_dims_idx = OperandIndex{init_param.inputs[1]};
- assert(operands.at(squeeze_dims_idx).shape().rank() == 1);
- assert(operands.at(squeeze_dims_idx).shape().dim(0) >= 0);
- assert(static_cast<uint32_t>(operands.at(squeeze_dims_idx).shape().dim(0)) <=
- sizeof(param.dims));
- param.ndim = operands.at(squeeze_dims_idx).shape().dim(0);
- if (param.ndim > 0)
- memcpy(param.dims, operands.at(squeeze_dims_idx).data().base(),
- param.ndim * sizeof(param.dims[0]));
- }
-
- return new operation::Squeeze{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_TANH] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 1 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Input Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0]};
-
- return new operation::Tanh{inputs, outputs};
- };
-
- _map[ANEURALNETWORKS_LOGISTIC] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 1 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Input Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0]};
-
- return new operation::Logistic{inputs, outputs};
- };
-
- _map[ANEURALNETWORKS_DIV] = [](const OperationFactory::Param &init_param, Operands &operands) {
- assert(init_param.input_count == 3 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> LHS Tensor Index
- // 1 -> RHS Tensor Index
- // 2 -> Activation Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
-
- operation::Div::Param param;
-
- const auto activation_index = OperandIndex{init_param.inputs[2]};
- param.activation =
- NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
-
- return new operation::Div{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_EXP] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 1 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Input Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0]};
-
- return new operation::Exp{inputs, outputs};
- };
-
- // ANEURALNETWORKS_EXP_EX is deprecated
- // TODO Remove ANEURALNETWORKS_EXP_EX
- _map[ANEURALNETWORKS_EXP_EX] = _map[ANEURALNETWORKS_EXP];
-
- _map[ANEURALNETWORKS_GREATER_EQUAL_EX] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> input0 Tensor Index
- // 1 -> input1 Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
-
- operation::Comparison::Param param;
- param.comparison_type = operation::Comparison::ComparisonType::GreaterEqual;
-
- // Output operand type must be boolean
- replaceDataType(operands, outputs.at(0), DataType::BOOL8);
-
- return new operation::Comparison{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_LESS_EX] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> input0 Tensor Index
- // 1 -> input1 Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
-
- operation::Comparison::Param param;
- param.comparison_type = operation::Comparison::ComparisonType::Less;
-
- // Output operand type must be boolean
- replaceDataType(operands, outputs.at(0), DataType::BOOL8);
-
- return new operation::Comparison{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_REDUCE_MAX] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 3 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Input Tensor Index
- // 1 -> Axis Tensor Index
- // 2 -> keep_dims Index
- OperandIndexSequence inputs{init_param.inputs[0]};
- std::vector<std::int32_t> axes =
- operands.at(OperandIndex{init_param.inputs[1]}).asVector<std::int32_t>();
-
- operation::ReduceMax::Param param;
- param.axes.assign(axes.cbegin(), axes.cend());
- param.keep_dims = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<int8_t>() != 0;
- param.rank = operands.at(inputs.at(0)).shape().rank();
-
- return new operation::ReduceMax{inputs, outputs, param};
- };
-
- // ANEURALNETWORKS_REDUCE_MAX_EX is deprecated
- // TODO Remove ANEURALNETWORKS_REDUCE_MAX_EX
- _map[ANEURALNETWORKS_REDUCE_MAX_EX] = _map[ANEURALNETWORKS_REDUCE_MAX];
-
- _map[ANEURALNETWORKS_NOT_EQUAL_EX] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> input1 Tensor Index
- // 1 -> input2 Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
-
- operation::Comparison::Param param;
- param.comparison_type = operation::Comparison::ComparisonType::NotEqual;
-
- // Output operand type must be boolean
- replaceDataType(operands, outputs.at(0), DataType::BOOL8);
-
- return new operation::Comparison{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_LOGICAL_AND_EX] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> input0 Tensor Index
- // 1 -> input1 Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
-
- // This operation's operands must be boolean type.
- replaceDataType(operands, inputs.at(0), DataType::BOOL8);
- replaceDataType(operands, inputs.at(1), DataType::BOOL8);
- replaceDataType(operands, outputs.at(0), DataType::BOOL8);
-
- return new operation::LogicalAnd{inputs, outputs};
- };
-
- _map[ANEURALNETWORKS_RSQRT] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 1 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Input Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0]};
-
- return new operation::RSQRT{inputs, outputs};
- };
-
- // ANEURALNETWORKS_RSQRT_EX is deprecated
- // TODO Remove ANEURALNETWORKS_RSQRT_EX
- _map[ANEURALNETWORKS_RSQRT_EX] = _map[ANEURALNETWORKS_RSQRT];
-
- _map[ANEURALNETWORKS_RELU] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 1 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Input Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0]};
-
- return new operation::ReLU{inputs, outputs};
- };
-
- _map[ANEURALNETWORKS_RESIZE_BILINEAR] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 3 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> IFM Index
- // 1 -> Height Index
- // 2 -> Width Index
- OperandIndexSequence inputs{init_param.inputs[0]};
-
- operation::ResizeBilinear::Param param;
- param.height_out = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<int32_t>();
- param.width_out = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<int32_t>();
-
- return new operation::ResizeBilinear{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_RELU1] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 1 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> input Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0]};
-
- return new operation::ReLU1{inputs, outputs};
- };
-
- _map[ANEURALNETWORKS_RELU6] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 1 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> input Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0]};
-
- return new operation::ReLU6{inputs, outputs};
- };
-
- _map[ANEURALNETWORKS_RNN] = [](const OperationFactory::Param &init_param, Operands &operands) {
- assert(init_param.input_count == 6 && init_param.output_count == 2);
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Input Tensor Index
- // 1 -> Weights Tensor Index
- // 2 -> Recurrent Weights Tensor Index
- // 3 -> Bias Tensor Index
- // 4 -> Hidden state (in) Index
- // 5 -> Activation Index
-
- OperandIndexSequence inputs;
- for (uint32_t n = 0; n < init_param.input_count - 1; ++n)
- {
- inputs.append(OperandIndex{init_param.inputs[n]});
- }
- OperandIndexSequence outputs;
- for (uint32_t n = 0; n < init_param.output_count; ++n)
- {
- outputs.append(OperandIndex{init_param.outputs[n]});
- }
-
- operation::RNN::Param param;
- const auto activation_index = OperandIndex{init_param.inputs[5]};
- param.activation =
- NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
-
- return new operation::RNN{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_FLOOR] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 1 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- // 0 -> input Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0]};
-
- return new operation::Floor{inputs, outputs};
- };
-
- _map[ANEURALNETWORKS_SPACE_TO_BATCH_ND] = [](const OperationFactory::Param &init_param,
- Operands &) {
- assert(init_param.input_count == 3 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Input Tensor Index
- // 1 -> Block size Index
- // 2 -> Paddings Index
- OperandIndexSequence inputs;
- for (uint32_t n = 0; n < init_param.input_count; ++n)
- {
- inputs.append(OperandIndex{init_param.inputs[n]});
- }
-
- return new operation::SpaceToBatchND{inputs, outputs};
- };
-
- _map[ANEURALNETWORKS_SPACE_TO_DEPTH] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Input Tensor Index
- // 1 -> Block size Index
- OperandIndexSequence inputs{init_param.inputs[0]};
-
- operation::SpaceToDepth::Param param;
- param.block_size = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>();
-
- return new operation::SpaceToDepth{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_L2_POOL_2D] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 10 || init_param.input_count == 7);
- assert(init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> IFM Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0]};
-
- operation::L2Pool2D::Param param;
-
- if (init_param.input_count == 7) // Imlicit Padding case
- {
- // 1 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
- // 2 -> Horizontal (over width) Stride Index
- // 3 -> Vertial (over height) Stride Index
- // 4 -> Filter Width Index
- // 5 -> Filter Height Index
- // 6 -> FuseCode (activation) Index
- const auto padding_index = OperandIndex{init_param.inputs[1]};
- const auto hstride_index = OperandIndex{init_param.inputs[2]};
- const auto vstride_index = OperandIndex{init_param.inputs[3]};
- const auto kw_index = OperandIndex{init_param.inputs[4]};
- const auto kh_index = OperandIndex{init_param.inputs[5]};
- const auto activation_index = OperandIndex{init_param.inputs[6]};
-
- param.padding.type =
- NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>());
- param.stride = makeStride(operands, hstride_index, vstride_index);
- param.kw = getUint32Scalar(operands, kw_index);
- param.kh = getUint32Scalar(operands, kh_index);
- param.activation =
- NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
- }
- else // Explicit Padding case
- {
- // 1 -> Padding_left index
- // 2 -> Padding_right index
- // 3 -> Padding_top index
- // 4 -> Padding_bottom index
- // 5 -> Horizontal (over width) Stride Index
- // 6 -> Vertial (over height) Stride Index
- // 7 -> Filter Width Index
- // 8 -> Filter Height Index
- // 9 -> FuseCode (activation) Index
- const auto padding_left_index = OperandIndex{init_param.inputs[1]};
- const auto padding_right_index = OperandIndex{init_param.inputs[2]};
- const auto padding_top_index = OperandIndex{init_param.inputs[3]};
- const auto padding_bottom_index = OperandIndex{init_param.inputs[4]};
- const auto hstride_index = OperandIndex{init_param.inputs[5]};
- const auto vstride_index = OperandIndex{init_param.inputs[6]};
- const auto kw_index = OperandIndex{init_param.inputs[7]};
- const auto kh_index = OperandIndex{init_param.inputs[8]};
- const auto activation_index = OperandIndex{init_param.inputs[9]};
-
- param.padding.type = PaddingType::EXPLICIT;
- param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index,
- padding_top_index, padding_bottom_index);
- param.stride = makeStride(operands, hstride_index, vstride_index);
- param.kw = getUint32Scalar(operands, kw_index);
- param.kh = getUint32Scalar(operands, kh_index);
- param.activation =
- NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
- }
-
- return new operation::L2Pool2D{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_EMBEDDING_LOOKUP] = [](const OperationFactory::Param &init_param,
- Operands &) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Lookups Index
- // 1 -> Values Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
-
- return new operation::EmbeddingLookup{inputs, outputs};
- };
-
- _map[ANEURALNETWORKS_L2_NORMALIZATION] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 1 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- // 0 -> input Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0]};
-
- operation::L2Normalization::Param param;
- param.rank = operands.at(inputs.at(0)).shape().rank();
-
- return new operation::L2Normalization{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_HASHTABLE_LOOKUP] = [](const OperationFactory::Param &init_param,
- Operands &) {
- assert(init_param.input_count == 3 && init_param.output_count == 2);
-
- // Each output should be interpreted as follows:
- //
- // 0 -> Output Index
- // 1 -> Hits Index
- OperandIndexSequence outputs{init_param.outputs[0], init_param.outputs[1]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Lookups Index
- // 1 -> Keys Index
- // 2 -> Values Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
-
- return new operation::HashtableLookup{inputs, outputs};
- };
-
- _map[ANEURALNETWORKS_PRELU_EX] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> input Tensor Index
- // 1 -> alpha Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
-
- return new operation::PReLU{inputs, outputs};
- };
-
- _map[ANEURALNETWORKS_TRANSPOSE_CONV_EX] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 6 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Output Shape Index
- // 1 -> Weights Index
- // 2 -> Input Tensor Index
- // 3 -> Padding Type
- // 4 -> Stride width
- // 5 -> Stride height
-
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
-
- operation::TransposeConv::Param param;
-
- const auto padding_index = OperandIndex{init_param.inputs[3]};
- const auto hstride_index = OperandIndex{init_param.inputs[4]};
- const auto vstride_index = OperandIndex{init_param.inputs[5]};
-
- param.padding.type =
- NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>());
- param.stride = makeStride(operands, hstride_index, vstride_index);
-
- return new operation::TransposeConv{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_SQRT] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 1 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- // 0 -> input Tensor Index
-
- OperandIndexSequence inputs{init_param.inputs[0]};
- return new operation::SQRT{inputs, outputs};
- };
-
- // ANEURALNETWORKS_SQRT_EX is deprecated
- // TODO Remove ANEURALNETWORKS_SQRT_EX
- _map[ANEURALNETWORKS_SQRT_EX] = _map[ANEURALNETWORKS_SQRT];
-
- _map[ANEURALNETWORKS_LOGICAL_OR_EX] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> input0 Tensor Index
- // 1 -> input1 Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
-
- // This operation's operands must be boolean type.
- replaceDataType(operands, inputs.at(0), DataType::BOOL8);
- replaceDataType(operands, inputs.at(1), DataType::BOOL8);
- replaceDataType(operands, outputs.at(0), DataType::BOOL8);
-
- return new operation::LogicalOr{inputs, outputs};
- };
-
- _map[ANEURALNETWORKS_LOGICAL_NOT_EX] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 1 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> input Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0]};
-
- // This operation's operands must be boolean type.
- replaceDataType(operands, inputs.at(0), DataType::BOOL8);
- replaceDataType(operands, outputs.at(0), DataType::BOOL8);
-
- return new operation::LogicalNot{inputs, outputs};
- };
-
- _map[ANEURALNETWORKS_LSTM] = [](const OperationFactory::Param &init_param, Operands &operands) {
- assert(init_param.input_count == 23 && init_param.output_count == 4);
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Input Tensor Index
- // 1 -> Input to Input Tensor Index
- // 2 -> Input to Forget Tensor Index
- // 3 -> Input to Cell Tensor Index
- // 4 -> Input to Output Tensor Index
- // 5 -> Recurrent to Input Weights Tensor Index
- // 6 -> Recurrent to Forget Weights Tensor Index
- // 7 -> Recurrent to Cell Weights Tensor Index
- // 8 -> Recurrent to Output Weights Tensor Index
- // 9 -> Cell to Input Weights Tensor Index
- // 10 -> Cell to Forget Weights Tensor Index
- // 11 -> Cell to Output Weights Tensor Index
- // 12 -> Input Gate Bias Tensor Index
- // 13 -> Forget Gate Bias Tensor Index
- // 14 -> Cell Bias Tensor Index
- // 15 -> Output Gate Bias Tensor Index
- // 16 -> Projection Weights Tensor Index
- // 17 -> Projection Bias Tensor Index
- // 18 -> Output State In Tensor Index
- // 19 -> Cell State In Tensor Index
- OperandIndexSequence inputs;
- for (uint32_t n = 0; n < init_param.input_count - 3; ++n)
- {
- inputs.append(OperandIndex{init_param.inputs[n]});
- }
-
- // Each output should be interpreted as follows:
- //
- // 0 -> Scratch Buffer Tensor Index
- // 1 -> Output State Out Tensor Index
- // 2 -> Cell State Out Tensor Index
- // 3 -> Output Tensor Index
- OperandIndexSequence outputs;
- for (uint32_t n = 0; n < init_param.output_count; ++n)
- {
- outputs.append(OperandIndex{init_param.outputs[n]});
- }
-
- operation::LSTM::Param param;
- const auto activation_index = OperandIndex{init_param.inputs[20]};
- switch (operands.at(activation_index).asScalar<int32_t>())
- {
- case 0:
- param.activation = Activation::NONE;
- break;
- case 1:
- param.activation = Activation::RELU;
- break;
- case 2:
- param.activation = Activation::RELU1;
- break;
- case 3:
- param.activation = Activation::RELU6;
- break;
- case 4:
- param.activation = Activation::TANH;
- break;
- case 6:
- param.activation = Activation::SIGMOID;
- break;
- default:
- throw std::runtime_error("Unsupported activation type");
- break;
- }
- param.cell_threshold = operands.at(OperandIndex{init_param.inputs[21]}).asScalar<float>();
- param.projection_threshold = operands.at(OperandIndex{init_param.inputs[22]}).asScalar<float>();
-
- return new operation::LSTM{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_EQUAL_EX] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> input0 Tensor Index
- // 1 -> input1 Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
-
- operation::Comparison::Param param;
- param.comparison_type = operation::Comparison::ComparisonType::Equal;
-
- // Output operand type must be boolean
- replaceDataType(operands, outputs.at(0), DataType::BOOL8);
-
- return new operation::Comparison{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_SQUARED_DIFFERENCE_EX] = [](const OperationFactory::Param &init_param,
- Operands &) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> LHS Tensor Index
- // 1 -> RHS Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
-
- return new operation::SquaredDifference{inputs, outputs};
- };
-
- _map[ANEURALNETWORKS_TOPK_V2] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 2 && init_param.output_count == 2);
-
- // Each output should be interpreted as follows:
- //
- // 0 -> Index for Output Values
- // 1 -> Index for Output Indices
- OperandIndexSequence outputs{init_param.outputs[0], init_param.outputs[1]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Index for Input Data
- // 1 -> Index for K
- OperandIndexSequence inputs{init_param.inputs[0]};
-
- operation::TopKV2::Param param;
- param.k = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>();
-
- return new operation::TopKV2{inputs, outputs, param};
- };
-
- // ANEURALNETWORKS_CAST_EX is deprecated
- // TODO Remove ANEURALNETWORKS_CAST_EX
- _map[ANEURALNETWORKS_TOPK_V2_EX] = _map[ANEURALNETWORKS_TOPK_V2];
-
- _map[ANEURALNETWORKS_GATHER] = [](const OperationFactory::Param &init_param, Operands &operands) {
- assert(init_param.input_count == 3 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> input Tensor Index
- // 1 -> axis Index
- // 2 -> indices Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[2]};
-
- operation::Gather::Param param;
- param.axis = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<int32_t>();
- param.rank = operands.at(inputs.at(0)).shape().rank();
-
- return new operation::Gather{inputs, outputs, param};
- };
-
- // ANEURALNETWORKS_GATHER_EX is deprecated
- // TODO Remove ANEURALNETWORKS_GATHER_EX
- _map[ANEURALNETWORKS_GATHER_EX] = _map[ANEURALNETWORKS_GATHER];
-
- _map[ANEURALNETWORKS_NEG] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 1 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Input Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0]};
-
- return new operation::Neg{inputs, outputs};
- };
-
- // ANEURALNETWORKS_NEG_EX is deprecated
- // TODO Remove ANEURALNETWORKS_NEG_EX
- _map[ANEURALNETWORKS_NEG_EX] = _map[ANEURALNETWORKS_NEG];
-
- _map[ANEURALNETWORKS_ABS] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 1 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Input Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0]};
-
- return new operation::Abs{inputs, outputs};
- };
-
- // ANEURALNETWORKS_ABS_EX is deprecated
- // TODO Remove ANEURALNETWORKS_ABS_EX
- _map[ANEURALNETWORKS_ABS_EX] = _map[ANEURALNETWORKS_ABS];
-
- _map[ANEURALNETWORKS_ARGMAX_EX] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Input Tensor Index
- // 1 -> Axis Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0]};
-
- operation::ArgMax::Param param;
- param.axis = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>();
- param.rank = operands.at(inputs.at(0)).shape().rank();
-
- return new operation::ArgMax{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_DEQUANTIZE] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 1 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Input Tensor Index
- OperandIndexSequence inputs{init_param.inputs[0]};
-
- return new operation::Dequantize{inputs, outputs};
- };
-
- _map[ANEURALNETWORKS_MEAN] = [](const OperationFactory::Param &init_param, Operands &operands) {
- assert(init_param.input_count == 3 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> ifm Tensor Index
- // 1 -> axis Tensor Index
- // 2 -> keep_dims Index
- OperandIndexSequence inputs{init_param.inputs[0]};
- std::vector<std::int32_t> axes =
- operands.at(OperandIndex{init_param.inputs[1]}).asVector<std::int32_t>();
-
- operation::Mean::Param param;
- param.axes.assign(axes.cbegin(), axes.cend());
- param.keep_dims = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<int32_t>() != 0;
- param.rank = operands.at(inputs.at(0)).shape().rank();
-
- return new operation::Mean{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 5 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- OperandIndexSequence inputs{init_param.inputs[0]};
-
- operation::LocalResponseNormalization::Param param;
- param.radius = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>();
- param.bias = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<float>();
- param.alpha = operands.at(OperandIndex{init_param.inputs[3]}).asScalar<float>();
- param.beta = operands.at(OperandIndex{init_param.inputs[4]}).asScalar<float>();
-
- return new operation::LocalResponseNormalization{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_DEPTH_TO_SPACE] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Input Tensor Index
- // 1 -> Block size Index
- OperandIndexSequence inputs{init_param.inputs[0]};
-
- operation::DepthToSpace::Param param;
- param.block_size = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>();
-
- return new operation::DepthToSpace{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_PACK_EX] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count >= 3 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
- OperandIndexSequence inputs;
- for (uint32_t n = 0; n < init_param.input_count - 2; ++n)
- {
- inputs.append(OperandIndex{init_param.inputs[n]});
- }
-
- operation::Pack::Param param;
- const auto num_index = OperandIndex{init_param.inputs[init_param.input_count - 2]};
- const auto axis_index = OperandIndex{init_param.inputs[init_param.input_count - 1]};
- param.num = operands.at(num_index).asScalar<int32_t>();
- param.axis = operands.at(axis_index).asScalar<int32_t>();
- param.rank = operands.at(outputs.at(0)).shape().rank();
-
- return new operation::Pack{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_REDUCE_MIN_EX] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 3 && init_param.output_count == 1);
-
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- // Each input should be interpreted as follows:
- //
- // 0 -> Input Tensor Index
- // 1 -> Axis Tensor Index
- // 2 -> keep_dims Index
- OperandIndexSequence inputs{init_param.inputs[0]};
- std::vector<std::int32_t> axes =
- operands.at(OperandIndex{init_param.inputs[1]}).asVector<std::int32_t>();
-
- operation::ReduceMin::Param param;
- param.axes.assign(axes.cbegin(), axes.cend());
- param.keep_dims = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<int32_t>() != 0;
- param.rank = operands.at(inputs.at(0)).shape().rank();
-
- return new operation::ReduceMin{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_SPLIT_EX] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 3);
- assert(init_param.output_count >= 1); // At least one output tensor and axis
-
- OperandIndexSequence inputs{init_param.inputs[0]};
- OperandIndexSequence outputs;
- for (uint32_t n = 0; n < init_param.output_count; ++n)
- {
- outputs.append(OperandIndex{init_param.outputs[n]});
- }
-
- operation::Split::Param param;
- param.axis = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>();
- param.num_splits = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<std::int32_t>();
- param.rank = operands.at(inputs.at(0)).shape().rank();
-
- return new operation::Split{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_UNPACK_EX] = [](const OperationFactory::Param &init_param,
- Operands &operands) {
- assert(init_param.input_count == 3 && init_param.output_count >= 1);
-
- OperandIndexSequence inputs{init_param.inputs[0]};
- OperandIndexSequence outputs;
- for (uint32_t n = 0; n < init_param.output_count; ++n)
- {
- outputs.append(OperandIndex{init_param.outputs[n]});
- }
-
- operation::Unpack::Param param;
- const auto num_index = OperandIndex{init_param.inputs[1]};
- const auto axis_index = OperandIndex{init_param.inputs[2]};
- param.num = operands.at(num_index).asScalar<int32_t>();
- param.axis = operands.at(axis_index).asScalar<int32_t>();
- param.rank = operands.at(inputs.at(0)).shape().rank();
-
- return new operation::Unpack{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_PAD] = [](const OperationFactory::Param &init_param, Operands &operands) {
- assert(init_param.input_count == 2 && init_param.output_count >= 1);
-
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- operation::Pad::Param param;
- param.rank = operands.at(inputs.at(0)).shape().rank();
-
- return new operation::Pad{inputs, outputs, param};
- };
-
- _map[ANEURALNETWORKS_MINIMUM] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- return new operation::Min{inputs, outputs};
- };
-
- _map[ANEURALNETWORKS_MAXIMUM] = [](const OperationFactory::Param &init_param, Operands &) {
- assert(init_param.input_count == 2 && init_param.output_count == 1);
-
- OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
- OperandIndexSequence outputs{init_param.outputs[0]};
-
- return new operation::Max{inputs, outputs};
- };
-}
-
-Operation *OperationFactory::create(ANeuralNetworksOperationType type,
- const OperationFactory::Param &param, Operands &operands)
-{
- auto it = _map.find(type);
- if (it == _map.end())
- {
- throw std::runtime_error("Unsupported operation type: " + std::to_string(type));
- }
- return it->second(param, operands);
-}
diff --git a/runtime/neurun/frontend/nnapi/wrapper/OperationFactory.h b/runtime/neurun/frontend/nnapi/wrapper/OperationFactory.h
deleted file mode 100644
index 003e4eb7a..000000000
--- a/runtime/neurun/frontend/nnapi/wrapper/OperationFactory.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __OPERATION_FACTORY_H__
-#define __OPERATION_FACTORY_H__
-
-#include <unordered_map>
-
-#include "ir/Operands.h"
-#include "ir/Operation.h"
-#include "NeuralNetworks.h"
-#include "NeuralNetworksEx.h"
-
-/**
- * @brief A class to create a neurun operation object from NN API input parameters
- */
-class OperationFactory
-{
-public:
- struct Param
- {
- uint32_t input_count;
- const uint32_t *inputs;
- uint32_t output_count;
- const uint32_t *outputs;
- };
-
-public:
- using Generator = std::function<neurun::ir::Operation *(const OperationFactory::Param &,
- neurun::ir::Operands &)>;
-
-public:
- static OperationFactory &get();
-
-private:
- OperationFactory();
-
-public:
- neurun::ir::Operation *create(ANeuralNetworksOperationType, const OperationFactory::Param &param,
- neurun::ir::Operands &operands);
- // TODO add "register" method for separating registration, possibly supporting custom-ops
-
-private:
- std::unordered_map<ANeuralNetworksOperationType, Generator> _map;
-};
-
-#endif // __OPERATION_FACTORY_H__
diff --git a/runtime/neurun/frontend/tflite/CMakeLists.txt b/runtime/neurun/frontend/tflite/CMakeLists.txt
deleted file mode 100644
index 5157869f3..000000000
--- a/runtime/neurun/frontend/tflite/CMakeLists.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-if(NOT BUILD_TFLITE_LOADER)
- return()
-endif(NOT BUILD_TFLITE_LOADER)
-
-nnfw_find_package(FlatBuffersSource REQUIRED)
-
-set(TFLITE_LOADER_SOURCES src/tflite_loader.cc)
-
-add_library(tflite_loader SHARED ${TFLITE_LOADER_SOURCES})
-
-target_include_directories(tflite_loader PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include)
-target_include_directories(tflite_loader PRIVATE ${FlatBuffersSource_DIR}/include)
-
-target_link_libraries(tflite_loader PUBLIC neurun_core)
-target_link_libraries(tflite_loader PRIVATE base_loader nnfw_lib_cpp14 nnfw_common nnfw_coverage)
-
-install(TARGETS tflite_loader DESTINATION lib)
diff --git a/runtime/neurun/frontend/tflite/include/tflite_loader.h b/runtime/neurun/frontend/tflite/include/tflite_loader.h
deleted file mode 100644
index 033230b4b..000000000
--- a/runtime/neurun/frontend/tflite/include/tflite_loader.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __TFLITE_TFLITE_LOADER_H__
-#define __TFLITE_TFLITE_LOADER_H__
-
-#include "ir/Graph.h"
-
-#include <memory>
-
-namespace neurun
-{
-namespace tflite_loader
-{
-
-std::unique_ptr<ir::Graph> loadModel(const char *filename);
-
-} // namespace tflite_loader
-} // namespace neurun
-
-#endif // __TFLITE_TFLITE_LOADER_H__
diff --git a/runtime/neurun/frontend/tflite/src/tflite_loader.cc b/runtime/neurun/frontend/tflite/src/tflite_loader.cc
deleted file mode 100644
index 10a4fc095..000000000
--- a/runtime/neurun/frontend/tflite/src/tflite_loader.cc
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "tflite_loader.h"
-#include "base_loader.h"
-#include "tflite_schema_generated.h"
-
-namespace neurun
-{
-namespace tflite_loader
-{
-
-namespace
-{
-
-struct LoaderDomain
-{
- using Verifier = flatbuffers::Verifier;
- using ActivationFunctionType = neurun_tflite::ActivationFunctionType;
- using Buffer = neurun_tflite::Buffer;
- using BuiltinOperator = neurun_tflite::BuiltinOperator;
- using CustomOptionsFormat = neurun_tflite::CustomOptionsFormat;
- using Model = neurun_tflite::Model;
- using Operator = neurun_tflite::Operator;
- using Padding = neurun_tflite::Padding;
- using Pool2DOptions = neurun_tflite::Pool2DOptions;
- using Tensor = neurun_tflite::Tensor;
- using TensorType = neurun_tflite::TensorType;
- using SubGraph = neurun_tflite::SubGraph;
-
- static const char *EnumNameBuiltinOperator(BuiltinOperator e)
- {
- return neurun_tflite::EnumNameBuiltinOperator(e);
- }
- static const char *EnumNameActivationFunctionType(ActivationFunctionType e)
- {
- return neurun_tflite::EnumNameActivationFunctionType(e);
- }
- static const char *EnumNameTensorType(TensorType e)
- {
- return neurun_tflite::EnumNameTensorType(e);
- }
- static const Model *GetModel(const void *buf) { return neurun_tflite::GetModel(buf); }
- static bool VerifyModelBuffer(Verifier &verifier)
- {
- return neurun_tflite::VerifyModelBuffer(verifier);
- }
-};
-
-class TFLiteLoader final : public base_loader::BaseLoader<LoaderDomain, TFLiteLoader>
-{
-public:
- using BaseLoader::BaseLoader;
-
- void loadSubgraph(const neurun_tflite::SubGraph *subgraph)
- {
- // Load tensors
- _tensor_to_operand.resize(subgraph->tensors()->size());
- for (flatbuffers::uoffset_t i = 0; i < subgraph->tensors()->size(); ++i)
- {
- _tensor_to_operand[i] = loadOperand(subgraph->tensors()->Get(i));
- }
- // Set inputs
- for (const std::int32_t input_ind : *subgraph->inputs())
- {
- _graph.addInput(_tensor_to_operand[input_ind]);
- }
- // Set outputs
- for (const std::int32_t output_ind : *subgraph->outputs())
- {
- _graph.addOutput(_tensor_to_operand[output_ind]);
- }
- // Create operations
- for (const auto *op : *subgraph->operators())
- {
- loadOperation(op);
- }
- }
-};
-
-} // namespace
-
-std::unique_ptr<ir::Graph> loadModel(const char *filename)
-{
- auto graph = nnfw::cpp14::make_unique<ir::Graph>();
- TFLiteLoader loader(*graph);
- loader.loadFromFile(filename);
- return graph;
-}
-
-} // namespace tflite_loader
-} // namespace neurun
diff --git a/runtime/neurun/frontend/tflite/src/tflite_schema_generated.h b/runtime/neurun/frontend/tflite/src/tflite_schema_generated.h
deleted file mode 100644
index 21669e2ff..000000000
--- a/runtime/neurun/frontend/tflite/src/tflite_schema_generated.h
+++ /dev/null
@@ -1,7275 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-// automatically generated by the FlatBuffers compiler, do not modify
-
-#ifndef FLATBUFFERS_GENERATED_SCHEMA_NEURUN_TFLITE_H_
-#define FLATBUFFERS_GENERATED_SCHEMA_NEURUN_TFLITE_H_
-
-#include "flatbuffers/flatbuffers.h"
-
-namespace neurun_tflite
-{
-
-struct CustomQuantization;
-
-struct QuantizationParameters;
-
-struct Tensor;
-
-struct Conv2DOptions;
-
-struct Pool2DOptions;
-
-struct DepthwiseConv2DOptions;
-
-struct ConcatEmbeddingsOptions;
-
-struct LSHProjectionOptions;
-
-struct SVDFOptions;
-
-struct RNNOptions;
-
-struct SequenceRNNOptions;
-
-struct BidirectionalSequenceRNNOptions;
-
-struct FullyConnectedOptions;
-
-struct SoftmaxOptions;
-
-struct ConcatenationOptions;
-
-struct AddOptions;
-
-struct MulOptions;
-
-struct L2NormOptions;
-
-struct LocalResponseNormalizationOptions;
-
-struct LSTMOptions;
-
-struct UnidirectionalSequenceLSTMOptions;
-
-struct BidirectionalSequenceLSTMOptions;
-
-struct ResizeBilinearOptions;
-
-struct ResizeNearestNeighborOptions;
-
-struct CallOptions;
-
-struct PadOptions;
-
-struct PadV2Options;
-
-struct ReshapeOptions;
-
-struct SpaceToBatchNDOptions;
-
-struct BatchToSpaceNDOptions;
-
-struct SkipGramOptions;
-
-struct SpaceToDepthOptions;
-
-struct SubOptions;
-
-struct DivOptions;
-
-struct TopKV2Options;
-
-struct EmbeddingLookupSparseOptions;
-
-struct GatherOptions;
-
-struct TransposeOptions;
-
-struct ExpOptions;
-
-struct ReducerOptions;
-
-struct SqueezeOptions;
-
-struct SplitOptions;
-
-struct SplitVOptions;
-
-struct StridedSliceOptions;
-
-struct LogSoftmaxOptions;
-
-struct CastOptions;
-
-struct DequantizeOptions;
-
-struct MaximumMinimumOptions;
-
-struct TileOptions;
-
-struct ArgMaxOptions;
-
-struct ArgMinOptions;
-
-struct GreaterOptions;
-
-struct GreaterEqualOptions;
-
-struct LessOptions;
-
-struct LessEqualOptions;
-
-struct NegOptions;
-
-struct SelectOptions;
-
-struct SliceOptions;
-
-struct TransposeConvOptions;
-
-struct ExpandDimsOptions;
-
-struct SparseToDenseOptions;
-
-struct EqualOptions;
-
-struct NotEqualOptions;
-
-struct ShapeOptions;
-
-struct PowOptions;
-
-struct FakeQuantOptions;
-
-struct PackOptions;
-
-struct LogicalOrOptions;
-
-struct OneHotOptions;
-
-struct AbsOptions;
-
-struct LogicalAndOptions;
-
-struct LogicalNotOptions;
-
-struct UnpackOptions;
-
-struct FloorDivOptions;
-
-struct SquareOptions;
-
-struct ZerosLikeOptions;
-
-struct FillOptions;
-
-struct FloorModOptions;
-
-struct RangeOptions;
-
-struct LeakyReluOptions;
-
-struct SquaredDifferenceOptions;
-
-struct MirrorPadOptions;
-
-struct OperatorCode;
-
-struct Operator;
-
-struct SubGraph;
-
-struct Buffer;
-
-struct Model;
-
-enum TensorType
-{
- TensorType_FLOAT32 = 0,
- TensorType_FLOAT16 = 1,
- TensorType_INT32 = 2,
- TensorType_UINT8 = 3,
- TensorType_INT64 = 4,
- TensorType_STRING = 5,
- TensorType_BOOL = 6,
- TensorType_INT16 = 7,
- TensorType_COMPLEX64 = 8,
- TensorType_INT8 = 9,
- TensorType_MIN = TensorType_FLOAT32,
- TensorType_MAX = TensorType_INT8
-};
-
-inline const TensorType (&EnumValuesTensorType())[10]
-{
- static const TensorType values[] = {TensorType_FLOAT32, TensorType_FLOAT16, TensorType_INT32,
- TensorType_UINT8, TensorType_INT64, TensorType_STRING,
- TensorType_BOOL, TensorType_INT16, TensorType_COMPLEX64,
- TensorType_INT8};
- return values;
-}
-
-inline const char *const *EnumNamesTensorType()
-{
- static const char *const names[] = {"FLOAT32", "FLOAT16", "INT32", "UINT8", "INT64", "STRING",
- "BOOL", "INT16", "COMPLEX64", "INT8", nullptr};
- return names;
-}
-
-inline const char *EnumNameTensorType(TensorType e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesTensorType()[index];
-}
-
-enum QuantizationDetails
-{
- QuantizationDetails_NONE = 0,
- QuantizationDetails_CustomQuantization = 1,
- QuantizationDetails_MIN = QuantizationDetails_NONE,
- QuantizationDetails_MAX = QuantizationDetails_CustomQuantization
-};
-
-inline const QuantizationDetails (&EnumValuesQuantizationDetails())[2]
-{
- static const QuantizationDetails values[] = {QuantizationDetails_NONE,
- QuantizationDetails_CustomQuantization};
- return values;
-}
-
-inline const char *const *EnumNamesQuantizationDetails()
-{
- static const char *const names[] = {"NONE", "CustomQuantization", nullptr};
- return names;
-}
-
-inline const char *EnumNameQuantizationDetails(QuantizationDetails e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesQuantizationDetails()[index];
-}
-
-template <typename T> struct QuantizationDetailsTraits
-{
- static const QuantizationDetails enum_value = QuantizationDetails_NONE;
-};
-
-template <> struct QuantizationDetailsTraits<CustomQuantization>
-{
- static const QuantizationDetails enum_value = QuantizationDetails_CustomQuantization;
-};
-
-bool VerifyQuantizationDetails(flatbuffers::Verifier &verifier, const void *obj,
- QuantizationDetails type);
-bool VerifyQuantizationDetailsVector(flatbuffers::Verifier &verifier,
- const flatbuffers::Vector<flatbuffers::Offset<void>> *values,
- const flatbuffers::Vector<uint8_t> *types);
-
-enum BuiltinOperator
-{
- BuiltinOperator_ADD = 0,
- BuiltinOperator_AVERAGE_POOL_2D = 1,
- BuiltinOperator_CONCATENATION = 2,
- BuiltinOperator_CONV_2D = 3,
- BuiltinOperator_DEPTHWISE_CONV_2D = 4,
- BuiltinOperator_DEQUANTIZE = 6,
- BuiltinOperator_EMBEDDING_LOOKUP = 7,
- BuiltinOperator_FLOOR = 8,
- BuiltinOperator_FULLY_CONNECTED = 9,
- BuiltinOperator_HASHTABLE_LOOKUP = 10,
- BuiltinOperator_L2_NORMALIZATION = 11,
- BuiltinOperator_L2_POOL_2D = 12,
- BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION = 13,
- BuiltinOperator_LOGISTIC = 14,
- BuiltinOperator_LSH_PROJECTION = 15,
- BuiltinOperator_LSTM = 16,
- BuiltinOperator_MAX_POOL_2D = 17,
- BuiltinOperator_MUL = 18,
- BuiltinOperator_RELU = 19,
- BuiltinOperator_RELU_N1_TO_1 = 20,
- BuiltinOperator_RELU6 = 21,
- BuiltinOperator_RESHAPE = 22,
- BuiltinOperator_RESIZE_BILINEAR = 23,
- BuiltinOperator_RNN = 24,
- BuiltinOperator_SOFTMAX = 25,
- BuiltinOperator_SPACE_TO_DEPTH = 26,
- BuiltinOperator_SVDF = 27,
- BuiltinOperator_TANH = 28,
- BuiltinOperator_CONCAT_EMBEDDINGS = 29,
- BuiltinOperator_SKIP_GRAM = 30,
- BuiltinOperator_CALL = 31,
- BuiltinOperator_CUSTOM = 32,
- BuiltinOperator_EMBEDDING_LOOKUP_SPARSE = 33,
- BuiltinOperator_PAD = 34,
- BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN = 35,
- BuiltinOperator_GATHER = 36,
- BuiltinOperator_BATCH_TO_SPACE_ND = 37,
- BuiltinOperator_SPACE_TO_BATCH_ND = 38,
- BuiltinOperator_TRANSPOSE = 39,
- BuiltinOperator_MEAN = 40,
- BuiltinOperator_SUB = 41,
- BuiltinOperator_DIV = 42,
- BuiltinOperator_SQUEEZE = 43,
- BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM = 44,
- BuiltinOperator_STRIDED_SLICE = 45,
- BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN = 46,
- BuiltinOperator_EXP = 47,
- BuiltinOperator_TOPK_V2 = 48,
- BuiltinOperator_SPLIT = 49,
- BuiltinOperator_LOG_SOFTMAX = 50,
- BuiltinOperator_DELEGATE = 51,
- BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM = 52,
- BuiltinOperator_CAST = 53,
- BuiltinOperator_PRELU = 54,
- BuiltinOperator_MAXIMUM = 55,
- BuiltinOperator_ARG_MAX = 56,
- BuiltinOperator_MINIMUM = 57,
- BuiltinOperator_LESS = 58,
- BuiltinOperator_NEG = 59,
- BuiltinOperator_PADV2 = 60,
- BuiltinOperator_GREATER = 61,
- BuiltinOperator_GREATER_EQUAL = 62,
- BuiltinOperator_LESS_EQUAL = 63,
- BuiltinOperator_SELECT = 64,
- BuiltinOperator_SLICE = 65,
- BuiltinOperator_SIN = 66,
- BuiltinOperator_TRANSPOSE_CONV = 67,
- BuiltinOperator_SPARSE_TO_DENSE = 68,
- BuiltinOperator_TILE = 69,
- BuiltinOperator_EXPAND_DIMS = 70,
- BuiltinOperator_EQUAL = 71,
- BuiltinOperator_NOT_EQUAL = 72,
- BuiltinOperator_LOG = 73,
- BuiltinOperator_SUM = 74,
- BuiltinOperator_SQRT = 75,
- BuiltinOperator_RSQRT = 76,
- BuiltinOperator_SHAPE = 77,
- BuiltinOperator_POW = 78,
- BuiltinOperator_ARG_MIN = 79,
- BuiltinOperator_FAKE_QUANT = 80,
- BuiltinOperator_REDUCE_PROD = 81,
- BuiltinOperator_REDUCE_MAX = 82,
- BuiltinOperator_PACK = 83,
- BuiltinOperator_LOGICAL_OR = 84,
- BuiltinOperator_ONE_HOT = 85,
- BuiltinOperator_LOGICAL_AND = 86,
- BuiltinOperator_LOGICAL_NOT = 87,
- BuiltinOperator_UNPACK = 88,
- BuiltinOperator_REDUCE_MIN = 89,
- BuiltinOperator_FLOOR_DIV = 90,
- BuiltinOperator_REDUCE_ANY = 91,
- BuiltinOperator_SQUARE = 92,
- BuiltinOperator_ZEROS_LIKE = 93,
- BuiltinOperator_FILL = 94,
- BuiltinOperator_FLOOR_MOD = 95,
- BuiltinOperator_RANGE = 96,
- BuiltinOperator_RESIZE_NEAREST_NEIGHBOR = 97,
- BuiltinOperator_LEAKY_RELU = 98,
- BuiltinOperator_SQUARED_DIFFERENCE = 99,
- BuiltinOperator_MIRROR_PAD = 100,
- BuiltinOperator_ABS = 101,
- BuiltinOperator_SPLIT_V = 102,
- BuiltinOperator_MIN = BuiltinOperator_ADD,
- BuiltinOperator_MAX = BuiltinOperator_SPLIT_V
-};
-
-inline const BuiltinOperator (&EnumValuesBuiltinOperator())[102]
-{
- static const BuiltinOperator values[] = {BuiltinOperator_ADD,
- BuiltinOperator_AVERAGE_POOL_2D,
- BuiltinOperator_CONCATENATION,
- BuiltinOperator_CONV_2D,
- BuiltinOperator_DEPTHWISE_CONV_2D,
- BuiltinOperator_DEQUANTIZE,
- BuiltinOperator_EMBEDDING_LOOKUP,
- BuiltinOperator_FLOOR,
- BuiltinOperator_FULLY_CONNECTED,
- BuiltinOperator_HASHTABLE_LOOKUP,
- BuiltinOperator_L2_NORMALIZATION,
- BuiltinOperator_L2_POOL_2D,
- BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION,
- BuiltinOperator_LOGISTIC,
- BuiltinOperator_LSH_PROJECTION,
- BuiltinOperator_LSTM,
- BuiltinOperator_MAX_POOL_2D,
- BuiltinOperator_MUL,
- BuiltinOperator_RELU,
- BuiltinOperator_RELU_N1_TO_1,
- BuiltinOperator_RELU6,
- BuiltinOperator_RESHAPE,
- BuiltinOperator_RESIZE_BILINEAR,
- BuiltinOperator_RNN,
- BuiltinOperator_SOFTMAX,
- BuiltinOperator_SPACE_TO_DEPTH,
- BuiltinOperator_SVDF,
- BuiltinOperator_TANH,
- BuiltinOperator_CONCAT_EMBEDDINGS,
- BuiltinOperator_SKIP_GRAM,
- BuiltinOperator_CALL,
- BuiltinOperator_CUSTOM,
- BuiltinOperator_EMBEDDING_LOOKUP_SPARSE,
- BuiltinOperator_PAD,
- BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN,
- BuiltinOperator_GATHER,
- BuiltinOperator_BATCH_TO_SPACE_ND,
- BuiltinOperator_SPACE_TO_BATCH_ND,
- BuiltinOperator_TRANSPOSE,
- BuiltinOperator_MEAN,
- BuiltinOperator_SUB,
- BuiltinOperator_DIV,
- BuiltinOperator_SQUEEZE,
- BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM,
- BuiltinOperator_STRIDED_SLICE,
- BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN,
- BuiltinOperator_EXP,
- BuiltinOperator_TOPK_V2,
- BuiltinOperator_SPLIT,
- BuiltinOperator_LOG_SOFTMAX,
- BuiltinOperator_DELEGATE,
- BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM,
- BuiltinOperator_CAST,
- BuiltinOperator_PRELU,
- BuiltinOperator_MAXIMUM,
- BuiltinOperator_ARG_MAX,
- BuiltinOperator_MINIMUM,
- BuiltinOperator_LESS,
- BuiltinOperator_NEG,
- BuiltinOperator_PADV2,
- BuiltinOperator_GREATER,
- BuiltinOperator_GREATER_EQUAL,
- BuiltinOperator_LESS_EQUAL,
- BuiltinOperator_SELECT,
- BuiltinOperator_SLICE,
- BuiltinOperator_SIN,
- BuiltinOperator_TRANSPOSE_CONV,
- BuiltinOperator_SPARSE_TO_DENSE,
- BuiltinOperator_TILE,
- BuiltinOperator_EXPAND_DIMS,
- BuiltinOperator_EQUAL,
- BuiltinOperator_NOT_EQUAL,
- BuiltinOperator_LOG,
- BuiltinOperator_SUM,
- BuiltinOperator_SQRT,
- BuiltinOperator_RSQRT,
- BuiltinOperator_SHAPE,
- BuiltinOperator_POW,
- BuiltinOperator_ARG_MIN,
- BuiltinOperator_FAKE_QUANT,
- BuiltinOperator_REDUCE_PROD,
- BuiltinOperator_REDUCE_MAX,
- BuiltinOperator_PACK,
- BuiltinOperator_LOGICAL_OR,
- BuiltinOperator_ONE_HOT,
- BuiltinOperator_LOGICAL_AND,
- BuiltinOperator_LOGICAL_NOT,
- BuiltinOperator_UNPACK,
- BuiltinOperator_REDUCE_MIN,
- BuiltinOperator_FLOOR_DIV,
- BuiltinOperator_REDUCE_ANY,
- BuiltinOperator_SQUARE,
- BuiltinOperator_ZEROS_LIKE,
- BuiltinOperator_FILL,
- BuiltinOperator_FLOOR_MOD,
- BuiltinOperator_RANGE,
- BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
- BuiltinOperator_LEAKY_RELU,
- BuiltinOperator_SQUARED_DIFFERENCE,
- BuiltinOperator_MIRROR_PAD,
- BuiltinOperator_ABS,
- BuiltinOperator_SPLIT_V};
- return values;
-}
-
-inline const char *const *EnumNamesBuiltinOperator()
-{
- static const char *const names[] = {"ADD",
- "AVERAGE_POOL_2D",
- "CONCATENATION",
- "CONV_2D",
- "DEPTHWISE_CONV_2D",
- "",
- "DEQUANTIZE",
- "EMBEDDING_LOOKUP",
- "FLOOR",
- "FULLY_CONNECTED",
- "HASHTABLE_LOOKUP",
- "L2_NORMALIZATION",
- "L2_POOL_2D",
- "LOCAL_RESPONSE_NORMALIZATION",
- "LOGISTIC",
- "LSH_PROJECTION",
- "LSTM",
- "MAX_POOL_2D",
- "MUL",
- "RELU",
- "RELU_N1_TO_1",
- "RELU6",
- "RESHAPE",
- "RESIZE_BILINEAR",
- "RNN",
- "SOFTMAX",
- "SPACE_TO_DEPTH",
- "SVDF",
- "TANH",
- "CONCAT_EMBEDDINGS",
- "SKIP_GRAM",
- "CALL",
- "CUSTOM",
- "EMBEDDING_LOOKUP_SPARSE",
- "PAD",
- "UNIDIRECTIONAL_SEQUENCE_RNN",
- "GATHER",
- "BATCH_TO_SPACE_ND",
- "SPACE_TO_BATCH_ND",
- "TRANSPOSE",
- "MEAN",
- "SUB",
- "DIV",
- "SQUEEZE",
- "UNIDIRECTIONAL_SEQUENCE_LSTM",
- "STRIDED_SLICE",
- "BIDIRECTIONAL_SEQUENCE_RNN",
- "EXP",
- "TOPK_V2",
- "SPLIT",
- "LOG_SOFTMAX",
- "DELEGATE",
- "BIDIRECTIONAL_SEQUENCE_LSTM",
- "CAST",
- "PRELU",
- "MAXIMUM",
- "ARG_MAX",
- "MINIMUM",
- "LESS",
- "NEG",
- "PADV2",
- "GREATER",
- "GREATER_EQUAL",
- "LESS_EQUAL",
- "SELECT",
- "SLICE",
- "SIN",
- "TRANSPOSE_CONV",
- "SPARSE_TO_DENSE",
- "TILE",
- "EXPAND_DIMS",
- "EQUAL",
- "NOT_EQUAL",
- "LOG",
- "SUM",
- "SQRT",
- "RSQRT",
- "SHAPE",
- "POW",
- "ARG_MIN",
- "FAKE_QUANT",
- "REDUCE_PROD",
- "REDUCE_MAX",
- "PACK",
- "LOGICAL_OR",
- "ONE_HOT",
- "LOGICAL_AND",
- "LOGICAL_NOT",
- "UNPACK",
- "REDUCE_MIN",
- "FLOOR_DIV",
- "REDUCE_ANY",
- "SQUARE",
- "ZEROS_LIKE",
- "FILL",
- "FLOOR_MOD",
- "RANGE",
- "RESIZE_NEAREST_NEIGHBOR",
- "LEAKY_RELU",
- "SQUARED_DIFFERENCE",
- "MIRROR_PAD",
- "ABS",
- "SPLIT_V",
- nullptr};
- return names;
-}
-
-inline const char *EnumNameBuiltinOperator(BuiltinOperator e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesBuiltinOperator()[index];
-}
-
-enum BuiltinOptions
-{
- BuiltinOptions_NONE = 0,
- BuiltinOptions_Conv2DOptions = 1,
- BuiltinOptions_DepthwiseConv2DOptions = 2,
- BuiltinOptions_ConcatEmbeddingsOptions = 3,
- BuiltinOptions_LSHProjectionOptions = 4,
- BuiltinOptions_Pool2DOptions = 5,
- BuiltinOptions_SVDFOptions = 6,
- BuiltinOptions_RNNOptions = 7,
- BuiltinOptions_FullyConnectedOptions = 8,
- BuiltinOptions_SoftmaxOptions = 9,
- BuiltinOptions_ConcatenationOptions = 10,
- BuiltinOptions_AddOptions = 11,
- BuiltinOptions_L2NormOptions = 12,
- BuiltinOptions_LocalResponseNormalizationOptions = 13,
- BuiltinOptions_LSTMOptions = 14,
- BuiltinOptions_ResizeBilinearOptions = 15,
- BuiltinOptions_CallOptions = 16,
- BuiltinOptions_ReshapeOptions = 17,
- BuiltinOptions_SkipGramOptions = 18,
- BuiltinOptions_SpaceToDepthOptions = 19,
- BuiltinOptions_EmbeddingLookupSparseOptions = 20,
- BuiltinOptions_MulOptions = 21,
- BuiltinOptions_PadOptions = 22,
- BuiltinOptions_GatherOptions = 23,
- BuiltinOptions_BatchToSpaceNDOptions = 24,
- BuiltinOptions_SpaceToBatchNDOptions = 25,
- BuiltinOptions_TransposeOptions = 26,
- BuiltinOptions_ReducerOptions = 27,
- BuiltinOptions_SubOptions = 28,
- BuiltinOptions_DivOptions = 29,
- BuiltinOptions_SqueezeOptions = 30,
- BuiltinOptions_SequenceRNNOptions = 31,
- BuiltinOptions_StridedSliceOptions = 32,
- BuiltinOptions_ExpOptions = 33,
- BuiltinOptions_TopKV2Options = 34,
- BuiltinOptions_SplitOptions = 35,
- BuiltinOptions_LogSoftmaxOptions = 36,
- BuiltinOptions_CastOptions = 37,
- BuiltinOptions_DequantizeOptions = 38,
- BuiltinOptions_MaximumMinimumOptions = 39,
- BuiltinOptions_ArgMaxOptions = 40,
- BuiltinOptions_LessOptions = 41,
- BuiltinOptions_NegOptions = 42,
- BuiltinOptions_PadV2Options = 43,
- BuiltinOptions_GreaterOptions = 44,
- BuiltinOptions_GreaterEqualOptions = 45,
- BuiltinOptions_LessEqualOptions = 46,
- BuiltinOptions_SelectOptions = 47,
- BuiltinOptions_SliceOptions = 48,
- BuiltinOptions_TransposeConvOptions = 49,
- BuiltinOptions_SparseToDenseOptions = 50,
- BuiltinOptions_TileOptions = 51,
- BuiltinOptions_ExpandDimsOptions = 52,
- BuiltinOptions_EqualOptions = 53,
- BuiltinOptions_NotEqualOptions = 54,
- BuiltinOptions_ShapeOptions = 55,
- BuiltinOptions_PowOptions = 56,
- BuiltinOptions_ArgMinOptions = 57,
- BuiltinOptions_FakeQuantOptions = 58,
- BuiltinOptions_PackOptions = 59,
- BuiltinOptions_LogicalOrOptions = 60,
- BuiltinOptions_OneHotOptions = 61,
- BuiltinOptions_LogicalAndOptions = 62,
- BuiltinOptions_LogicalNotOptions = 63,
- BuiltinOptions_UnpackOptions = 64,
- BuiltinOptions_FloorDivOptions = 65,
- BuiltinOptions_SquareOptions = 66,
- BuiltinOptions_ZerosLikeOptions = 67,
- BuiltinOptions_FillOptions = 68,
- BuiltinOptions_BidirectionalSequenceLSTMOptions = 69,
- BuiltinOptions_BidirectionalSequenceRNNOptions = 70,
- BuiltinOptions_UnidirectionalSequenceLSTMOptions = 71,
- BuiltinOptions_FloorModOptions = 72,
- BuiltinOptions_RangeOptions = 73,
- BuiltinOptions_ResizeNearestNeighborOptions = 74,
- BuiltinOptions_LeakyReluOptions = 75,
- BuiltinOptions_SquaredDifferenceOptions = 76,
- BuiltinOptions_MirrorPadOptions = 77,
- BuiltinOptions_AbsOptions = 78,
- BuiltinOptions_SplitVOptions = 79,
- BuiltinOptions_MIN = BuiltinOptions_NONE,
- BuiltinOptions_MAX = BuiltinOptions_SplitVOptions
-};
-
-inline const BuiltinOptions (&EnumValuesBuiltinOptions())[80]
-{
- static const BuiltinOptions values[] = {BuiltinOptions_NONE,
- BuiltinOptions_Conv2DOptions,
- BuiltinOptions_DepthwiseConv2DOptions,
- BuiltinOptions_ConcatEmbeddingsOptions,
- BuiltinOptions_LSHProjectionOptions,
- BuiltinOptions_Pool2DOptions,
- BuiltinOptions_SVDFOptions,
- BuiltinOptions_RNNOptions,
- BuiltinOptions_FullyConnectedOptions,
- BuiltinOptions_SoftmaxOptions,
- BuiltinOptions_ConcatenationOptions,
- BuiltinOptions_AddOptions,
- BuiltinOptions_L2NormOptions,
- BuiltinOptions_LocalResponseNormalizationOptions,
- BuiltinOptions_LSTMOptions,
- BuiltinOptions_ResizeBilinearOptions,
- BuiltinOptions_CallOptions,
- BuiltinOptions_ReshapeOptions,
- BuiltinOptions_SkipGramOptions,
- BuiltinOptions_SpaceToDepthOptions,
- BuiltinOptions_EmbeddingLookupSparseOptions,
- BuiltinOptions_MulOptions,
- BuiltinOptions_PadOptions,
- BuiltinOptions_GatherOptions,
- BuiltinOptions_BatchToSpaceNDOptions,
- BuiltinOptions_SpaceToBatchNDOptions,
- BuiltinOptions_TransposeOptions,
- BuiltinOptions_ReducerOptions,
- BuiltinOptions_SubOptions,
- BuiltinOptions_DivOptions,
- BuiltinOptions_SqueezeOptions,
- BuiltinOptions_SequenceRNNOptions,
- BuiltinOptions_StridedSliceOptions,
- BuiltinOptions_ExpOptions,
- BuiltinOptions_TopKV2Options,
- BuiltinOptions_SplitOptions,
- BuiltinOptions_LogSoftmaxOptions,
- BuiltinOptions_CastOptions,
- BuiltinOptions_DequantizeOptions,
- BuiltinOptions_MaximumMinimumOptions,
- BuiltinOptions_ArgMaxOptions,
- BuiltinOptions_LessOptions,
- BuiltinOptions_NegOptions,
- BuiltinOptions_PadV2Options,
- BuiltinOptions_GreaterOptions,
- BuiltinOptions_GreaterEqualOptions,
- BuiltinOptions_LessEqualOptions,
- BuiltinOptions_SelectOptions,
- BuiltinOptions_SliceOptions,
- BuiltinOptions_TransposeConvOptions,
- BuiltinOptions_SparseToDenseOptions,
- BuiltinOptions_TileOptions,
- BuiltinOptions_ExpandDimsOptions,
- BuiltinOptions_EqualOptions,
- BuiltinOptions_NotEqualOptions,
- BuiltinOptions_ShapeOptions,
- BuiltinOptions_PowOptions,
- BuiltinOptions_ArgMinOptions,
- BuiltinOptions_FakeQuantOptions,
- BuiltinOptions_PackOptions,
- BuiltinOptions_LogicalOrOptions,
- BuiltinOptions_OneHotOptions,
- BuiltinOptions_LogicalAndOptions,
- BuiltinOptions_LogicalNotOptions,
- BuiltinOptions_UnpackOptions,
- BuiltinOptions_FloorDivOptions,
- BuiltinOptions_SquareOptions,
- BuiltinOptions_ZerosLikeOptions,
- BuiltinOptions_FillOptions,
- BuiltinOptions_BidirectionalSequenceLSTMOptions,
- BuiltinOptions_BidirectionalSequenceRNNOptions,
- BuiltinOptions_UnidirectionalSequenceLSTMOptions,
- BuiltinOptions_FloorModOptions,
- BuiltinOptions_RangeOptions,
- BuiltinOptions_ResizeNearestNeighborOptions,
- BuiltinOptions_LeakyReluOptions,
- BuiltinOptions_SquaredDifferenceOptions,
- BuiltinOptions_MirrorPadOptions,
- BuiltinOptions_AbsOptions,
- BuiltinOptions_SplitVOptions};
- return values;
-}
-
-inline const char *const *EnumNamesBuiltinOptions()
-{
- static const char *const names[] = {"NONE",
- "Conv2DOptions",
- "DepthwiseConv2DOptions",
- "ConcatEmbeddingsOptions",
- "LSHProjectionOptions",
- "Pool2DOptions",
- "SVDFOptions",
- "RNNOptions",
- "FullyConnectedOptions",
- "SoftmaxOptions",
- "ConcatenationOptions",
- "AddOptions",
- "L2NormOptions",
- "LocalResponseNormalizationOptions",
- "LSTMOptions",
- "ResizeBilinearOptions",
- "CallOptions",
- "ReshapeOptions",
- "SkipGramOptions",
- "SpaceToDepthOptions",
- "EmbeddingLookupSparseOptions",
- "MulOptions",
- "PadOptions",
- "GatherOptions",
- "BatchToSpaceNDOptions",
- "SpaceToBatchNDOptions",
- "TransposeOptions",
- "ReducerOptions",
- "SubOptions",
- "DivOptions",
- "SqueezeOptions",
- "SequenceRNNOptions",
- "StridedSliceOptions",
- "ExpOptions",
- "TopKV2Options",
- "SplitOptions",
- "LogSoftmaxOptions",
- "CastOptions",
- "DequantizeOptions",
- "MaximumMinimumOptions",
- "ArgMaxOptions",
- "LessOptions",
- "NegOptions",
- "PadV2Options",
- "GreaterOptions",
- "GreaterEqualOptions",
- "LessEqualOptions",
- "SelectOptions",
- "SliceOptions",
- "TransposeConvOptions",
- "SparseToDenseOptions",
- "TileOptions",
- "ExpandDimsOptions",
- "EqualOptions",
- "NotEqualOptions",
- "ShapeOptions",
- "PowOptions",
- "ArgMinOptions",
- "FakeQuantOptions",
- "PackOptions",
- "LogicalOrOptions",
- "OneHotOptions",
- "LogicalAndOptions",
- "LogicalNotOptions",
- "UnpackOptions",
- "FloorDivOptions",
- "SquareOptions",
- "ZerosLikeOptions",
- "FillOptions",
- "BidirectionalSequenceLSTMOptions",
- "BidirectionalSequenceRNNOptions",
- "UnidirectionalSequenceLSTMOptions",
- "FloorModOptions",
- "RangeOptions",
- "ResizeNearestNeighborOptions",
- "LeakyReluOptions",
- "SquaredDifferenceOptions",
- "MirrorPadOptions",
- "AbsOptions",
- "SplitVOptions",
- nullptr};
- return names;
-}
-
-inline const char *EnumNameBuiltinOptions(BuiltinOptions e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesBuiltinOptions()[index];
-}
-
-template <typename T> struct BuiltinOptionsTraits
-{
- static const BuiltinOptions enum_value = BuiltinOptions_NONE;
-};
-
-template <> struct BuiltinOptionsTraits<Conv2DOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_Conv2DOptions;
-};
-
-template <> struct BuiltinOptionsTraits<DepthwiseConv2DOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_DepthwiseConv2DOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ConcatEmbeddingsOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ConcatEmbeddingsOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LSHProjectionOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LSHProjectionOptions;
-};
-
-template <> struct BuiltinOptionsTraits<Pool2DOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_Pool2DOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SVDFOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SVDFOptions;
-};
-
-template <> struct BuiltinOptionsTraits<RNNOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_RNNOptions;
-};
-
-template <> struct BuiltinOptionsTraits<FullyConnectedOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_FullyConnectedOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SoftmaxOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SoftmaxOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ConcatenationOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ConcatenationOptions;
-};
-
-template <> struct BuiltinOptionsTraits<AddOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_AddOptions;
-};
-
-template <> struct BuiltinOptionsTraits<L2NormOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_L2NormOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LocalResponseNormalizationOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LocalResponseNormalizationOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LSTMOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LSTMOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ResizeBilinearOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ResizeBilinearOptions;
-};
-
-template <> struct BuiltinOptionsTraits<CallOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_CallOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ReshapeOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ReshapeOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SkipGramOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SkipGramOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SpaceToDepthOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SpaceToDepthOptions;
-};
-
-template <> struct BuiltinOptionsTraits<EmbeddingLookupSparseOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_EmbeddingLookupSparseOptions;
-};
-
-template <> struct BuiltinOptionsTraits<MulOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_MulOptions;
-};
-
-template <> struct BuiltinOptionsTraits<PadOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_PadOptions;
-};
-
-template <> struct BuiltinOptionsTraits<GatherOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_GatherOptions;
-};
-
-template <> struct BuiltinOptionsTraits<BatchToSpaceNDOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_BatchToSpaceNDOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SpaceToBatchNDOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SpaceToBatchNDOptions;
-};
-
-template <> struct BuiltinOptionsTraits<TransposeOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_TransposeOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ReducerOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ReducerOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SubOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SubOptions;
-};
-
-template <> struct BuiltinOptionsTraits<DivOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_DivOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SqueezeOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SqueezeOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SequenceRNNOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SequenceRNNOptions;
-};
-
-template <> struct BuiltinOptionsTraits<StridedSliceOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_StridedSliceOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ExpOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ExpOptions;
-};
-
-template <> struct BuiltinOptionsTraits<TopKV2Options>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_TopKV2Options;
-};
-
-template <> struct BuiltinOptionsTraits<SplitOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SplitOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LogSoftmaxOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LogSoftmaxOptions;
-};
-
-template <> struct BuiltinOptionsTraits<CastOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_CastOptions;
-};
-
-template <> struct BuiltinOptionsTraits<DequantizeOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_DequantizeOptions;
-};
-
-template <> struct BuiltinOptionsTraits<MaximumMinimumOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_MaximumMinimumOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ArgMaxOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ArgMaxOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LessOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LessOptions;
-};
-
-template <> struct BuiltinOptionsTraits<NegOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_NegOptions;
-};
-
-template <> struct BuiltinOptionsTraits<PadV2Options>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_PadV2Options;
-};
-
-template <> struct BuiltinOptionsTraits<GreaterOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_GreaterOptions;
-};
-
-template <> struct BuiltinOptionsTraits<GreaterEqualOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_GreaterEqualOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LessEqualOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LessEqualOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SelectOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SelectOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SliceOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SliceOptions;
-};
-
-template <> struct BuiltinOptionsTraits<TransposeConvOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_TransposeConvOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SparseToDenseOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SparseToDenseOptions;
-};
-
-template <> struct BuiltinOptionsTraits<TileOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_TileOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ExpandDimsOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ExpandDimsOptions;
-};
-
-template <> struct BuiltinOptionsTraits<EqualOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_EqualOptions;
-};
-
-template <> struct BuiltinOptionsTraits<NotEqualOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_NotEqualOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ShapeOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ShapeOptions;
-};
-
-template <> struct BuiltinOptionsTraits<PowOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_PowOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ArgMinOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ArgMinOptions;
-};
-
-template <> struct BuiltinOptionsTraits<FakeQuantOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_FakeQuantOptions;
-};
-
-template <> struct BuiltinOptionsTraits<PackOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_PackOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LogicalOrOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LogicalOrOptions;
-};
-
-template <> struct BuiltinOptionsTraits<OneHotOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_OneHotOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LogicalAndOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LogicalAndOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LogicalNotOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LogicalNotOptions;
-};
-
-template <> struct BuiltinOptionsTraits<UnpackOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_UnpackOptions;
-};
-
-template <> struct BuiltinOptionsTraits<FloorDivOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_FloorDivOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SquareOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SquareOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ZerosLikeOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ZerosLikeOptions;
-};
-
-template <> struct BuiltinOptionsTraits<FillOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_FillOptions;
-};
-
-template <> struct BuiltinOptionsTraits<BidirectionalSequenceLSTMOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_BidirectionalSequenceLSTMOptions;
-};
-
-template <> struct BuiltinOptionsTraits<BidirectionalSequenceRNNOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_BidirectionalSequenceRNNOptions;
-};
-
-template <> struct BuiltinOptionsTraits<UnidirectionalSequenceLSTMOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_UnidirectionalSequenceLSTMOptions;
-};
-
-template <> struct BuiltinOptionsTraits<FloorModOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_FloorModOptions;
-};
-
-template <> struct BuiltinOptionsTraits<RangeOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_RangeOptions;
-};
-
-template <> struct BuiltinOptionsTraits<ResizeNearestNeighborOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_ResizeNearestNeighborOptions;
-};
-
-template <> struct BuiltinOptionsTraits<LeakyReluOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_LeakyReluOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SquaredDifferenceOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SquaredDifferenceOptions;
-};
-
-template <> struct BuiltinOptionsTraits<MirrorPadOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_MirrorPadOptions;
-};
-
-template <> struct BuiltinOptionsTraits<AbsOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_AbsOptions;
-};
-
-template <> struct BuiltinOptionsTraits<SplitVOptions>
-{
- static const BuiltinOptions enum_value = BuiltinOptions_SplitVOptions;
-};
-
-bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions type);
-bool VerifyBuiltinOptionsVector(flatbuffers::Verifier &verifier,
- const flatbuffers::Vector<flatbuffers::Offset<void>> *values,
- const flatbuffers::Vector<uint8_t> *types);
-
-enum Padding
-{
- Padding_SAME = 0,
- Padding_VALID = 1,
- Padding_MIN = Padding_SAME,
- Padding_MAX = Padding_VALID
-};
-
-inline const Padding (&EnumValuesPadding())[2]
-{
- static const Padding values[] = {Padding_SAME, Padding_VALID};
- return values;
-}
-
-inline const char *const *EnumNamesPadding()
-{
- static const char *const names[] = {"SAME", "VALID", nullptr};
- return names;
-}
-
-inline const char *EnumNamePadding(Padding e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesPadding()[index];
-}
-
-enum ActivationFunctionType
-{
- ActivationFunctionType_NONE = 0,
- ActivationFunctionType_RELU = 1,
- ActivationFunctionType_RELU_N1_TO_1 = 2,
- ActivationFunctionType_RELU6 = 3,
- ActivationFunctionType_TANH = 4,
- ActivationFunctionType_SIGN_BIT = 5,
- ActivationFunctionType_MIN = ActivationFunctionType_NONE,
- ActivationFunctionType_MAX = ActivationFunctionType_SIGN_BIT
-};
-
-inline const ActivationFunctionType (&EnumValuesActivationFunctionType())[6]
-{
- static const ActivationFunctionType values[] = {
- ActivationFunctionType_NONE, ActivationFunctionType_RELU,
- ActivationFunctionType_RELU_N1_TO_1, ActivationFunctionType_RELU6,
- ActivationFunctionType_TANH, ActivationFunctionType_SIGN_BIT};
- return values;
-}
-
-inline const char *const *EnumNamesActivationFunctionType()
-{
- static const char *const names[] = {"NONE", "RELU", "RELU_N1_TO_1", "RELU6",
- "TANH", "SIGN_BIT", nullptr};
- return names;
-}
-
-inline const char *EnumNameActivationFunctionType(ActivationFunctionType e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesActivationFunctionType()[index];
-}
-
-enum LSHProjectionType
-{
- LSHProjectionType_UNKNOWN = 0,
- LSHProjectionType_SPARSE = 1,
- LSHProjectionType_DENSE = 2,
- LSHProjectionType_MIN = LSHProjectionType_UNKNOWN,
- LSHProjectionType_MAX = LSHProjectionType_DENSE
-};
-
-inline const LSHProjectionType (&EnumValuesLSHProjectionType())[3]
-{
- static const LSHProjectionType values[] = {LSHProjectionType_UNKNOWN, LSHProjectionType_SPARSE,
- LSHProjectionType_DENSE};
- return values;
-}
-
-inline const char *const *EnumNamesLSHProjectionType()
-{
- static const char *const names[] = {"UNKNOWN", "SPARSE", "DENSE", nullptr};
- return names;
-}
-
-inline const char *EnumNameLSHProjectionType(LSHProjectionType e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesLSHProjectionType()[index];
-}
-
-enum FullyConnectedOptionsWeightsFormat
-{
- FullyConnectedOptionsWeightsFormat_DEFAULT = 0,
- FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8 = 1,
- FullyConnectedOptionsWeightsFormat_MIN = FullyConnectedOptionsWeightsFormat_DEFAULT,
- FullyConnectedOptionsWeightsFormat_MAX = FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8
-};
-
-inline const FullyConnectedOptionsWeightsFormat (&EnumValuesFullyConnectedOptionsWeightsFormat())[2]
-{
- static const FullyConnectedOptionsWeightsFormat values[] = {
- FullyConnectedOptionsWeightsFormat_DEFAULT,
- FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8};
- return values;
-}
-
-inline const char *const *EnumNamesFullyConnectedOptionsWeightsFormat()
-{
- static const char *const names[] = {"DEFAULT", "SHUFFLED4x16INT8", nullptr};
- return names;
-}
-
-inline const char *EnumNameFullyConnectedOptionsWeightsFormat(FullyConnectedOptionsWeightsFormat e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesFullyConnectedOptionsWeightsFormat()[index];
-}
-
-enum LSTMKernelType
-{
- LSTMKernelType_FULL = 0,
- LSTMKernelType_BASIC = 1,
- LSTMKernelType_MIN = LSTMKernelType_FULL,
- LSTMKernelType_MAX = LSTMKernelType_BASIC
-};
-
-inline const LSTMKernelType (&EnumValuesLSTMKernelType())[2]
-{
- static const LSTMKernelType values[] = {LSTMKernelType_FULL, LSTMKernelType_BASIC};
- return values;
-}
-
-inline const char *const *EnumNamesLSTMKernelType()
-{
- static const char *const names[] = {"FULL", "BASIC", nullptr};
- return names;
-}
-
-inline const char *EnumNameLSTMKernelType(LSTMKernelType e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesLSTMKernelType()[index];
-}
-
-enum CombinerType
-{
- CombinerType_SUM = 0,
- CombinerType_MEAN = 1,
- CombinerType_SQRTN = 2,
- CombinerType_MIN = CombinerType_SUM,
- CombinerType_MAX = CombinerType_SQRTN
-};
-
-inline const CombinerType (&EnumValuesCombinerType())[3]
-{
- static const CombinerType values[] = {CombinerType_SUM, CombinerType_MEAN, CombinerType_SQRTN};
- return values;
-}
-
-inline const char *const *EnumNamesCombinerType()
-{
- static const char *const names[] = {"SUM", "MEAN", "SQRTN", nullptr};
- return names;
-}
-
-inline const char *EnumNameCombinerType(CombinerType e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesCombinerType()[index];
-}
-
-enum MirrorPadMode
-{
- MirrorPadMode_REFLECT = 0,
- MirrorPadMode_SYMMETRIC = 1,
- MirrorPadMode_MIN = MirrorPadMode_REFLECT,
- MirrorPadMode_MAX = MirrorPadMode_SYMMETRIC
-};
-
-inline const MirrorPadMode (&EnumValuesMirrorPadMode())[2]
-{
- static const MirrorPadMode values[] = {MirrorPadMode_REFLECT, MirrorPadMode_SYMMETRIC};
- return values;
-}
-
-inline const char *const *EnumNamesMirrorPadMode()
-{
- static const char *const names[] = {"REFLECT", "SYMMETRIC", nullptr};
- return names;
-}
-
-inline const char *EnumNameMirrorPadMode(MirrorPadMode e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesMirrorPadMode()[index];
-}
-
-enum CustomOptionsFormat
-{
- CustomOptionsFormat_FLEXBUFFERS = 0,
- CustomOptionsFormat_MIN = CustomOptionsFormat_FLEXBUFFERS,
- CustomOptionsFormat_MAX = CustomOptionsFormat_FLEXBUFFERS
-};
-
-inline const CustomOptionsFormat (&EnumValuesCustomOptionsFormat())[1]
-{
- static const CustomOptionsFormat values[] = {CustomOptionsFormat_FLEXBUFFERS};
- return values;
-}
-
-inline const char *const *EnumNamesCustomOptionsFormat()
-{
- static const char *const names[] = {"FLEXBUFFERS", nullptr};
- return names;
-}
-
-inline const char *EnumNameCustomOptionsFormat(CustomOptionsFormat e)
-{
- const size_t index = static_cast<int>(e);
- return EnumNamesCustomOptionsFormat()[index];
-}
-
-struct CustomQuantization FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_CUSTOM = 4
- };
- const flatbuffers::Vector<uint8_t> *custom() const
- {
- return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_CUSTOM);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_CUSTOM) &&
- verifier.VerifyVector(custom()) && verifier.EndTable();
- }
-};
-
-struct CustomQuantizationBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_custom(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom)
- {
- fbb_.AddOffset(CustomQuantization::VT_CUSTOM, custom);
- }
- explicit CustomQuantizationBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- CustomQuantizationBuilder &operator=(const CustomQuantizationBuilder &);
- flatbuffers::Offset<CustomQuantization> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<CustomQuantization>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<CustomQuantization>
-CreateCustomQuantization(flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom = 0)
-{
- CustomQuantizationBuilder builder_(_fbb);
- builder_.add_custom(custom);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<CustomQuantization>
-CreateCustomQuantizationDirect(flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<uint8_t> *custom = nullptr)
-{
- return neurun_tflite::CreateCustomQuantization(_fbb,
- custom ? _fbb.CreateVector<uint8_t>(*custom) : 0);
-}
-
-struct QuantizationParameters FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_MIN = 4,
- VT_MAX = 6,
- VT_SCALE = 8,
- VT_ZERO_POINT = 10,
- VT_DETAILS_TYPE = 12,
- VT_DETAILS = 14
- };
- const flatbuffers::Vector<float> *min() const
- {
- return GetPointer<const flatbuffers::Vector<float> *>(VT_MIN);
- }
- const flatbuffers::Vector<float> *max() const
- {
- return GetPointer<const flatbuffers::Vector<float> *>(VT_MAX);
- }
- const flatbuffers::Vector<float> *scale() const
- {
- return GetPointer<const flatbuffers::Vector<float> *>(VT_SCALE);
- }
- const flatbuffers::Vector<int64_t> *zero_point() const
- {
- return GetPointer<const flatbuffers::Vector<int64_t> *>(VT_ZERO_POINT);
- }
- QuantizationDetails details_type() const
- {
- return static_cast<QuantizationDetails>(GetField<uint8_t>(VT_DETAILS_TYPE, 0));
- }
- const void *details() const { return GetPointer<const void *>(VT_DETAILS); }
- template <typename T> const T *details_as() const;
- const CustomQuantization *details_as_CustomQuantization() const
- {
- return details_type() == QuantizationDetails_CustomQuantization
- ? static_cast<const CustomQuantization *>(details())
- : nullptr;
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_MIN) &&
- verifier.VerifyVector(min()) && VerifyOffset(verifier, VT_MAX) &&
- verifier.VerifyVector(max()) && VerifyOffset(verifier, VT_SCALE) &&
- verifier.VerifyVector(scale()) && VerifyOffset(verifier, VT_ZERO_POINT) &&
- verifier.VerifyVector(zero_point()) && VerifyField<uint8_t>(verifier, VT_DETAILS_TYPE) &&
- VerifyOffset(verifier, VT_DETAILS) &&
- VerifyQuantizationDetails(verifier, details(), details_type()) && verifier.EndTable();
- }
-};
-
-template <>
-inline const CustomQuantization *QuantizationParameters::details_as<CustomQuantization>() const
-{
- return details_as_CustomQuantization();
-}
-
-struct QuantizationParametersBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_min(flatbuffers::Offset<flatbuffers::Vector<float>> min)
- {
- fbb_.AddOffset(QuantizationParameters::VT_MIN, min);
- }
- void add_max(flatbuffers::Offset<flatbuffers::Vector<float>> max)
- {
- fbb_.AddOffset(QuantizationParameters::VT_MAX, max);
- }
- void add_scale(flatbuffers::Offset<flatbuffers::Vector<float>> scale)
- {
- fbb_.AddOffset(QuantizationParameters::VT_SCALE, scale);
- }
- void add_zero_point(flatbuffers::Offset<flatbuffers::Vector<int64_t>> zero_point)
- {
- fbb_.AddOffset(QuantizationParameters::VT_ZERO_POINT, zero_point);
- }
- void add_details_type(QuantizationDetails details_type)
- {
- fbb_.AddElement<uint8_t>(QuantizationParameters::VT_DETAILS_TYPE,
- static_cast<uint8_t>(details_type), 0);
- }
- void add_details(flatbuffers::Offset<void> details)
- {
- fbb_.AddOffset(QuantizationParameters::VT_DETAILS, details);
- }
- explicit QuantizationParametersBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- QuantizationParametersBuilder &operator=(const QuantizationParametersBuilder &);
- flatbuffers::Offset<QuantizationParameters> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<QuantizationParameters>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<QuantizationParameters>
-CreateQuantizationParameters(flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<float>> min = 0,
- flatbuffers::Offset<flatbuffers::Vector<float>> max = 0,
- flatbuffers::Offset<flatbuffers::Vector<float>> scale = 0,
- flatbuffers::Offset<flatbuffers::Vector<int64_t>> zero_point = 0,
- QuantizationDetails details_type = QuantizationDetails_NONE,
- flatbuffers::Offset<void> details = 0)
-{
- QuantizationParametersBuilder builder_(_fbb);
- builder_.add_details(details);
- builder_.add_zero_point(zero_point);
- builder_.add_scale(scale);
- builder_.add_max(max);
- builder_.add_min(min);
- builder_.add_details_type(details_type);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<QuantizationParameters> CreateQuantizationParametersDirect(
- flatbuffers::FlatBufferBuilder &_fbb, const std::vector<float> *min = nullptr,
- const std::vector<float> *max = nullptr, const std::vector<float> *scale = nullptr,
- const std::vector<int64_t> *zero_point = nullptr,
- QuantizationDetails details_type = QuantizationDetails_NONE,
- flatbuffers::Offset<void> details = 0)
-{
- return neurun_tflite::CreateQuantizationParameters(
- _fbb, min ? _fbb.CreateVector<float>(*min) : 0, max ? _fbb.CreateVector<float>(*max) : 0,
- scale ? _fbb.CreateVector<float>(*scale) : 0,
- zero_point ? _fbb.CreateVector<int64_t>(*zero_point) : 0, details_type, details);
-}
-
-struct Tensor FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_SHAPE = 4,
- VT_TYPE = 6,
- VT_BUFFER = 8,
- VT_NAME = 10,
- VT_QUANTIZATION = 12,
- VT_IS_VARIABLE = 14
- };
- const flatbuffers::Vector<int32_t> *shape() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_SHAPE);
- }
- TensorType type() const { return static_cast<TensorType>(GetField<int8_t>(VT_TYPE, 0)); }
- uint32_t buffer() const { return GetField<uint32_t>(VT_BUFFER, 0); }
- const flatbuffers::String *name() const
- {
- return GetPointer<const flatbuffers::String *>(VT_NAME);
- }
- const QuantizationParameters *quantization() const
- {
- return GetPointer<const QuantizationParameters *>(VT_QUANTIZATION);
- }
- bool is_variable() const { return GetField<uint8_t>(VT_IS_VARIABLE, 0) != 0; }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_SHAPE) &&
- verifier.VerifyVector(shape()) && VerifyField<int8_t>(verifier, VT_TYPE) &&
- VerifyField<uint32_t>(verifier, VT_BUFFER) && VerifyOffset(verifier, VT_NAME) &&
- verifier.VerifyString(name()) && VerifyOffset(verifier, VT_QUANTIZATION) &&
- verifier.VerifyTable(quantization()) && VerifyField<uint8_t>(verifier, VT_IS_VARIABLE) &&
- verifier.EndTable();
- }
-};
-
-struct TensorBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_shape(flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape)
- {
- fbb_.AddOffset(Tensor::VT_SHAPE, shape);
- }
- void add_type(TensorType type)
- {
- fbb_.AddElement<int8_t>(Tensor::VT_TYPE, static_cast<int8_t>(type), 0);
- }
- void add_buffer(uint32_t buffer) { fbb_.AddElement<uint32_t>(Tensor::VT_BUFFER, buffer, 0); }
- void add_name(flatbuffers::Offset<flatbuffers::String> name)
- {
- fbb_.AddOffset(Tensor::VT_NAME, name);
- }
- void add_quantization(flatbuffers::Offset<QuantizationParameters> quantization)
- {
- fbb_.AddOffset(Tensor::VT_QUANTIZATION, quantization);
- }
- void add_is_variable(bool is_variable)
- {
- fbb_.AddElement<uint8_t>(Tensor::VT_IS_VARIABLE, static_cast<uint8_t>(is_variable), 0);
- }
- explicit TensorBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- TensorBuilder &operator=(const TensorBuilder &);
- flatbuffers::Offset<Tensor> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<Tensor>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<Tensor>
-CreateTensor(flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape = 0,
- TensorType type = TensorType_FLOAT32, uint32_t buffer = 0,
- flatbuffers::Offset<flatbuffers::String> name = 0,
- flatbuffers::Offset<QuantizationParameters> quantization = 0, bool is_variable = false)
-{
- TensorBuilder builder_(_fbb);
- builder_.add_quantization(quantization);
- builder_.add_name(name);
- builder_.add_buffer(buffer);
- builder_.add_shape(shape);
- builder_.add_is_variable(is_variable);
- builder_.add_type(type);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<Tensor> CreateTensorDirect(
- flatbuffers::FlatBufferBuilder &_fbb, const std::vector<int32_t> *shape = nullptr,
- TensorType type = TensorType_FLOAT32, uint32_t buffer = 0, const char *name = nullptr,
- flatbuffers::Offset<QuantizationParameters> quantization = 0, bool is_variable = false)
-{
- return neurun_tflite::CreateTensor(_fbb, shape ? _fbb.CreateVector<int32_t>(*shape) : 0, type,
- buffer, name ? _fbb.CreateString(name) : 0, quantization,
- is_variable);
-}
-
-struct Conv2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_PADDING = 4,
- VT_STRIDE_W = 6,
- VT_STRIDE_H = 8,
- VT_FUSED_ACTIVATION_FUNCTION = 10,
- VT_DILATION_W_FACTOR = 12,
- VT_DILATION_H_FACTOR = 14
- };
- Padding padding() const { return static_cast<Padding>(GetField<int8_t>(VT_PADDING, 0)); }
- int32_t stride_w() const { return GetField<int32_t>(VT_STRIDE_W, 0); }
- int32_t stride_h() const { return GetField<int32_t>(VT_STRIDE_H, 0); }
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- int32_t dilation_w_factor() const { return GetField<int32_t>(VT_DILATION_W_FACTOR, 1); }
- int32_t dilation_h_factor() const { return GetField<int32_t>(VT_DILATION_H_FACTOR, 1); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_PADDING) &&
- VerifyField<int32_t>(verifier, VT_STRIDE_W) &&
- VerifyField<int32_t>(verifier, VT_STRIDE_H) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
- VerifyField<int32_t>(verifier, VT_DILATION_W_FACTOR) &&
- VerifyField<int32_t>(verifier, VT_DILATION_H_FACTOR) && verifier.EndTable();
- }
-};
-
-struct Conv2DOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_padding(Padding padding)
- {
- fbb_.AddElement<int8_t>(Conv2DOptions::VT_PADDING, static_cast<int8_t>(padding), 0);
- }
- void add_stride_w(int32_t stride_w)
- {
- fbb_.AddElement<int32_t>(Conv2DOptions::VT_STRIDE_W, stride_w, 0);
- }
- void add_stride_h(int32_t stride_h)
- {
- fbb_.AddElement<int32_t>(Conv2DOptions::VT_STRIDE_H, stride_h, 0);
- }
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(Conv2DOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- void add_dilation_w_factor(int32_t dilation_w_factor)
- {
- fbb_.AddElement<int32_t>(Conv2DOptions::VT_DILATION_W_FACTOR, dilation_w_factor, 1);
- }
- void add_dilation_h_factor(int32_t dilation_h_factor)
- {
- fbb_.AddElement<int32_t>(Conv2DOptions::VT_DILATION_H_FACTOR, dilation_h_factor, 1);
- }
- explicit Conv2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- Conv2DOptionsBuilder &operator=(const Conv2DOptionsBuilder &);
- flatbuffers::Offset<Conv2DOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<Conv2DOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<Conv2DOptions>
-CreateConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb, Padding padding = Padding_SAME,
- int32_t stride_w = 0, int32_t stride_h = 0,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE,
- int32_t dilation_w_factor = 1, int32_t dilation_h_factor = 1)
-{
- Conv2DOptionsBuilder builder_(_fbb);
- builder_.add_dilation_h_factor(dilation_h_factor);
- builder_.add_dilation_w_factor(dilation_w_factor);
- builder_.add_stride_h(stride_h);
- builder_.add_stride_w(stride_w);
- builder_.add_fused_activation_function(fused_activation_function);
- builder_.add_padding(padding);
- return builder_.Finish();
-}
-
-struct Pool2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_PADDING = 4,
- VT_STRIDE_W = 6,
- VT_STRIDE_H = 8,
- VT_FILTER_WIDTH = 10,
- VT_FILTER_HEIGHT = 12,
- VT_FUSED_ACTIVATION_FUNCTION = 14
- };
- Padding padding() const { return static_cast<Padding>(GetField<int8_t>(VT_PADDING, 0)); }
- int32_t stride_w() const { return GetField<int32_t>(VT_STRIDE_W, 0); }
- int32_t stride_h() const { return GetField<int32_t>(VT_STRIDE_H, 0); }
- int32_t filter_width() const { return GetField<int32_t>(VT_FILTER_WIDTH, 0); }
- int32_t filter_height() const { return GetField<int32_t>(VT_FILTER_HEIGHT, 0); }
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_PADDING) &&
- VerifyField<int32_t>(verifier, VT_STRIDE_W) &&
- VerifyField<int32_t>(verifier, VT_STRIDE_H) &&
- VerifyField<int32_t>(verifier, VT_FILTER_WIDTH) &&
- VerifyField<int32_t>(verifier, VT_FILTER_HEIGHT) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
- }
-};
-
-struct Pool2DOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_padding(Padding padding)
- {
- fbb_.AddElement<int8_t>(Pool2DOptions::VT_PADDING, static_cast<int8_t>(padding), 0);
- }
- void add_stride_w(int32_t stride_w)
- {
- fbb_.AddElement<int32_t>(Pool2DOptions::VT_STRIDE_W, stride_w, 0);
- }
- void add_stride_h(int32_t stride_h)
- {
- fbb_.AddElement<int32_t>(Pool2DOptions::VT_STRIDE_H, stride_h, 0);
- }
- void add_filter_width(int32_t filter_width)
- {
- fbb_.AddElement<int32_t>(Pool2DOptions::VT_FILTER_WIDTH, filter_width, 0);
- }
- void add_filter_height(int32_t filter_height)
- {
- fbb_.AddElement<int32_t>(Pool2DOptions::VT_FILTER_HEIGHT, filter_height, 0);
- }
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(Pool2DOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- explicit Pool2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- Pool2DOptionsBuilder &operator=(const Pool2DOptionsBuilder &);
- flatbuffers::Offset<Pool2DOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<Pool2DOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<Pool2DOptions>
-CreatePool2DOptions(flatbuffers::FlatBufferBuilder &_fbb, Padding padding = Padding_SAME,
- int32_t stride_w = 0, int32_t stride_h = 0, int32_t filter_width = 0,
- int32_t filter_height = 0,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE)
-{
- Pool2DOptionsBuilder builder_(_fbb);
- builder_.add_filter_height(filter_height);
- builder_.add_filter_width(filter_width);
- builder_.add_stride_h(stride_h);
- builder_.add_stride_w(stride_w);
- builder_.add_fused_activation_function(fused_activation_function);
- builder_.add_padding(padding);
- return builder_.Finish();
-}
-
-struct DepthwiseConv2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_PADDING = 4,
- VT_STRIDE_W = 6,
- VT_STRIDE_H = 8,
- VT_DEPTH_MULTIPLIER = 10,
- VT_FUSED_ACTIVATION_FUNCTION = 12,
- VT_DILATION_W_FACTOR = 14,
- VT_DILATION_H_FACTOR = 16
- };
- Padding padding() const { return static_cast<Padding>(GetField<int8_t>(VT_PADDING, 0)); }
- int32_t stride_w() const { return GetField<int32_t>(VT_STRIDE_W, 0); }
- int32_t stride_h() const { return GetField<int32_t>(VT_STRIDE_H, 0); }
- int32_t depth_multiplier() const { return GetField<int32_t>(VT_DEPTH_MULTIPLIER, 0); }
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- int32_t dilation_w_factor() const { return GetField<int32_t>(VT_DILATION_W_FACTOR, 1); }
- int32_t dilation_h_factor() const { return GetField<int32_t>(VT_DILATION_H_FACTOR, 1); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_PADDING) &&
- VerifyField<int32_t>(verifier, VT_STRIDE_W) &&
- VerifyField<int32_t>(verifier, VT_STRIDE_H) &&
- VerifyField<int32_t>(verifier, VT_DEPTH_MULTIPLIER) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
- VerifyField<int32_t>(verifier, VT_DILATION_W_FACTOR) &&
- VerifyField<int32_t>(verifier, VT_DILATION_H_FACTOR) && verifier.EndTable();
- }
-};
-
-struct DepthwiseConv2DOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_padding(Padding padding)
- {
- fbb_.AddElement<int8_t>(DepthwiseConv2DOptions::VT_PADDING, static_cast<int8_t>(padding), 0);
- }
- void add_stride_w(int32_t stride_w)
- {
- fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_STRIDE_W, stride_w, 0);
- }
- void add_stride_h(int32_t stride_h)
- {
- fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_STRIDE_H, stride_h, 0);
- }
- void add_depth_multiplier(int32_t depth_multiplier)
- {
- fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_DEPTH_MULTIPLIER, depth_multiplier, 0);
- }
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(DepthwiseConv2DOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- void add_dilation_w_factor(int32_t dilation_w_factor)
- {
- fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_DILATION_W_FACTOR, dilation_w_factor, 1);
- }
- void add_dilation_h_factor(int32_t dilation_h_factor)
- {
- fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_DILATION_H_FACTOR, dilation_h_factor, 1);
- }
- explicit DepthwiseConv2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- DepthwiseConv2DOptionsBuilder &operator=(const DepthwiseConv2DOptionsBuilder &);
- flatbuffers::Offset<DepthwiseConv2DOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<DepthwiseConv2DOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<DepthwiseConv2DOptions> CreateDepthwiseConv2DOptions(
- flatbuffers::FlatBufferBuilder &_fbb, Padding padding = Padding_SAME, int32_t stride_w = 0,
- int32_t stride_h = 0, int32_t depth_multiplier = 0,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE,
- int32_t dilation_w_factor = 1, int32_t dilation_h_factor = 1)
-{
- DepthwiseConv2DOptionsBuilder builder_(_fbb);
- builder_.add_dilation_h_factor(dilation_h_factor);
- builder_.add_dilation_w_factor(dilation_w_factor);
- builder_.add_depth_multiplier(depth_multiplier);
- builder_.add_stride_h(stride_h);
- builder_.add_stride_w(stride_w);
- builder_.add_fused_activation_function(fused_activation_function);
- builder_.add_padding(padding);
- return builder_.Finish();
-}
-
-struct ConcatEmbeddingsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_NUM_CHANNELS = 4,
- VT_NUM_COLUMNS_PER_CHANNEL = 6,
- VT_EMBEDDING_DIM_PER_CHANNEL = 8
- };
- int32_t num_channels() const { return GetField<int32_t>(VT_NUM_CHANNELS, 0); }
- const flatbuffers::Vector<int32_t> *num_columns_per_channel() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_NUM_COLUMNS_PER_CHANNEL);
- }
- const flatbuffers::Vector<int32_t> *embedding_dim_per_channel() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_EMBEDDING_DIM_PER_CHANNEL);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_NUM_CHANNELS) &&
- VerifyOffset(verifier, VT_NUM_COLUMNS_PER_CHANNEL) &&
- verifier.VerifyVector(num_columns_per_channel()) &&
- VerifyOffset(verifier, VT_EMBEDDING_DIM_PER_CHANNEL) &&
- verifier.VerifyVector(embedding_dim_per_channel()) && verifier.EndTable();
- }
-};
-
-struct ConcatEmbeddingsOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_num_channels(int32_t num_channels)
- {
- fbb_.AddElement<int32_t>(ConcatEmbeddingsOptions::VT_NUM_CHANNELS, num_channels, 0);
- }
- void add_num_columns_per_channel(
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> num_columns_per_channel)
- {
- fbb_.AddOffset(ConcatEmbeddingsOptions::VT_NUM_COLUMNS_PER_CHANNEL, num_columns_per_channel);
- }
- void add_embedding_dim_per_channel(
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> embedding_dim_per_channel)
- {
- fbb_.AddOffset(ConcatEmbeddingsOptions::VT_EMBEDDING_DIM_PER_CHANNEL,
- embedding_dim_per_channel);
- }
- explicit ConcatEmbeddingsOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ConcatEmbeddingsOptionsBuilder &operator=(const ConcatEmbeddingsOptionsBuilder &);
- flatbuffers::Offset<ConcatEmbeddingsOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ConcatEmbeddingsOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ConcatEmbeddingsOptions> CreateConcatEmbeddingsOptions(
- flatbuffers::FlatBufferBuilder &_fbb, int32_t num_channels = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> num_columns_per_channel = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> embedding_dim_per_channel = 0)
-{
- ConcatEmbeddingsOptionsBuilder builder_(_fbb);
- builder_.add_embedding_dim_per_channel(embedding_dim_per_channel);
- builder_.add_num_columns_per_channel(num_columns_per_channel);
- builder_.add_num_channels(num_channels);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<ConcatEmbeddingsOptions>
-CreateConcatEmbeddingsOptionsDirect(flatbuffers::FlatBufferBuilder &_fbb, int32_t num_channels = 0,
- const std::vector<int32_t> *num_columns_per_channel = nullptr,
- const std::vector<int32_t> *embedding_dim_per_channel = nullptr)
-{
- return neurun_tflite::CreateConcatEmbeddingsOptions(
- _fbb, num_channels,
- num_columns_per_channel ? _fbb.CreateVector<int32_t>(*num_columns_per_channel) : 0,
- embedding_dim_per_channel ? _fbb.CreateVector<int32_t>(*embedding_dim_per_channel) : 0);
-}
-
-struct LSHProjectionOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_TYPE = 4
- };
- LSHProjectionType type() const
- {
- return static_cast<LSHProjectionType>(GetField<int8_t>(VT_TYPE, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_TYPE) &&
- verifier.EndTable();
- }
-};
-
-struct LSHProjectionOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_type(LSHProjectionType type)
- {
- fbb_.AddElement<int8_t>(LSHProjectionOptions::VT_TYPE, static_cast<int8_t>(type), 0);
- }
- explicit LSHProjectionOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- LSHProjectionOptionsBuilder &operator=(const LSHProjectionOptionsBuilder &);
- flatbuffers::Offset<LSHProjectionOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<LSHProjectionOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<LSHProjectionOptions>
-CreateLSHProjectionOptions(flatbuffers::FlatBufferBuilder &_fbb,
- LSHProjectionType type = LSHProjectionType_UNKNOWN)
-{
- LSHProjectionOptionsBuilder builder_(_fbb);
- builder_.add_type(type);
- return builder_.Finish();
-}
-
-struct SVDFOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_RANK = 4,
- VT_FUSED_ACTIVATION_FUNCTION = 6
- };
- int32_t rank() const { return GetField<int32_t>(VT_RANK, 0); }
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_RANK) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
- }
-};
-
-struct SVDFOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_rank(int32_t rank) { fbb_.AddElement<int32_t>(SVDFOptions::VT_RANK, rank, 0); }
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(SVDFOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- explicit SVDFOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SVDFOptionsBuilder &operator=(const SVDFOptionsBuilder &);
- flatbuffers::Offset<SVDFOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SVDFOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SVDFOptions>
-CreateSVDFOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t rank = 0,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE)
-{
- SVDFOptionsBuilder builder_(_fbb);
- builder_.add_rank(rank);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct RNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FUSED_ACTIVATION_FUNCTION = 4
- };
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
- }
-};
-
-struct RNNOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(RNNOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- explicit RNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- RNNOptionsBuilder &operator=(const RNNOptionsBuilder &);
- flatbuffers::Offset<RNNOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<RNNOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<RNNOptions>
-CreateRNNOptions(flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE)
-{
- RNNOptionsBuilder builder_(_fbb);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct SequenceRNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_TIME_MAJOR = 4,
- VT_FUSED_ACTIVATION_FUNCTION = 6
- };
- bool time_major() const { return GetField<uint8_t>(VT_TIME_MAJOR, 0) != 0; }
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_TIME_MAJOR) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
- }
-};
-
-struct SequenceRNNOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_time_major(bool time_major)
- {
- fbb_.AddElement<uint8_t>(SequenceRNNOptions::VT_TIME_MAJOR, static_cast<uint8_t>(time_major),
- 0);
- }
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(SequenceRNNOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- explicit SequenceRNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SequenceRNNOptionsBuilder &operator=(const SequenceRNNOptionsBuilder &);
- flatbuffers::Offset<SequenceRNNOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SequenceRNNOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SequenceRNNOptions> CreateSequenceRNNOptions(
- flatbuffers::FlatBufferBuilder &_fbb, bool time_major = false,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE)
-{
- SequenceRNNOptionsBuilder builder_(_fbb);
- builder_.add_fused_activation_function(fused_activation_function);
- builder_.add_time_major(time_major);
- return builder_.Finish();
-}
-
-struct BidirectionalSequenceRNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_TIME_MAJOR = 4,
- VT_FUSED_ACTIVATION_FUNCTION = 6,
- VT_MERGE_OUTPUTS = 8
- };
- bool time_major() const { return GetField<uint8_t>(VT_TIME_MAJOR, 0) != 0; }
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool merge_outputs() const { return GetField<uint8_t>(VT_MERGE_OUTPUTS, 0) != 0; }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_TIME_MAJOR) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
- VerifyField<uint8_t>(verifier, VT_MERGE_OUTPUTS) && verifier.EndTable();
- }
-};
-
-struct BidirectionalSequenceRNNOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_time_major(bool time_major)
- {
- fbb_.AddElement<uint8_t>(BidirectionalSequenceRNNOptions::VT_TIME_MAJOR,
- static_cast<uint8_t>(time_major), 0);
- }
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(BidirectionalSequenceRNNOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- void add_merge_outputs(bool merge_outputs)
- {
- fbb_.AddElement<uint8_t>(BidirectionalSequenceRNNOptions::VT_MERGE_OUTPUTS,
- static_cast<uint8_t>(merge_outputs), 0);
- }
- explicit BidirectionalSequenceRNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- BidirectionalSequenceRNNOptionsBuilder &operator=(const BidirectionalSequenceRNNOptionsBuilder &);
- flatbuffers::Offset<BidirectionalSequenceRNNOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<BidirectionalSequenceRNNOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<BidirectionalSequenceRNNOptions> CreateBidirectionalSequenceRNNOptions(
- flatbuffers::FlatBufferBuilder &_fbb, bool time_major = false,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE,
- bool merge_outputs = false)
-{
- BidirectionalSequenceRNNOptionsBuilder builder_(_fbb);
- builder_.add_merge_outputs(merge_outputs);
- builder_.add_fused_activation_function(fused_activation_function);
- builder_.add_time_major(time_major);
- return builder_.Finish();
-}
-
-struct FullyConnectedOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FUSED_ACTIVATION_FUNCTION = 4,
- VT_WEIGHTS_FORMAT = 6
- };
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- FullyConnectedOptionsWeightsFormat weights_format() const
- {
- return static_cast<FullyConnectedOptionsWeightsFormat>(GetField<int8_t>(VT_WEIGHTS_FORMAT, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
- VerifyField<int8_t>(verifier, VT_WEIGHTS_FORMAT) && verifier.EndTable();
- }
-};
-
-struct FullyConnectedOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(FullyConnectedOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- void add_weights_format(FullyConnectedOptionsWeightsFormat weights_format)
- {
- fbb_.AddElement<int8_t>(FullyConnectedOptions::VT_WEIGHTS_FORMAT,
- static_cast<int8_t>(weights_format), 0);
- }
- explicit FullyConnectedOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- FullyConnectedOptionsBuilder &operator=(const FullyConnectedOptionsBuilder &);
- flatbuffers::Offset<FullyConnectedOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<FullyConnectedOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<FullyConnectedOptions> CreateFullyConnectedOptions(
- flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE,
- FullyConnectedOptionsWeightsFormat weights_format = FullyConnectedOptionsWeightsFormat_DEFAULT)
-{
- FullyConnectedOptionsBuilder builder_(_fbb);
- builder_.add_weights_format(weights_format);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct SoftmaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_BETA = 4
- };
- float beta() const { return GetField<float>(VT_BETA, 0.0f); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<float>(verifier, VT_BETA) &&
- verifier.EndTable();
- }
-};
-
-struct SoftmaxOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_beta(float beta) { fbb_.AddElement<float>(SoftmaxOptions::VT_BETA, beta, 0.0f); }
- explicit SoftmaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SoftmaxOptionsBuilder &operator=(const SoftmaxOptionsBuilder &);
- flatbuffers::Offset<SoftmaxOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SoftmaxOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SoftmaxOptions>
-CreateSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, float beta = 0.0f)
-{
- SoftmaxOptionsBuilder builder_(_fbb);
- builder_.add_beta(beta);
- return builder_.Finish();
-}
-
-struct ConcatenationOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_AXIS = 4,
- VT_FUSED_ACTIVATION_FUNCTION = 6
- };
- int32_t axis() const { return GetField<int32_t>(VT_AXIS, 0); }
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_AXIS) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
- }
-};
-
-struct ConcatenationOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_axis(int32_t axis) { fbb_.AddElement<int32_t>(ConcatenationOptions::VT_AXIS, axis, 0); }
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(ConcatenationOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- explicit ConcatenationOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ConcatenationOptionsBuilder &operator=(const ConcatenationOptionsBuilder &);
- flatbuffers::Offset<ConcatenationOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ConcatenationOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ConcatenationOptions> CreateConcatenationOptions(
- flatbuffers::FlatBufferBuilder &_fbb, int32_t axis = 0,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE)
-{
- ConcatenationOptionsBuilder builder_(_fbb);
- builder_.add_axis(axis);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct AddOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FUSED_ACTIVATION_FUNCTION = 4
- };
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
- }
-};
-
-struct AddOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(AddOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- explicit AddOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- AddOptionsBuilder &operator=(const AddOptionsBuilder &);
- flatbuffers::Offset<AddOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<AddOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<AddOptions>
-CreateAddOptions(flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE)
-{
- AddOptionsBuilder builder_(_fbb);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct MulOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FUSED_ACTIVATION_FUNCTION = 4
- };
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
- }
-};
-
-struct MulOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(MulOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- explicit MulOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- MulOptionsBuilder &operator=(const MulOptionsBuilder &);
- flatbuffers::Offset<MulOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<MulOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<MulOptions>
-CreateMulOptions(flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE)
-{
- MulOptionsBuilder builder_(_fbb);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct L2NormOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FUSED_ACTIVATION_FUNCTION = 4
- };
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
- }
-};
-
-struct L2NormOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(L2NormOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- explicit L2NormOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- L2NormOptionsBuilder &operator=(const L2NormOptionsBuilder &);
- flatbuffers::Offset<L2NormOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<L2NormOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<L2NormOptions>
-CreateL2NormOptions(flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE)
-{
- L2NormOptionsBuilder builder_(_fbb);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct LocalResponseNormalizationOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_RADIUS = 4,
- VT_BIAS = 6,
- VT_ALPHA = 8,
- VT_BETA = 10
- };
- int32_t radius() const { return GetField<int32_t>(VT_RADIUS, 0); }
- float bias() const { return GetField<float>(VT_BIAS, 0.0f); }
- float alpha() const { return GetField<float>(VT_ALPHA, 0.0f); }
- float beta() const { return GetField<float>(VT_BETA, 0.0f); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_RADIUS) &&
- VerifyField<float>(verifier, VT_BIAS) && VerifyField<float>(verifier, VT_ALPHA) &&
- VerifyField<float>(verifier, VT_BETA) && verifier.EndTable();
- }
-};
-
-struct LocalResponseNormalizationOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_radius(int32_t radius)
- {
- fbb_.AddElement<int32_t>(LocalResponseNormalizationOptions::VT_RADIUS, radius, 0);
- }
- void add_bias(float bias)
- {
- fbb_.AddElement<float>(LocalResponseNormalizationOptions::VT_BIAS, bias, 0.0f);
- }
- void add_alpha(float alpha)
- {
- fbb_.AddElement<float>(LocalResponseNormalizationOptions::VT_ALPHA, alpha, 0.0f);
- }
- void add_beta(float beta)
- {
- fbb_.AddElement<float>(LocalResponseNormalizationOptions::VT_BETA, beta, 0.0f);
- }
- explicit LocalResponseNormalizationOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- LocalResponseNormalizationOptionsBuilder &
- operator=(const LocalResponseNormalizationOptionsBuilder &);
- flatbuffers::Offset<LocalResponseNormalizationOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<LocalResponseNormalizationOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<LocalResponseNormalizationOptions>
-CreateLocalResponseNormalizationOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t radius = 0,
- float bias = 0.0f, float alpha = 0.0f, float beta = 0.0f)
-{
- LocalResponseNormalizationOptionsBuilder builder_(_fbb);
- builder_.add_beta(beta);
- builder_.add_alpha(alpha);
- builder_.add_bias(bias);
- builder_.add_radius(radius);
- return builder_.Finish();
-}
-
-struct LSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FUSED_ACTIVATION_FUNCTION = 4,
- VT_CELL_CLIP = 6,
- VT_PROJ_CLIP = 8,
- VT_KERNEL_TYPE = 10
- };
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- float cell_clip() const { return GetField<float>(VT_CELL_CLIP, 0.0f); }
- float proj_clip() const { return GetField<float>(VT_PROJ_CLIP, 0.0f); }
- LSTMKernelType kernel_type() const
- {
- return static_cast<LSTMKernelType>(GetField<int8_t>(VT_KERNEL_TYPE, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
- VerifyField<float>(verifier, VT_CELL_CLIP) &&
- VerifyField<float>(verifier, VT_PROJ_CLIP) &&
- VerifyField<int8_t>(verifier, VT_KERNEL_TYPE) && verifier.EndTable();
- }
-};
-
-struct LSTMOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(LSTMOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- void add_cell_clip(float cell_clip)
- {
- fbb_.AddElement<float>(LSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f);
- }
- void add_proj_clip(float proj_clip)
- {
- fbb_.AddElement<float>(LSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f);
- }
- void add_kernel_type(LSTMKernelType kernel_type)
- {
- fbb_.AddElement<int8_t>(LSTMOptions::VT_KERNEL_TYPE, static_cast<int8_t>(kernel_type), 0);
- }
- explicit LSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- LSTMOptionsBuilder &operator=(const LSTMOptionsBuilder &);
- flatbuffers::Offset<LSTMOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<LSTMOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<LSTMOptions>
-CreateLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE,
- float cell_clip = 0.0f, float proj_clip = 0.0f,
- LSTMKernelType kernel_type = LSTMKernelType_FULL)
-{
- LSTMOptionsBuilder builder_(_fbb);
- builder_.add_proj_clip(proj_clip);
- builder_.add_cell_clip(cell_clip);
- builder_.add_kernel_type(kernel_type);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct UnidirectionalSequenceLSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FUSED_ACTIVATION_FUNCTION = 4,
- VT_CELL_CLIP = 6,
- VT_PROJ_CLIP = 8,
- VT_TIME_MAJOR = 10
- };
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- float cell_clip() const { return GetField<float>(VT_CELL_CLIP, 0.0f); }
- float proj_clip() const { return GetField<float>(VT_PROJ_CLIP, 0.0f); }
- bool time_major() const { return GetField<uint8_t>(VT_TIME_MAJOR, 0) != 0; }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
- VerifyField<float>(verifier, VT_CELL_CLIP) &&
- VerifyField<float>(verifier, VT_PROJ_CLIP) &&
- VerifyField<uint8_t>(verifier, VT_TIME_MAJOR) && verifier.EndTable();
- }
-};
-
-struct UnidirectionalSequenceLSTMOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(UnidirectionalSequenceLSTMOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- void add_cell_clip(float cell_clip)
- {
- fbb_.AddElement<float>(UnidirectionalSequenceLSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f);
- }
- void add_proj_clip(float proj_clip)
- {
- fbb_.AddElement<float>(UnidirectionalSequenceLSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f);
- }
- void add_time_major(bool time_major)
- {
- fbb_.AddElement<uint8_t>(UnidirectionalSequenceLSTMOptions::VT_TIME_MAJOR,
- static_cast<uint8_t>(time_major), 0);
- }
- explicit UnidirectionalSequenceLSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- UnidirectionalSequenceLSTMOptionsBuilder &
- operator=(const UnidirectionalSequenceLSTMOptionsBuilder &);
- flatbuffers::Offset<UnidirectionalSequenceLSTMOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<UnidirectionalSequenceLSTMOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<UnidirectionalSequenceLSTMOptions>
-CreateUnidirectionalSequenceLSTMOptions(
- flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE,
- float cell_clip = 0.0f, float proj_clip = 0.0f, bool time_major = false)
-{
- UnidirectionalSequenceLSTMOptionsBuilder builder_(_fbb);
- builder_.add_proj_clip(proj_clip);
- builder_.add_cell_clip(cell_clip);
- builder_.add_time_major(time_major);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct BidirectionalSequenceLSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FUSED_ACTIVATION_FUNCTION = 4,
- VT_CELL_CLIP = 6,
- VT_PROJ_CLIP = 8,
- VT_MERGE_OUTPUTS = 10
- };
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- float cell_clip() const { return GetField<float>(VT_CELL_CLIP, 0.0f); }
- float proj_clip() const { return GetField<float>(VT_PROJ_CLIP, 0.0f); }
- bool merge_outputs() const { return GetField<uint8_t>(VT_MERGE_OUTPUTS, 0) != 0; }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
- VerifyField<float>(verifier, VT_CELL_CLIP) &&
- VerifyField<float>(verifier, VT_PROJ_CLIP) &&
- VerifyField<uint8_t>(verifier, VT_MERGE_OUTPUTS) && verifier.EndTable();
- }
-};
-
-struct BidirectionalSequenceLSTMOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(BidirectionalSequenceLSTMOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- void add_cell_clip(float cell_clip)
- {
- fbb_.AddElement<float>(BidirectionalSequenceLSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f);
- }
- void add_proj_clip(float proj_clip)
- {
- fbb_.AddElement<float>(BidirectionalSequenceLSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f);
- }
- void add_merge_outputs(bool merge_outputs)
- {
- fbb_.AddElement<uint8_t>(BidirectionalSequenceLSTMOptions::VT_MERGE_OUTPUTS,
- static_cast<uint8_t>(merge_outputs), 0);
- }
- explicit BidirectionalSequenceLSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- BidirectionalSequenceLSTMOptionsBuilder &
- operator=(const BidirectionalSequenceLSTMOptionsBuilder &);
- flatbuffers::Offset<BidirectionalSequenceLSTMOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<BidirectionalSequenceLSTMOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<BidirectionalSequenceLSTMOptions> CreateBidirectionalSequenceLSTMOptions(
- flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE,
- float cell_clip = 0.0f, float proj_clip = 0.0f, bool merge_outputs = false)
-{
- BidirectionalSequenceLSTMOptionsBuilder builder_(_fbb);
- builder_.add_proj_clip(proj_clip);
- builder_.add_cell_clip(cell_clip);
- builder_.add_merge_outputs(merge_outputs);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct ResizeBilinearOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_ALIGN_CORNERS = 8
- };
- bool align_corners() const { return GetField<uint8_t>(VT_ALIGN_CORNERS, 0) != 0; }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_ALIGN_CORNERS) &&
- verifier.EndTable();
- }
-};
-
-struct ResizeBilinearOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_align_corners(bool align_corners)
- {
- fbb_.AddElement<uint8_t>(ResizeBilinearOptions::VT_ALIGN_CORNERS,
- static_cast<uint8_t>(align_corners), 0);
- }
- explicit ResizeBilinearOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ResizeBilinearOptionsBuilder &operator=(const ResizeBilinearOptionsBuilder &);
- flatbuffers::Offset<ResizeBilinearOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ResizeBilinearOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ResizeBilinearOptions>
-CreateResizeBilinearOptions(flatbuffers::FlatBufferBuilder &_fbb, bool align_corners = false)
-{
- ResizeBilinearOptionsBuilder builder_(_fbb);
- builder_.add_align_corners(align_corners);
- return builder_.Finish();
-}
-
-struct ResizeNearestNeighborOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_ALIGN_CORNERS = 4
- };
- bool align_corners() const { return GetField<uint8_t>(VT_ALIGN_CORNERS, 0) != 0; }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_ALIGN_CORNERS) &&
- verifier.EndTable();
- }
-};
-
-struct ResizeNearestNeighborOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_align_corners(bool align_corners)
- {
- fbb_.AddElement<uint8_t>(ResizeNearestNeighborOptions::VT_ALIGN_CORNERS,
- static_cast<uint8_t>(align_corners), 0);
- }
- explicit ResizeNearestNeighborOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ResizeNearestNeighborOptionsBuilder &operator=(const ResizeNearestNeighborOptionsBuilder &);
- flatbuffers::Offset<ResizeNearestNeighborOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ResizeNearestNeighborOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ResizeNearestNeighborOptions>
-CreateResizeNearestNeighborOptions(flatbuffers::FlatBufferBuilder &_fbb, bool align_corners = false)
-{
- ResizeNearestNeighborOptionsBuilder builder_(_fbb);
- builder_.add_align_corners(align_corners);
- return builder_.Finish();
-}
-
-struct CallOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_SUBGRAPH = 4
- };
- uint32_t subgraph() const { return GetField<uint32_t>(VT_SUBGRAPH, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint32_t>(verifier, VT_SUBGRAPH) &&
- verifier.EndTable();
- }
-};
-
-struct CallOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_subgraph(uint32_t subgraph)
- {
- fbb_.AddElement<uint32_t>(CallOptions::VT_SUBGRAPH, subgraph, 0);
- }
- explicit CallOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- CallOptionsBuilder &operator=(const CallOptionsBuilder &);
- flatbuffers::Offset<CallOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<CallOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<CallOptions> CreateCallOptions(flatbuffers::FlatBufferBuilder &_fbb,
- uint32_t subgraph = 0)
-{
- CallOptionsBuilder builder_(_fbb);
- builder_.add_subgraph(subgraph);
- return builder_.Finish();
-}
-
-struct PadOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct PadOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit PadOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- PadOptionsBuilder &operator=(const PadOptionsBuilder &);
- flatbuffers::Offset<PadOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<PadOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<PadOptions> CreatePadOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- PadOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct PadV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct PadV2OptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit PadV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- PadV2OptionsBuilder &operator=(const PadV2OptionsBuilder &);
- flatbuffers::Offset<PadV2Options> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<PadV2Options>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<PadV2Options> CreatePadV2Options(flatbuffers::FlatBufferBuilder &_fbb)
-{
- PadV2OptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct ReshapeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_NEW_SHAPE = 4
- };
- const flatbuffers::Vector<int32_t> *new_shape() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_NEW_SHAPE);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_NEW_SHAPE) &&
- verifier.VerifyVector(new_shape()) && verifier.EndTable();
- }
-};
-
-struct ReshapeOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_new_shape(flatbuffers::Offset<flatbuffers::Vector<int32_t>> new_shape)
- {
- fbb_.AddOffset(ReshapeOptions::VT_NEW_SHAPE, new_shape);
- }
- explicit ReshapeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ReshapeOptionsBuilder &operator=(const ReshapeOptionsBuilder &);
- flatbuffers::Offset<ReshapeOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ReshapeOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ReshapeOptions>
-CreateReshapeOptions(flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> new_shape = 0)
-{
- ReshapeOptionsBuilder builder_(_fbb);
- builder_.add_new_shape(new_shape);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<ReshapeOptions>
-CreateReshapeOptionsDirect(flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<int32_t> *new_shape = nullptr)
-{
- return neurun_tflite::CreateReshapeOptions(
- _fbb, new_shape ? _fbb.CreateVector<int32_t>(*new_shape) : 0);
-}
-
-struct SpaceToBatchNDOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct SpaceToBatchNDOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit SpaceToBatchNDOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SpaceToBatchNDOptionsBuilder &operator=(const SpaceToBatchNDOptionsBuilder &);
- flatbuffers::Offset<SpaceToBatchNDOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SpaceToBatchNDOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SpaceToBatchNDOptions>
-CreateSpaceToBatchNDOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- SpaceToBatchNDOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct BatchToSpaceNDOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct BatchToSpaceNDOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit BatchToSpaceNDOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- BatchToSpaceNDOptionsBuilder &operator=(const BatchToSpaceNDOptionsBuilder &);
- flatbuffers::Offset<BatchToSpaceNDOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<BatchToSpaceNDOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<BatchToSpaceNDOptions>
-CreateBatchToSpaceNDOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- BatchToSpaceNDOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct SkipGramOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_NGRAM_SIZE = 4,
- VT_MAX_SKIP_SIZE = 6,
- VT_INCLUDE_ALL_NGRAMS = 8
- };
- int32_t ngram_size() const { return GetField<int32_t>(VT_NGRAM_SIZE, 0); }
- int32_t max_skip_size() const { return GetField<int32_t>(VT_MAX_SKIP_SIZE, 0); }
- bool include_all_ngrams() const { return GetField<uint8_t>(VT_INCLUDE_ALL_NGRAMS, 0) != 0; }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_NGRAM_SIZE) &&
- VerifyField<int32_t>(verifier, VT_MAX_SKIP_SIZE) &&
- VerifyField<uint8_t>(verifier, VT_INCLUDE_ALL_NGRAMS) && verifier.EndTable();
- }
-};
-
-struct SkipGramOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_ngram_size(int32_t ngram_size)
- {
- fbb_.AddElement<int32_t>(SkipGramOptions::VT_NGRAM_SIZE, ngram_size, 0);
- }
- void add_max_skip_size(int32_t max_skip_size)
- {
- fbb_.AddElement<int32_t>(SkipGramOptions::VT_MAX_SKIP_SIZE, max_skip_size, 0);
- }
- void add_include_all_ngrams(bool include_all_ngrams)
- {
- fbb_.AddElement<uint8_t>(SkipGramOptions::VT_INCLUDE_ALL_NGRAMS,
- static_cast<uint8_t>(include_all_ngrams), 0);
- }
- explicit SkipGramOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SkipGramOptionsBuilder &operator=(const SkipGramOptionsBuilder &);
- flatbuffers::Offset<SkipGramOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SkipGramOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SkipGramOptions>
-CreateSkipGramOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t ngram_size = 0,
- int32_t max_skip_size = 0, bool include_all_ngrams = false)
-{
- SkipGramOptionsBuilder builder_(_fbb);
- builder_.add_max_skip_size(max_skip_size);
- builder_.add_ngram_size(ngram_size);
- builder_.add_include_all_ngrams(include_all_ngrams);
- return builder_.Finish();
-}
-
-struct SpaceToDepthOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_BLOCK_SIZE = 4
- };
- int32_t block_size() const { return GetField<int32_t>(VT_BLOCK_SIZE, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_BLOCK_SIZE) &&
- verifier.EndTable();
- }
-};
-
-struct SpaceToDepthOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_block_size(int32_t block_size)
- {
- fbb_.AddElement<int32_t>(SpaceToDepthOptions::VT_BLOCK_SIZE, block_size, 0);
- }
- explicit SpaceToDepthOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SpaceToDepthOptionsBuilder &operator=(const SpaceToDepthOptionsBuilder &);
- flatbuffers::Offset<SpaceToDepthOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SpaceToDepthOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SpaceToDepthOptions>
-CreateSpaceToDepthOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t block_size = 0)
-{
- SpaceToDepthOptionsBuilder builder_(_fbb);
- builder_.add_block_size(block_size);
- return builder_.Finish();
-}
-
-struct SubOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FUSED_ACTIVATION_FUNCTION = 4
- };
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
- }
-};
-
-struct SubOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(SubOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- explicit SubOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SubOptionsBuilder &operator=(const SubOptionsBuilder &);
- flatbuffers::Offset<SubOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SubOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SubOptions>
-CreateSubOptions(flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE)
-{
- SubOptionsBuilder builder_(_fbb);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct DivOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_FUSED_ACTIVATION_FUNCTION = 4
- };
- ActivationFunctionType fused_activation_function() const
- {
- return static_cast<ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) &&
- VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
- }
-};
-
-struct DivOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_fused_activation_function(ActivationFunctionType fused_activation_function)
- {
- fbb_.AddElement<int8_t>(DivOptions::VT_FUSED_ACTIVATION_FUNCTION,
- static_cast<int8_t>(fused_activation_function), 0);
- }
- explicit DivOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- DivOptionsBuilder &operator=(const DivOptionsBuilder &);
- flatbuffers::Offset<DivOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<DivOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<DivOptions>
-CreateDivOptions(flatbuffers::FlatBufferBuilder &_fbb,
- ActivationFunctionType fused_activation_function = ActivationFunctionType_NONE)
-{
- DivOptionsBuilder builder_(_fbb);
- builder_.add_fused_activation_function(fused_activation_function);
- return builder_.Finish();
-}
-
-struct TopKV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct TopKV2OptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit TopKV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- TopKV2OptionsBuilder &operator=(const TopKV2OptionsBuilder &);
- flatbuffers::Offset<TopKV2Options> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<TopKV2Options>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<TopKV2Options> CreateTopKV2Options(flatbuffers::FlatBufferBuilder &_fbb)
-{
- TopKV2OptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct EmbeddingLookupSparseOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_COMBINER = 4
- };
- CombinerType combiner() const
- {
- return static_cast<CombinerType>(GetField<int8_t>(VT_COMBINER, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_COMBINER) &&
- verifier.EndTable();
- }
-};
-
-struct EmbeddingLookupSparseOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_combiner(CombinerType combiner)
- {
- fbb_.AddElement<int8_t>(EmbeddingLookupSparseOptions::VT_COMBINER,
- static_cast<int8_t>(combiner), 0);
- }
- explicit EmbeddingLookupSparseOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- EmbeddingLookupSparseOptionsBuilder &operator=(const EmbeddingLookupSparseOptionsBuilder &);
- flatbuffers::Offset<EmbeddingLookupSparseOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<EmbeddingLookupSparseOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<EmbeddingLookupSparseOptions>
-CreateEmbeddingLookupSparseOptions(flatbuffers::FlatBufferBuilder &_fbb,
- CombinerType combiner = CombinerType_SUM)
-{
- EmbeddingLookupSparseOptionsBuilder builder_(_fbb);
- builder_.add_combiner(combiner);
- return builder_.Finish();
-}
-
-struct GatherOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_AXIS = 4
- };
- int32_t axis() const { return GetField<int32_t>(VT_AXIS, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_AXIS) &&
- verifier.EndTable();
- }
-};
-
-struct GatherOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_axis(int32_t axis) { fbb_.AddElement<int32_t>(GatherOptions::VT_AXIS, axis, 0); }
- explicit GatherOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- GatherOptionsBuilder &operator=(const GatherOptionsBuilder &);
- flatbuffers::Offset<GatherOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<GatherOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<GatherOptions> CreateGatherOptions(flatbuffers::FlatBufferBuilder &_fbb,
- int32_t axis = 0)
-{
- GatherOptionsBuilder builder_(_fbb);
- builder_.add_axis(axis);
- return builder_.Finish();
-}
-
-struct TransposeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct TransposeOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit TransposeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- TransposeOptionsBuilder &operator=(const TransposeOptionsBuilder &);
- flatbuffers::Offset<TransposeOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<TransposeOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<TransposeOptions>
-CreateTransposeOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- TransposeOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct ExpOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct ExpOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit ExpOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ExpOptionsBuilder &operator=(const ExpOptionsBuilder &);
- flatbuffers::Offset<ExpOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ExpOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ExpOptions> CreateExpOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- ExpOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct ReducerOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_KEEP_DIMS = 4
- };
- bool keep_dims() const { return GetField<uint8_t>(VT_KEEP_DIMS, 0) != 0; }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_KEEP_DIMS) &&
- verifier.EndTable();
- }
-};
-
-struct ReducerOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_keep_dims(bool keep_dims)
- {
- fbb_.AddElement<uint8_t>(ReducerOptions::VT_KEEP_DIMS, static_cast<uint8_t>(keep_dims), 0);
- }
- explicit ReducerOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ReducerOptionsBuilder &operator=(const ReducerOptionsBuilder &);
- flatbuffers::Offset<ReducerOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ReducerOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ReducerOptions>
-CreateReducerOptions(flatbuffers::FlatBufferBuilder &_fbb, bool keep_dims = false)
-{
- ReducerOptionsBuilder builder_(_fbb);
- builder_.add_keep_dims(keep_dims);
- return builder_.Finish();
-}
-
-struct SqueezeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_SQUEEZE_DIMS = 4
- };
- const flatbuffers::Vector<int32_t> *squeeze_dims() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_SQUEEZE_DIMS);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_SQUEEZE_DIMS) &&
- verifier.VerifyVector(squeeze_dims()) && verifier.EndTable();
- }
-};
-
-struct SqueezeOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_squeeze_dims(flatbuffers::Offset<flatbuffers::Vector<int32_t>> squeeze_dims)
- {
- fbb_.AddOffset(SqueezeOptions::VT_SQUEEZE_DIMS, squeeze_dims);
- }
- explicit SqueezeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SqueezeOptionsBuilder &operator=(const SqueezeOptionsBuilder &);
- flatbuffers::Offset<SqueezeOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SqueezeOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SqueezeOptions>
-CreateSqueezeOptions(flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> squeeze_dims = 0)
-{
- SqueezeOptionsBuilder builder_(_fbb);
- builder_.add_squeeze_dims(squeeze_dims);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<SqueezeOptions>
-CreateSqueezeOptionsDirect(flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<int32_t> *squeeze_dims = nullptr)
-{
- return neurun_tflite::CreateSqueezeOptions(
- _fbb, squeeze_dims ? _fbb.CreateVector<int32_t>(*squeeze_dims) : 0);
-}
-
-struct SplitOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_NUM_SPLITS = 4
- };
- int32_t num_splits() const { return GetField<int32_t>(VT_NUM_SPLITS, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_NUM_SPLITS) &&
- verifier.EndTable();
- }
-};
-
-struct SplitOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_num_splits(int32_t num_splits)
- {
- fbb_.AddElement<int32_t>(SplitOptions::VT_NUM_SPLITS, num_splits, 0);
- }
- explicit SplitOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SplitOptionsBuilder &operator=(const SplitOptionsBuilder &);
- flatbuffers::Offset<SplitOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SplitOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SplitOptions> CreateSplitOptions(flatbuffers::FlatBufferBuilder &_fbb,
- int32_t num_splits = 0)
-{
- SplitOptionsBuilder builder_(_fbb);
- builder_.add_num_splits(num_splits);
- return builder_.Finish();
-}
-
-struct SplitVOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_NUM_SPLITS = 4
- };
- int32_t num_splits() const { return GetField<int32_t>(VT_NUM_SPLITS, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_NUM_SPLITS) &&
- verifier.EndTable();
- }
-};
-
-struct SplitVOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_num_splits(int32_t num_splits)
- {
- fbb_.AddElement<int32_t>(SplitVOptions::VT_NUM_SPLITS, num_splits, 0);
- }
- explicit SplitVOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SplitVOptionsBuilder &operator=(const SplitVOptionsBuilder &);
- flatbuffers::Offset<SplitVOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SplitVOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SplitVOptions> CreateSplitVOptions(flatbuffers::FlatBufferBuilder &_fbb,
- int32_t num_splits = 0)
-{
- SplitVOptionsBuilder builder_(_fbb);
- builder_.add_num_splits(num_splits);
- return builder_.Finish();
-}
-
-struct StridedSliceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_BEGIN_MASK = 4,
- VT_END_MASK = 6,
- VT_ELLIPSIS_MASK = 8,
- VT_NEW_AXIS_MASK = 10,
- VT_SHRINK_AXIS_MASK = 12
- };
- int32_t begin_mask() const { return GetField<int32_t>(VT_BEGIN_MASK, 0); }
- int32_t end_mask() const { return GetField<int32_t>(VT_END_MASK, 0); }
- int32_t ellipsis_mask() const { return GetField<int32_t>(VT_ELLIPSIS_MASK, 0); }
- int32_t new_axis_mask() const { return GetField<int32_t>(VT_NEW_AXIS_MASK, 0); }
- int32_t shrink_axis_mask() const { return GetField<int32_t>(VT_SHRINK_AXIS_MASK, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_BEGIN_MASK) &&
- VerifyField<int32_t>(verifier, VT_END_MASK) &&
- VerifyField<int32_t>(verifier, VT_ELLIPSIS_MASK) &&
- VerifyField<int32_t>(verifier, VT_NEW_AXIS_MASK) &&
- VerifyField<int32_t>(verifier, VT_SHRINK_AXIS_MASK) && verifier.EndTable();
- }
-};
-
-struct StridedSliceOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_begin_mask(int32_t begin_mask)
- {
- fbb_.AddElement<int32_t>(StridedSliceOptions::VT_BEGIN_MASK, begin_mask, 0);
- }
- void add_end_mask(int32_t end_mask)
- {
- fbb_.AddElement<int32_t>(StridedSliceOptions::VT_END_MASK, end_mask, 0);
- }
- void add_ellipsis_mask(int32_t ellipsis_mask)
- {
- fbb_.AddElement<int32_t>(StridedSliceOptions::VT_ELLIPSIS_MASK, ellipsis_mask, 0);
- }
- void add_new_axis_mask(int32_t new_axis_mask)
- {
- fbb_.AddElement<int32_t>(StridedSliceOptions::VT_NEW_AXIS_MASK, new_axis_mask, 0);
- }
- void add_shrink_axis_mask(int32_t shrink_axis_mask)
- {
- fbb_.AddElement<int32_t>(StridedSliceOptions::VT_SHRINK_AXIS_MASK, shrink_axis_mask, 0);
- }
- explicit StridedSliceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- StridedSliceOptionsBuilder &operator=(const StridedSliceOptionsBuilder &);
- flatbuffers::Offset<StridedSliceOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<StridedSliceOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<StridedSliceOptions>
-CreateStridedSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t begin_mask = 0,
- int32_t end_mask = 0, int32_t ellipsis_mask = 0,
- int32_t new_axis_mask = 0, int32_t shrink_axis_mask = 0)
-{
- StridedSliceOptionsBuilder builder_(_fbb);
- builder_.add_shrink_axis_mask(shrink_axis_mask);
- builder_.add_new_axis_mask(new_axis_mask);
- builder_.add_ellipsis_mask(ellipsis_mask);
- builder_.add_end_mask(end_mask);
- builder_.add_begin_mask(begin_mask);
- return builder_.Finish();
-}
-
-struct LogSoftmaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct LogSoftmaxOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit LogSoftmaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- LogSoftmaxOptionsBuilder &operator=(const LogSoftmaxOptionsBuilder &);
- flatbuffers::Offset<LogSoftmaxOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<LogSoftmaxOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<LogSoftmaxOptions>
-CreateLogSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- LogSoftmaxOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct CastOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_IN_DATA_TYPE = 4,
- VT_OUT_DATA_TYPE = 6
- };
- TensorType in_data_type() const
- {
- return static_cast<TensorType>(GetField<int8_t>(VT_IN_DATA_TYPE, 0));
- }
- TensorType out_data_type() const
- {
- return static_cast<TensorType>(GetField<int8_t>(VT_OUT_DATA_TYPE, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_IN_DATA_TYPE) &&
- VerifyField<int8_t>(verifier, VT_OUT_DATA_TYPE) && verifier.EndTable();
- }
-};
-
-struct CastOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_in_data_type(TensorType in_data_type)
- {
- fbb_.AddElement<int8_t>(CastOptions::VT_IN_DATA_TYPE, static_cast<int8_t>(in_data_type), 0);
- }
- void add_out_data_type(TensorType out_data_type)
- {
- fbb_.AddElement<int8_t>(CastOptions::VT_OUT_DATA_TYPE, static_cast<int8_t>(out_data_type), 0);
- }
- explicit CastOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- CastOptionsBuilder &operator=(const CastOptionsBuilder &);
- flatbuffers::Offset<CastOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<CastOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<CastOptions>
-CreateCastOptions(flatbuffers::FlatBufferBuilder &_fbb,
- TensorType in_data_type = TensorType_FLOAT32,
- TensorType out_data_type = TensorType_FLOAT32)
-{
- CastOptionsBuilder builder_(_fbb);
- builder_.add_out_data_type(out_data_type);
- builder_.add_in_data_type(in_data_type);
- return builder_.Finish();
-}
-
-struct DequantizeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct DequantizeOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit DequantizeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- DequantizeOptionsBuilder &operator=(const DequantizeOptionsBuilder &);
- flatbuffers::Offset<DequantizeOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<DequantizeOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<DequantizeOptions>
-CreateDequantizeOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- DequantizeOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct MaximumMinimumOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct MaximumMinimumOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit MaximumMinimumOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- MaximumMinimumOptionsBuilder &operator=(const MaximumMinimumOptionsBuilder &);
- flatbuffers::Offset<MaximumMinimumOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<MaximumMinimumOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<MaximumMinimumOptions>
-CreateMaximumMinimumOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- MaximumMinimumOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct TileOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct TileOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit TileOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- TileOptionsBuilder &operator=(const TileOptionsBuilder &);
- flatbuffers::Offset<TileOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<TileOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<TileOptions> CreateTileOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- TileOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct ArgMaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_OUTPUT_TYPE = 4
- };
- TensorType output_type() const
- {
- return static_cast<TensorType>(GetField<int8_t>(VT_OUTPUT_TYPE, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_OUTPUT_TYPE) &&
- verifier.EndTable();
- }
-};
-
-struct ArgMaxOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_output_type(TensorType output_type)
- {
- fbb_.AddElement<int8_t>(ArgMaxOptions::VT_OUTPUT_TYPE, static_cast<int8_t>(output_type), 0);
- }
- explicit ArgMaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ArgMaxOptionsBuilder &operator=(const ArgMaxOptionsBuilder &);
- flatbuffers::Offset<ArgMaxOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ArgMaxOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ArgMaxOptions>
-CreateArgMaxOptions(flatbuffers::FlatBufferBuilder &_fbb,
- TensorType output_type = TensorType_FLOAT32)
-{
- ArgMaxOptionsBuilder builder_(_fbb);
- builder_.add_output_type(output_type);
- return builder_.Finish();
-}
-
-struct ArgMinOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_OUTPUT_TYPE = 4
- };
- TensorType output_type() const
- {
- return static_cast<TensorType>(GetField<int8_t>(VT_OUTPUT_TYPE, 0));
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_OUTPUT_TYPE) &&
- verifier.EndTable();
- }
-};
-
-struct ArgMinOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_output_type(TensorType output_type)
- {
- fbb_.AddElement<int8_t>(ArgMinOptions::VT_OUTPUT_TYPE, static_cast<int8_t>(output_type), 0);
- }
- explicit ArgMinOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ArgMinOptionsBuilder &operator=(const ArgMinOptionsBuilder &);
- flatbuffers::Offset<ArgMinOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ArgMinOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ArgMinOptions>
-CreateArgMinOptions(flatbuffers::FlatBufferBuilder &_fbb,
- TensorType output_type = TensorType_FLOAT32)
-{
- ArgMinOptionsBuilder builder_(_fbb);
- builder_.add_output_type(output_type);
- return builder_.Finish();
-}
-
-struct GreaterOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct GreaterOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit GreaterOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- GreaterOptionsBuilder &operator=(const GreaterOptionsBuilder &);
- flatbuffers::Offset<GreaterOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<GreaterOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<GreaterOptions>
-CreateGreaterOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- GreaterOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct GreaterEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct GreaterEqualOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit GreaterEqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- GreaterEqualOptionsBuilder &operator=(const GreaterEqualOptionsBuilder &);
- flatbuffers::Offset<GreaterEqualOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<GreaterEqualOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<GreaterEqualOptions>
-CreateGreaterEqualOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- GreaterEqualOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct LessOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct LessOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit LessOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- LessOptionsBuilder &operator=(const LessOptionsBuilder &);
- flatbuffers::Offset<LessOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<LessOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<LessOptions> CreateLessOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- LessOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct LessEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct LessEqualOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit LessEqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- LessEqualOptionsBuilder &operator=(const LessEqualOptionsBuilder &);
- flatbuffers::Offset<LessEqualOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<LessEqualOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<LessEqualOptions>
-CreateLessEqualOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- LessEqualOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct NegOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct NegOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit NegOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- NegOptionsBuilder &operator=(const NegOptionsBuilder &);
- flatbuffers::Offset<NegOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<NegOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<NegOptions> CreateNegOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- NegOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct SelectOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct SelectOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit SelectOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SelectOptionsBuilder &operator=(const SelectOptionsBuilder &);
- flatbuffers::Offset<SelectOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SelectOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SelectOptions> CreateSelectOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- SelectOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct SliceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct SliceOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit SliceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SliceOptionsBuilder &operator=(const SliceOptionsBuilder &);
- flatbuffers::Offset<SliceOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SliceOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SliceOptions> CreateSliceOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- SliceOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct TransposeConvOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_PADDING = 4,
- VT_STRIDE_W = 6,
- VT_STRIDE_H = 8
- };
- Padding padding() const { return static_cast<Padding>(GetField<int8_t>(VT_PADDING, 0)); }
- int32_t stride_w() const { return GetField<int32_t>(VT_STRIDE_W, 0); }
- int32_t stride_h() const { return GetField<int32_t>(VT_STRIDE_H, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_PADDING) &&
- VerifyField<int32_t>(verifier, VT_STRIDE_W) &&
- VerifyField<int32_t>(verifier, VT_STRIDE_H) && verifier.EndTable();
- }
-};
-
-struct TransposeConvOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_padding(Padding padding)
- {
- fbb_.AddElement<int8_t>(TransposeConvOptions::VT_PADDING, static_cast<int8_t>(padding), 0);
- }
- void add_stride_w(int32_t stride_w)
- {
- fbb_.AddElement<int32_t>(TransposeConvOptions::VT_STRIDE_W, stride_w, 0);
- }
- void add_stride_h(int32_t stride_h)
- {
- fbb_.AddElement<int32_t>(TransposeConvOptions::VT_STRIDE_H, stride_h, 0);
- }
- explicit TransposeConvOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- TransposeConvOptionsBuilder &operator=(const TransposeConvOptionsBuilder &);
- flatbuffers::Offset<TransposeConvOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<TransposeConvOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<TransposeConvOptions>
-CreateTransposeConvOptions(flatbuffers::FlatBufferBuilder &_fbb, Padding padding = Padding_SAME,
- int32_t stride_w = 0, int32_t stride_h = 0)
-{
- TransposeConvOptionsBuilder builder_(_fbb);
- builder_.add_stride_h(stride_h);
- builder_.add_stride_w(stride_w);
- builder_.add_padding(padding);
- return builder_.Finish();
-}
-
-struct ExpandDimsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct ExpandDimsOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit ExpandDimsOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ExpandDimsOptionsBuilder &operator=(const ExpandDimsOptionsBuilder &);
- flatbuffers::Offset<ExpandDimsOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ExpandDimsOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ExpandDimsOptions>
-CreateExpandDimsOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- ExpandDimsOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct SparseToDenseOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_VALIDATE_INDICES = 4
- };
- bool validate_indices() const { return GetField<uint8_t>(VT_VALIDATE_INDICES, 0) != 0; }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_VALIDATE_INDICES) &&
- verifier.EndTable();
- }
-};
-
-struct SparseToDenseOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_validate_indices(bool validate_indices)
- {
- fbb_.AddElement<uint8_t>(SparseToDenseOptions::VT_VALIDATE_INDICES,
- static_cast<uint8_t>(validate_indices), 0);
- }
- explicit SparseToDenseOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SparseToDenseOptionsBuilder &operator=(const SparseToDenseOptionsBuilder &);
- flatbuffers::Offset<SparseToDenseOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SparseToDenseOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SparseToDenseOptions>
-CreateSparseToDenseOptions(flatbuffers::FlatBufferBuilder &_fbb, bool validate_indices = false)
-{
- SparseToDenseOptionsBuilder builder_(_fbb);
- builder_.add_validate_indices(validate_indices);
- return builder_.Finish();
-}
-
-struct EqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct EqualOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit EqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- EqualOptionsBuilder &operator=(const EqualOptionsBuilder &);
- flatbuffers::Offset<EqualOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<EqualOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<EqualOptions> CreateEqualOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- EqualOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct NotEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct NotEqualOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit NotEqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- NotEqualOptionsBuilder &operator=(const NotEqualOptionsBuilder &);
- flatbuffers::Offset<NotEqualOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<NotEqualOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<NotEqualOptions>
-CreateNotEqualOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- NotEqualOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct ShapeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_OUT_TYPE = 4
- };
- TensorType out_type() const { return static_cast<TensorType>(GetField<int8_t>(VT_OUT_TYPE, 0)); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_OUT_TYPE) &&
- verifier.EndTable();
- }
-};
-
-struct ShapeOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_out_type(TensorType out_type)
- {
- fbb_.AddElement<int8_t>(ShapeOptions::VT_OUT_TYPE, static_cast<int8_t>(out_type), 0);
- }
- explicit ShapeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ShapeOptionsBuilder &operator=(const ShapeOptionsBuilder &);
- flatbuffers::Offset<ShapeOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ShapeOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ShapeOptions>
-CreateShapeOptions(flatbuffers::FlatBufferBuilder &_fbb, TensorType out_type = TensorType_FLOAT32)
-{
- ShapeOptionsBuilder builder_(_fbb);
- builder_.add_out_type(out_type);
- return builder_.Finish();
-}
-
-struct PowOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct PowOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit PowOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- PowOptionsBuilder &operator=(const PowOptionsBuilder &);
- flatbuffers::Offset<PowOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<PowOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<PowOptions> CreatePowOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- PowOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct FakeQuantOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_MIN = 4,
- VT_MAX = 6,
- VT_NUM_BITS = 8,
- VT_NARROW_RANGE = 10
- };
- float min() const { return GetField<float>(VT_MIN, 0.0f); }
- float max() const { return GetField<float>(VT_MAX, 0.0f); }
- int32_t num_bits() const { return GetField<int32_t>(VT_NUM_BITS, 0); }
- bool narrow_range() const { return GetField<uint8_t>(VT_NARROW_RANGE, 0) != 0; }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<float>(verifier, VT_MIN) &&
- VerifyField<float>(verifier, VT_MAX) && VerifyField<int32_t>(verifier, VT_NUM_BITS) &&
- VerifyField<uint8_t>(verifier, VT_NARROW_RANGE) && verifier.EndTable();
- }
-};
-
-struct FakeQuantOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_min(float min) { fbb_.AddElement<float>(FakeQuantOptions::VT_MIN, min, 0.0f); }
- void add_max(float max) { fbb_.AddElement<float>(FakeQuantOptions::VT_MAX, max, 0.0f); }
- void add_num_bits(int32_t num_bits)
- {
- fbb_.AddElement<int32_t>(FakeQuantOptions::VT_NUM_BITS, num_bits, 0);
- }
- void add_narrow_range(bool narrow_range)
- {
- fbb_.AddElement<uint8_t>(FakeQuantOptions::VT_NARROW_RANGE, static_cast<uint8_t>(narrow_range),
- 0);
- }
- explicit FakeQuantOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- FakeQuantOptionsBuilder &operator=(const FakeQuantOptionsBuilder &);
- flatbuffers::Offset<FakeQuantOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<FakeQuantOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<FakeQuantOptions>
-CreateFakeQuantOptions(flatbuffers::FlatBufferBuilder &_fbb, float min = 0.0f, float max = 0.0f,
- int32_t num_bits = 0, bool narrow_range = false)
-{
- FakeQuantOptionsBuilder builder_(_fbb);
- builder_.add_num_bits(num_bits);
- builder_.add_max(max);
- builder_.add_min(min);
- builder_.add_narrow_range(narrow_range);
- return builder_.Finish();
-}
-
-struct PackOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_VALUES_COUNT = 4,
- VT_AXIS = 6
- };
- int32_t values_count() const { return GetField<int32_t>(VT_VALUES_COUNT, 0); }
- int32_t axis() const { return GetField<int32_t>(VT_AXIS, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_VALUES_COUNT) &&
- VerifyField<int32_t>(verifier, VT_AXIS) && verifier.EndTable();
- }
-};
-
-struct PackOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_values_count(int32_t values_count)
- {
- fbb_.AddElement<int32_t>(PackOptions::VT_VALUES_COUNT, values_count, 0);
- }
- void add_axis(int32_t axis) { fbb_.AddElement<int32_t>(PackOptions::VT_AXIS, axis, 0); }
- explicit PackOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- PackOptionsBuilder &operator=(const PackOptionsBuilder &);
- flatbuffers::Offset<PackOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<PackOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<PackOptions>
-CreatePackOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t values_count = 0, int32_t axis = 0)
-{
- PackOptionsBuilder builder_(_fbb);
- builder_.add_axis(axis);
- builder_.add_values_count(values_count);
- return builder_.Finish();
-}
-
-struct LogicalOrOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct LogicalOrOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit LogicalOrOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- LogicalOrOptionsBuilder &operator=(const LogicalOrOptionsBuilder &);
- flatbuffers::Offset<LogicalOrOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<LogicalOrOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<LogicalOrOptions>
-CreateLogicalOrOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- LogicalOrOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct OneHotOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_AXIS = 4
- };
- int32_t axis() const { return GetField<int32_t>(VT_AXIS, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_AXIS) &&
- verifier.EndTable();
- }
-};
-
-struct OneHotOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_axis(int32_t axis) { fbb_.AddElement<int32_t>(OneHotOptions::VT_AXIS, axis, 0); }
- explicit OneHotOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- OneHotOptionsBuilder &operator=(const OneHotOptionsBuilder &);
- flatbuffers::Offset<OneHotOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<OneHotOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<OneHotOptions> CreateOneHotOptions(flatbuffers::FlatBufferBuilder &_fbb,
- int32_t axis = 0)
-{
- OneHotOptionsBuilder builder_(_fbb);
- builder_.add_axis(axis);
- return builder_.Finish();
-}
-
-struct AbsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct AbsOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit AbsOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- AbsOptionsBuilder &operator=(const AbsOptionsBuilder &);
- flatbuffers::Offset<AbsOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<AbsOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<AbsOptions> CreateAbsOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- AbsOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct LogicalAndOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct LogicalAndOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit LogicalAndOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- LogicalAndOptionsBuilder &operator=(const LogicalAndOptionsBuilder &);
- flatbuffers::Offset<LogicalAndOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<LogicalAndOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<LogicalAndOptions>
-CreateLogicalAndOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- LogicalAndOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct LogicalNotOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct LogicalNotOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit LogicalNotOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- LogicalNotOptionsBuilder &operator=(const LogicalNotOptionsBuilder &);
- flatbuffers::Offset<LogicalNotOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<LogicalNotOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<LogicalNotOptions>
-CreateLogicalNotOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- LogicalNotOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct UnpackOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_NUM = 4,
- VT_AXIS = 6
- };
- int32_t num() const { return GetField<int32_t>(VT_NUM, 0); }
- int32_t axis() const { return GetField<int32_t>(VT_AXIS, 0); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_NUM) &&
- VerifyField<int32_t>(verifier, VT_AXIS) && verifier.EndTable();
- }
-};
-
-struct UnpackOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_num(int32_t num) { fbb_.AddElement<int32_t>(UnpackOptions::VT_NUM, num, 0); }
- void add_axis(int32_t axis) { fbb_.AddElement<int32_t>(UnpackOptions::VT_AXIS, axis, 0); }
- explicit UnpackOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- UnpackOptionsBuilder &operator=(const UnpackOptionsBuilder &);
- flatbuffers::Offset<UnpackOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<UnpackOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<UnpackOptions> CreateUnpackOptions(flatbuffers::FlatBufferBuilder &_fbb,
- int32_t num = 0, int32_t axis = 0)
-{
- UnpackOptionsBuilder builder_(_fbb);
- builder_.add_axis(axis);
- builder_.add_num(num);
- return builder_.Finish();
-}
-
-struct FloorDivOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct FloorDivOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit FloorDivOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- FloorDivOptionsBuilder &operator=(const FloorDivOptionsBuilder &);
- flatbuffers::Offset<FloorDivOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<FloorDivOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<FloorDivOptions>
-CreateFloorDivOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- FloorDivOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct SquareOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct SquareOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit SquareOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SquareOptionsBuilder &operator=(const SquareOptionsBuilder &);
- flatbuffers::Offset<SquareOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SquareOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SquareOptions> CreateSquareOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- SquareOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct ZerosLikeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct ZerosLikeOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit ZerosLikeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ZerosLikeOptionsBuilder &operator=(const ZerosLikeOptionsBuilder &);
- flatbuffers::Offset<ZerosLikeOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ZerosLikeOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ZerosLikeOptions>
-CreateZerosLikeOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- ZerosLikeOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct FillOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct FillOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit FillOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- FillOptionsBuilder &operator=(const FillOptionsBuilder &);
- flatbuffers::Offset<FillOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<FillOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<FillOptions> CreateFillOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- FillOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct FloorModOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct FloorModOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit FloorModOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- FloorModOptionsBuilder &operator=(const FloorModOptionsBuilder &);
- flatbuffers::Offset<FloorModOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<FloorModOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<FloorModOptions>
-CreateFloorModOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- FloorModOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct RangeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct RangeOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit RangeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- RangeOptionsBuilder &operator=(const RangeOptionsBuilder &);
- flatbuffers::Offset<RangeOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<RangeOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<RangeOptions> CreateRangeOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- RangeOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct LeakyReluOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_ALPHA = 4
- };
- float alpha() const { return GetField<float>(VT_ALPHA, 0.0f); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<float>(verifier, VT_ALPHA) &&
- verifier.EndTable();
- }
-};
-
-struct LeakyReluOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_alpha(float alpha) { fbb_.AddElement<float>(LeakyReluOptions::VT_ALPHA, alpha, 0.0f); }
- explicit LeakyReluOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- LeakyReluOptionsBuilder &operator=(const LeakyReluOptionsBuilder &);
- flatbuffers::Offset<LeakyReluOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<LeakyReluOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<LeakyReluOptions>
-CreateLeakyReluOptions(flatbuffers::FlatBufferBuilder &_fbb, float alpha = 0.0f)
-{
- LeakyReluOptionsBuilder builder_(_fbb);
- builder_.add_alpha(alpha);
- return builder_.Finish();
-}
-
-struct SquaredDifferenceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && verifier.EndTable();
- }
-};
-
-struct SquaredDifferenceOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- explicit SquaredDifferenceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SquaredDifferenceOptionsBuilder &operator=(const SquaredDifferenceOptionsBuilder &);
- flatbuffers::Offset<SquaredDifferenceOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SquaredDifferenceOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SquaredDifferenceOptions>
-CreateSquaredDifferenceOptions(flatbuffers::FlatBufferBuilder &_fbb)
-{
- SquaredDifferenceOptionsBuilder builder_(_fbb);
- return builder_.Finish();
-}
-
-struct MirrorPadOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_MODE = 4
- };
- MirrorPadMode mode() const { return static_cast<MirrorPadMode>(GetField<int8_t>(VT_MODE, 0)); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_MODE) &&
- verifier.EndTable();
- }
-};
-
-struct MirrorPadOptionsBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_mode(MirrorPadMode mode)
- {
- fbb_.AddElement<int8_t>(MirrorPadOptions::VT_MODE, static_cast<int8_t>(mode), 0);
- }
- explicit MirrorPadOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- MirrorPadOptionsBuilder &operator=(const MirrorPadOptionsBuilder &);
- flatbuffers::Offset<MirrorPadOptions> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<MirrorPadOptions>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<MirrorPadOptions>
-CreateMirrorPadOptions(flatbuffers::FlatBufferBuilder &_fbb,
- MirrorPadMode mode = MirrorPadMode_REFLECT)
-{
- MirrorPadOptionsBuilder builder_(_fbb);
- builder_.add_mode(mode);
- return builder_.Finish();
-}
-
-struct OperatorCode FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_BUILTIN_CODE = 4,
- VT_CUSTOM_CODE = 6,
- VT_VERSION = 8
- };
- BuiltinOperator builtin_code() const
- {
- return static_cast<BuiltinOperator>(GetField<int8_t>(VT_BUILTIN_CODE, 0));
- }
- const flatbuffers::String *custom_code() const
- {
- return GetPointer<const flatbuffers::String *>(VT_CUSTOM_CODE);
- }
- int32_t version() const { return GetField<int32_t>(VT_VERSION, 1); }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_BUILTIN_CODE) &&
- VerifyOffset(verifier, VT_CUSTOM_CODE) && verifier.VerifyString(custom_code()) &&
- VerifyField<int32_t>(verifier, VT_VERSION) && verifier.EndTable();
- }
-};
-
-struct OperatorCodeBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_builtin_code(BuiltinOperator builtin_code)
- {
- fbb_.AddElement<int8_t>(OperatorCode::VT_BUILTIN_CODE, static_cast<int8_t>(builtin_code), 0);
- }
- void add_custom_code(flatbuffers::Offset<flatbuffers::String> custom_code)
- {
- fbb_.AddOffset(OperatorCode::VT_CUSTOM_CODE, custom_code);
- }
- void add_version(int32_t version)
- {
- fbb_.AddElement<int32_t>(OperatorCode::VT_VERSION, version, 1);
- }
- explicit OperatorCodeBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- OperatorCodeBuilder &operator=(const OperatorCodeBuilder &);
- flatbuffers::Offset<OperatorCode> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<OperatorCode>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<OperatorCode>
-CreateOperatorCode(flatbuffers::FlatBufferBuilder &_fbb,
- BuiltinOperator builtin_code = BuiltinOperator_ADD,
- flatbuffers::Offset<flatbuffers::String> custom_code = 0, int32_t version = 1)
-{
- OperatorCodeBuilder builder_(_fbb);
- builder_.add_version(version);
- builder_.add_custom_code(custom_code);
- builder_.add_builtin_code(builtin_code);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<OperatorCode>
-CreateOperatorCodeDirect(flatbuffers::FlatBufferBuilder &_fbb,
- BuiltinOperator builtin_code = BuiltinOperator_ADD,
- const char *custom_code = nullptr, int32_t version = 1)
-{
- return neurun_tflite::CreateOperatorCode(
- _fbb, builtin_code, custom_code ? _fbb.CreateString(custom_code) : 0, version);
-}
-
-struct Operator FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_OPCODE_INDEX = 4,
- VT_INPUTS = 6,
- VT_OUTPUTS = 8,
- VT_BUILTIN_OPTIONS_TYPE = 10,
- VT_BUILTIN_OPTIONS = 12,
- VT_CUSTOM_OPTIONS = 14,
- VT_CUSTOM_OPTIONS_FORMAT = 16,
- VT_MUTATING_VARIABLE_INPUTS = 18
- };
- uint32_t opcode_index() const { return GetField<uint32_t>(VT_OPCODE_INDEX, 0); }
- const flatbuffers::Vector<int32_t> *inputs() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_INPUTS);
- }
- const flatbuffers::Vector<int32_t> *outputs() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_OUTPUTS);
- }
- BuiltinOptions builtin_options_type() const
- {
- return static_cast<BuiltinOptions>(GetField<uint8_t>(VT_BUILTIN_OPTIONS_TYPE, 0));
- }
- const void *builtin_options() const { return GetPointer<const void *>(VT_BUILTIN_OPTIONS); }
- template <typename T> const T *builtin_options_as() const;
- const Conv2DOptions *builtin_options_as_Conv2DOptions() const
- {
- return builtin_options_type() == BuiltinOptions_Conv2DOptions
- ? static_cast<const Conv2DOptions *>(builtin_options())
- : nullptr;
- }
- const DepthwiseConv2DOptions *builtin_options_as_DepthwiseConv2DOptions() const
- {
- return builtin_options_type() == BuiltinOptions_DepthwiseConv2DOptions
- ? static_cast<const DepthwiseConv2DOptions *>(builtin_options())
- : nullptr;
- }
- const ConcatEmbeddingsOptions *builtin_options_as_ConcatEmbeddingsOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ConcatEmbeddingsOptions
- ? static_cast<const ConcatEmbeddingsOptions *>(builtin_options())
- : nullptr;
- }
- const LSHProjectionOptions *builtin_options_as_LSHProjectionOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LSHProjectionOptions
- ? static_cast<const LSHProjectionOptions *>(builtin_options())
- : nullptr;
- }
- const Pool2DOptions *builtin_options_as_Pool2DOptions() const
- {
- return builtin_options_type() == BuiltinOptions_Pool2DOptions
- ? static_cast<const Pool2DOptions *>(builtin_options())
- : nullptr;
- }
- const SVDFOptions *builtin_options_as_SVDFOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SVDFOptions
- ? static_cast<const SVDFOptions *>(builtin_options())
- : nullptr;
- }
- const RNNOptions *builtin_options_as_RNNOptions() const
- {
- return builtin_options_type() == BuiltinOptions_RNNOptions
- ? static_cast<const RNNOptions *>(builtin_options())
- : nullptr;
- }
- const FullyConnectedOptions *builtin_options_as_FullyConnectedOptions() const
- {
- return builtin_options_type() == BuiltinOptions_FullyConnectedOptions
- ? static_cast<const FullyConnectedOptions *>(builtin_options())
- : nullptr;
- }
- const SoftmaxOptions *builtin_options_as_SoftmaxOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SoftmaxOptions
- ? static_cast<const SoftmaxOptions *>(builtin_options())
- : nullptr;
- }
- const ConcatenationOptions *builtin_options_as_ConcatenationOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ConcatenationOptions
- ? static_cast<const ConcatenationOptions *>(builtin_options())
- : nullptr;
- }
- const AddOptions *builtin_options_as_AddOptions() const
- {
- return builtin_options_type() == BuiltinOptions_AddOptions
- ? static_cast<const AddOptions *>(builtin_options())
- : nullptr;
- }
- const L2NormOptions *builtin_options_as_L2NormOptions() const
- {
- return builtin_options_type() == BuiltinOptions_L2NormOptions
- ? static_cast<const L2NormOptions *>(builtin_options())
- : nullptr;
- }
- const LocalResponseNormalizationOptions *
- builtin_options_as_LocalResponseNormalizationOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LocalResponseNormalizationOptions
- ? static_cast<const LocalResponseNormalizationOptions *>(builtin_options())
- : nullptr;
- }
- const LSTMOptions *builtin_options_as_LSTMOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LSTMOptions
- ? static_cast<const LSTMOptions *>(builtin_options())
- : nullptr;
- }
- const ResizeBilinearOptions *builtin_options_as_ResizeBilinearOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ResizeBilinearOptions
- ? static_cast<const ResizeBilinearOptions *>(builtin_options())
- : nullptr;
- }
- const CallOptions *builtin_options_as_CallOptions() const
- {
- return builtin_options_type() == BuiltinOptions_CallOptions
- ? static_cast<const CallOptions *>(builtin_options())
- : nullptr;
- }
- const ReshapeOptions *builtin_options_as_ReshapeOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ReshapeOptions
- ? static_cast<const ReshapeOptions *>(builtin_options())
- : nullptr;
- }
- const SkipGramOptions *builtin_options_as_SkipGramOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SkipGramOptions
- ? static_cast<const SkipGramOptions *>(builtin_options())
- : nullptr;
- }
- const SpaceToDepthOptions *builtin_options_as_SpaceToDepthOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SpaceToDepthOptions
- ? static_cast<const SpaceToDepthOptions *>(builtin_options())
- : nullptr;
- }
- const EmbeddingLookupSparseOptions *builtin_options_as_EmbeddingLookupSparseOptions() const
- {
- return builtin_options_type() == BuiltinOptions_EmbeddingLookupSparseOptions
- ? static_cast<const EmbeddingLookupSparseOptions *>(builtin_options())
- : nullptr;
- }
- const MulOptions *builtin_options_as_MulOptions() const
- {
- return builtin_options_type() == BuiltinOptions_MulOptions
- ? static_cast<const MulOptions *>(builtin_options())
- : nullptr;
- }
- const PadOptions *builtin_options_as_PadOptions() const
- {
- return builtin_options_type() == BuiltinOptions_PadOptions
- ? static_cast<const PadOptions *>(builtin_options())
- : nullptr;
- }
- const GatherOptions *builtin_options_as_GatherOptions() const
- {
- return builtin_options_type() == BuiltinOptions_GatherOptions
- ? static_cast<const GatherOptions *>(builtin_options())
- : nullptr;
- }
- const BatchToSpaceNDOptions *builtin_options_as_BatchToSpaceNDOptions() const
- {
- return builtin_options_type() == BuiltinOptions_BatchToSpaceNDOptions
- ? static_cast<const BatchToSpaceNDOptions *>(builtin_options())
- : nullptr;
- }
- const SpaceToBatchNDOptions *builtin_options_as_SpaceToBatchNDOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SpaceToBatchNDOptions
- ? static_cast<const SpaceToBatchNDOptions *>(builtin_options())
- : nullptr;
- }
- const TransposeOptions *builtin_options_as_TransposeOptions() const
- {
- return builtin_options_type() == BuiltinOptions_TransposeOptions
- ? static_cast<const TransposeOptions *>(builtin_options())
- : nullptr;
- }
- const ReducerOptions *builtin_options_as_ReducerOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ReducerOptions
- ? static_cast<const ReducerOptions *>(builtin_options())
- : nullptr;
- }
- const SubOptions *builtin_options_as_SubOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SubOptions
- ? static_cast<const SubOptions *>(builtin_options())
- : nullptr;
- }
- const DivOptions *builtin_options_as_DivOptions() const
- {
- return builtin_options_type() == BuiltinOptions_DivOptions
- ? static_cast<const DivOptions *>(builtin_options())
- : nullptr;
- }
- const SqueezeOptions *builtin_options_as_SqueezeOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SqueezeOptions
- ? static_cast<const SqueezeOptions *>(builtin_options())
- : nullptr;
- }
- const SequenceRNNOptions *builtin_options_as_SequenceRNNOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SequenceRNNOptions
- ? static_cast<const SequenceRNNOptions *>(builtin_options())
- : nullptr;
- }
- const StridedSliceOptions *builtin_options_as_StridedSliceOptions() const
- {
- return builtin_options_type() == BuiltinOptions_StridedSliceOptions
- ? static_cast<const StridedSliceOptions *>(builtin_options())
- : nullptr;
- }
- const ExpOptions *builtin_options_as_ExpOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ExpOptions
- ? static_cast<const ExpOptions *>(builtin_options())
- : nullptr;
- }
- const TopKV2Options *builtin_options_as_TopKV2Options() const
- {
- return builtin_options_type() == BuiltinOptions_TopKV2Options
- ? static_cast<const TopKV2Options *>(builtin_options())
- : nullptr;
- }
- const SplitOptions *builtin_options_as_SplitOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SplitOptions
- ? static_cast<const SplitOptions *>(builtin_options())
- : nullptr;
- }
- const LogSoftmaxOptions *builtin_options_as_LogSoftmaxOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LogSoftmaxOptions
- ? static_cast<const LogSoftmaxOptions *>(builtin_options())
- : nullptr;
- }
- const CastOptions *builtin_options_as_CastOptions() const
- {
- return builtin_options_type() == BuiltinOptions_CastOptions
- ? static_cast<const CastOptions *>(builtin_options())
- : nullptr;
- }
- const DequantizeOptions *builtin_options_as_DequantizeOptions() const
- {
- return builtin_options_type() == BuiltinOptions_DequantizeOptions
- ? static_cast<const DequantizeOptions *>(builtin_options())
- : nullptr;
- }
- const MaximumMinimumOptions *builtin_options_as_MaximumMinimumOptions() const
- {
- return builtin_options_type() == BuiltinOptions_MaximumMinimumOptions
- ? static_cast<const MaximumMinimumOptions *>(builtin_options())
- : nullptr;
- }
- const ArgMaxOptions *builtin_options_as_ArgMaxOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ArgMaxOptions
- ? static_cast<const ArgMaxOptions *>(builtin_options())
- : nullptr;
- }
- const LessOptions *builtin_options_as_LessOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LessOptions
- ? static_cast<const LessOptions *>(builtin_options())
- : nullptr;
- }
- const NegOptions *builtin_options_as_NegOptions() const
- {
- return builtin_options_type() == BuiltinOptions_NegOptions
- ? static_cast<const NegOptions *>(builtin_options())
- : nullptr;
- }
- const PadV2Options *builtin_options_as_PadV2Options() const
- {
- return builtin_options_type() == BuiltinOptions_PadV2Options
- ? static_cast<const PadV2Options *>(builtin_options())
- : nullptr;
- }
- const GreaterOptions *builtin_options_as_GreaterOptions() const
- {
- return builtin_options_type() == BuiltinOptions_GreaterOptions
- ? static_cast<const GreaterOptions *>(builtin_options())
- : nullptr;
- }
- const GreaterEqualOptions *builtin_options_as_GreaterEqualOptions() const
- {
- return builtin_options_type() == BuiltinOptions_GreaterEqualOptions
- ? static_cast<const GreaterEqualOptions *>(builtin_options())
- : nullptr;
- }
- const LessEqualOptions *builtin_options_as_LessEqualOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LessEqualOptions
- ? static_cast<const LessEqualOptions *>(builtin_options())
- : nullptr;
- }
- const SelectOptions *builtin_options_as_SelectOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SelectOptions
- ? static_cast<const SelectOptions *>(builtin_options())
- : nullptr;
- }
- const SliceOptions *builtin_options_as_SliceOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SliceOptions
- ? static_cast<const SliceOptions *>(builtin_options())
- : nullptr;
- }
- const TransposeConvOptions *builtin_options_as_TransposeConvOptions() const
- {
- return builtin_options_type() == BuiltinOptions_TransposeConvOptions
- ? static_cast<const TransposeConvOptions *>(builtin_options())
- : nullptr;
- }
- const SparseToDenseOptions *builtin_options_as_SparseToDenseOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SparseToDenseOptions
- ? static_cast<const SparseToDenseOptions *>(builtin_options())
- : nullptr;
- }
- const TileOptions *builtin_options_as_TileOptions() const
- {
- return builtin_options_type() == BuiltinOptions_TileOptions
- ? static_cast<const TileOptions *>(builtin_options())
- : nullptr;
- }
- const ExpandDimsOptions *builtin_options_as_ExpandDimsOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ExpandDimsOptions
- ? static_cast<const ExpandDimsOptions *>(builtin_options())
- : nullptr;
- }
- const EqualOptions *builtin_options_as_EqualOptions() const
- {
- return builtin_options_type() == BuiltinOptions_EqualOptions
- ? static_cast<const EqualOptions *>(builtin_options())
- : nullptr;
- }
- const NotEqualOptions *builtin_options_as_NotEqualOptions() const
- {
- return builtin_options_type() == BuiltinOptions_NotEqualOptions
- ? static_cast<const NotEqualOptions *>(builtin_options())
- : nullptr;
- }
- const ShapeOptions *builtin_options_as_ShapeOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ShapeOptions
- ? static_cast<const ShapeOptions *>(builtin_options())
- : nullptr;
- }
- const PowOptions *builtin_options_as_PowOptions() const
- {
- return builtin_options_type() == BuiltinOptions_PowOptions
- ? static_cast<const PowOptions *>(builtin_options())
- : nullptr;
- }
- const ArgMinOptions *builtin_options_as_ArgMinOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ArgMinOptions
- ? static_cast<const ArgMinOptions *>(builtin_options())
- : nullptr;
- }
- const FakeQuantOptions *builtin_options_as_FakeQuantOptions() const
- {
- return builtin_options_type() == BuiltinOptions_FakeQuantOptions
- ? static_cast<const FakeQuantOptions *>(builtin_options())
- : nullptr;
- }
- const PackOptions *builtin_options_as_PackOptions() const
- {
- return builtin_options_type() == BuiltinOptions_PackOptions
- ? static_cast<const PackOptions *>(builtin_options())
- : nullptr;
- }
- const LogicalOrOptions *builtin_options_as_LogicalOrOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LogicalOrOptions
- ? static_cast<const LogicalOrOptions *>(builtin_options())
- : nullptr;
- }
- const OneHotOptions *builtin_options_as_OneHotOptions() const
- {
- return builtin_options_type() == BuiltinOptions_OneHotOptions
- ? static_cast<const OneHotOptions *>(builtin_options())
- : nullptr;
- }
- const LogicalAndOptions *builtin_options_as_LogicalAndOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LogicalAndOptions
- ? static_cast<const LogicalAndOptions *>(builtin_options())
- : nullptr;
- }
- const LogicalNotOptions *builtin_options_as_LogicalNotOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LogicalNotOptions
- ? static_cast<const LogicalNotOptions *>(builtin_options())
- : nullptr;
- }
- const UnpackOptions *builtin_options_as_UnpackOptions() const
- {
- return builtin_options_type() == BuiltinOptions_UnpackOptions
- ? static_cast<const UnpackOptions *>(builtin_options())
- : nullptr;
- }
- const FloorDivOptions *builtin_options_as_FloorDivOptions() const
- {
- return builtin_options_type() == BuiltinOptions_FloorDivOptions
- ? static_cast<const FloorDivOptions *>(builtin_options())
- : nullptr;
- }
- const SquareOptions *builtin_options_as_SquareOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SquareOptions
- ? static_cast<const SquareOptions *>(builtin_options())
- : nullptr;
- }
- const ZerosLikeOptions *builtin_options_as_ZerosLikeOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ZerosLikeOptions
- ? static_cast<const ZerosLikeOptions *>(builtin_options())
- : nullptr;
- }
- const FillOptions *builtin_options_as_FillOptions() const
- {
- return builtin_options_type() == BuiltinOptions_FillOptions
- ? static_cast<const FillOptions *>(builtin_options())
- : nullptr;
- }
- const BidirectionalSequenceLSTMOptions *
- builtin_options_as_BidirectionalSequenceLSTMOptions() const
- {
- return builtin_options_type() == BuiltinOptions_BidirectionalSequenceLSTMOptions
- ? static_cast<const BidirectionalSequenceLSTMOptions *>(builtin_options())
- : nullptr;
- }
- const BidirectionalSequenceRNNOptions *builtin_options_as_BidirectionalSequenceRNNOptions() const
- {
- return builtin_options_type() == BuiltinOptions_BidirectionalSequenceRNNOptions
- ? static_cast<const BidirectionalSequenceRNNOptions *>(builtin_options())
- : nullptr;
- }
- const UnidirectionalSequenceLSTMOptions *
- builtin_options_as_UnidirectionalSequenceLSTMOptions() const
- {
- return builtin_options_type() == BuiltinOptions_UnidirectionalSequenceLSTMOptions
- ? static_cast<const UnidirectionalSequenceLSTMOptions *>(builtin_options())
- : nullptr;
- }
- const FloorModOptions *builtin_options_as_FloorModOptions() const
- {
- return builtin_options_type() == BuiltinOptions_FloorModOptions
- ? static_cast<const FloorModOptions *>(builtin_options())
- : nullptr;
- }
- const RangeOptions *builtin_options_as_RangeOptions() const
- {
- return builtin_options_type() == BuiltinOptions_RangeOptions
- ? static_cast<const RangeOptions *>(builtin_options())
- : nullptr;
- }
- const ResizeNearestNeighborOptions *builtin_options_as_ResizeNearestNeighborOptions() const
- {
- return builtin_options_type() == BuiltinOptions_ResizeNearestNeighborOptions
- ? static_cast<const ResizeNearestNeighborOptions *>(builtin_options())
- : nullptr;
- }
- const LeakyReluOptions *builtin_options_as_LeakyReluOptions() const
- {
- return builtin_options_type() == BuiltinOptions_LeakyReluOptions
- ? static_cast<const LeakyReluOptions *>(builtin_options())
- : nullptr;
- }
- const SquaredDifferenceOptions *builtin_options_as_SquaredDifferenceOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SquaredDifferenceOptions
- ? static_cast<const SquaredDifferenceOptions *>(builtin_options())
- : nullptr;
- }
- const MirrorPadOptions *builtin_options_as_MirrorPadOptions() const
- {
- return builtin_options_type() == BuiltinOptions_MirrorPadOptions
- ? static_cast<const MirrorPadOptions *>(builtin_options())
- : nullptr;
- }
- const AbsOptions *builtin_options_as_AbsOptions() const
- {
- return builtin_options_type() == BuiltinOptions_AbsOptions
- ? static_cast<const AbsOptions *>(builtin_options())
- : nullptr;
- }
- const SplitVOptions *builtin_options_as_SplitVOptions() const
- {
- return builtin_options_type() == BuiltinOptions_SplitVOptions
- ? static_cast<const SplitVOptions *>(builtin_options())
- : nullptr;
- }
- const flatbuffers::Vector<uint8_t> *custom_options() const
- {
- return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_CUSTOM_OPTIONS);
- }
- CustomOptionsFormat custom_options_format() const
- {
- return static_cast<CustomOptionsFormat>(GetField<int8_t>(VT_CUSTOM_OPTIONS_FORMAT, 0));
- }
- const flatbuffers::Vector<uint8_t> *mutating_variable_inputs() const
- {
- return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_MUTATING_VARIABLE_INPUTS);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint32_t>(verifier, VT_OPCODE_INDEX) &&
- VerifyOffset(verifier, VT_INPUTS) && verifier.VerifyVector(inputs()) &&
- VerifyOffset(verifier, VT_OUTPUTS) && verifier.VerifyVector(outputs()) &&
- VerifyField<uint8_t>(verifier, VT_BUILTIN_OPTIONS_TYPE) &&
- VerifyOffset(verifier, VT_BUILTIN_OPTIONS) &&
- VerifyBuiltinOptions(verifier, builtin_options(), builtin_options_type()) &&
- VerifyOffset(verifier, VT_CUSTOM_OPTIONS) && verifier.VerifyVector(custom_options()) &&
- VerifyField<int8_t>(verifier, VT_CUSTOM_OPTIONS_FORMAT) &&
- VerifyOffset(verifier, VT_MUTATING_VARIABLE_INPUTS) &&
- verifier.VerifyVector(mutating_variable_inputs()) && verifier.EndTable();
- }
-};
-
-template <> inline const Conv2DOptions *Operator::builtin_options_as<Conv2DOptions>() const
-{
- return builtin_options_as_Conv2DOptions();
-}
-
-template <>
-inline const DepthwiseConv2DOptions *Operator::builtin_options_as<DepthwiseConv2DOptions>() const
-{
- return builtin_options_as_DepthwiseConv2DOptions();
-}
-
-template <>
-inline const ConcatEmbeddingsOptions *Operator::builtin_options_as<ConcatEmbeddingsOptions>() const
-{
- return builtin_options_as_ConcatEmbeddingsOptions();
-}
-
-template <>
-inline const LSHProjectionOptions *Operator::builtin_options_as<LSHProjectionOptions>() const
-{
- return builtin_options_as_LSHProjectionOptions();
-}
-
-template <> inline const Pool2DOptions *Operator::builtin_options_as<Pool2DOptions>() const
-{
- return builtin_options_as_Pool2DOptions();
-}
-
-template <> inline const SVDFOptions *Operator::builtin_options_as<SVDFOptions>() const
-{
- return builtin_options_as_SVDFOptions();
-}
-
-template <> inline const RNNOptions *Operator::builtin_options_as<RNNOptions>() const
-{
- return builtin_options_as_RNNOptions();
-}
-
-template <>
-inline const FullyConnectedOptions *Operator::builtin_options_as<FullyConnectedOptions>() const
-{
- return builtin_options_as_FullyConnectedOptions();
-}
-
-template <> inline const SoftmaxOptions *Operator::builtin_options_as<SoftmaxOptions>() const
-{
- return builtin_options_as_SoftmaxOptions();
-}
-
-template <>
-inline const ConcatenationOptions *Operator::builtin_options_as<ConcatenationOptions>() const
-{
- return builtin_options_as_ConcatenationOptions();
-}
-
-template <> inline const AddOptions *Operator::builtin_options_as<AddOptions>() const
-{
- return builtin_options_as_AddOptions();
-}
-
-template <> inline const L2NormOptions *Operator::builtin_options_as<L2NormOptions>() const
-{
- return builtin_options_as_L2NormOptions();
-}
-
-template <>
-inline const LocalResponseNormalizationOptions *
-Operator::builtin_options_as<LocalResponseNormalizationOptions>() const
-{
- return builtin_options_as_LocalResponseNormalizationOptions();
-}
-
-template <> inline const LSTMOptions *Operator::builtin_options_as<LSTMOptions>() const
-{
- return builtin_options_as_LSTMOptions();
-}
-
-template <>
-inline const ResizeBilinearOptions *Operator::builtin_options_as<ResizeBilinearOptions>() const
-{
- return builtin_options_as_ResizeBilinearOptions();
-}
-
-template <> inline const CallOptions *Operator::builtin_options_as<CallOptions>() const
-{
- return builtin_options_as_CallOptions();
-}
-
-template <> inline const ReshapeOptions *Operator::builtin_options_as<ReshapeOptions>() const
-{
- return builtin_options_as_ReshapeOptions();
-}
-
-template <> inline const SkipGramOptions *Operator::builtin_options_as<SkipGramOptions>() const
-{
- return builtin_options_as_SkipGramOptions();
-}
-
-template <>
-inline const SpaceToDepthOptions *Operator::builtin_options_as<SpaceToDepthOptions>() const
-{
- return builtin_options_as_SpaceToDepthOptions();
-}
-
-template <>
-inline const EmbeddingLookupSparseOptions *
-Operator::builtin_options_as<EmbeddingLookupSparseOptions>() const
-{
- return builtin_options_as_EmbeddingLookupSparseOptions();
-}
-
-template <> inline const MulOptions *Operator::builtin_options_as<MulOptions>() const
-{
- return builtin_options_as_MulOptions();
-}
-
-template <> inline const PadOptions *Operator::builtin_options_as<PadOptions>() const
-{
- return builtin_options_as_PadOptions();
-}
-
-template <> inline const GatherOptions *Operator::builtin_options_as<GatherOptions>() const
-{
- return builtin_options_as_GatherOptions();
-}
-
-template <>
-inline const BatchToSpaceNDOptions *Operator::builtin_options_as<BatchToSpaceNDOptions>() const
-{
- return builtin_options_as_BatchToSpaceNDOptions();
-}
-
-template <>
-inline const SpaceToBatchNDOptions *Operator::builtin_options_as<SpaceToBatchNDOptions>() const
-{
- return builtin_options_as_SpaceToBatchNDOptions();
-}
-
-template <> inline const TransposeOptions *Operator::builtin_options_as<TransposeOptions>() const
-{
- return builtin_options_as_TransposeOptions();
-}
-
-template <> inline const ReducerOptions *Operator::builtin_options_as<ReducerOptions>() const
-{
- return builtin_options_as_ReducerOptions();
-}
-
-template <> inline const SubOptions *Operator::builtin_options_as<SubOptions>() const
-{
- return builtin_options_as_SubOptions();
-}
-
-template <> inline const DivOptions *Operator::builtin_options_as<DivOptions>() const
-{
- return builtin_options_as_DivOptions();
-}
-
-template <> inline const SqueezeOptions *Operator::builtin_options_as<SqueezeOptions>() const
-{
- return builtin_options_as_SqueezeOptions();
-}
-
-template <>
-inline const SequenceRNNOptions *Operator::builtin_options_as<SequenceRNNOptions>() const
-{
- return builtin_options_as_SequenceRNNOptions();
-}
-
-template <>
-inline const StridedSliceOptions *Operator::builtin_options_as<StridedSliceOptions>() const
-{
- return builtin_options_as_StridedSliceOptions();
-}
-
-template <> inline const ExpOptions *Operator::builtin_options_as<ExpOptions>() const
-{
- return builtin_options_as_ExpOptions();
-}
-
-template <> inline const TopKV2Options *Operator::builtin_options_as<TopKV2Options>() const
-{
- return builtin_options_as_TopKV2Options();
-}
-
-template <> inline const SplitOptions *Operator::builtin_options_as<SplitOptions>() const
-{
- return builtin_options_as_SplitOptions();
-}
-
-template <> inline const LogSoftmaxOptions *Operator::builtin_options_as<LogSoftmaxOptions>() const
-{
- return builtin_options_as_LogSoftmaxOptions();
-}
-
-template <> inline const CastOptions *Operator::builtin_options_as<CastOptions>() const
-{
- return builtin_options_as_CastOptions();
-}
-
-template <> inline const DequantizeOptions *Operator::builtin_options_as<DequantizeOptions>() const
-{
- return builtin_options_as_DequantizeOptions();
-}
-
-template <>
-inline const MaximumMinimumOptions *Operator::builtin_options_as<MaximumMinimumOptions>() const
-{
- return builtin_options_as_MaximumMinimumOptions();
-}
-
-template <> inline const ArgMaxOptions *Operator::builtin_options_as<ArgMaxOptions>() const
-{
- return builtin_options_as_ArgMaxOptions();
-}
-
-template <> inline const LessOptions *Operator::builtin_options_as<LessOptions>() const
-{
- return builtin_options_as_LessOptions();
-}
-
-template <> inline const NegOptions *Operator::builtin_options_as<NegOptions>() const
-{
- return builtin_options_as_NegOptions();
-}
-
-template <> inline const PadV2Options *Operator::builtin_options_as<PadV2Options>() const
-{
- return builtin_options_as_PadV2Options();
-}
-
-template <> inline const GreaterOptions *Operator::builtin_options_as<GreaterOptions>() const
-{
- return builtin_options_as_GreaterOptions();
-}
-
-template <>
-inline const GreaterEqualOptions *Operator::builtin_options_as<GreaterEqualOptions>() const
-{
- return builtin_options_as_GreaterEqualOptions();
-}
-
-template <> inline const LessEqualOptions *Operator::builtin_options_as<LessEqualOptions>() const
-{
- return builtin_options_as_LessEqualOptions();
-}
-
-template <> inline const SelectOptions *Operator::builtin_options_as<SelectOptions>() const
-{
- return builtin_options_as_SelectOptions();
-}
-
-template <> inline const SliceOptions *Operator::builtin_options_as<SliceOptions>() const
-{
- return builtin_options_as_SliceOptions();
-}
-
-template <>
-inline const TransposeConvOptions *Operator::builtin_options_as<TransposeConvOptions>() const
-{
- return builtin_options_as_TransposeConvOptions();
-}
-
-template <>
-inline const SparseToDenseOptions *Operator::builtin_options_as<SparseToDenseOptions>() const
-{
- return builtin_options_as_SparseToDenseOptions();
-}
-
-template <> inline const TileOptions *Operator::builtin_options_as<TileOptions>() const
-{
- return builtin_options_as_TileOptions();
-}
-
-template <> inline const ExpandDimsOptions *Operator::builtin_options_as<ExpandDimsOptions>() const
-{
- return builtin_options_as_ExpandDimsOptions();
-}
-
-template <> inline const EqualOptions *Operator::builtin_options_as<EqualOptions>() const
-{
- return builtin_options_as_EqualOptions();
-}
-
-template <> inline const NotEqualOptions *Operator::builtin_options_as<NotEqualOptions>() const
-{
- return builtin_options_as_NotEqualOptions();
-}
-
-template <> inline const ShapeOptions *Operator::builtin_options_as<ShapeOptions>() const
-{
- return builtin_options_as_ShapeOptions();
-}
-
-template <> inline const PowOptions *Operator::builtin_options_as<PowOptions>() const
-{
- return builtin_options_as_PowOptions();
-}
-
-template <> inline const ArgMinOptions *Operator::builtin_options_as<ArgMinOptions>() const
-{
- return builtin_options_as_ArgMinOptions();
-}
-
-template <> inline const FakeQuantOptions *Operator::builtin_options_as<FakeQuantOptions>() const
-{
- return builtin_options_as_FakeQuantOptions();
-}
-
-template <> inline const PackOptions *Operator::builtin_options_as<PackOptions>() const
-{
- return builtin_options_as_PackOptions();
-}
-
-template <> inline const LogicalOrOptions *Operator::builtin_options_as<LogicalOrOptions>() const
-{
- return builtin_options_as_LogicalOrOptions();
-}
-
-template <> inline const OneHotOptions *Operator::builtin_options_as<OneHotOptions>() const
-{
- return builtin_options_as_OneHotOptions();
-}
-
-template <> inline const LogicalAndOptions *Operator::builtin_options_as<LogicalAndOptions>() const
-{
- return builtin_options_as_LogicalAndOptions();
-}
-
-template <> inline const LogicalNotOptions *Operator::builtin_options_as<LogicalNotOptions>() const
-{
- return builtin_options_as_LogicalNotOptions();
-}
-
-template <> inline const UnpackOptions *Operator::builtin_options_as<UnpackOptions>() const
-{
- return builtin_options_as_UnpackOptions();
-}
-
-template <> inline const FloorDivOptions *Operator::builtin_options_as<FloorDivOptions>() const
-{
- return builtin_options_as_FloorDivOptions();
-}
-
-template <> inline const SquareOptions *Operator::builtin_options_as<SquareOptions>() const
-{
- return builtin_options_as_SquareOptions();
-}
-
-template <> inline const ZerosLikeOptions *Operator::builtin_options_as<ZerosLikeOptions>() const
-{
- return builtin_options_as_ZerosLikeOptions();
-}
-
-template <> inline const FillOptions *Operator::builtin_options_as<FillOptions>() const
-{
- return builtin_options_as_FillOptions();
-}
-
-template <>
-inline const BidirectionalSequenceLSTMOptions *
-Operator::builtin_options_as<BidirectionalSequenceLSTMOptions>() const
-{
- return builtin_options_as_BidirectionalSequenceLSTMOptions();
-}
-
-template <>
-inline const BidirectionalSequenceRNNOptions *
-Operator::builtin_options_as<BidirectionalSequenceRNNOptions>() const
-{
- return builtin_options_as_BidirectionalSequenceRNNOptions();
-}
-
-template <>
-inline const UnidirectionalSequenceLSTMOptions *
-Operator::builtin_options_as<UnidirectionalSequenceLSTMOptions>() const
-{
- return builtin_options_as_UnidirectionalSequenceLSTMOptions();
-}
-
-template <> inline const FloorModOptions *Operator::builtin_options_as<FloorModOptions>() const
-{
- return builtin_options_as_FloorModOptions();
-}
-
-template <> inline const RangeOptions *Operator::builtin_options_as<RangeOptions>() const
-{
- return builtin_options_as_RangeOptions();
-}
-
-template <>
-inline const ResizeNearestNeighborOptions *
-Operator::builtin_options_as<ResizeNearestNeighborOptions>() const
-{
- return builtin_options_as_ResizeNearestNeighborOptions();
-}
-
-template <> inline const LeakyReluOptions *Operator::builtin_options_as<LeakyReluOptions>() const
-{
- return builtin_options_as_LeakyReluOptions();
-}
-
-template <>
-inline const SquaredDifferenceOptions *
-Operator::builtin_options_as<SquaredDifferenceOptions>() const
-{
- return builtin_options_as_SquaredDifferenceOptions();
-}
-
-template <> inline const MirrorPadOptions *Operator::builtin_options_as<MirrorPadOptions>() const
-{
- return builtin_options_as_MirrorPadOptions();
-}
-
-template <> inline const AbsOptions *Operator::builtin_options_as<AbsOptions>() const
-{
- return builtin_options_as_AbsOptions();
-}
-
-template <> inline const SplitVOptions *Operator::builtin_options_as<SplitVOptions>() const
-{
- return builtin_options_as_SplitVOptions();
-}
-
-struct OperatorBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_opcode_index(uint32_t opcode_index)
- {
- fbb_.AddElement<uint32_t>(Operator::VT_OPCODE_INDEX, opcode_index, 0);
- }
- void add_inputs(flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputs)
- {
- fbb_.AddOffset(Operator::VT_INPUTS, inputs);
- }
- void add_outputs(flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputs)
- {
- fbb_.AddOffset(Operator::VT_OUTPUTS, outputs);
- }
- void add_builtin_options_type(BuiltinOptions builtin_options_type)
- {
- fbb_.AddElement<uint8_t>(Operator::VT_BUILTIN_OPTIONS_TYPE,
- static_cast<uint8_t>(builtin_options_type), 0);
- }
- void add_builtin_options(flatbuffers::Offset<void> builtin_options)
- {
- fbb_.AddOffset(Operator::VT_BUILTIN_OPTIONS, builtin_options);
- }
- void add_custom_options(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom_options)
- {
- fbb_.AddOffset(Operator::VT_CUSTOM_OPTIONS, custom_options);
- }
- void add_custom_options_format(CustomOptionsFormat custom_options_format)
- {
- fbb_.AddElement<int8_t>(Operator::VT_CUSTOM_OPTIONS_FORMAT,
- static_cast<int8_t>(custom_options_format), 0);
- }
- void add_mutating_variable_inputs(
- flatbuffers::Offset<flatbuffers::Vector<uint8_t>> mutating_variable_inputs)
- {
- fbb_.AddOffset(Operator::VT_MUTATING_VARIABLE_INPUTS, mutating_variable_inputs);
- }
- explicit OperatorBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- OperatorBuilder &operator=(const OperatorBuilder &);
- flatbuffers::Offset<Operator> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<Operator>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<Operator>
-CreateOperator(flatbuffers::FlatBufferBuilder &_fbb, uint32_t opcode_index = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputs = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputs = 0,
- BuiltinOptions builtin_options_type = BuiltinOptions_NONE,
- flatbuffers::Offset<void> builtin_options = 0,
- flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom_options = 0,
- CustomOptionsFormat custom_options_format = CustomOptionsFormat_FLEXBUFFERS,
- flatbuffers::Offset<flatbuffers::Vector<uint8_t>> mutating_variable_inputs = 0)
-{
- OperatorBuilder builder_(_fbb);
- builder_.add_mutating_variable_inputs(mutating_variable_inputs);
- builder_.add_custom_options(custom_options);
- builder_.add_builtin_options(builtin_options);
- builder_.add_outputs(outputs);
- builder_.add_inputs(inputs);
- builder_.add_opcode_index(opcode_index);
- builder_.add_custom_options_format(custom_options_format);
- builder_.add_builtin_options_type(builtin_options_type);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<Operator>
-CreateOperatorDirect(flatbuffers::FlatBufferBuilder &_fbb, uint32_t opcode_index = 0,
- const std::vector<int32_t> *inputs = nullptr,
- const std::vector<int32_t> *outputs = nullptr,
- BuiltinOptions builtin_options_type = BuiltinOptions_NONE,
- flatbuffers::Offset<void> builtin_options = 0,
- const std::vector<uint8_t> *custom_options = nullptr,
- CustomOptionsFormat custom_options_format = CustomOptionsFormat_FLEXBUFFERS,
- const std::vector<uint8_t> *mutating_variable_inputs = nullptr)
-{
- return neurun_tflite::CreateOperator(
- _fbb, opcode_index, inputs ? _fbb.CreateVector<int32_t>(*inputs) : 0,
- outputs ? _fbb.CreateVector<int32_t>(*outputs) : 0, builtin_options_type, builtin_options,
- custom_options ? _fbb.CreateVector<uint8_t>(*custom_options) : 0, custom_options_format,
- mutating_variable_inputs ? _fbb.CreateVector<uint8_t>(*mutating_variable_inputs) : 0);
-}
-
-struct SubGraph FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_TENSORS = 4,
- VT_INPUTS = 6,
- VT_OUTPUTS = 8,
- VT_OPERATORS = 10,
- VT_NAME = 12
- };
- const flatbuffers::Vector<flatbuffers::Offset<Tensor>> *tensors() const
- {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<Tensor>> *>(VT_TENSORS);
- }
- const flatbuffers::Vector<int32_t> *inputs() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_INPUTS);
- }
- const flatbuffers::Vector<int32_t> *outputs() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_OUTPUTS);
- }
- const flatbuffers::Vector<flatbuffers::Offset<Operator>> *operators() const
- {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<Operator>> *>(VT_OPERATORS);
- }
- const flatbuffers::String *name() const
- {
- return GetPointer<const flatbuffers::String *>(VT_NAME);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_TENSORS) &&
- verifier.VerifyVector(tensors()) && verifier.VerifyVectorOfTables(tensors()) &&
- VerifyOffset(verifier, VT_INPUTS) && verifier.VerifyVector(inputs()) &&
- VerifyOffset(verifier, VT_OUTPUTS) && verifier.VerifyVector(outputs()) &&
- VerifyOffset(verifier, VT_OPERATORS) && verifier.VerifyVector(operators()) &&
- verifier.VerifyVectorOfTables(operators()) && VerifyOffset(verifier, VT_NAME) &&
- verifier.VerifyString(name()) && verifier.EndTable();
- }
-};
-
-struct SubGraphBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_tensors(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Tensor>>> tensors)
- {
- fbb_.AddOffset(SubGraph::VT_TENSORS, tensors);
- }
- void add_inputs(flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputs)
- {
- fbb_.AddOffset(SubGraph::VT_INPUTS, inputs);
- }
- void add_outputs(flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputs)
- {
- fbb_.AddOffset(SubGraph::VT_OUTPUTS, outputs);
- }
- void
- add_operators(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Operator>>> operators)
- {
- fbb_.AddOffset(SubGraph::VT_OPERATORS, operators);
- }
- void add_name(flatbuffers::Offset<flatbuffers::String> name)
- {
- fbb_.AddOffset(SubGraph::VT_NAME, name);
- }
- explicit SubGraphBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- SubGraphBuilder &operator=(const SubGraphBuilder &);
- flatbuffers::Offset<SubGraph> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SubGraph>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SubGraph> CreateSubGraph(
- flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Tensor>>> tensors = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputs = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputs = 0,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Operator>>> operators = 0,
- flatbuffers::Offset<flatbuffers::String> name = 0)
-{
- SubGraphBuilder builder_(_fbb);
- builder_.add_name(name);
- builder_.add_operators(operators);
- builder_.add_outputs(outputs);
- builder_.add_inputs(inputs);
- builder_.add_tensors(tensors);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<SubGraph>
-CreateSubGraphDirect(flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<flatbuffers::Offset<Tensor>> *tensors = nullptr,
- const std::vector<int32_t> *inputs = nullptr,
- const std::vector<int32_t> *outputs = nullptr,
- const std::vector<flatbuffers::Offset<Operator>> *operators = nullptr,
- const char *name = nullptr)
-{
- return neurun_tflite::CreateSubGraph(
- _fbb, tensors ? _fbb.CreateVector<flatbuffers::Offset<Tensor>>(*tensors) : 0,
- inputs ? _fbb.CreateVector<int32_t>(*inputs) : 0,
- outputs ? _fbb.CreateVector<int32_t>(*outputs) : 0,
- operators ? _fbb.CreateVector<flatbuffers::Offset<Operator>>(*operators) : 0,
- name ? _fbb.CreateString(name) : 0);
-}
-
-struct Buffer FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_DATA = 4
- };
- const flatbuffers::Vector<uint8_t> *data() const
- {
- return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_DATA);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_DATA) &&
- verifier.VerifyVector(data()) && verifier.EndTable();
- }
-};
-
-struct BufferBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_data(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data)
- {
- fbb_.AddOffset(Buffer::VT_DATA, data);
- }
- explicit BufferBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- BufferBuilder &operator=(const BufferBuilder &);
- flatbuffers::Offset<Buffer> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<Buffer>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<Buffer>
-CreateBuffer(flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data = 0)
-{
- BufferBuilder builder_(_fbb);
- builder_.add_data(data);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<Buffer> CreateBufferDirect(flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<uint8_t> *data = nullptr)
-{
- return neurun_tflite::CreateBuffer(_fbb, data ? _fbb.CreateVector<uint8_t>(*data) : 0);
-}
-
-struct Model FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
-{
- enum
- {
- VT_VERSION = 4,
- VT_OPERATOR_CODES = 6,
- VT_SUBGRAPHS = 8,
- VT_DESCRIPTION = 10,
- VT_BUFFERS = 12,
- VT_METADATA_BUFFER = 14
- };
- uint32_t version() const { return GetField<uint32_t>(VT_VERSION, 0); }
- const flatbuffers::Vector<flatbuffers::Offset<OperatorCode>> *operator_codes() const
- {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<OperatorCode>> *>(
- VT_OPERATOR_CODES);
- }
- const flatbuffers::Vector<flatbuffers::Offset<SubGraph>> *subgraphs() const
- {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<SubGraph>> *>(VT_SUBGRAPHS);
- }
- const flatbuffers::String *description() const
- {
- return GetPointer<const flatbuffers::String *>(VT_DESCRIPTION);
- }
- const flatbuffers::Vector<flatbuffers::Offset<Buffer>> *buffers() const
- {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<Buffer>> *>(VT_BUFFERS);
- }
- const flatbuffers::Vector<int32_t> *metadata_buffer() const
- {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_METADATA_BUFFER);
- }
- bool Verify(flatbuffers::Verifier &verifier) const
- {
- return VerifyTableStart(verifier) && VerifyField<uint32_t>(verifier, VT_VERSION) &&
- VerifyOffset(verifier, VT_OPERATOR_CODES) && verifier.VerifyVector(operator_codes()) &&
- verifier.VerifyVectorOfTables(operator_codes()) &&
- VerifyOffset(verifier, VT_SUBGRAPHS) && verifier.VerifyVector(subgraphs()) &&
- verifier.VerifyVectorOfTables(subgraphs()) && VerifyOffset(verifier, VT_DESCRIPTION) &&
- verifier.VerifyString(description()) && VerifyOffset(verifier, VT_BUFFERS) &&
- verifier.VerifyVector(buffers()) && verifier.VerifyVectorOfTables(buffers()) &&
- VerifyOffset(verifier, VT_METADATA_BUFFER) && verifier.VerifyVector(metadata_buffer()) &&
- verifier.EndTable();
- }
-};
-
-struct ModelBuilder
-{
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_version(uint32_t version) { fbb_.AddElement<uint32_t>(Model::VT_VERSION, version, 0); }
- void add_operator_codes(
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<OperatorCode>>> operator_codes)
- {
- fbb_.AddOffset(Model::VT_OPERATOR_CODES, operator_codes);
- }
- void
- add_subgraphs(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<SubGraph>>> subgraphs)
- {
- fbb_.AddOffset(Model::VT_SUBGRAPHS, subgraphs);
- }
- void add_description(flatbuffers::Offset<flatbuffers::String> description)
- {
- fbb_.AddOffset(Model::VT_DESCRIPTION, description);
- }
- void add_buffers(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Buffer>>> buffers)
- {
- fbb_.AddOffset(Model::VT_BUFFERS, buffers);
- }
- void add_metadata_buffer(flatbuffers::Offset<flatbuffers::Vector<int32_t>> metadata_buffer)
- {
- fbb_.AddOffset(Model::VT_METADATA_BUFFER, metadata_buffer);
- }
- explicit ModelBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
- {
- start_ = fbb_.StartTable();
- }
- ModelBuilder &operator=(const ModelBuilder &);
- flatbuffers::Offset<Model> Finish()
- {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<Model>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<Model> CreateModel(
- flatbuffers::FlatBufferBuilder &_fbb, uint32_t version = 0,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<OperatorCode>>> operator_codes = 0,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<SubGraph>>> subgraphs = 0,
- flatbuffers::Offset<flatbuffers::String> description = 0,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Buffer>>> buffers = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> metadata_buffer = 0)
-{
- ModelBuilder builder_(_fbb);
- builder_.add_metadata_buffer(metadata_buffer);
- builder_.add_buffers(buffers);
- builder_.add_description(description);
- builder_.add_subgraphs(subgraphs);
- builder_.add_operator_codes(operator_codes);
- builder_.add_version(version);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<Model>
-CreateModelDirect(flatbuffers::FlatBufferBuilder &_fbb, uint32_t version = 0,
- const std::vector<flatbuffers::Offset<OperatorCode>> *operator_codes = nullptr,
- const std::vector<flatbuffers::Offset<SubGraph>> *subgraphs = nullptr,
- const char *description = nullptr,
- const std::vector<flatbuffers::Offset<Buffer>> *buffers = nullptr,
- const std::vector<int32_t> *metadata_buffer = nullptr)
-{
- return neurun_tflite::CreateModel(
- _fbb, version,
- operator_codes ? _fbb.CreateVector<flatbuffers::Offset<OperatorCode>>(*operator_codes) : 0,
- subgraphs ? _fbb.CreateVector<flatbuffers::Offset<SubGraph>>(*subgraphs) : 0,
- description ? _fbb.CreateString(description) : 0,
- buffers ? _fbb.CreateVector<flatbuffers::Offset<Buffer>>(*buffers) : 0,
- metadata_buffer ? _fbb.CreateVector<int32_t>(*metadata_buffer) : 0);
-}
-
-inline bool VerifyQuantizationDetails(flatbuffers::Verifier &verifier, const void *obj,
- QuantizationDetails type)
-{
- switch (type)
- {
- case QuantizationDetails_NONE:
- {
- return true;
- }
- case QuantizationDetails_CustomQuantization:
- {
- auto ptr = reinterpret_cast<const CustomQuantization *>(obj);
- return verifier.VerifyTable(ptr);
- }
- default:
- return false;
- }
-}
-
-inline bool
-VerifyQuantizationDetailsVector(flatbuffers::Verifier &verifier,
- const flatbuffers::Vector<flatbuffers::Offset<void>> *values,
- const flatbuffers::Vector<uint8_t> *types)
-{
- if (!values || !types)
- return !values && !types;
- if (values->size() != types->size())
- return false;
- for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i)
- {
- if (!VerifyQuantizationDetails(verifier, values->Get(i),
- types->GetEnum<QuantizationDetails>(i)))
- {
- return false;
- }
- }
- return true;
-}
-
-inline bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj,
- BuiltinOptions type)
-{
- switch (type)
- {
- case BuiltinOptions_NONE:
- {
- return true;
- }
- case BuiltinOptions_Conv2DOptions:
- {
- auto ptr = reinterpret_cast<const Conv2DOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_DepthwiseConv2DOptions:
- {
- auto ptr = reinterpret_cast<const DepthwiseConv2DOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ConcatEmbeddingsOptions:
- {
- auto ptr = reinterpret_cast<const ConcatEmbeddingsOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LSHProjectionOptions:
- {
- auto ptr = reinterpret_cast<const LSHProjectionOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_Pool2DOptions:
- {
- auto ptr = reinterpret_cast<const Pool2DOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SVDFOptions:
- {
- auto ptr = reinterpret_cast<const SVDFOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_RNNOptions:
- {
- auto ptr = reinterpret_cast<const RNNOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_FullyConnectedOptions:
- {
- auto ptr = reinterpret_cast<const FullyConnectedOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SoftmaxOptions:
- {
- auto ptr = reinterpret_cast<const SoftmaxOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ConcatenationOptions:
- {
- auto ptr = reinterpret_cast<const ConcatenationOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_AddOptions:
- {
- auto ptr = reinterpret_cast<const AddOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_L2NormOptions:
- {
- auto ptr = reinterpret_cast<const L2NormOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LocalResponseNormalizationOptions:
- {
- auto ptr = reinterpret_cast<const LocalResponseNormalizationOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LSTMOptions:
- {
- auto ptr = reinterpret_cast<const LSTMOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ResizeBilinearOptions:
- {
- auto ptr = reinterpret_cast<const ResizeBilinearOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_CallOptions:
- {
- auto ptr = reinterpret_cast<const CallOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ReshapeOptions:
- {
- auto ptr = reinterpret_cast<const ReshapeOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SkipGramOptions:
- {
- auto ptr = reinterpret_cast<const SkipGramOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SpaceToDepthOptions:
- {
- auto ptr = reinterpret_cast<const SpaceToDepthOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_EmbeddingLookupSparseOptions:
- {
- auto ptr = reinterpret_cast<const EmbeddingLookupSparseOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_MulOptions:
- {
- auto ptr = reinterpret_cast<const MulOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_PadOptions:
- {
- auto ptr = reinterpret_cast<const PadOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_GatherOptions:
- {
- auto ptr = reinterpret_cast<const GatherOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_BatchToSpaceNDOptions:
- {
- auto ptr = reinterpret_cast<const BatchToSpaceNDOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SpaceToBatchNDOptions:
- {
- auto ptr = reinterpret_cast<const SpaceToBatchNDOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_TransposeOptions:
- {
- auto ptr = reinterpret_cast<const TransposeOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ReducerOptions:
- {
- auto ptr = reinterpret_cast<const ReducerOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SubOptions:
- {
- auto ptr = reinterpret_cast<const SubOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_DivOptions:
- {
- auto ptr = reinterpret_cast<const DivOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SqueezeOptions:
- {
- auto ptr = reinterpret_cast<const SqueezeOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SequenceRNNOptions:
- {
- auto ptr = reinterpret_cast<const SequenceRNNOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_StridedSliceOptions:
- {
- auto ptr = reinterpret_cast<const StridedSliceOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ExpOptions:
- {
- auto ptr = reinterpret_cast<const ExpOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_TopKV2Options:
- {
- auto ptr = reinterpret_cast<const TopKV2Options *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SplitOptions:
- {
- auto ptr = reinterpret_cast<const SplitOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LogSoftmaxOptions:
- {
- auto ptr = reinterpret_cast<const LogSoftmaxOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_CastOptions:
- {
- auto ptr = reinterpret_cast<const CastOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_DequantizeOptions:
- {
- auto ptr = reinterpret_cast<const DequantizeOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_MaximumMinimumOptions:
- {
- auto ptr = reinterpret_cast<const MaximumMinimumOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ArgMaxOptions:
- {
- auto ptr = reinterpret_cast<const ArgMaxOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LessOptions:
- {
- auto ptr = reinterpret_cast<const LessOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_NegOptions:
- {
- auto ptr = reinterpret_cast<const NegOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_PadV2Options:
- {
- auto ptr = reinterpret_cast<const PadV2Options *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_GreaterOptions:
- {
- auto ptr = reinterpret_cast<const GreaterOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_GreaterEqualOptions:
- {
- auto ptr = reinterpret_cast<const GreaterEqualOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LessEqualOptions:
- {
- auto ptr = reinterpret_cast<const LessEqualOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SelectOptions:
- {
- auto ptr = reinterpret_cast<const SelectOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SliceOptions:
- {
- auto ptr = reinterpret_cast<const SliceOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_TransposeConvOptions:
- {
- auto ptr = reinterpret_cast<const TransposeConvOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SparseToDenseOptions:
- {
- auto ptr = reinterpret_cast<const SparseToDenseOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_TileOptions:
- {
- auto ptr = reinterpret_cast<const TileOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ExpandDimsOptions:
- {
- auto ptr = reinterpret_cast<const ExpandDimsOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_EqualOptions:
- {
- auto ptr = reinterpret_cast<const EqualOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_NotEqualOptions:
- {
- auto ptr = reinterpret_cast<const NotEqualOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ShapeOptions:
- {
- auto ptr = reinterpret_cast<const ShapeOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_PowOptions:
- {
- auto ptr = reinterpret_cast<const PowOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ArgMinOptions:
- {
- auto ptr = reinterpret_cast<const ArgMinOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_FakeQuantOptions:
- {
- auto ptr = reinterpret_cast<const FakeQuantOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_PackOptions:
- {
- auto ptr = reinterpret_cast<const PackOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LogicalOrOptions:
- {
- auto ptr = reinterpret_cast<const LogicalOrOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_OneHotOptions:
- {
- auto ptr = reinterpret_cast<const OneHotOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LogicalAndOptions:
- {
- auto ptr = reinterpret_cast<const LogicalAndOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LogicalNotOptions:
- {
- auto ptr = reinterpret_cast<const LogicalNotOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_UnpackOptions:
- {
- auto ptr = reinterpret_cast<const UnpackOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_FloorDivOptions:
- {
- auto ptr = reinterpret_cast<const FloorDivOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SquareOptions:
- {
- auto ptr = reinterpret_cast<const SquareOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ZerosLikeOptions:
- {
- auto ptr = reinterpret_cast<const ZerosLikeOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_FillOptions:
- {
- auto ptr = reinterpret_cast<const FillOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_BidirectionalSequenceLSTMOptions:
- {
- auto ptr = reinterpret_cast<const BidirectionalSequenceLSTMOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_BidirectionalSequenceRNNOptions:
- {
- auto ptr = reinterpret_cast<const BidirectionalSequenceRNNOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_UnidirectionalSequenceLSTMOptions:
- {
- auto ptr = reinterpret_cast<const UnidirectionalSequenceLSTMOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_FloorModOptions:
- {
- auto ptr = reinterpret_cast<const FloorModOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_RangeOptions:
- {
- auto ptr = reinterpret_cast<const RangeOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_ResizeNearestNeighborOptions:
- {
- auto ptr = reinterpret_cast<const ResizeNearestNeighborOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_LeakyReluOptions:
- {
- auto ptr = reinterpret_cast<const LeakyReluOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SquaredDifferenceOptions:
- {
- auto ptr = reinterpret_cast<const SquaredDifferenceOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_MirrorPadOptions:
- {
- auto ptr = reinterpret_cast<const MirrorPadOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_AbsOptions:
- {
- auto ptr = reinterpret_cast<const AbsOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case BuiltinOptions_SplitVOptions:
- {
- auto ptr = reinterpret_cast<const SplitVOptions *>(obj);
- return verifier.VerifyTable(ptr);
- }
- default:
- return false;
- }
-}
-
-inline bool VerifyBuiltinOptionsVector(flatbuffers::Verifier &verifier,
- const flatbuffers::Vector<flatbuffers::Offset<void>> *values,
- const flatbuffers::Vector<uint8_t> *types)
-{
- if (!values || !types)
- return !values && !types;
- if (values->size() != types->size())
- return false;
- for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i)
- {
- if (!VerifyBuiltinOptions(verifier, values->Get(i), types->GetEnum<BuiltinOptions>(i)))
- {
- return false;
- }
- }
- return true;
-}
-
-inline const neurun_tflite::Model *GetModel(const void *buf)
-{
- return flatbuffers::GetRoot<neurun_tflite::Model>(buf);
-}
-
-inline const neurun_tflite::Model *GetSizePrefixedModel(const void *buf)
-{
- return flatbuffers::GetSizePrefixedRoot<neurun_tflite::Model>(buf);
-}
-
-inline const char *ModelIdentifier() { return "TFL3"; }
-
-inline bool ModelBufferHasIdentifier(const void *buf)
-{
- return flatbuffers::BufferHasIdentifier(buf, ModelIdentifier());
-}
-
-inline bool VerifyModelBuffer(flatbuffers::Verifier &verifier)
-{
- return verifier.VerifyBuffer<neurun_tflite::Model>(ModelIdentifier());
-}
-
-inline bool VerifySizePrefixedModelBuffer(flatbuffers::Verifier &verifier)
-{
- return verifier.VerifySizePrefixedBuffer<neurun_tflite::Model>(ModelIdentifier());
-}
-
-inline const char *ModelExtension() { return "tflite"; }
-
-inline void FinishModelBuffer(flatbuffers::FlatBufferBuilder &fbb,
- flatbuffers::Offset<neurun_tflite::Model> root)
-{
- fbb.Finish(root, ModelIdentifier());
-}
-
-inline void FinishSizePrefixedModelBuffer(flatbuffers::FlatBufferBuilder &fbb,
- flatbuffers::Offset<neurun_tflite::Model> root)
-{
- fbb.FinishSizePrefixed(root, ModelIdentifier());
-}
-
-} // namespace neurun_tflite
-
-#endif // FLATBUFFERS_GENERATED_SCHEMA_NEURUN_TFLITE_H_
diff --git a/runtime/neurun/frontend/tflite/tflite_schema.fbs b/runtime/neurun/frontend/tflite/tflite_schema.fbs
deleted file mode 100644
index ede4dfa3a..000000000
--- a/runtime/neurun/frontend/tflite/tflite_schema.fbs
+++ /dev/null
@@ -1,795 +0,0 @@
-// Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Revision History
-// Version 0: Initial version.
-// Version 1: Add subgraphs to schema.
-// Version 2: Rename operators to conform to NN API.
-// Version 3: Move buffer data from Model.Subgraph.Tensors to Model.Buffers.
-
-// Change namespace to neurun_tflite
-namespace neurun_tflite;
-
-// This corresponds to the version.
-file_identifier "TFL3";
-// File extension of any written files.
-file_extension "tflite";
-
-// IMPORTANT: All new members of tables, enums and unions must be added at the
-// end to ensure backwards compatibility.
-
-// The type of data stored in a tensor.
-enum TensorType : byte {
- FLOAT32 = 0,
- FLOAT16 = 1,
- INT32 = 2,
- UINT8 = 3,
- INT64 = 4,
- STRING = 5,
- BOOL = 6,
- INT16 = 7,
- COMPLEX64 = 8,
- INT8 = 9,
-}
-
-// Custom quantization parameters for experimenting with new quantization
-// techniques.
-table CustomQuantization {
- custom:[ubyte] (force_align: 16);
-}
-
-// Represents a specific quantization technique's parameters.
-union QuantizationDetails {
- CustomQuantization,
-}
-
-// Parameters for converting a quantized tensor back to float.
-table QuantizationParameters {
- // These four parameters are the asymmetric linear quantization parameters.
- // Given a quantized value q, the corresponding float value f should be:
- // f = scale * (q - zero_point)
- // For other quantization types, the QuantizationDetails below is used.
- min:[float]; // For importing back into tensorflow.
- max:[float]; // For importing back into tensorflow.
- scale:[float]; // For dequantizing the tensor's values.
- zero_point:[long];
-
- // If this is not none, the quantization parameters above are ignored and the
- // value of the QuantizationDetails union below should be used.
- details:QuantizationDetails;
-}
-
-table Tensor {
- // The tensor shape. The meaning of each entry is operator-specific but
- // builtin ops use: [batch size, height, width, number of channels] (That's
- // Tensorflow's NHWC).
- shape:[int];
- type:TensorType;
- // An index that refers to the buffers table at the root of the model. Or,
- // if there is no data buffer associated (i.e. intermediate results), then
- // this is 0 (which refers to an always existent empty buffer).
- //
- // The data_buffer itself is an opaque container, with the assumption that the
- // target device is little-endian. In addition, all builtin operators assume
- // the memory is ordered such that if `shape` is [4, 3, 2], then index
- // [i, j, k] maps to data_buffer[i*3*2 + j*2 + k].
- buffer:uint;
- name:string; // For debugging and importing back into tensorflow.
- quantization:QuantizationParameters; // Optional.
-
- is_variable:bool = false;
-}
-
-// A list of builtin operators. Builtin operators are slightly faster than custom
-// ones, but not by much. Moreover, while custom operators accept an opaque
-// object containing configuration parameters, builtins have a predetermined
-// set of acceptable options.
-enum BuiltinOperator : byte {
- ADD = 0,
- AVERAGE_POOL_2D = 1,
- CONCATENATION = 2,
- CONV_2D = 3,
- DEPTHWISE_CONV_2D = 4,
- // DEPTH_TO_SPACE = 5,
- DEQUANTIZE = 6,
- EMBEDDING_LOOKUP = 7,
- FLOOR = 8,
- FULLY_CONNECTED = 9,
- HASHTABLE_LOOKUP = 10,
- L2_NORMALIZATION = 11,
- L2_POOL_2D = 12,
- LOCAL_RESPONSE_NORMALIZATION = 13,
- LOGISTIC = 14,
- LSH_PROJECTION = 15,
- LSTM = 16,
- MAX_POOL_2D = 17,
- MUL = 18,
- RELU = 19,
- // NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed
- // since different model developers use RELU1 in different ways. Never
- // create another op called RELU1.
- RELU_N1_TO_1 = 20,
- RELU6 = 21,
- RESHAPE = 22,
- RESIZE_BILINEAR = 23,
- RNN = 24,
- SOFTMAX = 25,
- SPACE_TO_DEPTH = 26,
- SVDF = 27,
- TANH = 28,
- // TODO(aselle): Consider rename to CONCATENATE_EMBEDDINGS
- CONCAT_EMBEDDINGS = 29,
- SKIP_GRAM = 30,
- CALL = 31,
- CUSTOM = 32,
- EMBEDDING_LOOKUP_SPARSE = 33,
- PAD = 34,
- UNIDIRECTIONAL_SEQUENCE_RNN = 35,
- GATHER = 36,
- BATCH_TO_SPACE_ND = 37,
- SPACE_TO_BATCH_ND = 38,
- TRANSPOSE = 39,
- MEAN = 40,
- SUB = 41,
- DIV = 42,
- SQUEEZE = 43,
- UNIDIRECTIONAL_SEQUENCE_LSTM = 44,
- STRIDED_SLICE = 45,
- BIDIRECTIONAL_SEQUENCE_RNN = 46,
- EXP = 47,
- TOPK_V2 = 48,
- SPLIT = 49,
- LOG_SOFTMAX = 50,
- // DELEGATE is a special op type for the operations which are delegated to
- // other backends.
- // WARNING: Experimental interface, subject to change
- DELEGATE = 51,
- BIDIRECTIONAL_SEQUENCE_LSTM = 52,
- CAST = 53,
- PRELU = 54,
- MAXIMUM = 55,
- ARG_MAX = 56,
- MINIMUM = 57,
- LESS = 58,
- NEG = 59,
- PADV2 = 60,
- GREATER = 61,
- GREATER_EQUAL = 62,
- LESS_EQUAL = 63,
- SELECT = 64,
- SLICE = 65,
- SIN = 66,
- TRANSPOSE_CONV = 67,
- SPARSE_TO_DENSE = 68,
- TILE = 69,
- EXPAND_DIMS = 70,
- EQUAL = 71,
- NOT_EQUAL = 72,
- LOG = 73,
- SUM = 74,
- SQRT = 75,
- RSQRT = 76,
- SHAPE = 77,
- POW = 78,
- ARG_MIN = 79,
- FAKE_QUANT = 80,
- REDUCE_PROD = 81,
- REDUCE_MAX = 82,
- PACK = 83,
- LOGICAL_OR = 84,
- ONE_HOT = 85,
- LOGICAL_AND = 86,
- LOGICAL_NOT = 87,
- UNPACK = 88,
- REDUCE_MIN = 89,
- FLOOR_DIV = 90,
- REDUCE_ANY = 91,
- SQUARE = 92,
- ZEROS_LIKE = 93,
- FILL = 94,
- FLOOR_MOD = 95,
- RANGE = 96,
- RESIZE_NEAREST_NEIGHBOR = 97,
- LEAKY_RELU = 98,
- SQUARED_DIFFERENCE = 99,
- MIRROR_PAD = 100,
- ABS = 101,
- SPLIT_V = 102,
-}
-
-// Options for the builtin operators.
-union BuiltinOptions {
- Conv2DOptions,
- DepthwiseConv2DOptions,
- ConcatEmbeddingsOptions,
- LSHProjectionOptions,
- Pool2DOptions,
- SVDFOptions,
- RNNOptions,
- FullyConnectedOptions,
- SoftmaxOptions,
- ConcatenationOptions,
- AddOptions,
- L2NormOptions,
- LocalResponseNormalizationOptions,
- LSTMOptions,
- ResizeBilinearOptions,
- CallOptions,
- ReshapeOptions,
- SkipGramOptions,
- SpaceToDepthOptions,
- EmbeddingLookupSparseOptions,
- MulOptions,
- PadOptions,
- GatherOptions,
- BatchToSpaceNDOptions,
- SpaceToBatchNDOptions,
- TransposeOptions,
- ReducerOptions,
- SubOptions,
- DivOptions,
- SqueezeOptions,
- SequenceRNNOptions,
- StridedSliceOptions,
- ExpOptions,
- TopKV2Options,
- SplitOptions,
- LogSoftmaxOptions,
- CastOptions,
- DequantizeOptions,
- MaximumMinimumOptions,
- ArgMaxOptions,
- LessOptions,
- NegOptions,
- PadV2Options,
- GreaterOptions,
- GreaterEqualOptions,
- LessEqualOptions,
- SelectOptions,
- SliceOptions,
- TransposeConvOptions,
- SparseToDenseOptions,
- TileOptions,
- ExpandDimsOptions,
- EqualOptions,
- NotEqualOptions,
- ShapeOptions,
- PowOptions,
- ArgMinOptions,
- FakeQuantOptions,
- PackOptions,
- LogicalOrOptions,
- OneHotOptions,
- LogicalAndOptions,
- LogicalNotOptions,
- UnpackOptions,
- FloorDivOptions,
- SquareOptions,
- ZerosLikeOptions,
- FillOptions,
- BidirectionalSequenceLSTMOptions,
- BidirectionalSequenceRNNOptions,
- UnidirectionalSequenceLSTMOptions,
- FloorModOptions,
- RangeOptions,
- ResizeNearestNeighborOptions,
- LeakyReluOptions,
- SquaredDifferenceOptions,
- MirrorPadOptions,
- AbsOptions,
- SplitVOptions,
-}
-
-enum Padding : byte { SAME, VALID }
-
-enum ActivationFunctionType : byte {
- NONE = 0,
- RELU = 1,
- RELU_N1_TO_1 = 2,
- RELU6 = 3,
- TANH = 4,
- SIGN_BIT = 5,
-}
-
-table Conv2DOptions {
- padding:Padding;
- stride_w:int;
- stride_h:int;
- fused_activation_function:ActivationFunctionType;
- dilation_w_factor:int = 1;
- dilation_h_factor:int = 1;
-}
-
-table Pool2DOptions {
- padding:Padding;
- stride_w:int;
- stride_h:int;
- filter_width:int;
- filter_height:int;
- fused_activation_function:ActivationFunctionType;
-}
-
-table DepthwiseConv2DOptions {
- // Parameters for DepthwiseConv version 1 or above.
- padding:Padding;
- stride_w:int;
- stride_h:int;
- depth_multiplier:int;
- fused_activation_function:ActivationFunctionType;
- // Parameters for DepthwiseConv version 2 or above.
- dilation_w_factor:int = 1;
- dilation_h_factor:int = 1;
-}
-
-table ConcatEmbeddingsOptions {
- num_channels:int;
- num_columns_per_channel:[int];
- embedding_dim_per_channel:[int]; // This could be inferred from parameters.
-}
-
-enum LSHProjectionType: byte {
- UNKNOWN = 0,
- SPARSE = 1,
- DENSE = 2,
-}
-
-table LSHProjectionOptions {
- type: LSHProjectionType;
-}
-
-table SVDFOptions {
- rank:int;
- fused_activation_function:ActivationFunctionType;
-}
-
-// An implementation of TensorFlow RNNCell.
-table RNNOptions {
- fused_activation_function:ActivationFunctionType;
-}
-
-// An implementation of TensorFlow dynamic_rnn with RNNCell.
-table SequenceRNNOptions {
- time_major:bool;
- fused_activation_function:ActivationFunctionType;
-}
-
-// An implementation of TensorFlow bidrectional_dynamic_rnn with RNNCell.
-table BidirectionalSequenceRNNOptions {
- time_major:bool;
- fused_activation_function:ActivationFunctionType;
- merge_outputs: bool;
-}
-
-enum FullyConnectedOptionsWeightsFormat: byte {
- DEFAULT = 0,
- SHUFFLED4x16INT8 = 1,
-}
-
-// An implementation of TensorFlow fully_connected (a.k.a Dense) layer.
-table FullyConnectedOptions {
- // Parameters for FullyConnected version 1 or above.
- fused_activation_function:ActivationFunctionType;
-
- // Parameters for FullyConnected version 2 or above.
- weights_format:FullyConnectedOptionsWeightsFormat = DEFAULT;
-}
-
-table SoftmaxOptions {
- beta: float;
-}
-
-// An implementation of TensorFlow concat.
-table ConcatenationOptions {
- axis:int;
- fused_activation_function:ActivationFunctionType;
-}
-
-table AddOptions {
- fused_activation_function:ActivationFunctionType;
-}
-
-table MulOptions {
- fused_activation_function:ActivationFunctionType;
-}
-
-table L2NormOptions {
- fused_activation_function:ActivationFunctionType;
-}
-
-table LocalResponseNormalizationOptions {
- radius:int;
- bias:float;
- alpha:float;
- beta:float;
-}
-
-enum LSTMKernelType : byte {
- // Full LSTM kernel which supports peephole and projection.
- FULL = 0,
- // Basic LSTM kernels. Equivalent to TensorFlow BasicLSTMCell.
- BASIC = 1,
-}
-
-// An implementation of TensorFlow LSTMCell and CoupledInputForgetGateLSTMCell
-table LSTMOptions {
- // Parameters for LSTM version 1 or above.
- fused_activation_function:ActivationFunctionType;
- cell_clip: float; // Optional, 0.0 means no clipping
- proj_clip: float; // Optional, 0.0 means no clipping
-
- // Parameters for LSTM version 2 or above.
- // Basic kernel is only supported in version 2 or above.
- kernel_type: LSTMKernelType = FULL;
-}
-
-// An implementation of TensorFlow dynamic_rnn with LSTMCell.
-table UnidirectionalSequenceLSTMOptions {
- fused_activation_function:ActivationFunctionType;
- cell_clip: float; // Optional, 0.0 means no clipping
- proj_clip: float; // Optional, 0.0 means no clipping
-
- // If true then first dimension is sequence, otherwise batch.
- time_major:bool;
-}
-
-table BidirectionalSequenceLSTMOptions {
- fused_activation_function:ActivationFunctionType;
- cell_clip: float; // Optional, 0.0 means no clipping
- proj_clip: float; // Optional, 0.0 means no clipping
-
- // If true, store the outputs of both directions into the first output.
- merge_outputs: bool;
-}
-
-table ResizeBilinearOptions {
- new_height: int (deprecated);
- new_width: int (deprecated);
- align_corners: bool;
-}
-
-table ResizeNearestNeighborOptions {
- align_corners: bool;
-}
-
-// A call operation options
-table CallOptions {
- // The subgraph index that needs to be called.
- subgraph:uint;
-}
-
-table PadOptions {
-}
-
-table PadV2Options {
-}
-
-table ReshapeOptions {
- new_shape:[int];
-}
-
-table SpaceToBatchNDOptions {
-}
-
-table BatchToSpaceNDOptions {
-}
-
-table SkipGramOptions {
- ngram_size: int;
- max_skip_size: int;
- include_all_ngrams: bool;
-}
-
-table SpaceToDepthOptions {
- block_size: int;
-}
-
-table SubOptions {
- fused_activation_function:ActivationFunctionType;
-}
-
-table DivOptions {
- fused_activation_function:ActivationFunctionType;
-}
-
-table TopKV2Options {
-}
-
-enum CombinerType : byte {
- SUM = 0,
- MEAN = 1,
- SQRTN = 2,
-}
-
-table EmbeddingLookupSparseOptions {
- combiner:CombinerType;
-}
-
-table GatherOptions {
- axis: int;
-}
-
-table TransposeOptions {
-}
-
-table ExpOptions {
-}
-
-table ReducerOptions {
- keep_dims: bool;
-}
-
-table SqueezeOptions {
- squeeze_dims:[int];
-}
-
-table SplitOptions {
- num_splits: int;
-}
-
-table SplitVOptions {
- num_splits: int;
-}
-
-table StridedSliceOptions {
- begin_mask: int;
- end_mask: int;
- ellipsis_mask: int;
- new_axis_mask: int;
- shrink_axis_mask: int;
-}
-
-table LogSoftmaxOptions {
-}
-
-table CastOptions {
- in_data_type: TensorType;
- out_data_type: TensorType;
-}
-
-table DequantizeOptions {
-}
-
-table MaximumMinimumOptions {
-}
-
-table TileOptions {
-}
-
-table ArgMaxOptions {
- output_type : TensorType;
-}
-
-table ArgMinOptions {
- output_type : TensorType;
-}
-
-table GreaterOptions {
-}
-
-table GreaterEqualOptions {
-}
-
-table LessOptions {
-}
-
-table LessEqualOptions {
-}
-
-table NegOptions {
-}
-
-table SelectOptions {
-}
-
-table SliceOptions {
-}
-
-table TransposeConvOptions {
- padding:Padding;
- stride_w:int;
- stride_h:int;
-}
-
-table ExpandDimsOptions {
-}
-
-table SparseToDenseOptions {
- validate_indices:bool;
-}
-
-table EqualOptions {
-}
-
-table NotEqualOptions {
-}
-
-table ShapeOptions {
- // Optional output type of the operation (int32 or int64). Defaults to int32.
- out_type : TensorType;
-}
-
-table PowOptions {
-}
-
-table FakeQuantOptions {
- // Parameters supported by version 1:
- min:float;
- max:float;
- num_bits:int;
-
- // Parameters supported by version 2:
- narrow_range:bool;
-}
-
-table PackOptions {
- values_count:int;
- axis:int;
-}
-
-table LogicalOrOptions {
-}
-
-table OneHotOptions {
- axis:int;
-}
-
-table AbsOptions {
-}
-
-
-table LogicalAndOptions {
-}
-
-table LogicalNotOptions {
-}
-
-table UnpackOptions {
- num:int;
- axis:int;
-}
-
-table FloorDivOptions {
-}
-
-table SquareOptions {
-}
-
-table ZerosLikeOptions {
-}
-
-table FillOptions {
-}
-
-table FloorModOptions {
-}
-
-table RangeOptions {
-}
-
-table LeakyReluOptions {
- alpha:float;
-}
-
-table SquaredDifferenceOptions {
-}
-
-enum MirrorPadMode : byte {
- // Doesn't include borders.
- REFLECT = 0,
- // Includes borders.
- SYMMETRIC = 1,
-}
-
-table MirrorPadOptions {
- mode:MirrorPadMode;
-}
-
-// An OperatorCode can be an enum value (BuiltinOperator) if the operator is a
-// builtin, or a string if the operator is custom.
-table OperatorCode {
- builtin_code:BuiltinOperator;
- custom_code:string;
-
- // The version of the operator. The version need to be bumped whenever new
- // parameters are introduced into an op.
- version:int = 1;
-}
-
-enum CustomOptionsFormat : byte {
- FLEXBUFFERS = 0,
-}
-
-// An operator takes tensors as inputs and outputs. The type of operation being
-// performed is determined by an index into the list of valid OperatorCodes,
-// while the specifics of each operations is configured using builtin_options
-// or custom_options.
-table Operator {
- // Index into the operator_codes array. Using an integer here avoids
- // complicate map lookups.
- opcode_index:uint;
-
- // Optional input and output tensors are indicated by -1.
- inputs:[int];
- outputs:[int];
-
- builtin_options:BuiltinOptions;
- custom_options:[ubyte];
- custom_options_format:CustomOptionsFormat;
-
- // A list of booleans indicating the input tensors which are being mutated by
- // this operator.(e.g. used by RNN and LSTM).
- // For example, if the "inputs" array refers to 5 tensors and the second and
- // fifth are mutable variables, then this list will contain
- // [false, true, false, false, true].
- //
- // If the list is empty, no variable is mutated in this operator.
- // The list either has the same length as `inputs`, or is empty.
- mutating_variable_inputs:[bool];
-}
-
-// The root type, defining a subgraph, which typically represents an entire
-// model.
-table SubGraph {
- // A list of all tensors used in this subgraph.
- tensors:[Tensor];
-
- // Indices of the tensors that are inputs into this subgraph. Note this is
- // the list of non-static tensors that feed into the subgraph for inference.
- inputs:[int];
-
- // Indices of the tensors that are outputs out of this subgraph. Note this is
- // the list of output tensors that are considered the product of the
- // subgraph's inference.
- outputs:[int];
-
- // All operators, in execution order.
- operators:[Operator];
-
- // Name of this subgraph (used for debugging).
- name:string;
-}
-
-// Table of raw data buffers (used for constant tensors). Referenced by tensors
-// by index. The generous alignment accommodates mmap-friendly data structures.
-table Buffer {
- data:[ubyte] (force_align: 16);
-}
-
-table Model {
- // Version of the schema.
- version:uint;
-
- // A list of all operator codes used in this model. This is
- // kept in order because operators carry an index into this
- // vector.
- operator_codes:[OperatorCode];
-
- // All the subgraphs of the model. The 0th is assumed to be the main
- // model.
- subgraphs:[SubGraph];
-
- // A description of the model.
- description:string;
-
- // Buffers of the model.
- // Note the 0th entry of this array must be an empty buffer (sentinel).
- // This is a convention so that tensors without a buffer can provide 0 as
- // their buffer.
- buffers:[Buffer];
-
- // Metadata about the model. Indirects into the existings buffers list.
- metadata_buffer:[int];
-}
-
-root_type Model;