Diffstat (limited to 'onert-micro/externals')
-rw-r--r--  onert-micro/externals/CMakeLists.txt | 9
-rw-r--r--  onert-micro/externals/flatbuffers/base.h | 453
-rw-r--r--  onert-micro/externals/flatbuffers/code_generators.h | 234
-rw-r--r--  onert-micro/externals/flatbuffers/flatbuffers.h | 3078
-rw-r--r--  onert-micro/externals/flatbuffers/flatc.h | 95
-rw-r--r--  onert-micro/externals/flatbuffers/flexbuffers.h | 1852
-rw-r--r--  onert-micro/externals/flatbuffers/grpc.h | 361
-rw-r--r--  onert-micro/externals/flatbuffers/hash.h | 145
-rw-r--r--  onert-micro/externals/flatbuffers/idl.h | 1145
-rw-r--r--  onert-micro/externals/flatbuffers/minireflect.h | 507
-rw-r--r--  onert-micro/externals/flatbuffers/pch/flatc_pch.h | 40
-rw-r--r--  onert-micro/externals/flatbuffers/pch/pch.h | 39
-rw-r--r--  onert-micro/externals/flatbuffers/reflection.h | 520
-rw-r--r--  onert-micro/externals/flatbuffers/reflection_generated.h | 1257
-rw-r--r--  onert-micro/externals/flatbuffers/registry.h | 140
-rw-r--r--  onert-micro/externals/flatbuffers/stl_emulation.h | 674
-rw-r--r--  onert-micro/externals/flatbuffers/util.h | 799
-rw-r--r--  onert-micro/externals/gen/circle-generated/circle/schema_generated.h | 24984
18 files changed, 36332 insertions, 0 deletions
diff --git a/onert-micro/externals/CMakeLists.txt b/onert-micro/externals/CMakeLists.txt
new file mode 100644
index 000000000..221001cfe
--- /dev/null
+++ b/onert-micro/externals/CMakeLists.txt
@@ -0,0 +1,9 @@
+unset(OUTPUT_FILES)
+set(OUTPUT_FILES "${ONERT_MICRO_EXTERNAL_DIR}/gen/circle-generated/circle/schema_generated.h")
+set(TGT luci_micro_circle_schema)
+
+# NOTE This is a header-only library
+add_library(${TGT} STATIC ${OUTPUT_FILES})
+set_target_properties(${TGT} PROPERTIES LINKER_LANGUAGE CXX)
+target_include_directories(${TGT} PUBLIC "${ONERT_MICRO_EXTERNAL_DIR}/gen")
+target_include_directories(${TGT} PUBLIC "${ONERT_MICRO_EXTERNAL_DIR}")
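+
+# Editorial sketch (not part of the original change): `your_target` below is a
+# hypothetical consumer target; it illustrates how this header-only schema
+# library is typically consumed, picking up its PUBLIC include directories.
+# target_link_libraries(your_target PRIVATE luci_micro_circle_schema)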
diff --git a/onert-micro/externals/flatbuffers/base.h b/onert-micro/externals/flatbuffers/base.h
new file mode 100644
index 000000000..56bf9b427
--- /dev/null
+++ b/onert-micro/externals/flatbuffers/base.h
@@ -0,0 +1,453 @@
+/*
+ * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2017 Google Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef FLATBUFFERS_BASE_H_
+#define FLATBUFFERS_BASE_H_
+
+// clang-format off
+
+// If activated, this block must be declared and included first.
+#if defined(FLATBUFFERS_MEMORY_LEAK_TRACKING) && \
+ defined(_MSC_VER) && defined(_DEBUG)
+ // The _CRTDBG_MAP_ALLOC inside <crtdbg.h> will replace
+ // calloc/free (etc) to its debug version using #define directives.
+ #define _CRTDBG_MAP_ALLOC
+ #include <stdlib.h>
+ #include <crtdbg.h>
+ // Replace operator new by trace-enabled version.
+ #define DEBUG_NEW new(_NORMAL_BLOCK, __FILE__, __LINE__)
+ #define new DEBUG_NEW
+#endif
+
+#if !defined(FLATBUFFERS_ASSERT)
+#include <assert.h>
+#define FLATBUFFERS_ASSERT assert
+#elif defined(FLATBUFFERS_ASSERT_INCLUDE)
+// Include file with forward declaration
+#include FLATBUFFERS_ASSERT_INCLUDE
+#endif
+
+#ifndef ARDUINO
+#include <cstdint>
+#endif
+
+#include <cstddef>
+#include <cstdlib>
+#include <cstring>
+
+#if defined(ARDUINO) && !defined(ARDUINOSTL_M_H)
+ #include <utility.h>
+#else
+ #include <utility>
+#endif
+
+#include <string>
+#include <type_traits>
+#include <vector>
+#include <set>
+#include <algorithm>
+#include <iterator>
+#include <memory>
+
+#if defined(__unix__) && !defined(FLATBUFFERS_LOCALE_INDEPENDENT)
+ #include <unistd.h>
+#endif
+
+#ifdef _STLPORT_VERSION
+ #define FLATBUFFERS_CPP98_STL
+#endif
+
+#ifdef __ANDROID__
+ #include <android/api-level.h>
+#endif
+
+#if defined(__ICCARM__)
+#include <intrinsics.h>
+#endif
+
+// Note the __clang__ check is needed, because clang presents itself
+// as an older GNUC compiler (4.2).
+// Clang 3.3 and later implement all of the ISO C++ 2011 standard.
+// Clang 3.4 and later implement all of the ISO C++ 2014 standard.
+// http://clang.llvm.org/cxx_status.html
+
+// Note the MSVC value of '__cplusplus' may be incorrect:
+// the '__cplusplus' predefined macro in MSVC is stuck at the value 199711L,
+// indicating (erroneously!) that the compiler conforms to the C++98 Standard.
+// The value is correct only starting from MSVC2017-15.7-Preview-3, and only
+// if the `/Zc:__cplusplus` switch is set.
+// Workaround (for details see MSDN):
+// Use the _MSC_VER and _MSVC_LANG definitions instead of __cplusplus for compatibility.
+// The _MSVC_LANG macro reports the Standard version regardless of the '/Zc:__cplusplus' switch.
+
+#if defined(__GNUC__) && !defined(__clang__)
+ #define FLATBUFFERS_GCC (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
+#else
+ #define FLATBUFFERS_GCC 0
+#endif
+
+#if defined(__clang__)
+ #define FLATBUFFERS_CLANG (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__)
+#else
+ #define FLATBUFFERS_CLANG 0
+#endif
+
+/// @cond FLATBUFFERS_INTERNAL
+#if __cplusplus <= 199711L && \
+ (!defined(_MSC_VER) || _MSC_VER < 1600) && \
+ (!defined(__GNUC__) || \
+ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__ < 40400))
+ #error A C++11 compatible compiler with support for the auto typing is \
+ required for FlatBuffers.
+ #error __cplusplus _MSC_VER __GNUC__ __GNUC_MINOR__ __GNUC_PATCHLEVEL__
+#endif
+
+#if !defined(__clang__) && \
+ defined(__GNUC__) && \
+ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__ < 40600)
+ // Backwards compatibility for g++ 4.4, and 4.5 which don't have the nullptr
+ // and constexpr keywords. Note the __clang__ check is needed, because clang
+ // presents itself as an older GNUC compiler.
+ #ifndef nullptr_t
+ const class nullptr_t {
+ public:
+ template<class T> inline operator T*() const { return 0; }
+ private:
+ void operator&() const;
+ } nullptr = {};
+ #endif
+ #ifndef constexpr
+ #define constexpr const
+ #endif
+#endif
+
+// The wire format uses a little endian encoding (since that's efficient for
+// the common platforms).
+#if defined(__s390x__)
+ #define FLATBUFFERS_LITTLEENDIAN 0
+#endif // __s390x__
+#if !defined(FLATBUFFERS_LITTLEENDIAN)
+ #if defined(__GNUC__) || defined(__clang__) || defined(__ICCARM__)
+ #if (defined(__BIG_ENDIAN__) || \
+ (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))
+ #define FLATBUFFERS_LITTLEENDIAN 0
+ #else
+ #define FLATBUFFERS_LITTLEENDIAN 1
+ #endif // __BIG_ENDIAN__
+ #elif defined(_MSC_VER)
+ #if defined(_M_PPC)
+ #define FLATBUFFERS_LITTLEENDIAN 0
+ #else
+ #define FLATBUFFERS_LITTLEENDIAN 1
+ #endif
+ #else
+ #error Unable to determine endianness, define FLATBUFFERS_LITTLEENDIAN.
+ #endif
+#endif // !defined(FLATBUFFERS_LITTLEENDIAN)
+
+#define FLATBUFFERS_VERSION_MAJOR 2
+#define FLATBUFFERS_VERSION_MINOR 0
+#define FLATBUFFERS_VERSION_REVISION 0
+#define FLATBUFFERS_STRING_EXPAND(X) #X
+#define FLATBUFFERS_STRING(X) FLATBUFFERS_STRING_EXPAND(X)
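+// Editorial note (illustration only, not upstream): the two-level expansion
+// stringifies the *expanded* value, e.g. FLATBUFFERS_STRING(FLATBUFFERS_VERSION_MAJOR)
+// yields "2", whereas a single-level #X would yield "FLATBUFFERS_VERSION_MAJOR".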
+namespace flatbuffers {
+ // Returns version as string "MAJOR.MINOR.REVISION".
+ const char* FLATBUFFERS_VERSION();
+}
+
+#if (!defined(_MSC_VER) || _MSC_VER > 1600) && \
+ (!defined(__GNUC__) || (__GNUC__ * 100 + __GNUC_MINOR__ >= 407)) || \
+ defined(__clang__)
+ #define FLATBUFFERS_FINAL_CLASS final
+ #define FLATBUFFERS_OVERRIDE override
+ #define FLATBUFFERS_EXPLICIT_CPP11 explicit
+ #define FLATBUFFERS_VTABLE_UNDERLYING_TYPE : flatbuffers::voffset_t
+#else
+ #define FLATBUFFERS_FINAL_CLASS
+ #define FLATBUFFERS_OVERRIDE
+ #define FLATBUFFERS_EXPLICIT_CPP11
+ #define FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+#endif
+
+#if (!defined(_MSC_VER) || _MSC_VER >= 1900) && \
+ (!defined(__GNUC__) || (__GNUC__ * 100 + __GNUC_MINOR__ >= 406)) || \
+ (defined(__cpp_constexpr) && __cpp_constexpr >= 200704)
+ #define FLATBUFFERS_CONSTEXPR constexpr
+ #define FLATBUFFERS_CONSTEXPR_CPP11 constexpr
+ #define FLATBUFFERS_CONSTEXPR_DEFINED
+#else
+ #define FLATBUFFERS_CONSTEXPR const
+ #define FLATBUFFERS_CONSTEXPR_CPP11
+#endif
+
+#if (defined(__cplusplus) && __cplusplus >= 201402L) || \
+ (defined(__cpp_constexpr) && __cpp_constexpr >= 201304)
+ #define FLATBUFFERS_CONSTEXPR_CPP14 FLATBUFFERS_CONSTEXPR_CPP11
+#else
+ #define FLATBUFFERS_CONSTEXPR_CPP14
+#endif
+
+#if (defined(__GXX_EXPERIMENTAL_CXX0X__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 406)) || \
+ (defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 190023026)) || \
+ defined(__clang__)
+ #define FLATBUFFERS_NOEXCEPT noexcept
+#else
+ #define FLATBUFFERS_NOEXCEPT
+#endif
+
+// NOTE: the FLATBUFFERS_DELETE_FUNC macro may change the access mode to
+// private, so be sure to put it at the end or reset access mode explicitly.
+#if (!defined(_MSC_VER) || _MSC_FULL_VER >= 180020827) && \
+ (!defined(__GNUC__) || (__GNUC__ * 100 + __GNUC_MINOR__ >= 404)) || \
+ defined(__clang__)
+ #define FLATBUFFERS_DELETE_FUNC(func) func = delete
+#else
+ #define FLATBUFFERS_DELETE_FUNC(func) private: func
+#endif
+
+#if (!defined(_MSC_VER) || _MSC_VER >= 1900) && \
+ (!defined(__GNUC__) || (__GNUC__ * 100 + __GNUC_MINOR__ >= 409)) || \
+ defined(__clang__)
+ #define FLATBUFFERS_DEFAULT_DECLARATION
+#endif
+
+// Check if we can use template aliases.
+// Not possible with the Microsoft compiler before 2012.
+// Possible if the language feature __cpp_alias_templates is defined,
+// or if the C++ standard is C++11 or newer.
+#if (defined(_MSC_VER) && _MSC_VER > 1700 /* MSVC2012 */) \
+ || (defined(__cpp_alias_templates) && __cpp_alias_templates >= 200704) \
+ || (defined(__cplusplus) && __cplusplus >= 201103L)
+ #define FLATBUFFERS_TEMPLATES_ALIASES
+#endif
+
+#ifndef FLATBUFFERS_HAS_STRING_VIEW
+ // Only provide flatbuffers::string_view if __has_include can be used
+ // to detect a header that provides an implementation
+ #if defined(__has_include)
+ // Check for std::string_view (in c++17)
+ #if __has_include(<string_view>) && (__cplusplus >= 201606 || (defined(_HAS_CXX17) && _HAS_CXX17))
+ #include <string_view>
+ namespace flatbuffers {
+ typedef std::string_view string_view;
+ }
+ #define FLATBUFFERS_HAS_STRING_VIEW 1
+ // Check for std::experimental::string_view (in c++14, compiler-dependent)
+ #elif __has_include(<experimental/string_view>) && (__cplusplus >= 201411)
+ #include <experimental/string_view>
+ namespace flatbuffers {
+ typedef std::experimental::string_view string_view;
+ }
+ #define FLATBUFFERS_HAS_STRING_VIEW 1
+ // Check for absl::string_view
+ #elif __has_include("absl/strings/string_view.h")
+ #include "absl/strings/string_view.h"
+ namespace flatbuffers {
+ typedef absl::string_view string_view;
+ }
+ #define FLATBUFFERS_HAS_STRING_VIEW 1
+ #endif
+ #endif // __has_include
+#endif // !FLATBUFFERS_HAS_STRING_VIEW
+
+#ifndef FLATBUFFERS_HAS_NEW_STRTOD
+ // Modern (C++11) strtod and strtof functions are available for use.
+ // 1) nan/inf strings as argument of strtod;
+ // 2) hex-float as argument of strtod/strtof.
+ #if (defined(_MSC_VER) && _MSC_VER >= 1900) || \
+ (defined(__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 409)) || \
+ (defined(__clang__))
+ #define FLATBUFFERS_HAS_NEW_STRTOD 1
+ #endif
+#endif // !FLATBUFFERS_HAS_NEW_STRTOD
+
+#ifndef FLATBUFFERS_LOCALE_INDEPENDENT
+  // Enable locale independent functions {strtof_l, strtod_l, strtoll_l, strtoull_l}.
+ #if ((defined(_MSC_VER) && _MSC_VER >= 1800) || \
+ (defined(_XOPEN_VERSION) && (_XOPEN_VERSION>=700)) && (!defined(__ANDROID_API__) || (defined(__ANDROID_API__) && (__ANDROID_API__>=21))))
+ #define FLATBUFFERS_LOCALE_INDEPENDENT 1
+ #else
+ #define FLATBUFFERS_LOCALE_INDEPENDENT 0
+ #endif
+#endif // !FLATBUFFERS_LOCALE_INDEPENDENT
+
+// Suppress Undefined Behavior Sanitizer (recoverable only). Usage:
+// - __supress_ubsan__("undefined")
+// - __supress_ubsan__("signed-integer-overflow")
+#if defined(__clang__) && (__clang_major__ > 3 || (__clang_major__ == 3 && __clang_minor__ >=7))
+ #define __supress_ubsan__(type) __attribute__((no_sanitize(type)))
+#elif defined(__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 409)
+ #define __supress_ubsan__(type) __attribute__((no_sanitize_undefined))
+#else
+ #define __supress_ubsan__(type)
+#endif
+
+// This is a constexpr function used for checking compile-time constants.
+// Avoid `#pragma warning(disable: 4127) // C4127: expression is constant`.
+template<typename T> FLATBUFFERS_CONSTEXPR inline bool IsConstTrue(T t) {
+ return !!t;
+}
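+// Editorial example (not upstream): writing
+//   if (IsConstTrue(FLATBUFFERS_LITTLEENDIAN)) { ... }
+// instead of `if (FLATBUFFERS_LITTLEENDIAN)` keeps MSVC from raising
+// C4127 ("conditional expression is constant") on the compile-time constant.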
+
+// Enable C++ attribute [[]] if std:c++17 or higher.
+#if ((__cplusplus >= 201703L) \
+ || (defined(_MSVC_LANG) && (_MSVC_LANG >= 201703L)))
+ // All attributes unknown to an implementation are ignored without causing an error.
+ #define FLATBUFFERS_ATTRIBUTE(attr) [[attr]]
+
+ #define FLATBUFFERS_FALLTHROUGH() [[fallthrough]]
+#else
+ #define FLATBUFFERS_ATTRIBUTE(attr)
+
+ #if FLATBUFFERS_CLANG >= 30800
+ #define FLATBUFFERS_FALLTHROUGH() [[clang::fallthrough]]
+ #elif FLATBUFFERS_GCC >= 70300
+ #define FLATBUFFERS_FALLTHROUGH() [[gnu::fallthrough]]
+ #else
+ #define FLATBUFFERS_FALLTHROUGH()
+ #endif
+#endif
+
+/// @endcond
+
+/// @file
+namespace flatbuffers {
+
+/// @cond FLATBUFFERS_INTERNAL
+// Our default offset / size type, 32bit on purpose on 64bit systems.
+// Also, using a consistent offset type maintains compatibility of serialized
+// offset values between 32bit and 64bit systems.
+typedef uint32_t uoffset_t;
+
+// Signed offsets for references that can go in both directions.
+typedef int32_t soffset_t;
+
+// Offset/index used in v-tables, can be changed to uint8_t in
+// format forks to save a bit of space if desired.
+typedef uint16_t voffset_t;
+
+typedef uintmax_t largest_scalar_t;
+
+// In 32bits, this evaluates to 2GB - 1
+#define FLATBUFFERS_MAX_BUFFER_SIZE ((1ULL << (sizeof(::flatbuffers::soffset_t) * 8 - 1)) - 1)
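+// Editorial worked example: soffset_t is 4 bytes, so this is
+// (1ULL << 31) - 1 = 2147483647 bytes, i.e. 2GB - 1.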
+
+// We support aligning the contents of buffers up to this size.
+#define FLATBUFFERS_MAX_ALIGNMENT 16
+
+inline bool VerifyAlignmentRequirements(size_t align, size_t min_align = 1) {
+ return (min_align <= align) && (align <= (FLATBUFFERS_MAX_ALIGNMENT)) &&
+ (align & (align - 1)) == 0; // must be power of 2
+}
+
+#if defined(_MSC_VER)
+ #pragma warning(disable: 4351) // C4351: new behavior: elements of array ... will be default initialized
+ #pragma warning(push)
+ #pragma warning(disable: 4127) // C4127: conditional expression is constant
+#endif
+
+template<typename T> T EndianSwap(T t) {
+ #if defined(_MSC_VER)
+ #define FLATBUFFERS_BYTESWAP16 _byteswap_ushort
+ #define FLATBUFFERS_BYTESWAP32 _byteswap_ulong
+ #define FLATBUFFERS_BYTESWAP64 _byteswap_uint64
+ #elif defined(__ICCARM__)
+ #define FLATBUFFERS_BYTESWAP16 __REV16
+ #define FLATBUFFERS_BYTESWAP32 __REV
+ #define FLATBUFFERS_BYTESWAP64(x) \
+ ((__REV(static_cast<uint32_t>(x >> 32U))) | (static_cast<uint64_t>(__REV(static_cast<uint32_t>(x)))) << 32U)
+ #else
+ #if defined(__GNUC__) && __GNUC__ * 100 + __GNUC_MINOR__ < 408 && !defined(__clang__)
+ // __builtin_bswap16 was missing prior to GCC 4.8.
+ #define FLATBUFFERS_BYTESWAP16(x) \
+ static_cast<uint16_t>(__builtin_bswap32(static_cast<uint32_t>(x) << 16))
+ #else
+ #define FLATBUFFERS_BYTESWAP16 __builtin_bswap16
+ #endif
+ #define FLATBUFFERS_BYTESWAP32 __builtin_bswap32
+ #define FLATBUFFERS_BYTESWAP64 __builtin_bswap64
+ #endif
+ if (sizeof(T) == 1) { // Compile-time if-then's.
+ return t;
+ } else if (sizeof(T) == 2) {
+ union { T t; uint16_t i; } u = { t };
+ u.i = FLATBUFFERS_BYTESWAP16(u.i);
+ return u.t;
+ } else if (sizeof(T) == 4) {
+ union { T t; uint32_t i; } u = { t };
+ u.i = FLATBUFFERS_BYTESWAP32(u.i);
+ return u.t;
+ } else if (sizeof(T) == 8) {
+ union { T t; uint64_t i; } u = { t };
+ u.i = FLATBUFFERS_BYTESWAP64(u.i);
+ return u.t;
+ } else {
+ FLATBUFFERS_ASSERT(0);
+ return t;
+ }
+}
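+
+// Editorial example (not upstream): EndianSwap always reverses the byte order,
+// regardless of host endianness, e.g. EndianSwap<uint32_t>(0x11223344u) ==
+// 0x44332211u; EndianScalar() below applies it only on big-endian hosts.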
+
+#if defined(_MSC_VER)
+ #pragma warning(pop)
+#endif
+
+
+template<typename T> T EndianScalar(T t) {
+ #if FLATBUFFERS_LITTLEENDIAN
+ return t;
+ #else
+ return EndianSwap(t);
+ #endif
+}
+
+template<typename T>
+// UBSAN: C++ aliasing type rules, see std::bit_cast<> for details.
+__supress_ubsan__("alignment")
+T ReadScalar(const void *p) {
+ return EndianScalar(*reinterpret_cast<const T *>(p));
+}
+
+// See https://github.com/google/flatbuffers/issues/5950
+
+#if (FLATBUFFERS_GCC >= 100000) && (FLATBUFFERS_GCC < 110000)
+ #pragma GCC diagnostic push
+ #pragma GCC diagnostic ignored "-Wstringop-overflow"
+#endif
+
+template<typename T>
+// UBSAN: C++ aliasing type rules, see std::bit_cast<> for details.
+__supress_ubsan__("alignment")
+void WriteScalar(void *p, T t) {
+ *reinterpret_cast<T *>(p) = EndianScalar(t);
+}
+
+template<typename T> struct Offset;
+template<typename T> __supress_ubsan__("alignment") void WriteScalar(void *p, Offset<T> t) {
+ *reinterpret_cast<uoffset_t *>(p) = EndianScalar(t.o);
+}
+
+#if (FLATBUFFERS_GCC >= 100000) && (FLATBUFFERS_GCC < 110000)
+ #pragma GCC diagnostic pop
+#endif
+
+// Computes how many bytes you'd have to pad to be able to write a
+// "scalar_size" scalar if the buffer had grown to "buf_size" (downwards in
+// memory).
+__supress_ubsan__("unsigned-integer-overflow")
+inline size_t PaddingBytes(size_t buf_size, size_t scalar_size) {
+ return ((~buf_size) + 1) & (scalar_size - 1);
+}
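+
+// Editorial worked example: with the buffer growing downwards,
+// PaddingBytes(/*buf_size=*/6, /*scalar_size=*/4) == ((~6) + 1) & 3 == 2,
+// so writing 2 bytes of padding first makes the total size 8, which is
+// correctly aligned for a 4-byte scalar.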
+
+} // namespace flatbuffers
+#endif // FLATBUFFERS_BASE_H_
diff --git a/onert-micro/externals/flatbuffers/code_generators.h b/onert-micro/externals/flatbuffers/code_generators.h
new file mode 100644
index 000000000..3908ea582
--- /dev/null
+++ b/onert-micro/externals/flatbuffers/code_generators.h
@@ -0,0 +1,234 @@
+/*
+ * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2014 Google Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLATBUFFERS_CODE_GENERATORS_H_
+#define FLATBUFFERS_CODE_GENERATORS_H_
+
+#include <map>
+#include <sstream>
+
+#include "flatbuffers/idl.h"
+
+namespace flatbuffers
+{
+
+// Utility class to assist in generating code through use of text templates.
+//
+// Example code:
+// CodeWriter code("\t");
+// code.SetValue("NAME", "Foo");
+// code += "void {{NAME}}() { printf("%s", "{{NAME}}"); }";
+// code.SetValue("NAME", "Bar");
+// code += "void {{NAME}}() { printf("%s", "{{NAME}}"); }";
+// std::cout << code.ToString() << std::endl;
+//
+// Output:
+// void Foo() { printf("%s", "Foo"); }
+// void Bar() { printf("%s", "Bar"); }
+class CodeWriter
+{
+public:
+ CodeWriter(std::string pad = std::string()) : pad_(pad), cur_ident_lvl_(0), ignore_ident_(false)
+ {
+ }
+
+ // Clears the current "written" code.
+ void Clear()
+ {
+ stream_.str("");
+ stream_.clear();
+ }
+
+ // Associates a key with a value. All subsequent calls to operator+=, where
+ // the specified key is contained in {{ and }} delimiters will be replaced by
+ // the given value.
+ void SetValue(const std::string &key, const std::string &value) { value_map_[key] = value; }
+
+ std::string GetValue(const std::string &key) const
+ {
+ const auto it = value_map_.find(key);
+ return it == value_map_.end() ? "" : it->second;
+ }
+
+ // Appends the given text to the generated code as well as a newline
+ // character. Any text within {{ and }} delimiters is replaced by values
+ // previously stored in the CodeWriter by calling SetValue above. The newline
+ // will be suppressed if the text ends with the \\ character.
+ void operator+=(std::string text);
+
+ // Returns the current contents of the CodeWriter as a std::string.
+ std::string ToString() const { return stream_.str(); }
+
+ // Increase ident level for writing code
+ void IncrementIdentLevel() { cur_ident_lvl_++; }
+ // Decrease ident level for writing code
+ void DecrementIdentLevel()
+ {
+ if (cur_ident_lvl_)
+ cur_ident_lvl_--;
+ }
+
+ void SetPadding(const std::string &padding) { pad_ = padding; }
+
+private:
+ std::map<std::string, std::string> value_map_;
+ std::stringstream stream_;
+ std::string pad_;
+ int cur_ident_lvl_;
+ bool ignore_ident_;
+
+ // Add ident padding (tab or space) based on ident level
+ void AppendIdent(std::stringstream &stream);
+};
+
+class BaseGenerator
+{
+public:
+ virtual bool generate() = 0;
+
+ static std::string NamespaceDir(const Parser &parser, const std::string &path,
+ const Namespace &ns, const bool dasherize = false);
+
+ static std::string ToDasherizedCase(const std::string pascal_case);
+
+ std::string GeneratedFileName(const std::string &path, const std::string &file_name,
+ const IDLOptions &options) const;
+
+protected:
+ BaseGenerator(const Parser &parser, const std::string &path, const std::string &file_name,
+ std::string qualifying_start, std::string qualifying_separator,
+ std::string default_extension)
+ : parser_(parser), path_(path), file_name_(file_name), qualifying_start_(qualifying_start),
+ qualifying_separator_(qualifying_separator), default_extension_(default_extension)
+ {
+ }
+ virtual ~BaseGenerator() {}
+
+ // No copy/assign.
+ BaseGenerator &operator=(const BaseGenerator &);
+ BaseGenerator(const BaseGenerator &);
+
+ std::string NamespaceDir(const Namespace &ns, const bool dasherize = false) const;
+
+ static const char *FlatBuffersGeneratedWarning();
+
+ static std::string FullNamespace(const char *separator, const Namespace &ns);
+
+ static std::string LastNamespacePart(const Namespace &ns);
+
+  // Tracks the current namespace for early exit in WrapInNameSpace.
+  // C++, Java and C# return a different namespace from
+  // the following default (no early exit, always fully qualify),
+  // which works for JS and PHP.
+ virtual const Namespace *CurrentNameSpace() const { return nullptr; }
+
+ // Ensure that a type is prefixed with its namespace even within
+ // its own namespace to avoid conflict between generated method
+ // names and similarly named classes or structs
+ std::string WrapInNameSpace(const Namespace *ns, const std::string &name) const;
+
+ std::string WrapInNameSpace(const Definition &def) const;
+
+ std::string GetNameSpace(const Definition &def) const;
+
+ const Parser &parser_;
+ const std::string &path_;
+ const std::string &file_name_;
+ const std::string qualifying_start_;
+ const std::string qualifying_separator_;
+ const std::string default_extension_;
+};
+
+struct CommentConfig
+{
+ const char *first_line;
+ const char *content_line_prefix;
+ const char *last_line;
+};
+
+extern void GenComment(const std::vector<std::string> &dc, std::string *code_ptr,
+ const CommentConfig *config, const char *prefix = "");
+
+class FloatConstantGenerator
+{
+public:
+ virtual ~FloatConstantGenerator() {}
+ std::string GenFloatConstant(const FieldDef &field) const;
+
+private:
+ virtual std::string Value(double v, const std::string &src) const = 0;
+ virtual std::string Inf(double v) const = 0;
+ virtual std::string NaN(double v) const = 0;
+
+ virtual std::string Value(float v, const std::string &src) const = 0;
+ virtual std::string Inf(float v) const = 0;
+ virtual std::string NaN(float v) const = 0;
+
+ template <typename T> std::string GenFloatConstantImpl(const FieldDef &field) const;
+};
+
+class SimpleFloatConstantGenerator : public FloatConstantGenerator
+{
+public:
+ SimpleFloatConstantGenerator(const char *nan_number, const char *pos_inf_number,
+ const char *neg_inf_number);
+
+private:
+ std::string Value(double v, const std::string &src) const FLATBUFFERS_OVERRIDE;
+ std::string Inf(double v) const FLATBUFFERS_OVERRIDE;
+ std::string NaN(double v) const FLATBUFFERS_OVERRIDE;
+
+ std::string Value(float v, const std::string &src) const FLATBUFFERS_OVERRIDE;
+ std::string Inf(float v) const FLATBUFFERS_OVERRIDE;
+ std::string NaN(float v) const FLATBUFFERS_OVERRIDE;
+
+ const std::string nan_number_;
+ const std::string pos_inf_number_;
+ const std::string neg_inf_number_;
+};
+
+// C++, C#, Java like generator.
+class TypedFloatConstantGenerator : public FloatConstantGenerator
+{
+public:
+ TypedFloatConstantGenerator(const char *double_prefix, const char *single_prefix,
+ const char *nan_number, const char *pos_inf_number,
+ const char *neg_inf_number = "");
+
+private:
+ std::string Value(double v, const std::string &src) const FLATBUFFERS_OVERRIDE;
+ std::string Inf(double v) const FLATBUFFERS_OVERRIDE;
+
+ std::string NaN(double v) const FLATBUFFERS_OVERRIDE;
+
+ std::string Value(float v, const std::string &src) const FLATBUFFERS_OVERRIDE;
+ std::string Inf(float v) const FLATBUFFERS_OVERRIDE;
+ std::string NaN(float v) const FLATBUFFERS_OVERRIDE;
+
+ std::string MakeNaN(const std::string &prefix) const;
+ std::string MakeInf(bool neg, const std::string &prefix) const;
+
+ const std::string double_prefix_;
+ const std::string single_prefix_;
+ const std::string nan_number_;
+ const std::string pos_inf_number_;
+ const std::string neg_inf_number_;
+};
+
+} // namespace flatbuffers
+
+#endif // FLATBUFFERS_CODE_GENERATORS_H_
diff --git a/onert-micro/externals/flatbuffers/flatbuffers.h b/onert-micro/externals/flatbuffers/flatbuffers.h
new file mode 100644
index 000000000..3005d8921
--- /dev/null
+++ b/onert-micro/externals/flatbuffers/flatbuffers.h
@@ -0,0 +1,3078 @@
+/*
+ * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2014 Google Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLATBUFFERS_H_
+#define FLATBUFFERS_H_
+
+#include "flatbuffers/base.h"
+#include "flatbuffers/stl_emulation.h"
+
+#ifndef FLATBUFFERS_CPP98_STL
+#include <functional>
+#endif
+
+#if defined(FLATBUFFERS_NAN_DEFAULTS)
+#include <cmath>
+#endif
+
+namespace flatbuffers
+{
+// Generic 'operator==' with conditional specialisations.
+// T e - new value of a scalar field.
+// T def - default of scalar (is known at compile-time).
+template <typename T> inline bool IsTheSameAs(T e, T def) { return e == def; }
+
+#if defined(FLATBUFFERS_NAN_DEFAULTS) && defined(FLATBUFFERS_HAS_NEW_STRTOD) && \
+ (FLATBUFFERS_HAS_NEW_STRTOD > 0)
+// Like `operator==(e, def)` with weak NaN if T=(float|double).
+template <typename T> inline bool IsFloatTheSameAs(T e, T def)
+{
+ return (e == def) || ((def != def) && (e != e));
+}
+template <> inline bool IsTheSameAs<float>(float e, float def) { return IsFloatTheSameAs(e, def); }
+template <> inline bool IsTheSameAs<double>(double e, double def)
+{
+ return IsFloatTheSameAs(e, def);
+}
+#endif
+
+// Check 'v' is out of closed range [low; high].
+// Workaround for GCC warning [-Werror=type-limits]:
+// comparison is always true due to limited range of data type.
+template <typename T> inline bool IsOutRange(const T &v, const T &low, const T &high)
+{
+ return (v < low) || (high < v);
+}
+
+// Check 'v' is in closed range [low; high].
+template <typename T> inline bool IsInRange(const T &v, const T &low, const T &high)
+{
+ return !IsOutRange(v, low, high);
+}
+
+// Wrapper for uoffset_t to allow safe template specialization.
+// Value is allowed to be 0 to indicate a null object (see e.g. AddOffset).
+template <typename T> struct Offset
+{
+ uoffset_t o;
+ Offset() : o(0) {}
+ Offset(uoffset_t _o) : o(_o) {}
+ Offset<void> Union() const { return Offset<void>(o); }
+ bool IsNull() const { return !o; }
+};
+
+inline void EndianCheck()
+{
+ int endiantest = 1;
+ // If this fails, see FLATBUFFERS_LITTLEENDIAN above.
+ FLATBUFFERS_ASSERT(*reinterpret_cast<char *>(&endiantest) == FLATBUFFERS_LITTLEENDIAN);
+ (void)endiantest;
+}
+
+template <typename T> FLATBUFFERS_CONSTEXPR size_t AlignOf()
+{
+#ifdef _MSC_VER
+ return __alignof(T);
+#else
+#ifndef alignof
+ return __alignof__(T);
+#else
+ return alignof(T);
+#endif
+#endif
+ // clang-format on
+}
+
+// When we read serialized data from memory, in the case of most scalars,
+// we want to just read T, but in the case of Offset, we want to actually
+// perform the indirection and return a pointer.
+// The template specialization below does just that.
+// It is wrapped in a struct since function templates can't overload on the
+// return type like this.
+// The typedef is for the convenience of callers of this function
+// (avoiding the need for a trailing return decltype)
+template <typename T> struct IndirectHelper
+{
+ typedef T return_type;
+ typedef T mutable_return_type;
+ static const size_t element_stride = sizeof(T);
+ static return_type Read(const uint8_t *p, uoffset_t i)
+ {
+ return EndianScalar((reinterpret_cast<const T *>(p))[i]);
+ }
+};
+template <typename T> struct IndirectHelper<Offset<T>>
+{
+ typedef const T *return_type;
+ typedef T *mutable_return_type;
+ static const size_t element_stride = sizeof(uoffset_t);
+ static return_type Read(const uint8_t *p, uoffset_t i)
+ {
+ p += i * sizeof(uoffset_t);
+ return reinterpret_cast<return_type>(p + ReadScalar<uoffset_t>(p));
+ }
+};
+template <typename T> struct IndirectHelper<const T *>
+{
+ typedef const T *return_type;
+ typedef T *mutable_return_type;
+ static const size_t element_stride = sizeof(T);
+ static return_type Read(const uint8_t *p, uoffset_t i)
+ {
+ return reinterpret_cast<const T *>(p + i * sizeof(T));
+ }
+};
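+// Editorial summary (illustration only): for a scalar element type T,
+// Read() returns the value itself (endian-converted); for Offset<T> it
+// follows the stored uoffset_t and returns a const T* to the referenced
+// table/string; for const T* (inline structs) it returns a pointer directly
+// into the vector's storage.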
+
+// An STL compatible iterator implementation for Vector below, effectively
+// calling Get() for every element.
+template <typename T, typename IT> struct VectorIterator
+{
+ typedef std::random_access_iterator_tag iterator_category;
+ typedef IT value_type;
+ typedef ptrdiff_t difference_type;
+ typedef IT *pointer;
+ typedef IT &reference;
+
+ VectorIterator(const uint8_t *data, uoffset_t i)
+ : data_(data + IndirectHelper<T>::element_stride * i)
+ {
+ }
+ VectorIterator(const VectorIterator &other) : data_(other.data_) {}
+ VectorIterator() : data_(nullptr) {}
+
+ VectorIterator &operator=(const VectorIterator &other)
+ {
+ data_ = other.data_;
+ return *this;
+ }
+
+#if !defined(FLATBUFFERS_CPP98_STL)
+ VectorIterator &operator=(VectorIterator &&other)
+ {
+ data_ = other.data_;
+ return *this;
+ }
+#endif // !defined(FLATBUFFERS_CPP98_STL)
+ // clang-format on
+
+ bool operator==(const VectorIterator &other) const { return data_ == other.data_; }
+
+ bool operator<(const VectorIterator &other) const { return data_ < other.data_; }
+
+ bool operator!=(const VectorIterator &other) const { return data_ != other.data_; }
+
+ difference_type operator-(const VectorIterator &other) const
+ {
+ return (data_ - other.data_) / IndirectHelper<T>::element_stride;
+ }
+
+ // Note: return type is incompatible with the standard
+ // `reference operator*()`.
+ IT operator*() const { return IndirectHelper<T>::Read(data_, 0); }
+
+ // Note: return type is incompatible with the standard
+ // `pointer operator->()`.
+ IT operator->() const { return IndirectHelper<T>::Read(data_, 0); }
+
+ VectorIterator &operator++()
+ {
+ data_ += IndirectHelper<T>::element_stride;
+ return *this;
+ }
+
+ VectorIterator operator++(int)
+ {
+ VectorIterator temp(data_, 0);
+ data_ += IndirectHelper<T>::element_stride;
+ return temp;
+ }
+
+ VectorIterator operator+(const uoffset_t &offset) const
+ {
+ return VectorIterator(data_ + offset * IndirectHelper<T>::element_stride, 0);
+ }
+
+ VectorIterator &operator+=(const uoffset_t &offset)
+ {
+ data_ += offset * IndirectHelper<T>::element_stride;
+ return *this;
+ }
+
+ VectorIterator &operator--()
+ {
+ data_ -= IndirectHelper<T>::element_stride;
+ return *this;
+ }
+
+ VectorIterator operator--(int)
+ {
+ VectorIterator temp(data_, 0);
+ data_ -= IndirectHelper<T>::element_stride;
+ return temp;
+ }
+
+ VectorIterator operator-(const uoffset_t &offset) const
+ {
+ return VectorIterator(data_ - offset * IndirectHelper<T>::element_stride, 0);
+ }
+
+ VectorIterator &operator-=(const uoffset_t &offset)
+ {
+ data_ -= offset * IndirectHelper<T>::element_stride;
+ return *this;
+ }
+
+private:
+ const uint8_t *data_;
+};
+
+template <typename Iterator> struct VectorReverseIterator : public std::reverse_iterator<Iterator>
+{
+ explicit VectorReverseIterator(Iterator iter) : std::reverse_iterator<Iterator>(iter) {}
+
+ // Note: return type is incompatible with the standard
+ // `reference operator*()`.
+ typename Iterator::value_type operator*() const
+ {
+ auto tmp = std::reverse_iterator<Iterator>::current;
+ return *--tmp;
+ }
+
+ // Note: return type is incompatible with the standard
+ // `pointer operator->()`.
+ typename Iterator::value_type operator->() const
+ {
+ auto tmp = std::reverse_iterator<Iterator>::current;
+ return *--tmp;
+ }
+};
+
+struct String;
+
+// This is used as a helper type for accessing vectors.
+// Vector::data() assumes the vector elements start after the length field.
+template <typename T> class Vector
+{
+public:
+ typedef VectorIterator<T, typename IndirectHelper<T>::mutable_return_type> iterator;
+ typedef VectorIterator<T, typename IndirectHelper<T>::return_type> const_iterator;
+ typedef VectorReverseIterator<iterator> reverse_iterator;
+ typedef VectorReverseIterator<const_iterator> const_reverse_iterator;
+
+ uoffset_t size() const { return EndianScalar(length_); }
+
+ // Deprecated: use size(). Here for backwards compatibility.
+ FLATBUFFERS_ATTRIBUTE(deprecated("use size() instead"))
+ uoffset_t Length() const { return size(); }
+
+ typedef typename IndirectHelper<T>::return_type return_type;
+ typedef typename IndirectHelper<T>::mutable_return_type mutable_return_type;
+ typedef return_type value_type;
+
+ return_type Get(uoffset_t i) const
+ {
+ FLATBUFFERS_ASSERT(i < size());
+ return IndirectHelper<T>::Read(Data(), i);
+ }
+
+ return_type operator[](uoffset_t i) const { return Get(i); }
+
+ // If this is a Vector of enums, T will be its storage type, not the enum
+  // type. This function makes it convenient to retrieve values with enum
+ // type E.
+ template <typename E> E GetEnum(uoffset_t i) const { return static_cast<E>(Get(i)); }
+
+  // If this is a vector of unions, this does the cast for you. There's no check
+ // to make sure this is the right type!
+ template <typename U> const U *GetAs(uoffset_t i) const
+ {
+ return reinterpret_cast<const U *>(Get(i));
+ }
+
+  // If this is a vector of unions, this does the cast for you. There's no check
+ // to make sure this is actually a string!
+ const String *GetAsString(uoffset_t i) const { return reinterpret_cast<const String *>(Get(i)); }
+
+ const void *GetStructFromOffset(size_t o) const
+ {
+ return reinterpret_cast<const void *>(Data() + o);
+ }
+
+ iterator begin() { return iterator(Data(), 0); }
+ const_iterator begin() const { return const_iterator(Data(), 0); }
+
+ iterator end() { return iterator(Data(), size()); }
+ const_iterator end() const { return const_iterator(Data(), size()); }
+
+ reverse_iterator rbegin() { return reverse_iterator(end()); }
+ const_reverse_iterator rbegin() const { return const_reverse_iterator(end()); }
+
+ reverse_iterator rend() { return reverse_iterator(begin()); }
+ const_reverse_iterator rend() const { return const_reverse_iterator(begin()); }
+
+ const_iterator cbegin() const { return begin(); }
+
+ const_iterator cend() const { return end(); }
+
+ const_reverse_iterator crbegin() const { return rbegin(); }
+
+ const_reverse_iterator crend() const { return rend(); }
+
+ // Change elements if you have a non-const pointer to this object.
+ // Scalars only. See reflection.h, and the documentation.
+ void Mutate(uoffset_t i, const T &val)
+ {
+ FLATBUFFERS_ASSERT(i < size());
+ WriteScalar(data() + i, val);
+ }
+
+ // Change an element of a vector of tables (or strings).
+ // "val" points to the new table/string, as you can obtain from
+ // e.g. reflection::AddFlatBuffer().
+ void MutateOffset(uoffset_t i, const uint8_t *val)
+ {
+ FLATBUFFERS_ASSERT(i < size());
+ static_assert(sizeof(T) == sizeof(uoffset_t), "Unrelated types");
+ WriteScalar(data() + i, static_cast<uoffset_t>(val - (Data() + i * sizeof(uoffset_t))));
+ }
+
+ // Get a mutable pointer to tables/strings inside this vector.
+ mutable_return_type GetMutableObject(uoffset_t i) const
+ {
+ FLATBUFFERS_ASSERT(i < size());
+ return const_cast<mutable_return_type>(IndirectHelper<T>::Read(Data(), i));
+ }
+
+ // The raw data in little endian format. Use with care.
+ const uint8_t *Data() const { return reinterpret_cast<const uint8_t *>(&length_ + 1); }
+
+ uint8_t *Data() { return reinterpret_cast<uint8_t *>(&length_ + 1); }
+
+ // Similarly, but typed, much like std::vector::data
+ const T *data() const { return reinterpret_cast<const T *>(Data()); }
+ T *data() { return reinterpret_cast<T *>(Data()); }
+
+ template <typename K> return_type LookupByKey(K key) const
+ {
+ void *search_result =
+ std::bsearch(&key, Data(), size(), IndirectHelper<T>::element_stride, KeyCompare<K>);
+
+ if (!search_result)
+ {
+ return nullptr; // Key not found.
+ }
+
+ const uint8_t *element = reinterpret_cast<const uint8_t *>(search_result);
+
+ return IndirectHelper<T>::Read(element, 0);
+ }
+
+protected:
+ // This class is only used to access pre-existing data. Don't ever
+ // try to construct these manually.
+ Vector();
+
+ uoffset_t length_;
+
+private:
+ // This class is a pointer. Copying will therefore create an invalid object.
+ // Private and unimplemented copy constructor.
+ Vector(const Vector &);
+ Vector &operator=(const Vector &);
+
+ template <typename K> static int KeyCompare(const void *ap, const void *bp)
+ {
+ const K *key = reinterpret_cast<const K *>(ap);
+ const uint8_t *data = reinterpret_cast<const uint8_t *>(bp);
+ auto table = IndirectHelper<T>::Read(data, 0);
+
+ // std::bsearch compares with the operands transposed, so we negate the
+ // result here.
+ return -table->KeyCompareWithValue(*key);
+ }
+};
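+
+// Editorial usage sketch (names are hypothetical, not from this header):
+// given `const Vector<int32_t> *v` obtained from a generated accessor,
+// `v->Get(0)` or `(*v)[0]` reads an element with endian conversion, and
+// `for (auto it = v->begin(); it != v->end(); ++it)` iterates via the
+// VectorIterator defined above.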
+
+// Represent a vector much like the template above, but in this case we
+// don't know what the element types are (used with reflection.h).
+class VectorOfAny
+{
+public:
+ uoffset_t size() const { return EndianScalar(length_); }
+
+ const uint8_t *Data() const { return reinterpret_cast<const uint8_t *>(&length_ + 1); }
+ uint8_t *Data() { return reinterpret_cast<uint8_t *>(&length_ + 1); }
+
+protected:
+ VectorOfAny();
+
+ uoffset_t length_;
+
+private:
+ VectorOfAny(const VectorOfAny &);
+ VectorOfAny &operator=(const VectorOfAny &);
+};
+
+#ifndef FLATBUFFERS_CPP98_STL
+template <typename T, typename U> Vector<Offset<T>> *VectorCast(Vector<Offset<U>> *ptr)
+{
+ static_assert(std::is_base_of<T, U>::value, "Unrelated types");
+ return reinterpret_cast<Vector<Offset<T>> *>(ptr);
+}
+
+template <typename T, typename U> const Vector<Offset<T>> *VectorCast(const Vector<Offset<U>> *ptr)
+{
+ static_assert(std::is_base_of<T, U>::value, "Unrelated types");
+ return reinterpret_cast<const Vector<Offset<T>> *>(ptr);
+}
+#endif
+
+// Convenient helper function to get the length of any vector, regardless
+// of whether it is null or not (the field is not set).
+template <typename T> static inline size_t VectorLength(const Vector<T> *v)
+{
+ return v ? v->size() : 0;
+}
+
+// This is used as a helper type for accessing arrays.
+template <typename T, uint16_t length> class Array
+{
+ typedef typename flatbuffers::integral_constant<bool, flatbuffers::is_scalar<T>::value>
+ scalar_tag;
+ typedef
+ typename flatbuffers::conditional<scalar_tag::value, T, const T *>::type IndirectHelperType;
+
+public:
+ typedef uint16_t size_type;
+ typedef typename IndirectHelper<IndirectHelperType>::return_type return_type;
+ typedef VectorIterator<T, return_type> const_iterator;
+ typedef VectorReverseIterator<const_iterator> const_reverse_iterator;
+
+ FLATBUFFERS_CONSTEXPR uint16_t size() const { return length; }
+
+ return_type Get(uoffset_t i) const
+ {
+ FLATBUFFERS_ASSERT(i < size());
+ return IndirectHelper<IndirectHelperType>::Read(Data(), i);
+ }
+
+ return_type operator[](uoffset_t i) const { return Get(i); }
+
+ // If this is a Vector of enums, T will be its storage type, not the enum
+  // type. This function makes it convenient to retrieve values with enum
+ // type E.
+ template <typename E> E GetEnum(uoffset_t i) const { return static_cast<E>(Get(i)); }
+
+ const_iterator begin() const { return const_iterator(Data(), 0); }
+ const_iterator end() const { return const_iterator(Data(), size()); }
+
+ const_reverse_iterator rbegin() const { return const_reverse_iterator(end()); }
+ const_reverse_iterator rend() const { return const_reverse_iterator(begin()); }
+
+ const_iterator cbegin() const { return begin(); }
+ const_iterator cend() const { return end(); }
+
+ const_reverse_iterator crbegin() const { return rbegin(); }
+ const_reverse_iterator crend() const { return rend(); }
+
+ // Get a mutable pointer to elements inside this array.
+  // This method is used to mutate arrays of structs, followed by a @p Mutate
+ // operation. For primitive types use @p Mutate directly.
+ // @warning Assignments and reads to/from the dereferenced pointer are not
+ // automatically converted to the correct endianness.
+ typename flatbuffers::conditional<scalar_tag::value, void, T *>::type
+ GetMutablePointer(uoffset_t i) const
+ {
+ FLATBUFFERS_ASSERT(i < size());
+ return const_cast<T *>(&data()[i]);
+ }
+
+ // Change elements if you have a non-const pointer to this object.
+ void Mutate(uoffset_t i, const T &val) { MutateImpl(scalar_tag(), i, val); }
+
+ // The raw data in little endian format. Use with care.
+ const uint8_t *Data() const { return data_; }
+
+ uint8_t *Data() { return data_; }
+
+ // Similarly, but typed, much like std::vector::data
+ const T *data() const { return reinterpret_cast<const T *>(Data()); }
+ T *data() { return reinterpret_cast<T *>(Data()); }
+
+ // Copy data from a span with endian conversion.
+ // If this Array and the span overlap, the behavior is undefined.
+ void CopyFromSpan(flatbuffers::span<const T, length> src)
+ {
+ const auto p1 = reinterpret_cast<const uint8_t *>(src.data());
+ const auto p2 = Data();
+ FLATBUFFERS_ASSERT(!(p1 >= p2 && p1 < (p2 + length)) && !(p2 >= p1 && p2 < (p1 + length)));
+ (void)p1;
+ (void)p2;
+
+ CopyFromSpanImpl(flatbuffers::integral_constant < bool,
+ !scalar_tag::value || sizeof(T) == 1 || FLATBUFFERS_LITTLEENDIAN > (), src);
+ }
+
+protected:
+ void MutateImpl(flatbuffers::integral_constant<bool, true>, uoffset_t i, const T &val)
+ {
+ FLATBUFFERS_ASSERT(i < size());
+ WriteScalar(data() + i, val);
+ }
+
+ void MutateImpl(flatbuffers::integral_constant<bool, false>, uoffset_t i, const T &val)
+ {
+ *(GetMutablePointer(i)) = val;
+ }
+
+ void CopyFromSpanImpl(flatbuffers::integral_constant<bool, true>,
+ flatbuffers::span<const T, length> src)
+ {
+    // Use std::memcpy() instead of std::copy() to avoid performance degradation
+ // due to aliasing if T is char or unsigned char.
+ // The size is known at compile time, so memcpy would be inlined.
+ std::memcpy(data(), src.data(), length * sizeof(T));
+ }
+
+ // Copy data from flatbuffers::span with endian conversion.
+ void CopyFromSpanImpl(flatbuffers::integral_constant<bool, false>,
+ flatbuffers::span<const T, length> src)
+ {
+ for (size_type k = 0; k < length; k++)
+ {
+ Mutate(k, src[k]);
+ }
+ }
+
+ // This class is only used to access pre-existing data. Don't ever
+ // try to construct these manually.
+ // 'constexpr' allows us to use 'size()' at compile time.
+ // @note Must not use 'FLATBUFFERS_CONSTEXPR' here, as const is not allowed on
+ // a constructor.
+#if defined(__cpp_constexpr)
+ constexpr Array();
+#else
+ Array();
+#endif
+
+ uint8_t data_[length * sizeof(T)];
+
+private:
+ // This class is a pointer. Copying will therefore create an invalid object.
+ // Private and unimplemented copy constructor.
+ Array(const Array &);
+ Array &operator=(const Array &);
+};
+
+// Specialization for Array[struct] with access using Offset<void> pointer.
+// This specialization is used by idl_gen_text.cpp.
+template <typename T, uint16_t length> class Array<Offset<T>, length>
+{
+ static_assert(flatbuffers::is_same<T, void>::value, "unexpected type T");
+
+public:
+ typedef const void *return_type;
+
+ const uint8_t *Data() const { return data_; }
+
+ // Make idl_gen_text.cpp::PrintContainer happy.
+ return_type operator[](uoffset_t) const
+ {
+ FLATBUFFERS_ASSERT(false);
+ return nullptr;
+ }
+
+private:
+ // This class is only used to access pre-existing data.
+ Array();
+ Array(const Array &);
+ Array &operator=(const Array &);
+
+ uint8_t data_[1];
+};
+
+// Cast a raw T[length] to a raw flatbuffers::Array<T, length>
+// without endian conversion. Use with care.
+template <typename T, uint16_t length> Array<T, length> &CastToArray(T (&arr)[length])
+{
+ return *reinterpret_cast<Array<T, length> *>(arr);
+}
+
+template <typename T, uint16_t length> const Array<T, length> &CastToArray(const T (&arr)[length])
+{
+ return *reinterpret_cast<const Array<T, length> *>(arr);
+}
+
+template <typename E, typename T, uint16_t length>
+Array<E, length> &CastToArrayOfEnum(T (&arr)[length])
+{
+ static_assert(sizeof(E) == sizeof(T), "invalid enum type E");
+ return *reinterpret_cast<Array<E, length> *>(arr);
+}
+
+template <typename E, typename T, uint16_t length>
+const Array<E, length> &CastToArrayOfEnum(const T (&arr)[length])
+{
+ static_assert(sizeof(E) == sizeof(T), "invalid enum type E");
+ return *reinterpret_cast<const Array<E, length> *>(arr);
+}
+
+// Lexicographically compare two strings (possibly containing nulls), and
+// return true if the first is less than the second.
+static inline bool StringLessThan(const char *a_data, uoffset_t a_size, const char *b_data,
+ uoffset_t b_size)
+{
+ const auto cmp = memcmp(a_data, b_data, (std::min)(a_size, b_size));
+ return cmp == 0 ? a_size < b_size : cmp < 0;
+}
+
+struct String : public Vector<char>
+{
+ const char *c_str() const { return reinterpret_cast<const char *>(Data()); }
+ std::string str() const { return std::string(c_str(), size()); }
+
+#ifdef FLATBUFFERS_HAS_STRING_VIEW
+ flatbuffers::string_view string_view() const { return flatbuffers::string_view(c_str(), size()); }
+#endif // FLATBUFFERS_HAS_STRING_VIEW
+ // clang-format on
+
+ bool operator<(const String &o) const
+ {
+ return StringLessThan(this->data(), this->size(), o.data(), o.size());
+ }
+};
+
+// Convenience function to get std::string from a String returning an empty
+// string on null pointer.
+static inline std::string GetString(const String *str) { return str ? str->str() : ""; }
+
+// Convenience function to get char* from a String returning an empty string on
+// null pointer.
+static inline const char *GetCstring(const String *str) { return str ? str->c_str() : ""; }
+
+#ifdef FLATBUFFERS_HAS_STRING_VIEW
+// Convenience function to get string_view from a String returning an empty
+// string_view on null pointer.
+static inline flatbuffers::string_view GetStringView(const String *str)
+{
+ return str ? str->string_view() : flatbuffers::string_view();
+}
+#endif // FLATBUFFERS_HAS_STRING_VIEW
+
+// Allocator interface. This is flatbuffers-specific and meant only for
+// `vector_downward` usage.
+class Allocator
+{
+public:
+ virtual ~Allocator() {}
+
+ // Allocate `size` bytes of memory.
+ virtual uint8_t *allocate(size_t size) = 0;
+
+ // Deallocate `size` bytes of memory at `p` allocated by this allocator.
+ virtual void deallocate(uint8_t *p, size_t size) = 0;
+
+ // Reallocate `new_size` bytes of memory, replacing the old region of size
+ // `old_size` at `p`. In contrast to a normal realloc, this grows downwards,
+  // and is intended specifically for `vector_downward` use.
+ // `in_use_back` and `in_use_front` indicate how much of `old_size` is
+ // actually in use at each end, and needs to be copied.
+ virtual uint8_t *reallocate_downward(uint8_t *old_p, size_t old_size, size_t new_size,
+ size_t in_use_back, size_t in_use_front)
+ {
+ FLATBUFFERS_ASSERT(new_size > old_size); // vector_downward only grows
+ uint8_t *new_p = allocate(new_size);
+ memcpy_downward(old_p, old_size, new_p, new_size, in_use_back, in_use_front);
+ deallocate(old_p, old_size);
+ return new_p;
+ }
+
+protected:
+ // Called by `reallocate_downward` to copy memory from `old_p` of `old_size`
+ // to `new_p` of `new_size`. Only memory of size `in_use_front` and
+ // `in_use_back` will be copied from the front and back of the old memory
+ // allocation.
+ void memcpy_downward(uint8_t *old_p, size_t old_size, uint8_t *new_p, size_t new_size,
+ size_t in_use_back, size_t in_use_front)
+ {
+ memcpy(new_p + new_size - in_use_back, old_p + old_size - in_use_back, in_use_back);
+ memcpy(new_p, old_p, in_use_front);
+ }
+};
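+
+// Editorial illustration of memcpy_downward (not upstream): only the two
+// in-use ends of the old region are preserved, e.g.
+//   old (old_size): [ in_use_front | ...unused gap... | in_use_back ]
+//   new (new_size): [ in_use_front | ..larger gap.... | in_use_back ]
+// The front is copied to the start of the new block, and the back is copied
+// so that it still ends at new_p + new_size.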
+
+// DefaultAllocator uses new/delete to allocate memory regions
+class DefaultAllocator : public Allocator
+{
+public:
+ uint8_t *allocate(size_t size) FLATBUFFERS_OVERRIDE { return new uint8_t[size]; }
+
+ void deallocate(uint8_t *p, size_t) FLATBUFFERS_OVERRIDE { delete[] p; }
+
+ static void dealloc(void *p, size_t) { delete[] static_cast<uint8_t *>(p); }
+};
+
+// These functions allow for a null allocator to mean use the default allocator,
+// as used by DetachedBuffer and vector_downward below.
+// This is to avoid having a statically or dynamically allocated default
+// allocator, or having to move it between the classes that may own it.
+inline uint8_t *Allocate(Allocator *allocator, size_t size)
+{
+ return allocator ? allocator->allocate(size) : DefaultAllocator().allocate(size);
+}
+
+inline void Deallocate(Allocator *allocator, uint8_t *p, size_t size)
+{
+ if (allocator)
+ allocator->deallocate(p, size);
+ else
+ DefaultAllocator().deallocate(p, size);
+}
+
+inline uint8_t *ReallocateDownward(Allocator *allocator, uint8_t *old_p, size_t old_size,
+ size_t new_size, size_t in_use_back, size_t in_use_front)
+{
+ return allocator
+ ? allocator->reallocate_downward(old_p, old_size, new_size, in_use_back, in_use_front)
+ : DefaultAllocator().reallocate_downward(old_p, old_size, new_size, in_use_back,
+ in_use_front);
+}
+
+// DetachedBuffer is a finished flatbuffer memory region, detached from its
+// builder. The original memory region and allocator are also stored so that
+// the DetachedBuffer can manage the memory lifetime.
+class DetachedBuffer
+{
+public:
+ DetachedBuffer()
+ : allocator_(nullptr), own_allocator_(false), buf_(nullptr), reserved_(0), cur_(nullptr),
+ size_(0)
+ {
+ }
+
+ DetachedBuffer(Allocator *allocator, bool own_allocator, uint8_t *buf, size_t reserved,
+ uint8_t *cur, size_t sz)
+ : allocator_(allocator), own_allocator_(own_allocator), buf_(buf), reserved_(reserved),
+ cur_(cur), size_(sz)
+ {
+ }
+
+#if !defined(FLATBUFFERS_CPP98_STL)
+ // clang-format on
+ DetachedBuffer(DetachedBuffer &&other)
+ : allocator_(other.allocator_), own_allocator_(other.own_allocator_), buf_(other.buf_),
+ reserved_(other.reserved_), cur_(other.cur_), size_(other.size_)
+ {
+ other.reset();
+ }
+#endif // !defined(FLATBUFFERS_CPP98_STL)
+
+#if !defined(FLATBUFFERS_CPP98_STL)
+ // clang-format on
+ DetachedBuffer &operator=(DetachedBuffer &&other)
+ {
+ if (this == &other)
+ return *this;
+
+ destroy();
+
+ allocator_ = other.allocator_;
+ own_allocator_ = other.own_allocator_;
+ buf_ = other.buf_;
+ reserved_ = other.reserved_;
+ cur_ = other.cur_;
+ size_ = other.size_;
+
+ other.reset();
+
+ return *this;
+ }
+#endif // !defined(FLATBUFFERS_CPP98_STL)
+ // clang-format on
+
+ ~DetachedBuffer() { destroy(); }
+
+ const uint8_t *data() const { return cur_; }
+
+ uint8_t *data() { return cur_; }
+
+ size_t size() const { return size_; }
+
+#if 0 // disabled for now due to the ordering of classes in this header
+ template <class T>
+ bool Verify() const {
+ Verifier verifier(data(), size());
+ return verifier.Verify<T>(nullptr);
+ }
+
+ template <class T>
+ const T* GetRoot() const {
+ return flatbuffers::GetRoot<T>(data());
+ }
+
+ template <class T>
+ T* GetRoot() {
+ return flatbuffers::GetRoot<T>(data());
+ }
+#endif
+
+#if !defined(FLATBUFFERS_CPP98_STL)
+ // clang-format on
+ // These may change access mode, leave these at end of public section
+ FLATBUFFERS_DELETE_FUNC(DetachedBuffer(const DetachedBuffer &other));
+ FLATBUFFERS_DELETE_FUNC(DetachedBuffer &operator=(const DetachedBuffer &other));
+#endif // !defined(FLATBUFFERS_CPP98_STL)
+ // clang-format on
+
+protected:
+ Allocator *allocator_;
+ bool own_allocator_;
+ uint8_t *buf_;
+ size_t reserved_;
+ uint8_t *cur_;
+ size_t size_;
+
+ inline void destroy()
+ {
+ if (buf_)
+ Deallocate(allocator_, buf_, reserved_);
+ if (own_allocator_ && allocator_)
+ {
+ delete allocator_;
+ }
+ reset();
+ }
+
+ inline void reset()
+ {
+ allocator_ = nullptr;
+ own_allocator_ = false;
+ buf_ = nullptr;
+ reserved_ = 0;
+ cur_ = nullptr;
+ size_ = 0;
+ }
+};
+
+// This is a minimal replication of std::vector<uint8_t> functionality,
+// except growing from higher to lower addresses, i.e. push_back() inserts data
+// in the lowest address in the vector.
+// Since this vector leaves the lower part unused, we support a "scratch-pad"
+// that can be stored there for temporary data, to share the allocated space.
+// Essentially, this supports 2 std::vectors in a single buffer.
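+// Editorial layout sketch (illustration only):
+//   buf_ ........ scratch_ .............. cur_ ........ buf_ + reserved_
+//   [ scratch-pad grows upward ->)       (<- serialized data grows downward ]
+// cur_ marks the boundary between free space (below) and used data (above),
+// and scratch_ marks the end of the scratch-pad currently in use.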
+class vector_downward
+{
+public:
+ explicit vector_downward(size_t initial_size, Allocator *allocator, bool own_allocator,
+ size_t buffer_minalign)
+ : allocator_(allocator), own_allocator_(own_allocator), initial_size_(initial_size),
+ buffer_minalign_(buffer_minalign), reserved_(0), buf_(nullptr), cur_(nullptr),
+ scratch_(nullptr)
+ {
+ }
+
+#if !defined(FLATBUFFERS_CPP98_STL)
+ vector_downward(vector_downward &&other)
+#else
+ vector_downward(vector_downward &other)
+#endif // defined(FLATBUFFERS_CPP98_STL)
+ // clang-format on
+ : allocator_(other.allocator_), own_allocator_(other.own_allocator_),
+ initial_size_(other.initial_size_), buffer_minalign_(other.buffer_minalign_),
+ reserved_(other.reserved_), buf_(other.buf_), cur_(other.cur_), scratch_(other.scratch_)
+ {
+ // No change in other.allocator_
+ // No change in other.initial_size_
+ // No change in other.buffer_minalign_
+ other.own_allocator_ = false;
+ other.reserved_ = 0;
+ other.buf_ = nullptr;
+ other.cur_ = nullptr;
+ other.scratch_ = nullptr;
+ }
+
+#if !defined(FLATBUFFERS_CPP98_STL)
+ // clang-format on
+ vector_downward &operator=(vector_downward &&other)
+ {
+ // Move construct a temporary and swap idiom
+ vector_downward temp(std::move(other));
+ swap(temp);
+ return *this;
+ }
+#endif // defined(FLATBUFFERS_CPP98_STL)
+ // clang-format on
+
+ ~vector_downward()
+ {
+ clear_buffer();
+ clear_allocator();
+ }
+
+ void reset()
+ {
+ clear_buffer();
+ clear();
+ }
+
+ void clear()
+ {
+ if (buf_)
+ {
+ cur_ = buf_ + reserved_;
+ }
+ else
+ {
+ reserved_ = 0;
+ cur_ = nullptr;
+ }
+ clear_scratch();
+ }
+
+ void clear_scratch() { scratch_ = buf_; }
+
+ void clear_allocator()
+ {
+ if (own_allocator_ && allocator_)
+ {
+ delete allocator_;
+ }
+ allocator_ = nullptr;
+ own_allocator_ = false;
+ }
+
+ void clear_buffer()
+ {
+ if (buf_)
+ Deallocate(allocator_, buf_, reserved_);
+ buf_ = nullptr;
+ }
+
+ // Relinquish the pointer to the caller.
+ uint8_t *release_raw(size_t &allocated_bytes, size_t &offset)
+ {
+ auto *buf = buf_;
+ allocated_bytes = reserved_;
+ offset = static_cast<size_t>(cur_ - buf_);
+
+ // release_raw only relinquishes the buffer ownership.
+ // Does not deallocate or reset the allocator. Destructor will do that.
+ buf_ = nullptr;
+ clear();
+ return buf;
+ }
+
+ // Relinquish the pointer to the caller.
+ DetachedBuffer release()
+ {
+ // allocator ownership (if any) is transferred to DetachedBuffer.
+ DetachedBuffer fb(allocator_, own_allocator_, buf_, reserved_, cur_, size());
+ if (own_allocator_)
+ {
+ allocator_ = nullptr;
+ own_allocator_ = false;
+ }
+ buf_ = nullptr;
+ clear();
+ return fb;
+ }
+
+ size_t ensure_space(size_t len)
+ {
+ FLATBUFFERS_ASSERT(cur_ >= scratch_ && scratch_ >= buf_);
+ if (len > static_cast<size_t>(cur_ - scratch_))
+ {
+ reallocate(len);
+ }
+ // Beyond this, signed offsets may not have enough range:
+ // (FlatBuffers > 2GB not supported).
+ FLATBUFFERS_ASSERT(size() < FLATBUFFERS_MAX_BUFFER_SIZE);
+ return len;
+ }
+
+ inline uint8_t *make_space(size_t len)
+ {
+ size_t space = ensure_space(len);
+ cur_ -= space;
+ return cur_;
+ }
+
+ // Returns nullptr if using the DefaultAllocator.
+ Allocator *get_custom_allocator() { return allocator_; }
+
+ uoffset_t size() const
+ {
+ return static_cast<uoffset_t>(reserved_ - static_cast<size_t>(cur_ - buf_));
+ }
+
+ uoffset_t scratch_size() const { return static_cast<uoffset_t>(scratch_ - buf_); }
+
+ size_t capacity() const { return reserved_; }
+
+ uint8_t *data() const
+ {
+ FLATBUFFERS_ASSERT(cur_);
+ return cur_;
+ }
+
+ uint8_t *scratch_data() const
+ {
+ FLATBUFFERS_ASSERT(buf_);
+ return buf_;
+ }
+
+ uint8_t *scratch_end() const
+ {
+ FLATBUFFERS_ASSERT(scratch_);
+ return scratch_;
+ }
+
+ uint8_t *data_at(size_t offset) const { return buf_ + reserved_ - offset; }
+
+ void push(const uint8_t *bytes, size_t num)
+ {
+ if (num > 0)
+ {
+ memcpy(make_space(num), bytes, num);
+ }
+ }
+
+  // Specialized version of push() that avoids a memcpy call for small data.
+ template <typename T> void push_small(const T &little_endian_t)
+ {
+ make_space(sizeof(T));
+ *reinterpret_cast<T *>(cur_) = little_endian_t;
+ }
+
+ template <typename T> void scratch_push_small(const T &t)
+ {
+ ensure_space(sizeof(T));
+ *reinterpret_cast<T *>(scratch_) = t;
+ scratch_ += sizeof(T);
+ }
+
+ // fill() is most frequently called with small byte counts (<= 4),
+ // which is why we're using loops rather than calling memset.
+ void fill(size_t zero_pad_bytes)
+ {
+ make_space(zero_pad_bytes);
+ for (size_t i = 0; i < zero_pad_bytes; i++)
+ cur_[i] = 0;
+ }
+
+ // Version for when we know the size is larger.
+ // Precondition: zero_pad_bytes > 0
+ void fill_big(size_t zero_pad_bytes) { memset(make_space(zero_pad_bytes), 0, zero_pad_bytes); }
+
+ void pop(size_t bytes_to_remove) { cur_ += bytes_to_remove; }
+ void scratch_pop(size_t bytes_to_remove) { scratch_ -= bytes_to_remove; }
+
+ void swap(vector_downward &other)
+ {
+ using std::swap;
+ swap(allocator_, other.allocator_);
+ swap(own_allocator_, other.own_allocator_);
+ swap(initial_size_, other.initial_size_);
+ swap(buffer_minalign_, other.buffer_minalign_);
+ swap(reserved_, other.reserved_);
+ swap(buf_, other.buf_);
+ swap(cur_, other.cur_);
+ swap(scratch_, other.scratch_);
+ }
+
+ void swap_allocator(vector_downward &other)
+ {
+ using std::swap;
+ swap(allocator_, other.allocator_);
+ swap(own_allocator_, other.own_allocator_);
+ }
+
+private:
+ // You shouldn't really be copying instances of this class.
+ FLATBUFFERS_DELETE_FUNC(vector_downward(const vector_downward &));
+ FLATBUFFERS_DELETE_FUNC(vector_downward &operator=(const vector_downward &));
+
+ Allocator *allocator_;
+ bool own_allocator_;
+ size_t initial_size_;
+ size_t buffer_minalign_;
+ size_t reserved_;
+ uint8_t *buf_;
+ uint8_t *cur_; // Points at location between empty (below) and used (above).
+ uint8_t *scratch_; // Points to the end of the scratchpad in use.
+
+ void reallocate(size_t len)
+ {
+ auto old_reserved = reserved_;
+ auto old_size = size();
+ auto old_scratch_size = scratch_size();
+ reserved_ += (std::max)(len, old_reserved ? old_reserved / 2 : initial_size_);
+ reserved_ = (reserved_ + buffer_minalign_ - 1) & ~(buffer_minalign_ - 1);
+ if (buf_)
+ {
+ buf_ =
+ ReallocateDownward(allocator_, buf_, old_reserved, reserved_, old_size, old_scratch_size);
+ }
+ else
+ {
+ buf_ = Allocate(allocator_, reserved_);
+ }
+ cur_ = buf_ + reserved_ - old_size;
+ scratch_ = buf_ + old_scratch_size;
+ }
+};
+
+// Converts a Field ID to a virtual table offset.
+inline voffset_t FieldIndexToOffset(voffset_t field_id)
+{
+ // Should correspond to what EndTable() below builds up.
+ const int fixed_fields = 2; // Vtable size and Object Size.
+ return static_cast<voffset_t>((field_id + fixed_fields) * sizeof(voffset_t));
+}
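+// For example (assuming the usual 16-bit voffset_t): field_id 0 maps to vtable
+// offset 4 and field_id 1 to offset 6, because the first two vtable slots hold
+// the vtable size and the object size.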
+
+template <typename T, typename Alloc> const T *data(const std::vector<T, Alloc> &v)
+{
+ // Eventually the returned pointer gets passed down to memcpy, so
+ // we need it to be non-null to avoid undefined behavior.
+ static uint8_t t;
+ return v.empty() ? reinterpret_cast<const T *>(&t) : &v.front();
+}
+template <typename T, typename Alloc> T *data(std::vector<T, Alloc> &v)
+{
+ // Eventually the returned pointer gets passed down to memcpy, so
+ // we need it to be non-null to avoid undefined behavior.
+ static uint8_t t;
+ return v.empty() ? reinterpret_cast<T *>(&t) : &v.front();
+}
+
+/// @endcond
+
+/// @addtogroup flatbuffers_cpp_api
+/// @{
+/// @class FlatBufferBuilder
+/// @brief Helper class to hold data needed in creation of a FlatBuffer.
+/// To serialize data, you typically call one of the `Create*()` functions in
+/// the generated code, which in turn call a sequence of `StartTable`/
+/// `PushElement`/`AddElement`/`EndTable`, or the builtin `CreateString`/
+/// `CreateVector` functions. Do this in depth-first order to build up a tree to
+/// the root. `Finish()` wraps up the buffer ready for transport.
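+///
+/// A minimal hand-rolled sketch using only the builtins declared below
+/// (illustrative only; real code normally goes through the generated
+/// `Create*()` helpers and a generated root type):
+/// @code
+///   flatbuffers::FlatBufferBuilder fbb;
+///   auto str = fbb.CreateString("example");                   // children first
+///   auto start = fbb.StartTable();
+///   fbb.AddOffset(flatbuffers::FieldIndexToOffset(0), str);   // field 0 -> the string
+///   auto root = fbb.EndTable(start);
+///   fbb.Finish(flatbuffers::Offset<flatbuffers::Table>(root));
+///   const uint8_t *buf = fbb.GetBufferPointer();              // finished bytes
+///   flatbuffers::uoffset_t size = fbb.GetSize();
+/// @endcode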
+class FlatBufferBuilder
+{
+public:
+ /// @brief Default constructor for FlatBufferBuilder.
+ /// @param[in] initial_size The initial size of the buffer, in bytes. Defaults
+ /// to `1024`.
+ /// @param[in] allocator An `Allocator` to use. If null will use
+ /// `DefaultAllocator`.
+ /// @param[in] own_allocator Whether the builder/vector should own the
+/// allocator. Defaults to `false`.
+ /// @param[in] buffer_minalign Force the buffer to be aligned to the given
+ /// minimum alignment upon reallocation. Only needed if you intend to store
+ /// types with custom alignment AND you wish to read the buffer in-place
+ /// directly after creation.
+ explicit FlatBufferBuilder(size_t initial_size = 1024, Allocator *allocator = nullptr,
+ bool own_allocator = false,
+ size_t buffer_minalign = AlignOf<largest_scalar_t>())
+ : buf_(initial_size, allocator, own_allocator, buffer_minalign), num_field_loc(0),
+ max_voffset_(0), nested(false), finished(false), minalign_(1), force_defaults_(false),
+ dedup_vtables_(true), string_pool(nullptr)
+ {
+ EndianCheck();
+ }
+
+/// @brief Move constructor for FlatBufferBuilder.
+#if !defined(FLATBUFFERS_CPP98_STL)
+ FlatBufferBuilder(FlatBufferBuilder &&other)
+#else
+ FlatBufferBuilder(FlatBufferBuilder &other)
+#endif // #if !defined(FLATBUFFERS_CPP98_STL)
+ : buf_(1024, nullptr, false, AlignOf<largest_scalar_t>()), num_field_loc(0), max_voffset_(0),
+ nested(false), finished(false), minalign_(1), force_defaults_(false), dedup_vtables_(true),
+ string_pool(nullptr)
+ {
+ EndianCheck();
+ // Default construct and swap idiom.
+ // Lack of delegating constructors in vs2010 makes it more verbose than needed.
+ Swap(other);
+ }
+
+#if !defined(FLATBUFFERS_CPP98_STL)
+ // clang-format on
+ /// @brief Move assignment operator for FlatBufferBuilder.
+ FlatBufferBuilder &operator=(FlatBufferBuilder &&other)
+ {
+ // Move construct a temporary and swap idiom
+ FlatBufferBuilder temp(std::move(other));
+ Swap(temp);
+ return *this;
+ }
+#endif // defined(FLATBUFFERS_CPP98_STL)
+ // clang-format on
+
+ void Swap(FlatBufferBuilder &other)
+ {
+ using std::swap;
+ buf_.swap(other.buf_);
+ swap(num_field_loc, other.num_field_loc);
+ swap(max_voffset_, other.max_voffset_);
+ swap(nested, other.nested);
+ swap(finished, other.finished);
+ swap(minalign_, other.minalign_);
+ swap(force_defaults_, other.force_defaults_);
+ swap(dedup_vtables_, other.dedup_vtables_);
+ swap(string_pool, other.string_pool);
+ }
+
+ ~FlatBufferBuilder()
+ {
+ if (string_pool)
+ delete string_pool;
+ }
+
+ void Reset()
+ {
+ Clear(); // clear builder state
+ buf_.reset(); // deallocate buffer
+ }
+
+ /// @brief Reset all the state in this FlatBufferBuilder so it can be reused
+ /// to construct another buffer.
+ void Clear()
+ {
+ ClearOffsets();
+ buf_.clear();
+ nested = false;
+ finished = false;
+ minalign_ = 1;
+ if (string_pool)
+ string_pool->clear();
+ }
+
+ /// @brief The current size of the serialized buffer, counting from the end.
+ /// @return Returns an `uoffset_t` with the current size of the buffer.
+ uoffset_t GetSize() const { return buf_.size(); }
+
+ /// @brief Get the serialized buffer (after you call `Finish()`).
+ /// @return Returns an `uint8_t` pointer to the FlatBuffer data inside the
+ /// buffer.
+ uint8_t *GetBufferPointer() const
+ {
+ Finished();
+ return buf_.data();
+ }
+
+ /// @brief Get the serialized buffer (after you call `Finish()`) as a span.
+ /// @return Returns a constructed flatbuffers::span that is a view over the
+ /// FlatBuffer data inside the buffer.
+ flatbuffers::span<uint8_t> GetBufferSpan() const
+ {
+ Finished();
+ return flatbuffers::span<uint8_t>(buf_.data(), buf_.size());
+ }
+
+ /// @brief Get a pointer to an unfinished buffer.
+ /// @return Returns a `uint8_t` pointer to the unfinished buffer.
+ uint8_t *GetCurrentBufferPointer() const { return buf_.data(); }
+
+ /// @brief Get the released pointer to the serialized buffer.
+ /// @warning Do NOT attempt to use this FlatBufferBuilder afterwards!
+ /// @return A `FlatBuffer` that owns the buffer and its allocator and
+  /// behaves similarly to a `unique_ptr` with a deleter.
+ FLATBUFFERS_ATTRIBUTE(deprecated("use Release() instead"))
+ DetachedBuffer ReleaseBufferPointer()
+ {
+ Finished();
+ return buf_.release();
+ }
+
+ /// @brief Get the released DetachedBuffer.
+ /// @return A `DetachedBuffer` that owns the buffer and its allocator.
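+  /// For example (sketch): `auto detached = fbb.Release();` transfers the
+  /// finished bytes (and, if owned, the allocator) to `detached`, which keeps
+  /// them valid even after this builder is reused or destroyed.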
+ DetachedBuffer Release()
+ {
+ Finished();
+ return buf_.release();
+ }
+
+ /// @brief Get the released pointer to the serialized buffer.
+ /// @param size The size of the memory block containing
+ /// the serialized `FlatBuffer`.
+ /// @param offset The offset from the released pointer where the finished
+ /// `FlatBuffer` starts.
+ /// @return A raw pointer to the start of the memory block containing
+ /// the serialized `FlatBuffer`.
+ /// @remark If the allocator is owned, it gets deleted when the destructor is
+  /// called.
+ uint8_t *ReleaseRaw(size_t &size, size_t &offset)
+ {
+ Finished();
+ return buf_.release_raw(size, offset);
+ }
+
+  /// @brief Get the minimum alignment this buffer needs to be accessed
+ /// properly. This is only known once all elements have been written (after
+ /// you call Finish()). You can use this information if you need to embed
+ /// a FlatBuffer in some other buffer, such that you can later read it
+ /// without first having to copy it into its own buffer.
+ size_t GetBufferMinAlignment() const
+ {
+ Finished();
+ return minalign_;
+ }
+
+ /// @cond FLATBUFFERS_INTERNAL
+ void Finished() const
+ {
+    // If you get this assert, you're attempting to access a buffer
+ // which hasn't been finished yet. Be sure to call
+ // FlatBufferBuilder::Finish with your root table.
+ // If you really need to access an unfinished buffer, call
+ // GetCurrentBufferPointer instead.
+ FLATBUFFERS_ASSERT(finished);
+ }
+ /// @endcond
+
+ /// @brief In order to save space, fields that are set to their default value
+ /// don't get serialized into the buffer.
+ /// @param[in] fd When set to `true`, always serializes default values that
+  /// are set. Optional fields which are not set explicitly will still not be
+ /// serialized.
+ void ForceDefaults(bool fd) { force_defaults_ = fd; }
+
+ /// @brief By default vtables are deduped in order to save space.
+ /// @param[in] dedup When set to `true`, dedup vtables.
+ void DedupVtables(bool dedup) { dedup_vtables_ = dedup; }
+
+ /// @cond FLATBUFFERS_INTERNAL
+ void Pad(size_t num_bytes) { buf_.fill(num_bytes); }
+
+ void TrackMinAlign(size_t elem_size)
+ {
+ if (elem_size > minalign_)
+ minalign_ = elem_size;
+ }
+
+ void Align(size_t elem_size)
+ {
+ TrackMinAlign(elem_size);
+ buf_.fill(PaddingBytes(buf_.size(), elem_size));
+ }
+
+ void PushFlatBuffer(const uint8_t *bytes, size_t size)
+ {
+ PushBytes(bytes, size);
+ finished = true;
+ }
+
+ void PushBytes(const uint8_t *bytes, size_t size) { buf_.push(bytes, size); }
+
+ void PopBytes(size_t amount) { buf_.pop(amount); }
+
+ template <typename T> void AssertScalarT()
+ {
+ // The code assumes power of 2 sizes and endian-swap-ability.
+ static_assert(flatbuffers::is_scalar<T>::value, "T must be a scalar type");
+ }
+
+ // Write a single aligned scalar to the buffer
+ template <typename T> uoffset_t PushElement(T element)
+ {
+ AssertScalarT<T>();
+    T little_endian_element = EndianScalar(element);
+    Align(sizeof(T));
+    buf_.push_small(little_endian_element);
+ return GetSize();
+ }
+
+ template <typename T> uoffset_t PushElement(Offset<T> off)
+ {
+ // Special case for offsets: see ReferTo below.
+ return PushElement(ReferTo(off.o));
+ }
+
+ // When writing fields, we track where they are, so we can create correct
+ // vtables later.
+ void TrackField(voffset_t field, uoffset_t off)
+ {
+ FieldLoc fl = {off, field};
+ buf_.scratch_push_small(fl);
+ num_field_loc++;
+ max_voffset_ = (std::max)(max_voffset_, field);
+ }
+
+ // Like PushElement, but additionally tracks the field this represents.
+ template <typename T> void AddElement(voffset_t field, T e, T def)
+ {
+ // We don't serialize values equal to the default.
+ if (IsTheSameAs(e, def) && !force_defaults_)
+ return;
+ auto off = PushElement(e);
+ TrackField(field, off);
+ }
+
+ template <typename T> void AddElement(voffset_t field, T e)
+ {
+ auto off = PushElement(e);
+ TrackField(field, off);
+ }
+
+ template <typename T> void AddOffset(voffset_t field, Offset<T> off)
+ {
+ if (off.IsNull())
+ return; // Don't store.
+ AddElement(field, ReferTo(off.o), static_cast<uoffset_t>(0));
+ }
+
+ template <typename T> void AddStruct(voffset_t field, const T *structptr)
+ {
+ if (!structptr)
+ return; // Default, don't store.
+ Align(AlignOf<T>());
+ buf_.push_small(*structptr);
+ TrackField(field, GetSize());
+ }
+
+ void AddStructOffset(voffset_t field, uoffset_t off) { TrackField(field, off); }
+
+ // Offsets initially are relative to the end of the buffer (downwards).
+ // This function converts them to be relative to the current location
+ // in the buffer (when stored here), pointing upwards.
+ uoffset_t ReferTo(uoffset_t off)
+ {
+ // Align to ensure GetSize() below is correct.
+ Align(sizeof(uoffset_t));
+ // Offset must refer to something already in buffer.
+ FLATBUFFERS_ASSERT(off && off <= GetSize());
+ return GetSize() - off + static_cast<uoffset_t>(sizeof(uoffset_t));
+ }
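+  // For example (sketch): with GetSize() == 96 after the alignment above and
+  // off == 40, the stored value is 96 - 40 + 4 = 60, i.e. the forward distance
+  // from the location where this uoffset_t itself is about to be written.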
+
+ void NotNested()
+ {
+ // If you hit this, you're trying to construct a Table/Vector/String
+ // during the construction of its parent table (between the MyTableBuilder
+    // and table.Finish()).
+ // Move the creation of these sub-objects to above the MyTableBuilder to
+ // not get this assert.
+ // Ignoring this assert may appear to work in simple cases, but the reason
+ // it is here is that storing objects in-line may cause vtable offsets
+ // to not fit anymore. It also leads to vtable duplication.
+ FLATBUFFERS_ASSERT(!nested);
+ // If you hit this, fields were added outside the scope of a table.
+ FLATBUFFERS_ASSERT(!num_field_loc);
+ }
+
+ // From generated code (or from the parser), we call StartTable/EndTable
+ // with a sequence of AddElement calls in between.
+ uoffset_t StartTable()
+ {
+ NotNested();
+ nested = true;
+ return GetSize();
+ }
+
+ // This finishes one serialized object by generating the vtable if it's a
+ // table, comparing it against existing vtables, and writing the
+ // resulting vtable offset.
+ uoffset_t EndTable(uoffset_t start)
+ {
+ // If you get this assert, a corresponding StartTable wasn't called.
+ FLATBUFFERS_ASSERT(nested);
+ // Write the vtable offset, which is the start of any Table.
+    // We fill in its value later.
+ auto vtableoffsetloc = PushElement<soffset_t>(0);
+ // Write a vtable, which consists entirely of voffset_t elements.
+    // It starts with the vtable size in bytes, followed by the object size,
+    // followed by the field offsets themselves, written here in reverse:
+ // Include space for the last offset and ensure empty tables have a
+ // minimum size.
+ max_voffset_ =
+ (std::max)(static_cast<voffset_t>(max_voffset_ + sizeof(voffset_t)), FieldIndexToOffset(0));
+ buf_.fill_big(max_voffset_);
+ auto table_object_size = vtableoffsetloc - start;
+    // Vtables use 16-bit offsets.
+ FLATBUFFERS_ASSERT(table_object_size < 0x10000);
+ WriteScalar<voffset_t>(buf_.data() + sizeof(voffset_t),
+ static_cast<voffset_t>(table_object_size));
+ WriteScalar<voffset_t>(buf_.data(), max_voffset_);
+ // Write the offsets into the table
+ for (auto it = buf_.scratch_end() - num_field_loc * sizeof(FieldLoc); it < buf_.scratch_end();
+ it += sizeof(FieldLoc))
+ {
+ auto field_location = reinterpret_cast<FieldLoc *>(it);
+ auto pos = static_cast<voffset_t>(vtableoffsetloc - field_location->off);
+ // If this asserts, it means you've set a field twice.
+ FLATBUFFERS_ASSERT(!ReadScalar<voffset_t>(buf_.data() + field_location->id));
+ WriteScalar<voffset_t>(buf_.data() + field_location->id, pos);
+ }
+ ClearOffsets();
+ auto vt1 = reinterpret_cast<voffset_t *>(buf_.data());
+ auto vt1_size = ReadScalar<voffset_t>(vt1);
+ auto vt_use = GetSize();
+ // See if we already have generated a vtable with this exact same
+    // layout before. If so, point to the old one and remove this one.
+ if (dedup_vtables_)
+ {
+ for (auto it = buf_.scratch_data(); it < buf_.scratch_end(); it += sizeof(uoffset_t))
+ {
+ auto vt_offset_ptr = reinterpret_cast<uoffset_t *>(it);
+ auto vt2 = reinterpret_cast<voffset_t *>(buf_.data_at(*vt_offset_ptr));
+ auto vt2_size = ReadScalar<voffset_t>(vt2);
+ if (vt1_size != vt2_size || 0 != memcmp(vt2, vt1, vt1_size))
+ continue;
+ vt_use = *vt_offset_ptr;
+ buf_.pop(GetSize() - vtableoffsetloc);
+ break;
+ }
+ }
+ // If this is a new vtable, remember it.
+ if (vt_use == GetSize())
+ {
+ buf_.scratch_push_small(vt_use);
+ }
+ // Fill the vtable offset we created above.
+ // The offset points from the beginning of the object to where the
+ // vtable is stored.
+    // The default direction of offsets is downward in memory, for future format
+ // flexibility (storing all vtables at the start of the file).
+ WriteScalar(buf_.data_at(vtableoffsetloc),
+ static_cast<soffset_t>(vt_use) - static_cast<soffset_t>(vtableoffsetloc));
+
+ nested = false;
+ return vtableoffsetloc;
+ }
+
+ FLATBUFFERS_ATTRIBUTE(deprecated("call the version above instead"))
+ uoffset_t EndTable(uoffset_t start, voffset_t /*numfields*/) { return EndTable(start); }
+
+ // This checks a required field has been set in a given table that has
+ // just been constructed.
+ template <typename T> void Required(Offset<T> table, voffset_t field);
+
+ uoffset_t StartStruct(size_t alignment)
+ {
+ Align(alignment);
+ return GetSize();
+ }
+
+ uoffset_t EndStruct() { return GetSize(); }
+
+ void ClearOffsets()
+ {
+ buf_.scratch_pop(num_field_loc * sizeof(FieldLoc));
+ num_field_loc = 0;
+ max_voffset_ = 0;
+ }
+
+ // Aligns such that when "len" bytes are written, an object can be written
+ // after it with "alignment" without padding.
+ void PreAlign(size_t len, size_t alignment)
+ {
+ TrackMinAlign(alignment);
+ buf_.fill(PaddingBytes(GetSize() + len, alignment));
+ }
+ template <typename T> void PreAlign(size_t len)
+ {
+ AssertScalarT<T>();
+ PreAlign(len, sizeof(T));
+ }
+ /// @endcond
+
+ /// @brief Store a string in the buffer, which can contain any binary data.
+ /// @param[in] str A const char pointer to the data to be stored as a string.
+ /// @param[in] len The number of bytes that should be stored from `str`.
+ /// @return Returns the offset in the buffer where the string starts.
+ Offset<String> CreateString(const char *str, size_t len)
+ {
+ NotNested();
+ PreAlign<uoffset_t>(len + 1); // Always 0-terminated.
+ buf_.fill(1);
+ PushBytes(reinterpret_cast<const uint8_t *>(str), len);
+ PushElement(static_cast<uoffset_t>(len));
+ return Offset<String>(GetSize());
+ }
+
+ /// @brief Store a string in the buffer, which is null-terminated.
+ /// @param[in] str A const char pointer to a C-string to add to the buffer.
+ /// @return Returns the offset in the buffer where the string starts.
+ Offset<String> CreateString(const char *str) { return CreateString(str, strlen(str)); }
+
+ /// @brief Store a string in the buffer, which is null-terminated.
+ /// @param[in] str A char pointer to a C-string to add to the buffer.
+ /// @return Returns the offset in the buffer where the string starts.
+ Offset<String> CreateString(char *str) { return CreateString(str, strlen(str)); }
+
+ /// @brief Store a string in the buffer, which can contain any binary data.
+ /// @param[in] str A const reference to a std::string to store in the buffer.
+ /// @return Returns the offset in the buffer where the string starts.
+ Offset<String> CreateString(const std::string &str)
+ {
+ return CreateString(str.c_str(), str.length());
+ }
+#ifdef FLATBUFFERS_HAS_STRING_VIEW
+ /// @brief Store a string in the buffer, which can contain any binary data.
+ /// @param[in] str A const string_view to copy in to the buffer.
+ /// @return Returns the offset in the buffer where the string starts.
+ Offset<String> CreateString(flatbuffers::string_view str)
+ {
+ return CreateString(str.data(), str.size());
+ }
+#endif // FLATBUFFERS_HAS_STRING_VIEW
+ // clang-format on
+
+ /// @brief Store a string in the buffer, which can contain any binary data.
+ /// @param[in] str A const pointer to a `String` struct to add to the buffer.
+ /// @return Returns the offset in the buffer where the string starts
+ Offset<String> CreateString(const String *str)
+ {
+ return str ? CreateString(str->c_str(), str->size()) : 0;
+ }
+
+ /// @brief Store a string in the buffer, which can contain any binary data.
+ /// @param[in] str A const reference to a std::string like type with support
+ /// of T::c_str() and T::length() to store in the buffer.
+ /// @return Returns the offset in the buffer where the string starts.
+ template <typename T> Offset<String> CreateString(const T &str)
+ {
+ return CreateString(str.c_str(), str.length());
+ }
+
+ /// @brief Store a string in the buffer, which can contain any binary data.
+ /// If a string with this exact contents has already been serialized before,
+ /// instead simply returns the offset of the existing string.
+ /// @param[in] str A const char pointer to the data to be stored as a string.
+ /// @param[in] len The number of bytes that should be stored from `str`.
+ /// @return Returns the offset in the buffer where the string starts.
+ Offset<String> CreateSharedString(const char *str, size_t len)
+ {
+ if (!string_pool)
+ string_pool = new StringOffsetMap(StringOffsetCompare(buf_));
+ auto size_before_string = buf_.size();
+ // Must first serialize the string, since the set is all offsets into
+    // the buffer.
+ auto off = CreateString(str, len);
+ auto it = string_pool->find(off);
+ // If it exists we reuse existing serialized data!
+ if (it != string_pool->end())
+ {
+ // We can remove the string we serialized.
+ buf_.pop(buf_.size() - size_before_string);
+ return *it;
+ }
+ // Record this string for future use.
+ string_pool->insert(off);
+ return off;
+ }
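+
+  // Illustrative sketch (for some FlatBufferBuilder `fbb`): serializing the same
+  // contents twice returns the same offset, because the second call finds the
+  // first string in the pool and pops its own copy again.
+  //   auto a = fbb.CreateSharedString("abc", 3);
+  //   auto b = fbb.CreateSharedString("abc", 3);   // a.o == b.o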
+
+#ifdef FLATBUFFERS_HAS_STRING_VIEW
+ /// @brief Store a string in the buffer, which can contain any binary data.
+ /// If a string with this exact contents has already been serialized before,
+ /// instead simply returns the offset of the existing string.
+ /// @param[in] str A const std::string_view to store in the buffer.
+ /// @return Returns the offset in the buffer where the string starts
+ Offset<String> CreateSharedString(const flatbuffers::string_view str)
+ {
+ return CreateSharedString(str.data(), str.size());
+ }
+#else
+  /// @brief Store a string in the buffer, which is null-terminated.
+ /// If a string with this exact contents has already been serialized before,
+ /// instead simply returns the offset of the existing string.
+ /// @param[in] str A const char pointer to a C-string to add to the buffer.
+ /// @return Returns the offset in the buffer where the string starts.
+ Offset<String> CreateSharedString(const char *str)
+ {
+ return CreateSharedString(str, strlen(str));
+ }
+
+ /// @brief Store a string in the buffer, which can contain any binary data.
+ /// If a string with this exact contents has already been serialized before,
+ /// instead simply returns the offset of the existing string.
+ /// @param[in] str A const reference to a std::string to store in the buffer.
+ /// @return Returns the offset in the buffer where the string starts.
+ Offset<String> CreateSharedString(const std::string &str)
+ {
+ return CreateSharedString(str.c_str(), str.length());
+ }
+#endif
+
+ /// @brief Store a string in the buffer, which can contain any binary data.
+ /// If a string with this exact contents has already been serialized before,
+ /// instead simply returns the offset of the existing string.
+ /// @param[in] str A const pointer to a `String` struct to add to the buffer.
+ /// @return Returns the offset in the buffer where the string starts
+ Offset<String> CreateSharedString(const String *str)
+ {
+ return CreateSharedString(str->c_str(), str->size());
+ }
+
+ /// @cond FLATBUFFERS_INTERNAL
+ uoffset_t EndVector(size_t len)
+ {
+ FLATBUFFERS_ASSERT(nested); // Hit if no corresponding StartVector.
+ nested = false;
+ return PushElement(static_cast<uoffset_t>(len));
+ }
+
+ void StartVector(size_t len, size_t elemsize)
+ {
+ NotNested();
+ nested = true;
+ PreAlign<uoffset_t>(len * elemsize);
+ PreAlign(len * elemsize, elemsize); // Just in case elemsize > uoffset_t.
+ }
+
+ // Call this right before StartVector/CreateVector if you want to force the
+ // alignment to be something different than what the element size would
+ // normally dictate.
+ // This is useful when storing a nested_flatbuffer in a vector of bytes,
+ // or when storing SIMD floats, etc.
+ void ForceVectorAlignment(size_t len, size_t elemsize, size_t alignment)
+ {
+ FLATBUFFERS_ASSERT(VerifyAlignmentRequirements(alignment));
+ PreAlign(len * elemsize, alignment);
+ }
+
+ // Similar to ForceVectorAlignment but for String fields.
+ void ForceStringAlignment(size_t len, size_t alignment)
+ {
+ FLATBUFFERS_ASSERT(VerifyAlignmentRequirements(alignment));
+ PreAlign((len + 1) * sizeof(char), alignment);
+ }
+
+ /// @endcond
+
+ /// @brief Serialize an array into a FlatBuffer `vector`.
+ /// @tparam T The data type of the array elements.
+ /// @param[in] v A pointer to the array of type `T` to serialize into the
+ /// buffer as a `vector`.
+ /// @param[in] len The number of elements to serialize.
+ /// @return Returns a typed `Offset` into the serialized data indicating
+ /// where the vector is stored.
+ template <typename T> Offset<Vector<T>> CreateVector(const T *v, size_t len)
+ {
+ // If this assert hits, you're specifying a template argument that is
+    // causing the wrong overload to be selected; remove it.
+ AssertScalarT<T>();
+ StartVector(len, sizeof(T));
+ if (len == 0)
+ {
+ return Offset<Vector<T>>(EndVector(len));
+ }
+
+#if FLATBUFFERS_LITTLEENDIAN
+ PushBytes(reinterpret_cast<const uint8_t *>(v), len * sizeof(T));
+#else
+ if (sizeof(T) == 1)
+ {
+ PushBytes(reinterpret_cast<const uint8_t *>(v), len);
+ }
+ else
+ {
+ for (auto i = len; i > 0;)
+ {
+ PushElement(v[--i]);
+ }
+ }
+#endif
+ // clang-format on
+ return Offset<Vector<T>>(EndVector(len));
+ }
+
+ template <typename T> Offset<Vector<Offset<T>>> CreateVector(const Offset<T> *v, size_t len)
+ {
+ StartVector(len, sizeof(Offset<T>));
+ for (auto i = len; i > 0;)
+ {
+ PushElement(v[--i]);
+ }
+ return Offset<Vector<Offset<T>>>(EndVector(len));
+ }
+
+ /// @brief Serialize a `std::vector` into a FlatBuffer `vector`.
+ /// @tparam T The data type of the `std::vector` elements.
+ /// @param v A const reference to the `std::vector` to serialize into the
+ /// buffer as a `vector`.
+ /// @return Returns a typed `Offset` into the serialized data indicating
+ /// where the vector is stored.
+ template <typename T> Offset<Vector<T>> CreateVector(const std::vector<T> &v)
+ {
+ return CreateVector(data(v), v.size());
+ }
+
+ // vector<bool> may be implemented using a bit-set, so we can't access it as
+ // an array. Instead, read elements manually.
+ // Background: https://isocpp.org/blog/2012/11/on-vectorbool
+ Offset<Vector<uint8_t>> CreateVector(const std::vector<bool> &v)
+ {
+ StartVector(v.size(), sizeof(uint8_t));
+ for (auto i = v.size(); i > 0;)
+ {
+ PushElement(static_cast<uint8_t>(v[--i]));
+ }
+ return Offset<Vector<uint8_t>>(EndVector(v.size()));
+ }
+
+#ifndef FLATBUFFERS_CPP98_STL
+ /// @brief Serialize values returned by a function into a FlatBuffer `vector`.
+ /// This is a convenience function that takes care of iteration for you.
+ /// @tparam T The data type of the `std::vector` elements.
+ /// @param f A function that takes the current iteration 0..vector_size-1 and
+ /// returns any type that you can construct a FlatBuffers vector out of.
+ /// @return Returns a typed `Offset` into the serialized data indicating
+ /// where the vector is stored.
+ template <typename T>
+ Offset<Vector<T>> CreateVector(size_t vector_size, const std::function<T(size_t i)> &f)
+ {
+ std::vector<T> elems(vector_size);
+ for (size_t i = 0; i < vector_size; i++)
+ elems[i] = f(i);
+ return CreateVector(elems);
+ }
+#endif
+ // clang-format on
+
+ /// @brief Serialize values returned by a function into a FlatBuffer `vector`.
+ /// This is a convenience function that takes care of iteration for you.
+ /// @tparam T The data type of the `std::vector` elements.
+ /// @param f A function that takes the current iteration 0..vector_size-1,
+  /// and the state parameter, and returns any type that you can construct a
+ /// FlatBuffers vector out of.
+ /// @param state State passed to f.
+ /// @return Returns a typed `Offset` into the serialized data indicating
+ /// where the vector is stored.
+ template <typename T, typename F, typename S>
+ Offset<Vector<T>> CreateVector(size_t vector_size, F f, S *state)
+ {
+ std::vector<T> elems(vector_size);
+ for (size_t i = 0; i < vector_size; i++)
+ elems[i] = f(i, state);
+ return CreateVector(elems);
+ }
+
+ /// @brief Serialize a `std::vector<std::string>` into a FlatBuffer `vector`.
+ /// This is a convenience function for a common case.
+ /// @param v A const reference to the `std::vector` to serialize into the
+ /// buffer as a `vector`.
+ /// @return Returns a typed `Offset` into the serialized data indicating
+ /// where the vector is stored.
+ Offset<Vector<Offset<String>>> CreateVectorOfStrings(const std::vector<std::string> &v)
+ {
+ std::vector<Offset<String>> offsets(v.size());
+ for (size_t i = 0; i < v.size(); i++)
+ offsets[i] = CreateString(v[i]);
+ return CreateVector(offsets);
+ }
+
+ /// @brief Serialize an array of structs into a FlatBuffer `vector`.
+ /// @tparam T The data type of the struct array elements.
+ /// @param[in] v A pointer to the array of type `T` to serialize into the
+ /// buffer as a `vector`.
+ /// @param[in] len The number of elements to serialize.
+ /// @return Returns a typed `Offset` into the serialized data indicating
+ /// where the vector is stored.
+ template <typename T> Offset<Vector<const T *>> CreateVectorOfStructs(const T *v, size_t len)
+ {
+ StartVector(len * sizeof(T) / AlignOf<T>(), AlignOf<T>());
+ PushBytes(reinterpret_cast<const uint8_t *>(v), sizeof(T) * len);
+ return Offset<Vector<const T *>>(EndVector(len));
+ }
+
+ /// @brief Serialize an array of native structs into a FlatBuffer `vector`.
+ /// @tparam T The data type of the struct array elements.
+ /// @tparam S The data type of the native struct array elements.
+ /// @param[in] v A pointer to the array of type `S` to serialize into the
+ /// buffer as a `vector`.
+ /// @param[in] len The number of elements to serialize.
+ /// @param[in] pack_func Pointer to a function to convert the native struct
+ /// to the FlatBuffer struct.
+ /// @return Returns a typed `Offset` into the serialized data indicating
+ /// where the vector is stored.
+ template <typename T, typename S>
+ Offset<Vector<const T *>> CreateVectorOfNativeStructs(const S *v, size_t len,
+ T((*const pack_func)(const S &)))
+ {
+ FLATBUFFERS_ASSERT(pack_func);
+ std::vector<T> vv(len);
+ std::transform(v, v + len, vv.begin(), pack_func);
+ return CreateVectorOfStructs<T>(data(vv), vv.size());
+ }
+
+ /// @brief Serialize an array of native structs into a FlatBuffer `vector`.
+ /// @tparam T The data type of the struct array elements.
+ /// @tparam S The data type of the native struct array elements.
+ /// @param[in] v A pointer to the array of type `S` to serialize into the
+ /// buffer as a `vector`.
+ /// @param[in] len The number of elements to serialize.
+ /// @return Returns a typed `Offset` into the serialized data indicating
+ /// where the vector is stored.
+ template <typename T, typename S>
+ Offset<Vector<const T *>> CreateVectorOfNativeStructs(const S *v, size_t len)
+ {
+ extern T Pack(const S &);
+ return CreateVectorOfNativeStructs(v, len, Pack);
+ }
+
+#ifndef FLATBUFFERS_CPP98_STL
+ /// @brief Serialize an array of structs into a FlatBuffer `vector`.
+ /// @tparam T The data type of the struct array elements.
+ /// @param[in] filler A function that takes the current iteration 0..vector_size-1
+ /// and a pointer to the struct that must be filled.
+ /// @return Returns a typed `Offset` into the serialized data indicating
+ /// where the vector is stored.
+ /// This is mostly useful when flatbuffers are generated with mutation
+ /// accessors.
+ template <typename T>
+ Offset<Vector<const T *>> CreateVectorOfStructs(size_t vector_size,
+ const std::function<void(size_t i, T *)> &filler)
+ {
+ T *structs = StartVectorOfStructs<T>(vector_size);
+ for (size_t i = 0; i < vector_size; i++)
+ {
+ filler(i, structs);
+ structs++;
+ }
+ return EndVectorOfStructs<T>(vector_size);
+ }
+#endif
+ // clang-format on
+
+ /// @brief Serialize an array of structs into a FlatBuffer `vector`.
+ /// @tparam T The data type of the struct array elements.
+ /// @param[in] f A function that takes the current iteration 0..vector_size-1,
+ /// a pointer to the struct that must be filled and the state argument.
+ /// @param[in] state Arbitrary state to pass to f.
+ /// @return Returns a typed `Offset` into the serialized data indicating
+ /// where the vector is stored.
+ /// This is mostly useful when flatbuffers are generated with mutation
+ /// accessors.
+ template <typename T, typename F, typename S>
+ Offset<Vector<const T *>> CreateVectorOfStructs(size_t vector_size, F f, S *state)
+ {
+ T *structs = StartVectorOfStructs<T>(vector_size);
+ for (size_t i = 0; i < vector_size; i++)
+ {
+ f(i, structs, state);
+ structs++;
+ }
+ return EndVectorOfStructs<T>(vector_size);
+ }
+
+ /// @brief Serialize a `std::vector` of structs into a FlatBuffer `vector`.
+ /// @tparam T The data type of the `std::vector` struct elements.
+ /// @param[in] v A const reference to the `std::vector` of structs to
+ /// serialize into the buffer as a `vector`.
+ /// @return Returns a typed `Offset` into the serialized data indicating
+ /// where the vector is stored.
+ template <typename T, typename Alloc>
+ Offset<Vector<const T *>> CreateVectorOfStructs(const std::vector<T, Alloc> &v)
+ {
+ return CreateVectorOfStructs(data(v), v.size());
+ }
+
+ /// @brief Serialize a `std::vector` of native structs into a FlatBuffer
+ /// `vector`.
+ /// @tparam T The data type of the `std::vector` struct elements.
+ /// @tparam S The data type of the `std::vector` native struct elements.
+ /// @param[in] v A const reference to the `std::vector` of structs to
+ /// serialize into the buffer as a `vector`.
+ /// @param[in] pack_func Pointer to a function to convert the native struct
+ /// to the FlatBuffer struct.
+ /// @return Returns a typed `Offset` into the serialized data indicating
+ /// where the vector is stored.
+ template <typename T, typename S>
+ Offset<Vector<const T *>> CreateVectorOfNativeStructs(const std::vector<S> &v,
+ T((*const pack_func)(const S &)))
+ {
+ return CreateVectorOfNativeStructs<T, S>(data(v), v.size(), pack_func);
+ }
+
+ /// @brief Serialize a `std::vector` of native structs into a FlatBuffer
+ /// `vector`.
+ /// @tparam T The data type of the `std::vector` struct elements.
+ /// @tparam S The data type of the `std::vector` native struct elements.
+ /// @param[in] v A const reference to the `std::vector` of structs to
+ /// serialize into the buffer as a `vector`.
+ /// @return Returns a typed `Offset` into the serialized data indicating
+ /// where the vector is stored.
+ template <typename T, typename S>
+ Offset<Vector<const T *>> CreateVectorOfNativeStructs(const std::vector<S> &v)
+ {
+ return CreateVectorOfNativeStructs<T, S>(data(v), v.size());
+ }
+
+ /// @cond FLATBUFFERS_INTERNAL
+ template <typename T> struct StructKeyComparator
+ {
+ bool operator()(const T &a, const T &b) const { return a.KeyCompareLessThan(&b); }
+
+ FLATBUFFERS_DELETE_FUNC(StructKeyComparator &operator=(const StructKeyComparator &));
+ };
+ /// @endcond
+
+ /// @brief Serialize a `std::vector` of structs into a FlatBuffer `vector`
+ /// in sorted order.
+ /// @tparam T The data type of the `std::vector` struct elements.
+ /// @param[in] v A const reference to the `std::vector` of structs to
+ /// serialize into the buffer as a `vector`.
+ /// @return Returns a typed `Offset` into the serialized data indicating
+ /// where the vector is stored.
+ template <typename T> Offset<Vector<const T *>> CreateVectorOfSortedStructs(std::vector<T> *v)
+ {
+ return CreateVectorOfSortedStructs(data(*v), v->size());
+ }
+
+ /// @brief Serialize a `std::vector` of native structs into a FlatBuffer
+ /// `vector` in sorted order.
+ /// @tparam T The data type of the `std::vector` struct elements.
+ /// @tparam S The data type of the `std::vector` native struct elements.
+ /// @param[in] v A const reference to the `std::vector` of structs to
+ /// serialize into the buffer as a `vector`.
+ /// @return Returns a typed `Offset` into the serialized data indicating
+ /// where the vector is stored.
+ template <typename T, typename S>
+ Offset<Vector<const T *>> CreateVectorOfSortedNativeStructs(std::vector<S> *v)
+ {
+ return CreateVectorOfSortedNativeStructs<T, S>(data(*v), v->size());
+ }
+
+ /// @brief Serialize an array of structs into a FlatBuffer `vector` in sorted
+ /// order.
+ /// @tparam T The data type of the struct array elements.
+ /// @param[in] v A pointer to the array of type `T` to serialize into the
+ /// buffer as a `vector`.
+ /// @param[in] len The number of elements to serialize.
+ /// @return Returns a typed `Offset` into the serialized data indicating
+ /// where the vector is stored.
+ template <typename T> Offset<Vector<const T *>> CreateVectorOfSortedStructs(T *v, size_t len)
+ {
+ std::sort(v, v + len, StructKeyComparator<T>());
+ return CreateVectorOfStructs(v, len);
+ }
+
+ /// @brief Serialize an array of native structs into a FlatBuffer `vector` in
+ /// sorted order.
+ /// @tparam T The data type of the struct array elements.
+ /// @tparam S The data type of the native struct array elements.
+ /// @param[in] v A pointer to the array of type `S` to serialize into the
+ /// buffer as a `vector`.
+ /// @param[in] len The number of elements to serialize.
+ /// @return Returns a typed `Offset` into the serialized data indicating
+ /// where the vector is stored.
+ template <typename T, typename S>
+ Offset<Vector<const T *>> CreateVectorOfSortedNativeStructs(S *v, size_t len)
+ {
+ extern T Pack(const S &);
+ typedef T (*Pack_t)(const S &);
+ std::vector<T> vv(len);
+    std::transform(v, v + len, vv.begin(), static_cast<Pack_t>(Pack));
+    return CreateVectorOfSortedStructs<T>(data(vv), len);
+ }
+
+ /// @cond FLATBUFFERS_INTERNAL
+ template <typename T> struct TableKeyComparator
+ {
+ TableKeyComparator(vector_downward &buf) : buf_(buf) {}
+ TableKeyComparator(const TableKeyComparator &other) : buf_(other.buf_) {}
+ bool operator()(const Offset<T> &a, const Offset<T> &b) const
+ {
+ auto table_a = reinterpret_cast<T *>(buf_.data_at(a.o));
+ auto table_b = reinterpret_cast<T *>(buf_.data_at(b.o));
+ return table_a->KeyCompareLessThan(table_b);
+ }
+ vector_downward &buf_;
+
+ private:
+ FLATBUFFERS_DELETE_FUNC(TableKeyComparator &operator=(const TableKeyComparator &other));
+ };
+ /// @endcond
+
+ /// @brief Serialize an array of `table` offsets as a `vector` in the buffer
+ /// in sorted order.
+ /// @tparam T The data type that the offset refers to.
+ /// @param[in] v An array of type `Offset<T>` that contains the `table`
+ /// offsets to store in the buffer in sorted order.
+ /// @param[in] len The number of elements to store in the `vector`.
+ /// @return Returns a typed `Offset` into the serialized data indicating
+ /// where the vector is stored.
+ template <typename T>
+ Offset<Vector<Offset<T>>> CreateVectorOfSortedTables(Offset<T> *v, size_t len)
+ {
+ std::sort(v, v + len, TableKeyComparator<T>(buf_));
+ return CreateVector(v, len);
+ }
+
+ /// @brief Serialize an array of `table` offsets as a `vector` in the buffer
+ /// in sorted order.
+ /// @tparam T The data type that the offset refers to.
+ /// @param[in] v An array of type `Offset<T>` that contains the `table`
+ /// offsets to store in the buffer in sorted order.
+ /// @return Returns a typed `Offset` into the serialized data indicating
+ /// where the vector is stored.
+ template <typename T>
+ Offset<Vector<Offset<T>>> CreateVectorOfSortedTables(std::vector<Offset<T>> *v)
+ {
+ return CreateVectorOfSortedTables(data(*v), v->size());
+ }
+
+ /// @brief Specialized version of `CreateVector` for non-copying use cases.
+ /// Write the data any time later to the returned buffer pointer `buf`.
+ /// @param[in] len The number of elements to store in the `vector`.
+ /// @param[in] elemsize The size of each element in the `vector`.
+ /// @param[out] buf A pointer to a `uint8_t` pointer that can be
+ /// written to at a later time to serialize the data into a `vector`
+ /// in the buffer.
+ uoffset_t CreateUninitializedVector(size_t len, size_t elemsize, uint8_t **buf)
+ {
+ NotNested();
+ StartVector(len, elemsize);
+ buf_.make_space(len * elemsize);
+ auto vec_start = GetSize();
+ auto vec_end = EndVector(len);
+ *buf = buf_.data_at(vec_start);
+ return vec_end;
+ }
+
+ /// @brief Specialized version of `CreateVector` for non-copying use cases.
+ /// Write the data any time later to the returned buffer pointer `buf`.
+ /// @tparam T The data type of the data that will be stored in the buffer
+ /// as a `vector`.
+ /// @param[in] len The number of elements to store in the `vector`.
+ /// @param[out] buf A pointer to a pointer of type `T` that can be
+ /// written to at a later time to serialize the data into a `vector`
+ /// in the buffer.
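+  /// A hedged usage sketch (for some FlatBufferBuilder `fbb`): fill the returned
+  /// pointer before adding anything else to the builder, since later allocations
+  /// may move the underlying buffer.
+  /// @code
+  ///   int32_t *dst = nullptr;
+  ///   auto vec = fbb.CreateUninitializedVector<int32_t>(4, &dst);
+  ///   for (int i = 0; i < 4; i++) dst[i] = i;
+  /// @endcode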
+ template <typename T> Offset<Vector<T>> CreateUninitializedVector(size_t len, T **buf)
+ {
+ AssertScalarT<T>();
+ return CreateUninitializedVector(len, sizeof(T), reinterpret_cast<uint8_t **>(buf));
+ }
+
+ template <typename T>
+ Offset<Vector<const T *>> CreateUninitializedVectorOfStructs(size_t len, T **buf)
+ {
+ return CreateUninitializedVector(len, sizeof(T), reinterpret_cast<uint8_t **>(buf));
+ }
+
+  /// @brief Create a vector of scalar type T given as input a vector of scalar
+  /// type U, useful with e.g. pre "enum class" enums, or any existing scalar
+  /// data of the wrong type.
+ template <typename T, typename U> Offset<Vector<T>> CreateVectorScalarCast(const U *v, size_t len)
+ {
+ AssertScalarT<T>();
+ AssertScalarT<U>();
+ StartVector(len, sizeof(T));
+ for (auto i = len; i > 0;)
+ {
+ PushElement(static_cast<T>(v[--i]));
+ }
+ return Offset<Vector<T>>(EndVector(len));
+ }
+
+ /// @brief Write a struct by itself, typically to be part of a union.
+ template <typename T> Offset<const T *> CreateStruct(const T &structobj)
+ {
+ NotNested();
+ Align(AlignOf<T>());
+ buf_.push_small(structobj);
+ return Offset<const T *>(GetSize());
+ }
+
+  /// @brief The length of a FlatBuffer file identifier.
+ static const size_t kFileIdentifierLength = 4;
+
+ /// @brief Finish serializing a buffer by writing the root offset.
+ /// @param[in] file_identifier If a `file_identifier` is given, the buffer
+ /// will be prefixed with a standard FlatBuffers file header.
+ template <typename T> void Finish(Offset<T> root, const char *file_identifier = nullptr)
+ {
+ Finish(root.o, file_identifier, false);
+ }
+
+ /// @brief Finish a buffer with a 32 bit size field pre-fixed (size of the
+ /// buffer following the size field). These buffers are NOT compatible
+ /// with standard buffers created by Finish, i.e. you can't call GetRoot
+ /// on them, you have to use GetSizePrefixedRoot instead.
+ /// All >32 bit quantities in this buffer will be aligned when the whole
+ /// size pre-fixed buffer is aligned.
+ /// These kinds of buffers are useful for creating a stream of FlatBuffers.
+ template <typename T>
+ void FinishSizePrefixed(Offset<T> root, const char *file_identifier = nullptr)
+ {
+ Finish(root.o, file_identifier, true);
+ }
+
+ void SwapBufAllocator(FlatBufferBuilder &other) { buf_.swap_allocator(other.buf_); }
+
+protected:
+ // You shouldn't really be copying instances of this class.
+ FlatBufferBuilder(const FlatBufferBuilder &);
+ FlatBufferBuilder &operator=(const FlatBufferBuilder &);
+
+ void Finish(uoffset_t root, const char *file_identifier, bool size_prefix)
+ {
+ NotNested();
+ buf_.clear_scratch();
+ // This will cause the whole buffer to be aligned.
+ PreAlign((size_prefix ? sizeof(uoffset_t) : 0) + sizeof(uoffset_t) +
+ (file_identifier ? kFileIdentifierLength : 0),
+ minalign_);
+ if (file_identifier)
+ {
+ FLATBUFFERS_ASSERT(strlen(file_identifier) == kFileIdentifierLength);
+ PushBytes(reinterpret_cast<const uint8_t *>(file_identifier), kFileIdentifierLength);
+ }
+ PushElement(ReferTo(root)); // Location of root.
+ if (size_prefix)
+ {
+ PushElement(GetSize());
+ }
+ finished = true;
+ }
+
+ struct FieldLoc
+ {
+ uoffset_t off;
+ voffset_t id;
+ };
+
+ vector_downward buf_;
+
+ // Accumulating offsets of table members while it is being built.
+ // We store these in the scratch pad of buf_, after the vtable offsets.
+ uoffset_t num_field_loc;
+ // Track how much of the vtable is in use, so we can output the most compact
+ // possible vtable.
+ voffset_t max_voffset_;
+
+ // Ensure objects are not nested.
+ bool nested;
+
+ // Ensure the buffer is finished before it is being accessed.
+ bool finished;
+
+ size_t minalign_;
+
+ bool force_defaults_; // Serialize values equal to their defaults anyway.
+
+ bool dedup_vtables_;
+
+ struct StringOffsetCompare
+ {
+ StringOffsetCompare(const vector_downward &buf) : buf_(&buf) {}
+ bool operator()(const Offset<String> &a, const Offset<String> &b) const
+ {
+ auto stra = reinterpret_cast<const String *>(buf_->data_at(a.o));
+ auto strb = reinterpret_cast<const String *>(buf_->data_at(b.o));
+ return StringLessThan(stra->data(), stra->size(), strb->data(), strb->size());
+ }
+ const vector_downward *buf_;
+ };
+
+ // For use with CreateSharedString. Instantiated on first use only.
+ typedef std::set<Offset<String>, StringOffsetCompare> StringOffsetMap;
+ StringOffsetMap *string_pool;
+
+private:
+ // Allocates space for a vector of structures.
+ // Must be completed with EndVectorOfStructs().
+ template <typename T> T *StartVectorOfStructs(size_t vector_size)
+ {
+ StartVector(vector_size * sizeof(T) / AlignOf<T>(), AlignOf<T>());
+ return reinterpret_cast<T *>(buf_.make_space(vector_size * sizeof(T)));
+ }
+
+  // End the vector of structures in the flatbuffers.
+  // The vector should have previously been started with StartVectorOfStructs().
+ template <typename T> Offset<Vector<const T *>> EndVectorOfStructs(size_t vector_size)
+ {
+ return Offset<Vector<const T *>>(EndVector(vector_size));
+ }
+};
+/// @}
+
+/// @cond FLATBUFFERS_INTERNAL
+// Helpers to get a typed pointer to the root object contained in the buffer.
+template <typename T> T *GetMutableRoot(void *buf)
+{
+ EndianCheck();
+ return reinterpret_cast<T *>(reinterpret_cast<uint8_t *>(buf) +
+ EndianScalar(*reinterpret_cast<uoffset_t *>(buf)));
+}
+
+template <typename T> const T *GetRoot(const void *buf)
+{
+ return GetMutableRoot<T>(const_cast<void *>(buf));
+}
+
+template <typename T> const T *GetSizePrefixedRoot(const void *buf)
+{
+ return GetRoot<T>(reinterpret_cast<const uint8_t *>(buf) + sizeof(uoffset_t));
+}
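+
+// Illustrative sketch (`MyTable` stands for any generated root type, and `buf`
+// for a pointer to a finished buffer):
+//   const MyTable *root = flatbuffers::GetRoot<MyTable>(buf);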
+
+/// Helpers to get a typed pointer to objects that are currently being built.
+/// @warning Creating new objects will lead to reallocations and invalidate
+/// the pointer!
+template <typename T> T *GetMutableTemporaryPointer(FlatBufferBuilder &fbb, Offset<T> offset)
+{
+ return reinterpret_cast<T *>(fbb.GetCurrentBufferPointer() + fbb.GetSize() - offset.o);
+}
+
+template <typename T> const T *GetTemporaryPointer(FlatBufferBuilder &fbb, Offset<T> offset)
+{
+ return GetMutableTemporaryPointer<T>(fbb, offset);
+}
+
+/// @brief Get a pointer to the file_identifier section of the buffer.
+/// @return Returns a const char pointer to the start of the file_identifier
+/// characters in the buffer. The returned char * has length
+/// 'flatbuffers::FlatBufferBuilder::kFileIdentifierLength'.
+/// This function is UNDEFINED for FlatBuffers whose schema does not include
+/// a file_identifier (likely points at padding or the start of the root
+/// vtable).
+inline const char *GetBufferIdentifier(const void *buf, bool size_prefixed = false)
+{
+ return reinterpret_cast<const char *>(buf) +
+ ((size_prefixed) ? 2 * sizeof(uoffset_t) : sizeof(uoffset_t));
+}
+
+// Helper to see if the identifier in a buffer has the expected value.
+inline bool BufferHasIdentifier(const void *buf, const char *identifier, bool size_prefixed = false)
+{
+ return strncmp(GetBufferIdentifier(buf, size_prefixed), identifier,
+ FlatBufferBuilder::kFileIdentifierLength) == 0;
+}
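+
+// For example (sketch): `BufferHasIdentifier(buf, "MONS")` checks for the
+// 4-character identifier "MONS" stored right after the root offset.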
+
+// Helper class to verify the integrity of a FlatBuffer
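+// Typical use (sketch; `MyTable` stands for any generated root type):
+//   flatbuffers::Verifier verifier(buf, buf_len);
+//   bool ok = verifier.VerifyBuffer<MyTable>(nullptr);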
+class Verifier FLATBUFFERS_FINAL_CLASS
+{
+public:
+ Verifier(const uint8_t *buf, size_t buf_len, uoffset_t _max_depth = 64,
+ uoffset_t _max_tables = 1000000, bool _check_alignment = true)
+ : buf_(buf), size_(buf_len), depth_(0), max_depth_(_max_depth), num_tables_(0),
+ max_tables_(_max_tables), upper_bound_(0), check_alignment_(_check_alignment)
+ {
+ FLATBUFFERS_ASSERT(size_ < FLATBUFFERS_MAX_BUFFER_SIZE);
+ }
+
+ // Central location where any verification failures register.
+ bool Check(bool ok) const
+ {
+#ifdef FLATBUFFERS_DEBUG_VERIFICATION_FAILURE
+ FLATBUFFERS_ASSERT(ok);
+#endif
+#ifdef FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE
+ if (!ok)
+ upper_bound_ = 0;
+#endif
+ // clang-format on
+ return ok;
+ }
+
+ // Verify any range within the buffer.
+ bool Verify(size_t elem, size_t elem_len) const
+ {
+#ifdef FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE
+ auto upper_bound = elem + elem_len;
+ if (upper_bound_ < upper_bound)
+ upper_bound_ = upper_bound;
+#endif
+ // clang-format on
+ return Check(elem_len < size_ && elem <= size_ - elem_len);
+ }
+
+ template <typename T> bool VerifyAlignment(size_t elem) const
+ {
+ return Check((elem & (sizeof(T) - 1)) == 0 || !check_alignment_);
+ }
+
+ // Verify a range indicated by sizeof(T).
+ template <typename T> bool Verify(size_t elem) const
+ {
+ return VerifyAlignment<T>(elem) && Verify(elem, sizeof(T));
+ }
+
+ bool VerifyFromPointer(const uint8_t *p, size_t len)
+ {
+ auto o = static_cast<size_t>(p - buf_);
+ return Verify(o, len);
+ }
+
+ // Verify relative to a known-good base pointer.
+ bool Verify(const uint8_t *base, voffset_t elem_off, size_t elem_len) const
+ {
+ return Verify(static_cast<size_t>(base - buf_) + elem_off, elem_len);
+ }
+
+ template <typename T> bool Verify(const uint8_t *base, voffset_t elem_off) const
+ {
+ return Verify(static_cast<size_t>(base - buf_) + elem_off, sizeof(T));
+ }
+
+ // Verify a pointer (may be NULL) of a table type.
+ template <typename T> bool VerifyTable(const T *table) { return !table || table->Verify(*this); }
+
+ // Verify a pointer (may be NULL) of any vector type.
+ template <typename T> bool VerifyVector(const Vector<T> *vec) const
+ {
+ return !vec || VerifyVectorOrString(reinterpret_cast<const uint8_t *>(vec), sizeof(T));
+ }
+
+ // Verify a pointer (may be NULL) of a vector to struct.
+ template <typename T> bool VerifyVector(const Vector<const T *> *vec) const
+ {
+ return VerifyVector(reinterpret_cast<const Vector<T> *>(vec));
+ }
+
+ // Verify a pointer (may be NULL) to string.
+ bool VerifyString(const String *str) const
+ {
+ size_t end;
+ return !str || (VerifyVectorOrString(reinterpret_cast<const uint8_t *>(str), 1, &end) &&
+ Verify(end, 1) && // Must have terminator
+ Check(buf_[end] == '\0')); // Terminating byte must be 0.
+ }
+
+ // Common code between vectors and strings.
+ bool VerifyVectorOrString(const uint8_t *vec, size_t elem_size, size_t *end = nullptr) const
+ {
+ auto veco = static_cast<size_t>(vec - buf_);
+ // Check we can read the size field.
+ if (!Verify<uoffset_t>(veco))
+ return false;
+ // Check the whole array. If this is a string, the byte past the array
+ // must be 0.
+ auto size = ReadScalar<uoffset_t>(vec);
+ auto max_elems = FLATBUFFERS_MAX_BUFFER_SIZE / elem_size;
+ if (!Check(size < max_elems))
+ return false; // Protect against byte_size overflowing.
+ auto byte_size = sizeof(size) + elem_size * size;
+ if (end)
+ *end = veco + byte_size;
+ return Verify(veco, byte_size);
+ }
+
+ // Special case for string contents, after the above has been called.
+ bool VerifyVectorOfStrings(const Vector<Offset<String>> *vec) const
+ {
+ if (vec)
+ {
+ for (uoffset_t i = 0; i < vec->size(); i++)
+ {
+ if (!VerifyString(vec->Get(i)))
+ return false;
+ }
+ }
+ return true;
+ }
+
+ // Special case for table contents, after the above has been called.
+ template <typename T> bool VerifyVectorOfTables(const Vector<Offset<T>> *vec)
+ {
+ if (vec)
+ {
+ for (uoffset_t i = 0; i < vec->size(); i++)
+ {
+ if (!vec->Get(i)->Verify(*this))
+ return false;
+ }
+ }
+ return true;
+ }
+
+ __supress_ubsan__("unsigned-integer-overflow") bool VerifyTableStart(const uint8_t *table)
+ {
+ // Check the vtable offset.
+ auto tableo = static_cast<size_t>(table - buf_);
+ if (!Verify<soffset_t>(tableo))
+ return false;
+ // This offset may be signed, but doing the subtraction unsigned always
+ // gives the result we want.
+ auto vtableo = tableo - static_cast<size_t>(ReadScalar<soffset_t>(table));
+ // Check the vtable size field, then check vtable fits in its entirety.
+ return VerifyComplexity() && Verify<voffset_t>(vtableo) &&
+ VerifyAlignment<voffset_t>(ReadScalar<voffset_t>(buf_ + vtableo)) &&
+ Verify(vtableo, ReadScalar<voffset_t>(buf_ + vtableo));
+ }
+
+ template <typename T> bool VerifyBufferFromStart(const char *identifier, size_t start)
+ {
+ if (identifier && !Check((size_ >= 2 * sizeof(flatbuffers::uoffset_t) &&
+ BufferHasIdentifier(buf_ + start, identifier))))
+ {
+ return false;
+ }
+
+ // Call T::Verify, which must be in the generated code for this type.
+ auto o = VerifyOffset(start);
+ return o && reinterpret_cast<const T *>(buf_ + start + o)->Verify(*this)
+#ifdef FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE
+ && GetComputedSize()
+#endif
+ ;
+ // clang-format on
+ }
+
+ // Verify this whole buffer, starting with root type T.
+ template <typename T> bool VerifyBuffer() { return VerifyBuffer<T>(nullptr); }
+
+ template <typename T> bool VerifyBuffer(const char *identifier)
+ {
+ return VerifyBufferFromStart<T>(identifier, 0);
+ }
+
+ template <typename T> bool VerifySizePrefixedBuffer(const char *identifier)
+ {
+ return Verify<uoffset_t>(0U) && ReadScalar<uoffset_t>(buf_) == size_ - sizeof(uoffset_t) &&
+ VerifyBufferFromStart<T>(identifier, sizeof(uoffset_t));
+ }
+
+ uoffset_t VerifyOffset(size_t start) const
+ {
+ if (!Verify<uoffset_t>(start))
+ return 0;
+ auto o = ReadScalar<uoffset_t>(buf_ + start);
+ // May not point to itself.
+ if (!Check(o != 0))
+ return 0;
+ // Can't wrap around / buffers are max 2GB.
+ if (!Check(static_cast<soffset_t>(o) >= 0))
+ return 0;
+ // Must be inside the buffer to create a pointer from it (pointer outside
+ // buffer is UB).
+ if (!Verify(start + o, 1))
+ return 0;
+ return o;
+ }
+
+ uoffset_t VerifyOffset(const uint8_t *base, voffset_t start) const
+ {
+ return VerifyOffset(static_cast<size_t>(base - buf_) + start);
+ }
+
+ // Called at the start of a table to increase counters measuring data
+ // structure depth and amount, and possibly bails out with false if
+ // limits set by the constructor have been hit. Needs to be balanced
+ // with EndTable().
+ bool VerifyComplexity()
+ {
+ depth_++;
+ num_tables_++;
+ return Check(depth_ <= max_depth_ && num_tables_ <= max_tables_);
+ }
+
+ // Called at the end of a table to pop the depth count.
+ bool EndTable()
+ {
+ depth_--;
+ return true;
+ }
+
+ // Returns the message size in bytes
+ size_t GetComputedSize() const
+ {
+#ifdef FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE
+ uintptr_t size = upper_bound_;
+ // Align the size to uoffset_t
+ size = (size - 1 + sizeof(uoffset_t)) & ~(sizeof(uoffset_t) - 1);
+ return (size > size_) ? 0 : size;
+#else
+ // Must turn on FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE for this to work.
+ (void)upper_bound_;
+ FLATBUFFERS_ASSERT(false);
+ return 0;
+#endif
+ // clang-format on
+ }
+
+private:
+ const uint8_t *buf_;
+ size_t size_;
+ uoffset_t depth_;
+ uoffset_t max_depth_;
+ uoffset_t num_tables_;
+ uoffset_t max_tables_;
+ mutable size_t upper_bound_;
+ bool check_alignment_;
+};
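+
+// Illustrative sketch of a typical verification call (`MyRoot` stands in for
+// a generated root table type and is not defined here):
+//
+//   flatbuffers::Verifier verifier(buf, len);
+//   bool ok = verifier.VerifyBuffer<MyRoot>(nullptr);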
+
+// Convenient way to bundle a buffer and its length, to pass it around
+// typed by its root.
+// A BufferRef does not own its buffer unless must_free is set.
+struct BufferRefBase
+{
+}; // for std::is_base_of
+template <typename T> struct BufferRef : BufferRefBase
+{
+ BufferRef() : buf(nullptr), len(0), must_free(false) {}
+ BufferRef(uint8_t *_buf, uoffset_t _len) : buf(_buf), len(_len), must_free(false) {}
+
+ ~BufferRef()
+ {
+ if (must_free)
+ free(buf);
+ }
+
+ const T *GetRoot() const { return flatbuffers::GetRoot<T>(buf); }
+
+ bool Verify()
+ {
+ Verifier verifier(buf, len);
+ return verifier.VerifyBuffer<T>(nullptr);
+ }
+
+ uint8_t *buf;
+ uoffset_t len;
+ bool must_free;
+};
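+
+// Illustrative sketch: wrapping a received buffer and verifying it before use
+// (`MyRoot` again stands in for a generated root type):
+//
+//   flatbuffers::BufferRef<MyRoot> ref(buf, len);
+//   if (ref.Verify()) { auto *root = ref.GetRoot(); /* read fields */ }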
+
+// "structs" are flat structures that do not have an offset table, thus
+// always have all members present and do not support forwards/backwards
+// compatible extensions.
+
+class Struct FLATBUFFERS_FINAL_CLASS
+{
+public:
+ template <typename T> T GetField(uoffset_t o) const { return ReadScalar<T>(&data_[o]); }
+
+ template <typename T> T GetStruct(uoffset_t o) const { return reinterpret_cast<T>(&data_[o]); }
+
+ const uint8_t *GetAddressOf(uoffset_t o) const { return &data_[o]; }
+ uint8_t *GetAddressOf(uoffset_t o) { return &data_[o]; }
+
+private:
+ // private constructor & copy constructor: you obtain instances of this
+ // class by pointing to existing data only
+ Struct();
+ Struct(const Struct &);
+ Struct &operator=(const Struct &);
+
+ uint8_t data_[1];
+};
+
+// "tables" use an offset table (possibly shared) that allows fields to be
+// omitted and added at will, but uses an extra indirection to read.
+class Table
+{
+public:
+ const uint8_t *GetVTable() const { return data_ - ReadScalar<soffset_t>(data_); }
+
+ // This gets the field offset for any of the functions below it, or 0
+ // if the field was not present.
+ voffset_t GetOptionalFieldOffset(voffset_t field) const
+ {
+ // The vtable offset is always at the start.
+ auto vtable = GetVTable();
+ // The first element is the size of the vtable (fields + type id + itself).
+ auto vtsize = ReadScalar<voffset_t>(vtable);
+ // If the field we're accessing is outside the vtable, we're reading older
+ // data, so it's the same as if the offset was 0 (not present).
+ return field < vtsize ? ReadScalar<voffset_t>(vtable + field) : 0;
+ }
+
+ template <typename T> T GetField(voffset_t field, T defaultval) const
+ {
+ auto field_offset = GetOptionalFieldOffset(field);
+ return field_offset ? ReadScalar<T>(data_ + field_offset) : defaultval;
+ }
+
+ template <typename P> P GetPointer(voffset_t field)
+ {
+ auto field_offset = GetOptionalFieldOffset(field);
+ auto p = data_ + field_offset;
+ return field_offset ? reinterpret_cast<P>(p + ReadScalar<uoffset_t>(p)) : nullptr;
+ }
+ template <typename P> P GetPointer(voffset_t field) const
+ {
+ return const_cast<Table *>(this)->GetPointer<P>(field);
+ }
+
+ template <typename P> P GetStruct(voffset_t field) const
+ {
+ auto field_offset = GetOptionalFieldOffset(field);
+ auto p = const_cast<uint8_t *>(data_ + field_offset);
+ return field_offset ? reinterpret_cast<P>(p) : nullptr;
+ }
+
+ template <typename Raw, typename Face>
+ flatbuffers::Optional<Face> GetOptional(voffset_t field) const
+ {
+ auto field_offset = GetOptionalFieldOffset(field);
+ auto p = data_ + field_offset;
+ return field_offset ? Optional<Face>(static_cast<Face>(ReadScalar<Raw>(p))) : Optional<Face>();
+ }
+
+ template <typename T> bool SetField(voffset_t field, T val, T def)
+ {
+ auto field_offset = GetOptionalFieldOffset(field);
+ if (!field_offset)
+ return IsTheSameAs(val, def);
+ WriteScalar(data_ + field_offset, val);
+ return true;
+ }
+ template <typename T> bool SetField(voffset_t field, T val)
+ {
+ auto field_offset = GetOptionalFieldOffset(field);
+ if (!field_offset)
+ return false;
+ WriteScalar(data_ + field_offset, val);
+ return true;
+ }
+
+ bool SetPointer(voffset_t field, const uint8_t *val)
+ {
+ auto field_offset = GetOptionalFieldOffset(field);
+ if (!field_offset)
+ return false;
+ WriteScalar(data_ + field_offset, static_cast<uoffset_t>(val - (data_ + field_offset)));
+ return true;
+ }
+
+ uint8_t *GetAddressOf(voffset_t field)
+ {
+ auto field_offset = GetOptionalFieldOffset(field);
+ return field_offset ? data_ + field_offset : nullptr;
+ }
+ const uint8_t *GetAddressOf(voffset_t field) const
+ {
+ return const_cast<Table *>(this)->GetAddressOf(field);
+ }
+
+ bool CheckField(voffset_t field) const { return GetOptionalFieldOffset(field) != 0; }
+
+ // Verify the vtable of this table.
+ // Call this once per table, followed by VerifyField once per field.
+ bool VerifyTableStart(Verifier &verifier) const { return verifier.VerifyTableStart(data_); }
+
+ // Verify a particular field.
+ template <typename T> bool VerifyField(const Verifier &verifier, voffset_t field) const
+ {
+    // Calling GetOptionalFieldOffset should be safe now thanks to
+    // VerifyTableStart().
+ auto field_offset = GetOptionalFieldOffset(field);
+ // Check the actual field.
+ return !field_offset || verifier.Verify<T>(data_, field_offset);
+ }
+
+ // VerifyField for required fields.
+ template <typename T> bool VerifyFieldRequired(const Verifier &verifier, voffset_t field) const
+ {
+ auto field_offset = GetOptionalFieldOffset(field);
+ return verifier.Check(field_offset != 0) && verifier.Verify<T>(data_, field_offset);
+ }
+
+ // Versions for offsets.
+ bool VerifyOffset(const Verifier &verifier, voffset_t field) const
+ {
+ auto field_offset = GetOptionalFieldOffset(field);
+ return !field_offset || verifier.VerifyOffset(data_, field_offset);
+ }
+
+ bool VerifyOffsetRequired(const Verifier &verifier, voffset_t field) const
+ {
+ auto field_offset = GetOptionalFieldOffset(field);
+ return verifier.Check(field_offset != 0) && verifier.VerifyOffset(data_, field_offset);
+ }
+
+private:
+ // private constructor & copy constructor: you obtain instances of this
+ // class by pointing to existing data only
+ Table();
+ Table(const Table &other);
+ Table &operator=(const Table &);
+
+ uint8_t data_[1];
+};
+
+// This specialization allows avoiding warnings like:
+// MSVC C4800: type: forcing value to bool 'true' or 'false'.
+template <>
+inline flatbuffers::Optional<bool> Table::GetOptional<uint8_t, bool>(voffset_t field) const
+{
+ auto field_offset = GetOptionalFieldOffset(field);
+ auto p = data_ + field_offset;
+ return field_offset ? Optional<bool>(ReadScalar<uint8_t>(p) != 0) : Optional<bool>();
+}
+
+template <typename T> void FlatBufferBuilder::Required(Offset<T> table, voffset_t field)
+{
+ auto table_ptr = reinterpret_cast<const Table *>(buf_.data_at(table.o));
+ bool ok = table_ptr->GetOptionalFieldOffset(field) != 0;
+ // If this fails, the caller will show what field needs to be set.
+ FLATBUFFERS_ASSERT(ok);
+ (void)ok;
+}
+
+/// @brief This can compute the start of a FlatBuffer from a root pointer, i.e.
+/// it is the opposite transformation of GetRoot().
+/// This may be useful if you want to pass on a root and have the recipient
+/// delete the buffer afterwards.
+inline const uint8_t *GetBufferStartFromRootPointer(const void *root)
+{
+ auto table = reinterpret_cast<const Table *>(root);
+ auto vtable = table->GetVTable();
+ // Either the vtable is before the root or after the root.
+ auto start = (std::min)(vtable, reinterpret_cast<const uint8_t *>(root));
+ // Align to at least sizeof(uoffset_t).
+ start = reinterpret_cast<const uint8_t *>(reinterpret_cast<uintptr_t>(start) &
+ ~(sizeof(uoffset_t) - 1));
+ // Additionally, there may be a file_identifier in the buffer, and the root
+ // offset. The buffer may have been aligned to any size between
+ // sizeof(uoffset_t) and FLATBUFFERS_MAX_ALIGNMENT (see "force_align").
+ // Sadly, the exact alignment is only known when constructing the buffer,
+ // since it depends on the presence of values with said alignment properties.
+ // So instead, we simply look at the next uoffset_t values (root,
+ // file_identifier, and alignment padding) to see which points to the root.
+ // None of the other values can "impersonate" the root since they will either
+ // be 0 or four ASCII characters.
+ static_assert(FlatBufferBuilder::kFileIdentifierLength == sizeof(uoffset_t),
+ "file_identifier is assumed to be the same size as uoffset_t");
+ for (auto possible_roots = FLATBUFFERS_MAX_ALIGNMENT / sizeof(uoffset_t) + 1; possible_roots;
+ possible_roots--)
+ {
+ start -= sizeof(uoffset_t);
+ if (ReadScalar<uoffset_t>(start) + start == reinterpret_cast<const uint8_t *>(root))
+ return start;
+ }
+ // We didn't find the root, either the "root" passed isn't really a root,
+ // or the buffer is corrupt.
+ // Assert, because calling this function with bad data may cause reads
+ // outside of buffer boundaries.
+ FLATBUFFERS_ASSERT(false);
+ return nullptr;
+}
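+
+// Illustrative sketch (`root` is assumed to have come from GetRoot()):
+//
+//   auto *start = flatbuffers::GetBufferStartFromRootPointer(root);
+//   // `start` now points at the first byte of the enclosing FlatBuffer.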
+
+/// @brief This returns the prefixed size of a FlatBuffer.
+inline uoffset_t GetPrefixedSize(const uint8_t *buf) { return ReadScalar<uoffset_t>(buf); }
+
+// Base class for native objects (FlatBuffer data de-serialized into native
+// C++ data structures).
+// Contains no functionality; it exists purely for documentation.
+struct NativeTable
+{
+};
+
+/// @brief Function types to be used with resolving hashes into objects and
+/// back again. The resolver gets a pointer to a field inside an object API
+/// object that is of the type specified in the schema using the attribute
+/// `cpp_type` (it is thus important that whatever you write to this address
+/// matches that type). The value of this field is initially null, so you
+/// may choose to implement a delayed binding lookup using this function
+/// if you wish. The resolver does the opposite lookup, for when the object
+/// is being serialized again.
+typedef uint64_t hash_value_t;
+// clang-format off
+#ifdef FLATBUFFERS_CPP98_STL
+ typedef void (*resolver_function_t)(void **pointer_adr, hash_value_t hash);
+ typedef hash_value_t (*rehasher_function_t)(void *pointer);
+#else
+ typedef std::function<void (void **pointer_adr, hash_value_t hash)>
+ resolver_function_t;
+ typedef std::function<hash_value_t (void *pointer)> rehasher_function_t;
+#endif
+// clang-format on
+
+// Helper function to test if a field is present, using any of the field
+// enums in the generated code.
+// `table` must be a generated table type. Since this is a template parameter,
+// this is not typechecked to be a subclass of Table, so beware!
+// Note: this function will return false for fields equal to the default
+// value, since they're not stored in the buffer (unless force_defaults was
+// used).
+template <typename T> bool IsFieldPresent(const T *table, typename T::FlatBuffersVTableOffset field)
+{
+ // Cast, since Table is a private baseclass of any table types.
+ return reinterpret_cast<const Table *>(table)->CheckField(static_cast<voffset_t>(field));
+}
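+
+// Illustrative sketch (`monster` points at a hypothetical generated table of
+// type `Monster`, whose field enum values follow the usual VT_* convention):
+//
+//   if (flatbuffers::IsFieldPresent(monster, Monster::VT_NAME)) { /* ... */ }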
+
+// Utility function for reverse lookups on the EnumNames*() functions
+// (in the generated C++ code)
+// names must be NULL terminated.
+inline int LookupEnum(const char **names, const char *name)
+{
+ for (const char **p = names; *p; p++)
+ if (!strcmp(*p, name))
+ return static_cast<int>(p - names);
+ return -1;
+}
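+
+// Illustrative sketch with a hand-written, NULL-terminated name list (a
+// generated EnumNames*() function provides the same kind of array):
+//
+//   static const char *names[] = {"Red", "Green", "Blue", nullptr};
+//   int idx = flatbuffers::LookupEnum(names, "Green");  // -> 1, or -1 if unknown.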
+
+// These macros allow us to lay out a struct with a guarantee that it will end
+// up looking the same on different compilers and platforms.
+// They do this by preventing the compiler from inserting any padding, and
+// instead insert explicit padding fields that keep every element aligned to
+// its own size.
+// Additionally, they manually set the alignment of the struct as a whole,
+// which is typically that of its largest element, or a custom size set in the
+// schema by the force_align attribute.
+// These are used in the generated code only.
+
+// clang-format off
+#if defined(_MSC_VER)
+ #define FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(alignment) \
+ __pragma(pack(1)) \
+ struct __declspec(align(alignment))
+ #define FLATBUFFERS_STRUCT_END(name, size) \
+ __pragma(pack()) \
+ static_assert(sizeof(name) == size, "compiler breaks packing rules")
+#elif defined(__GNUC__) || defined(__clang__) || defined(__ICCARM__)
+ #define FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(alignment) \
+ _Pragma("pack(1)") \
+ struct __attribute__((aligned(alignment)))
+ #define FLATBUFFERS_STRUCT_END(name, size) \
+ _Pragma("pack()") \
+ static_assert(sizeof(name) == size, "compiler breaks packing rules")
+#else
+ #error Unknown compiler, please define structure alignment macros
+#endif
+// clang-format on
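+
+// Illustrative sketch of how generated code applies these macros to a struct
+// holding two floats (the names are examples only):
+//
+//   FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(4) Vec2 FLATBUFFERS_FINAL_CLASS {
+//     float x_;
+//     float y_;
+//   };
+//   FLATBUFFERS_STRUCT_END(Vec2, 8);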
+
+// Minimal reflection via code generation.
+// Besides full-fat reflection (see reflection.h) and parsing/printing by
+// loading schemas (see idl.h), we can also have code generation for minimal
+// reflection data which allows pretty-printing and other uses without needing
+// a schema or a parser.
+// Generate code with --reflect-types (types only) or --reflect-names (names
+// also) to enable.
+// See minireflect.h for utilities using this functionality.
+
+// These types are organized slightly differently from the ones in idl.h.
+enum SequenceType
+{
+ ST_TABLE,
+ ST_STRUCT,
+ ST_UNION,
+ ST_ENUM
+};
+
+// Scalars have the same order as in idl.h
+// clang-format off
+#define FLATBUFFERS_GEN_ELEMENTARY_TYPES(ET) \
+ ET(ET_UTYPE) \
+ ET(ET_BOOL) \
+ ET(ET_CHAR) \
+ ET(ET_UCHAR) \
+ ET(ET_SHORT) \
+ ET(ET_USHORT) \
+ ET(ET_INT) \
+ ET(ET_UINT) \
+ ET(ET_LONG) \
+ ET(ET_ULONG) \
+ ET(ET_FLOAT) \
+ ET(ET_DOUBLE) \
+ ET(ET_STRING) \
+ ET(ET_SEQUENCE) // See SequenceType.
+
+enum ElementaryType {
+ #define FLATBUFFERS_ET(E) E,
+ FLATBUFFERS_GEN_ELEMENTARY_TYPES(FLATBUFFERS_ET)
+ #undef FLATBUFFERS_ET
+};
+
+inline const char * const *ElementaryTypeNames() {
+ static const char * const names[] = {
+ #define FLATBUFFERS_ET(E) #E,
+ FLATBUFFERS_GEN_ELEMENTARY_TYPES(FLATBUFFERS_ET)
+ #undef FLATBUFFERS_ET
+ };
+ return names;
+}
+// clang-format on
+
+// Basic type info costs just 16 bits per field!
+// We're explicitly defining the signedness since the signedness of integer
+// bitfields is otherwise implementation-defined and causes warnings on older
+// GCC compilers.
+struct TypeCode
+{
+ // ElementaryType
+ unsigned short base_type : 4;
+ // Either vector (in table) or array (in struct)
+ unsigned short is_repeating : 1;
+ // Index into type_refs below, or -1 for none.
+ signed short sequence_ref : 11;
+};
+
+static_assert(sizeof(TypeCode) == 2, "TypeCode");
+
+struct TypeTable;
+
+// Signature of the static method present in each type.
+typedef const TypeTable *(*TypeFunction)();
+
+struct TypeTable
+{
+ SequenceType st;
+ size_t num_elems; // of type_codes, values, names (but not type_refs).
+ const TypeCode *type_codes; // num_elems count
+ const TypeFunction *type_refs; // less than num_elems entries (see TypeCode).
+ const int16_t *array_sizes; // less than num_elems entries (see TypeCode).
+ const int64_t *values; // Only set for non-consecutive enum/union or structs.
+ const char *const *names; // Only set if compiled with --reflect-names.
+};
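+
+// Illustrative sketch: a minimal, hand-written TypeTable for a struct holding
+// two floats (generated code normally emits this when built with
+// --reflect-types):
+//
+//   static const flatbuffers::TypeCode type_codes[] = {
+//     { flatbuffers::ET_FLOAT, 0, -1 }, { flatbuffers::ET_FLOAT, 0, -1 }
+//   };
+//   static const flatbuffers::TypeTable tt = {
+//     flatbuffers::ST_STRUCT, 2, type_codes, nullptr, nullptr, nullptr, nullptr
+//   };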
+
+// String which identifies the current version of FlatBuffers.
+// flatbuffer_version_string is used by Google developers to identify which
+// applications uploaded to Google Play are using this library. This allows
+// the development team at Google to determine the popularity of the library.
+// How it works: Applications that are uploaded to the Google Play Store are
+// scanned for this version string. We track which applications are using it
+// to measure popularity. You are free to remove it (of course) but we would
+// appreciate if you left it in.
+
+// Weak linkage is culled by VS & doesn't work on cygwin.
+// clang-format off
+#if !defined(_WIN32) && !defined(__CYGWIN__)
+
+extern volatile __attribute__((weak)) const char *flatbuffer_version_string;
+volatile __attribute__((weak)) const char *flatbuffer_version_string =
+ "FlatBuffers "
+ FLATBUFFERS_STRING(FLATBUFFERS_VERSION_MAJOR) "."
+ FLATBUFFERS_STRING(FLATBUFFERS_VERSION_MINOR) "."
+ FLATBUFFERS_STRING(FLATBUFFERS_VERSION_REVISION);
+
+#endif // !defined(_WIN32) && !defined(__CYGWIN__)
+
+#define FLATBUFFERS_DEFINE_BITMASK_OPERATORS(E, T)\
+ inline E operator | (E lhs, E rhs){\
+ return E(T(lhs) | T(rhs));\
+ }\
+ inline E operator & (E lhs, E rhs){\
+ return E(T(lhs) & T(rhs));\
+ }\
+ inline E operator ^ (E lhs, E rhs){\
+ return E(T(lhs) ^ T(rhs));\
+ }\
+ inline E operator ~ (E lhs){\
+ return E(~T(lhs));\
+ }\
+ inline E operator |= (E &lhs, E rhs){\
+ lhs = lhs | rhs;\
+ return lhs;\
+ }\
+ inline E operator &= (E &lhs, E rhs){\
+ lhs = lhs & rhs;\
+ return lhs;\
+ }\
+ inline E operator ^= (E &lhs, E rhs){\
+ lhs = lhs ^ rhs;\
+ return lhs;\
+ }\
+ inline bool operator !(E rhs) \
+ {\
+ return !bool(T(rhs)); \
+ }
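+
+// Illustrative sketch (`Permissions` is a hypothetical flag enum; the second
+// macro argument is its underlying integer type):
+//
+//   enum class Permissions : uint32_t { kRead = 1, kWrite = 2 };
+//   FLATBUFFERS_DEFINE_BITMASK_OPERATORS(Permissions, uint32_t)
+//   // Permissions p = Permissions::kRead | Permissions::kWrite;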
+/// @endcond
+} // namespace flatbuffers
+
+// clang-format on
+
+#endif // FLATBUFFERS_H_
diff --git a/onert-micro/externals/flatbuffers/flatc.h b/onert-micro/externals/flatbuffers/flatc.h
new file mode 100644
index 000000000..594bf792a
--- /dev/null
+++ b/onert-micro/externals/flatbuffers/flatc.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2017 Google Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLATBUFFERS_FLATC_H_
+#define FLATBUFFERS_FLATC_H_
+
+#include <functional>
+#include <limits>
+#include <string>
+
+#include "flatbuffers/flatbuffers.h"
+#include "flatbuffers/idl.h"
+#include "flatbuffers/util.h"
+
+namespace flatbuffers
+{
+
+extern void LogCompilerWarn(const std::string &warn);
+extern void LogCompilerError(const std::string &err);
+
+class FlatCompiler
+{
+public:
+ // Output generator for the various programming languages and formats we
+ // support.
+ struct Generator
+ {
+ typedef bool (*GenerateFn)(const flatbuffers::Parser &parser, const std::string &path,
+ const std::string &file_name);
+ typedef std::string (*MakeRuleFn)(const flatbuffers::Parser &parser, const std::string &path,
+ const std::string &file_name);
+
+ GenerateFn generate;
+ const char *generator_opt_short;
+ const char *generator_opt_long;
+ const char *lang_name;
+ bool schema_only;
+ GenerateFn generateGRPC;
+ flatbuffers::IDLOptions::Language lang;
+ const char *generator_help;
+ MakeRuleFn make_rule;
+ };
+
+ typedef void (*WarnFn)(const FlatCompiler *flatc, const std::string &warn, bool show_exe_name);
+
+ typedef void (*ErrorFn)(const FlatCompiler *flatc, const std::string &err, bool usage,
+ bool show_exe_name);
+
+ // Parameters required to initialize the FlatCompiler.
+ struct InitParams
+ {
+ InitParams() : generators(nullptr), num_generators(0), warn_fn(nullptr), error_fn(nullptr) {}
+
+ const Generator *generators;
+ size_t num_generators;
+ WarnFn warn_fn;
+ ErrorFn error_fn;
+ };
+
+ explicit FlatCompiler(const InitParams &params) : params_(params) {}
+
+ int Compile(int argc, const char **argv);
+
+ std::string GetUsageString(const char *program_name) const;
+
+private:
+ void ParseFile(flatbuffers::Parser &parser, const std::string &filename,
+ const std::string &contents, std::vector<const char *> &include_directories) const;
+
+ void LoadBinarySchema(Parser &parser, const std::string &filename, const std::string &contents);
+
+ void Warn(const std::string &warn, bool show_exe_name = true) const;
+
+ void Error(const std::string &err, bool usage = true, bool show_exe_name = true) const;
+
+ InitParams params_;
+};
+
+} // namespace flatbuffers
+
+#endif // FLATBUFFERS_FLATC_H_
diff --git a/onert-micro/externals/flatbuffers/flexbuffers.h b/onert-micro/externals/flatbuffers/flexbuffers.h
new file mode 100644
index 000000000..f6fcbf34b
--- /dev/null
+++ b/onert-micro/externals/flatbuffers/flexbuffers.h
@@ -0,0 +1,1852 @@
+/*
+ * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2017 Google Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLATBUFFERS_FLEXBUFFERS_H_
+#define FLATBUFFERS_FLEXBUFFERS_H_
+
+#include <map>
+// Used to select STL variant.
+#include "flatbuffers/base.h"
+// We use the basic binary writing functions from the regular FlatBuffers.
+#include "flatbuffers/util.h"
+
+#ifdef _MSC_VER
+#include <intrin.h>
+#endif
+
+#if defined(_MSC_VER)
+#pragma warning(push)
+#pragma warning(disable : 4127) // C4127: conditional expression is constant
+#endif
+
+namespace flexbuffers
+{
+
+class Reference;
+class Map;
+
+// These are used in the lower 2 bits of a type field to determine the size of
+// the elements (and or size field) of the item pointed to (e.g. vector).
+enum BitWidth
+{
+ BIT_WIDTH_8 = 0,
+ BIT_WIDTH_16 = 1,
+ BIT_WIDTH_32 = 2,
+ BIT_WIDTH_64 = 3,
+};
+
+// These are used as the upper 6 bits of a type field to indicate the actual
+// type.
+enum Type
+{
+ FBT_NULL = 0,
+ FBT_INT = 1,
+ FBT_UINT = 2,
+ FBT_FLOAT = 3,
+ // Types above stored inline, types below store an offset.
+ FBT_KEY = 4,
+ FBT_STRING = 5,
+ FBT_INDIRECT_INT = 6,
+ FBT_INDIRECT_UINT = 7,
+ FBT_INDIRECT_FLOAT = 8,
+ FBT_MAP = 9,
+ FBT_VECTOR = 10, // Untyped.
+ FBT_VECTOR_INT = 11, // Typed any size (stores no type table).
+ FBT_VECTOR_UINT = 12,
+ FBT_VECTOR_FLOAT = 13,
+ FBT_VECTOR_KEY = 14,
+ // DEPRECATED, use FBT_VECTOR or FBT_VECTOR_KEY instead.
+ // Read test.cpp/FlexBuffersDeprecatedTest() for details on why.
+ FBT_VECTOR_STRING_DEPRECATED = 15,
+ FBT_VECTOR_INT2 = 16, // Typed tuple (no type table, no size field).
+ FBT_VECTOR_UINT2 = 17,
+ FBT_VECTOR_FLOAT2 = 18,
+ FBT_VECTOR_INT3 = 19, // Typed triple (no type table, no size field).
+ FBT_VECTOR_UINT3 = 20,
+ FBT_VECTOR_FLOAT3 = 21,
+ FBT_VECTOR_INT4 = 22, // Typed quad (no type table, no size field).
+ FBT_VECTOR_UINT4 = 23,
+ FBT_VECTOR_FLOAT4 = 24,
+ FBT_BLOB = 25,
+ FBT_BOOL = 26,
+  FBT_VECTOR_BOOL = 36, // To allow the same kind of conversion from element type to vector type.
+};
+
+inline bool IsInline(Type t) { return t <= FBT_FLOAT || t == FBT_BOOL; }
+
+inline bool IsTypedVectorElementType(Type t)
+{
+ return (t >= FBT_INT && t <= FBT_STRING) || t == FBT_BOOL;
+}
+
+inline bool IsTypedVector(Type t)
+{
+ return (t >= FBT_VECTOR_INT && t <= FBT_VECTOR_STRING_DEPRECATED) || t == FBT_VECTOR_BOOL;
+}
+
+inline bool IsFixedTypedVector(Type t) { return t >= FBT_VECTOR_INT2 && t <= FBT_VECTOR_FLOAT4; }
+
+inline Type ToTypedVector(Type t, size_t fixed_len = 0)
+{
+ FLATBUFFERS_ASSERT(IsTypedVectorElementType(t));
+ switch (fixed_len)
+ {
+ case 0:
+ return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT);
+ case 2:
+ return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT2);
+ case 3:
+ return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT3);
+ case 4:
+ return static_cast<Type>(t - FBT_INT + FBT_VECTOR_INT4);
+ default:
+ FLATBUFFERS_ASSERT(0);
+ return FBT_NULL;
+ }
+}
+
+inline Type ToTypedVectorElementType(Type t)
+{
+ FLATBUFFERS_ASSERT(IsTypedVector(t));
+ return static_cast<Type>(t - FBT_VECTOR_INT + FBT_INT);
+}
+
+inline Type ToFixedTypedVectorElementType(Type t, uint8_t *len)
+{
+ FLATBUFFERS_ASSERT(IsFixedTypedVector(t));
+ auto fixed_type = t - FBT_VECTOR_INT2;
+ *len = static_cast<uint8_t>(fixed_type / 3 + 2); // 3 types each, starting from length 2.
+ return static_cast<Type>(fixed_type % 3 + FBT_INT);
+}
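+
+// For example, FBT_VECTOR_FLOAT3 maps back to an element type of FBT_FLOAT
+// with a fixed length of 3.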
+
+// TODO: implement proper support for 8/16bit floats, or decide not to
+// support them.
+typedef int16_t half;
+typedef int8_t quarter;
+
+// TODO: can we do this without conditionals using intrinsics or inline asm
+// on some platforms? Given branch prediction the method below should be
+// decently quick, but it is the most frequently executed function.
+// We could do an (unaligned) 64-bit read if we ifdef out the platforms for
+// which that doesn't work (or where we'd read into un-owned memory).
+template <typename R, typename T1, typename T2, typename T4, typename T8>
+R ReadSizedScalar(const uint8_t *data, uint8_t byte_width)
+{
+ return byte_width < 4 ? (byte_width < 2 ? static_cast<R>(flatbuffers::ReadScalar<T1>(data))
+ : static_cast<R>(flatbuffers::ReadScalar<T2>(data)))
+ : (byte_width < 8 ? static_cast<R>(flatbuffers::ReadScalar<T4>(data))
+ : static_cast<R>(flatbuffers::ReadScalar<T8>(data)));
+}
+
+inline int64_t ReadInt64(const uint8_t *data, uint8_t byte_width)
+{
+ return ReadSizedScalar<int64_t, int8_t, int16_t, int32_t, int64_t>(data, byte_width);
+}
+
+inline uint64_t ReadUInt64(const uint8_t *data, uint8_t byte_width)
+{
+#if defined(_MSC_VER) && ((defined(_M_X64) && !defined(_M_ARM64EC)) || defined _M_IX86)
+ uint64_t u = 0;
+ __movsb(reinterpret_cast<uint8_t *>(&u), reinterpret_cast<const uint8_t *>(data), byte_width);
+ return flatbuffers::EndianScalar(u);
+#else
+ return ReadSizedScalar<uint64_t, uint8_t, uint16_t, uint32_t, uint64_t>(data, byte_width);
+#endif
+ // clang-format on
+}
+
+inline double ReadDouble(const uint8_t *data, uint8_t byte_width)
+{
+ return ReadSizedScalar<double, quarter, half, float, double>(data, byte_width);
+}
+
+inline const uint8_t *Indirect(const uint8_t *offset, uint8_t byte_width)
+{
+ return offset - ReadUInt64(offset, byte_width);
+}
+
+template <typename T> const uint8_t *Indirect(const uint8_t *offset)
+{
+ return offset - flatbuffers::ReadScalar<T>(offset);
+}
+
+inline BitWidth WidthU(uint64_t u)
+{
+#define FLATBUFFERS_GET_FIELD_BIT_WIDTH(value, width) \
+ { \
+ if (!((u) & ~((1ULL << (width)) - 1ULL))) \
+ return BIT_WIDTH_##width; \
+ }
+ FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 8);
+ FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 16);
+ FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 32);
+#undef FLATBUFFERS_GET_FIELD_BIT_WIDTH
+ return BIT_WIDTH_64;
+}
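+
+// For example, WidthU(200) is BIT_WIDTH_8 (the value fits in one byte), while
+// WidthU(1000) is BIT_WIDTH_16.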
+
+inline BitWidth WidthI(int64_t i)
+{
+ auto u = static_cast<uint64_t>(i) << 1;
+ return WidthU(i >= 0 ? u : ~u);
+}
+
+inline BitWidth WidthF(double f)
+{
+ return static_cast<double>(static_cast<float>(f)) == f ? BIT_WIDTH_32 : BIT_WIDTH_64;
+}
+
+// Base class of all types below.
+// Points into the data buffer and allows access to one type.
+class Object
+{
+public:
+ Object(const uint8_t *data, uint8_t byte_width) : data_(data), byte_width_(byte_width) {}
+
+protected:
+ const uint8_t *data_;
+ uint8_t byte_width_;
+};
+
+// Object that has a size, obtained either from size prefix, or elsewhere.
+class Sized : public Object
+{
+public:
+ // Size prefix.
+ Sized(const uint8_t *data, uint8_t byte_width) : Object(data, byte_width), size_(read_size()) {}
+ // Manual size.
+ Sized(const uint8_t *data, uint8_t byte_width, size_t sz) : Object(data, byte_width), size_(sz) {}
+ size_t size() const { return size_; }
+ // Access size stored in `byte_width_` bytes before data_ pointer.
+ size_t read_size() const
+ {
+ return static_cast<size_t>(ReadUInt64(data_ - byte_width_, byte_width_));
+ }
+
+protected:
+ size_t size_;
+};
+
+class String : public Sized
+{
+public:
+ // Size prefix.
+ String(const uint8_t *data, uint8_t byte_width) : Sized(data, byte_width) {}
+ // Manual size.
+ String(const uint8_t *data, uint8_t byte_width, size_t sz) : Sized(data, byte_width, sz) {}
+
+ size_t length() const { return size(); }
+ const char *c_str() const { return reinterpret_cast<const char *>(data_); }
+ std::string str() const { return std::string(c_str(), size()); }
+
+ static String EmptyString()
+ {
+ static const char *empty_string = "";
+ return String(reinterpret_cast<const uint8_t *>(empty_string), 1, 0);
+ }
+ bool IsTheEmptyString() const { return data_ == EmptyString().data_; }
+};
+
+class Blob : public Sized
+{
+public:
+ Blob(const uint8_t *data_buf, uint8_t byte_width) : Sized(data_buf, byte_width) {}
+
+ static Blob EmptyBlob()
+ {
+ static const uint8_t empty_blob[] = {0 /*len*/};
+ return Blob(empty_blob + 1, 1);
+ }
+ bool IsTheEmptyBlob() const { return data_ == EmptyBlob().data_; }
+ const uint8_t *data() const { return data_; }
+};
+
+class Vector : public Sized
+{
+public:
+ Vector(const uint8_t *data, uint8_t byte_width) : Sized(data, byte_width) {}
+
+ Reference operator[](size_t i) const;
+
+ static Vector EmptyVector()
+ {
+ static const uint8_t empty_vector[] = {0 /*len*/};
+ return Vector(empty_vector + 1, 1);
+ }
+ bool IsTheEmptyVector() const { return data_ == EmptyVector().data_; }
+};
+
+class TypedVector : public Sized
+{
+public:
+ TypedVector(const uint8_t *data, uint8_t byte_width, Type element_type)
+ : Sized(data, byte_width), type_(element_type)
+ {
+ }
+
+ Reference operator[](size_t i) const;
+
+ static TypedVector EmptyTypedVector()
+ {
+ static const uint8_t empty_typed_vector[] = {0 /*len*/};
+ return TypedVector(empty_typed_vector + 1, 1, FBT_INT);
+ }
+ bool IsTheEmptyVector() const { return data_ == TypedVector::EmptyTypedVector().data_; }
+
+ Type ElementType() { return type_; }
+
+ friend Reference;
+
+private:
+ Type type_;
+
+ friend Map;
+};
+
+class FixedTypedVector : public Object
+{
+public:
+ FixedTypedVector(const uint8_t *data, uint8_t byte_width, Type element_type, uint8_t len)
+ : Object(data, byte_width), type_(element_type), len_(len)
+ {
+ }
+
+ Reference operator[](size_t i) const;
+
+ static FixedTypedVector EmptyFixedTypedVector()
+ {
+ static const uint8_t fixed_empty_vector[] = {0 /* unused */};
+ return FixedTypedVector(fixed_empty_vector, 1, FBT_INT, 0);
+ }
+ bool IsTheEmptyFixedTypedVector() const
+ {
+ return data_ == FixedTypedVector::EmptyFixedTypedVector().data_;
+ }
+
+ Type ElementType() { return type_; }
+ uint8_t size() { return len_; }
+
+private:
+ Type type_;
+ uint8_t len_;
+};
+
+class Map : public Vector
+{
+public:
+ Map(const uint8_t *data, uint8_t byte_width) : Vector(data, byte_width) {}
+
+ Reference operator[](const char *key) const;
+ Reference operator[](const std::string &key) const;
+
+ Vector Values() const { return Vector(data_, byte_width_); }
+
+ TypedVector Keys() const
+ {
+ const size_t num_prefixed_fields = 3;
+ auto keys_offset = data_ - byte_width_ * num_prefixed_fields;
+ return TypedVector(Indirect(keys_offset, byte_width_),
+ static_cast<uint8_t>(ReadUInt64(keys_offset + byte_width_, byte_width_)),
+ FBT_KEY);
+ }
+
+ static Map EmptyMap()
+ {
+ static const uint8_t empty_map[] = {
+ 0 /*keys_len*/, 0 /*keys_offset*/, 1 /*keys_width*/, 0 /*len*/
+ };
+ return Map(empty_map + 4, 1);
+ }
+
+ bool IsTheEmptyMap() const { return data_ == EmptyMap().data_; }
+};
+
+template <typename T> void AppendToString(std::string &s, T &&v, bool keys_quoted)
+{
+ s += "[ ";
+ for (size_t i = 0; i < v.size(); i++)
+ {
+ if (i)
+ s += ", ";
+ v[i].ToString(true, keys_quoted, s);
+ }
+ s += " ]";
+}
+
+class Reference
+{
+public:
+ Reference() : data_(nullptr), parent_width_(0), byte_width_(BIT_WIDTH_8), type_(FBT_NULL) {}
+
+ Reference(const uint8_t *data, uint8_t parent_width, uint8_t byte_width, Type type)
+ : data_(data), parent_width_(parent_width), byte_width_(byte_width), type_(type)
+ {
+ }
+
+ Reference(const uint8_t *data, uint8_t parent_width, uint8_t packed_type)
+ : data_(data), parent_width_(parent_width)
+ {
+ byte_width_ = 1U << static_cast<BitWidth>(packed_type & 3);
+ type_ = static_cast<Type>(packed_type >> 2);
+ }
+
+ Type GetType() const { return type_; }
+
+ bool IsNull() const { return type_ == FBT_NULL; }
+ bool IsBool() const { return type_ == FBT_BOOL; }
+ bool IsInt() const { return type_ == FBT_INT || type_ == FBT_INDIRECT_INT; }
+ bool IsUInt() const { return type_ == FBT_UINT || type_ == FBT_INDIRECT_UINT; }
+ bool IsIntOrUint() const { return IsInt() || IsUInt(); }
+ bool IsFloat() const { return type_ == FBT_FLOAT || type_ == FBT_INDIRECT_FLOAT; }
+ bool IsNumeric() const { return IsIntOrUint() || IsFloat(); }
+ bool IsString() const { return type_ == FBT_STRING; }
+ bool IsKey() const { return type_ == FBT_KEY; }
+ bool IsVector() const { return type_ == FBT_VECTOR || type_ == FBT_MAP; }
+ bool IsUntypedVector() const { return type_ == FBT_VECTOR; }
+ bool IsTypedVector() const { return flexbuffers::IsTypedVector(type_); }
+ bool IsFixedTypedVector() const { return flexbuffers::IsFixedTypedVector(type_); }
+ bool IsAnyVector() const { return (IsTypedVector() || IsFixedTypedVector() || IsVector()); }
+ bool IsMap() const { return type_ == FBT_MAP; }
+ bool IsBlob() const { return type_ == FBT_BLOB; }
+ bool AsBool() const
+ {
+ return (type_ == FBT_BOOL ? ReadUInt64(data_, parent_width_) : AsUInt64()) != 0;
+ }
+
+  // Reads any type as an int64_t. Never fails; does the most sensible conversion:
+  // floats are truncated, strings are parsed as numbers where possible, and
+  // vectors/maps return their size. Returns 0 if all else fails.
+ int64_t AsInt64() const
+ {
+ if (type_ == FBT_INT)
+ {
+ // A fast path for the common case.
+ return ReadInt64(data_, parent_width_);
+ }
+ else
+ switch (type_)
+ {
+ case FBT_INDIRECT_INT:
+ return ReadInt64(Indirect(), byte_width_);
+ case FBT_UINT:
+ return ReadUInt64(data_, parent_width_);
+ case FBT_INDIRECT_UINT:
+ return ReadUInt64(Indirect(), byte_width_);
+ case FBT_FLOAT:
+ return static_cast<int64_t>(ReadDouble(data_, parent_width_));
+ case FBT_INDIRECT_FLOAT:
+ return static_cast<int64_t>(ReadDouble(Indirect(), byte_width_));
+ case FBT_NULL:
+ return 0;
+ case FBT_STRING:
+ return flatbuffers::StringToInt(AsString().c_str());
+ case FBT_VECTOR:
+ return static_cast<int64_t>(AsVector().size());
+ case FBT_BOOL:
+ return ReadInt64(data_, parent_width_);
+ default:
+ // Convert other things to int.
+ return 0;
+ }
+ }
+
+ // TODO: could specialize these to not use AsInt64() if that saves
+ // extension ops in generated code, and use a faster op than ReadInt64.
+ int32_t AsInt32() const { return static_cast<int32_t>(AsInt64()); }
+ int16_t AsInt16() const { return static_cast<int16_t>(AsInt64()); }
+ int8_t AsInt8() const { return static_cast<int8_t>(AsInt64()); }
+
+ uint64_t AsUInt64() const
+ {
+ if (type_ == FBT_UINT)
+ {
+ // A fast path for the common case.
+ return ReadUInt64(data_, parent_width_);
+ }
+ else
+ switch (type_)
+ {
+ case FBT_INDIRECT_UINT:
+ return ReadUInt64(Indirect(), byte_width_);
+ case FBT_INT:
+ return ReadInt64(data_, parent_width_);
+ case FBT_INDIRECT_INT:
+ return ReadInt64(Indirect(), byte_width_);
+ case FBT_FLOAT:
+ return static_cast<uint64_t>(ReadDouble(data_, parent_width_));
+ case FBT_INDIRECT_FLOAT:
+ return static_cast<uint64_t>(ReadDouble(Indirect(), byte_width_));
+ case FBT_NULL:
+ return 0;
+ case FBT_STRING:
+ return flatbuffers::StringToUInt(AsString().c_str());
+ case FBT_VECTOR:
+ return static_cast<uint64_t>(AsVector().size());
+ case FBT_BOOL:
+ return ReadUInt64(data_, parent_width_);
+ default:
+ // Convert other things to uint.
+ return 0;
+ }
+ }
+
+ uint32_t AsUInt32() const { return static_cast<uint32_t>(AsUInt64()); }
+ uint16_t AsUInt16() const { return static_cast<uint16_t>(AsUInt64()); }
+ uint8_t AsUInt8() const { return static_cast<uint8_t>(AsUInt64()); }
+
+ double AsDouble() const
+ {
+ if (type_ == FBT_FLOAT)
+ {
+ // A fast path for the common case.
+ return ReadDouble(data_, parent_width_);
+ }
+ else
+ switch (type_)
+ {
+ case FBT_INDIRECT_FLOAT:
+ return ReadDouble(Indirect(), byte_width_);
+ case FBT_INT:
+ return static_cast<double>(ReadInt64(data_, parent_width_));
+ case FBT_UINT:
+ return static_cast<double>(ReadUInt64(data_, parent_width_));
+ case FBT_INDIRECT_INT:
+ return static_cast<double>(ReadInt64(Indirect(), byte_width_));
+ case FBT_INDIRECT_UINT:
+ return static_cast<double>(ReadUInt64(Indirect(), byte_width_));
+ case FBT_NULL:
+ return 0.0;
+ case FBT_STRING:
+ {
+ double d;
+ flatbuffers::StringToNumber(AsString().c_str(), &d);
+ return d;
+ }
+ case FBT_VECTOR:
+ return static_cast<double>(AsVector().size());
+ case FBT_BOOL:
+ return static_cast<double>(ReadUInt64(data_, parent_width_));
+ default:
+ // Convert strings and other things to float.
+ return 0;
+ }
+ }
+
+ float AsFloat() const { return static_cast<float>(AsDouble()); }
+
+ const char *AsKey() const
+ {
+ if (type_ == FBT_KEY || type_ == FBT_STRING)
+ {
+ return reinterpret_cast<const char *>(Indirect());
+ }
+ else
+ {
+ return "";
+ }
+ }
+
+ // This function returns the empty string if you try to read something that
+ // is not a string or key.
+ String AsString() const
+ {
+ if (type_ == FBT_STRING)
+ {
+ return String(Indirect(), byte_width_);
+ }
+ else if (type_ == FBT_KEY)
+ {
+ auto key = Indirect();
+ return String(key, byte_width_, strlen(reinterpret_cast<const char *>(key)));
+ }
+ else
+ {
+ return String::EmptyString();
+ }
+ }
+
+ // Unlike AsString(), this will convert any type to a std::string.
+ std::string ToString() const
+ {
+ std::string s;
+ ToString(false, false, s);
+ return s;
+ }
+
+ // Convert any type to a JSON-like string. strings_quoted determines if
+ // string values at the top level receive "" quotes (inside other values
+ // they always do). keys_quoted determines if keys are quoted, at any level.
+ // TODO(wvo): add further options to have indentation/newlines.
+ void ToString(bool strings_quoted, bool keys_quoted, std::string &s) const
+ {
+ if (type_ == FBT_STRING)
+ {
+ String str(Indirect(), byte_width_);
+ if (strings_quoted)
+ {
+ flatbuffers::EscapeString(str.c_str(), str.length(), &s, true, false);
+ }
+ else
+ {
+ s.append(str.c_str(), str.length());
+ }
+ }
+ else if (IsKey())
+ {
+ auto str = AsKey();
+ if (keys_quoted)
+ {
+ flatbuffers::EscapeString(str, strlen(str), &s, true, false);
+ }
+ else
+ {
+ s += str;
+ }
+ }
+ else if (IsInt())
+ {
+ s += flatbuffers::NumToString(AsInt64());
+ }
+ else if (IsUInt())
+ {
+ s += flatbuffers::NumToString(AsUInt64());
+ }
+ else if (IsFloat())
+ {
+ s += flatbuffers::NumToString(AsDouble());
+ }
+ else if (IsNull())
+ {
+ s += "null";
+ }
+ else if (IsBool())
+ {
+ s += AsBool() ? "true" : "false";
+ }
+ else if (IsMap())
+ {
+ s += "{ ";
+ auto m = AsMap();
+ auto keys = m.Keys();
+ auto vals = m.Values();
+ for (size_t i = 0; i < keys.size(); i++)
+ {
+ keys[i].ToString(true, keys_quoted, s);
+ s += ": ";
+ vals[i].ToString(true, keys_quoted, s);
+ if (i < keys.size() - 1)
+ s += ", ";
+ }
+ s += " }";
+ }
+ else if (IsVector())
+ {
+ AppendToString<Vector>(s, AsVector(), keys_quoted);
+ }
+ else if (IsTypedVector())
+ {
+ AppendToString<TypedVector>(s, AsTypedVector(), keys_quoted);
+ }
+ else if (IsFixedTypedVector())
+ {
+ AppendToString<FixedTypedVector>(s, AsFixedTypedVector(), keys_quoted);
+ }
+ else if (IsBlob())
+ {
+ auto blob = AsBlob();
+ flatbuffers::EscapeString(reinterpret_cast<const char *>(blob.data()), blob.size(), &s, true,
+ false);
+ }
+ else
+ {
+ s += "(?)";
+ }
+ }
+
+ // This function returns the empty blob if you try to read a not-blob.
+ // Strings can be viewed as blobs too.
+ Blob AsBlob() const
+ {
+ if (type_ == FBT_BLOB || type_ == FBT_STRING)
+ {
+ return Blob(Indirect(), byte_width_);
+ }
+ else
+ {
+ return Blob::EmptyBlob();
+ }
+ }
+
+ // This function returns the empty vector if you try to read a not-vector.
+ // Maps can be viewed as vectors too.
+ Vector AsVector() const
+ {
+ if (type_ == FBT_VECTOR || type_ == FBT_MAP)
+ {
+ return Vector(Indirect(), byte_width_);
+ }
+ else
+ {
+ return Vector::EmptyVector();
+ }
+ }
+
+ TypedVector AsTypedVector() const
+ {
+ if (IsTypedVector())
+ {
+ auto tv = TypedVector(Indirect(), byte_width_, ToTypedVectorElementType(type_));
+ if (tv.type_ == FBT_STRING)
+ {
+ // These can't be accessed as strings, since we don't know the bit-width
+ // of the size field, see the declaration of
+ // FBT_VECTOR_STRING_DEPRECATED above for details.
+ // We change the type here to be keys, which are a subtype of strings,
+ // and will ignore the size field. This will truncate strings with
+ // embedded nulls.
+ tv.type_ = FBT_KEY;
+ }
+ return tv;
+ }
+ else
+ {
+ return TypedVector::EmptyTypedVector();
+ }
+ }
+
+ FixedTypedVector AsFixedTypedVector() const
+ {
+ if (IsFixedTypedVector())
+ {
+ uint8_t len = 0;
+ auto vtype = ToFixedTypedVectorElementType(type_, &len);
+ return FixedTypedVector(Indirect(), byte_width_, vtype, len);
+ }
+ else
+ {
+ return FixedTypedVector::EmptyFixedTypedVector();
+ }
+ }
+
+ Map AsMap() const
+ {
+ if (type_ == FBT_MAP)
+ {
+ return Map(Indirect(), byte_width_);
+ }
+ else
+ {
+ return Map::EmptyMap();
+ }
+ }
+
+ template <typename T> T As() const;
+
+ // Experimental: Mutation functions.
+ // These allow scalars in an already created buffer to be updated in-place.
+ // Since by default scalars are stored in the smallest possible space,
+ // the new value may not fit, in which case these functions return false.
+ // To avoid this, you can construct the values you intend to mutate using
+ // Builder::ForceMinimumBitWidth.
+ bool MutateInt(int64_t i)
+ {
+ if (type_ == FBT_INT)
+ {
+ return Mutate(data_, i, parent_width_, WidthI(i));
+ }
+ else if (type_ == FBT_INDIRECT_INT)
+ {
+ return Mutate(Indirect(), i, byte_width_, WidthI(i));
+ }
+ else if (type_ == FBT_UINT)
+ {
+ auto u = static_cast<uint64_t>(i);
+ return Mutate(data_, u, parent_width_, WidthU(u));
+ }
+ else if (type_ == FBT_INDIRECT_UINT)
+ {
+ auto u = static_cast<uint64_t>(i);
+ return Mutate(Indirect(), u, byte_width_, WidthU(u));
+ }
+ else
+ {
+ return false;
+ }
+ }
+
+ bool MutateBool(bool b)
+ {
+ return type_ == FBT_BOOL && Mutate(data_, b, parent_width_, BIT_WIDTH_8);
+ }
+
+ bool MutateUInt(uint64_t u)
+ {
+ if (type_ == FBT_UINT)
+ {
+ return Mutate(data_, u, parent_width_, WidthU(u));
+ }
+ else if (type_ == FBT_INDIRECT_UINT)
+ {
+ return Mutate(Indirect(), u, byte_width_, WidthU(u));
+ }
+ else if (type_ == FBT_INT)
+ {
+ auto i = static_cast<int64_t>(u);
+ return Mutate(data_, i, parent_width_, WidthI(i));
+ }
+ else if (type_ == FBT_INDIRECT_INT)
+ {
+ auto i = static_cast<int64_t>(u);
+ return Mutate(Indirect(), i, byte_width_, WidthI(i));
+ }
+ else
+ {
+ return false;
+ }
+ }
+
+ bool MutateFloat(float f)
+ {
+ if (type_ == FBT_FLOAT)
+ {
+ return MutateF(data_, f, parent_width_, BIT_WIDTH_32);
+ }
+ else if (type_ == FBT_INDIRECT_FLOAT)
+ {
+ return MutateF(Indirect(), f, byte_width_, BIT_WIDTH_32);
+ }
+ else
+ {
+ return false;
+ }
+ }
+
+ bool MutateFloat(double d)
+ {
+ if (type_ == FBT_FLOAT)
+ {
+ return MutateF(data_, d, parent_width_, WidthF(d));
+ }
+ else if (type_ == FBT_INDIRECT_FLOAT)
+ {
+ return MutateF(Indirect(), d, byte_width_, WidthF(d));
+ }
+ else
+ {
+ return false;
+ }
+ }
+
+ bool MutateString(const char *str, size_t len)
+ {
+ auto s = AsString();
+ if (s.IsTheEmptyString())
+ return false;
+    // This is very strict; we could allow shorter strings, but that would
+    // create garbage.
+ if (s.length() != len)
+ return false;
+ memcpy(const_cast<char *>(s.c_str()), str, len);
+ return true;
+ }
+ bool MutateString(const char *str) { return MutateString(str, strlen(str)); }
+ bool MutateString(const std::string &str) { return MutateString(str.data(), str.length()); }
+
+private:
+ const uint8_t *Indirect() const { return flexbuffers::Indirect(data_, parent_width_); }
+
+ template <typename T>
+ bool Mutate(const uint8_t *dest, T t, size_t byte_width, BitWidth value_width)
+ {
+ auto fits = static_cast<size_t>(static_cast<size_t>(1U) << value_width) <= byte_width;
+ if (fits)
+ {
+ t = flatbuffers::EndianScalar(t);
+ memcpy(const_cast<uint8_t *>(dest), &t, byte_width);
+ }
+ return fits;
+ }
+
+ template <typename T>
+ bool MutateF(const uint8_t *dest, T t, size_t byte_width, BitWidth value_width)
+ {
+ if (byte_width == sizeof(double))
+ return Mutate(dest, static_cast<double>(t), byte_width, value_width);
+ if (byte_width == sizeof(float))
+ return Mutate(dest, static_cast<float>(t), byte_width, value_width);
+ FLATBUFFERS_ASSERT(false);
+ return false;
+ }
+
+ const uint8_t *data_;
+ uint8_t parent_width_;
+ uint8_t byte_width_;
+ Type type_;
+};
+
+// Template specialization for As().
+template <> inline bool Reference::As<bool>() const { return AsBool(); }
+
+template <> inline int8_t Reference::As<int8_t>() const { return AsInt8(); }
+template <> inline int16_t Reference::As<int16_t>() const { return AsInt16(); }
+template <> inline int32_t Reference::As<int32_t>() const { return AsInt32(); }
+template <> inline int64_t Reference::As<int64_t>() const { return AsInt64(); }
+
+template <> inline uint8_t Reference::As<uint8_t>() const { return AsUInt8(); }
+template <> inline uint16_t Reference::As<uint16_t>() const { return AsUInt16(); }
+template <> inline uint32_t Reference::As<uint32_t>() const { return AsUInt32(); }
+template <> inline uint64_t Reference::As<uint64_t>() const { return AsUInt64(); }
+
+template <> inline double Reference::As<double>() const { return AsDouble(); }
+template <> inline float Reference::As<float>() const { return AsFloat(); }
+
+template <> inline String Reference::As<String>() const { return AsString(); }
+template <> inline std::string Reference::As<std::string>() const { return AsString().str(); }
+
+template <> inline Blob Reference::As<Blob>() const { return AsBlob(); }
+template <> inline Vector Reference::As<Vector>() const { return AsVector(); }
+template <> inline TypedVector Reference::As<TypedVector>() const { return AsTypedVector(); }
+template <> inline FixedTypedVector Reference::As<FixedTypedVector>() const
+{
+ return AsFixedTypedVector();
+}
+template <> inline Map Reference::As<Map>() const { return AsMap(); }
+
+inline uint8_t PackedType(BitWidth bit_width, Type type)
+{
+ return static_cast<uint8_t>(bit_width | (type << 2));
+}
+
+inline uint8_t NullPackedType() { return PackedType(BIT_WIDTH_8, FBT_NULL); }
+
+// Vector accessors.
+// Note: if you try to access outside of bounds, you get a Null value back
+// instead. Normally this would be an assert, but since this is "dynamically
+// typed" data, you may not want that (someone sends you a 2d vector and you
+// wanted 3d).
+// The Null converts seamlessly into a default value for any other type.
+// TODO(wvo): Could introduce an #ifdef that makes this into an assert?
+inline Reference Vector::operator[](size_t i) const
+{
+ auto len = size();
+ if (i >= len)
+ return Reference(nullptr, 1, NullPackedType());
+ auto packed_type = (data_ + len * byte_width_)[i];
+ auto elem = data_ + i * byte_width_;
+ return Reference(elem, byte_width_, packed_type);
+}
+
+inline Reference TypedVector::operator[](size_t i) const
+{
+ auto len = size();
+ if (i >= len)
+ return Reference(nullptr, 1, NullPackedType());
+ auto elem = data_ + i * byte_width_;
+ return Reference(elem, byte_width_, 1, type_);
+}
+
+inline Reference FixedTypedVector::operator[](size_t i) const
+{
+ if (i >= len_)
+ return Reference(nullptr, 1, NullPackedType());
+ auto elem = data_ + i * byte_width_;
+ return Reference(elem, byte_width_, 1, type_);
+}
+
+template <typename T> int KeyCompare(const void *key, const void *elem)
+{
+ auto str_elem =
+ reinterpret_cast<const char *>(Indirect<T>(reinterpret_cast<const uint8_t *>(elem)));
+ auto skey = reinterpret_cast<const char *>(key);
+ return strcmp(skey, str_elem);
+}
+
+inline Reference Map::operator[](const char *key) const
+{
+ auto keys = Keys();
+ // We can't pass keys.byte_width_ to the comparison function, so we have
+ // to pick the right one ahead of time.
+ int (*comp)(const void *, const void *) = nullptr;
+ switch (keys.byte_width_)
+ {
+ case 1:
+ comp = KeyCompare<uint8_t>;
+ break;
+ case 2:
+ comp = KeyCompare<uint16_t>;
+ break;
+ case 4:
+ comp = KeyCompare<uint32_t>;
+ break;
+ case 8:
+ comp = KeyCompare<uint64_t>;
+ break;
+ }
+ auto res = std::bsearch(key, keys.data_, keys.size(), keys.byte_width_, comp);
+ if (!res)
+ return Reference(nullptr, 1, NullPackedType());
+ auto i = (reinterpret_cast<uint8_t *>(res) - keys.data_) / keys.byte_width_;
+ return (*static_cast<const Vector *>(this))[i];
+}
+
+inline Reference Map::operator[](const std::string &key) const { return (*this)[key.c_str()]; }
+
+inline Reference GetRoot(const uint8_t *buffer, size_t size)
+{
+ // See Finish() below for the serialization counterpart of this.
+ // The root starts at the end of the buffer, so we parse backwards from there.
+ auto end = buffer + size;
+ auto byte_width = *--end;
+ auto packed_type = *--end;
+ end -= byte_width; // The root data item.
+ return Reference(end, byte_width, packed_type);
+}
+
+inline Reference GetRoot(const std::vector<uint8_t> &buffer)
+{
+ return GetRoot(flatbuffers::vector_data(buffer), buffer.size());
+}
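+
+// Illustrative sketch of reading back a buffer (a map with an "x" key is
+// assumed to have been built elsewhere; the key name is only an example):
+//
+//   auto root = flexbuffers::GetRoot(buffer);  // buffer: std::vector<uint8_t>
+//   auto map = root.AsMap();
+//   int64_t x = map["x"].AsInt64();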
+
+// Flags that configure how the Builder behaves.
+// The "Share" flags determine if the Builder automatically tries to pool
+// this type. Pooling can reduce the size of serialized data if there are
+// multiple maps of the same kind, at the expense of slightly slower
+// serialization (the cost of lookups) and more memory use (std::set).
+// By default this is on for keys, but off for strings.
+// Turn keys off if you have e.g. only one map.
+// Turn strings on if you expect many non-unique string values.
+// Additionally, sharing key vectors can save space if you have maps with
+// identical field populations.
+enum BuilderFlag
+{
+ BUILDER_FLAG_NONE = 0,
+ BUILDER_FLAG_SHARE_KEYS = 1,
+ BUILDER_FLAG_SHARE_STRINGS = 2,
+ BUILDER_FLAG_SHARE_KEYS_AND_STRINGS = 3,
+ BUILDER_FLAG_SHARE_KEY_VECTORS = 4,
+ BUILDER_FLAG_SHARE_ALL = 7,
+};
+
+class Builder FLATBUFFERS_FINAL_CLASS
+{
+public:
+ Builder(size_t initial_size = 256, BuilderFlag flags = BUILDER_FLAG_SHARE_KEYS)
+ : buf_(initial_size), finished_(false), has_duplicate_keys_(false), flags_(flags),
+ force_min_bit_width_(BIT_WIDTH_8), key_pool(KeyOffsetCompare(buf_)),
+ string_pool(StringOffsetCompare(buf_))
+ {
+ buf_.clear();
+ }
+
+#ifdef FLATBUFFERS_DEFAULT_DECLARATION
+ Builder(Builder &&) = default;
+ Builder &operator=(Builder &&) = default;
+#endif
+
+ /// @brief Get the serialized buffer (after you call `Finish()`).
+ /// @return Returns a vector owned by this class.
+ const std::vector<uint8_t> &GetBuffer() const
+ {
+ Finished();
+ return buf_;
+ }
+
+ // Size of the buffer. Does not include unfinished values.
+ size_t GetSize() const { return buf_.size(); }
+
+ // Reset all state so we can re-use the buffer.
+ void Clear()
+ {
+ buf_.clear();
+ stack_.clear();
+ finished_ = false;
+    // flags_ remains as-is.
+ force_min_bit_width_ = BIT_WIDTH_8;
+ key_pool.clear();
+ string_pool.clear();
+ }
+
+ // All value constructing functions below have two versions: one that
+ // takes a key (for placement inside a map) and one that doesn't (for inside
+ // vectors and elsewhere).
+
+ void Null() { stack_.push_back(Value()); }
+ void Null(const char *key)
+ {
+ Key(key);
+ Null();
+ }
+
+ void Int(int64_t i) { stack_.push_back(Value(i, FBT_INT, WidthI(i))); }
+ void Int(const char *key, int64_t i)
+ {
+ Key(key);
+ Int(i);
+ }
+
+ void UInt(uint64_t u) { stack_.push_back(Value(u, FBT_UINT, WidthU(u))); }
+ void UInt(const char *key, uint64_t u)
+ {
+ Key(key);
+ UInt(u);
+ }
+
+ void Float(float f) { stack_.push_back(Value(f)); }
+ void Float(const char *key, float f)
+ {
+ Key(key);
+ Float(f);
+ }
+
+ void Double(double f) { stack_.push_back(Value(f)); }
+ void Double(const char *key, double d)
+ {
+ Key(key);
+ Double(d);
+ }
+
+ void Bool(bool b) { stack_.push_back(Value(b)); }
+ void Bool(const char *key, bool b)
+ {
+ Key(key);
+ Bool(b);
+ }
+
+ void IndirectInt(int64_t i) { PushIndirect(i, FBT_INDIRECT_INT, WidthI(i)); }
+ void IndirectInt(const char *key, int64_t i)
+ {
+ Key(key);
+ IndirectInt(i);
+ }
+
+ void IndirectUInt(uint64_t u) { PushIndirect(u, FBT_INDIRECT_UINT, WidthU(u)); }
+ void IndirectUInt(const char *key, uint64_t u)
+ {
+ Key(key);
+ IndirectUInt(u);
+ }
+
+ void IndirectFloat(float f) { PushIndirect(f, FBT_INDIRECT_FLOAT, BIT_WIDTH_32); }
+ void IndirectFloat(const char *key, float f)
+ {
+ Key(key);
+ IndirectFloat(f);
+ }
+
+ void IndirectDouble(double f) { PushIndirect(f, FBT_INDIRECT_FLOAT, WidthF(f)); }
+ void IndirectDouble(const char *key, double d)
+ {
+ Key(key);
+ IndirectDouble(d);
+ }
+
+ size_t Key(const char *str, size_t len)
+ {
+ auto sloc = buf_.size();
+ WriteBytes(str, len + 1);
+ if (flags_ & BUILDER_FLAG_SHARE_KEYS)
+ {
+ auto it = key_pool.find(sloc);
+ if (it != key_pool.end())
+ {
+ // Already in the buffer. Remove key we just serialized, and use
+ // existing offset instead.
+ buf_.resize(sloc);
+ sloc = *it;
+ }
+ else
+ {
+ key_pool.insert(sloc);
+ }
+ }
+ stack_.push_back(Value(static_cast<uint64_t>(sloc), FBT_KEY, BIT_WIDTH_8));
+ return sloc;
+ }
+
+ size_t Key(const char *str) { return Key(str, strlen(str)); }
+ size_t Key(const std::string &str) { return Key(str.c_str(), str.size()); }
+
+ size_t String(const char *str, size_t len)
+ {
+ auto reset_to = buf_.size();
+ auto sloc = CreateBlob(str, len, 1, FBT_STRING);
+ if (flags_ & BUILDER_FLAG_SHARE_STRINGS)
+ {
+ StringOffset so(sloc, len);
+ auto it = string_pool.find(so);
+ if (it != string_pool.end())
+ {
+ // Already in the buffer. Remove string we just serialized, and use
+ // existing offset instead.
+ buf_.resize(reset_to);
+ sloc = it->first;
+ stack_.back().u_ = sloc;
+ }
+ else
+ {
+ string_pool.insert(so);
+ }
+ }
+ return sloc;
+ }
+ size_t String(const char *str) { return String(str, strlen(str)); }
+ size_t String(const std::string &str) { return String(str.c_str(), str.size()); }
+ void String(const flexbuffers::String &str) { String(str.c_str(), str.length()); }
+
+ void String(const char *key, const char *str)
+ {
+ Key(key);
+ String(str);
+ }
+ void String(const char *key, const std::string &str)
+ {
+ Key(key);
+ String(str);
+ }
+ void String(const char *key, const flexbuffers::String &str)
+ {
+ Key(key);
+ String(str);
+ }
+
+ size_t Blob(const void *data, size_t len) { return CreateBlob(data, len, 0, FBT_BLOB); }
+ size_t Blob(const std::vector<uint8_t> &v)
+ {
+ return CreateBlob(flatbuffers::vector_data(v), v.size(), 0, FBT_BLOB);
+ }
+
+ // TODO(wvo): support all the FlexBuffer types (like flexbuffers::String),
+ // e.g. Vector etc. Also in overloaded versions.
+ // Also some FlatBuffers types?
+
+ size_t StartVector() { return stack_.size(); }
+ size_t StartVector(const char *key)
+ {
+ Key(key);
+ return stack_.size();
+ }
+ size_t StartMap() { return stack_.size(); }
+ size_t StartMap(const char *key)
+ {
+ Key(key);
+ return stack_.size();
+ }
+
+  // TODO(wvo): allow this to specify an alignment greater than the natural
+ // alignment.
+ size_t EndVector(size_t start, bool typed, bool fixed)
+ {
+ auto vec = CreateVector(start, stack_.size() - start, 1, typed, fixed);
+ // Remove temp elements and return vector.
+ stack_.resize(start);
+ stack_.push_back(vec);
+ return static_cast<size_t>(vec.u_);
+ }
+
+ size_t EndMap(size_t start)
+ {
+ // We should have interleaved keys and values on the stack.
+ // Make sure it is an even number:
+ auto len = stack_.size() - start;
+ FLATBUFFERS_ASSERT(!(len & 1));
+ len /= 2;
+ // Make sure keys are all strings:
+ for (auto key = start; key < stack_.size(); key += 2)
+ {
+ FLATBUFFERS_ASSERT(stack_[key].type_ == FBT_KEY);
+ }
+ // Now sort values, so later we can do a binary search lookup.
+ // We want to sort 2 array elements at a time.
+ struct TwoValue
+ {
+ Value key;
+ Value val;
+ };
+ // TODO(wvo): strict aliasing?
+ // TODO(wvo): allow the caller to indicate the data is already sorted
+ // for maximum efficiency? With an assert to check sortedness to make sure
+ // we're not breaking binary search.
+ // Or, we can track if the map is sorted as keys are added, which would be
+ // quite cheap (cheaper than checking it here), so we can skip this
+ // step automatically when applicable, and encourage people to write in
+ // a sorted fashion.
+ // std::sort is typically already a lot faster on sorted data though.
+ auto dict = reinterpret_cast<TwoValue *>(flatbuffers::vector_data(stack_) + start);
+ std::sort(dict, dict + len, [&](const TwoValue &a, const TwoValue &b) -> bool {
+ auto as = reinterpret_cast<const char *>(flatbuffers::vector_data(buf_) + a.key.u_);
+ auto bs = reinterpret_cast<const char *>(flatbuffers::vector_data(buf_) + b.key.u_);
+ auto comp = strcmp(as, bs);
+ // We want to disallow duplicate keys, since this results in a
+ // map where values cannot be found.
+ // But we can't assert here (since we don't want to fail on
+ // random JSON input) or have an error mechanism.
+ // Instead, we set has_duplicate_keys_ in the builder to
+ // signal this.
+ // TODO: Have to check for pointer equality, as some sort
+ // implementations apparently call this function with the same
+ // element?? Why?
+ if (!comp && &a != &b)
+ has_duplicate_keys_ = true;
+ return comp < 0;
+ });
+ // First create a vector out of all keys.
+ // TODO(wvo): if kBuilderFlagShareKeyVectors is true, see if we can share
+ // the first vector.
+ auto keys = CreateVector(start, len, 2, true, false);
+ auto vec = CreateVector(start + 1, len, 2, false, false, &keys);
+ // Remove temp elements and return map.
+ stack_.resize(start);
+ stack_.push_back(vec);
+ return static_cast<size_t>(vec.u_);
+ }
+
+ // Call this after EndMap to see if the map had any duplicate keys.
+ // Any map with such keys won't be able to retrieve all values.
+ bool HasDuplicateKeys() const { return has_duplicate_keys_; }
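+ // A minimal usage sketch for HasDuplicateKeys() above (assuming a Builder
+ // instance `fbb`); duplicates are only detected once EndMap() has run:
+ //   auto start = fbb.StartMap();
+ //   fbb.Add("k", 1);
+ //   fbb.Add("k", 2); // same key twice
+ //   fbb.EndMap(start);
+ //   bool broken = fbb.HasDuplicateKeys(); // true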
+
+ template <typename F> size_t Vector(F f)
+ {
+ auto start = StartVector();
+ f();
+ return EndVector(start, false, false);
+ }
+ template <typename F, typename T> size_t Vector(F f, T &state)
+ {
+ auto start = StartVector();
+ f(state);
+ return EndVector(start, false, false);
+ }
+ template <typename F> size_t Vector(const char *key, F f)
+ {
+ auto start = StartVector(key);
+ f();
+ return EndVector(start, false, false);
+ }
+ template <typename F, typename T> size_t Vector(const char *key, F f, T &state)
+ {
+ auto start = StartVector(key);
+ f(state);
+ return EndVector(start, false, false);
+ }
+
+ template <typename T> void Vector(const T *elems, size_t len)
+ {
+ if (flatbuffers::is_scalar<T>::value)
+ {
+ // This path should be a lot quicker and use less space.
+ ScalarVector(elems, len, false);
+ }
+ else
+ {
+ auto start = StartVector();
+ for (size_t i = 0; i < len; i++)
+ Add(elems[i]);
+ EndVector(start, false, false);
+ }
+ }
+ template <typename T> void Vector(const char *key, const T *elems, size_t len)
+ {
+ Key(key);
+ Vector(elems, len);
+ }
+ template <typename T> void Vector(const std::vector<T> &vec)
+ {
+ Vector(flatbuffers::vector_data(vec), vec.size());
+ }
+
+ template <typename F> size_t TypedVector(F f)
+ {
+ auto start = StartVector();
+ f();
+ return EndVector(start, true, false);
+ }
+ template <typename F, typename T> size_t TypedVector(F f, T &state)
+ {
+ auto start = StartVector();
+ f(state);
+ return EndVector(start, true, false);
+ }
+ template <typename F> size_t TypedVector(const char *key, F f)
+ {
+ auto start = StartVector(key);
+ f();
+ return EndVector(start, true, false);
+ }
+ template <typename F, typename T> size_t TypedVector(const char *key, F f, T &state)
+ {
+ auto start = StartVector(key);
+ f(state);
+ return EndVector(start, true, false);
+ }
+
+ template <typename T> size_t FixedTypedVector(const T *elems, size_t len)
+ {
+ // We only support a few fixed vector lengths. For anything bigger, use a
+ // regular typed vector.
+ FLATBUFFERS_ASSERT(len >= 2 && len <= 4);
+ // And only scalar values.
+ static_assert(flatbuffers::is_scalar<T>::value, "Unrelated types");
+ return ScalarVector(elems, len, true);
+ }
+
+ template <typename T> size_t FixedTypedVector(const char *key, const T *elems, size_t len)
+ {
+ Key(key);
+ return FixedTypedVector(elems, len);
+ }
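+ // A minimal usage sketch for FixedTypedVector() above (assuming a Builder
+ // instance `fbb`): a fixed typed vector of 3 floats stored under a key.
+ //   float xyz[] = {1.0f, 2.0f, 3.0f};
+ //   fbb.FixedTypedVector("pos", xyz, 3);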
+
+ template <typename F> size_t Map(F f)
+ {
+ auto start = StartMap();
+ f();
+ return EndMap(start);
+ }
+ template <typename F, typename T> size_t Map(F f, T &state)
+ {
+ auto start = StartMap();
+ f(state);
+ return EndMap(start);
+ }
+ template <typename F> size_t Map(const char *key, F f)
+ {
+ auto start = StartMap(key);
+ f();
+ return EndMap(start);
+ }
+ template <typename F, typename T> size_t Map(const char *key, F f, T &state)
+ {
+ auto start = StartMap(key);
+ f(state);
+ return EndMap(start);
+ }
+ template <typename T> void Map(const std::map<std::string, T> &map)
+ {
+ auto start = StartMap();
+ for (auto it = map.begin(); it != map.end(); ++it)
+ Add(it->first.c_str(), it->second);
+ EndMap(start);
+ }
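+ // A minimal usage sketch for the lambda-based Map()/Vector() overloads above
+ // (assuming a Builder instance `fbb`); they allow nesting values inline:
+ //   fbb.Map([&]() {
+ //     fbb.Vector("ints", [&]() { fbb.Add(1); fbb.Add(2); });
+ //     fbb.String("name", "onert-micro");
+ //   });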
+
+ // If you wish to share a value explicitly (a value not shared automatically
+ // through one of the BUILDER_FLAG_SHARE_* flags) you can do so with these
+ // functions. Or if you wish to turn those flags off for performance reasons
+ // and still do some explicit sharing. For example:
+ // builder.IndirectDouble(M_PI);
+ // auto id = builder.LastValue(); // Remember where we stored it.
+ // .. more code goes here ..
+ // builder.ReuseValue(id); // Refers to same double by offset.
+ // LastValue works regardless of whether the value has a key or not.
+ // Works on any data type.
+ struct Value;
+ Value LastValue() { return stack_.back(); }
+ void ReuseValue(Value v) { stack_.push_back(v); }
+ void ReuseValue(const char *key, Value v)
+ {
+ Key(key);
+ ReuseValue(v);
+ }
+
+ // Overloaded Add that tries to call the correct function above.
+ void Add(int8_t i) { Int(i); }
+ void Add(int16_t i) { Int(i); }
+ void Add(int32_t i) { Int(i); }
+ void Add(int64_t i) { Int(i); }
+ void Add(uint8_t u) { UInt(u); }
+ void Add(uint16_t u) { UInt(u); }
+ void Add(uint32_t u) { UInt(u); }
+ void Add(uint64_t u) { UInt(u); }
+ void Add(float f) { Float(f); }
+ void Add(double d) { Double(d); }
+ void Add(bool b) { Bool(b); }
+ void Add(const char *str) { String(str); }
+ void Add(const std::string &str) { String(str); }
+ void Add(const flexbuffers::String &str) { String(str); }
+
+ template <typename T> void Add(const std::vector<T> &vec) { Vector(vec); }
+
+ template <typename T> void Add(const char *key, const T &t)
+ {
+ Key(key);
+ Add(t);
+ }
+
+ template <typename T> void Add(const std::map<std::string, T> &map) { Map(map); }
+
+ template <typename T> void operator+=(const T &t) { Add(t); }
+
+ // This function is useful in combination with the Mutate* functions above.
+ // It forces elements of vectors and maps to have a minimum size, such that
+ // they can later be updated without failing.
+ // Call with no arguments to reset.
+ void ForceMinimumBitWidth(BitWidth bw = BIT_WIDTH_8) { force_min_bit_width_ = bw; }
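+ // A minimal sketch for ForceMinimumBitWidth() above (assuming a Builder
+ // instance `fbb`): elements of the vector below get at least 4 bytes each,
+ // so they can later be mutated to any int32 value in place.
+ //   fbb.ForceMinimumBitWidth(BIT_WIDTH_32);
+ //   fbb.Vector([&]() { fbb.Add(1); fbb.Add(2); });
+ //   fbb.ForceMinimumBitWidth(); // reset to the default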
+
+ void Finish()
+ {
+ // If you hit this assert, you likely have objects that were never included
+ // in a parent. You need to have exactly one root to finish a buffer.
+ // Check your Start/End calls are matched, and all objects are inside
+ // some other object.
+ FLATBUFFERS_ASSERT(stack_.size() == 1);
+
+ // Write root value.
+ auto byte_width = Align(stack_[0].ElemWidth(buf_.size(), 0));
+ WriteAny(stack_[0], byte_width);
+ // Write root type.
+ Write(stack_[0].StoredPackedType(), 1);
+ // Write root size. Normally determined by parent, but root has no parent :)
+ Write(byte_width, 1);
+
+ finished_ = true;
+ }
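+ // A minimal end-to-end sketch (assuming a Builder instance `fbb` and the
+ // GetBuffer() accessor declared earlier in this header):
+ //   fbb.Map([&]() { fbb.Add("answer", 42); });
+ //   fbb.Finish();
+ //   const std::vector<uint8_t> &buf = fbb.GetBuffer();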
+
+private:
+ void Finished() const
+ {
+ // If you get this assert, you're attempting to get access a buffer
+ // which hasn't been finished yet. Be sure to call
+ // Builder::Finish with your root object.
+ FLATBUFFERS_ASSERT(finished_);
+ }
+
+ // Align to prepare for writing a scalar with a certain size.
+ uint8_t Align(BitWidth alignment)
+ {
+ auto byte_width = 1U << alignment;
+ buf_.insert(buf_.end(), flatbuffers::PaddingBytes(buf_.size(), byte_width), 0);
+ return static_cast<uint8_t>(byte_width);
+ }
+
+ void WriteBytes(const void *val, size_t size)
+ {
+ buf_.insert(buf_.end(), reinterpret_cast<const uint8_t *>(val),
+ reinterpret_cast<const uint8_t *>(val) + size);
+ }
+
+ template <typename T> void Write(T val, size_t byte_width)
+ {
+ FLATBUFFERS_ASSERT(sizeof(T) >= byte_width);
+ val = flatbuffers::EndianScalar(val);
+ WriteBytes(&val, byte_width);
+ }
+
+ void WriteDouble(double f, uint8_t byte_width)
+ {
+ switch (byte_width)
+ {
+ case 8:
+ Write(f, byte_width);
+ break;
+ case 4:
+ Write(static_cast<float>(f), byte_width);
+ break;
+ // case 2: Write(static_cast<half>(f), byte_width); break;
+ // case 1: Write(static_cast<quarter>(f), byte_width); break;
+ default:
+ FLATBUFFERS_ASSERT(0);
+ }
+ }
+
+ void WriteOffset(uint64_t o, uint8_t byte_width)
+ {
+ auto reloff = buf_.size() - o;
+ FLATBUFFERS_ASSERT(byte_width == 8 || reloff < 1ULL << (byte_width * 8));
+ Write(reloff, byte_width);
+ }
+
+ template <typename T> void PushIndirect(T val, Type type, BitWidth bit_width)
+ {
+ auto byte_width = Align(bit_width);
+ auto iloc = buf_.size();
+ Write(val, byte_width);
+ stack_.push_back(Value(static_cast<uint64_t>(iloc), type, bit_width));
+ }
+
+ static BitWidth WidthB(size_t byte_width)
+ {
+ switch (byte_width)
+ {
+ case 1:
+ return BIT_WIDTH_8;
+ case 2:
+ return BIT_WIDTH_16;
+ case 4:
+ return BIT_WIDTH_32;
+ case 8:
+ return BIT_WIDTH_64;
+ default:
+ FLATBUFFERS_ASSERT(false);
+ return BIT_WIDTH_64;
+ }
+ }
+
+ template <typename T> static Type GetScalarType()
+ {
+ static_assert(flatbuffers::is_scalar<T>::value, "Unrelated types");
+ return flatbuffers::is_floating_point<T>::value
+ ? FBT_FLOAT
+ : flatbuffers::is_same<T, bool>::value
+ ? FBT_BOOL
+ : (flatbuffers::is_unsigned<T>::value ? FBT_UINT : FBT_INT);
+ }
+
+public:
+ // This was really intended to be private, except for LastValue/ReuseValue.
+ struct Value
+ {
+ union {
+ int64_t i_;
+ uint64_t u_;
+ double f_;
+ };
+
+ Type type_;
+
+ // For scalars: of itself, for vector: of its elements, for string: length.
+ BitWidth min_bit_width_;
+
+ Value() : i_(0), type_(FBT_NULL), min_bit_width_(BIT_WIDTH_8) {}
+
+ Value(bool b) : u_(static_cast<uint64_t>(b)), type_(FBT_BOOL), min_bit_width_(BIT_WIDTH_8) {}
+
+ Value(int64_t i, Type t, BitWidth bw) : i_(i), type_(t), min_bit_width_(bw) {}
+ Value(uint64_t u, Type t, BitWidth bw) : u_(u), type_(t), min_bit_width_(bw) {}
+
+ Value(float f) : f_(static_cast<double>(f)), type_(FBT_FLOAT), min_bit_width_(BIT_WIDTH_32) {}
+ Value(double f) : f_(f), type_(FBT_FLOAT), min_bit_width_(WidthF(f)) {}
+
+ uint8_t StoredPackedType(BitWidth parent_bit_width_ = BIT_WIDTH_8) const
+ {
+ return PackedType(StoredWidth(parent_bit_width_), type_);
+ }
+
+ BitWidth ElemWidth(size_t buf_size, size_t elem_index) const
+ {
+ if (IsInline(type_))
+ {
+ return min_bit_width_;
+ }
+ else
+ {
+ // We have an absolute offset, but want to store a relative offset
+ // elem_index elements beyond the current buffer end. Since whether
+ // the relative offset fits in a certain byte_width depends on
+ // the size of the elements before it (and their alignment), we have
+ // to test for each size in turn.
+ for (size_t byte_width = 1; byte_width <= sizeof(flatbuffers::largest_scalar_t);
+ byte_width *= 2)
+ {
+ // Where are we going to write this offset?
+ auto offset_loc =
+ buf_size + flatbuffers::PaddingBytes(buf_size, byte_width) + elem_index * byte_width;
+ // Compute relative offset.
+ auto offset = offset_loc - u_;
+ // Does it fit?
+ auto bit_width = WidthU(offset);
+ if (static_cast<size_t>(static_cast<size_t>(1U) << bit_width) == byte_width)
+ return bit_width;
+ }
+ FLATBUFFERS_ASSERT(false); // Must match one of the sizes above.
+ return BIT_WIDTH_64;
+ }
+ }
+
+ BitWidth StoredWidth(BitWidth parent_bit_width_ = BIT_WIDTH_8) const
+ {
+ if (IsInline(type_))
+ {
+ return (std::max)(min_bit_width_, parent_bit_width_);
+ }
+ else
+ {
+ return min_bit_width_;
+ }
+ }
+ };
+
+private:
+ void WriteAny(const Value &val, uint8_t byte_width)
+ {
+ switch (val.type_)
+ {
+ case FBT_NULL:
+ case FBT_INT:
+ Write(val.i_, byte_width);
+ break;
+ case FBT_BOOL:
+ case FBT_UINT:
+ Write(val.u_, byte_width);
+ break;
+ case FBT_FLOAT:
+ WriteDouble(val.f_, byte_width);
+ break;
+ default:
+ WriteOffset(val.u_, byte_width);
+ break;
+ }
+ }
+
+ size_t CreateBlob(const void *data, size_t len, size_t trailing, Type type)
+ {
+ auto bit_width = WidthU(len);
+ auto byte_width = Align(bit_width);
+ Write<uint64_t>(len, byte_width);
+ auto sloc = buf_.size();
+ WriteBytes(data, len + trailing);
+ stack_.push_back(Value(static_cast<uint64_t>(sloc), type, bit_width));
+ return sloc;
+ }
+
+ template <typename T> size_t ScalarVector(const T *elems, size_t len, bool fixed)
+ {
+ auto vector_type = GetScalarType<T>();
+ auto byte_width = sizeof(T);
+ auto bit_width = WidthB(byte_width);
+ // If you get this assert, you're trying to write a vector with a size
+ // field that is bigger than the scalars you're trying to write (e.g. a
+ // byte vector > 255 elements). For such types, write a "blob" instead.
+ // TODO: instead of asserting, could write vector with larger elements
+ // instead, though that would be wasteful.
+ FLATBUFFERS_ASSERT(WidthU(len) <= bit_width);
+ Align(bit_width);
+ if (!fixed)
+ Write<uint64_t>(len, byte_width);
+ auto vloc = buf_.size();
+ for (size_t i = 0; i < len; i++)
+ Write(elems[i], byte_width);
+ stack_.push_back(
+ Value(static_cast<uint64_t>(vloc), ToTypedVector(vector_type, fixed ? len : 0), bit_width));
+ return vloc;
+ }
+
+ Value CreateVector(size_t start, size_t vec_len, size_t step, bool typed, bool fixed,
+ const Value *keys = nullptr)
+ {
+ FLATBUFFERS_ASSERT(!fixed || typed); // typed=false, fixed=true combination is not supported.
+ // Figure out smallest bit width we can store this vector with.
+ auto bit_width = (std::max)(force_min_bit_width_, WidthU(vec_len));
+ auto prefix_elems = 1;
+ if (keys)
+ {
+ // If this vector is part of a map, we will pre-fix an offset to the keys
+ // to this vector.
+ bit_width = (std::max)(bit_width, keys->ElemWidth(buf_.size(), 0));
+ prefix_elems += 2;
+ }
+ Type vector_type = FBT_KEY;
+ // Check bit widths and types for all elements.
+ for (size_t i = start; i < stack_.size(); i += step)
+ {
+ auto elem_width = stack_[i].ElemWidth(buf_.size(), i - start + prefix_elems);
+ bit_width = (std::max)(bit_width, elem_width);
+ if (typed)
+ {
+ if (i == start)
+ {
+ vector_type = stack_[i].type_;
+ }
+ else
+ {
+ // If you get this assert, you are writing a typed vector with
+ // elements that are not all the same type.
+ FLATBUFFERS_ASSERT(vector_type == stack_[i].type_);
+ }
+ }
+ }
+ // If you get this assert, your fixed types are not one of:
+ // Int / UInt / Float / Key.
+ FLATBUFFERS_ASSERT(!fixed || IsTypedVectorElementType(vector_type));
+ auto byte_width = Align(bit_width);
+ // Write vector. First the keys width/offset if available, and size.
+ if (keys)
+ {
+ WriteOffset(keys->u_, byte_width);
+ Write<uint64_t>(1ULL << keys->min_bit_width_, byte_width);
+ }
+ if (!fixed)
+ Write<uint64_t>(vec_len, byte_width);
+ // Then the actual data.
+ auto vloc = buf_.size();
+ for (size_t i = start; i < stack_.size(); i += step)
+ {
+ WriteAny(stack_[i], byte_width);
+ }
+ // Then the types.
+ if (!typed)
+ {
+ for (size_t i = start; i < stack_.size(); i += step)
+ {
+ buf_.push_back(stack_[i].StoredPackedType(bit_width));
+ }
+ }
+ return Value(static_cast<uint64_t>(vloc),
+ keys ? FBT_MAP
+ : (typed ? ToTypedVector(vector_type, fixed ? vec_len : 0) : FBT_VECTOR),
+ bit_width);
+ }
+
+ // You shouldn't really be copying instances of this class.
+ Builder(const Builder &);
+ Builder &operator=(const Builder &);
+
+ std::vector<uint8_t> buf_;
+ std::vector<Value> stack_;
+
+ bool finished_;
+ bool has_duplicate_keys_;
+
+ BuilderFlag flags_;
+
+ BitWidth force_min_bit_width_;
+
+ struct KeyOffsetCompare
+ {
+ explicit KeyOffsetCompare(const std::vector<uint8_t> &buf) : buf_(&buf) {}
+ bool operator()(size_t a, size_t b) const
+ {
+ auto stra = reinterpret_cast<const char *>(flatbuffers::vector_data(*buf_) + a);
+ auto strb = reinterpret_cast<const char *>(flatbuffers::vector_data(*buf_) + b);
+ return strcmp(stra, strb) < 0;
+ }
+ const std::vector<uint8_t> *buf_;
+ };
+
+ typedef std::pair<size_t, size_t> StringOffset;
+ struct StringOffsetCompare
+ {
+ explicit StringOffsetCompare(const std::vector<uint8_t> &buf) : buf_(&buf) {}
+ bool operator()(const StringOffset &a, const StringOffset &b) const
+ {
+ auto stra = reinterpret_cast<const char *>(flatbuffers::vector_data(*buf_) + a.first);
+ auto strb = reinterpret_cast<const char *>(flatbuffers::vector_data(*buf_) + b.first);
+ return strncmp(stra, strb, (std::min)(a.second, b.second) + 1) < 0;
+ }
+ const std::vector<uint8_t> *buf_;
+ };
+
+ typedef std::set<size_t, KeyOffsetCompare> KeyOffsetMap;
+ typedef std::set<StringOffset, StringOffsetCompare> StringOffsetMap;
+
+ KeyOffsetMap key_pool;
+ StringOffsetMap string_pool;
+};
+
+} // namespace flexbuffers
+
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif
+
+#endif // FLATBUFFERS_FLEXBUFFERS_H_
diff --git a/onert-micro/externals/flatbuffers/grpc.h b/onert-micro/externals/flatbuffers/grpc.h
new file mode 100644
index 000000000..184c89e08
--- /dev/null
+++ b/onert-micro/externals/flatbuffers/grpc.h
@@ -0,0 +1,361 @@
+/*
+ * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2014 Google Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLATBUFFERS_GRPC_H_
+#define FLATBUFFERS_GRPC_H_
+
+// Helper functionality to glue FlatBuffers and GRPC.
+
+#include "flatbuffers/flatbuffers.h"
+#include "grpc/byte_buffer_reader.h"
+#include "grpcpp/support/byte_buffer.h"
+
+namespace flatbuffers
+{
+namespace grpc
+{
+
+// Message is a typed wrapper around a buffer that manages the underlying
+// `grpc_slice` and also provides flatbuffers-specific helpers such as `Verify`
+// and `GetRoot`. Since it is backed by a `grpc_slice`, the underlying buffer
+// is refcounted and ownership is managed automatically.
+template <class T> class Message
+{
+public:
+ Message() : slice_(grpc_empty_slice()) {}
+
+ Message(grpc_slice slice, bool add_ref) : slice_(add_ref ? grpc_slice_ref(slice) : slice) {}
+
+ Message &operator=(const Message &other) = delete;
+
+ Message(Message &&other) : slice_(other.slice_) { other.slice_ = grpc_empty_slice(); }
+
+ Message(const Message &other) = delete;
+
+ Message &operator=(Message &&other)
+ {
+ grpc_slice_unref(slice_);
+ slice_ = other.slice_;
+ other.slice_ = grpc_empty_slice();
+ return *this;
+ }
+
+ ~Message() { grpc_slice_unref(slice_); }
+
+ const uint8_t *mutable_data() const { return GRPC_SLICE_START_PTR(slice_); }
+
+ const uint8_t *data() const { return GRPC_SLICE_START_PTR(slice_); }
+
+ size_t size() const { return GRPC_SLICE_LENGTH(slice_); }
+
+ bool Verify() const
+ {
+ Verifier verifier(data(), size());
+ return verifier.VerifyBuffer<T>(nullptr);
+ }
+
+ T *GetMutableRoot() { return flatbuffers::GetMutableRoot<T>(mutable_data()); }
+
+ const T *GetRoot() const { return flatbuffers::GetRoot<T>(data()); }
+
+ // This is only intended for serializer use, or if you know what you're doing
+ const grpc_slice &BorrowSlice() const { return slice_; }
+
+private:
+ grpc_slice slice_;
+};
+
+class MessageBuilder;
+
+// SliceAllocator is a gRPC-specific allocator that uses the `grpc_slice`
+// refcounted slices to manage memory ownership. This makes it easy and
+// efficient to transfer buffers to gRPC.
+class SliceAllocator : public Allocator
+{
+public:
+ SliceAllocator() : slice_(grpc_empty_slice()) {}
+
+ SliceAllocator(const SliceAllocator &other) = delete;
+ SliceAllocator &operator=(const SliceAllocator &other) = delete;
+
+ SliceAllocator(SliceAllocator &&other) : slice_(grpc_empty_slice())
+ {
+ // default-construct and swap idiom
+ swap(other);
+ }
+
+ SliceAllocator &operator=(SliceAllocator &&other)
+ {
+ // move-construct and swap idiom
+ SliceAllocator temp(std::move(other));
+ swap(temp);
+ return *this;
+ }
+
+ void swap(SliceAllocator &other)
+ {
+ using std::swap;
+ swap(slice_, other.slice_);
+ }
+
+ virtual ~SliceAllocator() { grpc_slice_unref(slice_); }
+
+ virtual uint8_t *allocate(size_t size) override
+ {
+ FLATBUFFERS_ASSERT(GRPC_SLICE_IS_EMPTY(slice_));
+ slice_ = grpc_slice_malloc(size);
+ return GRPC_SLICE_START_PTR(slice_);
+ }
+
+ virtual void deallocate(uint8_t *p, size_t size) override
+ {
+ FLATBUFFERS_ASSERT(p == GRPC_SLICE_START_PTR(slice_));
+ FLATBUFFERS_ASSERT(size == GRPC_SLICE_LENGTH(slice_));
+ grpc_slice_unref(slice_);
+ slice_ = grpc_empty_slice();
+ }
+
+ virtual uint8_t *reallocate_downward(uint8_t *old_p, size_t old_size, size_t new_size,
+ size_t in_use_back, size_t in_use_front) override
+ {
+ FLATBUFFERS_ASSERT(old_p == GRPC_SLICE_START_PTR(slice_));
+ FLATBUFFERS_ASSERT(old_size == GRPC_SLICE_LENGTH(slice_));
+ FLATBUFFERS_ASSERT(new_size > old_size);
+ grpc_slice old_slice = slice_;
+ grpc_slice new_slice = grpc_slice_malloc(new_size);
+ uint8_t *new_p = GRPC_SLICE_START_PTR(new_slice);
+ memcpy_downward(old_p, old_size, new_p, new_size, in_use_back, in_use_front);
+ slice_ = new_slice;
+ grpc_slice_unref(old_slice);
+ return new_p;
+ }
+
+private:
+ grpc_slice &get_slice(uint8_t *p, size_t size)
+ {
+ FLATBUFFERS_ASSERT(p == GRPC_SLICE_START_PTR(slice_));
+ FLATBUFFERS_ASSERT(size == GRPC_SLICE_LENGTH(slice_));
+ return slice_;
+ }
+
+ grpc_slice slice_;
+
+ friend class MessageBuilder;
+};
+
+// SliceAllocatorMember is a hack to ensure that the MessageBuilder's
+// slice_allocator_ member is constructed before the FlatBufferBuilder, since
+// the allocator is used in the FlatBufferBuilder ctor.
+namespace detail
+{
+struct SliceAllocatorMember
+{
+ SliceAllocator slice_allocator_;
+};
+} // namespace detail
+
+// MessageBuilder is a gRPC-specific FlatBufferBuilder that uses SliceAllocator
+// to allocate gRPC buffers.
+class MessageBuilder : private detail::SliceAllocatorMember, public FlatBufferBuilder
+{
+public:
+ explicit MessageBuilder(uoffset_t initial_size = 1024)
+ : FlatBufferBuilder(initial_size, &slice_allocator_, false)
+ {
+ }
+
+ MessageBuilder(const MessageBuilder &other) = delete;
+ MessageBuilder &operator=(const MessageBuilder &other) = delete;
+
+ MessageBuilder(MessageBuilder &&other) : FlatBufferBuilder(1024, &slice_allocator_, false)
+ {
+ // Default construct and swap idiom.
+ Swap(other);
+ }
+
+ /// Create a MessageBuilder from a FlatBufferBuilder.
+ explicit MessageBuilder(FlatBufferBuilder &&src,
+ void (*dealloc)(void *, size_t) = &DefaultAllocator::dealloc)
+ : FlatBufferBuilder(1024, &slice_allocator_, false)
+ {
+ src.Swap(*this);
+ src.SwapBufAllocator(*this);
+ if (buf_.capacity())
+ {
+ uint8_t *buf = buf_.scratch_data(); // pointer to memory
+ size_t capacity = buf_.capacity(); // size of memory
+ slice_allocator_.slice_ = grpc_slice_new_with_len(buf, capacity, dealloc);
+ }
+ else
+ {
+ slice_allocator_.slice_ = grpc_empty_slice();
+ }
+ }
+
+ /// Move-assign a FlatBufferBuilder to a MessageBuilder.
+ /// Only FlatBufferBuilder with default allocator (basically, nullptr) is
+ /// supported.
+ MessageBuilder &operator=(FlatBufferBuilder &&src)
+ {
+ // Move construct a temporary and swap
+ MessageBuilder temp(std::move(src));
+ Swap(temp);
+ return *this;
+ }
+
+ MessageBuilder &operator=(MessageBuilder &&other)
+ {
+ // Move construct a temporary and swap
+ MessageBuilder temp(std::move(other));
+ Swap(temp);
+ return *this;
+ }
+
+ void Swap(MessageBuilder &other)
+ {
+ slice_allocator_.swap(other.slice_allocator_);
+ FlatBufferBuilder::Swap(other);
+ // After swapping the FlatBufferBuilder, we swap back the allocator, which
+ // restores the original allocator. This is necessary because
+ // MessageBuilder's allocator is its own member (SliceAllocatorMember). The
+ // allocator passed to FlatBufferBuilder::vector_downward must point to this
+ // member.
+ buf_.swap_allocator(other.buf_);
+ }
+
+ // Releases the ownership of the buffer pointer.
+ // Returns the size, offset, and the original grpc_slice that
+ // allocated the buffer. Also see grpc_slice_unref().
+ uint8_t *ReleaseRaw(size_t &size, size_t &offset, grpc_slice &slice)
+ {
+ uint8_t *buf = FlatBufferBuilder::ReleaseRaw(size, offset);
+ slice = slice_allocator_.slice_;
+ slice_allocator_.slice_ = grpc_empty_slice();
+ return buf;
+ }
+
+ ~MessageBuilder() {}
+
+ // GetMessage extracts the subslice of the buffer corresponding to the
+ // flatbuffers-encoded region and wraps it in a `Message<T>` to handle buffer
+ // ownership.
+ template <class T> Message<T> GetMessage()
+ {
+ auto buf_data = buf_.scratch_data(); // pointer to memory
+ auto buf_size = buf_.capacity(); // size of memory
+ auto msg_data = buf_.data(); // pointer to msg
+ auto msg_size = buf_.size(); // size of msg
+ // Do some sanity checks on data/size
+ FLATBUFFERS_ASSERT(msg_data);
+ FLATBUFFERS_ASSERT(msg_size);
+ FLATBUFFERS_ASSERT(msg_data >= buf_data);
+ FLATBUFFERS_ASSERT(msg_data + msg_size <= buf_data + buf_size);
+ // Calculate offsets from the buffer start
+ auto begin = msg_data - buf_data;
+ auto end = begin + msg_size;
+ // Get the slice we are working with (no refcount change)
+ grpc_slice slice = slice_allocator_.get_slice(buf_data, buf_size);
+ // Extract a subslice of the existing slice (increment refcount)
+ grpc_slice subslice = grpc_slice_sub(slice, begin, end);
+ // Wrap the subslice in a `Message<T>`, but don't increment refcount
+ Message<T> msg(subslice, false);
+ return msg;
+ }
+
+ template <class T> Message<T> ReleaseMessage()
+ {
+ Message<T> msg = GetMessage<T>();
+ Reset();
+ return msg;
+ }
+
+private:
+ // SliceAllocator slice_allocator_; // part of SliceAllocatorMember
+};
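+
+// A minimal usage sketch for MessageBuilder above (MyTable and CreateMyTable
+// are placeholders for a flatc-generated table and its create helper):
+//   flatbuffers::grpc::MessageBuilder mb;
+//   auto root = CreateMyTable(mb /*, fields... */);
+//   mb.Finish(root);
+//   auto msg = mb.ReleaseMessage<MyTable>(); // Message<MyTable> owns the slice
+//   bool ok = msg.Verify();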
+
+} // namespace grpc
+} // namespace flatbuffers
+
+namespace grpc
+{
+
+template <class T> class SerializationTraits<flatbuffers::grpc::Message<T>>
+{
+public:
+ static grpc::Status Serialize(const flatbuffers::grpc::Message<T> &msg, grpc_byte_buffer **buffer,
+ bool *own_buffer)
+ {
+ // We are passed in a `Message<T>`, which is a wrapper around a
+ // `grpc_slice`. We extract it here using `BorrowSlice()`. The const cast
+ // is necessary because the `grpc_raw_byte_buffer_create` func expects
+ // non-const slices in order to increment their refcounts.
+ grpc_slice *slice = const_cast<grpc_slice *>(&msg.BorrowSlice());
+ // Now use `grpc_raw_byte_buffer_create` to package the single slice into a
+ // `grpc_byte_buffer`, incrementing the refcount in the process.
+ *buffer = grpc_raw_byte_buffer_create(slice, 1);
+ *own_buffer = true;
+ return grpc::Status::OK;
+ }
+
+ // Deserialize by pulling the data out of the byte buffer.
+ static grpc::Status Deserialize(ByteBuffer *buf, flatbuffers::grpc::Message<T> *msg)
+ {
+ grpc_byte_buffer *buffer = *reinterpret_cast<grpc_byte_buffer **>(buf);
+ if (!buffer)
+ {
+ return ::grpc::Status(::grpc::StatusCode::INTERNAL, "No payload");
+ }
+ // Check if this is a single uncompressed slice.
+ if ((buffer->type == GRPC_BB_RAW) && (buffer->data.raw.compression == GRPC_COMPRESS_NONE) &&
+ (buffer->data.raw.slice_buffer.count == 1))
+ {
+ // If it is, then we can reference the `grpc_slice` directly.
+ grpc_slice slice = buffer->data.raw.slice_buffer.slices[0];
+ // We wrap a `Message<T>` around the slice, incrementing the refcount.
+ *msg = flatbuffers::grpc::Message<T>(slice, true);
+ }
+ else
+ {
+ // Otherwise, we need to use `grpc_byte_buffer_reader_readall` to read
+ // `buffer` into a single contiguous `grpc_slice`. The gRPC reader gives
+ // us back a new slice with the refcount already incremented.
+ grpc_byte_buffer_reader reader;
+ grpc_byte_buffer_reader_init(&reader, buffer);
+ grpc_slice slice = grpc_byte_buffer_reader_readall(&reader);
+ grpc_byte_buffer_reader_destroy(&reader);
+ // We wrap a `Message<T>` around the slice, but don't increment refcount
+ *msg = flatbuffers::grpc::Message<T>(slice, false);
+ }
+ grpc_byte_buffer_destroy(buffer);
+#if FLATBUFFERS_GRPC_DISABLE_AUTO_VERIFICATION
+ return ::grpc::Status::OK;
+#else
+ if (msg->Verify())
+ {
+ return ::grpc::Status::OK;
+ }
+ else
+ {
+ return ::grpc::Status(::grpc::StatusCode::INTERNAL, "Message verification failed");
+ }
+#endif
+ }
+};
+
+} // namespace grpc
+
+#endif // FLATBUFFERS_GRPC_H_
diff --git a/onert-micro/externals/flatbuffers/hash.h b/onert-micro/externals/flatbuffers/hash.h
new file mode 100644
index 000000000..a83c0ff20
--- /dev/null
+++ b/onert-micro/externals/flatbuffers/hash.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2015 Google Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLATBUFFERS_HASH_H_
+#define FLATBUFFERS_HASH_H_
+
+#include <cstdint>
+#include <cstring>
+
+#include "flatbuffers/flatbuffers.h"
+
+namespace flatbuffers
+{
+
+template <typename T> struct FnvTraits
+{
+ static const T kFnvPrime;
+ static const T kOffsetBasis;
+};
+
+template <> struct FnvTraits<uint32_t>
+{
+ static const uint32_t kFnvPrime = 0x01000193;
+ static const uint32_t kOffsetBasis = 0x811C9DC5;
+};
+
+template <> struct FnvTraits<uint64_t>
+{
+ static const uint64_t kFnvPrime = 0x00000100000001b3ULL;
+ static const uint64_t kOffsetBasis = 0xcbf29ce484222645ULL;
+};
+
+template <typename T> T HashFnv1(const char *input)
+{
+ T hash = FnvTraits<T>::kOffsetBasis;
+ for (const char *c = input; *c; ++c)
+ {
+ hash *= FnvTraits<T>::kFnvPrime;
+ hash ^= static_cast<unsigned char>(*c);
+ }
+ return hash;
+}
+
+template <typename T> T HashFnv1a(const char *input)
+{
+ T hash = FnvTraits<T>::kOffsetBasis;
+ for (const char *c = input; *c; ++c)
+ {
+ hash ^= static_cast<unsigned char>(*c);
+ hash *= FnvTraits<T>::kFnvPrime;
+ }
+ return hash;
+}
+
+template <> inline uint16_t HashFnv1<uint16_t>(const char *input)
+{
+ uint32_t hash = HashFnv1<uint32_t>(input);
+ return (hash >> 16) ^ (hash & 0xffff);
+}
+
+template <> inline uint16_t HashFnv1a<uint16_t>(const char *input)
+{
+ uint32_t hash = HashFnv1a<uint32_t>(input);
+ return (hash >> 16) ^ (hash & 0xffff);
+}
+
+template <typename T> struct NamedHashFunction
+{
+ const char *name;
+
+ typedef T (*HashFunction)(const char *);
+ HashFunction function;
+};
+
+const NamedHashFunction<uint16_t> kHashFunctions16[] = {
+ {"fnv1_16", HashFnv1<uint16_t>},
+ {"fnv1a_16", HashFnv1a<uint16_t>},
+};
+
+const NamedHashFunction<uint32_t> kHashFunctions32[] = {
+ {"fnv1_32", HashFnv1<uint32_t>},
+ {"fnv1a_32", HashFnv1a<uint32_t>},
+};
+
+const NamedHashFunction<uint64_t> kHashFunctions64[] = {
+ {"fnv1_64", HashFnv1<uint64_t>},
+ {"fnv1a_64", HashFnv1a<uint64_t>},
+};
+
+inline NamedHashFunction<uint16_t>::HashFunction FindHashFunction16(const char *name)
+{
+ std::size_t size = sizeof(kHashFunctions16) / sizeof(kHashFunctions16[0]);
+ for (std::size_t i = 0; i < size; ++i)
+ {
+ if (std::strcmp(name, kHashFunctions16[i].name) == 0)
+ {
+ return kHashFunctions16[i].function;
+ }
+ }
+ return nullptr;
+}
+
+inline NamedHashFunction<uint32_t>::HashFunction FindHashFunction32(const char *name)
+{
+ std::size_t size = sizeof(kHashFunctions32) / sizeof(kHashFunctions32[0]);
+ for (std::size_t i = 0; i < size; ++i)
+ {
+ if (std::strcmp(name, kHashFunctions32[i].name) == 0)
+ {
+ return kHashFunctions32[i].function;
+ }
+ }
+ return nullptr;
+}
+
+inline NamedHashFunction<uint64_t>::HashFunction FindHashFunction64(const char *name)
+{
+ std::size_t size = sizeof(kHashFunctions64) / sizeof(kHashFunctions64[0]);
+ for (std::size_t i = 0; i < size; ++i)
+ {
+ if (std::strcmp(name, kHashFunctions64[i].name) == 0)
+ {
+ return kHashFunctions64[i].function;
+ }
+ }
+ return nullptr;
+}
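+
+// A minimal usage sketch: look up one of the named FNV variants above by
+// name and apply it to an identifier.
+//   auto hash_fn = flatbuffers::FindHashFunction32("fnv1a_32");
+//   uint32_t h = hash_fn ? hash_fn("some_identifier") : 0;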
+
+} // namespace flatbuffers
+
+#endif // FLATBUFFERS_HASH_H_
diff --git a/onert-micro/externals/flatbuffers/idl.h b/onert-micro/externals/flatbuffers/idl.h
new file mode 100644
index 000000000..de0a22ab1
--- /dev/null
+++ b/onert-micro/externals/flatbuffers/idl.h
@@ -0,0 +1,1145 @@
+/*
+ * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2014 Google Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLATBUFFERS_IDL_H_
+#define FLATBUFFERS_IDL_H_
+
+#include <map>
+#include <memory>
+#include <stack>
+
+#include "flatbuffers/base.h"
+#include "flatbuffers/flatbuffers.h"
+#include "flatbuffers/flexbuffers.h"
+#include "flatbuffers/hash.h"
+#include "flatbuffers/reflection.h"
+
+#if !defined(FLATBUFFERS_CPP98_STL)
+#include <functional>
+#endif // !defined(FLATBUFFERS_CPP98_STL)
+
+// This file defines the data types representing a parsed IDL (Interface
+// Definition Language) / schema file.
+
+// Limits maximum depth of nested objects.
+// Prevents stack overflow while parsing a schema, JSON, or flexbuffer.
+#if !defined(FLATBUFFERS_MAX_PARSING_DEPTH)
+#define FLATBUFFERS_MAX_PARSING_DEPTH 64
+#endif
+
+namespace flatbuffers
+{
+
+// The order of these matters for Is*() functions below.
+// Additionally, Parser::ParseType assumes bool..string is a contiguous range
+// of type tokens.
+// clang-format off
+#define FLATBUFFERS_GEN_TYPES_SCALAR(TD) \
+ TD(NONE, "", uint8_t, byte, byte, byte, uint8, u8, UByte, UInt8) \
+ TD(UTYPE, "", uint8_t, byte, byte, byte, uint8, u8, UByte, UInt8) /* begin scalar/int */ \
+ TD(BOOL, "bool", uint8_t, boolean,bool, bool, bool, bool, Boolean, Bool) \
+ TD(CHAR, "byte", int8_t, byte, int8, sbyte, int8, i8, Byte, Int8) \
+ TD(UCHAR, "ubyte", uint8_t, byte, byte, byte, uint8, u8, UByte, UInt8) \
+ TD(SHORT, "short", int16_t, short, int16, short, int16, i16, Short, Int16) \
+ TD(USHORT, "ushort", uint16_t, short, uint16, ushort, uint16, u16, UShort, UInt16) \
+ TD(INT, "int", int32_t, int, int32, int, int32, i32, Int, Int32) \
+ TD(UINT, "uint", uint32_t, int, uint32, uint, uint32, u32, UInt, UInt32) \
+ TD(LONG, "long", int64_t, long, int64, long, int64, i64, Long, Int64) \
+ TD(ULONG, "ulong", uint64_t, long, uint64, ulong, uint64, u64, ULong, UInt64) /* end int */ \
+ TD(FLOAT, "float", float, float, float32, float, float32, f32, Float, Float32) /* begin float */ \
+ TD(DOUBLE, "double", double, double, float64, double, float64, f64, Double, Double) /* end float/scalar */
+#define FLATBUFFERS_GEN_TYPES_POINTER(TD) \
+ TD(STRING, "string", Offset<void>, int, int, StringOffset, int, unused, Int, Offset<String>) \
+ TD(VECTOR, "", Offset<void>, int, int, VectorOffset, int, unused, Int, Offset<UOffset>) \
+ TD(STRUCT, "", Offset<void>, int, int, int, int, unused, Int, Offset<UOffset>) \
+ TD(UNION, "", Offset<void>, int, int, int, int, unused, Int, Offset<UOffset>)
+#define FLATBUFFERS_GEN_TYPE_ARRAY(TD) \
+ TD(ARRAY, "", int, int, int, int, int, unused, Int, Offset<UOffset>)
+// The fields are:
+// - enum
+// - FlatBuffers schema type.
+// - C++ type.
+// - Java type.
+// - Go type.
+// - C# / .Net type.
+// - Python type.
+// - Rust type.
+// - Kotlin type.
+
+// using these macros, we can now write code dealing with types just once, e.g.
+
+/*
+switch (type) {
+ #define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, JTYPE, GTYPE, NTYPE, PTYPE, \
+ RTYPE, KTYPE) \
+ case BASE_TYPE_ ## ENUM: \
+ // do something specific to CTYPE here
+ FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD)
+ #undef FLATBUFFERS_TD
+}
+*/
+
+// If not all FLATBUFFERS_GEN_() arguments are necessary for implementation
+// of FLATBUFFERS_TD, you can use a variadic macro (with __VA_ARGS__ if needed).
+// In the above example, only CTYPE is used to generate the code, it can be rewritten:
+
+/*
+switch (type) {
+ #define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, ...) \
+ case BASE_TYPE_ ## ENUM: \
+ // do something specific to CTYPE here
+ FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD)
+ #undef FLATBUFFERS_TD
+}
+*/
+
+#define FLATBUFFERS_GEN_TYPES(TD) \
+ FLATBUFFERS_GEN_TYPES_SCALAR(TD) \
+ FLATBUFFERS_GEN_TYPES_POINTER(TD) \
+ FLATBUFFERS_GEN_TYPE_ARRAY(TD)
+
+// Create an enum for all the types above.
+#ifdef __GNUC__
+__extension__ // Stop GCC complaining about trailing comma with -Wpedantic.
+#endif
+enum BaseType {
+ #define FLATBUFFERS_TD(ENUM, ...) \
+ BASE_TYPE_ ## ENUM,
+ FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD)
+ #undef FLATBUFFERS_TD
+};
+
+#define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, ...) \
+ static_assert(sizeof(CTYPE) <= sizeof(largest_scalar_t), \
+ "define largest_scalar_t as " #CTYPE);
+ FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD)
+#undef FLATBUFFERS_TD
+
+inline bool IsScalar (BaseType t) { return t >= BASE_TYPE_UTYPE &&
+ t <= BASE_TYPE_DOUBLE; }
+inline bool IsInteger(BaseType t) { return t >= BASE_TYPE_UTYPE &&
+ t <= BASE_TYPE_ULONG; }
+inline bool IsFloat (BaseType t) { return t == BASE_TYPE_FLOAT ||
+ t == BASE_TYPE_DOUBLE; }
+inline bool IsLong (BaseType t) { return t == BASE_TYPE_LONG ||
+ t == BASE_TYPE_ULONG; }
+inline bool IsBool (BaseType t) { return t == BASE_TYPE_BOOL; }
+inline bool IsOneByte(BaseType t) { return t >= BASE_TYPE_UTYPE &&
+ t <= BASE_TYPE_UCHAR; }
+
+inline bool IsUnsigned(BaseType t) {
+ return (t == BASE_TYPE_UTYPE) || (t == BASE_TYPE_UCHAR) ||
+ (t == BASE_TYPE_USHORT) || (t == BASE_TYPE_UINT) ||
+ (t == BASE_TYPE_ULONG);
+}
+
+// clang-format on
+
+extern const char *const kTypeNames[];
+extern const char kTypeSizes[];
+
+inline size_t SizeOf(BaseType t) { return kTypeSizes[t]; }
+
+struct StructDef;
+struct EnumDef;
+class Parser;
+
+// Represents any type in the IDL, which is a combination of the BaseType
+// and additional information for vectors/structs_.
+struct Type
+{
+ explicit Type(BaseType _base_type = BASE_TYPE_NONE, StructDef *_sd = nullptr,
+ EnumDef *_ed = nullptr, uint16_t _fixed_length = 0)
+ : base_type(_base_type), element(BASE_TYPE_NONE), struct_def(_sd), enum_def(_ed),
+ fixed_length(_fixed_length)
+ {
+ }
+
+ bool operator==(const Type &o)
+ {
+ return base_type == o.base_type && element == o.element && struct_def == o.struct_def &&
+ enum_def == o.enum_def;
+ }
+
+ Type VectorType() const { return Type(element, struct_def, enum_def, fixed_length); }
+
+ Offset<reflection::Type> Serialize(FlatBufferBuilder *builder) const;
+
+ bool Deserialize(const Parser &parser, const reflection::Type *type);
+
+ BaseType base_type;
+ BaseType element; // only set if t == BASE_TYPE_VECTOR
+ StructDef *struct_def; // only set if t or element == BASE_TYPE_STRUCT
+ EnumDef *enum_def; // set if t == BASE_TYPE_UNION / BASE_TYPE_UTYPE,
+ // or for an integral type derived from an enum.
+ uint16_t fixed_length; // only set if t == BASE_TYPE_ARRAY
+};
+
+// Represents a parsed scalar value, its type, and field offset.
+struct Value
+{
+ Value() : constant("0"), offset(static_cast<voffset_t>(~(static_cast<voffset_t>(0U)))) {}
+ Type type;
+ std::string constant;
+ voffset_t offset;
+};
+
+// Helper class that retains the original order of a set of identifiers and
+// also provides quick lookup.
+template <typename T> class SymbolTable
+{
+public:
+ ~SymbolTable()
+ {
+ for (auto it = vec.begin(); it != vec.end(); ++it)
+ {
+ delete *it;
+ }
+ }
+
+ bool Add(const std::string &name, T *e)
+ {
+ vector_emplace_back(&vec, e);
+ auto it = dict.find(name);
+ if (it != dict.end())
+ return true;
+ dict[name] = e;
+ return false;
+ }
+
+ void Move(const std::string &oldname, const std::string &newname)
+ {
+ auto it = dict.find(oldname);
+ if (it != dict.end())
+ {
+ auto obj = it->second;
+ dict.erase(it);
+ dict[newname] = obj;
+ }
+ else
+ {
+ FLATBUFFERS_ASSERT(false);
+ }
+ }
+
+ T *Lookup(const std::string &name) const
+ {
+ auto it = dict.find(name);
+ return it == dict.end() ? nullptr : it->second;
+ }
+
+public:
+ std::map<std::string, T *> dict; // quick lookup
+ std::vector<T *> vec; // Used to iterate in order of insertion
+};
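+
+// A minimal usage sketch for SymbolTable above (StructDef is defined further
+// below): Add() returns true if the name was already present, and the table
+// owns and deletes its values.
+//   SymbolTable<StructDef> structs;
+//   bool existed = structs.Add("Monster", new StructDef()); // false on first add
+//   StructDef *def = structs.Lookup("Monster"); // nullptr if missing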
+
+// A name space, as set in the schema.
+struct Namespace
+{
+ Namespace() : from_table(0) {}
+
+ // Given a (potentially unqualified) name, return the "fully qualified" name
+ // which has a full namespaced descriptor.
+ // With max_components you can request fewer than the number of components
+ // the current namespace has.
+ std::string GetFullyQualifiedName(const std::string &name, size_t max_components = 1000) const;
+
+ std::vector<std::string> components;
+ size_t from_table; // Part of the namespace corresponds to a message/table.
+};
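+
+// For example, with components {"MyGame", "Example"},
+// GetFullyQualifiedName("Monster") yields "MyGame.Example.Monster".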
+
+inline bool operator<(const Namespace &a, const Namespace &b)
+{
+ size_t min_size = std::min(a.components.size(), b.components.size());
+ for (size_t i = 0; i < min_size; ++i)
+ {
+ if (a.components[i] != b.components[i])
+ return a.components[i] < b.components[i];
+ }
+ return a.components.size() < b.components.size();
+}
+
+// Base class for all definition types (fields, structs_, enums_).
+struct Definition
+{
+ Definition()
+ : generated(false), defined_namespace(nullptr), serialized_location(0), index(-1), refcount(1)
+ {
+ }
+
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>>>
+ SerializeAttributes(FlatBufferBuilder *builder, const Parser &parser) const;
+
+ bool DeserializeAttributes(Parser &parser, const Vector<Offset<reflection::KeyValue>> *attrs);
+
+ std::string name;
+ std::string file;
+ std::vector<std::string> doc_comment;
+ SymbolTable<Value> attributes;
+ bool generated; // did we already output code for this definition?
+ Namespace *defined_namespace; // Where it was defined.
+
+ // For use with Serialize()
+ uoffset_t serialized_location;
+ int index; // Inside the vector it is stored.
+ int refcount;
+};
+
+struct FieldDef : public Definition
+{
+ FieldDef()
+ : deprecated(false), key(false), shared(false), native_inline(false), flexbuffer(false),
+ presence(kDefault), nested_flatbuffer(NULL), padding(0)
+ {
+ }
+
+ Offset<reflection::Field> Serialize(FlatBufferBuilder *builder, uint16_t id,
+ const Parser &parser) const;
+
+ bool Deserialize(Parser &parser, const reflection::Field *field);
+
+ bool IsScalarOptional() const { return IsScalar(value.type.base_type) && IsOptional(); }
+ bool IsOptional() const { return presence == kOptional; }
+ bool IsRequired() const { return presence == kRequired; }
+ bool IsDefault() const { return presence == kDefault; }
+
+ Value value;
+ bool deprecated; // Field is allowed to be present in old data, but can't be
+ // written in new data nor accessed in new code.
+ bool key; // Field functions as a key for creating sorted vectors.
+ bool shared; // Field will be using string pooling (i.e. CreateSharedString)
+ // as default serialization behavior if field is a string.
+ bool native_inline; // Field will be defined inline (instead of as a pointer)
+ // for native tables if field is a struct.
+ bool flexbuffer; // This field contains FlexBuffer data.
+
+ enum Presence
+ {
+ // Field must always be present.
+ kRequired,
+ // Non-presence should be signalled to and controlled by users.
+ kOptional,
+ // Non-presence is hidden from users.
+ // Implementations may omit writing default values.
+ kDefault,
+ };
+ Presence static MakeFieldPresence(bool optional, bool required)
+ {
+ FLATBUFFERS_ASSERT(!(required && optional));
+ // clang-format off
+ return required ? FieldDef::kRequired
+ : optional ? FieldDef::kOptional
+ : FieldDef::kDefault;
+ // clang-format on
+ }
+ Presence presence;
+
+ StructDef *nested_flatbuffer; // This field contains nested FlatBuffer data.
+ size_t padding; // Bytes to always pad after this field.
+};
+
+struct StructDef : public Definition
+{
+ StructDef()
+ : fixed(false), predecl(true), sortbysize(true), has_key(false), minalign(1), bytesize(0)
+ {
+ }
+
+ void PadLastField(size_t min_align)
+ {
+ auto padding = PaddingBytes(bytesize, min_align);
+ bytesize += padding;
+ if (fields.vec.size())
+ fields.vec.back()->padding = padding;
+ }
+
+ Offset<reflection::Object> Serialize(FlatBufferBuilder *builder, const Parser &parser) const;
+
+ bool Deserialize(Parser &parser, const reflection::Object *object);
+
+ SymbolTable<FieldDef> fields;
+
+ bool fixed; // If it's a struct, not a table.
+ bool predecl; // If it's used before it was defined.
+ bool sortbysize; // Whether fields come in the declaration or size order.
+ bool has_key; // It has a key field.
+ size_t minalign; // What the whole object needs to be aligned to.
+ size_t bytesize; // Size if fixed.
+
+ flatbuffers::unique_ptr<std::string> original_location;
+};
+
+struct EnumDef;
+struct EnumValBuilder;
+
+struct EnumVal
+{
+ Offset<reflection::EnumVal> Serialize(FlatBufferBuilder *builder, const Parser &parser) const;
+
+ bool Deserialize(const Parser &parser, const reflection::EnumVal *val);
+
+ uint64_t GetAsUInt64() const { return static_cast<uint64_t>(value); }
+ int64_t GetAsInt64() const { return value; }
+ bool IsZero() const { return 0 == value; }
+ bool IsNonZero() const { return !IsZero(); }
+
+ std::string name;
+ std::vector<std::string> doc_comment;
+ Type union_type;
+
+private:
+ friend EnumDef;
+ friend EnumValBuilder;
+ friend bool operator==(const EnumVal &lhs, const EnumVal &rhs);
+
+ EnumVal(const std::string &_name, int64_t _val) : name(_name), value(_val) {}
+ EnumVal() : value(0) {}
+
+ int64_t value;
+};
+
+struct EnumDef : public Definition
+{
+ EnumDef() : is_union(false), uses_multiple_type_instances(false) {}
+
+ Offset<reflection::Enum> Serialize(FlatBufferBuilder *builder, const Parser &parser) const;
+
+ bool Deserialize(Parser &parser, const reflection::Enum *values);
+
+ template <typename T> void ChangeEnumValue(EnumVal *ev, T new_val);
+ void SortByValue();
+ void RemoveDuplicates();
+
+ std::string AllFlags() const;
+ const EnumVal *MinValue() const;
+ const EnumVal *MaxValue() const;
+ // Returns the number of integer steps from v1 to v2.
+ uint64_t Distance(const EnumVal *v1, const EnumVal *v2) const;
+ // Returns the number of integer steps from Min to Max.
+ uint64_t Distance() const { return Distance(MinValue(), MaxValue()); }
+
+ EnumVal *ReverseLookup(int64_t enum_idx, bool skip_union_default = false) const;
+ EnumVal *FindByValue(const std::string &constant) const;
+
+ std::string ToString(const EnumVal &ev) const
+ {
+ return IsUInt64() ? NumToString(ev.GetAsUInt64()) : NumToString(ev.GetAsInt64());
+ }
+
+ size_t size() const { return vals.vec.size(); }
+
+ const std::vector<EnumVal *> &Vals() const { return vals.vec; }
+
+ const EnumVal *Lookup(const std::string &enum_name) const { return vals.Lookup(enum_name); }
+
+ bool is_union;
+ // Type is a union which uses type aliases where at least one type is
+ // available under two different names.
+ bool uses_multiple_type_instances;
+ Type underlying_type;
+
+private:
+ bool IsUInt64() const { return (BASE_TYPE_ULONG == underlying_type.base_type); }
+
+ friend EnumValBuilder;
+ SymbolTable<EnumVal> vals;
+};
+
+inline bool IsString(const Type &type) { return type.base_type == BASE_TYPE_STRING; }
+
+inline bool IsStruct(const Type &type)
+{
+ return type.base_type == BASE_TYPE_STRUCT && type.struct_def->fixed;
+}
+
+inline bool IsUnion(const Type &type)
+{
+ return type.enum_def != nullptr && type.enum_def->is_union;
+}
+
+inline bool IsVector(const Type &type) { return type.base_type == BASE_TYPE_VECTOR; }
+
+inline bool IsArray(const Type &type) { return type.base_type == BASE_TYPE_ARRAY; }
+
+inline bool IsSeries(const Type &type) { return IsVector(type) || IsArray(type); }
+
+inline bool IsEnum(const Type &type)
+{
+ return type.enum_def != nullptr && IsInteger(type.base_type);
+}
+
+inline size_t InlineSize(const Type &type)
+{
+ return IsStruct(type) ? type.struct_def->bytesize
+ : (IsArray(type) ? InlineSize(type.VectorType()) * type.fixed_length
+ : SizeOf(type.base_type));
+}
+
+inline size_t InlineAlignment(const Type &type)
+{
+ if (IsStruct(type))
+ {
+ return type.struct_def->minalign;
+ }
+ else if (IsArray(type))
+ {
+ return IsStruct(type.VectorType()) ? type.struct_def->minalign : SizeOf(type.element);
+ }
+ else
+ {
+ return SizeOf(type.base_type);
+ }
+}
+inline bool operator==(const EnumVal &lhs, const EnumVal &rhs) { return lhs.value == rhs.value; }
+inline bool operator!=(const EnumVal &lhs, const EnumVal &rhs) { return !(lhs == rhs); }
+
+inline bool EqualByName(const Type &a, const Type &b)
+{
+ return a.base_type == b.base_type && a.element == b.element &&
+ (a.struct_def == b.struct_def || a.struct_def->name == b.struct_def->name) &&
+ (a.enum_def == b.enum_def || a.enum_def->name == b.enum_def->name);
+}
+
+struct RPCCall : public Definition
+{
+ Offset<reflection::RPCCall> Serialize(FlatBufferBuilder *builder, const Parser &parser) const;
+
+ bool Deserialize(Parser &parser, const reflection::RPCCall *call);
+
+ StructDef *request, *response;
+};
+
+struct ServiceDef : public Definition
+{
+ Offset<reflection::Service> Serialize(FlatBufferBuilder *builder, const Parser &parser) const;
+ bool Deserialize(Parser &parser, const reflection::Service *service);
+
+ SymbolTable<RPCCall> calls;
+};
+
+// Container of options that may apply to any of the source/text generators.
+struct IDLOptions
+{
+ bool gen_jvmstatic;
+ // Use flexbuffers instead for binary and text generation
+ bool use_flexbuffers;
+ bool strict_json;
+ bool output_default_scalars_in_json;
+ int indent_step;
+ bool output_enum_identifiers;
+ bool prefixed_enums;
+ bool scoped_enums;
+ bool include_dependence_headers;
+ bool mutable_buffer;
+ bool one_file;
+ bool proto_mode;
+ bool proto_oneof_union;
+ bool generate_all;
+ bool skip_unexpected_fields_in_json;
+ bool generate_name_strings;
+ bool generate_object_based_api;
+ bool gen_compare;
+ std::string cpp_object_api_pointer_type;
+ std::string cpp_object_api_string_type;
+ bool cpp_object_api_string_flexible_constructor;
+ bool cpp_direct_copy;
+ bool gen_nullable;
+ bool java_checkerframework;
+ bool gen_generated;
+ std::string object_prefix;
+ std::string object_suffix;
+ bool union_value_namespacing;
+ bool allow_non_utf8;
+ bool natural_utf8;
+ std::string include_prefix;
+ bool keep_include_path;
+ bool binary_schema_comments;
+ bool binary_schema_builtins;
+ bool binary_schema_gen_embed;
+ std::string go_import;
+ std::string go_namespace;
+ bool protobuf_ascii_alike;
+ bool size_prefixed;
+ std::string root_type;
+ bool force_defaults;
+ bool java_primitive_has_method;
+ bool cs_gen_json_serializer;
+ std::vector<std::string> cpp_includes;
+ std::string cpp_std;
+ bool cpp_static_reflection;
+ std::string proto_namespace_suffix;
+ std::string filename_suffix;
+ std::string filename_extension;
+ bool no_warnings;
+
+ // Possible options for the more general generator below.
+ enum Language
+ {
+ kJava = 1 << 0,
+ kCSharp = 1 << 1,
+ kGo = 1 << 2,
+ kCpp = 1 << 3,
+ kPython = 1 << 5,
+ kPhp = 1 << 6,
+ kJson = 1 << 7,
+ kBinary = 1 << 8,
+ kTs = 1 << 9,
+ kJsonSchema = 1 << 10,
+ kDart = 1 << 11,
+ kLua = 1 << 12,
+ kLobster = 1 << 13,
+ kRust = 1 << 14,
+ kKotlin = 1 << 15,
+ kSwift = 1 << 16,
+ kMAX
+ };
+
+ Language lang;
+
+ enum MiniReflect
+ {
+ kNone,
+ kTypes,
+ kTypesAndNames
+ };
+
+ MiniReflect mini_reflect;
+
+ // If set, require all fields in a table to be explicitly numbered.
+ bool require_explicit_ids;
+
+ // The corresponding language bit will be set if a language is included
+ // for code generation.
+ unsigned long lang_to_generate;
+
+ // If set (default behavior), empty string fields will be set to nullptr to
+ // make the flatbuffer more compact.
+ bool set_empty_strings_to_null;
+
+ // If set (default behavior), empty vector fields will be set to nullptr to
+ // make the flatbuffer more compact.
+ bool set_empty_vectors_to_null;
+
+ IDLOptions()
+ : gen_jvmstatic(false), use_flexbuffers(false), strict_json(false),
+ output_default_scalars_in_json(false), indent_step(2), output_enum_identifiers(true),
+ prefixed_enums(true), scoped_enums(false), include_dependence_headers(true),
+ mutable_buffer(false), one_file(false), proto_mode(false), proto_oneof_union(false),
+ generate_all(false), skip_unexpected_fields_in_json(false), generate_name_strings(false),
+ generate_object_based_api(false), gen_compare(false),
+ cpp_object_api_pointer_type("std::unique_ptr"),
+ cpp_object_api_string_flexible_constructor(false), cpp_direct_copy(true), gen_nullable(false),
+ java_checkerframework(false), gen_generated(false), object_suffix("T"),
+ union_value_namespacing(true), allow_non_utf8(false), natural_utf8(false),
+ keep_include_path(false), binary_schema_comments(false), binary_schema_builtins(false),
+ binary_schema_gen_embed(false), protobuf_ascii_alike(false), size_prefixed(false),
+ force_defaults(false), java_primitive_has_method(false), cs_gen_json_serializer(false),
+ cpp_static_reflection(false), filename_suffix("_generated"), filename_extension(),
+ no_warnings(false), lang(IDLOptions::kJava), mini_reflect(IDLOptions::kNone),
+ require_explicit_ids(false), lang_to_generate(0), set_empty_strings_to_null(true),
+ set_empty_vectors_to_null(true)
+ {
+ }
+};
+
+// This encapsulates where the parser is in the current source file.
+struct ParserState
+{
+ ParserState()
+ : cursor_(nullptr), line_start_(nullptr), line_(0), token_(-1),
+ attr_is_trivial_ascii_string_(true)
+ {
+ }
+
+protected:
+ void ResetState(const char *source)
+ {
+ cursor_ = source;
+ line_ = 0;
+ MarkNewLine();
+ }
+
+ void MarkNewLine()
+ {
+ line_start_ = cursor_;
+ line_ += 1;
+ }
+
+ int64_t CursorPosition() const
+ {
+ FLATBUFFERS_ASSERT(cursor_ && line_start_ && cursor_ >= line_start_);
+ return static_cast<int64_t>(cursor_ - line_start_);
+ }
+
+ const char *cursor_;
+ const char *line_start_;
+ int line_; // the current line being parsed
+ int token_;
+
+ // Flag: text in attribute_ is a true ASCII string without escape
+ // sequences. Only printable ASCII (without [\t\r\n]).
+ // Used for number-in-string (and base64 string in future).
+ bool attr_is_trivial_ascii_string_;
+ std::string attribute_;
+ std::vector<std::string> doc_comment_;
+};
+
+// A way to make error propagation less error prone by requiring values to be
+// checked.
+// Once you create a value of this type you must either:
+// - Call Check() on it.
+// - Copy or assign it to another value.
+// Failure to do so leads to an assert.
+// This guarantees that, as a return value, it cannot be ignored.
+class CheckedError
+{
+public:
+ explicit CheckedError(bool error) : is_error_(error), has_been_checked_(false) {}
+
+ CheckedError &operator=(const CheckedError &other)
+ {
+ is_error_ = other.is_error_;
+ has_been_checked_ = false;
+ other.has_been_checked_ = true;
+ return *this;
+ }
+
+ CheckedError(const CheckedError &other)
+ {
+ *this = other; // Use assignment operator.
+ }
+
+ ~CheckedError() { FLATBUFFERS_ASSERT(has_been_checked_); }
+
+ bool Check()
+ {
+ has_been_checked_ = true;
+ return is_error_;
+ }
+
+private:
+ bool is_error_;
+ mutable bool has_been_checked_;
+};
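+
+// A minimal usage sketch for CheckedError above (NextStep is a placeholder
+// for any function returning CheckedError): the value must be consumed via
+// Check() or propagated by copy/assignment.
+//   CheckedError err = NextStep();
+//   if (err.Check()) { /* handle the error */ }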
+
+// Additionally, in GCC we can get these errors statically, for additional
+// assurance:
+// clang-format off
+#ifdef __GNUC__
+#define FLATBUFFERS_CHECKED_ERROR CheckedError \
+ __attribute__((warn_unused_result))
+#else
+#define FLATBUFFERS_CHECKED_ERROR CheckedError
+#endif
+// clang-format on
+
+class Parser : public ParserState
+{
+public:
+ explicit Parser(const IDLOptions &options = IDLOptions())
+ : current_namespace_(nullptr), empty_namespace_(nullptr),
+ flex_builder_(256, flexbuffers::BUILDER_FLAG_SHARE_ALL), root_struct_def_(nullptr),
+ opts(options), uses_flexbuffers_(false), advanced_features_(0), source_(nullptr),
+ anonymous_counter_(0), parse_depth_counter_(0)
+ {
+ if (opts.force_defaults)
+ {
+ builder_.ForceDefaults(true);
+ }
+ // Start out with the empty namespace being current.
+ empty_namespace_ = new Namespace();
+ namespaces_.push_back(empty_namespace_);
+ current_namespace_ = empty_namespace_;
+ known_attributes_["deprecated"] = true;
+ known_attributes_["required"] = true;
+ known_attributes_["key"] = true;
+ known_attributes_["shared"] = true;
+ known_attributes_["hash"] = true;
+ known_attributes_["id"] = true;
+ known_attributes_["force_align"] = true;
+ known_attributes_["bit_flags"] = true;
+ known_attributes_["original_order"] = true;
+ known_attributes_["nested_flatbuffer"] = true;
+ known_attributes_["csharp_partial"] = true;
+ known_attributes_["streaming"] = true;
+ known_attributes_["idempotent"] = true;
+ known_attributes_["cpp_type"] = true;
+ known_attributes_["cpp_ptr_type"] = true;
+ known_attributes_["cpp_ptr_type_get"] = true;
+ known_attributes_["cpp_str_type"] = true;
+ known_attributes_["cpp_str_flex_ctor"] = true;
+ known_attributes_["native_inline"] = true;
+ known_attributes_["native_custom_alloc"] = true;
+ known_attributes_["native_type"] = true;
+ known_attributes_["native_type_pack_name"] = true;
+ known_attributes_["native_default"] = true;
+ known_attributes_["flexbuffer"] = true;
+ known_attributes_["private"] = true;
+ }
+
+ ~Parser()
+ {
+ for (auto it = namespaces_.begin(); it != namespaces_.end(); ++it)
+ {
+ delete *it;
+ }
+ }
+
+ // Parse the string containing either schema or JSON data, which will
+ // populate the SymbolTables or the FlatBufferBuilder above.
+ // include_paths is used to resolve any include statements, and typically
+ // should at least include the project path (where you loaded source_ from).
+ // include_paths must be nullptr terminated if specified.
+ // If include_paths is nullptr, it will attempt to load from the current
+ // directory.
+ // If the source was loaded from a file and isn't an include file,
+ // supply its name in source_filename.
+ // All paths specified in this call must be in POSIX format; if you accept
+ // paths from user input, please call PosixPath on them first.
+ bool Parse(const char *_source, const char **include_paths = nullptr,
+ const char *source_filename = nullptr);
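+ // A minimal Parse() usage sketch (illustrative only; `schema_source` is a
+ // schema string loaded by the caller and the include directory is a
+ // placeholder):
+ //
+ //   flatbuffers::Parser parser;
+ //   const char *include_dirs[] = {"/path/to/schemas", nullptr};
+ //   if (!parser.Parse(schema_source, include_dirs, "my_schema.fbs")) {
+ //     // parser.error_ holds a human readable message.
+ //   }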
+
+ bool ParseJson(const char *json, const char *json_filename = nullptr);
+
+ // Set the root type. May override the one set in the schema.
+ bool SetRootType(const char *name);
+
+ // Mark all definitions as already having code generated.
+ void MarkGenerated();
+
+ // Get the files recursively included by the given file. The returned
+ // container will have at least the given file.
+ std::set<std::string> GetIncludedFilesRecursive(const std::string &file_name) const;
+
+ // Fills builder_ with a binary version of the schema parsed.
+ // See reflection/reflection.fbs
+ void Serialize();
+
+ // Deserialize a schema buffer
+ bool Deserialize(const uint8_t *buf, const size_t size);
+
+ // Fills the internal structure as if the schema passed had been loaded by
+ // parsing with Parse, except that included filenames will not be populated.
+ bool Deserialize(const reflection::Schema *schema);
+
+ Type *DeserializeType(const reflection::Type *type);
+
+ // Checks that the schema represented by this parser is a safe evolution
+ // of the schema provided. Returns non-empty error on any problems.
+ std::string ConformTo(const Parser &base);
+
+ // Similar to Parse(), but now only accepts JSON to be parsed into a
+ // FlexBuffer.
+ bool ParseFlexBuffer(const char *source, const char *source_filename,
+ flexbuffers::Builder *builder);
+
+ StructDef *LookupStruct(const std::string &id) const;
+ StructDef *LookupStructThruParentNamespaces(const std::string &id) const;
+
+ std::string UnqualifiedName(const std::string &fullQualifiedName);
+
+ FLATBUFFERS_CHECKED_ERROR Error(const std::string &msg);
+
+ // @brief Verify that any of 'opts.lang_to_generate' supports Optional scalars
+ // in a schema.
+ // @param opts Options used to parse a schema and generate code.
+ static bool SupportsOptionalScalars(const flatbuffers::IDLOptions &opts);
+
+private:
+ class ParseDepthGuard;
+
+ void Message(const std::string &msg);
+ void Warning(const std::string &msg);
+ FLATBUFFERS_CHECKED_ERROR ParseHexNum(int nibbles, uint64_t *val);
+ FLATBUFFERS_CHECKED_ERROR Next();
+ FLATBUFFERS_CHECKED_ERROR SkipByteOrderMark();
+ bool Is(int t) const;
+ bool IsIdent(const char *id) const;
+ FLATBUFFERS_CHECKED_ERROR Expect(int t);
+ std::string TokenToStringId(int t) const;
+ EnumDef *LookupEnum(const std::string &id);
+ FLATBUFFERS_CHECKED_ERROR ParseNamespacing(std::string *id, std::string *last);
+ FLATBUFFERS_CHECKED_ERROR ParseTypeIdent(Type &type);
+ FLATBUFFERS_CHECKED_ERROR ParseType(Type &type);
+ FLATBUFFERS_CHECKED_ERROR AddField(StructDef &struct_def, const std::string &name,
+ const Type &type, FieldDef **dest);
+ FLATBUFFERS_CHECKED_ERROR ParseField(StructDef &struct_def);
+ FLATBUFFERS_CHECKED_ERROR ParseString(Value &val, bool use_string_pooling);
+ FLATBUFFERS_CHECKED_ERROR ParseComma();
+ FLATBUFFERS_CHECKED_ERROR ParseAnyValue(Value &val, FieldDef *field, size_t parent_fieldn,
+ const StructDef *parent_struct_def, uoffset_t count,
+ bool inside_vector = false);
+ template <typename F>
+ FLATBUFFERS_CHECKED_ERROR ParseTableDelimiters(size_t &fieldn, const StructDef *struct_def,
+ F body);
+ FLATBUFFERS_CHECKED_ERROR ParseTable(const StructDef &struct_def, std::string *value,
+ uoffset_t *ovalue);
+ void SerializeStruct(const StructDef &struct_def, const Value &val);
+ void SerializeStruct(FlatBufferBuilder &builder, const StructDef &struct_def, const Value &val);
+ template <typename F> FLATBUFFERS_CHECKED_ERROR ParseVectorDelimiters(uoffset_t &count, F body);
+ FLATBUFFERS_CHECKED_ERROR ParseVector(const Type &type, uoffset_t *ovalue, FieldDef *field,
+ size_t fieldn);
+ FLATBUFFERS_CHECKED_ERROR ParseArray(Value &array);
+ FLATBUFFERS_CHECKED_ERROR ParseNestedFlatbuffer(Value &val, FieldDef *field, size_t fieldn,
+ const StructDef *parent_struct_def);
+ FLATBUFFERS_CHECKED_ERROR ParseMetaData(SymbolTable<Value> *attributes);
+ FLATBUFFERS_CHECKED_ERROR TryTypedValue(const std::string *name, int dtoken, bool check, Value &e,
+ BaseType req, bool *destmatch);
+ FLATBUFFERS_CHECKED_ERROR ParseHash(Value &e, FieldDef *field);
+ FLATBUFFERS_CHECKED_ERROR TokenError();
+ FLATBUFFERS_CHECKED_ERROR ParseSingleValue(const std::string *name, Value &e, bool check_now);
+ FLATBUFFERS_CHECKED_ERROR ParseFunction(const std::string *name, Value &e);
+ FLATBUFFERS_CHECKED_ERROR ParseEnumFromString(const Type &type, std::string *result);
+ StructDef *LookupCreateStruct(const std::string &name, bool create_if_new = true,
+ bool definition = false);
+ FLATBUFFERS_CHECKED_ERROR ParseEnum(bool is_union, EnumDef **dest);
+ FLATBUFFERS_CHECKED_ERROR ParseNamespace();
+ FLATBUFFERS_CHECKED_ERROR StartStruct(const std::string &name, StructDef **dest);
+ FLATBUFFERS_CHECKED_ERROR StartEnum(const std::string &name, bool is_union, EnumDef **dest);
+ FLATBUFFERS_CHECKED_ERROR ParseDecl();
+ FLATBUFFERS_CHECKED_ERROR ParseService();
+ FLATBUFFERS_CHECKED_ERROR ParseProtoFields(StructDef *struct_def, bool isextend,
+ bool inside_oneof);
+ FLATBUFFERS_CHECKED_ERROR ParseProtoOption();
+ FLATBUFFERS_CHECKED_ERROR ParseProtoKey();
+ FLATBUFFERS_CHECKED_ERROR ParseProtoDecl();
+ FLATBUFFERS_CHECKED_ERROR ParseProtoCurliesOrIdent();
+ FLATBUFFERS_CHECKED_ERROR ParseTypeFromProtoType(Type *type);
+ FLATBUFFERS_CHECKED_ERROR SkipAnyJsonValue();
+ FLATBUFFERS_CHECKED_ERROR ParseFlexBufferNumericConstant(flexbuffers::Builder *builder);
+ FLATBUFFERS_CHECKED_ERROR ParseFlexBufferValue(flexbuffers::Builder *builder);
+ FLATBUFFERS_CHECKED_ERROR StartParseFile(const char *source, const char *source_filename);
+ FLATBUFFERS_CHECKED_ERROR ParseRoot(const char *_source, const char **include_paths,
+ const char *source_filename);
+ FLATBUFFERS_CHECKED_ERROR DoParse(const char *_source, const char **include_paths,
+ const char *source_filename, const char *include_filename);
+ FLATBUFFERS_CHECKED_ERROR DoParseJson();
+ FLATBUFFERS_CHECKED_ERROR CheckClash(std::vector<FieldDef *> &fields, StructDef *struct_def,
+ const char *suffix, BaseType baseType);
+ FLATBUFFERS_CHECKED_ERROR ParseAlignAttribute(const std::string &align_constant, size_t min_align,
+ size_t *align);
+
+ bool SupportsAdvancedUnionFeatures() const;
+ bool SupportsAdvancedArrayFeatures() const;
+ bool SupportsOptionalScalars() const;
+ bool SupportsDefaultVectorsAndStrings() const;
+ Namespace *UniqueNamespace(Namespace *ns);
+
+ FLATBUFFERS_CHECKED_ERROR RecurseError();
+ template <typename F> CheckedError Recurse(F f);
+
+public:
+ SymbolTable<Type> types_;
+ SymbolTable<StructDef> structs_;
+ SymbolTable<EnumDef> enums_;
+ SymbolTable<ServiceDef> services_;
+ std::vector<Namespace *> namespaces_;
+ Namespace *current_namespace_;
+ Namespace *empty_namespace_;
+ std::string error_; // User readable error_ if Parse() == false
+
+ FlatBufferBuilder builder_; // any data contained in the file
+ flexbuffers::Builder flex_builder_;
+ flexbuffers::Reference flex_root_;
+ StructDef *root_struct_def_;
+ std::string file_identifier_;
+ std::string file_extension_;
+
+ std::map<uint64_t, std::string> included_files_;
+ std::map<std::string, std::set<std::string>> files_included_per_file_;
+ std::vector<std::string> native_included_files_;
+
+ std::map<std::string, bool> known_attributes_;
+
+ IDLOptions opts;
+ bool uses_flexbuffers_;
+
+ uint64_t advanced_features_;
+
+private:
+ const char *source_;
+
+ std::string file_being_parsed_;
+
+ std::vector<std::pair<Value, FieldDef *>> field_stack_;
+
+ int anonymous_counter_;
+ int parse_depth_counter_; // stack-overflow guard
+};
+
+// Utility functions for multiple generators:
+
+extern std::string MakeCamel(const std::string &in, bool first = true);
+
+extern std::string MakeScreamingCamel(const std::string &in);
+
+// Generate text (JSON) from a given FlatBuffer, and a given Parser
+// object that has been populated with the corresponding schema.
+// If indent_step is 0, no indentation will be generated. Additionally,
+// if it is less than 0, no linefeeds will be generated either.
+// See idl_gen_text.cpp.
+// strict_json adds "quotes" around field names if true.
+// If the flatbuffer cannot be encoded in JSON (e.g., it contains non-UTF-8
+// byte arrays in String values), returns false.
+extern bool GenerateTextFromTable(const Parser &parser, const void *table,
+ const std::string &tablename, std::string *text);
+extern bool GenerateText(const Parser &parser, const void *flatbuffer, std::string *text);
+extern bool GenerateTextFile(const Parser &parser, const std::string &path,
+ const std::string &file_name);
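+
+// For example, a sketch of generating JSON from a buffer (illustrative only;
+// `schema_source` and `flatbuffer_data` are supplied by the caller):
+//
+//   flatbuffers::Parser parser;
+//   if (parser.Parse(schema_source))  // the schema must declare a root_type
+//   {
+//     std::string json;
+//     bool ok = flatbuffers::GenerateText(parser, flatbuffer_data, &json);
+//     // ok is false e.g. if the buffer contains non-UTF-8 string data.
+//   }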
+
+// Generate Json schema to string
+// See idl_gen_json_schema.cpp.
+extern bool GenerateJsonSchema(const Parser &parser, std::string *json);
+
+// Generate binary files from a given FlatBuffer, and a given Parser
+// object that has been populated with the corresponding schema.
+// See code_generators.cpp.
+extern bool GenerateBinary(const Parser &parser, const std::string &path,
+ const std::string &file_name);
+
+// Generate a C++ header from the definitions in the Parser object.
+// See idl_gen_cpp.
+extern bool GenerateCPP(const Parser &parser, const std::string &path,
+ const std::string &file_name);
+
+// Generate C# files from the definitions in the Parser object.
+// See idl_gen_csharp.cpp.
+extern bool GenerateCSharp(const Parser &parser, const std::string &path,
+ const std::string &file_name);
+
+extern bool GenerateDart(const Parser &parser, const std::string &path,
+ const std::string &file_name);
+
+// Generate Java files from the definitions in the Parser object.
+// See idl_gen_java.cpp.
+extern bool GenerateJava(const Parser &parser, const std::string &path,
+ const std::string &file_name);
+
+// Generate JavaScript or TypeScript code from the definitions in the Parser
+// object. See idl_gen_js.
+extern bool GenerateTS(const Parser &parser, const std::string &path, const std::string &file_name);
+
+// Generate Go files from the definitions in the Parser object.
+// See idl_gen_go.cpp.
+extern bool GenerateGo(const Parser &parser, const std::string &path, const std::string &file_name);
+
+// Generate Php code from the definitions in the Parser object.
+// See idl_gen_php.
+extern bool GeneratePhp(const Parser &parser, const std::string &path,
+ const std::string &file_name);
+
+// Generate Python files from the definitions in the Parser object.
+// See idl_gen_python.cpp.
+extern bool GeneratePython(const Parser &parser, const std::string &path,
+ const std::string &file_name);
+
+// Generate Lobster files from the definitions in the Parser object.
+// See idl_gen_lobster.cpp.
+extern bool GenerateLobster(const Parser &parser, const std::string &path,
+ const std::string &file_name);
+
+// Generate Lua files from the definitions in the Parser object.
+// See idl_gen_lua.cpp.
+extern bool GenerateLua(const Parser &parser, const std::string &path,
+ const std::string &file_name);
+
+// Generate Rust files from the definitions in the Parser object.
+// See idl_gen_rust.cpp.
+extern bool GenerateRust(const Parser &parser, const std::string &path,
+ const std::string &file_name);
+
+// Generate Json schema file
+// See idl_gen_json_schema.cpp.
+extern bool GenerateJsonSchema(const Parser &parser, const std::string &path,
+ const std::string &file_name);
+
+extern bool GenerateKotlin(const Parser &parser, const std::string &path,
+ const std::string &file_name);
+
+// Generate Swift classes.
+// See idl_gen_swift.cpp
+extern bool GenerateSwift(const Parser &parser, const std::string &path,
+ const std::string &file_name);
+
+// Generate a schema file from the internal representation, useful after
+// parsing a .proto schema.
+extern std::string GenerateFBS(const Parser &parser, const std::string &file_name);
+extern bool GenerateFBS(const Parser &parser, const std::string &path,
+ const std::string &file_name);
+
+// Generate a make rule for the generated TypeScript code.
+// See idl_gen_ts.cpp.
+extern std::string TSMakeRule(const Parser &parser, const std::string &path,
+ const std::string &file_name);
+
+// Generate a make rule for the generated C++ header.
+// See idl_gen_cpp.cpp.
+extern std::string CPPMakeRule(const Parser &parser, const std::string &path,
+ const std::string &file_name);
+
+// Generate a make rule for the generated Dart code
+// see idl_gen_dart.cpp
+extern std::string DartMakeRule(const Parser &parser, const std::string &path,
+ const std::string &file_name);
+
+// Generate a make rule for the generated Rust code.
+// See idl_gen_rust.cpp.
+extern std::string RustMakeRule(const Parser &parser, const std::string &path,
+ const std::string &file_name);
+
+// Generate a make rule for generated Java or C# files.
+// See code_generators.cpp.
+extern std::string JavaCSharpMakeRule(const Parser &parser, const std::string &path,
+ const std::string &file_name);
+
+// Generate a make rule for the generated text (JSON) files.
+// See idl_gen_text.cpp.
+extern std::string TextMakeRule(const Parser &parser, const std::string &path,
+ const std::string &file_names);
+
+// Generate a make rule for the generated binary files.
+// See code_generators.cpp.
+extern std::string BinaryMakeRule(const Parser &parser, const std::string &path,
+ const std::string &file_name);
+
+// Generate GRPC Cpp interfaces.
+// See idl_gen_grpc.cpp.
+bool GenerateCppGRPC(const Parser &parser, const std::string &path, const std::string &file_name);
+
+// Generate GRPC Go interfaces.
+// See idl_gen_grpc.cpp.
+bool GenerateGoGRPC(const Parser &parser, const std::string &path, const std::string &file_name);
+
+// Generate GRPC Java classes.
+// See idl_gen_grpc.cpp
+bool GenerateJavaGRPC(const Parser &parser, const std::string &path, const std::string &file_name);
+
+// Generate GRPC Python interfaces.
+// See idl_gen_grpc.cpp.
+bool GeneratePythonGRPC(const Parser &parser, const std::string &path,
+ const std::string &file_name);
+
+// Generate GRPC Swift interfaces.
+// See idl_gen_grpc.cpp.
+extern bool GenerateSwiftGRPC(const Parser &parser, const std::string &path,
+ const std::string &file_name);
+
+extern bool GenerateTSGRPC(const Parser &parser, const std::string &path,
+ const std::string &file_name);
+} // namespace flatbuffers
+
+#endif // FLATBUFFERS_IDL_H_
diff --git a/onert-micro/externals/flatbuffers/minireflect.h b/onert-micro/externals/flatbuffers/minireflect.h
new file mode 100644
index 000000000..8b733a4a5
--- /dev/null
+++ b/onert-micro/externals/flatbuffers/minireflect.h
@@ -0,0 +1,507 @@
+/*
+ * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2017 Google Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLATBUFFERS_MINIREFLECT_H_
+#define FLATBUFFERS_MINIREFLECT_H_
+
+#include "flatbuffers/flatbuffers.h"
+#include "flatbuffers/util.h"
+
+namespace flatbuffers
+{
+
+// Utilities that can be used with the "mini reflection" tables present
+// in generated code with --reflect-types (only types) or --reflect-names
+// (also names).
+// This allows basic reflection functionality such as pretty-printing
+// that does not require the use of the schema parser or loading of binary
+// schema files at runtime (reflection.h).
+
+// For any of the functions below that take `const TypeTable *`, you pass
+// `FooTypeTable()` if the type of the root is `Foo`.
+
+// First, a generic iterator that can be used by multiple algorithms.
+
+struct IterationVisitor
+{
+ // These mark the scope of a table or struct.
+ virtual void StartSequence() {}
+ virtual void EndSequence() {}
+ // Called for each field regardless of whether it is present or not.
+ // If not present, val == nullptr. set_idx counts only the fields that are
+ // actually set (present).
+ virtual void Field(size_t /*field_idx*/, size_t /*set_idx*/, ElementaryType /*type*/,
+ bool /*is_vector*/, const TypeTable * /*type_table*/, const char * /*name*/,
+ const uint8_t * /*val*/)
+ {
+ }
+ // Called for a value that is actually present, after a field, or as part
+ // of a vector.
+ virtual void UType(uint8_t, const char *) {}
+ virtual void Bool(bool) {}
+ virtual void Char(int8_t, const char *) {}
+ virtual void UChar(uint8_t, const char *) {}
+ virtual void Short(int16_t, const char *) {}
+ virtual void UShort(uint16_t, const char *) {}
+ virtual void Int(int32_t, const char *) {}
+ virtual void UInt(uint32_t, const char *) {}
+ virtual void Long(int64_t) {}
+ virtual void ULong(uint64_t) {}
+ virtual void Float(float) {}
+ virtual void Double(double) {}
+ virtual void String(const String *) {}
+ virtual void Unknown(const uint8_t *) {} // From a future version.
+ // These mark the scope of a vector.
+ virtual void StartVector() {}
+ virtual void EndVector() {}
+ virtual void Element(size_t /*i*/, ElementaryType /*type*/, const TypeTable * /*type_table*/,
+ const uint8_t * /*val*/)
+ {
+ }
+ virtual ~IterationVisitor() {}
+};
+
+inline size_t InlineSize(ElementaryType type, const TypeTable *type_table)
+{
+ switch (type)
+ {
+ case ET_UTYPE:
+ case ET_BOOL:
+ case ET_CHAR:
+ case ET_UCHAR:
+ return 1;
+ case ET_SHORT:
+ case ET_USHORT:
+ return 2;
+ case ET_INT:
+ case ET_UINT:
+ case ET_FLOAT:
+ case ET_STRING:
+ return 4;
+ case ET_LONG:
+ case ET_ULONG:
+ case ET_DOUBLE:
+ return 8;
+ case ET_SEQUENCE:
+ switch (type_table->st)
+ {
+ case ST_TABLE:
+ case ST_UNION:
+ return 4;
+ case ST_STRUCT:
+ return static_cast<size_t>(type_table->values[type_table->num_elems]);
+ default:
+ FLATBUFFERS_ASSERT(false);
+ return 1;
+ }
+ default:
+ FLATBUFFERS_ASSERT(false);
+ return 1;
+ }
+}
+
+inline int64_t LookupEnum(int64_t enum_val, const int64_t *values, size_t num_values)
+{
+ if (!values)
+ return enum_val;
+ for (size_t i = 0; i < num_values; i++)
+ {
+ if (enum_val == values[i])
+ return static_cast<int64_t>(i);
+ }
+ return -1; // Unknown enum value.
+}
+
+template <typename T> const char *EnumName(T tval, const TypeTable *type_table)
+{
+ if (!type_table || !type_table->names)
+ return nullptr;
+ auto i = LookupEnum(static_cast<int64_t>(tval), type_table->values, type_table->num_elems);
+ if (i >= 0 && i < static_cast<int64_t>(type_table->num_elems))
+ {
+ return type_table->names[i];
+ }
+ return nullptr;
+}
+
+void IterateObject(const uint8_t *obj, const TypeTable *type_table, IterationVisitor *visitor);
+
+inline void IterateValue(ElementaryType type, const uint8_t *val, const TypeTable *type_table,
+ const uint8_t *prev_val, soffset_t vector_index, IterationVisitor *visitor)
+{
+ switch (type)
+ {
+ case ET_UTYPE:
+ {
+ auto tval = ReadScalar<uint8_t>(val);
+ visitor->UType(tval, EnumName(tval, type_table));
+ break;
+ }
+ case ET_BOOL:
+ {
+ visitor->Bool(ReadScalar<uint8_t>(val) != 0);
+ break;
+ }
+ case ET_CHAR:
+ {
+ auto tval = ReadScalar<int8_t>(val);
+ visitor->Char(tval, EnumName(tval, type_table));
+ break;
+ }
+ case ET_UCHAR:
+ {
+ auto tval = ReadScalar<uint8_t>(val);
+ visitor->UChar(tval, EnumName(tval, type_table));
+ break;
+ }
+ case ET_SHORT:
+ {
+ auto tval = ReadScalar<int16_t>(val);
+ visitor->Short(tval, EnumName(tval, type_table));
+ break;
+ }
+ case ET_USHORT:
+ {
+ auto tval = ReadScalar<uint16_t>(val);
+ visitor->UShort(tval, EnumName(tval, type_table));
+ break;
+ }
+ case ET_INT:
+ {
+ auto tval = ReadScalar<int32_t>(val);
+ visitor->Int(tval, EnumName(tval, type_table));
+ break;
+ }
+ case ET_UINT:
+ {
+ auto tval = ReadScalar<uint32_t>(val);
+ visitor->UInt(tval, EnumName(tval, type_table));
+ break;
+ }
+ case ET_LONG:
+ {
+ visitor->Long(ReadScalar<int64_t>(val));
+ break;
+ }
+ case ET_ULONG:
+ {
+ visitor->ULong(ReadScalar<uint64_t>(val));
+ break;
+ }
+ case ET_FLOAT:
+ {
+ visitor->Float(ReadScalar<float>(val));
+ break;
+ }
+ case ET_DOUBLE:
+ {
+ visitor->Double(ReadScalar<double>(val));
+ break;
+ }
+ case ET_STRING:
+ {
+ val += ReadScalar<uoffset_t>(val);
+ visitor->String(reinterpret_cast<const String *>(val));
+ break;
+ }
+ case ET_SEQUENCE:
+ {
+ switch (type_table->st)
+ {
+ case ST_TABLE:
+ val += ReadScalar<uoffset_t>(val);
+ IterateObject(val, type_table, visitor);
+ break;
+ case ST_STRUCT:
+ IterateObject(val, type_table, visitor);
+ break;
+ case ST_UNION:
+ {
+ val += ReadScalar<uoffset_t>(val);
+ FLATBUFFERS_ASSERT(prev_val);
+ auto union_type = *prev_val; // Always a uint8_t.
+ if (vector_index >= 0)
+ {
+ auto type_vec = reinterpret_cast<const Vector<uint8_t> *>(prev_val);
+ union_type = type_vec->Get(static_cast<uoffset_t>(vector_index));
+ }
+ auto type_code_idx = LookupEnum(union_type, type_table->values, type_table->num_elems);
+ if (type_code_idx >= 0 && type_code_idx < static_cast<int32_t>(type_table->num_elems))
+ {
+ auto type_code = type_table->type_codes[type_code_idx];
+ switch (type_code.base_type)
+ {
+ case ET_SEQUENCE:
+ {
+ auto ref = type_table->type_refs[type_code.sequence_ref]();
+ IterateObject(val, ref, visitor);
+ break;
+ }
+ case ET_STRING:
+ visitor->String(reinterpret_cast<const String *>(val));
+ break;
+ default:
+ visitor->Unknown(val);
+ }
+ }
+ else
+ {
+ visitor->Unknown(val);
+ }
+ break;
+ }
+ case ST_ENUM:
+ FLATBUFFERS_ASSERT(false);
+ break;
+ }
+ break;
+ }
+ default:
+ {
+ visitor->Unknown(val);
+ break;
+ }
+ }
+}
+
+inline void IterateObject(const uint8_t *obj, const TypeTable *type_table,
+ IterationVisitor *visitor)
+{
+ visitor->StartSequence();
+ const uint8_t *prev_val = nullptr;
+ size_t set_idx = 0;
+ size_t array_idx = 0;
+ for (size_t i = 0; i < type_table->num_elems; i++)
+ {
+ auto type_code = type_table->type_codes[i];
+ auto type = static_cast<ElementaryType>(type_code.base_type);
+ auto is_repeating = type_code.is_repeating != 0;
+ auto ref_idx = type_code.sequence_ref;
+ const TypeTable *ref = nullptr;
+ if (ref_idx >= 0)
+ {
+ ref = type_table->type_refs[ref_idx]();
+ }
+ auto name = type_table->names ? type_table->names[i] : nullptr;
+ const uint8_t *val = nullptr;
+ if (type_table->st == ST_TABLE)
+ {
+ val = reinterpret_cast<const Table *>(obj)->GetAddressOf(
+ FieldIndexToOffset(static_cast<voffset_t>(i)));
+ }
+ else
+ {
+ val = obj + type_table->values[i];
+ }
+ visitor->Field(i, set_idx, type, is_repeating, ref, name, val);
+ if (val)
+ {
+ set_idx++;
+ if (is_repeating)
+ {
+ auto elem_ptr = val;
+ size_t size = 0;
+ if (type_table->st == ST_TABLE)
+ {
+ // variable length vector
+ val += ReadScalar<uoffset_t>(val);
+ auto vec = reinterpret_cast<const Vector<uint8_t> *>(val);
+ elem_ptr = vec->Data();
+ size = vec->size();
+ }
+ else
+ {
+ // otherwise fixed size array
+ size = type_table->array_sizes[array_idx];
+ ++array_idx;
+ }
+ visitor->StartVector();
+ for (size_t j = 0; j < size; j++)
+ {
+ visitor->Element(j, type, ref, elem_ptr);
+ IterateValue(type, elem_ptr, ref, prev_val, static_cast<soffset_t>(j), visitor);
+ elem_ptr += InlineSize(type, ref);
+ }
+ visitor->EndVector();
+ }
+ else
+ {
+ IterateValue(type, val, ref, prev_val, -1, visitor);
+ }
+ }
+ prev_val = val;
+ }
+ visitor->EndSequence();
+}
+
+inline void IterateFlatBuffer(const uint8_t *buffer, const TypeTable *type_table,
+ IterationVisitor *callback)
+{
+ IterateObject(GetRoot<uint8_t>(buffer), type_table, callback);
+}
+
+// Outputs a FlatBuffer to a string, conforming as closely as possible to the
+// JSON output generated by idl_gen_text.cpp.
+
+struct ToStringVisitor : public IterationVisitor
+{
+ std::string s;
+ std::string d;
+ bool q;
+ std::string in;
+ size_t indent_level;
+ bool vector_delimited;
+ ToStringVisitor(std::string delimiter, bool quotes, std::string indent, bool vdelimited = true)
+ : d(delimiter), q(quotes), in(indent), indent_level(0), vector_delimited(vdelimited)
+ {
+ }
+ ToStringVisitor(std::string delimiter)
+ : d(delimiter), q(false), in(""), indent_level(0), vector_delimited(true)
+ {
+ }
+
+ void append_indent()
+ {
+ for (size_t i = 0; i < indent_level; i++)
+ {
+ s += in;
+ }
+ }
+
+ void StartSequence()
+ {
+ s += "{";
+ s += d;
+ indent_level++;
+ }
+ void EndSequence()
+ {
+ s += d;
+ indent_level--;
+ append_indent();
+ s += "}";
+ }
+ void Field(size_t /*field_idx*/, size_t set_idx, ElementaryType /*type*/, bool /*is_vector*/,
+ const TypeTable * /*type_table*/, const char *name, const uint8_t *val)
+ {
+ if (!val)
+ return;
+ if (set_idx)
+ {
+ s += ",";
+ s += d;
+ }
+ append_indent();
+ if (name)
+ {
+ if (q)
+ s += "\"";
+ s += name;
+ if (q)
+ s += "\"";
+ s += ": ";
+ }
+ }
+ template <typename T> void Named(T x, const char *name)
+ {
+ if (name)
+ {
+ if (q)
+ s += "\"";
+ s += name;
+ if (q)
+ s += "\"";
+ }
+ else
+ {
+ s += NumToString(x);
+ }
+ }
+ void UType(uint8_t x, const char *name) { Named(x, name); }
+ void Bool(bool x) { s += x ? "true" : "false"; }
+ void Char(int8_t x, const char *name) { Named(x, name); }
+ void UChar(uint8_t x, const char *name) { Named(x, name); }
+ void Short(int16_t x, const char *name) { Named(x, name); }
+ void UShort(uint16_t x, const char *name) { Named(x, name); }
+ void Int(int32_t x, const char *name) { Named(x, name); }
+ void UInt(uint32_t x, const char *name) { Named(x, name); }
+ void Long(int64_t x) { s += NumToString(x); }
+ void ULong(uint64_t x) { s += NumToString(x); }
+ void Float(float x) { s += NumToString(x); }
+ void Double(double x) { s += NumToString(x); }
+ void String(const struct String *str)
+ {
+ EscapeString(str->c_str(), str->size(), &s, true, false);
+ }
+ void Unknown(const uint8_t *) { s += "(?)"; }
+ void StartVector()
+ {
+ s += "[";
+ if (vector_delimited)
+ {
+ s += d;
+ indent_level++;
+ append_indent();
+ }
+ else
+ {
+ s += " ";
+ }
+ }
+ void EndVector()
+ {
+ if (vector_delimited)
+ {
+ s += d;
+ indent_level--;
+ append_indent();
+ }
+ else
+ {
+ s += " ";
+ }
+ s += "]";
+ }
+ void Element(size_t i, ElementaryType /*type*/, const TypeTable * /*type_table*/,
+ const uint8_t * /*val*/)
+ {
+ if (i)
+ {
+ s += ",";
+ if (vector_delimited)
+ {
+ s += d;
+ append_indent();
+ }
+ else
+ {
+ s += " ";
+ }
+ }
+ }
+};
+
+inline std::string FlatBufferToString(const uint8_t *buffer, const TypeTable *type_table,
+ bool multi_line = false, bool vector_delimited = true)
+{
+ ToStringVisitor tostring_visitor(multi_line ? "\n" : " ", false, "", vector_delimited);
+ IterateFlatBuffer(buffer, type_table, &tostring_visitor);
+ return tostring_visitor.s;
+}
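+
+// For example (illustrative sketch; `Monster` and `buffer` are placeholders,
+// and MonsterTypeTable() is only emitted with --reflect-types/--reflect-names):
+//
+//   std::string text = flatbuffers::FlatBufferToString(buffer, MonsterTypeTable());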
+
+} // namespace flatbuffers
+
+#endif // FLATBUFFERS_MINIREFLECT_H_
diff --git a/onert-micro/externals/flatbuffers/pch/flatc_pch.h b/onert-micro/externals/flatbuffers/pch/flatc_pch.h
new file mode 100644
index 000000000..988fcf371
--- /dev/null
+++ b/onert-micro/externals/flatbuffers/pch/flatc_pch.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2017 Google Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLATBUFFERS_FLATC_PCH_H_
+#define FLATBUFFERS_FLATC_PCH_H_
+
+// stl
+#include <cmath>
+#include <sstream>
+#include <cassert>
+#include <unordered_set>
+#include <unordered_map>
+#include <iostream>
+#include <functional>
+#include <set>
+#include <iterator>
+#include <tuple>
+
+// flatbuffers
+#include "flatbuffers/pch/pch.h"
+#include "flatbuffers/code_generators.h"
+#include "flatbuffers/flatbuffers.h"
+#include "flatbuffers/flexbuffers.h"
+#include "flatbuffers/idl.h"
+
+#endif // FLATBUFFERS_FLATC_PCH_H_
diff --git a/onert-micro/externals/flatbuffers/pch/pch.h b/onert-micro/externals/flatbuffers/pch/pch.h
new file mode 100644
index 000000000..0e7886fb4
--- /dev/null
+++ b/onert-micro/externals/flatbuffers/pch/pch.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2017 Google Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLATBUFFERS_PCH_H_
+#define FLATBUFFERS_PCH_H_
+
+// stl
+#include <cstdint>
+#include <cstring>
+#include <algorithm>
+#include <list>
+#include <string>
+#include <utility>
+#include <iomanip>
+#include <map>
+#include <memory>
+#include <limits>
+#include <stack>
+#include <vector>
+#include <type_traits>
+
+// flatbuffers
+#include "flatbuffers/util.h"
+
+#endif // FLATBUFFERS_PCH_H_
diff --git a/onert-micro/externals/flatbuffers/reflection.h b/onert-micro/externals/flatbuffers/reflection.h
new file mode 100644
index 000000000..8e2b155f7
--- /dev/null
+++ b/onert-micro/externals/flatbuffers/reflection.h
@@ -0,0 +1,520 @@
+/*
+ * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2015 Google Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLATBUFFERS_REFLECTION_H_
+#define FLATBUFFERS_REFLECTION_H_
+
+// This is somewhat of a circular dependency because flatc (and thus this
+// file) is needed to generate this header in the first place.
+// Should normally not be a problem since it can be generated by the
+// previous version of flatc whenever this code needs to change.
+// See reflection/generate_code.sh
+#include "flatbuffers/reflection_generated.h"
+
+// Helper functionality for reflection.
+
+namespace flatbuffers
+{
+
+// ------------------------- GETTERS -------------------------
+
+inline bool IsScalar(reflection::BaseType t)
+{
+ return t >= reflection::UType && t <= reflection::Double;
+}
+inline bool IsInteger(reflection::BaseType t)
+{
+ return t >= reflection::UType && t <= reflection::ULong;
+}
+inline bool IsFloat(reflection::BaseType t)
+{
+ return t == reflection::Float || t == reflection::Double;
+}
+inline bool IsLong(reflection::BaseType t)
+{
+ return t == reflection::Long || t == reflection::ULong;
+}
+
+// Size of a basic type, don't use with structs.
+inline size_t GetTypeSize(reflection::BaseType base_type)
+{
+ // This needs to correspond to the BaseType enum.
+ static size_t sizes[] = {
+ 0, // None
+ 1, // UType
+ 1, // Bool
+ 1, // Byte
+ 1, // UByte
+ 2, // Short
+ 2, // UShort
+ 4, // Int
+ 4, // UInt
+ 8, // Long
+ 8, // ULong
+ 4, // Float
+ 8, // Double
+ 4, // String
+ 4, // Vector
+ 4, // Obj
+ 4, // Union
+ 0, // Array. Only used in structs. 0 was chosen to prevent out-of-bounds
+ // errors.
+
+ 0 // MaxBaseType. This must be kept the last entry in this array.
+ };
+ static_assert(sizeof(sizes) / sizeof(size_t) == reflection::MaxBaseType + 1,
+ "Size of sizes[] array does not match the count of BaseType "
+ "enum values.");
+ return sizes[base_type];
+}
+
+// Same as above, but now correctly returns the size of a struct if
+// the field (or vector element) is a struct.
+inline size_t GetTypeSizeInline(reflection::BaseType base_type, int type_index,
+ const reflection::Schema &schema)
+{
+ if (base_type == reflection::Obj && schema.objects()->Get(type_index)->is_struct())
+ {
+ return schema.objects()->Get(type_index)->bytesize();
+ }
+ else
+ {
+ return GetTypeSize(base_type);
+ }
+}
+
+// Get the root, regardless of what type it is.
+inline Table *GetAnyRoot(uint8_t *flatbuf) { return GetMutableRoot<Table>(flatbuf); }
+inline const Table *GetAnyRoot(const uint8_t *flatbuf) { return GetRoot<Table>(flatbuf); }
+
+// Get a field's default, if you know it's an integer, and its exact type.
+template <typename T> T GetFieldDefaultI(const reflection::Field &field)
+{
+ FLATBUFFERS_ASSERT(sizeof(T) == GetTypeSize(field.type()->base_type()));
+ return static_cast<T>(field.default_integer());
+}
+
+// Get a field's default, if you know it's floating point and its exact type.
+template <typename T> T GetFieldDefaultF(const reflection::Field &field)
+{
+ FLATBUFFERS_ASSERT(sizeof(T) == GetTypeSize(field.type()->base_type()));
+ return static_cast<T>(field.default_real());
+}
+
+// Get a field, if you know it's an integer, and its exact type.
+template <typename T> T GetFieldI(const Table &table, const reflection::Field &field)
+{
+ FLATBUFFERS_ASSERT(sizeof(T) == GetTypeSize(field.type()->base_type()));
+ return table.GetField<T>(field.offset(), static_cast<T>(field.default_integer()));
+}
+
+// Get a field, if you know it's floating point and its exact type.
+template <typename T> T GetFieldF(const Table &table, const reflection::Field &field)
+{
+ FLATBUFFERS_ASSERT(sizeof(T) == GetTypeSize(field.type()->base_type()));
+ return table.GetField<T>(field.offset(), static_cast<T>(field.default_real()));
+}
+
+// Get a field, if you know it's a string.
+inline const String *GetFieldS(const Table &table, const reflection::Field &field)
+{
+ FLATBUFFERS_ASSERT(field.type()->base_type() == reflection::String);
+ return table.GetPointer<const String *>(field.offset());
+}
+
+// Get a field, if you know it's a vector.
+template <typename T> Vector<T> *GetFieldV(const Table &table, const reflection::Field &field)
+{
+ FLATBUFFERS_ASSERT(field.type()->base_type() == reflection::Vector &&
+ sizeof(T) == GetTypeSize(field.type()->element()));
+ return table.GetPointer<Vector<T> *>(field.offset());
+}
+
+// Get a field, if you know it's a vector, generically.
+// To actually access elements, use the return value together with
+// field.type()->element() in any of GetAnyVectorElemI below etc.
+inline VectorOfAny *GetFieldAnyV(const Table &table, const reflection::Field &field)
+{
+ return table.GetPointer<VectorOfAny *>(field.offset());
+}
+
+// Get a field, if you know it's a table.
+inline Table *GetFieldT(const Table &table, const reflection::Field &field)
+{
+ FLATBUFFERS_ASSERT(field.type()->base_type() == reflection::Obj ||
+ field.type()->base_type() == reflection::Union);
+ return table.GetPointer<Table *>(field.offset());
+}
+
+// Get a field, if you know it's a struct.
+inline const Struct *GetFieldStruct(const Table &table, const reflection::Field &field)
+{
+ // TODO: This does NOT check if the field is a table or struct, but we'd need
+ // access to the schema to check the is_struct flag.
+ FLATBUFFERS_ASSERT(field.type()->base_type() == reflection::Obj);
+ return table.GetStruct<const Struct *>(field.offset());
+}
+
+// Get a structure's field, if you know it's a struct.
+inline const Struct *GetFieldStruct(const Struct &structure, const reflection::Field &field)
+{
+ FLATBUFFERS_ASSERT(field.type()->base_type() == reflection::Obj);
+ return structure.GetStruct<const Struct *>(field.offset());
+}
+
+// Raw helper functions used below: get any value in memory as a 64bit int, a
+// double or a string.
+// All scalars get static_cast to an int64_t, strings use strtoull, every other
+// data type returns 0.
+int64_t GetAnyValueI(reflection::BaseType type, const uint8_t *data);
+// All scalars static cast to double, strings use strtod, every other data
+// type is 0.0.
+double GetAnyValueF(reflection::BaseType type, const uint8_t *data);
+// All scalars converted using stringstream, strings as-is, and all other
+// data types provide some level of debug-pretty-printing.
+std::string GetAnyValueS(reflection::BaseType type, const uint8_t *data,
+ const reflection::Schema *schema, int type_index);
+
+// Get any table field as a 64bit int, regardless of what type it is.
+inline int64_t GetAnyFieldI(const Table &table, const reflection::Field &field)
+{
+ auto field_ptr = table.GetAddressOf(field.offset());
+ return field_ptr ? GetAnyValueI(field.type()->base_type(), field_ptr) : field.default_integer();
+}
+
+// Get any table field as a double, regardless of what type it is.
+inline double GetAnyFieldF(const Table &table, const reflection::Field &field)
+{
+ auto field_ptr = table.GetAddressOf(field.offset());
+ return field_ptr ? GetAnyValueF(field.type()->base_type(), field_ptr) : field.default_real();
+}
+
+// Get any table field as a string, regardless of what type it is.
+// You may pass nullptr for the schema if you don't care to have fields that
+// are of table type pretty-printed.
+inline std::string GetAnyFieldS(const Table &table, const reflection::Field &field,
+ const reflection::Schema *schema)
+{
+ auto field_ptr = table.GetAddressOf(field.offset());
+ return field_ptr
+ ? GetAnyValueS(field.type()->base_type(), field_ptr, schema, field.type()->index())
+ : "";
+}
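+
+// For example, a sketch of reading a field by name via reflection
+// (illustrative only; `bfbs`, `flatbuf` and the field name are placeholders):
+//
+//   auto schema = reflection::GetSchema(bfbs);           // loaded .bfbs data
+//   auto field = schema->root_table()->fields()->LookupByKey("hp");
+//   auto root = flatbuffers::GetAnyRoot(flatbuf);        // the data buffer
+//   int64_t value = flatbuffers::GetAnyFieldI(*root, *field);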
+
+// Get any struct field as a 64bit int, regardless of what type it is.
+inline int64_t GetAnyFieldI(const Struct &st, const reflection::Field &field)
+{
+ return GetAnyValueI(field.type()->base_type(), st.GetAddressOf(field.offset()));
+}
+
+// Get any struct field as a double, regardless of what type it is.
+inline double GetAnyFieldF(const Struct &st, const reflection::Field &field)
+{
+ return GetAnyValueF(field.type()->base_type(), st.GetAddressOf(field.offset()));
+}
+
+// Get any struct field as a string, regardless of what type it is.
+inline std::string GetAnyFieldS(const Struct &st, const reflection::Field &field)
+{
+ return GetAnyValueS(field.type()->base_type(), st.GetAddressOf(field.offset()), nullptr, -1);
+}
+
+// Get any vector element as a 64bit int, regardless of what type it is.
+inline int64_t GetAnyVectorElemI(const VectorOfAny *vec, reflection::BaseType elem_type, size_t i)
+{
+ return GetAnyValueI(elem_type, vec->Data() + GetTypeSize(elem_type) * i);
+}
+
+// Get any vector element as a double, regardless of what type it is.
+inline double GetAnyVectorElemF(const VectorOfAny *vec, reflection::BaseType elem_type, size_t i)
+{
+ return GetAnyValueF(elem_type, vec->Data() + GetTypeSize(elem_type) * i);
+}
+
+// Get any vector element as a string, regardless of what type it is.
+inline std::string GetAnyVectorElemS(const VectorOfAny *vec, reflection::BaseType elem_type,
+ size_t i)
+{
+ return GetAnyValueS(elem_type, vec->Data() + GetTypeSize(elem_type) * i, nullptr, -1);
+}
+
+// Get a vector element that's a table/string/vector from a generic vector.
+// Pass Table/String/VectorOfAny as template parameter.
+// Warning: does no typechecking.
+template <typename T> T *GetAnyVectorElemPointer(const VectorOfAny *vec, size_t i)
+{
+ auto elem_ptr = vec->Data() + sizeof(uoffset_t) * i;
+ return reinterpret_cast<T *>(elem_ptr + ReadScalar<uoffset_t>(elem_ptr));
+}
+
+// Get the inline-address of a vector element. Useful for Structs (pass Struct
+// as template arg), or being able to address a range of scalars in-line.
+// Get elem_size from GetTypeSizeInline().
+// Note: the data is little-endian on all platforms; use EndianScalar()
+// instead of raw pointer access for scalars.
+template <typename T>
+T *GetAnyVectorElemAddressOf(const VectorOfAny *vec, size_t i, size_t elem_size)
+{
+ return reinterpret_cast<T *>(vec->Data() + elem_size * i);
+}
+
+// Similarly, for elements of tables.
+template <typename T> T *GetAnyFieldAddressOf(const Table &table, const reflection::Field &field)
+{
+ return reinterpret_cast<T *>(table.GetAddressOf(field.offset()));
+}
+
+// Similarly, for elements of structs.
+template <typename T> T *GetAnyFieldAddressOf(const Struct &st, const reflection::Field &field)
+{
+ return reinterpret_cast<T *>(st.GetAddressOf(field.offset()));
+}
+
+// ------------------------- SETTERS -------------------------
+
+// Set any scalar field, if you know its exact type.
+template <typename T> bool SetField(Table *table, const reflection::Field &field, T val)
+{
+ reflection::BaseType type = field.type()->base_type();
+ if (!IsScalar(type))
+ {
+ return false;
+ }
+ FLATBUFFERS_ASSERT(sizeof(T) == GetTypeSize(type));
+ T def;
+ if (IsInteger(type))
+ {
+ def = GetFieldDefaultI<T>(field);
+ }
+ else
+ {
+ FLATBUFFERS_ASSERT(IsFloat(type));
+ def = GetFieldDefaultF<T>(field);
+ }
+ return table->SetField(field.offset(), val, def);
+}
+
+// Raw helper functions used below: set any value in memory as a 64bit int, a
+// double or a string.
+// These work for all scalar values, but do nothing for other data types.
+// To set a string, see SetString below.
+void SetAnyValueI(reflection::BaseType type, uint8_t *data, int64_t val);
+void SetAnyValueF(reflection::BaseType type, uint8_t *data, double val);
+void SetAnyValueS(reflection::BaseType type, uint8_t *data, const char *val);
+
+// Set any table field as a 64bit int, regardless of what type it is.
+inline bool SetAnyFieldI(Table *table, const reflection::Field &field, int64_t val)
+{
+ auto field_ptr = table->GetAddressOf(field.offset());
+ if (!field_ptr)
+ return val == GetFieldDefaultI<int64_t>(field);
+ SetAnyValueI(field.type()->base_type(), field_ptr, val);
+ return true;
+}
+
+// Set any table field as a double, regardless of what type it is.
+inline bool SetAnyFieldF(Table *table, const reflection::Field &field, double val)
+{
+ auto field_ptr = table->GetAddressOf(field.offset());
+ if (!field_ptr)
+ return val == GetFieldDefaultF<double>(field);
+ SetAnyValueF(field.type()->base_type(), field_ptr, val);
+ return true;
+}
+
+// Set any table field as a string, regardless of what type it is.
+inline bool SetAnyFieldS(Table *table, const reflection::Field &field, const char *val)
+{
+ auto field_ptr = table->GetAddressOf(field.offset());
+ if (!field_ptr)
+ return false;
+ SetAnyValueS(field.type()->base_type(), field_ptr, val);
+ return true;
+}
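+
+// For example, a sketch of overwriting a scalar in place via reflection
+// (illustrative only; `schema` and `flatbuf` are placeholders as above, and
+// the buffer must be mutable):
+//
+//   auto field = schema->root_table()->fields()->LookupByKey("hp");
+//   auto root = flatbuffers::GetAnyRoot(flatbuf.data());  // std::vector<uint8_t>
+//   flatbuffers::SetAnyFieldI(root, *field, 200);
+//   // fails only if the field is absent and 200 differs from its default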
+
+// Set any struct field as a 64bit int, regardless of what type it is.
+inline void SetAnyFieldI(Struct *st, const reflection::Field &field, int64_t val)
+{
+ SetAnyValueI(field.type()->base_type(), st->GetAddressOf(field.offset()), val);
+}
+
+// Set any struct field as a double, regardless of what type it is.
+inline void SetAnyFieldF(Struct *st, const reflection::Field &field, double val)
+{
+ SetAnyValueF(field.type()->base_type(), st->GetAddressOf(field.offset()), val);
+}
+
+// Set any struct field as a string, regardless of what type it is.
+inline void SetAnyFieldS(Struct *st, const reflection::Field &field, const char *val)
+{
+ SetAnyValueS(field.type()->base_type(), st->GetAddressOf(field.offset()), val);
+}
+
+// Set any vector element as a 64bit int, regardless of what type it is.
+inline void SetAnyVectorElemI(VectorOfAny *vec, reflection::BaseType elem_type, size_t i,
+ int64_t val)
+{
+ SetAnyValueI(elem_type, vec->Data() + GetTypeSize(elem_type) * i, val);
+}
+
+// Set any vector element as a double, regardless of what type it is.
+inline void SetAnyVectorElemF(VectorOfAny *vec, reflection::BaseType elem_type, size_t i,
+ double val)
+{
+ SetAnyValueF(elem_type, vec->Data() + GetTypeSize(elem_type) * i, val);
+}
+
+// Set any vector element as a string, regardless of what type it is.
+inline void SetAnyVectorElemS(VectorOfAny *vec, reflection::BaseType elem_type, size_t i,
+ const char *val)
+{
+ SetAnyValueS(elem_type, vec->Data() + GetTypeSize(elem_type) * i, val);
+}
+
+// ------------------------- RESIZING SETTERS -------------------------
+
+// "smart" pointer for use with resizing vectors: turns a pointer inside
+// a vector into a relative offset, such that it is not affected by resizes.
+template <typename T, typename U> class pointer_inside_vector
+{
+public:
+ pointer_inside_vector(T *ptr, std::vector<U> &vec)
+ : offset_(reinterpret_cast<uint8_t *>(ptr) -
+ reinterpret_cast<uint8_t *>(flatbuffers::vector_data(vec))),
+ vec_(vec)
+ {
+ }
+
+ T *operator*() const
+ {
+ return reinterpret_cast<T *>(reinterpret_cast<uint8_t *>(flatbuffers::vector_data(vec_)) +
+ offset_);
+ }
+ T *operator->() const { return operator*(); }
+
+private:
+ size_t offset_;
+ std::vector<U> &vec_;
+};
+
+// Helper to create the above easily without specifying template args.
+template <typename T, typename U> pointer_inside_vector<T, U> piv(T *ptr, std::vector<U> &vec)
+{
+ return pointer_inside_vector<T, U>(ptr, vec);
+}
+
+inline const char *UnionTypeFieldSuffix() { return "_type"; }
+
+// Helper to figure out the actual table type a union refers to.
+inline const reflection::Object &GetUnionType(const reflection::Schema &schema,
+ const reflection::Object &parent,
+ const reflection::Field &unionfield,
+ const Table &table)
+{
+ auto enumdef = schema.enums()->Get(unionfield.type()->index());
+ // TODO: this is clumsy and slow, but no other way to find it?
+ auto type_field =
+ parent.fields()->LookupByKey((unionfield.name()->str() + UnionTypeFieldSuffix()).c_str());
+ FLATBUFFERS_ASSERT(type_field);
+ auto union_type = GetFieldI<uint8_t>(table, *type_field);
+ auto enumval = enumdef->values()->LookupByKey(union_type);
+ return *enumval->object();
+}
+
+// Changes the contents of a string inside a FlatBuffer. FlatBuffer must
+// live inside a std::vector so we can resize the buffer if needed.
+// "str" must live inside "flatbuf" and may be invalidated after this call.
+// If your FlatBuffer's root table is not the schema's root table, you should
+// pass in your root_table type as well.
+void SetString(const reflection::Schema &schema, const std::string &val, const String *str,
+ std::vector<uint8_t> *flatbuf, const reflection::Object *root_table = nullptr);
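+
+// For example (illustrative sketch; `schema`, `flatbuf` and the "name" field
+// are placeholders):
+//
+//   auto field = schema->root_table()->fields()->LookupByKey("name");
+//   auto root = flatbuffers::GetAnyRoot(flatbuf.data());
+//   auto str = flatbuffers::GetFieldS(*root, *field);
+//   flatbuffers::SetString(*schema, "new name", str, &flatbuf);
+//   // `root` and `str` may be dangling afterwards; re-derive them if needed.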
+
+// Resizes a flatbuffers::Vector inside a FlatBuffer. FlatBuffer must
+// live inside a std::vector so we can resize the buffer if needed.
+// "vec" must live inside "flatbuf" and may be invalidated after this call.
+// If your FlatBuffer's root table is not the schema's root table, you should
+// pass in your root_table type as well.
+uint8_t *ResizeAnyVector(const reflection::Schema &schema, uoffset_t newsize,
+ const VectorOfAny *vec, uoffset_t num_elems, uoffset_t elem_size,
+ std::vector<uint8_t> *flatbuf,
+ const reflection::Object *root_table = nullptr);
+
+template <typename T>
+void ResizeVector(const reflection::Schema &schema, uoffset_t newsize, T val, const Vector<T> *vec,
+ std::vector<uint8_t> *flatbuf, const reflection::Object *root_table = nullptr)
+{
+ auto delta_elem = static_cast<int>(newsize) - static_cast<int>(vec->size());
+ auto newelems =
+ ResizeAnyVector(schema, newsize, reinterpret_cast<const VectorOfAny *>(vec), vec->size(),
+ static_cast<uoffset_t>(sizeof(T)), flatbuf, root_table);
+ // Set new elements to "val".
+ for (int i = 0; i < delta_elem; i++)
+ {
+ auto loc = newelems + i * sizeof(T);
+ auto is_scalar = flatbuffers::is_scalar<T>::value;
+ if (is_scalar)
+ {
+ WriteScalar(loc, val);
+ }
+ else
+ { // struct
+ *reinterpret_cast<T *>(loc) = val;
+ }
+ }
+}
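+
+// For example, a sketch of growing a scalar vector in place (illustrative
+// only; `schema`, `flatbuf` and the "inventory" field are placeholders):
+//
+//   auto field = schema->root_table()->fields()->LookupByKey("inventory");
+//   auto root = flatbuffers::GetAnyRoot(flatbuf.data());
+//   auto vec = flatbuffers::GetFieldV<uint8_t>(*root, *field);
+//   flatbuffers::ResizeVector<uint8_t>(*schema, 20, 0, vec, &flatbuf);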
+
+// Adds any new data (in the form of a new FlatBuffer) to an existing
+// FlatBuffer. This can be used when any of the above methods are not
+// sufficient, in particular for adding new tables and new fields.
+// This is potentially slightly less efficient than a FlatBuffer constructed
+// in one piece, since the new FlatBuffer doesn't share any vtables with the
+// existing one.
+// The return value can now be set using Vector::MutateOffset or SetFieldT
+// below.
+const uint8_t *AddFlatBuffer(std::vector<uint8_t> &flatbuf, const uint8_t *newbuf, size_t newlen);
+
+inline bool SetFieldT(Table *table, const reflection::Field &field, const uint8_t *val)
+{
+ FLATBUFFERS_ASSERT(sizeof(uoffset_t) == GetTypeSize(field.type()->base_type()));
+ return table->SetPointer(field.offset(), val);
+}
+
+// ------------------------- COPYING -------------------------
+
+// Generic copying of tables from a FlatBuffer into a FlatBuffer builder.
+// Can be used to do any kind of merging/selecting you may want to do out
+// of existing buffers. Also useful to reconstruct a whole buffer if the
+// above resizing functionality has introduced garbage in a buffer you want
+// to remove.
+// Note: this does not deal with DAGs correctly. If the table passed forms a
+// DAG, the copy will be a tree instead (with duplicates). Strings can be
+// shared however, by passing true for use_string_pooling.
+
+Offset<const Table *> CopyTable(FlatBufferBuilder &fbb, const reflection::Schema &schema,
+ const reflection::Object &objectdef, const Table &table,
+ bool use_string_pooling = false);
+
+// Verifies the provided flatbuffer using reflection.
+// root should point to the root type for this flatbuffer.
+// buf should point to the start of flatbuffer data.
+// length specifies the size of the flatbuffer data.
+bool Verify(const reflection::Schema &schema, const reflection::Object &root, const uint8_t *buf,
+ size_t length, uoffset_t max_depth = 64, uoffset_t max_tables = 1000000);
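+
+// For example (illustrative sketch; `schema`, `buf` and `length` are
+// placeholders for a loaded .bfbs schema and the buffer to check):
+//
+//   bool ok = flatbuffers::Verify(*schema, *schema->root_table(), buf, length);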
+
+} // namespace flatbuffers
+
+#endif // FLATBUFFERS_REFLECTION_H_
diff --git a/onert-micro/externals/flatbuffers/reflection_generated.h b/onert-micro/externals/flatbuffers/reflection_generated.h
new file mode 100644
index 000000000..9c57dd1f3
--- /dev/null
+++ b/onert-micro/externals/flatbuffers/reflection_generated.h
@@ -0,0 +1,1257 @@
+/*
+ * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2017 Google Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// automatically generated by the FlatBuffers compiler, do not modify
+
+#ifndef FLATBUFFERS_GENERATED_REFLECTION_REFLECTION_H_
+#define FLATBUFFERS_GENERATED_REFLECTION_REFLECTION_H_
+
+#include "flatbuffers/flatbuffers.h"
+
+namespace reflection
+{
+
+struct Type;
+struct TypeBuilder;
+
+struct KeyValue;
+struct KeyValueBuilder;
+
+struct EnumVal;
+struct EnumValBuilder;
+
+struct Enum;
+struct EnumBuilder;
+
+struct Field;
+struct FieldBuilder;
+
+struct Object;
+struct ObjectBuilder;
+
+struct RPCCall;
+struct RPCCallBuilder;
+
+struct Service;
+struct ServiceBuilder;
+
+struct Schema;
+struct SchemaBuilder;
+
+enum BaseType
+{
+ None = 0,
+ UType = 1,
+ Bool = 2,
+ Byte = 3,
+ UByte = 4,
+ Short = 5,
+ UShort = 6,
+ Int = 7,
+ UInt = 8,
+ Long = 9,
+ ULong = 10,
+ Float = 11,
+ Double = 12,
+ String = 13,
+ Vector = 14,
+ Obj = 15,
+ Union = 16,
+ Array = 17,
+ MaxBaseType = 18
+};
+
+inline const BaseType (&EnumValuesBaseType())[19]
+{
+ static const BaseType values[] = {None, UType, Bool, Byte, UByte, Short, UShort,
+ Int, UInt, Long, ULong, Float, Double, String,
+ Vector, Obj, Union, Array, MaxBaseType};
+ return values;
+}
+
+inline const char *const *EnumNamesBaseType()
+{
+ static const char *const names[20] = {"None", "UType", "Bool", "Byte", "UByte",
+ "Short", "UShort", "Int", "UInt", "Long",
+ "ULong", "Float", "Double", "String", "Vector",
+ "Obj", "Union", "Array", "MaxBaseType", nullptr};
+ return names;
+}
+
+inline const char *EnumNameBaseType(BaseType e)
+{
+ if (flatbuffers::IsOutRange(e, None, MaxBaseType))
+ return "";
+ const size_t index = static_cast<size_t>(e);
+ return EnumNamesBaseType()[index];
+}
+
+enum AdvancedFeatures
+{
+ AdvancedArrayFeatures = 1ULL,
+ AdvancedUnionFeatures = 2ULL,
+ OptionalScalars = 4ULL,
+ DefaultVectorsAndStrings = 8ULL
+};
+
+inline const AdvancedFeatures (&EnumValuesAdvancedFeatures())[4]
+{
+ static const AdvancedFeatures values[] = {AdvancedArrayFeatures, AdvancedUnionFeatures,
+ OptionalScalars, DefaultVectorsAndStrings};
+ return values;
+}
+
+inline const char *const *EnumNamesAdvancedFeatures()
+{
+ static const char *const names[9] = {"AdvancedArrayFeatures",
+ "AdvancedUnionFeatures",
+ "",
+ "OptionalScalars",
+ "",
+ "",
+ "",
+ "DefaultVectorsAndStrings",
+ nullptr};
+ return names;
+}
+
+inline const char *EnumNameAdvancedFeatures(AdvancedFeatures e)
+{
+ if (flatbuffers::IsOutRange(e, AdvancedArrayFeatures, DefaultVectorsAndStrings))
+ return "";
+ const size_t index = static_cast<size_t>(e) - static_cast<size_t>(AdvancedArrayFeatures);
+ return EnumNamesAdvancedFeatures()[index];
+}
+
+struct Type FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef TypeBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_BASE_TYPE = 4,
+ VT_ELEMENT = 6,
+ VT_INDEX = 8,
+ VT_FIXED_LENGTH = 10
+ };
+ reflection::BaseType base_type() const
+ {
+ return static_cast<reflection::BaseType>(GetField<int8_t>(VT_BASE_TYPE, 0));
+ }
+ reflection::BaseType element() const
+ {
+ return static_cast<reflection::BaseType>(GetField<int8_t>(VT_ELEMENT, 0));
+ }
+ int32_t index() const { return GetField<int32_t>(VT_INDEX, -1); }
+ uint16_t fixed_length() const { return GetField<uint16_t>(VT_FIXED_LENGTH, 0); }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_BASE_TYPE) &&
+ VerifyField<int8_t>(verifier, VT_ELEMENT) && VerifyField<int32_t>(verifier, VT_INDEX) &&
+ VerifyField<uint16_t>(verifier, VT_FIXED_LENGTH) && verifier.EndTable();
+ }
+};
+
+struct TypeBuilder
+{
+ typedef Type Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_base_type(reflection::BaseType base_type)
+ {
+ fbb_.AddElement<int8_t>(Type::VT_BASE_TYPE, static_cast<int8_t>(base_type), 0);
+ }
+ void add_element(reflection::BaseType element)
+ {
+ fbb_.AddElement<int8_t>(Type::VT_ELEMENT, static_cast<int8_t>(element), 0);
+ }
+ void add_index(int32_t index) { fbb_.AddElement<int32_t>(Type::VT_INDEX, index, -1); }
+ void add_fixed_length(uint16_t fixed_length)
+ {
+ fbb_.AddElement<uint16_t>(Type::VT_FIXED_LENGTH, fixed_length, 0);
+ }
+ explicit TypeBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<Type> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<Type>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<Type> CreateType(flatbuffers::FlatBufferBuilder &_fbb,
+ reflection::BaseType base_type = reflection::None,
+ reflection::BaseType element = reflection::None,
+ int32_t index = -1, uint16_t fixed_length = 0)
+{
+ TypeBuilder builder_(_fbb);
+ builder_.add_index(index);
+ builder_.add_fixed_length(fixed_length);
+ builder_.add_element(element);
+ builder_.add_base_type(base_type);
+ return builder_.Finish();
+}
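
Every table in this header follows the same generated pattern: a low-level XxxBuilder plus a CreateXxx convenience function that drives it. A minimal sketch (not part of the generated code) of the two equivalent ways to serialize a reflection::Type:

  #include "flatbuffers/reflection_generated.h"

  int main()
  {
    flatbuffers::FlatBufferBuilder fbb;
    // Convenience helper and explicit builder produce equivalent Type tables.
    auto t1 = reflection::CreateType(fbb, reflection::Int);
    reflection::TypeBuilder tb(fbb);
    tb.add_base_type(reflection::Int);
    auto t2 = tb.Finish();
    (void)t1;
    (void)t2;
    return 0;
  }
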
+
+struct KeyValue FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef KeyValueBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_KEY = 4,
+ VT_VALUE = 6
+ };
+ const flatbuffers::String *key() const { return GetPointer<const flatbuffers::String *>(VT_KEY); }
+ bool KeyCompareLessThan(const KeyValue *o) const { return *key() < *o->key(); }
+ int KeyCompareWithValue(const char *val) const { return strcmp(key()->c_str(), val); }
+ const flatbuffers::String *value() const
+ {
+ return GetPointer<const flatbuffers::String *>(VT_VALUE);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyOffsetRequired(verifier, VT_KEY) &&
+ verifier.VerifyString(key()) && VerifyOffset(verifier, VT_VALUE) &&
+ verifier.VerifyString(value()) && verifier.EndTable();
+ }
+};
+
+struct KeyValueBuilder
+{
+ typedef KeyValue Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_key(flatbuffers::Offset<flatbuffers::String> key)
+ {
+ fbb_.AddOffset(KeyValue::VT_KEY, key);
+ }
+ void add_value(flatbuffers::Offset<flatbuffers::String> value)
+ {
+ fbb_.AddOffset(KeyValue::VT_VALUE, value);
+ }
+ explicit KeyValueBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<KeyValue> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<KeyValue>(end);
+ fbb_.Required(o, KeyValue::VT_KEY);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<KeyValue>
+CreateKeyValue(flatbuffers::FlatBufferBuilder &_fbb,
+ flatbuffers::Offset<flatbuffers::String> key = 0,
+ flatbuffers::Offset<flatbuffers::String> value = 0)
+{
+ KeyValueBuilder builder_(_fbb);
+ builder_.add_value(value);
+ builder_.add_key(key);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<KeyValue> CreateKeyValueDirect(flatbuffers::FlatBufferBuilder &_fbb,
+ const char *key = nullptr,
+ const char *value = nullptr)
+{
+ auto key__ = key ? _fbb.CreateString(key) : 0;
+ auto value__ = value ? _fbb.CreateString(value) : 0;
+ return reflection::CreateKeyValue(_fbb, key__, value__);
+}
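
The *Direct variants additionally copy plain C strings (and std::vectors) into the buffer before delegating to the offset-based creator; note that key is a required field, which KeyValueBuilder::Finish() enforces via fbb_.Required(). A short sketch with an invented attribute pair:

  #include "flatbuffers/reflection_generated.h"

  int main()
  {
    flatbuffers::FlatBufferBuilder fbb;
    // key must be non-null because KeyValueBuilder::Finish() marks it as required.
    auto kv = reflection::CreateKeyValueDirect(fbb, "deprecated", "true");
    (void)kv;
    return 0;
  }
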
+
+struct EnumVal FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef EnumValBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_NAME = 4,
+ VT_VALUE = 6,
+ VT_OBJECT = 8,
+ VT_UNION_TYPE = 10,
+ VT_DOCUMENTATION = 12
+ };
+ const flatbuffers::String *name() const
+ {
+ return GetPointer<const flatbuffers::String *>(VT_NAME);
+ }
+ int64_t value() const { return GetField<int64_t>(VT_VALUE, 0); }
+ bool KeyCompareLessThan(const EnumVal *o) const { return value() < o->value(); }
+ int KeyCompareWithValue(int64_t val) const
+ {
+ return static_cast<int>(value() > val) - static_cast<int>(value() < val);
+ }
+ const reflection::Object *object() const
+ {
+ return GetPointer<const reflection::Object *>(VT_OBJECT);
+ }
+ const reflection::Type *union_type() const
+ {
+ return GetPointer<const reflection::Type *>(VT_UNION_TYPE);
+ }
+ const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *documentation() const
+ {
+ return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *>(
+ VT_DOCUMENTATION);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyOffsetRequired(verifier, VT_NAME) &&
+ verifier.VerifyString(name()) && VerifyField<int64_t>(verifier, VT_VALUE) &&
+ VerifyOffset(verifier, VT_OBJECT) && verifier.VerifyTable(object()) &&
+ VerifyOffset(verifier, VT_UNION_TYPE) && verifier.VerifyTable(union_type()) &&
+ VerifyOffset(verifier, VT_DOCUMENTATION) && verifier.VerifyVector(documentation()) &&
+ verifier.VerifyVectorOfStrings(documentation()) && verifier.EndTable();
+ }
+};
+
+struct EnumValBuilder
+{
+ typedef EnumVal Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_name(flatbuffers::Offset<flatbuffers::String> name)
+ {
+ fbb_.AddOffset(EnumVal::VT_NAME, name);
+ }
+ void add_value(int64_t value) { fbb_.AddElement<int64_t>(EnumVal::VT_VALUE, value, 0); }
+ void add_object(flatbuffers::Offset<reflection::Object> object)
+ {
+ fbb_.AddOffset(EnumVal::VT_OBJECT, object);
+ }
+ void add_union_type(flatbuffers::Offset<reflection::Type> union_type)
+ {
+ fbb_.AddOffset(EnumVal::VT_UNION_TYPE, union_type);
+ }
+ void add_documentation(
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>>
+ documentation)
+ {
+ fbb_.AddOffset(EnumVal::VT_DOCUMENTATION, documentation);
+ }
+ explicit EnumValBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<EnumVal> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<EnumVal>(end);
+ fbb_.Required(o, EnumVal::VT_NAME);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<EnumVal> CreateEnumVal(
+ flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset<flatbuffers::String> name = 0,
+ int64_t value = 0, flatbuffers::Offset<reflection::Object> object = 0,
+ flatbuffers::Offset<reflection::Type> union_type = 0,
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> documentation =
+ 0)
+{
+ EnumValBuilder builder_(_fbb);
+ builder_.add_value(value);
+ builder_.add_documentation(documentation);
+ builder_.add_union_type(union_type);
+ builder_.add_object(object);
+ builder_.add_name(name);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<EnumVal> CreateEnumValDirect(
+ flatbuffers::FlatBufferBuilder &_fbb, const char *name = nullptr, int64_t value = 0,
+ flatbuffers::Offset<reflection::Object> object = 0,
+ flatbuffers::Offset<reflection::Type> union_type = 0,
+ const std::vector<flatbuffers::Offset<flatbuffers::String>> *documentation = nullptr)
+{
+ auto name__ = name ? _fbb.CreateString(name) : 0;
+ auto documentation__ =
+ documentation ? _fbb.CreateVector<flatbuffers::Offset<flatbuffers::String>>(*documentation) : 0;
+ return reflection::CreateEnumVal(_fbb, name__, value, object, union_type, documentation__);
+}
+
+struct Enum FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef EnumBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_NAME = 4,
+ VT_VALUES = 6,
+ VT_IS_UNION = 8,
+ VT_UNDERLYING_TYPE = 10,
+ VT_ATTRIBUTES = 12,
+ VT_DOCUMENTATION = 14
+ };
+ const flatbuffers::String *name() const
+ {
+ return GetPointer<const flatbuffers::String *>(VT_NAME);
+ }
+ bool KeyCompareLessThan(const Enum *o) const { return *name() < *o->name(); }
+ int KeyCompareWithValue(const char *val) const { return strcmp(name()->c_str(), val); }
+ const flatbuffers::Vector<flatbuffers::Offset<reflection::EnumVal>> *values() const
+ {
+ return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<reflection::EnumVal>> *>(
+ VT_VALUES);
+ }
+ bool is_union() const { return GetField<uint8_t>(VT_IS_UNION, 0) != 0; }
+ const reflection::Type *underlying_type() const
+ {
+ return GetPointer<const reflection::Type *>(VT_UNDERLYING_TYPE);
+ }
+ const flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>> *attributes() const
+ {
+ return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>> *>(
+ VT_ATTRIBUTES);
+ }
+ const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *documentation() const
+ {
+ return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *>(
+ VT_DOCUMENTATION);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyOffsetRequired(verifier, VT_NAME) &&
+ verifier.VerifyString(name()) && VerifyOffsetRequired(verifier, VT_VALUES) &&
+ verifier.VerifyVector(values()) && verifier.VerifyVectorOfTables(values()) &&
+ VerifyField<uint8_t>(verifier, VT_IS_UNION) &&
+ VerifyOffsetRequired(verifier, VT_UNDERLYING_TYPE) &&
+ verifier.VerifyTable(underlying_type()) && VerifyOffset(verifier, VT_ATTRIBUTES) &&
+ verifier.VerifyVector(attributes()) && verifier.VerifyVectorOfTables(attributes()) &&
+ VerifyOffset(verifier, VT_DOCUMENTATION) && verifier.VerifyVector(documentation()) &&
+ verifier.VerifyVectorOfStrings(documentation()) && verifier.EndTable();
+ }
+};
+
+struct EnumBuilder
+{
+ typedef Enum Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_name(flatbuffers::Offset<flatbuffers::String> name)
+ {
+ fbb_.AddOffset(Enum::VT_NAME, name);
+ }
+ void add_values(
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::EnumVal>>> values)
+ {
+ fbb_.AddOffset(Enum::VT_VALUES, values);
+ }
+ void add_is_union(bool is_union)
+ {
+ fbb_.AddElement<uint8_t>(Enum::VT_IS_UNION, static_cast<uint8_t>(is_union), 0);
+ }
+ void add_underlying_type(flatbuffers::Offset<reflection::Type> underlying_type)
+ {
+ fbb_.AddOffset(Enum::VT_UNDERLYING_TYPE, underlying_type);
+ }
+ void add_attributes(
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>>> attributes)
+ {
+ fbb_.AddOffset(Enum::VT_ATTRIBUTES, attributes);
+ }
+ void add_documentation(
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>>
+ documentation)
+ {
+ fbb_.AddOffset(Enum::VT_DOCUMENTATION, documentation);
+ }
+ explicit EnumBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<Enum> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<Enum>(end);
+ fbb_.Required(o, Enum::VT_NAME);
+ fbb_.Required(o, Enum::VT_VALUES);
+ fbb_.Required(o, Enum::VT_UNDERLYING_TYPE);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<Enum> CreateEnum(
+ flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset<flatbuffers::String> name = 0,
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::EnumVal>>> values = 0,
+ bool is_union = false, flatbuffers::Offset<reflection::Type> underlying_type = 0,
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>>> attributes =
+ 0,
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> documentation =
+ 0)
+{
+ EnumBuilder builder_(_fbb);
+ builder_.add_documentation(documentation);
+ builder_.add_attributes(attributes);
+ builder_.add_underlying_type(underlying_type);
+ builder_.add_values(values);
+ builder_.add_name(name);
+ builder_.add_is_union(is_union);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<Enum> CreateEnumDirect(
+ flatbuffers::FlatBufferBuilder &_fbb, const char *name = nullptr,
+ std::vector<flatbuffers::Offset<reflection::EnumVal>> *values = nullptr, bool is_union = false,
+ flatbuffers::Offset<reflection::Type> underlying_type = 0,
+ std::vector<flatbuffers::Offset<reflection::KeyValue>> *attributes = nullptr,
+ const std::vector<flatbuffers::Offset<flatbuffers::String>> *documentation = nullptr)
+{
+ auto name__ = name ? _fbb.CreateString(name) : 0;
+ auto values__ = values ? _fbb.CreateVectorOfSortedTables<reflection::EnumVal>(values) : 0;
+ auto attributes__ =
+ attributes ? _fbb.CreateVectorOfSortedTables<reflection::KeyValue>(attributes) : 0;
+ auto documentation__ =
+ documentation ? _fbb.CreateVector<flatbuffers::Offset<flatbuffers::String>>(*documentation) : 0;
+ return reflection::CreateEnum(_fbb, name__, values__, is_union, underlying_type, attributes__,
+ documentation__);
+}
+
+struct Field FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef FieldBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_NAME = 4,
+ VT_TYPE = 6,
+ VT_ID = 8,
+ VT_OFFSET = 10,
+ VT_DEFAULT_INTEGER = 12,
+ VT_DEFAULT_REAL = 14,
+ VT_DEPRECATED = 16,
+ VT_REQUIRED = 18,
+ VT_KEY = 20,
+ VT_ATTRIBUTES = 22,
+ VT_DOCUMENTATION = 24,
+ VT_OPTIONAL = 26
+ };
+ const flatbuffers::String *name() const
+ {
+ return GetPointer<const flatbuffers::String *>(VT_NAME);
+ }
+ bool KeyCompareLessThan(const Field *o) const { return *name() < *o->name(); }
+ int KeyCompareWithValue(const char *val) const { return strcmp(name()->c_str(), val); }
+ const reflection::Type *type() const { return GetPointer<const reflection::Type *>(VT_TYPE); }
+ uint16_t id() const { return GetField<uint16_t>(VT_ID, 0); }
+ uint16_t offset() const { return GetField<uint16_t>(VT_OFFSET, 0); }
+ int64_t default_integer() const { return GetField<int64_t>(VT_DEFAULT_INTEGER, 0); }
+ double default_real() const { return GetField<double>(VT_DEFAULT_REAL, 0.0); }
+ bool deprecated() const { return GetField<uint8_t>(VT_DEPRECATED, 0) != 0; }
+ bool required() const { return GetField<uint8_t>(VT_REQUIRED, 0) != 0; }
+ bool key() const { return GetField<uint8_t>(VT_KEY, 0) != 0; }
+ const flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>> *attributes() const
+ {
+ return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>> *>(
+ VT_ATTRIBUTES);
+ }
+ const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *documentation() const
+ {
+ return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *>(
+ VT_DOCUMENTATION);
+ }
+ bool optional() const { return GetField<uint8_t>(VT_OPTIONAL, 0) != 0; }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyOffsetRequired(verifier, VT_NAME) &&
+ verifier.VerifyString(name()) && VerifyOffsetRequired(verifier, VT_TYPE) &&
+ verifier.VerifyTable(type()) && VerifyField<uint16_t>(verifier, VT_ID) &&
+ VerifyField<uint16_t>(verifier, VT_OFFSET) &&
+ VerifyField<int64_t>(verifier, VT_DEFAULT_INTEGER) &&
+ VerifyField<double>(verifier, VT_DEFAULT_REAL) &&
+ VerifyField<uint8_t>(verifier, VT_DEPRECATED) &&
+ VerifyField<uint8_t>(verifier, VT_REQUIRED) && VerifyField<uint8_t>(verifier, VT_KEY) &&
+ VerifyOffset(verifier, VT_ATTRIBUTES) && verifier.VerifyVector(attributes()) &&
+ verifier.VerifyVectorOfTables(attributes()) &&
+ VerifyOffset(verifier, VT_DOCUMENTATION) && verifier.VerifyVector(documentation()) &&
+ verifier.VerifyVectorOfStrings(documentation()) &&
+ VerifyField<uint8_t>(verifier, VT_OPTIONAL) && verifier.EndTable();
+ }
+};
+
+struct FieldBuilder
+{
+ typedef Field Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_name(flatbuffers::Offset<flatbuffers::String> name)
+ {
+ fbb_.AddOffset(Field::VT_NAME, name);
+ }
+ void add_type(flatbuffers::Offset<reflection::Type> type)
+ {
+ fbb_.AddOffset(Field::VT_TYPE, type);
+ }
+ void add_id(uint16_t id) { fbb_.AddElement<uint16_t>(Field::VT_ID, id, 0); }
+ void add_offset(uint16_t offset) { fbb_.AddElement<uint16_t>(Field::VT_OFFSET, offset, 0); }
+ void add_default_integer(int64_t default_integer)
+ {
+ fbb_.AddElement<int64_t>(Field::VT_DEFAULT_INTEGER, default_integer, 0);
+ }
+ void add_default_real(double default_real)
+ {
+ fbb_.AddElement<double>(Field::VT_DEFAULT_REAL, default_real, 0.0);
+ }
+ void add_deprecated(bool deprecated)
+ {
+ fbb_.AddElement<uint8_t>(Field::VT_DEPRECATED, static_cast<uint8_t>(deprecated), 0);
+ }
+ void add_required(bool required)
+ {
+ fbb_.AddElement<uint8_t>(Field::VT_REQUIRED, static_cast<uint8_t>(required), 0);
+ }
+ void add_key(bool key) { fbb_.AddElement<uint8_t>(Field::VT_KEY, static_cast<uint8_t>(key), 0); }
+ void add_attributes(
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>>> attributes)
+ {
+ fbb_.AddOffset(Field::VT_ATTRIBUTES, attributes);
+ }
+ void add_documentation(
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>>
+ documentation)
+ {
+ fbb_.AddOffset(Field::VT_DOCUMENTATION, documentation);
+ }
+ void add_optional(bool optional)
+ {
+ fbb_.AddElement<uint8_t>(Field::VT_OPTIONAL, static_cast<uint8_t>(optional), 0);
+ }
+ explicit FieldBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<Field> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<Field>(end);
+ fbb_.Required(o, Field::VT_NAME);
+ fbb_.Required(o, Field::VT_TYPE);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<Field> CreateField(
+ flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset<flatbuffers::String> name = 0,
+ flatbuffers::Offset<reflection::Type> type = 0, uint16_t id = 0, uint16_t offset = 0,
+ int64_t default_integer = 0, double default_real = 0.0, bool deprecated = false,
+ bool required = false, bool key = false,
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>>> attributes =
+ 0,
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> documentation =
+ 0,
+ bool optional = false)
+{
+ FieldBuilder builder_(_fbb);
+ builder_.add_default_real(default_real);
+ builder_.add_default_integer(default_integer);
+ builder_.add_documentation(documentation);
+ builder_.add_attributes(attributes);
+ builder_.add_type(type);
+ builder_.add_name(name);
+ builder_.add_offset(offset);
+ builder_.add_id(id);
+ builder_.add_optional(optional);
+ builder_.add_key(key);
+ builder_.add_required(required);
+ builder_.add_deprecated(deprecated);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<Field> CreateFieldDirect(
+ flatbuffers::FlatBufferBuilder &_fbb, const char *name = nullptr,
+ flatbuffers::Offset<reflection::Type> type = 0, uint16_t id = 0, uint16_t offset = 0,
+ int64_t default_integer = 0, double default_real = 0.0, bool deprecated = false,
+ bool required = false, bool key = false,
+ std::vector<flatbuffers::Offset<reflection::KeyValue>> *attributes = nullptr,
+ const std::vector<flatbuffers::Offset<flatbuffers::String>> *documentation = nullptr,
+ bool optional = false)
+{
+ auto name__ = name ? _fbb.CreateString(name) : 0;
+ auto attributes__ =
+ attributes ? _fbb.CreateVectorOfSortedTables<reflection::KeyValue>(attributes) : 0;
+ auto documentation__ =
+ documentation ? _fbb.CreateVector<flatbuffers::Offset<flatbuffers::String>>(*documentation) : 0;
+ return reflection::CreateField(_fbb, name__, type, id, offset, default_integer, default_real,
+ deprecated, required, key, attributes__, documentation__,
+ optional);
+}
+
+struct Object FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef ObjectBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_NAME = 4,
+ VT_FIELDS = 6,
+ VT_IS_STRUCT = 8,
+ VT_MINALIGN = 10,
+ VT_BYTESIZE = 12,
+ VT_ATTRIBUTES = 14,
+ VT_DOCUMENTATION = 16
+ };
+ const flatbuffers::String *name() const
+ {
+ return GetPointer<const flatbuffers::String *>(VT_NAME);
+ }
+ bool KeyCompareLessThan(const Object *o) const { return *name() < *o->name(); }
+ int KeyCompareWithValue(const char *val) const { return strcmp(name()->c_str(), val); }
+ const flatbuffers::Vector<flatbuffers::Offset<reflection::Field>> *fields() const
+ {
+ return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<reflection::Field>> *>(
+ VT_FIELDS);
+ }
+ bool is_struct() const { return GetField<uint8_t>(VT_IS_STRUCT, 0) != 0; }
+ int32_t minalign() const { return GetField<int32_t>(VT_MINALIGN, 0); }
+ int32_t bytesize() const { return GetField<int32_t>(VT_BYTESIZE, 0); }
+ const flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>> *attributes() const
+ {
+ return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>> *>(
+ VT_ATTRIBUTES);
+ }
+ const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *documentation() const
+ {
+ return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *>(
+ VT_DOCUMENTATION);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyOffsetRequired(verifier, VT_NAME) &&
+ verifier.VerifyString(name()) && VerifyOffsetRequired(verifier, VT_FIELDS) &&
+ verifier.VerifyVector(fields()) && verifier.VerifyVectorOfTables(fields()) &&
+ VerifyField<uint8_t>(verifier, VT_IS_STRUCT) &&
+ VerifyField<int32_t>(verifier, VT_MINALIGN) &&
+ VerifyField<int32_t>(verifier, VT_BYTESIZE) && VerifyOffset(verifier, VT_ATTRIBUTES) &&
+ verifier.VerifyVector(attributes()) && verifier.VerifyVectorOfTables(attributes()) &&
+ VerifyOffset(verifier, VT_DOCUMENTATION) && verifier.VerifyVector(documentation()) &&
+ verifier.VerifyVectorOfStrings(documentation()) && verifier.EndTable();
+ }
+};
+
+struct ObjectBuilder
+{
+ typedef Object Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_name(flatbuffers::Offset<flatbuffers::String> name)
+ {
+ fbb_.AddOffset(Object::VT_NAME, name);
+ }
+ void add_fields(
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::Field>>> fields)
+ {
+ fbb_.AddOffset(Object::VT_FIELDS, fields);
+ }
+ void add_is_struct(bool is_struct)
+ {
+ fbb_.AddElement<uint8_t>(Object::VT_IS_STRUCT, static_cast<uint8_t>(is_struct), 0);
+ }
+ void add_minalign(int32_t minalign)
+ {
+ fbb_.AddElement<int32_t>(Object::VT_MINALIGN, minalign, 0);
+ }
+ void add_bytesize(int32_t bytesize)
+ {
+ fbb_.AddElement<int32_t>(Object::VT_BYTESIZE, bytesize, 0);
+ }
+ void add_attributes(
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>>> attributes)
+ {
+ fbb_.AddOffset(Object::VT_ATTRIBUTES, attributes);
+ }
+ void add_documentation(
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>>
+ documentation)
+ {
+ fbb_.AddOffset(Object::VT_DOCUMENTATION, documentation);
+ }
+ explicit ObjectBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<Object> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<Object>(end);
+ fbb_.Required(o, Object::VT_NAME);
+ fbb_.Required(o, Object::VT_FIELDS);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<Object> CreateObject(
+ flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset<flatbuffers::String> name = 0,
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::Field>>> fields = 0,
+ bool is_struct = false, int32_t minalign = 0, int32_t bytesize = 0,
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>>> attributes =
+ 0,
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> documentation =
+ 0)
+{
+ ObjectBuilder builder_(_fbb);
+ builder_.add_documentation(documentation);
+ builder_.add_attributes(attributes);
+ builder_.add_bytesize(bytesize);
+ builder_.add_minalign(minalign);
+ builder_.add_fields(fields);
+ builder_.add_name(name);
+ builder_.add_is_struct(is_struct);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<Object> CreateObjectDirect(
+ flatbuffers::FlatBufferBuilder &_fbb, const char *name = nullptr,
+ std::vector<flatbuffers::Offset<reflection::Field>> *fields = nullptr, bool is_struct = false,
+ int32_t minalign = 0, int32_t bytesize = 0,
+ std::vector<flatbuffers::Offset<reflection::KeyValue>> *attributes = nullptr,
+ const std::vector<flatbuffers::Offset<flatbuffers::String>> *documentation = nullptr)
+{
+ auto name__ = name ? _fbb.CreateString(name) : 0;
+ auto fields__ = fields ? _fbb.CreateVectorOfSortedTables<reflection::Field>(fields) : 0;
+ auto attributes__ =
+ attributes ? _fbb.CreateVectorOfSortedTables<reflection::KeyValue>(attributes) : 0;
+ auto documentation__ =
+ documentation ? _fbb.CreateVector<flatbuffers::Offset<flatbuffers::String>>(*documentation) : 0;
+ return reflection::CreateObject(_fbb, name__, fields__, is_struct, minalign, bytesize,
+ attributes__, documentation__);
+}
+
+struct RPCCall FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef RPCCallBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_NAME = 4,
+ VT_REQUEST = 6,
+ VT_RESPONSE = 8,
+ VT_ATTRIBUTES = 10,
+ VT_DOCUMENTATION = 12
+ };
+ const flatbuffers::String *name() const
+ {
+ return GetPointer<const flatbuffers::String *>(VT_NAME);
+ }
+ bool KeyCompareLessThan(const RPCCall *o) const { return *name() < *o->name(); }
+ int KeyCompareWithValue(const char *val) const { return strcmp(name()->c_str(), val); }
+ const reflection::Object *request() const
+ {
+ return GetPointer<const reflection::Object *>(VT_REQUEST);
+ }
+ const reflection::Object *response() const
+ {
+ return GetPointer<const reflection::Object *>(VT_RESPONSE);
+ }
+ const flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>> *attributes() const
+ {
+ return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>> *>(
+ VT_ATTRIBUTES);
+ }
+ const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *documentation() const
+ {
+ return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *>(
+ VT_DOCUMENTATION);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyOffsetRequired(verifier, VT_NAME) &&
+ verifier.VerifyString(name()) && VerifyOffsetRequired(verifier, VT_REQUEST) &&
+ verifier.VerifyTable(request()) && VerifyOffsetRequired(verifier, VT_RESPONSE) &&
+ verifier.VerifyTable(response()) && VerifyOffset(verifier, VT_ATTRIBUTES) &&
+ verifier.VerifyVector(attributes()) && verifier.VerifyVectorOfTables(attributes()) &&
+ VerifyOffset(verifier, VT_DOCUMENTATION) && verifier.VerifyVector(documentation()) &&
+ verifier.VerifyVectorOfStrings(documentation()) && verifier.EndTable();
+ }
+};
+
+struct RPCCallBuilder
+{
+ typedef RPCCall Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_name(flatbuffers::Offset<flatbuffers::String> name)
+ {
+ fbb_.AddOffset(RPCCall::VT_NAME, name);
+ }
+ void add_request(flatbuffers::Offset<reflection::Object> request)
+ {
+ fbb_.AddOffset(RPCCall::VT_REQUEST, request);
+ }
+ void add_response(flatbuffers::Offset<reflection::Object> response)
+ {
+ fbb_.AddOffset(RPCCall::VT_RESPONSE, response);
+ }
+ void add_attributes(
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>>> attributes)
+ {
+ fbb_.AddOffset(RPCCall::VT_ATTRIBUTES, attributes);
+ }
+ void add_documentation(
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>>
+ documentation)
+ {
+ fbb_.AddOffset(RPCCall::VT_DOCUMENTATION, documentation);
+ }
+ explicit RPCCallBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<RPCCall> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<RPCCall>(end);
+ fbb_.Required(o, RPCCall::VT_NAME);
+ fbb_.Required(o, RPCCall::VT_REQUEST);
+ fbb_.Required(o, RPCCall::VT_RESPONSE);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<RPCCall> CreateRPCCall(
+ flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset<flatbuffers::String> name = 0,
+ flatbuffers::Offset<reflection::Object> request = 0,
+ flatbuffers::Offset<reflection::Object> response = 0,
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>>> attributes =
+ 0,
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> documentation =
+ 0)
+{
+ RPCCallBuilder builder_(_fbb);
+ builder_.add_documentation(documentation);
+ builder_.add_attributes(attributes);
+ builder_.add_response(response);
+ builder_.add_request(request);
+ builder_.add_name(name);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<RPCCall> CreateRPCCallDirect(
+ flatbuffers::FlatBufferBuilder &_fbb, const char *name = nullptr,
+ flatbuffers::Offset<reflection::Object> request = 0,
+ flatbuffers::Offset<reflection::Object> response = 0,
+ std::vector<flatbuffers::Offset<reflection::KeyValue>> *attributes = nullptr,
+ const std::vector<flatbuffers::Offset<flatbuffers::String>> *documentation = nullptr)
+{
+ auto name__ = name ? _fbb.CreateString(name) : 0;
+ auto attributes__ =
+ attributes ? _fbb.CreateVectorOfSortedTables<reflection::KeyValue>(attributes) : 0;
+ auto documentation__ =
+ documentation ? _fbb.CreateVector<flatbuffers::Offset<flatbuffers::String>>(*documentation) : 0;
+ return reflection::CreateRPCCall(_fbb, name__, request, response, attributes__, documentation__);
+}
+
+struct Service FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef ServiceBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_NAME = 4,
+ VT_CALLS = 6,
+ VT_ATTRIBUTES = 8,
+ VT_DOCUMENTATION = 10
+ };
+ const flatbuffers::String *name() const
+ {
+ return GetPointer<const flatbuffers::String *>(VT_NAME);
+ }
+ bool KeyCompareLessThan(const Service *o) const { return *name() < *o->name(); }
+ int KeyCompareWithValue(const char *val) const { return strcmp(name()->c_str(), val); }
+ const flatbuffers::Vector<flatbuffers::Offset<reflection::RPCCall>> *calls() const
+ {
+ return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<reflection::RPCCall>> *>(
+ VT_CALLS);
+ }
+ const flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>> *attributes() const
+ {
+ return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>> *>(
+ VT_ATTRIBUTES);
+ }
+ const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *documentation() const
+ {
+ return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *>(
+ VT_DOCUMENTATION);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyOffsetRequired(verifier, VT_NAME) &&
+ verifier.VerifyString(name()) && VerifyOffset(verifier, VT_CALLS) &&
+ verifier.VerifyVector(calls()) && verifier.VerifyVectorOfTables(calls()) &&
+ VerifyOffset(verifier, VT_ATTRIBUTES) && verifier.VerifyVector(attributes()) &&
+ verifier.VerifyVectorOfTables(attributes()) &&
+ VerifyOffset(verifier, VT_DOCUMENTATION) && verifier.VerifyVector(documentation()) &&
+ verifier.VerifyVectorOfStrings(documentation()) && verifier.EndTable();
+ }
+};
+
+struct ServiceBuilder
+{
+ typedef Service Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_name(flatbuffers::Offset<flatbuffers::String> name)
+ {
+ fbb_.AddOffset(Service::VT_NAME, name);
+ }
+ void add_calls(
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::RPCCall>>> calls)
+ {
+ fbb_.AddOffset(Service::VT_CALLS, calls);
+ }
+ void add_attributes(
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>>> attributes)
+ {
+ fbb_.AddOffset(Service::VT_ATTRIBUTES, attributes);
+ }
+ void add_documentation(
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>>
+ documentation)
+ {
+ fbb_.AddOffset(Service::VT_DOCUMENTATION, documentation);
+ }
+ explicit ServiceBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<Service> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<Service>(end);
+ fbb_.Required(o, Service::VT_NAME);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<Service> CreateService(
+ flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset<flatbuffers::String> name = 0,
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::RPCCall>>> calls = 0,
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>>> attributes =
+ 0,
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> documentation =
+ 0)
+{
+ ServiceBuilder builder_(_fbb);
+ builder_.add_documentation(documentation);
+ builder_.add_attributes(attributes);
+ builder_.add_calls(calls);
+ builder_.add_name(name);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<Service> CreateServiceDirect(
+ flatbuffers::FlatBufferBuilder &_fbb, const char *name = nullptr,
+ std::vector<flatbuffers::Offset<reflection::RPCCall>> *calls = nullptr,
+ std::vector<flatbuffers::Offset<reflection::KeyValue>> *attributes = nullptr,
+ const std::vector<flatbuffers::Offset<flatbuffers::String>> *documentation = nullptr)
+{
+ auto name__ = name ? _fbb.CreateString(name) : 0;
+ auto calls__ = calls ? _fbb.CreateVectorOfSortedTables<reflection::RPCCall>(calls) : 0;
+ auto attributes__ =
+ attributes ? _fbb.CreateVectorOfSortedTables<reflection::KeyValue>(attributes) : 0;
+ auto documentation__ =
+ documentation ? _fbb.CreateVector<flatbuffers::Offset<flatbuffers::String>>(*documentation) : 0;
+ return reflection::CreateService(_fbb, name__, calls__, attributes__, documentation__);
+}
+
+struct Schema FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef SchemaBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_OBJECTS = 4,
+ VT_ENUMS = 6,
+ VT_FILE_IDENT = 8,
+ VT_FILE_EXT = 10,
+ VT_ROOT_TABLE = 12,
+ VT_SERVICES = 14,
+ VT_ADVANCED_FEATURES = 16
+ };
+ const flatbuffers::Vector<flatbuffers::Offset<reflection::Object>> *objects() const
+ {
+ return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<reflection::Object>> *>(
+ VT_OBJECTS);
+ }
+ const flatbuffers::Vector<flatbuffers::Offset<reflection::Enum>> *enums() const
+ {
+ return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<reflection::Enum>> *>(VT_ENUMS);
+ }
+ const flatbuffers::String *file_ident() const
+ {
+ return GetPointer<const flatbuffers::String *>(VT_FILE_IDENT);
+ }
+ const flatbuffers::String *file_ext() const
+ {
+ return GetPointer<const flatbuffers::String *>(VT_FILE_EXT);
+ }
+ const reflection::Object *root_table() const
+ {
+ return GetPointer<const reflection::Object *>(VT_ROOT_TABLE);
+ }
+ const flatbuffers::Vector<flatbuffers::Offset<reflection::Service>> *services() const
+ {
+ return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<reflection::Service>> *>(
+ VT_SERVICES);
+ }
+ reflection::AdvancedFeatures advanced_features() const
+ {
+ return static_cast<reflection::AdvancedFeatures>(GetField<uint64_t>(VT_ADVANCED_FEATURES, 0));
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyOffsetRequired(verifier, VT_OBJECTS) &&
+ verifier.VerifyVector(objects()) && verifier.VerifyVectorOfTables(objects()) &&
+ VerifyOffsetRequired(verifier, VT_ENUMS) && verifier.VerifyVector(enums()) &&
+ verifier.VerifyVectorOfTables(enums()) && VerifyOffset(verifier, VT_FILE_IDENT) &&
+ verifier.VerifyString(file_ident()) && VerifyOffset(verifier, VT_FILE_EXT) &&
+ verifier.VerifyString(file_ext()) && VerifyOffset(verifier, VT_ROOT_TABLE) &&
+ verifier.VerifyTable(root_table()) && VerifyOffset(verifier, VT_SERVICES) &&
+ verifier.VerifyVector(services()) && verifier.VerifyVectorOfTables(services()) &&
+ VerifyField<uint64_t>(verifier, VT_ADVANCED_FEATURES) && verifier.EndTable();
+ }
+};
+
+struct SchemaBuilder
+{
+ typedef Schema Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_objects(
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::Object>>> objects)
+ {
+ fbb_.AddOffset(Schema::VT_OBJECTS, objects);
+ }
+ void
+ add_enums(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::Enum>>> enums)
+ {
+ fbb_.AddOffset(Schema::VT_ENUMS, enums);
+ }
+ void add_file_ident(flatbuffers::Offset<flatbuffers::String> file_ident)
+ {
+ fbb_.AddOffset(Schema::VT_FILE_IDENT, file_ident);
+ }
+ void add_file_ext(flatbuffers::Offset<flatbuffers::String> file_ext)
+ {
+ fbb_.AddOffset(Schema::VT_FILE_EXT, file_ext);
+ }
+ void add_root_table(flatbuffers::Offset<reflection::Object> root_table)
+ {
+ fbb_.AddOffset(Schema::VT_ROOT_TABLE, root_table);
+ }
+ void add_services(
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::Service>>> services)
+ {
+ fbb_.AddOffset(Schema::VT_SERVICES, services);
+ }
+ void add_advanced_features(reflection::AdvancedFeatures advanced_features)
+ {
+ fbb_.AddElement<uint64_t>(Schema::VT_ADVANCED_FEATURES,
+ static_cast<uint64_t>(advanced_features), 0);
+ }
+ explicit SchemaBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<Schema> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<Schema>(end);
+ fbb_.Required(o, Schema::VT_OBJECTS);
+ fbb_.Required(o, Schema::VT_ENUMS);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<Schema> CreateSchema(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::Object>>> objects = 0,
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::Enum>>> enums = 0,
+ flatbuffers::Offset<flatbuffers::String> file_ident = 0,
+ flatbuffers::Offset<flatbuffers::String> file_ext = 0,
+ flatbuffers::Offset<reflection::Object> root_table = 0,
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<reflection::Service>>> services = 0,
+ reflection::AdvancedFeatures advanced_features = static_cast<reflection::AdvancedFeatures>(0))
+{
+ SchemaBuilder builder_(_fbb);
+ builder_.add_advanced_features(advanced_features);
+ builder_.add_services(services);
+ builder_.add_root_table(root_table);
+ builder_.add_file_ext(file_ext);
+ builder_.add_file_ident(file_ident);
+ builder_.add_enums(enums);
+ builder_.add_objects(objects);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<Schema> CreateSchemaDirect(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ std::vector<flatbuffers::Offset<reflection::Object>> *objects = nullptr,
+ std::vector<flatbuffers::Offset<reflection::Enum>> *enums = nullptr,
+ const char *file_ident = nullptr, const char *file_ext = nullptr,
+ flatbuffers::Offset<reflection::Object> root_table = 0,
+ std::vector<flatbuffers::Offset<reflection::Service>> *services = nullptr,
+ reflection::AdvancedFeatures advanced_features = static_cast<reflection::AdvancedFeatures>(0))
+{
+ auto objects__ = objects ? _fbb.CreateVectorOfSortedTables<reflection::Object>(objects) : 0;
+ auto enums__ = enums ? _fbb.CreateVectorOfSortedTables<reflection::Enum>(enums) : 0;
+ auto file_ident__ = file_ident ? _fbb.CreateString(file_ident) : 0;
+ auto file_ext__ = file_ext ? _fbb.CreateString(file_ext) : 0;
+ auto services__ = services ? _fbb.CreateVectorOfSortedTables<reflection::Service>(services) : 0;
+ return reflection::CreateSchema(_fbb, objects__, enums__, file_ident__, file_ext__, root_table,
+ services__, advanced_features);
+}
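
Putting the creators together, a Schema is assembled bottom-up: Type first, then Field, Object and finally Schema, whose objects and enums vectors are both required. A hedged sketch; the table name "T", the field name "x" and its offset are invented for illustration:

  #include <vector>
  #include "flatbuffers/reflection_generated.h"

  int main()
  {
    flatbuffers::FlatBufferBuilder fbb;
    // One table "T" with a single int32 field; the *Direct helpers copy the
    // C strings and sort table vectors by their key field (name).
    auto type = reflection::CreateType(fbb, reflection::Int);
    std::vector<flatbuffers::Offset<reflection::Field>> fields{
      reflection::CreateFieldDirect(fbb, "x", type, /*id=*/0, /*offset=*/4)};
    std::vector<flatbuffers::Offset<reflection::Object>> objects{
      reflection::CreateObjectDirect(fbb, "T", &fields)};
    std::vector<flatbuffers::Offset<reflection::Enum>> enums; // required, but may be empty

    auto schema = reflection::CreateSchemaDirect(fbb, &objects, &enums);
    reflection::FinishSchemaBuffer(fbb, schema); // finishes the buffer with the "BFBS" identifier
    return 0;
  }
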
+
+inline const reflection::Schema *GetSchema(const void *buf)
+{
+ return flatbuffers::GetRoot<reflection::Schema>(buf);
+}
+
+inline const reflection::Schema *GetSizePrefixedSchema(const void *buf)
+{
+ return flatbuffers::GetSizePrefixedRoot<reflection::Schema>(buf);
+}
+
+inline const char *SchemaIdentifier() { return "BFBS"; }
+
+inline bool SchemaBufferHasIdentifier(const void *buf)
+{
+ return flatbuffers::BufferHasIdentifier(buf, SchemaIdentifier());
+}
+
+inline bool VerifySchemaBuffer(flatbuffers::Verifier &verifier)
+{
+ return verifier.VerifyBuffer<reflection::Schema>(SchemaIdentifier());
+}
+
+inline bool VerifySizePrefixedSchemaBuffer(flatbuffers::Verifier &verifier)
+{
+ return verifier.VerifySizePrefixedBuffer<reflection::Schema>(SchemaIdentifier());
+}
+
+inline const char *SchemaExtension() { return "bfbs"; }
+
+inline void FinishSchemaBuffer(flatbuffers::FlatBufferBuilder &fbb,
+ flatbuffers::Offset<reflection::Schema> root)
+{
+ fbb.Finish(root, SchemaIdentifier());
+}
+
+inline void FinishSizePrefixedSchemaBuffer(flatbuffers::FlatBufferBuilder &fbb,
+ flatbuffers::Offset<reflection::Schema> root)
+{
+ fbb.FinishSizePrefixed(root, SchemaIdentifier());
+}
+
+} // namespace reflection
+
+#endif // FLATBUFFERS_GENERATED_REFLECTION_REFLECTION_H_
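
On the consuming side the usual flow is: load a binary schema (.bfbs, as produced by flatc -b --schema), verify it, then walk it through the accessors above. A minimal sketch; monster.bfbs is a placeholder file name and flatbuffers::LoadFile comes from flatbuffers/util.h:

  #include <cstdio>
  #include <string>
  #include "flatbuffers/reflection_generated.h"
  #include "flatbuffers/util.h" // flatbuffers::LoadFile

  int main()
  {
    std::string bfbs;
    if (!flatbuffers::LoadFile("monster.bfbs", /*binary=*/true, &bfbs))
      return 1;
    flatbuffers::Verifier verifier(reinterpret_cast<const uint8_t *>(bfbs.data()), bfbs.size());
    if (!reflection::VerifySchemaBuffer(verifier))
      return 1;
    const reflection::Schema *schema = reflection::GetSchema(bfbs.data());
    for (const reflection::Object *obj : *schema->objects())
      std::printf("table %s has %u fields\n", obj->name()->c_str(), obj->fields()->size());
    return 0;
  }
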
diff --git a/onert-micro/externals/flatbuffers/registry.h b/onert-micro/externals/flatbuffers/registry.h
new file mode 100644
index 000000000..c06bd5687
--- /dev/null
+++ b/onert-micro/externals/flatbuffers/registry.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2017 Google Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLATBUFFERS_REGISTRY_H_
+#define FLATBUFFERS_REGISTRY_H_
+
+#include "flatbuffers/idl.h"
+
+namespace flatbuffers
+{
+
+// Convenience class to easily parse or generate text for arbitrary FlatBuffers.
+// Simply pre-populate it with all schema filenames that may be in use, and
+// this class will look them up using the file_identifier declared in the
+// schema.
+class Registry
+{
+public:
+  // Call this for all schemas that may be in use. The identifier is available
+  // as a function in the generated code, e.g. MonsterIdentifier().
+ void Register(const char *file_identifier, const char *schema_path)
+ {
+ Schema schema;
+ schema.path_ = schema_path;
+ schemas_[file_identifier] = schema;
+ }
+
+ // Generate text from an arbitrary FlatBuffer by looking up its
+ // file_identifier in the registry.
+ bool FlatBufferToText(const uint8_t *flatbuf, size_t len, std::string *dest)
+ {
+ // Get the identifier out of the buffer.
+ // If the buffer is truncated, exit.
+ if (len < sizeof(uoffset_t) + FlatBufferBuilder::kFileIdentifierLength)
+ {
+ lasterror_ = "buffer truncated";
+ return false;
+ }
+ std::string ident(reinterpret_cast<const char *>(flatbuf) + sizeof(uoffset_t),
+ FlatBufferBuilder::kFileIdentifierLength);
+ // Load and parse the schema.
+ Parser parser;
+ if (!LoadSchema(ident, &parser))
+ return false;
+ // Now we're ready to generate text.
+ if (!GenerateText(parser, flatbuf, dest))
+ {
+ lasterror_ = "unable to generate text for FlatBuffer binary";
+ return false;
+ }
+ return true;
+ }
+
+  // Converts text to a binary FlatBuffer using one of the schemas in the
+  // registry; use the file_identifier to indicate which schema to apply.
+ // If DetachedBuffer::data() is null then parsing failed.
+ DetachedBuffer TextToFlatBuffer(const char *text, const char *file_identifier)
+ {
+ // Load and parse the schema.
+ Parser parser;
+ if (!LoadSchema(file_identifier, &parser))
+ return DetachedBuffer();
+ // Parse the text.
+ if (!parser.Parse(text))
+ {
+ lasterror_ = parser.error_;
+ return DetachedBuffer();
+ }
+ // We have a valid FlatBuffer. Detach it from the builder and return.
+ return parser.builder_.Release();
+ }
+
+ // Modify any parsing / output options used by the other functions.
+ void SetOptions(const IDLOptions &opts) { opts_ = opts; }
+
+  // If the schemas used contain include statements, call this function for
+  // every directory the parser should search for them.
+ void AddIncludeDirectory(const char *path) { include_paths_.push_back(path); }
+
+ // Returns a human readable error if any of the above functions fail.
+ const std::string &GetLastError() { return lasterror_; }
+
+private:
+ bool LoadSchema(const std::string &ident, Parser *parser)
+ {
+    // Find the schema; if it is not registered, exit.
+ auto it = schemas_.find(ident);
+ if (it == schemas_.end())
+ {
+ // Don't attach the identifier, since it may not be human readable.
+ lasterror_ = "identifier for this buffer not in the registry";
+ return false;
+ }
+ auto &schema = it->second;
+    // Load the schema from disk; if that fails, exit.
+ std::string schematext;
+ if (!LoadFile(schema.path_.c_str(), false, &schematext))
+ {
+ lasterror_ = "could not load schema: " + schema.path_;
+ return false;
+ }
+ // Parse schema.
+ parser->opts = opts_;
+ if (!parser->Parse(schematext.c_str(), vector_data(include_paths_), schema.path_.c_str()))
+ {
+ lasterror_ = parser->error_;
+ return false;
+ }
+ return true;
+ }
+
+ struct Schema
+ {
+ std::string path_;
+ // TODO(wvo) optionally cache schema file or parsed schema here.
+ };
+
+ std::string lasterror_;
+ IDLOptions opts_;
+ std::vector<const char *> include_paths_;
+ std::map<std::string, Schema> schemas_;
+};
+
+} // namespace flatbuffers
+
+#endif // FLATBUFFERS_REGISTRY_H_
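
A hedged usage sketch for the Registry class above; "MONS" and monster.fbs stand in for a real file_identifier / schema pair, and the text conversion paths additionally require linking the full FlatBuffers library (IDL parser and text generator), of which this external package ships only the headers:

  #include <cstdio>
  #include <string>
  #include "flatbuffers/registry.h"

  int main()
  {
    flatbuffers::Registry registry;
    registry.Register("MONS", "monster.fbs");
    registry.AddIncludeDirectory("schemas/");

    // Text -> binary: the identifier selects the schema.
    flatbuffers::DetachedBuffer buf = registry.TextToFlatBuffer("{ name: \"orc\" }", "MONS");
    if (!buf.data())
      std::printf("%s\n", registry.GetLastError().c_str());

    // Binary -> text: the identifier is read out of the buffer itself.
    std::string json;
    if (!registry.FlatBufferToText(buf.data(), buf.size(), &json))
      std::printf("%s\n", registry.GetLastError().c_str());
    return 0;
  }
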
diff --git a/onert-micro/externals/flatbuffers/stl_emulation.h b/onert-micro/externals/flatbuffers/stl_emulation.h
new file mode 100644
index 000000000..3f11fb9cb
--- /dev/null
+++ b/onert-micro/externals/flatbuffers/stl_emulation.h
@@ -0,0 +1,674 @@
+/*
+ * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2017 Google Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLATBUFFERS_STL_EMULATION_H_
+#define FLATBUFFERS_STL_EMULATION_H_
+
+// clang-format off
+#include "flatbuffers/base.h"
+
+#include <string>
+#include <type_traits>
+#include <vector>
+#include <memory>
+#include <limits>
+
+#if defined(_STLPORT_VERSION) && !defined(FLATBUFFERS_CPP98_STL)
+ #define FLATBUFFERS_CPP98_STL
+#endif // defined(_STLPORT_VERSION) && !defined(FLATBUFFERS_CPP98_STL)
+
+#if defined(FLATBUFFERS_CPP98_STL)
+ #include <cctype>
+#endif // defined(FLATBUFFERS_CPP98_STL)
+
+// Detect C++17 compatible compiler.
+// A compiler reporting __cplusplus >= 201703L supports 'static inline' variables.
+#if defined(FLATBUFFERS_USE_STD_OPTIONAL) \
+ || (defined(__cplusplus) && __cplusplus >= 201703L) \
+ || (defined(_MSVC_LANG) && (_MSVC_LANG >= 201703L))
+ #include <optional>
+ #ifndef FLATBUFFERS_USE_STD_OPTIONAL
+ #define FLATBUFFERS_USE_STD_OPTIONAL
+ #endif
+#endif // defined(FLATBUFFERS_USE_STD_OPTIONAL) ...
+
+// __cpp_lib_span is the predefined feature-test macro.
+#if defined(FLATBUFFERS_USE_STD_SPAN)
+ #include <span>
+#elif defined(__cpp_lib_span) && defined(__has_include)
+ #if __has_include(<span>)
+ #include <span>
+ #define FLATBUFFERS_USE_STD_SPAN
+ #endif
+#else
+  // Disable non-trivial ctors if FLATBUFFERS_SPAN_MINIMAL is defined.
+ #if !defined(FLATBUFFERS_TEMPLATES_ALIASES) || defined(FLATBUFFERS_CPP98_STL)
+ #define FLATBUFFERS_SPAN_MINIMAL
+ #else
+ // Enable implicit construction of a span<T,N> from a std::array<T,N>.
+ #include <array>
+ #endif
+#endif // defined(FLATBUFFERS_USE_STD_SPAN)
+
+// This header provides backwards compatibility for C++98 STLs like stlport.
+namespace flatbuffers {
+
+// Retrieve ::back() from a string in a way that is compatible with pre-C++11
+// STLs (e.g. stlport).
+inline char& string_back(std::string &value) {
+ return value[value.length() - 1];
+}
+
+inline char string_back(const std::string &value) {
+ return value[value.length() - 1];
+}
+
+// Helper method that retrieves ::data() from a vector in a way that is
+// compatible with pre-C++11 STLs (e.g. stlport).
+template <typename T> inline T *vector_data(std::vector<T> &vector) {
+ // In some debug environments, operator[] does bounds checking, so &vector[0]
+ // can't be used.
+ return vector.empty() ? nullptr : &vector[0];
+}
+
+template <typename T> inline const T *vector_data(
+ const std::vector<T> &vector) {
+ return vector.empty() ? nullptr : &vector[0];
+}
+
+template <typename T, typename V>
+inline void vector_emplace_back(std::vector<T> *vector, V &&data) {
+ #if defined(FLATBUFFERS_CPP98_STL)
+ vector->push_back(data);
+ #else
+ vector->emplace_back(std::forward<V>(data));
+ #endif // defined(FLATBUFFERS_CPP98_STL)
+}
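
A tiny sketch of these portability helpers; on a modern STL they simply forward to the standard members:

  #include <string>
  #include <vector>
  #include "flatbuffers/stl_emulation.h"

  int main()
  {
    std::string s = "abc";
    flatbuffers::string_back(s) = 'z';          // s == "abz"
    std::vector<int> v;
    const int *p = flatbuffers::vector_data(v); // nullptr for an empty vector
    flatbuffers::vector_emplace_back(&v, 42);
    return (p == nullptr && flatbuffers::vector_data(v)[0] == 42) ? 0 : 1;
  }
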
+
+#ifndef FLATBUFFERS_CPP98_STL
+ #if defined(FLATBUFFERS_TEMPLATES_ALIASES)
+ template <typename T>
+ using numeric_limits = std::numeric_limits<T>;
+ #else
+ template <typename T> class numeric_limits :
+ public std::numeric_limits<T> {};
+ #endif // defined(FLATBUFFERS_TEMPLATES_ALIASES)
+#else
+ template <typename T> class numeric_limits :
+ public std::numeric_limits<T> {
+ public:
+ // Android NDK fix.
+ static T lowest() {
+ return std::numeric_limits<T>::min();
+ }
+ };
+
+ template <> class numeric_limits<float> :
+ public std::numeric_limits<float> {
+ public:
+ static float lowest() { return -FLT_MAX; }
+ };
+
+ template <> class numeric_limits<double> :
+ public std::numeric_limits<double> {
+ public:
+ static double lowest() { return -DBL_MAX; }
+ };
+
+ template <> class numeric_limits<unsigned long long> {
+ public:
+ static unsigned long long min() { return 0ULL; }
+ static unsigned long long max() { return ~0ULL; }
+ static unsigned long long lowest() {
+ return numeric_limits<unsigned long long>::min();
+ }
+ };
+
+ template <> class numeric_limits<long long> {
+ public:
+ static long long min() {
+ return static_cast<long long>(1ULL << ((sizeof(long long) << 3) - 1));
+ }
+ static long long max() {
+ return static_cast<long long>(
+ (1ULL << ((sizeof(long long) << 3) - 1)) - 1);
+ }
+ static long long lowest() {
+ return numeric_limits<long long>::min();
+ }
+ };
+#endif // FLATBUFFERS_CPP98_STL
+
+#if defined(FLATBUFFERS_TEMPLATES_ALIASES)
+ #ifndef FLATBUFFERS_CPP98_STL
+ template <typename T> using is_scalar = std::is_scalar<T>;
+ template <typename T, typename U> using is_same = std::is_same<T,U>;
+ template <typename T> using is_floating_point = std::is_floating_point<T>;
+ template <typename T> using is_unsigned = std::is_unsigned<T>;
+ template <typename T> using is_enum = std::is_enum<T>;
+ template <typename T> using make_unsigned = std::make_unsigned<T>;
+ template<bool B, class T, class F>
+ using conditional = std::conditional<B, T, F>;
+ template<class T, T v>
+ using integral_constant = std::integral_constant<T, v>;
+ template <bool B>
+ using bool_constant = integral_constant<bool, B>;
+ #else
+ // Map C++ TR1 templates defined by stlport.
+ template <typename T> using is_scalar = std::tr1::is_scalar<T>;
+ template <typename T, typename U> using is_same = std::tr1::is_same<T,U>;
+ template <typename T> using is_floating_point =
+ std::tr1::is_floating_point<T>;
+ template <typename T> using is_unsigned = std::tr1::is_unsigned<T>;
+ template <typename T> using is_enum = std::tr1::is_enum<T>;
+ // Android NDK doesn't have std::make_unsigned or std::tr1::make_unsigned.
+ template<typename T> struct make_unsigned {
+ static_assert(is_unsigned<T>::value, "Specialization not implemented!");
+ using type = T;
+ };
+ template<> struct make_unsigned<char> { using type = unsigned char; };
+ template<> struct make_unsigned<short> { using type = unsigned short; };
+ template<> struct make_unsigned<int> { using type = unsigned int; };
+ template<> struct make_unsigned<long> { using type = unsigned long; };
+ template<>
+ struct make_unsigned<long long> { using type = unsigned long long; };
+ template<bool B, class T, class F>
+ using conditional = std::tr1::conditional<B, T, F>;
+ template<class T, T v>
+ using integral_constant = std::tr1::integral_constant<T, v>;
+ template <bool B>
+ using bool_constant = integral_constant<bool, B>;
+ #endif // !FLATBUFFERS_CPP98_STL
+#else
+ // MSVC 2010 doesn't support C++11 aliases.
+ template <typename T> struct is_scalar : public std::is_scalar<T> {};
+ template <typename T, typename U> struct is_same : public std::is_same<T,U> {};
+ template <typename T> struct is_floating_point :
+ public std::is_floating_point<T> {};
+ template <typename T> struct is_unsigned : public std::is_unsigned<T> {};
+ template <typename T> struct is_enum : public std::is_enum<T> {};
+ template <typename T> struct make_unsigned : public std::make_unsigned<T> {};
+ template<bool B, class T, class F>
+ struct conditional : public std::conditional<B, T, F> {};
+ template<class T, T v>
+ struct integral_constant : public std::integral_constant<T, v> {};
+ template <bool B>
+ struct bool_constant : public integral_constant<bool, B> {};
+#endif // defined(FLATBUFFERS_TEMPLATES_ALIASES)
+
+#ifndef FLATBUFFERS_CPP98_STL
+ #if defined(FLATBUFFERS_TEMPLATES_ALIASES)
+ template <class T> using unique_ptr = std::unique_ptr<T>;
+ #else
+ // MSVC 2010 doesn't support C++11 aliases.
+ // We're manually "aliasing" the class here as we want to bring unique_ptr
+  // into the flatbuffers namespace. With unique_ptr in the flatbuffers
+  // namespace we can also provide a completely independent implementation
+  // (see below) for C++98 STL implementations.
+ template <class T> class unique_ptr : public std::unique_ptr<T> {
+ public:
+ unique_ptr() {}
+ explicit unique_ptr(T* p) : std::unique_ptr<T>(p) {}
+ unique_ptr(std::unique_ptr<T>&& u) { *this = std::move(u); }
+ unique_ptr(unique_ptr&& u) { *this = std::move(u); }
+ unique_ptr& operator=(std::unique_ptr<T>&& u) {
+ std::unique_ptr<T>::reset(u.release());
+ return *this;
+ }
+ unique_ptr& operator=(unique_ptr&& u) {
+ std::unique_ptr<T>::reset(u.release());
+ return *this;
+ }
+ unique_ptr& operator=(T* p) {
+ return std::unique_ptr<T>::operator=(p);
+ }
+ };
+ #endif // defined(FLATBUFFERS_TEMPLATES_ALIASES)
+#else
+ // Very limited implementation of unique_ptr.
+ // This is provided simply to allow the C++ code generated from the default
+ // settings to function in C++98 environments with no modifications.
+ template <class T> class unique_ptr {
+ public:
+ typedef T element_type;
+
+ unique_ptr() : ptr_(nullptr) {}
+ explicit unique_ptr(T* p) : ptr_(p) {}
+ unique_ptr(unique_ptr&& u) : ptr_(nullptr) { reset(u.release()); }
+ unique_ptr(const unique_ptr& u) : ptr_(nullptr) {
+ reset(const_cast<unique_ptr*>(&u)->release());
+ }
+ ~unique_ptr() { reset(); }
+
+ unique_ptr& operator=(const unique_ptr& u) {
+ reset(const_cast<unique_ptr*>(&u)->release());
+ return *this;
+ }
+
+ unique_ptr& operator=(unique_ptr&& u) {
+ reset(u.release());
+ return *this;
+ }
+
+ unique_ptr& operator=(T* p) {
+ reset(p);
+ return *this;
+ }
+
+ const T& operator*() const { return *ptr_; }
+ T* operator->() const { return ptr_; }
+ T* get() const noexcept { return ptr_; }
+ explicit operator bool() const { return ptr_ != nullptr; }
+
+ // modifiers
+ T* release() {
+ T* value = ptr_;
+ ptr_ = nullptr;
+ return value;
+ }
+
+ void reset(T* p = nullptr) {
+ T* value = ptr_;
+ ptr_ = p;
+ if (value) delete value;
+ }
+
+ void swap(unique_ptr& u) {
+ T* temp_ptr = ptr_;
+ ptr_ = u.ptr_;
+ u.ptr_ = temp_ptr;
+ }
+
+ private:
+ T* ptr_;
+ };
+
+ template <class T> bool operator==(const unique_ptr<T>& x,
+ const unique_ptr<T>& y) {
+ return x.get() == y.get();
+ }
+
+ template <class T, class D> bool operator==(const unique_ptr<T>& x,
+ const D* y) {
+ return static_cast<D*>(x.get()) == y;
+ }
+
+ template <class T> bool operator==(const unique_ptr<T>& x, intptr_t y) {
+ return reinterpret_cast<intptr_t>(x.get()) == y;
+ }
+
+ template <class T> bool operator!=(const unique_ptr<T>& x, decltype(nullptr)) {
+ return !!x;
+ }
+
+ template <class T> bool operator!=(decltype(nullptr), const unique_ptr<T>& x) {
+ return !!x;
+ }
+
+ template <class T> bool operator==(const unique_ptr<T>& x, decltype(nullptr)) {
+ return !x;
+ }
+
+ template <class T> bool operator==(decltype(nullptr), const unique_ptr<T>& x) {
+ return !x;
+ }
+
+#endif // !FLATBUFFERS_CPP98_STL
+
+#ifdef FLATBUFFERS_USE_STD_OPTIONAL
+template<class T>
+using Optional = std::optional<T>;
+using nullopt_t = std::nullopt_t;
+inline constexpr nullopt_t nullopt = std::nullopt;
+
+#else
+// Limited implementation of the Optional<T> type for a scalar T.
+// This implementation is limited to trivial types compatible with the
+// std::is_arithmetic<T> or std::is_enum<T> type traits.
+
+// A tag to indicate an empty flatbuffers::optional<T>.
+struct nullopt_t {
+ explicit FLATBUFFERS_CONSTEXPR_CPP11 nullopt_t(int) {}
+};
+
+#if defined(FLATBUFFERS_CONSTEXPR_DEFINED)
+ namespace internal {
+ template <class> struct nullopt_holder {
+ static constexpr nullopt_t instance_ = nullopt_t(0);
+ };
+ template<class Dummy>
+ constexpr nullopt_t nullopt_holder<Dummy>::instance_;
+ }
+ static constexpr const nullopt_t &nullopt = internal::nullopt_holder<void>::instance_;
+
+#else
+ namespace internal {
+ template <class> struct nullopt_holder {
+ static const nullopt_t instance_;
+ };
+ template<class Dummy>
+ const nullopt_t nullopt_holder<Dummy>::instance_ = nullopt_t(0);
+ }
+ static const nullopt_t &nullopt = internal::nullopt_holder<void>::instance_;
+
+#endif
+
+template<class T>
+class Optional FLATBUFFERS_FINAL_CLASS {
+  // A non-scalar 'T' would make Optional<T> extremely complicated.
+  // Use the is_scalar<T> check because flatbuffers::is_arithmetic<T>
+  // isn't implemented.
+ static_assert(flatbuffers::is_scalar<T>::value, "unexpected type T");
+
+ public:
+ ~Optional() {}
+
+ FLATBUFFERS_CONSTEXPR_CPP11 Optional() FLATBUFFERS_NOEXCEPT
+ : value_(), has_value_(false) {}
+
+ FLATBUFFERS_CONSTEXPR_CPP11 Optional(nullopt_t) FLATBUFFERS_NOEXCEPT
+ : value_(), has_value_(false) {}
+
+ FLATBUFFERS_CONSTEXPR_CPP11 Optional(T val) FLATBUFFERS_NOEXCEPT
+ : value_(val), has_value_(true) {}
+
+ FLATBUFFERS_CONSTEXPR_CPP11 Optional(const Optional &other) FLATBUFFERS_NOEXCEPT
+ : value_(other.value_), has_value_(other.has_value_) {}
+
+ FLATBUFFERS_CONSTEXPR_CPP14 Optional &operator=(const Optional &other) FLATBUFFERS_NOEXCEPT {
+ value_ = other.value_;
+ has_value_ = other.has_value_;
+ return *this;
+ }
+
+ FLATBUFFERS_CONSTEXPR_CPP14 Optional &operator=(nullopt_t) FLATBUFFERS_NOEXCEPT {
+ value_ = T();
+ has_value_ = false;
+ return *this;
+ }
+
+ FLATBUFFERS_CONSTEXPR_CPP14 Optional &operator=(T val) FLATBUFFERS_NOEXCEPT {
+ value_ = val;
+ has_value_ = true;
+ return *this;
+ }
+
+ void reset() FLATBUFFERS_NOEXCEPT {
+ *this = nullopt;
+ }
+
+ void swap(Optional &other) FLATBUFFERS_NOEXCEPT {
+ std::swap(value_, other.value_);
+ std::swap(has_value_, other.has_value_);
+ }
+
+ FLATBUFFERS_CONSTEXPR_CPP11 FLATBUFFERS_EXPLICIT_CPP11 operator bool() const FLATBUFFERS_NOEXCEPT {
+ return has_value_;
+ }
+
+ FLATBUFFERS_CONSTEXPR_CPP11 bool has_value() const FLATBUFFERS_NOEXCEPT {
+ return has_value_;
+ }
+
+ FLATBUFFERS_CONSTEXPR_CPP11 const T& operator*() const FLATBUFFERS_NOEXCEPT {
+ return value_;
+ }
+
+ const T& value() const {
+ FLATBUFFERS_ASSERT(has_value());
+ return value_;
+ }
+
+ T value_or(T default_value) const FLATBUFFERS_NOEXCEPT {
+ return has_value() ? value_ : default_value;
+ }
+
+ private:
+ T value_;
+ bool has_value_;
+};
+
+template<class T>
+FLATBUFFERS_CONSTEXPR_CPP11 bool operator==(const Optional<T>& opt, nullopt_t) FLATBUFFERS_NOEXCEPT {
+ return !opt;
+}
+template<class T>
+FLATBUFFERS_CONSTEXPR_CPP11 bool operator==(nullopt_t, const Optional<T>& opt) FLATBUFFERS_NOEXCEPT {
+ return !opt;
+}
+
+template<class T, class U>
+FLATBUFFERS_CONSTEXPR_CPP11 bool operator==(const Optional<T>& lhs, const U& rhs) FLATBUFFERS_NOEXCEPT {
+ return static_cast<bool>(lhs) && (*lhs == rhs);
+}
+
+template<class T, class U>
+FLATBUFFERS_CONSTEXPR_CPP11 bool operator==(const T& lhs, const Optional<U>& rhs) FLATBUFFERS_NOEXCEPT {
+ return static_cast<bool>(rhs) && (lhs == *rhs);
+}
+
+template<class T, class U>
+FLATBUFFERS_CONSTEXPR_CPP11 bool operator==(const Optional<T>& lhs, const Optional<U>& rhs) FLATBUFFERS_NOEXCEPT {
+  // Two empty optionals compare equal, matching std::optional semantics.
+  return static_cast<bool>(lhs) != static_cast<bool>(rhs)
+              ? false
+              : !static_cast<bool>(lhs) ? true : (*lhs == *rhs);
+}
+#endif // FLATBUFFERS_USE_STD_OPTIONAL
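+
+// A minimal usage sketch of flatbuffers::Optional for a scalar type
+// (valid for both the std::optional alias and the emulation above):
+//
+//   flatbuffers::Optional<int32_t> opt;   // empty: !opt.has_value()
+//   opt = 42;                             // engaged: *opt == 42
+//   int32_t v = opt.value_or(-1);         // 42 (-1 only if opt were empty)
+//   opt = flatbuffers::nullopt;           // empty again
+//   assert(opt == flatbuffers::nullopt);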
+
+
+// Very limited and naive partial implementation of C++20 std::span<T,Extent>.
+#if defined(FLATBUFFERS_USE_STD_SPAN)
+ inline constexpr std::size_t dynamic_extent = std::dynamic_extent;
+ template<class T, std::size_t Extent = std::dynamic_extent>
+ using span = std::span<T, Extent>;
+
+#else // !defined(FLATBUFFERS_USE_STD_SPAN)
+FLATBUFFERS_CONSTEXPR std::size_t dynamic_extent = static_cast<std::size_t>(-1);
+
+// Exclude this code if MSVC2010 or non-STL Android is active.
+// The non-STL Android doesn't have `std::is_convertible` required for SFINAE.
+#if !defined(FLATBUFFERS_SPAN_MINIMAL)
+namespace internal {
+  // This is a SFINAE helper class used to check a common condition:
+  // > This overload only participates in overload resolution if
+  // > a pointer to an array of U can be converted to a pointer to
+  // > an array of E.
+  // This helper is also used to check the 'U -> const U' conversion.
+ template<class E, std::size_t Extent, class U, std::size_t N>
+ struct is_span_convertable {
+ using type =
+ typename std::conditional<std::is_convertible<U (*)[], E (*)[]>::value
+ && (Extent == dynamic_extent || N == Extent),
+ int, void>::type;
+ };
+
+} // namespace internal
+#endif // !defined(FLATBUFFERS_SPAN_MINIMAL)
+
+// T - element type; must be a complete type that is not an abstract
+// class type.
+// Extent - the number of elements in the sequence, or dynamic.
+template<class T, std::size_t Extent = dynamic_extent>
+class span FLATBUFFERS_FINAL_CLASS {
+ public:
+ typedef T element_type;
+ typedef T& reference;
+ typedef const T& const_reference;
+ typedef T* pointer;
+ typedef const T* const_pointer;
+ typedef std::size_t size_type;
+
+ static FLATBUFFERS_CONSTEXPR size_type extent = Extent;
+
+ // Returns the number of elements in the span.
+ FLATBUFFERS_CONSTEXPR_CPP11 size_type size() const FLATBUFFERS_NOEXCEPT {
+ return count_;
+ }
+
+ // Returns the size of the sequence in bytes.
+ FLATBUFFERS_CONSTEXPR_CPP11
+ size_type size_bytes() const FLATBUFFERS_NOEXCEPT {
+ return size() * sizeof(element_type);
+ }
+
+ // Checks if the span is empty.
+ FLATBUFFERS_CONSTEXPR_CPP11 bool empty() const FLATBUFFERS_NOEXCEPT {
+ return size() == 0;
+ }
+
+ // Returns a pointer to the beginning of the sequence.
+ FLATBUFFERS_CONSTEXPR_CPP11 pointer data() const FLATBUFFERS_NOEXCEPT {
+ return data_;
+ }
+
+ // Returns a reference to the idx-th element of the sequence.
+ // The behavior is undefined if the idx is greater than or equal to size().
+ FLATBUFFERS_CONSTEXPR_CPP11 reference operator[](size_type idx) const {
+ return data()[idx];
+ }
+
+ FLATBUFFERS_CONSTEXPR_CPP11 span(const span &other) FLATBUFFERS_NOEXCEPT
+ : data_(other.data_), count_(other.count_) {}
+
+  FLATBUFFERS_CONSTEXPR_CPP14 span &operator=(const span &other)
+    FLATBUFFERS_NOEXCEPT {
+    data_ = other.data_;
+    count_ = other.count_;
+    return *this;
+  }
+
+ // Limited implementation of
+ // `template <class It> constexpr std::span(It first, size_type count);`.
+ //
+ // Constructs a span that is a view over the range [first, first + count);
+ // the resulting span has: data() == first and size() == count.
+ // The behavior is undefined if [first, first + count) is not a valid range,
+ // or if (extent != flatbuffers::dynamic_extent && count != extent).
+ FLATBUFFERS_CONSTEXPR_CPP11
+ explicit span(pointer first, size_type count) FLATBUFFERS_NOEXCEPT
+ : data_ (Extent == dynamic_extent ? first : (Extent == count ? first : nullptr)),
+ count_(Extent == dynamic_extent ? count : (Extent == count ? Extent : 0)) {
+ // Make span empty if the count argument is incompatible with span<T,N>.
+ }
+
+  // Exclude this code if MSVC2010 is active. MSVC2010 isn't C++11
+  // compliant; it doesn't support default template arguments for functions.
+ #if defined(FLATBUFFERS_SPAN_MINIMAL)
+ FLATBUFFERS_CONSTEXPR_CPP11 span() FLATBUFFERS_NOEXCEPT : data_(nullptr),
+ count_(0) {
+ static_assert(extent == 0 || extent == dynamic_extent, "invalid span");
+ }
+
+ #else
+ // Constructs an empty span whose data() == nullptr and size() == 0.
+ // This overload only participates in overload resolution if
+ // extent == 0 || extent == flatbuffers::dynamic_extent.
+  // A dummy template argument N is needed as a dependency for SFINAE.
+ template<std::size_t N = 0,
+ typename internal::is_span_convertable<element_type, Extent, element_type, (N - N)>::type = 0>
+ FLATBUFFERS_CONSTEXPR_CPP11 span() FLATBUFFERS_NOEXCEPT : data_(nullptr),
+ count_(0) {
+ static_assert(extent == 0 || extent == dynamic_extent, "invalid span");
+ }
+
+ // Constructs a span that is a view over the array arr; the resulting span
+ // has size() == N and data() == std::data(arr). These overloads only
+ // participate in overload resolution if
+ // extent == std::dynamic_extent || N == extent is true and
+ // std::remove_pointer_t<decltype(std::data(arr))>(*)[]
+ // is convertible to element_type (*)[].
+ template<std::size_t N,
+ typename internal::is_span_convertable<element_type, Extent, element_type, N>::type = 0>
+ FLATBUFFERS_CONSTEXPR_CPP11 span(element_type (&arr)[N]) FLATBUFFERS_NOEXCEPT
+ : data_(arr), count_(N) {}
+
+ template<class U, std::size_t N,
+ typename internal::is_span_convertable<element_type, Extent, U, N>::type = 0>
+ FLATBUFFERS_CONSTEXPR_CPP11 span(std::array<U, N> &arr) FLATBUFFERS_NOEXCEPT
+ : data_(arr.data()), count_(N) {}
+
+
+ template<class U, std::size_t N,
+ typename internal::is_span_convertable<element_type, Extent, U, N>::type = 0>
+ FLATBUFFERS_CONSTEXPR_CPP11 span(const std::array<U, N> &arr) FLATBUFFERS_NOEXCEPT
+ : data_(arr.data()), count_(N) {}
+
+ // Converting constructor from another span s;
+ // the resulting span has size() == s.size() and data() == s.data().
+ // This overload only participates in overload resolution
+ // if extent == std::dynamic_extent || N == extent is true and U (*)[]
+ // is convertible to element_type (*)[].
+ template<class U, std::size_t N,
+ typename internal::is_span_convertable<element_type, Extent, U, N>::type = 0>
+ FLATBUFFERS_CONSTEXPR_CPP11 span(const flatbuffers::span<U, N> &s) FLATBUFFERS_NOEXCEPT
+ : span(s.data(), s.size()) {
+ }
+
+ #endif // !defined(FLATBUFFERS_SPAN_MINIMAL)
+
+ private:
+ // This is a naive implementation with 'count_' member even if (Extent != dynamic_extent).
+ pointer const data_;
+ const size_type count_;
+};
+
+ #if !defined(FLATBUFFERS_SPAN_MINIMAL)
+ template<class U, std::size_t N>
+ FLATBUFFERS_CONSTEXPR_CPP11
+ flatbuffers::span<U, N> make_span(U(&arr)[N]) FLATBUFFERS_NOEXCEPT {
+ return span<U, N>(arr);
+ }
+
+ template<class U, std::size_t N>
+ FLATBUFFERS_CONSTEXPR_CPP11
+ flatbuffers::span<const U, N> make_span(const U(&arr)[N]) FLATBUFFERS_NOEXCEPT {
+ return span<const U, N>(arr);
+ }
+
+ template<class U, std::size_t N>
+ FLATBUFFERS_CONSTEXPR_CPP11
+ flatbuffers::span<U, N> make_span(std::array<U, N> &arr) FLATBUFFERS_NOEXCEPT {
+ return span<U, N>(arr);
+ }
+
+ template<class U, std::size_t N>
+ FLATBUFFERS_CONSTEXPR_CPP11
+ flatbuffers::span<const U, N> make_span(const std::array<U, N> &arr) FLATBUFFERS_NOEXCEPT {
+ return span<const U, N>(arr);
+ }
+
+  // Only U is a template parameter here: a static extent cannot be deduced
+  // from a raw pointer, so these overloads always return a dynamic_extent span.
+  template<class U>
+  FLATBUFFERS_CONSTEXPR_CPP11
+  flatbuffers::span<U, dynamic_extent> make_span(U *first, std::size_t count) FLATBUFFERS_NOEXCEPT {
+    return span<U, dynamic_extent>(first, count);
+  }
+
+  template<class U>
+  FLATBUFFERS_CONSTEXPR_CPP11
+  flatbuffers::span<const U, dynamic_extent> make_span(const U *first, std::size_t count) FLATBUFFERS_NOEXCEPT {
+    return span<const U, dynamic_extent>(first, count);
+  }
+  #endif // !defined(FLATBUFFERS_SPAN_MINIMAL)
+
+#endif // defined(FLATBUFFERS_USE_STD_SPAN)
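+
+// A minimal usage sketch of the span emulation above (make_span is provided
+// by the !FLATBUFFERS_SPAN_MINIMAL branch):
+//
+//   uint8_t raw[4] = {1, 2, 3, 4};
+//   auto fixed = flatbuffers::make_span(raw);   // span<uint8_t, 4>
+//   flatbuffers::span<uint8_t> dyn(raw, 4);     // dynamic extent
+//   assert(fixed.size_bytes() == 4 && dyn[0] == 1);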
+
+} // namespace flatbuffers
+
+#endif // FLATBUFFERS_STL_EMULATION_H_
diff --git a/onert-micro/externals/flatbuffers/util.h b/onert-micro/externals/flatbuffers/util.h
new file mode 100644
index 000000000..e255801af
--- /dev/null
+++ b/onert-micro/externals/flatbuffers/util.h
@@ -0,0 +1,799 @@
+/*
+ * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2014 Google Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLATBUFFERS_UTIL_H_
+#define FLATBUFFERS_UTIL_H_
+
+#include <errno.h>
+
+#include "flatbuffers/base.h"
+#include "flatbuffers/stl_emulation.h"
+
+#ifndef FLATBUFFERS_PREFER_PRINTF
+#include <sstream>
+#else // FLATBUFFERS_PREFER_PRINTF
+#include <float.h>
+#include <stdio.h>
+#endif // FLATBUFFERS_PREFER_PRINTF
+
+#include <iomanip>
+#include <string>
+
+namespace flatbuffers
+{
+
+// @locale-independent functions for the ASCII character set.
+
+// Fast check that a character lies in the closed range [a <= x <= b]
+// using a single compare (conditional branch).
+inline bool check_ascii_range(char x, char a, char b)
+{
+ FLATBUFFERS_ASSERT(a <= b);
+ // (Hacker's Delight): `a <= x <= b` <=> `(x-a) <={u} (b-a)`.
+ // The x, a, b will be promoted to int and subtracted without overflow.
+ return static_cast<unsigned int>(x - a) <= static_cast<unsigned int>(b - a);
+}
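+
+// For example, with a='0' and b='9': '5' gives '5'-'0' == 5, and 5 <= 9, so
+// the check passes, while '/' gives -1, which wraps to a huge unsigned value
+// and fails the single comparison.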
+
+// Case-insensitive isalpha
+inline bool is_alpha(char c)
+{
+ // ASCII only: alpha to upper case => reset bit 0x20 (~0x20 = 0xDF).
+ return check_ascii_range(c & 0xDF, 'a' & 0xDF, 'z' & 0xDF);
+}
+
+// Check for uppercase alpha
+inline bool is_alpha_upper(char c) { return check_ascii_range(c, 'A', 'Z'); }
+
+// Check (case-insensitive) that `c` is equal to alpha.
+inline bool is_alpha_char(char c, char alpha)
+{
+ FLATBUFFERS_ASSERT(is_alpha(alpha));
+ // ASCII only: alpha to upper case => reset bit 0x20 (~0x20 = 0xDF).
+ return ((c & 0xDF) == (alpha & 0xDF));
+}
+
+// https://en.cppreference.com/w/cpp/string/byte/isxdigit
+// isdigit and isxdigit are the only standard narrow character classification
+// functions that are not affected by the currently installed C locale, although
+// some implementations (e.g. Microsoft in the 1252 codepage) may classify
+// additional single-byte characters as digits.
+inline bool is_digit(char c) { return check_ascii_range(c, '0', '9'); }
+
+inline bool is_xdigit(char c)
+{
+  // Could be replaced by a look-up table.
+ return is_digit(c) || check_ascii_range(c & 0xDF, 'a' & 0xDF, 'f' & 0xDF);
+}
+
+// Case-insensitive isalnum
+inline bool is_alnum(char c) { return is_alpha(c) || is_digit(c); }
+
+inline char CharToUpper(char c)
+{
+ return static_cast<char>(::toupper(static_cast<unsigned char>(c)));
+}
+
+inline char CharToLower(char c)
+{
+ return static_cast<char>(::tolower(static_cast<unsigned char>(c)));
+}
+
+// @end-locale-independent functions for ASCII character set
+
+#ifdef FLATBUFFERS_PREFER_PRINTF
+template <typename T> size_t IntToDigitCount(T t)
+{
+ size_t digit_count = 0;
+ // Count the sign for negative numbers
+ if (t < 0)
+ digit_count++;
+ // Count a single 0 left of the dot for fractional numbers
+ if (-1 < t && t < 1)
+ digit_count++;
+ // Count digits until fractional part
+ T eps = std::numeric_limits<float>::epsilon();
+ while (t <= (-1 + eps) || (1 - eps) <= t)
+ {
+ t /= 10;
+ digit_count++;
+ }
+ return digit_count;
+}
+
+template <typename T> size_t NumToStringWidth(T t, int precision = 0)
+{
+ size_t string_width = IntToDigitCount(t);
+ // Count the dot for floating point numbers
+ if (precision)
+ string_width += (precision + 1);
+ return string_width;
+}
+
+template <typename T> std::string NumToStringImplWrapper(T t, const char *fmt, int precision = 0)
+{
+ size_t string_width = NumToStringWidth(t, precision);
+ std::string s(string_width, 0x00);
+ // Allow snprintf to use std::string trailing null to detect buffer overflow
+ snprintf(const_cast<char *>(s.data()), (s.size() + 1), fmt, string_width, t);
+ return s;
+}
+#endif // FLATBUFFERS_PREFER_PRINTF
+
+// Convert an integer or floating point value to a string.
+// In contrast to std::stringstream, "char" values are
+// converted to a string of digits, and we don't use scientific notation.
+template <typename T> std::string NumToString(T t)
+{
+ // clang-format off
+
+ #ifndef FLATBUFFERS_PREFER_PRINTF
+ std::stringstream ss;
+ ss << t;
+ return ss.str();
+ #else // FLATBUFFERS_PREFER_PRINTF
+ auto v = static_cast<long long>(t);
+ return NumToStringImplWrapper(v, "%.*lld");
+ #endif // FLATBUFFERS_PREFER_PRINTF
+ // clang-format on
+}
+// Avoid char types used as character data.
+template <> inline std::string NumToString<signed char>(signed char t)
+{
+ return NumToString(static_cast<int>(t));
+}
+template <> inline std::string NumToString<unsigned char>(unsigned char t)
+{
+ return NumToString(static_cast<int>(t));
+}
+template <> inline std::string NumToString<char>(char t)
+{
+ return NumToString(static_cast<int>(t));
+}
+#if defined(FLATBUFFERS_CPP98_STL)
+template <> inline std::string NumToString<long long>(long long t)
+{
+ char buf[21]; // (log((1 << 63) - 1) / log(10)) + 2
+ snprintf(buf, sizeof(buf), "%lld", t);
+ return std::string(buf);
+}
+
+template <> inline std::string NumToString<unsigned long long>(unsigned long long t)
+{
+ char buf[22]; // (log((1 << 63) - 1) / log(10)) + 1
+ snprintf(buf, sizeof(buf), "%llu", t);
+ return std::string(buf);
+}
+#endif // defined(FLATBUFFERS_CPP98_STL)
+
+// Special versions for floats/doubles.
+template <typename T> std::string FloatToString(T t, int precision)
+{
+ // clang-format off
+
+ #ifndef FLATBUFFERS_PREFER_PRINTF
+ // to_string() prints different numbers of digits for floats depending on
+ // platform and isn't available on Android, so we use stringstream
+ std::stringstream ss;
+ // Use std::fixed to suppress scientific notation.
+ ss << std::fixed;
+ // Default precision is 6, we want that to be higher for doubles.
+ ss << std::setprecision(precision);
+ ss << t;
+ auto s = ss.str();
+ #else // FLATBUFFERS_PREFER_PRINTF
+ auto v = static_cast<double>(t);
+ auto s = NumToStringImplWrapper(v, "%0.*f", precision);
+ #endif // FLATBUFFERS_PREFER_PRINTF
+ // clang-format on
+ // Sadly, std::fixed turns "1" into "1.00000", so here we undo that.
+ auto p = s.find_last_not_of('0');
+ if (p != std::string::npos)
+ {
+ // Strip trailing zeroes. If it is a whole number, keep one zero.
+ s.resize(p + (s[p] == '.' ? 2 : 1));
+ }
+ return s;
+}
+
+template <> inline std::string NumToString<double>(double t) { return FloatToString(t, 12); }
+template <> inline std::string NumToString<float>(float t) { return FloatToString(t, 6); }
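+
+// Illustrative results of the conversions above:
+//
+//   NumToString(static_cast<signed char>(7));  // "7"   (digits, not a raw char)
+//   NumToString(3.25f);                        // "3.25" (trailing zeros stripped)
+//   NumToString(2.0);                          // "2.0"  (whole values keep one zero)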
+
+// Convert an integer value to a hexadecimal string.
+// The returned string is always xdigits characters long, padded with leading zeros.
+// For example, IntToStringHex(0x23, 8) returns the string "00000023".
+inline std::string IntToStringHex(int i, int xdigits)
+{
+ FLATBUFFERS_ASSERT(i >= 0);
+ // clang-format off
+
+ #ifndef FLATBUFFERS_PREFER_PRINTF
+ std::stringstream ss;
+ ss << std::setw(xdigits) << std::setfill('0') << std::hex << std::uppercase
+ << i;
+ return ss.str();
+ #else // FLATBUFFERS_PREFER_PRINTF
+ return NumToStringImplWrapper(i, "%.*X", xdigits);
+ #endif // FLATBUFFERS_PREFER_PRINTF
+ // clang-format on
+}
+
+// clang-format off
+// Use locale independent functions {strtod_l, strtof_l, strtoll_l, strtoull_l}.
+#if defined(FLATBUFFERS_LOCALE_INDEPENDENT) && (FLATBUFFERS_LOCALE_INDEPENDENT > 0)
+ class ClassicLocale {
+ #ifdef _MSC_VER
+ typedef _locale_t locale_type;
+ #else
+ typedef locale_t locale_type; // POSIX.1-2008 locale_t type
+ #endif
+ ClassicLocale();
+ ~ClassicLocale();
+ locale_type locale_;
+ static ClassicLocale instance_;
+ public:
+ static locale_type Get() { return instance_.locale_; }
+ };
+
+ #ifdef _MSC_VER
+ #define __strtoull_impl(s, pe, b) _strtoui64_l(s, pe, b, ClassicLocale::Get())
+ #define __strtoll_impl(s, pe, b) _strtoi64_l(s, pe, b, ClassicLocale::Get())
+ #define __strtod_impl(s, pe) _strtod_l(s, pe, ClassicLocale::Get())
+ #define __strtof_impl(s, pe) _strtof_l(s, pe, ClassicLocale::Get())
+ #else
+ #define __strtoull_impl(s, pe, b) strtoull_l(s, pe, b, ClassicLocale::Get())
+ #define __strtoll_impl(s, pe, b) strtoll_l(s, pe, b, ClassicLocale::Get())
+ #define __strtod_impl(s, pe) strtod_l(s, pe, ClassicLocale::Get())
+ #define __strtof_impl(s, pe) strtof_l(s, pe, ClassicLocale::Get())
+ #endif
+#else
+ #define __strtod_impl(s, pe) strtod(s, pe)
+ #define __strtof_impl(s, pe) static_cast<float>(strtod(s, pe))
+ #ifdef _MSC_VER
+ #define __strtoull_impl(s, pe, b) _strtoui64(s, pe, b)
+ #define __strtoll_impl(s, pe, b) _strtoi64(s, pe, b)
+ #else
+ #define __strtoull_impl(s, pe, b) strtoull(s, pe, b)
+ #define __strtoll_impl(s, pe, b) strtoll(s, pe, b)
+ #endif
+#endif
+
+inline void strtoval_impl(int64_t *val, const char *str, char **endptr,
+ int base) {
+ *val = __strtoll_impl(str, endptr, base);
+}
+
+inline void strtoval_impl(uint64_t *val, const char *str, char **endptr,
+ int base) {
+ *val = __strtoull_impl(str, endptr, base);
+}
+
+inline void strtoval_impl(double *val, const char *str, char **endptr) {
+ *val = __strtod_impl(str, endptr);
+}
+
+// UBSAN: double to float is safe if numeric_limits<float>::is_iec559 is true.
+__supress_ubsan__("float-cast-overflow")
+inline void strtoval_impl(float *val, const char *str, char **endptr) {
+ *val = __strtof_impl(str, endptr);
+}
+#undef __strtoull_impl
+#undef __strtoll_impl
+#undef __strtod_impl
+#undef __strtof_impl
+// clang-format on
+
+// Adaptor for strtoull()/strtoll().
+// Flatbuffers accepts numbers with any count of leading zeros (-009 is -9),
+// while strtoll with base=0 interprets the first leading zero as an octal prefix.
+// In the future, support for a 0b0101 binary prefix could be added.
+// 1) Checks the errno code for an overflow condition (out of range).
+// 2) If base <= 0, the function tries to detect the base of the number by its prefix.
+//
+// Return value (like strtoull and strtoll, but partial results are rejected):
+// - If successful, an integer value corresponding to str is returned.
+// - If a full string conversion can't be performed, 0 is returned.
+// - If the converted value falls out of range of the corresponding return type,
+//   a range error occurs. In this case the value MAX(T)/MIN(T) is returned.
+template <typename T>
+inline bool StringToIntegerImpl(T *val, const char *const str, const int base = 0,
+ const bool check_errno = true)
+{
+  // T is int64_t or uint64_t.
+ FLATBUFFERS_ASSERT(str);
+ if (base <= 0)
+ {
+ auto s = str;
+ while (*s && !is_digit(*s))
+ s++;
+ if (s[0] == '0' && is_alpha_char(s[1], 'X'))
+ return StringToIntegerImpl(val, str, 16, check_errno);
+    // If the prefix doesn't match, try base=10.
+ return StringToIntegerImpl(val, str, 10, check_errno);
+ }
+ else
+ {
+ if (check_errno)
+ errno = 0; // clear thread-local errno
+ auto endptr = str;
+ strtoval_impl(val, str, const_cast<char **>(&endptr), base);
+ if ((*endptr != '\0') || (endptr == str))
+ {
+ *val = 0; // erase partial result
+ return false; // invalid string
+ }
+    // errno reports out-of-range; val keeps the MAX/MIN stored by strto*.
+ if (check_errno && errno)
+ return false;
+ return true;
+ }
+}
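+
+// Illustrative behaviour of the adaptor above (T = int64_t):
+//
+//   int64_t v = 0;
+//   StringToIntegerImpl(&v, "0x1A");   // true,  v == 26 (hex prefix detected)
+//   StringToIntegerImpl(&v, "-009");   // true,  v == -9 (leading zeros accepted)
+//   StringToIntegerImpl(&v, "12abc");  // false, v == 0  (partial parse rejected)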
+
+template <typename T> inline bool StringToFloatImpl(T *val, const char *const str)
+{
+ // Type T must be either float or double.
+ FLATBUFFERS_ASSERT(str && val);
+ auto end = str;
+ strtoval_impl(val, str, const_cast<char **>(&end));
+ auto done = (end != str) && (*end == '\0');
+ if (!done)
+ *val = 0; // erase partial result
+ return done;
+}
+
+// Convert a string to an instance of T.
+// Return value (matching StringToIntegerImpl and strtod):
+// - If successful, a numeric value corresponding to str is returned.
+// - If a full string conversion can't be performed, 0 is returned.
+// - If the converted value falls out of range of the corresponding return type,
+//   a range error occurs. In this case the value MAX(T)/MIN(T) is returned.
+template <typename T> inline bool StringToNumber(const char *s, T *val)
+{
+  // This static_assert fires for `unsigned long` and `signed long` on LP64.
+  // If necessary, that could be handled with flatbuffers::enable_if<B,T>.
+ static_assert(sizeof(T) < sizeof(int64_t), "unexpected type T");
+ FLATBUFFERS_ASSERT(s && val);
+ int64_t i64;
+ // The errno check isn't needed, will return MAX/MIN on overflow.
+ if (StringToIntegerImpl(&i64, s, 0, false))
+ {
+ const int64_t max = (flatbuffers::numeric_limits<T>::max)();
+ const int64_t min = flatbuffers::numeric_limits<T>::lowest();
+ if (i64 > max)
+ {
+ *val = static_cast<T>(max);
+ return false;
+ }
+ if (i64 < min)
+ {
+ // For unsigned types return max to distinguish from
+ // "no conversion can be performed" when 0 is returned.
+ *val = static_cast<T>(flatbuffers::is_unsigned<T>::value ? max : min);
+ return false;
+ }
+ *val = static_cast<T>(i64);
+ return true;
+ }
+ *val = 0;
+ return false;
+}
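+
+// Illustrative clamping behaviour of the overload above for small types:
+//
+//   uint8_t u8 = 0;
+//   StringToNumber("300", &u8);      // false, u8 == 255 (clamped to max)
+//   int16_t i16 = 0;
+//   StringToNumber("-40000", &i16);  // false, i16 == -32768 (clamped to min)
+//   StringToNumber("100", &i16);     // true,  i16 == 100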
+
+template <> inline bool StringToNumber<int64_t>(const char *str, int64_t *val)
+{
+ return StringToIntegerImpl(val, str);
+}
+
+template <> inline bool StringToNumber<uint64_t>(const char *str, uint64_t *val)
+{
+ if (!StringToIntegerImpl(val, str))
+ return false;
+  // strtoull accepts negative numbers:
+  // if the minus sign was part of the input sequence, the numeric value
+  // calculated from the sequence of digits is negated as if by unary minus
+  // in the result type, which applies unsigned integer wraparound rules.
+  // Fix this behaviour here (except for -0).
+ if (*val)
+ {
+ auto s = str;
+ while (*s && !is_digit(*s))
+ s++;
+    s = (s > str) ? (s - 1) : s; // step back one symbol
+ if (*s == '-')
+ {
+ // For unsigned types return the max to distinguish from
+ // "no conversion can be performed".
+ *val = (flatbuffers::numeric_limits<uint64_t>::max)();
+ return false;
+ }
+ }
+ return true;
+}
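+
+// Illustrative effect of the minus-sign check above:
+//
+//   uint64_t u = 0;
+//   StringToNumber<uint64_t>("-1", &u);  // false, u == UINT64_MAX
+//   StringToNumber<uint64_t>("-0", &u);  // true,  u == 0 (the "-0" exception)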
+
+template <> inline bool StringToNumber(const char *s, float *val)
+{
+ return StringToFloatImpl(val, s);
+}
+
+template <> inline bool StringToNumber(const char *s, double *val)
+{
+ return StringToFloatImpl(val, s);
+}
+
+inline int64_t StringToInt(const char *s, int base = 10)
+{
+ int64_t val;
+ return StringToIntegerImpl(&val, s, base) ? val : 0;
+}
+
+inline uint64_t StringToUInt(const char *s, int base = 10)
+{
+ uint64_t val;
+ return StringToIntegerImpl(&val, s, base) ? val : 0;
+}
+
+typedef bool (*LoadFileFunction)(const char *filename, bool binary, std::string *dest);
+typedef bool (*FileExistsFunction)(const char *filename);
+
+LoadFileFunction SetLoadFileFunction(LoadFileFunction load_file_function);
+
+FileExistsFunction SetFileExistsFunction(FileExistsFunction file_exists_function);
+
+// Check if file "name" exists.
+bool FileExists(const char *name);
+
+// Check if "name" exists and it is also a directory.
+bool DirExists(const char *name);
+
+// Load file "name" into "buf" returning true if successful
+// false otherwise. If "binary" is false data is read
+// using ifstream's text mode, otherwise data is read with
+// no transcoding.
+bool LoadFile(const char *name, bool binary, std::string *buf);
+
+// Save data "buf" of length "len" bytes into a file
+// "name" returning true if successful, false otherwise.
+// If "binary" is false data is written using ifstream's
+// text mode, otherwise data is written with no
+// transcoding.
+bool SaveFile(const char *name, const char *buf, size_t len, bool binary);
+
+// Save data "buf" into file "name" returning true if
+// successful, false otherwise. If "binary" is false
+// data is written using ifstream's text mode, otherwise
+// data is written with no transcoding.
+inline bool SaveFile(const char *name, const std::string &buf, bool binary)
+{
+ return SaveFile(name, buf.c_str(), buf.size(), binary);
+}
+
+// Functionality for minimalistic portable path handling.
+
+// The functions below behave correctly regardless of whether posix ('/') or
+// Windows ('/' or '\\') separators are used.
+
+// Any new separators inserted are always posix.
+FLATBUFFERS_CONSTEXPR char kPathSeparator = '/';
+
+// Returns the path with the extension, if any, removed.
+std::string StripExtension(const std::string &filepath);
+
+// Returns the extension, if any.
+std::string GetExtension(const std::string &filepath);
+
+// Return the last component of the path, after the last separator.
+std::string StripPath(const std::string &filepath);
+
+// Strip the last component of the path + separator.
+std::string StripFileName(const std::string &filepath);
+
+// Concatenates a path with a filename, regardless of whether the path
+// ends in a separator or not.
+std::string ConCatPathFileName(const std::string &path, const std::string &filename);
+
+// Replaces any '\\' separators with '/'
+std::string PosixPath(const char *path);
+
+// This function ensures a directory exists by recursively
+// creating dirs for any parts of the path that don't exist yet.
+void EnsureDirExists(const std::string &filepath);
+
+// Obtains the absolute path from any other path.
+// Returns the input path if the absolute path couldn't be resolved.
+std::string AbsolutePath(const std::string &filepath);
+
+// To and from UTF-8 unicode conversion functions
+
+// Convert a unicode code point into a UTF-8 representation by appending it
+// to a string. Returns the number of bytes generated.
+inline int ToUTF8(uint32_t ucc, std::string *out)
+{
+ FLATBUFFERS_ASSERT(!(ucc & 0x80000000)); // Top bit can't be set.
+ // 6 possible encodings: http://en.wikipedia.org/wiki/UTF-8
+ for (int i = 0; i < 6; i++)
+ {
+ // Max bits this encoding can represent.
+ uint32_t max_bits = 6 + i * 5 + static_cast<int>(!i);
+ if (ucc < (1u << max_bits))
+ { // does it fit?
+ // Remaining bits not encoded in the first byte, store 6 bits each
+ uint32_t remain_bits = i * 6;
+ // Store first byte:
+ (*out) += static_cast<char>((0xFE << (max_bits - remain_bits)) | (ucc >> remain_bits));
+ // Store remaining bytes:
+ for (int j = i - 1; j >= 0; j--)
+ {
+ (*out) += static_cast<char>(((ucc >> (j * 6)) & 0x3F) | 0x80);
+ }
+ return i + 1; // Return the number of bytes added.
+ }
+ }
+ FLATBUFFERS_ASSERT(0); // Impossible to arrive here.
+ return -1;
+}
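+
+// For example, ToUTF8(0x20AC, &s) appends the three bytes 0xE2 0x82 0xAC
+// (the UTF-8 encoding of U+20AC, the euro sign) to s and returns 3.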
+
+// Converts whatever prefix of the incoming string corresponds to a valid
+// UTF-8 sequence into a unicode code point. The incoming pointer will have
+// been advanced past all bytes parsed.
+// Returns -1 upon corrupt UTF-8 encoding (ignore the incoming pointer in
+// this case).
+inline int FromUTF8(const char **in)
+{
+ int len = 0;
+ // Count leading 1 bits.
+ for (int mask = 0x80; mask >= 0x04; mask >>= 1)
+ {
+ if (**in & mask)
+ {
+ len++;
+ }
+ else
+ {
+ break;
+ }
+ }
+ if ((static_cast<unsigned char>(**in) << len) & 0x80)
+ return -1; // Bit after leading 1's must be 0.
+ if (!len)
+ return *(*in)++;
+ // UTF-8 encoded values with a length are between 2 and 4 bytes.
+ if (len < 2 || len > 4)
+ {
+ return -1;
+ }
+ // Grab initial bits of the code.
+ int ucc = *(*in)++ & ((1 << (7 - len)) - 1);
+ for (int i = 0; i < len - 1; i++)
+ {
+ if ((**in & 0xC0) != 0x80)
+      return -1; // Upper two bits must be 10 (continuation byte).
+ ucc <<= 6;
+ ucc |= *(*in)++ & 0x3F; // Grab 6 more bits of the code.
+ }
+ // UTF-8 cannot encode values between 0xD800 and 0xDFFF (reserved for
+ // UTF-16 surrogate pairs).
+ if (ucc >= 0xD800 && ucc <= 0xDFFF)
+ {
+ return -1;
+ }
+ // UTF-8 must represent code points in their shortest possible encoding.
+ switch (len)
+ {
+ case 2:
+ // Two bytes of UTF-8 can represent code points from U+0080 to U+07FF.
+ if (ucc < 0x0080 || ucc > 0x07FF)
+ {
+ return -1;
+ }
+ break;
+ case 3:
+ // Three bytes of UTF-8 can represent code points from U+0800 to U+FFFF.
+ if (ucc < 0x0800 || ucc > 0xFFFF)
+ {
+ return -1;
+ }
+ break;
+ case 4:
+ // Four bytes of UTF-8 can represent code points from U+10000 to U+10FFFF.
+ if (ucc < 0x10000 || ucc > 0x10FFFF)
+ {
+ return -1;
+ }
+ break;
+ }
+ return ucc;
+}
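+
+// For example, with in pointing at the bytes 0xE2 0x82 0xAC, FromUTF8(&in)
+// returns 0x20AC and advances in by three bytes; overlong encodings and
+// surrogate code points yield -1 instead.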
+
+#ifndef FLATBUFFERS_PREFER_PRINTF
+// Wraps a string to a maximum length, inserting new lines where necessary. Any
+// existing whitespace will be collapsed down to a single space. A prefix or
+// suffix can be provided, which will be inserted before or after a wrapped
+// line, respectively.
+inline std::string WordWrap(const std::string in, size_t max_length,
+ const std::string wrapped_line_prefix,
+ const std::string wrapped_line_suffix)
+{
+ std::istringstream in_stream(in);
+ std::string wrapped, line, word;
+
+ in_stream >> word;
+ line = word;
+
+ while (in_stream >> word)
+ {
+ if ((line.length() + 1 + word.length() + wrapped_line_suffix.length()) < max_length)
+ {
+ line += " " + word;
+ }
+ else
+ {
+ wrapped += line + wrapped_line_suffix + "\n";
+ line = wrapped_line_prefix + word;
+ }
+ }
+ wrapped += line;
+
+ return wrapped;
+}
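+
+// For example, WordWrap("aaa bbb ccc ddd", 10, "", "") yields
+// "aaa bbb\nccc ddd": words are re-flowed so that each line plus the
+// suffix stays under max_length.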
+#endif // !FLATBUFFERS_PREFER_PRINTF
+
+inline bool EscapeString(const char *s, size_t length, std::string *_text, bool allow_non_utf8,
+ bool natural_utf8)
+{
+ std::string &text = *_text;
+ text += "\"";
+ for (uoffset_t i = 0; i < length; i++)
+ {
+ char c = s[i];
+ switch (c)
+ {
+ case '\n':
+ text += "\\n";
+ break;
+ case '\t':
+ text += "\\t";
+ break;
+ case '\r':
+ text += "\\r";
+ break;
+ case '\b':
+ text += "\\b";
+ break;
+ case '\f':
+ text += "\\f";
+ break;
+ case '\"':
+ text += "\\\"";
+ break;
+ case '\\':
+ text += "\\\\";
+ break;
+ default:
+ if (c >= ' ' && c <= '~')
+ {
+ text += c;
+ }
+ else
+ {
+ // Not printable ASCII data. Let's see if it's valid UTF-8 first:
+ const char *utf8 = s + i;
+ int ucc = FromUTF8(&utf8);
+ if (ucc < 0)
+ {
+ if (allow_non_utf8)
+ {
+ text += "\\x";
+ text += IntToStringHex(static_cast<uint8_t>(c), 2);
+ }
+ else
+ {
+ // There are two cases here:
+ //
+ // 1) We reached here by parsing an IDL file. In that case,
+ // we previously checked for non-UTF-8, so we shouldn't reach
+ // here.
+ //
+ // 2) We reached here by someone calling GenerateText()
+ // on a previously-serialized flatbuffer. The data might have
+ // non-UTF-8 Strings, or might be corrupt.
+ //
+ // In both cases, we have to give up and inform the caller
+ // they have no JSON.
+ return false;
+ }
+ }
+ else
+ {
+ if (natural_utf8)
+ {
+ // utf8 points to past all utf-8 bytes parsed
+ text.append(s + i, static_cast<size_t>(utf8 - s - i));
+ }
+ else if (ucc <= 0xFFFF)
+ {
+ // Parses as Unicode within JSON's \uXXXX range, so use that.
+ text += "\\u";
+ text += IntToStringHex(ucc, 4);
+ }
+ else if (ucc <= 0x10FFFF)
+ {
+ // Encode Unicode SMP values to a surrogate pair using two \u
+ // escapes.
+ uint32_t base = ucc - 0x10000;
+ auto high_surrogate = (base >> 10) + 0xD800;
+ auto low_surrogate = (base & 0x03FF) + 0xDC00;
+ text += "\\u";
+ text += IntToStringHex(high_surrogate, 4);
+ text += "\\u";
+ text += IntToStringHex(low_surrogate, 4);
+ }
+ // Skip past characters recognized.
+ i = static_cast<uoffset_t>(utf8 - s - 1);
+ }
+ }
+ break;
+ }
+ }
+ text += "\"";
+ return true;
+}
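+
+// Illustrative call: EscapeString("a\n\xE2\x82\xAC", 5, &text, false, false)
+// appends a double-quoted JSON string in which the newline is emitted as \n
+// and the euro sign (U+20AC) as \u20AC; with natural_utf8 == true the raw
+// UTF-8 bytes would be kept instead.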
+
+inline std::string BufferToHexText(const void *buffer, size_t buffer_size, size_t max_length,
+ const std::string &wrapped_line_prefix,
+ const std::string &wrapped_line_suffix)
+{
+ std::string text = wrapped_line_prefix;
+ size_t start_offset = 0;
+ const char *s = reinterpret_cast<const char *>(buffer);
+ for (size_t i = 0; s && i < buffer_size; i++)
+ {
+ // Last iteration or do we have more?
+ bool have_more = i + 1 < buffer_size;
+ text += "0x";
+ text += IntToStringHex(static_cast<uint8_t>(s[i]), 2);
+ if (have_more)
+ {
+ text += ',';
+ }
+ // If we have more to process and we reached max_length
+ if (have_more && text.size() + wrapped_line_suffix.size() >= start_offset + max_length)
+ {
+ text += wrapped_line_suffix;
+ text += '\n';
+ start_offset = text.size();
+ text += wrapped_line_prefix;
+ }
+ }
+ text += wrapped_line_suffix;
+ return text;
+}
+
+// Remove paired quotes in a string: "text"|'text' -> text.
+std::string RemoveStringQuotes(const std::string &s);
+
+// Change the global C locale to the locale with name <locale_name>.
+// Returns the actual locale name in <_value>, useful if locale_name is "" or
+// null.
+bool SetGlobalTestLocale(const char *locale_name, std::string *_value = nullptr);
+
+// Read (or test) a value of environment variable.
+bool ReadEnvironmentVariable(const char *var_name, std::string *_value = nullptr);
+
+// MSVC specific: Send all assert reports to STDOUT to prevent CI hangs.
+void SetupDefaultCRTReportMode();
+
+} // namespace flatbuffers
+
+#endif // FLATBUFFERS_UTIL_H_
diff --git a/onert-micro/externals/gen/circle-generated/circle/schema_generated.h b/onert-micro/externals/gen/circle-generated/circle/schema_generated.h
new file mode 100644
index 000000000..2531319f9
--- /dev/null
+++ b/onert-micro/externals/gen/circle-generated/circle/schema_generated.h
@@ -0,0 +1,24984 @@
+/*
+ * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// automatically generated by the FlatBuffers compiler, do not modify
+
+#ifndef FLATBUFFERS_GENERATED_SCHEMA_CIRCLE_H_
+#define FLATBUFFERS_GENERATED_SCHEMA_CIRCLE_H_
+
+#include "flatbuffers/flatbuffers.h"
+
+namespace circle
+{
+
+struct CustomQuantization;
+struct CustomQuantizationBuilder;
+struct CustomQuantizationT;
+
+struct QuantizationParameters;
+struct QuantizationParametersBuilder;
+struct QuantizationParametersT;
+
+struct Int32Vector;
+struct Int32VectorBuilder;
+struct Int32VectorT;
+
+struct Uint16Vector;
+struct Uint16VectorBuilder;
+struct Uint16VectorT;
+
+struct Uint8Vector;
+struct Uint8VectorBuilder;
+struct Uint8VectorT;
+
+struct DimensionMetadata;
+struct DimensionMetadataBuilder;
+struct DimensionMetadataT;
+
+struct SparsityParameters;
+struct SparsityParametersBuilder;
+struct SparsityParametersT;
+
+struct Tensor;
+struct TensorBuilder;
+struct TensorT;
+
+struct Conv2DOptions;
+struct Conv2DOptionsBuilder;
+struct Conv2DOptionsT;
+
+struct Conv3DOptions;
+struct Conv3DOptionsBuilder;
+struct Conv3DOptionsT;
+
+struct Pool2DOptions;
+struct Pool2DOptionsBuilder;
+struct Pool2DOptionsT;
+
+struct DepthwiseConv2DOptions;
+struct DepthwiseConv2DOptionsBuilder;
+struct DepthwiseConv2DOptionsT;
+
+struct ConcatEmbeddingsOptions;
+struct ConcatEmbeddingsOptionsBuilder;
+struct ConcatEmbeddingsOptionsT;
+
+struct LSHProjectionOptions;
+struct LSHProjectionOptionsBuilder;
+struct LSHProjectionOptionsT;
+
+struct SVDFOptions;
+struct SVDFOptionsBuilder;
+struct SVDFOptionsT;
+
+struct RNNOptions;
+struct RNNOptionsBuilder;
+struct RNNOptionsT;
+
+struct SequenceRNNOptions;
+struct SequenceRNNOptionsBuilder;
+struct SequenceRNNOptionsT;
+
+struct BidirectionalSequenceRNNOptions;
+struct BidirectionalSequenceRNNOptionsBuilder;
+struct BidirectionalSequenceRNNOptionsT;
+
+struct FullyConnectedOptions;
+struct FullyConnectedOptionsBuilder;
+struct FullyConnectedOptionsT;
+
+struct SoftmaxOptions;
+struct SoftmaxOptionsBuilder;
+struct SoftmaxOptionsT;
+
+struct ConcatenationOptions;
+struct ConcatenationOptionsBuilder;
+struct ConcatenationOptionsT;
+
+struct AddOptions;
+struct AddOptionsBuilder;
+struct AddOptionsT;
+
+struct MulOptions;
+struct MulOptionsBuilder;
+struct MulOptionsT;
+
+struct L2NormOptions;
+struct L2NormOptionsBuilder;
+struct L2NormOptionsT;
+
+struct LocalResponseNormalizationOptions;
+struct LocalResponseNormalizationOptionsBuilder;
+struct LocalResponseNormalizationOptionsT;
+
+struct LSTMOptions;
+struct LSTMOptionsBuilder;
+struct LSTMOptionsT;
+
+struct UnidirectionalSequenceLSTMOptions;
+struct UnidirectionalSequenceLSTMOptionsBuilder;
+struct UnidirectionalSequenceLSTMOptionsT;
+
+struct BidirectionalSequenceLSTMOptions;
+struct BidirectionalSequenceLSTMOptionsBuilder;
+struct BidirectionalSequenceLSTMOptionsT;
+
+struct ResizeBilinearOptions;
+struct ResizeBilinearOptionsBuilder;
+struct ResizeBilinearOptionsT;
+
+struct ResizeNearestNeighborOptions;
+struct ResizeNearestNeighborOptionsBuilder;
+struct ResizeNearestNeighborOptionsT;
+
+struct CallOptions;
+struct CallOptionsBuilder;
+struct CallOptionsT;
+
+struct PadOptions;
+struct PadOptionsBuilder;
+struct PadOptionsT;
+
+struct PadV2Options;
+struct PadV2OptionsBuilder;
+struct PadV2OptionsT;
+
+struct ReshapeOptions;
+struct ReshapeOptionsBuilder;
+struct ReshapeOptionsT;
+
+struct SpaceToBatchNDOptions;
+struct SpaceToBatchNDOptionsBuilder;
+struct SpaceToBatchNDOptionsT;
+
+struct BatchToSpaceNDOptions;
+struct BatchToSpaceNDOptionsBuilder;
+struct BatchToSpaceNDOptionsT;
+
+struct SkipGramOptions;
+struct SkipGramOptionsBuilder;
+struct SkipGramOptionsT;
+
+struct SpaceToDepthOptions;
+struct SpaceToDepthOptionsBuilder;
+struct SpaceToDepthOptionsT;
+
+struct DepthToSpaceOptions;
+struct DepthToSpaceOptionsBuilder;
+struct DepthToSpaceOptionsT;
+
+struct SubOptions;
+struct SubOptionsBuilder;
+struct SubOptionsT;
+
+struct DivOptions;
+struct DivOptionsBuilder;
+struct DivOptionsT;
+
+struct TopKV2Options;
+struct TopKV2OptionsBuilder;
+struct TopKV2OptionsT;
+
+struct EmbeddingLookupSparseOptions;
+struct EmbeddingLookupSparseOptionsBuilder;
+struct EmbeddingLookupSparseOptionsT;
+
+struct GatherOptions;
+struct GatherOptionsBuilder;
+struct GatherOptionsT;
+
+struct TransposeOptions;
+struct TransposeOptionsBuilder;
+struct TransposeOptionsT;
+
+struct ExpOptions;
+struct ExpOptionsBuilder;
+struct ExpOptionsT;
+
+struct CosOptions;
+struct CosOptionsBuilder;
+struct CosOptionsT;
+
+struct ReducerOptions;
+struct ReducerOptionsBuilder;
+struct ReducerOptionsT;
+
+struct SqueezeOptions;
+struct SqueezeOptionsBuilder;
+struct SqueezeOptionsT;
+
+struct SplitOptions;
+struct SplitOptionsBuilder;
+struct SplitOptionsT;
+
+struct SplitVOptions;
+struct SplitVOptionsBuilder;
+struct SplitVOptionsT;
+
+struct StridedSliceOptions;
+struct StridedSliceOptionsBuilder;
+struct StridedSliceOptionsT;
+
+struct LogSoftmaxOptions;
+struct LogSoftmaxOptionsBuilder;
+struct LogSoftmaxOptionsT;
+
+struct CastOptions;
+struct CastOptionsBuilder;
+struct CastOptionsT;
+
+struct DequantizeOptions;
+struct DequantizeOptionsBuilder;
+struct DequantizeOptionsT;
+
+struct MaximumMinimumOptions;
+struct MaximumMinimumOptionsBuilder;
+struct MaximumMinimumOptionsT;
+
+struct TileOptions;
+struct TileOptionsBuilder;
+struct TileOptionsT;
+
+struct ArgMaxOptions;
+struct ArgMaxOptionsBuilder;
+struct ArgMaxOptionsT;
+
+struct ArgMinOptions;
+struct ArgMinOptionsBuilder;
+struct ArgMinOptionsT;
+
+struct GreaterOptions;
+struct GreaterOptionsBuilder;
+struct GreaterOptionsT;
+
+struct GreaterEqualOptions;
+struct GreaterEqualOptionsBuilder;
+struct GreaterEqualOptionsT;
+
+struct LessOptions;
+struct LessOptionsBuilder;
+struct LessOptionsT;
+
+struct LessEqualOptions;
+struct LessEqualOptionsBuilder;
+struct LessEqualOptionsT;
+
+struct NegOptions;
+struct NegOptionsBuilder;
+struct NegOptionsT;
+
+struct SelectOptions;
+struct SelectOptionsBuilder;
+struct SelectOptionsT;
+
+struct SliceOptions;
+struct SliceOptionsBuilder;
+struct SliceOptionsT;
+
+struct TransposeConvOptions;
+struct TransposeConvOptionsBuilder;
+struct TransposeConvOptionsT;
+
+struct ExpandDimsOptions;
+struct ExpandDimsOptionsBuilder;
+struct ExpandDimsOptionsT;
+
+struct SparseToDenseOptions;
+struct SparseToDenseOptionsBuilder;
+struct SparseToDenseOptionsT;
+
+struct EqualOptions;
+struct EqualOptionsBuilder;
+struct EqualOptionsT;
+
+struct NotEqualOptions;
+struct NotEqualOptionsBuilder;
+struct NotEqualOptionsT;
+
+struct ShapeOptions;
+struct ShapeOptionsBuilder;
+struct ShapeOptionsT;
+
+struct RankOptions;
+struct RankOptionsBuilder;
+struct RankOptionsT;
+
+struct PowOptions;
+struct PowOptionsBuilder;
+struct PowOptionsT;
+
+struct FakeQuantOptions;
+struct FakeQuantOptionsBuilder;
+struct FakeQuantOptionsT;
+
+struct PackOptions;
+struct PackOptionsBuilder;
+struct PackOptionsT;
+
+struct LogicalOrOptions;
+struct LogicalOrOptionsBuilder;
+struct LogicalOrOptionsT;
+
+struct OneHotOptions;
+struct OneHotOptionsBuilder;
+struct OneHotOptionsT;
+
+struct AbsOptions;
+struct AbsOptionsBuilder;
+struct AbsOptionsT;
+
+struct HardSwishOptions;
+struct HardSwishOptionsBuilder;
+struct HardSwishOptionsT;
+
+struct LogicalAndOptions;
+struct LogicalAndOptionsBuilder;
+struct LogicalAndOptionsT;
+
+struct LogicalNotOptions;
+struct LogicalNotOptionsBuilder;
+struct LogicalNotOptionsT;
+
+struct UnpackOptions;
+struct UnpackOptionsBuilder;
+struct UnpackOptionsT;
+
+struct FloorDivOptions;
+struct FloorDivOptionsBuilder;
+struct FloorDivOptionsT;
+
+struct SquareOptions;
+struct SquareOptionsBuilder;
+struct SquareOptionsT;
+
+struct ZerosLikeOptions;
+struct ZerosLikeOptionsBuilder;
+struct ZerosLikeOptionsT;
+
+struct FillOptions;
+struct FillOptionsBuilder;
+struct FillOptionsT;
+
+struct FloorModOptions;
+struct FloorModOptionsBuilder;
+struct FloorModOptionsT;
+
+struct RangeOptions;
+struct RangeOptionsBuilder;
+struct RangeOptionsT;
+
+struct LeakyReluOptions;
+struct LeakyReluOptionsBuilder;
+struct LeakyReluOptionsT;
+
+struct SquaredDifferenceOptions;
+struct SquaredDifferenceOptionsBuilder;
+struct SquaredDifferenceOptionsT;
+
+struct MirrorPadOptions;
+struct MirrorPadOptionsBuilder;
+struct MirrorPadOptionsT;
+
+struct UniqueOptions;
+struct UniqueOptionsBuilder;
+struct UniqueOptionsT;
+
+struct ReverseV2Options;
+struct ReverseV2OptionsBuilder;
+struct ReverseV2OptionsT;
+
+struct AddNOptions;
+struct AddNOptionsBuilder;
+struct AddNOptionsT;
+
+struct GatherNdOptions;
+struct GatherNdOptionsBuilder;
+struct GatherNdOptionsT;
+
+struct WhereOptions;
+struct WhereOptionsBuilder;
+struct WhereOptionsT;
+
+struct ReverseSequenceOptions;
+struct ReverseSequenceOptionsBuilder;
+struct ReverseSequenceOptionsT;
+
+struct MatrixDiagOptions;
+struct MatrixDiagOptionsBuilder;
+struct MatrixDiagOptionsT;
+
+struct QuantizeOptions;
+struct QuantizeOptionsBuilder;
+struct QuantizeOptionsT;
+
+struct MatrixSetDiagOptions;
+struct MatrixSetDiagOptionsBuilder;
+struct MatrixSetDiagOptionsT;
+
+struct IfOptions;
+struct IfOptionsBuilder;
+struct IfOptionsT;
+
+struct CallOnceOptions;
+struct CallOnceOptionsBuilder;
+struct CallOnceOptionsT;
+
+struct WhileOptions;
+struct WhileOptionsBuilder;
+struct WhileOptionsT;
+
+struct NonMaxSuppressionV4Options;
+struct NonMaxSuppressionV4OptionsBuilder;
+struct NonMaxSuppressionV4OptionsT;
+
+struct NonMaxSuppressionV5Options;
+struct NonMaxSuppressionV5OptionsBuilder;
+struct NonMaxSuppressionV5OptionsT;
+
+struct ScatterNdOptions;
+struct ScatterNdOptionsBuilder;
+struct ScatterNdOptionsT;
+
+struct SelectV2Options;
+struct SelectV2OptionsBuilder;
+struct SelectV2OptionsT;
+
+struct DensifyOptions;
+struct DensifyOptionsBuilder;
+struct DensifyOptionsT;
+
+struct SegmentSumOptions;
+struct SegmentSumOptionsBuilder;
+struct SegmentSumOptionsT;
+
+struct BatchMatMulOptions;
+struct BatchMatMulOptionsBuilder;
+struct BatchMatMulOptionsT;
+
+struct CumsumOptions;
+struct CumsumOptionsBuilder;
+struct CumsumOptionsT;
+
+struct BroadcastToOptions;
+struct BroadcastToOptionsBuilder;
+struct BroadcastToOptionsT;
+
+struct Rfft2dOptions;
+struct Rfft2dOptionsBuilder;
+struct Rfft2dOptionsT;
+
+struct HashtableOptions;
+struct HashtableOptionsBuilder;
+struct HashtableOptionsT;
+
+struct HashtableFindOptions;
+struct HashtableFindOptionsBuilder;
+struct HashtableFindOptionsT;
+
+struct HashtableImportOptions;
+struct HashtableImportOptionsBuilder;
+struct HashtableImportOptionsT;
+
+struct HashtableSizeOptions;
+struct HashtableSizeOptionsBuilder;
+struct HashtableSizeOptionsT;
+
+struct VarHandleOptions;
+struct VarHandleOptionsBuilder;
+struct VarHandleOptionsT;
+
+struct ReadVariableOptions;
+struct ReadVariableOptionsBuilder;
+struct ReadVariableOptionsT;
+
+struct AssignVariableOptions;
+struct AssignVariableOptionsBuilder;
+struct AssignVariableOptionsT;
+
+struct RandomOptions;
+struct RandomOptionsBuilder;
+struct RandomOptionsT;
+
+struct BCQGatherOptions;
+struct BCQGatherOptionsBuilder;
+struct BCQGatherOptionsT;
+
+struct BCQFullyConnectedOptions;
+struct BCQFullyConnectedOptionsBuilder;
+struct BCQFullyConnectedOptionsT;
+
+struct InstanceNormOptions;
+struct InstanceNormOptionsBuilder;
+struct InstanceNormOptionsT;
+
+struct OperatorCode;
+struct OperatorCodeBuilder;
+struct OperatorCodeT;
+
+struct Operator;
+struct OperatorBuilder;
+struct OperatorT;
+
+struct SubGraph;
+struct SubGraphBuilder;
+struct SubGraphT;
+
+struct Buffer;
+struct BufferBuilder;
+struct BufferT;
+
+struct Metadata;
+struct MetadataBuilder;
+struct MetadataT;
+
+struct TensorMap;
+struct TensorMapBuilder;
+struct TensorMapT;
+
+struct SignatureDef;
+struct SignatureDefBuilder;
+struct SignatureDefT;
+
+struct Model;
+struct ModelBuilder;
+struct ModelT;
+
+enum TensorType : int8_t
+{
+ TensorType_FLOAT32 = 0,
+ TensorType_FLOAT16 = 1,
+ TensorType_INT32 = 2,
+ TensorType_UINT8 = 3,
+ TensorType_INT64 = 4,
+ TensorType_STRING = 5,
+ TensorType_BOOL = 6,
+ TensorType_INT16 = 7,
+ TensorType_COMPLEX64 = 8,
+ TensorType_INT8 = 9,
+ TensorType_FLOAT64 = 10,
+ TensorType_COMPLEX128 = 11,
+ TensorType_UINT64 = 12,
+ TensorType_RESOURCE = 13,
+ TensorType_VARIANT = 14,
+ TensorType_UINT32 = 15,
+ TensorType_MIN = TensorType_FLOAT32,
+ TensorType_MAX = TensorType_UINT32
+};
+
+inline const TensorType (&EnumValuesTensorType())[16]
+{
+ static const TensorType values[] = {
+ TensorType_FLOAT32, TensorType_FLOAT16, TensorType_INT32, TensorType_UINT8,
+ TensorType_INT64, TensorType_STRING, TensorType_BOOL, TensorType_INT16,
+ TensorType_COMPLEX64, TensorType_INT8, TensorType_FLOAT64, TensorType_COMPLEX128,
+ TensorType_UINT64, TensorType_RESOURCE, TensorType_VARIANT, TensorType_UINT32};
+ return values;
+}
+
+inline const char *const *EnumNamesTensorType()
+{
+ static const char *const names[17] = {"FLOAT32", "FLOAT16", "INT32", "UINT8", "INT64",
+ "STRING", "BOOL", "INT16", "COMPLEX64", "INT8",
+ "FLOAT64", "COMPLEX128", "UINT64", "RESOURCE", "VARIANT",
+ "UINT32", nullptr};
+ return names;
+}
+
+inline const char *EnumNameTensorType(TensorType e)
+{
+ if (flatbuffers::IsOutRange(e, TensorType_FLOAT32, TensorType_UINT32))
+ return "";
+ const size_t index = static_cast<size_t>(e);
+ return EnumNamesTensorType()[index];
+}
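+
+// For example, EnumNameTensorType(TensorType_INT8) returns "INT8", while an
+// out-of-range value such as static_cast<TensorType>(99) returns "".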
+
+enum QuantizationDetails : uint8_t
+{
+ QuantizationDetails_NONE = 0,
+ QuantizationDetails_CustomQuantization = 1,
+ QuantizationDetails_MIN = QuantizationDetails_NONE,
+ QuantizationDetails_MAX = QuantizationDetails_CustomQuantization
+};
+
+inline const QuantizationDetails (&EnumValuesQuantizationDetails())[2]
+{
+ static const QuantizationDetails values[] = {QuantizationDetails_NONE,
+ QuantizationDetails_CustomQuantization};
+ return values;
+}
+
+inline const char *const *EnumNamesQuantizationDetails()
+{
+ static const char *const names[3] = {"NONE", "CustomQuantization", nullptr};
+ return names;
+}
+
+inline const char *EnumNameQuantizationDetails(QuantizationDetails e)
+{
+ if (flatbuffers::IsOutRange(e, QuantizationDetails_NONE, QuantizationDetails_CustomQuantization))
+ return "";
+ const size_t index = static_cast<size_t>(e);
+ return EnumNamesQuantizationDetails()[index];
+}
+
+template <typename T> struct QuantizationDetailsTraits
+{
+ static const QuantizationDetails enum_value = QuantizationDetails_NONE;
+};
+
+template <> struct QuantizationDetailsTraits<circle::CustomQuantization>
+{
+ static const QuantizationDetails enum_value = QuantizationDetails_CustomQuantization;
+};
+
+struct QuantizationDetailsUnion
+{
+ QuantizationDetails type;
+ void *value;
+
+ QuantizationDetailsUnion() : type(QuantizationDetails_NONE), value(nullptr) {}
+ QuantizationDetailsUnion(QuantizationDetailsUnion &&u) FLATBUFFERS_NOEXCEPT
+ : type(QuantizationDetails_NONE),
+ value(nullptr)
+ {
+ std::swap(type, u.type);
+ std::swap(value, u.value);
+ }
+ QuantizationDetailsUnion(const QuantizationDetailsUnion &);
+ QuantizationDetailsUnion &operator=(const QuantizationDetailsUnion &u)
+ {
+ QuantizationDetailsUnion t(u);
+ std::swap(type, t.type);
+ std::swap(value, t.value);
+ return *this;
+ }
+ QuantizationDetailsUnion &operator=(QuantizationDetailsUnion &&u) FLATBUFFERS_NOEXCEPT
+ {
+ std::swap(type, u.type);
+ std::swap(value, u.value);
+ return *this;
+ }
+ ~QuantizationDetailsUnion() { Reset(); }
+
+ void Reset();
+
+#ifndef FLATBUFFERS_CPP98_STL
+ template <typename T> void Set(T &&val)
+ {
+ using RT = typename std::remove_reference<T>::type;
+ Reset();
+ type = QuantizationDetailsTraits<typename RT::TableType>::enum_value;
+ if (type != QuantizationDetails_NONE)
+ {
+ value = new RT(std::forward<T>(val));
+ }
+ }
+#endif // FLATBUFFERS_CPP98_STL
+
+ static void *UnPack(const void *obj, QuantizationDetails type,
+ const flatbuffers::resolver_function_t *resolver);
+ flatbuffers::Offset<void> Pack(flatbuffers::FlatBufferBuilder &_fbb,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr) const;
+
+ circle::CustomQuantizationT *AsCustomQuantization()
+ {
+ return type == QuantizationDetails_CustomQuantization
+ ? reinterpret_cast<circle::CustomQuantizationT *>(value)
+ : nullptr;
+ }
+ const circle::CustomQuantizationT *AsCustomQuantization() const
+ {
+ return type == QuantizationDetails_CustomQuantization
+ ? reinterpret_cast<const circle::CustomQuantizationT *>(value)
+ : nullptr;
+ }
+};
+
+bool VerifyQuantizationDetails(flatbuffers::Verifier &verifier, const void *obj,
+ QuantizationDetails type);
+bool VerifyQuantizationDetailsVector(flatbuffers::Verifier &verifier,
+ const flatbuffers::Vector<flatbuffers::Offset<void>> *values,
+ const flatbuffers::Vector<uint8_t> *types);
+
+enum DimensionType : int8_t
+{
+ DimensionType_DENSE = 0,
+ DimensionType_SPARSE_CSR = 1,
+ DimensionType_MIN = DimensionType_DENSE,
+ DimensionType_MAX = DimensionType_SPARSE_CSR
+};
+
+inline const DimensionType (&EnumValuesDimensionType())[2]
+{
+ static const DimensionType values[] = {DimensionType_DENSE, DimensionType_SPARSE_CSR};
+ return values;
+}
+
+inline const char *const *EnumNamesDimensionType()
+{
+ static const char *const names[3] = {"DENSE", "SPARSE_CSR", nullptr};
+ return names;
+}
+
+inline const char *EnumNameDimensionType(DimensionType e)
+{
+ if (flatbuffers::IsOutRange(e, DimensionType_DENSE, DimensionType_SPARSE_CSR))
+ return "";
+ const size_t index = static_cast<size_t>(e);
+ return EnumNamesDimensionType()[index];
+}
+
+enum SparseIndexVector : uint8_t
+{
+ SparseIndexVector_NONE = 0,
+ SparseIndexVector_Int32Vector = 1,
+ SparseIndexVector_Uint16Vector = 2,
+ SparseIndexVector_Uint8Vector = 3,
+ SparseIndexVector_MIN = SparseIndexVector_NONE,
+ SparseIndexVector_MAX = SparseIndexVector_Uint8Vector
+};
+
+inline const SparseIndexVector (&EnumValuesSparseIndexVector())[4]
+{
+ static const SparseIndexVector values[] = {SparseIndexVector_NONE, SparseIndexVector_Int32Vector,
+ SparseIndexVector_Uint16Vector,
+ SparseIndexVector_Uint8Vector};
+ return values;
+}
+
+inline const char *const *EnumNamesSparseIndexVector()
+{
+ static const char *const names[5] = {"NONE", "Int32Vector", "Uint16Vector", "Uint8Vector",
+ nullptr};
+ return names;
+}
+
+inline const char *EnumNameSparseIndexVector(SparseIndexVector e)
+{
+ if (flatbuffers::IsOutRange(e, SparseIndexVector_NONE, SparseIndexVector_Uint8Vector))
+ return "";
+ const size_t index = static_cast<size_t>(e);
+ return EnumNamesSparseIndexVector()[index];
+}
+
+template <typename T> struct SparseIndexVectorTraits
+{
+ static const SparseIndexVector enum_value = SparseIndexVector_NONE;
+};
+
+template <> struct SparseIndexVectorTraits<circle::Int32Vector>
+{
+ static const SparseIndexVector enum_value = SparseIndexVector_Int32Vector;
+};
+
+template <> struct SparseIndexVectorTraits<circle::Uint16Vector>
+{
+ static const SparseIndexVector enum_value = SparseIndexVector_Uint16Vector;
+};
+
+template <> struct SparseIndexVectorTraits<circle::Uint8Vector>
+{
+ static const SparseIndexVector enum_value = SparseIndexVector_Uint8Vector;
+};
+
+struct SparseIndexVectorUnion
+{
+ SparseIndexVector type;
+ void *value;
+
+ SparseIndexVectorUnion() : type(SparseIndexVector_NONE), value(nullptr) {}
+ SparseIndexVectorUnion(SparseIndexVectorUnion &&u) FLATBUFFERS_NOEXCEPT
+ : type(SparseIndexVector_NONE),
+ value(nullptr)
+ {
+ std::swap(type, u.type);
+ std::swap(value, u.value);
+ }
+ SparseIndexVectorUnion(const SparseIndexVectorUnion &);
+ SparseIndexVectorUnion &operator=(const SparseIndexVectorUnion &u)
+ {
+ SparseIndexVectorUnion t(u);
+ std::swap(type, t.type);
+ std::swap(value, t.value);
+ return *this;
+ }
+ SparseIndexVectorUnion &operator=(SparseIndexVectorUnion &&u) FLATBUFFERS_NOEXCEPT
+ {
+ std::swap(type, u.type);
+ std::swap(value, u.value);
+ return *this;
+ }
+ ~SparseIndexVectorUnion() { Reset(); }
+
+ void Reset();
+
+#ifndef FLATBUFFERS_CPP98_STL
+ template <typename T> void Set(T &&val)
+ {
+ using RT = typename std::remove_reference<T>::type;
+ Reset();
+ type = SparseIndexVectorTraits<typename RT::TableType>::enum_value;
+ if (type != SparseIndexVector_NONE)
+ {
+ value = new RT(std::forward<T>(val));
+ }
+ }
+#endif // FLATBUFFERS_CPP98_STL
+
+ static void *UnPack(const void *obj, SparseIndexVector type,
+ const flatbuffers::resolver_function_t *resolver);
+ flatbuffers::Offset<void> Pack(flatbuffers::FlatBufferBuilder &_fbb,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr) const;
+
+ circle::Int32VectorT *AsInt32Vector()
+ {
+ return type == SparseIndexVector_Int32Vector ? reinterpret_cast<circle::Int32VectorT *>(value)
+ : nullptr;
+ }
+ const circle::Int32VectorT *AsInt32Vector() const
+ {
+ return type == SparseIndexVector_Int32Vector
+ ? reinterpret_cast<const circle::Int32VectorT *>(value)
+ : nullptr;
+ }
+ circle::Uint16VectorT *AsUint16Vector()
+ {
+ return type == SparseIndexVector_Uint16Vector ? reinterpret_cast<circle::Uint16VectorT *>(value)
+ : nullptr;
+ }
+ const circle::Uint16VectorT *AsUint16Vector() const
+ {
+ return type == SparseIndexVector_Uint16Vector
+ ? reinterpret_cast<const circle::Uint16VectorT *>(value)
+ : nullptr;
+ }
+ circle::Uint8VectorT *AsUint8Vector()
+ {
+ return type == SparseIndexVector_Uint8Vector ? reinterpret_cast<circle::Uint8VectorT *>(value)
+ : nullptr;
+ }
+ const circle::Uint8VectorT *AsUint8Vector() const
+ {
+ return type == SparseIndexVector_Uint8Vector
+ ? reinterpret_cast<const circle::Uint8VectorT *>(value)
+ : nullptr;
+ }
+};
+
+bool VerifySparseIndexVector(flatbuffers::Verifier &verifier, const void *obj,
+ SparseIndexVector type);
+bool VerifySparseIndexVectorVector(flatbuffers::Verifier &verifier,
+ const flatbuffers::Vector<flatbuffers::Offset<void>> *values,
+ const flatbuffers::Vector<uint8_t> *types);
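+// NOTE Illustrative sketch of the *Union object-API wrapper used above: a type tag plus
+// an owning value pointer, Set() moves an object-API value in, and the As*() accessors
+// give type-checked access. Assumes the object-API types (e.g. circle::Int32VectorT)
+// generated elsewhere in this header; the snippet itself is not generated code:
+//
+//   circle::SparseIndexVectorUnion u;
+//   u.Set(circle::Int32VectorT());                // tag -> SparseIndexVector_Int32Vector
+//   circle::Int32VectorT *ok = u.AsInt32Vector(); // non-null: tag matches
+//   circle::Uint8VectorT *no = u.AsUint8Vector(); // nullptr: tag does not match
+//   // The owned Int32VectorT is deleted by Reset() / ~SparseIndexVectorUnion().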
+
+enum BuiltinOperator : int32_t
+{
+ BuiltinOperator_BCQ_GATHER = -4,
+ BuiltinOperator_BCQ_FULLY_CONNECTED = -3,
+ BuiltinOperator_INSTANCE_NORM = -2,
+ BuiltinOperator_ADD = 0,
+ BuiltinOperator_AVERAGE_POOL_2D = 1,
+ BuiltinOperator_CONCATENATION = 2,
+ BuiltinOperator_CONV_2D = 3,
+ BuiltinOperator_DEPTHWISE_CONV_2D = 4,
+ BuiltinOperator_DEPTH_TO_SPACE = 5,
+ BuiltinOperator_DEQUANTIZE = 6,
+ BuiltinOperator_EMBEDDING_LOOKUP = 7,
+ BuiltinOperator_FLOOR = 8,
+ BuiltinOperator_FULLY_CONNECTED = 9,
+ BuiltinOperator_HASHTABLE_LOOKUP = 10,
+ BuiltinOperator_L2_NORMALIZATION = 11,
+ BuiltinOperator_L2_POOL_2D = 12,
+ BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION = 13,
+ BuiltinOperator_LOGISTIC = 14,
+ BuiltinOperator_LSH_PROJECTION = 15,
+ BuiltinOperator_LSTM = 16,
+ BuiltinOperator_MAX_POOL_2D = 17,
+ BuiltinOperator_MUL = 18,
+ BuiltinOperator_RELU = 19,
+ BuiltinOperator_RELU_N1_TO_1 = 20,
+ BuiltinOperator_RELU6 = 21,
+ BuiltinOperator_RESHAPE = 22,
+ BuiltinOperator_RESIZE_BILINEAR = 23,
+ BuiltinOperator_RNN = 24,
+ BuiltinOperator_SOFTMAX = 25,
+ BuiltinOperator_SPACE_TO_DEPTH = 26,
+ BuiltinOperator_SVDF = 27,
+ BuiltinOperator_TANH = 28,
+ BuiltinOperator_CONCAT_EMBEDDINGS = 29,
+ BuiltinOperator_SKIP_GRAM = 30,
+ BuiltinOperator_CALL = 31,
+ BuiltinOperator_CUSTOM = 32,
+ BuiltinOperator_EMBEDDING_LOOKUP_SPARSE = 33,
+ BuiltinOperator_PAD = 34,
+ BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN = 35,
+ BuiltinOperator_GATHER = 36,
+ BuiltinOperator_BATCH_TO_SPACE_ND = 37,
+ BuiltinOperator_SPACE_TO_BATCH_ND = 38,
+ BuiltinOperator_TRANSPOSE = 39,
+ BuiltinOperator_MEAN = 40,
+ BuiltinOperator_SUB = 41,
+ BuiltinOperator_DIV = 42,
+ BuiltinOperator_SQUEEZE = 43,
+ BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM = 44,
+ BuiltinOperator_STRIDED_SLICE = 45,
+ BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN = 46,
+ BuiltinOperator_EXP = 47,
+ BuiltinOperator_TOPK_V2 = 48,
+ BuiltinOperator_SPLIT = 49,
+ BuiltinOperator_LOG_SOFTMAX = 50,
+ BuiltinOperator_DELEGATE = 51,
+ BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM = 52,
+ BuiltinOperator_CAST = 53,
+ BuiltinOperator_PRELU = 54,
+ BuiltinOperator_MAXIMUM = 55,
+ BuiltinOperator_ARG_MAX = 56,
+ BuiltinOperator_MINIMUM = 57,
+ BuiltinOperator_LESS = 58,
+ BuiltinOperator_NEG = 59,
+ BuiltinOperator_PADV2 = 60,
+ BuiltinOperator_GREATER = 61,
+ BuiltinOperator_GREATER_EQUAL = 62,
+ BuiltinOperator_LESS_EQUAL = 63,
+ BuiltinOperator_SELECT = 64,
+ BuiltinOperator_SLICE = 65,
+ BuiltinOperator_SIN = 66,
+ BuiltinOperator_TRANSPOSE_CONV = 67,
+ BuiltinOperator_SPARSE_TO_DENSE = 68,
+ BuiltinOperator_TILE = 69,
+ BuiltinOperator_EXPAND_DIMS = 70,
+ BuiltinOperator_EQUAL = 71,
+ BuiltinOperator_NOT_EQUAL = 72,
+ BuiltinOperator_LOG = 73,
+ BuiltinOperator_SUM = 74,
+ BuiltinOperator_SQRT = 75,
+ BuiltinOperator_RSQRT = 76,
+ BuiltinOperator_SHAPE = 77,
+ BuiltinOperator_POW = 78,
+ BuiltinOperator_ARG_MIN = 79,
+ BuiltinOperator_FAKE_QUANT = 80,
+ BuiltinOperator_REDUCE_PROD = 81,
+ BuiltinOperator_REDUCE_MAX = 82,
+ BuiltinOperator_PACK = 83,
+ BuiltinOperator_LOGICAL_OR = 84,
+ BuiltinOperator_ONE_HOT = 85,
+ BuiltinOperator_LOGICAL_AND = 86,
+ BuiltinOperator_LOGICAL_NOT = 87,
+ BuiltinOperator_UNPACK = 88,
+ BuiltinOperator_REDUCE_MIN = 89,
+ BuiltinOperator_FLOOR_DIV = 90,
+ BuiltinOperator_REDUCE_ANY = 91,
+ BuiltinOperator_SQUARE = 92,
+ BuiltinOperator_ZEROS_LIKE = 93,
+ BuiltinOperator_FILL = 94,
+ BuiltinOperator_FLOOR_MOD = 95,
+ BuiltinOperator_RANGE = 96,
+ BuiltinOperator_RESIZE_NEAREST_NEIGHBOR = 97,
+ BuiltinOperator_LEAKY_RELU = 98,
+ BuiltinOperator_SQUARED_DIFFERENCE = 99,
+ BuiltinOperator_MIRROR_PAD = 100,
+ BuiltinOperator_ABS = 101,
+ BuiltinOperator_SPLIT_V = 102,
+ BuiltinOperator_UNIQUE = 103,
+ BuiltinOperator_CEIL = 104,
+ BuiltinOperator_REVERSE_V2 = 105,
+ BuiltinOperator_ADD_N = 106,
+ BuiltinOperator_GATHER_ND = 107,
+ BuiltinOperator_COS = 108,
+ BuiltinOperator_WHERE = 109,
+ BuiltinOperator_RANK = 110,
+ BuiltinOperator_ELU = 111,
+ BuiltinOperator_REVERSE_SEQUENCE = 112,
+ BuiltinOperator_MATRIX_DIAG = 113,
+ BuiltinOperator_QUANTIZE = 114,
+ BuiltinOperator_MATRIX_SET_DIAG = 115,
+ BuiltinOperator_ROUND = 116,
+ BuiltinOperator_HARD_SWISH = 117,
+ BuiltinOperator_IF = 118,
+ BuiltinOperator_WHILE = 119,
+ BuiltinOperator_NON_MAX_SUPPRESSION_V4 = 120,
+ BuiltinOperator_NON_MAX_SUPPRESSION_V5 = 121,
+ BuiltinOperator_SCATTER_ND = 122,
+ BuiltinOperator_SELECT_V2 = 123,
+ BuiltinOperator_DENSIFY = 124,
+ BuiltinOperator_SEGMENT_SUM = 125,
+ BuiltinOperator_BATCH_MATMUL = 126,
+ BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES = 127,
+ BuiltinOperator_CUMSUM = 128,
+ BuiltinOperator_CALL_ONCE = 129,
+ BuiltinOperator_BROADCAST_TO = 130,
+ BuiltinOperator_RFFT2D = 131,
+ BuiltinOperator_CONV_3D = 132,
+ BuiltinOperator_IMAG = 133,
+ BuiltinOperator_REAL = 134,
+ BuiltinOperator_COMPLEX_ABS = 135,
+ BuiltinOperator_HASHTABLE = 136,
+ BuiltinOperator_HASHTABLE_FIND = 137,
+ BuiltinOperator_HASHTABLE_IMPORT = 138,
+ BuiltinOperator_HASHTABLE_SIZE = 139,
+ BuiltinOperator_REDUCE_ALL = 140,
+ BuiltinOperator_CONV_3D_TRANSPOSE = 141,
+ BuiltinOperator_VAR_HANDLE = 142,
+ BuiltinOperator_READ_VARIABLE = 143,
+ BuiltinOperator_ASSIGN_VARIABLE = 144,
+ BuiltinOperator_BROADCAST_ARGS = 145,
+ BuiltinOperator_RANDOM_STANDARD_NORMAL = 146,
+ BuiltinOperator_MIN = BuiltinOperator_BCQ_GATHER,
+ BuiltinOperator_MAX = BuiltinOperator_RANDOM_STANDARD_NORMAL
+};
+
+inline const BuiltinOperator (&EnumValuesBuiltinOperator())[150]
+{
+ static const BuiltinOperator values[] = {BuiltinOperator_BCQ_GATHER,
+ BuiltinOperator_BCQ_FULLY_CONNECTED,
+ BuiltinOperator_INSTANCE_NORM,
+ BuiltinOperator_ADD,
+ BuiltinOperator_AVERAGE_POOL_2D,
+ BuiltinOperator_CONCATENATION,
+ BuiltinOperator_CONV_2D,
+ BuiltinOperator_DEPTHWISE_CONV_2D,
+ BuiltinOperator_DEPTH_TO_SPACE,
+ BuiltinOperator_DEQUANTIZE,
+ BuiltinOperator_EMBEDDING_LOOKUP,
+ BuiltinOperator_FLOOR,
+ BuiltinOperator_FULLY_CONNECTED,
+ BuiltinOperator_HASHTABLE_LOOKUP,
+ BuiltinOperator_L2_NORMALIZATION,
+ BuiltinOperator_L2_POOL_2D,
+ BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION,
+ BuiltinOperator_LOGISTIC,
+ BuiltinOperator_LSH_PROJECTION,
+ BuiltinOperator_LSTM,
+ BuiltinOperator_MAX_POOL_2D,
+ BuiltinOperator_MUL,
+ BuiltinOperator_RELU,
+ BuiltinOperator_RELU_N1_TO_1,
+ BuiltinOperator_RELU6,
+ BuiltinOperator_RESHAPE,
+ BuiltinOperator_RESIZE_BILINEAR,
+ BuiltinOperator_RNN,
+ BuiltinOperator_SOFTMAX,
+ BuiltinOperator_SPACE_TO_DEPTH,
+ BuiltinOperator_SVDF,
+ BuiltinOperator_TANH,
+ BuiltinOperator_CONCAT_EMBEDDINGS,
+ BuiltinOperator_SKIP_GRAM,
+ BuiltinOperator_CALL,
+ BuiltinOperator_CUSTOM,
+ BuiltinOperator_EMBEDDING_LOOKUP_SPARSE,
+ BuiltinOperator_PAD,
+ BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN,
+ BuiltinOperator_GATHER,
+ BuiltinOperator_BATCH_TO_SPACE_ND,
+ BuiltinOperator_SPACE_TO_BATCH_ND,
+ BuiltinOperator_TRANSPOSE,
+ BuiltinOperator_MEAN,
+ BuiltinOperator_SUB,
+ BuiltinOperator_DIV,
+ BuiltinOperator_SQUEEZE,
+ BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM,
+ BuiltinOperator_STRIDED_SLICE,
+ BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN,
+ BuiltinOperator_EXP,
+ BuiltinOperator_TOPK_V2,
+ BuiltinOperator_SPLIT,
+ BuiltinOperator_LOG_SOFTMAX,
+ BuiltinOperator_DELEGATE,
+ BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM,
+ BuiltinOperator_CAST,
+ BuiltinOperator_PRELU,
+ BuiltinOperator_MAXIMUM,
+ BuiltinOperator_ARG_MAX,
+ BuiltinOperator_MINIMUM,
+ BuiltinOperator_LESS,
+ BuiltinOperator_NEG,
+ BuiltinOperator_PADV2,
+ BuiltinOperator_GREATER,
+ BuiltinOperator_GREATER_EQUAL,
+ BuiltinOperator_LESS_EQUAL,
+ BuiltinOperator_SELECT,
+ BuiltinOperator_SLICE,
+ BuiltinOperator_SIN,
+ BuiltinOperator_TRANSPOSE_CONV,
+ BuiltinOperator_SPARSE_TO_DENSE,
+ BuiltinOperator_TILE,
+ BuiltinOperator_EXPAND_DIMS,
+ BuiltinOperator_EQUAL,
+ BuiltinOperator_NOT_EQUAL,
+ BuiltinOperator_LOG,
+ BuiltinOperator_SUM,
+ BuiltinOperator_SQRT,
+ BuiltinOperator_RSQRT,
+ BuiltinOperator_SHAPE,
+ BuiltinOperator_POW,
+ BuiltinOperator_ARG_MIN,
+ BuiltinOperator_FAKE_QUANT,
+ BuiltinOperator_REDUCE_PROD,
+ BuiltinOperator_REDUCE_MAX,
+ BuiltinOperator_PACK,
+ BuiltinOperator_LOGICAL_OR,
+ BuiltinOperator_ONE_HOT,
+ BuiltinOperator_LOGICAL_AND,
+ BuiltinOperator_LOGICAL_NOT,
+ BuiltinOperator_UNPACK,
+ BuiltinOperator_REDUCE_MIN,
+ BuiltinOperator_FLOOR_DIV,
+ BuiltinOperator_REDUCE_ANY,
+ BuiltinOperator_SQUARE,
+ BuiltinOperator_ZEROS_LIKE,
+ BuiltinOperator_FILL,
+ BuiltinOperator_FLOOR_MOD,
+ BuiltinOperator_RANGE,
+ BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
+ BuiltinOperator_LEAKY_RELU,
+ BuiltinOperator_SQUARED_DIFFERENCE,
+ BuiltinOperator_MIRROR_PAD,
+ BuiltinOperator_ABS,
+ BuiltinOperator_SPLIT_V,
+ BuiltinOperator_UNIQUE,
+ BuiltinOperator_CEIL,
+ BuiltinOperator_REVERSE_V2,
+ BuiltinOperator_ADD_N,
+ BuiltinOperator_GATHER_ND,
+ BuiltinOperator_COS,
+ BuiltinOperator_WHERE,
+ BuiltinOperator_RANK,
+ BuiltinOperator_ELU,
+ BuiltinOperator_REVERSE_SEQUENCE,
+ BuiltinOperator_MATRIX_DIAG,
+ BuiltinOperator_QUANTIZE,
+ BuiltinOperator_MATRIX_SET_DIAG,
+ BuiltinOperator_ROUND,
+ BuiltinOperator_HARD_SWISH,
+ BuiltinOperator_IF,
+ BuiltinOperator_WHILE,
+ BuiltinOperator_NON_MAX_SUPPRESSION_V4,
+ BuiltinOperator_NON_MAX_SUPPRESSION_V5,
+ BuiltinOperator_SCATTER_ND,
+ BuiltinOperator_SELECT_V2,
+ BuiltinOperator_DENSIFY,
+ BuiltinOperator_SEGMENT_SUM,
+ BuiltinOperator_BATCH_MATMUL,
+ BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES,
+ BuiltinOperator_CUMSUM,
+ BuiltinOperator_CALL_ONCE,
+ BuiltinOperator_BROADCAST_TO,
+ BuiltinOperator_RFFT2D,
+ BuiltinOperator_CONV_3D,
+ BuiltinOperator_IMAG,
+ BuiltinOperator_REAL,
+ BuiltinOperator_COMPLEX_ABS,
+ BuiltinOperator_HASHTABLE,
+ BuiltinOperator_HASHTABLE_FIND,
+ BuiltinOperator_HASHTABLE_IMPORT,
+ BuiltinOperator_HASHTABLE_SIZE,
+ BuiltinOperator_REDUCE_ALL,
+ BuiltinOperator_CONV_3D_TRANSPOSE,
+ BuiltinOperator_VAR_HANDLE,
+ BuiltinOperator_READ_VARIABLE,
+ BuiltinOperator_ASSIGN_VARIABLE,
+ BuiltinOperator_BROADCAST_ARGS,
+ BuiltinOperator_RANDOM_STANDARD_NORMAL};
+ return values;
+}
+
+inline const char *const *EnumNamesBuiltinOperator()
+{
+ static const char *const names[152] = {"BCQ_GATHER",
+ "BCQ_FULLY_CONNECTED",
+ "INSTANCE_NORM",
+ "",
+ "ADD",
+ "AVERAGE_POOL_2D",
+ "CONCATENATION",
+ "CONV_2D",
+ "DEPTHWISE_CONV_2D",
+ "DEPTH_TO_SPACE",
+ "DEQUANTIZE",
+ "EMBEDDING_LOOKUP",
+ "FLOOR",
+ "FULLY_CONNECTED",
+ "HASHTABLE_LOOKUP",
+ "L2_NORMALIZATION",
+ "L2_POOL_2D",
+ "LOCAL_RESPONSE_NORMALIZATION",
+ "LOGISTIC",
+ "LSH_PROJECTION",
+ "LSTM",
+ "MAX_POOL_2D",
+ "MUL",
+ "RELU",
+ "RELU_N1_TO_1",
+ "RELU6",
+ "RESHAPE",
+ "RESIZE_BILINEAR",
+ "RNN",
+ "SOFTMAX",
+ "SPACE_TO_DEPTH",
+ "SVDF",
+ "TANH",
+ "CONCAT_EMBEDDINGS",
+ "SKIP_GRAM",
+ "CALL",
+ "CUSTOM",
+ "EMBEDDING_LOOKUP_SPARSE",
+ "PAD",
+ "UNIDIRECTIONAL_SEQUENCE_RNN",
+ "GATHER",
+ "BATCH_TO_SPACE_ND",
+ "SPACE_TO_BATCH_ND",
+ "TRANSPOSE",
+ "MEAN",
+ "SUB",
+ "DIV",
+ "SQUEEZE",
+ "UNIDIRECTIONAL_SEQUENCE_LSTM",
+ "STRIDED_SLICE",
+ "BIDIRECTIONAL_SEQUENCE_RNN",
+ "EXP",
+ "TOPK_V2",
+ "SPLIT",
+ "LOG_SOFTMAX",
+ "DELEGATE",
+ "BIDIRECTIONAL_SEQUENCE_LSTM",
+ "CAST",
+ "PRELU",
+ "MAXIMUM",
+ "ARG_MAX",
+ "MINIMUM",
+ "LESS",
+ "NEG",
+ "PADV2",
+ "GREATER",
+ "GREATER_EQUAL",
+ "LESS_EQUAL",
+ "SELECT",
+ "SLICE",
+ "SIN",
+ "TRANSPOSE_CONV",
+ "SPARSE_TO_DENSE",
+ "TILE",
+ "EXPAND_DIMS",
+ "EQUAL",
+ "NOT_EQUAL",
+ "LOG",
+ "SUM",
+ "SQRT",
+ "RSQRT",
+ "SHAPE",
+ "POW",
+ "ARG_MIN",
+ "FAKE_QUANT",
+ "REDUCE_PROD",
+ "REDUCE_MAX",
+ "PACK",
+ "LOGICAL_OR",
+ "ONE_HOT",
+ "LOGICAL_AND",
+ "LOGICAL_NOT",
+ "UNPACK",
+ "REDUCE_MIN",
+ "FLOOR_DIV",
+ "REDUCE_ANY",
+ "SQUARE",
+ "ZEROS_LIKE",
+ "FILL",
+ "FLOOR_MOD",
+ "RANGE",
+ "RESIZE_NEAREST_NEIGHBOR",
+ "LEAKY_RELU",
+ "SQUARED_DIFFERENCE",
+ "MIRROR_PAD",
+ "ABS",
+ "SPLIT_V",
+ "UNIQUE",
+ "CEIL",
+ "REVERSE_V2",
+ "ADD_N",
+ "GATHER_ND",
+ "COS",
+ "WHERE",
+ "RANK",
+ "ELU",
+ "REVERSE_SEQUENCE",
+ "MATRIX_DIAG",
+ "QUANTIZE",
+ "MATRIX_SET_DIAG",
+ "ROUND",
+ "HARD_SWISH",
+ "IF",
+ "WHILE",
+ "NON_MAX_SUPPRESSION_V4",
+ "NON_MAX_SUPPRESSION_V5",
+ "SCATTER_ND",
+ "SELECT_V2",
+ "DENSIFY",
+ "SEGMENT_SUM",
+ "BATCH_MATMUL",
+ "PLACEHOLDER_FOR_GREATER_OP_CODES",
+ "CUMSUM",
+ "CALL_ONCE",
+ "BROADCAST_TO",
+ "RFFT2D",
+ "CONV_3D",
+ "IMAG",
+ "REAL",
+ "COMPLEX_ABS",
+ "HASHTABLE",
+ "HASHTABLE_FIND",
+ "HASHTABLE_IMPORT",
+ "HASHTABLE_SIZE",
+ "REDUCE_ALL",
+ "CONV_3D_TRANSPOSE",
+ "VAR_HANDLE",
+ "READ_VARIABLE",
+ "ASSIGN_VARIABLE",
+ "BROADCAST_ARGS",
+ "RANDOM_STANDARD_NORMAL",
+ nullptr};
+ return names;
+}
+
+inline const char *EnumNameBuiltinOperator(BuiltinOperator e)
+{
+ if (flatbuffers::IsOutRange(e, BuiltinOperator_BCQ_GATHER,
+ BuiltinOperator_RANDOM_STANDARD_NORMAL))
+ return "";
+ const size_t index = static_cast<size_t>(e) - static_cast<size_t>(BuiltinOperator_BCQ_GATHER);
+ return EnumNamesBuiltinOperator()[index];
+}
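+// NOTE Unlike the other enums here, BuiltinOperator starts at a negative MIN
+// (BCQ_GATHER == -4), so EnumNameBuiltinOperator() rebases the index by that MIN and the
+// unused value -1 lands on the empty slot of the name table. Illustrative sketch, not
+// generated code:
+//
+//   circle::EnumNameBuiltinOperator(circle::BuiltinOperator_CONV_2D);    // "CONV_2D"
+//   circle::EnumNameBuiltinOperator(circle::BuiltinOperator_BCQ_GATHER); // "BCQ_GATHER"
+//   circle::EnumNameBuiltinOperator(static_cast<circle::BuiltinOperator>(-1)); // ""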
+
+enum BuiltinOptions : uint8_t
+{
+ BuiltinOptions_NONE = 0,
+ BuiltinOptions_Conv2DOptions = 1,
+ BuiltinOptions_DepthwiseConv2DOptions = 2,
+ BuiltinOptions_ConcatEmbeddingsOptions = 3,
+ BuiltinOptions_LSHProjectionOptions = 4,
+ BuiltinOptions_Pool2DOptions = 5,
+ BuiltinOptions_SVDFOptions = 6,
+ BuiltinOptions_RNNOptions = 7,
+ BuiltinOptions_FullyConnectedOptions = 8,
+ BuiltinOptions_SoftmaxOptions = 9,
+ BuiltinOptions_ConcatenationOptions = 10,
+ BuiltinOptions_AddOptions = 11,
+ BuiltinOptions_L2NormOptions = 12,
+ BuiltinOptions_LocalResponseNormalizationOptions = 13,
+ BuiltinOptions_LSTMOptions = 14,
+ BuiltinOptions_ResizeBilinearOptions = 15,
+ BuiltinOptions_CallOptions = 16,
+ BuiltinOptions_ReshapeOptions = 17,
+ BuiltinOptions_SkipGramOptions = 18,
+ BuiltinOptions_SpaceToDepthOptions = 19,
+ BuiltinOptions_EmbeddingLookupSparseOptions = 20,
+ BuiltinOptions_MulOptions = 21,
+ BuiltinOptions_PadOptions = 22,
+ BuiltinOptions_GatherOptions = 23,
+ BuiltinOptions_BatchToSpaceNDOptions = 24,
+ BuiltinOptions_SpaceToBatchNDOptions = 25,
+ BuiltinOptions_TransposeOptions = 26,
+ BuiltinOptions_ReducerOptions = 27,
+ BuiltinOptions_SubOptions = 28,
+ BuiltinOptions_DivOptions = 29,
+ BuiltinOptions_SqueezeOptions = 30,
+ BuiltinOptions_SequenceRNNOptions = 31,
+ BuiltinOptions_StridedSliceOptions = 32,
+ BuiltinOptions_ExpOptions = 33,
+ BuiltinOptions_TopKV2Options = 34,
+ BuiltinOptions_SplitOptions = 35,
+ BuiltinOptions_LogSoftmaxOptions = 36,
+ BuiltinOptions_CastOptions = 37,
+ BuiltinOptions_DequantizeOptions = 38,
+ BuiltinOptions_MaximumMinimumOptions = 39,
+ BuiltinOptions_ArgMaxOptions = 40,
+ BuiltinOptions_LessOptions = 41,
+ BuiltinOptions_NegOptions = 42,
+ BuiltinOptions_PadV2Options = 43,
+ BuiltinOptions_GreaterOptions = 44,
+ BuiltinOptions_GreaterEqualOptions = 45,
+ BuiltinOptions_LessEqualOptions = 46,
+ BuiltinOptions_SelectOptions = 47,
+ BuiltinOptions_SliceOptions = 48,
+ BuiltinOptions_TransposeConvOptions = 49,
+ BuiltinOptions_SparseToDenseOptions = 50,
+ BuiltinOptions_TileOptions = 51,
+ BuiltinOptions_ExpandDimsOptions = 52,
+ BuiltinOptions_EqualOptions = 53,
+ BuiltinOptions_NotEqualOptions = 54,
+ BuiltinOptions_ShapeOptions = 55,
+ BuiltinOptions_PowOptions = 56,
+ BuiltinOptions_ArgMinOptions = 57,
+ BuiltinOptions_FakeQuantOptions = 58,
+ BuiltinOptions_PackOptions = 59,
+ BuiltinOptions_LogicalOrOptions = 60,
+ BuiltinOptions_OneHotOptions = 61,
+ BuiltinOptions_LogicalAndOptions = 62,
+ BuiltinOptions_LogicalNotOptions = 63,
+ BuiltinOptions_UnpackOptions = 64,
+ BuiltinOptions_FloorDivOptions = 65,
+ BuiltinOptions_SquareOptions = 66,
+ BuiltinOptions_ZerosLikeOptions = 67,
+ BuiltinOptions_FillOptions = 68,
+ BuiltinOptions_BidirectionalSequenceLSTMOptions = 69,
+ BuiltinOptions_BidirectionalSequenceRNNOptions = 70,
+ BuiltinOptions_UnidirectionalSequenceLSTMOptions = 71,
+ BuiltinOptions_FloorModOptions = 72,
+ BuiltinOptions_RangeOptions = 73,
+ BuiltinOptions_ResizeNearestNeighborOptions = 74,
+ BuiltinOptions_LeakyReluOptions = 75,
+ BuiltinOptions_SquaredDifferenceOptions = 76,
+ BuiltinOptions_MirrorPadOptions = 77,
+ BuiltinOptions_AbsOptions = 78,
+ BuiltinOptions_SplitVOptions = 79,
+ BuiltinOptions_UniqueOptions = 80,
+ BuiltinOptions_ReverseV2Options = 81,
+ BuiltinOptions_AddNOptions = 82,
+ BuiltinOptions_GatherNdOptions = 83,
+ BuiltinOptions_CosOptions = 84,
+ BuiltinOptions_WhereOptions = 85,
+ BuiltinOptions_RankOptions = 86,
+ BuiltinOptions_ReverseSequenceOptions = 87,
+ BuiltinOptions_MatrixDiagOptions = 88,
+ BuiltinOptions_QuantizeOptions = 89,
+ BuiltinOptions_MatrixSetDiagOptions = 90,
+ BuiltinOptions_HardSwishOptions = 91,
+ BuiltinOptions_IfOptions = 92,
+ BuiltinOptions_WhileOptions = 93,
+ BuiltinOptions_DepthToSpaceOptions = 94,
+ BuiltinOptions_NonMaxSuppressionV4Options = 95,
+ BuiltinOptions_NonMaxSuppressionV5Options = 96,
+ BuiltinOptions_ScatterNdOptions = 97,
+ BuiltinOptions_SelectV2Options = 98,
+ BuiltinOptions_DensifyOptions = 99,
+ BuiltinOptions_SegmentSumOptions = 100,
+ BuiltinOptions_BatchMatMulOptions = 101,
+ BuiltinOptions_CumsumOptions = 102,
+ BuiltinOptions_CallOnceOptions = 103,
+ BuiltinOptions_BroadcastToOptions = 104,
+ BuiltinOptions_Rfft2dOptions = 105,
+ BuiltinOptions_Conv3DOptions = 106,
+ BuiltinOptions_HashtableOptions = 107,
+ BuiltinOptions_HashtableFindOptions = 108,
+ BuiltinOptions_HashtableImportOptions = 109,
+ BuiltinOptions_HashtableSizeOptions = 110,
+ BuiltinOptions_VarHandleOptions = 111,
+ BuiltinOptions_ReadVariableOptions = 112,
+ BuiltinOptions_AssignVariableOptions = 113,
+ BuiltinOptions_RandomOptions = 114,
+ BuiltinOptions_BCQGatherOptions = 252,
+ BuiltinOptions_BCQFullyConnectedOptions = 253,
+ BuiltinOptions_InstanceNormOptions = 254,
+ BuiltinOptions_MIN = BuiltinOptions_NONE,
+ BuiltinOptions_MAX = BuiltinOptions_InstanceNormOptions
+};
+
+inline const BuiltinOptions (&EnumValuesBuiltinOptions())[118]
+{
+ static const BuiltinOptions values[] = {BuiltinOptions_NONE,
+ BuiltinOptions_Conv2DOptions,
+ BuiltinOptions_DepthwiseConv2DOptions,
+ BuiltinOptions_ConcatEmbeddingsOptions,
+ BuiltinOptions_LSHProjectionOptions,
+ BuiltinOptions_Pool2DOptions,
+ BuiltinOptions_SVDFOptions,
+ BuiltinOptions_RNNOptions,
+ BuiltinOptions_FullyConnectedOptions,
+ BuiltinOptions_SoftmaxOptions,
+ BuiltinOptions_ConcatenationOptions,
+ BuiltinOptions_AddOptions,
+ BuiltinOptions_L2NormOptions,
+ BuiltinOptions_LocalResponseNormalizationOptions,
+ BuiltinOptions_LSTMOptions,
+ BuiltinOptions_ResizeBilinearOptions,
+ BuiltinOptions_CallOptions,
+ BuiltinOptions_ReshapeOptions,
+ BuiltinOptions_SkipGramOptions,
+ BuiltinOptions_SpaceToDepthOptions,
+ BuiltinOptions_EmbeddingLookupSparseOptions,
+ BuiltinOptions_MulOptions,
+ BuiltinOptions_PadOptions,
+ BuiltinOptions_GatherOptions,
+ BuiltinOptions_BatchToSpaceNDOptions,
+ BuiltinOptions_SpaceToBatchNDOptions,
+ BuiltinOptions_TransposeOptions,
+ BuiltinOptions_ReducerOptions,
+ BuiltinOptions_SubOptions,
+ BuiltinOptions_DivOptions,
+ BuiltinOptions_SqueezeOptions,
+ BuiltinOptions_SequenceRNNOptions,
+ BuiltinOptions_StridedSliceOptions,
+ BuiltinOptions_ExpOptions,
+ BuiltinOptions_TopKV2Options,
+ BuiltinOptions_SplitOptions,
+ BuiltinOptions_LogSoftmaxOptions,
+ BuiltinOptions_CastOptions,
+ BuiltinOptions_DequantizeOptions,
+ BuiltinOptions_MaximumMinimumOptions,
+ BuiltinOptions_ArgMaxOptions,
+ BuiltinOptions_LessOptions,
+ BuiltinOptions_NegOptions,
+ BuiltinOptions_PadV2Options,
+ BuiltinOptions_GreaterOptions,
+ BuiltinOptions_GreaterEqualOptions,
+ BuiltinOptions_LessEqualOptions,
+ BuiltinOptions_SelectOptions,
+ BuiltinOptions_SliceOptions,
+ BuiltinOptions_TransposeConvOptions,
+ BuiltinOptions_SparseToDenseOptions,
+ BuiltinOptions_TileOptions,
+ BuiltinOptions_ExpandDimsOptions,
+ BuiltinOptions_EqualOptions,
+ BuiltinOptions_NotEqualOptions,
+ BuiltinOptions_ShapeOptions,
+ BuiltinOptions_PowOptions,
+ BuiltinOptions_ArgMinOptions,
+ BuiltinOptions_FakeQuantOptions,
+ BuiltinOptions_PackOptions,
+ BuiltinOptions_LogicalOrOptions,
+ BuiltinOptions_OneHotOptions,
+ BuiltinOptions_LogicalAndOptions,
+ BuiltinOptions_LogicalNotOptions,
+ BuiltinOptions_UnpackOptions,
+ BuiltinOptions_FloorDivOptions,
+ BuiltinOptions_SquareOptions,
+ BuiltinOptions_ZerosLikeOptions,
+ BuiltinOptions_FillOptions,
+ BuiltinOptions_BidirectionalSequenceLSTMOptions,
+ BuiltinOptions_BidirectionalSequenceRNNOptions,
+ BuiltinOptions_UnidirectionalSequenceLSTMOptions,
+ BuiltinOptions_FloorModOptions,
+ BuiltinOptions_RangeOptions,
+ BuiltinOptions_ResizeNearestNeighborOptions,
+ BuiltinOptions_LeakyReluOptions,
+ BuiltinOptions_SquaredDifferenceOptions,
+ BuiltinOptions_MirrorPadOptions,
+ BuiltinOptions_AbsOptions,
+ BuiltinOptions_SplitVOptions,
+ BuiltinOptions_UniqueOptions,
+ BuiltinOptions_ReverseV2Options,
+ BuiltinOptions_AddNOptions,
+ BuiltinOptions_GatherNdOptions,
+ BuiltinOptions_CosOptions,
+ BuiltinOptions_WhereOptions,
+ BuiltinOptions_RankOptions,
+ BuiltinOptions_ReverseSequenceOptions,
+ BuiltinOptions_MatrixDiagOptions,
+ BuiltinOptions_QuantizeOptions,
+ BuiltinOptions_MatrixSetDiagOptions,
+ BuiltinOptions_HardSwishOptions,
+ BuiltinOptions_IfOptions,
+ BuiltinOptions_WhileOptions,
+ BuiltinOptions_DepthToSpaceOptions,
+ BuiltinOptions_NonMaxSuppressionV4Options,
+ BuiltinOptions_NonMaxSuppressionV5Options,
+ BuiltinOptions_ScatterNdOptions,
+ BuiltinOptions_SelectV2Options,
+ BuiltinOptions_DensifyOptions,
+ BuiltinOptions_SegmentSumOptions,
+ BuiltinOptions_BatchMatMulOptions,
+ BuiltinOptions_CumsumOptions,
+ BuiltinOptions_CallOnceOptions,
+ BuiltinOptions_BroadcastToOptions,
+ BuiltinOptions_Rfft2dOptions,
+ BuiltinOptions_Conv3DOptions,
+ BuiltinOptions_HashtableOptions,
+ BuiltinOptions_HashtableFindOptions,
+ BuiltinOptions_HashtableImportOptions,
+ BuiltinOptions_HashtableSizeOptions,
+ BuiltinOptions_VarHandleOptions,
+ BuiltinOptions_ReadVariableOptions,
+ BuiltinOptions_AssignVariableOptions,
+ BuiltinOptions_RandomOptions,
+ BuiltinOptions_BCQGatherOptions,
+ BuiltinOptions_BCQFullyConnectedOptions,
+ BuiltinOptions_InstanceNormOptions};
+ return values;
+}
+
+inline const char *const *EnumNamesBuiltinOptions()
+{
+ static const char *const names[256] = {"NONE",
+ "Conv2DOptions",
+ "DepthwiseConv2DOptions",
+ "ConcatEmbeddingsOptions",
+ "LSHProjectionOptions",
+ "Pool2DOptions",
+ "SVDFOptions",
+ "RNNOptions",
+ "FullyConnectedOptions",
+ "SoftmaxOptions",
+ "ConcatenationOptions",
+ "AddOptions",
+ "L2NormOptions",
+ "LocalResponseNormalizationOptions",
+ "LSTMOptions",
+ "ResizeBilinearOptions",
+ "CallOptions",
+ "ReshapeOptions",
+ "SkipGramOptions",
+ "SpaceToDepthOptions",
+ "EmbeddingLookupSparseOptions",
+ "MulOptions",
+ "PadOptions",
+ "GatherOptions",
+ "BatchToSpaceNDOptions",
+ "SpaceToBatchNDOptions",
+ "TransposeOptions",
+ "ReducerOptions",
+ "SubOptions",
+ "DivOptions",
+ "SqueezeOptions",
+ "SequenceRNNOptions",
+ "StridedSliceOptions",
+ "ExpOptions",
+ "TopKV2Options",
+ "SplitOptions",
+ "LogSoftmaxOptions",
+ "CastOptions",
+ "DequantizeOptions",
+ "MaximumMinimumOptions",
+ "ArgMaxOptions",
+ "LessOptions",
+ "NegOptions",
+ "PadV2Options",
+ "GreaterOptions",
+ "GreaterEqualOptions",
+ "LessEqualOptions",
+ "SelectOptions",
+ "SliceOptions",
+ "TransposeConvOptions",
+ "SparseToDenseOptions",
+ "TileOptions",
+ "ExpandDimsOptions",
+ "EqualOptions",
+ "NotEqualOptions",
+ "ShapeOptions",
+ "PowOptions",
+ "ArgMinOptions",
+ "FakeQuantOptions",
+ "PackOptions",
+ "LogicalOrOptions",
+ "OneHotOptions",
+ "LogicalAndOptions",
+ "LogicalNotOptions",
+ "UnpackOptions",
+ "FloorDivOptions",
+ "SquareOptions",
+ "ZerosLikeOptions",
+ "FillOptions",
+ "BidirectionalSequenceLSTMOptions",
+ "BidirectionalSequenceRNNOptions",
+ "UnidirectionalSequenceLSTMOptions",
+ "FloorModOptions",
+ "RangeOptions",
+ "ResizeNearestNeighborOptions",
+ "LeakyReluOptions",
+ "SquaredDifferenceOptions",
+ "MirrorPadOptions",
+ "AbsOptions",
+ "SplitVOptions",
+ "UniqueOptions",
+ "ReverseV2Options",
+ "AddNOptions",
+ "GatherNdOptions",
+ "CosOptions",
+ "WhereOptions",
+ "RankOptions",
+ "ReverseSequenceOptions",
+ "MatrixDiagOptions",
+ "QuantizeOptions",
+ "MatrixSetDiagOptions",
+ "HardSwishOptions",
+ "IfOptions",
+ "WhileOptions",
+ "DepthToSpaceOptions",
+ "NonMaxSuppressionV4Options",
+ "NonMaxSuppressionV5Options",
+ "ScatterNdOptions",
+ "SelectV2Options",
+ "DensifyOptions",
+ "SegmentSumOptions",
+ "BatchMatMulOptions",
+ "CumsumOptions",
+ "CallOnceOptions",
+ "BroadcastToOptions",
+ "Rfft2dOptions",
+ "Conv3DOptions",
+ "HashtableOptions",
+ "HashtableFindOptions",
+ "HashtableImportOptions",
+ "HashtableSizeOptions",
+ "VarHandleOptions",
+ "ReadVariableOptions",
+ "AssignVariableOptions",
+ "RandomOptions",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "BCQGatherOptions",
+ "BCQFullyConnectedOptions",
+ "InstanceNormOptions",
+ nullptr};
+ return names;
+}
+
+inline const char *EnumNameBuiltinOptions(BuiltinOptions e)
+{
+ if (flatbuffers::IsOutRange(e, BuiltinOptions_NONE, BuiltinOptions_InstanceNormOptions))
+ return "";
+ const size_t index = static_cast<size_t>(e);
+ return EnumNamesBuiltinOptions()[index];
+}
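+// NOTE BuiltinOptions values 115..251 are unused (the Circle schema reserves 252..254
+// for its BCQGatherOptions, BCQFullyConnectedOptions and InstanceNormOptions
+// extensions), so the name table above pads that gap with empty strings and
+// EnumNameBuiltinOptions() returns "" for any value in that range.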
+
+template <typename T> struct BuiltinOptionsTraits
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_NONE;
+};
+
+template <> struct BuiltinOptionsTraits<circle::Conv2DOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_Conv2DOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::DepthwiseConv2DOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_DepthwiseConv2DOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::ConcatEmbeddingsOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_ConcatEmbeddingsOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::LSHProjectionOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_LSHProjectionOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::Pool2DOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_Pool2DOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::SVDFOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_SVDFOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::RNNOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_RNNOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::FullyConnectedOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_FullyConnectedOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::SoftmaxOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_SoftmaxOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::ConcatenationOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_ConcatenationOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::AddOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_AddOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::L2NormOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_L2NormOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::LocalResponseNormalizationOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_LocalResponseNormalizationOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::LSTMOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_LSTMOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::ResizeBilinearOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_ResizeBilinearOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::CallOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_CallOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::ReshapeOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_ReshapeOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::SkipGramOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_SkipGramOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::SpaceToDepthOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_SpaceToDepthOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::EmbeddingLookupSparseOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_EmbeddingLookupSparseOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::MulOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_MulOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::PadOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_PadOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::GatherOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_GatherOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::BatchToSpaceNDOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_BatchToSpaceNDOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::SpaceToBatchNDOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_SpaceToBatchNDOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::TransposeOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_TransposeOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::ReducerOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_ReducerOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::SubOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_SubOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::DivOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_DivOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::SqueezeOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_SqueezeOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::SequenceRNNOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_SequenceRNNOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::StridedSliceOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_StridedSliceOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::ExpOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_ExpOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::TopKV2Options>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_TopKV2Options;
+};
+
+template <> struct BuiltinOptionsTraits<circle::SplitOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_SplitOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::LogSoftmaxOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_LogSoftmaxOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::CastOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_CastOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::DequantizeOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_DequantizeOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::MaximumMinimumOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_MaximumMinimumOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::ArgMaxOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_ArgMaxOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::LessOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_LessOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::NegOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_NegOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::PadV2Options>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_PadV2Options;
+};
+
+template <> struct BuiltinOptionsTraits<circle::GreaterOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_GreaterOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::GreaterEqualOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_GreaterEqualOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::LessEqualOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_LessEqualOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::SelectOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_SelectOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::SliceOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_SliceOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::TransposeConvOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_TransposeConvOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::SparseToDenseOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_SparseToDenseOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::TileOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_TileOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::ExpandDimsOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_ExpandDimsOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::EqualOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_EqualOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::NotEqualOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_NotEqualOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::ShapeOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_ShapeOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::PowOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_PowOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::ArgMinOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_ArgMinOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::FakeQuantOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_FakeQuantOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::PackOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_PackOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::LogicalOrOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_LogicalOrOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::OneHotOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_OneHotOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::LogicalAndOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_LogicalAndOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::LogicalNotOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_LogicalNotOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::UnpackOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_UnpackOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::FloorDivOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_FloorDivOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::SquareOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_SquareOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::ZerosLikeOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_ZerosLikeOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::FillOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_FillOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::BidirectionalSequenceLSTMOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_BidirectionalSequenceLSTMOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::BidirectionalSequenceRNNOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_BidirectionalSequenceRNNOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::UnidirectionalSequenceLSTMOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_UnidirectionalSequenceLSTMOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::FloorModOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_FloorModOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::RangeOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_RangeOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::ResizeNearestNeighborOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_ResizeNearestNeighborOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::LeakyReluOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_LeakyReluOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::SquaredDifferenceOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_SquaredDifferenceOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::MirrorPadOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_MirrorPadOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::AbsOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_AbsOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::SplitVOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_SplitVOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::UniqueOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_UniqueOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::ReverseV2Options>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_ReverseV2Options;
+};
+
+template <> struct BuiltinOptionsTraits<circle::AddNOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_AddNOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::GatherNdOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_GatherNdOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::CosOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_CosOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::WhereOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_WhereOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::RankOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_RankOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::ReverseSequenceOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_ReverseSequenceOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::MatrixDiagOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_MatrixDiagOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::QuantizeOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_QuantizeOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::MatrixSetDiagOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_MatrixSetDiagOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::HardSwishOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_HardSwishOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::IfOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_IfOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::WhileOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_WhileOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::DepthToSpaceOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_DepthToSpaceOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::NonMaxSuppressionV4Options>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_NonMaxSuppressionV4Options;
+};
+
+template <> struct BuiltinOptionsTraits<circle::NonMaxSuppressionV5Options>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_NonMaxSuppressionV5Options;
+};
+
+template <> struct BuiltinOptionsTraits<circle::ScatterNdOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_ScatterNdOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::SelectV2Options>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_SelectV2Options;
+};
+
+template <> struct BuiltinOptionsTraits<circle::DensifyOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_DensifyOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::SegmentSumOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_SegmentSumOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::BatchMatMulOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_BatchMatMulOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::CumsumOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_CumsumOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::CallOnceOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_CallOnceOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::BroadcastToOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_BroadcastToOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::Rfft2dOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_Rfft2dOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::Conv3DOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_Conv3DOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::HashtableOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_HashtableOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::HashtableFindOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_HashtableFindOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::HashtableImportOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_HashtableImportOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::HashtableSizeOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_HashtableSizeOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::VarHandleOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_VarHandleOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::ReadVariableOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_ReadVariableOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::AssignVariableOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_AssignVariableOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::RandomOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_RandomOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::BCQGatherOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_BCQGatherOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::BCQFullyConnectedOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_BCQFullyConnectedOptions;
+};
+
+template <> struct BuiltinOptionsTraits<circle::InstanceNormOptions>
+{
+ static const BuiltinOptions enum_value = BuiltinOptions_InstanceNormOptions;
+};
+
+struct BuiltinOptionsUnion
+{
+ BuiltinOptions type;
+ void *value;
+
+ BuiltinOptionsUnion() : type(BuiltinOptions_NONE), value(nullptr) {}
+ BuiltinOptionsUnion(BuiltinOptionsUnion &&u) FLATBUFFERS_NOEXCEPT : type(BuiltinOptions_NONE),
+ value(nullptr)
+ {
+ std::swap(type, u.type);
+ std::swap(value, u.value);
+ }
+ BuiltinOptionsUnion(const BuiltinOptionsUnion &);
+ BuiltinOptionsUnion &operator=(const BuiltinOptionsUnion &u)
+ {
+ BuiltinOptionsUnion t(u);
+ std::swap(type, t.type);
+ std::swap(value, t.value);
+ return *this;
+ }
+ BuiltinOptionsUnion &operator=(BuiltinOptionsUnion &&u) FLATBUFFERS_NOEXCEPT
+ {
+ std::swap(type, u.type);
+ std::swap(value, u.value);
+ return *this;
+ }
+ ~BuiltinOptionsUnion() { Reset(); }
+
+ void Reset();
+
+#ifndef FLATBUFFERS_CPP98_STL
+ template <typename T> void Set(T &&val)
+ {
+ using RT = typename std::remove_reference<T>::type;
+ Reset();
+ type = BuiltinOptionsTraits<typename RT::TableType>::enum_value;
+ if (type != BuiltinOptions_NONE)
+ {
+ value = new RT(std::forward<T>(val));
+ }
+ }
+#endif // FLATBUFFERS_CPP98_STL
+
+ static void *UnPack(const void *obj, BuiltinOptions type,
+ const flatbuffers::resolver_function_t *resolver);
+ flatbuffers::Offset<void> Pack(flatbuffers::FlatBufferBuilder &_fbb,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr) const;
+
+ circle::Conv2DOptionsT *AsConv2DOptions()
+ {
+ return type == BuiltinOptions_Conv2DOptions ? reinterpret_cast<circle::Conv2DOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::Conv2DOptionsT *AsConv2DOptions() const
+ {
+ return type == BuiltinOptions_Conv2DOptions
+ ? reinterpret_cast<const circle::Conv2DOptionsT *>(value)
+ : nullptr;
+ }
+ circle::DepthwiseConv2DOptionsT *AsDepthwiseConv2DOptions()
+ {
+ return type == BuiltinOptions_DepthwiseConv2DOptions
+ ? reinterpret_cast<circle::DepthwiseConv2DOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::DepthwiseConv2DOptionsT *AsDepthwiseConv2DOptions() const
+ {
+ return type == BuiltinOptions_DepthwiseConv2DOptions
+ ? reinterpret_cast<const circle::DepthwiseConv2DOptionsT *>(value)
+ : nullptr;
+ }
+ circle::ConcatEmbeddingsOptionsT *AsConcatEmbeddingsOptions()
+ {
+ return type == BuiltinOptions_ConcatEmbeddingsOptions
+ ? reinterpret_cast<circle::ConcatEmbeddingsOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::ConcatEmbeddingsOptionsT *AsConcatEmbeddingsOptions() const
+ {
+ return type == BuiltinOptions_ConcatEmbeddingsOptions
+ ? reinterpret_cast<const circle::ConcatEmbeddingsOptionsT *>(value)
+ : nullptr;
+ }
+ circle::LSHProjectionOptionsT *AsLSHProjectionOptions()
+ {
+ return type == BuiltinOptions_LSHProjectionOptions
+ ? reinterpret_cast<circle::LSHProjectionOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::LSHProjectionOptionsT *AsLSHProjectionOptions() const
+ {
+ return type == BuiltinOptions_LSHProjectionOptions
+ ? reinterpret_cast<const circle::LSHProjectionOptionsT *>(value)
+ : nullptr;
+ }
+ circle::Pool2DOptionsT *AsPool2DOptions()
+ {
+ return type == BuiltinOptions_Pool2DOptions ? reinterpret_cast<circle::Pool2DOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::Pool2DOptionsT *AsPool2DOptions() const
+ {
+ return type == BuiltinOptions_Pool2DOptions
+ ? reinterpret_cast<const circle::Pool2DOptionsT *>(value)
+ : nullptr;
+ }
+ circle::SVDFOptionsT *AsSVDFOptions()
+ {
+ return type == BuiltinOptions_SVDFOptions ? reinterpret_cast<circle::SVDFOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::SVDFOptionsT *AsSVDFOptions() const
+ {
+ return type == BuiltinOptions_SVDFOptions
+ ? reinterpret_cast<const circle::SVDFOptionsT *>(value)
+ : nullptr;
+ }
+ circle::RNNOptionsT *AsRNNOptions()
+ {
+ return type == BuiltinOptions_RNNOptions ? reinterpret_cast<circle::RNNOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::RNNOptionsT *AsRNNOptions() const
+ {
+ return type == BuiltinOptions_RNNOptions ? reinterpret_cast<const circle::RNNOptionsT *>(value)
+ : nullptr;
+ }
+ circle::FullyConnectedOptionsT *AsFullyConnectedOptions()
+ {
+ return type == BuiltinOptions_FullyConnectedOptions
+ ? reinterpret_cast<circle::FullyConnectedOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::FullyConnectedOptionsT *AsFullyConnectedOptions() const
+ {
+ return type == BuiltinOptions_FullyConnectedOptions
+ ? reinterpret_cast<const circle::FullyConnectedOptionsT *>(value)
+ : nullptr;
+ }
+ circle::SoftmaxOptionsT *AsSoftmaxOptions()
+ {
+ return type == BuiltinOptions_SoftmaxOptions
+ ? reinterpret_cast<circle::SoftmaxOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::SoftmaxOptionsT *AsSoftmaxOptions() const
+ {
+ return type == BuiltinOptions_SoftmaxOptions
+ ? reinterpret_cast<const circle::SoftmaxOptionsT *>(value)
+ : nullptr;
+ }
+ circle::ConcatenationOptionsT *AsConcatenationOptions()
+ {
+ return type == BuiltinOptions_ConcatenationOptions
+ ? reinterpret_cast<circle::ConcatenationOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::ConcatenationOptionsT *AsConcatenationOptions() const
+ {
+ return type == BuiltinOptions_ConcatenationOptions
+ ? reinterpret_cast<const circle::ConcatenationOptionsT *>(value)
+ : nullptr;
+ }
+ circle::AddOptionsT *AsAddOptions()
+ {
+ return type == BuiltinOptions_AddOptions ? reinterpret_cast<circle::AddOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::AddOptionsT *AsAddOptions() const
+ {
+ return type == BuiltinOptions_AddOptions ? reinterpret_cast<const circle::AddOptionsT *>(value)
+ : nullptr;
+ }
+ circle::L2NormOptionsT *AsL2NormOptions()
+ {
+ return type == BuiltinOptions_L2NormOptions ? reinterpret_cast<circle::L2NormOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::L2NormOptionsT *AsL2NormOptions() const
+ {
+ return type == BuiltinOptions_L2NormOptions
+ ? reinterpret_cast<const circle::L2NormOptionsT *>(value)
+ : nullptr;
+ }
+ circle::LocalResponseNormalizationOptionsT *AsLocalResponseNormalizationOptions()
+ {
+ return type == BuiltinOptions_LocalResponseNormalizationOptions
+ ? reinterpret_cast<circle::LocalResponseNormalizationOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::LocalResponseNormalizationOptionsT *AsLocalResponseNormalizationOptions() const
+ {
+ return type == BuiltinOptions_LocalResponseNormalizationOptions
+ ? reinterpret_cast<const circle::LocalResponseNormalizationOptionsT *>(value)
+ : nullptr;
+ }
+ circle::LSTMOptionsT *AsLSTMOptions()
+ {
+ return type == BuiltinOptions_LSTMOptions ? reinterpret_cast<circle::LSTMOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::LSTMOptionsT *AsLSTMOptions() const
+ {
+ return type == BuiltinOptions_LSTMOptions
+ ? reinterpret_cast<const circle::LSTMOptionsT *>(value)
+ : nullptr;
+ }
+ circle::ResizeBilinearOptionsT *AsResizeBilinearOptions()
+ {
+ return type == BuiltinOptions_ResizeBilinearOptions
+ ? reinterpret_cast<circle::ResizeBilinearOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::ResizeBilinearOptionsT *AsResizeBilinearOptions() const
+ {
+ return type == BuiltinOptions_ResizeBilinearOptions
+ ? reinterpret_cast<const circle::ResizeBilinearOptionsT *>(value)
+ : nullptr;
+ }
+ circle::CallOptionsT *AsCallOptions()
+ {
+ return type == BuiltinOptions_CallOptions ? reinterpret_cast<circle::CallOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::CallOptionsT *AsCallOptions() const
+ {
+ return type == BuiltinOptions_CallOptions
+ ? reinterpret_cast<const circle::CallOptionsT *>(value)
+ : nullptr;
+ }
+ circle::ReshapeOptionsT *AsReshapeOptions()
+ {
+ return type == BuiltinOptions_ReshapeOptions
+ ? reinterpret_cast<circle::ReshapeOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::ReshapeOptionsT *AsReshapeOptions() const
+ {
+ return type == BuiltinOptions_ReshapeOptions
+ ? reinterpret_cast<const circle::ReshapeOptionsT *>(value)
+ : nullptr;
+ }
+ circle::SkipGramOptionsT *AsSkipGramOptions()
+ {
+ return type == BuiltinOptions_SkipGramOptions
+ ? reinterpret_cast<circle::SkipGramOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::SkipGramOptionsT *AsSkipGramOptions() const
+ {
+ return type == BuiltinOptions_SkipGramOptions
+ ? reinterpret_cast<const circle::SkipGramOptionsT *>(value)
+ : nullptr;
+ }
+ circle::SpaceToDepthOptionsT *AsSpaceToDepthOptions()
+ {
+ return type == BuiltinOptions_SpaceToDepthOptions
+ ? reinterpret_cast<circle::SpaceToDepthOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::SpaceToDepthOptionsT *AsSpaceToDepthOptions() const
+ {
+ return type == BuiltinOptions_SpaceToDepthOptions
+ ? reinterpret_cast<const circle::SpaceToDepthOptionsT *>(value)
+ : nullptr;
+ }
+ circle::EmbeddingLookupSparseOptionsT *AsEmbeddingLookupSparseOptions()
+ {
+ return type == BuiltinOptions_EmbeddingLookupSparseOptions
+ ? reinterpret_cast<circle::EmbeddingLookupSparseOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::EmbeddingLookupSparseOptionsT *AsEmbeddingLookupSparseOptions() const
+ {
+ return type == BuiltinOptions_EmbeddingLookupSparseOptions
+ ? reinterpret_cast<const circle::EmbeddingLookupSparseOptionsT *>(value)
+ : nullptr;
+ }
+ circle::MulOptionsT *AsMulOptions()
+ {
+ return type == BuiltinOptions_MulOptions ? reinterpret_cast<circle::MulOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::MulOptionsT *AsMulOptions() const
+ {
+ return type == BuiltinOptions_MulOptions ? reinterpret_cast<const circle::MulOptionsT *>(value)
+ : nullptr;
+ }
+ circle::PadOptionsT *AsPadOptions()
+ {
+ return type == BuiltinOptions_PadOptions ? reinterpret_cast<circle::PadOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::PadOptionsT *AsPadOptions() const
+ {
+ return type == BuiltinOptions_PadOptions ? reinterpret_cast<const circle::PadOptionsT *>(value)
+ : nullptr;
+ }
+ circle::GatherOptionsT *AsGatherOptions()
+ {
+ return type == BuiltinOptions_GatherOptions ? reinterpret_cast<circle::GatherOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::GatherOptionsT *AsGatherOptions() const
+ {
+ return type == BuiltinOptions_GatherOptions
+ ? reinterpret_cast<const circle::GatherOptionsT *>(value)
+ : nullptr;
+ }
+ circle::BatchToSpaceNDOptionsT *AsBatchToSpaceNDOptions()
+ {
+ return type == BuiltinOptions_BatchToSpaceNDOptions
+ ? reinterpret_cast<circle::BatchToSpaceNDOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::BatchToSpaceNDOptionsT *AsBatchToSpaceNDOptions() const
+ {
+ return type == BuiltinOptions_BatchToSpaceNDOptions
+ ? reinterpret_cast<const circle::BatchToSpaceNDOptionsT *>(value)
+ : nullptr;
+ }
+ circle::SpaceToBatchNDOptionsT *AsSpaceToBatchNDOptions()
+ {
+ return type == BuiltinOptions_SpaceToBatchNDOptions
+ ? reinterpret_cast<circle::SpaceToBatchNDOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::SpaceToBatchNDOptionsT *AsSpaceToBatchNDOptions() const
+ {
+ return type == BuiltinOptions_SpaceToBatchNDOptions
+ ? reinterpret_cast<const circle::SpaceToBatchNDOptionsT *>(value)
+ : nullptr;
+ }
+ circle::TransposeOptionsT *AsTransposeOptions()
+ {
+ return type == BuiltinOptions_TransposeOptions
+ ? reinterpret_cast<circle::TransposeOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::TransposeOptionsT *AsTransposeOptions() const
+ {
+ return type == BuiltinOptions_TransposeOptions
+ ? reinterpret_cast<const circle::TransposeOptionsT *>(value)
+ : nullptr;
+ }
+ circle::ReducerOptionsT *AsReducerOptions()
+ {
+ return type == BuiltinOptions_ReducerOptions
+ ? reinterpret_cast<circle::ReducerOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::ReducerOptionsT *AsReducerOptions() const
+ {
+ return type == BuiltinOptions_ReducerOptions
+ ? reinterpret_cast<const circle::ReducerOptionsT *>(value)
+ : nullptr;
+ }
+ circle::SubOptionsT *AsSubOptions()
+ {
+ return type == BuiltinOptions_SubOptions ? reinterpret_cast<circle::SubOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::SubOptionsT *AsSubOptions() const
+ {
+ return type == BuiltinOptions_SubOptions ? reinterpret_cast<const circle::SubOptionsT *>(value)
+ : nullptr;
+ }
+ circle::DivOptionsT *AsDivOptions()
+ {
+ return type == BuiltinOptions_DivOptions ? reinterpret_cast<circle::DivOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::DivOptionsT *AsDivOptions() const
+ {
+ return type == BuiltinOptions_DivOptions ? reinterpret_cast<const circle::DivOptionsT *>(value)
+ : nullptr;
+ }
+ circle::SqueezeOptionsT *AsSqueezeOptions()
+ {
+ return type == BuiltinOptions_SqueezeOptions
+ ? reinterpret_cast<circle::SqueezeOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::SqueezeOptionsT *AsSqueezeOptions() const
+ {
+ return type == BuiltinOptions_SqueezeOptions
+ ? reinterpret_cast<const circle::SqueezeOptionsT *>(value)
+ : nullptr;
+ }
+ circle::SequenceRNNOptionsT *AsSequenceRNNOptions()
+ {
+ return type == BuiltinOptions_SequenceRNNOptions
+ ? reinterpret_cast<circle::SequenceRNNOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::SequenceRNNOptionsT *AsSequenceRNNOptions() const
+ {
+ return type == BuiltinOptions_SequenceRNNOptions
+ ? reinterpret_cast<const circle::SequenceRNNOptionsT *>(value)
+ : nullptr;
+ }
+ circle::StridedSliceOptionsT *AsStridedSliceOptions()
+ {
+ return type == BuiltinOptions_StridedSliceOptions
+ ? reinterpret_cast<circle::StridedSliceOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::StridedSliceOptionsT *AsStridedSliceOptions() const
+ {
+ return type == BuiltinOptions_StridedSliceOptions
+ ? reinterpret_cast<const circle::StridedSliceOptionsT *>(value)
+ : nullptr;
+ }
+ circle::ExpOptionsT *AsExpOptions()
+ {
+ return type == BuiltinOptions_ExpOptions ? reinterpret_cast<circle::ExpOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::ExpOptionsT *AsExpOptions() const
+ {
+ return type == BuiltinOptions_ExpOptions ? reinterpret_cast<const circle::ExpOptionsT *>(value)
+ : nullptr;
+ }
+ circle::TopKV2OptionsT *AsTopKV2Options()
+ {
+ return type == BuiltinOptions_TopKV2Options ? reinterpret_cast<circle::TopKV2OptionsT *>(value)
+ : nullptr;
+ }
+ const circle::TopKV2OptionsT *AsTopKV2Options() const
+ {
+ return type == BuiltinOptions_TopKV2Options
+ ? reinterpret_cast<const circle::TopKV2OptionsT *>(value)
+ : nullptr;
+ }
+ circle::SplitOptionsT *AsSplitOptions()
+ {
+ return type == BuiltinOptions_SplitOptions ? reinterpret_cast<circle::SplitOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::SplitOptionsT *AsSplitOptions() const
+ {
+ return type == BuiltinOptions_SplitOptions
+ ? reinterpret_cast<const circle::SplitOptionsT *>(value)
+ : nullptr;
+ }
+ circle::LogSoftmaxOptionsT *AsLogSoftmaxOptions()
+ {
+ return type == BuiltinOptions_LogSoftmaxOptions
+ ? reinterpret_cast<circle::LogSoftmaxOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::LogSoftmaxOptionsT *AsLogSoftmaxOptions() const
+ {
+ return type == BuiltinOptions_LogSoftmaxOptions
+ ? reinterpret_cast<const circle::LogSoftmaxOptionsT *>(value)
+ : nullptr;
+ }
+ circle::CastOptionsT *AsCastOptions()
+ {
+ return type == BuiltinOptions_CastOptions ? reinterpret_cast<circle::CastOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::CastOptionsT *AsCastOptions() const
+ {
+ return type == BuiltinOptions_CastOptions
+ ? reinterpret_cast<const circle::CastOptionsT *>(value)
+ : nullptr;
+ }
+ circle::DequantizeOptionsT *AsDequantizeOptions()
+ {
+ return type == BuiltinOptions_DequantizeOptions
+ ? reinterpret_cast<circle::DequantizeOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::DequantizeOptionsT *AsDequantizeOptions() const
+ {
+ return type == BuiltinOptions_DequantizeOptions
+ ? reinterpret_cast<const circle::DequantizeOptionsT *>(value)
+ : nullptr;
+ }
+ circle::MaximumMinimumOptionsT *AsMaximumMinimumOptions()
+ {
+ return type == BuiltinOptions_MaximumMinimumOptions
+ ? reinterpret_cast<circle::MaximumMinimumOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::MaximumMinimumOptionsT *AsMaximumMinimumOptions() const
+ {
+ return type == BuiltinOptions_MaximumMinimumOptions
+ ? reinterpret_cast<const circle::MaximumMinimumOptionsT *>(value)
+ : nullptr;
+ }
+ circle::ArgMaxOptionsT *AsArgMaxOptions()
+ {
+ return type == BuiltinOptions_ArgMaxOptions ? reinterpret_cast<circle::ArgMaxOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::ArgMaxOptionsT *AsArgMaxOptions() const
+ {
+ return type == BuiltinOptions_ArgMaxOptions
+ ? reinterpret_cast<const circle::ArgMaxOptionsT *>(value)
+ : nullptr;
+ }
+ circle::LessOptionsT *AsLessOptions()
+ {
+ return type == BuiltinOptions_LessOptions ? reinterpret_cast<circle::LessOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::LessOptionsT *AsLessOptions() const
+ {
+ return type == BuiltinOptions_LessOptions
+ ? reinterpret_cast<const circle::LessOptionsT *>(value)
+ : nullptr;
+ }
+ circle::NegOptionsT *AsNegOptions()
+ {
+ return type == BuiltinOptions_NegOptions ? reinterpret_cast<circle::NegOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::NegOptionsT *AsNegOptions() const
+ {
+ return type == BuiltinOptions_NegOptions ? reinterpret_cast<const circle::NegOptionsT *>(value)
+ : nullptr;
+ }
+ circle::PadV2OptionsT *AsPadV2Options()
+ {
+ return type == BuiltinOptions_PadV2Options ? reinterpret_cast<circle::PadV2OptionsT *>(value)
+ : nullptr;
+ }
+ const circle::PadV2OptionsT *AsPadV2Options() const
+ {
+ return type == BuiltinOptions_PadV2Options
+ ? reinterpret_cast<const circle::PadV2OptionsT *>(value)
+ : nullptr;
+ }
+ circle::GreaterOptionsT *AsGreaterOptions()
+ {
+ return type == BuiltinOptions_GreaterOptions
+ ? reinterpret_cast<circle::GreaterOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::GreaterOptionsT *AsGreaterOptions() const
+ {
+ return type == BuiltinOptions_GreaterOptions
+ ? reinterpret_cast<const circle::GreaterOptionsT *>(value)
+ : nullptr;
+ }
+ circle::GreaterEqualOptionsT *AsGreaterEqualOptions()
+ {
+ return type == BuiltinOptions_GreaterEqualOptions
+ ? reinterpret_cast<circle::GreaterEqualOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::GreaterEqualOptionsT *AsGreaterEqualOptions() const
+ {
+ return type == BuiltinOptions_GreaterEqualOptions
+ ? reinterpret_cast<const circle::GreaterEqualOptionsT *>(value)
+ : nullptr;
+ }
+ circle::LessEqualOptionsT *AsLessEqualOptions()
+ {
+ return type == BuiltinOptions_LessEqualOptions
+ ? reinterpret_cast<circle::LessEqualOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::LessEqualOptionsT *AsLessEqualOptions() const
+ {
+ return type == BuiltinOptions_LessEqualOptions
+ ? reinterpret_cast<const circle::LessEqualOptionsT *>(value)
+ : nullptr;
+ }
+ circle::SelectOptionsT *AsSelectOptions()
+ {
+ return type == BuiltinOptions_SelectOptions ? reinterpret_cast<circle::SelectOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::SelectOptionsT *AsSelectOptions() const
+ {
+ return type == BuiltinOptions_SelectOptions
+ ? reinterpret_cast<const circle::SelectOptionsT *>(value)
+ : nullptr;
+ }
+ circle::SliceOptionsT *AsSliceOptions()
+ {
+ return type == BuiltinOptions_SliceOptions ? reinterpret_cast<circle::SliceOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::SliceOptionsT *AsSliceOptions() const
+ {
+ return type == BuiltinOptions_SliceOptions
+ ? reinterpret_cast<const circle::SliceOptionsT *>(value)
+ : nullptr;
+ }
+ circle::TransposeConvOptionsT *AsTransposeConvOptions()
+ {
+ return type == BuiltinOptions_TransposeConvOptions
+ ? reinterpret_cast<circle::TransposeConvOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::TransposeConvOptionsT *AsTransposeConvOptions() const
+ {
+ return type == BuiltinOptions_TransposeConvOptions
+ ? reinterpret_cast<const circle::TransposeConvOptionsT *>(value)
+ : nullptr;
+ }
+ circle::SparseToDenseOptionsT *AsSparseToDenseOptions()
+ {
+ return type == BuiltinOptions_SparseToDenseOptions
+ ? reinterpret_cast<circle::SparseToDenseOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::SparseToDenseOptionsT *AsSparseToDenseOptions() const
+ {
+ return type == BuiltinOptions_SparseToDenseOptions
+ ? reinterpret_cast<const circle::SparseToDenseOptionsT *>(value)
+ : nullptr;
+ }
+ circle::TileOptionsT *AsTileOptions()
+ {
+ return type == BuiltinOptions_TileOptions ? reinterpret_cast<circle::TileOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::TileOptionsT *AsTileOptions() const
+ {
+ return type == BuiltinOptions_TileOptions
+ ? reinterpret_cast<const circle::TileOptionsT *>(value)
+ : nullptr;
+ }
+ circle::ExpandDimsOptionsT *AsExpandDimsOptions()
+ {
+ return type == BuiltinOptions_ExpandDimsOptions
+ ? reinterpret_cast<circle::ExpandDimsOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::ExpandDimsOptionsT *AsExpandDimsOptions() const
+ {
+ return type == BuiltinOptions_ExpandDimsOptions
+ ? reinterpret_cast<const circle::ExpandDimsOptionsT *>(value)
+ : nullptr;
+ }
+ circle::EqualOptionsT *AsEqualOptions()
+ {
+ return type == BuiltinOptions_EqualOptions ? reinterpret_cast<circle::EqualOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::EqualOptionsT *AsEqualOptions() const
+ {
+ return type == BuiltinOptions_EqualOptions
+ ? reinterpret_cast<const circle::EqualOptionsT *>(value)
+ : nullptr;
+ }
+ circle::NotEqualOptionsT *AsNotEqualOptions()
+ {
+ return type == BuiltinOptions_NotEqualOptions
+ ? reinterpret_cast<circle::NotEqualOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::NotEqualOptionsT *AsNotEqualOptions() const
+ {
+ return type == BuiltinOptions_NotEqualOptions
+ ? reinterpret_cast<const circle::NotEqualOptionsT *>(value)
+ : nullptr;
+ }
+ circle::ShapeOptionsT *AsShapeOptions()
+ {
+ return type == BuiltinOptions_ShapeOptions ? reinterpret_cast<circle::ShapeOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::ShapeOptionsT *AsShapeOptions() const
+ {
+ return type == BuiltinOptions_ShapeOptions
+ ? reinterpret_cast<const circle::ShapeOptionsT *>(value)
+ : nullptr;
+ }
+ circle::PowOptionsT *AsPowOptions()
+ {
+ return type == BuiltinOptions_PowOptions ? reinterpret_cast<circle::PowOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::PowOptionsT *AsPowOptions() const
+ {
+ return type == BuiltinOptions_PowOptions ? reinterpret_cast<const circle::PowOptionsT *>(value)
+ : nullptr;
+ }
+ circle::ArgMinOptionsT *AsArgMinOptions()
+ {
+ return type == BuiltinOptions_ArgMinOptions ? reinterpret_cast<circle::ArgMinOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::ArgMinOptionsT *AsArgMinOptions() const
+ {
+ return type == BuiltinOptions_ArgMinOptions
+ ? reinterpret_cast<const circle::ArgMinOptionsT *>(value)
+ : nullptr;
+ }
+ circle::FakeQuantOptionsT *AsFakeQuantOptions()
+ {
+ return type == BuiltinOptions_FakeQuantOptions
+ ? reinterpret_cast<circle::FakeQuantOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::FakeQuantOptionsT *AsFakeQuantOptions() const
+ {
+ return type == BuiltinOptions_FakeQuantOptions
+ ? reinterpret_cast<const circle::FakeQuantOptionsT *>(value)
+ : nullptr;
+ }
+ circle::PackOptionsT *AsPackOptions()
+ {
+ return type == BuiltinOptions_PackOptions ? reinterpret_cast<circle::PackOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::PackOptionsT *AsPackOptions() const
+ {
+ return type == BuiltinOptions_PackOptions
+ ? reinterpret_cast<const circle::PackOptionsT *>(value)
+ : nullptr;
+ }
+ circle::LogicalOrOptionsT *AsLogicalOrOptions()
+ {
+ return type == BuiltinOptions_LogicalOrOptions
+ ? reinterpret_cast<circle::LogicalOrOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::LogicalOrOptionsT *AsLogicalOrOptions() const
+ {
+ return type == BuiltinOptions_LogicalOrOptions
+ ? reinterpret_cast<const circle::LogicalOrOptionsT *>(value)
+ : nullptr;
+ }
+ circle::OneHotOptionsT *AsOneHotOptions()
+ {
+ return type == BuiltinOptions_OneHotOptions ? reinterpret_cast<circle::OneHotOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::OneHotOptionsT *AsOneHotOptions() const
+ {
+ return type == BuiltinOptions_OneHotOptions
+ ? reinterpret_cast<const circle::OneHotOptionsT *>(value)
+ : nullptr;
+ }
+ circle::LogicalAndOptionsT *AsLogicalAndOptions()
+ {
+ return type == BuiltinOptions_LogicalAndOptions
+ ? reinterpret_cast<circle::LogicalAndOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::LogicalAndOptionsT *AsLogicalAndOptions() const
+ {
+ return type == BuiltinOptions_LogicalAndOptions
+ ? reinterpret_cast<const circle::LogicalAndOptionsT *>(value)
+ : nullptr;
+ }
+ circle::LogicalNotOptionsT *AsLogicalNotOptions()
+ {
+ return type == BuiltinOptions_LogicalNotOptions
+ ? reinterpret_cast<circle::LogicalNotOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::LogicalNotOptionsT *AsLogicalNotOptions() const
+ {
+ return type == BuiltinOptions_LogicalNotOptions
+ ? reinterpret_cast<const circle::LogicalNotOptionsT *>(value)
+ : nullptr;
+ }
+ circle::UnpackOptionsT *AsUnpackOptions()
+ {
+ return type == BuiltinOptions_UnpackOptions ? reinterpret_cast<circle::UnpackOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::UnpackOptionsT *AsUnpackOptions() const
+ {
+ return type == BuiltinOptions_UnpackOptions
+ ? reinterpret_cast<const circle::UnpackOptionsT *>(value)
+ : nullptr;
+ }
+ circle::FloorDivOptionsT *AsFloorDivOptions()
+ {
+ return type == BuiltinOptions_FloorDivOptions
+ ? reinterpret_cast<circle::FloorDivOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::FloorDivOptionsT *AsFloorDivOptions() const
+ {
+ return type == BuiltinOptions_FloorDivOptions
+ ? reinterpret_cast<const circle::FloorDivOptionsT *>(value)
+ : nullptr;
+ }
+ circle::SquareOptionsT *AsSquareOptions()
+ {
+ return type == BuiltinOptions_SquareOptions ? reinterpret_cast<circle::SquareOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::SquareOptionsT *AsSquareOptions() const
+ {
+ return type == BuiltinOptions_SquareOptions
+ ? reinterpret_cast<const circle::SquareOptionsT *>(value)
+ : nullptr;
+ }
+ circle::ZerosLikeOptionsT *AsZerosLikeOptions()
+ {
+ return type == BuiltinOptions_ZerosLikeOptions
+ ? reinterpret_cast<circle::ZerosLikeOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::ZerosLikeOptionsT *AsZerosLikeOptions() const
+ {
+ return type == BuiltinOptions_ZerosLikeOptions
+ ? reinterpret_cast<const circle::ZerosLikeOptionsT *>(value)
+ : nullptr;
+ }
+ circle::FillOptionsT *AsFillOptions()
+ {
+ return type == BuiltinOptions_FillOptions ? reinterpret_cast<circle::FillOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::FillOptionsT *AsFillOptions() const
+ {
+ return type == BuiltinOptions_FillOptions
+ ? reinterpret_cast<const circle::FillOptionsT *>(value)
+ : nullptr;
+ }
+ circle::BidirectionalSequenceLSTMOptionsT *AsBidirectionalSequenceLSTMOptions()
+ {
+ return type == BuiltinOptions_BidirectionalSequenceLSTMOptions
+ ? reinterpret_cast<circle::BidirectionalSequenceLSTMOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::BidirectionalSequenceLSTMOptionsT *AsBidirectionalSequenceLSTMOptions() const
+ {
+ return type == BuiltinOptions_BidirectionalSequenceLSTMOptions
+ ? reinterpret_cast<const circle::BidirectionalSequenceLSTMOptionsT *>(value)
+ : nullptr;
+ }
+ circle::BidirectionalSequenceRNNOptionsT *AsBidirectionalSequenceRNNOptions()
+ {
+ return type == BuiltinOptions_BidirectionalSequenceRNNOptions
+ ? reinterpret_cast<circle::BidirectionalSequenceRNNOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::BidirectionalSequenceRNNOptionsT *AsBidirectionalSequenceRNNOptions() const
+ {
+ return type == BuiltinOptions_BidirectionalSequenceRNNOptions
+ ? reinterpret_cast<const circle::BidirectionalSequenceRNNOptionsT *>(value)
+ : nullptr;
+ }
+ circle::UnidirectionalSequenceLSTMOptionsT *AsUnidirectionalSequenceLSTMOptions()
+ {
+ return type == BuiltinOptions_UnidirectionalSequenceLSTMOptions
+ ? reinterpret_cast<circle::UnidirectionalSequenceLSTMOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::UnidirectionalSequenceLSTMOptionsT *AsUnidirectionalSequenceLSTMOptions() const
+ {
+ return type == BuiltinOptions_UnidirectionalSequenceLSTMOptions
+ ? reinterpret_cast<const circle::UnidirectionalSequenceLSTMOptionsT *>(value)
+ : nullptr;
+ }
+ circle::FloorModOptionsT *AsFloorModOptions()
+ {
+ return type == BuiltinOptions_FloorModOptions
+ ? reinterpret_cast<circle::FloorModOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::FloorModOptionsT *AsFloorModOptions() const
+ {
+ return type == BuiltinOptions_FloorModOptions
+ ? reinterpret_cast<const circle::FloorModOptionsT *>(value)
+ : nullptr;
+ }
+ circle::RangeOptionsT *AsRangeOptions()
+ {
+ return type == BuiltinOptions_RangeOptions ? reinterpret_cast<circle::RangeOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::RangeOptionsT *AsRangeOptions() const
+ {
+ return type == BuiltinOptions_RangeOptions
+ ? reinterpret_cast<const circle::RangeOptionsT *>(value)
+ : nullptr;
+ }
+ circle::ResizeNearestNeighborOptionsT *AsResizeNearestNeighborOptions()
+ {
+ return type == BuiltinOptions_ResizeNearestNeighborOptions
+ ? reinterpret_cast<circle::ResizeNearestNeighborOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::ResizeNearestNeighborOptionsT *AsResizeNearestNeighborOptions() const
+ {
+ return type == BuiltinOptions_ResizeNearestNeighborOptions
+ ? reinterpret_cast<const circle::ResizeNearestNeighborOptionsT *>(value)
+ : nullptr;
+ }
+ circle::LeakyReluOptionsT *AsLeakyReluOptions()
+ {
+ return type == BuiltinOptions_LeakyReluOptions
+ ? reinterpret_cast<circle::LeakyReluOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::LeakyReluOptionsT *AsLeakyReluOptions() const
+ {
+ return type == BuiltinOptions_LeakyReluOptions
+ ? reinterpret_cast<const circle::LeakyReluOptionsT *>(value)
+ : nullptr;
+ }
+ circle::SquaredDifferenceOptionsT *AsSquaredDifferenceOptions()
+ {
+ return type == BuiltinOptions_SquaredDifferenceOptions
+ ? reinterpret_cast<circle::SquaredDifferenceOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::SquaredDifferenceOptionsT *AsSquaredDifferenceOptions() const
+ {
+ return type == BuiltinOptions_SquaredDifferenceOptions
+ ? reinterpret_cast<const circle::SquaredDifferenceOptionsT *>(value)
+ : nullptr;
+ }
+ circle::MirrorPadOptionsT *AsMirrorPadOptions()
+ {
+ return type == BuiltinOptions_MirrorPadOptions
+ ? reinterpret_cast<circle::MirrorPadOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::MirrorPadOptionsT *AsMirrorPadOptions() const
+ {
+ return type == BuiltinOptions_MirrorPadOptions
+ ? reinterpret_cast<const circle::MirrorPadOptionsT *>(value)
+ : nullptr;
+ }
+ circle::AbsOptionsT *AsAbsOptions()
+ {
+ return type == BuiltinOptions_AbsOptions ? reinterpret_cast<circle::AbsOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::AbsOptionsT *AsAbsOptions() const
+ {
+ return type == BuiltinOptions_AbsOptions ? reinterpret_cast<const circle::AbsOptionsT *>(value)
+ : nullptr;
+ }
+ circle::SplitVOptionsT *AsSplitVOptions()
+ {
+ return type == BuiltinOptions_SplitVOptions ? reinterpret_cast<circle::SplitVOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::SplitVOptionsT *AsSplitVOptions() const
+ {
+ return type == BuiltinOptions_SplitVOptions
+ ? reinterpret_cast<const circle::SplitVOptionsT *>(value)
+ : nullptr;
+ }
+ circle::UniqueOptionsT *AsUniqueOptions()
+ {
+ return type == BuiltinOptions_UniqueOptions ? reinterpret_cast<circle::UniqueOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::UniqueOptionsT *AsUniqueOptions() const
+ {
+ return type == BuiltinOptions_UniqueOptions
+ ? reinterpret_cast<const circle::UniqueOptionsT *>(value)
+ : nullptr;
+ }
+ circle::ReverseV2OptionsT *AsReverseV2Options()
+ {
+ return type == BuiltinOptions_ReverseV2Options
+ ? reinterpret_cast<circle::ReverseV2OptionsT *>(value)
+ : nullptr;
+ }
+ const circle::ReverseV2OptionsT *AsReverseV2Options() const
+ {
+ return type == BuiltinOptions_ReverseV2Options
+ ? reinterpret_cast<const circle::ReverseV2OptionsT *>(value)
+ : nullptr;
+ }
+ circle::AddNOptionsT *AsAddNOptions()
+ {
+ return type == BuiltinOptions_AddNOptions ? reinterpret_cast<circle::AddNOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::AddNOptionsT *AsAddNOptions() const
+ {
+ return type == BuiltinOptions_AddNOptions
+ ? reinterpret_cast<const circle::AddNOptionsT *>(value)
+ : nullptr;
+ }
+ circle::GatherNdOptionsT *AsGatherNdOptions()
+ {
+ return type == BuiltinOptions_GatherNdOptions
+ ? reinterpret_cast<circle::GatherNdOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::GatherNdOptionsT *AsGatherNdOptions() const
+ {
+ return type == BuiltinOptions_GatherNdOptions
+ ? reinterpret_cast<const circle::GatherNdOptionsT *>(value)
+ : nullptr;
+ }
+ circle::CosOptionsT *AsCosOptions()
+ {
+ return type == BuiltinOptions_CosOptions ? reinterpret_cast<circle::CosOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::CosOptionsT *AsCosOptions() const
+ {
+ return type == BuiltinOptions_CosOptions ? reinterpret_cast<const circle::CosOptionsT *>(value)
+ : nullptr;
+ }
+ circle::WhereOptionsT *AsWhereOptions()
+ {
+ return type == BuiltinOptions_WhereOptions ? reinterpret_cast<circle::WhereOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::WhereOptionsT *AsWhereOptions() const
+ {
+ return type == BuiltinOptions_WhereOptions
+ ? reinterpret_cast<const circle::WhereOptionsT *>(value)
+ : nullptr;
+ }
+ circle::RankOptionsT *AsRankOptions()
+ {
+ return type == BuiltinOptions_RankOptions ? reinterpret_cast<circle::RankOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::RankOptionsT *AsRankOptions() const
+ {
+ return type == BuiltinOptions_RankOptions
+ ? reinterpret_cast<const circle::RankOptionsT *>(value)
+ : nullptr;
+ }
+ circle::ReverseSequenceOptionsT *AsReverseSequenceOptions()
+ {
+ return type == BuiltinOptions_ReverseSequenceOptions
+ ? reinterpret_cast<circle::ReverseSequenceOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::ReverseSequenceOptionsT *AsReverseSequenceOptions() const
+ {
+ return type == BuiltinOptions_ReverseSequenceOptions
+ ? reinterpret_cast<const circle::ReverseSequenceOptionsT *>(value)
+ : nullptr;
+ }
+ circle::MatrixDiagOptionsT *AsMatrixDiagOptions()
+ {
+ return type == BuiltinOptions_MatrixDiagOptions
+ ? reinterpret_cast<circle::MatrixDiagOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::MatrixDiagOptionsT *AsMatrixDiagOptions() const
+ {
+ return type == BuiltinOptions_MatrixDiagOptions
+ ? reinterpret_cast<const circle::MatrixDiagOptionsT *>(value)
+ : nullptr;
+ }
+ circle::QuantizeOptionsT *AsQuantizeOptions()
+ {
+ return type == BuiltinOptions_QuantizeOptions
+ ? reinterpret_cast<circle::QuantizeOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::QuantizeOptionsT *AsQuantizeOptions() const
+ {
+ return type == BuiltinOptions_QuantizeOptions
+ ? reinterpret_cast<const circle::QuantizeOptionsT *>(value)
+ : nullptr;
+ }
+ circle::MatrixSetDiagOptionsT *AsMatrixSetDiagOptions()
+ {
+ return type == BuiltinOptions_MatrixSetDiagOptions
+ ? reinterpret_cast<circle::MatrixSetDiagOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::MatrixSetDiagOptionsT *AsMatrixSetDiagOptions() const
+ {
+ return type == BuiltinOptions_MatrixSetDiagOptions
+ ? reinterpret_cast<const circle::MatrixSetDiagOptionsT *>(value)
+ : nullptr;
+ }
+ circle::HardSwishOptionsT *AsHardSwishOptions()
+ {
+ return type == BuiltinOptions_HardSwishOptions
+ ? reinterpret_cast<circle::HardSwishOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::HardSwishOptionsT *AsHardSwishOptions() const
+ {
+ return type == BuiltinOptions_HardSwishOptions
+ ? reinterpret_cast<const circle::HardSwishOptionsT *>(value)
+ : nullptr;
+ }
+ circle::IfOptionsT *AsIfOptions()
+ {
+ return type == BuiltinOptions_IfOptions ? reinterpret_cast<circle::IfOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::IfOptionsT *AsIfOptions() const
+ {
+ return type == BuiltinOptions_IfOptions ? reinterpret_cast<const circle::IfOptionsT *>(value)
+ : nullptr;
+ }
+ circle::WhileOptionsT *AsWhileOptions()
+ {
+ return type == BuiltinOptions_WhileOptions ? reinterpret_cast<circle::WhileOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::WhileOptionsT *AsWhileOptions() const
+ {
+ return type == BuiltinOptions_WhileOptions
+ ? reinterpret_cast<const circle::WhileOptionsT *>(value)
+ : nullptr;
+ }
+ circle::DepthToSpaceOptionsT *AsDepthToSpaceOptions()
+ {
+ return type == BuiltinOptions_DepthToSpaceOptions
+ ? reinterpret_cast<circle::DepthToSpaceOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::DepthToSpaceOptionsT *AsDepthToSpaceOptions() const
+ {
+ return type == BuiltinOptions_DepthToSpaceOptions
+ ? reinterpret_cast<const circle::DepthToSpaceOptionsT *>(value)
+ : nullptr;
+ }
+ circle::NonMaxSuppressionV4OptionsT *AsNonMaxSuppressionV4Options()
+ {
+ return type == BuiltinOptions_NonMaxSuppressionV4Options
+ ? reinterpret_cast<circle::NonMaxSuppressionV4OptionsT *>(value)
+ : nullptr;
+ }
+ const circle::NonMaxSuppressionV4OptionsT *AsNonMaxSuppressionV4Options() const
+ {
+ return type == BuiltinOptions_NonMaxSuppressionV4Options
+ ? reinterpret_cast<const circle::NonMaxSuppressionV4OptionsT *>(value)
+ : nullptr;
+ }
+ circle::NonMaxSuppressionV5OptionsT *AsNonMaxSuppressionV5Options()
+ {
+ return type == BuiltinOptions_NonMaxSuppressionV5Options
+ ? reinterpret_cast<circle::NonMaxSuppressionV5OptionsT *>(value)
+ : nullptr;
+ }
+ const circle::NonMaxSuppressionV5OptionsT *AsNonMaxSuppressionV5Options() const
+ {
+ return type == BuiltinOptions_NonMaxSuppressionV5Options
+ ? reinterpret_cast<const circle::NonMaxSuppressionV5OptionsT *>(value)
+ : nullptr;
+ }
+ circle::ScatterNdOptionsT *AsScatterNdOptions()
+ {
+ return type == BuiltinOptions_ScatterNdOptions
+ ? reinterpret_cast<circle::ScatterNdOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::ScatterNdOptionsT *AsScatterNdOptions() const
+ {
+ return type == BuiltinOptions_ScatterNdOptions
+ ? reinterpret_cast<const circle::ScatterNdOptionsT *>(value)
+ : nullptr;
+ }
+ circle::SelectV2OptionsT *AsSelectV2Options()
+ {
+ return type == BuiltinOptions_SelectV2Options
+ ? reinterpret_cast<circle::SelectV2OptionsT *>(value)
+ : nullptr;
+ }
+ const circle::SelectV2OptionsT *AsSelectV2Options() const
+ {
+ return type == BuiltinOptions_SelectV2Options
+ ? reinterpret_cast<const circle::SelectV2OptionsT *>(value)
+ : nullptr;
+ }
+ circle::DensifyOptionsT *AsDensifyOptions()
+ {
+ return type == BuiltinOptions_DensifyOptions
+ ? reinterpret_cast<circle::DensifyOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::DensifyOptionsT *AsDensifyOptions() const
+ {
+ return type == BuiltinOptions_DensifyOptions
+ ? reinterpret_cast<const circle::DensifyOptionsT *>(value)
+ : nullptr;
+ }
+ circle::SegmentSumOptionsT *AsSegmentSumOptions()
+ {
+ return type == BuiltinOptions_SegmentSumOptions
+ ? reinterpret_cast<circle::SegmentSumOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::SegmentSumOptionsT *AsSegmentSumOptions() const
+ {
+ return type == BuiltinOptions_SegmentSumOptions
+ ? reinterpret_cast<const circle::SegmentSumOptionsT *>(value)
+ : nullptr;
+ }
+ circle::BatchMatMulOptionsT *AsBatchMatMulOptions()
+ {
+ return type == BuiltinOptions_BatchMatMulOptions
+ ? reinterpret_cast<circle::BatchMatMulOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::BatchMatMulOptionsT *AsBatchMatMulOptions() const
+ {
+ return type == BuiltinOptions_BatchMatMulOptions
+ ? reinterpret_cast<const circle::BatchMatMulOptionsT *>(value)
+ : nullptr;
+ }
+ circle::CumsumOptionsT *AsCumsumOptions()
+ {
+ return type == BuiltinOptions_CumsumOptions ? reinterpret_cast<circle::CumsumOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::CumsumOptionsT *AsCumsumOptions() const
+ {
+ return type == BuiltinOptions_CumsumOptions
+ ? reinterpret_cast<const circle::CumsumOptionsT *>(value)
+ : nullptr;
+ }
+ circle::CallOnceOptionsT *AsCallOnceOptions()
+ {
+ return type == BuiltinOptions_CallOnceOptions
+ ? reinterpret_cast<circle::CallOnceOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::CallOnceOptionsT *AsCallOnceOptions() const
+ {
+ return type == BuiltinOptions_CallOnceOptions
+ ? reinterpret_cast<const circle::CallOnceOptionsT *>(value)
+ : nullptr;
+ }
+ circle::BroadcastToOptionsT *AsBroadcastToOptions()
+ {
+ return type == BuiltinOptions_BroadcastToOptions
+ ? reinterpret_cast<circle::BroadcastToOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::BroadcastToOptionsT *AsBroadcastToOptions() const
+ {
+ return type == BuiltinOptions_BroadcastToOptions
+ ? reinterpret_cast<const circle::BroadcastToOptionsT *>(value)
+ : nullptr;
+ }
+ circle::Rfft2dOptionsT *AsRfft2dOptions()
+ {
+ return type == BuiltinOptions_Rfft2dOptions ? reinterpret_cast<circle::Rfft2dOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::Rfft2dOptionsT *AsRfft2dOptions() const
+ {
+ return type == BuiltinOptions_Rfft2dOptions
+ ? reinterpret_cast<const circle::Rfft2dOptionsT *>(value)
+ : nullptr;
+ }
+ circle::Conv3DOptionsT *AsConv3DOptions()
+ {
+ return type == BuiltinOptions_Conv3DOptions ? reinterpret_cast<circle::Conv3DOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::Conv3DOptionsT *AsConv3DOptions() const
+ {
+ return type == BuiltinOptions_Conv3DOptions
+ ? reinterpret_cast<const circle::Conv3DOptionsT *>(value)
+ : nullptr;
+ }
+ circle::HashtableOptionsT *AsHashtableOptions()
+ {
+ return type == BuiltinOptions_HashtableOptions
+ ? reinterpret_cast<circle::HashtableOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::HashtableOptionsT *AsHashtableOptions() const
+ {
+ return type == BuiltinOptions_HashtableOptions
+ ? reinterpret_cast<const circle::HashtableOptionsT *>(value)
+ : nullptr;
+ }
+ circle::HashtableFindOptionsT *AsHashtableFindOptions()
+ {
+ return type == BuiltinOptions_HashtableFindOptions
+ ? reinterpret_cast<circle::HashtableFindOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::HashtableFindOptionsT *AsHashtableFindOptions() const
+ {
+ return type == BuiltinOptions_HashtableFindOptions
+ ? reinterpret_cast<const circle::HashtableFindOptionsT *>(value)
+ : nullptr;
+ }
+ circle::HashtableImportOptionsT *AsHashtableImportOptions()
+ {
+ return type == BuiltinOptions_HashtableImportOptions
+ ? reinterpret_cast<circle::HashtableImportOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::HashtableImportOptionsT *AsHashtableImportOptions() const
+ {
+ return type == BuiltinOptions_HashtableImportOptions
+ ? reinterpret_cast<const circle::HashtableImportOptionsT *>(value)
+ : nullptr;
+ }
+ circle::HashtableSizeOptionsT *AsHashtableSizeOptions()
+ {
+ return type == BuiltinOptions_HashtableSizeOptions
+ ? reinterpret_cast<circle::HashtableSizeOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::HashtableSizeOptionsT *AsHashtableSizeOptions() const
+ {
+ return type == BuiltinOptions_HashtableSizeOptions
+ ? reinterpret_cast<const circle::HashtableSizeOptionsT *>(value)
+ : nullptr;
+ }
+ circle::VarHandleOptionsT *AsVarHandleOptions()
+ {
+ return type == BuiltinOptions_VarHandleOptions
+ ? reinterpret_cast<circle::VarHandleOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::VarHandleOptionsT *AsVarHandleOptions() const
+ {
+ return type == BuiltinOptions_VarHandleOptions
+ ? reinterpret_cast<const circle::VarHandleOptionsT *>(value)
+ : nullptr;
+ }
+ circle::ReadVariableOptionsT *AsReadVariableOptions()
+ {
+ return type == BuiltinOptions_ReadVariableOptions
+ ? reinterpret_cast<circle::ReadVariableOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::ReadVariableOptionsT *AsReadVariableOptions() const
+ {
+ return type == BuiltinOptions_ReadVariableOptions
+ ? reinterpret_cast<const circle::ReadVariableOptionsT *>(value)
+ : nullptr;
+ }
+ circle::AssignVariableOptionsT *AsAssignVariableOptions()
+ {
+ return type == BuiltinOptions_AssignVariableOptions
+ ? reinterpret_cast<circle::AssignVariableOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::AssignVariableOptionsT *AsAssignVariableOptions() const
+ {
+ return type == BuiltinOptions_AssignVariableOptions
+ ? reinterpret_cast<const circle::AssignVariableOptionsT *>(value)
+ : nullptr;
+ }
+ circle::RandomOptionsT *AsRandomOptions()
+ {
+ return type == BuiltinOptions_RandomOptions ? reinterpret_cast<circle::RandomOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::RandomOptionsT *AsRandomOptions() const
+ {
+ return type == BuiltinOptions_RandomOptions
+ ? reinterpret_cast<const circle::RandomOptionsT *>(value)
+ : nullptr;
+ }
+ circle::BCQGatherOptionsT *AsBCQGatherOptions()
+ {
+ return type == BuiltinOptions_BCQGatherOptions
+ ? reinterpret_cast<circle::BCQGatherOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::BCQGatherOptionsT *AsBCQGatherOptions() const
+ {
+ return type == BuiltinOptions_BCQGatherOptions
+ ? reinterpret_cast<const circle::BCQGatherOptionsT *>(value)
+ : nullptr;
+ }
+ circle::BCQFullyConnectedOptionsT *AsBCQFullyConnectedOptions()
+ {
+ return type == BuiltinOptions_BCQFullyConnectedOptions
+ ? reinterpret_cast<circle::BCQFullyConnectedOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::BCQFullyConnectedOptionsT *AsBCQFullyConnectedOptions() const
+ {
+ return type == BuiltinOptions_BCQFullyConnectedOptions
+ ? reinterpret_cast<const circle::BCQFullyConnectedOptionsT *>(value)
+ : nullptr;
+ }
+ circle::InstanceNormOptionsT *AsInstanceNormOptions()
+ {
+ return type == BuiltinOptions_InstanceNormOptions
+ ? reinterpret_cast<circle::InstanceNormOptionsT *>(value)
+ : nullptr;
+ }
+ const circle::InstanceNormOptionsT *AsInstanceNormOptions() const
+ {
+ return type == BuiltinOptions_InstanceNormOptions
+ ? reinterpret_cast<const circle::InstanceNormOptionsT *>(value)
+ : nullptr;
+ }
+};
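+
+// All of the As*Options() accessors above share one pattern: they return a typed
+// pointer to the stored object only when the union's `type` tag matches, and
+// nullptr otherwise, so callers can branch on the result directly. A minimal
+// sketch (illustrative only; `opts` is an assumed, already-populated union value,
+// not part of this header):
+//
+//   if (const circle::ReshapeOptionsT *reshape = opts.AsReshapeOptions())
+//   {
+//     // The tag was BuiltinOptions_ReshapeOptions; `reshape` is safe to read here.
+//   }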
+
+bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions type);
+bool VerifyBuiltinOptionsVector(flatbuffers::Verifier &verifier,
+ const flatbuffers::Vector<flatbuffers::Offset<void>> *values,
+ const flatbuffers::Vector<uint8_t> *types);
+
+enum Padding : int8_t
+{
+ Padding_SAME = 0,
+ Padding_VALID = 1,
+ Padding_MIN = Padding_SAME,
+ Padding_MAX = Padding_VALID
+};
+
+inline const Padding (&EnumValuesPadding())[2]
+{
+ static const Padding values[] = {Padding_SAME, Padding_VALID};
+ return values;
+}
+
+inline const char *const *EnumNamesPadding()
+{
+ static const char *const names[3] = {"SAME", "VALID", nullptr};
+ return names;
+}
+
+inline const char *EnumNamePadding(Padding e)
+{
+ if (flatbuffers::IsOutRange(e, Padding_SAME, Padding_VALID))
+ return "";
+ const size_t index = static_cast<size_t>(e);
+ return EnumNamesPadding()[index];
+}
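+
+// The three Padding helpers above are the usual flatc trio: EnumValuesPadding()
+// enumerates the valid values, EnumNamesPadding() is a nullptr-terminated name
+// table, and EnumNamePadding() maps a value to its name, returning "" for anything
+// out of range. Sketch (illustrative only, not part of the generated schema):
+//
+//   circle::EnumNamePadding(circle::Padding_SAME);              // "SAME"
+//   circle::EnumNamePadding(static_cast<circle::Padding>(42));  // ""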
+
+enum ActivationFunctionType : int8_t
+{
+ ActivationFunctionType_NONE = 0,
+ ActivationFunctionType_RELU = 1,
+ ActivationFunctionType_RELU_N1_TO_1 = 2,
+ ActivationFunctionType_RELU6 = 3,
+ ActivationFunctionType_TANH = 4,
+ ActivationFunctionType_SIGN_BIT = 5,
+ ActivationFunctionType_MIN = ActivationFunctionType_NONE,
+ ActivationFunctionType_MAX = ActivationFunctionType_SIGN_BIT
+};
+
+inline const ActivationFunctionType (&EnumValuesActivationFunctionType())[6]
+{
+ static const ActivationFunctionType values[] = {
+ ActivationFunctionType_NONE, ActivationFunctionType_RELU, ActivationFunctionType_RELU_N1_TO_1,
+ ActivationFunctionType_RELU6, ActivationFunctionType_TANH, ActivationFunctionType_SIGN_BIT};
+ return values;
+}
+
+inline const char *const *EnumNamesActivationFunctionType()
+{
+ static const char *const names[7] = {"NONE", "RELU", "RELU_N1_TO_1", "RELU6",
+ "TANH", "SIGN_BIT", nullptr};
+ return names;
+}
+
+inline const char *EnumNameActivationFunctionType(ActivationFunctionType e)
+{
+ if (flatbuffers::IsOutRange(e, ActivationFunctionType_NONE, ActivationFunctionType_SIGN_BIT))
+ return "";
+ const size_t index = static_cast<size_t>(e);
+ return EnumNamesActivationFunctionType()[index];
+}
+
+enum LSHProjectionType : int8_t
+{
+ LSHProjectionType_UNKNOWN = 0,
+ LSHProjectionType_SPARSE = 1,
+ LSHProjectionType_DENSE = 2,
+ LSHProjectionType_MIN = LSHProjectionType_UNKNOWN,
+ LSHProjectionType_MAX = LSHProjectionType_DENSE
+};
+
+inline const LSHProjectionType (&EnumValuesLSHProjectionType())[3]
+{
+ static const LSHProjectionType values[] = {LSHProjectionType_UNKNOWN, LSHProjectionType_SPARSE,
+ LSHProjectionType_DENSE};
+ return values;
+}
+
+inline const char *const *EnumNamesLSHProjectionType()
+{
+ static const char *const names[4] = {"UNKNOWN", "SPARSE", "DENSE", nullptr};
+ return names;
+}
+
+inline const char *EnumNameLSHProjectionType(LSHProjectionType e)
+{
+ if (flatbuffers::IsOutRange(e, LSHProjectionType_UNKNOWN, LSHProjectionType_DENSE))
+ return "";
+ const size_t index = static_cast<size_t>(e);
+ return EnumNamesLSHProjectionType()[index];
+}
+
+enum FullyConnectedOptionsWeightsFormat : int8_t
+{
+ FullyConnectedOptionsWeightsFormat_DEFAULT = 0,
+ FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8 = 1,
+ FullyConnectedOptionsWeightsFormat_SHUFFLED16x1FLOAT32 = 127,
+ FullyConnectedOptionsWeightsFormat_MIN = FullyConnectedOptionsWeightsFormat_DEFAULT,
+ FullyConnectedOptionsWeightsFormat_MAX = FullyConnectedOptionsWeightsFormat_SHUFFLED16x1FLOAT32
+};
+
+inline const FullyConnectedOptionsWeightsFormat (&EnumValuesFullyConnectedOptionsWeightsFormat())[3]
+{
+ static const FullyConnectedOptionsWeightsFormat values[] = {
+ FullyConnectedOptionsWeightsFormat_DEFAULT, FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8,
+ FullyConnectedOptionsWeightsFormat_SHUFFLED16x1FLOAT32};
+ return values;
+}
+
+inline const char *EnumNameFullyConnectedOptionsWeightsFormat(FullyConnectedOptionsWeightsFormat e)
+{
+ switch (e)
+ {
+ case FullyConnectedOptionsWeightsFormat_DEFAULT:
+ return "DEFAULT";
+ case FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8:
+ return "SHUFFLED4x16INT8";
+ case FullyConnectedOptionsWeightsFormat_SHUFFLED16x1FLOAT32:
+ return "SHUFFLED16x1FLOAT32";
+ default:
+ return "";
+ }
+}
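+
+// Unlike the neighbouring enums, FullyConnectedOptionsWeightsFormat gets a
+// switch-based name lookup and no EnumNames*() table: its values are not
+// contiguous (SHUFFLED16x1FLOAT32 = 127), so indexing a name array by value
+// would not work.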
+
+enum LSTMKernelType : int8_t
+{
+ LSTMKernelType_FULL = 0,
+ LSTMKernelType_BASIC = 1,
+ LSTMKernelType_MIN = LSTMKernelType_FULL,
+ LSTMKernelType_MAX = LSTMKernelType_BASIC
+};
+
+inline const LSTMKernelType (&EnumValuesLSTMKernelType())[2]
+{
+ static const LSTMKernelType values[] = {LSTMKernelType_FULL, LSTMKernelType_BASIC};
+ return values;
+}
+
+inline const char *const *EnumNamesLSTMKernelType()
+{
+ static const char *const names[3] = {"FULL", "BASIC", nullptr};
+ return names;
+}
+
+inline const char *EnumNameLSTMKernelType(LSTMKernelType e)
+{
+ if (flatbuffers::IsOutRange(e, LSTMKernelType_FULL, LSTMKernelType_BASIC))
+ return "";
+ const size_t index = static_cast<size_t>(e);
+ return EnumNamesLSTMKernelType()[index];
+}
+
+enum CombinerType : int8_t
+{
+ CombinerType_SUM = 0,
+ CombinerType_MEAN = 1,
+ CombinerType_SQRTN = 2,
+ CombinerType_MIN = CombinerType_SUM,
+ CombinerType_MAX = CombinerType_SQRTN
+};
+
+inline const CombinerType (&EnumValuesCombinerType())[3]
+{
+ static const CombinerType values[] = {CombinerType_SUM, CombinerType_MEAN, CombinerType_SQRTN};
+ return values;
+}
+
+inline const char *const *EnumNamesCombinerType()
+{
+ static const char *const names[4] = {"SUM", "MEAN", "SQRTN", nullptr};
+ return names;
+}
+
+inline const char *EnumNameCombinerType(CombinerType e)
+{
+ if (flatbuffers::IsOutRange(e, CombinerType_SUM, CombinerType_SQRTN))
+ return "";
+ const size_t index = static_cast<size_t>(e);
+ return EnumNamesCombinerType()[index];
+}
+
+enum MirrorPadMode : int8_t
+{
+ MirrorPadMode_REFLECT = 0,
+ MirrorPadMode_SYMMETRIC = 1,
+ MirrorPadMode_MIN = MirrorPadMode_REFLECT,
+ MirrorPadMode_MAX = MirrorPadMode_SYMMETRIC
+};
+
+inline const MirrorPadMode (&EnumValuesMirrorPadMode())[2]
+{
+ static const MirrorPadMode values[] = {MirrorPadMode_REFLECT, MirrorPadMode_SYMMETRIC};
+ return values;
+}
+
+inline const char *const *EnumNamesMirrorPadMode()
+{
+ static const char *const names[3] = {"REFLECT", "SYMMETRIC", nullptr};
+ return names;
+}
+
+inline const char *EnumNameMirrorPadMode(MirrorPadMode e)
+{
+ if (flatbuffers::IsOutRange(e, MirrorPadMode_REFLECT, MirrorPadMode_SYMMETRIC))
+ return "";
+ const size_t index = static_cast<size_t>(e);
+ return EnumNamesMirrorPadMode()[index];
+}
+
+enum CustomOptionsFormat : int8_t
+{
+ CustomOptionsFormat_FLEXBUFFERS = 0,
+ CustomOptionsFormat_MIN = CustomOptionsFormat_FLEXBUFFERS,
+ CustomOptionsFormat_MAX = CustomOptionsFormat_FLEXBUFFERS
+};
+
+inline const CustomOptionsFormat (&EnumValuesCustomOptionsFormat())[1]
+{
+ static const CustomOptionsFormat values[] = {CustomOptionsFormat_FLEXBUFFERS};
+ return values;
+}
+
+inline const char *const *EnumNamesCustomOptionsFormat()
+{
+ static const char *const names[2] = {"FLEXBUFFERS", nullptr};
+ return names;
+}
+
+inline const char *EnumNameCustomOptionsFormat(CustomOptionsFormat e)
+{
+ if (flatbuffers::IsOutRange(e, CustomOptionsFormat_FLEXBUFFERS, CustomOptionsFormat_FLEXBUFFERS))
+ return "";
+ const size_t index = static_cast<size_t>(e);
+ return EnumNamesCustomOptionsFormat()[index];
+}
+
+enum DataFormat : int8_t
+{
+ DataFormat_CHANNELS_LAST = 0,
+ DataFormat_CHANNELS_FIRST = 1,
+ DataFormat_MIN = DataFormat_CHANNELS_LAST,
+ DataFormat_MAX = DataFormat_CHANNELS_FIRST
+};
+
+inline const DataFormat (&EnumValuesDataFormat())[2]
+{
+ static const DataFormat values[] = {DataFormat_CHANNELS_LAST, DataFormat_CHANNELS_FIRST};
+ return values;
+}
+
+inline const char *const *EnumNamesDataFormat()
+{
+ static const char *const names[3] = {"CHANNELS_LAST", "CHANNELS_FIRST", nullptr};
+ return names;
+}
+
+inline const char *EnumNameDataFormat(DataFormat e)
+{
+ if (flatbuffers::IsOutRange(e, DataFormat_CHANNELS_LAST, DataFormat_CHANNELS_FIRST))
+ return "";
+ const size_t index = static_cast<size_t>(e);
+ return EnumNamesDataFormat()[index];
+}
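+
+// DataFormat describes channel placement for 4-D activations: CHANNELS_LAST
+// corresponds to NHWC layout and CHANNELS_FIRST to NCHW.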
+
+struct CustomQuantizationT : public flatbuffers::NativeTable
+{
+ typedef CustomQuantization TableType;
+ std::vector<uint8_t> custom{};
+};
+
+struct CustomQuantization FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef CustomQuantizationT NativeTableType;
+ typedef CustomQuantizationBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_CUSTOM = 4
+ };
+ const flatbuffers::Vector<uint8_t> *custom() const
+ {
+ return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_CUSTOM);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_CUSTOM) &&
+ verifier.VerifyVector(custom()) && verifier.EndTable();
+ }
+ CustomQuantizationT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(CustomQuantizationT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<CustomQuantization>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const CustomQuantizationT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct CustomQuantizationBuilder
+{
+ typedef CustomQuantization Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_custom(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom)
+ {
+ fbb_.AddOffset(CustomQuantization::VT_CUSTOM, custom);
+ }
+ explicit CustomQuantizationBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<CustomQuantization> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<CustomQuantization>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<CustomQuantization>
+CreateCustomQuantization(flatbuffers::FlatBufferBuilder &_fbb,
+ flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom = 0)
+{
+ CustomQuantizationBuilder builder_(_fbb);
+ builder_.add_custom(custom);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<CustomQuantization>
+CreateCustomQuantizationDirect(flatbuffers::FlatBufferBuilder &_fbb,
+ const std::vector<uint8_t> *custom = nullptr)
+{
+ if (custom)
+ {
+ _fbb.ForceVectorAlignment(custom->size(), sizeof(uint8_t), 16);
+ }
+ auto custom__ = custom ? _fbb.CreateVector<uint8_t>(*custom) : 0;
+ return circle::CreateCustomQuantization(_fbb, custom__);
+}
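+
+// Usage sketch for the Direct helper above (illustrative only; `fbb` and `bytes`
+// are assumed local variables, not part of this header). Note that the helper
+// force-aligns the byte payload to 16 bytes before serializing it:
+//
+//   flatbuffers::FlatBufferBuilder fbb;
+//   std::vector<uint8_t> bytes = {1, 2, 3, 4};
+//   auto cq = circle::CreateCustomQuantizationDirect(fbb, &bytes);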
+
+flatbuffers::Offset<CustomQuantization>
+CreateCustomQuantization(flatbuffers::FlatBufferBuilder &_fbb, const CustomQuantizationT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct QuantizationParametersT : public flatbuffers::NativeTable
+{
+ typedef QuantizationParameters TableType;
+ std::vector<float> min{};
+ std::vector<float> max{};
+ std::vector<float> scale{};
+ std::vector<int64_t> zero_point{};
+ circle::QuantizationDetailsUnion details{};
+ int32_t quantized_dimension = 0;
+};
+
+struct QuantizationParameters FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef QuantizationParametersT NativeTableType;
+ typedef QuantizationParametersBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_MIN = 4,
+ VT_MAX = 6,
+ VT_SCALE = 8,
+ VT_ZERO_POINT = 10,
+ VT_DETAILS_TYPE = 12,
+ VT_DETAILS = 14,
+ VT_QUANTIZED_DIMENSION = 16
+ };
+ const flatbuffers::Vector<float> *min() const
+ {
+ return GetPointer<const flatbuffers::Vector<float> *>(VT_MIN);
+ }
+ const flatbuffers::Vector<float> *max() const
+ {
+ return GetPointer<const flatbuffers::Vector<float> *>(VT_MAX);
+ }
+ const flatbuffers::Vector<float> *scale() const
+ {
+ return GetPointer<const flatbuffers::Vector<float> *>(VT_SCALE);
+ }
+ const flatbuffers::Vector<int64_t> *zero_point() const
+ {
+ return GetPointer<const flatbuffers::Vector<int64_t> *>(VT_ZERO_POINT);
+ }
+ circle::QuantizationDetails details_type() const
+ {
+ return static_cast<circle::QuantizationDetails>(GetField<uint8_t>(VT_DETAILS_TYPE, 0));
+ }
+ const void *details() const { return GetPointer<const void *>(VT_DETAILS); }
+ template <typename T> const T *details_as() const;
+ const circle::CustomQuantization *details_as_CustomQuantization() const
+ {
+ return details_type() == circle::QuantizationDetails_CustomQuantization
+ ? static_cast<const circle::CustomQuantization *>(details())
+ : nullptr;
+ }
+ int32_t quantized_dimension() const { return GetField<int32_t>(VT_QUANTIZED_DIMENSION, 0); }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_MIN) &&
+ verifier.VerifyVector(min()) && VerifyOffset(verifier, VT_MAX) &&
+ verifier.VerifyVector(max()) && VerifyOffset(verifier, VT_SCALE) &&
+ verifier.VerifyVector(scale()) && VerifyOffset(verifier, VT_ZERO_POINT) &&
+ verifier.VerifyVector(zero_point()) && VerifyField<uint8_t>(verifier, VT_DETAILS_TYPE) &&
+ VerifyOffset(verifier, VT_DETAILS) &&
+ VerifyQuantizationDetails(verifier, details(), details_type()) &&
+ VerifyField<int32_t>(verifier, VT_QUANTIZED_DIMENSION) && verifier.EndTable();
+ }
+ QuantizationParametersT *
+ UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(QuantizationParametersT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<QuantizationParameters>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizationParametersT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+template <>
+inline const circle::CustomQuantization *
+QuantizationParameters::details_as<circle::CustomQuantization>() const
+{
+ return details_as_CustomQuantization();
+}
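+
+// Reading the union from a QuantizationParameters table follows the tag-plus-cast
+// pattern above: details_type() identifies the stored case and details_as_*() (or
+// the details_as<T>() template) performs the checked cast. Sketch (illustrative
+// only; `q` is an assumed pointer to a verified QuantizationParameters table):
+//
+//   if (const circle::CustomQuantization *cq = q->details_as_CustomQuantization())
+//   {
+//     const flatbuffers::Vector<uint8_t> *raw = cq->custom();  // opaque custom payload
+//   }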
+
+struct QuantizationParametersBuilder
+{
+ typedef QuantizationParameters Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_min(flatbuffers::Offset<flatbuffers::Vector<float>> min)
+ {
+ fbb_.AddOffset(QuantizationParameters::VT_MIN, min);
+ }
+ void add_max(flatbuffers::Offset<flatbuffers::Vector<float>> max)
+ {
+ fbb_.AddOffset(QuantizationParameters::VT_MAX, max);
+ }
+ void add_scale(flatbuffers::Offset<flatbuffers::Vector<float>> scale)
+ {
+ fbb_.AddOffset(QuantizationParameters::VT_SCALE, scale);
+ }
+ void add_zero_point(flatbuffers::Offset<flatbuffers::Vector<int64_t>> zero_point)
+ {
+ fbb_.AddOffset(QuantizationParameters::VT_ZERO_POINT, zero_point);
+ }
+ void add_details_type(circle::QuantizationDetails details_type)
+ {
+ fbb_.AddElement<uint8_t>(QuantizationParameters::VT_DETAILS_TYPE,
+ static_cast<uint8_t>(details_type), 0);
+ }
+ void add_details(flatbuffers::Offset<void> details)
+ {
+ fbb_.AddOffset(QuantizationParameters::VT_DETAILS, details);
+ }
+ void add_quantized_dimension(int32_t quantized_dimension)
+ {
+ fbb_.AddElement<int32_t>(QuantizationParameters::VT_QUANTIZED_DIMENSION, quantized_dimension,
+ 0);
+ }
+ explicit QuantizationParametersBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<QuantizationParameters> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<QuantizationParameters>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<QuantizationParameters> CreateQuantizationParameters(
+ flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset<flatbuffers::Vector<float>> min = 0,
+ flatbuffers::Offset<flatbuffers::Vector<float>> max = 0,
+ flatbuffers::Offset<flatbuffers::Vector<float>> scale = 0,
+ flatbuffers::Offset<flatbuffers::Vector<int64_t>> zero_point = 0,
+ circle::QuantizationDetails details_type = circle::QuantizationDetails_NONE,
+ flatbuffers::Offset<void> details = 0, int32_t quantized_dimension = 0)
+{
+ QuantizationParametersBuilder builder_(_fbb);
+ builder_.add_quantized_dimension(quantized_dimension);
+ builder_.add_details(details);
+ builder_.add_zero_point(zero_point);
+ builder_.add_scale(scale);
+ builder_.add_max(max);
+ builder_.add_min(min);
+ builder_.add_details_type(details_type);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<QuantizationParameters> CreateQuantizationParametersDirect(
+ flatbuffers::FlatBufferBuilder &_fbb, const std::vector<float> *min = nullptr,
+ const std::vector<float> *max = nullptr, const std::vector<float> *scale = nullptr,
+ const std::vector<int64_t> *zero_point = nullptr,
+ circle::QuantizationDetails details_type = circle::QuantizationDetails_NONE,
+ flatbuffers::Offset<void> details = 0, int32_t quantized_dimension = 0)
+{
+ auto min__ = min ? _fbb.CreateVector<float>(*min) : 0;
+ auto max__ = max ? _fbb.CreateVector<float>(*max) : 0;
+ auto scale__ = scale ? _fbb.CreateVector<float>(*scale) : 0;
+ auto zero_point__ = zero_point ? _fbb.CreateVector<int64_t>(*zero_point) : 0;
+ return circle::CreateQuantizationParameters(_fbb, min__, max__, scale__, zero_point__,
+ details_type, details, quantized_dimension);
+}
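+
+// Usage sketch for the Direct helper above (illustrative only; every local name is
+// an assumption). Per-tensor affine quantization typically carries a single scale
+// and zero point and leaves quantized_dimension at its default of 0:
+//
+//   std::vector<float> scale = {0.0078125f};
+//   std::vector<int64_t> zero_point = {128};
+//   auto qparams = circle::CreateQuantizationParametersDirect(
+//     fbb, /*min=*/nullptr, /*max=*/nullptr, &scale, &zero_point);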
+
+flatbuffers::Offset<QuantizationParameters>
+CreateQuantizationParameters(flatbuffers::FlatBufferBuilder &_fbb,
+ const QuantizationParametersT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct Int32VectorT : public flatbuffers::NativeTable
+{
+ typedef Int32Vector TableType;
+ std::vector<int32_t> values{};
+};
+
+struct Int32Vector FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef Int32VectorT NativeTableType;
+ typedef Int32VectorBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_VALUES = 4
+ };
+ const flatbuffers::Vector<int32_t> *values() const
+ {
+ return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_VALUES);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_VALUES) &&
+ verifier.VerifyVector(values()) && verifier.EndTable();
+ }
+ Int32VectorT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(Int32VectorT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<Int32Vector>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const Int32VectorT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct Int32VectorBuilder
+{
+ typedef Int32Vector Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_values(flatbuffers::Offset<flatbuffers::Vector<int32_t>> values)
+ {
+ fbb_.AddOffset(Int32Vector::VT_VALUES, values);
+ }
+ explicit Int32VectorBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<Int32Vector> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<Int32Vector>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<Int32Vector>
+CreateInt32Vector(flatbuffers::FlatBufferBuilder &_fbb,
+ flatbuffers::Offset<flatbuffers::Vector<int32_t>> values = 0)
+{
+ Int32VectorBuilder builder_(_fbb);
+ builder_.add_values(values);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<Int32Vector>
+CreateInt32VectorDirect(flatbuffers::FlatBufferBuilder &_fbb,
+ const std::vector<int32_t> *values = nullptr)
+{
+ auto values__ = values ? _fbb.CreateVector<int32_t>(*values) : 0;
+ return circle::CreateInt32Vector(_fbb, values__);
+}
+
+flatbuffers::Offset<Int32Vector>
+CreateInt32Vector(flatbuffers::FlatBufferBuilder &_fbb, const Int32VectorT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct Uint16VectorT : public flatbuffers::NativeTable
+{
+ typedef Uint16Vector TableType;
+ std::vector<uint16_t> values{};
+};
+
+struct Uint16Vector FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef Uint16VectorT NativeTableType;
+ typedef Uint16VectorBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_VALUES = 4
+ };
+ const flatbuffers::Vector<uint16_t> *values() const
+ {
+ return GetPointer<const flatbuffers::Vector<uint16_t> *>(VT_VALUES);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_VALUES) &&
+ verifier.VerifyVector(values()) && verifier.EndTable();
+ }
+ Uint16VectorT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(Uint16VectorT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<Uint16Vector>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const Uint16VectorT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct Uint16VectorBuilder
+{
+ typedef Uint16Vector Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_values(flatbuffers::Offset<flatbuffers::Vector<uint16_t>> values)
+ {
+ fbb_.AddOffset(Uint16Vector::VT_VALUES, values);
+ }
+ explicit Uint16VectorBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<Uint16Vector> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<Uint16Vector>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<Uint16Vector>
+CreateUint16Vector(flatbuffers::FlatBufferBuilder &_fbb,
+ flatbuffers::Offset<flatbuffers::Vector<uint16_t>> values = 0)
+{
+ Uint16VectorBuilder builder_(_fbb);
+ builder_.add_values(values);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<Uint16Vector>
+CreateUint16VectorDirect(flatbuffers::FlatBufferBuilder &_fbb,
+ const std::vector<uint16_t> *values = nullptr)
+{
+ if (values)
+ {
+ _fbb.ForceVectorAlignment(values->size(), sizeof(uint16_t), 4);
+ }
+ auto values__ = values ? _fbb.CreateVector<uint16_t>(*values) : 0;
+ return circle::CreateUint16Vector(_fbb, values__);
+}
+
+flatbuffers::Offset<Uint16Vector>
+CreateUint16Vector(flatbuffers::FlatBufferBuilder &_fbb, const Uint16VectorT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct Uint8VectorT : public flatbuffers::NativeTable
+{
+ typedef Uint8Vector TableType;
+ std::vector<uint8_t> values{};
+};
+
+struct Uint8Vector FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef Uint8VectorT NativeTableType;
+ typedef Uint8VectorBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_VALUES = 4
+ };
+ const flatbuffers::Vector<uint8_t> *values() const
+ {
+ return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_VALUES);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_VALUES) &&
+ verifier.VerifyVector(values()) && verifier.EndTable();
+ }
+ Uint8VectorT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(Uint8VectorT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<Uint8Vector>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const Uint8VectorT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct Uint8VectorBuilder
+{
+ typedef Uint8Vector Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_values(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> values)
+ {
+ fbb_.AddOffset(Uint8Vector::VT_VALUES, values);
+ }
+ explicit Uint8VectorBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<Uint8Vector> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<Uint8Vector>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<Uint8Vector>
+CreateUint8Vector(flatbuffers::FlatBufferBuilder &_fbb,
+ flatbuffers::Offset<flatbuffers::Vector<uint8_t>> values = 0)
+{
+ Uint8VectorBuilder builder_(_fbb);
+ builder_.add_values(values);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<Uint8Vector>
+CreateUint8VectorDirect(flatbuffers::FlatBufferBuilder &_fbb,
+ const std::vector<uint8_t> *values = nullptr)
+{
+ if (values)
+ {
+ _fbb.ForceVectorAlignment(values->size(), sizeof(uint8_t), 4);
+ }
+ auto values__ = values ? _fbb.CreateVector<uint8_t>(*values) : 0;
+ return circle::CreateUint8Vector(_fbb, values__);
+}
+
+flatbuffers::Offset<Uint8Vector>
+CreateUint8Vector(flatbuffers::FlatBufferBuilder &_fbb, const Uint8VectorT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct DimensionMetadataT : public flatbuffers::NativeTable
+{
+ typedef DimensionMetadata TableType;
+ circle::DimensionType format = circle::DimensionType_DENSE;
+ int32_t dense_size = 0;
+ circle::SparseIndexVectorUnion array_segments{};
+ circle::SparseIndexVectorUnion array_indices{};
+};
+
+struct DimensionMetadata FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef DimensionMetadataT NativeTableType;
+ typedef DimensionMetadataBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_FORMAT = 4,
+ VT_DENSE_SIZE = 6,
+ VT_ARRAY_SEGMENTS_TYPE = 8,
+ VT_ARRAY_SEGMENTS = 10,
+ VT_ARRAY_INDICES_TYPE = 12,
+ VT_ARRAY_INDICES = 14
+ };
+ circle::DimensionType format() const
+ {
+ return static_cast<circle::DimensionType>(GetField<int8_t>(VT_FORMAT, 0));
+ }
+ int32_t dense_size() const { return GetField<int32_t>(VT_DENSE_SIZE, 0); }
+ circle::SparseIndexVector array_segments_type() const
+ {
+ return static_cast<circle::SparseIndexVector>(GetField<uint8_t>(VT_ARRAY_SEGMENTS_TYPE, 0));
+ }
+ const void *array_segments() const { return GetPointer<const void *>(VT_ARRAY_SEGMENTS); }
+ template <typename T> const T *array_segments_as() const;
+ const circle::Int32Vector *array_segments_as_Int32Vector() const
+ {
+ return array_segments_type() == circle::SparseIndexVector_Int32Vector
+ ? static_cast<const circle::Int32Vector *>(array_segments())
+ : nullptr;
+ }
+ const circle::Uint16Vector *array_segments_as_Uint16Vector() const
+ {
+ return array_segments_type() == circle::SparseIndexVector_Uint16Vector
+ ? static_cast<const circle::Uint16Vector *>(array_segments())
+ : nullptr;
+ }
+ const circle::Uint8Vector *array_segments_as_Uint8Vector() const
+ {
+ return array_segments_type() == circle::SparseIndexVector_Uint8Vector
+ ? static_cast<const circle::Uint8Vector *>(array_segments())
+ : nullptr;
+ }
+ circle::SparseIndexVector array_indices_type() const
+ {
+ return static_cast<circle::SparseIndexVector>(GetField<uint8_t>(VT_ARRAY_INDICES_TYPE, 0));
+ }
+ const void *array_indices() const { return GetPointer<const void *>(VT_ARRAY_INDICES); }
+ template <typename T> const T *array_indices_as() const;
+ const circle::Int32Vector *array_indices_as_Int32Vector() const
+ {
+ return array_indices_type() == circle::SparseIndexVector_Int32Vector
+ ? static_cast<const circle::Int32Vector *>(array_indices())
+ : nullptr;
+ }
+ const circle::Uint16Vector *array_indices_as_Uint16Vector() const
+ {
+ return array_indices_type() == circle::SparseIndexVector_Uint16Vector
+ ? static_cast<const circle::Uint16Vector *>(array_indices())
+ : nullptr;
+ }
+ const circle::Uint8Vector *array_indices_as_Uint8Vector() const
+ {
+ return array_indices_type() == circle::SparseIndexVector_Uint8Vector
+ ? static_cast<const circle::Uint8Vector *>(array_indices())
+ : nullptr;
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_FORMAT) &&
+ VerifyField<int32_t>(verifier, VT_DENSE_SIZE) &&
+ VerifyField<uint8_t>(verifier, VT_ARRAY_SEGMENTS_TYPE) &&
+ VerifyOffset(verifier, VT_ARRAY_SEGMENTS) &&
+ VerifySparseIndexVector(verifier, array_segments(), array_segments_type()) &&
+ VerifyField<uint8_t>(verifier, VT_ARRAY_INDICES_TYPE) &&
+ VerifyOffset(verifier, VT_ARRAY_INDICES) &&
+ VerifySparseIndexVector(verifier, array_indices(), array_indices_type()) &&
+ verifier.EndTable();
+ }
+ DimensionMetadataT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(DimensionMetadataT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<DimensionMetadata>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const DimensionMetadataT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+template <>
+inline const circle::Int32Vector *DimensionMetadata::array_segments_as<circle::Int32Vector>() const
+{
+ return array_segments_as_Int32Vector();
+}
+
+template <>
+inline const circle::Uint16Vector *
+DimensionMetadata::array_segments_as<circle::Uint16Vector>() const
+{
+ return array_segments_as_Uint16Vector();
+}
+
+template <>
+inline const circle::Uint8Vector *DimensionMetadata::array_segments_as<circle::Uint8Vector>() const
+{
+ return array_segments_as_Uint8Vector();
+}
+
+template <>
+inline const circle::Int32Vector *DimensionMetadata::array_indices_as<circle::Int32Vector>() const
+{
+ return array_indices_as_Int32Vector();
+}
+
+template <>
+inline const circle::Uint16Vector *DimensionMetadata::array_indices_as<circle::Uint16Vector>() const
+{
+ return array_indices_as_Uint16Vector();
+}
+
+template <>
+inline const circle::Uint8Vector *DimensionMetadata::array_indices_as<circle::Uint8Vector>() const
+{
+ return array_indices_as_Uint8Vector();
+}
+
+struct DimensionMetadataBuilder
+{
+ typedef DimensionMetadata Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_format(circle::DimensionType format)
+ {
+ fbb_.AddElement<int8_t>(DimensionMetadata::VT_FORMAT, static_cast<int8_t>(format), 0);
+ }
+ void add_dense_size(int32_t dense_size)
+ {
+ fbb_.AddElement<int32_t>(DimensionMetadata::VT_DENSE_SIZE, dense_size, 0);
+ }
+ void add_array_segments_type(circle::SparseIndexVector array_segments_type)
+ {
+ fbb_.AddElement<uint8_t>(DimensionMetadata::VT_ARRAY_SEGMENTS_TYPE,
+ static_cast<uint8_t>(array_segments_type), 0);
+ }
+ void add_array_segments(flatbuffers::Offset<void> array_segments)
+ {
+ fbb_.AddOffset(DimensionMetadata::VT_ARRAY_SEGMENTS, array_segments);
+ }
+ void add_array_indices_type(circle::SparseIndexVector array_indices_type)
+ {
+ fbb_.AddElement<uint8_t>(DimensionMetadata::VT_ARRAY_INDICES_TYPE,
+ static_cast<uint8_t>(array_indices_type), 0);
+ }
+ void add_array_indices(flatbuffers::Offset<void> array_indices)
+ {
+ fbb_.AddOffset(DimensionMetadata::VT_ARRAY_INDICES, array_indices);
+ }
+ explicit DimensionMetadataBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<DimensionMetadata> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<DimensionMetadata>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<DimensionMetadata> CreateDimensionMetadata(
+ flatbuffers::FlatBufferBuilder &_fbb, circle::DimensionType format = circle::DimensionType_DENSE,
+ int32_t dense_size = 0,
+ circle::SparseIndexVector array_segments_type = circle::SparseIndexVector_NONE,
+ flatbuffers::Offset<void> array_segments = 0,
+ circle::SparseIndexVector array_indices_type = circle::SparseIndexVector_NONE,
+ flatbuffers::Offset<void> array_indices = 0)
+{
+ DimensionMetadataBuilder builder_(_fbb);
+ builder_.add_array_indices(array_indices);
+ builder_.add_array_segments(array_segments);
+ builder_.add_dense_size(dense_size);
+ builder_.add_array_indices_type(array_indices_type);
+ builder_.add_array_segments_type(array_segments_type);
+ builder_.add_format(format);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<DimensionMetadata>
+CreateDimensionMetadata(flatbuffers::FlatBufferBuilder &_fbb, const DimensionMetadataT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct SparsityParametersT : public flatbuffers::NativeTable
+{
+ typedef SparsityParameters TableType;
+ std::vector<int32_t> traversal_order{};
+ std::vector<int32_t> block_map{};
+ std::vector<std::unique_ptr<circle::DimensionMetadataT>> dim_metadata{};
+};
+
+struct SparsityParameters FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef SparsityParametersT NativeTableType;
+ typedef SparsityParametersBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_TRAVERSAL_ORDER = 4,
+ VT_BLOCK_MAP = 6,
+ VT_DIM_METADATA = 8
+ };
+ const flatbuffers::Vector<int32_t> *traversal_order() const
+ {
+ return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_TRAVERSAL_ORDER);
+ }
+ const flatbuffers::Vector<int32_t> *block_map() const
+ {
+ return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_BLOCK_MAP);
+ }
+ const flatbuffers::Vector<flatbuffers::Offset<circle::DimensionMetadata>> *dim_metadata() const
+ {
+ return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<circle::DimensionMetadata>> *>(
+ VT_DIM_METADATA);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_TRAVERSAL_ORDER) &&
+ verifier.VerifyVector(traversal_order()) && VerifyOffset(verifier, VT_BLOCK_MAP) &&
+ verifier.VerifyVector(block_map()) && VerifyOffset(verifier, VT_DIM_METADATA) &&
+ verifier.VerifyVector(dim_metadata()) && verifier.VerifyVectorOfTables(dim_metadata()) &&
+ verifier.EndTable();
+ }
+ SparsityParametersT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(SparsityParametersT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<SparsityParameters>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const SparsityParametersT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct SparsityParametersBuilder
+{
+ typedef SparsityParameters Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_traversal_order(flatbuffers::Offset<flatbuffers::Vector<int32_t>> traversal_order)
+ {
+ fbb_.AddOffset(SparsityParameters::VT_TRAVERSAL_ORDER, traversal_order);
+ }
+ void add_block_map(flatbuffers::Offset<flatbuffers::Vector<int32_t>> block_map)
+ {
+ fbb_.AddOffset(SparsityParameters::VT_BLOCK_MAP, block_map);
+ }
+ void add_dim_metadata(
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<circle::DimensionMetadata>>>
+ dim_metadata)
+ {
+ fbb_.AddOffset(SparsityParameters::VT_DIM_METADATA, dim_metadata);
+ }
+ explicit SparsityParametersBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<SparsityParameters> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<SparsityParameters>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<SparsityParameters> CreateSparsityParameters(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ flatbuffers::Offset<flatbuffers::Vector<int32_t>> traversal_order = 0,
+ flatbuffers::Offset<flatbuffers::Vector<int32_t>> block_map = 0,
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<circle::DimensionMetadata>>>
+ dim_metadata = 0)
+{
+ SparsityParametersBuilder builder_(_fbb);
+ builder_.add_dim_metadata(dim_metadata);
+ builder_.add_block_map(block_map);
+ builder_.add_traversal_order(traversal_order);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<SparsityParameters> CreateSparsityParametersDirect(
+ flatbuffers::FlatBufferBuilder &_fbb, const std::vector<int32_t> *traversal_order = nullptr,
+ const std::vector<int32_t> *block_map = nullptr,
+ const std::vector<flatbuffers::Offset<circle::DimensionMetadata>> *dim_metadata = nullptr)
+{
+ auto traversal_order__ = traversal_order ? _fbb.CreateVector<int32_t>(*traversal_order) : 0;
+ auto block_map__ = block_map ? _fbb.CreateVector<int32_t>(*block_map) : 0;
+ auto dim_metadata__ =
+ dim_metadata ? _fbb.CreateVector<flatbuffers::Offset<circle::DimensionMetadata>>(*dim_metadata)
+ : 0;
+ return circle::CreateSparsityParameters(_fbb, traversal_order__, block_map__, dim_metadata__);
+}
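+
+// Illustrative sketch (not generated code): assembling sparsity metadata with the
+// helpers above. "fbb" is an assumed local flatbuffers::FlatBufferBuilder, and
+// DimensionType_SPARSE_CSR is assumed to be the sparse enumerator of DimensionType.
+//
+//   std::vector<uint16_t> segments{0, 2, 5};
+//   auto segments_off = circle::CreateUint16VectorDirect(fbb, &segments);
+//   auto dim = circle::CreateDimensionMetadata(
+//     fbb, circle::DimensionType_SPARSE_CSR, /*dense_size=*/0,
+//     circle::SparseIndexVector_Uint16Vector, segments_off.Union());
+//   std::vector<flatbuffers::Offset<circle::DimensionMetadata>> dims{dim};
+//   auto sparsity = circle::CreateSparsityParametersDirect(fbb, nullptr, nullptr, &dims);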
+
+flatbuffers::Offset<SparsityParameters>
+CreateSparsityParameters(flatbuffers::FlatBufferBuilder &_fbb, const SparsityParametersT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct TensorT : public flatbuffers::NativeTable
+{
+ typedef Tensor TableType;
+ std::vector<int32_t> shape{};
+ circle::TensorType type = circle::TensorType_FLOAT32;
+ uint32_t buffer = 0;
+ std::string name{};
+ std::unique_ptr<circle::QuantizationParametersT> quantization{};
+ bool is_variable = false;
+ std::unique_ptr<circle::SparsityParametersT> sparsity{};
+ std::vector<int32_t> shape_signature{};
+};
+
+struct Tensor FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef TensorT NativeTableType;
+ typedef TensorBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_SHAPE = 4,
+ VT_TYPE = 6,
+ VT_BUFFER = 8,
+ VT_NAME = 10,
+ VT_QUANTIZATION = 12,
+ VT_IS_VARIABLE = 14,
+ VT_SPARSITY = 16,
+ VT_SHAPE_SIGNATURE = 18
+ };
+ const flatbuffers::Vector<int32_t> *shape() const
+ {
+ return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_SHAPE);
+ }
+ circle::TensorType type() const
+ {
+ return static_cast<circle::TensorType>(GetField<int8_t>(VT_TYPE, 0));
+ }
+ uint32_t buffer() const { return GetField<uint32_t>(VT_BUFFER, 0); }
+ const flatbuffers::String *name() const
+ {
+ return GetPointer<const flatbuffers::String *>(VT_NAME);
+ }
+ const circle::QuantizationParameters *quantization() const
+ {
+ return GetPointer<const circle::QuantizationParameters *>(VT_QUANTIZATION);
+ }
+ bool is_variable() const { return GetField<uint8_t>(VT_IS_VARIABLE, 0) != 0; }
+ const circle::SparsityParameters *sparsity() const
+ {
+ return GetPointer<const circle::SparsityParameters *>(VT_SPARSITY);
+ }
+ const flatbuffers::Vector<int32_t> *shape_signature() const
+ {
+ return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_SHAPE_SIGNATURE);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_SHAPE) &&
+ verifier.VerifyVector(shape()) && VerifyField<int8_t>(verifier, VT_TYPE) &&
+ VerifyField<uint32_t>(verifier, VT_BUFFER) && VerifyOffset(verifier, VT_NAME) &&
+ verifier.VerifyString(name()) && VerifyOffset(verifier, VT_QUANTIZATION) &&
+ verifier.VerifyTable(quantization()) && VerifyField<uint8_t>(verifier, VT_IS_VARIABLE) &&
+ VerifyOffset(verifier, VT_SPARSITY) && verifier.VerifyTable(sparsity()) &&
+ VerifyOffset(verifier, VT_SHAPE_SIGNATURE) && verifier.VerifyVector(shape_signature()) &&
+ verifier.EndTable();
+ }
+ TensorT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(TensorT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<Tensor>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const TensorT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct TensorBuilder
+{
+ typedef Tensor Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_shape(flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape)
+ {
+ fbb_.AddOffset(Tensor::VT_SHAPE, shape);
+ }
+ void add_type(circle::TensorType type)
+ {
+ fbb_.AddElement<int8_t>(Tensor::VT_TYPE, static_cast<int8_t>(type), 0);
+ }
+ void add_buffer(uint32_t buffer) { fbb_.AddElement<uint32_t>(Tensor::VT_BUFFER, buffer, 0); }
+ void add_name(flatbuffers::Offset<flatbuffers::String> name)
+ {
+ fbb_.AddOffset(Tensor::VT_NAME, name);
+ }
+ void add_quantization(flatbuffers::Offset<circle::QuantizationParameters> quantization)
+ {
+ fbb_.AddOffset(Tensor::VT_QUANTIZATION, quantization);
+ }
+ void add_is_variable(bool is_variable)
+ {
+ fbb_.AddElement<uint8_t>(Tensor::VT_IS_VARIABLE, static_cast<uint8_t>(is_variable), 0);
+ }
+ void add_sparsity(flatbuffers::Offset<circle::SparsityParameters> sparsity)
+ {
+ fbb_.AddOffset(Tensor::VT_SPARSITY, sparsity);
+ }
+ void add_shape_signature(flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape_signature)
+ {
+ fbb_.AddOffset(Tensor::VT_SHAPE_SIGNATURE, shape_signature);
+ }
+ explicit TensorBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<Tensor> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<Tensor>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<Tensor>
+CreateTensor(flatbuffers::FlatBufferBuilder &_fbb,
+ flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape = 0,
+ circle::TensorType type = circle::TensorType_FLOAT32, uint32_t buffer = 0,
+ flatbuffers::Offset<flatbuffers::String> name = 0,
+ flatbuffers::Offset<circle::QuantizationParameters> quantization = 0,
+ bool is_variable = false, flatbuffers::Offset<circle::SparsityParameters> sparsity = 0,
+ flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape_signature = 0)
+{
+ TensorBuilder builder_(_fbb);
+ builder_.add_shape_signature(shape_signature);
+ builder_.add_sparsity(sparsity);
+ builder_.add_quantization(quantization);
+ builder_.add_name(name);
+ builder_.add_buffer(buffer);
+ builder_.add_shape(shape);
+ builder_.add_is_variable(is_variable);
+ builder_.add_type(type);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<Tensor> CreateTensorDirect(
+ flatbuffers::FlatBufferBuilder &_fbb, const std::vector<int32_t> *shape = nullptr,
+ circle::TensorType type = circle::TensorType_FLOAT32, uint32_t buffer = 0,
+ const char *name = nullptr, flatbuffers::Offset<circle::QuantizationParameters> quantization = 0,
+ bool is_variable = false, flatbuffers::Offset<circle::SparsityParameters> sparsity = 0,
+ const std::vector<int32_t> *shape_signature = nullptr)
+{
+ auto shape__ = shape ? _fbb.CreateVector<int32_t>(*shape) : 0;
+ auto name__ = name ? _fbb.CreateString(name) : 0;
+ auto shape_signature__ = shape_signature ? _fbb.CreateVector<int32_t>(*shape_signature) : 0;
+ return circle::CreateTensor(_fbb, shape__, type, buffer, name__, quantization, is_variable,
+ sparsity, shape_signature__);
+}
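+
+// Illustrative sketch (not generated code): creating a Tensor with CreateTensorDirect
+// above. "fbb", the shape values, and buffer index 0 are assumptions for the example.
+//
+//   flatbuffers::FlatBufferBuilder fbb;
+//   std::vector<int32_t> shape{1, 16};
+//   auto tensor = circle::CreateTensorDirect(fbb, &shape, circle::TensorType_FLOAT32,
+//                                            /*buffer=*/0, "my_tensor");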
+
+flatbuffers::Offset<Tensor>
+CreateTensor(flatbuffers::FlatBufferBuilder &_fbb, const TensorT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct Conv2DOptionsT : public flatbuffers::NativeTable
+{
+ typedef Conv2DOptions TableType;
+ circle::Padding padding = circle::Padding_SAME;
+ int32_t stride_w = 0;
+ int32_t stride_h = 0;
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE;
+ int32_t dilation_w_factor = 1;
+ int32_t dilation_h_factor = 1;
+};
+
+struct Conv2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef Conv2DOptionsT NativeTableType;
+ typedef Conv2DOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_PADDING = 4,
+ VT_STRIDE_W = 6,
+ VT_STRIDE_H = 8,
+ VT_FUSED_ACTIVATION_FUNCTION = 10,
+ VT_DILATION_W_FACTOR = 12,
+ VT_DILATION_H_FACTOR = 14
+ };
+ circle::Padding padding() const
+ {
+ return static_cast<circle::Padding>(GetField<int8_t>(VT_PADDING, 0));
+ }
+ int32_t stride_w() const { return GetField<int32_t>(VT_STRIDE_W, 0); }
+ int32_t stride_h() const { return GetField<int32_t>(VT_STRIDE_H, 0); }
+ circle::ActivationFunctionType fused_activation_function() const
+ {
+ return static_cast<circle::ActivationFunctionType>(
+ GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
+ }
+ int32_t dilation_w_factor() const { return GetField<int32_t>(VT_DILATION_W_FACTOR, 1); }
+ int32_t dilation_h_factor() const { return GetField<int32_t>(VT_DILATION_H_FACTOR, 1); }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_PADDING) &&
+ VerifyField<int32_t>(verifier, VT_STRIDE_W) &&
+ VerifyField<int32_t>(verifier, VT_STRIDE_H) &&
+ VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
+ VerifyField<int32_t>(verifier, VT_DILATION_W_FACTOR) &&
+ VerifyField<int32_t>(verifier, VT_DILATION_H_FACTOR) && verifier.EndTable();
+ }
+ Conv2DOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(Conv2DOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<Conv2DOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct Conv2DOptionsBuilder
+{
+ typedef Conv2DOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_padding(circle::Padding padding)
+ {
+ fbb_.AddElement<int8_t>(Conv2DOptions::VT_PADDING, static_cast<int8_t>(padding), 0);
+ }
+ void add_stride_w(int32_t stride_w)
+ {
+ fbb_.AddElement<int32_t>(Conv2DOptions::VT_STRIDE_W, stride_w, 0);
+ }
+ void add_stride_h(int32_t stride_h)
+ {
+ fbb_.AddElement<int32_t>(Conv2DOptions::VT_STRIDE_H, stride_h, 0);
+ }
+ void add_fused_activation_function(circle::ActivationFunctionType fused_activation_function)
+ {
+ fbb_.AddElement<int8_t>(Conv2DOptions::VT_FUSED_ACTIVATION_FUNCTION,
+ static_cast<int8_t>(fused_activation_function), 0);
+ }
+ void add_dilation_w_factor(int32_t dilation_w_factor)
+ {
+ fbb_.AddElement<int32_t>(Conv2DOptions::VT_DILATION_W_FACTOR, dilation_w_factor, 1);
+ }
+ void add_dilation_h_factor(int32_t dilation_h_factor)
+ {
+ fbb_.AddElement<int32_t>(Conv2DOptions::VT_DILATION_H_FACTOR, dilation_h_factor, 1);
+ }
+ explicit Conv2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<Conv2DOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<Conv2DOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<Conv2DOptions> CreateConv2DOptions(
+ flatbuffers::FlatBufferBuilder &_fbb, circle::Padding padding = circle::Padding_SAME,
+ int32_t stride_w = 0, int32_t stride_h = 0,
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE,
+ int32_t dilation_w_factor = 1, int32_t dilation_h_factor = 1)
+{
+ Conv2DOptionsBuilder builder_(_fbb);
+ builder_.add_dilation_h_factor(dilation_h_factor);
+ builder_.add_dilation_w_factor(dilation_w_factor);
+ builder_.add_stride_h(stride_h);
+ builder_.add_stride_w(stride_w);
+ builder_.add_fused_activation_function(fused_activation_function);
+ builder_.add_padding(padding);
+ return builder_.Finish();
+}
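+
+// Illustrative sketch (not generated code): Conv2DOptions with unit strides, keeping
+// the documented defaults for activation and dilation. "fbb" is an assumed builder.
+//
+//   auto conv_opts = circle::CreateConv2DOptions(fbb, circle::Padding_SAME,
+//                                                /*stride_w=*/1, /*stride_h=*/1,
+//                                                circle::ActivationFunctionType_NONE);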
+
+flatbuffers::Offset<Conv2DOptions>
+CreateConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct Conv3DOptionsT : public flatbuffers::NativeTable
+{
+ typedef Conv3DOptions TableType;
+ circle::Padding padding = circle::Padding_SAME;
+ int32_t stride_d = 0;
+ int32_t stride_w = 0;
+ int32_t stride_h = 0;
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE;
+ int32_t dilation_d_factor = 1;
+ int32_t dilation_w_factor = 1;
+ int32_t dilation_h_factor = 1;
+};
+
+struct Conv3DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef Conv3DOptionsT NativeTableType;
+ typedef Conv3DOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_PADDING = 4,
+ VT_STRIDE_D = 6,
+ VT_STRIDE_W = 8,
+ VT_STRIDE_H = 10,
+ VT_FUSED_ACTIVATION_FUNCTION = 12,
+ VT_DILATION_D_FACTOR = 14,
+ VT_DILATION_W_FACTOR = 16,
+ VT_DILATION_H_FACTOR = 18
+ };
+ circle::Padding padding() const
+ {
+ return static_cast<circle::Padding>(GetField<int8_t>(VT_PADDING, 0));
+ }
+ int32_t stride_d() const { return GetField<int32_t>(VT_STRIDE_D, 0); }
+ int32_t stride_w() const { return GetField<int32_t>(VT_STRIDE_W, 0); }
+ int32_t stride_h() const { return GetField<int32_t>(VT_STRIDE_H, 0); }
+ circle::ActivationFunctionType fused_activation_function() const
+ {
+ return static_cast<circle::ActivationFunctionType>(
+ GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
+ }
+ int32_t dilation_d_factor() const { return GetField<int32_t>(VT_DILATION_D_FACTOR, 1); }
+ int32_t dilation_w_factor() const { return GetField<int32_t>(VT_DILATION_W_FACTOR, 1); }
+ int32_t dilation_h_factor() const { return GetField<int32_t>(VT_DILATION_H_FACTOR, 1); }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_PADDING) &&
+ VerifyField<int32_t>(verifier, VT_STRIDE_D) &&
+ VerifyField<int32_t>(verifier, VT_STRIDE_W) &&
+ VerifyField<int32_t>(verifier, VT_STRIDE_H) &&
+ VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
+ VerifyField<int32_t>(verifier, VT_DILATION_D_FACTOR) &&
+ VerifyField<int32_t>(verifier, VT_DILATION_W_FACTOR) &&
+ VerifyField<int32_t>(verifier, VT_DILATION_H_FACTOR) && verifier.EndTable();
+ }
+ Conv3DOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(Conv3DOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<Conv3DOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const Conv3DOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct Conv3DOptionsBuilder
+{
+ typedef Conv3DOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_padding(circle::Padding padding)
+ {
+ fbb_.AddElement<int8_t>(Conv3DOptions::VT_PADDING, static_cast<int8_t>(padding), 0);
+ }
+ void add_stride_d(int32_t stride_d)
+ {
+ fbb_.AddElement<int32_t>(Conv3DOptions::VT_STRIDE_D, stride_d, 0);
+ }
+ void add_stride_w(int32_t stride_w)
+ {
+ fbb_.AddElement<int32_t>(Conv3DOptions::VT_STRIDE_W, stride_w, 0);
+ }
+ void add_stride_h(int32_t stride_h)
+ {
+ fbb_.AddElement<int32_t>(Conv3DOptions::VT_STRIDE_H, stride_h, 0);
+ }
+ void add_fused_activation_function(circle::ActivationFunctionType fused_activation_function)
+ {
+ fbb_.AddElement<int8_t>(Conv3DOptions::VT_FUSED_ACTIVATION_FUNCTION,
+ static_cast<int8_t>(fused_activation_function), 0);
+ }
+ void add_dilation_d_factor(int32_t dilation_d_factor)
+ {
+ fbb_.AddElement<int32_t>(Conv3DOptions::VT_DILATION_D_FACTOR, dilation_d_factor, 1);
+ }
+ void add_dilation_w_factor(int32_t dilation_w_factor)
+ {
+ fbb_.AddElement<int32_t>(Conv3DOptions::VT_DILATION_W_FACTOR, dilation_w_factor, 1);
+ }
+ void add_dilation_h_factor(int32_t dilation_h_factor)
+ {
+ fbb_.AddElement<int32_t>(Conv3DOptions::VT_DILATION_H_FACTOR, dilation_h_factor, 1);
+ }
+ explicit Conv3DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<Conv3DOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<Conv3DOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<Conv3DOptions> CreateConv3DOptions(
+ flatbuffers::FlatBufferBuilder &_fbb, circle::Padding padding = circle::Padding_SAME,
+ int32_t stride_d = 0, int32_t stride_w = 0, int32_t stride_h = 0,
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE,
+ int32_t dilation_d_factor = 1, int32_t dilation_w_factor = 1, int32_t dilation_h_factor = 1)
+{
+ Conv3DOptionsBuilder builder_(_fbb);
+ builder_.add_dilation_h_factor(dilation_h_factor);
+ builder_.add_dilation_w_factor(dilation_w_factor);
+ builder_.add_dilation_d_factor(dilation_d_factor);
+ builder_.add_stride_h(stride_h);
+ builder_.add_stride_w(stride_w);
+ builder_.add_stride_d(stride_d);
+ builder_.add_fused_activation_function(fused_activation_function);
+ builder_.add_padding(padding);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<Conv3DOptions>
+CreateConv3DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Conv3DOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct Pool2DOptionsT : public flatbuffers::NativeTable
+{
+ typedef Pool2DOptions TableType;
+ circle::Padding padding = circle::Padding_SAME;
+ int32_t stride_w = 0;
+ int32_t stride_h = 0;
+ int32_t filter_width = 0;
+ int32_t filter_height = 0;
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE;
+};
+
+struct Pool2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef Pool2DOptionsT NativeTableType;
+ typedef Pool2DOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_PADDING = 4,
+ VT_STRIDE_W = 6,
+ VT_STRIDE_H = 8,
+ VT_FILTER_WIDTH = 10,
+ VT_FILTER_HEIGHT = 12,
+ VT_FUSED_ACTIVATION_FUNCTION = 14
+ };
+ circle::Padding padding() const
+ {
+ return static_cast<circle::Padding>(GetField<int8_t>(VT_PADDING, 0));
+ }
+ int32_t stride_w() const { return GetField<int32_t>(VT_STRIDE_W, 0); }
+ int32_t stride_h() const { return GetField<int32_t>(VT_STRIDE_H, 0); }
+ int32_t filter_width() const { return GetField<int32_t>(VT_FILTER_WIDTH, 0); }
+ int32_t filter_height() const { return GetField<int32_t>(VT_FILTER_HEIGHT, 0); }
+ circle::ActivationFunctionType fused_activation_function() const
+ {
+ return static_cast<circle::ActivationFunctionType>(
+ GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_PADDING) &&
+ VerifyField<int32_t>(verifier, VT_STRIDE_W) &&
+ VerifyField<int32_t>(verifier, VT_STRIDE_H) &&
+ VerifyField<int32_t>(verifier, VT_FILTER_WIDTH) &&
+ VerifyField<int32_t>(verifier, VT_FILTER_HEIGHT) &&
+ VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
+ }
+ Pool2DOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(Pool2DOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<Pool2DOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct Pool2DOptionsBuilder
+{
+ typedef Pool2DOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_padding(circle::Padding padding)
+ {
+ fbb_.AddElement<int8_t>(Pool2DOptions::VT_PADDING, static_cast<int8_t>(padding), 0);
+ }
+ void add_stride_w(int32_t stride_w)
+ {
+ fbb_.AddElement<int32_t>(Pool2DOptions::VT_STRIDE_W, stride_w, 0);
+ }
+ void add_stride_h(int32_t stride_h)
+ {
+ fbb_.AddElement<int32_t>(Pool2DOptions::VT_STRIDE_H, stride_h, 0);
+ }
+ void add_filter_width(int32_t filter_width)
+ {
+ fbb_.AddElement<int32_t>(Pool2DOptions::VT_FILTER_WIDTH, filter_width, 0);
+ }
+ void add_filter_height(int32_t filter_height)
+ {
+ fbb_.AddElement<int32_t>(Pool2DOptions::VT_FILTER_HEIGHT, filter_height, 0);
+ }
+ void add_fused_activation_function(circle::ActivationFunctionType fused_activation_function)
+ {
+ fbb_.AddElement<int8_t>(Pool2DOptions::VT_FUSED_ACTIVATION_FUNCTION,
+ static_cast<int8_t>(fused_activation_function), 0);
+ }
+ explicit Pool2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<Pool2DOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<Pool2DOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<Pool2DOptions> CreatePool2DOptions(
+ flatbuffers::FlatBufferBuilder &_fbb, circle::Padding padding = circle::Padding_SAME,
+ int32_t stride_w = 0, int32_t stride_h = 0, int32_t filter_width = 0, int32_t filter_height = 0,
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE)
+{
+ Pool2DOptionsBuilder builder_(_fbb);
+ builder_.add_filter_height(filter_height);
+ builder_.add_filter_width(filter_width);
+ builder_.add_stride_h(stride_h);
+ builder_.add_stride_w(stride_w);
+ builder_.add_fused_activation_function(fused_activation_function);
+ builder_.add_padding(padding);
+ return builder_.Finish();
+}
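+
+// Illustrative sketch (not generated code): a 2x2 pooling window with stride 2.
+// "fbb" is an assumed builder; padding and activation keep the documented defaults.
+//
+//   auto pool_opts = circle::CreatePool2DOptions(fbb, circle::Padding_SAME,
+//                                                /*stride_w=*/2, /*stride_h=*/2,
+//                                                /*filter_width=*/2, /*filter_height=*/2);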
+
+flatbuffers::Offset<Pool2DOptions>
+CreatePool2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct DepthwiseConv2DOptionsT : public flatbuffers::NativeTable
+{
+ typedef DepthwiseConv2DOptions TableType;
+ circle::Padding padding = circle::Padding_SAME;
+ int32_t stride_w = 0;
+ int32_t stride_h = 0;
+ int32_t depth_multiplier = 0;
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE;
+ int32_t dilation_w_factor = 1;
+ int32_t dilation_h_factor = 1;
+};
+
+struct DepthwiseConv2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef DepthwiseConv2DOptionsT NativeTableType;
+ typedef DepthwiseConv2DOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_PADDING = 4,
+ VT_STRIDE_W = 6,
+ VT_STRIDE_H = 8,
+ VT_DEPTH_MULTIPLIER = 10,
+ VT_FUSED_ACTIVATION_FUNCTION = 12,
+ VT_DILATION_W_FACTOR = 14,
+ VT_DILATION_H_FACTOR = 16
+ };
+ circle::Padding padding() const
+ {
+ return static_cast<circle::Padding>(GetField<int8_t>(VT_PADDING, 0));
+ }
+ int32_t stride_w() const { return GetField<int32_t>(VT_STRIDE_W, 0); }
+ int32_t stride_h() const { return GetField<int32_t>(VT_STRIDE_H, 0); }
+ int32_t depth_multiplier() const { return GetField<int32_t>(VT_DEPTH_MULTIPLIER, 0); }
+ circle::ActivationFunctionType fused_activation_function() const
+ {
+ return static_cast<circle::ActivationFunctionType>(
+ GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
+ }
+ int32_t dilation_w_factor() const { return GetField<int32_t>(VT_DILATION_W_FACTOR, 1); }
+ int32_t dilation_h_factor() const { return GetField<int32_t>(VT_DILATION_H_FACTOR, 1); }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_PADDING) &&
+ VerifyField<int32_t>(verifier, VT_STRIDE_W) &&
+ VerifyField<int32_t>(verifier, VT_STRIDE_H) &&
+ VerifyField<int32_t>(verifier, VT_DEPTH_MULTIPLIER) &&
+ VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
+ VerifyField<int32_t>(verifier, VT_DILATION_W_FACTOR) &&
+ VerifyField<int32_t>(verifier, VT_DILATION_H_FACTOR) && verifier.EndTable();
+ }
+ DepthwiseConv2DOptionsT *
+ UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(DepthwiseConv2DOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<DepthwiseConv2DOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const DepthwiseConv2DOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct DepthwiseConv2DOptionsBuilder
+{
+ typedef DepthwiseConv2DOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_padding(circle::Padding padding)
+ {
+ fbb_.AddElement<int8_t>(DepthwiseConv2DOptions::VT_PADDING, static_cast<int8_t>(padding), 0);
+ }
+ void add_stride_w(int32_t stride_w)
+ {
+ fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_STRIDE_W, stride_w, 0);
+ }
+ void add_stride_h(int32_t stride_h)
+ {
+ fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_STRIDE_H, stride_h, 0);
+ }
+ void add_depth_multiplier(int32_t depth_multiplier)
+ {
+ fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_DEPTH_MULTIPLIER, depth_multiplier, 0);
+ }
+ void add_fused_activation_function(circle::ActivationFunctionType fused_activation_function)
+ {
+ fbb_.AddElement<int8_t>(DepthwiseConv2DOptions::VT_FUSED_ACTIVATION_FUNCTION,
+ static_cast<int8_t>(fused_activation_function), 0);
+ }
+ void add_dilation_w_factor(int32_t dilation_w_factor)
+ {
+ fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_DILATION_W_FACTOR, dilation_w_factor, 1);
+ }
+ void add_dilation_h_factor(int32_t dilation_h_factor)
+ {
+ fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_DILATION_H_FACTOR, dilation_h_factor, 1);
+ }
+ explicit DepthwiseConv2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<DepthwiseConv2DOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<DepthwiseConv2DOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<DepthwiseConv2DOptions> CreateDepthwiseConv2DOptions(
+ flatbuffers::FlatBufferBuilder &_fbb, circle::Padding padding = circle::Padding_SAME,
+ int32_t stride_w = 0, int32_t stride_h = 0, int32_t depth_multiplier = 0,
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE,
+ int32_t dilation_w_factor = 1, int32_t dilation_h_factor = 1)
+{
+ DepthwiseConv2DOptionsBuilder builder_(_fbb);
+ builder_.add_dilation_h_factor(dilation_h_factor);
+ builder_.add_dilation_w_factor(dilation_w_factor);
+ builder_.add_depth_multiplier(depth_multiplier);
+ builder_.add_stride_h(stride_h);
+ builder_.add_stride_w(stride_w);
+ builder_.add_fused_activation_function(fused_activation_function);
+ builder_.add_padding(padding);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<DepthwiseConv2DOptions>
+CreateDepthwiseConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ const DepthwiseConv2DOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct ConcatEmbeddingsOptionsT : public flatbuffers::NativeTable
+{
+ typedef ConcatEmbeddingsOptions TableType;
+ int32_t num_channels = 0;
+ std::vector<int32_t> num_columns_per_channel{};
+ std::vector<int32_t> embedding_dim_per_channel{};
+};
+
+struct ConcatEmbeddingsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef ConcatEmbeddingsOptionsT NativeTableType;
+ typedef ConcatEmbeddingsOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_NUM_CHANNELS = 4,
+ VT_NUM_COLUMNS_PER_CHANNEL = 6,
+ VT_EMBEDDING_DIM_PER_CHANNEL = 8
+ };
+ int32_t num_channels() const { return GetField<int32_t>(VT_NUM_CHANNELS, 0); }
+ const flatbuffers::Vector<int32_t> *num_columns_per_channel() const
+ {
+ return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_NUM_COLUMNS_PER_CHANNEL);
+ }
+ const flatbuffers::Vector<int32_t> *embedding_dim_per_channel() const
+ {
+ return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_EMBEDDING_DIM_PER_CHANNEL);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_NUM_CHANNELS) &&
+ VerifyOffset(verifier, VT_NUM_COLUMNS_PER_CHANNEL) &&
+ verifier.VerifyVector(num_columns_per_channel()) &&
+ VerifyOffset(verifier, VT_EMBEDDING_DIM_PER_CHANNEL) &&
+ verifier.VerifyVector(embedding_dim_per_channel()) && verifier.EndTable();
+ }
+ ConcatEmbeddingsOptionsT *
+ UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(ConcatEmbeddingsOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<ConcatEmbeddingsOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const ConcatEmbeddingsOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct ConcatEmbeddingsOptionsBuilder
+{
+ typedef ConcatEmbeddingsOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_num_channels(int32_t num_channels)
+ {
+ fbb_.AddElement<int32_t>(ConcatEmbeddingsOptions::VT_NUM_CHANNELS, num_channels, 0);
+ }
+ void add_num_columns_per_channel(
+ flatbuffers::Offset<flatbuffers::Vector<int32_t>> num_columns_per_channel)
+ {
+ fbb_.AddOffset(ConcatEmbeddingsOptions::VT_NUM_COLUMNS_PER_CHANNEL, num_columns_per_channel);
+ }
+ void add_embedding_dim_per_channel(
+ flatbuffers::Offset<flatbuffers::Vector<int32_t>> embedding_dim_per_channel)
+ {
+ fbb_.AddOffset(ConcatEmbeddingsOptions::VT_EMBEDDING_DIM_PER_CHANNEL,
+ embedding_dim_per_channel);
+ }
+ explicit ConcatEmbeddingsOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<ConcatEmbeddingsOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<ConcatEmbeddingsOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<ConcatEmbeddingsOptions> CreateConcatEmbeddingsOptions(
+ flatbuffers::FlatBufferBuilder &_fbb, int32_t num_channels = 0,
+ flatbuffers::Offset<flatbuffers::Vector<int32_t>> num_columns_per_channel = 0,
+ flatbuffers::Offset<flatbuffers::Vector<int32_t>> embedding_dim_per_channel = 0)
+{
+ ConcatEmbeddingsOptionsBuilder builder_(_fbb);
+ builder_.add_embedding_dim_per_channel(embedding_dim_per_channel);
+ builder_.add_num_columns_per_channel(num_columns_per_channel);
+ builder_.add_num_channels(num_channels);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<ConcatEmbeddingsOptions>
+CreateConcatEmbeddingsOptionsDirect(flatbuffers::FlatBufferBuilder &_fbb, int32_t num_channels = 0,
+ const std::vector<int32_t> *num_columns_per_channel = nullptr,
+ const std::vector<int32_t> *embedding_dim_per_channel = nullptr)
+{
+ auto num_columns_per_channel__ =
+ num_columns_per_channel ? _fbb.CreateVector<int32_t>(*num_columns_per_channel) : 0;
+ auto embedding_dim_per_channel__ =
+ embedding_dim_per_channel ? _fbb.CreateVector<int32_t>(*embedding_dim_per_channel) : 0;
+ return circle::CreateConcatEmbeddingsOptions(_fbb, num_channels, num_columns_per_channel__,
+ embedding_dim_per_channel__);
+}
+
+flatbuffers::Offset<ConcatEmbeddingsOptions>
+CreateConcatEmbeddingsOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ const ConcatEmbeddingsOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct LSHProjectionOptionsT : public flatbuffers::NativeTable
+{
+ typedef LSHProjectionOptions TableType;
+ circle::LSHProjectionType type = circle::LSHProjectionType_UNKNOWN;
+};
+
+struct LSHProjectionOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef LSHProjectionOptionsT NativeTableType;
+ typedef LSHProjectionOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_TYPE = 4
+ };
+ circle::LSHProjectionType type() const
+ {
+ return static_cast<circle::LSHProjectionType>(GetField<int8_t>(VT_TYPE, 0));
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_TYPE) &&
+ verifier.EndTable();
+ }
+ LSHProjectionOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(LSHProjectionOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<LSHProjectionOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct LSHProjectionOptionsBuilder
+{
+ typedef LSHProjectionOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_type(circle::LSHProjectionType type)
+ {
+ fbb_.AddElement<int8_t>(LSHProjectionOptions::VT_TYPE, static_cast<int8_t>(type), 0);
+ }
+ explicit LSHProjectionOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<LSHProjectionOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<LSHProjectionOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<LSHProjectionOptions>
+CreateLSHProjectionOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ circle::LSHProjectionType type = circle::LSHProjectionType_UNKNOWN)
+{
+ LSHProjectionOptionsBuilder builder_(_fbb);
+ builder_.add_type(type);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<LSHProjectionOptions>
+CreateLSHProjectionOptions(flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct SVDFOptionsT : public flatbuffers::NativeTable
+{
+ typedef SVDFOptions TableType;
+ int32_t rank = 0;
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE;
+ bool asymmetric_quantize_inputs = false;
+};
+
+struct SVDFOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef SVDFOptionsT NativeTableType;
+ typedef SVDFOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_RANK = 4,
+ VT_FUSED_ACTIVATION_FUNCTION = 6,
+ VT_ASYMMETRIC_QUANTIZE_INPUTS = 8
+ };
+ int32_t rank() const { return GetField<int32_t>(VT_RANK, 0); }
+ circle::ActivationFunctionType fused_activation_function() const
+ {
+ return static_cast<circle::ActivationFunctionType>(
+ GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
+ }
+ bool asymmetric_quantize_inputs() const
+ {
+ return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_RANK) &&
+ VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
+ VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable();
+ }
+ SVDFOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(SVDFOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<SVDFOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct SVDFOptionsBuilder
+{
+ typedef SVDFOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_rank(int32_t rank) { fbb_.AddElement<int32_t>(SVDFOptions::VT_RANK, rank, 0); }
+ void add_fused_activation_function(circle::ActivationFunctionType fused_activation_function)
+ {
+ fbb_.AddElement<int8_t>(SVDFOptions::VT_FUSED_ACTIVATION_FUNCTION,
+ static_cast<int8_t>(fused_activation_function), 0);
+ }
+ void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs)
+ {
+ fbb_.AddElement<uint8_t>(SVDFOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS,
+ static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
+ }
+ explicit SVDFOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<SVDFOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<SVDFOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<SVDFOptions> CreateSVDFOptions(
+ flatbuffers::FlatBufferBuilder &_fbb, int32_t rank = 0,
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE,
+ bool asymmetric_quantize_inputs = false)
+{
+ SVDFOptionsBuilder builder_(_fbb);
+ builder_.add_rank(rank);
+ builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs);
+ builder_.add_fused_activation_function(fused_activation_function);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<SVDFOptions>
+CreateSVDFOptions(flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct RNNOptionsT : public flatbuffers::NativeTable
+{
+ typedef RNNOptions TableType;
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE;
+ bool asymmetric_quantize_inputs = false;
+};
+
+struct RNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef RNNOptionsT NativeTableType;
+ typedef RNNOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_FUSED_ACTIVATION_FUNCTION = 4,
+ VT_ASYMMETRIC_QUANTIZE_INPUTS = 6
+ };
+ circle::ActivationFunctionType fused_activation_function() const
+ {
+ return static_cast<circle::ActivationFunctionType>(
+ GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
+ }
+ bool asymmetric_quantize_inputs() const
+ {
+ return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) &&
+ VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
+ VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable();
+ }
+ RNNOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(RNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<RNNOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct RNNOptionsBuilder
+{
+ typedef RNNOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_fused_activation_function(circle::ActivationFunctionType fused_activation_function)
+ {
+ fbb_.AddElement<int8_t>(RNNOptions::VT_FUSED_ACTIVATION_FUNCTION,
+ static_cast<int8_t>(fused_activation_function), 0);
+ }
+ void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs)
+ {
+ fbb_.AddElement<uint8_t>(RNNOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS,
+ static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
+ }
+ explicit RNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<RNNOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<RNNOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<RNNOptions> CreateRNNOptions(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE,
+ bool asymmetric_quantize_inputs = false)
+{
+ RNNOptionsBuilder builder_(_fbb);
+ builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs);
+ builder_.add_fused_activation_function(fused_activation_function);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<RNNOptions>
+CreateRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct SequenceRNNOptionsT : public flatbuffers::NativeTable
+{
+ typedef SequenceRNNOptions TableType;
+ bool time_major = false;
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE;
+ bool asymmetric_quantize_inputs = false;
+};
+
+struct SequenceRNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef SequenceRNNOptionsT NativeTableType;
+ typedef SequenceRNNOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_TIME_MAJOR = 4,
+ VT_FUSED_ACTIVATION_FUNCTION = 6,
+ VT_ASYMMETRIC_QUANTIZE_INPUTS = 8
+ };
+ bool time_major() const { return GetField<uint8_t>(VT_TIME_MAJOR, 0) != 0; }
+ circle::ActivationFunctionType fused_activation_function() const
+ {
+ return static_cast<circle::ActivationFunctionType>(
+ GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
+ }
+ bool asymmetric_quantize_inputs() const
+ {
+ return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_TIME_MAJOR) &&
+ VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
+ VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable();
+ }
+ SequenceRNNOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(SequenceRNNOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<SequenceRNNOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const SequenceRNNOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct SequenceRNNOptionsBuilder
+{
+ typedef SequenceRNNOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_time_major(bool time_major)
+ {
+ fbb_.AddElement<uint8_t>(SequenceRNNOptions::VT_TIME_MAJOR, static_cast<uint8_t>(time_major),
+ 0);
+ }
+ void add_fused_activation_function(circle::ActivationFunctionType fused_activation_function)
+ {
+ fbb_.AddElement<int8_t>(SequenceRNNOptions::VT_FUSED_ACTIVATION_FUNCTION,
+ static_cast<int8_t>(fused_activation_function), 0);
+ }
+ void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs)
+ {
+ fbb_.AddElement<uint8_t>(SequenceRNNOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS,
+ static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
+ }
+ explicit SequenceRNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<SequenceRNNOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<SequenceRNNOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<SequenceRNNOptions> CreateSequenceRNNOptions(
+ flatbuffers::FlatBufferBuilder &_fbb, bool time_major = false,
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE,
+ bool asymmetric_quantize_inputs = false)
+{
+ SequenceRNNOptionsBuilder builder_(_fbb);
+ builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs);
+ builder_.add_fused_activation_function(fused_activation_function);
+ builder_.add_time_major(time_major);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<SequenceRNNOptions>
+CreateSequenceRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const SequenceRNNOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct BidirectionalSequenceRNNOptionsT : public flatbuffers::NativeTable
+{
+ typedef BidirectionalSequenceRNNOptions TableType;
+ bool time_major = false;
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE;
+ bool merge_outputs = false;
+ bool asymmetric_quantize_inputs = false;
+};
+
+struct BidirectionalSequenceRNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef BidirectionalSequenceRNNOptionsT NativeTableType;
+ typedef BidirectionalSequenceRNNOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_TIME_MAJOR = 4,
+ VT_FUSED_ACTIVATION_FUNCTION = 6,
+ VT_MERGE_OUTPUTS = 8,
+ VT_ASYMMETRIC_QUANTIZE_INPUTS = 10
+ };
+ bool time_major() const { return GetField<uint8_t>(VT_TIME_MAJOR, 0) != 0; }
+ circle::ActivationFunctionType fused_activation_function() const
+ {
+ return static_cast<circle::ActivationFunctionType>(
+ GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
+ }
+ bool merge_outputs() const { return GetField<uint8_t>(VT_MERGE_OUTPUTS, 0) != 0; }
+ bool asymmetric_quantize_inputs() const
+ {
+ return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_TIME_MAJOR) &&
+ VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
+ VerifyField<uint8_t>(verifier, VT_MERGE_OUTPUTS) &&
+ VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable();
+ }
+ BidirectionalSequenceRNNOptionsT *
+ UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(BidirectionalSequenceRNNOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<BidirectionalSequenceRNNOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceRNNOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct BidirectionalSequenceRNNOptionsBuilder
+{
+ typedef BidirectionalSequenceRNNOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_time_major(bool time_major)
+ {
+ fbb_.AddElement<uint8_t>(BidirectionalSequenceRNNOptions::VT_TIME_MAJOR,
+ static_cast<uint8_t>(time_major), 0);
+ }
+ void add_fused_activation_function(circle::ActivationFunctionType fused_activation_function)
+ {
+ fbb_.AddElement<int8_t>(BidirectionalSequenceRNNOptions::VT_FUSED_ACTIVATION_FUNCTION,
+ static_cast<int8_t>(fused_activation_function), 0);
+ }
+ void add_merge_outputs(bool merge_outputs)
+ {
+ fbb_.AddElement<uint8_t>(BidirectionalSequenceRNNOptions::VT_MERGE_OUTPUTS,
+ static_cast<uint8_t>(merge_outputs), 0);
+ }
+ void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs)
+ {
+ fbb_.AddElement<uint8_t>(BidirectionalSequenceRNNOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS,
+ static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
+ }
+ explicit BidirectionalSequenceRNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<BidirectionalSequenceRNNOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<BidirectionalSequenceRNNOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<BidirectionalSequenceRNNOptions> CreateBidirectionalSequenceRNNOptions(
+ flatbuffers::FlatBufferBuilder &_fbb, bool time_major = false,
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE,
+ bool merge_outputs = false, bool asymmetric_quantize_inputs = false)
+{
+ BidirectionalSequenceRNNOptionsBuilder builder_(_fbb);
+ builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs);
+ builder_.add_merge_outputs(merge_outputs);
+ builder_.add_fused_activation_function(fused_activation_function);
+ builder_.add_time_major(time_major);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<BidirectionalSequenceRNNOptions>
+CreateBidirectionalSequenceRNNOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ const BidirectionalSequenceRNNOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct FullyConnectedOptionsT : public flatbuffers::NativeTable
+{
+ typedef FullyConnectedOptions TableType;
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE;
+ circle::FullyConnectedOptionsWeightsFormat weights_format =
+ circle::FullyConnectedOptionsWeightsFormat_DEFAULT;
+ bool keep_num_dims = false;
+ bool asymmetric_quantize_inputs = false;
+};
+
+struct FullyConnectedOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef FullyConnectedOptionsT NativeTableType;
+ typedef FullyConnectedOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_FUSED_ACTIVATION_FUNCTION = 4,
+ VT_WEIGHTS_FORMAT = 6,
+ VT_KEEP_NUM_DIMS = 8,
+ VT_ASYMMETRIC_QUANTIZE_INPUTS = 10
+ };
+ circle::ActivationFunctionType fused_activation_function() const
+ {
+ return static_cast<circle::ActivationFunctionType>(
+ GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
+ }
+ circle::FullyConnectedOptionsWeightsFormat weights_format() const
+ {
+ return static_cast<circle::FullyConnectedOptionsWeightsFormat>(
+ GetField<int8_t>(VT_WEIGHTS_FORMAT, 0));
+ }
+ bool keep_num_dims() const { return GetField<uint8_t>(VT_KEEP_NUM_DIMS, 0) != 0; }
+ bool asymmetric_quantize_inputs() const
+ {
+ return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) &&
+ VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
+ VerifyField<int8_t>(verifier, VT_WEIGHTS_FORMAT) &&
+ VerifyField<uint8_t>(verifier, VT_KEEP_NUM_DIMS) &&
+ VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable();
+ }
+ FullyConnectedOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(FullyConnectedOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<FullyConnectedOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct FullyConnectedOptionsBuilder
+{
+ typedef FullyConnectedOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_fused_activation_function(circle::ActivationFunctionType fused_activation_function)
+ {
+ fbb_.AddElement<int8_t>(FullyConnectedOptions::VT_FUSED_ACTIVATION_FUNCTION,
+ static_cast<int8_t>(fused_activation_function), 0);
+ }
+ void add_weights_format(circle::FullyConnectedOptionsWeightsFormat weights_format)
+ {
+ fbb_.AddElement<int8_t>(FullyConnectedOptions::VT_WEIGHTS_FORMAT,
+ static_cast<int8_t>(weights_format), 0);
+ }
+ void add_keep_num_dims(bool keep_num_dims)
+ {
+ fbb_.AddElement<uint8_t>(FullyConnectedOptions::VT_KEEP_NUM_DIMS,
+ static_cast<uint8_t>(keep_num_dims), 0);
+ }
+ void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs)
+ {
+ fbb_.AddElement<uint8_t>(FullyConnectedOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS,
+ static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
+ }
+ explicit FullyConnectedOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<FullyConnectedOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<FullyConnectedOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<FullyConnectedOptions> CreateFullyConnectedOptions(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE,
+ circle::FullyConnectedOptionsWeightsFormat weights_format =
+ circle::FullyConnectedOptionsWeightsFormat_DEFAULT,
+ bool keep_num_dims = false, bool asymmetric_quantize_inputs = false)
+{
+ FullyConnectedOptionsBuilder builder_(_fbb);
+ builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs);
+ builder_.add_keep_num_dims(keep_num_dims);
+ builder_.add_weights_format(weights_format);
+ builder_.add_fused_activation_function(fused_activation_function);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<FullyConnectedOptions>
+CreateFullyConnectedOptions(flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
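+// Both the enum-typed fields (fused_activation_function, weights_format) and
+// the bool flags are stored as single bytes, so a fully specified
+// FullyConnectedOptions table stays small.  Example with non-default values,
+// reusing the `fbb` builder assumed in the sketch above:
+//   auto fc_opts = circle::CreateFullyConnectedOptions(
+//     fbb, circle::ActivationFunctionType_RELU,
+//     circle::FullyConnectedOptionsWeightsFormat_DEFAULT,
+//     /*keep_num_dims=*/true);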
+
+struct SoftmaxOptionsT : public flatbuffers::NativeTable
+{
+ typedef SoftmaxOptions TableType;
+ float beta = 0.0f;
+};
+
+struct SoftmaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef SoftmaxOptionsT NativeTableType;
+ typedef SoftmaxOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_BETA = 4
+ };
+ float beta() const { return GetField<float>(VT_BETA, 0.0f); }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<float>(verifier, VT_BETA) &&
+ verifier.EndTable();
+ }
+ SoftmaxOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(SoftmaxOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<SoftmaxOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct SoftmaxOptionsBuilder
+{
+ typedef SoftmaxOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_beta(float beta) { fbb_.AddElement<float>(SoftmaxOptions::VT_BETA, beta, 0.0f); }
+ explicit SoftmaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<SoftmaxOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<SoftmaxOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<SoftmaxOptions>
+CreateSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, float beta = 0.0f)
+{
+ SoftmaxOptionsBuilder builder_(_fbb);
+ builder_.add_beta(beta);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<SoftmaxOptions>
+CreateSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
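+// The Create...Options helpers are thin wrappers over the builders; the
+// builder can also be driven directly when fields are set conditionally.
+// For some float `beta`, this sketch is equivalent to
+// CreateSoftmaxOptions(fbb, beta):
+//   circle::SoftmaxOptionsBuilder softmax_builder(fbb);
+//   softmax_builder.add_beta(beta);
+//   auto softmax_opts = softmax_builder.Finish();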
+
+struct ConcatenationOptionsT : public flatbuffers::NativeTable
+{
+ typedef ConcatenationOptions TableType;
+ int32_t axis = 0;
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE;
+};
+
+struct ConcatenationOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef ConcatenationOptionsT NativeTableType;
+ typedef ConcatenationOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_AXIS = 4,
+ VT_FUSED_ACTIVATION_FUNCTION = 6
+ };
+ int32_t axis() const { return GetField<int32_t>(VT_AXIS, 0); }
+ circle::ActivationFunctionType fused_activation_function() const
+ {
+ return static_cast<circle::ActivationFunctionType>(
+ GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_AXIS) &&
+ VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
+ }
+ ConcatenationOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(ConcatenationOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<ConcatenationOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct ConcatenationOptionsBuilder
+{
+ typedef ConcatenationOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_axis(int32_t axis) { fbb_.AddElement<int32_t>(ConcatenationOptions::VT_AXIS, axis, 0); }
+ void add_fused_activation_function(circle::ActivationFunctionType fused_activation_function)
+ {
+ fbb_.AddElement<int8_t>(ConcatenationOptions::VT_FUSED_ACTIVATION_FUNCTION,
+ static_cast<int8_t>(fused_activation_function), 0);
+ }
+ explicit ConcatenationOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<ConcatenationOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<ConcatenationOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<ConcatenationOptions> CreateConcatenationOptions(
+ flatbuffers::FlatBufferBuilder &_fbb, int32_t axis = 0,
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE)
+{
+ ConcatenationOptionsBuilder builder_(_fbb);
+ builder_.add_axis(axis);
+ builder_.add_fused_activation_function(fused_activation_function);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<ConcatenationOptions>
+CreateConcatenationOptions(flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct AddOptionsT : public flatbuffers::NativeTable
+{
+ typedef AddOptions TableType;
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE;
+ bool pot_scale_int16 = true;
+};
+
+struct AddOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef AddOptionsT NativeTableType;
+ typedef AddOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_FUSED_ACTIVATION_FUNCTION = 4,
+ VT_POT_SCALE_INT16 = 6
+ };
+ circle::ActivationFunctionType fused_activation_function() const
+ {
+ return static_cast<circle::ActivationFunctionType>(
+ GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
+ }
+ bool pot_scale_int16() const { return GetField<uint8_t>(VT_POT_SCALE_INT16, 1) != 0; }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) &&
+ VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
+ VerifyField<uint8_t>(verifier, VT_POT_SCALE_INT16) && verifier.EndTable();
+ }
+ AddOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(AddOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<AddOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct AddOptionsBuilder
+{
+ typedef AddOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_fused_activation_function(circle::ActivationFunctionType fused_activation_function)
+ {
+ fbb_.AddElement<int8_t>(AddOptions::VT_FUSED_ACTIVATION_FUNCTION,
+ static_cast<int8_t>(fused_activation_function), 0);
+ }
+ void add_pot_scale_int16(bool pot_scale_int16)
+ {
+ fbb_.AddElement<uint8_t>(AddOptions::VT_POT_SCALE_INT16, static_cast<uint8_t>(pot_scale_int16),
+ 1);
+ }
+ explicit AddOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<AddOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<AddOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<AddOptions> CreateAddOptions(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE,
+ bool pot_scale_int16 = true)
+{
+ AddOptionsBuilder builder_(_fbb);
+ builder_.add_pot_scale_int16(pot_scale_int16);
+ builder_.add_fused_activation_function(fused_activation_function);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<AddOptions>
+CreateAddOptions(flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
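+// pot_scale_int16 defaults to true: AddElement() skips writing a scalar whose
+// value equals the schema default (the trailing `1`) unless
+// FlatBufferBuilder::ForceDefaults(true) is enabled, and readers recover the
+// value from the default in GetField<uint8_t>(VT_POT_SCALE_INT16, 1).  Only
+// passing false actually stores the field, e.g.:
+//   auto add_opts = circle::CreateAddOptions(
+//     fbb, circle::ActivationFunctionType_NONE, /*pot_scale_int16=*/false);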
+
+struct MulOptionsT : public flatbuffers::NativeTable
+{
+ typedef MulOptions TableType;
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE;
+};
+
+struct MulOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef MulOptionsT NativeTableType;
+ typedef MulOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_FUSED_ACTIVATION_FUNCTION = 4
+ };
+ circle::ActivationFunctionType fused_activation_function() const
+ {
+ return static_cast<circle::ActivationFunctionType>(
+ GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) &&
+ VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
+ }
+ MulOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(MulOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<MulOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct MulOptionsBuilder
+{
+ typedef MulOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_fused_activation_function(circle::ActivationFunctionType fused_activation_function)
+ {
+ fbb_.AddElement<int8_t>(MulOptions::VT_FUSED_ACTIVATION_FUNCTION,
+ static_cast<int8_t>(fused_activation_function), 0);
+ }
+ explicit MulOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<MulOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<MulOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<MulOptions> CreateMulOptions(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE)
+{
+ MulOptionsBuilder builder_(_fbb);
+ builder_.add_fused_activation_function(fused_activation_function);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<MulOptions>
+CreateMulOptions(flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct L2NormOptionsT : public flatbuffers::NativeTable
+{
+ typedef L2NormOptions TableType;
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE;
+};
+
+struct L2NormOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef L2NormOptionsT NativeTableType;
+ typedef L2NormOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_FUSED_ACTIVATION_FUNCTION = 4
+ };
+ circle::ActivationFunctionType fused_activation_function() const
+ {
+ return static_cast<circle::ActivationFunctionType>(
+ GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) &&
+ VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
+ }
+ L2NormOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(L2NormOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<L2NormOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct L2NormOptionsBuilder
+{
+ typedef L2NormOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_fused_activation_function(circle::ActivationFunctionType fused_activation_function)
+ {
+ fbb_.AddElement<int8_t>(L2NormOptions::VT_FUSED_ACTIVATION_FUNCTION,
+ static_cast<int8_t>(fused_activation_function), 0);
+ }
+ explicit L2NormOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<L2NormOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<L2NormOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<L2NormOptions> CreateL2NormOptions(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE)
+{
+ L2NormOptionsBuilder builder_(_fbb);
+ builder_.add_fused_activation_function(fused_activation_function);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<L2NormOptions>
+CreateL2NormOptions(flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct LocalResponseNormalizationOptionsT : public flatbuffers::NativeTable
+{
+ typedef LocalResponseNormalizationOptions TableType;
+ int32_t radius = 0;
+ float bias = 0.0f;
+ float alpha = 0.0f;
+ float beta = 0.0f;
+};
+
+struct LocalResponseNormalizationOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef LocalResponseNormalizationOptionsT NativeTableType;
+ typedef LocalResponseNormalizationOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_RADIUS = 4,
+ VT_BIAS = 6,
+ VT_ALPHA = 8,
+ VT_BETA = 10
+ };
+ int32_t radius() const { return GetField<int32_t>(VT_RADIUS, 0); }
+ float bias() const { return GetField<float>(VT_BIAS, 0.0f); }
+ float alpha() const { return GetField<float>(VT_ALPHA, 0.0f); }
+ float beta() const { return GetField<float>(VT_BETA, 0.0f); }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_RADIUS) &&
+ VerifyField<float>(verifier, VT_BIAS) && VerifyField<float>(verifier, VT_ALPHA) &&
+ VerifyField<float>(verifier, VT_BETA) && verifier.EndTable();
+ }
+ LocalResponseNormalizationOptionsT *
+ UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(LocalResponseNormalizationOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<LocalResponseNormalizationOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct LocalResponseNormalizationOptionsBuilder
+{
+ typedef LocalResponseNormalizationOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_radius(int32_t radius)
+ {
+ fbb_.AddElement<int32_t>(LocalResponseNormalizationOptions::VT_RADIUS, radius, 0);
+ }
+ void add_bias(float bias)
+ {
+ fbb_.AddElement<float>(LocalResponseNormalizationOptions::VT_BIAS, bias, 0.0f);
+ }
+ void add_alpha(float alpha)
+ {
+ fbb_.AddElement<float>(LocalResponseNormalizationOptions::VT_ALPHA, alpha, 0.0f);
+ }
+ void add_beta(float beta)
+ {
+ fbb_.AddElement<float>(LocalResponseNormalizationOptions::VT_BETA, beta, 0.0f);
+ }
+ explicit LocalResponseNormalizationOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<LocalResponseNormalizationOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<LocalResponseNormalizationOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<LocalResponseNormalizationOptions>
+CreateLocalResponseNormalizationOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t radius = 0,
+ float bias = 0.0f, float alpha = 0.0f, float beta = 0.0f)
+{
+ LocalResponseNormalizationOptionsBuilder builder_(_fbb);
+ builder_.add_beta(beta);
+ builder_.add_alpha(alpha);
+ builder_.add_bias(bias);
+ builder_.add_radius(radius);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<LocalResponseNormalizationOptions> CreateLocalResponseNormalizationOptions(
+ flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct LSTMOptionsT : public flatbuffers::NativeTable
+{
+ typedef LSTMOptions TableType;
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE;
+ float cell_clip = 0.0f;
+ float proj_clip = 0.0f;
+ circle::LSTMKernelType kernel_type = circle::LSTMKernelType_FULL;
+ bool asymmetric_quantize_inputs = false;
+};
+
+struct LSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef LSTMOptionsT NativeTableType;
+ typedef LSTMOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_FUSED_ACTIVATION_FUNCTION = 4,
+ VT_CELL_CLIP = 6,
+ VT_PROJ_CLIP = 8,
+ VT_KERNEL_TYPE = 10,
+ VT_ASYMMETRIC_QUANTIZE_INPUTS = 12
+ };
+ circle::ActivationFunctionType fused_activation_function() const
+ {
+ return static_cast<circle::ActivationFunctionType>(
+ GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
+ }
+ float cell_clip() const { return GetField<float>(VT_CELL_CLIP, 0.0f); }
+ float proj_clip() const { return GetField<float>(VT_PROJ_CLIP, 0.0f); }
+ circle::LSTMKernelType kernel_type() const
+ {
+ return static_cast<circle::LSTMKernelType>(GetField<int8_t>(VT_KERNEL_TYPE, 0));
+ }
+ bool asymmetric_quantize_inputs() const
+ {
+ return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) &&
+ VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
+ VerifyField<float>(verifier, VT_CELL_CLIP) &&
+ VerifyField<float>(verifier, VT_PROJ_CLIP) &&
+ VerifyField<int8_t>(verifier, VT_KERNEL_TYPE) &&
+ VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable();
+ }
+ LSTMOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(LSTMOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<LSTMOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct LSTMOptionsBuilder
+{
+ typedef LSTMOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_fused_activation_function(circle::ActivationFunctionType fused_activation_function)
+ {
+ fbb_.AddElement<int8_t>(LSTMOptions::VT_FUSED_ACTIVATION_FUNCTION,
+ static_cast<int8_t>(fused_activation_function), 0);
+ }
+ void add_cell_clip(float cell_clip)
+ {
+ fbb_.AddElement<float>(LSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f);
+ }
+ void add_proj_clip(float proj_clip)
+ {
+ fbb_.AddElement<float>(LSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f);
+ }
+ void add_kernel_type(circle::LSTMKernelType kernel_type)
+ {
+ fbb_.AddElement<int8_t>(LSTMOptions::VT_KERNEL_TYPE, static_cast<int8_t>(kernel_type), 0);
+ }
+ void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs)
+ {
+ fbb_.AddElement<uint8_t>(LSTMOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS,
+ static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
+ }
+ explicit LSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<LSTMOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<LSTMOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<LSTMOptions> CreateLSTMOptions(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE,
+ float cell_clip = 0.0f, float proj_clip = 0.0f,
+ circle::LSTMKernelType kernel_type = circle::LSTMKernelType_FULL,
+ bool asymmetric_quantize_inputs = false)
+{
+ LSTMOptionsBuilder builder_(_fbb);
+ builder_.add_proj_clip(proj_clip);
+ builder_.add_cell_clip(cell_clip);
+ builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs);
+ builder_.add_kernel_type(kernel_type);
+ builder_.add_fused_activation_function(fused_activation_function);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<LSTMOptions>
+CreateLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
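+// The object API mirrors the packed table: UnPack() allocates a mutable
+// LSTMOptionsT and Pack() re-serializes it.  Round-trip sketch, assuming
+// `lstm` points at a packed circle::LSTMOptions and `fbb` is a builder:
+//   std::unique_ptr<circle::LSTMOptionsT> native(lstm->UnPack());
+//   native->cell_clip = 6.0f;
+//   auto repacked = circle::LSTMOptions::Pack(fbb, native.get());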
+
+struct UnidirectionalSequenceLSTMOptionsT : public flatbuffers::NativeTable
+{
+ typedef UnidirectionalSequenceLSTMOptions TableType;
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE;
+ float cell_clip = 0.0f;
+ float proj_clip = 0.0f;
+ bool time_major = false;
+ bool asymmetric_quantize_inputs = false;
+};
+
+struct UnidirectionalSequenceLSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef UnidirectionalSequenceLSTMOptionsT NativeTableType;
+ typedef UnidirectionalSequenceLSTMOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_FUSED_ACTIVATION_FUNCTION = 4,
+ VT_CELL_CLIP = 6,
+ VT_PROJ_CLIP = 8,
+ VT_TIME_MAJOR = 10,
+ VT_ASYMMETRIC_QUANTIZE_INPUTS = 12
+ };
+ circle::ActivationFunctionType fused_activation_function() const
+ {
+ return static_cast<circle::ActivationFunctionType>(
+ GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
+ }
+ float cell_clip() const { return GetField<float>(VT_CELL_CLIP, 0.0f); }
+ float proj_clip() const { return GetField<float>(VT_PROJ_CLIP, 0.0f); }
+ bool time_major() const { return GetField<uint8_t>(VT_TIME_MAJOR, 0) != 0; }
+ bool asymmetric_quantize_inputs() const
+ {
+ return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) &&
+ VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
+ VerifyField<float>(verifier, VT_CELL_CLIP) &&
+ VerifyField<float>(verifier, VT_PROJ_CLIP) &&
+ VerifyField<uint8_t>(verifier, VT_TIME_MAJOR) &&
+ VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable();
+ }
+ UnidirectionalSequenceLSTMOptionsT *
+ UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(UnidirectionalSequenceLSTMOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<UnidirectionalSequenceLSTMOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnidirectionalSequenceLSTMOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct UnidirectionalSequenceLSTMOptionsBuilder
+{
+ typedef UnidirectionalSequenceLSTMOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_fused_activation_function(circle::ActivationFunctionType fused_activation_function)
+ {
+ fbb_.AddElement<int8_t>(UnidirectionalSequenceLSTMOptions::VT_FUSED_ACTIVATION_FUNCTION,
+ static_cast<int8_t>(fused_activation_function), 0);
+ }
+ void add_cell_clip(float cell_clip)
+ {
+ fbb_.AddElement<float>(UnidirectionalSequenceLSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f);
+ }
+ void add_proj_clip(float proj_clip)
+ {
+ fbb_.AddElement<float>(UnidirectionalSequenceLSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f);
+ }
+ void add_time_major(bool time_major)
+ {
+ fbb_.AddElement<uint8_t>(UnidirectionalSequenceLSTMOptions::VT_TIME_MAJOR,
+ static_cast<uint8_t>(time_major), 0);
+ }
+ void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs)
+ {
+ fbb_.AddElement<uint8_t>(UnidirectionalSequenceLSTMOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS,
+ static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
+ }
+ explicit UnidirectionalSequenceLSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<UnidirectionalSequenceLSTMOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<UnidirectionalSequenceLSTMOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<UnidirectionalSequenceLSTMOptions>
+CreateUnidirectionalSequenceLSTMOptions(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE,
+ float cell_clip = 0.0f, float proj_clip = 0.0f, bool time_major = false,
+ bool asymmetric_quantize_inputs = false)
+{
+ UnidirectionalSequenceLSTMOptionsBuilder builder_(_fbb);
+ builder_.add_proj_clip(proj_clip);
+ builder_.add_cell_clip(cell_clip);
+ builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs);
+ builder_.add_time_major(time_major);
+ builder_.add_fused_activation_function(fused_activation_function);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<UnidirectionalSequenceLSTMOptions> CreateUnidirectionalSequenceLSTMOptions(
+ flatbuffers::FlatBufferBuilder &_fbb, const UnidirectionalSequenceLSTMOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct BidirectionalSequenceLSTMOptionsT : public flatbuffers::NativeTable
+{
+ typedef BidirectionalSequenceLSTMOptions TableType;
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE;
+ float cell_clip = 0.0f;
+ float proj_clip = 0.0f;
+ bool merge_outputs = false;
+ bool time_major = true;
+ bool asymmetric_quantize_inputs = false;
+};
+
+struct BidirectionalSequenceLSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef BidirectionalSequenceLSTMOptionsT NativeTableType;
+ typedef BidirectionalSequenceLSTMOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_FUSED_ACTIVATION_FUNCTION = 4,
+ VT_CELL_CLIP = 6,
+ VT_PROJ_CLIP = 8,
+ VT_MERGE_OUTPUTS = 10,
+ VT_TIME_MAJOR = 12,
+ VT_ASYMMETRIC_QUANTIZE_INPUTS = 14
+ };
+ circle::ActivationFunctionType fused_activation_function() const
+ {
+ return static_cast<circle::ActivationFunctionType>(
+ GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
+ }
+ float cell_clip() const { return GetField<float>(VT_CELL_CLIP, 0.0f); }
+ float proj_clip() const { return GetField<float>(VT_PROJ_CLIP, 0.0f); }
+ bool merge_outputs() const { return GetField<uint8_t>(VT_MERGE_OUTPUTS, 0) != 0; }
+ bool time_major() const { return GetField<uint8_t>(VT_TIME_MAJOR, 1) != 0; }
+ bool asymmetric_quantize_inputs() const
+ {
+ return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) &&
+ VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
+ VerifyField<float>(verifier, VT_CELL_CLIP) &&
+ VerifyField<float>(verifier, VT_PROJ_CLIP) &&
+ VerifyField<uint8_t>(verifier, VT_MERGE_OUTPUTS) &&
+ VerifyField<uint8_t>(verifier, VT_TIME_MAJOR) &&
+ VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable();
+ }
+ BidirectionalSequenceLSTMOptionsT *
+ UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(BidirectionalSequenceLSTMOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<BidirectionalSequenceLSTMOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceLSTMOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct BidirectionalSequenceLSTMOptionsBuilder
+{
+ typedef BidirectionalSequenceLSTMOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_fused_activation_function(circle::ActivationFunctionType fused_activation_function)
+ {
+ fbb_.AddElement<int8_t>(BidirectionalSequenceLSTMOptions::VT_FUSED_ACTIVATION_FUNCTION,
+ static_cast<int8_t>(fused_activation_function), 0);
+ }
+ void add_cell_clip(float cell_clip)
+ {
+ fbb_.AddElement<float>(BidirectionalSequenceLSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f);
+ }
+ void add_proj_clip(float proj_clip)
+ {
+ fbb_.AddElement<float>(BidirectionalSequenceLSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f);
+ }
+ void add_merge_outputs(bool merge_outputs)
+ {
+ fbb_.AddElement<uint8_t>(BidirectionalSequenceLSTMOptions::VT_MERGE_OUTPUTS,
+ static_cast<uint8_t>(merge_outputs), 0);
+ }
+ void add_time_major(bool time_major)
+ {
+ fbb_.AddElement<uint8_t>(BidirectionalSequenceLSTMOptions::VT_TIME_MAJOR,
+ static_cast<uint8_t>(time_major), 1);
+ }
+ void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs)
+ {
+ fbb_.AddElement<uint8_t>(BidirectionalSequenceLSTMOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS,
+ static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
+ }
+ explicit BidirectionalSequenceLSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<BidirectionalSequenceLSTMOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<BidirectionalSequenceLSTMOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<BidirectionalSequenceLSTMOptions> CreateBidirectionalSequenceLSTMOptions(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE,
+ float cell_clip = 0.0f, float proj_clip = 0.0f, bool merge_outputs = false,
+ bool time_major = true, bool asymmetric_quantize_inputs = false)
+{
+ BidirectionalSequenceLSTMOptionsBuilder builder_(_fbb);
+ builder_.add_proj_clip(proj_clip);
+ builder_.add_cell_clip(cell_clip);
+ builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs);
+ builder_.add_time_major(time_major);
+ builder_.add_merge_outputs(merge_outputs);
+ builder_.add_fused_activation_function(fused_activation_function);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<BidirectionalSequenceLSTMOptions>
+CreateBidirectionalSequenceLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ const BidirectionalSequenceLSTMOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct ResizeBilinearOptionsT : public flatbuffers::NativeTable
+{
+ typedef ResizeBilinearOptions TableType;
+ bool align_corners = false;
+ bool half_pixel_centers = false;
+};
+
+struct ResizeBilinearOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef ResizeBilinearOptionsT NativeTableType;
+ typedef ResizeBilinearOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_ALIGN_CORNERS = 8,
+ VT_HALF_PIXEL_CENTERS = 10
+ };
+ bool align_corners() const { return GetField<uint8_t>(VT_ALIGN_CORNERS, 0) != 0; }
+ bool half_pixel_centers() const { return GetField<uint8_t>(VT_HALF_PIXEL_CENTERS, 0) != 0; }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_ALIGN_CORNERS) &&
+ VerifyField<uint8_t>(verifier, VT_HALF_PIXEL_CENTERS) && verifier.EndTable();
+ }
+ ResizeBilinearOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(ResizeBilinearOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<ResizeBilinearOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct ResizeBilinearOptionsBuilder
+{
+ typedef ResizeBilinearOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_align_corners(bool align_corners)
+ {
+ fbb_.AddElement<uint8_t>(ResizeBilinearOptions::VT_ALIGN_CORNERS,
+ static_cast<uint8_t>(align_corners), 0);
+ }
+ void add_half_pixel_centers(bool half_pixel_centers)
+ {
+ fbb_.AddElement<uint8_t>(ResizeBilinearOptions::VT_HALF_PIXEL_CENTERS,
+ static_cast<uint8_t>(half_pixel_centers), 0);
+ }
+ explicit ResizeBilinearOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<ResizeBilinearOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<ResizeBilinearOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<ResizeBilinearOptions>
+CreateResizeBilinearOptions(flatbuffers::FlatBufferBuilder &_fbb, bool align_corners = false,
+ bool half_pixel_centers = false)
+{
+ ResizeBilinearOptionsBuilder builder_(_fbb);
+ builder_.add_half_pixel_centers(half_pixel_centers);
+ builder_.add_align_corners(align_corners);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<ResizeBilinearOptions>
+CreateResizeBilinearOptions(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
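+// Unlike the other option tables in this header, ResizeBilinearOptions starts
+// its field ids at vtable offset 8 instead of 4; the schema appears to reserve
+// the earlier slots for deprecated fields (new_height/new_width in the
+// upstream TFLite schema), which keeps older serialized models readable.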
+
+struct ResizeNearestNeighborOptionsT : public flatbuffers::NativeTable
+{
+ typedef ResizeNearestNeighborOptions TableType;
+ bool align_corners = false;
+ bool half_pixel_centers = false;
+};
+
+struct ResizeNearestNeighborOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef ResizeNearestNeighborOptionsT NativeTableType;
+ typedef ResizeNearestNeighborOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_ALIGN_CORNERS = 4,
+ VT_HALF_PIXEL_CENTERS = 6
+ };
+ bool align_corners() const { return GetField<uint8_t>(VT_ALIGN_CORNERS, 0) != 0; }
+ bool half_pixel_centers() const { return GetField<uint8_t>(VT_HALF_PIXEL_CENTERS, 0) != 0; }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_ALIGN_CORNERS) &&
+ VerifyField<uint8_t>(verifier, VT_HALF_PIXEL_CENTERS) && verifier.EndTable();
+ }
+ ResizeNearestNeighborOptionsT *
+ UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(ResizeNearestNeighborOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<ResizeNearestNeighborOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeNearestNeighborOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct ResizeNearestNeighborOptionsBuilder
+{
+ typedef ResizeNearestNeighborOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_align_corners(bool align_corners)
+ {
+ fbb_.AddElement<uint8_t>(ResizeNearestNeighborOptions::VT_ALIGN_CORNERS,
+ static_cast<uint8_t>(align_corners), 0);
+ }
+ void add_half_pixel_centers(bool half_pixel_centers)
+ {
+ fbb_.AddElement<uint8_t>(ResizeNearestNeighborOptions::VT_HALF_PIXEL_CENTERS,
+ static_cast<uint8_t>(half_pixel_centers), 0);
+ }
+ explicit ResizeNearestNeighborOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<ResizeNearestNeighborOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<ResizeNearestNeighborOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<ResizeNearestNeighborOptions>
+CreateResizeNearestNeighborOptions(flatbuffers::FlatBufferBuilder &_fbb, bool align_corners = false,
+ bool half_pixel_centers = false)
+{
+ ResizeNearestNeighborOptionsBuilder builder_(_fbb);
+ builder_.add_half_pixel_centers(half_pixel_centers);
+ builder_.add_align_corners(align_corners);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<ResizeNearestNeighborOptions>
+CreateResizeNearestNeighborOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ const ResizeNearestNeighborOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct CallOptionsT : public flatbuffers::NativeTable
+{
+ typedef CallOptions TableType;
+ uint32_t subgraph = 0;
+};
+
+struct CallOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef CallOptionsT NativeTableType;
+ typedef CallOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_SUBGRAPH = 4
+ };
+ uint32_t subgraph() const { return GetField<uint32_t>(VT_SUBGRAPH, 0); }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<uint32_t>(verifier, VT_SUBGRAPH) &&
+ verifier.EndTable();
+ }
+ CallOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(CallOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<CallOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct CallOptionsBuilder
+{
+ typedef CallOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_subgraph(uint32_t subgraph)
+ {
+ fbb_.AddElement<uint32_t>(CallOptions::VT_SUBGRAPH, subgraph, 0);
+ }
+ explicit CallOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<CallOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<CallOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<CallOptions> CreateCallOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ uint32_t subgraph = 0)
+{
+ CallOptionsBuilder builder_(_fbb);
+ builder_.add_subgraph(subgraph);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<CallOptions>
+CreateCallOptions(flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct PadOptionsT : public flatbuffers::NativeTable
+{
+ typedef PadOptions TableType;
+};
+
+struct PadOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef PadOptionsT NativeTableType;
+ typedef PadOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ PadOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(PadOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<PadOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct PadOptionsBuilder
+{
+ typedef PadOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit PadOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<PadOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<PadOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<PadOptions> CreatePadOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ PadOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<PadOptions>
+CreatePadOptions(flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
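+// Several operators carry no attributes; their option tables (PadOptions,
+// PadV2Options, SpaceToBatchNDOptions, BatchToSpaceNDOptions below, and
+// others) are empty and exist only so every operator has a corresponding
+// BuiltinOptions member, e.g.:
+//   auto pad_opts = circle::CreatePadOptions(fbb);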
+
+struct PadV2OptionsT : public flatbuffers::NativeTable
+{
+ typedef PadV2Options TableType;
+};
+
+struct PadV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef PadV2OptionsT NativeTableType;
+ typedef PadV2OptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ PadV2OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(PadV2OptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<PadV2Options>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct PadV2OptionsBuilder
+{
+ typedef PadV2Options Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit PadV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<PadV2Options> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<PadV2Options>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<PadV2Options> CreatePadV2Options(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ PadV2OptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<PadV2Options>
+CreatePadV2Options(flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct ReshapeOptionsT : public flatbuffers::NativeTable
+{
+ typedef ReshapeOptions TableType;
+ std::vector<int32_t> new_shape{};
+};
+
+struct ReshapeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef ReshapeOptionsT NativeTableType;
+ typedef ReshapeOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_NEW_SHAPE = 4
+ };
+ const flatbuffers::Vector<int32_t> *new_shape() const
+ {
+ return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_NEW_SHAPE);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_NEW_SHAPE) &&
+ verifier.VerifyVector(new_shape()) && verifier.EndTable();
+ }
+ ReshapeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(ReshapeOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<ReshapeOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct ReshapeOptionsBuilder
+{
+ typedef ReshapeOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_new_shape(flatbuffers::Offset<flatbuffers::Vector<int32_t>> new_shape)
+ {
+ fbb_.AddOffset(ReshapeOptions::VT_NEW_SHAPE, new_shape);
+ }
+ explicit ReshapeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<ReshapeOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<ReshapeOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<ReshapeOptions>
+CreateReshapeOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ flatbuffers::Offset<flatbuffers::Vector<int32_t>> new_shape = 0)
+{
+ ReshapeOptionsBuilder builder_(_fbb);
+ builder_.add_new_shape(new_shape);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<ReshapeOptions>
+CreateReshapeOptionsDirect(flatbuffers::FlatBufferBuilder &_fbb,
+ const std::vector<int32_t> *new_shape = nullptr)
+{
+ auto new_shape__ = new_shape ? _fbb.CreateVector<int32_t>(*new_shape) : 0;
+ return circle::CreateReshapeOptions(_fbb, new_shape__);
+}
+
+flatbuffers::Offset<ReshapeOptions>
+CreateReshapeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
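+// The ...Direct variant copies a std::vector into the buffer before delegating
+// to CreateReshapeOptions(); convenient when the target shape is computed at
+// runtime.  Sketch with example values only:
+//   std::vector<int32_t> shape{1, -1, 64};
+//   auto reshape_opts = circle::CreateReshapeOptionsDirect(fbb, &shape);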
+
+struct SpaceToBatchNDOptionsT : public flatbuffers::NativeTable
+{
+ typedef SpaceToBatchNDOptions TableType;
+};
+
+struct SpaceToBatchNDOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef SpaceToBatchNDOptionsT NativeTableType;
+ typedef SpaceToBatchNDOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ SpaceToBatchNDOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(SpaceToBatchNDOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<SpaceToBatchNDOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct SpaceToBatchNDOptionsBuilder
+{
+ typedef SpaceToBatchNDOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit SpaceToBatchNDOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<SpaceToBatchNDOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<SpaceToBatchNDOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<SpaceToBatchNDOptions>
+CreateSpaceToBatchNDOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ SpaceToBatchNDOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<SpaceToBatchNDOptions>
+CreateSpaceToBatchNDOptions(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct BatchToSpaceNDOptionsT : public flatbuffers::NativeTable
+{
+ typedef BatchToSpaceNDOptions TableType;
+};
+
+struct BatchToSpaceNDOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef BatchToSpaceNDOptionsT NativeTableType;
+ typedef BatchToSpaceNDOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ BatchToSpaceNDOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(BatchToSpaceNDOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<BatchToSpaceNDOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct BatchToSpaceNDOptionsBuilder
+{
+ typedef BatchToSpaceNDOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit BatchToSpaceNDOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<BatchToSpaceNDOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<BatchToSpaceNDOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<BatchToSpaceNDOptions>
+CreateBatchToSpaceNDOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ BatchToSpaceNDOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<BatchToSpaceNDOptions>
+CreateBatchToSpaceNDOptions(flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct SkipGramOptionsT : public flatbuffers::NativeTable
+{
+ typedef SkipGramOptions TableType;
+ int32_t ngram_size = 0;
+ int32_t max_skip_size = 0;
+ bool include_all_ngrams = false;
+};
+
+struct SkipGramOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef SkipGramOptionsT NativeTableType;
+ typedef SkipGramOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_NGRAM_SIZE = 4,
+ VT_MAX_SKIP_SIZE = 6,
+ VT_INCLUDE_ALL_NGRAMS = 8
+ };
+ int32_t ngram_size() const { return GetField<int32_t>(VT_NGRAM_SIZE, 0); }
+ int32_t max_skip_size() const { return GetField<int32_t>(VT_MAX_SKIP_SIZE, 0); }
+ bool include_all_ngrams() const { return GetField<uint8_t>(VT_INCLUDE_ALL_NGRAMS, 0) != 0; }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_NGRAM_SIZE) &&
+ VerifyField<int32_t>(verifier, VT_MAX_SKIP_SIZE) &&
+ VerifyField<uint8_t>(verifier, VT_INCLUDE_ALL_NGRAMS) && verifier.EndTable();
+ }
+ SkipGramOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(SkipGramOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<SkipGramOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct SkipGramOptionsBuilder
+{
+ typedef SkipGramOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_ngram_size(int32_t ngram_size)
+ {
+ fbb_.AddElement<int32_t>(SkipGramOptions::VT_NGRAM_SIZE, ngram_size, 0);
+ }
+ void add_max_skip_size(int32_t max_skip_size)
+ {
+ fbb_.AddElement<int32_t>(SkipGramOptions::VT_MAX_SKIP_SIZE, max_skip_size, 0);
+ }
+ void add_include_all_ngrams(bool include_all_ngrams)
+ {
+ fbb_.AddElement<uint8_t>(SkipGramOptions::VT_INCLUDE_ALL_NGRAMS,
+ static_cast<uint8_t>(include_all_ngrams), 0);
+ }
+ explicit SkipGramOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<SkipGramOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<SkipGramOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<SkipGramOptions>
+CreateSkipGramOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t ngram_size = 0,
+ int32_t max_skip_size = 0, bool include_all_ngrams = false)
+{
+ SkipGramOptionsBuilder builder_(_fbb);
+ builder_.add_max_skip_size(max_skip_size);
+ builder_.add_ngram_size(ngram_size);
+ builder_.add_include_all_ngrams(include_all_ngrams);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<SkipGramOptions>
+CreateSkipGramOptions(flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct SpaceToDepthOptionsT : public flatbuffers::NativeTable
+{
+ typedef SpaceToDepthOptions TableType;
+ int32_t block_size = 0;
+};
+
+struct SpaceToDepthOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef SpaceToDepthOptionsT NativeTableType;
+ typedef SpaceToDepthOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_BLOCK_SIZE = 4
+ };
+ int32_t block_size() const { return GetField<int32_t>(VT_BLOCK_SIZE, 0); }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_BLOCK_SIZE) &&
+ verifier.EndTable();
+ }
+ SpaceToDepthOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(SpaceToDepthOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<SpaceToDepthOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct SpaceToDepthOptionsBuilder
+{
+ typedef SpaceToDepthOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_block_size(int32_t block_size)
+ {
+ fbb_.AddElement<int32_t>(SpaceToDepthOptions::VT_BLOCK_SIZE, block_size, 0);
+ }
+ explicit SpaceToDepthOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<SpaceToDepthOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<SpaceToDepthOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<SpaceToDepthOptions>
+CreateSpaceToDepthOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t block_size = 0)
+{
+ SpaceToDepthOptionsBuilder builder_(_fbb);
+ builder_.add_block_size(block_size);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<SpaceToDepthOptions>
+CreateSpaceToDepthOptions(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct DepthToSpaceOptionsT : public flatbuffers::NativeTable
+{
+ typedef DepthToSpaceOptions TableType;
+ int32_t block_size = 0;
+};
+
+struct DepthToSpaceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef DepthToSpaceOptionsT NativeTableType;
+ typedef DepthToSpaceOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_BLOCK_SIZE = 4
+ };
+ int32_t block_size() const { return GetField<int32_t>(VT_BLOCK_SIZE, 0); }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_BLOCK_SIZE) &&
+ verifier.EndTable();
+ }
+ DepthToSpaceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(DepthToSpaceOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<DepthToSpaceOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct DepthToSpaceOptionsBuilder
+{
+ typedef DepthToSpaceOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_block_size(int32_t block_size)
+ {
+ fbb_.AddElement<int32_t>(DepthToSpaceOptions::VT_BLOCK_SIZE, block_size, 0);
+ }
+ explicit DepthToSpaceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<DepthToSpaceOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<DepthToSpaceOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<DepthToSpaceOptions>
+CreateDepthToSpaceOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t block_size = 0)
+{
+ DepthToSpaceOptionsBuilder builder_(_fbb);
+ builder_.add_block_size(block_size);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<DepthToSpaceOptions>
+CreateDepthToSpaceOptions(flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct SubOptionsT : public flatbuffers::NativeTable
+{
+ typedef SubOptions TableType;
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE;
+ bool pot_scale_int16 = true;
+};
+
+struct SubOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef SubOptionsT NativeTableType;
+ typedef SubOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_FUSED_ACTIVATION_FUNCTION = 4,
+ VT_POT_SCALE_INT16 = 6
+ };
+ circle::ActivationFunctionType fused_activation_function() const
+ {
+ return static_cast<circle::ActivationFunctionType>(
+ GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
+ }
+ bool pot_scale_int16() const { return GetField<uint8_t>(VT_POT_SCALE_INT16, 1) != 0; }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) &&
+ VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) &&
+ VerifyField<uint8_t>(verifier, VT_POT_SCALE_INT16) && verifier.EndTable();
+ }
+ SubOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(SubOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<SubOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct SubOptionsBuilder
+{
+ typedef SubOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_fused_activation_function(circle::ActivationFunctionType fused_activation_function)
+ {
+ fbb_.AddElement<int8_t>(SubOptions::VT_FUSED_ACTIVATION_FUNCTION,
+ static_cast<int8_t>(fused_activation_function), 0);
+ }
+ void add_pot_scale_int16(bool pot_scale_int16)
+ {
+ fbb_.AddElement<uint8_t>(SubOptions::VT_POT_SCALE_INT16, static_cast<uint8_t>(pot_scale_int16),
+ 1);
+ }
+ explicit SubOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<SubOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<SubOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<SubOptions> CreateSubOptions(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE,
+ bool pot_scale_int16 = true)
+{
+ SubOptionsBuilder builder_(_fbb);
+ builder_.add_pot_scale_int16(pot_scale_int16);
+ builder_.add_fused_activation_function(fused_activation_function);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<SubOptions>
+CreateSubOptions(flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct DivOptionsT : public flatbuffers::NativeTable
+{
+ typedef DivOptions TableType;
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE;
+};
+
+struct DivOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef DivOptionsT NativeTableType;
+ typedef DivOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_FUSED_ACTIVATION_FUNCTION = 4
+ };
+ circle::ActivationFunctionType fused_activation_function() const
+ {
+ return static_cast<circle::ActivationFunctionType>(
+ GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) &&
+ VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
+ }
+ DivOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(DivOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<DivOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct DivOptionsBuilder
+{
+ typedef DivOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_fused_activation_function(circle::ActivationFunctionType fused_activation_function)
+ {
+ fbb_.AddElement<int8_t>(DivOptions::VT_FUSED_ACTIVATION_FUNCTION,
+ static_cast<int8_t>(fused_activation_function), 0);
+ }
+ explicit DivOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<DivOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<DivOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<DivOptions> CreateDivOptions(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE)
+{
+ DivOptionsBuilder builder_(_fbb);
+ builder_.add_fused_activation_function(fused_activation_function);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<DivOptions>
+CreateDivOptions(flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct TopKV2OptionsT : public flatbuffers::NativeTable
+{
+ typedef TopKV2Options TableType;
+};
+
+struct TopKV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef TopKV2OptionsT NativeTableType;
+ typedef TopKV2OptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ TopKV2OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(TopKV2OptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<TopKV2Options>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct TopKV2OptionsBuilder
+{
+ typedef TopKV2Options Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit TopKV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<TopKV2Options> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<TopKV2Options>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<TopKV2Options> CreateTopKV2Options(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ TopKV2OptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<TopKV2Options>
+CreateTopKV2Options(flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct EmbeddingLookupSparseOptionsT : public flatbuffers::NativeTable
+{
+ typedef EmbeddingLookupSparseOptions TableType;
+ circle::CombinerType combiner = circle::CombinerType_SUM;
+};
+
+struct EmbeddingLookupSparseOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef EmbeddingLookupSparseOptionsT NativeTableType;
+ typedef EmbeddingLookupSparseOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_COMBINER = 4
+ };
+ circle::CombinerType combiner() const
+ {
+ return static_cast<circle::CombinerType>(GetField<int8_t>(VT_COMBINER, 0));
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_COMBINER) &&
+ verifier.EndTable();
+ }
+ EmbeddingLookupSparseOptionsT *
+ UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(EmbeddingLookupSparseOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<EmbeddingLookupSparseOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct EmbeddingLookupSparseOptionsBuilder
+{
+ typedef EmbeddingLookupSparseOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_combiner(circle::CombinerType combiner)
+ {
+ fbb_.AddElement<int8_t>(EmbeddingLookupSparseOptions::VT_COMBINER,
+ static_cast<int8_t>(combiner), 0);
+ }
+ explicit EmbeddingLookupSparseOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<EmbeddingLookupSparseOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<EmbeddingLookupSparseOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<EmbeddingLookupSparseOptions>
+CreateEmbeddingLookupSparseOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ circle::CombinerType combiner = circle::CombinerType_SUM)
+{
+ EmbeddingLookupSparseOptionsBuilder builder_(_fbb);
+ builder_.add_combiner(combiner);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<EmbeddingLookupSparseOptions>
+CreateEmbeddingLookupSparseOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ const EmbeddingLookupSparseOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct GatherOptionsT : public flatbuffers::NativeTable
+{
+ typedef GatherOptions TableType;
+ int32_t axis = 0;
+ int32_t batch_dims = 0;
+};
+
+struct GatherOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef GatherOptionsT NativeTableType;
+ typedef GatherOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_AXIS = 4,
+ VT_BATCH_DIMS = 6
+ };
+ int32_t axis() const { return GetField<int32_t>(VT_AXIS, 0); }
+ int32_t batch_dims() const { return GetField<int32_t>(VT_BATCH_DIMS, 0); }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_AXIS) &&
+ VerifyField<int32_t>(verifier, VT_BATCH_DIMS) && verifier.EndTable();
+ }
+ GatherOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(GatherOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<GatherOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct GatherOptionsBuilder
+{
+ typedef GatherOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_axis(int32_t axis) { fbb_.AddElement<int32_t>(GatherOptions::VT_AXIS, axis, 0); }
+ void add_batch_dims(int32_t batch_dims)
+ {
+ fbb_.AddElement<int32_t>(GatherOptions::VT_BATCH_DIMS, batch_dims, 0);
+ }
+ explicit GatherOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<GatherOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<GatherOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<GatherOptions>
+CreateGatherOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t axis = 0, int32_t batch_dims = 0)
+{
+ GatherOptionsBuilder builder_(_fbb);
+ builder_.add_batch_dims(batch_dims);
+ builder_.add_axis(axis);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<GatherOptions>
+CreateGatherOptions(flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct TransposeOptionsT : public flatbuffers::NativeTable
+{
+ typedef TransposeOptions TableType;
+};
+
+struct TransposeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef TransposeOptionsT NativeTableType;
+ typedef TransposeOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ TransposeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(TransposeOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<TransposeOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct TransposeOptionsBuilder
+{
+ typedef TransposeOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit TransposeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<TransposeOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<TransposeOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<TransposeOptions>
+CreateTransposeOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ TransposeOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<TransposeOptions>
+CreateTransposeOptions(flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct ExpOptionsT : public flatbuffers::NativeTable
+{
+ typedef ExpOptions TableType;
+};
+
+struct ExpOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef ExpOptionsT NativeTableType;
+ typedef ExpOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ ExpOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(ExpOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<ExpOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct ExpOptionsBuilder
+{
+ typedef ExpOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit ExpOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<ExpOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<ExpOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<ExpOptions> CreateExpOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ ExpOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<ExpOptions>
+CreateExpOptions(flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct CosOptionsT : public flatbuffers::NativeTable
+{
+ typedef CosOptions TableType;
+};
+
+struct CosOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef CosOptionsT NativeTableType;
+ typedef CosOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ CosOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(CosOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<CosOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct CosOptionsBuilder
+{
+ typedef CosOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit CosOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<CosOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<CosOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<CosOptions> CreateCosOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ CosOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<CosOptions>
+CreateCosOptions(flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct ReducerOptionsT : public flatbuffers::NativeTable
+{
+ typedef ReducerOptions TableType;
+ bool keep_dims = false;
+};
+
+struct ReducerOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef ReducerOptionsT NativeTableType;
+ typedef ReducerOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_KEEP_DIMS = 4
+ };
+ bool keep_dims() const { return GetField<uint8_t>(VT_KEEP_DIMS, 0) != 0; }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_KEEP_DIMS) &&
+ verifier.EndTable();
+ }
+ ReducerOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(ReducerOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<ReducerOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct ReducerOptionsBuilder
+{
+ typedef ReducerOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_keep_dims(bool keep_dims)
+ {
+ fbb_.AddElement<uint8_t>(ReducerOptions::VT_KEEP_DIMS, static_cast<uint8_t>(keep_dims), 0);
+ }
+ explicit ReducerOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<ReducerOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<ReducerOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<ReducerOptions>
+CreateReducerOptions(flatbuffers::FlatBufferBuilder &_fbb, bool keep_dims = false)
+{
+ ReducerOptionsBuilder builder_(_fbb);
+ builder_.add_keep_dims(keep_dims);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<ReducerOptions>
+CreateReducerOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct SqueezeOptionsT : public flatbuffers::NativeTable
+{
+ typedef SqueezeOptions TableType;
+ std::vector<int32_t> squeeze_dims{};
+};
+
+struct SqueezeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef SqueezeOptionsT NativeTableType;
+ typedef SqueezeOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_SQUEEZE_DIMS = 4
+ };
+ const flatbuffers::Vector<int32_t> *squeeze_dims() const
+ {
+ return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_SQUEEZE_DIMS);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_SQUEEZE_DIMS) &&
+ verifier.VerifyVector(squeeze_dims()) && verifier.EndTable();
+ }
+ SqueezeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(SqueezeOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<SqueezeOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct SqueezeOptionsBuilder
+{
+ typedef SqueezeOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_squeeze_dims(flatbuffers::Offset<flatbuffers::Vector<int32_t>> squeeze_dims)
+ {
+ fbb_.AddOffset(SqueezeOptions::VT_SQUEEZE_DIMS, squeeze_dims);
+ }
+ explicit SqueezeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<SqueezeOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<SqueezeOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<SqueezeOptions>
+CreateSqueezeOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ flatbuffers::Offset<flatbuffers::Vector<int32_t>> squeeze_dims = 0)
+{
+ SqueezeOptionsBuilder builder_(_fbb);
+ builder_.add_squeeze_dims(squeeze_dims);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<SqueezeOptions>
+CreateSqueezeOptionsDirect(flatbuffers::FlatBufferBuilder &_fbb,
+ const std::vector<int32_t> *squeeze_dims = nullptr)
+{
+ auto squeeze_dims__ = squeeze_dims ? _fbb.CreateVector<int32_t>(*squeeze_dims) : 0;
+ return circle::CreateSqueezeOptions(_fbb, squeeze_dims__);
+}
+
+flatbuffers::Offset<SqueezeOptions>
+CreateSqueezeOptions(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct SplitOptionsT : public flatbuffers::NativeTable
+{
+ typedef SplitOptions TableType;
+ int32_t num_splits = 0;
+};
+
+struct SplitOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef SplitOptionsT NativeTableType;
+ typedef SplitOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_NUM_SPLITS = 4
+ };
+ int32_t num_splits() const { return GetField<int32_t>(VT_NUM_SPLITS, 0); }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_NUM_SPLITS) &&
+ verifier.EndTable();
+ }
+ SplitOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(SplitOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<SplitOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct SplitOptionsBuilder
+{
+ typedef SplitOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_num_splits(int32_t num_splits)
+ {
+ fbb_.AddElement<int32_t>(SplitOptions::VT_NUM_SPLITS, num_splits, 0);
+ }
+ explicit SplitOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<SplitOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<SplitOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<SplitOptions> CreateSplitOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ int32_t num_splits = 0)
+{
+ SplitOptionsBuilder builder_(_fbb);
+ builder_.add_num_splits(num_splits);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<SplitOptions>
+CreateSplitOptions(flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct SplitVOptionsT : public flatbuffers::NativeTable
+{
+ typedef SplitVOptions TableType;
+ int32_t num_splits = 0;
+};
+
+struct SplitVOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef SplitVOptionsT NativeTableType;
+ typedef SplitVOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_NUM_SPLITS = 4
+ };
+ int32_t num_splits() const { return GetField<int32_t>(VT_NUM_SPLITS, 0); }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_NUM_SPLITS) &&
+ verifier.EndTable();
+ }
+ SplitVOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(SplitVOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<SplitVOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct SplitVOptionsBuilder
+{
+ typedef SplitVOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_num_splits(int32_t num_splits)
+ {
+ fbb_.AddElement<int32_t>(SplitVOptions::VT_NUM_SPLITS, num_splits, 0);
+ }
+ explicit SplitVOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<SplitVOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<SplitVOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<SplitVOptions> CreateSplitVOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ int32_t num_splits = 0)
+{
+ SplitVOptionsBuilder builder_(_fbb);
+ builder_.add_num_splits(num_splits);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<SplitVOptions>
+CreateSplitVOptions(flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct StridedSliceOptionsT : public flatbuffers::NativeTable
+{
+ typedef StridedSliceOptions TableType;
+ int32_t begin_mask = 0;
+ int32_t end_mask = 0;
+ int32_t ellipsis_mask = 0;
+ int32_t new_axis_mask = 0;
+ int32_t shrink_axis_mask = 0;
+};
+
+struct StridedSliceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef StridedSliceOptionsT NativeTableType;
+ typedef StridedSliceOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_BEGIN_MASK = 4,
+ VT_END_MASK = 6,
+ VT_ELLIPSIS_MASK = 8,
+ VT_NEW_AXIS_MASK = 10,
+ VT_SHRINK_AXIS_MASK = 12
+ };
+ int32_t begin_mask() const { return GetField<int32_t>(VT_BEGIN_MASK, 0); }
+ int32_t end_mask() const { return GetField<int32_t>(VT_END_MASK, 0); }
+ int32_t ellipsis_mask() const { return GetField<int32_t>(VT_ELLIPSIS_MASK, 0); }
+ int32_t new_axis_mask() const { return GetField<int32_t>(VT_NEW_AXIS_MASK, 0); }
+ int32_t shrink_axis_mask() const { return GetField<int32_t>(VT_SHRINK_AXIS_MASK, 0); }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_BEGIN_MASK) &&
+ VerifyField<int32_t>(verifier, VT_END_MASK) &&
+ VerifyField<int32_t>(verifier, VT_ELLIPSIS_MASK) &&
+ VerifyField<int32_t>(verifier, VT_NEW_AXIS_MASK) &&
+ VerifyField<int32_t>(verifier, VT_SHRINK_AXIS_MASK) && verifier.EndTable();
+ }
+ StridedSliceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(StridedSliceOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<StridedSliceOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct StridedSliceOptionsBuilder
+{
+ typedef StridedSliceOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_begin_mask(int32_t begin_mask)
+ {
+ fbb_.AddElement<int32_t>(StridedSliceOptions::VT_BEGIN_MASK, begin_mask, 0);
+ }
+ void add_end_mask(int32_t end_mask)
+ {
+ fbb_.AddElement<int32_t>(StridedSliceOptions::VT_END_MASK, end_mask, 0);
+ }
+ void add_ellipsis_mask(int32_t ellipsis_mask)
+ {
+ fbb_.AddElement<int32_t>(StridedSliceOptions::VT_ELLIPSIS_MASK, ellipsis_mask, 0);
+ }
+ void add_new_axis_mask(int32_t new_axis_mask)
+ {
+ fbb_.AddElement<int32_t>(StridedSliceOptions::VT_NEW_AXIS_MASK, new_axis_mask, 0);
+ }
+ void add_shrink_axis_mask(int32_t shrink_axis_mask)
+ {
+ fbb_.AddElement<int32_t>(StridedSliceOptions::VT_SHRINK_AXIS_MASK, shrink_axis_mask, 0);
+ }
+ explicit StridedSliceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<StridedSliceOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<StridedSliceOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<StridedSliceOptions>
+CreateStridedSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t begin_mask = 0,
+ int32_t end_mask = 0, int32_t ellipsis_mask = 0,
+ int32_t new_axis_mask = 0, int32_t shrink_axis_mask = 0)
+{
+ StridedSliceOptionsBuilder builder_(_fbb);
+ builder_.add_shrink_axis_mask(shrink_axis_mask);
+ builder_.add_new_axis_mask(new_axis_mask);
+ builder_.add_ellipsis_mask(ellipsis_mask);
+ builder_.add_end_mask(end_mask);
+ builder_.add_begin_mask(begin_mask);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<StridedSliceOptions>
+CreateStridedSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct LogSoftmaxOptionsT : public flatbuffers::NativeTable
+{
+ typedef LogSoftmaxOptions TableType;
+};
+
+struct LogSoftmaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef LogSoftmaxOptionsT NativeTableType;
+ typedef LogSoftmaxOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ LogSoftmaxOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(LogSoftmaxOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<LogSoftmaxOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct LogSoftmaxOptionsBuilder
+{
+ typedef LogSoftmaxOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit LogSoftmaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<LogSoftmaxOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<LogSoftmaxOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<LogSoftmaxOptions>
+CreateLogSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ LogSoftmaxOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<LogSoftmaxOptions>
+CreateLogSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct CastOptionsT : public flatbuffers::NativeTable
+{
+ typedef CastOptions TableType;
+ circle::TensorType in_data_type = circle::TensorType_FLOAT32;
+ circle::TensorType out_data_type = circle::TensorType_FLOAT32;
+};
+
+struct CastOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef CastOptionsT NativeTableType;
+ typedef CastOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_IN_DATA_TYPE = 4,
+ VT_OUT_DATA_TYPE = 6
+ };
+ circle::TensorType in_data_type() const
+ {
+ return static_cast<circle::TensorType>(GetField<int8_t>(VT_IN_DATA_TYPE, 0));
+ }
+ circle::TensorType out_data_type() const
+ {
+ return static_cast<circle::TensorType>(GetField<int8_t>(VT_OUT_DATA_TYPE, 0));
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_IN_DATA_TYPE) &&
+ VerifyField<int8_t>(verifier, VT_OUT_DATA_TYPE) && verifier.EndTable();
+ }
+ CastOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(CastOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<CastOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct CastOptionsBuilder
+{
+ typedef CastOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_in_data_type(circle::TensorType in_data_type)
+ {
+ fbb_.AddElement<int8_t>(CastOptions::VT_IN_DATA_TYPE, static_cast<int8_t>(in_data_type), 0);
+ }
+ void add_out_data_type(circle::TensorType out_data_type)
+ {
+ fbb_.AddElement<int8_t>(CastOptions::VT_OUT_DATA_TYPE, static_cast<int8_t>(out_data_type), 0);
+ }
+ explicit CastOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<CastOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<CastOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<CastOptions>
+CreateCastOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ circle::TensorType in_data_type = circle::TensorType_FLOAT32,
+ circle::TensorType out_data_type = circle::TensorType_FLOAT32)
+{
+ CastOptionsBuilder builder_(_fbb);
+ builder_.add_out_data_type(out_data_type);
+ builder_.add_in_data_type(in_data_type);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<CastOptions>
+CreateCastOptions(flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct DequantizeOptionsT : public flatbuffers::NativeTable
+{
+ typedef DequantizeOptions TableType;
+};
+
+struct DequantizeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef DequantizeOptionsT NativeTableType;
+ typedef DequantizeOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ DequantizeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(DequantizeOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<DequantizeOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct DequantizeOptionsBuilder
+{
+ typedef DequantizeOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit DequantizeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<DequantizeOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<DequantizeOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<DequantizeOptions>
+CreateDequantizeOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ DequantizeOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<DequantizeOptions>
+CreateDequantizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct MaximumMinimumOptionsT : public flatbuffers::NativeTable
+{
+ typedef MaximumMinimumOptions TableType;
+};
+
+struct MaximumMinimumOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef MaximumMinimumOptionsT NativeTableType;
+ typedef MaximumMinimumOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ MaximumMinimumOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(MaximumMinimumOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<MaximumMinimumOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct MaximumMinimumOptionsBuilder
+{
+ typedef MaximumMinimumOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit MaximumMinimumOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<MaximumMinimumOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<MaximumMinimumOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<MaximumMinimumOptions>
+CreateMaximumMinimumOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ MaximumMinimumOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<MaximumMinimumOptions>
+CreateMaximumMinimumOptions(flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct TileOptionsT : public flatbuffers::NativeTable
+{
+ typedef TileOptions TableType;
+};
+
+struct TileOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef TileOptionsT NativeTableType;
+ typedef TileOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ TileOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(TileOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<TileOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct TileOptionsBuilder
+{
+ typedef TileOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit TileOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<TileOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<TileOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<TileOptions> CreateTileOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ TileOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<TileOptions>
+CreateTileOptions(flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct ArgMaxOptionsT : public flatbuffers::NativeTable
+{
+ typedef ArgMaxOptions TableType;
+ circle::TensorType output_type = circle::TensorType_FLOAT32;
+};
+
+struct ArgMaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef ArgMaxOptionsT NativeTableType;
+ typedef ArgMaxOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_OUTPUT_TYPE = 4
+ };
+ circle::TensorType output_type() const
+ {
+ return static_cast<circle::TensorType>(GetField<int8_t>(VT_OUTPUT_TYPE, 0));
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_OUTPUT_TYPE) &&
+ verifier.EndTable();
+ }
+ ArgMaxOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(ArgMaxOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<ArgMaxOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct ArgMaxOptionsBuilder
+{
+ typedef ArgMaxOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_output_type(circle::TensorType output_type)
+ {
+ fbb_.AddElement<int8_t>(ArgMaxOptions::VT_OUTPUT_TYPE, static_cast<int8_t>(output_type), 0);
+ }
+ explicit ArgMaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<ArgMaxOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<ArgMaxOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<ArgMaxOptions>
+CreateArgMaxOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ circle::TensorType output_type = circle::TensorType_FLOAT32)
+{
+ ArgMaxOptionsBuilder builder_(_fbb);
+ builder_.add_output_type(output_type);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<ArgMaxOptions>
+CreateArgMaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct ArgMinOptionsT : public flatbuffers::NativeTable
+{
+ typedef ArgMinOptions TableType;
+ circle::TensorType output_type = circle::TensorType_FLOAT32;
+};
+
+struct ArgMinOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef ArgMinOptionsT NativeTableType;
+ typedef ArgMinOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_OUTPUT_TYPE = 4
+ };
+ circle::TensorType output_type() const
+ {
+ return static_cast<circle::TensorType>(GetField<int8_t>(VT_OUTPUT_TYPE, 0));
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_OUTPUT_TYPE) &&
+ verifier.EndTable();
+ }
+ ArgMinOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(ArgMinOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<ArgMinOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct ArgMinOptionsBuilder
+{
+ typedef ArgMinOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_output_type(circle::TensorType output_type)
+ {
+ fbb_.AddElement<int8_t>(ArgMinOptions::VT_OUTPUT_TYPE, static_cast<int8_t>(output_type), 0);
+ }
+ explicit ArgMinOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<ArgMinOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<ArgMinOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<ArgMinOptions>
+CreateArgMinOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ circle::TensorType output_type = circle::TensorType_FLOAT32)
+{
+ ArgMinOptionsBuilder builder_(_fbb);
+ builder_.add_output_type(output_type);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<ArgMinOptions>
+CreateArgMinOptions(flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct GreaterOptionsT : public flatbuffers::NativeTable
+{
+ typedef GreaterOptions TableType;
+};
+
+struct GreaterOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef GreaterOptionsT NativeTableType;
+ typedef GreaterOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ GreaterOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(GreaterOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<GreaterOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct GreaterOptionsBuilder
+{
+ typedef GreaterOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit GreaterOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<GreaterOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<GreaterOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<GreaterOptions>
+CreateGreaterOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ GreaterOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<GreaterOptions>
+CreateGreaterOptions(flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct GreaterEqualOptionsT : public flatbuffers::NativeTable
+{
+ typedef GreaterEqualOptions TableType;
+};
+
+struct GreaterEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef GreaterEqualOptionsT NativeTableType;
+ typedef GreaterEqualOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ GreaterEqualOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(GreaterEqualOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<GreaterEqualOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct GreaterEqualOptionsBuilder
+{
+ typedef GreaterEqualOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit GreaterEqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<GreaterEqualOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<GreaterEqualOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<GreaterEqualOptions>
+CreateGreaterEqualOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ GreaterEqualOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<GreaterEqualOptions>
+CreateGreaterEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct LessOptionsT : public flatbuffers::NativeTable
+{
+ typedef LessOptions TableType;
+};
+
+struct LessOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef LessOptionsT NativeTableType;
+ typedef LessOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ LessOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(LessOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<LessOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct LessOptionsBuilder
+{
+ typedef LessOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit LessOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<LessOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<LessOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<LessOptions> CreateLessOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ LessOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<LessOptions>
+CreateLessOptions(flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct LessEqualOptionsT : public flatbuffers::NativeTable
+{
+ typedef LessEqualOptions TableType;
+};
+
+struct LessEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef LessEqualOptionsT NativeTableType;
+ typedef LessEqualOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ LessEqualOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(LessEqualOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<LessEqualOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct LessEqualOptionsBuilder
+{
+ typedef LessEqualOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit LessEqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<LessEqualOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<LessEqualOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<LessEqualOptions>
+CreateLessEqualOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ LessEqualOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<LessEqualOptions>
+CreateLessEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct NegOptionsT : public flatbuffers::NativeTable
+{
+ typedef NegOptions TableType;
+};
+
+struct NegOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef NegOptionsT NativeTableType;
+ typedef NegOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ NegOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(NegOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<NegOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct NegOptionsBuilder
+{
+ typedef NegOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit NegOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<NegOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<NegOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<NegOptions> CreateNegOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ NegOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<NegOptions>
+CreateNegOptions(flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct SelectOptionsT : public flatbuffers::NativeTable
+{
+ typedef SelectOptions TableType;
+};
+
+struct SelectOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef SelectOptionsT NativeTableType;
+ typedef SelectOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ SelectOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(SelectOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<SelectOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const SelectOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct SelectOptionsBuilder
+{
+ typedef SelectOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit SelectOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<SelectOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<SelectOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<SelectOptions> CreateSelectOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ SelectOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<SelectOptions>
+CreateSelectOptions(flatbuffers::FlatBufferBuilder &_fbb, const SelectOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct SliceOptionsT : public flatbuffers::NativeTable
+{
+ typedef SliceOptions TableType;
+};
+
+struct SliceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef SliceOptionsT NativeTableType;
+ typedef SliceOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ SliceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(SliceOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<SliceOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const SliceOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct SliceOptionsBuilder
+{
+ typedef SliceOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit SliceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<SliceOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<SliceOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<SliceOptions> CreateSliceOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ SliceOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<SliceOptions>
+CreateSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, const SliceOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct TransposeConvOptionsT : public flatbuffers::NativeTable
+{
+ typedef TransposeConvOptions TableType;
+ circle::Padding padding = circle::Padding_SAME;
+ int32_t stride_w = 0;
+ int32_t stride_h = 0;
+};
+
+struct TransposeConvOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef TransposeConvOptionsT NativeTableType;
+ typedef TransposeConvOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_PADDING = 4,
+ VT_STRIDE_W = 6,
+ VT_STRIDE_H = 8
+ };
+ circle::Padding padding() const
+ {
+ return static_cast<circle::Padding>(GetField<int8_t>(VT_PADDING, 0));
+ }
+ int32_t stride_w() const { return GetField<int32_t>(VT_STRIDE_W, 0); }
+ int32_t stride_h() const { return GetField<int32_t>(VT_STRIDE_H, 0); }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_PADDING) &&
+ VerifyField<int32_t>(verifier, VT_STRIDE_W) &&
+ VerifyField<int32_t>(verifier, VT_STRIDE_H) && verifier.EndTable();
+ }
+ TransposeConvOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(TransposeConvOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<TransposeConvOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct TransposeConvOptionsBuilder
+{
+ typedef TransposeConvOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_padding(circle::Padding padding)
+ {
+ fbb_.AddElement<int8_t>(TransposeConvOptions::VT_PADDING, static_cast<int8_t>(padding), 0);
+ }
+ void add_stride_w(int32_t stride_w)
+ {
+ fbb_.AddElement<int32_t>(TransposeConvOptions::VT_STRIDE_W, stride_w, 0);
+ }
+ void add_stride_h(int32_t stride_h)
+ {
+ fbb_.AddElement<int32_t>(TransposeConvOptions::VT_STRIDE_H, stride_h, 0);
+ }
+ explicit TransposeConvOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<TransposeConvOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<TransposeConvOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<TransposeConvOptions>
+CreateTransposeConvOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ circle::Padding padding = circle::Padding_SAME, int32_t stride_w = 0,
+ int32_t stride_h = 0)
+{
+ TransposeConvOptionsBuilder builder_(_fbb);
+ builder_.add_stride_h(stride_h);
+ builder_.add_stride_w(stride_w);
+ builder_.add_padding(padding);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<TransposeConvOptions>
+CreateTransposeConvOptions(flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
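+// Editor's note: an illustrative sketch, not part of the flatc output. It shows how the
+// TransposeConvOptionsBuilder above is typically driven field by field; the helper name and the
+// chosen stride/padding values are assumptions for demonstration only.
+inline flatbuffers::Offset<TransposeConvOptions>
+ExampleBuildTransposeConvOptions(flatbuffers::FlatBufferBuilder &fbb)
+{
+  // Equivalent to CreateTransposeConvOptions(fbb, circle::Padding_VALID, 2, 2).
+  TransposeConvOptionsBuilder builder(fbb);
+  builder.add_stride_h(2);
+  builder.add_stride_w(2);
+  builder.add_padding(circle::Padding_VALID);
+  return builder.Finish();
+}
+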
+struct ExpandDimsOptionsT : public flatbuffers::NativeTable
+{
+ typedef ExpandDimsOptions TableType;
+};
+
+struct ExpandDimsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef ExpandDimsOptionsT NativeTableType;
+ typedef ExpandDimsOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ ExpandDimsOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(ExpandDimsOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<ExpandDimsOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct ExpandDimsOptionsBuilder
+{
+ typedef ExpandDimsOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit ExpandDimsOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<ExpandDimsOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<ExpandDimsOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<ExpandDimsOptions>
+CreateExpandDimsOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ ExpandDimsOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<ExpandDimsOptions>
+CreateExpandDimsOptions(flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct SparseToDenseOptionsT : public flatbuffers::NativeTable
+{
+ typedef SparseToDenseOptions TableType;
+ bool validate_indices = false;
+};
+
+struct SparseToDenseOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef SparseToDenseOptionsT NativeTableType;
+ typedef SparseToDenseOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_VALIDATE_INDICES = 4
+ };
+ bool validate_indices() const { return GetField<uint8_t>(VT_VALIDATE_INDICES, 0) != 0; }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_VALIDATE_INDICES) &&
+ verifier.EndTable();
+ }
+ SparseToDenseOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(SparseToDenseOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<SparseToDenseOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct SparseToDenseOptionsBuilder
+{
+ typedef SparseToDenseOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_validate_indices(bool validate_indices)
+ {
+ fbb_.AddElement<uint8_t>(SparseToDenseOptions::VT_VALIDATE_INDICES,
+ static_cast<uint8_t>(validate_indices), 0);
+ }
+ explicit SparseToDenseOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<SparseToDenseOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<SparseToDenseOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<SparseToDenseOptions>
+CreateSparseToDenseOptions(flatbuffers::FlatBufferBuilder &_fbb, bool validate_indices = false)
+{
+ SparseToDenseOptionsBuilder builder_(_fbb);
+ builder_.add_validate_indices(validate_indices);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<SparseToDenseOptions>
+CreateSparseToDenseOptions(flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct EqualOptionsT : public flatbuffers::NativeTable
+{
+ typedef EqualOptions TableType;
+};
+
+struct EqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef EqualOptionsT NativeTableType;
+ typedef EqualOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ EqualOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(EqualOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<EqualOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const EqualOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct EqualOptionsBuilder
+{
+ typedef EqualOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit EqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<EqualOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<EqualOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<EqualOptions> CreateEqualOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ EqualOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<EqualOptions>
+CreateEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const EqualOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct NotEqualOptionsT : public flatbuffers::NativeTable
+{
+ typedef NotEqualOptions TableType;
+};
+
+struct NotEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef NotEqualOptionsT NativeTableType;
+ typedef NotEqualOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ NotEqualOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(NotEqualOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<NotEqualOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const NotEqualOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct NotEqualOptionsBuilder
+{
+ typedef NotEqualOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit NotEqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<NotEqualOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<NotEqualOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<NotEqualOptions>
+CreateNotEqualOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ NotEqualOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<NotEqualOptions>
+CreateNotEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const NotEqualOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct ShapeOptionsT : public flatbuffers::NativeTable
+{
+ typedef ShapeOptions TableType;
+ circle::TensorType out_type = circle::TensorType_FLOAT32;
+};
+
+struct ShapeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef ShapeOptionsT NativeTableType;
+ typedef ShapeOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_OUT_TYPE = 4
+ };
+ circle::TensorType out_type() const
+ {
+ return static_cast<circle::TensorType>(GetField<int8_t>(VT_OUT_TYPE, 0));
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_OUT_TYPE) &&
+ verifier.EndTable();
+ }
+ ShapeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(ShapeOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<ShapeOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct ShapeOptionsBuilder
+{
+ typedef ShapeOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_out_type(circle::TensorType out_type)
+ {
+ fbb_.AddElement<int8_t>(ShapeOptions::VT_OUT_TYPE, static_cast<int8_t>(out_type), 0);
+ }
+ explicit ShapeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<ShapeOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<ShapeOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<ShapeOptions>
+CreateShapeOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ circle::TensorType out_type = circle::TensorType_FLOAT32)
+{
+ ShapeOptionsBuilder builder_(_fbb);
+ builder_.add_out_type(out_type);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<ShapeOptions>
+CreateShapeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct RankOptionsT : public flatbuffers::NativeTable
+{
+ typedef RankOptions TableType;
+};
+
+struct RankOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef RankOptionsT NativeTableType;
+ typedef RankOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ RankOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(RankOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<RankOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const RankOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct RankOptionsBuilder
+{
+ typedef RankOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit RankOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<RankOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<RankOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<RankOptions> CreateRankOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ RankOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<RankOptions>
+CreateRankOptions(flatbuffers::FlatBufferBuilder &_fbb, const RankOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct PowOptionsT : public flatbuffers::NativeTable
+{
+ typedef PowOptions TableType;
+};
+
+struct PowOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef PowOptionsT NativeTableType;
+ typedef PowOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ PowOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(PowOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<PowOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct PowOptionsBuilder
+{
+ typedef PowOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit PowOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<PowOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<PowOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<PowOptions> CreatePowOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ PowOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<PowOptions>
+CreatePowOptions(flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct FakeQuantOptionsT : public flatbuffers::NativeTable
+{
+ typedef FakeQuantOptions TableType;
+ float min = 0.0f;
+ float max = 0.0f;
+ int32_t num_bits = 0;
+ bool narrow_range = false;
+};
+
+struct FakeQuantOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef FakeQuantOptionsT NativeTableType;
+ typedef FakeQuantOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_MIN = 4,
+ VT_MAX = 6,
+ VT_NUM_BITS = 8,
+ VT_NARROW_RANGE = 10
+ };
+ float min() const { return GetField<float>(VT_MIN, 0.0f); }
+ float max() const { return GetField<float>(VT_MAX, 0.0f); }
+ int32_t num_bits() const { return GetField<int32_t>(VT_NUM_BITS, 0); }
+ bool narrow_range() const { return GetField<uint8_t>(VT_NARROW_RANGE, 0) != 0; }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<float>(verifier, VT_MIN) &&
+ VerifyField<float>(verifier, VT_MAX) && VerifyField<int32_t>(verifier, VT_NUM_BITS) &&
+ VerifyField<uint8_t>(verifier, VT_NARROW_RANGE) && verifier.EndTable();
+ }
+ FakeQuantOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(FakeQuantOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<FakeQuantOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct FakeQuantOptionsBuilder
+{
+ typedef FakeQuantOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_min(float min) { fbb_.AddElement<float>(FakeQuantOptions::VT_MIN, min, 0.0f); }
+ void add_max(float max) { fbb_.AddElement<float>(FakeQuantOptions::VT_MAX, max, 0.0f); }
+ void add_num_bits(int32_t num_bits)
+ {
+ fbb_.AddElement<int32_t>(FakeQuantOptions::VT_NUM_BITS, num_bits, 0);
+ }
+ void add_narrow_range(bool narrow_range)
+ {
+ fbb_.AddElement<uint8_t>(FakeQuantOptions::VT_NARROW_RANGE, static_cast<uint8_t>(narrow_range),
+ 0);
+ }
+ explicit FakeQuantOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<FakeQuantOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<FakeQuantOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<FakeQuantOptions>
+CreateFakeQuantOptions(flatbuffers::FlatBufferBuilder &_fbb, float min = 0.0f, float max = 0.0f,
+ int32_t num_bits = 0, bool narrow_range = false)
+{
+ FakeQuantOptionsBuilder builder_(_fbb);
+ builder_.add_num_bits(num_bits);
+ builder_.add_max(max);
+ builder_.add_min(min);
+ builder_.add_narrow_range(narrow_range);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<FakeQuantOptions>
+CreateFakeQuantOptions(flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
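+// Editor's note: an illustrative sketch, not part of the flatc output. It exercises the object API
+// declared above (Pack plus the field accessors; the inline definitions appear later in this
+// header) by packing a FakeQuantOptionsT and reading the values back. The helper name and the
+// field values are assumptions for demonstration only.
+inline bool ExampleFakeQuantOptionsRoundTrip()
+{
+  FakeQuantOptionsT native;
+  native.min = -6.0f;
+  native.max = 6.0f;
+  native.num_bits = 8;
+  native.narrow_range = true;
+
+  flatbuffers::FlatBufferBuilder fbb;
+  fbb.Finish(FakeQuantOptions::Pack(fbb, &native));
+
+  const auto *packed = flatbuffers::GetRoot<FakeQuantOptions>(fbb.GetBufferPointer());
+  return packed->num_bits() == 8 && packed->narrow_range();
+}
+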
+struct PackOptionsT : public flatbuffers::NativeTable
+{
+ typedef PackOptions TableType;
+ int32_t values_count = 0;
+ int32_t axis = 0;
+};
+
+struct PackOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef PackOptionsT NativeTableType;
+ typedef PackOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_VALUES_COUNT = 4,
+ VT_AXIS = 6
+ };
+ int32_t values_count() const { return GetField<int32_t>(VT_VALUES_COUNT, 0); }
+ int32_t axis() const { return GetField<int32_t>(VT_AXIS, 0); }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_VALUES_COUNT) &&
+ VerifyField<int32_t>(verifier, VT_AXIS) && verifier.EndTable();
+ }
+ PackOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(PackOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<PackOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct PackOptionsBuilder
+{
+ typedef PackOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_values_count(int32_t values_count)
+ {
+ fbb_.AddElement<int32_t>(PackOptions::VT_VALUES_COUNT, values_count, 0);
+ }
+ void add_axis(int32_t axis) { fbb_.AddElement<int32_t>(PackOptions::VT_AXIS, axis, 0); }
+ explicit PackOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<PackOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<PackOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<PackOptions>
+CreatePackOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t values_count = 0, int32_t axis = 0)
+{
+ PackOptionsBuilder builder_(_fbb);
+ builder_.add_axis(axis);
+ builder_.add_values_count(values_count);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<PackOptions>
+CreatePackOptions(flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct LogicalOrOptionsT : public flatbuffers::NativeTable
+{
+ typedef LogicalOrOptions TableType;
+};
+
+struct LogicalOrOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef LogicalOrOptionsT NativeTableType;
+ typedef LogicalOrOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ LogicalOrOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(LogicalOrOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<LogicalOrOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct LogicalOrOptionsBuilder
+{
+ typedef LogicalOrOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit LogicalOrOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<LogicalOrOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<LogicalOrOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<LogicalOrOptions>
+CreateLogicalOrOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ LogicalOrOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<LogicalOrOptions>
+CreateLogicalOrOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct OneHotOptionsT : public flatbuffers::NativeTable
+{
+ typedef OneHotOptions TableType;
+ int32_t axis = 0;
+};
+
+struct OneHotOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef OneHotOptionsT NativeTableType;
+ typedef OneHotOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_AXIS = 4
+ };
+ int32_t axis() const { return GetField<int32_t>(VT_AXIS, 0); }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_AXIS) &&
+ verifier.EndTable();
+ }
+ OneHotOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(OneHotOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<OneHotOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct OneHotOptionsBuilder
+{
+ typedef OneHotOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_axis(int32_t axis) { fbb_.AddElement<int32_t>(OneHotOptions::VT_AXIS, axis, 0); }
+ explicit OneHotOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<OneHotOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<OneHotOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<OneHotOptions> CreateOneHotOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ int32_t axis = 0)
+{
+ OneHotOptionsBuilder builder_(_fbb);
+ builder_.add_axis(axis);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<OneHotOptions>
+CreateOneHotOptions(flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct AbsOptionsT : public flatbuffers::NativeTable
+{
+ typedef AbsOptions TableType;
+};
+
+struct AbsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef AbsOptionsT NativeTableType;
+ typedef AbsOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ AbsOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(AbsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<AbsOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const AbsOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct AbsOptionsBuilder
+{
+ typedef AbsOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit AbsOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<AbsOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<AbsOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<AbsOptions> CreateAbsOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ AbsOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<AbsOptions>
+CreateAbsOptions(flatbuffers::FlatBufferBuilder &_fbb, const AbsOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct HardSwishOptionsT : public flatbuffers::NativeTable
+{
+ typedef HardSwishOptions TableType;
+};
+
+struct HardSwishOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef HardSwishOptionsT NativeTableType;
+ typedef HardSwishOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ HardSwishOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(HardSwishOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<HardSwishOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const HardSwishOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct HardSwishOptionsBuilder
+{
+ typedef HardSwishOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit HardSwishOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<HardSwishOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<HardSwishOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<HardSwishOptions>
+CreateHardSwishOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ HardSwishOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<HardSwishOptions>
+CreateHardSwishOptions(flatbuffers::FlatBufferBuilder &_fbb, const HardSwishOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct LogicalAndOptionsT : public flatbuffers::NativeTable
+{
+ typedef LogicalAndOptions TableType;
+};
+
+struct LogicalAndOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef LogicalAndOptionsT NativeTableType;
+ typedef LogicalAndOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ LogicalAndOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(LogicalAndOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<LogicalAndOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalAndOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct LogicalAndOptionsBuilder
+{
+ typedef LogicalAndOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit LogicalAndOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<LogicalAndOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<LogicalAndOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<LogicalAndOptions>
+CreateLogicalAndOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ LogicalAndOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<LogicalAndOptions>
+CreateLogicalAndOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalAndOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct LogicalNotOptionsT : public flatbuffers::NativeTable
+{
+ typedef LogicalNotOptions TableType;
+};
+
+struct LogicalNotOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef LogicalNotOptionsT NativeTableType;
+ typedef LogicalNotOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ LogicalNotOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(LogicalNotOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<LogicalNotOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalNotOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct LogicalNotOptionsBuilder
+{
+ typedef LogicalNotOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit LogicalNotOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<LogicalNotOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<LogicalNotOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<LogicalNotOptions>
+CreateLogicalNotOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ LogicalNotOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<LogicalNotOptions>
+CreateLogicalNotOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalNotOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
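+// Editor's note: an illustrative sketch, not part of the flatc output. Empty option tables such as
+// LogicalNotOptions still have to be created so their offset can be stored in an operator's
+// builtin_options union defined elsewhere in this schema; Union() performs that type erasure.
+// The helper name is an assumption for demonstration only.
+inline flatbuffers::Offset<void>
+ExampleLogicalNotOptionsAsUnion(flatbuffers::FlatBufferBuilder &fbb)
+{
+  return CreateLogicalNotOptions(fbb).Union();
+}
+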
+struct UnpackOptionsT : public flatbuffers::NativeTable
+{
+ typedef UnpackOptions TableType;
+ int32_t num = 0;
+ int32_t axis = 0;
+};
+
+struct UnpackOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef UnpackOptionsT NativeTableType;
+ typedef UnpackOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_NUM = 4,
+ VT_AXIS = 6
+ };
+ int32_t num() const { return GetField<int32_t>(VT_NUM, 0); }
+ int32_t axis() const { return GetField<int32_t>(VT_AXIS, 0); }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_NUM) &&
+ VerifyField<int32_t>(verifier, VT_AXIS) && verifier.EndTable();
+ }
+ UnpackOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(UnpackOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<UnpackOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnpackOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct UnpackOptionsBuilder
+{
+ typedef UnpackOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_num(int32_t num) { fbb_.AddElement<int32_t>(UnpackOptions::VT_NUM, num, 0); }
+ void add_axis(int32_t axis) { fbb_.AddElement<int32_t>(UnpackOptions::VT_AXIS, axis, 0); }
+ explicit UnpackOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<UnpackOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<UnpackOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<UnpackOptions> CreateUnpackOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ int32_t num = 0, int32_t axis = 0)
+{
+ UnpackOptionsBuilder builder_(_fbb);
+ builder_.add_axis(axis);
+ builder_.add_num(num);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<UnpackOptions>
+CreateUnpackOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnpackOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct FloorDivOptionsT : public flatbuffers::NativeTable
+{
+ typedef FloorDivOptions TableType;
+};
+
+struct FloorDivOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef FloorDivOptionsT NativeTableType;
+ typedef FloorDivOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ FloorDivOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(FloorDivOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<FloorDivOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const FloorDivOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct FloorDivOptionsBuilder
+{
+ typedef FloorDivOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit FloorDivOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<FloorDivOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<FloorDivOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<FloorDivOptions>
+CreateFloorDivOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ FloorDivOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<FloorDivOptions>
+CreateFloorDivOptions(flatbuffers::FlatBufferBuilder &_fbb, const FloorDivOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct SquareOptionsT : public flatbuffers::NativeTable
+{
+ typedef SquareOptions TableType;
+};
+
+struct SquareOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef SquareOptionsT NativeTableType;
+ typedef SquareOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ SquareOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(SquareOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<SquareOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const SquareOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct SquareOptionsBuilder
+{
+ typedef SquareOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit SquareOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<SquareOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<SquareOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<SquareOptions> CreateSquareOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ SquareOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<SquareOptions>
+CreateSquareOptions(flatbuffers::FlatBufferBuilder &_fbb, const SquareOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct ZerosLikeOptionsT : public flatbuffers::NativeTable
+{
+ typedef ZerosLikeOptions TableType;
+};
+
+struct ZerosLikeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef ZerosLikeOptionsT NativeTableType;
+ typedef ZerosLikeOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ ZerosLikeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(ZerosLikeOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<ZerosLikeOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const ZerosLikeOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct ZerosLikeOptionsBuilder
+{
+ typedef ZerosLikeOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit ZerosLikeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<ZerosLikeOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<ZerosLikeOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<ZerosLikeOptions>
+CreateZerosLikeOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ ZerosLikeOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<ZerosLikeOptions>
+CreateZerosLikeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ZerosLikeOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct FillOptionsT : public flatbuffers::NativeTable
+{
+ typedef FillOptions TableType;
+};
+
+struct FillOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef FillOptionsT NativeTableType;
+ typedef FillOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ FillOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(FillOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<FillOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct FillOptionsBuilder
+{
+ typedef FillOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit FillOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<FillOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<FillOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<FillOptions> CreateFillOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ FillOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<FillOptions>
+CreateFillOptions(flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct FloorModOptionsT : public flatbuffers::NativeTable
+{
+ typedef FloorModOptions TableType;
+};
+
+struct FloorModOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef FloorModOptionsT NativeTableType;
+ typedef FloorModOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ FloorModOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(FloorModOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<FloorModOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const FloorModOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct FloorModOptionsBuilder
+{
+ typedef FloorModOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit FloorModOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<FloorModOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<FloorModOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<FloorModOptions>
+CreateFloorModOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ FloorModOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<FloorModOptions>
+CreateFloorModOptions(flatbuffers::FlatBufferBuilder &_fbb, const FloorModOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct RangeOptionsT : public flatbuffers::NativeTable
+{
+ typedef RangeOptions TableType;
+};
+
+struct RangeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef RangeOptionsT NativeTableType;
+ typedef RangeOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ RangeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(RangeOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<RangeOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const RangeOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct RangeOptionsBuilder
+{
+ typedef RangeOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit RangeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<RangeOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<RangeOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<RangeOptions> CreateRangeOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ RangeOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<RangeOptions>
+CreateRangeOptions(flatbuffers::FlatBufferBuilder &_fbb, const RangeOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct LeakyReluOptionsT : public flatbuffers::NativeTable
+{
+ typedef LeakyReluOptions TableType;
+ float alpha = 0.0f;
+};
+
+struct LeakyReluOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef LeakyReluOptionsT NativeTableType;
+ typedef LeakyReluOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_ALPHA = 4
+ };
+ float alpha() const { return GetField<float>(VT_ALPHA, 0.0f); }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<float>(verifier, VT_ALPHA) &&
+ verifier.EndTable();
+ }
+ LeakyReluOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(LeakyReluOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<LeakyReluOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const LeakyReluOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct LeakyReluOptionsBuilder
+{
+ typedef LeakyReluOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_alpha(float alpha) { fbb_.AddElement<float>(LeakyReluOptions::VT_ALPHA, alpha, 0.0f); }
+ explicit LeakyReluOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<LeakyReluOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<LeakyReluOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<LeakyReluOptions>
+CreateLeakyReluOptions(flatbuffers::FlatBufferBuilder &_fbb, float alpha = 0.0f)
+{
+ LeakyReluOptionsBuilder builder_(_fbb);
+ builder_.add_alpha(alpha);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<LeakyReluOptions>
+CreateLeakyReluOptions(flatbuffers::FlatBufferBuilder &_fbb, const LeakyReluOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct SquaredDifferenceOptionsT : public flatbuffers::NativeTable
+{
+ typedef SquaredDifferenceOptions TableType;
+};
+
+struct SquaredDifferenceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef SquaredDifferenceOptionsT NativeTableType;
+ typedef SquaredDifferenceOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ SquaredDifferenceOptionsT *
+ UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(SquaredDifferenceOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<SquaredDifferenceOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const SquaredDifferenceOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct SquaredDifferenceOptionsBuilder
+{
+ typedef SquaredDifferenceOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit SquaredDifferenceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<SquaredDifferenceOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<SquaredDifferenceOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<SquaredDifferenceOptions>
+CreateSquaredDifferenceOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ SquaredDifferenceOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<SquaredDifferenceOptions>
+CreateSquaredDifferenceOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ const SquaredDifferenceOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct MirrorPadOptionsT : public flatbuffers::NativeTable
+{
+ typedef MirrorPadOptions TableType;
+ circle::MirrorPadMode mode = circle::MirrorPadMode_REFLECT;
+};
+
+struct MirrorPadOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef MirrorPadOptionsT NativeTableType;
+ typedef MirrorPadOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_MODE = 4
+ };
+ circle::MirrorPadMode mode() const
+ {
+ return static_cast<circle::MirrorPadMode>(GetField<int8_t>(VT_MODE, 0));
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_MODE) &&
+ verifier.EndTable();
+ }
+ MirrorPadOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(MirrorPadOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<MirrorPadOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const MirrorPadOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct MirrorPadOptionsBuilder
+{
+ typedef MirrorPadOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_mode(circle::MirrorPadMode mode)
+ {
+ fbb_.AddElement<int8_t>(MirrorPadOptions::VT_MODE, static_cast<int8_t>(mode), 0);
+ }
+ explicit MirrorPadOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<MirrorPadOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<MirrorPadOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<MirrorPadOptions>
+CreateMirrorPadOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ circle::MirrorPadMode mode = circle::MirrorPadMode_REFLECT)
+{
+ MirrorPadOptionsBuilder builder_(_fbb);
+ builder_.add_mode(mode);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<MirrorPadOptions>
+CreateMirrorPadOptions(flatbuffers::FlatBufferBuilder &_fbb, const MirrorPadOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct UniqueOptionsT : public flatbuffers::NativeTable
+{
+ typedef UniqueOptions TableType;
+ circle::TensorType idx_out_type = circle::TensorType_INT32;
+};
+
+struct UniqueOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef UniqueOptionsT NativeTableType;
+ typedef UniqueOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_IDX_OUT_TYPE = 4
+ };
+ circle::TensorType idx_out_type() const
+ {
+ return static_cast<circle::TensorType>(GetField<int8_t>(VT_IDX_OUT_TYPE, 2));
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<int8_t>(verifier, VT_IDX_OUT_TYPE) &&
+ verifier.EndTable();
+ }
+ UniqueOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(UniqueOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<UniqueOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const UniqueOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct UniqueOptionsBuilder
+{
+ typedef UniqueOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_idx_out_type(circle::TensorType idx_out_type)
+ {
+ fbb_.AddElement<int8_t>(UniqueOptions::VT_IDX_OUT_TYPE, static_cast<int8_t>(idx_out_type), 2);
+ }
+ explicit UniqueOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<UniqueOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<UniqueOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<UniqueOptions>
+CreateUniqueOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ circle::TensorType idx_out_type = circle::TensorType_INT32)
+{
+ UniqueOptionsBuilder builder_(_fbb);
+ builder_.add_idx_out_type(idx_out_type);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<UniqueOptions>
+CreateUniqueOptions(flatbuffers::FlatBufferBuilder &_fbb, const UniqueOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
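+// Editor's note: an illustrative sketch, not part of the flatc output. It highlights the non-zero
+// field default above: TensorType_INT32 (2) is the stored default, so AddElement skips the write
+// when that value is passed and idx_out_type() still reports INT32 when the field is absent.
+// The helper name is an assumption for demonstration only.
+inline bool ExampleUniqueOptionsDefault()
+{
+  flatbuffers::FlatBufferBuilder fbb;
+  fbb.Finish(CreateUniqueOptions(fbb, circle::TensorType_INT32));
+
+  const auto *opts = flatbuffers::GetRoot<UniqueOptions>(fbb.GetBufferPointer());
+  return opts->idx_out_type() == circle::TensorType_INT32;
+}
+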
+struct ReverseV2OptionsT : public flatbuffers::NativeTable
+{
+ typedef ReverseV2Options TableType;
+};
+
+struct ReverseV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef ReverseV2OptionsT NativeTableType;
+ typedef ReverseV2OptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ ReverseV2OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(ReverseV2OptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<ReverseV2Options>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReverseV2OptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct ReverseV2OptionsBuilder
+{
+ typedef ReverseV2Options Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit ReverseV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<ReverseV2Options> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<ReverseV2Options>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<ReverseV2Options>
+CreateReverseV2Options(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ ReverseV2OptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<ReverseV2Options>
+CreateReverseV2Options(flatbuffers::FlatBufferBuilder &_fbb, const ReverseV2OptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct AddNOptionsT : public flatbuffers::NativeTable
+{
+ typedef AddNOptions TableType;
+};
+
+struct AddNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef AddNOptionsT NativeTableType;
+ typedef AddNOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ AddNOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(AddNOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<AddNOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const AddNOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct AddNOptionsBuilder
+{
+ typedef AddNOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit AddNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<AddNOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<AddNOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<AddNOptions> CreateAddNOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ AddNOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<AddNOptions>
+CreateAddNOptions(flatbuffers::FlatBufferBuilder &_fbb, const AddNOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct GatherNdOptionsT : public flatbuffers::NativeTable
+{
+ typedef GatherNdOptions TableType;
+};
+
+struct GatherNdOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef GatherNdOptionsT NativeTableType;
+ typedef GatherNdOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ GatherNdOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(GatherNdOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<GatherNdOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherNdOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct GatherNdOptionsBuilder
+{
+ typedef GatherNdOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit GatherNdOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<GatherNdOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<GatherNdOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<GatherNdOptions>
+CreateGatherNdOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ GatherNdOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<GatherNdOptions>
+CreateGatherNdOptions(flatbuffers::FlatBufferBuilder &_fbb, const GatherNdOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct WhereOptionsT : public flatbuffers::NativeTable
+{
+ typedef WhereOptions TableType;
+};
+
+struct WhereOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef WhereOptionsT NativeTableType;
+ typedef WhereOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ WhereOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(WhereOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<WhereOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const WhereOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct WhereOptionsBuilder
+{
+ typedef WhereOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit WhereOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<WhereOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<WhereOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<WhereOptions> CreateWhereOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ WhereOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<WhereOptions>
+CreateWhereOptions(flatbuffers::FlatBufferBuilder &_fbb, const WhereOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct ReverseSequenceOptionsT : public flatbuffers::NativeTable
+{
+ typedef ReverseSequenceOptions TableType;
+ int32_t seq_dim = 0;
+ int32_t batch_dim = 0;
+};
+
+struct ReverseSequenceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef ReverseSequenceOptionsT NativeTableType;
+ typedef ReverseSequenceOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_SEQ_DIM = 4,
+ VT_BATCH_DIM = 6
+ };
+ int32_t seq_dim() const { return GetField<int32_t>(VT_SEQ_DIM, 0); }
+ int32_t batch_dim() const { return GetField<int32_t>(VT_BATCH_DIM, 0); }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_SEQ_DIM) &&
+ VerifyField<int32_t>(verifier, VT_BATCH_DIM) && verifier.EndTable();
+ }
+ ReverseSequenceOptionsT *
+ UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(ReverseSequenceOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<ReverseSequenceOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct ReverseSequenceOptionsBuilder
+{
+ typedef ReverseSequenceOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_seq_dim(int32_t seq_dim)
+ {
+ fbb_.AddElement<int32_t>(ReverseSequenceOptions::VT_SEQ_DIM, seq_dim, 0);
+ }
+ void add_batch_dim(int32_t batch_dim)
+ {
+ fbb_.AddElement<int32_t>(ReverseSequenceOptions::VT_BATCH_DIM, batch_dim, 0);
+ }
+ explicit ReverseSequenceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<ReverseSequenceOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<ReverseSequenceOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<ReverseSequenceOptions>
+CreateReverseSequenceOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t seq_dim = 0,
+ int32_t batch_dim = 0)
+{
+ ReverseSequenceOptionsBuilder builder_(_fbb);
+ builder_.add_batch_dim(batch_dim);
+ builder_.add_seq_dim(seq_dim);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<ReverseSequenceOptions>
+CreateReverseSequenceOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ const ReverseSequenceOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
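+// Editor's note: an illustrative sketch, not part of the flatc output. It shows one way to check a
+// finished buffer rooted at the table above: flatbuffers::Verifier ends up calling the Verify()
+// method declared there. The helper name and field values are assumptions for demonstration only.
+inline bool ExampleVerifyReverseSequenceOptions()
+{
+  flatbuffers::FlatBufferBuilder fbb;
+  fbb.Finish(CreateReverseSequenceOptions(fbb, /*seq_dim=*/1, /*batch_dim=*/0));
+
+  flatbuffers::Verifier verifier(fbb.GetBufferPointer(), fbb.GetSize());
+  return verifier.VerifyBuffer<ReverseSequenceOptions>(nullptr);
+}
+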
+struct MatrixDiagOptionsT : public flatbuffers::NativeTable
+{
+ typedef MatrixDiagOptions TableType;
+};
+
+struct MatrixDiagOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef MatrixDiagOptionsT NativeTableType;
+ typedef MatrixDiagOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ MatrixDiagOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(MatrixDiagOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<MatrixDiagOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const MatrixDiagOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct MatrixDiagOptionsBuilder
+{
+ typedef MatrixDiagOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit MatrixDiagOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<MatrixDiagOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<MatrixDiagOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<MatrixDiagOptions>
+CreateMatrixDiagOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ MatrixDiagOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<MatrixDiagOptions>
+CreateMatrixDiagOptions(flatbuffers::FlatBufferBuilder &_fbb, const MatrixDiagOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct QuantizeOptionsT : public flatbuffers::NativeTable
+{
+ typedef QuantizeOptions TableType;
+};
+
+struct QuantizeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef QuantizeOptionsT NativeTableType;
+ typedef QuantizeOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ QuantizeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(QuantizeOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<QuantizeOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct QuantizeOptionsBuilder
+{
+ typedef QuantizeOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit QuantizeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<QuantizeOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<QuantizeOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<QuantizeOptions>
+CreateQuantizeOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ QuantizeOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<QuantizeOptions>
+CreateQuantizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct MatrixSetDiagOptionsT : public flatbuffers::NativeTable
+{
+ typedef MatrixSetDiagOptions TableType;
+};
+
+struct MatrixSetDiagOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef MatrixSetDiagOptionsT NativeTableType;
+ typedef MatrixSetDiagOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ MatrixSetDiagOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(MatrixSetDiagOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<MatrixSetDiagOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const MatrixSetDiagOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct MatrixSetDiagOptionsBuilder
+{
+ typedef MatrixSetDiagOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit MatrixSetDiagOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<MatrixSetDiagOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<MatrixSetDiagOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<MatrixSetDiagOptions>
+CreateMatrixSetDiagOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ MatrixSetDiagOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<MatrixSetDiagOptions>
+CreateMatrixSetDiagOptions(flatbuffers::FlatBufferBuilder &_fbb, const MatrixSetDiagOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct IfOptionsT : public flatbuffers::NativeTable
+{
+ typedef IfOptions TableType;
+ int32_t then_subgraph_index = 0;
+ int32_t else_subgraph_index = 0;
+};
+
+struct IfOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef IfOptionsT NativeTableType;
+ typedef IfOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_THEN_SUBGRAPH_INDEX = 4,
+ VT_ELSE_SUBGRAPH_INDEX = 6
+ };
+ int32_t then_subgraph_index() const { return GetField<int32_t>(VT_THEN_SUBGRAPH_INDEX, 0); }
+ int32_t else_subgraph_index() const { return GetField<int32_t>(VT_ELSE_SUBGRAPH_INDEX, 0); }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_THEN_SUBGRAPH_INDEX) &&
+ VerifyField<int32_t>(verifier, VT_ELSE_SUBGRAPH_INDEX) && verifier.EndTable();
+ }
+ IfOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(IfOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<IfOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct IfOptionsBuilder
+{
+ typedef IfOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_then_subgraph_index(int32_t then_subgraph_index)
+ {
+ fbb_.AddElement<int32_t>(IfOptions::VT_THEN_SUBGRAPH_INDEX, then_subgraph_index, 0);
+ }
+ void add_else_subgraph_index(int32_t else_subgraph_index)
+ {
+ fbb_.AddElement<int32_t>(IfOptions::VT_ELSE_SUBGRAPH_INDEX, else_subgraph_index, 0);
+ }
+ explicit IfOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<IfOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<IfOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<IfOptions> CreateIfOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ int32_t then_subgraph_index = 0,
+ int32_t else_subgraph_index = 0)
+{
+ IfOptionsBuilder builder_(_fbb);
+ builder_.add_else_subgraph_index(else_subgraph_index);
+ builder_.add_then_subgraph_index(then_subgraph_index);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<IfOptions>
+CreateIfOptions(flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct CallOnceOptionsT : public flatbuffers::NativeTable
+{
+ typedef CallOnceOptions TableType;
+ int32_t init_subgraph_index = 0;
+};
+
+struct CallOnceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef CallOnceOptionsT NativeTableType;
+ typedef CallOnceOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_INIT_SUBGRAPH_INDEX = 4
+ };
+ int32_t init_subgraph_index() const { return GetField<int32_t>(VT_INIT_SUBGRAPH_INDEX, 0); }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_INIT_SUBGRAPH_INDEX) &&
+ verifier.EndTable();
+ }
+ CallOnceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(CallOnceOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<CallOnceOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const CallOnceOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct CallOnceOptionsBuilder
+{
+ typedef CallOnceOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_init_subgraph_index(int32_t init_subgraph_index)
+ {
+ fbb_.AddElement<int32_t>(CallOnceOptions::VT_INIT_SUBGRAPH_INDEX, init_subgraph_index, 0);
+ }
+ explicit CallOnceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<CallOnceOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<CallOnceOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<CallOnceOptions>
+CreateCallOnceOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t init_subgraph_index = 0)
+{
+ CallOnceOptionsBuilder builder_(_fbb);
+ builder_.add_init_subgraph_index(init_subgraph_index);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<CallOnceOptions>
+CreateCallOnceOptions(flatbuffers::FlatBufferBuilder &_fbb, const CallOnceOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct WhileOptionsT : public flatbuffers::NativeTable
+{
+ typedef WhileOptions TableType;
+ int32_t cond_subgraph_index = 0;
+ int32_t body_subgraph_index = 0;
+};
+
+struct WhileOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef WhileOptionsT NativeTableType;
+ typedef WhileOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_COND_SUBGRAPH_INDEX = 4,
+ VT_BODY_SUBGRAPH_INDEX = 6
+ };
+ int32_t cond_subgraph_index() const { return GetField<int32_t>(VT_COND_SUBGRAPH_INDEX, 0); }
+ int32_t body_subgraph_index() const { return GetField<int32_t>(VT_BODY_SUBGRAPH_INDEX, 0); }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_COND_SUBGRAPH_INDEX) &&
+ VerifyField<int32_t>(verifier, VT_BODY_SUBGRAPH_INDEX) && verifier.EndTable();
+ }
+ WhileOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(WhileOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<WhileOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const WhileOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct WhileOptionsBuilder
+{
+ typedef WhileOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_cond_subgraph_index(int32_t cond_subgraph_index)
+ {
+ fbb_.AddElement<int32_t>(WhileOptions::VT_COND_SUBGRAPH_INDEX, cond_subgraph_index, 0);
+ }
+ void add_body_subgraph_index(int32_t body_subgraph_index)
+ {
+ fbb_.AddElement<int32_t>(WhileOptions::VT_BODY_SUBGRAPH_INDEX, body_subgraph_index, 0);
+ }
+ explicit WhileOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<WhileOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<WhileOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<WhileOptions> CreateWhileOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ int32_t cond_subgraph_index = 0,
+ int32_t body_subgraph_index = 0)
+{
+ WhileOptionsBuilder builder_(_fbb);
+ builder_.add_body_subgraph_index(body_subgraph_index);
+ builder_.add_cond_subgraph_index(cond_subgraph_index);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<WhileOptions>
+CreateWhileOptions(flatbuffers::FlatBufferBuilder &_fbb, const WhileOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct NonMaxSuppressionV4OptionsT : public flatbuffers::NativeTable
+{
+ typedef NonMaxSuppressionV4Options TableType;
+};
+
+struct NonMaxSuppressionV4Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef NonMaxSuppressionV4OptionsT NativeTableType;
+ typedef NonMaxSuppressionV4OptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ NonMaxSuppressionV4OptionsT *
+ UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(NonMaxSuppressionV4OptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<NonMaxSuppressionV4Options>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV4OptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct NonMaxSuppressionV4OptionsBuilder
+{
+ typedef NonMaxSuppressionV4Options Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit NonMaxSuppressionV4OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<NonMaxSuppressionV4Options> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<NonMaxSuppressionV4Options>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<NonMaxSuppressionV4Options>
+CreateNonMaxSuppressionV4Options(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ NonMaxSuppressionV4OptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<NonMaxSuppressionV4Options>
+CreateNonMaxSuppressionV4Options(flatbuffers::FlatBufferBuilder &_fbb,
+ const NonMaxSuppressionV4OptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct NonMaxSuppressionV5OptionsT : public flatbuffers::NativeTable
+{
+ typedef NonMaxSuppressionV5Options TableType;
+};
+
+struct NonMaxSuppressionV5Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef NonMaxSuppressionV5OptionsT NativeTableType;
+ typedef NonMaxSuppressionV5OptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ NonMaxSuppressionV5OptionsT *
+ UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(NonMaxSuppressionV5OptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<NonMaxSuppressionV5Options>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV5OptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct NonMaxSuppressionV5OptionsBuilder
+{
+ typedef NonMaxSuppressionV5Options Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit NonMaxSuppressionV5OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<NonMaxSuppressionV5Options> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<NonMaxSuppressionV5Options>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<NonMaxSuppressionV5Options>
+CreateNonMaxSuppressionV5Options(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ NonMaxSuppressionV5OptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<NonMaxSuppressionV5Options>
+CreateNonMaxSuppressionV5Options(flatbuffers::FlatBufferBuilder &_fbb,
+ const NonMaxSuppressionV5OptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct ScatterNdOptionsT : public flatbuffers::NativeTable
+{
+ typedef ScatterNdOptions TableType;
+};
+
+struct ScatterNdOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef ScatterNdOptionsT NativeTableType;
+ typedef ScatterNdOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ ScatterNdOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(ScatterNdOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<ScatterNdOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct ScatterNdOptionsBuilder
+{
+ typedef ScatterNdOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit ScatterNdOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<ScatterNdOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<ScatterNdOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<ScatterNdOptions>
+CreateScatterNdOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ ScatterNdOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<ScatterNdOptions>
+CreateScatterNdOptions(flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct SelectV2OptionsT : public flatbuffers::NativeTable
+{
+ typedef SelectV2Options TableType;
+};
+
+struct SelectV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef SelectV2OptionsT NativeTableType;
+ typedef SelectV2OptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ SelectV2OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(SelectV2OptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<SelectV2Options>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const SelectV2OptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct SelectV2OptionsBuilder
+{
+ typedef SelectV2Options Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit SelectV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<SelectV2Options> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<SelectV2Options>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<SelectV2Options>
+CreateSelectV2Options(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ SelectV2OptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<SelectV2Options>
+CreateSelectV2Options(flatbuffers::FlatBufferBuilder &_fbb, const SelectV2OptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct DensifyOptionsT : public flatbuffers::NativeTable
+{
+ typedef DensifyOptions TableType;
+};
+
+struct DensifyOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef DensifyOptionsT NativeTableType;
+ typedef DensifyOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ DensifyOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(DensifyOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<DensifyOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const DensifyOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct DensifyOptionsBuilder
+{
+ typedef DensifyOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit DensifyOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<DensifyOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<DensifyOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<DensifyOptions>
+CreateDensifyOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ DensifyOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<DensifyOptions>
+CreateDensifyOptions(flatbuffers::FlatBufferBuilder &_fbb, const DensifyOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct SegmentSumOptionsT : public flatbuffers::NativeTable
+{
+ typedef SegmentSumOptions TableType;
+};
+
+struct SegmentSumOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef SegmentSumOptionsT NativeTableType;
+ typedef SegmentSumOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ SegmentSumOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(SegmentSumOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<SegmentSumOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const SegmentSumOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct SegmentSumOptionsBuilder
+{
+ typedef SegmentSumOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit SegmentSumOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<SegmentSumOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<SegmentSumOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<SegmentSumOptions>
+CreateSegmentSumOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ SegmentSumOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<SegmentSumOptions>
+CreateSegmentSumOptions(flatbuffers::FlatBufferBuilder &_fbb, const SegmentSumOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct BatchMatMulOptionsT : public flatbuffers::NativeTable
+{
+ typedef BatchMatMulOptions TableType;
+ bool adjoint_lhs = false;
+ bool adjoint_rhs = false;
+ bool asymmetric_quantize_inputs = false;
+};
+
+struct BatchMatMulOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef BatchMatMulOptionsT NativeTableType;
+ typedef BatchMatMulOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_ADJOINT_LHS = 4,
+ VT_ADJOINT_RHS = 6,
+ VT_ASYMMETRIC_QUANTIZE_INPUTS = 8
+ };
+ bool adjoint_lhs() const { return GetField<uint8_t>(VT_ADJOINT_LHS, 0) != 0; }
+ bool adjoint_rhs() const { return GetField<uint8_t>(VT_ADJOINT_RHS, 0) != 0; }
+ bool asymmetric_quantize_inputs() const
+ {
+ return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_ADJOINT_LHS) &&
+ VerifyField<uint8_t>(verifier, VT_ADJOINT_RHS) &&
+ VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable();
+ }
+ BatchMatMulOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(BatchMatMulOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<BatchMatMulOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct BatchMatMulOptionsBuilder
+{
+ typedef BatchMatMulOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_adjoint_lhs(bool adjoint_lhs)
+ {
+ fbb_.AddElement<uint8_t>(BatchMatMulOptions::VT_ADJOINT_LHS, static_cast<uint8_t>(adjoint_lhs),
+ 0);
+ }
+ void add_adjoint_rhs(bool adjoint_rhs)
+ {
+ fbb_.AddElement<uint8_t>(BatchMatMulOptions::VT_ADJOINT_RHS, static_cast<uint8_t>(adjoint_rhs),
+ 0);
+ }
+ void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs)
+ {
+ fbb_.AddElement<uint8_t>(BatchMatMulOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS,
+ static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
+ }
+ explicit BatchMatMulOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<BatchMatMulOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<BatchMatMulOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<BatchMatMulOptions>
+CreateBatchMatMulOptions(flatbuffers::FlatBufferBuilder &_fbb, bool adjoint_lhs = false,
+ bool adjoint_rhs = false, bool asymmetric_quantize_inputs = false)
+{
+ BatchMatMulOptionsBuilder builder_(_fbb);
+ builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs);
+ builder_.add_adjoint_rhs(adjoint_rhs);
+ builder_.add_adjoint_lhs(adjoint_lhs);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<BatchMatMulOptions>
+CreateBatchMatMulOptions(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct CumsumOptionsT : public flatbuffers::NativeTable
+{
+ typedef CumsumOptions TableType;
+ bool exclusive = false;
+ bool reverse = false;
+};
+
+struct CumsumOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef CumsumOptionsT NativeTableType;
+ typedef CumsumOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_EXCLUSIVE = 4,
+ VT_REVERSE = 6
+ };
+ bool exclusive() const { return GetField<uint8_t>(VT_EXCLUSIVE, 0) != 0; }
+ bool reverse() const { return GetField<uint8_t>(VT_REVERSE, 0) != 0; }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<uint8_t>(verifier, VT_EXCLUSIVE) &&
+ VerifyField<uint8_t>(verifier, VT_REVERSE) && verifier.EndTable();
+ }
+ CumsumOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(CumsumOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<CumsumOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct CumsumOptionsBuilder
+{
+ typedef CumsumOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_exclusive(bool exclusive)
+ {
+ fbb_.AddElement<uint8_t>(CumsumOptions::VT_EXCLUSIVE, static_cast<uint8_t>(exclusive), 0);
+ }
+ void add_reverse(bool reverse)
+ {
+ fbb_.AddElement<uint8_t>(CumsumOptions::VT_REVERSE, static_cast<uint8_t>(reverse), 0);
+ }
+ explicit CumsumOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<CumsumOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<CumsumOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<CumsumOptions> CreateCumsumOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ bool exclusive = false,
+ bool reverse = false)
+{
+ CumsumOptionsBuilder builder_(_fbb);
+ builder_.add_reverse(reverse);
+ builder_.add_exclusive(exclusive);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<CumsumOptions>
+CreateCumsumOptions(flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct BroadcastToOptionsT : public flatbuffers::NativeTable
+{
+ typedef BroadcastToOptions TableType;
+};
+
+struct BroadcastToOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef BroadcastToOptionsT NativeTableType;
+ typedef BroadcastToOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ BroadcastToOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(BroadcastToOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<BroadcastToOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const BroadcastToOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct BroadcastToOptionsBuilder
+{
+ typedef BroadcastToOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit BroadcastToOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<BroadcastToOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<BroadcastToOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<BroadcastToOptions>
+CreateBroadcastToOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ BroadcastToOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<BroadcastToOptions>
+CreateBroadcastToOptions(flatbuffers::FlatBufferBuilder &_fbb, const BroadcastToOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct Rfft2dOptionsT : public flatbuffers::NativeTable
+{
+ typedef Rfft2dOptions TableType;
+};
+
+struct Rfft2dOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef Rfft2dOptionsT NativeTableType;
+ typedef Rfft2dOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ Rfft2dOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(Rfft2dOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<Rfft2dOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct Rfft2dOptionsBuilder
+{
+ typedef Rfft2dOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit Rfft2dOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<Rfft2dOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<Rfft2dOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<Rfft2dOptions> CreateRfft2dOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ Rfft2dOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<Rfft2dOptions>
+CreateRfft2dOptions(flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct HashtableOptionsT : public flatbuffers::NativeTable
+{
+ typedef HashtableOptions TableType;
+ int32_t table_id = 0;
+ circle::TensorType key_dtype = circle::TensorType_FLOAT32;
+ circle::TensorType value_dtype = circle::TensorType_FLOAT32;
+};
+
+struct HashtableOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef HashtableOptionsT NativeTableType;
+ typedef HashtableOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_TABLE_ID = 4,
+ VT_KEY_DTYPE = 6,
+ VT_VALUE_DTYPE = 8
+ };
+ int32_t table_id() const { return GetField<int32_t>(VT_TABLE_ID, 0); }
+ circle::TensorType key_dtype() const
+ {
+ return static_cast<circle::TensorType>(GetField<int8_t>(VT_KEY_DTYPE, 0));
+ }
+ circle::TensorType value_dtype() const
+ {
+ return static_cast<circle::TensorType>(GetField<int8_t>(VT_VALUE_DTYPE, 0));
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_TABLE_ID) &&
+ VerifyField<int8_t>(verifier, VT_KEY_DTYPE) &&
+ VerifyField<int8_t>(verifier, VT_VALUE_DTYPE) && verifier.EndTable();
+ }
+ HashtableOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(HashtableOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<HashtableOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct HashtableOptionsBuilder
+{
+ typedef HashtableOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_table_id(int32_t table_id)
+ {
+ fbb_.AddElement<int32_t>(HashtableOptions::VT_TABLE_ID, table_id, 0);
+ }
+ void add_key_dtype(circle::TensorType key_dtype)
+ {
+ fbb_.AddElement<int8_t>(HashtableOptions::VT_KEY_DTYPE, static_cast<int8_t>(key_dtype), 0);
+ }
+ void add_value_dtype(circle::TensorType value_dtype)
+ {
+ fbb_.AddElement<int8_t>(HashtableOptions::VT_VALUE_DTYPE, static_cast<int8_t>(value_dtype), 0);
+ }
+ explicit HashtableOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<HashtableOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<HashtableOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<HashtableOptions>
+CreateHashtableOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t table_id = 0,
+ circle::TensorType key_dtype = circle::TensorType_FLOAT32,
+ circle::TensorType value_dtype = circle::TensorType_FLOAT32)
+{
+ HashtableOptionsBuilder builder_(_fbb);
+ builder_.add_table_id(table_id);
+ builder_.add_value_dtype(value_dtype);
+ builder_.add_key_dtype(key_dtype);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<HashtableOptions>
+CreateHashtableOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct HashtableFindOptionsT : public flatbuffers::NativeTable
+{
+ typedef HashtableFindOptions TableType;
+};
+
+struct HashtableFindOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef HashtableFindOptionsT NativeTableType;
+ typedef HashtableFindOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ HashtableFindOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(HashtableFindOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<HashtableFindOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableFindOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct HashtableFindOptionsBuilder
+{
+ typedef HashtableFindOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit HashtableFindOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<HashtableFindOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<HashtableFindOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<HashtableFindOptions>
+CreateHashtableFindOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ HashtableFindOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<HashtableFindOptions>
+CreateHashtableFindOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableFindOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct HashtableImportOptionsT : public flatbuffers::NativeTable
+{
+ typedef HashtableImportOptions TableType;
+};
+
+struct HashtableImportOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef HashtableImportOptionsT NativeTableType;
+ typedef HashtableImportOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ HashtableImportOptionsT *
+ UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(HashtableImportOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<HashtableImportOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableImportOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct HashtableImportOptionsBuilder
+{
+ typedef HashtableImportOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit HashtableImportOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<HashtableImportOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<HashtableImportOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<HashtableImportOptions>
+CreateHashtableImportOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ HashtableImportOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<HashtableImportOptions>
+CreateHashtableImportOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ const HashtableImportOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct HashtableSizeOptionsT : public flatbuffers::NativeTable
+{
+ typedef HashtableSizeOptions TableType;
+};
+
+struct HashtableSizeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef HashtableSizeOptionsT NativeTableType;
+ typedef HashtableSizeOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ HashtableSizeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(HashtableSizeOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<HashtableSizeOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableSizeOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct HashtableSizeOptionsBuilder
+{
+ typedef HashtableSizeOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit HashtableSizeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<HashtableSizeOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<HashtableSizeOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<HashtableSizeOptions>
+CreateHashtableSizeOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ HashtableSizeOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<HashtableSizeOptions>
+CreateHashtableSizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableSizeOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct VarHandleOptionsT : public flatbuffers::NativeTable
+{
+ typedef VarHandleOptions TableType;
+ std::string container{};
+ std::string shared_name{};
+};
+
+struct VarHandleOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef VarHandleOptionsT NativeTableType;
+ typedef VarHandleOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_CONTAINER = 4,
+ VT_SHARED_NAME = 6
+ };
+ const flatbuffers::String *container() const
+ {
+ return GetPointer<const flatbuffers::String *>(VT_CONTAINER);
+ }
+ const flatbuffers::String *shared_name() const
+ {
+ return GetPointer<const flatbuffers::String *>(VT_SHARED_NAME);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_CONTAINER) &&
+ verifier.VerifyString(container()) && VerifyOffset(verifier, VT_SHARED_NAME) &&
+ verifier.VerifyString(shared_name()) && verifier.EndTable();
+ }
+ VarHandleOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(VarHandleOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<VarHandleOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const VarHandleOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct VarHandleOptionsBuilder
+{
+ typedef VarHandleOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_container(flatbuffers::Offset<flatbuffers::String> container)
+ {
+ fbb_.AddOffset(VarHandleOptions::VT_CONTAINER, container);
+ }
+ void add_shared_name(flatbuffers::Offset<flatbuffers::String> shared_name)
+ {
+ fbb_.AddOffset(VarHandleOptions::VT_SHARED_NAME, shared_name);
+ }
+ explicit VarHandleOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<VarHandleOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<VarHandleOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<VarHandleOptions>
+CreateVarHandleOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ flatbuffers::Offset<flatbuffers::String> container = 0,
+ flatbuffers::Offset<flatbuffers::String> shared_name = 0)
+{
+ VarHandleOptionsBuilder builder_(_fbb);
+ builder_.add_shared_name(shared_name);
+ builder_.add_container(container);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<VarHandleOptions>
+CreateVarHandleOptionsDirect(flatbuffers::FlatBufferBuilder &_fbb, const char *container = nullptr,
+ const char *shared_name = nullptr)
+{
+ auto container__ = container ? _fbb.CreateString(container) : 0;
+ auto shared_name__ = shared_name ? _fbb.CreateString(shared_name) : 0;
+ return circle::CreateVarHandleOptions(_fbb, container__, shared_name__);
+}
+
+flatbuffers::Offset<VarHandleOptions>
+CreateVarHandleOptions(flatbuffers::FlatBufferBuilder &_fbb, const VarHandleOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct ReadVariableOptionsT : public flatbuffers::NativeTable
+{
+ typedef ReadVariableOptions TableType;
+};
+
+struct ReadVariableOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef ReadVariableOptionsT NativeTableType;
+ typedef ReadVariableOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ ReadVariableOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(ReadVariableOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<ReadVariableOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReadVariableOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct ReadVariableOptionsBuilder
+{
+ typedef ReadVariableOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit ReadVariableOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<ReadVariableOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<ReadVariableOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<ReadVariableOptions>
+CreateReadVariableOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ ReadVariableOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<ReadVariableOptions>
+CreateReadVariableOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReadVariableOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct AssignVariableOptionsT : public flatbuffers::NativeTable
+{
+ typedef AssignVariableOptions TableType;
+};
+
+struct AssignVariableOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef AssignVariableOptionsT NativeTableType;
+ typedef AssignVariableOptionsBuilder Builder;
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && verifier.EndTable();
+ }
+ AssignVariableOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(AssignVariableOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<AssignVariableOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const AssignVariableOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct AssignVariableOptionsBuilder
+{
+ typedef AssignVariableOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit AssignVariableOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<AssignVariableOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<AssignVariableOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<AssignVariableOptions>
+CreateAssignVariableOptions(flatbuffers::FlatBufferBuilder &_fbb)
+{
+ AssignVariableOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<AssignVariableOptions>
+CreateAssignVariableOptions(flatbuffers::FlatBufferBuilder &_fbb, const AssignVariableOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct RandomOptionsT : public flatbuffers::NativeTable
+{
+ typedef RandomOptions TableType;
+ int32_t seed = 0;
+ int32_t seed2 = 0;
+};
+
+struct RandomOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef RandomOptionsT NativeTableType;
+ typedef RandomOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_SEED = 4,
+ VT_SEED2 = 6
+ };
+ int32_t seed() const { return GetField<int32_t>(VT_SEED, 0); }
+ int32_t seed2() const { return GetField<int32_t>(VT_SEED2, 0); }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_SEED) &&
+ VerifyField<int32_t>(verifier, VT_SEED2) && verifier.EndTable();
+ }
+ RandomOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(RandomOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<RandomOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const RandomOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct RandomOptionsBuilder
+{
+ typedef RandomOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_seed(int32_t seed) { fbb_.AddElement<int32_t>(RandomOptions::VT_SEED, seed, 0); }
+ void add_seed2(int32_t seed2) { fbb_.AddElement<int32_t>(RandomOptions::VT_SEED2, seed2, 0); }
+ explicit RandomOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<RandomOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<RandomOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<RandomOptions> CreateRandomOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ int32_t seed = 0, int32_t seed2 = 0)
+{
+ RandomOptionsBuilder builder_(_fbb);
+ builder_.add_seed2(seed2);
+ builder_.add_seed(seed);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<RandomOptions>
+CreateRandomOptions(flatbuffers::FlatBufferBuilder &_fbb, const RandomOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct BCQGatherOptionsT : public flatbuffers::NativeTable
+{
+ typedef BCQGatherOptions TableType;
+ int32_t input_hidden_size = 0;
+ int32_t axis = 0;
+};
+
+struct BCQGatherOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef BCQGatherOptionsT NativeTableType;
+ typedef BCQGatherOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_INPUT_HIDDEN_SIZE = 4,
+ VT_AXIS = 6
+ };
+ int32_t input_hidden_size() const { return GetField<int32_t>(VT_INPUT_HIDDEN_SIZE, 0); }
+ int32_t axis() const { return GetField<int32_t>(VT_AXIS, 0); }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_INPUT_HIDDEN_SIZE) &&
+ VerifyField<int32_t>(verifier, VT_AXIS) && verifier.EndTable();
+ }
+ BCQGatherOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(BCQGatherOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<BCQGatherOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const BCQGatherOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct BCQGatherOptionsBuilder
+{
+ typedef BCQGatherOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_input_hidden_size(int32_t input_hidden_size)
+ {
+ fbb_.AddElement<int32_t>(BCQGatherOptions::VT_INPUT_HIDDEN_SIZE, input_hidden_size, 0);
+ }
+ void add_axis(int32_t axis) { fbb_.AddElement<int32_t>(BCQGatherOptions::VT_AXIS, axis, 0); }
+ explicit BCQGatherOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<BCQGatherOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<BCQGatherOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<BCQGatherOptions>
+CreateBCQGatherOptions(flatbuffers::FlatBufferBuilder &_fbb, int32_t input_hidden_size = 0,
+ int32_t axis = 0)
+{
+ BCQGatherOptionsBuilder builder_(_fbb);
+ builder_.add_axis(axis);
+ builder_.add_input_hidden_size(input_hidden_size);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<BCQGatherOptions>
+CreateBCQGatherOptions(flatbuffers::FlatBufferBuilder &_fbb, const BCQGatherOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct BCQFullyConnectedOptionsT : public flatbuffers::NativeTable
+{
+ typedef BCQFullyConnectedOptions TableType;
+ int32_t weights_hidden_size = 0;
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE;
+};
+
+struct BCQFullyConnectedOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef BCQFullyConnectedOptionsT NativeTableType;
+ typedef BCQFullyConnectedOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_WEIGHTS_HIDDEN_SIZE = 4,
+ VT_FUSED_ACTIVATION_FUNCTION = 6
+ };
+ int32_t weights_hidden_size() const { return GetField<int32_t>(VT_WEIGHTS_HIDDEN_SIZE, 0); }
+ circle::ActivationFunctionType fused_activation_function() const
+ {
+ return static_cast<circle::ActivationFunctionType>(
+ GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<int32_t>(verifier, VT_WEIGHTS_HIDDEN_SIZE) &&
+ VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
+ }
+ BCQFullyConnectedOptionsT *
+ UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(BCQFullyConnectedOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<BCQFullyConnectedOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const BCQFullyConnectedOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct BCQFullyConnectedOptionsBuilder
+{
+ typedef BCQFullyConnectedOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_weights_hidden_size(int32_t weights_hidden_size)
+ {
+ fbb_.AddElement<int32_t>(BCQFullyConnectedOptions::VT_WEIGHTS_HIDDEN_SIZE, weights_hidden_size,
+ 0);
+ }
+ void add_fused_activation_function(circle::ActivationFunctionType fused_activation_function)
+ {
+ fbb_.AddElement<int8_t>(BCQFullyConnectedOptions::VT_FUSED_ACTIVATION_FUNCTION,
+ static_cast<int8_t>(fused_activation_function), 0);
+ }
+ explicit BCQFullyConnectedOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<BCQFullyConnectedOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<BCQFullyConnectedOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<BCQFullyConnectedOptions> CreateBCQFullyConnectedOptions(
+ flatbuffers::FlatBufferBuilder &_fbb, int32_t weights_hidden_size = 0,
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE)
+{
+ BCQFullyConnectedOptionsBuilder builder_(_fbb);
+ builder_.add_weights_hidden_size(weights_hidden_size);
+ builder_.add_fused_activation_function(fused_activation_function);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<BCQFullyConnectedOptions>
+CreateBCQFullyConnectedOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ const BCQFullyConnectedOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct InstanceNormOptionsT : public flatbuffers::NativeTable
+{
+ typedef InstanceNormOptions TableType;
+ float epsilon = 0.0f;
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE;
+};
+
+struct InstanceNormOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef InstanceNormOptionsT NativeTableType;
+ typedef InstanceNormOptionsBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_EPSILON = 4,
+ VT_FUSED_ACTIVATION_FUNCTION = 6
+ };
+ float epsilon() const { return GetField<float>(VT_EPSILON, 0.0f); }
+ circle::ActivationFunctionType fused_activation_function() const
+ {
+ return static_cast<circle::ActivationFunctionType>(
+ GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<float>(verifier, VT_EPSILON) &&
+ VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION) && verifier.EndTable();
+ }
+ InstanceNormOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(InstanceNormOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<InstanceNormOptions>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const InstanceNormOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct InstanceNormOptionsBuilder
+{
+ typedef InstanceNormOptions Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_epsilon(float epsilon)
+ {
+ fbb_.AddElement<float>(InstanceNormOptions::VT_EPSILON, epsilon, 0.0f);
+ }
+ void add_fused_activation_function(circle::ActivationFunctionType fused_activation_function)
+ {
+ fbb_.AddElement<int8_t>(InstanceNormOptions::VT_FUSED_ACTIVATION_FUNCTION,
+ static_cast<int8_t>(fused_activation_function), 0);
+ }
+ explicit InstanceNormOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<InstanceNormOptions> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<InstanceNormOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<InstanceNormOptions> CreateInstanceNormOptions(
+ flatbuffers::FlatBufferBuilder &_fbb, float epsilon = 0.0f,
+ circle::ActivationFunctionType fused_activation_function = circle::ActivationFunctionType_NONE)
+{
+ InstanceNormOptionsBuilder builder_(_fbb);
+ builder_.add_epsilon(epsilon);
+ builder_.add_fused_activation_function(fused_activation_function);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<InstanceNormOptions>
+CreateInstanceNormOptions(flatbuffers::FlatBufferBuilder &_fbb, const InstanceNormOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct OperatorCodeT : public flatbuffers::NativeTable
+{
+ typedef OperatorCode TableType;
+ int8_t deprecated_builtin_code = 0;
+ std::string custom_code{};
+ int32_t version = 1;
+ circle::BuiltinOperator builtin_code = circle::BuiltinOperator_ADD;
+};
+
+struct OperatorCode FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef OperatorCodeT NativeTableType;
+ typedef OperatorCodeBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_DEPRECATED_BUILTIN_CODE = 4,
+ VT_CUSTOM_CODE = 6,
+ VT_VERSION = 8,
+ VT_BUILTIN_CODE = 10
+ };
+ int8_t deprecated_builtin_code() const { return GetField<int8_t>(VT_DEPRECATED_BUILTIN_CODE, 0); }
+ const flatbuffers::String *custom_code() const
+ {
+ return GetPointer<const flatbuffers::String *>(VT_CUSTOM_CODE);
+ }
+ int32_t version() const { return GetField<int32_t>(VT_VERSION, 1); }
+ circle::BuiltinOperator builtin_code() const
+ {
+ return static_cast<circle::BuiltinOperator>(GetField<int32_t>(VT_BUILTIN_CODE, 0));
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) &&
+ VerifyField<int8_t>(verifier, VT_DEPRECATED_BUILTIN_CODE) &&
+ VerifyOffset(verifier, VT_CUSTOM_CODE) && verifier.VerifyString(custom_code()) &&
+ VerifyField<int32_t>(verifier, VT_VERSION) &&
+ VerifyField<int32_t>(verifier, VT_BUILTIN_CODE) && verifier.EndTable();
+ }
+ OperatorCodeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(OperatorCodeT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<OperatorCode>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct OperatorCodeBuilder
+{
+ typedef OperatorCode Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_deprecated_builtin_code(int8_t deprecated_builtin_code)
+ {
+ fbb_.AddElement<int8_t>(OperatorCode::VT_DEPRECATED_BUILTIN_CODE, deprecated_builtin_code, 0);
+ }
+ void add_custom_code(flatbuffers::Offset<flatbuffers::String> custom_code)
+ {
+ fbb_.AddOffset(OperatorCode::VT_CUSTOM_CODE, custom_code);
+ }
+ void add_version(int32_t version)
+ {
+ fbb_.AddElement<int32_t>(OperatorCode::VT_VERSION, version, 1);
+ }
+ void add_builtin_code(circle::BuiltinOperator builtin_code)
+ {
+ fbb_.AddElement<int32_t>(OperatorCode::VT_BUILTIN_CODE, static_cast<int32_t>(builtin_code), 0);
+ }
+ explicit OperatorCodeBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<OperatorCode> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<OperatorCode>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<OperatorCode>
+CreateOperatorCode(flatbuffers::FlatBufferBuilder &_fbb, int8_t deprecated_builtin_code = 0,
+ flatbuffers::Offset<flatbuffers::String> custom_code = 0, int32_t version = 1,
+ circle::BuiltinOperator builtin_code = circle::BuiltinOperator_ADD)
+{
+ OperatorCodeBuilder builder_(_fbb);
+ builder_.add_builtin_code(builtin_code);
+ builder_.add_version(version);
+ builder_.add_custom_code(custom_code);
+ builder_.add_deprecated_builtin_code(deprecated_builtin_code);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<OperatorCode>
+CreateOperatorCodeDirect(flatbuffers::FlatBufferBuilder &_fbb, int8_t deprecated_builtin_code = 0,
+ const char *custom_code = nullptr, int32_t version = 1,
+ circle::BuiltinOperator builtin_code = circle::BuiltinOperator_ADD)
+{
+ auto custom_code__ = custom_code ? _fbb.CreateString(custom_code) : 0;
+ return circle::CreateOperatorCode(_fbb, deprecated_builtin_code, custom_code__, version,
+ builtin_code);
+}
+
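+// Usage sketch (editor-added comment, not generator output): the *Direct
+// variant above accepts a plain C string and creates the flatbuffers::String
+// internally. Values shown are illustrative only.
+//
+//   auto opcode = circle::CreateOperatorCodeDirect(
+//     fbb, /*deprecated_builtin_code=*/0, /*custom_code=*/nullptr,
+//     /*version=*/1, circle::BuiltinOperator_ADD);
+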
+flatbuffers::Offset<OperatorCode>
+CreateOperatorCode(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct OperatorT : public flatbuffers::NativeTable
+{
+ typedef Operator TableType;
+ uint32_t opcode_index = 0;
+ std::vector<int32_t> inputs{};
+ std::vector<int32_t> outputs{};
+ circle::BuiltinOptionsUnion builtin_options{};
+ std::vector<uint8_t> custom_options{};
+ circle::CustomOptionsFormat custom_options_format = circle::CustomOptionsFormat_FLEXBUFFERS;
+ std::vector<bool> mutating_variable_inputs{};
+ std::vector<int32_t> intermediates{};
+};
+
+struct Operator FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef OperatorT NativeTableType;
+ typedef OperatorBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_OPCODE_INDEX = 4,
+ VT_INPUTS = 6,
+ VT_OUTPUTS = 8,
+ VT_BUILTIN_OPTIONS_TYPE = 10,
+ VT_BUILTIN_OPTIONS = 12,
+ VT_CUSTOM_OPTIONS = 14,
+ VT_CUSTOM_OPTIONS_FORMAT = 16,
+ VT_MUTATING_VARIABLE_INPUTS = 18,
+ VT_INTERMEDIATES = 20
+ };
+ uint32_t opcode_index() const { return GetField<uint32_t>(VT_OPCODE_INDEX, 0); }
+ const flatbuffers::Vector<int32_t> *inputs() const
+ {
+ return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_INPUTS);
+ }
+ const flatbuffers::Vector<int32_t> *outputs() const
+ {
+ return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_OUTPUTS);
+ }
+ circle::BuiltinOptions builtin_options_type() const
+ {
+ return static_cast<circle::BuiltinOptions>(GetField<uint8_t>(VT_BUILTIN_OPTIONS_TYPE, 0));
+ }
+ const void *builtin_options() const { return GetPointer<const void *>(VT_BUILTIN_OPTIONS); }
+ template <typename T> const T *builtin_options_as() const;
+ const circle::Conv2DOptions *builtin_options_as_Conv2DOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_Conv2DOptions
+ ? static_cast<const circle::Conv2DOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::DepthwiseConv2DOptions *builtin_options_as_DepthwiseConv2DOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_DepthwiseConv2DOptions
+ ? static_cast<const circle::DepthwiseConv2DOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::ConcatEmbeddingsOptions *builtin_options_as_ConcatEmbeddingsOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_ConcatEmbeddingsOptions
+ ? static_cast<const circle::ConcatEmbeddingsOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::LSHProjectionOptions *builtin_options_as_LSHProjectionOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_LSHProjectionOptions
+ ? static_cast<const circle::LSHProjectionOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::Pool2DOptions *builtin_options_as_Pool2DOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_Pool2DOptions
+ ? static_cast<const circle::Pool2DOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::SVDFOptions *builtin_options_as_SVDFOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_SVDFOptions
+ ? static_cast<const circle::SVDFOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::RNNOptions *builtin_options_as_RNNOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_RNNOptions
+ ? static_cast<const circle::RNNOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::FullyConnectedOptions *builtin_options_as_FullyConnectedOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_FullyConnectedOptions
+ ? static_cast<const circle::FullyConnectedOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::SoftmaxOptions *builtin_options_as_SoftmaxOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_SoftmaxOptions
+ ? static_cast<const circle::SoftmaxOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::ConcatenationOptions *builtin_options_as_ConcatenationOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_ConcatenationOptions
+ ? static_cast<const circle::ConcatenationOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::AddOptions *builtin_options_as_AddOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_AddOptions
+ ? static_cast<const circle::AddOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::L2NormOptions *builtin_options_as_L2NormOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_L2NormOptions
+ ? static_cast<const circle::L2NormOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::LocalResponseNormalizationOptions *
+ builtin_options_as_LocalResponseNormalizationOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_LocalResponseNormalizationOptions
+ ? static_cast<const circle::LocalResponseNormalizationOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::LSTMOptions *builtin_options_as_LSTMOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_LSTMOptions
+ ? static_cast<const circle::LSTMOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::ResizeBilinearOptions *builtin_options_as_ResizeBilinearOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_ResizeBilinearOptions
+ ? static_cast<const circle::ResizeBilinearOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::CallOptions *builtin_options_as_CallOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_CallOptions
+ ? static_cast<const circle::CallOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::ReshapeOptions *builtin_options_as_ReshapeOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_ReshapeOptions
+ ? static_cast<const circle::ReshapeOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::SkipGramOptions *builtin_options_as_SkipGramOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_SkipGramOptions
+ ? static_cast<const circle::SkipGramOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::SpaceToDepthOptions *builtin_options_as_SpaceToDepthOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_SpaceToDepthOptions
+ ? static_cast<const circle::SpaceToDepthOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::EmbeddingLookupSparseOptions *
+ builtin_options_as_EmbeddingLookupSparseOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_EmbeddingLookupSparseOptions
+ ? static_cast<const circle::EmbeddingLookupSparseOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::MulOptions *builtin_options_as_MulOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_MulOptions
+ ? static_cast<const circle::MulOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::PadOptions *builtin_options_as_PadOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_PadOptions
+ ? static_cast<const circle::PadOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::GatherOptions *builtin_options_as_GatherOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_GatherOptions
+ ? static_cast<const circle::GatherOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::BatchToSpaceNDOptions *builtin_options_as_BatchToSpaceNDOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_BatchToSpaceNDOptions
+ ? static_cast<const circle::BatchToSpaceNDOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::SpaceToBatchNDOptions *builtin_options_as_SpaceToBatchNDOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_SpaceToBatchNDOptions
+ ? static_cast<const circle::SpaceToBatchNDOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::TransposeOptions *builtin_options_as_TransposeOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_TransposeOptions
+ ? static_cast<const circle::TransposeOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::ReducerOptions *builtin_options_as_ReducerOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_ReducerOptions
+ ? static_cast<const circle::ReducerOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::SubOptions *builtin_options_as_SubOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_SubOptions
+ ? static_cast<const circle::SubOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::DivOptions *builtin_options_as_DivOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_DivOptions
+ ? static_cast<const circle::DivOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::SqueezeOptions *builtin_options_as_SqueezeOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_SqueezeOptions
+ ? static_cast<const circle::SqueezeOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::SequenceRNNOptions *builtin_options_as_SequenceRNNOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_SequenceRNNOptions
+ ? static_cast<const circle::SequenceRNNOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::StridedSliceOptions *builtin_options_as_StridedSliceOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_StridedSliceOptions
+ ? static_cast<const circle::StridedSliceOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::ExpOptions *builtin_options_as_ExpOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_ExpOptions
+ ? static_cast<const circle::ExpOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::TopKV2Options *builtin_options_as_TopKV2Options() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_TopKV2Options
+ ? static_cast<const circle::TopKV2Options *>(builtin_options())
+ : nullptr;
+ }
+ const circle::SplitOptions *builtin_options_as_SplitOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_SplitOptions
+ ? static_cast<const circle::SplitOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::LogSoftmaxOptions *builtin_options_as_LogSoftmaxOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_LogSoftmaxOptions
+ ? static_cast<const circle::LogSoftmaxOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::CastOptions *builtin_options_as_CastOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_CastOptions
+ ? static_cast<const circle::CastOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::DequantizeOptions *builtin_options_as_DequantizeOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_DequantizeOptions
+ ? static_cast<const circle::DequantizeOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::MaximumMinimumOptions *builtin_options_as_MaximumMinimumOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_MaximumMinimumOptions
+ ? static_cast<const circle::MaximumMinimumOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::ArgMaxOptions *builtin_options_as_ArgMaxOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_ArgMaxOptions
+ ? static_cast<const circle::ArgMaxOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::LessOptions *builtin_options_as_LessOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_LessOptions
+ ? static_cast<const circle::LessOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::NegOptions *builtin_options_as_NegOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_NegOptions
+ ? static_cast<const circle::NegOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::PadV2Options *builtin_options_as_PadV2Options() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_PadV2Options
+ ? static_cast<const circle::PadV2Options *>(builtin_options())
+ : nullptr;
+ }
+ const circle::GreaterOptions *builtin_options_as_GreaterOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_GreaterOptions
+ ? static_cast<const circle::GreaterOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::GreaterEqualOptions *builtin_options_as_GreaterEqualOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_GreaterEqualOptions
+ ? static_cast<const circle::GreaterEqualOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::LessEqualOptions *builtin_options_as_LessEqualOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_LessEqualOptions
+ ? static_cast<const circle::LessEqualOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::SelectOptions *builtin_options_as_SelectOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_SelectOptions
+ ? static_cast<const circle::SelectOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::SliceOptions *builtin_options_as_SliceOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_SliceOptions
+ ? static_cast<const circle::SliceOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::TransposeConvOptions *builtin_options_as_TransposeConvOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_TransposeConvOptions
+ ? static_cast<const circle::TransposeConvOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::SparseToDenseOptions *builtin_options_as_SparseToDenseOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_SparseToDenseOptions
+ ? static_cast<const circle::SparseToDenseOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::TileOptions *builtin_options_as_TileOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_TileOptions
+ ? static_cast<const circle::TileOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::ExpandDimsOptions *builtin_options_as_ExpandDimsOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_ExpandDimsOptions
+ ? static_cast<const circle::ExpandDimsOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::EqualOptions *builtin_options_as_EqualOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_EqualOptions
+ ? static_cast<const circle::EqualOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::NotEqualOptions *builtin_options_as_NotEqualOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_NotEqualOptions
+ ? static_cast<const circle::NotEqualOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::ShapeOptions *builtin_options_as_ShapeOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_ShapeOptions
+ ? static_cast<const circle::ShapeOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::PowOptions *builtin_options_as_PowOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_PowOptions
+ ? static_cast<const circle::PowOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::ArgMinOptions *builtin_options_as_ArgMinOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_ArgMinOptions
+ ? static_cast<const circle::ArgMinOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::FakeQuantOptions *builtin_options_as_FakeQuantOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_FakeQuantOptions
+ ? static_cast<const circle::FakeQuantOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::PackOptions *builtin_options_as_PackOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_PackOptions
+ ? static_cast<const circle::PackOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::LogicalOrOptions *builtin_options_as_LogicalOrOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_LogicalOrOptions
+ ? static_cast<const circle::LogicalOrOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::OneHotOptions *builtin_options_as_OneHotOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_OneHotOptions
+ ? static_cast<const circle::OneHotOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::LogicalAndOptions *builtin_options_as_LogicalAndOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_LogicalAndOptions
+ ? static_cast<const circle::LogicalAndOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::LogicalNotOptions *builtin_options_as_LogicalNotOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_LogicalNotOptions
+ ? static_cast<const circle::LogicalNotOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::UnpackOptions *builtin_options_as_UnpackOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_UnpackOptions
+ ? static_cast<const circle::UnpackOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::FloorDivOptions *builtin_options_as_FloorDivOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_FloorDivOptions
+ ? static_cast<const circle::FloorDivOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::SquareOptions *builtin_options_as_SquareOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_SquareOptions
+ ? static_cast<const circle::SquareOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::ZerosLikeOptions *builtin_options_as_ZerosLikeOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_ZerosLikeOptions
+ ? static_cast<const circle::ZerosLikeOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::FillOptions *builtin_options_as_FillOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_FillOptions
+ ? static_cast<const circle::FillOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::BidirectionalSequenceLSTMOptions *
+ builtin_options_as_BidirectionalSequenceLSTMOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_BidirectionalSequenceLSTMOptions
+ ? static_cast<const circle::BidirectionalSequenceLSTMOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::BidirectionalSequenceRNNOptions *
+ builtin_options_as_BidirectionalSequenceRNNOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_BidirectionalSequenceRNNOptions
+ ? static_cast<const circle::BidirectionalSequenceRNNOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::UnidirectionalSequenceLSTMOptions *
+ builtin_options_as_UnidirectionalSequenceLSTMOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_UnidirectionalSequenceLSTMOptions
+ ? static_cast<const circle::UnidirectionalSequenceLSTMOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::FloorModOptions *builtin_options_as_FloorModOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_FloorModOptions
+ ? static_cast<const circle::FloorModOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::RangeOptions *builtin_options_as_RangeOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_RangeOptions
+ ? static_cast<const circle::RangeOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::ResizeNearestNeighborOptions *
+ builtin_options_as_ResizeNearestNeighborOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_ResizeNearestNeighborOptions
+ ? static_cast<const circle::ResizeNearestNeighborOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::LeakyReluOptions *builtin_options_as_LeakyReluOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_LeakyReluOptions
+ ? static_cast<const circle::LeakyReluOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::SquaredDifferenceOptions *builtin_options_as_SquaredDifferenceOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_SquaredDifferenceOptions
+ ? static_cast<const circle::SquaredDifferenceOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::MirrorPadOptions *builtin_options_as_MirrorPadOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_MirrorPadOptions
+ ? static_cast<const circle::MirrorPadOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::AbsOptions *builtin_options_as_AbsOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_AbsOptions
+ ? static_cast<const circle::AbsOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::SplitVOptions *builtin_options_as_SplitVOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_SplitVOptions
+ ? static_cast<const circle::SplitVOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::UniqueOptions *builtin_options_as_UniqueOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_UniqueOptions
+ ? static_cast<const circle::UniqueOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::ReverseV2Options *builtin_options_as_ReverseV2Options() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_ReverseV2Options
+ ? static_cast<const circle::ReverseV2Options *>(builtin_options())
+ : nullptr;
+ }
+ const circle::AddNOptions *builtin_options_as_AddNOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_AddNOptions
+ ? static_cast<const circle::AddNOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::GatherNdOptions *builtin_options_as_GatherNdOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_GatherNdOptions
+ ? static_cast<const circle::GatherNdOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::CosOptions *builtin_options_as_CosOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_CosOptions
+ ? static_cast<const circle::CosOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::WhereOptions *builtin_options_as_WhereOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_WhereOptions
+ ? static_cast<const circle::WhereOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::RankOptions *builtin_options_as_RankOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_RankOptions
+ ? static_cast<const circle::RankOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::ReverseSequenceOptions *builtin_options_as_ReverseSequenceOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_ReverseSequenceOptions
+ ? static_cast<const circle::ReverseSequenceOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::MatrixDiagOptions *builtin_options_as_MatrixDiagOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_MatrixDiagOptions
+ ? static_cast<const circle::MatrixDiagOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::QuantizeOptions *builtin_options_as_QuantizeOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_QuantizeOptions
+ ? static_cast<const circle::QuantizeOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::MatrixSetDiagOptions *builtin_options_as_MatrixSetDiagOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_MatrixSetDiagOptions
+ ? static_cast<const circle::MatrixSetDiagOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::HardSwishOptions *builtin_options_as_HardSwishOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_HardSwishOptions
+ ? static_cast<const circle::HardSwishOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::IfOptions *builtin_options_as_IfOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_IfOptions
+ ? static_cast<const circle::IfOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::WhileOptions *builtin_options_as_WhileOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_WhileOptions
+ ? static_cast<const circle::WhileOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::DepthToSpaceOptions *builtin_options_as_DepthToSpaceOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_DepthToSpaceOptions
+ ? static_cast<const circle::DepthToSpaceOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::NonMaxSuppressionV4Options *builtin_options_as_NonMaxSuppressionV4Options() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_NonMaxSuppressionV4Options
+ ? static_cast<const circle::NonMaxSuppressionV4Options *>(builtin_options())
+ : nullptr;
+ }
+ const circle::NonMaxSuppressionV5Options *builtin_options_as_NonMaxSuppressionV5Options() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_NonMaxSuppressionV5Options
+ ? static_cast<const circle::NonMaxSuppressionV5Options *>(builtin_options())
+ : nullptr;
+ }
+ const circle::ScatterNdOptions *builtin_options_as_ScatterNdOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_ScatterNdOptions
+ ? static_cast<const circle::ScatterNdOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::SelectV2Options *builtin_options_as_SelectV2Options() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_SelectV2Options
+ ? static_cast<const circle::SelectV2Options *>(builtin_options())
+ : nullptr;
+ }
+ const circle::DensifyOptions *builtin_options_as_DensifyOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_DensifyOptions
+ ? static_cast<const circle::DensifyOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::SegmentSumOptions *builtin_options_as_SegmentSumOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_SegmentSumOptions
+ ? static_cast<const circle::SegmentSumOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::BatchMatMulOptions *builtin_options_as_BatchMatMulOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_BatchMatMulOptions
+ ? static_cast<const circle::BatchMatMulOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::CumsumOptions *builtin_options_as_CumsumOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_CumsumOptions
+ ? static_cast<const circle::CumsumOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::CallOnceOptions *builtin_options_as_CallOnceOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_CallOnceOptions
+ ? static_cast<const circle::CallOnceOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::BroadcastToOptions *builtin_options_as_BroadcastToOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_BroadcastToOptions
+ ? static_cast<const circle::BroadcastToOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::Rfft2dOptions *builtin_options_as_Rfft2dOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_Rfft2dOptions
+ ? static_cast<const circle::Rfft2dOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::Conv3DOptions *builtin_options_as_Conv3DOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_Conv3DOptions
+ ? static_cast<const circle::Conv3DOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::HashtableOptions *builtin_options_as_HashtableOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_HashtableOptions
+ ? static_cast<const circle::HashtableOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::HashtableFindOptions *builtin_options_as_HashtableFindOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_HashtableFindOptions
+ ? static_cast<const circle::HashtableFindOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::HashtableImportOptions *builtin_options_as_HashtableImportOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_HashtableImportOptions
+ ? static_cast<const circle::HashtableImportOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::HashtableSizeOptions *builtin_options_as_HashtableSizeOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_HashtableSizeOptions
+ ? static_cast<const circle::HashtableSizeOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::VarHandleOptions *builtin_options_as_VarHandleOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_VarHandleOptions
+ ? static_cast<const circle::VarHandleOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::ReadVariableOptions *builtin_options_as_ReadVariableOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_ReadVariableOptions
+ ? static_cast<const circle::ReadVariableOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::AssignVariableOptions *builtin_options_as_AssignVariableOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_AssignVariableOptions
+ ? static_cast<const circle::AssignVariableOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::RandomOptions *builtin_options_as_RandomOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_RandomOptions
+ ? static_cast<const circle::RandomOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::BCQGatherOptions *builtin_options_as_BCQGatherOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_BCQGatherOptions
+ ? static_cast<const circle::BCQGatherOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::BCQFullyConnectedOptions *builtin_options_as_BCQFullyConnectedOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_BCQFullyConnectedOptions
+ ? static_cast<const circle::BCQFullyConnectedOptions *>(builtin_options())
+ : nullptr;
+ }
+ const circle::InstanceNormOptions *builtin_options_as_InstanceNormOptions() const
+ {
+ return builtin_options_type() == circle::BuiltinOptions_InstanceNormOptions
+ ? static_cast<const circle::InstanceNormOptions *>(builtin_options())
+ : nullptr;
+ }
+ const flatbuffers::Vector<uint8_t> *custom_options() const
+ {
+ return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_CUSTOM_OPTIONS);
+ }
+ circle::CustomOptionsFormat custom_options_format() const
+ {
+ return static_cast<circle::CustomOptionsFormat>(GetField<int8_t>(VT_CUSTOM_OPTIONS_FORMAT, 0));
+ }
+ const flatbuffers::Vector<uint8_t> *mutating_variable_inputs() const
+ {
+ return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_MUTATING_VARIABLE_INPUTS);
+ }
+ const flatbuffers::Vector<int32_t> *intermediates() const
+ {
+ return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_INTERMEDIATES);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<uint32_t>(verifier, VT_OPCODE_INDEX) &&
+ VerifyOffset(verifier, VT_INPUTS) && verifier.VerifyVector(inputs()) &&
+ VerifyOffset(verifier, VT_OUTPUTS) && verifier.VerifyVector(outputs()) &&
+ VerifyField<uint8_t>(verifier, VT_BUILTIN_OPTIONS_TYPE) &&
+ VerifyOffset(verifier, VT_BUILTIN_OPTIONS) &&
+ VerifyBuiltinOptions(verifier, builtin_options(), builtin_options_type()) &&
+ VerifyOffset(verifier, VT_CUSTOM_OPTIONS) && verifier.VerifyVector(custom_options()) &&
+ VerifyField<int8_t>(verifier, VT_CUSTOM_OPTIONS_FORMAT) &&
+ VerifyOffset(verifier, VT_MUTATING_VARIABLE_INPUTS) &&
+ verifier.VerifyVector(mutating_variable_inputs()) &&
+ VerifyOffset(verifier, VT_INTERMEDIATES) && verifier.VerifyVector(intermediates()) &&
+ verifier.EndTable();
+ }
+ OperatorT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(OperatorT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<Operator>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
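+// Usage sketch (editor-added comment, not generator output): the typed
+// builtin_options_as_*() accessors and the builtin_options_as<T>() template
+// return nullptr unless builtin_options_type() matches the requested type,
+// so callers can branch safely. The pointer `op` is hypothetical.
+//
+//   const circle::Operator *op = /* obtained from a loaded circle model */;
+//   if (const auto *conv = op->builtin_options_as<circle::Conv2DOptions>())
+//   {
+//     // Reached only when builtin_options_type() == BuiltinOptions_Conv2DOptions.
+//   }
+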
+template <>
+inline const circle::Conv2DOptions *Operator::builtin_options_as<circle::Conv2DOptions>() const
+{
+ return builtin_options_as_Conv2DOptions();
+}
+
+template <>
+inline const circle::DepthwiseConv2DOptions *
+Operator::builtin_options_as<circle::DepthwiseConv2DOptions>() const
+{
+ return builtin_options_as_DepthwiseConv2DOptions();
+}
+
+template <>
+inline const circle::ConcatEmbeddingsOptions *
+Operator::builtin_options_as<circle::ConcatEmbeddingsOptions>() const
+{
+ return builtin_options_as_ConcatEmbeddingsOptions();
+}
+
+template <>
+inline const circle::LSHProjectionOptions *
+Operator::builtin_options_as<circle::LSHProjectionOptions>() const
+{
+ return builtin_options_as_LSHProjectionOptions();
+}
+
+template <>
+inline const circle::Pool2DOptions *Operator::builtin_options_as<circle::Pool2DOptions>() const
+{
+ return builtin_options_as_Pool2DOptions();
+}
+
+template <>
+inline const circle::SVDFOptions *Operator::builtin_options_as<circle::SVDFOptions>() const
+{
+ return builtin_options_as_SVDFOptions();
+}
+
+template <>
+inline const circle::RNNOptions *Operator::builtin_options_as<circle::RNNOptions>() const
+{
+ return builtin_options_as_RNNOptions();
+}
+
+template <>
+inline const circle::FullyConnectedOptions *
+Operator::builtin_options_as<circle::FullyConnectedOptions>() const
+{
+ return builtin_options_as_FullyConnectedOptions();
+}
+
+template <>
+inline const circle::SoftmaxOptions *Operator::builtin_options_as<circle::SoftmaxOptions>() const
+{
+ return builtin_options_as_SoftmaxOptions();
+}
+
+template <>
+inline const circle::ConcatenationOptions *
+Operator::builtin_options_as<circle::ConcatenationOptions>() const
+{
+ return builtin_options_as_ConcatenationOptions();
+}
+
+template <>
+inline const circle::AddOptions *Operator::builtin_options_as<circle::AddOptions>() const
+{
+ return builtin_options_as_AddOptions();
+}
+
+template <>
+inline const circle::L2NormOptions *Operator::builtin_options_as<circle::L2NormOptions>() const
+{
+ return builtin_options_as_L2NormOptions();
+}
+
+template <>
+inline const circle::LocalResponseNormalizationOptions *
+Operator::builtin_options_as<circle::LocalResponseNormalizationOptions>() const
+{
+ return builtin_options_as_LocalResponseNormalizationOptions();
+}
+
+template <>
+inline const circle::LSTMOptions *Operator::builtin_options_as<circle::LSTMOptions>() const
+{
+ return builtin_options_as_LSTMOptions();
+}
+
+template <>
+inline const circle::ResizeBilinearOptions *
+Operator::builtin_options_as<circle::ResizeBilinearOptions>() const
+{
+ return builtin_options_as_ResizeBilinearOptions();
+}
+
+template <>
+inline const circle::CallOptions *Operator::builtin_options_as<circle::CallOptions>() const
+{
+ return builtin_options_as_CallOptions();
+}
+
+template <>
+inline const circle::ReshapeOptions *Operator::builtin_options_as<circle::ReshapeOptions>() const
+{
+ return builtin_options_as_ReshapeOptions();
+}
+
+template <>
+inline const circle::SkipGramOptions *Operator::builtin_options_as<circle::SkipGramOptions>() const
+{
+ return builtin_options_as_SkipGramOptions();
+}
+
+template <>
+inline const circle::SpaceToDepthOptions *
+Operator::builtin_options_as<circle::SpaceToDepthOptions>() const
+{
+ return builtin_options_as_SpaceToDepthOptions();
+}
+
+template <>
+inline const circle::EmbeddingLookupSparseOptions *
+Operator::builtin_options_as<circle::EmbeddingLookupSparseOptions>() const
+{
+ return builtin_options_as_EmbeddingLookupSparseOptions();
+}
+
+template <>
+inline const circle::MulOptions *Operator::builtin_options_as<circle::MulOptions>() const
+{
+ return builtin_options_as_MulOptions();
+}
+
+template <>
+inline const circle::PadOptions *Operator::builtin_options_as<circle::PadOptions>() const
+{
+ return builtin_options_as_PadOptions();
+}
+
+template <>
+inline const circle::GatherOptions *Operator::builtin_options_as<circle::GatherOptions>() const
+{
+ return builtin_options_as_GatherOptions();
+}
+
+template <>
+inline const circle::BatchToSpaceNDOptions *
+Operator::builtin_options_as<circle::BatchToSpaceNDOptions>() const
+{
+ return builtin_options_as_BatchToSpaceNDOptions();
+}
+
+template <>
+inline const circle::SpaceToBatchNDOptions *
+Operator::builtin_options_as<circle::SpaceToBatchNDOptions>() const
+{
+ return builtin_options_as_SpaceToBatchNDOptions();
+}
+
+template <>
+inline const circle::TransposeOptions *
+Operator::builtin_options_as<circle::TransposeOptions>() const
+{
+ return builtin_options_as_TransposeOptions();
+}
+
+template <>
+inline const circle::ReducerOptions *Operator::builtin_options_as<circle::ReducerOptions>() const
+{
+ return builtin_options_as_ReducerOptions();
+}
+
+template <>
+inline const circle::SubOptions *Operator::builtin_options_as<circle::SubOptions>() const
+{
+ return builtin_options_as_SubOptions();
+}
+
+template <>
+inline const circle::DivOptions *Operator::builtin_options_as<circle::DivOptions>() const
+{
+ return builtin_options_as_DivOptions();
+}
+
+template <>
+inline const circle::SqueezeOptions *Operator::builtin_options_as<circle::SqueezeOptions>() const
+{
+ return builtin_options_as_SqueezeOptions();
+}
+
+template <>
+inline const circle::SequenceRNNOptions *
+Operator::builtin_options_as<circle::SequenceRNNOptions>() const
+{
+ return builtin_options_as_SequenceRNNOptions();
+}
+
+template <>
+inline const circle::StridedSliceOptions *
+Operator::builtin_options_as<circle::StridedSliceOptions>() const
+{
+ return builtin_options_as_StridedSliceOptions();
+}
+
+template <>
+inline const circle::ExpOptions *Operator::builtin_options_as<circle::ExpOptions>() const
+{
+ return builtin_options_as_ExpOptions();
+}
+
+template <>
+inline const circle::TopKV2Options *Operator::builtin_options_as<circle::TopKV2Options>() const
+{
+ return builtin_options_as_TopKV2Options();
+}
+
+template <>
+inline const circle::SplitOptions *Operator::builtin_options_as<circle::SplitOptions>() const
+{
+ return builtin_options_as_SplitOptions();
+}
+
+template <>
+inline const circle::LogSoftmaxOptions *
+Operator::builtin_options_as<circle::LogSoftmaxOptions>() const
+{
+ return builtin_options_as_LogSoftmaxOptions();
+}
+
+template <>
+inline const circle::CastOptions *Operator::builtin_options_as<circle::CastOptions>() const
+{
+ return builtin_options_as_CastOptions();
+}
+
+template <>
+inline const circle::DequantizeOptions *
+Operator::builtin_options_as<circle::DequantizeOptions>() const
+{
+ return builtin_options_as_DequantizeOptions();
+}
+
+template <>
+inline const circle::MaximumMinimumOptions *
+Operator::builtin_options_as<circle::MaximumMinimumOptions>() const
+{
+ return builtin_options_as_MaximumMinimumOptions();
+}
+
+template <>
+inline const circle::ArgMaxOptions *Operator::builtin_options_as<circle::ArgMaxOptions>() const
+{
+ return builtin_options_as_ArgMaxOptions();
+}
+
+template <>
+inline const circle::LessOptions *Operator::builtin_options_as<circle::LessOptions>() const
+{
+ return builtin_options_as_LessOptions();
+}
+
+template <>
+inline const circle::NegOptions *Operator::builtin_options_as<circle::NegOptions>() const
+{
+ return builtin_options_as_NegOptions();
+}
+
+template <>
+inline const circle::PadV2Options *Operator::builtin_options_as<circle::PadV2Options>() const
+{
+ return builtin_options_as_PadV2Options();
+}
+
+template <>
+inline const circle::GreaterOptions *Operator::builtin_options_as<circle::GreaterOptions>() const
+{
+ return builtin_options_as_GreaterOptions();
+}
+
+template <>
+inline const circle::GreaterEqualOptions *
+Operator::builtin_options_as<circle::GreaterEqualOptions>() const
+{
+ return builtin_options_as_GreaterEqualOptions();
+}
+
+template <>
+inline const circle::LessEqualOptions *
+Operator::builtin_options_as<circle::LessEqualOptions>() const
+{
+ return builtin_options_as_LessEqualOptions();
+}
+
+template <>
+inline const circle::SelectOptions *Operator::builtin_options_as<circle::SelectOptions>() const
+{
+ return builtin_options_as_SelectOptions();
+}
+
+template <>
+inline const circle::SliceOptions *Operator::builtin_options_as<circle::SliceOptions>() const
+{
+ return builtin_options_as_SliceOptions();
+}
+
+template <>
+inline const circle::TransposeConvOptions *
+Operator::builtin_options_as<circle::TransposeConvOptions>() const
+{
+ return builtin_options_as_TransposeConvOptions();
+}
+
+template <>
+inline const circle::SparseToDenseOptions *
+Operator::builtin_options_as<circle::SparseToDenseOptions>() const
+{
+ return builtin_options_as_SparseToDenseOptions();
+}
+
+template <>
+inline const circle::TileOptions *Operator::builtin_options_as<circle::TileOptions>() const
+{
+ return builtin_options_as_TileOptions();
+}
+
+template <>
+inline const circle::ExpandDimsOptions *
+Operator::builtin_options_as<circle::ExpandDimsOptions>() const
+{
+ return builtin_options_as_ExpandDimsOptions();
+}
+
+template <>
+inline const circle::EqualOptions *Operator::builtin_options_as<circle::EqualOptions>() const
+{
+ return builtin_options_as_EqualOptions();
+}
+
+template <>
+inline const circle::NotEqualOptions *Operator::builtin_options_as<circle::NotEqualOptions>() const
+{
+ return builtin_options_as_NotEqualOptions();
+}
+
+template <>
+inline const circle::ShapeOptions *Operator::builtin_options_as<circle::ShapeOptions>() const
+{
+ return builtin_options_as_ShapeOptions();
+}
+
+template <>
+inline const circle::PowOptions *Operator::builtin_options_as<circle::PowOptions>() const
+{
+ return builtin_options_as_PowOptions();
+}
+
+template <>
+inline const circle::ArgMinOptions *Operator::builtin_options_as<circle::ArgMinOptions>() const
+{
+ return builtin_options_as_ArgMinOptions();
+}
+
+template <>
+inline const circle::FakeQuantOptions *
+Operator::builtin_options_as<circle::FakeQuantOptions>() const
+{
+ return builtin_options_as_FakeQuantOptions();
+}
+
+template <>
+inline const circle::PackOptions *Operator::builtin_options_as<circle::PackOptions>() const
+{
+ return builtin_options_as_PackOptions();
+}
+
+template <>
+inline const circle::LogicalOrOptions *
+Operator::builtin_options_as<circle::LogicalOrOptions>() const
+{
+ return builtin_options_as_LogicalOrOptions();
+}
+
+template <>
+inline const circle::OneHotOptions *Operator::builtin_options_as<circle::OneHotOptions>() const
+{
+ return builtin_options_as_OneHotOptions();
+}
+
+template <>
+inline const circle::LogicalAndOptions *
+Operator::builtin_options_as<circle::LogicalAndOptions>() const
+{
+ return builtin_options_as_LogicalAndOptions();
+}
+
+template <>
+inline const circle::LogicalNotOptions *
+Operator::builtin_options_as<circle::LogicalNotOptions>() const
+{
+ return builtin_options_as_LogicalNotOptions();
+}
+
+template <>
+inline const circle::UnpackOptions *Operator::builtin_options_as<circle::UnpackOptions>() const
+{
+ return builtin_options_as_UnpackOptions();
+}
+
+template <>
+inline const circle::FloorDivOptions *Operator::builtin_options_as<circle::FloorDivOptions>() const
+{
+ return builtin_options_as_FloorDivOptions();
+}
+
+template <>
+inline const circle::SquareOptions *Operator::builtin_options_as<circle::SquareOptions>() const
+{
+ return builtin_options_as_SquareOptions();
+}
+
+template <>
+inline const circle::ZerosLikeOptions *
+Operator::builtin_options_as<circle::ZerosLikeOptions>() const
+{
+ return builtin_options_as_ZerosLikeOptions();
+}
+
+template <>
+inline const circle::FillOptions *Operator::builtin_options_as<circle::FillOptions>() const
+{
+ return builtin_options_as_FillOptions();
+}
+
+template <>
+inline const circle::BidirectionalSequenceLSTMOptions *
+Operator::builtin_options_as<circle::BidirectionalSequenceLSTMOptions>() const
+{
+ return builtin_options_as_BidirectionalSequenceLSTMOptions();
+}
+
+template <>
+inline const circle::BidirectionalSequenceRNNOptions *
+Operator::builtin_options_as<circle::BidirectionalSequenceRNNOptions>() const
+{
+ return builtin_options_as_BidirectionalSequenceRNNOptions();
+}
+
+template <>
+inline const circle::UnidirectionalSequenceLSTMOptions *
+Operator::builtin_options_as<circle::UnidirectionalSequenceLSTMOptions>() const
+{
+ return builtin_options_as_UnidirectionalSequenceLSTMOptions();
+}
+
+template <>
+inline const circle::FloorModOptions *Operator::builtin_options_as<circle::FloorModOptions>() const
+{
+ return builtin_options_as_FloorModOptions();
+}
+
+template <>
+inline const circle::RangeOptions *Operator::builtin_options_as<circle::RangeOptions>() const
+{
+ return builtin_options_as_RangeOptions();
+}
+
+template <>
+inline const circle::ResizeNearestNeighborOptions *
+Operator::builtin_options_as<circle::ResizeNearestNeighborOptions>() const
+{
+ return builtin_options_as_ResizeNearestNeighborOptions();
+}
+
+template <>
+inline const circle::LeakyReluOptions *
+Operator::builtin_options_as<circle::LeakyReluOptions>() const
+{
+ return builtin_options_as_LeakyReluOptions();
+}
+
+template <>
+inline const circle::SquaredDifferenceOptions *
+Operator::builtin_options_as<circle::SquaredDifferenceOptions>() const
+{
+ return builtin_options_as_SquaredDifferenceOptions();
+}
+
+template <>
+inline const circle::MirrorPadOptions *
+Operator::builtin_options_as<circle::MirrorPadOptions>() const
+{
+ return builtin_options_as_MirrorPadOptions();
+}
+
+template <>
+inline const circle::AbsOptions *Operator::builtin_options_as<circle::AbsOptions>() const
+{
+ return builtin_options_as_AbsOptions();
+}
+
+template <>
+inline const circle::SplitVOptions *Operator::builtin_options_as<circle::SplitVOptions>() const
+{
+ return builtin_options_as_SplitVOptions();
+}
+
+template <>
+inline const circle::UniqueOptions *Operator::builtin_options_as<circle::UniqueOptions>() const
+{
+ return builtin_options_as_UniqueOptions();
+}
+
+template <>
+inline const circle::ReverseV2Options *
+Operator::builtin_options_as<circle::ReverseV2Options>() const
+{
+ return builtin_options_as_ReverseV2Options();
+}
+
+template <>
+inline const circle::AddNOptions *Operator::builtin_options_as<circle::AddNOptions>() const
+{
+ return builtin_options_as_AddNOptions();
+}
+
+template <>
+inline const circle::GatherNdOptions *Operator::builtin_options_as<circle::GatherNdOptions>() const
+{
+ return builtin_options_as_GatherNdOptions();
+}
+
+template <>
+inline const circle::CosOptions *Operator::builtin_options_as<circle::CosOptions>() const
+{
+ return builtin_options_as_CosOptions();
+}
+
+template <>
+inline const circle::WhereOptions *Operator::builtin_options_as<circle::WhereOptions>() const
+{
+ return builtin_options_as_WhereOptions();
+}
+
+template <>
+inline const circle::RankOptions *Operator::builtin_options_as<circle::RankOptions>() const
+{
+ return builtin_options_as_RankOptions();
+}
+
+template <>
+inline const circle::ReverseSequenceOptions *
+Operator::builtin_options_as<circle::ReverseSequenceOptions>() const
+{
+ return builtin_options_as_ReverseSequenceOptions();
+}
+
+template <>
+inline const circle::MatrixDiagOptions *
+Operator::builtin_options_as<circle::MatrixDiagOptions>() const
+{
+ return builtin_options_as_MatrixDiagOptions();
+}
+
+template <>
+inline const circle::QuantizeOptions *Operator::builtin_options_as<circle::QuantizeOptions>() const
+{
+ return builtin_options_as_QuantizeOptions();
+}
+
+template <>
+inline const circle::MatrixSetDiagOptions *
+Operator::builtin_options_as<circle::MatrixSetDiagOptions>() const
+{
+ return builtin_options_as_MatrixSetDiagOptions();
+}
+
+template <>
+inline const circle::HardSwishOptions *
+Operator::builtin_options_as<circle::HardSwishOptions>() const
+{
+ return builtin_options_as_HardSwishOptions();
+}
+
+template <> inline const circle::IfOptions *Operator::builtin_options_as<circle::IfOptions>() const
+{
+ return builtin_options_as_IfOptions();
+}
+
+template <>
+inline const circle::WhileOptions *Operator::builtin_options_as<circle::WhileOptions>() const
+{
+ return builtin_options_as_WhileOptions();
+}
+
+template <>
+inline const circle::DepthToSpaceOptions *
+Operator::builtin_options_as<circle::DepthToSpaceOptions>() const
+{
+ return builtin_options_as_DepthToSpaceOptions();
+}
+
+template <>
+inline const circle::NonMaxSuppressionV4Options *
+Operator::builtin_options_as<circle::NonMaxSuppressionV4Options>() const
+{
+ return builtin_options_as_NonMaxSuppressionV4Options();
+}
+
+template <>
+inline const circle::NonMaxSuppressionV5Options *
+Operator::builtin_options_as<circle::NonMaxSuppressionV5Options>() const
+{
+ return builtin_options_as_NonMaxSuppressionV5Options();
+}
+
+template <>
+inline const circle::ScatterNdOptions *
+Operator::builtin_options_as<circle::ScatterNdOptions>() const
+{
+ return builtin_options_as_ScatterNdOptions();
+}
+
+template <>
+inline const circle::SelectV2Options *Operator::builtin_options_as<circle::SelectV2Options>() const
+{
+ return builtin_options_as_SelectV2Options();
+}
+
+template <>
+inline const circle::DensifyOptions *Operator::builtin_options_as<circle::DensifyOptions>() const
+{
+ return builtin_options_as_DensifyOptions();
+}
+
+template <>
+inline const circle::SegmentSumOptions *
+Operator::builtin_options_as<circle::SegmentSumOptions>() const
+{
+ return builtin_options_as_SegmentSumOptions();
+}
+
+template <>
+inline const circle::BatchMatMulOptions *
+Operator::builtin_options_as<circle::BatchMatMulOptions>() const
+{
+ return builtin_options_as_BatchMatMulOptions();
+}
+
+template <>
+inline const circle::CumsumOptions *Operator::builtin_options_as<circle::CumsumOptions>() const
+{
+ return builtin_options_as_CumsumOptions();
+}
+
+template <>
+inline const circle::CallOnceOptions *Operator::builtin_options_as<circle::CallOnceOptions>() const
+{
+ return builtin_options_as_CallOnceOptions();
+}
+
+template <>
+inline const circle::BroadcastToOptions *
+Operator::builtin_options_as<circle::BroadcastToOptions>() const
+{
+ return builtin_options_as_BroadcastToOptions();
+}
+
+template <>
+inline const circle::Rfft2dOptions *Operator::builtin_options_as<circle::Rfft2dOptions>() const
+{
+ return builtin_options_as_Rfft2dOptions();
+}
+
+template <>
+inline const circle::Conv3DOptions *Operator::builtin_options_as<circle::Conv3DOptions>() const
+{
+ return builtin_options_as_Conv3DOptions();
+}
+
+template <>
+inline const circle::HashtableOptions *
+Operator::builtin_options_as<circle::HashtableOptions>() const
+{
+ return builtin_options_as_HashtableOptions();
+}
+
+template <>
+inline const circle::HashtableFindOptions *
+Operator::builtin_options_as<circle::HashtableFindOptions>() const
+{
+ return builtin_options_as_HashtableFindOptions();
+}
+
+template <>
+inline const circle::HashtableImportOptions *
+Operator::builtin_options_as<circle::HashtableImportOptions>() const
+{
+ return builtin_options_as_HashtableImportOptions();
+}
+
+template <>
+inline const circle::HashtableSizeOptions *
+Operator::builtin_options_as<circle::HashtableSizeOptions>() const
+{
+ return builtin_options_as_HashtableSizeOptions();
+}
+
+template <>
+inline const circle::VarHandleOptions *
+Operator::builtin_options_as<circle::VarHandleOptions>() const
+{
+ return builtin_options_as_VarHandleOptions();
+}
+
+template <>
+inline const circle::ReadVariableOptions *
+Operator::builtin_options_as<circle::ReadVariableOptions>() const
+{
+ return builtin_options_as_ReadVariableOptions();
+}
+
+template <>
+inline const circle::AssignVariableOptions *
+Operator::builtin_options_as<circle::AssignVariableOptions>() const
+{
+ return builtin_options_as_AssignVariableOptions();
+}
+
+template <>
+inline const circle::RandomOptions *Operator::builtin_options_as<circle::RandomOptions>() const
+{
+ return builtin_options_as_RandomOptions();
+}
+
+template <>
+inline const circle::BCQGatherOptions *
+Operator::builtin_options_as<circle::BCQGatherOptions>() const
+{
+ return builtin_options_as_BCQGatherOptions();
+}
+
+template <>
+inline const circle::BCQFullyConnectedOptions *
+Operator::builtin_options_as<circle::BCQFullyConnectedOptions>() const
+{
+ return builtin_options_as_BCQFullyConnectedOptions();
+}
+
+template <>
+inline const circle::InstanceNormOptions *
+Operator::builtin_options_as<circle::InstanceNormOptions>() const
+{
+ return builtin_options_as_InstanceNormOptions();
+}
+
+struct OperatorBuilder
+{
+ typedef Operator Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_opcode_index(uint32_t opcode_index)
+ {
+ fbb_.AddElement<uint32_t>(Operator::VT_OPCODE_INDEX, opcode_index, 0);
+ }
+ void add_inputs(flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputs)
+ {
+ fbb_.AddOffset(Operator::VT_INPUTS, inputs);
+ }
+ void add_outputs(flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputs)
+ {
+ fbb_.AddOffset(Operator::VT_OUTPUTS, outputs);
+ }
+ void add_builtin_options_type(circle::BuiltinOptions builtin_options_type)
+ {
+ fbb_.AddElement<uint8_t>(Operator::VT_BUILTIN_OPTIONS_TYPE,
+ static_cast<uint8_t>(builtin_options_type), 0);
+ }
+ void add_builtin_options(flatbuffers::Offset<void> builtin_options)
+ {
+ fbb_.AddOffset(Operator::VT_BUILTIN_OPTIONS, builtin_options);
+ }
+ void add_custom_options(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom_options)
+ {
+ fbb_.AddOffset(Operator::VT_CUSTOM_OPTIONS, custom_options);
+ }
+ void add_custom_options_format(circle::CustomOptionsFormat custom_options_format)
+ {
+ fbb_.AddElement<int8_t>(Operator::VT_CUSTOM_OPTIONS_FORMAT,
+ static_cast<int8_t>(custom_options_format), 0);
+ }
+ void add_mutating_variable_inputs(
+ flatbuffers::Offset<flatbuffers::Vector<uint8_t>> mutating_variable_inputs)
+ {
+ fbb_.AddOffset(Operator::VT_MUTATING_VARIABLE_INPUTS, mutating_variable_inputs);
+ }
+ void add_intermediates(flatbuffers::Offset<flatbuffers::Vector<int32_t>> intermediates)
+ {
+ fbb_.AddOffset(Operator::VT_INTERMEDIATES, intermediates);
+ }
+ explicit OperatorBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<Operator> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<Operator>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<Operator> CreateOperator(
+ flatbuffers::FlatBufferBuilder &_fbb, uint32_t opcode_index = 0,
+ flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputs = 0,
+ flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputs = 0,
+ circle::BuiltinOptions builtin_options_type = circle::BuiltinOptions_NONE,
+ flatbuffers::Offset<void> builtin_options = 0,
+ flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom_options = 0,
+ circle::CustomOptionsFormat custom_options_format = circle::CustomOptionsFormat_FLEXBUFFERS,
+ flatbuffers::Offset<flatbuffers::Vector<uint8_t>> mutating_variable_inputs = 0,
+ flatbuffers::Offset<flatbuffers::Vector<int32_t>> intermediates = 0)
+{
+ OperatorBuilder builder_(_fbb);
+ builder_.add_intermediates(intermediates);
+ builder_.add_mutating_variable_inputs(mutating_variable_inputs);
+ builder_.add_custom_options(custom_options);
+ builder_.add_builtin_options(builtin_options);
+ builder_.add_outputs(outputs);
+ builder_.add_inputs(inputs);
+ builder_.add_opcode_index(opcode_index);
+ builder_.add_custom_options_format(custom_options_format);
+ builder_.add_builtin_options_type(builtin_options_type);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<Operator> CreateOperatorDirect(
+ flatbuffers::FlatBufferBuilder &_fbb, uint32_t opcode_index = 0,
+ const std::vector<int32_t> *inputs = nullptr, const std::vector<int32_t> *outputs = nullptr,
+ circle::BuiltinOptions builtin_options_type = circle::BuiltinOptions_NONE,
+ flatbuffers::Offset<void> builtin_options = 0,
+ const std::vector<uint8_t> *custom_options = nullptr,
+ circle::CustomOptionsFormat custom_options_format = circle::CustomOptionsFormat_FLEXBUFFERS,
+ const std::vector<uint8_t> *mutating_variable_inputs = nullptr,
+ const std::vector<int32_t> *intermediates = nullptr)
+{
+ auto inputs__ = inputs ? _fbb.CreateVector<int32_t>(*inputs) : 0;
+ auto outputs__ = outputs ? _fbb.CreateVector<int32_t>(*outputs) : 0;
+ auto custom_options__ = custom_options ? _fbb.CreateVector<uint8_t>(*custom_options) : 0;
+ auto mutating_variable_inputs__ =
+ mutating_variable_inputs ? _fbb.CreateVector<uint8_t>(*mutating_variable_inputs) : 0;
+ auto intermediates__ = intermediates ? _fbb.CreateVector<int32_t>(*intermediates) : 0;
+ return circle::CreateOperator(_fbb, opcode_index, inputs__, outputs__, builtin_options_type,
+ builtin_options, custom_options__, custom_options_format,
+ mutating_variable_inputs__, intermediates__);
+}
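+
+// Usage sketch (illustrative; indices and option values are placeholders): an
+// Operator stores its builtin options as a (type, table) union pair, so the
+// options table must be serialized before CreateOperatorDirect is called.
+//
+//   flatbuffers::FlatBufferBuilder fbb;
+//   auto conv_opts = circle::CreateConv2DOptions(fbb);  // generated defaults
+//   std::vector<int32_t> op_inputs{0, 1, 2};
+//   std::vector<int32_t> op_outputs{3};
+//   auto op = circle::CreateOperatorDirect(
+//     fbb, /*opcode_index=*/0, &op_inputs, &op_outputs,
+//     circle::BuiltinOptions_Conv2DOptions, conv_opts.Union());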
+
+flatbuffers::Offset<Operator>
+CreateOperator(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct SubGraphT : public flatbuffers::NativeTable
+{
+ typedef SubGraph TableType;
+ std::vector<std::unique_ptr<circle::TensorT>> tensors{};
+ std::vector<int32_t> inputs{};
+ std::vector<int32_t> outputs{};
+ std::vector<std::unique_ptr<circle::OperatorT>> operators{};
+ std::string name{};
+ circle::DataFormat data_format = circle::DataFormat_CHANNELS_LAST;
+};
+
+struct SubGraph FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef SubGraphT NativeTableType;
+ typedef SubGraphBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_TENSORS = 4,
+ VT_INPUTS = 6,
+ VT_OUTPUTS = 8,
+ VT_OPERATORS = 10,
+ VT_NAME = 12,
+ VT_DATA_FORMAT = 14
+ };
+ const flatbuffers::Vector<flatbuffers::Offset<circle::Tensor>> *tensors() const
+ {
+ return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<circle::Tensor>> *>(VT_TENSORS);
+ }
+ const flatbuffers::Vector<int32_t> *inputs() const
+ {
+ return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_INPUTS);
+ }
+ const flatbuffers::Vector<int32_t> *outputs() const
+ {
+ return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_OUTPUTS);
+ }
+ const flatbuffers::Vector<flatbuffers::Offset<circle::Operator>> *operators() const
+ {
+ return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<circle::Operator>> *>(
+ VT_OPERATORS);
+ }
+ const flatbuffers::String *name() const
+ {
+ return GetPointer<const flatbuffers::String *>(VT_NAME);
+ }
+ circle::DataFormat data_format() const
+ {
+ return static_cast<circle::DataFormat>(GetField<int8_t>(VT_DATA_FORMAT, 0));
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_TENSORS) &&
+ verifier.VerifyVector(tensors()) && verifier.VerifyVectorOfTables(tensors()) &&
+ VerifyOffset(verifier, VT_INPUTS) && verifier.VerifyVector(inputs()) &&
+ VerifyOffset(verifier, VT_OUTPUTS) && verifier.VerifyVector(outputs()) &&
+ VerifyOffset(verifier, VT_OPERATORS) && verifier.VerifyVector(operators()) &&
+ verifier.VerifyVectorOfTables(operators()) && VerifyOffset(verifier, VT_NAME) &&
+ verifier.VerifyString(name()) && VerifyField<int8_t>(verifier, VT_DATA_FORMAT) &&
+ verifier.EndTable();
+ }
+ SubGraphT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(SubGraphT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<SubGraph>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct SubGraphBuilder
+{
+ typedef SubGraph Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void
+ add_tensors(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<circle::Tensor>>> tensors)
+ {
+ fbb_.AddOffset(SubGraph::VT_TENSORS, tensors);
+ }
+ void add_inputs(flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputs)
+ {
+ fbb_.AddOffset(SubGraph::VT_INPUTS, inputs);
+ }
+ void add_outputs(flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputs)
+ {
+ fbb_.AddOffset(SubGraph::VT_OUTPUTS, outputs);
+ }
+ void add_operators(
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<circle::Operator>>> operators)
+ {
+ fbb_.AddOffset(SubGraph::VT_OPERATORS, operators);
+ }
+ void add_name(flatbuffers::Offset<flatbuffers::String> name)
+ {
+ fbb_.AddOffset(SubGraph::VT_NAME, name);
+ }
+ void add_data_format(circle::DataFormat data_format)
+ {
+ fbb_.AddElement<int8_t>(SubGraph::VT_DATA_FORMAT, static_cast<int8_t>(data_format), 0);
+ }
+ explicit SubGraphBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<SubGraph> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<SubGraph>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<SubGraph> CreateSubGraph(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<circle::Tensor>>> tensors = 0,
+ flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputs = 0,
+ flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputs = 0,
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<circle::Operator>>> operators = 0,
+ flatbuffers::Offset<flatbuffers::String> name = 0,
+ circle::DataFormat data_format = circle::DataFormat_CHANNELS_LAST)
+{
+ SubGraphBuilder builder_(_fbb);
+ builder_.add_name(name);
+ builder_.add_operators(operators);
+ builder_.add_outputs(outputs);
+ builder_.add_inputs(inputs);
+ builder_.add_tensors(tensors);
+ builder_.add_data_format(data_format);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<SubGraph> CreateSubGraphDirect(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ const std::vector<flatbuffers::Offset<circle::Tensor>> *tensors = nullptr,
+ const std::vector<int32_t> *inputs = nullptr, const std::vector<int32_t> *outputs = nullptr,
+ const std::vector<flatbuffers::Offset<circle::Operator>> *operators = nullptr,
+ const char *name = nullptr, circle::DataFormat data_format = circle::DataFormat_CHANNELS_LAST)
+{
+ auto tensors__ = tensors ? _fbb.CreateVector<flatbuffers::Offset<circle::Tensor>>(*tensors) : 0;
+ auto inputs__ = inputs ? _fbb.CreateVector<int32_t>(*inputs) : 0;
+ auto outputs__ = outputs ? _fbb.CreateVector<int32_t>(*outputs) : 0;
+ auto operators__ =
+ operators ? _fbb.CreateVector<flatbuffers::Offset<circle::Operator>>(*operators) : 0;
+ auto name__ = name ? _fbb.CreateString(name) : 0;
+ return circle::CreateSubGraph(_fbb, tensors__, inputs__, outputs__, operators__, name__,
+ data_format);
+}
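+
+// Usage sketch (illustrative): FlatBuffers requires child objects to be
+// serialized before the table that references them, so the Tensor and Operator
+// offsets are collected first and then handed to CreateSubGraphDirect.
+//
+//   std::vector<flatbuffers::Offset<circle::Tensor>> tensors;    // CreateTensor(...) results
+//   std::vector<flatbuffers::Offset<circle::Operator>> ops;      // CreateOperator(...) results
+//   std::vector<int32_t> graph_inputs{0};
+//   std::vector<int32_t> graph_outputs{3};
+//   auto subgraph = circle::CreateSubGraphDirect(fbb, &tensors, &graph_inputs,
+//                                                &graph_outputs, &ops, "main");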
+
+flatbuffers::Offset<SubGraph>
+CreateSubGraph(flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct BufferT : public flatbuffers::NativeTable
+{
+ typedef Buffer TableType;
+ std::vector<uint8_t> data{};
+};
+
+struct Buffer FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef BufferT NativeTableType;
+ typedef BufferBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_DATA = 4
+ };
+ const flatbuffers::Vector<uint8_t> *data() const
+ {
+ return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_DATA);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_DATA) &&
+ verifier.VerifyVector(data()) && verifier.EndTable();
+ }
+ BufferT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(BufferT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<Buffer>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const BufferT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct BufferBuilder
+{
+ typedef Buffer Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_data(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data)
+ {
+ fbb_.AddOffset(Buffer::VT_DATA, data);
+ }
+ explicit BufferBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<Buffer> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<Buffer>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<Buffer>
+CreateBuffer(flatbuffers::FlatBufferBuilder &_fbb,
+ flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data = 0)
+{
+ BufferBuilder builder_(_fbb);
+ builder_.add_data(data);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<Buffer> CreateBufferDirect(flatbuffers::FlatBufferBuilder &_fbb,
+ const std::vector<uint8_t> *data = nullptr)
+{
+ if (data)
+ {
+ _fbb.ForceVectorAlignment(data->size(), sizeof(uint8_t), 16);
+ }
+ auto data__ = data ? _fbb.CreateVector<uint8_t>(*data) : 0;
+ return circle::CreateBuffer(_fbb, data__);
+}
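+
+// Note: CreateBufferDirect force-aligns the data vector to 16 bytes so raw
+// tensor data can be read in place (e.g. with aligned/vector loads) once the
+// model is memory-mapped. Sketch (contents are placeholders):
+//
+//   std::vector<uint8_t> weights(64, 0);
+//   auto buf = circle::CreateBufferDirect(fbb, &weights);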
+
+flatbuffers::Offset<Buffer>
+CreateBuffer(flatbuffers::FlatBufferBuilder &_fbb, const BufferT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct MetadataT : public flatbuffers::NativeTable
+{
+ typedef Metadata TableType;
+ std::string name{};
+ uint32_t buffer = 0;
+};
+
+struct Metadata FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef MetadataT NativeTableType;
+ typedef MetadataBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_NAME = 4,
+ VT_BUFFER = 6
+ };
+ const flatbuffers::String *name() const
+ {
+ return GetPointer<const flatbuffers::String *>(VT_NAME);
+ }
+ uint32_t buffer() const { return GetField<uint32_t>(VT_BUFFER, 0); }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_NAME) &&
+ verifier.VerifyString(name()) && VerifyField<uint32_t>(verifier, VT_BUFFER) &&
+ verifier.EndTable();
+ }
+ MetadataT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(MetadataT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<Metadata>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const MetadataT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct MetadataBuilder
+{
+ typedef Metadata Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_name(flatbuffers::Offset<flatbuffers::String> name)
+ {
+ fbb_.AddOffset(Metadata::VT_NAME, name);
+ }
+ void add_buffer(uint32_t buffer) { fbb_.AddElement<uint32_t>(Metadata::VT_BUFFER, buffer, 0); }
+ explicit MetadataBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<Metadata> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<Metadata>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<Metadata>
+CreateMetadata(flatbuffers::FlatBufferBuilder &_fbb,
+ flatbuffers::Offset<flatbuffers::String> name = 0, uint32_t buffer = 0)
+{
+ MetadataBuilder builder_(_fbb);
+ builder_.add_buffer(buffer);
+ builder_.add_name(name);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<Metadata> CreateMetadataDirect(flatbuffers::FlatBufferBuilder &_fbb,
+ const char *name = nullptr,
+ uint32_t buffer = 0)
+{
+ auto name__ = name ? _fbb.CreateString(name) : 0;
+ return circle::CreateMetadata(_fbb, name__, buffer);
+}
+
+flatbuffers::Offset<Metadata>
+CreateMetadata(flatbuffers::FlatBufferBuilder &_fbb, const MetadataT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct TensorMapT : public flatbuffers::NativeTable
+{
+ typedef TensorMap TableType;
+ std::string name{};
+ uint32_t tensor_index = 0;
+};
+
+struct TensorMap FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef TensorMapT NativeTableType;
+ typedef TensorMapBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_NAME = 4,
+ VT_TENSOR_INDEX = 6
+ };
+ const flatbuffers::String *name() const
+ {
+ return GetPointer<const flatbuffers::String *>(VT_NAME);
+ }
+ uint32_t tensor_index() const { return GetField<uint32_t>(VT_TENSOR_INDEX, 0); }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_NAME) &&
+ verifier.VerifyString(name()) && VerifyField<uint32_t>(verifier, VT_TENSOR_INDEX) &&
+ verifier.EndTable();
+ }
+ TensorMapT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(TensorMapT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<TensorMap>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const TensorMapT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct TensorMapBuilder
+{
+ typedef TensorMap Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_name(flatbuffers::Offset<flatbuffers::String> name)
+ {
+ fbb_.AddOffset(TensorMap::VT_NAME, name);
+ }
+ void add_tensor_index(uint32_t tensor_index)
+ {
+ fbb_.AddElement<uint32_t>(TensorMap::VT_TENSOR_INDEX, tensor_index, 0);
+ }
+ explicit TensorMapBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<TensorMap> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<TensorMap>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<TensorMap>
+CreateTensorMap(flatbuffers::FlatBufferBuilder &_fbb,
+ flatbuffers::Offset<flatbuffers::String> name = 0, uint32_t tensor_index = 0)
+{
+ TensorMapBuilder builder_(_fbb);
+ builder_.add_tensor_index(tensor_index);
+ builder_.add_name(name);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<TensorMap> CreateTensorMapDirect(flatbuffers::FlatBufferBuilder &_fbb,
+ const char *name = nullptr,
+ uint32_t tensor_index = 0)
+{
+ auto name__ = name ? _fbb.CreateString(name) : 0;
+ return circle::CreateTensorMap(_fbb, name__, tensor_index);
+}
+
+flatbuffers::Offset<TensorMap>
+CreateTensorMap(flatbuffers::FlatBufferBuilder &_fbb, const TensorMapT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct SignatureDefT : public flatbuffers::NativeTable
+{
+ typedef SignatureDef TableType;
+ std::vector<std::unique_ptr<circle::TensorMapT>> inputs{};
+ std::vector<std::unique_ptr<circle::TensorMapT>> outputs{};
+ std::string signature_key{};
+ uint32_t subgraph_index = 0;
+};
+
+struct SignatureDef FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef SignatureDefT NativeTableType;
+ typedef SignatureDefBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_INPUTS = 4,
+ VT_OUTPUTS = 6,
+ VT_SIGNATURE_KEY = 8,
+ VT_SUBGRAPH_INDEX = 12
+ };
+ const flatbuffers::Vector<flatbuffers::Offset<circle::TensorMap>> *inputs() const
+ {
+ return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<circle::TensorMap>> *>(
+ VT_INPUTS);
+ }
+ const flatbuffers::Vector<flatbuffers::Offset<circle::TensorMap>> *outputs() const
+ {
+ return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<circle::TensorMap>> *>(
+ VT_OUTPUTS);
+ }
+ const flatbuffers::String *signature_key() const
+ {
+ return GetPointer<const flatbuffers::String *>(VT_SIGNATURE_KEY);
+ }
+ uint32_t subgraph_index() const { return GetField<uint32_t>(VT_SUBGRAPH_INDEX, 0); }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_INPUTS) &&
+ verifier.VerifyVector(inputs()) && verifier.VerifyVectorOfTables(inputs()) &&
+ VerifyOffset(verifier, VT_OUTPUTS) && verifier.VerifyVector(outputs()) &&
+ verifier.VerifyVectorOfTables(outputs()) && VerifyOffset(verifier, VT_SIGNATURE_KEY) &&
+ verifier.VerifyString(signature_key()) &&
+ VerifyField<uint32_t>(verifier, VT_SUBGRAPH_INDEX) && verifier.EndTable();
+ }
+ SignatureDefT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(SignatureDefT *_o,
+ const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<SignatureDef>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const SignatureDefT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct SignatureDefBuilder
+{
+ typedef SignatureDef Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_inputs(
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<circle::TensorMap>>> inputs)
+ {
+ fbb_.AddOffset(SignatureDef::VT_INPUTS, inputs);
+ }
+ void add_outputs(
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<circle::TensorMap>>> outputs)
+ {
+ fbb_.AddOffset(SignatureDef::VT_OUTPUTS, outputs);
+ }
+ void add_signature_key(flatbuffers::Offset<flatbuffers::String> signature_key)
+ {
+ fbb_.AddOffset(SignatureDef::VT_SIGNATURE_KEY, signature_key);
+ }
+ void add_subgraph_index(uint32_t subgraph_index)
+ {
+ fbb_.AddElement<uint32_t>(SignatureDef::VT_SUBGRAPH_INDEX, subgraph_index, 0);
+ }
+ explicit SignatureDefBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<SignatureDef> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<SignatureDef>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<SignatureDef> CreateSignatureDef(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<circle::TensorMap>>> inputs = 0,
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<circle::TensorMap>>> outputs = 0,
+ flatbuffers::Offset<flatbuffers::String> signature_key = 0, uint32_t subgraph_index = 0)
+{
+ SignatureDefBuilder builder_(_fbb);
+ builder_.add_subgraph_index(subgraph_index);
+ builder_.add_signature_key(signature_key);
+ builder_.add_outputs(outputs);
+ builder_.add_inputs(inputs);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<SignatureDef> CreateSignatureDefDirect(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ const std::vector<flatbuffers::Offset<circle::TensorMap>> *inputs = nullptr,
+ const std::vector<flatbuffers::Offset<circle::TensorMap>> *outputs = nullptr,
+ const char *signature_key = nullptr, uint32_t subgraph_index = 0)
+{
+ auto inputs__ = inputs ? _fbb.CreateVector<flatbuffers::Offset<circle::TensorMap>>(*inputs) : 0;
+ auto outputs__ =
+ outputs ? _fbb.CreateVector<flatbuffers::Offset<circle::TensorMap>>(*outputs) : 0;
+ auto signature_key__ = signature_key ? _fbb.CreateString(signature_key) : 0;
+ return circle::CreateSignatureDef(_fbb, inputs__, outputs__, signature_key__, subgraph_index);
+}
+
+flatbuffers::Offset<SignatureDef>
+CreateSignatureDef(flatbuffers::FlatBufferBuilder &_fbb, const SignatureDefT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct ModelT : public flatbuffers::NativeTable
+{
+ typedef Model TableType;
+ uint32_t version = 0;
+ std::vector<std::unique_ptr<circle::OperatorCodeT>> operator_codes{};
+ std::vector<std::unique_ptr<circle::SubGraphT>> subgraphs{};
+ std::string description{};
+ std::vector<std::unique_ptr<circle::BufferT>> buffers{};
+ std::vector<int32_t> metadata_buffer{};
+ std::vector<std::unique_ptr<circle::MetadataT>> metadata{};
+ std::vector<std::unique_ptr<circle::SignatureDefT>> signature_defs{};
+};
+
+struct Model FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table
+{
+ typedef ModelT NativeTableType;
+ typedef ModelBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE
+ {
+ VT_VERSION = 4,
+ VT_OPERATOR_CODES = 6,
+ VT_SUBGRAPHS = 8,
+ VT_DESCRIPTION = 10,
+ VT_BUFFERS = 12,
+ VT_METADATA_BUFFER = 14,
+ VT_METADATA = 16,
+ VT_SIGNATURE_DEFS = 18
+ };
+ uint32_t version() const { return GetField<uint32_t>(VT_VERSION, 0); }
+ const flatbuffers::Vector<flatbuffers::Offset<circle::OperatorCode>> *operator_codes() const
+ {
+ return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<circle::OperatorCode>> *>(
+ VT_OPERATOR_CODES);
+ }
+ const flatbuffers::Vector<flatbuffers::Offset<circle::SubGraph>> *subgraphs() const
+ {
+ return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<circle::SubGraph>> *>(
+ VT_SUBGRAPHS);
+ }
+ const flatbuffers::String *description() const
+ {
+ return GetPointer<const flatbuffers::String *>(VT_DESCRIPTION);
+ }
+ const flatbuffers::Vector<flatbuffers::Offset<circle::Buffer>> *buffers() const
+ {
+ return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<circle::Buffer>> *>(VT_BUFFERS);
+ }
+ const flatbuffers::Vector<int32_t> *metadata_buffer() const
+ {
+ return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_METADATA_BUFFER);
+ }
+ const flatbuffers::Vector<flatbuffers::Offset<circle::Metadata>> *metadata() const
+ {
+ return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<circle::Metadata>> *>(
+ VT_METADATA);
+ }
+ const flatbuffers::Vector<flatbuffers::Offset<circle::SignatureDef>> *signature_defs() const
+ {
+ return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<circle::SignatureDef>> *>(
+ VT_SIGNATURE_DEFS);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const
+ {
+ return VerifyTableStart(verifier) && VerifyField<uint32_t>(verifier, VT_VERSION) &&
+ VerifyOffset(verifier, VT_OPERATOR_CODES) && verifier.VerifyVector(operator_codes()) &&
+ verifier.VerifyVectorOfTables(operator_codes()) &&
+ VerifyOffset(verifier, VT_SUBGRAPHS) && verifier.VerifyVector(subgraphs()) &&
+ verifier.VerifyVectorOfTables(subgraphs()) && VerifyOffset(verifier, VT_DESCRIPTION) &&
+ verifier.VerifyString(description()) && VerifyOffset(verifier, VT_BUFFERS) &&
+ verifier.VerifyVector(buffers()) && verifier.VerifyVectorOfTables(buffers()) &&
+ VerifyOffset(verifier, VT_METADATA_BUFFER) && verifier.VerifyVector(metadata_buffer()) &&
+ VerifyOffset(verifier, VT_METADATA) && verifier.VerifyVector(metadata()) &&
+ verifier.VerifyVectorOfTables(metadata()) && VerifyOffset(verifier, VT_SIGNATURE_DEFS) &&
+ verifier.VerifyVector(signature_defs()) &&
+ verifier.VerifyVectorOfTables(signature_defs()) && verifier.EndTable();
+ }
+ ModelT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(ModelT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<Model>
+ Pack(flatbuffers::FlatBufferBuilder &_fbb, const ModelT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct ModelBuilder
+{
+ typedef Model Table;
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_version(uint32_t version) { fbb_.AddElement<uint32_t>(Model::VT_VERSION, version, 0); }
+ void add_operator_codes(
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<circle::OperatorCode>>>
+ operator_codes)
+ {
+ fbb_.AddOffset(Model::VT_OPERATOR_CODES, operator_codes);
+ }
+ void add_subgraphs(
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<circle::SubGraph>>> subgraphs)
+ {
+ fbb_.AddOffset(Model::VT_SUBGRAPHS, subgraphs);
+ }
+ void add_description(flatbuffers::Offset<flatbuffers::String> description)
+ {
+ fbb_.AddOffset(Model::VT_DESCRIPTION, description);
+ }
+ void
+ add_buffers(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<circle::Buffer>>> buffers)
+ {
+ fbb_.AddOffset(Model::VT_BUFFERS, buffers);
+ }
+ void add_metadata_buffer(flatbuffers::Offset<flatbuffers::Vector<int32_t>> metadata_buffer)
+ {
+ fbb_.AddOffset(Model::VT_METADATA_BUFFER, metadata_buffer);
+ }
+ void add_metadata(
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<circle::Metadata>>> metadata)
+ {
+ fbb_.AddOffset(Model::VT_METADATA, metadata);
+ }
+ void add_signature_defs(
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<circle::SignatureDef>>>
+ signature_defs)
+ {
+ fbb_.AddOffset(Model::VT_SIGNATURE_DEFS, signature_defs);
+ }
+ explicit ModelBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb)
+ {
+ start_ = fbb_.StartTable();
+ }
+ flatbuffers::Offset<Model> Finish()
+ {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<Model>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<Model> CreateModel(
+ flatbuffers::FlatBufferBuilder &_fbb, uint32_t version = 0,
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<circle::OperatorCode>>>
+ operator_codes = 0,
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<circle::SubGraph>>> subgraphs = 0,
+ flatbuffers::Offset<flatbuffers::String> description = 0,
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<circle::Buffer>>> buffers = 0,
+ flatbuffers::Offset<flatbuffers::Vector<int32_t>> metadata_buffer = 0,
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<circle::Metadata>>> metadata = 0,
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<circle::SignatureDef>>>
+ signature_defs = 0)
+{
+ ModelBuilder builder_(_fbb);
+ builder_.add_signature_defs(signature_defs);
+ builder_.add_metadata(metadata);
+ builder_.add_metadata_buffer(metadata_buffer);
+ builder_.add_buffers(buffers);
+ builder_.add_description(description);
+ builder_.add_subgraphs(subgraphs);
+ builder_.add_operator_codes(operator_codes);
+ builder_.add_version(version);
+ return builder_.Finish();
+}
+
+inline flatbuffers::Offset<Model> CreateModelDirect(
+ flatbuffers::FlatBufferBuilder &_fbb, uint32_t version = 0,
+ const std::vector<flatbuffers::Offset<circle::OperatorCode>> *operator_codes = nullptr,
+ const std::vector<flatbuffers::Offset<circle::SubGraph>> *subgraphs = nullptr,
+ const char *description = nullptr,
+ const std::vector<flatbuffers::Offset<circle::Buffer>> *buffers = nullptr,
+ const std::vector<int32_t> *metadata_buffer = nullptr,
+ const std::vector<flatbuffers::Offset<circle::Metadata>> *metadata = nullptr,
+ const std::vector<flatbuffers::Offset<circle::SignatureDef>> *signature_defs = nullptr)
+{
+ auto operator_codes__ =
+ operator_codes ? _fbb.CreateVector<flatbuffers::Offset<circle::OperatorCode>>(*operator_codes)
+ : 0;
+ auto subgraphs__ =
+ subgraphs ? _fbb.CreateVector<flatbuffers::Offset<circle::SubGraph>>(*subgraphs) : 0;
+ auto description__ = description ? _fbb.CreateString(description) : 0;
+ auto buffers__ = buffers ? _fbb.CreateVector<flatbuffers::Offset<circle::Buffer>>(*buffers) : 0;
+ auto metadata_buffer__ = metadata_buffer ? _fbb.CreateVector<int32_t>(*metadata_buffer) : 0;
+ auto metadata__ =
+ metadata ? _fbb.CreateVector<flatbuffers::Offset<circle::Metadata>>(*metadata) : 0;
+ auto signature_defs__ =
+ signature_defs ? _fbb.CreateVector<flatbuffers::Offset<circle::SignatureDef>>(*signature_defs)
+ : 0;
+ return circle::CreateModel(_fbb, version, operator_codes__, subgraphs__, description__, buffers__,
+ metadata_buffer__, metadata__, signature_defs__);
+}
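+
+// Usage sketch (illustrative; the version value and field contents are
+// placeholders): assembling a minimal Model and finishing the buffer.
+//
+//   std::vector<flatbuffers::Offset<circle::OperatorCode>> codes{circle::CreateOperatorCode(fbb)};
+//   std::vector<flatbuffers::Offset<circle::SubGraph>> graphs{subgraph};
+//   std::vector<flatbuffers::Offset<circle::Buffer>> buffers{circle::CreateBuffer(fbb)};
+//   auto model = circle::CreateModelDirect(fbb, /*version=*/3, &codes, &graphs,
+//                                          "example", &buffers);
+//   fbb.Finish(model);
+//   const circle::Model *parsed = flatbuffers::GetRoot<circle::Model>(fbb.GetBufferPointer());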
+
+flatbuffers::Offset<Model> CreateModel(flatbuffers::FlatBufferBuilder &_fbb, const ModelT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+inline CustomQuantizationT *
+CustomQuantization::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<CustomQuantizationT>(new CustomQuantizationT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void CustomQuantization::UnPackTo(CustomQuantizationT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = custom();
+ if (_e)
+ {
+ _o->custom.resize(_e->size());
+ std::copy(_e->begin(), _e->end(), _o->custom.begin());
+ }
+ }
+}
+
+inline flatbuffers::Offset<CustomQuantization>
+CustomQuantization::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CustomQuantizationT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateCustomQuantization(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<CustomQuantization>
+CreateCustomQuantization(flatbuffers::FlatBufferBuilder &_fbb, const CustomQuantizationT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const CustomQuantizationT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ _fbb.ForceVectorAlignment(_o->custom.size(), sizeof(uint8_t), 16);
+ auto _custom = _o->custom.size() ? _fbb.CreateVector(_o->custom) : 0;
+ return circle::CreateCustomQuantization(_fbb, _custom);
+}
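+
+// The UnPack/Pack pair above is the "object API": UnPack copies a read-only
+// table into a mutable *T struct, and Pack re-serializes it. Round-trip sketch
+// (illustrative values):
+//
+//   circle::CustomQuantizationT native;
+//   native.custom = {1, 2, 3};
+//   auto off = circle::CustomQuantization::Pack(fbb, &native);
+//   fbb.Finish(off);
+//   auto *table = flatbuffers::GetRoot<circle::CustomQuantization>(fbb.GetBufferPointer());
+//   std::unique_ptr<circle::CustomQuantizationT> copy(table->UnPack());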
+
+inline QuantizationParametersT *
+QuantizationParameters::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<QuantizationParametersT>(new QuantizationParametersT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void
+QuantizationParameters::UnPackTo(QuantizationParametersT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = min();
+ if (_e)
+ {
+ _o->min.resize(_e->size());
+ for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++)
+ {
+ _o->min[_i] = _e->Get(_i);
+ }
+ }
+ }
+ {
+ auto _e = max();
+ if (_e)
+ {
+ _o->max.resize(_e->size());
+ for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++)
+ {
+ _o->max[_i] = _e->Get(_i);
+ }
+ }
+ }
+ {
+ auto _e = scale();
+ if (_e)
+ {
+ _o->scale.resize(_e->size());
+ for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++)
+ {
+ _o->scale[_i] = _e->Get(_i);
+ }
+ }
+ }
+ {
+ auto _e = zero_point();
+ if (_e)
+ {
+ _o->zero_point.resize(_e->size());
+ for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++)
+ {
+ _o->zero_point[_i] = _e->Get(_i);
+ }
+ }
+ }
+ {
+ auto _e = details_type();
+ _o->details.type = _e;
+ }
+ {
+ auto _e = details();
+ if (_e)
+ _o->details.value = circle::QuantizationDetailsUnion::UnPack(_e, details_type(), _resolver);
+ }
+ {
+ auto _e = quantized_dimension();
+ _o->quantized_dimension = _e;
+ }
+}
+
+inline flatbuffers::Offset<QuantizationParameters>
+QuantizationParameters::Pack(flatbuffers::FlatBufferBuilder &_fbb,
+ const QuantizationParametersT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateQuantizationParameters(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<QuantizationParameters>
+CreateQuantizationParameters(flatbuffers::FlatBufferBuilder &_fbb,
+ const QuantizationParametersT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const QuantizationParametersT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _min = _o->min.size() ? _fbb.CreateVector(_o->min) : 0;
+ auto _max = _o->max.size() ? _fbb.CreateVector(_o->max) : 0;
+ auto _scale = _o->scale.size() ? _fbb.CreateVector(_o->scale) : 0;
+ auto _zero_point = _o->zero_point.size() ? _fbb.CreateVector(_o->zero_point) : 0;
+ auto _details_type = _o->details.type;
+ auto _details = _o->details.Pack(_fbb);
+ auto _quantized_dimension = _o->quantized_dimension;
+ return circle::CreateQuantizationParameters(_fbb, _min, _max, _scale, _zero_point, _details_type,
+ _details, _quantized_dimension);
+}
+
+inline Int32VectorT *Int32Vector::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<Int32VectorT>(new Int32VectorT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void Int32Vector::UnPackTo(Int32VectorT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = values();
+ if (_e)
+ {
+ _o->values.resize(_e->size());
+ for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++)
+ {
+ _o->values[_i] = _e->Get(_i);
+ }
+ }
+ }
+}
+
+inline flatbuffers::Offset<Int32Vector>
+Int32Vector::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Int32VectorT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateInt32Vector(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<Int32Vector>
+CreateInt32Vector(flatbuffers::FlatBufferBuilder &_fbb, const Int32VectorT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const Int32VectorT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _values = _o->values.size() ? _fbb.CreateVector(_o->values) : 0;
+ return circle::CreateInt32Vector(_fbb, _values);
+}
+
+inline Uint16VectorT *Uint16Vector::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<Uint16VectorT>(new Uint16VectorT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void Uint16Vector::UnPackTo(Uint16VectorT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = values();
+ if (_e)
+ {
+ _o->values.resize(_e->size());
+ for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++)
+ {
+ _o->values[_i] = _e->Get(_i);
+ }
+ }
+ }
+}
+
+inline flatbuffers::Offset<Uint16Vector>
+Uint16Vector::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Uint16VectorT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateUint16Vector(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<Uint16Vector>
+CreateUint16Vector(flatbuffers::FlatBufferBuilder &_fbb, const Uint16VectorT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const Uint16VectorT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ _fbb.ForceVectorAlignment(_o->values.size(), sizeof(uint16_t), 4);
+ auto _values = _o->values.size() ? _fbb.CreateVector(_o->values) : 0;
+ return circle::CreateUint16Vector(_fbb, _values);
+}
+
+inline Uint8VectorT *Uint8Vector::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<Uint8VectorT>(new Uint8VectorT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void Uint8Vector::UnPackTo(Uint8VectorT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = values();
+ if (_e)
+ {
+ _o->values.resize(_e->size());
+ std::copy(_e->begin(), _e->end(), _o->values.begin());
+ }
+ }
+}
+
+inline flatbuffers::Offset<Uint8Vector>
+Uint8Vector::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Uint8VectorT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateUint8Vector(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<Uint8Vector>
+CreateUint8Vector(flatbuffers::FlatBufferBuilder &_fbb, const Uint8VectorT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const Uint8VectorT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ _fbb.ForceVectorAlignment(_o->values.size(), sizeof(uint8_t), 4);
+ auto _values = _o->values.size() ? _fbb.CreateVector(_o->values) : 0;
+ return circle::CreateUint8Vector(_fbb, _values);
+}
+
+inline DimensionMetadataT *
+DimensionMetadata::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<DimensionMetadataT>(new DimensionMetadataT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void DimensionMetadata::UnPackTo(DimensionMetadataT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = format();
+ _o->format = _e;
+ }
+ {
+ auto _e = dense_size();
+ _o->dense_size = _e;
+ }
+ {
+ auto _e = array_segments_type();
+ _o->array_segments.type = _e;
+ }
+ {
+ auto _e = array_segments();
+ if (_e)
+ _o->array_segments.value =
+ circle::SparseIndexVectorUnion::UnPack(_e, array_segments_type(), _resolver);
+ }
+ {
+ auto _e = array_indices_type();
+ _o->array_indices.type = _e;
+ }
+ {
+ auto _e = array_indices();
+ if (_e)
+ _o->array_indices.value =
+ circle::SparseIndexVectorUnion::UnPack(_e, array_indices_type(), _resolver);
+ }
+}
+
+inline flatbuffers::Offset<DimensionMetadata>
+DimensionMetadata::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DimensionMetadataT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateDimensionMetadata(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<DimensionMetadata>
+CreateDimensionMetadata(flatbuffers::FlatBufferBuilder &_fbb, const DimensionMetadataT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const DimensionMetadataT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _format = _o->format;
+ auto _dense_size = _o->dense_size;
+ auto _array_segments_type = _o->array_segments.type;
+ auto _array_segments = _o->array_segments.Pack(_fbb);
+ auto _array_indices_type = _o->array_indices.type;
+ auto _array_indices = _o->array_indices.Pack(_fbb);
+ return circle::CreateDimensionMetadata(_fbb, _format, _dense_size, _array_segments_type,
+ _array_segments, _array_indices_type, _array_indices);
+}
+
+inline SparsityParametersT *
+SparsityParameters::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<SparsityParametersT>(new SparsityParametersT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void SparsityParameters::UnPackTo(SparsityParametersT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = traversal_order();
+ if (_e)
+ {
+ _o->traversal_order.resize(_e->size());
+ for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++)
+ {
+ _o->traversal_order[_i] = _e->Get(_i);
+ }
+ }
+ }
+ {
+ auto _e = block_map();
+ if (_e)
+ {
+ _o->block_map.resize(_e->size());
+ for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++)
+ {
+ _o->block_map[_i] = _e->Get(_i);
+ }
+ }
+ }
+ {
+ auto _e = dim_metadata();
+ if (_e)
+ {
+ _o->dim_metadata.resize(_e->size());
+ for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++)
+ {
+ _o->dim_metadata[_i] =
+ std::unique_ptr<circle::DimensionMetadataT>(_e->Get(_i)->UnPack(_resolver));
+ }
+ }
+ }
+}
+
+inline flatbuffers::Offset<SparsityParameters>
+SparsityParameters::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SparsityParametersT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateSparsityParameters(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<SparsityParameters>
+CreateSparsityParameters(flatbuffers::FlatBufferBuilder &_fbb, const SparsityParametersT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const SparsityParametersT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _traversal_order = _o->traversal_order.size() ? _fbb.CreateVector(_o->traversal_order) : 0;
+ auto _block_map = _o->block_map.size() ? _fbb.CreateVector(_o->block_map) : 0;
+ auto _dim_metadata = _o->dim_metadata.size()
+ ? _fbb.CreateVector<flatbuffers::Offset<circle::DimensionMetadata>>(
+ _o->dim_metadata.size(),
+ [](size_t i, _VectorArgs *__va) {
+ return CreateDimensionMetadata(
+ *__va->__fbb, __va->__o->dim_metadata[i].get(), __va->__rehasher);
+ },
+ &_va)
+ : 0;
+ return circle::CreateSparsityParameters(_fbb, _traversal_order, _block_map, _dim_metadata);
+}
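+
+// Note: vectors of nested tables (dim_metadata above) are serialized with the
+// stateful CreateVector overload: the lambda packs one DimensionMetadataT per
+// element while _VectorArgs carries the builder and rehasher through the call.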
+
+inline TensorT *Tensor::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<TensorT>(new TensorT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void Tensor::UnPackTo(TensorT *_o, const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = shape();
+ if (_e)
+ {
+ _o->shape.resize(_e->size());
+ for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++)
+ {
+ _o->shape[_i] = _e->Get(_i);
+ }
+ }
+ }
+ {
+ auto _e = type();
+ _o->type = _e;
+ }
+ {
+ auto _e = buffer();
+ _o->buffer = _e;
+ }
+ {
+ auto _e = name();
+ if (_e)
+ _o->name = _e->str();
+ }
+ {
+ auto _e = quantization();
+ if (_e)
+ _o->quantization = std::unique_ptr<circle::QuantizationParametersT>(_e->UnPack(_resolver));
+ }
+ {
+ auto _e = is_variable();
+ _o->is_variable = _e;
+ }
+ {
+ auto _e = sparsity();
+ if (_e)
+ _o->sparsity = std::unique_ptr<circle::SparsityParametersT>(_e->UnPack(_resolver));
+ }
+ {
+ auto _e = shape_signature();
+ if (_e)
+ {
+ _o->shape_signature.resize(_e->size());
+ for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++)
+ {
+ _o->shape_signature[_i] = _e->Get(_i);
+ }
+ }
+ }
+}
+
+inline flatbuffers::Offset<Tensor> Tensor::Pack(flatbuffers::FlatBufferBuilder &_fbb,
+ const TensorT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateTensor(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<Tensor> CreateTensor(flatbuffers::FlatBufferBuilder &_fbb,
+ const TensorT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const TensorT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _shape = _o->shape.size() ? _fbb.CreateVector(_o->shape) : 0;
+ auto _type = _o->type;
+ auto _buffer = _o->buffer;
+ auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name);
+ auto _quantization =
+ _o->quantization ? CreateQuantizationParameters(_fbb, _o->quantization.get(), _rehasher) : 0;
+ auto _is_variable = _o->is_variable;
+ auto _sparsity = _o->sparsity ? CreateSparsityParameters(_fbb, _o->sparsity.get(), _rehasher) : 0;
+ auto _shape_signature = _o->shape_signature.size() ? _fbb.CreateVector(_o->shape_signature) : 0;
+ return circle::CreateTensor(_fbb, _shape, _type, _buffer, _name, _quantization, _is_variable,
+ _sparsity, _shape_signature);
+}
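+
+// Object-API sketch for a Tensor (illustrative shape/name; buffer index 1 is a
+// placeholder). An empty name and null quantization/sparsity are simply omitted
+// from the serialized table:
+//
+//   circle::TensorT t;
+//   t.shape = {1, 224, 224, 3};
+//   t.type = circle::TensorType_FLOAT32;
+//   t.buffer = 1;
+//   t.name = "input";
+//   auto tensor_off = circle::Tensor::Pack(fbb, &t);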
+
+inline Conv2DOptionsT *
+Conv2DOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<Conv2DOptionsT>(new Conv2DOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void Conv2DOptions::UnPackTo(Conv2DOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = padding();
+ _o->padding = _e;
+ }
+ {
+ auto _e = stride_w();
+ _o->stride_w = _e;
+ }
+ {
+ auto _e = stride_h();
+ _o->stride_h = _e;
+ }
+ {
+ auto _e = fused_activation_function();
+ _o->fused_activation_function = _e;
+ }
+ {
+ auto _e = dilation_w_factor();
+ _o->dilation_w_factor = _e;
+ }
+ {
+ auto _e = dilation_h_factor();
+ _o->dilation_h_factor = _e;
+ }
+}
+
+inline flatbuffers::Offset<Conv2DOptions>
+Conv2DOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateConv2DOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<Conv2DOptions>
+CreateConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const Conv2DOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _padding = _o->padding;
+ auto _stride_w = _o->stride_w;
+ auto _stride_h = _o->stride_h;
+ auto _fused_activation_function = _o->fused_activation_function;
+ auto _dilation_w_factor = _o->dilation_w_factor;
+ auto _dilation_h_factor = _o->dilation_h_factor;
+ return circle::CreateConv2DOptions(_fbb, _padding, _stride_w, _stride_h,
+ _fused_activation_function, _dilation_w_factor,
+ _dilation_h_factor);
+}
+
+inline Conv3DOptionsT *
+Conv3DOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<Conv3DOptionsT>(new Conv3DOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void Conv3DOptions::UnPackTo(Conv3DOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = padding();
+ _o->padding = _e;
+ }
+ {
+ auto _e = stride_d();
+ _o->stride_d = _e;
+ }
+ {
+ auto _e = stride_w();
+ _o->stride_w = _e;
+ }
+ {
+ auto _e = stride_h();
+ _o->stride_h = _e;
+ }
+ {
+ auto _e = fused_activation_function();
+ _o->fused_activation_function = _e;
+ }
+ {
+ auto _e = dilation_d_factor();
+ _o->dilation_d_factor = _e;
+ }
+ {
+ auto _e = dilation_w_factor();
+ _o->dilation_w_factor = _e;
+ }
+ {
+ auto _e = dilation_h_factor();
+ _o->dilation_h_factor = _e;
+ }
+}
+
+inline flatbuffers::Offset<Conv3DOptions>
+Conv3DOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Conv3DOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateConv3DOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<Conv3DOptions>
+CreateConv3DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Conv3DOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const Conv3DOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _padding = _o->padding;
+ auto _stride_d = _o->stride_d;
+ auto _stride_w = _o->stride_w;
+ auto _stride_h = _o->stride_h;
+ auto _fused_activation_function = _o->fused_activation_function;
+ auto _dilation_d_factor = _o->dilation_d_factor;
+ auto _dilation_w_factor = _o->dilation_w_factor;
+ auto _dilation_h_factor = _o->dilation_h_factor;
+ return circle::CreateConv3DOptions(_fbb, _padding, _stride_d, _stride_w, _stride_h,
+ _fused_activation_function, _dilation_d_factor,
+ _dilation_w_factor, _dilation_h_factor);
+}
+
+inline Pool2DOptionsT *
+Pool2DOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<Pool2DOptionsT>(new Pool2DOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void Pool2DOptions::UnPackTo(Pool2DOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = padding();
+ _o->padding = _e;
+ }
+ {
+ auto _e = stride_w();
+ _o->stride_w = _e;
+ }
+ {
+ auto _e = stride_h();
+ _o->stride_h = _e;
+ }
+ {
+ auto _e = filter_width();
+ _o->filter_width = _e;
+ }
+ {
+ auto _e = filter_height();
+ _o->filter_height = _e;
+ }
+ {
+ auto _e = fused_activation_function();
+ _o->fused_activation_function = _e;
+ }
+}
+
+inline flatbuffers::Offset<Pool2DOptions>
+Pool2DOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreatePool2DOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<Pool2DOptions>
+CreatePool2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const Pool2DOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _padding = _o->padding;
+ auto _stride_w = _o->stride_w;
+ auto _stride_h = _o->stride_h;
+ auto _filter_width = _o->filter_width;
+ auto _filter_height = _o->filter_height;
+ auto _fused_activation_function = _o->fused_activation_function;
+ return circle::CreatePool2DOptions(_fbb, _padding, _stride_w, _stride_h, _filter_width,
+ _filter_height, _fused_activation_function);
+}
+
+inline DepthwiseConv2DOptionsT *
+DepthwiseConv2DOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<DepthwiseConv2DOptionsT>(new DepthwiseConv2DOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void
+DepthwiseConv2DOptions::UnPackTo(DepthwiseConv2DOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = padding();
+ _o->padding = _e;
+ }
+ {
+ auto _e = stride_w();
+ _o->stride_w = _e;
+ }
+ {
+ auto _e = stride_h();
+ _o->stride_h = _e;
+ }
+ {
+ auto _e = depth_multiplier();
+ _o->depth_multiplier = _e;
+ }
+ {
+ auto _e = fused_activation_function();
+ _o->fused_activation_function = _e;
+ }
+ {
+ auto _e = dilation_w_factor();
+ _o->dilation_w_factor = _e;
+ }
+ {
+ auto _e = dilation_h_factor();
+ _o->dilation_h_factor = _e;
+ }
+}
+
+inline flatbuffers::Offset<DepthwiseConv2DOptions>
+DepthwiseConv2DOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb,
+ const DepthwiseConv2DOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateDepthwiseConv2DOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<DepthwiseConv2DOptions>
+CreateDepthwiseConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ const DepthwiseConv2DOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const DepthwiseConv2DOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _padding = _o->padding;
+ auto _stride_w = _o->stride_w;
+ auto _stride_h = _o->stride_h;
+ auto _depth_multiplier = _o->depth_multiplier;
+ auto _fused_activation_function = _o->fused_activation_function;
+ auto _dilation_w_factor = _o->dilation_w_factor;
+ auto _dilation_h_factor = _o->dilation_h_factor;
+ return circle::CreateDepthwiseConv2DOptions(_fbb, _padding, _stride_w, _stride_h,
+ _depth_multiplier, _fused_activation_function,
+ _dilation_w_factor, _dilation_h_factor);
+}
+
+inline ConcatEmbeddingsOptionsT *
+ConcatEmbeddingsOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<ConcatEmbeddingsOptionsT>(new ConcatEmbeddingsOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void
+ConcatEmbeddingsOptions::UnPackTo(ConcatEmbeddingsOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = num_channels();
+ _o->num_channels = _e;
+ }
+ {
+ auto _e = num_columns_per_channel();
+ if (_e)
+ {
+ _o->num_columns_per_channel.resize(_e->size());
+ for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++)
+ {
+ _o->num_columns_per_channel[_i] = _e->Get(_i);
+ }
+ }
+ }
+ {
+ auto _e = embedding_dim_per_channel();
+ if (_e)
+ {
+ _o->embedding_dim_per_channel.resize(_e->size());
+ for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++)
+ {
+ _o->embedding_dim_per_channel[_i] = _e->Get(_i);
+ }
+ }
+ }
+}
+
+inline flatbuffers::Offset<ConcatEmbeddingsOptions>
+ConcatEmbeddingsOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb,
+ const ConcatEmbeddingsOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateConcatEmbeddingsOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<ConcatEmbeddingsOptions>
+CreateConcatEmbeddingsOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ const ConcatEmbeddingsOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const ConcatEmbeddingsOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _num_channels = _o->num_channels;
+ auto _num_columns_per_channel =
+ _o->num_columns_per_channel.size() ? _fbb.CreateVector(_o->num_columns_per_channel) : 0;
+ auto _embedding_dim_per_channel =
+ _o->embedding_dim_per_channel.size() ? _fbb.CreateVector(_o->embedding_dim_per_channel) : 0;
+ return circle::CreateConcatEmbeddingsOptions(_fbb, _num_channels, _num_columns_per_channel,
+ _embedding_dim_per_channel);
+}
+
+inline LSHProjectionOptionsT *
+LSHProjectionOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<LSHProjectionOptionsT>(new LSHProjectionOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void LSHProjectionOptions::UnPackTo(LSHProjectionOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = type();
+ _o->type = _e;
+ }
+}
+
+inline flatbuffers::Offset<LSHProjectionOptions>
+LSHProjectionOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateLSHProjectionOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<LSHProjectionOptions>
+CreateLSHProjectionOptions(flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const LSHProjectionOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _type = _o->type;
+ return circle::CreateLSHProjectionOptions(_fbb, _type);
+}
+
+inline SVDFOptionsT *SVDFOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<SVDFOptionsT>(new SVDFOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void SVDFOptions::UnPackTo(SVDFOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = rank();
+ _o->rank = _e;
+ }
+ {
+ auto _e = fused_activation_function();
+ _o->fused_activation_function = _e;
+ }
+ {
+ auto _e = asymmetric_quantize_inputs();
+ _o->asymmetric_quantize_inputs = _e;
+ }
+}
+
+inline flatbuffers::Offset<SVDFOptions>
+SVDFOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateSVDFOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<SVDFOptions>
+CreateSVDFOptions(flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const SVDFOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _rank = _o->rank;
+ auto _fused_activation_function = _o->fused_activation_function;
+ auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs;
+ return circle::CreateSVDFOptions(_fbb, _rank, _fused_activation_function,
+ _asymmetric_quantize_inputs);
+}
+
+inline RNNOptionsT *RNNOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<RNNOptionsT>(new RNNOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void RNNOptions::UnPackTo(RNNOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = fused_activation_function();
+ _o->fused_activation_function = _e;
+ }
+ {
+ auto _e = asymmetric_quantize_inputs();
+ _o->asymmetric_quantize_inputs = _e;
+ }
+}
+
+inline flatbuffers::Offset<RNNOptions>
+RNNOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateRNNOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<RNNOptions>
+CreateRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const RNNOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _fused_activation_function = _o->fused_activation_function;
+ auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs;
+ return circle::CreateRNNOptions(_fbb, _fused_activation_function, _asymmetric_quantize_inputs);
+}
+
+inline SequenceRNNOptionsT *
+SequenceRNNOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<SequenceRNNOptionsT>(new SequenceRNNOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void SequenceRNNOptions::UnPackTo(SequenceRNNOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = time_major();
+ _o->time_major = _e;
+ }
+ {
+ auto _e = fused_activation_function();
+ _o->fused_activation_function = _e;
+ }
+ {
+ auto _e = asymmetric_quantize_inputs();
+ _o->asymmetric_quantize_inputs = _e;
+ }
+}
+
+inline flatbuffers::Offset<SequenceRNNOptions>
+SequenceRNNOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SequenceRNNOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateSequenceRNNOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<SequenceRNNOptions>
+CreateSequenceRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const SequenceRNNOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const SequenceRNNOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _time_major = _o->time_major;
+ auto _fused_activation_function = _o->fused_activation_function;
+ auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs;
+ return circle::CreateSequenceRNNOptions(_fbb, _time_major, _fused_activation_function,
+ _asymmetric_quantize_inputs);
+}
+
+inline BidirectionalSequenceRNNOptionsT *
+BidirectionalSequenceRNNOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o =
+ std::unique_ptr<BidirectionalSequenceRNNOptionsT>(new BidirectionalSequenceRNNOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void
+BidirectionalSequenceRNNOptions::UnPackTo(BidirectionalSequenceRNNOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = time_major();
+ _o->time_major = _e;
+ }
+ {
+ auto _e = fused_activation_function();
+ _o->fused_activation_function = _e;
+ }
+ {
+ auto _e = merge_outputs();
+ _o->merge_outputs = _e;
+ }
+ {
+ auto _e = asymmetric_quantize_inputs();
+ _o->asymmetric_quantize_inputs = _e;
+ }
+}
+
+inline flatbuffers::Offset<BidirectionalSequenceRNNOptions>
+BidirectionalSequenceRNNOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb,
+ const BidirectionalSequenceRNNOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateBidirectionalSequenceRNNOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<BidirectionalSequenceRNNOptions>
+CreateBidirectionalSequenceRNNOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ const BidirectionalSequenceRNNOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const BidirectionalSequenceRNNOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _time_major = _o->time_major;
+ auto _fused_activation_function = _o->fused_activation_function;
+ auto _merge_outputs = _o->merge_outputs;
+ auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs;
+ return circle::CreateBidirectionalSequenceRNNOptions(
+ _fbb, _time_major, _fused_activation_function, _merge_outputs, _asymmetric_quantize_inputs);
+}
+
+inline FullyConnectedOptionsT *
+FullyConnectedOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<FullyConnectedOptionsT>(new FullyConnectedOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void FullyConnectedOptions::UnPackTo(FullyConnectedOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = fused_activation_function();
+ _o->fused_activation_function = _e;
+ }
+ {
+ auto _e = weights_format();
+ _o->weights_format = _e;
+ }
+ {
+ auto _e = keep_num_dims();
+ _o->keep_num_dims = _e;
+ }
+ {
+ auto _e = asymmetric_quantize_inputs();
+ _o->asymmetric_quantize_inputs = _e;
+ }
+}
+
+inline flatbuffers::Offset<FullyConnectedOptions>
+FullyConnectedOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateFullyConnectedOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<FullyConnectedOptions>
+CreateFullyConnectedOptions(flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const FullyConnectedOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _fused_activation_function = _o->fused_activation_function;
+ auto _weights_format = _o->weights_format;
+ auto _keep_num_dims = _o->keep_num_dims;
+ auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs;
+ return circle::CreateFullyConnectedOptions(_fbb, _fused_activation_function, _weights_format,
+ _keep_num_dims, _asymmetric_quantize_inputs);
+}
+
+inline SoftmaxOptionsT *
+SoftmaxOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<SoftmaxOptionsT>(new SoftmaxOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void SoftmaxOptions::UnPackTo(SoftmaxOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = beta();
+ _o->beta = _e;
+ }
+}
+
+inline flatbuffers::Offset<SoftmaxOptions>
+SoftmaxOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateSoftmaxOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<SoftmaxOptions>
+CreateSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const SoftmaxOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _beta = _o->beta;
+ return circle::CreateSoftmaxOptions(_fbb, _beta);
+}
+
+inline ConcatenationOptionsT *
+ConcatenationOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<ConcatenationOptionsT>(new ConcatenationOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void ConcatenationOptions::UnPackTo(ConcatenationOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = axis();
+ _o->axis = _e;
+ }
+ {
+ auto _e = fused_activation_function();
+ _o->fused_activation_function = _e;
+ }
+}
+
+inline flatbuffers::Offset<ConcatenationOptions>
+ConcatenationOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateConcatenationOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<ConcatenationOptions>
+CreateConcatenationOptions(flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const ConcatenationOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _axis = _o->axis;
+ auto _fused_activation_function = _o->fused_activation_function;
+ return circle::CreateConcatenationOptions(_fbb, _axis, _fused_activation_function);
+}
+
+inline AddOptionsT *AddOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<AddOptionsT>(new AddOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void AddOptions::UnPackTo(AddOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = fused_activation_function();
+ _o->fused_activation_function = _e;
+ }
+ {
+ auto _e = pot_scale_int16();
+ _o->pot_scale_int16 = _e;
+ }
+}
+
+inline flatbuffers::Offset<AddOptions>
+AddOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateAddOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<AddOptions>
+CreateAddOptions(flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const AddOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _fused_activation_function = _o->fused_activation_function;
+ auto _pot_scale_int16 = _o->pot_scale_int16;
+ return circle::CreateAddOptions(_fbb, _fused_activation_function, _pot_scale_int16);
+}
+
+inline MulOptionsT *MulOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<MulOptionsT>(new MulOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void MulOptions::UnPackTo(MulOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = fused_activation_function();
+ _o->fused_activation_function = _e;
+ }
+}
+
+inline flatbuffers::Offset<MulOptions>
+MulOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateMulOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<MulOptions>
+CreateMulOptions(flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const MulOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _fused_activation_function = _o->fused_activation_function;
+ return circle::CreateMulOptions(_fbb, _fused_activation_function);
+}
+
+inline L2NormOptionsT *
+L2NormOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<L2NormOptionsT>(new L2NormOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void L2NormOptions::UnPackTo(L2NormOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = fused_activation_function();
+ _o->fused_activation_function = _e;
+ }
+}
+
+inline flatbuffers::Offset<L2NormOptions>
+L2NormOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateL2NormOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<L2NormOptions>
+CreateL2NormOptions(flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const L2NormOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _fused_activation_function = _o->fused_activation_function;
+ return circle::CreateL2NormOptions(_fbb, _fused_activation_function);
+}
+
+inline LocalResponseNormalizationOptionsT *
+LocalResponseNormalizationOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o =
+ std::unique_ptr<LocalResponseNormalizationOptionsT>(new LocalResponseNormalizationOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void
+LocalResponseNormalizationOptions::UnPackTo(LocalResponseNormalizationOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = radius();
+ _o->radius = _e;
+ }
+ {
+ auto _e = bias();
+ _o->bias = _e;
+ }
+ {
+ auto _e = alpha();
+ _o->alpha = _e;
+ }
+ {
+ auto _e = beta();
+ _o->beta = _e;
+ }
+}
+
+inline flatbuffers::Offset<LocalResponseNormalizationOptions>
+LocalResponseNormalizationOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb,
+ const LocalResponseNormalizationOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateLocalResponseNormalizationOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<LocalResponseNormalizationOptions>
+CreateLocalResponseNormalizationOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ const LocalResponseNormalizationOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const LocalResponseNormalizationOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _radius = _o->radius;
+ auto _bias = _o->bias;
+ auto _alpha = _o->alpha;
+ auto _beta = _o->beta;
+ return circle::CreateLocalResponseNormalizationOptions(_fbb, _radius, _bias, _alpha, _beta);
+}
+
+inline LSTMOptionsT *LSTMOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<LSTMOptionsT>(new LSTMOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void LSTMOptions::UnPackTo(LSTMOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = fused_activation_function();
+ _o->fused_activation_function = _e;
+ }
+ {
+ auto _e = cell_clip();
+ _o->cell_clip = _e;
+ }
+ {
+ auto _e = proj_clip();
+ _o->proj_clip = _e;
+ }
+ {
+ auto _e = kernel_type();
+ _o->kernel_type = _e;
+ }
+ {
+ auto _e = asymmetric_quantize_inputs();
+ _o->asymmetric_quantize_inputs = _e;
+ }
+}
+
+inline flatbuffers::Offset<LSTMOptions>
+LSTMOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateLSTMOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<LSTMOptions>
+CreateLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const LSTMOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _fused_activation_function = _o->fused_activation_function;
+ auto _cell_clip = _o->cell_clip;
+ auto _proj_clip = _o->proj_clip;
+ auto _kernel_type = _o->kernel_type;
+ auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs;
+ return circle::CreateLSTMOptions(_fbb, _fused_activation_function, _cell_clip, _proj_clip,
+ _kernel_type, _asymmetric_quantize_inputs);
+}
+
+inline UnidirectionalSequenceLSTMOptionsT *
+UnidirectionalSequenceLSTMOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o =
+ std::unique_ptr<UnidirectionalSequenceLSTMOptionsT>(new UnidirectionalSequenceLSTMOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void
+UnidirectionalSequenceLSTMOptions::UnPackTo(UnidirectionalSequenceLSTMOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = fused_activation_function();
+ _o->fused_activation_function = _e;
+ }
+ {
+ auto _e = cell_clip();
+ _o->cell_clip = _e;
+ }
+ {
+ auto _e = proj_clip();
+ _o->proj_clip = _e;
+ }
+ {
+ auto _e = time_major();
+ _o->time_major = _e;
+ }
+ {
+ auto _e = asymmetric_quantize_inputs();
+ _o->asymmetric_quantize_inputs = _e;
+ }
+}
+
+inline flatbuffers::Offset<UnidirectionalSequenceLSTMOptions>
+UnidirectionalSequenceLSTMOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb,
+ const UnidirectionalSequenceLSTMOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateUnidirectionalSequenceLSTMOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<UnidirectionalSequenceLSTMOptions>
+CreateUnidirectionalSequenceLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ const UnidirectionalSequenceLSTMOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const UnidirectionalSequenceLSTMOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _fused_activation_function = _o->fused_activation_function;
+ auto _cell_clip = _o->cell_clip;
+ auto _proj_clip = _o->proj_clip;
+ auto _time_major = _o->time_major;
+ auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs;
+ return circle::CreateUnidirectionalSequenceLSTMOptions(_fbb, _fused_activation_function,
+ _cell_clip, _proj_clip, _time_major,
+ _asymmetric_quantize_inputs);
+}
+
+inline BidirectionalSequenceLSTMOptionsT *
+BidirectionalSequenceLSTMOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o =
+ std::unique_ptr<BidirectionalSequenceLSTMOptionsT>(new BidirectionalSequenceLSTMOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void
+BidirectionalSequenceLSTMOptions::UnPackTo(BidirectionalSequenceLSTMOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = fused_activation_function();
+ _o->fused_activation_function = _e;
+ }
+ {
+ auto _e = cell_clip();
+ _o->cell_clip = _e;
+ }
+ {
+ auto _e = proj_clip();
+ _o->proj_clip = _e;
+ }
+ {
+ auto _e = merge_outputs();
+ _o->merge_outputs = _e;
+ }
+ {
+ auto _e = time_major();
+ _o->time_major = _e;
+ }
+ {
+ auto _e = asymmetric_quantize_inputs();
+ _o->asymmetric_quantize_inputs = _e;
+ }
+}
+
+inline flatbuffers::Offset<BidirectionalSequenceLSTMOptions>
+BidirectionalSequenceLSTMOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb,
+ const BidirectionalSequenceLSTMOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateBidirectionalSequenceLSTMOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<BidirectionalSequenceLSTMOptions>
+CreateBidirectionalSequenceLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ const BidirectionalSequenceLSTMOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const BidirectionalSequenceLSTMOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _fused_activation_function = _o->fused_activation_function;
+ auto _cell_clip = _o->cell_clip;
+ auto _proj_clip = _o->proj_clip;
+ auto _merge_outputs = _o->merge_outputs;
+ auto _time_major = _o->time_major;
+ auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs;
+ return circle::CreateBidirectionalSequenceLSTMOptions(_fbb, _fused_activation_function,
+ _cell_clip, _proj_clip, _merge_outputs,
+ _time_major, _asymmetric_quantize_inputs);
+}
+
+inline ResizeBilinearOptionsT *
+ResizeBilinearOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<ResizeBilinearOptionsT>(new ResizeBilinearOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void ResizeBilinearOptions::UnPackTo(ResizeBilinearOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = align_corners();
+ _o->align_corners = _e;
+ }
+ {
+ auto _e = half_pixel_centers();
+ _o->half_pixel_centers = _e;
+ }
+}
+
+inline flatbuffers::Offset<ResizeBilinearOptions>
+ResizeBilinearOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateResizeBilinearOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<ResizeBilinearOptions>
+CreateResizeBilinearOptions(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const ResizeBilinearOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _align_corners = _o->align_corners;
+ auto _half_pixel_centers = _o->half_pixel_centers;
+ return circle::CreateResizeBilinearOptions(_fbb, _align_corners, _half_pixel_centers);
+}
+
+inline ResizeNearestNeighborOptionsT *
+ResizeNearestNeighborOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<ResizeNearestNeighborOptionsT>(new ResizeNearestNeighborOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void
+ResizeNearestNeighborOptions::UnPackTo(ResizeNearestNeighborOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = align_corners();
+ _o->align_corners = _e;
+ }
+ {
+ auto _e = half_pixel_centers();
+ _o->half_pixel_centers = _e;
+ }
+}
+
+inline flatbuffers::Offset<ResizeNearestNeighborOptions>
+ResizeNearestNeighborOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb,
+ const ResizeNearestNeighborOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateResizeNearestNeighborOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<ResizeNearestNeighborOptions>
+CreateResizeNearestNeighborOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ const ResizeNearestNeighborOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const ResizeNearestNeighborOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _align_corners = _o->align_corners;
+ auto _half_pixel_centers = _o->half_pixel_centers;
+ return circle::CreateResizeNearestNeighborOptions(_fbb, _align_corners, _half_pixel_centers);
+}
+
+inline CallOptionsT *CallOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<CallOptionsT>(new CallOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void CallOptions::UnPackTo(CallOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = subgraph();
+ _o->subgraph = _e;
+ }
+}
+
+inline flatbuffers::Offset<CallOptions>
+CallOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateCallOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<CallOptions>
+CreateCallOptions(flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const CallOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _subgraph = _o->subgraph;
+ return circle::CreateCallOptions(_fbb, _subgraph);
+}
+
+inline PadOptionsT *PadOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<PadOptionsT>(new PadOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void PadOptions::UnPackTo(PadOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<PadOptions>
+PadOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreatePadOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<PadOptions>
+CreatePadOptions(flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const PadOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreatePadOptions(_fbb);
+}
+
+inline PadV2OptionsT *PadV2Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<PadV2OptionsT>(new PadV2OptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void PadV2Options::UnPackTo(PadV2OptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<PadV2Options>
+PadV2Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreatePadV2Options(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<PadV2Options>
+CreatePadV2Options(flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const PadV2OptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreatePadV2Options(_fbb);
+}
+
+inline ReshapeOptionsT *
+ReshapeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<ReshapeOptionsT>(new ReshapeOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void ReshapeOptions::UnPackTo(ReshapeOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = new_shape();
+ if (_e)
+ {
+ _o->new_shape.resize(_e->size());
+ for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++)
+ {
+ _o->new_shape[_i] = _e->Get(_i);
+ }
+ }
+ }
+}
+
+inline flatbuffers::Offset<ReshapeOptions>
+ReshapeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateReshapeOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<ReshapeOptions>
+CreateReshapeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const ReshapeOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _new_shape = _o->new_shape.size() ? _fbb.CreateVector(_o->new_shape) : 0;
+ return circle::CreateReshapeOptions(_fbb, _new_shape);
+}
+
+inline SpaceToBatchNDOptionsT *
+SpaceToBatchNDOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<SpaceToBatchNDOptionsT>(new SpaceToBatchNDOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void SpaceToBatchNDOptions::UnPackTo(SpaceToBatchNDOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<SpaceToBatchNDOptions>
+SpaceToBatchNDOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateSpaceToBatchNDOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<SpaceToBatchNDOptions>
+CreateSpaceToBatchNDOptions(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const SpaceToBatchNDOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateSpaceToBatchNDOptions(_fbb);
+}
+
+inline BatchToSpaceNDOptionsT *
+BatchToSpaceNDOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<BatchToSpaceNDOptionsT>(new BatchToSpaceNDOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void BatchToSpaceNDOptions::UnPackTo(BatchToSpaceNDOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<BatchToSpaceNDOptions>
+BatchToSpaceNDOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateBatchToSpaceNDOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<BatchToSpaceNDOptions>
+CreateBatchToSpaceNDOptions(flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const BatchToSpaceNDOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateBatchToSpaceNDOptions(_fbb);
+}
+
+inline SkipGramOptionsT *
+SkipGramOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<SkipGramOptionsT>(new SkipGramOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void SkipGramOptions::UnPackTo(SkipGramOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = ngram_size();
+ _o->ngram_size = _e;
+ }
+ {
+ auto _e = max_skip_size();
+ _o->max_skip_size = _e;
+ }
+ {
+ auto _e = include_all_ngrams();
+ _o->include_all_ngrams = _e;
+ }
+}
+
+inline flatbuffers::Offset<SkipGramOptions>
+SkipGramOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateSkipGramOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<SkipGramOptions>
+CreateSkipGramOptions(flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const SkipGramOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _ngram_size = _o->ngram_size;
+ auto _max_skip_size = _o->max_skip_size;
+ auto _include_all_ngrams = _o->include_all_ngrams;
+ return circle::CreateSkipGramOptions(_fbb, _ngram_size, _max_skip_size, _include_all_ngrams);
+}
+
+inline SpaceToDepthOptionsT *
+SpaceToDepthOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<SpaceToDepthOptionsT>(new SpaceToDepthOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void SpaceToDepthOptions::UnPackTo(SpaceToDepthOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = block_size();
+ _o->block_size = _e;
+ }
+}
+
+inline flatbuffers::Offset<SpaceToDepthOptions>
+SpaceToDepthOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateSpaceToDepthOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<SpaceToDepthOptions>
+CreateSpaceToDepthOptions(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const SpaceToDepthOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _block_size = _o->block_size;
+ return circle::CreateSpaceToDepthOptions(_fbb, _block_size);
+}
+
+inline DepthToSpaceOptionsT *
+DepthToSpaceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<DepthToSpaceOptionsT>(new DepthToSpaceOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void DepthToSpaceOptions::UnPackTo(DepthToSpaceOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = block_size();
+ _o->block_size = _e;
+ }
+}
+
+inline flatbuffers::Offset<DepthToSpaceOptions>
+DepthToSpaceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateDepthToSpaceOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<DepthToSpaceOptions>
+CreateDepthToSpaceOptions(flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const DepthToSpaceOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _block_size = _o->block_size;
+ return circle::CreateDepthToSpaceOptions(_fbb, _block_size);
+}
+
+inline SubOptionsT *SubOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<SubOptionsT>(new SubOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void SubOptions::UnPackTo(SubOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = fused_activation_function();
+ _o->fused_activation_function = _e;
+ }
+ {
+ auto _e = pot_scale_int16();
+ _o->pot_scale_int16 = _e;
+ }
+}
+
+inline flatbuffers::Offset<SubOptions>
+SubOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateSubOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<SubOptions>
+CreateSubOptions(flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const SubOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _fused_activation_function = _o->fused_activation_function;
+ auto _pot_scale_int16 = _o->pot_scale_int16;
+ return circle::CreateSubOptions(_fbb, _fused_activation_function, _pot_scale_int16);
+}
+
+inline DivOptionsT *DivOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<DivOptionsT>(new DivOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void DivOptions::UnPackTo(DivOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = fused_activation_function();
+ _o->fused_activation_function = _e;
+ }
+}
+
+inline flatbuffers::Offset<DivOptions>
+DivOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateDivOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<DivOptions>
+CreateDivOptions(flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const DivOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _fused_activation_function = _o->fused_activation_function;
+ return circle::CreateDivOptions(_fbb, _fused_activation_function);
+}
+
+inline TopKV2OptionsT *
+TopKV2Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<TopKV2OptionsT>(new TopKV2OptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void TopKV2Options::UnPackTo(TopKV2OptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<TopKV2Options>
+TopKV2Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateTopKV2Options(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<TopKV2Options>
+CreateTopKV2Options(flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const TopKV2OptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateTopKV2Options(_fbb);
+}
+
+inline EmbeddingLookupSparseOptionsT *
+EmbeddingLookupSparseOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<EmbeddingLookupSparseOptionsT>(new EmbeddingLookupSparseOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void
+EmbeddingLookupSparseOptions::UnPackTo(EmbeddingLookupSparseOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = combiner();
+ _o->combiner = _e;
+ }
+}
+
+inline flatbuffers::Offset<EmbeddingLookupSparseOptions>
+EmbeddingLookupSparseOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb,
+ const EmbeddingLookupSparseOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateEmbeddingLookupSparseOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<EmbeddingLookupSparseOptions>
+CreateEmbeddingLookupSparseOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ const EmbeddingLookupSparseOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const EmbeddingLookupSparseOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _combiner = _o->combiner;
+ return circle::CreateEmbeddingLookupSparseOptions(_fbb, _combiner);
+}
+
+inline GatherOptionsT *
+GatherOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<GatherOptionsT>(new GatherOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void GatherOptions::UnPackTo(GatherOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = axis();
+ _o->axis = _e;
+ }
+ {
+ auto _e = batch_dims();
+ _o->batch_dims = _e;
+ }
+}
+
+inline flatbuffers::Offset<GatherOptions>
+GatherOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateGatherOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<GatherOptions>
+CreateGatherOptions(flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const GatherOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _axis = _o->axis;
+ auto _batch_dims = _o->batch_dims;
+ return circle::CreateGatherOptions(_fbb, _axis, _batch_dims);
+}
+
+inline TransposeOptionsT *
+TransposeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<TransposeOptionsT>(new TransposeOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void TransposeOptions::UnPackTo(TransposeOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<TransposeOptions>
+TransposeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateTransposeOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<TransposeOptions>
+CreateTransposeOptions(flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const TransposeOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateTransposeOptions(_fbb);
+}
+
+inline ExpOptionsT *ExpOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<ExpOptionsT>(new ExpOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void ExpOptions::UnPackTo(ExpOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<ExpOptions>
+ExpOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateExpOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<ExpOptions>
+CreateExpOptions(flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const ExpOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateExpOptions(_fbb);
+}
+
+inline CosOptionsT *CosOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<CosOptionsT>(new CosOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void CosOptions::UnPackTo(CosOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<CosOptions>
+CosOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateCosOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<CosOptions>
+CreateCosOptions(flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const CosOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateCosOptions(_fbb);
+}
+
+inline ReducerOptionsT *
+ReducerOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<ReducerOptionsT>(new ReducerOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void ReducerOptions::UnPackTo(ReducerOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = keep_dims();
+ _o->keep_dims = _e;
+ }
+}
+
+inline flatbuffers::Offset<ReducerOptions>
+ReducerOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateReducerOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<ReducerOptions>
+CreateReducerOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const ReducerOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _keep_dims = _o->keep_dims;
+ return circle::CreateReducerOptions(_fbb, _keep_dims);
+}
+
+inline SqueezeOptionsT *
+SqueezeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<SqueezeOptionsT>(new SqueezeOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void SqueezeOptions::UnPackTo(SqueezeOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = squeeze_dims();
+ if (_e)
+ {
+ _o->squeeze_dims.resize(_e->size());
+ for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++)
+ {
+ _o->squeeze_dims[_i] = _e->Get(_i);
+ }
+ }
+ }
+}
+
+inline flatbuffers::Offset<SqueezeOptions>
+SqueezeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateSqueezeOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<SqueezeOptions>
+CreateSqueezeOptions(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const SqueezeOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _squeeze_dims = _o->squeeze_dims.size() ? _fbb.CreateVector(_o->squeeze_dims) : 0;
+ return circle::CreateSqueezeOptions(_fbb, _squeeze_dims);
+}
+
+inline SplitOptionsT *SplitOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<SplitOptionsT>(new SplitOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void SplitOptions::UnPackTo(SplitOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = num_splits();
+ _o->num_splits = _e;
+ }
+}
+
+inline flatbuffers::Offset<SplitOptions>
+SplitOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateSplitOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<SplitOptions>
+CreateSplitOptions(flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const SplitOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _num_splits = _o->num_splits;
+ return circle::CreateSplitOptions(_fbb, _num_splits);
+}
+
+inline SplitVOptionsT *
+SplitVOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<SplitVOptionsT>(new SplitVOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void SplitVOptions::UnPackTo(SplitVOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = num_splits();
+ _o->num_splits = _e;
+ }
+}
+
+inline flatbuffers::Offset<SplitVOptions>
+SplitVOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateSplitVOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<SplitVOptions>
+CreateSplitVOptions(flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const SplitVOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _num_splits = _o->num_splits;
+ return circle::CreateSplitVOptions(_fbb, _num_splits);
+}
+
+inline StridedSliceOptionsT *
+StridedSliceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<StridedSliceOptionsT>(new StridedSliceOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void StridedSliceOptions::UnPackTo(StridedSliceOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = begin_mask();
+ _o->begin_mask = _e;
+ }
+ {
+ auto _e = end_mask();
+ _o->end_mask = _e;
+ }
+ {
+ auto _e = ellipsis_mask();
+ _o->ellipsis_mask = _e;
+ }
+ {
+ auto _e = new_axis_mask();
+ _o->new_axis_mask = _e;
+ }
+ {
+ auto _e = shrink_axis_mask();
+ _o->shrink_axis_mask = _e;
+ }
+}
+
+inline flatbuffers::Offset<StridedSliceOptions>
+StridedSliceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateStridedSliceOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<StridedSliceOptions>
+CreateStridedSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const StridedSliceOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _begin_mask = _o->begin_mask;
+ auto _end_mask = _o->end_mask;
+ auto _ellipsis_mask = _o->ellipsis_mask;
+ auto _new_axis_mask = _o->new_axis_mask;
+ auto _shrink_axis_mask = _o->shrink_axis_mask;
+ return circle::CreateStridedSliceOptions(_fbb, _begin_mask, _end_mask, _ellipsis_mask,
+ _new_axis_mask, _shrink_axis_mask);
+}
+
+inline LogSoftmaxOptionsT *
+LogSoftmaxOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<LogSoftmaxOptionsT>(new LogSoftmaxOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void LogSoftmaxOptions::UnPackTo(LogSoftmaxOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<LogSoftmaxOptions>
+LogSoftmaxOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateLogSoftmaxOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<LogSoftmaxOptions>
+CreateLogSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const LogSoftmaxOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateLogSoftmaxOptions(_fbb);
+}
+
+inline CastOptionsT *CastOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<CastOptionsT>(new CastOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void CastOptions::UnPackTo(CastOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = in_data_type();
+ _o->in_data_type = _e;
+ }
+ {
+ auto _e = out_data_type();
+ _o->out_data_type = _e;
+ }
+}
+
+inline flatbuffers::Offset<CastOptions>
+CastOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateCastOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<CastOptions>
+CreateCastOptions(flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const CastOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _in_data_type = _o->in_data_type;
+ auto _out_data_type = _o->out_data_type;
+ return circle::CreateCastOptions(_fbb, _in_data_type, _out_data_type);
+}
+
+inline DequantizeOptionsT *
+DequantizeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<DequantizeOptionsT>(new DequantizeOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void DequantizeOptions::UnPackTo(DequantizeOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<DequantizeOptions>
+DequantizeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateDequantizeOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<DequantizeOptions>
+CreateDequantizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const DequantizeOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateDequantizeOptions(_fbb);
+}
+
+inline MaximumMinimumOptionsT *
+MaximumMinimumOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<MaximumMinimumOptionsT>(new MaximumMinimumOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void MaximumMinimumOptions::UnPackTo(MaximumMinimumOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<MaximumMinimumOptions>
+MaximumMinimumOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateMaximumMinimumOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<MaximumMinimumOptions>
+CreateMaximumMinimumOptions(flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const MaximumMinimumOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateMaximumMinimumOptions(_fbb);
+}
+
+inline TileOptionsT *TileOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<TileOptionsT>(new TileOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void TileOptions::UnPackTo(TileOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<TileOptions>
+TileOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateTileOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<TileOptions>
+CreateTileOptions(flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const TileOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateTileOptions(_fbb);
+}
+
+inline ArgMaxOptionsT *
+ArgMaxOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<ArgMaxOptionsT>(new ArgMaxOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void ArgMaxOptions::UnPackTo(ArgMaxOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = output_type();
+ _o->output_type = _e;
+ }
+}
+
+inline flatbuffers::Offset<ArgMaxOptions>
+ArgMaxOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateArgMaxOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<ArgMaxOptions>
+CreateArgMaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const ArgMaxOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _output_type = _o->output_type;
+ return circle::CreateArgMaxOptions(_fbb, _output_type);
+}
+
+inline ArgMinOptionsT *
+ArgMinOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<ArgMinOptionsT>(new ArgMinOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void ArgMinOptions::UnPackTo(ArgMinOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = output_type();
+ _o->output_type = _e;
+ }
+}
+
+inline flatbuffers::Offset<ArgMinOptions>
+ArgMinOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateArgMinOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<ArgMinOptions>
+CreateArgMinOptions(flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const ArgMinOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _output_type = _o->output_type;
+ return circle::CreateArgMinOptions(_fbb, _output_type);
+}
+
+inline GreaterOptionsT *
+GreaterOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<GreaterOptionsT>(new GreaterOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void GreaterOptions::UnPackTo(GreaterOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<GreaterOptions>
+GreaterOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateGreaterOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<GreaterOptions>
+CreateGreaterOptions(flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const GreaterOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateGreaterOptions(_fbb);
+}
+
+inline GreaterEqualOptionsT *
+GreaterEqualOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<GreaterEqualOptionsT>(new GreaterEqualOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void GreaterEqualOptions::UnPackTo(GreaterEqualOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<GreaterEqualOptions>
+GreaterEqualOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateGreaterEqualOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<GreaterEqualOptions>
+CreateGreaterEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const GreaterEqualOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateGreaterEqualOptions(_fbb);
+}
+
+inline LessOptionsT *LessOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<LessOptionsT>(new LessOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void LessOptions::UnPackTo(LessOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<LessOptions>
+LessOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateLessOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<LessOptions>
+CreateLessOptions(flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const LessOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateLessOptions(_fbb);
+}
+
+inline LessEqualOptionsT *
+LessEqualOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<LessEqualOptionsT>(new LessEqualOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void LessEqualOptions::UnPackTo(LessEqualOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<LessEqualOptions>
+LessEqualOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateLessEqualOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<LessEqualOptions>
+CreateLessEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const LessEqualOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateLessEqualOptions(_fbb);
+}
+
+inline NegOptionsT *NegOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<NegOptionsT>(new NegOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void NegOptions::UnPackTo(NegOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<NegOptions>
+NegOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateNegOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<NegOptions>
+CreateNegOptions(flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const NegOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateNegOptions(_fbb);
+}
+
+inline SelectOptionsT *
+SelectOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<SelectOptionsT>(new SelectOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void SelectOptions::UnPackTo(SelectOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<SelectOptions>
+SelectOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SelectOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateSelectOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<SelectOptions>
+CreateSelectOptions(flatbuffers::FlatBufferBuilder &_fbb, const SelectOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const SelectOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateSelectOptions(_fbb);
+}
+
+inline SliceOptionsT *SliceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<SliceOptionsT>(new SliceOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void SliceOptions::UnPackTo(SliceOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<SliceOptions>
+SliceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SliceOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateSliceOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<SliceOptions>
+CreateSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, const SliceOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const SliceOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateSliceOptions(_fbb);
+}
+
+inline TransposeConvOptionsT *
+TransposeConvOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<TransposeConvOptionsT>(new TransposeConvOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void TransposeConvOptions::UnPackTo(TransposeConvOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = padding();
+ _o->padding = _e;
+ }
+ {
+ auto _e = stride_w();
+ _o->stride_w = _e;
+ }
+ {
+ auto _e = stride_h();
+ _o->stride_h = _e;
+ }
+}
+
+inline flatbuffers::Offset<TransposeConvOptions>
+TransposeConvOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateTransposeConvOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<TransposeConvOptions>
+CreateTransposeConvOptions(flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const TransposeConvOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _padding = _o->padding;
+ auto _stride_w = _o->stride_w;
+ auto _stride_h = _o->stride_h;
+ return circle::CreateTransposeConvOptions(_fbb, _padding, _stride_w, _stride_h);
+}
+
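+// Usage sketch (illustrative only, not part of the generated schema): the object API
+// above pairs each table with a native "...T" struct, so an options table can be
+// unpacked, mutated, and re-packed. The pointer `opts` is assumed to come from an
+// already-loaded circle model.
+//
+//   std::unique_ptr<circle::TransposeConvOptionsT> obj(opts->UnPack(nullptr));
+//   obj->stride_w = 2;  // mutate the mutable native copy
+//   flatbuffers::FlatBufferBuilder fbb;
+//   fbb.Finish(circle::TransposeConvOptions::Pack(fbb, obj.get(), nullptr));
+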
+inline ExpandDimsOptionsT *
+ExpandDimsOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<ExpandDimsOptionsT>(new ExpandDimsOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void ExpandDimsOptions::UnPackTo(ExpandDimsOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<ExpandDimsOptions>
+ExpandDimsOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateExpandDimsOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<ExpandDimsOptions>
+CreateExpandDimsOptions(flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const ExpandDimsOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateExpandDimsOptions(_fbb);
+}
+
+inline SparseToDenseOptionsT *
+SparseToDenseOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<SparseToDenseOptionsT>(new SparseToDenseOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void SparseToDenseOptions::UnPackTo(SparseToDenseOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = validate_indices();
+ _o->validate_indices = _e;
+ }
+}
+
+inline flatbuffers::Offset<SparseToDenseOptions>
+SparseToDenseOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateSparseToDenseOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<SparseToDenseOptions>
+CreateSparseToDenseOptions(flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const SparseToDenseOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _validate_indices = _o->validate_indices;
+ return circle::CreateSparseToDenseOptions(_fbb, _validate_indices);
+}
+
+inline EqualOptionsT *EqualOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<EqualOptionsT>(new EqualOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void EqualOptions::UnPackTo(EqualOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<EqualOptions>
+EqualOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const EqualOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateEqualOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<EqualOptions>
+CreateEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const EqualOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const EqualOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateEqualOptions(_fbb);
+}
+
+inline NotEqualOptionsT *
+NotEqualOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<NotEqualOptionsT>(new NotEqualOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void NotEqualOptions::UnPackTo(NotEqualOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<NotEqualOptions>
+NotEqualOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NotEqualOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateNotEqualOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<NotEqualOptions>
+CreateNotEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const NotEqualOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const NotEqualOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateNotEqualOptions(_fbb);
+}
+
+inline ShapeOptionsT *ShapeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<ShapeOptionsT>(new ShapeOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void ShapeOptions::UnPackTo(ShapeOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = out_type();
+ _o->out_type = _e;
+ }
+}
+
+inline flatbuffers::Offset<ShapeOptions>
+ShapeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateShapeOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<ShapeOptions>
+CreateShapeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const ShapeOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _out_type = _o->out_type;
+ return circle::CreateShapeOptions(_fbb, _out_type);
+}
+
+inline RankOptionsT *RankOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<RankOptionsT>(new RankOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void RankOptions::UnPackTo(RankOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<RankOptions>
+RankOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RankOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateRankOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<RankOptions>
+CreateRankOptions(flatbuffers::FlatBufferBuilder &_fbb, const RankOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const RankOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateRankOptions(_fbb);
+}
+
+inline PowOptionsT *PowOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<PowOptionsT>(new PowOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void PowOptions::UnPackTo(PowOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<PowOptions>
+PowOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreatePowOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<PowOptions>
+CreatePowOptions(flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const PowOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreatePowOptions(_fbb);
+}
+
+inline FakeQuantOptionsT *
+FakeQuantOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<FakeQuantOptionsT>(new FakeQuantOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void FakeQuantOptions::UnPackTo(FakeQuantOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = min();
+ _o->min = _e;
+ }
+ {
+ auto _e = max();
+ _o->max = _e;
+ }
+ {
+ auto _e = num_bits();
+ _o->num_bits = _e;
+ }
+ {
+ auto _e = narrow_range();
+ _o->narrow_range = _e;
+ }
+}
+
+inline flatbuffers::Offset<FakeQuantOptions>
+FakeQuantOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateFakeQuantOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<FakeQuantOptions>
+CreateFakeQuantOptions(flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const FakeQuantOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _min = _o->min;
+ auto _max = _o->max;
+ auto _num_bits = _o->num_bits;
+ auto _narrow_range = _o->narrow_range;
+ return circle::CreateFakeQuantOptions(_fbb, _min, _max, _num_bits, _narrow_range);
+}
+
+inline PackOptionsT *PackOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<PackOptionsT>(new PackOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void PackOptions::UnPackTo(PackOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = values_count();
+ _o->values_count = _e;
+ }
+ {
+ auto _e = axis();
+ _o->axis = _e;
+ }
+}
+
+inline flatbuffers::Offset<PackOptions>
+PackOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreatePackOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<PackOptions>
+CreatePackOptions(flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const PackOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _values_count = _o->values_count;
+ auto _axis = _o->axis;
+ return circle::CreatePackOptions(_fbb, _values_count, _axis);
+}
+
+inline LogicalOrOptionsT *
+LogicalOrOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<LogicalOrOptionsT>(new LogicalOrOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void LogicalOrOptions::UnPackTo(LogicalOrOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<LogicalOrOptions>
+LogicalOrOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateLogicalOrOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<LogicalOrOptions>
+CreateLogicalOrOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const LogicalOrOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateLogicalOrOptions(_fbb);
+}
+
+inline OneHotOptionsT *
+OneHotOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<OneHotOptionsT>(new OneHotOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void OneHotOptions::UnPackTo(OneHotOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = axis();
+ _o->axis = _e;
+ }
+}
+
+inline flatbuffers::Offset<OneHotOptions>
+OneHotOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateOneHotOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<OneHotOptions>
+CreateOneHotOptions(flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const OneHotOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _axis = _o->axis;
+ return circle::CreateOneHotOptions(_fbb, _axis);
+}
+
+inline AbsOptionsT *AbsOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<AbsOptionsT>(new AbsOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void AbsOptions::UnPackTo(AbsOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<AbsOptions>
+AbsOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const AbsOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateAbsOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<AbsOptions>
+CreateAbsOptions(flatbuffers::FlatBufferBuilder &_fbb, const AbsOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const AbsOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateAbsOptions(_fbb);
+}
+
+inline HardSwishOptionsT *
+HardSwishOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<HardSwishOptionsT>(new HardSwishOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void HardSwishOptions::UnPackTo(HardSwishOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<HardSwishOptions>
+HardSwishOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const HardSwishOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateHardSwishOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<HardSwishOptions>
+CreateHardSwishOptions(flatbuffers::FlatBufferBuilder &_fbb, const HardSwishOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const HardSwishOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateHardSwishOptions(_fbb);
+}
+
+inline LogicalAndOptionsT *
+LogicalAndOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<LogicalAndOptionsT>(new LogicalAndOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void LogicalAndOptions::UnPackTo(LogicalAndOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<LogicalAndOptions>
+LogicalAndOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalAndOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateLogicalAndOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<LogicalAndOptions>
+CreateLogicalAndOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalAndOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const LogicalAndOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateLogicalAndOptions(_fbb);
+}
+
+inline LogicalNotOptionsT *
+LogicalNotOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<LogicalNotOptionsT>(new LogicalNotOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void LogicalNotOptions::UnPackTo(LogicalNotOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<LogicalNotOptions>
+LogicalNotOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalNotOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateLogicalNotOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<LogicalNotOptions>
+CreateLogicalNotOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalNotOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const LogicalNotOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateLogicalNotOptions(_fbb);
+}
+
+inline UnpackOptionsT *
+UnpackOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<UnpackOptionsT>(new UnpackOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void UnpackOptions::UnPackTo(UnpackOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = num();
+ _o->num = _e;
+ }
+ {
+ auto _e = axis();
+ _o->axis = _e;
+ }
+}
+
+inline flatbuffers::Offset<UnpackOptions>
+UnpackOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnpackOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateUnpackOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<UnpackOptions>
+CreateUnpackOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnpackOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const UnpackOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _num = _o->num;
+ auto _axis = _o->axis;
+ return circle::CreateUnpackOptions(_fbb, _num, _axis);
+}
+
+inline FloorDivOptionsT *
+FloorDivOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<FloorDivOptionsT>(new FloorDivOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void FloorDivOptions::UnPackTo(FloorDivOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<FloorDivOptions>
+FloorDivOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FloorDivOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateFloorDivOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<FloorDivOptions>
+CreateFloorDivOptions(flatbuffers::FlatBufferBuilder &_fbb, const FloorDivOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const FloorDivOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateFloorDivOptions(_fbb);
+}
+
+inline SquareOptionsT *
+SquareOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<SquareOptionsT>(new SquareOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void SquareOptions::UnPackTo(SquareOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<SquareOptions>
+SquareOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SquareOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateSquareOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<SquareOptions>
+CreateSquareOptions(flatbuffers::FlatBufferBuilder &_fbb, const SquareOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const SquareOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateSquareOptions(_fbb);
+}
+
+inline ZerosLikeOptionsT *
+ZerosLikeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<ZerosLikeOptionsT>(new ZerosLikeOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void ZerosLikeOptions::UnPackTo(ZerosLikeOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<ZerosLikeOptions>
+ZerosLikeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ZerosLikeOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateZerosLikeOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<ZerosLikeOptions>
+CreateZerosLikeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ZerosLikeOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const ZerosLikeOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateZerosLikeOptions(_fbb);
+}
+
+inline FillOptionsT *FillOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<FillOptionsT>(new FillOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void FillOptions::UnPackTo(FillOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<FillOptions>
+FillOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateFillOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<FillOptions>
+CreateFillOptions(flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const FillOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateFillOptions(_fbb);
+}
+
+inline FloorModOptionsT *
+FloorModOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<FloorModOptionsT>(new FloorModOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void FloorModOptions::UnPackTo(FloorModOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<FloorModOptions>
+FloorModOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FloorModOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateFloorModOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<FloorModOptions>
+CreateFloorModOptions(flatbuffers::FlatBufferBuilder &_fbb, const FloorModOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const FloorModOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateFloorModOptions(_fbb);
+}
+
+inline RangeOptionsT *RangeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<RangeOptionsT>(new RangeOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void RangeOptions::UnPackTo(RangeOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<RangeOptions>
+RangeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RangeOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateRangeOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<RangeOptions>
+CreateRangeOptions(flatbuffers::FlatBufferBuilder &_fbb, const RangeOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const RangeOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateRangeOptions(_fbb);
+}
+
+inline LeakyReluOptionsT *
+LeakyReluOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<LeakyReluOptionsT>(new LeakyReluOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void LeakyReluOptions::UnPackTo(LeakyReluOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = alpha();
+ _o->alpha = _e;
+ }
+}
+
+inline flatbuffers::Offset<LeakyReluOptions>
+LeakyReluOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LeakyReluOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateLeakyReluOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<LeakyReluOptions>
+CreateLeakyReluOptions(flatbuffers::FlatBufferBuilder &_fbb, const LeakyReluOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const LeakyReluOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _alpha = _o->alpha;
+ return circle::CreateLeakyReluOptions(_fbb, _alpha);
+}
+
+inline SquaredDifferenceOptionsT *
+SquaredDifferenceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<SquaredDifferenceOptionsT>(new SquaredDifferenceOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void
+SquaredDifferenceOptions::UnPackTo(SquaredDifferenceOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<SquaredDifferenceOptions>
+SquaredDifferenceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb,
+ const SquaredDifferenceOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateSquaredDifferenceOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<SquaredDifferenceOptions>
+CreateSquaredDifferenceOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ const SquaredDifferenceOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const SquaredDifferenceOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateSquaredDifferenceOptions(_fbb);
+}
+
+inline MirrorPadOptionsT *
+MirrorPadOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<MirrorPadOptionsT>(new MirrorPadOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void MirrorPadOptions::UnPackTo(MirrorPadOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = mode();
+ _o->mode = _e;
+ }
+}
+
+inline flatbuffers::Offset<MirrorPadOptions>
+MirrorPadOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MirrorPadOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateMirrorPadOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<MirrorPadOptions>
+CreateMirrorPadOptions(flatbuffers::FlatBufferBuilder &_fbb, const MirrorPadOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const MirrorPadOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _mode = _o->mode;
+ return circle::CreateMirrorPadOptions(_fbb, _mode);
+}
+
+inline UniqueOptionsT *
+UniqueOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<UniqueOptionsT>(new UniqueOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void UniqueOptions::UnPackTo(UniqueOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = idx_out_type();
+ _o->idx_out_type = _e;
+ }
+}
+
+inline flatbuffers::Offset<UniqueOptions>
+UniqueOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const UniqueOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateUniqueOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<UniqueOptions>
+CreateUniqueOptions(flatbuffers::FlatBufferBuilder &_fbb, const UniqueOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const UniqueOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _idx_out_type = _o->idx_out_type;
+ return circle::CreateUniqueOptions(_fbb, _idx_out_type);
+}
+
+inline ReverseV2OptionsT *
+ReverseV2Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<ReverseV2OptionsT>(new ReverseV2OptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void ReverseV2Options::UnPackTo(ReverseV2OptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<ReverseV2Options>
+ReverseV2Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReverseV2OptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateReverseV2Options(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<ReverseV2Options>
+CreateReverseV2Options(flatbuffers::FlatBufferBuilder &_fbb, const ReverseV2OptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const ReverseV2OptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateReverseV2Options(_fbb);
+}
+
+inline AddNOptionsT *AddNOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<AddNOptionsT>(new AddNOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void AddNOptions::UnPackTo(AddNOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<AddNOptions>
+AddNOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const AddNOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateAddNOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<AddNOptions>
+CreateAddNOptions(flatbuffers::FlatBufferBuilder &_fbb, const AddNOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const AddNOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateAddNOptions(_fbb);
+}
+
+inline GatherNdOptionsT *
+GatherNdOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<GatherNdOptionsT>(new GatherNdOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void GatherNdOptions::UnPackTo(GatherNdOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<GatherNdOptions>
+GatherNdOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherNdOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateGatherNdOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<GatherNdOptions>
+CreateGatherNdOptions(flatbuffers::FlatBufferBuilder &_fbb, const GatherNdOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const GatherNdOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateGatherNdOptions(_fbb);
+}
+
+inline WhereOptionsT *WhereOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<WhereOptionsT>(new WhereOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void WhereOptions::UnPackTo(WhereOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<WhereOptions>
+WhereOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const WhereOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateWhereOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<WhereOptions>
+CreateWhereOptions(flatbuffers::FlatBufferBuilder &_fbb, const WhereOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const WhereOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateWhereOptions(_fbb);
+}
+
+inline ReverseSequenceOptionsT *
+ReverseSequenceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<ReverseSequenceOptionsT>(new ReverseSequenceOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void
+ReverseSequenceOptions::UnPackTo(ReverseSequenceOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = seq_dim();
+ _o->seq_dim = _e;
+ }
+ {
+ auto _e = batch_dim();
+ _o->batch_dim = _e;
+ }
+}
+
+inline flatbuffers::Offset<ReverseSequenceOptions>
+ReverseSequenceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb,
+ const ReverseSequenceOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateReverseSequenceOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<ReverseSequenceOptions>
+CreateReverseSequenceOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ const ReverseSequenceOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const ReverseSequenceOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _seq_dim = _o->seq_dim;
+ auto _batch_dim = _o->batch_dim;
+ return circle::CreateReverseSequenceOptions(_fbb, _seq_dim, _batch_dim);
+}
+
+inline MatrixDiagOptionsT *
+MatrixDiagOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<MatrixDiagOptionsT>(new MatrixDiagOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void MatrixDiagOptions::UnPackTo(MatrixDiagOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<MatrixDiagOptions>
+MatrixDiagOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MatrixDiagOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateMatrixDiagOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<MatrixDiagOptions>
+CreateMatrixDiagOptions(flatbuffers::FlatBufferBuilder &_fbb, const MatrixDiagOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const MatrixDiagOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateMatrixDiagOptions(_fbb);
+}
+
+inline QuantizeOptionsT *
+QuantizeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<QuantizeOptionsT>(new QuantizeOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void QuantizeOptions::UnPackTo(QuantizeOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<QuantizeOptions>
+QuantizeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateQuantizeOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<QuantizeOptions>
+CreateQuantizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const QuantizeOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateQuantizeOptions(_fbb);
+}
+
+inline MatrixSetDiagOptionsT *
+MatrixSetDiagOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<MatrixSetDiagOptionsT>(new MatrixSetDiagOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void MatrixSetDiagOptions::UnPackTo(MatrixSetDiagOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<MatrixSetDiagOptions>
+MatrixSetDiagOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MatrixSetDiagOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateMatrixSetDiagOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<MatrixSetDiagOptions>
+CreateMatrixSetDiagOptions(flatbuffers::FlatBufferBuilder &_fbb, const MatrixSetDiagOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const MatrixSetDiagOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateMatrixSetDiagOptions(_fbb);
+}
+
+inline IfOptionsT *IfOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<IfOptionsT>(new IfOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void IfOptions::UnPackTo(IfOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = then_subgraph_index();
+ _o->then_subgraph_index = _e;
+ }
+ {
+ auto _e = else_subgraph_index();
+ _o->else_subgraph_index = _e;
+ }
+}
+
+inline flatbuffers::Offset<IfOptions>
+IfOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateIfOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<IfOptions>
+CreateIfOptions(flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const IfOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _then_subgraph_index = _o->then_subgraph_index;
+ auto _else_subgraph_index = _o->else_subgraph_index;
+ return circle::CreateIfOptions(_fbb, _then_subgraph_index, _else_subgraph_index);
+}
+
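+// Building from scratch (illustrative only): a native IfOptionsT can also be filled
+// in directly and handed to Pack(), which forwards to the object-taking
+// CreateIfOptions above and copies the two subgraph indices. An existing
+// flatbuffers::FlatBufferBuilder `fbb` and the subgraph indices are assumed.
+//
+//   circle::IfOptionsT if_opts;
+//   if_opts.then_subgraph_index = 1;
+//   if_opts.else_subgraph_index = 2;
+//   auto offset = circle::IfOptions::Pack(fbb, &if_opts, nullptr);
+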
+inline CallOnceOptionsT *
+CallOnceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<CallOnceOptionsT>(new CallOnceOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void CallOnceOptions::UnPackTo(CallOnceOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = init_subgraph_index();
+ _o->init_subgraph_index = _e;
+ }
+}
+
+inline flatbuffers::Offset<CallOnceOptions>
+CallOnceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CallOnceOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateCallOnceOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<CallOnceOptions>
+CreateCallOnceOptions(flatbuffers::FlatBufferBuilder &_fbb, const CallOnceOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const CallOnceOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _init_subgraph_index = _o->init_subgraph_index;
+ return circle::CreateCallOnceOptions(_fbb, _init_subgraph_index);
+}
+
+inline WhileOptionsT *WhileOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<WhileOptionsT>(new WhileOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void WhileOptions::UnPackTo(WhileOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = cond_subgraph_index();
+ _o->cond_subgraph_index = _e;
+ }
+ {
+ auto _e = body_subgraph_index();
+ _o->body_subgraph_index = _e;
+ }
+}
+
+inline flatbuffers::Offset<WhileOptions>
+WhileOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const WhileOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateWhileOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<WhileOptions>
+CreateWhileOptions(flatbuffers::FlatBufferBuilder &_fbb, const WhileOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const WhileOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _cond_subgraph_index = _o->cond_subgraph_index;
+ auto _body_subgraph_index = _o->body_subgraph_index;
+ return circle::CreateWhileOptions(_fbb, _cond_subgraph_index, _body_subgraph_index);
+}
+
+inline NonMaxSuppressionV4OptionsT *
+NonMaxSuppressionV4Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<NonMaxSuppressionV4OptionsT>(new NonMaxSuppressionV4OptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void
+NonMaxSuppressionV4Options::UnPackTo(NonMaxSuppressionV4OptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<NonMaxSuppressionV4Options>
+NonMaxSuppressionV4Options::Pack(flatbuffers::FlatBufferBuilder &_fbb,
+ const NonMaxSuppressionV4OptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateNonMaxSuppressionV4Options(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<NonMaxSuppressionV4Options>
+CreateNonMaxSuppressionV4Options(flatbuffers::FlatBufferBuilder &_fbb,
+ const NonMaxSuppressionV4OptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const NonMaxSuppressionV4OptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateNonMaxSuppressionV4Options(_fbb);
+}
+
+inline NonMaxSuppressionV5OptionsT *
+NonMaxSuppressionV5Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<NonMaxSuppressionV5OptionsT>(new NonMaxSuppressionV5OptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void
+NonMaxSuppressionV5Options::UnPackTo(NonMaxSuppressionV5OptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<NonMaxSuppressionV5Options>
+NonMaxSuppressionV5Options::Pack(flatbuffers::FlatBufferBuilder &_fbb,
+ const NonMaxSuppressionV5OptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateNonMaxSuppressionV5Options(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<NonMaxSuppressionV5Options>
+CreateNonMaxSuppressionV5Options(flatbuffers::FlatBufferBuilder &_fbb,
+ const NonMaxSuppressionV5OptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const NonMaxSuppressionV5OptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateNonMaxSuppressionV5Options(_fbb);
+}
+
+inline ScatterNdOptionsT *
+ScatterNdOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<ScatterNdOptionsT>(new ScatterNdOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void ScatterNdOptions::UnPackTo(ScatterNdOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<ScatterNdOptions>
+ScatterNdOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateScatterNdOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<ScatterNdOptions>
+CreateScatterNdOptions(flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const ScatterNdOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateScatterNdOptions(_fbb);
+}
+
+inline SelectV2OptionsT *
+SelectV2Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<SelectV2OptionsT>(new SelectV2OptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void SelectV2Options::UnPackTo(SelectV2OptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<SelectV2Options>
+SelectV2Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SelectV2OptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateSelectV2Options(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<SelectV2Options>
+CreateSelectV2Options(flatbuffers::FlatBufferBuilder &_fbb, const SelectV2OptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const SelectV2OptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateSelectV2Options(_fbb);
+}
+
+inline DensifyOptionsT *
+DensifyOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<DensifyOptionsT>(new DensifyOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void DensifyOptions::UnPackTo(DensifyOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<DensifyOptions>
+DensifyOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DensifyOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateDensifyOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<DensifyOptions>
+CreateDensifyOptions(flatbuffers::FlatBufferBuilder &_fbb, const DensifyOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const DensifyOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateDensifyOptions(_fbb);
+}
+
+inline SegmentSumOptionsT *
+SegmentSumOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<SegmentSumOptionsT>(new SegmentSumOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void SegmentSumOptions::UnPackTo(SegmentSumOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<SegmentSumOptions>
+SegmentSumOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SegmentSumOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateSegmentSumOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<SegmentSumOptions>
+CreateSegmentSumOptions(flatbuffers::FlatBufferBuilder &_fbb, const SegmentSumOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const SegmentSumOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateSegmentSumOptions(_fbb);
+}
+
+inline BatchMatMulOptionsT *
+BatchMatMulOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<BatchMatMulOptionsT>(new BatchMatMulOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void BatchMatMulOptions::UnPackTo(BatchMatMulOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = adjoint_lhs();
+ _o->adjoint_lhs = _e;
+ }
+ {
+ auto _e = adjoint_rhs();
+ _o->adjoint_rhs = _e;
+ }
+ {
+ auto _e = asymmetric_quantize_inputs();
+ _o->asymmetric_quantize_inputs = _e;
+ }
+}
+
+inline flatbuffers::Offset<BatchMatMulOptions>
+BatchMatMulOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateBatchMatMulOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<BatchMatMulOptions>
+CreateBatchMatMulOptions(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const BatchMatMulOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _adjoint_lhs = _o->adjoint_lhs;
+ auto _adjoint_rhs = _o->adjoint_rhs;
+ auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs;
+ return circle::CreateBatchMatMulOptions(_fbb, _adjoint_lhs, _adjoint_rhs,
+ _asymmetric_quantize_inputs);
+}
+
+inline CumsumOptionsT *
+CumsumOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<CumsumOptionsT>(new CumsumOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void CumsumOptions::UnPackTo(CumsumOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = exclusive();
+ _o->exclusive = _e;
+ }
+ {
+ auto _e = reverse();
+ _o->reverse = _e;
+ }
+}
+
+inline flatbuffers::Offset<CumsumOptions>
+CumsumOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateCumsumOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<CumsumOptions>
+CreateCumsumOptions(flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const CumsumOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _exclusive = _o->exclusive;
+ auto _reverse = _o->reverse;
+ return circle::CreateCumsumOptions(_fbb, _exclusive, _reverse);
+}
+
+inline BroadcastToOptionsT *
+BroadcastToOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<BroadcastToOptionsT>(new BroadcastToOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void BroadcastToOptions::UnPackTo(BroadcastToOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<BroadcastToOptions>
+BroadcastToOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BroadcastToOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateBroadcastToOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<BroadcastToOptions>
+CreateBroadcastToOptions(flatbuffers::FlatBufferBuilder &_fbb, const BroadcastToOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const BroadcastToOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateBroadcastToOptions(_fbb);
+}
+
+inline Rfft2dOptionsT *
+Rfft2dOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<Rfft2dOptionsT>(new Rfft2dOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void Rfft2dOptions::UnPackTo(Rfft2dOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<Rfft2dOptions>
+Rfft2dOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateRfft2dOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<Rfft2dOptions>
+CreateRfft2dOptions(flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const Rfft2dOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateRfft2dOptions(_fbb);
+}
+
+inline HashtableOptionsT *
+HashtableOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<HashtableOptionsT>(new HashtableOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void HashtableOptions::UnPackTo(HashtableOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = table_id();
+ _o->table_id = _e;
+ }
+ {
+ auto _e = key_dtype();
+ _o->key_dtype = _e;
+ }
+ {
+ auto _e = value_dtype();
+ _o->value_dtype = _e;
+ }
+}
+
+inline flatbuffers::Offset<HashtableOptions>
+HashtableOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateHashtableOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<HashtableOptions>
+CreateHashtableOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const HashtableOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _table_id = _o->table_id;
+ auto _key_dtype = _o->key_dtype;
+ auto _value_dtype = _o->value_dtype;
+ return circle::CreateHashtableOptions(_fbb, _table_id, _key_dtype, _value_dtype);
+}
+
+inline HashtableFindOptionsT *
+HashtableFindOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<HashtableFindOptionsT>(new HashtableFindOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void HashtableFindOptions::UnPackTo(HashtableFindOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<HashtableFindOptions>
+HashtableFindOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableFindOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateHashtableFindOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<HashtableFindOptions>
+CreateHashtableFindOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableFindOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const HashtableFindOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateHashtableFindOptions(_fbb);
+}
+
+inline HashtableImportOptionsT *
+HashtableImportOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<HashtableImportOptionsT>(new HashtableImportOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void
+HashtableImportOptions::UnPackTo(HashtableImportOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<HashtableImportOptions>
+HashtableImportOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb,
+ const HashtableImportOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateHashtableImportOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<HashtableImportOptions>
+CreateHashtableImportOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ const HashtableImportOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const HashtableImportOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateHashtableImportOptions(_fbb);
+}
+
+inline HashtableSizeOptionsT *
+HashtableSizeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<HashtableSizeOptionsT>(new HashtableSizeOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void HashtableSizeOptions::UnPackTo(HashtableSizeOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<HashtableSizeOptions>
+HashtableSizeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableSizeOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateHashtableSizeOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<HashtableSizeOptions>
+CreateHashtableSizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableSizeOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const HashtableSizeOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateHashtableSizeOptions(_fbb);
+}
+
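+// VarHandleOptions carries two optional string fields. UnPackTo below copies
+// container/shared_name only when the table actually stores them, and
+// CreateVarHandleOptions writes an empty std::string as a null offset (0),
+// i.e. the field is simply omitted from the buffer.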
+inline VarHandleOptionsT *
+VarHandleOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<VarHandleOptionsT>(new VarHandleOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void VarHandleOptions::UnPackTo(VarHandleOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = container();
+ if (_e)
+ _o->container = _e->str();
+ }
+ {
+ auto _e = shared_name();
+ if (_e)
+ _o->shared_name = _e->str();
+ }
+}
+
+inline flatbuffers::Offset<VarHandleOptions>
+VarHandleOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const VarHandleOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateVarHandleOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<VarHandleOptions>
+CreateVarHandleOptions(flatbuffers::FlatBufferBuilder &_fbb, const VarHandleOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const VarHandleOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _container = _o->container.empty() ? 0 : _fbb.CreateString(_o->container);
+ auto _shared_name = _o->shared_name.empty() ? 0 : _fbb.CreateString(_o->shared_name);
+ return circle::CreateVarHandleOptions(_fbb, _container, _shared_name);
+}
+
+inline ReadVariableOptionsT *
+ReadVariableOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<ReadVariableOptionsT>(new ReadVariableOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void ReadVariableOptions::UnPackTo(ReadVariableOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<ReadVariableOptions>
+ReadVariableOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReadVariableOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateReadVariableOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<ReadVariableOptions>
+CreateReadVariableOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReadVariableOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const ReadVariableOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateReadVariableOptions(_fbb);
+}
+
+inline AssignVariableOptionsT *
+AssignVariableOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<AssignVariableOptionsT>(new AssignVariableOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void AssignVariableOptions::UnPackTo(AssignVariableOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<AssignVariableOptions>
+AssignVariableOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const AssignVariableOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateAssignVariableOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<AssignVariableOptions>
+CreateAssignVariableOptions(flatbuffers::FlatBufferBuilder &_fbb, const AssignVariableOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const AssignVariableOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ return circle::CreateAssignVariableOptions(_fbb);
+}
+
+inline RandomOptionsT *
+RandomOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<RandomOptionsT>(new RandomOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void RandomOptions::UnPackTo(RandomOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = seed();
+ _o->seed = _e;
+ }
+ {
+ auto _e = seed2();
+ _o->seed2 = _e;
+ }
+}
+
+inline flatbuffers::Offset<RandomOptions>
+RandomOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RandomOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateRandomOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<RandomOptions>
+CreateRandomOptions(flatbuffers::FlatBufferBuilder &_fbb, const RandomOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const RandomOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _seed = _o->seed;
+ auto _seed2 = _o->seed2;
+ return circle::CreateRandomOptions(_fbb, _seed, _seed2);
+}
+
+inline BCQGatherOptionsT *
+BCQGatherOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<BCQGatherOptionsT>(new BCQGatherOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void BCQGatherOptions::UnPackTo(BCQGatherOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = input_hidden_size();
+ _o->input_hidden_size = _e;
+ }
+ {
+ auto _e = axis();
+ _o->axis = _e;
+ }
+}
+
+inline flatbuffers::Offset<BCQGatherOptions>
+BCQGatherOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BCQGatherOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateBCQGatherOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<BCQGatherOptions>
+CreateBCQGatherOptions(flatbuffers::FlatBufferBuilder &_fbb, const BCQGatherOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const BCQGatherOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _input_hidden_size = _o->input_hidden_size;
+ auto _axis = _o->axis;
+ return circle::CreateBCQGatherOptions(_fbb, _input_hidden_size, _axis);
+}
+
+inline BCQFullyConnectedOptionsT *
+BCQFullyConnectedOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<BCQFullyConnectedOptionsT>(new BCQFullyConnectedOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void
+BCQFullyConnectedOptions::UnPackTo(BCQFullyConnectedOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = weights_hidden_size();
+ _o->weights_hidden_size = _e;
+ }
+ {
+ auto _e = fused_activation_function();
+ _o->fused_activation_function = _e;
+ }
+}
+
+inline flatbuffers::Offset<BCQFullyConnectedOptions>
+BCQFullyConnectedOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb,
+ const BCQFullyConnectedOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateBCQFullyConnectedOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<BCQFullyConnectedOptions>
+CreateBCQFullyConnectedOptions(flatbuffers::FlatBufferBuilder &_fbb,
+ const BCQFullyConnectedOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const BCQFullyConnectedOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _weights_hidden_size = _o->weights_hidden_size;
+ auto _fused_activation_function = _o->fused_activation_function;
+ return circle::CreateBCQFullyConnectedOptions(_fbb, _weights_hidden_size,
+ _fused_activation_function);
+}
+
+inline InstanceNormOptionsT *
+InstanceNormOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<InstanceNormOptionsT>(new InstanceNormOptionsT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void InstanceNormOptions::UnPackTo(InstanceNormOptionsT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = epsilon();
+ _o->epsilon = _e;
+ }
+ {
+ auto _e = fused_activation_function();
+ _o->fused_activation_function = _e;
+ }
+}
+
+inline flatbuffers::Offset<InstanceNormOptions>
+InstanceNormOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const InstanceNormOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateInstanceNormOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<InstanceNormOptions>
+CreateInstanceNormOptions(flatbuffers::FlatBufferBuilder &_fbb, const InstanceNormOptionsT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const InstanceNormOptionsT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _epsilon = _o->epsilon;
+ auto _fused_activation_function = _o->fused_activation_function;
+ return circle::CreateInstanceNormOptions(_fbb, _epsilon, _fused_activation_function);
+}
+
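+// OperatorCode exposes both deprecated_builtin_code and builtin_code; the
+// object-API helpers below copy both fields as-is, along with custom_code
+// (empty string -> null offset) and version.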
+inline OperatorCodeT *OperatorCode::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<OperatorCodeT>(new OperatorCodeT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void OperatorCode::UnPackTo(OperatorCodeT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = deprecated_builtin_code();
+ _o->deprecated_builtin_code = _e;
+ }
+ {
+ auto _e = custom_code();
+ if (_e)
+ _o->custom_code = _e->str();
+ }
+ {
+ auto _e = version();
+ _o->version = _e;
+ }
+ {
+ auto _e = builtin_code();
+ _o->builtin_code = _e;
+ }
+}
+
+inline flatbuffers::Offset<OperatorCode>
+OperatorCode::Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateOperatorCode(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<OperatorCode>
+CreateOperatorCode(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const OperatorCodeT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _deprecated_builtin_code = _o->deprecated_builtin_code;
+ auto _custom_code = _o->custom_code.empty() ? 0 : _fbb.CreateString(_o->custom_code);
+ auto _version = _o->version;
+ auto _builtin_code = _o->builtin_code;
+ return circle::CreateOperatorCode(_fbb, _deprecated_builtin_code, _custom_code, _version,
+ _builtin_code);
+}
+
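+// Object-API helpers for Operator. UnPackTo copies opcode_index and the
+// inputs/outputs/intermediates index vectors, resolves the builtin_options
+// union through BuiltinOptionsUnion::UnPack using the stored type tag, copies
+// custom_options as raw bytes and mutating_variable_inputs as bools.
+// CreateOperator performs the inverse, writing empty vectors as null offsets.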
+inline OperatorT *Operator::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<OperatorT>(new OperatorT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void Operator::UnPackTo(OperatorT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = opcode_index();
+ _o->opcode_index = _e;
+ }
+ {
+ auto _e = inputs();
+ if (_e)
+ {
+ _o->inputs.resize(_e->size());
+ for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++)
+ {
+ _o->inputs[_i] = _e->Get(_i);
+ }
+ }
+ }
+ {
+ auto _e = outputs();
+ if (_e)
+ {
+ _o->outputs.resize(_e->size());
+ for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++)
+ {
+ _o->outputs[_i] = _e->Get(_i);
+ }
+ }
+ }
+ {
+ auto _e = builtin_options_type();
+ _o->builtin_options.type = _e;
+ }
+ {
+ auto _e = builtin_options();
+ if (_e)
+ _o->builtin_options.value =
+ circle::BuiltinOptionsUnion::UnPack(_e, builtin_options_type(), _resolver);
+ }
+ {
+ auto _e = custom_options();
+ if (_e)
+ {
+ _o->custom_options.resize(_e->size());
+ std::copy(_e->begin(), _e->end(), _o->custom_options.begin());
+ }
+ }
+ {
+ auto _e = custom_options_format();
+ _o->custom_options_format = _e;
+ }
+ {
+ auto _e = mutating_variable_inputs();
+ if (_e)
+ {
+ _o->mutating_variable_inputs.resize(_e->size());
+ for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++)
+ {
+ _o->mutating_variable_inputs[_i] = _e->Get(_i) != 0;
+ }
+ }
+ }
+ {
+ auto _e = intermediates();
+ if (_e)
+ {
+ _o->intermediates.resize(_e->size());
+ for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++)
+ {
+ _o->intermediates[_i] = _e->Get(_i);
+ }
+ }
+ }
+}
+
+inline flatbuffers::Offset<Operator>
+Operator::Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateOperator(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<Operator>
+CreateOperator(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const OperatorT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _opcode_index = _o->opcode_index;
+ auto _inputs = _o->inputs.size() ? _fbb.CreateVector(_o->inputs) : 0;
+ auto _outputs = _o->outputs.size() ? _fbb.CreateVector(_o->outputs) : 0;
+ auto _builtin_options_type = _o->builtin_options.type;
+ auto _builtin_options = _o->builtin_options.Pack(_fbb);
+ auto _custom_options = _o->custom_options.size() ? _fbb.CreateVector(_o->custom_options) : 0;
+ auto _custom_options_format = _o->custom_options_format;
+ auto _mutating_variable_inputs =
+ _o->mutating_variable_inputs.size() ? _fbb.CreateVector(_o->mutating_variable_inputs) : 0;
+ auto _intermediates = _o->intermediates.size() ? _fbb.CreateVector(_o->intermediates) : 0;
+ return circle::CreateOperator(_fbb, _opcode_index, _inputs, _outputs, _builtin_options_type,
+ _builtin_options, _custom_options, _custom_options_format,
+ _mutating_variable_inputs, _intermediates);
+}
+
+inline SubGraphT *SubGraph::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<SubGraphT>(new SubGraphT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void SubGraph::UnPackTo(SubGraphT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = tensors();
+ if (_e)
+ {
+ _o->tensors.resize(_e->size());
+ for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++)
+ {
+ _o->tensors[_i] = std::unique_ptr<circle::TensorT>(_e->Get(_i)->UnPack(_resolver));
+ }
+ }
+ }
+ {
+ auto _e = inputs();
+ if (_e)
+ {
+ _o->inputs.resize(_e->size());
+ for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++)
+ {
+ _o->inputs[_i] = _e->Get(_i);
+ }
+ }
+ }
+ {
+ auto _e = outputs();
+ if (_e)
+ {
+ _o->outputs.resize(_e->size());
+ for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++)
+ {
+ _o->outputs[_i] = _e->Get(_i);
+ }
+ }
+ }
+ {
+ auto _e = operators();
+ if (_e)
+ {
+ _o->operators.resize(_e->size());
+ for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++)
+ {
+ _o->operators[_i] = std::unique_ptr<circle::OperatorT>(_e->Get(_i)->UnPack(_resolver));
+ }
+ }
+ }
+ {
+ auto _e = name();
+ if (_e)
+ _o->name = _e->str();
+ }
+ {
+ auto _e = data_format();
+ _o->data_format = _e;
+ }
+}
+
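+// Packing a SubGraph uses the index-based FlatBufferBuilder::CreateVector
+// overload: the local _VectorArgs struct passes the builder, source object and
+// rehasher into the capture-less lambda that packs each nested Tensor and
+// Operator. Empty vectors and an empty name are written as null offsets.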
+inline flatbuffers::Offset<SubGraph>
+SubGraph::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateSubGraph(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<SubGraph>
+CreateSubGraph(flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const SubGraphT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _tensors =
+ _o->tensors.size()
+ ? _fbb.CreateVector<flatbuffers::Offset<circle::Tensor>>(
+ _o->tensors.size(),
+ [](size_t i, _VectorArgs *__va) {
+ return CreateTensor(*__va->__fbb, __va->__o->tensors[i].get(), __va->__rehasher);
+ },
+ &_va)
+ : 0;
+ auto _inputs = _o->inputs.size() ? _fbb.CreateVector(_o->inputs) : 0;
+ auto _outputs = _o->outputs.size() ? _fbb.CreateVector(_o->outputs) : 0;
+ auto _operators =
+ _o->operators.size()
+ ? _fbb.CreateVector<flatbuffers::Offset<circle::Operator>>(
+ _o->operators.size(),
+ [](size_t i, _VectorArgs *__va) {
+ return CreateOperator(*__va->__fbb, __va->__o->operators[i].get(), __va->__rehasher);
+ },
+ &_va)
+ : 0;
+ auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name);
+ auto _data_format = _o->data_format;
+ return circle::CreateSubGraph(_fbb, _tensors, _inputs, _outputs, _operators, _name, _data_format);
+}
+
+inline BufferT *Buffer::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<BufferT>(new BufferT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void Buffer::UnPackTo(BufferT *_o, const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = data();
+ if (_e)
+ {
+ _o->data.resize(_e->size());
+ std::copy(_e->begin(), _e->end(), _o->data.begin());
+ }
+ }
+}
+
+inline flatbuffers::Offset<Buffer> Buffer::Pack(flatbuffers::FlatBufferBuilder &_fbb,
+ const BufferT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateBuffer(_fbb, _o, _rehasher);
+}
+
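+// CreateBuffer calls ForceVectorAlignment(..., sizeof(uint8_t), 16) so the raw
+// data vector starts on a 16-byte boundary in the finished buffer; an empty
+// data vector becomes a null offset.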
+inline flatbuffers::Offset<Buffer> CreateBuffer(flatbuffers::FlatBufferBuilder &_fbb,
+ const BufferT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const BufferT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ _fbb.ForceVectorAlignment(_o->data.size(), sizeof(uint8_t), 16);
+ auto _data = _o->data.size() ? _fbb.CreateVector(_o->data) : 0;
+ return circle::CreateBuffer(_fbb, _data);
+}
+
+inline MetadataT *Metadata::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<MetadataT>(new MetadataT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void Metadata::UnPackTo(MetadataT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = name();
+ if (_e)
+ _o->name = _e->str();
+ }
+ {
+ auto _e = buffer();
+ _o->buffer = _e;
+ }
+}
+
+inline flatbuffers::Offset<Metadata>
+Metadata::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MetadataT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateMetadata(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<Metadata>
+CreateMetadata(flatbuffers::FlatBufferBuilder &_fbb, const MetadataT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const MetadataT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name);
+ auto _buffer = _o->buffer;
+ return circle::CreateMetadata(_fbb, _name, _buffer);
+}
+
+inline TensorMapT *TensorMap::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<TensorMapT>(new TensorMapT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void TensorMap::UnPackTo(TensorMapT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = name();
+ if (_e)
+ _o->name = _e->str();
+ }
+ {
+ auto _e = tensor_index();
+ _o->tensor_index = _e;
+ }
+}
+
+inline flatbuffers::Offset<TensorMap>
+TensorMap::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TensorMapT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateTensorMap(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<TensorMap>
+CreateTensorMap(flatbuffers::FlatBufferBuilder &_fbb, const TensorMapT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const TensorMapT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name);
+ auto _tensor_index = _o->tensor_index;
+ return circle::CreateTensorMap(_fbb, _name, _tensor_index);
+}
+
+inline SignatureDefT *SignatureDef::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<SignatureDefT>(new SignatureDefT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void SignatureDef::UnPackTo(SignatureDefT *_o,
+ const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = inputs();
+ if (_e)
+ {
+ _o->inputs.resize(_e->size());
+ for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++)
+ {
+ _o->inputs[_i] = std::unique_ptr<circle::TensorMapT>(_e->Get(_i)->UnPack(_resolver));
+ }
+ }
+ }
+ {
+ auto _e = outputs();
+ if (_e)
+ {
+ _o->outputs.resize(_e->size());
+ for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++)
+ {
+ _o->outputs[_i] = std::unique_ptr<circle::TensorMapT>(_e->Get(_i)->UnPack(_resolver));
+ }
+ }
+ }
+ {
+ auto _e = signature_key();
+ if (_e)
+ _o->signature_key = _e->str();
+ }
+ {
+ auto _e = subgraph_index();
+ _o->subgraph_index = _e;
+ }
+}
+
+inline flatbuffers::Offset<SignatureDef>
+SignatureDef::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SignatureDefT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateSignatureDef(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<SignatureDef>
+CreateSignatureDef(flatbuffers::FlatBufferBuilder &_fbb, const SignatureDefT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const SignatureDefT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _inputs =
+ _o->inputs.size()
+ ? _fbb.CreateVector<flatbuffers::Offset<circle::TensorMap>>(
+ _o->inputs.size(),
+ [](size_t i, _VectorArgs *__va) {
+ return CreateTensorMap(*__va->__fbb, __va->__o->inputs[i].get(), __va->__rehasher);
+ },
+ &_va)
+ : 0;
+ auto _outputs =
+ _o->outputs.size()
+ ? _fbb.CreateVector<flatbuffers::Offset<circle::TensorMap>>(
+ _o->outputs.size(),
+ [](size_t i, _VectorArgs *__va) {
+ return CreateTensorMap(*__va->__fbb, __va->__o->outputs[i].get(), __va->__rehasher);
+ },
+ &_va)
+ : 0;
+ auto _signature_key = _o->signature_key.empty() ? 0 : _fbb.CreateString(_o->signature_key);
+ auto _subgraph_index = _o->subgraph_index;
+ return circle::CreateSignatureDef(_fbb, _inputs, _outputs, _signature_key, _subgraph_index);
+}
+
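+// Object-API entry points for the whole Model. UnPackTo below recursively
+// unpacks operator_codes, subgraphs, buffers, metadata and signature_defs into
+// their ...T counterparts; CreateModel re-serializes them with the same
+// lambda-over-_VectorArgs pattern used for SubGraph above. A minimal
+// round-trip sketch, assuming the usual flatc-generated root helpers
+// (circle::GetModel / circle::FinishModelBuffer) defined elsewhere in this
+// header:
+//
+//   const circle::Model *m = circle::GetModel(buffer_data);
+//   std::unique_ptr<circle::ModelT> obj(m->UnPack());
+//   // ... mutate obj ...
+//   flatbuffers::FlatBufferBuilder fbb;
+//   circle::FinishModelBuffer(fbb, circle::Model::Pack(fbb, obj.get()));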
+inline ModelT *Model::UnPack(const flatbuffers::resolver_function_t *_resolver) const
+{
+ auto _o = std::unique_ptr<ModelT>(new ModelT());
+ UnPackTo(_o.get(), _resolver);
+ return _o.release();
+}
+
+inline void Model::UnPackTo(ModelT *_o, const flatbuffers::resolver_function_t *_resolver) const
+{
+ (void)_o;
+ (void)_resolver;
+ {
+ auto _e = version();
+ _o->version = _e;
+ }
+ {
+ auto _e = operator_codes();
+ if (_e)
+ {
+ _o->operator_codes.resize(_e->size());
+ for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++)
+ {
+ _o->operator_codes[_i] =
+ std::unique_ptr<circle::OperatorCodeT>(_e->Get(_i)->UnPack(_resolver));
+ }
+ }
+ }
+ {
+ auto _e = subgraphs();
+ if (_e)
+ {
+ _o->subgraphs.resize(_e->size());
+ for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++)
+ {
+ _o->subgraphs[_i] = std::unique_ptr<circle::SubGraphT>(_e->Get(_i)->UnPack(_resolver));
+ }
+ }
+ }
+ {
+ auto _e = description();
+ if (_e)
+ _o->description = _e->str();
+ }
+ {
+ auto _e = buffers();
+ if (_e)
+ {
+ _o->buffers.resize(_e->size());
+ for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++)
+ {
+ _o->buffers[_i] = std::unique_ptr<circle::BufferT>(_e->Get(_i)->UnPack(_resolver));
+ }
+ }
+ }
+ {
+ auto _e = metadata_buffer();
+ if (_e)
+ {
+ _o->metadata_buffer.resize(_e->size());
+ for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++)
+ {
+ _o->metadata_buffer[_i] = _e->Get(_i);
+ }
+ }
+ }
+ {
+ auto _e = metadata();
+ if (_e)
+ {
+ _o->metadata.resize(_e->size());
+ for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++)
+ {
+ _o->metadata[_i] = std::unique_ptr<circle::MetadataT>(_e->Get(_i)->UnPack(_resolver));
+ }
+ }
+ }
+ {
+ auto _e = signature_defs();
+ if (_e)
+ {
+ _o->signature_defs.resize(_e->size());
+ for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++)
+ {
+ _o->signature_defs[_i] =
+ std::unique_ptr<circle::SignatureDefT>(_e->Get(_i)->UnPack(_resolver));
+ }
+ }
+ }
+}
+
+inline flatbuffers::Offset<Model> Model::Pack(flatbuffers::FlatBufferBuilder &_fbb,
+ const ModelT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ return CreateModel(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<Model> CreateModel(flatbuffers::FlatBufferBuilder &_fbb,
+ const ModelT *_o,
+ const flatbuffers::rehasher_function_t *_rehasher)
+{
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs
+ {
+ flatbuffers::FlatBufferBuilder *__fbb;
+ const ModelT *__o;
+ const flatbuffers::rehasher_function_t *__rehasher;
+ } _va = {&_fbb, _o, _rehasher};
+ (void)_va;
+ auto _version = _o->version;
+ auto _operator_codes =
+ _o->operator_codes.size()
+ ? _fbb.CreateVector<flatbuffers::Offset<circle::OperatorCode>>(
+ _o->operator_codes.size(),
+ [](size_t i, _VectorArgs *__va) {
+ return CreateOperatorCode(*__va->__fbb, __va->__o->operator_codes[i].get(),
+ __va->__rehasher);
+ },
+ &_va)
+ : 0;
+ auto _subgraphs =
+ _o->subgraphs.size()
+ ? _fbb.CreateVector<flatbuffers::Offset<circle::SubGraph>>(
+ _o->subgraphs.size(),
+ [](size_t i, _VectorArgs *__va) {
+ return CreateSubGraph(*__va->__fbb, __va->__o->subgraphs[i].get(), __va->__rehasher);
+ },
+ &_va)
+ : 0;
+ auto _description = _o->description.empty() ? 0 : _fbb.CreateString(_o->description);
+ auto _buffers =
+ _o->buffers.size()
+ ? _fbb.CreateVector<flatbuffers::Offset<circle::Buffer>>(
+ _o->buffers.size(),
+ [](size_t i, _VectorArgs *__va) {
+ return CreateBuffer(*__va->__fbb, __va->__o->buffers[i].get(), __va->__rehasher);
+ },
+ &_va)
+ : 0;
+ auto _metadata_buffer = _o->metadata_buffer.size() ? _fbb.CreateVector(_o->metadata_buffer) : 0;
+ auto _metadata =
+ _o->metadata.size()
+ ? _fbb.CreateVector<flatbuffers::Offset<circle::Metadata>>(
+ _o->metadata.size(),
+ [](size_t i, _VectorArgs *__va) {
+ return CreateMetadata(*__va->__fbb, __va->__o->metadata[i].get(), __va->__rehasher);
+ },
+ &_va)
+ : 0;
+ auto _signature_defs =
+ _o->signature_defs.size()
+ ? _fbb.CreateVector<flatbuffers::Offset<circle::SignatureDef>>(
+ _o->signature_defs.size(),
+ [](size_t i, _VectorArgs *__va) {
+ return CreateSignatureDef(*__va->__fbb, __va->__o->signature_defs[i].get(),
+ __va->__rehasher);
+ },
+ &_va)
+ : 0;
+ return circle::CreateModel(_fbb, _version, _operator_codes, _subgraphs, _description, _buffers,
+ _metadata_buffer, _metadata, _signature_defs);
+}
+
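+// Each FlatBuffers union in the schema gets the same five helpers, starting
+// here with QuantizationDetails: a per-value verifier, a verifier for parallel
+// (values, types) vectors that must have matching sizes, UnPack/Pack that
+// dispatch on the type tag, a deep-copying copy constructor, and a Reset()
+// that deletes the owned object and returns the union to NONE.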
+inline bool VerifyQuantizationDetails(flatbuffers::Verifier &verifier, const void *obj,
+ QuantizationDetails type)
+{
+ switch (type)
+ {
+ case QuantizationDetails_NONE:
+ {
+ return true;
+ }
+ case QuantizationDetails_CustomQuantization:
+ {
+ auto ptr = reinterpret_cast<const circle::CustomQuantization *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ default:
+ return true;
+ }
+}
+
+inline bool
+VerifyQuantizationDetailsVector(flatbuffers::Verifier &verifier,
+ const flatbuffers::Vector<flatbuffers::Offset<void>> *values,
+ const flatbuffers::Vector<uint8_t> *types)
+{
+ if (!values || !types)
+ return !values && !types;
+ if (values->size() != types->size())
+ return false;
+ for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i)
+ {
+ if (!VerifyQuantizationDetails(verifier, values->Get(i),
+ types->GetEnum<QuantizationDetails>(i)))
+ {
+ return false;
+ }
+ }
+ return true;
+}
+
+inline void *QuantizationDetailsUnion::UnPack(const void *obj, QuantizationDetails type,
+ const flatbuffers::resolver_function_t *resolver)
+{
+ switch (type)
+ {
+ case QuantizationDetails_CustomQuantization:
+ {
+ auto ptr = reinterpret_cast<const circle::CustomQuantization *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ default:
+ return nullptr;
+ }
+}
+
+inline flatbuffers::Offset<void>
+QuantizationDetailsUnion::Pack(flatbuffers::FlatBufferBuilder &_fbb,
+ const flatbuffers::rehasher_function_t *_rehasher) const
+{
+ switch (type)
+ {
+ case QuantizationDetails_CustomQuantization:
+ {
+ auto ptr = reinterpret_cast<const circle::CustomQuantizationT *>(value);
+ return CreateCustomQuantization(_fbb, ptr, _rehasher).Union();
+ }
+ default:
+ return 0;
+ }
+}
+
+inline QuantizationDetailsUnion::QuantizationDetailsUnion(const QuantizationDetailsUnion &u)
+ : type(u.type), value(nullptr)
+{
+ switch (type)
+ {
+ case QuantizationDetails_CustomQuantization:
+ {
+ value =
+ new circle::CustomQuantizationT(*reinterpret_cast<circle::CustomQuantizationT *>(u.value));
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+inline void QuantizationDetailsUnion::Reset()
+{
+ switch (type)
+ {
+ case QuantizationDetails_CustomQuantization:
+ {
+ auto ptr = reinterpret_cast<circle::CustomQuantizationT *>(value);
+ delete ptr;
+ break;
+ }
+ default:
+ break;
+ }
+ value = nullptr;
+ type = QuantizationDetails_NONE;
+}
+
+inline bool VerifySparseIndexVector(flatbuffers::Verifier &verifier, const void *obj,
+ SparseIndexVector type)
+{
+ switch (type)
+ {
+ case SparseIndexVector_NONE:
+ {
+ return true;
+ }
+ case SparseIndexVector_Int32Vector:
+ {
+ auto ptr = reinterpret_cast<const circle::Int32Vector *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case SparseIndexVector_Uint16Vector:
+ {
+ auto ptr = reinterpret_cast<const circle::Uint16Vector *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case SparseIndexVector_Uint8Vector:
+ {
+ auto ptr = reinterpret_cast<const circle::Uint8Vector *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ default:
+ return true;
+ }
+}
+
+inline bool
+VerifySparseIndexVectorVector(flatbuffers::Verifier &verifier,
+ const flatbuffers::Vector<flatbuffers::Offset<void>> *values,
+ const flatbuffers::Vector<uint8_t> *types)
+{
+ if (!values || !types)
+ return !values && !types;
+ if (values->size() != types->size())
+ return false;
+ for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i)
+ {
+ if (!VerifySparseIndexVector(verifier, values->Get(i), types->GetEnum<SparseIndexVector>(i)))
+ {
+ return false;
+ }
+ }
+ return true;
+}
+
+inline void *SparseIndexVectorUnion::UnPack(const void *obj, SparseIndexVector type,
+ const flatbuffers::resolver_function_t *resolver)
+{
+ switch (type)
+ {
+ case SparseIndexVector_Int32Vector:
+ {
+ auto ptr = reinterpret_cast<const circle::Int32Vector *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case SparseIndexVector_Uint16Vector:
+ {
+ auto ptr = reinterpret_cast<const circle::Uint16Vector *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case SparseIndexVector_Uint8Vector:
+ {
+ auto ptr = reinterpret_cast<const circle::Uint8Vector *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ default:
+ return nullptr;
+ }
+}
+
+inline flatbuffers::Offset<void>
+SparseIndexVectorUnion::Pack(flatbuffers::FlatBufferBuilder &_fbb,
+ const flatbuffers::rehasher_function_t *_rehasher) const
+{
+ switch (type)
+ {
+ case SparseIndexVector_Int32Vector:
+ {
+ auto ptr = reinterpret_cast<const circle::Int32VectorT *>(value);
+ return CreateInt32Vector(_fbb, ptr, _rehasher).Union();
+ }
+ case SparseIndexVector_Uint16Vector:
+ {
+ auto ptr = reinterpret_cast<const circle::Uint16VectorT *>(value);
+ return CreateUint16Vector(_fbb, ptr, _rehasher).Union();
+ }
+ case SparseIndexVector_Uint8Vector:
+ {
+ auto ptr = reinterpret_cast<const circle::Uint8VectorT *>(value);
+ return CreateUint8Vector(_fbb, ptr, _rehasher).Union();
+ }
+ default:
+ return 0;
+ }
+}
+
+inline SparseIndexVectorUnion::SparseIndexVectorUnion(const SparseIndexVectorUnion &u)
+ : type(u.type), value(nullptr)
+{
+ switch (type)
+ {
+ case SparseIndexVector_Int32Vector:
+ {
+ value = new circle::Int32VectorT(*reinterpret_cast<circle::Int32VectorT *>(u.value));
+ break;
+ }
+ case SparseIndexVector_Uint16Vector:
+ {
+ value = new circle::Uint16VectorT(*reinterpret_cast<circle::Uint16VectorT *>(u.value));
+ break;
+ }
+ case SparseIndexVector_Uint8Vector:
+ {
+ value = new circle::Uint8VectorT(*reinterpret_cast<circle::Uint8VectorT *>(u.value));
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+inline void SparseIndexVectorUnion::Reset()
+{
+ switch (type)
+ {
+ case SparseIndexVector_Int32Vector:
+ {
+ auto ptr = reinterpret_cast<circle::Int32VectorT *>(value);
+ delete ptr;
+ break;
+ }
+ case SparseIndexVector_Uint16Vector:
+ {
+ auto ptr = reinterpret_cast<circle::Uint16VectorT *>(value);
+ delete ptr;
+ break;
+ }
+ case SparseIndexVector_Uint8Vector:
+ {
+ auto ptr = reinterpret_cast<circle::Uint8VectorT *>(value);
+ delete ptr;
+ break;
+ }
+ default:
+ break;
+ }
+ value = nullptr;
+ type = SparseIndexVector_NONE;
+}
+
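+// VerifyBuiltinOptions is the largest of these union dispatchers, with one
+// case per builtin option table. Like the verifiers above, it returns true
+// for NONE (and, per the generated default case, for unrecognized type
+// values), so buffers carrying newer option types still pass verification.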
+inline bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj,
+ BuiltinOptions type)
+{
+ switch (type)
+ {
+ case BuiltinOptions_NONE:
+ {
+ return true;
+ }
+ case BuiltinOptions_Conv2DOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::Conv2DOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_DepthwiseConv2DOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::DepthwiseConv2DOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_ConcatEmbeddingsOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ConcatEmbeddingsOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_LSHProjectionOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::LSHProjectionOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_Pool2DOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::Pool2DOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_SVDFOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SVDFOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_RNNOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::RNNOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_FullyConnectedOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::FullyConnectedOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_SoftmaxOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SoftmaxOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_ConcatenationOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ConcatenationOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_AddOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::AddOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_L2NormOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::L2NormOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_LocalResponseNormalizationOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::LocalResponseNormalizationOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_LSTMOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::LSTMOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_ResizeBilinearOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ResizeBilinearOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_CallOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::CallOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_ReshapeOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ReshapeOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_SkipGramOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SkipGramOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_SpaceToDepthOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SpaceToDepthOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_EmbeddingLookupSparseOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::EmbeddingLookupSparseOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_MulOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::MulOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_PadOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::PadOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_GatherOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::GatherOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_BatchToSpaceNDOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::BatchToSpaceNDOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_SpaceToBatchNDOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SpaceToBatchNDOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_TransposeOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::TransposeOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_ReducerOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ReducerOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_SubOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SubOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_DivOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::DivOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_SqueezeOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SqueezeOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_SequenceRNNOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SequenceRNNOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_StridedSliceOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::StridedSliceOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_ExpOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ExpOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_TopKV2Options:
+ {
+ auto ptr = reinterpret_cast<const circle::TopKV2Options *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_SplitOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SplitOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_LogSoftmaxOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::LogSoftmaxOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_CastOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::CastOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_DequantizeOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::DequantizeOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_MaximumMinimumOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::MaximumMinimumOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_ArgMaxOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ArgMaxOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_LessOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::LessOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_NegOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::NegOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_PadV2Options:
+ {
+ auto ptr = reinterpret_cast<const circle::PadV2Options *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_GreaterOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::GreaterOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_GreaterEqualOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::GreaterEqualOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_LessEqualOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::LessEqualOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_SelectOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SelectOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_SliceOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SliceOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_TransposeConvOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::TransposeConvOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_SparseToDenseOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SparseToDenseOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_TileOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::TileOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_ExpandDimsOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ExpandDimsOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_EqualOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::EqualOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_NotEqualOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::NotEqualOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_ShapeOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ShapeOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_PowOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::PowOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_ArgMinOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ArgMinOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_FakeQuantOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::FakeQuantOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_PackOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::PackOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_LogicalOrOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::LogicalOrOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_OneHotOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::OneHotOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_LogicalAndOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::LogicalAndOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_LogicalNotOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::LogicalNotOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_UnpackOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::UnpackOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_FloorDivOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::FloorDivOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_SquareOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SquareOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_ZerosLikeOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ZerosLikeOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_FillOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::FillOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_BidirectionalSequenceLSTMOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::BidirectionalSequenceLSTMOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_BidirectionalSequenceRNNOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::BidirectionalSequenceRNNOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_UnidirectionalSequenceLSTMOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::UnidirectionalSequenceLSTMOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_FloorModOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::FloorModOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_RangeOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::RangeOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_ResizeNearestNeighborOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ResizeNearestNeighborOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_LeakyReluOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::LeakyReluOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_SquaredDifferenceOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SquaredDifferenceOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_MirrorPadOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::MirrorPadOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_AbsOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::AbsOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_SplitVOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SplitVOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_UniqueOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::UniqueOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_ReverseV2Options:
+ {
+ auto ptr = reinterpret_cast<const circle::ReverseV2Options *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_AddNOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::AddNOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_GatherNdOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::GatherNdOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_CosOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::CosOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_WhereOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::WhereOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_RankOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::RankOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_ReverseSequenceOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ReverseSequenceOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_MatrixDiagOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::MatrixDiagOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_QuantizeOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::QuantizeOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_MatrixSetDiagOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::MatrixSetDiagOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_HardSwishOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::HardSwishOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_IfOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::IfOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_WhileOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::WhileOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_DepthToSpaceOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::DepthToSpaceOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_NonMaxSuppressionV4Options:
+ {
+ auto ptr = reinterpret_cast<const circle::NonMaxSuppressionV4Options *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_NonMaxSuppressionV5Options:
+ {
+ auto ptr = reinterpret_cast<const circle::NonMaxSuppressionV5Options *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_ScatterNdOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ScatterNdOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_SelectV2Options:
+ {
+ auto ptr = reinterpret_cast<const circle::SelectV2Options *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_DensifyOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::DensifyOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_SegmentSumOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SegmentSumOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_BatchMatMulOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::BatchMatMulOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_CumsumOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::CumsumOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_CallOnceOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::CallOnceOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_BroadcastToOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::BroadcastToOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_Rfft2dOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::Rfft2dOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_Conv3DOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::Conv3DOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_HashtableOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::HashtableOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_HashtableFindOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::HashtableFindOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_HashtableImportOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::HashtableImportOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_HashtableSizeOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::HashtableSizeOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_VarHandleOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::VarHandleOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_ReadVariableOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ReadVariableOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_AssignVariableOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::AssignVariableOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_RandomOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::RandomOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_BCQGatherOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::BCQGatherOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_BCQFullyConnectedOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::BCQFullyConnectedOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_InstanceNormOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::InstanceNormOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ default:
+ return true;
+ }
+}
+
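+// Verifies a parallel pair of vectors: union values and their BuiltinOptions type
+// codes. Both must be present (or both absent) and equal in length; each element is
+// dispatched through VerifyBuiltinOptions above.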
+inline bool VerifyBuiltinOptionsVector(flatbuffers::Verifier &verifier,
+ const flatbuffers::Vector<flatbuffers::Offset<void>> *values,
+ const flatbuffers::Vector<uint8_t> *types)
+{
+ if (!values || !types)
+ return !values && !types;
+ if (values->size() != types->size())
+ return false;
+ for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i)
+ {
+ if (!VerifyBuiltinOptions(verifier, values->Get(i), types->GetEnum<BuiltinOptions>(i)))
+ {
+ return false;
+ }
+ }
+ return true;
+}
+
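+// Unpacks a serialized builtin-options table into its native (object API) type,
+// selected by the BuiltinOptions tag; unknown tags yield nullptr.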
+inline void *BuiltinOptionsUnion::UnPack(const void *obj, BuiltinOptions type,
+ const flatbuffers::resolver_function_t *resolver)
+{
+ switch (type)
+ {
+ case BuiltinOptions_Conv2DOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::Conv2DOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_DepthwiseConv2DOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::DepthwiseConv2DOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_ConcatEmbeddingsOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ConcatEmbeddingsOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_LSHProjectionOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::LSHProjectionOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_Pool2DOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::Pool2DOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_SVDFOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SVDFOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_RNNOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::RNNOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_FullyConnectedOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::FullyConnectedOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_SoftmaxOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SoftmaxOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_ConcatenationOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ConcatenationOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_AddOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::AddOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_L2NormOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::L2NormOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_LocalResponseNormalizationOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::LocalResponseNormalizationOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_LSTMOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::LSTMOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_ResizeBilinearOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ResizeBilinearOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_CallOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::CallOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_ReshapeOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ReshapeOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_SkipGramOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SkipGramOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_SpaceToDepthOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SpaceToDepthOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_EmbeddingLookupSparseOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::EmbeddingLookupSparseOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_MulOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::MulOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_PadOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::PadOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_GatherOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::GatherOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_BatchToSpaceNDOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::BatchToSpaceNDOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_SpaceToBatchNDOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SpaceToBatchNDOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_TransposeOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::TransposeOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_ReducerOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ReducerOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_SubOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SubOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_DivOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::DivOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_SqueezeOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SqueezeOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_SequenceRNNOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SequenceRNNOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_StridedSliceOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::StridedSliceOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_ExpOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ExpOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_TopKV2Options:
+ {
+ auto ptr = reinterpret_cast<const circle::TopKV2Options *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_SplitOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SplitOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_LogSoftmaxOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::LogSoftmaxOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_CastOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::CastOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_DequantizeOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::DequantizeOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_MaximumMinimumOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::MaximumMinimumOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_ArgMaxOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ArgMaxOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_LessOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::LessOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_NegOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::NegOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_PadV2Options:
+ {
+ auto ptr = reinterpret_cast<const circle::PadV2Options *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_GreaterOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::GreaterOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_GreaterEqualOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::GreaterEqualOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_LessEqualOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::LessEqualOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_SelectOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SelectOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_SliceOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SliceOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_TransposeConvOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::TransposeConvOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_SparseToDenseOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SparseToDenseOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_TileOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::TileOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_ExpandDimsOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ExpandDimsOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_EqualOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::EqualOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_NotEqualOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::NotEqualOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_ShapeOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ShapeOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_PowOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::PowOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_ArgMinOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ArgMinOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_FakeQuantOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::FakeQuantOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_PackOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::PackOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_LogicalOrOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::LogicalOrOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_OneHotOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::OneHotOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_LogicalAndOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::LogicalAndOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_LogicalNotOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::LogicalNotOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_UnpackOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::UnpackOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_FloorDivOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::FloorDivOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_SquareOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SquareOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_ZerosLikeOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ZerosLikeOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_FillOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::FillOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_BidirectionalSequenceLSTMOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::BidirectionalSequenceLSTMOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_BidirectionalSequenceRNNOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::BidirectionalSequenceRNNOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_UnidirectionalSequenceLSTMOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::UnidirectionalSequenceLSTMOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_FloorModOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::FloorModOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_RangeOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::RangeOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_ResizeNearestNeighborOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ResizeNearestNeighborOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_LeakyReluOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::LeakyReluOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_SquaredDifferenceOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SquaredDifferenceOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_MirrorPadOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::MirrorPadOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_AbsOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::AbsOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_SplitVOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SplitVOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_UniqueOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::UniqueOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_ReverseV2Options:
+ {
+ auto ptr = reinterpret_cast<const circle::ReverseV2Options *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_AddNOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::AddNOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_GatherNdOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::GatherNdOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_CosOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::CosOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_WhereOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::WhereOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_RankOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::RankOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_ReverseSequenceOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ReverseSequenceOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_MatrixDiagOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::MatrixDiagOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_QuantizeOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::QuantizeOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_MatrixSetDiagOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::MatrixSetDiagOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_HardSwishOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::HardSwishOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_IfOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::IfOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_WhileOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::WhileOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_DepthToSpaceOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::DepthToSpaceOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_NonMaxSuppressionV4Options:
+ {
+ auto ptr = reinterpret_cast<const circle::NonMaxSuppressionV4Options *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_NonMaxSuppressionV5Options:
+ {
+ auto ptr = reinterpret_cast<const circle::NonMaxSuppressionV5Options *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_ScatterNdOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ScatterNdOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_SelectV2Options:
+ {
+ auto ptr = reinterpret_cast<const circle::SelectV2Options *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_DensifyOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::DensifyOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_SegmentSumOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SegmentSumOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_BatchMatMulOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::BatchMatMulOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_CumsumOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::CumsumOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_CallOnceOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::CallOnceOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_BroadcastToOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::BroadcastToOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_Rfft2dOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::Rfft2dOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_Conv3DOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::Conv3DOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_HashtableOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::HashtableOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_HashtableFindOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::HashtableFindOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_HashtableImportOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::HashtableImportOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_HashtableSizeOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::HashtableSizeOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_VarHandleOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::VarHandleOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_ReadVariableOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ReadVariableOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_AssignVariableOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::AssignVariableOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_RandomOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::RandomOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_BCQGatherOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::BCQGatherOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_BCQFullyConnectedOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::BCQFullyConnectedOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_InstanceNormOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::InstanceNormOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ default:
+ return nullptr;
+ }
+}
+
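+// Re-serializes the native object held by this union back into the FlatBufferBuilder,
+// calling the matching Create*Options helper for the active type; unknown tags yield 0.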
+inline flatbuffers::Offset<void>
+BuiltinOptionsUnion::Pack(flatbuffers::FlatBufferBuilder &_fbb,
+ const flatbuffers::rehasher_function_t *_rehasher) const
+{
+ switch (type)
+ {
+ case BuiltinOptions_Conv2DOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::Conv2DOptionsT *>(value);
+ return CreateConv2DOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_DepthwiseConv2DOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::DepthwiseConv2DOptionsT *>(value);
+ return CreateDepthwiseConv2DOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_ConcatEmbeddingsOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ConcatEmbeddingsOptionsT *>(value);
+ return CreateConcatEmbeddingsOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_LSHProjectionOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::LSHProjectionOptionsT *>(value);
+ return CreateLSHProjectionOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_Pool2DOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::Pool2DOptionsT *>(value);
+ return CreatePool2DOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_SVDFOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SVDFOptionsT *>(value);
+ return CreateSVDFOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_RNNOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::RNNOptionsT *>(value);
+ return CreateRNNOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_FullyConnectedOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::FullyConnectedOptionsT *>(value);
+ return CreateFullyConnectedOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_SoftmaxOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SoftmaxOptionsT *>(value);
+ return CreateSoftmaxOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_ConcatenationOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ConcatenationOptionsT *>(value);
+ return CreateConcatenationOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_AddOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::AddOptionsT *>(value);
+ return CreateAddOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_L2NormOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::L2NormOptionsT *>(value);
+ return CreateL2NormOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_LocalResponseNormalizationOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::LocalResponseNormalizationOptionsT *>(value);
+ return CreateLocalResponseNormalizationOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_LSTMOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::LSTMOptionsT *>(value);
+ return CreateLSTMOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_ResizeBilinearOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ResizeBilinearOptionsT *>(value);
+ return CreateResizeBilinearOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_CallOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::CallOptionsT *>(value);
+ return CreateCallOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_ReshapeOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ReshapeOptionsT *>(value);
+ return CreateReshapeOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_SkipGramOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SkipGramOptionsT *>(value);
+ return CreateSkipGramOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_SpaceToDepthOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SpaceToDepthOptionsT *>(value);
+ return CreateSpaceToDepthOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_EmbeddingLookupSparseOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::EmbeddingLookupSparseOptionsT *>(value);
+ return CreateEmbeddingLookupSparseOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_MulOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::MulOptionsT *>(value);
+ return CreateMulOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_PadOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::PadOptionsT *>(value);
+ return CreatePadOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_GatherOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::GatherOptionsT *>(value);
+ return CreateGatherOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_BatchToSpaceNDOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::BatchToSpaceNDOptionsT *>(value);
+ return CreateBatchToSpaceNDOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_SpaceToBatchNDOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SpaceToBatchNDOptionsT *>(value);
+ return CreateSpaceToBatchNDOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_TransposeOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::TransposeOptionsT *>(value);
+ return CreateTransposeOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_ReducerOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ReducerOptionsT *>(value);
+ return CreateReducerOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_SubOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SubOptionsT *>(value);
+ return CreateSubOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_DivOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::DivOptionsT *>(value);
+ return CreateDivOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_SqueezeOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SqueezeOptionsT *>(value);
+ return CreateSqueezeOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_SequenceRNNOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SequenceRNNOptionsT *>(value);
+ return CreateSequenceRNNOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_StridedSliceOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::StridedSliceOptionsT *>(value);
+ return CreateStridedSliceOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_ExpOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ExpOptionsT *>(value);
+ return CreateExpOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_TopKV2Options:
+ {
+ auto ptr = reinterpret_cast<const circle::TopKV2OptionsT *>(value);
+ return CreateTopKV2Options(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_SplitOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SplitOptionsT *>(value);
+ return CreateSplitOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_LogSoftmaxOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::LogSoftmaxOptionsT *>(value);
+ return CreateLogSoftmaxOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_CastOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::CastOptionsT *>(value);
+ return CreateCastOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_DequantizeOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::DequantizeOptionsT *>(value);
+ return CreateDequantizeOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_MaximumMinimumOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::MaximumMinimumOptionsT *>(value);
+ return CreateMaximumMinimumOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_ArgMaxOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ArgMaxOptionsT *>(value);
+ return CreateArgMaxOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_LessOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::LessOptionsT *>(value);
+ return CreateLessOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_NegOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::NegOptionsT *>(value);
+ return CreateNegOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_PadV2Options:
+ {
+ auto ptr = reinterpret_cast<const circle::PadV2OptionsT *>(value);
+ return CreatePadV2Options(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_GreaterOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::GreaterOptionsT *>(value);
+ return CreateGreaterOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_GreaterEqualOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::GreaterEqualOptionsT *>(value);
+ return CreateGreaterEqualOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_LessEqualOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::LessEqualOptionsT *>(value);
+ return CreateLessEqualOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_SelectOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SelectOptionsT *>(value);
+ return CreateSelectOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_SliceOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SliceOptionsT *>(value);
+ return CreateSliceOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_TransposeConvOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::TransposeConvOptionsT *>(value);
+ return CreateTransposeConvOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_SparseToDenseOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SparseToDenseOptionsT *>(value);
+ return CreateSparseToDenseOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_TileOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::TileOptionsT *>(value);
+ return CreateTileOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_ExpandDimsOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ExpandDimsOptionsT *>(value);
+ return CreateExpandDimsOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_EqualOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::EqualOptionsT *>(value);
+ return CreateEqualOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_NotEqualOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::NotEqualOptionsT *>(value);
+ return CreateNotEqualOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_ShapeOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ShapeOptionsT *>(value);
+ return CreateShapeOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_PowOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::PowOptionsT *>(value);
+ return CreatePowOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_ArgMinOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ArgMinOptionsT *>(value);
+ return CreateArgMinOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_FakeQuantOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::FakeQuantOptionsT *>(value);
+ return CreateFakeQuantOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_PackOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::PackOptionsT *>(value);
+ return CreatePackOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_LogicalOrOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::LogicalOrOptionsT *>(value);
+ return CreateLogicalOrOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_OneHotOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::OneHotOptionsT *>(value);
+ return CreateOneHotOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_LogicalAndOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::LogicalAndOptionsT *>(value);
+ return CreateLogicalAndOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_LogicalNotOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::LogicalNotOptionsT *>(value);
+ return CreateLogicalNotOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_UnpackOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::UnpackOptionsT *>(value);
+ return CreateUnpackOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_FloorDivOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::FloorDivOptionsT *>(value);
+ return CreateFloorDivOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_SquareOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SquareOptionsT *>(value);
+ return CreateSquareOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_ZerosLikeOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ZerosLikeOptionsT *>(value);
+ return CreateZerosLikeOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_FillOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::FillOptionsT *>(value);
+ return CreateFillOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_BidirectionalSequenceLSTMOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::BidirectionalSequenceLSTMOptionsT *>(value);
+ return CreateBidirectionalSequenceLSTMOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_BidirectionalSequenceRNNOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::BidirectionalSequenceRNNOptionsT *>(value);
+ return CreateBidirectionalSequenceRNNOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_UnidirectionalSequenceLSTMOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::UnidirectionalSequenceLSTMOptionsT *>(value);
+ return CreateUnidirectionalSequenceLSTMOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_FloorModOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::FloorModOptionsT *>(value);
+ return CreateFloorModOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_RangeOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::RangeOptionsT *>(value);
+ return CreateRangeOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_ResizeNearestNeighborOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ResizeNearestNeighborOptionsT *>(value);
+ return CreateResizeNearestNeighborOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_LeakyReluOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::LeakyReluOptionsT *>(value);
+ return CreateLeakyReluOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_SquaredDifferenceOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SquaredDifferenceOptionsT *>(value);
+ return CreateSquaredDifferenceOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_MirrorPadOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::MirrorPadOptionsT *>(value);
+ return CreateMirrorPadOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_AbsOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::AbsOptionsT *>(value);
+ return CreateAbsOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_SplitVOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SplitVOptionsT *>(value);
+ return CreateSplitVOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_UniqueOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::UniqueOptionsT *>(value);
+ return CreateUniqueOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_ReverseV2Options:
+ {
+ auto ptr = reinterpret_cast<const circle::ReverseV2OptionsT *>(value);
+ return CreateReverseV2Options(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_AddNOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::AddNOptionsT *>(value);
+ return CreateAddNOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_GatherNdOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::GatherNdOptionsT *>(value);
+ return CreateGatherNdOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_CosOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::CosOptionsT *>(value);
+ return CreateCosOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_WhereOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::WhereOptionsT *>(value);
+ return CreateWhereOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_RankOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::RankOptionsT *>(value);
+ return CreateRankOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_ReverseSequenceOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ReverseSequenceOptionsT *>(value);
+ return CreateReverseSequenceOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_MatrixDiagOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::MatrixDiagOptionsT *>(value);
+ return CreateMatrixDiagOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_QuantizeOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::QuantizeOptionsT *>(value);
+ return CreateQuantizeOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_MatrixSetDiagOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::MatrixSetDiagOptionsT *>(value);
+ return CreateMatrixSetDiagOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_HardSwishOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::HardSwishOptionsT *>(value);
+ return CreateHardSwishOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_IfOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::IfOptionsT *>(value);
+ return CreateIfOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_WhileOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::WhileOptionsT *>(value);
+ return CreateWhileOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_DepthToSpaceOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::DepthToSpaceOptionsT *>(value);
+ return CreateDepthToSpaceOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_NonMaxSuppressionV4Options:
+ {
+ auto ptr = reinterpret_cast<const circle::NonMaxSuppressionV4OptionsT *>(value);
+ return CreateNonMaxSuppressionV4Options(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_NonMaxSuppressionV5Options:
+ {
+ auto ptr = reinterpret_cast<const circle::NonMaxSuppressionV5OptionsT *>(value);
+ return CreateNonMaxSuppressionV5Options(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_ScatterNdOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ScatterNdOptionsT *>(value);
+ return CreateScatterNdOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_SelectV2Options:
+ {
+ auto ptr = reinterpret_cast<const circle::SelectV2OptionsT *>(value);
+ return CreateSelectV2Options(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_DensifyOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::DensifyOptionsT *>(value);
+ return CreateDensifyOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_SegmentSumOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::SegmentSumOptionsT *>(value);
+ return CreateSegmentSumOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_BatchMatMulOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::BatchMatMulOptionsT *>(value);
+ return CreateBatchMatMulOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_CumsumOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::CumsumOptionsT *>(value);
+ return CreateCumsumOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_CallOnceOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::CallOnceOptionsT *>(value);
+ return CreateCallOnceOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_BroadcastToOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::BroadcastToOptionsT *>(value);
+ return CreateBroadcastToOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_Rfft2dOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::Rfft2dOptionsT *>(value);
+ return CreateRfft2dOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_Conv3DOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::Conv3DOptionsT *>(value);
+ return CreateConv3DOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_HashtableOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::HashtableOptionsT *>(value);
+ return CreateHashtableOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_HashtableFindOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::HashtableFindOptionsT *>(value);
+ return CreateHashtableFindOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_HashtableImportOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::HashtableImportOptionsT *>(value);
+ return CreateHashtableImportOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_HashtableSizeOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::HashtableSizeOptionsT *>(value);
+ return CreateHashtableSizeOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_VarHandleOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::VarHandleOptionsT *>(value);
+ return CreateVarHandleOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_ReadVariableOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::ReadVariableOptionsT *>(value);
+ return CreateReadVariableOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_AssignVariableOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::AssignVariableOptionsT *>(value);
+ return CreateAssignVariableOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_RandomOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::RandomOptionsT *>(value);
+ return CreateRandomOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_BCQGatherOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::BCQGatherOptionsT *>(value);
+ return CreateBCQGatherOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_BCQFullyConnectedOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::BCQFullyConnectedOptionsT *>(value);
+ return CreateBCQFullyConnectedOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_InstanceNormOptions:
+ {
+ auto ptr = reinterpret_cast<const circle::InstanceNormOptionsT *>(value);
+ return CreateInstanceNormOptions(_fbb, ptr, _rehasher).Union();
+ }
+ default:
+ return 0;
+ }
+}
+
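+// Copy constructor: deep-copies the active native object so the two unions do not
+// share ownership of the underlying *OptionsT value.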
+inline BuiltinOptionsUnion::BuiltinOptionsUnion(const BuiltinOptionsUnion &u)
+ : type(u.type), value(nullptr)
+{
+ switch (type)
+ {
+ case BuiltinOptions_Conv2DOptions:
+ {
+ value = new circle::Conv2DOptionsT(*reinterpret_cast<circle::Conv2DOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_DepthwiseConv2DOptions:
+ {
+ value = new circle::DepthwiseConv2DOptionsT(
+ *reinterpret_cast<circle::DepthwiseConv2DOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_ConcatEmbeddingsOptions:
+ {
+ value = new circle::ConcatEmbeddingsOptionsT(
+ *reinterpret_cast<circle::ConcatEmbeddingsOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_LSHProjectionOptions:
+ {
+ value = new circle::LSHProjectionOptionsT(
+ *reinterpret_cast<circle::LSHProjectionOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_Pool2DOptions:
+ {
+ value = new circle::Pool2DOptionsT(*reinterpret_cast<circle::Pool2DOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_SVDFOptions:
+ {
+ value = new circle::SVDFOptionsT(*reinterpret_cast<circle::SVDFOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_RNNOptions:
+ {
+ value = new circle::RNNOptionsT(*reinterpret_cast<circle::RNNOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_FullyConnectedOptions:
+ {
+ value = new circle::FullyConnectedOptionsT(
+ *reinterpret_cast<circle::FullyConnectedOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_SoftmaxOptions:
+ {
+ value = new circle::SoftmaxOptionsT(*reinterpret_cast<circle::SoftmaxOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_ConcatenationOptions:
+ {
+ value = new circle::ConcatenationOptionsT(
+ *reinterpret_cast<circle::ConcatenationOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_AddOptions:
+ {
+ value = new circle::AddOptionsT(*reinterpret_cast<circle::AddOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_L2NormOptions:
+ {
+ value = new circle::L2NormOptionsT(*reinterpret_cast<circle::L2NormOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_LocalResponseNormalizationOptions:
+ {
+ value = new circle::LocalResponseNormalizationOptionsT(
+ *reinterpret_cast<circle::LocalResponseNormalizationOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_LSTMOptions:
+ {
+ value = new circle::LSTMOptionsT(*reinterpret_cast<circle::LSTMOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_ResizeBilinearOptions:
+ {
+ value = new circle::ResizeBilinearOptionsT(
+ *reinterpret_cast<circle::ResizeBilinearOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_CallOptions:
+ {
+ value = new circle::CallOptionsT(*reinterpret_cast<circle::CallOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_ReshapeOptions:
+ {
+ value = new circle::ReshapeOptionsT(*reinterpret_cast<circle::ReshapeOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_SkipGramOptions:
+ {
+ value = new circle::SkipGramOptionsT(*reinterpret_cast<circle::SkipGramOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_SpaceToDepthOptions:
+ {
+ value = new circle::SpaceToDepthOptionsT(
+ *reinterpret_cast<circle::SpaceToDepthOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_EmbeddingLookupSparseOptions:
+ {
+ value = new circle::EmbeddingLookupSparseOptionsT(
+ *reinterpret_cast<circle::EmbeddingLookupSparseOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_MulOptions:
+ {
+ value = new circle::MulOptionsT(*reinterpret_cast<circle::MulOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_PadOptions:
+ {
+ value = new circle::PadOptionsT(*reinterpret_cast<circle::PadOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_GatherOptions:
+ {
+ value = new circle::GatherOptionsT(*reinterpret_cast<circle::GatherOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_BatchToSpaceNDOptions:
+ {
+ value = new circle::BatchToSpaceNDOptionsT(
+ *reinterpret_cast<circle::BatchToSpaceNDOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_SpaceToBatchNDOptions:
+ {
+ value = new circle::SpaceToBatchNDOptionsT(
+ *reinterpret_cast<circle::SpaceToBatchNDOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_TransposeOptions:
+ {
+ value =
+ new circle::TransposeOptionsT(*reinterpret_cast<circle::TransposeOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_ReducerOptions:
+ {
+ value = new circle::ReducerOptionsT(*reinterpret_cast<circle::ReducerOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_SubOptions:
+ {
+ value = new circle::SubOptionsT(*reinterpret_cast<circle::SubOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_DivOptions:
+ {
+ value = new circle::DivOptionsT(*reinterpret_cast<circle::DivOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_SqueezeOptions:
+ {
+ value = new circle::SqueezeOptionsT(*reinterpret_cast<circle::SqueezeOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_SequenceRNNOptions:
+ {
+ value =
+ new circle::SequenceRNNOptionsT(*reinterpret_cast<circle::SequenceRNNOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_StridedSliceOptions:
+ {
+ value = new circle::StridedSliceOptionsT(
+ *reinterpret_cast<circle::StridedSliceOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_ExpOptions:
+ {
+ value = new circle::ExpOptionsT(*reinterpret_cast<circle::ExpOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_TopKV2Options:
+ {
+ value = new circle::TopKV2OptionsT(*reinterpret_cast<circle::TopKV2OptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_SplitOptions:
+ {
+ value = new circle::SplitOptionsT(*reinterpret_cast<circle::SplitOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_LogSoftmaxOptions:
+ {
+ value =
+ new circle::LogSoftmaxOptionsT(*reinterpret_cast<circle::LogSoftmaxOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_CastOptions:
+ {
+ value = new circle::CastOptionsT(*reinterpret_cast<circle::CastOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_DequantizeOptions:
+ {
+ value =
+ new circle::DequantizeOptionsT(*reinterpret_cast<circle::DequantizeOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_MaximumMinimumOptions:
+ {
+ value = new circle::MaximumMinimumOptionsT(
+ *reinterpret_cast<circle::MaximumMinimumOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_ArgMaxOptions:
+ {
+ value = new circle::ArgMaxOptionsT(*reinterpret_cast<circle::ArgMaxOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_LessOptions:
+ {
+ value = new circle::LessOptionsT(*reinterpret_cast<circle::LessOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_NegOptions:
+ {
+ value = new circle::NegOptionsT(*reinterpret_cast<circle::NegOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_PadV2Options:
+ {
+ value = new circle::PadV2OptionsT(*reinterpret_cast<circle::PadV2OptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_GreaterOptions:
+ {
+ value = new circle::GreaterOptionsT(*reinterpret_cast<circle::GreaterOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_GreaterEqualOptions:
+ {
+ value = new circle::GreaterEqualOptionsT(
+ *reinterpret_cast<circle::GreaterEqualOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_LessEqualOptions:
+ {
+ value =
+ new circle::LessEqualOptionsT(*reinterpret_cast<circle::LessEqualOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_SelectOptions:
+ {
+ value = new circle::SelectOptionsT(*reinterpret_cast<circle::SelectOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_SliceOptions:
+ {
+ value = new circle::SliceOptionsT(*reinterpret_cast<circle::SliceOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_TransposeConvOptions:
+ {
+ value = new circle::TransposeConvOptionsT(
+ *reinterpret_cast<circle::TransposeConvOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_SparseToDenseOptions:
+ {
+ value = new circle::SparseToDenseOptionsT(
+ *reinterpret_cast<circle::SparseToDenseOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_TileOptions:
+ {
+ value = new circle::TileOptionsT(*reinterpret_cast<circle::TileOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_ExpandDimsOptions:
+ {
+ value =
+ new circle::ExpandDimsOptionsT(*reinterpret_cast<circle::ExpandDimsOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_EqualOptions:
+ {
+ value = new circle::EqualOptionsT(*reinterpret_cast<circle::EqualOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_NotEqualOptions:
+ {
+ value = new circle::NotEqualOptionsT(*reinterpret_cast<circle::NotEqualOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_ShapeOptions:
+ {
+ value = new circle::ShapeOptionsT(*reinterpret_cast<circle::ShapeOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_PowOptions:
+ {
+ value = new circle::PowOptionsT(*reinterpret_cast<circle::PowOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_ArgMinOptions:
+ {
+ value = new circle::ArgMinOptionsT(*reinterpret_cast<circle::ArgMinOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_FakeQuantOptions:
+ {
+ value =
+ new circle::FakeQuantOptionsT(*reinterpret_cast<circle::FakeQuantOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_PackOptions:
+ {
+ value = new circle::PackOptionsT(*reinterpret_cast<circle::PackOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_LogicalOrOptions:
+ {
+ value =
+ new circle::LogicalOrOptionsT(*reinterpret_cast<circle::LogicalOrOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_OneHotOptions:
+ {
+ value = new circle::OneHotOptionsT(*reinterpret_cast<circle::OneHotOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_LogicalAndOptions:
+ {
+ value =
+ new circle::LogicalAndOptionsT(*reinterpret_cast<circle::LogicalAndOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_LogicalNotOptions:
+ {
+ value =
+ new circle::LogicalNotOptionsT(*reinterpret_cast<circle::LogicalNotOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_UnpackOptions:
+ {
+ value = new circle::UnpackOptionsT(*reinterpret_cast<circle::UnpackOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_FloorDivOptions:
+ {
+ value = new circle::FloorDivOptionsT(*reinterpret_cast<circle::FloorDivOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_SquareOptions:
+ {
+ value = new circle::SquareOptionsT(*reinterpret_cast<circle::SquareOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_ZerosLikeOptions:
+ {
+ value =
+ new circle::ZerosLikeOptionsT(*reinterpret_cast<circle::ZerosLikeOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_FillOptions:
+ {
+ value = new circle::FillOptionsT(*reinterpret_cast<circle::FillOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_BidirectionalSequenceLSTMOptions:
+ {
+ value = new circle::BidirectionalSequenceLSTMOptionsT(
+ *reinterpret_cast<circle::BidirectionalSequenceLSTMOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_BidirectionalSequenceRNNOptions:
+ {
+ value = new circle::BidirectionalSequenceRNNOptionsT(
+ *reinterpret_cast<circle::BidirectionalSequenceRNNOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_UnidirectionalSequenceLSTMOptions:
+ {
+ value = new circle::UnidirectionalSequenceLSTMOptionsT(
+ *reinterpret_cast<circle::UnidirectionalSequenceLSTMOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_FloorModOptions:
+ {
+ value = new circle::FloorModOptionsT(*reinterpret_cast<circle::FloorModOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_RangeOptions:
+ {
+ value = new circle::RangeOptionsT(*reinterpret_cast<circle::RangeOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_ResizeNearestNeighborOptions:
+ {
+ value = new circle::ResizeNearestNeighborOptionsT(
+ *reinterpret_cast<circle::ResizeNearestNeighborOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_LeakyReluOptions:
+ {
+ value =
+ new circle::LeakyReluOptionsT(*reinterpret_cast<circle::LeakyReluOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_SquaredDifferenceOptions:
+ {
+ value = new circle::SquaredDifferenceOptionsT(
+ *reinterpret_cast<circle::SquaredDifferenceOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_MirrorPadOptions:
+ {
+ value =
+ new circle::MirrorPadOptionsT(*reinterpret_cast<circle::MirrorPadOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_AbsOptions:
+ {
+ value = new circle::AbsOptionsT(*reinterpret_cast<circle::AbsOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_SplitVOptions:
+ {
+ value = new circle::SplitVOptionsT(*reinterpret_cast<circle::SplitVOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_UniqueOptions:
+ {
+ value = new circle::UniqueOptionsT(*reinterpret_cast<circle::UniqueOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_ReverseV2Options:
+ {
+ value =
+ new circle::ReverseV2OptionsT(*reinterpret_cast<circle::ReverseV2OptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_AddNOptions:
+ {
+ value = new circle::AddNOptionsT(*reinterpret_cast<circle::AddNOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_GatherNdOptions:
+ {
+ value = new circle::GatherNdOptionsT(*reinterpret_cast<circle::GatherNdOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_CosOptions:
+ {
+ value = new circle::CosOptionsT(*reinterpret_cast<circle::CosOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_WhereOptions:
+ {
+ value = new circle::WhereOptionsT(*reinterpret_cast<circle::WhereOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_RankOptions:
+ {
+ value = new circle::RankOptionsT(*reinterpret_cast<circle::RankOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_ReverseSequenceOptions:
+ {
+ value = new circle::ReverseSequenceOptionsT(
+ *reinterpret_cast<circle::ReverseSequenceOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_MatrixDiagOptions:
+ {
+ value =
+ new circle::MatrixDiagOptionsT(*reinterpret_cast<circle::MatrixDiagOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_QuantizeOptions:
+ {
+ value = new circle::QuantizeOptionsT(*reinterpret_cast<circle::QuantizeOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_MatrixSetDiagOptions:
+ {
+ value = new circle::MatrixSetDiagOptionsT(
+ *reinterpret_cast<circle::MatrixSetDiagOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_HardSwishOptions:
+ {
+ value =
+ new circle::HardSwishOptionsT(*reinterpret_cast<circle::HardSwishOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_IfOptions:
+ {
+ value = new circle::IfOptionsT(*reinterpret_cast<circle::IfOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_WhileOptions:
+ {
+ value = new circle::WhileOptionsT(*reinterpret_cast<circle::WhileOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_DepthToSpaceOptions:
+ {
+ value = new circle::DepthToSpaceOptionsT(
+ *reinterpret_cast<circle::DepthToSpaceOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_NonMaxSuppressionV4Options:
+ {
+ value = new circle::NonMaxSuppressionV4OptionsT(
+ *reinterpret_cast<circle::NonMaxSuppressionV4OptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_NonMaxSuppressionV5Options:
+ {
+ value = new circle::NonMaxSuppressionV5OptionsT(
+ *reinterpret_cast<circle::NonMaxSuppressionV5OptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_ScatterNdOptions:
+ {
+ value =
+ new circle::ScatterNdOptionsT(*reinterpret_cast<circle::ScatterNdOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_SelectV2Options:
+ {
+ value = new circle::SelectV2OptionsT(*reinterpret_cast<circle::SelectV2OptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_DensifyOptions:
+ {
+ value = new circle::DensifyOptionsT(*reinterpret_cast<circle::DensifyOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_SegmentSumOptions:
+ {
+ value =
+ new circle::SegmentSumOptionsT(*reinterpret_cast<circle::SegmentSumOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_BatchMatMulOptions:
+ {
+ value =
+ new circle::BatchMatMulOptionsT(*reinterpret_cast<circle::BatchMatMulOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_CumsumOptions:
+ {
+ value = new circle::CumsumOptionsT(*reinterpret_cast<circle::CumsumOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_CallOnceOptions:
+ {
+ value = new circle::CallOnceOptionsT(*reinterpret_cast<circle::CallOnceOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_BroadcastToOptions:
+ {
+ value =
+ new circle::BroadcastToOptionsT(*reinterpret_cast<circle::BroadcastToOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_Rfft2dOptions:
+ {
+ value = new circle::Rfft2dOptionsT(*reinterpret_cast<circle::Rfft2dOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_Conv3DOptions:
+ {
+ value = new circle::Conv3DOptionsT(*reinterpret_cast<circle::Conv3DOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_HashtableOptions:
+ {
+ value =
+ new circle::HashtableOptionsT(*reinterpret_cast<circle::HashtableOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_HashtableFindOptions:
+ {
+ value = new circle::HashtableFindOptionsT(
+ *reinterpret_cast<circle::HashtableFindOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_HashtableImportOptions:
+ {
+ value = new circle::HashtableImportOptionsT(
+ *reinterpret_cast<circle::HashtableImportOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_HashtableSizeOptions:
+ {
+ value = new circle::HashtableSizeOptionsT(
+ *reinterpret_cast<circle::HashtableSizeOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_VarHandleOptions:
+ {
+ value =
+ new circle::VarHandleOptionsT(*reinterpret_cast<circle::VarHandleOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_ReadVariableOptions:
+ {
+ value = new circle::ReadVariableOptionsT(
+ *reinterpret_cast<circle::ReadVariableOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_AssignVariableOptions:
+ {
+ value = new circle::AssignVariableOptionsT(
+ *reinterpret_cast<circle::AssignVariableOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_RandomOptions:
+ {
+ value = new circle::RandomOptionsT(*reinterpret_cast<circle::RandomOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_BCQGatherOptions:
+ {
+ value =
+ new circle::BCQGatherOptionsT(*reinterpret_cast<circle::BCQGatherOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_BCQFullyConnectedOptions:
+ {
+ value = new circle::BCQFullyConnectedOptionsT(
+ *reinterpret_cast<circle::BCQFullyConnectedOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_InstanceNormOptions:
+ {
+ value = new circle::InstanceNormOptionsT(
+ *reinterpret_cast<circle::InstanceNormOptionsT *>(u.value));
+ break;
+ }
+ default:
+ break;
+ }
+}
+
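+// Reset() releases the object currently owned by the union: it deletes `value`
+// through the concrete *OptionsT type selected by `type`, then clears the union
+// back to BuiltinOptions_NONE with a null payload.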
+inline void BuiltinOptionsUnion::Reset()
+{
+ switch (type)
+ {
+ case BuiltinOptions_Conv2DOptions:
+ {
+ auto ptr = reinterpret_cast<circle::Conv2DOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_DepthwiseConv2DOptions:
+ {
+ auto ptr = reinterpret_cast<circle::DepthwiseConv2DOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_ConcatEmbeddingsOptions:
+ {
+ auto ptr = reinterpret_cast<circle::ConcatEmbeddingsOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_LSHProjectionOptions:
+ {
+ auto ptr = reinterpret_cast<circle::LSHProjectionOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_Pool2DOptions:
+ {
+ auto ptr = reinterpret_cast<circle::Pool2DOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_SVDFOptions:
+ {
+ auto ptr = reinterpret_cast<circle::SVDFOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_RNNOptions:
+ {
+ auto ptr = reinterpret_cast<circle::RNNOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_FullyConnectedOptions:
+ {
+ auto ptr = reinterpret_cast<circle::FullyConnectedOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_SoftmaxOptions:
+ {
+ auto ptr = reinterpret_cast<circle::SoftmaxOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_ConcatenationOptions:
+ {
+ auto ptr = reinterpret_cast<circle::ConcatenationOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_AddOptions:
+ {
+ auto ptr = reinterpret_cast<circle::AddOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_L2NormOptions:
+ {
+ auto ptr = reinterpret_cast<circle::L2NormOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_LocalResponseNormalizationOptions:
+ {
+ auto ptr = reinterpret_cast<circle::LocalResponseNormalizationOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_LSTMOptions:
+ {
+ auto ptr = reinterpret_cast<circle::LSTMOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_ResizeBilinearOptions:
+ {
+ auto ptr = reinterpret_cast<circle::ResizeBilinearOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_CallOptions:
+ {
+ auto ptr = reinterpret_cast<circle::CallOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_ReshapeOptions:
+ {
+ auto ptr = reinterpret_cast<circle::ReshapeOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_SkipGramOptions:
+ {
+ auto ptr = reinterpret_cast<circle::SkipGramOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_SpaceToDepthOptions:
+ {
+ auto ptr = reinterpret_cast<circle::SpaceToDepthOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_EmbeddingLookupSparseOptions:
+ {
+ auto ptr = reinterpret_cast<circle::EmbeddingLookupSparseOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_MulOptions:
+ {
+ auto ptr = reinterpret_cast<circle::MulOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_PadOptions:
+ {
+ auto ptr = reinterpret_cast<circle::PadOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_GatherOptions:
+ {
+ auto ptr = reinterpret_cast<circle::GatherOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_BatchToSpaceNDOptions:
+ {
+ auto ptr = reinterpret_cast<circle::BatchToSpaceNDOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_SpaceToBatchNDOptions:
+ {
+ auto ptr = reinterpret_cast<circle::SpaceToBatchNDOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_TransposeOptions:
+ {
+ auto ptr = reinterpret_cast<circle::TransposeOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_ReducerOptions:
+ {
+ auto ptr = reinterpret_cast<circle::ReducerOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_SubOptions:
+ {
+ auto ptr = reinterpret_cast<circle::SubOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_DivOptions:
+ {
+ auto ptr = reinterpret_cast<circle::DivOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_SqueezeOptions:
+ {
+ auto ptr = reinterpret_cast<circle::SqueezeOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_SequenceRNNOptions:
+ {
+ auto ptr = reinterpret_cast<circle::SequenceRNNOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_StridedSliceOptions:
+ {
+ auto ptr = reinterpret_cast<circle::StridedSliceOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_ExpOptions:
+ {
+ auto ptr = reinterpret_cast<circle::ExpOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_TopKV2Options:
+ {
+ auto ptr = reinterpret_cast<circle::TopKV2OptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_SplitOptions:
+ {
+ auto ptr = reinterpret_cast<circle::SplitOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_LogSoftmaxOptions:
+ {
+ auto ptr = reinterpret_cast<circle::LogSoftmaxOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_CastOptions:
+ {
+ auto ptr = reinterpret_cast<circle::CastOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_DequantizeOptions:
+ {
+ auto ptr = reinterpret_cast<circle::DequantizeOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_MaximumMinimumOptions:
+ {
+ auto ptr = reinterpret_cast<circle::MaximumMinimumOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_ArgMaxOptions:
+ {
+ auto ptr = reinterpret_cast<circle::ArgMaxOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_LessOptions:
+ {
+ auto ptr = reinterpret_cast<circle::LessOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_NegOptions:
+ {
+ auto ptr = reinterpret_cast<circle::NegOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_PadV2Options:
+ {
+ auto ptr = reinterpret_cast<circle::PadV2OptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_GreaterOptions:
+ {
+ auto ptr = reinterpret_cast<circle::GreaterOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_GreaterEqualOptions:
+ {
+ auto ptr = reinterpret_cast<circle::GreaterEqualOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_LessEqualOptions:
+ {
+ auto ptr = reinterpret_cast<circle::LessEqualOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_SelectOptions:
+ {
+ auto ptr = reinterpret_cast<circle::SelectOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_SliceOptions:
+ {
+ auto ptr = reinterpret_cast<circle::SliceOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_TransposeConvOptions:
+ {
+ auto ptr = reinterpret_cast<circle::TransposeConvOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_SparseToDenseOptions:
+ {
+ auto ptr = reinterpret_cast<circle::SparseToDenseOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_TileOptions:
+ {
+ auto ptr = reinterpret_cast<circle::TileOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_ExpandDimsOptions:
+ {
+ auto ptr = reinterpret_cast<circle::ExpandDimsOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_EqualOptions:
+ {
+ auto ptr = reinterpret_cast<circle::EqualOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_NotEqualOptions:
+ {
+ auto ptr = reinterpret_cast<circle::NotEqualOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_ShapeOptions:
+ {
+ auto ptr = reinterpret_cast<circle::ShapeOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_PowOptions:
+ {
+ auto ptr = reinterpret_cast<circle::PowOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_ArgMinOptions:
+ {
+ auto ptr = reinterpret_cast<circle::ArgMinOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_FakeQuantOptions:
+ {
+ auto ptr = reinterpret_cast<circle::FakeQuantOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_PackOptions:
+ {
+ auto ptr = reinterpret_cast<circle::PackOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_LogicalOrOptions:
+ {
+ auto ptr = reinterpret_cast<circle::LogicalOrOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_OneHotOptions:
+ {
+ auto ptr = reinterpret_cast<circle::OneHotOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_LogicalAndOptions:
+ {
+ auto ptr = reinterpret_cast<circle::LogicalAndOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_LogicalNotOptions:
+ {
+ auto ptr = reinterpret_cast<circle::LogicalNotOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_UnpackOptions:
+ {
+ auto ptr = reinterpret_cast<circle::UnpackOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_FloorDivOptions:
+ {
+ auto ptr = reinterpret_cast<circle::FloorDivOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_SquareOptions:
+ {
+ auto ptr = reinterpret_cast<circle::SquareOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_ZerosLikeOptions:
+ {
+ auto ptr = reinterpret_cast<circle::ZerosLikeOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_FillOptions:
+ {
+ auto ptr = reinterpret_cast<circle::FillOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_BidirectionalSequenceLSTMOptions:
+ {
+ auto ptr = reinterpret_cast<circle::BidirectionalSequenceLSTMOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_BidirectionalSequenceRNNOptions:
+ {
+ auto ptr = reinterpret_cast<circle::BidirectionalSequenceRNNOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_UnidirectionalSequenceLSTMOptions:
+ {
+ auto ptr = reinterpret_cast<circle::UnidirectionalSequenceLSTMOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_FloorModOptions:
+ {
+ auto ptr = reinterpret_cast<circle::FloorModOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_RangeOptions:
+ {
+ auto ptr = reinterpret_cast<circle::RangeOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_ResizeNearestNeighborOptions:
+ {
+ auto ptr = reinterpret_cast<circle::ResizeNearestNeighborOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_LeakyReluOptions:
+ {
+ auto ptr = reinterpret_cast<circle::LeakyReluOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_SquaredDifferenceOptions:
+ {
+ auto ptr = reinterpret_cast<circle::SquaredDifferenceOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_MirrorPadOptions:
+ {
+ auto ptr = reinterpret_cast<circle::MirrorPadOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_AbsOptions:
+ {
+ auto ptr = reinterpret_cast<circle::AbsOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_SplitVOptions:
+ {
+ auto ptr = reinterpret_cast<circle::SplitVOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_UniqueOptions:
+ {
+ auto ptr = reinterpret_cast<circle::UniqueOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_ReverseV2Options:
+ {
+ auto ptr = reinterpret_cast<circle::ReverseV2OptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_AddNOptions:
+ {
+ auto ptr = reinterpret_cast<circle::AddNOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_GatherNdOptions:
+ {
+ auto ptr = reinterpret_cast<circle::GatherNdOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_CosOptions:
+ {
+ auto ptr = reinterpret_cast<circle::CosOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_WhereOptions:
+ {
+ auto ptr = reinterpret_cast<circle::WhereOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_RankOptions:
+ {
+ auto ptr = reinterpret_cast<circle::RankOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_ReverseSequenceOptions:
+ {
+ auto ptr = reinterpret_cast<circle::ReverseSequenceOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_MatrixDiagOptions:
+ {
+ auto ptr = reinterpret_cast<circle::MatrixDiagOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_QuantizeOptions:
+ {
+ auto ptr = reinterpret_cast<circle::QuantizeOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_MatrixSetDiagOptions:
+ {
+ auto ptr = reinterpret_cast<circle::MatrixSetDiagOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_HardSwishOptions:
+ {
+ auto ptr = reinterpret_cast<circle::HardSwishOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_IfOptions:
+ {
+ auto ptr = reinterpret_cast<circle::IfOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_WhileOptions:
+ {
+ auto ptr = reinterpret_cast<circle::WhileOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_DepthToSpaceOptions:
+ {
+ auto ptr = reinterpret_cast<circle::DepthToSpaceOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_NonMaxSuppressionV4Options:
+ {
+ auto ptr = reinterpret_cast<circle::NonMaxSuppressionV4OptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_NonMaxSuppressionV5Options:
+ {
+ auto ptr = reinterpret_cast<circle::NonMaxSuppressionV5OptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_ScatterNdOptions:
+ {
+ auto ptr = reinterpret_cast<circle::ScatterNdOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_SelectV2Options:
+ {
+ auto ptr = reinterpret_cast<circle::SelectV2OptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_DensifyOptions:
+ {
+ auto ptr = reinterpret_cast<circle::DensifyOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_SegmentSumOptions:
+ {
+ auto ptr = reinterpret_cast<circle::SegmentSumOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_BatchMatMulOptions:
+ {
+ auto ptr = reinterpret_cast<circle::BatchMatMulOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_CumsumOptions:
+ {
+ auto ptr = reinterpret_cast<circle::CumsumOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_CallOnceOptions:
+ {
+ auto ptr = reinterpret_cast<circle::CallOnceOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_BroadcastToOptions:
+ {
+ auto ptr = reinterpret_cast<circle::BroadcastToOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_Rfft2dOptions:
+ {
+ auto ptr = reinterpret_cast<circle::Rfft2dOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_Conv3DOptions:
+ {
+ auto ptr = reinterpret_cast<circle::Conv3DOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_HashtableOptions:
+ {
+ auto ptr = reinterpret_cast<circle::HashtableOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_HashtableFindOptions:
+ {
+ auto ptr = reinterpret_cast<circle::HashtableFindOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_HashtableImportOptions:
+ {
+ auto ptr = reinterpret_cast<circle::HashtableImportOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_HashtableSizeOptions:
+ {
+ auto ptr = reinterpret_cast<circle::HashtableSizeOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_VarHandleOptions:
+ {
+ auto ptr = reinterpret_cast<circle::VarHandleOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_ReadVariableOptions:
+ {
+ auto ptr = reinterpret_cast<circle::ReadVariableOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_AssignVariableOptions:
+ {
+ auto ptr = reinterpret_cast<circle::AssignVariableOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_RandomOptions:
+ {
+ auto ptr = reinterpret_cast<circle::RandomOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_BCQGatherOptions:
+ {
+ auto ptr = reinterpret_cast<circle::BCQGatherOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_BCQFullyConnectedOptions:
+ {
+ auto ptr = reinterpret_cast<circle::BCQFullyConnectedOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_InstanceNormOptions:
+ {
+ auto ptr = reinterpret_cast<circle::InstanceNormOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ default:
+ break;
+ }
+ value = nullptr;
+ type = BuiltinOptions_NONE;
+}
+
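+// Root accessors and helpers for the Circle model. The file identifier is "CIR0"
+// and the canonical file extension is "circle"; VerifyModelBuffer() /
+// VerifySizePrefixedModelBuffer() can be used to validate untrusted buffers
+// before calling GetModel() or UnPackModel().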
+inline const circle::Model *GetModel(const void *buf)
+{
+ return flatbuffers::GetRoot<circle::Model>(buf);
+}
+
+inline const circle::Model *GetSizePrefixedModel(const void *buf)
+{
+ return flatbuffers::GetSizePrefixedRoot<circle::Model>(buf);
+}
+
+inline const char *ModelIdentifier() { return "CIR0"; }
+
+inline bool ModelBufferHasIdentifier(const void *buf)
+{
+ return flatbuffers::BufferHasIdentifier(buf, ModelIdentifier());
+}
+
+inline bool VerifyModelBuffer(flatbuffers::Verifier &verifier)
+{
+ return verifier.VerifyBuffer<circle::Model>(ModelIdentifier());
+}
+
+inline bool VerifySizePrefixedModelBuffer(flatbuffers::Verifier &verifier)
+{
+ return verifier.VerifySizePrefixedBuffer<circle::Model>(ModelIdentifier());
+}
+
+inline const char *ModelExtension() { return "circle"; }
+
+inline void FinishModelBuffer(flatbuffers::FlatBufferBuilder &fbb,
+ flatbuffers::Offset<circle::Model> root)
+{
+ fbb.Finish(root, ModelIdentifier());
+}
+
+inline void FinishSizePrefixedModelBuffer(flatbuffers::FlatBufferBuilder &fbb,
+ flatbuffers::Offset<circle::Model> root)
+{
+ fbb.FinishSizePrefixed(root, ModelIdentifier());
+}
+
+inline std::unique_ptr<circle::ModelT>
+UnPackModel(const void *buf, const flatbuffers::resolver_function_t *res = nullptr)
+{
+ return std::unique_ptr<circle::ModelT>(GetModel(buf)->UnPack(res));
+}
+
+inline std::unique_ptr<circle::ModelT>
+UnPackSizePrefixedModel(const void *buf, const flatbuffers::resolver_function_t *res = nullptr)
+{
+ return std::unique_ptr<circle::ModelT>(GetSizePrefixedModel(buf)->UnPack(res));
+}
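+
+// Example (illustrative only, not part of the generated API): reading a Circle
+// model from a raw buffer. `buf` and `size` are assumed to hold a serialized
+// circle::Model.
+//
+//   flatbuffers::Verifier verifier(reinterpret_cast<const uint8_t *>(buf), size);
+//   if (circle::VerifyModelBuffer(verifier))
+//   {
+//     const circle::Model *model = circle::GetModel(buf);               // zero-copy view
+//     std::unique_ptr<circle::ModelT> owned = circle::UnPackModel(buf); // deep copy
+//   }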
+
+} // namespace circle
+
+#endif // FLATBUFFERS_GENERATED_SCHEMA_CIRCLE_H_