diff options
Diffstat (limited to 'compiler/tflchef/core/src')
-rw-r--r--  compiler/tflchef/core/src/Convert.cpp          |  2
-rw-r--r--  compiler/tflchef/core/src/CustomOp/Erf.cpp     | 58
-rw-r--r--  compiler/tflchef/core/src/CustomOp/Erf.h       | 49
-rw-r--r--  compiler/tflchef/core/src/DataChef.def         |  3
-rw-r--r--  compiler/tflchef/core/src/ModelChef.cpp        | 19
-rw-r--r--  compiler/tflchef/core/src/Op/Gelu.cpp          | 34
-rw-r--r--  compiler/tflchef/core/src/Op/Gelu.h            | 46
-rw-r--r--  compiler/tflchef/core/src/Op/HardSwish.cpp     | 27
-rw-r--r--  compiler/tflchef/core/src/Op/HardSwish.h       | 46
-rw-r--r--  compiler/tflchef/core/src/Op/TransposeConv.cpp |  7
-rw-r--r--  compiler/tflchef/core/src/OpChef.def           |  3
-rw-r--r--  compiler/tflchef/core/src/OpChefs.h            |  3
12 files changed, 294 insertions, 3 deletions
diff --git a/compiler/tflchef/core/src/Convert.cpp b/compiler/tflchef/core/src/Convert.cpp index f4dd4b332..d1babf09a 100644 --- a/compiler/tflchef/core/src/Convert.cpp +++ b/compiler/tflchef/core/src/Convert.cpp @@ -77,6 +77,8 @@ tflite::TensorType as_tflite_tensortype(const tflchef::TensorType &value) return tflite::TensorType_BOOL; case tflchef::INT16: return tflite::TensorType_INT16; + case tflchef::INT8: + return tflite::TensorType_INT8; default: break; } diff --git a/compiler/tflchef/core/src/CustomOp/Erf.cpp b/compiler/tflchef/core/src/CustomOp/Erf.cpp new file mode 100644 index 000000000..f611b68e1 --- /dev/null +++ b/compiler/tflchef/core/src/CustomOp/Erf.cpp @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved + * Copyright 2015 The TensorFlow Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "Erf.h" + +#include <flatbuffers/flexbuffers.h> + +flatbuffers::Offset<void> ErfChef::value(flatbuffers::FlatBufferBuilder &fbb) const +{ + return flatbuffers::Offset<void>(); +} + +flatbuffers::Offset<flatbuffers::Vector<uint8_t>> +ErfChef::custom_value(flatbuffers::FlatBufferBuilder &fbb) const +{ + auto &operation = (*_operation); + + assert(operation.type() == "Erf"); + + /** + * REGISTER_OP("Erf") + .Input("x: T") + .Output("y: T") + .Attr("T: {bfloat16, half, float, double}") + .SetShapeFn(shape_inference::UnchangedShape) + */ + + auto flex_buffers = std::make_unique<flexbuffers::Builder>(); + size_t map_start = flex_buffers->StartMap(); + + // TODO Support more data types + flex_buffers->Int("T", tflite::TensorType_FLOAT32); + + flex_buffers->EndMap(map_start); + flex_buffers->Finish(); + + auto circle_custom_options = fbb.CreateVector(flex_buffers->GetBuffer()); + return circle_custom_options; +} + +std::unique_ptr<OpChef> ErfChefFactory::create(const tflchef::Operation *operation) const +{ + return std::unique_ptr<OpChef>{new ErfChef{operation}}; +} diff --git a/compiler/tflchef/core/src/CustomOp/Erf.h b/compiler/tflchef/core/src/CustomOp/Erf.h new file mode 100644 index 000000000..192c5f334 --- /dev/null +++ b/compiler/tflchef/core/src/CustomOp/Erf.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __OP_ERF_H__ +#define __OP_ERF_H__ + +#include "OpChef.h" + +class ErfChef final : public OpChef +{ +public: + explicit ErfChef(const tflchef::Operation *operation) : _operation{operation} + { + // DO NOTHING + } + +public: + tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_CUSTOM; } + + tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_NONE; } + + flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override; + + flatbuffers::Offset<flatbuffers::Vector<uint8_t>> + custom_value(flatbuffers::FlatBufferBuilder &fbb) const override; + +private: + const tflchef::Operation *_operation; +}; + +struct ErfChefFactory final : public OpChefFactory +{ + std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override; +}; + +#endif // __OP_ERF_H__ diff --git a/compiler/tflchef/core/src/DataChef.def b/compiler/tflchef/core/src/DataChef.def index 28a5b7617..abe642645 100644 --- a/compiler/tflchef/core/src/DataChef.def +++ b/compiler/tflchef/core/src/DataChef.def @@ -7,12 +7,14 @@ DATA_CHEF(FLOAT32, constant, ConstantDataChefFactory<float>) DATA_CHEF(BOOL, constant, ConstantDataChefFactory<bool>) DATA_CHEF(UINT8, constant, ConstantDataChefFactory<uint8_t>) +DATA_CHEF(INT8, constant, ConstantDataChefFactory<int8_t>) DATA_CHEF(INT16, constant, ConstantDataChefFactory<int16_t>) DATA_CHEF(INT32, constant, ConstantDataChefFactory<int32_t>) DATA_CHEF(INT64, constant, ConstantDataChefFactory<int64_t>) DATA_CHEF(INT64, explicit, ExplicitDataChefFactory<int64_t>) DATA_CHEF(INT32, explicit, ExplicitDataChefFactory<int32_t>) DATA_CHEF(INT16, explicit, ExplicitDataChefFactory<int16_t>) +DATA_CHEF(INT8, explicit, ExplicitDataChefFactory<int8_t>) DATA_CHEF(UINT8, explicit, ExplicitDataChefFactory<uint8_t>) DATA_CHEF(BOOL, explicit, ExplicitDataChefFactory<bool>) DATA_CHEF(FLOAT32, explicit, ExplicitDataChefFactory<float>) @@ -20,6 +22,7 @@ DATA_CHEF(STRING, explicit, 
ExplicitDataChefFactory<std::string>) DATA_CHEF(FLOAT32, gaussian, GaussianFloat32DataChefFactory) DATA_CHEF(INT32, gaussian, GaussianInt32DataChefFactory) DATA_CHEF(INT16, gaussian, GaussianInt16DataChefFactory) +DATA_CHEF(INT8, gaussian, GaussianInt8DataChefFactory) DATA_CHEF(UINT8, gaussian, GaussianUint8DataChefFactory) // FLOAT16 support for only gaussian, explicit for now diff --git a/compiler/tflchef/core/src/ModelChef.cpp b/compiler/tflchef/core/src/ModelChef.cpp index a788adc02..3afcd232d 100644 --- a/compiler/tflchef/core/src/ModelChef.cpp +++ b/compiler/tflchef/core/src/ModelChef.cpp @@ -93,6 +93,7 @@ DataChefRegistry &data_chef_registry(const tflchef::TensorType &type) static DataChefRegistry boolean; static DataChefRegistry s16; static DataChefRegistry fp16; + static DataChefRegistry s8; switch (type) { @@ -112,6 +113,8 @@ DataChefRegistry &data_chef_registry(const tflchef::TensorType &type) return boolean; case tflchef::INT16: return s16; + case tflchef::INT8: + return s8; default: break; } @@ -734,9 +737,19 @@ GeneratedModel cook(const ::tflchef::ModelRecipe &model_recipe) for (auto const &opcode : builtin_code_map) { tflite::OperatorCodeBuilder code_builder{*flatbuffer_builder}; - // TODO support for opcode.first >= 127 - assert(opcode.first < 127); - code_builder.add_deprecated_builtin_code(opcode.first); + // 127 is BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES + // This is the way to handle deprecated builtin code + // See + // https://github.com/tensorflow/tensorflow/blob/a0afe8f9218be5eb9ed5dffc2dff652996da8c28/tensorflow/lite/schema/schema.fbs#L1061-L1077 + if (opcode.first < 127) + { + code_builder.add_deprecated_builtin_code(opcode.first); + } + else + { + code_builder.add_deprecated_builtin_code( + ::tflite::BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES); + } code_builder.add_version(opcode.second); code_builder.add_builtin_code(opcode.first); auto code = code_builder.Finish(); diff --git a/compiler/tflchef/core/src/Op/Gelu.cpp 
b/compiler/tflchef/core/src/Op/Gelu.cpp new file mode 100644 index 000000000..91d2bb36c --- /dev/null +++ b/compiler/tflchef/core/src/Op/Gelu.cpp @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "Gelu.h" + +flatbuffers::Offset<void> GeluChef::value(flatbuffers::FlatBufferBuilder &fbb) const +{ + assert(_operation->has_gelu_options()); + + const auto &options = _operation->gelu_options(); + + tflite::GeluOptionsBuilder options_builder{fbb}; + options_builder.add_approximate(options.approximate()); + + return options_builder.Finish().Union(); +} + +std::unique_ptr<OpChef> GeluChefFactory::create(const tflchef::Operation *operation) const +{ + return std::unique_ptr<OpChef>{new GeluChef{operation}}; +} diff --git a/compiler/tflchef/core/src/Op/Gelu.h b/compiler/tflchef/core/src/Op/Gelu.h new file mode 100644 index 000000000..64d9361e6 --- /dev/null +++ b/compiler/tflchef/core/src/Op/Gelu.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __OP_GELU_H__ +#define __OP_GELU_H__ + +#include "OpChef.h" + +class GeluChef final : public OpChef +{ +public: + explicit GeluChef(const tflchef::Operation *operation) : _operation{operation} + { + // DO NOTHING + } + +public: + tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_GELU; } + + tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_GeluOptions; } + + flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override; + +private: + const tflchef::Operation *_operation; +}; + +struct GeluChefFactory final : public OpChefFactory +{ + std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override; +}; + +#endif // __OP_GELU_H__ diff --git a/compiler/tflchef/core/src/Op/HardSwish.cpp b/compiler/tflchef/core/src/Op/HardSwish.cpp new file mode 100644 index 000000000..27ab8b5ab --- /dev/null +++ b/compiler/tflchef/core/src/Op/HardSwish.cpp @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "HardSwish.h" + +flatbuffers::Offset<void> HardSwishChef::value(flatbuffers::FlatBufferBuilder &fbb) const +{ + return flatbuffers::Offset<void>(); +} + +std::unique_ptr<OpChef> HardSwishChefFactory::create(const tflchef::Operation *operation) const +{ + return std::unique_ptr<OpChef>{new HardSwishChef{operation}}; +} diff --git a/compiler/tflchef/core/src/Op/HardSwish.h b/compiler/tflchef/core/src/Op/HardSwish.h new file mode 100644 index 000000000..10ed51e61 --- /dev/null +++ b/compiler/tflchef/core/src/Op/HardSwish.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __OP_HARDSWISH_H__ +#define __OP_HARDSWISH_H__ + +#include "OpChef.h" + +class HardSwishChef final : public OpChef +{ +public: + explicit HardSwishChef(const tflchef::Operation *operation) : _operation{operation} + { + // DO NOTHING + } + +public: + tflite::BuiltinOperator code(void) const override { return tflite::BuiltinOperator_HARD_SWISH; } + + tflite::BuiltinOptions type(void) const override { return tflite::BuiltinOptions_NONE; } + + flatbuffers::Offset<void> value(flatbuffers::FlatBufferBuilder &fbb) const override; + +private: + const tflchef::Operation *_operation; +}; + +struct HardSwishChefFactory final : public OpChefFactory +{ + std::unique_ptr<OpChef> create(const tflchef::Operation *operation) const override; +}; + +#endif // __OP_HARDSWISH_H__ diff --git a/compiler/tflchef/core/src/Op/TransposeConv.cpp b/compiler/tflchef/core/src/Op/TransposeConv.cpp index c9e452714..530ebae78 100644 --- a/compiler/tflchef/core/src/Op/TransposeConv.cpp +++ b/compiler/tflchef/core/src/Op/TransposeConv.cpp @@ -34,6 +34,13 @@ flatbuffers::Offset<void> TransposeConvChef::value(flatbuffers::FlatBufferBuilde options_builder.add_stride_h(operation.transpose_conv_options().stride_h()); options_builder.add_stride_w(operation.transpose_conv_options().stride_w()); + // TODO remove calling has_activation + auto chef_activation = operation.transpose_conv_options().has_activation() + ? 
operation.transpose_conv_options().activation() + : tflchef::NONE; + auto tflite_activation = as_tflite_activation(chef_activation); + options_builder.add_fused_activation_function(tflite_activation); + return options_builder.Finish().Union(); } diff --git a/compiler/tflchef/core/src/OpChef.def b/compiler/tflchef/core/src/OpChef.def index c19d00dfb..9a2164640 100644 --- a/compiler/tflchef/core/src/OpChef.def +++ b/compiler/tflchef/core/src/OpChef.def @@ -35,8 +35,10 @@ OP_CHEF(FloorMod, FloorModChefFactory) OP_CHEF(FullyConnected, FullyConnectedChefFactory) OP_CHEF(Gather, GatherChefFactory) OP_CHEF(GatherNd, GatherNdChefFactory) +OP_CHEF(Gelu, GeluChefFactory) OP_CHEF(Greater, GreaterChefFactory) OP_CHEF(GreaterEqual, GreaterEqualChefFactory) +OP_CHEF(HardSwish, HardSwishChefFactory) OP_CHEF(If, IfChefFactory) OP_CHEF(L2Normalize, L2NormalizeChefFactory) OP_CHEF(L2Pool2D, L2Pool2DChefFactory) @@ -123,6 +125,7 @@ OP_CHEF(AddV2, AddV2ChefFactory) OP_CHEF(All, AllChefFactory) OP_CHEF(BatchMatMulV2, BatchMatMulV2ChefFactory) OP_CHEF(BroadcastTo, BroadcastToChefFactory) +OP_CHEF(Erf, ErfChefFactory) OP_CHEF(MatMul, MatMulChefFactory) OP_CHEF(MatrixBandPart, MatrixBandPartChefFactory) OP_CHEF(MaxPoolWithArgmax, MaxPoolWithArgmaxChefFactory) diff --git a/compiler/tflchef/core/src/OpChefs.h b/compiler/tflchef/core/src/OpChefs.h index 3cd3be558..ba2b17571 100644 --- a/compiler/tflchef/core/src/OpChefs.h +++ b/compiler/tflchef/core/src/OpChefs.h @@ -48,8 +48,10 @@ #include "Op/FullyConnected.h" #include "Op/Gather.h" #include "Op/GatherNd.h" +#include "Op/Gelu.h" #include "Op/Greater.h" #include "Op/GreaterEqual.h" +#include "Op/HardSwish.h" #include "Op/If.h" #include "Op/L2Normalize.h" #include "Op/L2Pool2D.h" @@ -135,6 +137,7 @@ #include "CustomOp/All.h" #include "CustomOp/BatchMatMulV2.h" #include "CustomOp/BroadcastTo.h" +#include "CustomOp/Erf.h" #include "CustomOp/MatMul.h" #include "CustomOp/MatrixBandPart.h" #include "CustomOp/MaxPoolWithArgmax.h" |