/**
 * Copyright (c) 2019 Samsung Electronics Co., Ltd All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __INFERENCE_ENGINE_IMPL_TFLite_H__
#define __INFERENCE_ENGINE_IMPL_TFLite_H__

#include <inference_engine_common.h>

#include "tensorflow2/lite/delegates/gpu/delegate.h"
#include "tensorflow2/lite/kernels/register.h"
#include "tensorflow2/lite/model.h"
#include "tensorflow2/lite/optional_debug_tools.h"

#include <memory>
#include <dlog.h>
/**
 * @file inference_engine_tflite_private.h
 * @brief This file contains the InferenceTFLite class, which
 *        provides TensorFlow Lite based inference functionality.
 */

#ifdef LOG_TAG
#undef LOG_TAG
#endif

#define LOG_TAG "INFERENCE_ENGINE_TFLITE"

using namespace InferenceEngineInterface::Common;

namespace InferenceEngineImpl
{
namespace TFLiteImpl
{
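	/**
	 * @brief TensorFlow Lite based implementation of the IInferenceEngineCommon interface.
	 *
	 * A rough usage sketch (illustrative only; enum values such as
	 * INFERENCE_MODEL_TFLITE and INFERENCE_TARGET_CPU are assumed to come
	 * from the common inference-engine interface headers):
	 *
	 * @code
	 * InferenceTFLite engine;
	 * engine.SetTargetDevices(INFERENCE_TARGET_CPU);
	 * engine.Load({ "/path/to/model.tflite" }, INFERENCE_MODEL_TFLITE);
	 *
	 * std::map<std::string, inference_engine_tensor_buffer> inputs, outputs;
	 * engine.GetInputTensorBuffers(inputs);
	 * engine.GetOutputTensorBuffers(outputs);
	 * // ... fill the input buffers with tensor data ...
	 * engine.Run(inputs, outputs);
	 * @endcode
	 */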
	class InferenceTFLite : public IInferenceEngineCommon
	{
	public:
		InferenceTFLite();
		~InferenceTFLite();

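		/** @brief Receives backend-specific private data from the common abstraction layer. */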
		int SetPrivateData(void *data) override;

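		/** @brief Selects the target device type(s), e.g. CPU or GPU, to run inference on. */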
		int SetTargetDevices(int types) override;

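		/** @brief Accepts OpenCL tuner configuration passed down from the common layer. */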
		int SetCLTuner(const inference_engine_cltuner *cltuner) final;

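		/** @brief Loads the given model file(s) of @a model_format and builds the TFLite interpreter. */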
		int Load(std::vector<std::string> model_paths,
				 inference_model_format_e model_format) override;

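		/** @brief Fills @a buffers with the model's input tensor buffers, keyed by layer name. */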
		int GetInputTensorBuffers(
				std::map<std::string, inference_engine_tensor_buffer> &buffers) override;

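		/** @brief Fills @a buffers with the model's output tensor buffers, keyed by layer name. */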
		int GetOutputTensorBuffers(
				std::map<std::string, inference_engine_tensor_buffer> &buffers) override;

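		/** @brief Reports tensor information for the input layers. */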
		int GetInputLayerProperty(
				inference_engine_layer_property &property) override;

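		/** @brief Reports tensor information for the output layers. */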
		int GetOutputLayerProperty(
				inference_engine_layer_property &property) override;

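		/** @brief Sets the input layer property (layer names and tensor information) to use. */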
		int SetInputLayerProperty(
				inference_engine_layer_property &property) override;

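		/** @brief Sets the output layer property (layer names and tensor information) to use. */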
		int SetOutputLayerProperty(
				inference_engine_layer_property &property) override;

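		/** @brief Reports the capabilities of this backend, such as supported device types. */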
		int GetBackendCapacity(inference_engine_capacity *capacity) override;

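		/** @brief Runs a single inference with @a input_buffers and writes the results to @a output_buffers. */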
		int Run(std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
				std::map<std::string, inference_engine_tensor_buffer> &output_buffers)
				override;

	private:
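		/** @brief Collects input/output layer information from the built interpreter. */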
		int SetInterpreterInfo();
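		/** @brief Maps each layer name in @a layers to its tensor index taken from @a buffer. */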
		void FillLayerId(std::map<std::string, int>& layerId,
				std::map<std::string, inference_engine_tensor_info>& layers,
				const std::vector<int>& buffer);
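		/** @brief Fills @a layers with tensor information for the layers listed in @a layerId. */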
		int FillLayer(std::map<std::string, inference_engine_tensor_info>& layers,
				std::map<std::string, int>& layerId);

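		/** TFLite interpreter state: the flatbuffer model, the interpreter built
		 *  from it, and raw pointers to its input tensor data. */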
		std::unique_ptr<tflite::Interpreter> mInterpreter;
		std::unique_ptr<tflite::FlatBufferModel> mFlatBuffModel;
		std::vector<void *> mInputData;

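		/** Tensor information of the input/output layers, keyed by layer name. */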
		std::map<std::string, inference_engine_tensor_info> mInputLayers;
		std::map<std::string, inference_engine_tensor_info> mOutputLayers;

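		/** Tensor indices of the input/output layers inside the interpreter. */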
		std::map<std::string, int> mInputLayerId;
		std::map<std::string, int> mOutputLayerId;

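		/** Model file paths and the requested target device type(s). */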
		std::string mConfigFile;
		std::string mWeightFile;
		int mTargetTypes;
	};

} /* TFLiteImpl */
} /* InferenceEngineImpl */

#endif /* __INFERENCE_ENGINE_IMPL_TFLite_H__ */