/*
* Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "InferenceInterface.h"

#include <cassert>

using namespace tflite;
using namespace tflite::ops::builtin;

InferenceInterface::InferenceInterface(const std::string &model_file, const bool use_nnapi)
    : _interpreter(nullptr), _model(nullptr), _sess(nullptr)
{
  // Load the flatbuffer model and build an interpreter for it. The default
  // (stderr) error reporter is used because _model outlives this constructor,
  // whereas a local StderrReporter would leave it with a dangling pointer.
  _model = FlatBufferModel::BuildFromFile(model_file.c_str());
  assert(_model != nullptr);

  BuiltinOpResolver resolver;
  InterpreterBuilder builder(*_model, resolver);
  builder(&_interpreter);

  // Wrap the interpreter in a session: NNAPI-backed if requested, otherwise
  // the plain TF Lite interpreter.
  if (use_nnapi)
  {
    _sess = std::make_shared<nnfw::tflite::NNAPISession>(_interpreter.get());
  }
  else
  {
    _sess = std::make_shared<nnfw::tflite::InterpreterSession>(_interpreter.get());
  }
  _sess->prepare();
}

InferenceInterface::~InferenceInterface() { _sess->teardown(); }

void InferenceInterface::feed(const std::string &input_name, const std::vector<float> &data,
                              const int batch, const int height, const int width,
                              const int channel)
{
  // Set input tensor: copy the caller's HWC float buffer into the input
  // tensor whose name matches input_name.
  for (const auto &id : _interpreter->inputs())
  {
    if (_interpreter->tensor(id)->name == input_name)
    {
      assert(_interpreter->tensor(id)->type == kTfLiteFloat32);

      float *p = _interpreter->tensor(id)->data.f;

      // TODO consider batch
      for (int y = 0; y < height; ++y)
      {
        for (int x = 0; x < width; ++x)
        {
          for (int c = 0; c < channel; ++c)
          {
            *p++ = data[y * width * channel + x * channel + c];
          }
        }
      }
    }
  }
}

void InferenceInterface::run(const std::string &output_name)
{
  // Run model. output_name is not used here; the session executes the whole graph.
  _sess->run();
}

void InferenceInterface::fetch(const std::string &output_name, std::vector<float> &outputs)
{
  // Get output tensor: copy the named output into `outputs`. The caller must
  // have reserved exactly getTensorSize(output_name) elements beforehand.
  for (const auto &id : _interpreter->outputs())
  {
    if (_interpreter->tensor(id)->name == output_name)
    {
      assert(_interpreter->tensor(id)->type == kTfLiteFloat32);
      assert(getTensorSize(output_name) == static_cast<int>(outputs.capacity()));

      const float *p = _interpreter->tensor(id)->data.f;
      outputs.clear();
      for (size_t i = 0; i < outputs.capacity(); ++i)
      {
        outputs.push_back(p[i]);
      }
    }
  }
}

int InferenceInterface::getTensorSize(const std::string &name)
{
  // Return the element count of the output tensor with the given name,
  // or -1 if no output tensor matches.
  for (const auto &id : _interpreter->outputs())
  {
    if (_interpreter->tensor(id)->name == name)
    {
      TfLiteTensor *t = _interpreter->tensor(id);
      int v = 1;
      for (int i = 0; i < t->dims->size; ++i)
      {
        v *= t->dims->data[i];
      }
      return v;
    }
  }
  return -1;
}
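
// Example usage (a hedged sketch, not part of the original file): the model
// path, tensor names, and 224x224x3 input shape below are illustrative
// placeholders for whatever the caller's .tflite model actually defines.
//
//   InferenceInterface infer("mobilenet_v1.tflite", /*use_nnapi=*/false);
//
//   std::vector<float> input(224 * 224 * 3);   // filled with image data by the caller
//   infer.feed("input", input, 1, 224, 224, 3);
//
//   std::vector<float> output;
//   output.reserve(infer.getTensorSize("MobilenetV1/Predictions/Reshape_1"));
//   infer.run("MobilenetV1/Predictions/Reshape_1");
//   infer.fetch("MobilenetV1/Predictions/Reshape_1", output);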