-rw-r--r--  src/inference_engine_tflite.cpp        | 44
-rw-r--r--  src/inference_engine_tflite_private.h  |  2
2 files changed, 46 insertions, 0 deletions
diff --git a/src/inference_engine_tflite.cpp b/src/inference_engine_tflite.cpp
index edb27c2..5c3eae9 100644
--- a/src/inference_engine_tflite.cpp
+++ b/src/inference_engine_tflite.cpp
@@ -30,6 +30,8 @@ namespace InferenceEngineImpl
{
namespace TFLiteImpl
{
+ static unsigned int dummy_buffer;
+
InferenceTFLite::InferenceTFLite()
{
LOGI("ENTER");
@@ -40,6 +42,8 @@ namespace TFLiteImpl
{
if (mDelegate)
TfLiteGpuDelegateV2Delete(mDelegate);
+
+ _constTensorIdx.clear();
}
int InferenceTFLite::SetPrivateData(void *data)
@@ -181,6 +185,13 @@ namespace TFLiteImpl
return INFERENCE_ENGINE_ERROR_NONE;
}
+ void InferenceTFLite::addConstTensorIdx(inference_engine_tensor_buffer &tensor_buffer, const std::string &layerName)
+ {
+ tensor_buffer.buffer = static_cast<void *>(&dummy_buffer);
+ _constTensorIdx.insert(std::make_pair(mInterpreter->tensor(mOutputLayerId[layerName])->name,
+ mOutputLayerId[layerName]));
+ }
+
int InferenceTFLite::GetOutputTensorBuffers(
std::map<std::string, inference_engine_tensor_buffer> &buffers)
{
@@ -190,6 +201,8 @@ namespace TFLiteImpl
SetInterpreterInfo();
}
+ _constTensorIdx.clear();
+
for (auto& layer : mOutputLayers) {
inference_engine_tensor_buffer buffer;
size_t size = 1;
@@ -202,16 +215,28 @@ namespace TFLiteImpl
LOGI("type is kTfLiteUInt8");
pBuff = static_cast<void *>(mInterpreter->typed_tensor<uint8_t>(mOutputLayerId[layer.first]));
buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_UINT8, size, 1 };
+
+ if (pBuff == nullptr && size == 1)
+ addConstTensorIdx(buffer, layer.first);
+
break;
case kTfLiteInt64:
LOGI("type is kTfLiteInt64");
pBuff = static_cast<void *>(mInterpreter->typed_tensor<int64_t>(mOutputLayerId[layer.first]));
buffer = {pBuff, INFERENCE_TENSOR_DATA_TYPE_INT64, size * 8, 1};
+
+ if (pBuff == nullptr && size == 1)
+ addConstTensorIdx(buffer, layer.first);
+
break;
case kTfLiteFloat32:
LOGI("type is kTfLiteFloat32");
pBuff = static_cast<void *>(mInterpreter->typed_tensor<float>(mOutputLayerId[layer.first]));
buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, size * 4, 1 };
+
+ if (pBuff == nullptr && size == 1)
+ addConstTensorIdx(buffer, layer.first);
+
break;
default:
LOGE("Not supported");
@@ -353,6 +378,25 @@ namespace TFLiteImpl
return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
}
+ // If an output tensor is a const type, rebind its buffer here: const tensor buffers are only allocated after Invoke().
+ if (!_constTensorIdx.empty()) {
+ for (auto &m : _constTensorIdx) {
+ auto &dstTensor = output_buffers[m.first];
+
+ if (dstTensor.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
+ dstTensor.buffer = mInterpreter->typed_tensor<uint8_t>(m.second);
+ }
+ if (dstTensor.data_type == INFERENCE_TENSOR_DATA_TYPE_INT64) {
+ dstTensor.buffer = mInterpreter->typed_tensor<int64_t>(m.second);
+ }
+ if (dstTensor.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
+ dstTensor.buffer = mInterpreter->typed_tensor<float>(m.second);
+ }
+ }
+
+ _constTensorIdx.clear();
+ }
+
LOGI("LEAVE");
return INFERENCE_ENGINE_ERROR_NONE;
}
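
For context, here is a minimal standalone sketch (not part of this patch) of the TFLite behavior the change works around: per the in-code comment above, a const output tensor's buffer is only materialized by Invoke(), so typed_tensor<T>() can still return nullptr beforehand and the pointer has to be re-fetched afterwards. The model path below is hypothetical.

#include <cstdio>
#include <memory>

#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>

int main()
{
    // Hypothetical model file; any .tflite model with a const output behaves this way.
    auto model = tflite::FlatBufferModel::BuildFromFile("model.tflite");
    tflite::ops::builtin::BuiltinOpResolver resolver;
    std::unique_ptr<tflite::Interpreter> interpreter;
    tflite::InterpreterBuilder(*model, resolver)(&interpreter);
    interpreter->AllocateTensors();

    const int outIdx = interpreter->outputs()[0];

    // For a const output tensor this can still be nullptr here ...
    float *before = interpreter->typed_tensor<float>(outIdx);

    interpreter->Invoke();

    // ... so the pointer must be re-fetched after Invoke(), which is what
    // the _constTensorIdx bookkeeping in this patch does per output layer.
    float *after = interpreter->typed_tensor<float>(outIdx);
    std::printf("before=%p after=%p\n", static_cast<void *>(before),
                static_cast<void *>(after));
    return 0;
}
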
diff --git a/src/inference_engine_tflite_private.h b/src/inference_engine_tflite_private.h
index 8009022..4ab5d66 100644
--- a/src/inference_engine_tflite_private.h
+++ b/src/inference_engine_tflite_private.h
@@ -90,12 +90,14 @@ namespace TFLiteImpl
const std::vector<int>& buffer);
int FillLayer(std::map<std::string, inference_engine_tensor_info>& layers,
std::map<std::string, int>& layerId);
+ void addConstTensorIdx(inference_engine_tensor_buffer &tensor_buffer, const std::string &layerName);
std::unique_ptr<tflite::Interpreter> mInterpreter;
std::unique_ptr<tflite::FlatBufferModel> mFlatBuffModel;
std::map<std::string, inference_engine_tensor_info> mInputLayers;
std::map<std::string, inference_engine_tensor_info> mOutputLayers;
+ std::map<std::string, int> _constTensorIdx;
std::map<std::string, int> mInputLayerId;
std::map<std::string, int> mOutputLayerId;
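
Finally, a self-contained illustration of the deferred-binding pattern this patch introduces: hand out a placeholder pointer first, then patch the same buffer struct in place once the real data exists after Invoke(). TensorBuffer and the names below are simplified stand-ins for illustration, not the engine's real types.

#include <cstddef>
#include <cstdio>
#include <map>
#include <string>

// Simplified stand-in for inference_engine_tensor_buffer.
struct TensorBuffer {
    void *buffer;
    std::size_t size;
};

static unsigned int dummy_buffer; // placeholder address, mirrors the patch

int main()
{
    std::map<std::string, TensorBuffer> buffers;
    std::map<std::string, int> constTensorIdx;

    // Pre-Invoke(): the real output buffer does not exist yet, so record the
    // tensor index and point the caller at the dummy instead of nullptr.
    buffers["score"] = { &dummy_buffer, 1 };
    constTensorIdx["score"] = 0;

    // Post-Invoke(): rebind every recorded buffer to the now-valid pointer,
    // then clear the map so the next run starts fresh.
    static float real_output[1] = { 0.9f };
    for (auto &m : constTensorIdx)
        buffers[m.first].buffer = real_output;
    constTensorIdx.clear();

    std::printf("score = %f\n",
                static_cast<float *>(buffers["score"].buffer)[0]);
    return 0;
}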