author     Tae-Young Chung <ty83.chung@samsung.com>    2019-09-18 17:36:11 +0900
committer  Tae-Young Chung <ty83.chung@samsung.com>    2019-09-23 15:09:15 +0900
commit     9d443679de2152941e805ca04840b58da838ae9c (patch)
tree       84758d441e04ec254d2112de54430a934e9f64cc
parent     2b34612a686057e0b80e3ffd17cc7f36020355f7 (diff)
inference-engine-tflite is a plugin that provides inference only, so domain-specific functions such as vision should be removed. Instead, add the APIs GetInputLayerAttrType(), SetInputDataBuffer(), and GetInputDataPtr(), which can be used to access the input tensor memory directly.

Change-Id: I408a95c86bc2477465e5a08dab192bb6f3813ad1
Signed-off-by: Tae-Young Chung <ty83.chung@samsung.com>
-rw-r--r--  CMakeLists.txt                          |  14
-rw-r--r--  packaging/inference-engine-tflite.spec  |   4
-rw-r--r--  src/inference_engine_tflite.cpp         | 344
-rw-r--r--  src/inference_engine_tflite_private.h   |  58
4 files changed, 36 insertions(+), 384 deletions(-)
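For context, a minimal caller-side sketch of the memory-access flow this commit introduces (InferenceTFLite, tensor_t, and the error codes are taken from the diff below; the float input type and the buffer-size handling are illustrative assumptions, not part of the commit):

#include <cstring>
#include <vector>

int run_inference(InferenceTFLite &engine, const std::vector<float> &input)
{
    // Ask the backend which element type its input tensor expects;
    // the returned value is the TfLiteType recorded at Load() time.
    int attrType = engine.GetInputLayerAttrType();
    (void)attrType; // a real caller would branch on uint8 vs. float32 here

    // Write directly into the interpreter's input buffer instead of
    // going through the removed OpenCV preprocessing path.
    float *dst = static_cast<float *>(engine.GetInputDataPtr());
    std::memcpy(dst, input.data(), input.size() * sizeof(float));

    if (engine.Run() != INFERENCE_ENGINE_ERROR_NONE)
        return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;

    tensor_t results;
    return engine.GetInferenceResult(results); // fills dimInfo and data
}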
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 31dd196..49e6c16 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -8,7 +8,7 @@ SET(CMAKE_INSTALL_PREFIX /usr)
SET(PREFIX ${CMAKE_INSTALL_PREFIX})
SET(INC_DIR "${PROJECT_SOURCE_DIR}/include")
-SET(dependents "dlog inference-engine-interface-vision inference-engine-interface-common tensorflow-lite")
+SET(dependents "dlog inference-engine-interface-common tensorflow-lite")
INCLUDE(FindPkgConfig)
pkg_check_modules(${fw_name} REQUIRED ${dependents})
@@ -23,16 +23,6 @@ ENDFOREACH(flag)
#Remove leading whitespace POLICY CMP0004
STRING(REGEX REPLACE "^ " "" EXTRA_LDFLAGS ${EXTRA_LDFLAGS})
-#OpenCV
-FIND_PACKAGE(OpenCV REQUIRED core imgproc)
-if(NOT OpenCV_FOUND)
- MESSAGE(SEND_ERROR "OpenCV NOT FOUND")
- RETURN()
-else()
- INCLUDE_DIRECTORIES(${OpenCV_INCLUDE_DIRS})
-endif()
-
-
SET(CMAKE_C_FLAGS "-I./include -I./include/headers ${CMAKE_C_FLAGS} ${EXTRA_CFLAGS} -fPIC -Wall -w")
SET(CMAKE_C_FLAGS_DEBUG "-O0 -g")
@@ -47,7 +37,7 @@ SET(CMAKE_EXE_LINKER_FLAGS "-Wl,--as-needed -Wl,--rpath=${LIB_INSTALL_DIR}")
aux_source_directory(src SOURCES)
ADD_LIBRARY(${fw_name} SHARED ${SOURCES})
-TARGET_LINK_LIBRARIES(${fw_name} ${OpenCV_LIBS} ${EXTRA_LDFLAGS})
+TARGET_LINK_LIBRARIES(${fw_name} ${EXTRA_LDFLAGS})
SET_TARGET_PROPERTIES(${fw_name}
diff --git a/packaging/inference-engine-tflite.spec b/packaging/inference-engine-tflite.spec
index 7332c15..5334ab5 100644
--- a/packaging/inference-engine-tflite.spec
+++ b/packaging/inference-engine-tflite.spec
@@ -1,7 +1,7 @@
Name: inference-engine-tflite
Summary: Tensorflow-Lite based implementation of inference-engine-interface
Version: 0.0.1
-Release: 6
+Release: 7
Group: Multimedia/Libraries
License: Apache-2.0
Source0: %{name}-%{version}.tar.gz
@@ -10,9 +10,7 @@ Requires(postun): /sbin/ldconfig
BuildRequires: cmake
BuildRequires: python
BuildRequires: pkgconfig(dlog)
-BuildRequires: pkgconfig(inference-engine-interface-vision)
BuildRequires: pkgconfig(inference-engine-interface-common)
-BuildRequires: pkgconfig(opencv) >= 3.4.1
BuildRequires: tensorflow-lite-devel
%description
diff --git a/src/inference_engine_tflite.cpp b/src/inference_engine_tflite.cpp
index ed611f3..82d702d 100644
--- a/src/inference_engine_tflite.cpp
+++ b/src/inference_engine_tflite.cpp
@@ -29,11 +29,9 @@
namespace InferenceEngineImpl {
namespace TFLiteImpl {
-InferenceTFLite::InferenceTFLite(std::string protoFile, std::string weightFile,
- std::string userFile) :
+InferenceTFLite::InferenceTFLite(std::string protoFile, std::string weightFile) :
mConfigFile(protoFile),
- mWeightFile(weightFile),
- mUserFile(userFile)
+ mWeightFile(weightFile)
{
LOGI("ENTER");
LOGI("LEAVE");
@@ -44,49 +42,11 @@ InferenceTFLite::~InferenceTFLite()
;
}
-int InferenceTFLite::SetUserFile()
-{
- std::ifstream fp(mUserFile.c_str());
- if (!fp.is_open()) {
- return INFERENCE_ENGINE_ERROR_INVALID_PATH;
- }
-
- std::string userListName;
- while (!fp.eof()) {
- std::getline(fp, userListName);
- if (userListName.length())
- SetUserListName(userListName);
- }
-
- fp.close();
-
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
int InferenceTFLite::SetInputTensorParam()
{
return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
}
-int InferenceTFLite::SetInputTensorParamInput(int width, int height, int dim, int ch)
-{
- mCh = ch;
- mDim = dim;
- mInputSize = cv::Size(width, height);
-
- LOGI("InputSize is %d x %d\n", mInputSize.width, mInputSize.height);
-
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceTFLite::SetInputTensorParamNorm(double deviation, double mean)
-{
- mDeviation = deviation;
- mMean = mean;
-
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
int InferenceTFLite::SetInputTensorParamNode(std::string node)
{
mInputLayer = node;
@@ -98,25 +58,6 @@ int InferenceTFLite::SetOutputTensorParam()
return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
}
-int InferenceTFLite::SetOutputTensorParamThresHold(double threshold)
-{
- mThreshold = threshold;
-
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceTFLite::SetOutputTensorParamNumbers(int number)
-{
- mOutputNumbers = number;
-
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceTFLite::SetOutputTensorParamType(int type)
-{
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
int InferenceTFLite::SetOutputTensorParamNodes(std::vector<std::string> nodes)
{
mOutputLayer = nodes;
@@ -168,16 +109,6 @@ int InferenceTFLite::Load()
mInterpreter->SetNumThreads(MV_INFERENCE_TFLITE_MAX_THREAD_NUM);
- size_t userFileLength = mUserFile.length();
- if (userFileLength > 0 && access(mUserFile.c_str(), F_OK)) {
- LOGE("userFilePath in [%s] ", mUserFile.c_str());
- return INFERENCE_ENGINE_ERROR_INVALID_PATH;
- }
-
- ret = (userFileLength > 0) ? SetUserFile() : INFERENCE_ENGINE_ERROR_NONE;
- if (ret != INFERENCE_ENGINE_ERROR_NONE)
- LOGW("Fail to read categoryList");
-
// input tensor
if (mInterpreter->inputs().size()) {
mInputLayerId = mInterpreter->inputs()[0];
@@ -240,91 +171,38 @@ int InferenceTFLite::CreateInputLayerPassage()
return INFERENCE_ENGINE_ERROR_NONE;
}
-int InferenceTFLite::PrepareInputLayerPassage(inference_input_type_e type)
+int InferenceTFLite::GetInputLayerAttrType()
{
- switch (type) {
- case INFERENCE_INPUT_GENERAL:
- break;
- case INFERENCE_INPUT_IMAGE:
- {
- if (mInputAttrType == kTfLiteUInt8)
- mMatType = CV_8UC3;
- else if (mInputAttrType == kTfLiteFloat32)
- mMatType = CV_32FC3;
- else
- return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
-
- mInputBuffer = cv::Mat(mInputSize.height, mInputSize.width, mMatType, mInputData);
- }
- break;
- default:
- LOGE("Not supported");
- return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
- }
-
- return INFERENCE_ENGINE_ERROR_NONE;
+ return mInputAttrType;
}
-int InferenceTFLite::setInput(cv::Mat cvImg)
+void * InferenceTFLite::GetInputDataPtr()
{
- mSourceSize = cvImg.size();
- int width = mInputSize.width;
- int height = mInputSize.height;
-
- //PreProcess();
- cv::Mat sample;
- if (cvImg.channels() == 3 && mCh == 1)
- cv::cvtColor(cvImg, sample, cv::COLOR_BGR2GRAY);
- else
- sample = cvImg;
-
- // size
- cv::Mat sampleResized;
- if (sample.size() != cv::Size(width, height))
- cv::resize(sample, sampleResized, cv::Size(width, height));
- else
- sampleResized = sample;
-
- // type
- cv::Mat sampleFloat;
- if (mCh == 3)
- sampleResized.convertTo(sampleFloat, CV_32FC3);
- else
- sampleResized.convertTo(sampleFloat, CV_32FC1);
-
- // normalize
- cv::Mat sampleNormalized;
- cv::Mat meanMat;
- if (mCh == 3)
- meanMat = cv::Mat(sampleFloat.size(), CV_32FC3, cv::Scalar((float)mMean, (float)mMean, (float)mMean));
- else
- meanMat = cv::Mat(sampleFloat.size(), CV_32FC1, cv::Scalar((float)mMean));
-
- cv::subtract(sampleFloat, meanMat, sampleNormalized);
-
- sampleNormalized /= (float)mDeviation;
-
- sampleNormalized.convertTo(mInputBuffer, mMatType);
+ return mInputData;
+}
+int InferenceTFLite::SetInputDataBuffer(tensor_t data)
+{
return INFERENCE_ENGINE_ERROR_NONE;
}
-int InferenceTFLite::Run(cv::Mat tensor)
+int InferenceTFLite::Run()
{
- int ret = setInput(tensor);
-
+ LOGI("ENTER");
TfLiteStatus status = mInterpreter->Invoke();
if (status != kTfLiteOk) {
LOGE("Fail to invoke with kTfLiteError");
- ret = INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
}
- return ret;
+ LOGI("LEAVE");
+ return INFERENCE_ENGINE_ERROR_NONE;
}
int InferenceTFLite::Run(std::vector<float> tensor)
{
+ LOGI("ENTER");
int dataIdx = 0;
float * inputData = static_cast<float*>(mInputData);
for( std::vector<float>::iterator iter = tensor.begin();
@@ -340,168 +218,14 @@ int InferenceTFLite::Run(std::vector<float> tensor)
return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
}
+ LOGI("LEAVE");
return INFERENCE_ENGINE_ERROR_NONE;
}
-int InferenceTFLite::GetInferenceResult(ImageClassificationResults& results)
-{
- // Will contain top N results in ascending order.
- std::vector<std::pair<float, int>> top_results;
- std::priority_queue<std::pair<float, int>,
- std::vector<std::pair<float, int>>,
- std::greater<std::pair<float, int>>> top_result_pq;
- float value;
-
- TfLiteIntArray* dims = mInterpreter->tensor(mOutputLayerId[0])->dims;
- const long count = dims->data[1];
-
- LOGI("dims size: %d", dims->size);
- for (int k = 0; k < dims->size; ++k) {
- LOGI("%d: %d", k, dims->data[k]);
- }
-
- float *prediction = mInterpreter->typed_output_tensor<float>(0);
- for (int i = 0; i < count; ++i) {
- value = prediction[i];
- // Only add it if it beats the threshold and has a chance at being in
- // the top N.
- top_result_pq.push(std::pair<float, int>(value, i));
-
- // If at capacity, kick the smallest value out.
- if (top_result_pq.size() > mOutputNumbers) {
- top_result_pq.pop();
- }
- }
-
- // Copy to output vector and reverse into descending order.
- while (!top_result_pq.empty()) {
- top_results.push_back(top_result_pq.top());
- top_result_pq.pop();
- }
- std::reverse(top_results.begin(), top_results.end());
-
- int classIdx = -1;
- results.number_of_classes = 0;
- for (int idx = 0; idx < mOutputNumbers; ++idx) {
- if (top_results[idx].first < mThreshold)
- continue;
- LOGI("idx:%d", idx);
- LOGI("classIdx: %d", top_results[idx].second);
- LOGI("classProb: %f", top_results[idx].first);
-
- classIdx = top_results[idx].second;
- results.indices.push_back(classIdx);
- results.confidences.push_back(top_results[idx].first);
- results.names.push_back(mUserListName[classIdx]);
- results.number_of_classes++;
- }
-
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceTFLite::GetInferenceResult(ObjectDetectionResults& results)
-{
- float* boxes = mInterpreter->typed_tensor<float>(mOutputLayerId[0]);
- float* classes = mInterpreter->typed_tensor<float>(mOutputLayerId[1]);
- float* scores = mInterpreter->typed_tensor<float>(mOutputLayerId[2]);
-
- int number_of_detections = (int)(*mInterpreter->typed_tensor<float>(mOutputLayerId[3]));
- int left, top, right, bottom;
- cv::Rect loc;
-
- results.number_of_objects = 0;
- for (int idx = 0; idx < number_of_detections; ++idx) {
- if (scores[idx] < mThreshold)
- continue;
-
- left = (int)(boxes[idx*4 + 1] * mSourceSize.width);
- top = (int)(boxes[idx*4 + 0] * mSourceSize.height);
- right = (int)(boxes[idx*4 + 3] * mSourceSize.width);
- bottom = (int)(boxes[idx*4 + 2] * mSourceSize.height);
-
- loc.x = left;
- loc.y = top;
- loc.width = right -left + 1;
- loc.height = bottom - top + 1;
-
- results.indices.push_back((int)classes[idx]);
- results.confidences.push_back(scores[idx]);
- results.names.push_back(mUserListName[(int)classes[idx]]);
- results.locations.push_back(loc);
- results.number_of_objects++;
-
- LOGI("objectClass: %d", (int)classes[idx]);
- LOGI("confidence:%f", scores[idx]);
- LOGI("left:%d, top:%d, right:%d, bottom:%d", left, top, right, bottom);
- }
-
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceTFLite::GetInferenceResult(FaceDetectionResults& results)
-{
- float* boxes = mInterpreter->typed_tensor<float>(mOutputLayerId[0]);
- float* classes = mInterpreter->typed_tensor<float>(mOutputLayerId[1]);
- float* scores = mInterpreter->typed_tensor<float>(mOutputLayerId[2]);
-
- int number_of_detections = (int)(*mInterpreter->typed_tensor<float>(mOutputLayerId[3]));
- int left, top, right, bottom;
- cv::Rect loc;
-
- results.number_of_faces = 0;
- for (int idx = 0; idx < number_of_detections; ++idx) {
- if (scores[idx] < mThreshold)
- continue;
-
- left = (int)(boxes[idx*4 + 1] * mSourceSize.width);
- top = (int)(boxes[idx*4 + 0] * mSourceSize.height);
- right = (int)(boxes[idx*4 + 3] * mSourceSize.width);
- bottom = (int)(boxes[idx*4 + 2] * mSourceSize.height);
-
- loc.x = left;
- loc.y = top;
- loc.width = right -left + 1;
- loc.height = bottom - top + 1;
-
- results.confidences.push_back(scores[idx]);
- results.locations.push_back(loc);
- results.number_of_faces++;
-
- LOGI("confidence:%f", scores[idx]);
- LOGI("class: %f", classes[idx]);
- LOGI("left:%f, top:%f, right:%f, bottom:%f", boxes[idx*4 + 1], boxes[idx*4 + 0], boxes[idx*4 + 3], boxes[idx*4 + 2]);
- LOGI("left:%d, top:%d, right:%d, bottom:%d", left, top, right, bottom);
- }
-
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceTFLite::GetInferenceResult(FacialLandMarkDetectionResults& results)
+int InferenceTFLite::GetInferenceResult(tensor_t& results)
{
- TfLiteIntArray* dims = mInterpreter->tensor(mOutputLayerId[0])->dims;
- const long number_of_detections = dims->data[1];
- float* loc = mInterpreter->typed_tensor<float>(mOutputLayerId[0]);
-
- cv::Point point(0,0);
- results.number_of_landmarks = 0;
- LOGI("imgW:%d, imgH:%d", mSourceSize.width, mSourceSize.height);
- for (int idx = 0; idx < number_of_detections; idx+=2) {
- point.x = (int)(loc[idx] * mSourceSize.width);
- point.y = (int)(loc[idx+1] * mSourceSize.height);
-
- results.locations.push_back(point);
- results.number_of_landmarks++;
-
- LOGI("x:%d, y:%d", point.x, point.y);
- }
-
- return INFERENCE_ENGINE_ERROR_NONE;
-}
+ LOGI("ENTER");
-int InferenceTFLite::GetInferenceResult(std::vector<std::vector<int>>& dimInfo, std::vector<float*>& results)
-{
- dimInfo.clear();
- results.clear();
TfLiteIntArray* dims = NULL;
std::vector<int> tmpDimInfo;
@@ -513,41 +237,19 @@ int InferenceTFLite::GetInferenceResult(std::vector<std::vector<int>>& dimInfo,
tmpDimInfo.push_back(dims->data[d]);
}
- dimInfo.push_back(tmpDimInfo);
- results.push_back(mInterpreter->typed_tensor<float>(mOutputLayerId[idx]));
+ results.dimInfo.push_back(tmpDimInfo);
+ results.data.push_back((void*)mInterpreter->typed_tensor<float>(mOutputLayerId[idx]));
}
+ LOGI("LEAVE");
return INFERENCE_ENGINE_ERROR_NONE;
}
-int InferenceTFLite::GetNumberOfOutputs()
-{
- return mOutputNumbers;
-}
-
-void InferenceTFLite::SetUserListName(std::string userlist)
-{
- mUserListName.push_back(userlist);
-}
-
extern "C"
{
-class IInferenceEngineVision* EngineVisionInit(std::string protoFile, std::string weightFile,
- std::string userFile)
-{
- InferenceTFLite *engine = new InferenceTFLite(protoFile, weightFile, userFile);
- return engine;
-}
-
-void EngineVisionDestroy(class IInferenceEngineVision *engine)
-{
- delete engine;
-}
-
-class IInferenceEngineCommon* EngineCommonInit(std::string protoFile, std::string weightFile,
- std::string userFile)
+class IInferenceEngineCommon* EngineCommonInit(std::string protoFile, std::string weightFile)
{
- InferenceTFLite *engine = new InferenceTFLite(protoFile, weightFile, userFile);
+ InferenceTFLite *engine = new InferenceTFLite(protoFile, weightFile);
return engine;
}
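With the vision entry points removed, the backend is now discovered purely through the common C symbols above. A minimal sketch of how a host library might bind them (symbol names and signatures follow the diff; the shared-object path, the model paths, and the existence of EngineCommonDestroy as the matching teardown symbol are assumptions):

#include <dlfcn.h>
#include <string>

typedef IInferenceEngineCommon *(*common_init_t)(std::string, std::string);
typedef void (*common_destroy_t)(IInferenceEngineCommon *);

void *handle = dlopen("libinference-engine-tflite.so", RTLD_NOW); // path assumed
common_init_t init =
    reinterpret_cast<common_init_t>(dlsym(handle, "EngineCommonInit"));
common_destroy_t destroy =
    reinterpret_cast<common_destroy_t>(dlsym(handle, "EngineCommonDestroy"));

IInferenceEngineCommon *engine = init("model.tflite", "model.tflite"); // illustrative paths
// ... SetInputTensorParamNode(), Load(), Run(), GetInferenceResult() ...
destroy(engine);
dlclose(handle);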
diff --git a/src/inference_engine_tflite_private.h b/src/inference_engine_tflite_private.h
index 9de8c5b..2c388f7 100644
--- a/src/inference_engine_tflite_private.h
+++ b/src/inference_engine_tflite_private.h
@@ -17,15 +17,13 @@
#ifndef __INFERENCE_ENGINE_IMPL_TFLite_H__
#define __INFERENCE_ENGINE_IMPL_TFLite_H__
-#include <inference_engine_vision.h>
+#include <inference_engine_common.h>
#include "tensorflow/contrib/lite/string.h"
#include "tensorflow/contrib/lite/kernels/register.h"
#include "tensorflow/contrib/lite/model.h"
#include "tensorflow/contrib/lite/context.h"
-#include <opencv2/core.hpp>
-#include <opencv2/imgproc.hpp>
#include <memory>
#include <dlog.h>
@@ -41,17 +39,15 @@
#define LOG_TAG "INFERENCE_ENGINE_TFLITE"
-using namespace InferenceEngineInterface::Vision;
using namespace InferenceEngineInterface::Common;
namespace InferenceEngineImpl {
namespace TFLiteImpl {
-class InferenceTFLite : public IInferenceEngineVision {
+class InferenceTFLite : public IInferenceEngineCommon {
public:
InferenceTFLite(std::string protoFile,
- std::string weightFile,
- std::string userFile);
+ std::string weightFile);
~InferenceTFLite();
@@ -60,19 +56,9 @@ public:
int SetInputTensorParamNode(std::string node = "input") override;
- int SetInputTensorParamInput(int width, int height, int dim, int ch) override;
-
- int SetInputTensorParamNorm(double deviation = 1.0, double mean = 0.0) override;
-
// Output Tensor Params
int SetOutputTensorParam() override;
- int SetOutputTensorParamThresHold(double threshold) override;
-
- int SetOutputTensorParamNumbers(int number) override;
-
- int SetOutputTensorParamType(int type) override;
-
int SetOutputTensorParamNodes(std::vector<std::string> nodes) override;
int SetTargetDevice(inference_target_type_e type) override;
@@ -82,29 +68,18 @@ public:
int CreateInputLayerPassage() override;
- int PrepareInputLayerPassage(inference_input_type_e type) override;
-
- int Run(cv::Mat tensor) override;
-
- int Run(std::vector<float> tensor) override;
+ int GetInputLayerAttrType() override;
- int GetInferenceResult(ImageClassificationResults& results);
+ void * GetInputDataPtr() override;
- int GetInferenceResult(ObjectDetectionResults& results);
+ int SetInputDataBuffer(tensor_t data) override;
- int GetInferenceResult(FaceDetectionResults& results);
+ int Run() override;
- int GetInferenceResult(FacialLandMarkDetectionResults& results);
-
- int GetInferenceResult(std::vector<std::vector<int>>& dimInfo, std::vector<float*>& results);
-
- int GetNumberOfOutputs() override;
+ int Run(std::vector<float> tensor) override;
- void SetUserListName(std::string userList) override;
+ int GetInferenceResult(tensor_t& results);
-public:
- int SetUserFile();
- int setInput(cv::Mat cvImg);
private:
std::unique_ptr<tflite::Interpreter> mInterpreter;
@@ -115,26 +90,13 @@ private:
int mInputLayerId;
std::vector<int> mOutputLayerId;
- int mMatType;
+
TfLiteType mInputAttrType;
void *mInputData;
- cv::Mat mInputBuffer;
-
- int mCh;
- int mDim;
- cv::Size mInputSize;
-
- double mDeviation;
- double mMean;
- double mThreshold;
- int mOutputNumbers;
- cv::Size mSourceSize;
std::string mConfigFile;
std::string mWeightFile;
- std::string mUserFile;
- std::vector<std::string> mUserListName;
};
} /* InferenceEngineImpl */
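Finally, a short sketch of consuming the tensor_t output that GetInferenceResult() now fills in place of the removed vision-specific result structs (the dimInfo/data field names appear in the .cpp diff above; float output elements are assumed, matching the typed_tensor<float> calls in the implementation):

tensor_t results;
if (engine.GetInferenceResult(results) == INFERENCE_ENGINE_ERROR_NONE) {
    for (size_t i = 0; i < results.data.size(); ++i) {
        // Element count is the product of the tensor's dimensions.
        size_t count = 1;
        for (int d : results.dimInfo[i])
            count *= static_cast<size_t>(d);

        const float *out = static_cast<const float *>(results.data[i]);
        for (size_t j = 0; j < count; ++j)
            LOGI("output[%zu][%zu] = %f", i, j, out[j]);
    }
}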