author    Inki Dae <inki.dae@samsung.com>  2020-11-03 18:03:46 +0900
committer Inki Dae <inki.dae@samsung.com>  2021-01-05 17:47:42 +0900
commit    3b98079db6287cf7320e2ff5bbb8e99499111a81 (patch)
tree      de553d652d7aa13858bf158280dbefc90c0cff5f /src/inference_engine_tflite.cpp
parent    6f1aaf7472a644d72a3e879c23cfe9af80c98ca3 (diff)
Add tflite 2.3 gpu delegate support
Change-Id: I289316756e3d6a4c266810c57db68a0596b1733b
Signed-off-by: Inki Dae <inki.dae@samsung.com>
Diffstat (limited to 'src/inference_engine_tflite.cpp')
-rw-r--r--  src/inference_engine_tflite.cpp  39
1 file changed, 27 insertions(+), 12 deletions(-)
diff --git a/src/inference_engine_tflite.cpp b/src/inference_engine_tflite.cpp
index 78e4f64..4c265fd 100644
--- a/src/inference_engine_tflite.cpp
+++ b/src/inference_engine_tflite.cpp
@@ -52,6 +52,21 @@ namespace TFLiteImpl
{
LOGI("ENTER");
+ switch (types) {
+ case INFERENCE_TARGET_CPU:
+ LOGI("Device type is CPU.");
+ break;
+ case INFERENCE_TARGET_GPU:
+ LOGI("Device type is GPU.");
+ break;
+ case INFERENCE_TARGET_CUSTOM:
+ case INFERENCE_TARGET_NONE:
+ default:
+ LOGW("Not supported device type [%d], Set CPU mode",
+ (int) mTargetTypes);
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
+
mTargetTypes = types;
LOGI("LEAVE");
@@ -89,18 +104,18 @@ namespace TFLiteImpl
LOGI("Inferece targets are: [%d]", mTargetTypes);
- switch (mTargetTypes) {
- case INFERENCE_TARGET_CPU:
- mInterpreter->UseNNAPI(false);
- break;
- case INFERENCE_TARGET_GPU:
- mInterpreter->UseNNAPI(true);
- break;
- case INFERENCE_TARGET_CUSTOM:
- case INFERENCE_TARGET_NONE:
- default:
- LOGW("Not supported device type [%d], Set CPU mode",
- (int) mTargetTypes);
+ if (mTargetTypes == INFERENCE_TARGET_GPU) {
+ TfLiteDelegate *delegate = TfLiteGpuDelegateV2Create(nullptr);
+ if (!delegate) {
+ LOGE("Failed to create GPU delegate");
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
+ if (mInterpreter->ModifyGraphWithDelegate(delegate) != kTfLiteOk)
+ {
+ LOGE("Failed to construct GPU delegate");
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
}
mInterpreter->SetNumThreads(MV_INFERENCE_TFLITE_MAX_THREAD_NUM);
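
For readers unfamiliar with the API this commit switches to: the first hunk adds device-type validation in SetTargetDevices(), and the second replaces the old Interpreter::UseNNAPI() toggle with the GPU delegate v2 interface provided by TensorFlow Lite 2.3 (TfLiteGpuDelegateV2Create() plus ModifyGraphWithDelegate()). The stand-alone sketch below shows that flow end to end, including delegate teardown, which this hunk does not cover. The header paths, the RunModelOnGpu() helper, its model_path argument, and the error handling are illustrative assumptions only, not part of this commit.

    #include <memory>

    #include "tensorflow/lite/interpreter.h"
    #include "tensorflow/lite/kernels/register.h"
    #include "tensorflow/lite/model.h"
    #include "tensorflow/lite/delegates/gpu/delegate.h"

    // Sketch: load a model, attach the TFLite 2.3 GPU delegate, run one inference.
    static bool RunModelOnGpu(const char *model_path)
    {
        auto model = tflite::FlatBufferModel::BuildFromFile(model_path);
        if (!model)
            return false;

        tflite::ops::builtin::BuiltinOpResolver resolver;
        std::unique_ptr<tflite::Interpreter> interpreter;
        if (tflite::InterpreterBuilder(*model, resolver)(&interpreter) != kTfLiteOk)
            return false;

        // nullptr selects the default delegate options, as in the commit;
        // TfLiteGpuDelegateOptionsV2Default() can be used to tune them instead.
        TfLiteDelegate *delegate = TfLiteGpuDelegateV2Create(nullptr);
        if (!delegate)
            return false;

        // Rewrite the graph so that supported operations run on the GPU.
        if (interpreter->ModifyGraphWithDelegate(delegate) != kTfLiteOk) {
            TfLiteGpuDelegateV2Delete(delegate);
            return false;
        }

        bool ok = interpreter->AllocateTensors() == kTfLiteOk &&
                  interpreter->Invoke() == kTfLiteOk;

        // The delegate must outlive the interpreter, so destroy the
        // interpreter first and release the delegate afterwards.
        interpreter.reset();
        TfLiteGpuDelegateV2Delete(delegate);
        return ok;
    }

Note that the hunk above never calls TfLiteGpuDelegateV2Delete(); whether the delegate is released elsewhere (for example when the engine is destroyed) cannot be determined from this diff alone.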