author    Inki Dae <inki.dae@samsung.com>  2022-12-29 12:47:40 +0900
committer Inki Dae <inki.dae@samsung.com>  2023-02-01 15:39:53 +0900
commit    fff95beb44ab99482fd4e11a72ba2ddf230a78de (patch)
tree      969dea49dc89b8c5a7e3f3bfa1e214d626f79481
parent    2f53bb1ddc32c9813d00552aef8c602434aad66a (diff)
add XNNPACK acceleration support
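
The core of the change: when the engine is built with USE_XNNPACK, the
backend creates a TF Lite XNNPACK delegate sized to the available cores and
hands it to the interpreter. A minimal sketch of that flow, assuming an
already-built tflite::Interpreter; the ApplyXnnpackDelegate helper and its
cleanup policy are illustrative, not code from this patch:

#include <thread>

#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"

// Illustrative helper, not part of this patch.
static bool ApplyXnnpackDelegate(tflite::Interpreter *interpreter)
{
	// Always start from the defaults so the code stays compatible with
	// future extensions of TfLiteXNNPackDelegateOptions.
	TfLiteXNNPackDelegateOptions options = TfLiteXNNPackDelegateOptionsDefault();
	options.num_threads = static_cast<int>(std::thread::hardware_concurrency());

	TfLiteDelegate *delegate = TfLiteXNNPackDelegateCreate(&options);
	if (delegate == nullptr)
		return false;

	// Re-partitions the graph so the ops XNNPACK supports run through it.
	if (interpreter->ModifyGraphWithDelegate(delegate) != kTfLiteOk) {
		TfLiteXNNPackDelegateDelete(delegate);
		return false;
	}

	// On success the delegate must outlive the interpreter, so it is not
	// freed here.
	return true;
}
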
Change-Id: I4c635c7cf9c3e3a4715e46f81c114a4458aa6d6f
Signed-off-by: Inki Dae <inki.dae@samsung.com>
-rw-r--r--  CMakeLists.txt                         |  6
-rw-r--r--  packaging/inference-engine-tflite.spec |  5
-rw-r--r--  src/inference_engine_tflite.cpp        | 38
-rw-r--r--  src/inference_engine_tflite_private.h  |  1
4 files changed, 29 insertions(+), 21 deletions(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 8c520ef..4236f65 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -39,7 +39,6 @@ ADD_LIBRARY(${fw_name} SHARED ${SOURCES})
TARGET_LINK_LIBRARIES(${fw_name} ${EXTRA_LDFLAGS})
-
SET_TARGET_PROPERTIES(${fw_name}
PROPERTIES
CLEAN_DIRECT_OUTPUT 1
@@ -53,6 +52,11 @@ INSTALL(
PATTERN "*.h"
)
+IF (${USE_XNNPACK})
+	MESSAGE("Use XNNPACK acceleration.")
+ ADD_DEFINITIONS(-DUSE_XNNPACK)
+ENDIF()
+
SET(PC_NAME ${fw_name})
SET(PC_REQUIRED ${pc_dependents})
SET(PC_LDFLAGS -l${fw_name})
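
A note on the gate above: ADD_DEFINITIONS(-DUSE_XNNPACK) passes a bare
preprocessor definition, and with GCC/Clang a bare -DNAME defines NAME to 1,
while an identifier that is never defined evaluates to 0 inside #if. That is
why the C++ side below can test the flag with #if USE_XNNPACK as well as
#ifdef. A minimal sketch of the consuming pattern (the constant name is
illustrative):

// USE_XNNPACK comes from CMake; -DUSE_XNNPACK is equivalent to -DUSE_XNNPACK=1.
#if USE_XNNPACK
static constexpr bool kXnnpackBuiltIn = true;   // illustrative name
#else
static constexpr bool kXnnpackBuiltIn = false;  // macro undefined: #if sees 0
#endif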
diff --git a/packaging/inference-engine-tflite.spec b/packaging/inference-engine-tflite.spec
index 566eae8..478bedf 100644
--- a/packaging/inference-engine-tflite.spec
+++ b/packaging/inference-engine-tflite.spec
@@ -14,11 +14,12 @@ BuildRequires: pkgconfig(inference-engine-interface-common)
BuildRequires: coregl-devel
BuildRequires: flatbuffers-devel
BuildRequires: tensorflow2-lite-devel
+# Set USE_XNNPACK to 1 to enable XNNPACK acceleration, or to 0 to disable it.
+%define build_options -DUSE_XNNPACK=1
%description
TensorFlow Lite based implementation of inference-engine-interface
-
%prep
%setup -q
@@ -29,7 +30,7 @@ export CXXFLAGS="$CXXFLAGS -DTIZEN_DEBUG_ENABLE"
export FFLAGS="$FFLAGS -DTIZEN_DEBUG_ENABLE"
%endif
-%cmake .
+%cmake %{build_options} .
make %{?jobs:-j%jobs}
diff --git a/src/inference_engine_tflite.cpp b/src/inference_engine_tflite.cpp
index 183fc3e..4351462 100644
--- a/src/inference_engine_tflite.cpp
+++ b/src/inference_engine_tflite.cpp
@@ -34,6 +34,11 @@ namespace TFLiteImpl
InferenceTFLite::InferenceTFLite(void) : mTargetTypes(INFERENCE_TARGET_NONE)
{
LOGI("ENTER");
+
+#if USE_XNNPACK
+ _use_xnnpack = true;
+#endif
+
LOGI("LEAVE");
}
@@ -116,48 +121,45 @@ namespace TFLiteImpl
LOGI("Inferece targets are: [%d]", mTargetTypes);
+ TfLiteDelegate *delegate = NULL;
+
if (mTargetTypes == INFERENCE_TARGET_GPU) {
TfLiteGpuDelegateOptionsV2 options = TfLiteGpuDelegateOptionsV2Default();
- TfLiteDelegate *delegate = TfLiteGpuDelegateV2Create(&options);
+
+ delegate = TfLiteGpuDelegateV2Create(&options);
if (!delegate){
LOGE("Failed to GPU delegate");
return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
}
-
- if (mInterpreter->ModifyGraphWithDelegate(delegate) != kTfLiteOk)
- {
- LOGE("Failed to construct GPU delegate");
- return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
- }
- } else {
+ } else if (_use_xnnpack) {
LOGI("Use XNNPACK.");
- TfLiteDelegate *delegate = NULL;
int num_threads = std::thread::hardware_concurrency();
char *env_tflite_num_threads = getenv("FORCE_TFLITE_NUM_THREADS");
- if (env_tflite_num_threads)
- {
+ if (env_tflite_num_threads) {
num_threads = atoi(env_tflite_num_threads);
- LOGI("@@@@@@ FORCE_TFLITE_NUM_THREADS(XNNPACK)=%d", num_threads);
+	LOGI("FORCE_TFLITE_NUM_THREADS for XNNPACK = %d", num_threads);
}
// IMPORTANT: initialize options with TfLiteXNNPackDelegateOptionsDefault() for
// API-compatibility with future extensions of the TfLiteXNNPackDelegateOptions
// structure.
TfLiteXNNPackDelegateOptions xnnpack_options = TfLiteXNNPackDelegateOptionsDefault();
+
xnnpack_options.num_threads = num_threads;
- delegate = TfLiteXNNPackDelegateCreate (&xnnpack_options);
+ delegate = TfLiteXNNPackDelegateCreate(&xnnpack_options);
if (!delegate) {
- LOGE("ERR: %s(%d)", __FILE__, __LINE__);
- }
-
- if (mInterpreter->ModifyGraphWithDelegate(delegate) != kTfLiteOk) {
- LOGE("Failed to construct GPU delegate");
+	LOGE("Failed to create XNNPACK delegate.");
return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
}
}
+	// delegate stays NULL on the plain CPU path, so apply it only when one was created.
+	if (delegate != NULL && mInterpreter->ModifyGraphWithDelegate(delegate) != kTfLiteOk) {
+		LOGE("Failed to modify the graph with the delegate");
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
mInterpreter->SetNumThreads(MV_INFERENCE_TFLITE_MAX_THREAD_NUM);
LOGI("mInterpreter->tensors_size() :[%zu]",
mInterpreter->tensors_size());
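
The XNNPACK path above sizes the delegate's thread pool from
std::thread::hardware_concurrency() and lets the FORCE_TFLITE_NUM_THREADS
environment variable override it. Two edge cases are worth guarding in code
like this: hardware_concurrency() may return 0 when the core count is
unknown, and atoi() returns 0 for non-numeric input. A hedged sketch of the
selection logic; the PickNumThreads helper is illustrative, not part of this
patch:

#include <cstdlib>
#include <thread>

// Illustrative helper, not part of this patch.
static int PickNumThreads()
{
	int num_threads = static_cast<int>(std::thread::hardware_concurrency());
	if (num_threads <= 0)
		num_threads = 1;  // hardware_concurrency() reports 0 when unknown

	if (const char *env = std::getenv("FORCE_TFLITE_NUM_THREADS")) {
		int forced = std::atoi(env);  // 0 on parse failure, so accept only > 0
		if (forced > 0)
			num_threads = forced;
	}

	return num_threads;
}

For example, exporting FORCE_TFLITE_NUM_THREADS=2 before launching the engine
would cap the XNNPACK thread pool at two threads.
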
diff --git a/src/inference_engine_tflite_private.h b/src/inference_engine_tflite_private.h
index 250f192..1defb31 100644
--- a/src/inference_engine_tflite_private.h
+++ b/src/inference_engine_tflite_private.h
@@ -105,6 +105,7 @@ namespace TFLiteImpl
std::string mConfigFile;
std::string mWeightFile;
int mTargetTypes;
+		// Initialized to false so builds without USE_XNNPACK read a defined value.
+		bool _use_xnnpack = false;
};
} /* InferenceEngineImpl */