author     Hyunsoo Park <hance.park@samsung.com>    2021-10-25 15:31:04 +0900
committer  Hyunsoo Park <hance.park@samsung.com>    2021-10-25 16:48:22 +0900
commit     4a8997a298a3b2f601c566cb815ddc129828492e (patch)
tree       212351414455b7fbc0060e62bb56177859fba5e5
parent     8a9f3702a23dea5b2daeada684cc48f8792a4b3a (diff)
Excludes tuning level when READ mode is set

Change-Id: I33210956a9af5ef94614a582eda945dcab350724
Signed-off-by: Hyunsoo Park <hance.park@samsung.com>

Refs: tizen_7.0_m2_release, submit/tizen/20220105.081745, submit/tizen/20220105.080154, accepted/tizen/unified/20220110.140027, accepted/tizen/7.0/unified/hotfix/20221116.105353, accepted/tizen/7.0/unified/20221110.060517, tizen_devel, tizen_7.0_hotfix, tizen_7.0, accepted/tizen_7.0_unified_hotfix, accepted/tizen_7.0_unified
 packaging/inference-engine-armnn.spec |  2 +-
 src/inference_engine_armnn.cpp        | 28 +++++++++++++++++++++---------
 2 files changed, 20 insertions(+), 10 deletions(-)
diff --git a/packaging/inference-engine-armnn.spec b/packaging/inference-engine-armnn.spec
index 83e7a44..7347c76 100644
--- a/packaging/inference-engine-armnn.spec
+++ b/packaging/inference-engine-armnn.spec
@@ -1,7 +1,7 @@
 Name:       inference-engine-armnn
 Summary:    ARM Neural Network Runtime based implementation of inference-engine-interface
 Version:    0.0.1
-Release:    3
+Release:    4
 Group:      Multimedia/Libraries
 License:    Apache-2.0
 ExclusiveArch: %{arm} aarch64
diff --git a/src/inference_engine_armnn.cpp b/src/inference_engine_armnn.cpp
index 29ba8c4..5409b44 100644
--- a/src/inference_engine_armnn.cpp
+++ b/src/inference_engine_armnn.cpp
@@ -281,26 +281,36 @@ namespace ARMNNImpl
 			LOGI("CLTuner tuning file name is %s", tune_path.c_str());
 
-			// If CLTuner is read only mode then set INFERENCE_ENGINE_CLTUNER_READ
-			// to TuningLevel.
-			// Ps. if TuningLevel is INFERENCE_ENGINE_CLTUNER_READ then
+			// If CLTuner is read only mode then skip to set TuningLevel
+			// if user skipped "TuningLevel" options,
+			// then "TuningLevel::None" will be used as tuningLevel internally in armnn.
+			// Ps. if TuningLevel is "TuningLevel::None" then
 			// ARMCL will read a tuned file for inference.
-			if (mCLTuner.update == false) {
-				LOGI("CLTuner mode is read only.");
-				mCLTuner.tuning_mode = INFERENCE_ENGINE_CLTUNER_READ;
-			}
-
 			creation_options.m_BackendOptions.emplace_back(
 				armnn::BackendOptions
 				{
 					"GpuAcc",
 					{
 						{"FastMathEnabled", true},
-						{"TuningLevel", static_cast<int>(ConvertTuningType(mCLTuner.tuning_mode))},
 						{"TuningFile", tune_path.c_str()}
 					}
 				}
 			);
+
+			// If TuningLevel is set,
+			// ARMCL will generate a tuned file as specified by user.
+			if (mCLTuner.update) {
+				LOGI("CLTuner tuning mode is set %d", mCLTuner.tuning_mode);
+				creation_options.m_BackendOptions.emplace_back(
+					armnn::BackendOptions
+					{
+						"GpuAcc",
+						{
+							{"TuningLevel", static_cast<int>(ConvertTuningType(mCLTuner.tuning_mode))}
+						}
+					}
+				);
+			}
 		} else {
 			creation_options.m_BackendOptions.emplace_back(
 				armnn::BackendOptions
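For quick reference, here is a condensed, standalone sketch of the behaviour this patch leaves in place: FastMathEnabled and TuningFile are always handed to the GpuAcc backend, while TuningLevel is only added when tuning was explicitly requested, so a read-only run falls back to ArmNN's internal TuningLevel::None and only reads the existing tuning file. The armnn::IRuntime::CreationOptions, armnn::BackendOptions and GpuAcc option keys below are the ones used in the diff above; the CLTunerConfig struct and the ConfigureGpuAccTuning() helper are simplified, hypothetical stand-ins for this file's mCLTuner member and ConvertTuningType() call, not part of the actual change.

// Sketch only; CLTunerConfig and ConfigureGpuAccTuning() are illustrative
// stand-ins, not symbols from inference-engine-armnn.
#include <string>
#include <armnn/BackendOptions.hpp>
#include <armnn/IRuntime.hpp>

struct CLTunerConfig {
	bool update = false;   // false -> read-only mode (no TuningLevel passed)
	int tuning_mode = 0;   // tuning level already converted to ArmNN's numeric value
};

static void ConfigureGpuAccTuning(armnn::IRuntime::CreationOptions& creation_options,
                                  const CLTunerConfig& tuner,
                                  const std::string& tune_path)
{
	// Always hand the tuning file (and fast math) to the GpuAcc backend.
	creation_options.m_BackendOptions.emplace_back(
		armnn::BackendOptions{
			"GpuAcc",
			{
				{"FastMathEnabled", true},
				{"TuningFile", tune_path.c_str()}
			}
		});

	// TuningLevel is only set when tuning was requested; in read-only mode the
	// option is omitted, so ArmNN uses TuningLevel::None and just reads the file.
	if (tuner.update) {
		creation_options.m_BackendOptions.emplace_back(
			armnn::BackendOptions{
				"GpuAcc",
				{
					{"TuningLevel", tuner.tuning_mode}
				}
			});
	}
}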