summaryrefslogtreecommitdiff
path: root/include
diff options
context:
space:
mode:
author: 오형석/동작제어Lab(SR)/Staff Engineer/삼성전자 <hseok82.oh@samsung.com> 2018-11-08 16:12:55 +0900
committer: 이춘석/동작제어Lab(SR)/Staff Engineer/삼성전자 <chunseok.lee@samsung.com> 2018-11-08 16:12:55 +0900
commit: 19bb8e8e56b2775f5e77d0b562d0b08322635c74 (patch)
tree: a2c86bd2389f28ca782c504f5cdf9699f74d7608 /include
parent: d01c9dc2320bdc1c218843e38e3e53f72b64142c (diff)
download: nnfw-19bb8e8e56b2775f5e77d0b562d0b08322635c74.tar.gz
          nnfw-19bb8e8e56b2775f5e77d0b562d0b08322635c74.tar.bz2
          nnfw-19bb8e8e56b2775f5e77d0b562d0b08322635c74.zip
Update tensorflow to v1.12 (#3486)
* Update external package
  - Update external package version
  - Update cmake to build tflite
  - Add gitignore for new external code
* Update library to support tflite
  - Update nnapi delegate
  - Update NeuralNetworksShim.h for new API
  - Fix cmake for new files in updated tensorflow
  - Workaround: Introduce OBS_BUILD flag to avoid gbs build fail
* Update neurun CPU kernel
  - Update neurun CPU kernel to call updated tflite interpreter kernel
  - Add build option -fPIC for updated tflite kernel

Signed-off-by: Hyeongseok Oh <hseok82.oh@samsung.com>
Diffstat (limited to 'include')
-rw-r--r--include/NeuralNetworksShim.h33
-rw-r--r--include/support/tflite/nnapi_delegate.h13
2 files changed, 44 insertions, 2 deletions
diff --git a/include/NeuralNetworksShim.h b/include/NeuralNetworksShim.h
index a7bd745fb..b310a44cd 100644
--- a/include/NeuralNetworksShim.h
+++ b/include/NeuralNetworksShim.h
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-// NOTE This header is derived from the following file (in TensorFlow)
+// NOTE This header is derived from the following file (in TensorFlow v1.12)
// 'externals/tensorflow/tensorflow/contrib/lite/nnapi/NeuralNetworksShim.h'
#ifndef __NEURAL_NETWORKS_SHIM__
#define __NEURAL_NETWORKS_SHIM__
@@ -68,6 +68,9 @@ typedef int (*ANeuralNetworksModel_identifyInputsAndOutputs_fn)(
ANeuralNetworksModel* model, uint32_t inputCount, const uint32_t* inputs,
uint32_t outputCount, const uint32_t* outputs);
+typedef int (*ANeuralNetworksModel_relaxComputationFloat32toFloat16_fn)(
+ ANeuralNetworksModel* model, bool allow);
+
typedef int (*ANeuralNetworksExecution_create_fn)(
ANeuralNetworksCompilation* compilation,
ANeuralNetworksExecution** execution);
@@ -360,6 +363,34 @@ inline int ANeuralNetworksModel_identifyInputsAndOutputs(
}
/**
+ * Specifies whether {@link ANEURALNETWORKS_TENSOR_FLOAT32} is allowed to be
+ * calculated with range and/or precision as low as that of the IEEE 754 16-bit
+ * floating-point format. By default, {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * must be calculated using at least the range and precision of the IEEE 754
+ * 32-bit floating-point format.
+ *
+ * @param model The model to be modified.
+ * @param allow 'true' indicates {@link ANEURALNETWORKS_TENSOR_FLOAT32} may be
+ * calculated with range and/or precision as low as that of the
+ * IEEE 754 16-bit floating point format. 'false' indicates
+ * {@link ANEURALNETWORKS_TENSOR_FLOAT32} must be calculated using
+ * at least the range and precision of the IEEE 754 32-bit floating
+ * point format.
+ *
+ * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has
+ * been called will return an error.
+ *
+ * Available since API level 28.
+ *
+ * See {@link ANeuralNetworksModel} for information on multithreaded usage.
+ */
+inline int ANeuralNetworksModel_relaxComputationFloat32toFloat16(
+ ANeuralNetworksModel* model, bool allow) {
+ LOAD_FUNCTION(ANeuralNetworksModel_relaxComputationFloat32toFloat16);
+ EXECUTE_FUNCTION_RETURN(model, allow);
+}
+
+/**
* Create a {@link ANeuralNetworksCompilation} to compile the given model.
* This only creates the object. Compilation is only performed once
* {@link ANeuralNetworksCompilation_start} is invoked.
diff --git a/include/support/tflite/nnapi_delegate.h b/include/support/tflite/nnapi_delegate.h
index a5da8ac39..b396d77f2 100644
--- a/include/support/tflite/nnapi_delegate.h
+++ b/include/support/tflite/nnapi_delegate.h
@@ -17,18 +17,24 @@ limitations under the License.
// NOTE To minimize diff with upstream tensorflow, disable clang-format
// clang-format off
-// NOTE This header is derived from the following file (in TensorFlow)
+// NOTE This header is derived from the following file (in TensorFlow v1.12)
// 'externals/tensorflow/tensorflow/contrib/lite/nnapi_delegate.h'
#ifndef __NNFW_SUPPORT_TFLITE_NNAPI_DELEGATE_H__
#define __NNFW_SUPPORT_TFLITE_NNAPI_DELEGATE_H__
#include "tensorflow/contrib/lite/allocation.h"
+#ifdef OBS_BUILD
#include "tensorflow/contrib/lite/context.h"
#include "tensorflow/contrib/lite/error_reporter.h"
+#else
+#include "tensorflow/contrib/lite/c/c_api_internal.h"
+#include "tensorflow/contrib/lite/core/api/error_reporter.h"
+#endif
#include "tensorflow/contrib/lite/interpreter.h"
#include "NeuralNetworksShim.h"
class ANeuralNetworksModel;
+class ANeuralNetworksMemory;
class ANeuralNetworksCompilation;
namespace nnfw {
@@ -62,11 +68,16 @@ class NNAPIDelegate {
// Run
TfLiteStatus Invoke(::tflite::Interpreter* interpreter);
+ // Whether the current platform supports NNAPI delegation.
+ static bool IsSupported();
+
private:
// The NN API model handle
ANeuralNetworksModel* nn_model_ = nullptr;
// The NN API compilation handle
ANeuralNetworksCompilation* nn_compiled_model_ = nullptr;
+ // Model status
+ TfLiteStatus model_status_ = kTfLiteOk;
// List of state tensors for LSTM, RNN, SVDF.
// NN API does not allow ops to maintain states across multiple