Diffstat (limited to 'include/NeuralNetworksShim.h')
-rw-r--r--  include/NeuralNetworksShim.h  36
1 file changed, 35 insertions(+), 1 deletion(-)
diff --git a/include/NeuralNetworksShim.h b/include/NeuralNetworksShim.h
index a7bd745fb..60b16f766 100644
--- a/include/NeuralNetworksShim.h
+++ b/include/NeuralNetworksShim.h
@@ -14,7 +14,10 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-// NOTE This header is derived from the following file (in TensorFlow)
+// NOTE To minimize diff with upstream tensorflow, disable clang-format
+// clang-format off
+
+// NOTE This header is derived from part of the following file (in TensorFlow v1.12)
// 'externals/tensorflow/tensorflow/contrib/lite/nnapi/NeuralNetworksShim.h'
#ifndef __NEURAL_NETWORKS_SHIM__
#define __NEURAL_NETWORKS_SHIM__
@@ -68,6 +71,9 @@ typedef int (*ANeuralNetworksModel_identifyInputsAndOutputs_fn)(
ANeuralNetworksModel* model, uint32_t inputCount, const uint32_t* inputs,
uint32_t outputCount, const uint32_t* outputs);
+typedef int (*ANeuralNetworksModel_relaxComputationFloat32toFloat16_fn)(
+ ANeuralNetworksModel* model, bool allow);
+
typedef int (*ANeuralNetworksExecution_create_fn)(
ANeuralNetworksCompilation* compilation,
ANeuralNetworksExecution** execution);
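
The typedef added above only declares a signature; at runtime the shim binds it against the system NNAPI library. Below is a minimal C++ sketch of that binding pattern, assuming the surrounding header declarations are in scope and a dlopen/dlsym loader is used. The getNnApiHandle helper, the library name, and the -1 fallback value are illustrative assumptions; they are not the LOAD_FUNCTION / EXECUTE_FUNCTION_RETURN macros this header actually defines.

#include <dlfcn.h>

// Illustrative only: open the NNAPI library once and cache the handle.
static void* getNnApiHandle() {
  static void* handle = dlopen("libneuralnetworks.so", RTLD_LAZY | RTLD_LOCAL);
  return handle;
}

// Bind the new function-pointer typedef by symbol name and call through it,
// returning -1 (an assumed failure value) when the symbol is unavailable,
// e.g. on devices older than API level 28.
inline int RelaxFp32ToFp16Sketch(ANeuralNetworksModel* model, bool allow) {
  static auto fn =
      reinterpret_cast<ANeuralNetworksModel_relaxComputationFloat32toFloat16_fn>(
          getNnApiHandle()
              ? dlsym(getNnApiHandle(),
                      "ANeuralNetworksModel_relaxComputationFloat32toFloat16")
              : nullptr);
  return fn != nullptr ? fn(model, allow) : -1;
}
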
@@ -360,6 +366,34 @@ inline int ANeuralNetworksModel_identifyInputsAndOutputs(
}
/**
+ * Specifies whether {@link ANEURALNETWORKS_TENSOR_FLOAT32} is allowed to be
+ * calculated with range and/or precision as low as that of the IEEE 754 16-bit
+ * floating-point format. By default, {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * must be calculated using at least the range and precision of the IEEE 754
+ * 32-bit floating-point format.
+ *
+ * @param model The model to be modified.
+ * @param allow 'true' indicates {@link ANEURALNETWORKS_TENSOR_FLOAT32} may be
+ * calculated with range and/or precision as low as that of the
+ * IEEE 754 16-bit floating-point format. 'false' indicates
+ * {@link ANEURALNETWORKS_TENSOR_FLOAT32} must be calculated using
+ * at least the range and precision of the IEEE 754 32-bit
+ * floating-point format.
+ *
+ * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has
+ * been called will return an error.
+ *
+ * Available since API level 28.
+ *
+ * See {@link ANeuralNetworksModel} for information on multithreaded usage.
+ */
+inline int ANeuralNetworksModel_relaxComputationFloat32toFloat16(
+ ANeuralNetworksModel* model, bool allow) {
+ LOAD_FUNCTION(ANeuralNetworksModel_relaxComputationFloat32toFloat16);
+ EXECUTE_FUNCTION_RETURN(model, allow);
+}
+
+/**
* Create a {@link ANeuralNetworksCompilation} to compile the given model.
* This only creates the object. Compilation is only performed once
* {@link ANeuralNetworksCompilation_start} is invoked.
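
For completeness, here is a hedged usage sketch of the new entry point. The ConfigureRelaxedPrecision wrapper and its fallback handling are illustrative assumptions; ANEURALNETWORKS_NO_ERROR (0) is the standard NNAPI success code, and per the documentation above the call must precede ANeuralNetworksModel_finish.

// Illustrative caller: opt an already-built model into relaxed fp32 math.
void ConfigureRelaxedPrecision(ANeuralNetworksModel* model) {
  // Allow TENSOR_FLOAT32 operands to be evaluated with fp16 range/precision.
  int status = ANeuralNetworksModel_relaxComputationFloat32toFloat16(model, true);
  if (status != 0 /* ANEURALNETWORKS_NO_ERROR */) {
    // The shim returns an error when the symbol is missing (API level < 28)
    // or when the model has already been finished; keep full fp32 in that case.
  }
  // ANeuralNetworksModel_finish(model) must be called after this, not before.
}
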