From a3c56bd968f6242eb4604651b464daeb1ae7fb95 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=EC=98=A4=ED=98=95=EC=84=9D/On-Device=20Lab=28SR=29/Staff?=
=?UTF-8?q?=20Engineer/=EC=82=BC=EC=84=B1=EC=A0=84=EC=9E=90?=
It is also the application's responsibility to ensure that there are no - * other uses of the model after calling {@link ANeuralNetworksModel_free}. - * This includes any compilation, execution object or burst object created using - * the model.
+ *It is also the application's responsibility to ensure that there are no other + * uses of the model after calling {@link ANeuralNetworksModel_free}. + * This includes any compilation or execution object created using the model.
* * Available since API level 27. */ @@ -5127,7 +5119,7 @@ typedef struct ANeuralNetworksModel ANeuralNetworksModel; * *It is also the application's responsibility to ensure that there are no other * uses of the compilation after calling {@link ANeuralNetworksCompilation_free}. - * This includes any execution object or burst object created using the compilation.
+ * This includes any execution object created using the compilation. * * Available since API level 27. */ @@ -5162,12 +5154,10 @@ typedef struct ANeuralNetworksCompilation ANeuralNetworksCompilation; * ({@link ANeuralNetworksModel_setOperandValueFromMemory}). * *An execution cannot be modified once - * {@link ANeuralNetworksExecution_burstCompute}, * {@link ANeuralNetworksExecution_compute} or * {@link ANeuralNetworksExecution_startCompute} has been called on it.
* *An execution can be applied to a model with - * {@link ANeuralNetworksExecution_burstCompute}, * {@link ANeuralNetworksExecution_compute} or * {@link ANeuralNetworksExecution_startCompute} only once. Create new * executions to do new evaluations of the model.
@@ -5176,29 +5166,20 @@ typedef struct ANeuralNetworksCompilation ANeuralNetworksCompilation; * modifies an execution at a given time. It is however safe for more than one * thread to use {@link ANeuralNetworksEvent_wait} at the same time. * - *It is also the application's responsibility to ensure that the execution - * either has never been scheduled or has completed (i.e., that - * {@link ANeuralNetworksExecution_burstCompute}, - * {@link ANeuralNetworksExecution_compute}, or - * {@link ANeuralNetworksEvent_wait} has returned) before calling - * {@link ANeuralNetworksExecution_free}.
. - * *It is also the application's responsibility to ensure that there are no other * uses of the execution after calling {@link ANeuralNetworksExecution_free}.
* *Multiple executions can be scheduled and evaluated concurrently, either by - * means of {@link ANeuralNetworksExecution_compute} or - * {@link ANeuralNetworksExecution_burstCompute} (which are synchronous) in - * different threads, or by means of - * {@link ANeuralNetworksExecution_startCompute} (which is asynchronous). - * (Concurrent uses of {@link ANeuralNetworksExecution_burstCompute} must be on - * different burst objects.) The runtime makes no guarantee on the ordering of - * completion of executions. If it's important to the application, the - * application should enforce the ordering by ensuring that one execution - * completes before the next is scheduled (for example, by scheduling all - * executions synchronously within a single thread, or by scheduling all - * executions asynchronously and using {@link ANeuralNetworksEvent_wait} between - * calls to {@link ANeuralNetworksExecution_startCompute}).
+ * means of {@link ANeuralNetworksExecution_compute} (which is synchronous) in + * different threads or by means of + * {@link ANeuralNetworksExecution_startCompute} (which is asynchronous). The + * runtime makes no guarantee on the ordering of completion of executions. If + * it's important to the application, the application should enforce the + * ordering by ensuring that one execution completes before the next is + * scheduled (for example, by scheduling all executions synchronously within a + * single thread, or by scheduling all executions asynchronously and using + * {@link ANeuralNetworksEvent_wait} between calls to + * {@link ANeuralNetworksExecution_startCompute}). * * Available since API level 27. */ @@ -5521,7 +5502,7 @@ int ANeuralNetworksCompilation_createForDevices(ANeuralNetworksModel* model, * data. It is recommended to use the code cache directory provided * by the Android runtime. If not using the code cache directory, the * user should choose a directory local to the application, and is - * responsible for managing the cache entries. + * responsible for managing the cache entries. * @param token The token provided by the user to specify a model must be of length * ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN. The user should ensure that * the token is unique to a model within the application. The NNAPI @@ -5682,6 +5663,8 @@ int ANeuralNetworksExecution_burstCompute(ANeuralNetworksExecution* execution, * backed by an AHardwareBuffer of a format other than AHARDWAREBUFFER_FORMAT_BLOB is * disallowed. * + * TODO(miaowang): add documentation about intended usage with introspection API. + * * Available since API level 29. * * @param ahwb The AHardwareBuffer handle. @@ -5793,8 +5776,7 @@ int ANeuralNetworksMemory_createFromFd(size_t size, int protect, int fd, size_t * * Available since API level 27. * - * @param memory The memory object to be freed. Passing NULL is acceptable and - * results in no operation. 
+ * @param memory The memory object to be freed. */ void ANeuralNetworksMemory_free(ANeuralNetworksMemory* memory) __INTRODUCED_IN(27); @@ -5844,8 +5826,8 @@ void ANeuralNetworksModel_free(ANeuralNetworksModel* model) __INTRODUCED_IN(27); * calling {@link ANeuralNetworksCompilation_create} and * {@link ANeuralNetworksCompilation_createForDevices}. * - * An application must ensure that no other thread uses the model at the same - * time. + * An application is responsible for making sure that no other thread uses + * the model at the same time. * * This function must only be called once for a given model. * @@ -5919,13 +5901,11 @@ int ANeuralNetworksModel_addOperand(ANeuralNetworksModel* model, * {@link ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES} * are immediately copied into the model. * - * For values of length greater than - * {@link ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES}, a pointer to - * the buffer is stored within the model. The application must not change the - * content of this region until all executions using this model have - * completed. As the data may be copied during processing, modifying the data - * after this call yields undefined results. The provided buffer must outlive - * this model. + * For values of length greater than {@link ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES}, + * a pointer to the buffer is stored within the model. The application is responsible + * for not changing the content of this region until all executions using this model + * have completed. As the data may be copied during processing, modifying the data + * after this call yields undefined results. * * For large tensors, using {@link ANeuralNetworksModel_setOperandValueFromMemory} * is likely to be more efficient. @@ -5981,12 +5961,10 @@ int ANeuralNetworksModel_setOperandSymmPerChannelQuantParams( * Sets an operand to a value stored in a memory object. * * The content of the memory is not copied. 
A reference to that memory is stored - * inside the model. The application must not change the content of the memory - * region until all executions using this model have completed. As the data may - * be copied during processing, modifying the data after this call yields - * undefined results. - * - *The provided memory must outlive this model.
+ * inside the model. The application is responsible for not changing the content + * of the memory region until all executions using this model have completed. + * As the data may be copied during processing, modifying the data after this call + * yields undefined results. * * To indicate that an optional operand should be considered missing, * use {@link ANeuralNetworksModel_setOperandValue} instead, passing nullptr for buffer. @@ -6136,7 +6114,7 @@ int ANeuralNetworksCompilation_create(ANeuralNetworksModel* model, * Destroy a compilation. * * The compilation need not have been finished by a call to - * {@link ANeuralNetworksCompilation_finish}. + * {@link ANeuralNetworksCompilation_finish}. * * See {@link ANeuralNetworksCompilation} for information on multithreaded usage. * @@ -6170,8 +6148,8 @@ int ANeuralNetworksCompilation_setPreference(ANeuralNetworksCompilation* compila * Indicate that we have finished modifying a compilation. Required before * calling {@link ANeuralNetworksExecution_create}. * - * An application must ensure that no other thread uses the compilation at the - * same time. + * An application is responsible for making sure that no other thread uses + * the compilation at the same time. * * This function must only be called once for a given compilation. * @@ -6209,15 +6187,12 @@ int ANeuralNetworksExecution_create(ANeuralNetworksCompilation* compilation, /** * Destroy an execution. * - *The execution need not have been scheduled by a call to - * {@link ANeuralNetworksExecution_burstCompute}, - * {@link ANeuralNetworksExecution_compute}, or - * {@link ANeuralNetworksExecution_startCompute}; but if it has been scheduled, - * then the application must not call {@link ANeuralNetworksExecution_free} - * until the execution has completed (i.e., - * {@link ANeuralNetworksExecution_burstCompute}, - * {@link ANeuralNetworksExecution_compute}, or - * {@link ANeuralNetworksEvent_wait} has returned). + *
If called on an execution for which + * {@link ANeuralNetworksExecution_startCompute} has been called, the + * function will return immediately but will mark the execution to be deleted + * once the computation completes. The related {@link ANeuralNetworksEvent} + * will be signaled and the {@link ANeuralNetworksEvent_wait} will return + * ANEURALNETWORKS_ERROR_DELETED. * * See {@link ANeuralNetworksExecution} for information on multithreaded usage. * @@ -6231,10 +6206,7 @@ void ANeuralNetworksExecution_free(ANeuralNetworksExecution* execution) __INTROD /** * Associate a user buffer with an input of the model of the * {@link ANeuralNetworksExecution}. Evaluation of the execution must not have - * been scheduled. Once evaluation of the execution has been scheduled, the - * application must not change the content of the buffer until the execution has - * completed. Evaluation of the execution will not change the content of the - * buffer. + * been scheduled. * *
The provided buffer must outlive the execution.
* @@ -6272,12 +6244,9 @@ int ANeuralNetworksExecution_setInput(ANeuralNetworksExecution* execution, int32 size_t length) __INTRODUCED_IN(27); /** - * Associate a region of a memory object with an input of the model of the + * Associate part of a memory object with an input of the model of the * {@link ANeuralNetworksExecution}. Evaluation of the execution must not have - * been scheduled. Once evaluation of the execution has been scheduled, the - * application must not change the content of the region until the execution has - * completed. Evaluation of the execution will not change the content of the - * region. + * been scheduled. * *The provided memory must outlive the execution.
* @@ -6321,9 +6290,7 @@ int ANeuralNetworksExecution_setInputFromMemory(ANeuralNetworksExecution* execut /** * Associate a user buffer with an output of the model of the * {@link ANeuralNetworksExecution}. Evaluation of the execution must not have - * been scheduled. Once evaluation of the execution has been scheduled, the - * application must not change the content of the buffer until the execution has - * completed. + * been scheduled. * * If the output is optional, you can indicate that it is omitted by * passing nullptr for buffer and 0 for length. @@ -6366,11 +6333,9 @@ int ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution* execution, int3 size_t length) __INTRODUCED_IN(27); /** - * Associate a region of a memory object with an output of the model of the + * Associate part of a memory object with an output of the model of the * {@link ANeuralNetworksExecution}. Evaluation of the execution must not have - * been scheduled. Once evaluation of the execution has been scheduled, the - * application must not change the content of the region until the execution has - * completed. + * been scheduled. * * If the output is optional, you can indicate that it is omitted by * using {@link ANeuralNetworksExecution_setOutput} instead, passing nullptr for @@ -6467,9 +6432,6 @@ int ANeuralNetworksEvent_wait(ANeuralNetworksEvent* event) __INTRODUCED_IN(27); * See {@link ANeuralNetworksExecution} for information on multithreaded usage. * * Available since API level 27. - * - * @param event The event object to be destroyed. Passing NULL is acceptable and - * results in no operation. 
*/ void ANeuralNetworksEvent_free(ANeuralNetworksEvent* event) __INTRODUCED_IN(27); @@ -6477,6 +6439,6 @@ void ANeuralNetworksEvent_free(ANeuralNetworksEvent* event) __INTRODUCED_IN(27); __END_DECLS -#endif // ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_H +#endif // ANDROID_ML_NN_RUNTIME_NEURAL_NETWORKS_H /** @} */ diff --git a/runtimes/include/NeuralNetworksExtensions.h b/runtimes/include/NeuralNetworksExtensions.h index 429a1dcf8..ca2e04567 100644 --- a/runtimes/include/NeuralNetworksExtensions.h +++ b/runtimes/include/NeuralNetworksExtensions.h @@ -14,8 +14,8 @@ * limitations under the License. */ -#ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_EXTENSIONS_H -#define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_EXTENSIONS_H +#ifndef ANDROID_ML_NN_RUNTIME_NEURAL_NETWORKS_EXTENSIONS_H +#define ANDROID_ML_NN_RUNTIME_NEURAL_NETWORKS_EXTENSIONS_H #include "NeuralNetworks.h" @@ -114,4 +114,4 @@ int ANeuralNetworksModel_setOperandExtensionData(ANeuralNetworksModel* model, in __END_DECLS -#endif // ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_EXTENSIONS_H +#endif // ANDROID_ML_NN_RUNTIME_NEURAL_NETWORKS_EXTENSIONS_H -- cgit v1.2.3