summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorgichan2-jang <gichan2.jang@samsung.com>2024-08-21 10:46:51 +0900
committerjaeyun-jung <39614140+jaeyun-jung@users.noreply.github.com>2024-08-26 10:27:04 +0900
commit147937b247a70a839744f033fcb4dc3c52ad6e44 (patch)
tree0278d86e16abc6584db0f76963b0f72ea47ee6eb
parent2d28b105e9c91e3f44fe68ac93a798ebda75d55c (diff)
downloadmachine-learning-147937b247a70a839744f033fcb4dc3c52ad6e44.tar.gz
machine-learning-147937b247a70a839744f033fcb4dc3c52ad6e44.tar.bz2
machine-learning-147937b247a70a839744f033fcb4dc3c52ad6e44.zip
[Refactor] Prepend `_` to internal API name
To distinguish between internal and public APIs, prepend `_` to the internal APIs. Signed-off-by: gichan2-jang <gichan2.jang@samsung.com>
-rw-r--r--c/src/ml-api-service-agent-client.c2
-rw-r--r--c/src/ml-api-service-extension.c16
-rw-r--r--c/src/ml-api-service-extension.h16
-rw-r--r--c/src/ml-api-service-offloading.c36
-rw-r--r--c/src/ml-api-service-offloading.h52
-rw-r--r--c/src/ml-api-service-private.h4
-rw-r--r--c/src/ml-api-service-query-client.c2
-rw-r--r--c/src/ml-api-service-training-offloading.c27
-rw-r--r--c/src/ml-api-service-training-offloading.h34
-rw-r--r--c/src/ml-api-service.c32
-rw-r--r--tests/capi/unittest_capi_service_offloading.cc32
-rw-r--r--tests/capi/unittest_capi_service_training_offloading.cc44
12 files changed, 149 insertions, 148 deletions
diff --git a/c/src/ml-api-service-agent-client.c b/c/src/ml-api-service-agent-client.c
index 1b7e1b0..382214e 100644
--- a/c/src/ml-api-service-agent-client.c
+++ b/c/src/ml-api-service-agent-client.c
@@ -389,7 +389,7 @@ ml_service_pipeline_get_state (ml_service_h handle, ml_pipeline_state_e * state)
* @brief Internal function to release ml-service pipeline data.
*/
int
-ml_service_pipeline_release_internal (ml_service_s * mls)
+_ml_service_pipeline_release_internal (ml_service_s * mls)
{
_ml_service_server_s *server = (_ml_service_server_s *) mls->priv;
int ret;
diff --git a/c/src/ml-api-service-extension.c b/c/src/ml-api-service-extension.c
index 755b9f9..f0532d7 100644
--- a/c/src/ml-api-service-extension.c
+++ b/c/src/ml-api-service-extension.c
@@ -615,7 +615,7 @@ _ml_extension_conf_parse_json (ml_service_s * mls, JsonObject * object)
* @brief Internal function to create ml-service extension.
*/
int
-ml_service_extension_create (ml_service_s * mls, JsonObject * object)
+_ml_service_extension_create (ml_service_s * mls, JsonObject * object)
{
ml_extension_s *ext;
g_autofree gchar *thread_name = g_strdup_printf ("ml-ext-msg-%d", getpid ());
@@ -656,7 +656,7 @@ ml_service_extension_create (ml_service_s * mls, JsonObject * object)
* @brief Internal function to release ml-service extension.
*/
int
-ml_service_extension_destroy (ml_service_s * mls)
+_ml_service_extension_destroy (ml_service_s * mls)
{
ml_extension_s *ext = (ml_extension_s *) mls->priv;
@@ -706,7 +706,7 @@ ml_service_extension_destroy (ml_service_s * mls)
* @brief Internal function to start ml-service extension.
*/
int
-ml_service_extension_start (ml_service_s * mls)
+_ml_service_extension_start (ml_service_s * mls)
{
ml_extension_s *ext = (ml_extension_s *) mls->priv;
int status = ML_ERROR_NONE;
@@ -730,7 +730,7 @@ ml_service_extension_start (ml_service_s * mls)
* @brief Internal function to stop ml-service extension.
*/
int
-ml_service_extension_stop (ml_service_s * mls)
+_ml_service_extension_stop (ml_service_s * mls)
{
ml_extension_s *ext = (ml_extension_s *) mls->priv;
int status = ML_ERROR_NONE;
@@ -754,7 +754,7 @@ ml_service_extension_stop (ml_service_s * mls)
* @brief Internal function to get the information of required input data.
*/
int
-ml_service_extension_get_input_information (ml_service_s * mls,
+_ml_service_extension_get_input_information (ml_service_s * mls,
const char *name, ml_tensors_info_h * info)
{
ml_extension_s *ext = (ml_extension_s *) mls->priv;
@@ -789,7 +789,7 @@ ml_service_extension_get_input_information (ml_service_s * mls,
* @brief Internal function to get the information of output data.
*/
int
-ml_service_extension_get_output_information (ml_service_s * mls,
+_ml_service_extension_get_output_information (ml_service_s * mls,
const char *name, ml_tensors_info_h * info)
{
ml_extension_s *ext = (ml_extension_s *) mls->priv;
@@ -831,7 +831,7 @@ ml_service_extension_get_output_information (ml_service_s * mls,
* @brief Internal function to set the information for ml-service extension.
*/
int
-ml_service_extension_set_information (ml_service_s * mls, const char *name,
+_ml_service_extension_set_information (ml_service_s * mls, const char *name,
const char *value)
{
ml_extension_s *ext = (ml_extension_s *) mls->priv;
@@ -851,7 +851,7 @@ ml_service_extension_set_information (ml_service_s * mls, const char *name,
* @brief Internal function to add an input data to process the model in ml-service extension handle.
*/
int
-ml_service_extension_request (ml_service_s * mls, const char *name,
+_ml_service_extension_request (ml_service_s * mls, const char *name,
const ml_tensors_data_h data)
{
ml_extension_s *ext = (ml_extension_s *) mls->priv;
diff --git a/c/src/ml-api-service-extension.h b/c/src/ml-api-service-extension.h
index 791c94f..c2bc6ff 100644
--- a/c/src/ml-api-service-extension.h
+++ b/c/src/ml-api-service-extension.h
@@ -21,42 +21,42 @@ extern "C" {
/**
* @brief Internal function to create ml-service extension.
*/
-int ml_service_extension_create (ml_service_s *mls, JsonObject *object);
+int _ml_service_extension_create (ml_service_s *mls, JsonObject *object);
/**
* @brief Internal function to release ml-service extension.
*/
-int ml_service_extension_destroy (ml_service_s *mls);
+int _ml_service_extension_destroy (ml_service_s *mls);
/**
* @brief Internal function to start ml-service extension.
*/
-int ml_service_extension_start (ml_service_s *mls);
+int _ml_service_extension_start (ml_service_s *mls);
/**
* @brief Internal function to stop ml-service extension.
*/
-int ml_service_extension_stop (ml_service_s *mls);
+int _ml_service_extension_stop (ml_service_s *mls);
/**
* @brief Internal function to get the information of required input data.
*/
-int ml_service_extension_get_input_information (ml_service_s *mls, const char *name, ml_tensors_info_h *info);
+int _ml_service_extension_get_input_information (ml_service_s *mls, const char *name, ml_tensors_info_h *info);
/**
* @brief Internal function to get the information of output data.
*/
-int ml_service_extension_get_output_information (ml_service_s *mls, const char *name, ml_tensors_info_h *info);
+int _ml_service_extension_get_output_information (ml_service_s *mls, const char *name, ml_tensors_info_h *info);
/**
* @brief Internal function to set the information for ml-service extension.
*/
-int ml_service_extension_set_information (ml_service_s *mls, const char *name, const char *value);
+int _ml_service_extension_set_information (ml_service_s *mls, const char *name, const char *value);
/**
* @brief Internal function to add an input data to process the model in ml-service extension handle.
*/
-int ml_service_extension_request (ml_service_s *mls, const char *name, const ml_tensors_data_h data);
+int _ml_service_extension_request (ml_service_s *mls, const char *name, const ml_tensors_data_h data);
#ifdef __cplusplus
}
diff --git a/c/src/ml-api-service-offloading.c b/c/src/ml-api-service-offloading.c
index dbd8a47..902d0c4 100644
--- a/c/src/ml-api-service-offloading.c
+++ b/c/src/ml-api-service-offloading.c
@@ -385,7 +385,7 @@ _mlrs_process_service_offloading (nns_edge_data_h data_h, void *user_data)
dir_path = _mlrs_get_model_dir_path (offloading_s, service_key);
if (offloading_s->offloading_mode == ML_SERVICE_OFFLOADING_MODE_TRAINING) {
- ret = ml_service_training_offloading_process_received_data (mls, data_h,
+ ret = _ml_service_training_offloading_process_received_data (mls, data_h,
dir_path, data, service_type);
if (NNS_EDGE_ERROR_NONE != ret) {
_ml_error_report_return (ret,
@@ -642,7 +642,7 @@ error:
* @brief Set offloading mode and private data.
*/
int
-ml_service_offloading_set_mode (ml_service_h handle,
+_ml_service_offloading_set_mode (ml_service_h handle,
ml_service_offloading_mode_e mode, void *priv)
{
ml_service_s *mls = (ml_service_s *) handle;
@@ -665,7 +665,7 @@ ml_service_offloading_set_mode (ml_service_h handle,
* @brief Get offloading mode and private data.
*/
int
-ml_service_offloading_get_mode (ml_service_h handle,
+_ml_service_offloading_get_mode (ml_service_h handle,
ml_service_offloading_mode_e * mode, void **priv)
{
ml_service_s *mls = (ml_service_s *) handle;
@@ -693,7 +693,7 @@ ml_service_offloading_get_mode (ml_service_h handle,
* @brief Internal function to release ml-service offloading data.
*/
int
-ml_service_offloading_release_internal (ml_service_s * mls)
+_ml_service_offloading_release_internal (ml_service_s * mls)
{
_ml_service_offloading_s *offloading_s;
@@ -705,10 +705,10 @@ ml_service_offloading_release_internal (ml_service_s * mls)
if (offloading_s->offloading_mode == ML_SERVICE_OFFLOADING_MODE_TRAINING) {
/**
- * 'ml_service_training_offloading_destroy' transfers internally trained models.
+ * '_ml_service_training_offloading_destroy' transfers internally trained models.
* So keep offloading handle.
*/
- if (ML_ERROR_NONE != ml_service_training_offloading_destroy (mls)) {
+ if (ML_ERROR_NONE != _ml_service_training_offloading_destroy (mls)) {
_ml_error_report
("Failed to release ml-service training offloading handle");
}
@@ -740,7 +740,7 @@ ml_service_offloading_release_internal (ml_service_s * mls)
* @brief Set value in ml-service offloading handle.
*/
int
-ml_service_offloading_set_information (ml_service_h handle, const gchar * name,
+_ml_service_offloading_set_information (ml_service_h handle, const gchar * name,
const gchar * value)
{
ml_service_s *mls = (ml_service_s *) handle;
@@ -770,7 +770,7 @@ ml_service_offloading_set_information (ml_service_h handle, const gchar * name,
offloading_s->path = g_strdup (value);
if (offloading_s->offloading_mode == ML_SERVICE_OFFLOADING_MODE_TRAINING) {
- ret = ml_service_training_offloading_set_path (mls, offloading_s->path);
+ ret = _ml_service_training_offloading_set_path (mls, offloading_s->path);
}
}
@@ -877,7 +877,7 @@ _ml_service_offloading_create_from_option (ml_service_s * mls,
}
if (ML_ERROR_NONE == ml_option_get (option, "path", (void **) (&_path))) {
- ret = ml_service_offloading_set_information (mls, "path", _path);
+ ret = _ml_service_offloading_set_information (mls, "path", _path);
if (ML_ERROR_NONE != ret) {
_ml_error_report_return (ret,
"Failed to set path in ml-service offloading handle.");
@@ -946,7 +946,7 @@ _ml_service_offloading_convert_to_option (JsonObject * object,
* @brief Internal function to parse configuration file to create offloading service.
*/
int
-ml_service_offloading_create (ml_service_h handle, JsonObject * object)
+_ml_service_offloading_create (ml_service_h handle, JsonObject * object)
{
ml_service_s *mls = (ml_service_s *) handle;
int status;
@@ -987,7 +987,7 @@ ml_service_offloading_create (ml_service_h handle, JsonObject * object)
}
if (json_object_has_member (offloading, "training")) {
- status = ml_service_training_offloading_create (mls, offloading);
+ status = _ml_service_training_offloading_create (mls, offloading);
if (status != ML_ERROR_NONE) {
_ml_logw ("Failed to parse training from configuration file.");
}
@@ -1004,7 +1004,7 @@ done:
* @brief Internal function to start ml-service offloading.
*/
int
-ml_service_offloading_start (ml_service_h handle)
+_ml_service_offloading_start (ml_service_h handle)
{
ml_service_s *mls = (ml_service_s *) handle;
_ml_service_offloading_s *offloading_s;
@@ -1018,7 +1018,7 @@ ml_service_offloading_start (ml_service_h handle)
offloading_s = (_ml_service_offloading_s *) mls->priv;
if (offloading_s->offloading_mode == ML_SERVICE_OFFLOADING_MODE_TRAINING) {
- ret = ml_service_training_offloading_start (mls);
+ ret = _ml_service_training_offloading_start (mls);
if (ret != ML_ERROR_NONE) {
_ml_error_report ("Failed to start training offloading.");
}
@@ -1031,7 +1031,7 @@ ml_service_offloading_start (ml_service_h handle)
* @brief Internal function to stop ml-service offloading.
*/
int
-ml_service_offloading_stop (ml_service_h handle)
+_ml_service_offloading_stop (ml_service_h handle)
{
ml_service_s *mls = (ml_service_s *) handle;
_ml_service_offloading_s *offloading_s;
@@ -1045,7 +1045,7 @@ ml_service_offloading_stop (ml_service_h handle)
offloading_s = (_ml_service_offloading_s *) mls->priv;
if (offloading_s->offloading_mode == ML_SERVICE_OFFLOADING_MODE_TRAINING) {
- ret = ml_service_training_offloading_stop (mls);
+ ret = _ml_service_training_offloading_stop (mls);
if (ret != ML_ERROR_NONE) {
_ml_error_report ("Failed to stop training offloading.");
}
@@ -1059,7 +1059,7 @@ ml_service_offloading_stop (ml_service_h handle)
* Register new information, such as neural network models or pipeline descriptions, on an offloading server.
*/
int
-ml_service_offloading_request (ml_service_h handle, const char *key,
+_ml_service_offloading_request (ml_service_h handle, const char *key,
const ml_tensors_data_h input)
{
ml_service_s *mls = (ml_service_s *) handle;
@@ -1191,7 +1191,7 @@ done:
* Register new information, such as neural network models or pipeline descriptions, on an offloading server.
*/
int
-ml_service_offloading_request_raw (ml_service_h handle, const char *key,
+_ml_service_offloading_request_raw (ml_service_h handle, const char *key,
void *data, size_t len)
{
ml_tensors_data_s input;
@@ -1201,5 +1201,5 @@ ml_service_offloading_request_raw (ml_service_h handle, const char *key,
input.tensors[0].data = data;
input.tensors[0].size = len;
- return ml_service_offloading_request (handle, key, &input);
+ return _ml_service_offloading_request (handle, key, &input);
}
diff --git a/c/src/ml-api-service-offloading.h b/c/src/ml-api-service-offloading.h
index 220882e..a008370 100644
--- a/c/src/ml-api-service-offloading.h
+++ b/c/src/ml-api-service-offloading.h
@@ -48,7 +48,7 @@ typedef enum
#if defined(ENABLE_SERVICE_OFFLOADING)
/**
- * @brief Parse configuration file and create offloading service.
+ * @brief Internal function to parse configuration file and create offloading service.
* @param[in] handle The handle of ml-service created by ml_service_new().
* @param[in] object The json object from config file.
* @return @c 0 on success. Otherwise a negative error value.
@@ -57,10 +57,10 @@ typedef enum
* @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
* @retval #ML_ERROR_OUT_OF_MEMORY Failed to allocate required memory.
*/
-int ml_service_offloading_create (ml_service_h handle, JsonObject *object);
+int _ml_service_offloading_create (ml_service_h handle, JsonObject *object);
/**
- * @brief Start ml offloading service.
+ * @brief Internal function to start ml offloading service.
* @param[in] handle ml-service handle created by ml_service_new().
* @return @c 0 on success. Otherwise a negative error value.
* @retval #ML_ERROR_NONE Successful.
@@ -71,10 +71,10 @@ int ml_service_offloading_create (ml_service_h handle, JsonObject *object);
* @retval #ML_ERROR_TRY_AGAIN The pipeline is not ready yet.
* @retval #ML_ERROR_PERMISSION_DENIED The application does not have the privilege to access to the storage.
*/
-int ml_service_offloading_start (ml_service_h handle);
+int _ml_service_offloading_start (ml_service_h handle);
/**
- * @brief Stop ml offloading service.
+ * @brief Internal function to stop ml offloading service.
* @param[in] handle ml-service handle created by ml_service_new().
* @return @c 0 on success. Otherwise a negative error value.
* @retval #ML_ERROR_NONE Successful.
@@ -85,10 +85,10 @@ int ml_service_offloading_start (ml_service_h handle);
* @retval #ML_ERROR_TRY_AGAIN The pipeline is not ready yet.
* @retval #ML_ERROR_PERMISSION_DENIED The application does not have the privilege to access to the storage.
*/
-int ml_service_offloading_stop (ml_service_h handle);
+int _ml_service_offloading_stop (ml_service_h handle);
/**
- * @brief Request service to ml-service offloading.
+ * @brief Internal function to request service to ml-service offloading.
* @param[in] handle The handle of ml-service.
* @param[in] key The key of machine learning service.
* @param[in] input The data to be registered on the offloading server.
@@ -97,10 +97,10 @@ int ml_service_offloading_stop (ml_service_h handle);
* @retval #ML_ERROR_NOT_SUPPORTED Not supported.
* @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
*/
-int ml_service_offloading_request (ml_service_h handle, const char *key, const ml_tensors_data_h input);
+int _ml_service_offloading_request (ml_service_h handle, const char *key, const ml_tensors_data_h input);
/**
- * @brief Request service to ml-service offloading.
+ * @brief Internal function to request service to ml-service offloading.
* @param[in] handle The handle of ml-service.
* @param[in] key The key of machine learning service.
* @param[in] data The raw data to be registered on the offloading server.
@@ -110,10 +110,10 @@ int ml_service_offloading_request (ml_service_h handle, const char *key, const m
* @retval #ML_ERROR_NOT_SUPPORTED Not supported.
* @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
*/
-int ml_service_offloading_request_raw (ml_service_h handle, const char *key, void *data, size_t len);
+int _ml_service_offloading_request_raw (ml_service_h handle, const char *key, void *data, size_t len);
/**
- * @brief Set a required value in ml-service offloading handle.
+ * @brief Internal function to set a required value in ml-service offloading handle.
* @param[in] handle The handle of ml-service.
* @param[in] name The service key.
* @param[in] value The value to set.
@@ -122,10 +122,10 @@ int ml_service_offloading_request_raw (ml_service_h handle, const char *key, voi
* @retval #ML_ERROR_NOT_SUPPORTED Not supported.
* @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
*/
-int ml_service_offloading_set_information (ml_service_h handle, const char *name, const char *value);
+int _ml_service_offloading_set_information (ml_service_h handle, const char *name, const char *value);
/**
- * @brief Set offloading mode and private data.
+ * @brief Internal function to set offloading mode and private data.
* @param[in] handle The handle of ml-service.
* @param[in] mode The offloading mode.
* @param[in] priv The private data for each offloading mode.
@@ -134,10 +134,10 @@ int ml_service_offloading_set_information (ml_service_h handle, const char *name
* @retval #ML_ERROR_NOT_SUPPORTED Not supported.
* @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
*/
-int ml_service_offloading_set_mode (ml_service_h handle, ml_service_offloading_mode_e mode, void *priv);
+int _ml_service_offloading_set_mode (ml_service_h handle, ml_service_offloading_mode_e mode, void *priv);
/**
- * @brief Get offloading mode and private data.
+ * @brief Internal function to get offloading mode and private data.
* @param[in] handle The handle of ml-service.
* @param[out] mode The offloading mode.
* @param[out] priv The private data for each offloading mode.
@@ -146,22 +146,22 @@ int ml_service_offloading_set_mode (ml_service_h handle, ml_service_offloading_m
* @retval #ML_ERROR_NOT_SUPPORTED Not supported.
* @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
*/
-int ml_service_offloading_get_mode (ml_service_h handle, ml_service_offloading_mode_e *mode, void **priv);
+int _ml_service_offloading_get_mode (ml_service_h handle, ml_service_offloading_mode_e *mode, void **priv);
/**
* @brief Internal function to release ml-service offloading data.
*/
-int ml_service_offloading_release_internal (ml_service_s *mls);
+int _ml_service_offloading_release_internal (ml_service_s *mls);
#else
-#define ml_service_offloading_create(...) ML_ERROR_NOT_SUPPORTED
-#define ml_service_offloading_start(...) ML_ERROR_NOT_SUPPORTED
-#define ml_service_offloading_stop(...) ML_ERROR_NOT_SUPPORTED
-#define ml_service_offloading_request(...) ML_ERROR_NOT_SUPPORTED
-#define ml_service_offloading_request_raw(...) ML_ERROR_NOT_SUPPORTED
-#define ml_service_offloading_set_information(...) ML_ERROR_NOT_SUPPORTED
-#define ml_service_offloading_release_internal(...) ML_ERROR_NOT_SUPPORTED
-#define ml_service_offloading_set_mode(...) ML_ERROR_NOT_SUPPORTED
-#define ml_service_offloading_get_mode(...) ML_ERROR_NOT_SUPPORTED
+#define _ml_service_offloading_create(...) ML_ERROR_NOT_SUPPORTED
+#define _ml_service_offloading_start(...) ML_ERROR_NOT_SUPPORTED
+#define _ml_service_offloading_stop(...) ML_ERROR_NOT_SUPPORTED
+#define _ml_service_offloading_request(...) ML_ERROR_NOT_SUPPORTED
+#define _ml_service_offloading_request_raw(...) ML_ERROR_NOT_SUPPORTED
+#define _ml_service_offloading_set_information(...) ML_ERROR_NOT_SUPPORTED
+#define _ml_service_offloading_release_internal(...) ML_ERROR_NOT_SUPPORTED
+#define _ml_service_offloading_set_mode(...) ML_ERROR_NOT_SUPPORTED
+#define _ml_service_offloading_get_mode(...) ML_ERROR_NOT_SUPPORTED
#endif /* ENABLE_SERVICE_OFFLOADING */
#ifdef __cplusplus
diff --git a/c/src/ml-api-service-private.h b/c/src/ml-api-service-private.h
index c3e0a14..e0fb2c5 100644
--- a/c/src/ml-api-service-private.h
+++ b/c/src/ml-api-service-private.h
@@ -113,12 +113,12 @@ int _ml_service_conf_parse_tensors_info (JsonNode *info_node, ml_tensors_info_h
/**
* @brief Internal function to release ml-service pipeline data.
*/
-int ml_service_pipeline_release_internal (ml_service_s *mls);
+int _ml_service_pipeline_release_internal (ml_service_s *mls);
/**
* @brief Internal function to release ml-service query data.
*/
-int ml_service_query_release_internal (ml_service_s *mls);
+int _ml_service_query_release_internal (ml_service_s *mls);
/**
* @brief Internal function to get json string member.
diff --git a/c/src/ml-api-service-query-client.c b/c/src/ml-api-service-query-client.c
index 40b80e8..6421cf5 100644
--- a/c/src/ml-api-service-query-client.c
+++ b/c/src/ml-api-service-query-client.c
@@ -58,7 +58,7 @@ _sink_callback_for_query_client (const ml_tensors_data_h data,
* @brief Internal function to release ml-service query data.
*/
int
-ml_service_query_release_internal (ml_service_s * mls)
+_ml_service_query_release_internal (ml_service_s * mls)
{
_ml_service_query_s *query = (_ml_service_query_s *) mls->priv;
ml_tensors_data_h data_h;
diff --git a/c/src/ml-api-service-training-offloading.c b/c/src/ml-api-service-training-offloading.c
index 72c0c2e..92eea25 100644
--- a/c/src/ml-api-service-training-offloading.c
+++ b/c/src/ml-api-service-training-offloading.c
@@ -96,7 +96,7 @@ _training_offloading_get_priv (ml_service_s * mls,
ml_service_offloading_mode_e mode = ML_SERVICE_OFFLOADING_MODE_NONE;
int ret;
- ret = ml_service_offloading_get_mode (mls, &mode, (void **) training_s);
+ ret = _ml_service_offloading_get_mode (mls, &mode, (void **) training_s);
if (ret != ML_ERROR_NONE) {
_ml_error_report_return (ret,
"Failed to get offloading mode and private data.");
@@ -448,13 +448,13 @@ _training_offloading_create (ml_service_s * mls)
training_s->type = ML_TRAINING_OFFLOADING_TYPE_UNKNOWN;
training_s->time_limit = DEFAULT_TIME_LIMIT;
- ml_service_offloading_set_mode (mls,
+ _ml_service_offloading_set_mode (mls,
ML_SERVICE_OFFLOADING_MODE_TRAINING, training_s);
training_s->transfer_data_table =
g_hash_table_new_full (g_str_hash, g_str_equal, g_free, g_free);
if (!training_s->transfer_data_table) {
- ml_service_training_offloading_destroy (mls);
+ _ml_service_training_offloading_destroy (mls);
_ml_error_report_return (ML_ERROR_OUT_OF_MEMORY,
"Failed to allocate memory for the data table. Out of memory?");
}
@@ -463,7 +463,7 @@ _training_offloading_create (ml_service_s * mls)
g_hash_table_new_full (g_str_hash, g_str_equal, g_free,
_training_offloading_node_info_free);
if (!training_s->node_table) {
- ml_service_training_offloading_destroy (mls);
+ _ml_service_training_offloading_destroy (mls);
_ml_error_report_return (ML_ERROR_OUT_OF_MEMORY,
"Failed to allocate memory for the node table. Out of memory?");
}
@@ -475,7 +475,7 @@ _training_offloading_create (ml_service_s * mls)
* @brief Internal function to create ml-service training offloading handle.
*/
int
-ml_service_training_offloading_create (ml_service_s * mls,
+_ml_service_training_offloading_create (ml_service_s * mls,
JsonObject * offloading)
{
int ret = ML_ERROR_NONE;
@@ -491,7 +491,7 @@ ml_service_training_offloading_create (ml_service_s * mls,
ret = _training_offloading_conf_parse_json (mls, offloading);
if (ret != ML_ERROR_NONE) {
- ml_service_training_offloading_destroy (mls);
+ _ml_service_training_offloading_destroy (mls);
_ml_error_report_return (ret,
"Failed to parse the configuration file for training offloading.");
}
@@ -513,7 +513,7 @@ _training_offloading_request (ml_service_s * mls,
g_return_val_if_fail (data != NULL, ML_ERROR_INVALID_PARAMETER);
g_return_val_if_fail (len > 0, ML_ERROR_INVALID_PARAMETER);
- ret = ml_service_offloading_request_raw (mls, service_name, data, len);
+ ret = _ml_service_offloading_request_raw (mls, service_name, data, len);
if (ret != ML_ERROR_NONE) {
_ml_error_report ("Failed to request service '%s'.)", service_name);
}
@@ -716,7 +716,8 @@ _training_offloading_replce_pipeline_data_path (ml_service_s * mls)
* @brief Set path in ml-service training offloading handle.
*/
int
-ml_service_training_offloading_set_path (ml_service_s * mls, const gchar * path)
+_ml_service_training_offloading_set_path (ml_service_s * mls,
+ const gchar * path)
{
int ret = ML_ERROR_NONE;
ml_training_services_s *training_s = NULL;
@@ -736,7 +737,7 @@ ml_service_training_offloading_set_path (ml_service_s * mls, const gchar * path)
* @brief Start ml training offloading service.
*/
int
-ml_service_training_offloading_start (ml_service_s * mls)
+_ml_service_training_offloading_start (ml_service_s * mls)
{
int ret = ML_ERROR_NONE;
g_autoptr (JsonNode) pipeline_node = NULL;
@@ -829,7 +830,7 @@ ml_service_training_offloading_start (ml_service_s * mls)
* @brief Stop ml training offloading service.
*/
int
-ml_service_training_offloading_stop (ml_service_s * mls)
+_ml_service_training_offloading_stop (ml_service_s * mls)
{
int ret = ML_ERROR_NONE;
ml_training_services_s *training_s = NULL;
@@ -854,7 +855,7 @@ ml_service_training_offloading_stop (ml_service_s * mls)
* @brief Save receiver pipeline description.
*/
int
-ml_service_training_offloading_process_received_data (ml_service_s * mls,
+_ml_service_training_offloading_process_received_data (ml_service_s * mls,
void *data_h, const gchar * dir_path, const gchar * data, int service_type)
{
g_autofree gchar *name = NULL;
@@ -936,7 +937,7 @@ _training_offloading_send_trained_model (ml_service_s * mls)
* @brief Internal function to destroy ml-service training offloading data.
*/
int
-ml_service_training_offloading_destroy (ml_service_s * mls)
+_ml_service_training_offloading_destroy (ml_service_s * mls)
{
int ret = ML_ERROR_NONE;
ml_training_services_s *training_s = NULL;
@@ -993,6 +994,6 @@ ml_service_training_offloading_destroy (ml_service_s * mls)
g_free (training_s);
- ml_service_offloading_set_mode (mls, ML_SERVICE_OFFLOADING_MODE_NONE, NULL);
+ _ml_service_offloading_set_mode (mls, ML_SERVICE_OFFLOADING_MODE_NONE, NULL);
return ret;
}
diff --git a/c/src/ml-api-service-training-offloading.h b/c/src/ml-api-service-training-offloading.h
index 7df6358..43fef37 100644
--- a/c/src/ml-api-service-training-offloading.h
+++ b/c/src/ml-api-service-training-offloading.h
@@ -24,7 +24,7 @@ extern "C" {
#if defined(ENABLE_TRAINING_OFFLOADING)
/**
- * @brief Creates a training offloading handle for ml-service training offloading service.
+ * @brief Internal function to create a training offloading handle for ml-service training offloading service.
* @param[in] mls ml-service handle created by ml_service_new().
* @param[in] offloading The Json object containing the service option.
* @return @c 0 on success. Otherwise a negative error value.
@@ -36,10 +36,10 @@ extern "C" {
* @retval #ML_ERROR_STREAMS_PIPE Failed to open the model.
* @retval #ML_ERROR_OUT_OF_MEMORY Failed to allocate required memory.
*/
-int ml_service_training_offloading_create (ml_service_s *mls, JsonObject *offloading);
+int _ml_service_training_offloading_create (ml_service_s *mls, JsonObject *offloading);
/**
- * @brief Set path in ml-service training offloading handle.
+ * @brief Internal function to set path in ml-service training offloading handle.
* @param[in] mls ml-service handle created by ml_service_new().
* @param[in] path Readable and writable path set by the app.
* @return @c 0 on success. Otherwise a negative error value.
@@ -47,10 +47,10 @@ int ml_service_training_offloading_create (ml_service_s *mls, JsonObject *offloa
* @retval #ML_ERROR_NOT_SUPPORTED Not supported.
* @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
*/
-int ml_service_training_offloading_set_path (ml_service_s *mls, const gchar *path);
+int _ml_service_training_offloading_set_path (ml_service_s *mls, const gchar *path);
/**
- * @brief Start ml training offloading service.
+ * @brief Internal function to start ml training offloading service.
* @param[in] mls ml-service handle created by ml_service_new().
* @return @c 0 on success. Otherwise a negative error value.
* @retval #ML_ERROR_NONE Successful.
@@ -61,10 +61,10 @@ int ml_service_training_offloading_set_path (ml_service_s *mls, const gchar *pat
* @retval #ML_ERROR_TRY_AGAIN The pipeline is not ready yet.
* @retval #ML_ERROR_PERMISSION_DENIED The application does not have the privilege to access to the storage.
*/
-int ml_service_training_offloading_start (ml_service_s *mls);
+int _ml_service_training_offloading_start (ml_service_s *mls);
/**
- * @brief Stop ml training offloading service.
+ * @brief Internal function to stop ml training offloading service.
* @param[in] mls ml-service handle created by ml_service_new().
* @return @c 0 on success. Otherwise a negative error value.
* @retval #ML_ERROR_NONE Successful.
@@ -75,10 +75,10 @@ int ml_service_training_offloading_start (ml_service_s *mls);
* @retval #ML_ERROR_TRY_AGAIN The pipeline is not ready yet.
* @retval #ML_ERROR_PERMISSION_DENIED The application does not have the privilege to access to the storage.
*/
-int ml_service_training_offloading_stop (ml_service_s *mls);
+int _ml_service_training_offloading_stop (ml_service_s *mls);
/**
- * @brief Process received data
+ * @brief Internal function to process received data.
* @param[in] mls ml-service handle created by ml_service_new().
* @param[in] data_h handle nns_edge_data_h
* @param[in] data data of received file
@@ -89,7 +89,7 @@ int ml_service_training_offloading_stop (ml_service_s *mls);
* @retval #ML_ERROR_NOT_SUPPORTED Not supported.
* @retval #ML_ERROR_INVALID_PARAMETER Fail. The parameter is invalid.
*/
-int ml_service_training_offloading_process_received_data (ml_service_s *mls, void *data_h, const gchar *dir_path, const gchar *data, int service_type);
+int _ml_service_training_offloading_process_received_data (ml_service_s *mls, void *data_h, const gchar *dir_path, const gchar *data, int service_type);
/**
* @brief Internal function to destroy ml-service training offloading data.
@@ -99,14 +99,14 @@ int ml_service_training_offloading_process_received_data (ml_service_s *mls, voi
* @retval #ML_ERROR_NOT_SUPPORTED Not supported.
* @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
*/
-int ml_service_training_offloading_destroy (ml_service_s *mls);
+int _ml_service_training_offloading_destroy (ml_service_s *mls);
#else
-#define ml_service_training_offloading_create(...) ML_ERROR_NOT_SUPPORTED
-#define ml_service_training_offloading_set_path(...) ML_ERROR_NOT_SUPPORTED
-#define ml_service_training_offloading_start(...) ML_ERROR_NOT_SUPPORTED
-#define ml_service_training_offloading_stop(...) ML_ERROR_NOT_SUPPORTED
-#define ml_service_training_offloading_process_received_data(...) ML_ERROR_NOT_SUPPORTED
-#define ml_service_training_offloading_destroy(...) ML_ERROR_NOT_SUPPORTED
+#define _ml_service_training_offloading_create(...) ML_ERROR_NOT_SUPPORTED
+#define _ml_service_training_offloading_set_path(...) ML_ERROR_NOT_SUPPORTED
+#define _ml_service_training_offloading_start(...) ML_ERROR_NOT_SUPPORTED
+#define _ml_service_training_offloading_stop(...) ML_ERROR_NOT_SUPPORTED
+#define _ml_service_training_offloading_process_received_data(...) ML_ERROR_NOT_SUPPORTED
+#define _ml_service_training_offloading_destroy(...) ML_ERROR_NOT_SUPPORTED
#endif /* ENABLE_TRAINING_OFFLOADING */
#ifdef __cplusplus
}
diff --git a/c/src/ml-api-service.c b/c/src/ml-api-service.c
index e504e3e..95626b3 100644
--- a/c/src/ml-api-service.c
+++ b/c/src/ml-api-service.c
@@ -66,10 +66,10 @@ _ml_service_set_information_internal (ml_service_s * mls, const char *name,
switch (mls->type) {
case ML_SERVICE_TYPE_EXTENSION:
- status = ml_service_extension_set_information (mls, name, value);
+ status = _ml_service_extension_set_information (mls, name, value);
break;
case ML_SERVICE_TYPE_OFFLOADING:
- status = ml_service_offloading_set_information (mls, name, value);
+ status = _ml_service_offloading_set_information (mls, name, value);
break;
default:
break;
@@ -127,16 +127,16 @@ _ml_service_destroy_internal (ml_service_s * mls)
switch (mls->type) {
case ML_SERVICE_TYPE_SERVER_PIPELINE:
- status = ml_service_pipeline_release_internal (mls);
+ status = _ml_service_pipeline_release_internal (mls);
break;
case ML_SERVICE_TYPE_CLIENT_QUERY:
- status = ml_service_query_release_internal (mls);
+ status = _ml_service_query_release_internal (mls);
break;
case ML_SERVICE_TYPE_OFFLOADING:
- status = ml_service_offloading_release_internal (mls);
+ status = _ml_service_offloading_release_internal (mls);
break;
case ML_SERVICE_TYPE_EXTENSION:
- status = ml_service_extension_destroy (mls);
+ status = _ml_service_extension_destroy (mls);
break;
default:
_ml_error_report ("Invalid type of ml_service_h.");
@@ -367,10 +367,10 @@ ml_service_new (const char *config, ml_service_h * handle)
switch (service_type) {
case ML_SERVICE_TYPE_EXTENSION:
- status = ml_service_extension_create (mls, object);
+ status = _ml_service_extension_create (mls, object);
break;
case ML_SERVICE_TYPE_OFFLOADING:
- status = ml_service_offloading_create (mls, object);
+ status = _ml_service_offloading_create (mls, object);
break;
default:
/* Invalid handle type. */
@@ -462,10 +462,10 @@ ml_service_start (ml_service_h handle)
break;
}
case ML_SERVICE_TYPE_EXTENSION:
- status = ml_service_extension_start (mls);
+ status = _ml_service_extension_start (mls);
break;
case ML_SERVICE_TYPE_OFFLOADING:
- status = ml_service_offloading_start (mls);
+ status = _ml_service_offloading_start (mls);
break;
default:
/* Invalid handle type. */
@@ -504,10 +504,10 @@ ml_service_stop (ml_service_h handle)
break;
}
case ML_SERVICE_TYPE_EXTENSION:
- status = ml_service_extension_stop (mls);
+ status = _ml_service_extension_stop (mls);
break;
case ML_SERVICE_TYPE_OFFLOADING:
- status = ml_service_offloading_stop (mls);
+ status = _ml_service_offloading_stop (mls);
break;
default:
/* Invalid handle type. */
@@ -545,7 +545,7 @@ ml_service_get_input_information (ml_service_h handle, const char *name,
switch (mls->type) {
case ML_SERVICE_TYPE_EXTENSION:
- status = ml_service_extension_get_input_information (mls, name, info);
+ status = _ml_service_extension_get_input_information (mls, name, info);
break;
default:
/* Invalid handle type. */
@@ -590,7 +590,7 @@ ml_service_get_output_information (ml_service_h handle, const char *name,
switch (mls->type) {
case ML_SERVICE_TYPE_EXTENSION:
- status = ml_service_extension_get_output_information (mls, name, info);
+ status = _ml_service_extension_get_output_information (mls, name, info);
break;
default:
/* Invalid handle type. */
@@ -711,10 +711,10 @@ ml_service_request (ml_service_h handle, const char *name,
switch (mls->type) {
case ML_SERVICE_TYPE_EXTENSION:
- status = ml_service_extension_request (mls, name, data);
+ status = _ml_service_extension_request (mls, name, data);
break;
case ML_SERVICE_TYPE_OFFLOADING:
- status = ml_service_offloading_request (mls, name, data);
+ status = _ml_service_offloading_request (mls, name, data);
break;
default:
/* Invalid handle type. */
diff --git a/tests/capi/unittest_capi_service_offloading.cc b/tests/capi/unittest_capi_service_offloading.cc
index 47bce9f..56f2d87 100644
--- a/tests/capi/unittest_capi_service_offloading.cc
+++ b/tests/capi/unittest_capi_service_offloading.cc
@@ -254,20 +254,20 @@ TEST_F (MLOffloadingService, registerPipelineURI)
}
/**
- * @brief Test ml_service_offloading_create with invalid param.
+ * @brief Test _ml_service_offloading_create with invalid param.
*/
TEST_F (MLOffloadingService, createInvalidParam_n)
{
int status;
- status = ml_service_offloading_create (NULL, NULL);
+ status = _ml_service_offloading_create (NULL, NULL);
EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
- status = ml_service_offloading_create (server_h, NULL);
+ status = _ml_service_offloading_create (server_h, NULL);
EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
}
/**
- * @brief Test ml_service_offloading_request with invalid param.
+ * @brief Test _ml_service_offloading_request with invalid param.
*/
TEST_F (MLOffloadingService, registerInvalidParam01_n)
{
@@ -279,13 +279,13 @@ TEST_F (MLOffloadingService, registerInvalidParam01_n)
status = _create_tensor_data_from_str (pipeline_desc, strlen (pipeline_desc) + 1, &input);
EXPECT_EQ (ML_ERROR_NONE, status);
- status = ml_service_offloading_request (NULL, "pipeline_registration_raw", input);
+ status = _ml_service_offloading_request (NULL, "pipeline_registration_raw", input);
EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
- status = ml_service_offloading_request (client_h, NULL, input);
+ status = _ml_service_offloading_request (client_h, NULL, input);
EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
- status = ml_service_offloading_request (client_h, "pipeline_registration_raw", NULL);
+ status = _ml_service_offloading_request (client_h, "pipeline_registration_raw", NULL);
EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
status = ml_tensors_data_destroy (input);
@@ -293,7 +293,7 @@ TEST_F (MLOffloadingService, registerInvalidParam01_n)
}
/**
- * @brief Test ml_service_offloading_request_raw with invalid param.
+ * @brief Test _ml_service_offloading_request_raw with invalid param.
*/
TEST_F (MLOffloadingService, registerInvalidParam02_n)
{
@@ -302,16 +302,16 @@ TEST_F (MLOffloadingService, registerInvalidParam02_n)
g_autofree gchar *data = g_strdup ("fakesrc ! fakesink");
gsize len = strlen (data);
- status = ml_service_offloading_request_raw (NULL, "req_raw", data, len);
+ status = _ml_service_offloading_request_raw (NULL, "req_raw", data, len);
EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
- status = ml_service_offloading_request_raw (client_h, NULL, data, len);
+ status = _ml_service_offloading_request_raw (client_h, NULL, data, len);
EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
- status = ml_service_offloading_request_raw (client_h, "req_raw", NULL, len);
+ status = _ml_service_offloading_request_raw (client_h, "req_raw", NULL, len);
EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
- status = ml_service_offloading_request_raw (client_h, "req_raw", data, 0);
+ status = _ml_service_offloading_request_raw (client_h, "req_raw", data, 0);
EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
}
@@ -324,7 +324,7 @@ TEST_F (MLOffloadingService, registerModel)
ml_tensors_data_h input = NULL;
const gchar *root_path = g_getenv ("MLAPI_SOURCE_ROOT_PATH");
- /* ml_service_offloading_request () requires absolute path to model, ignore this case. */
+ /* _ml_service_offloading_request () requires absolute path to model, ignore this case. */
if (root_path == NULL)
return;
@@ -341,7 +341,7 @@ TEST_F (MLOffloadingService, registerModel)
status = ml_service_set_event_cb (server_h, _ml_service_event_cb, &test_data);
EXPECT_EQ (status, ML_ERROR_NONE);
- status = ml_service_offloading_set_information (server_h, "path", model_dir);
+ status = _ml_service_offloading_set_information (server_h, "path", model_dir);
EXPECT_EQ (status, ML_ERROR_NONE);
status = _create_tensor_data_from_str (contents, len, &input);
@@ -368,7 +368,7 @@ TEST_F (MLOffloadingService, registerModelURI)
int status;
ml_tensors_data_h input = NULL;
const gchar *root_path = g_getenv ("MLAPI_SOURCE_ROOT_PATH");
- /* ml_service_offloading_request () requires absolute path to model, ignore this case. */
+ /* _ml_service_offloading_request () requires absolute path to model, ignore this case. */
if (root_path == NULL)
return;
@@ -410,7 +410,7 @@ TEST_F (MLOffloadingService, registerModelPath)
int status;
ml_tensors_data_h input = NULL;
const gchar *root_path = g_getenv ("MLAPI_SOURCE_ROOT_PATH");
- /* ml_service_offloading_request () requires absolute path to model, ignore this case. */
+ /* _ml_service_offloading_request () requires absolute path to model, ignore this case. */
if (root_path == NULL)
return;
diff --git a/tests/capi/unittest_capi_service_training_offloading.cc b/tests/capi/unittest_capi_service_training_offloading.cc
index ded44a1..db6968c 100644
--- a/tests/capi/unittest_capi_service_training_offloading.cc
+++ b/tests/capi/unittest_capi_service_training_offloading.cc
@@ -238,7 +238,7 @@ TEST_F (MLServiceTrainingOffloading, trainingOffloading_p)
}
/**
- * @brief Test ml_service_training_offloading_create with invalid param.
+ * @brief Test _ml_service_training_offloading_create with invalid param.
*/
TEST_F (MLServiceTrainingOffloading, createInvalidParam1_n)
{
@@ -248,15 +248,15 @@ TEST_F (MLServiceTrainingOffloading, createInvalidParam1_n)
mls = _ml_service_create_internal (ML_SERVICE_TYPE_OFFLOADING);
ASSERT_NE (nullptr, mls);
- status = ml_service_training_offloading_create (mls, NULL);
+ status = _ml_service_training_offloading_create (mls, NULL);
EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
- status = ml_service_offloading_release_internal (mls);
+ status = _ml_service_offloading_release_internal (mls);
EXPECT_EQ (ML_ERROR_NONE, status);
}
/**
- * @brief Test ml_service_training_offloading_create with invalid param.
+ * @brief Test _ml_service_training_offloading_create with invalid param.
*/
TEST_F (MLServiceTrainingOffloading, createInvalidParam2_n)
{
@@ -280,14 +280,14 @@ TEST_F (MLServiceTrainingOffloading, createInvalidParam2_n)
ASSERT_NE (nullptr, object);
JsonObject *offloading = json_object_get_object_member (object, "offloading");
- status = ml_service_training_offloading_create (NULL, offloading);
+ status = _ml_service_training_offloading_create (NULL, offloading);
EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
EXPECT_EQ (g_remove (receiver_config), 0);
}
/**
- * @brief Test ml_service_training_offloading_create.
+ * @brief Test _ml_service_training_offloading_create.
*/
TEST_F (MLServiceTrainingOffloading, create_p)
{
@@ -314,37 +314,37 @@ TEST_F (MLServiceTrainingOffloading, create_p)
mls = _ml_service_create_internal (ML_SERVICE_TYPE_OFFLOADING);
ASSERT_NE (nullptr, mls);
- status = ml_service_offloading_create (mls, object);
+ status = _ml_service_offloading_create (mls, object);
/* nns-edge error occurs because there is no remote to connect to. */
EXPECT_EQ (ML_ERROR_NONE, status);
/* An offloading instance must be created first. */
JsonObject *offloading = json_object_get_object_member (object, "offloading");
- status = ml_service_training_offloading_create (mls, offloading);
+ status = _ml_service_training_offloading_create (mls, offloading);
EXPECT_EQ (ML_ERROR_NONE, status);
- status = ml_service_training_offloading_destroy (mls);
+ status = _ml_service_training_offloading_destroy (mls);
EXPECT_EQ (ML_ERROR_NONE, status);
- status = ml_service_offloading_release_internal (mls);
+ status = _ml_service_offloading_release_internal (mls);
EXPECT_EQ (ML_ERROR_NONE, status);
EXPECT_EQ (g_remove (receiver_config), 0);
}
/**
- * @brief Test ml_service_training_offloading_destroy.
+ * @brief Test _ml_service_training_offloading_destroy.
*/
TEST_F (MLServiceTrainingOffloading, destroyInvalidParam1_n)
{
int status;
- status = ml_service_training_offloading_destroy (NULL);
+ status = _ml_service_training_offloading_destroy (NULL);
EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
}
/**
- * @brief Test ml_service_training_offloading_set_path.
+ * @brief Test _ml_service_training_offloading_set_path.
*/
TEST_F (MLServiceTrainingOffloading, setPathInvalidParam1_n)
{
@@ -366,10 +366,10 @@ TEST_F (MLServiceTrainingOffloading, setPathInvalidParam1_n)
mls = (ml_service_s *) service_h;
ASSERT_NE (nullptr, mls);
- status = ml_service_training_offloading_set_path (mls, NULL);
+ status = _ml_service_training_offloading_set_path (mls, NULL);
EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
- status = ml_service_training_offloading_set_path (mls, file_path);
+ status = _ml_service_training_offloading_set_path (mls, file_path);
EXPECT_EQ (ML_ERROR_NONE, status);
status = ml_service_destroy (service_h);
@@ -379,7 +379,7 @@ TEST_F (MLServiceTrainingOffloading, setPathInvalidParam1_n)
}
/**
- * @brief Test ml_service_training_offloading_start.
+ * @brief Test _ml_service_training_offloading_start.
*/
TEST_F (MLServiceTrainingOffloading, startInvalidParam1_n)
{
@@ -401,14 +401,14 @@ TEST_F (MLServiceTrainingOffloading, startInvalidParam1_n)
mls = (ml_service_s *) receiver_h;
ASSERT_NE (nullptr, mls);
- status = ml_service_training_offloading_set_path (mls, file_path);
+ status = _ml_service_training_offloading_set_path (mls, file_path);
EXPECT_EQ (ML_ERROR_NONE, status);
- status = ml_service_training_offloading_start (mls);
+ status = _ml_service_training_offloading_start (mls);
/* Not receiving data needed for training. */
EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
- status = ml_service_training_offloading_start (NULL);
+ status = _ml_service_training_offloading_start (NULL);
EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
status = ml_service_destroy (receiver_h);
@@ -418,7 +418,7 @@ TEST_F (MLServiceTrainingOffloading, startInvalidParam1_n)
}
/**
- * @brief Test ml_service_training_offloading_start.
+ * @brief Test _ml_service_training_offloading_stop.
*/
TEST_F (MLServiceTrainingOffloading, stopInvalidParam1_n)
{
@@ -440,11 +440,11 @@ TEST_F (MLServiceTrainingOffloading, stopInvalidParam1_n)
mls = (ml_service_s *) receiver_h;
ASSERT_NE (nullptr, mls);
- status = ml_service_training_offloading_set_path (mls, file_path);
+ status = _ml_service_training_offloading_set_path (mls, file_path);
EXPECT_EQ (ML_ERROR_NONE, status);
/* not start */
- status = ml_service_training_offloading_stop (mls);
+ status = _ml_service_training_offloading_stop (mls);
EXPECT_EQ (ML_ERROR_STREAMS_PIPE, status);
status = ml_service_destroy (receiver_h);