diff options
author | Inki Dae <inki.dae@samsung.com> | 2023-04-25 13:27:54 +0900 |
---|---|---|
committer | Inki Dae <inki.dae@samsung.com> | 2023-09-18 13:53:45 +0900 |
commit | 57657c5385c2f7ffb7634dd10d7a6f2700c434c3 (patch) | |
tree | 1bbe8a338da96a5ada39097c8f45d14dcd17b0b2 | |
parent | 68734c87eae257fc55db74458cf9e99d8a212d9d (diff) | |
download | inference-engine-mlapi-accepted/tizen_7.0_unified.tar.gz inference-engine-mlapi-accepted/tizen_7.0_unified.tar.bz2 inference-engine-mlapi-accepted/tizen_7.0_unified.zip |
[Version] : 0.4.10
[Issue type] : bug fix
Fix a coverity issue - Out-of-bounds access.
NNStreamer uses a fixed tensor dimension of 16, so correct the indims
and outdims array sizes.
Change-Id: I9569398df8d29ed5b1b3a1a9ee84290aa2e1fee0
Signed-off-by: Inki Dae <inki.dae@samsung.com>
-rw-r--r-- | packaging/inference-engine-mlapi.spec | 2 | ||||
-rw-r--r-- | src/inference_engine_mlapi.cpp | 6 |
2 files changed, 4 insertions, 4 deletions
diff --git a/packaging/inference-engine-mlapi.spec b/packaging/inference-engine-mlapi.spec index ad18b74..d2ea2d9 100644 --- a/packaging/inference-engine-mlapi.spec +++ b/packaging/inference-engine-mlapi.spec @@ -1,6 +1,6 @@ Name: inference-engine-mlapi Summary: ML Single API backend of NNStreamer for MediaVision -Version: 0.4.9 +Version: 0.4.10 Release: 0 Group: Multimedia/Libraries License: Apache-2.0 diff --git a/src/inference_engine_mlapi.cpp b/src/inference_engine_mlapi.cpp index 4f3adfc..7fbf997 100644 --- a/src/inference_engine_mlapi.cpp +++ b/src/inference_engine_mlapi.cpp @@ -176,8 +176,8 @@ namespace MLAPIImpl return INFERENCE_ENGINE_ERROR_INVALID_OPERATION; } - // TODO. nnstreamer needs fixed dimention with 4 for nntrainer tensor filter. Why?? - std::vector<unsigned int> indim(4, 1); + // NNStreamer uses a fixed dimention with 16. + std::vector<unsigned int> indim(ML_TENSOR_RANK_LIMIT, 1); LOGI("Input tensor(%zu) shape:", layer_idx); @@ -616,7 +616,7 @@ namespace MLAPIImpl for (auto& output : mDesignated_outputs) { inference_engine_tensor_info tensor_info; ml_tensor_type_e out_type; - unsigned int out_dim[MAX_TENSOR_DIMENSION_SIZE]; + unsigned int out_dim[ML_TENSOR_RANK_LIMIT]; size_t out_size = 1; ret = ml_tensors_info_get_tensor_type(mOutputInfoHandle, output.second, &out_type); |