summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorInki Dae <inki.dae@samsung.com>2023-04-25 13:27:54 +0900
committerInki Dae <inki.dae@samsung.com>2023-04-25 04:35:44 +0000
commitff5d0a0c11ba926fc263e95e756f6ced582c8f95 (patch)
tree1bbe8a338da96a5ada39097c8f45d14dcd17b0b2
parentc2c143f9a037c242347aab32693970a09c8311f6 (diff)
downloadinference-engine-mlapi-tizen.tar.gz
inference-engine-mlapi-tizen.tar.bz2
inference-engine-mlapi-tizen.zip
[Version] : 0.4.10 [Issue type] : bug fix Fix a coverity issue - Out-of-bounds access. NNStreamer uses a fixed tensor dimension of 16, so correct the indims and outdims array sizes accordingly. Change-Id: I9569398df8d29ed5b1b3a1a9ee84290aa2e1fee0 Signed-off-by: Inki Dae <inki.dae@samsung.com>
-rw-r--r--packaging/inference-engine-mlapi.spec2
-rw-r--r--src/inference_engine_mlapi.cpp6
2 files changed, 4 insertions, 4 deletions
diff --git a/packaging/inference-engine-mlapi.spec b/packaging/inference-engine-mlapi.spec
index ad18b74..d2ea2d9 100644
--- a/packaging/inference-engine-mlapi.spec
+++ b/packaging/inference-engine-mlapi.spec
@@ -1,6 +1,6 @@
Name: inference-engine-mlapi
Summary: ML Single API backend of NNStreamer for MediaVision
-Version: 0.4.9
+Version: 0.4.10
Release: 0
Group: Multimedia/Libraries
License: Apache-2.0
diff --git a/src/inference_engine_mlapi.cpp b/src/inference_engine_mlapi.cpp
index 4f3adfc..7fbf997 100644
--- a/src/inference_engine_mlapi.cpp
+++ b/src/inference_engine_mlapi.cpp
@@ -176,8 +176,8 @@ namespace MLAPIImpl
return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
}
- // TODO. nnstreamer needs fixed dimention with 4 for nntrainer tensor filter. Why??
- std::vector<unsigned int> indim(4, 1);
+ // NNStreamer uses a fixed dimention with 16.
+ std::vector<unsigned int> indim(ML_TENSOR_RANK_LIMIT, 1);
LOGI("Input tensor(%zu) shape:", layer_idx);
@@ -616,7 +616,7 @@ namespace MLAPIImpl
for (auto& output : mDesignated_outputs) {
inference_engine_tensor_info tensor_info;
ml_tensor_type_e out_type;
- unsigned int out_dim[MAX_TENSOR_DIMENSION_SIZE];
+ unsigned int out_dim[ML_TENSOR_RANK_LIMIT];
size_t out_size = 1;
ret = ml_tensors_info_get_tensor_type(mOutputInfoHandle, output.second, &out_type);