-rw-r--r--  packaging/inference-engine-mlapi.spec  2
-rw-r--r--  src/inference_engine_mlapi.cpp          6
2 files changed, 4 insertions, 4 deletions
diff --git a/packaging/inference-engine-mlapi.spec b/packaging/inference-engine-mlapi.spec
index ad18b74..d2ea2d9 100644
--- a/packaging/inference-engine-mlapi.spec
+++ b/packaging/inference-engine-mlapi.spec
@@ -1,6 +1,6 @@
 Name: inference-engine-mlapi
 Summary: ML Single API backend of NNStreamer for MediaVision
-Version: 0.4.9
+Version: 0.4.10
 Release: 0
 Group: Multimedia/Libraries
 License: Apache-2.0
diff --git a/src/inference_engine_mlapi.cpp b/src/inference_engine_mlapi.cpp
index 4f3adfc..7fbf997 100644
--- a/src/inference_engine_mlapi.cpp
+++ b/src/inference_engine_mlapi.cpp
@@ -176,8 +176,8 @@ namespace MLAPIImpl
 			return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
 		}

-		// TODO. nnstreamer needs fixed dimention with 4 for nntrainer tensor filter. Why??
-		std::vector<unsigned int> indim(4, 1);
+		// NNStreamer uses a fixed dimension count of 16 (ML_TENSOR_RANK_LIMIT).
+		std::vector<unsigned int> indim(ML_TENSOR_RANK_LIMIT, 1);

 		LOGI("Input tensor(%zu) shape:", layer_idx);
@@ -616,7 +616,7 @@ namespace MLAPIImpl
 		for (auto& output : mDesignated_outputs) {
 			inference_engine_tensor_info tensor_info;
 			ml_tensor_type_e out_type;
-			unsigned int out_dim[MAX_TENSOR_DIMENSION_SIZE];
+			unsigned int out_dim[ML_TENSOR_RANK_LIMIT];
 			size_t out_size = 1;

 			ret = ml_tensors_info_get_tensor_type(mOutputInfoHandle, output.second, &out_type);
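For context, the switch to ML_TENSOR_RANK_LIMIT follows the fixed-rank dimension model of the Tizen ML (NNStreamer) C API: ml_tensor_dimension is an array of ML_TENSOR_RANK_LIMIT unsigned ints, so callers pad unused trailing entries with 1 when setting input shapes and read output shapes into an array of the same fixed size. The sketch below only illustrates that pattern; the helper names, handles, and the layer_shape parameter are hypothetical and not taken from this backend, while the ml_tensors_info_* calls and types are from the public API. It also assumes unused dimensions are reported as 1, which is what the backend's own element-count computation relies on.

// Minimal sketch, assuming the Tizen ML API headers. Helper names and the
// `layer_shape` parameter are illustrative only.
#include <nnstreamer.h>

#include <cstddef>
#include <vector>

static int set_input_dimension(ml_tensors_info_h in_info, unsigned int index,
			       const std::vector<unsigned int> &layer_shape)
{
	// The C API expects a fixed-rank array (ML_TENSOR_RANK_LIMIT entries),
	// so pad the unused trailing dimensions with 1, as the patched code does.
	ml_tensor_dimension dim;

	for (unsigned int i = 0; i < ML_TENSOR_RANK_LIMIT; ++i)
		dim[i] = (i < layer_shape.size()) ? layer_shape[i] : 1;

	return ml_tensors_info_set_tensor_dimension(in_info, index, dim);
}

static int get_output_size(ml_tensors_info_h out_info, unsigned int index,
			   size_t &out_size)
{
	// Output dimensions are likewise read into an ML_TENSOR_RANK_LIMIT-sized
	// array; multiplying every entry assumes unused ranks are padded with 1.
	unsigned int out_dim[ML_TENSOR_RANK_LIMIT];

	int ret = ml_tensors_info_get_tensor_dimension(out_info, index, out_dim);
	if (ret != ML_ERROR_NONE)
		return ret;

	out_size = 1;
	for (unsigned int d : out_dim)
		out_size *= d;

	return ML_ERROR_NONE;
}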