diff options
author | Inki Dae <inki.dae@samsung.com> | 2020-08-26 09:26:58 +0900 |
---|---|---|
committer | Inki Dae <inki.dae@samsung.com> | 2020-08-26 09:30:25 +0900 |
commit | 939175938edb44b934e7cf92abe2cc83dce657c6 (patch) | |
tree | 3e15eacd43dd5ff26c8fc289ae5b1ac0e1b3110b | |
parent | 1a82b20c9a2cb70f8f64221bc1b595875939196d (diff) | |
download | mediavision-939175938edb44b934e7cf92abe2cc83dce657c6.tar.gz mediavision-939175938edb44b934e7cf92abe2cc83dce657c6.tar.bz2 mediavision-939175938edb44b934e7cf92abe2cc83dce657c6.zip |
mv_inference: Add two tensor buffer types support [tags: submit/tizen/20200828.100528, submit/tizen/20200828.025650, accepted/tizen/unified/20200831.002550]
This patch adds uint32 and int64 types for tensor data allocation.
It also fixes an issue where a tensor buffer was allocated with the wrong type.
Change-Id: I209b23176cc666a8e3417275baad8d8e3a91eaae
Signed-off-by: Inki Dae <inki.dae@samsung.com>
-rw-r--r-- | mv_inference/inference/src/Inference.cpp | 26 |
1 file changed, 23 insertions, 3 deletions
diff --git a/mv_inference/inference/src/Inference.cpp b/mv_inference/inference/src/Inference.cpp index 3b3199b8..771da373 100644 --- a/mv_inference/inference/src/Inference.cpp +++ b/mv_inference/inference/src/Inference.cpp @@ -554,6 +554,9 @@ namespace inference if (tensor_buffer.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) delete[] static_cast<float *>(tensor_buffer.buffer); + else if (tensor_buffer.data_type == + INFERENCE_TENSOR_DATA_TYPE_UINT16) + delete[] static_cast<unsigned short *>(tensor_buffer.buffer); else delete[] static_cast<unsigned char *>(tensor_buffer.buffer); } @@ -579,6 +582,15 @@ namespace inference if (tensor_buffer.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) delete[] static_cast<float *>(tensor_buffer.buffer); + else if (tensor_buffer.data_type == + INFERENCE_TENSOR_DATA_TYPE_INT64) + delete[] static_cast<long long *>(tensor_buffer.buffer); + else if (tensor_buffer.data_type == + INFERENCE_TENSOR_DATA_TYPE_UINT32) + delete[] static_cast<uint32_t *>(tensor_buffer.buffer); + else if (tensor_buffer.data_type == + INFERENCE_TENSOR_DATA_TYPE_UINT16) + delete[] static_cast<unsigned short *>(tensor_buffer.buffer); else delete[] static_cast<unsigned char *>(tensor_buffer.buffer); } @@ -642,7 +654,7 @@ namespace inference tensor_buffer.size = tensor_info.size; } else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT16) { - tensor_buffer.buffer = new short[tensor_info.size]; + tensor_buffer.buffer = new unsigned short[tensor_info.size]; tensor_buffer.size = tensor_info.size; } else { LOGE("Invalid input tensor data type."); @@ -689,12 +701,20 @@ namespace inference tensor_buffer.buffer = new float[tensor_info.size]; tensor_buffer.size = tensor_info.size; } else if (tensor_info.data_type == - INFERENCE_TENSOR_DATA_TYPE_UINT8) { + INFERENCE_TENSOR_DATA_TYPE_INT64) { + tensor_buffer.buffer = new long long[tensor_info.size]; + tensor_buffer.size = tensor_info.size; + } else if (tensor_info.data_type == + 
INFERENCE_TENSOR_DATA_TYPE_UINT32) { + tensor_buffer.buffer = new unsigned int[tensor_info.size]; + tensor_buffer.size = tensor_info.size; + } else if (tensor_info.data_type == + INFERENCE_TENSOR_DATA_TYPE_UINT8) { tensor_buffer.buffer = new char[tensor_info.size]; tensor_buffer.size = tensor_info.size; } else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT16) { - tensor_buffer.buffer = new short[tensor_info.size]; + tensor_buffer.buffer = new unsigned short[tensor_info.size]; tensor_buffer.size = tensor_info.size; } else { LOGE("Invalid output tensor data type."); |