diff options
author | Inki Dae <inki.dae@samsung.com> | 2020-06-15 17:23:37 +0900 |
---|---|---|
committer | Inki Dae <inki.dae@samsung.com> | 2020-06-26 08:58:22 +0900 |
commit | 7f4fde27995c5f9ab9bf079c7a6fec0bbd9968e3 (patch) | |
tree | 086edb1b276a937bed0f6083591f41a84f914135 | |
parent | d92b31574bffdb68a29c2277f2c365d8dcabe839 (diff) | |
download | mediavision-7f4fde27995c5f9ab9bf079c7a6fec0bbd9968e3.tar.gz mediavision-7f4fde27995c5f9ab9bf079c7a6fec0bbd9968e3.tar.bz2 mediavision-7f4fde27995c5f9ab9bf079c7a6fec0bbd9968e3.zip |
mv_inference: Fix tensor data type
The output tensor type of the Inception v3 model, when converted for the
Vivante NPU, is UINT16.
Change-Id: Ic9b568aa42b6e77e63d404688efc59cfa8038266
Signed-off-by: Inki Dae <inki.dae@samsung.com>
-rw-r--r-- | mv_inference/inference/src/Inference.cpp | 6 |
1 file changed, 3 insertions, 3 deletions
diff --git a/mv_inference/inference/src/Inference.cpp b/mv_inference/inference/src/Inference.cpp index 62262adc..6a032a3c 100644 --- a/mv_inference/inference/src/Inference.cpp +++ b/mv_inference/inference/src/Inference.cpp @@ -611,7 +611,7 @@ namespace inference tensor_buffer.buffer = new unsigned char[tensor_info.size]; tensor_buffer.size = tensor_info.size; } else if (tensor_info.data_type == - INFERENCE_TENSOR_DATA_TYPE_FLOAT16) { + INFERENCE_TENSOR_DATA_TYPE_UINT16) { tensor_buffer.buffer = new short[tensor_info.size]; tensor_buffer.size = tensor_info.size; } else { @@ -663,7 +663,7 @@ namespace inference tensor_buffer.buffer = new char[tensor_info.size]; tensor_buffer.size = tensor_info.size; } else if (tensor_info.data_type == - INFERENCE_TENSOR_DATA_TYPE_FLOAT16) { + INFERENCE_TENSOR_DATA_TYPE_UINT16) { tensor_buffer.buffer = new short[tensor_info.size]; tensor_buffer.size = tensor_info.size; } else { @@ -728,7 +728,7 @@ namespace inference delete[] ori_buf; } - if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT16) { + if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT16) { float *new_buf = new float[tensor_info.size]; if (new_buf == NULL) { LOGE("Fail to allocate a new output tensor buffer."); |