summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorTae-Young Chung <ty83.chung@samsung.com>2016-11-22 15:29:25 +0900
committerTae-Young Chung <ty83.chung@samsung.com>2016-11-27 18:20:55 -0800
commit9a0e11ed445dfc91a58d68c820b8f7ac00698baa (patch)
tree092ea8dd9ff93e297061e5e1ba97395fed6267b8
parentf26100a2491b83c4f0dad532eaab201aae15f155 (diff)
downloadmediavision-9a0e11ed445dfc91a58d68c820b8f7ac00698baa.tar.gz
mediavision-9a0e11ed445dfc91a58d68c820b8f7ac00698baa.tar.bz2
mediavision-9a0e11ed445dfc91a58d68c820b8f7ac00698baa.zip
1. Applied coding convention to C++ files;identation, brace, etc 2. Fixed typo 3. Minor changes (Applied explicit casting using static_cast<>, refactoring in a surveillance moduel, etc) Change-Id: Ieccc949d0b761ef5dbb72aea6512fee85d754894 Signed-off-by: Tae-Young Chung <ty83.chung@samsung.com> (cherry picked from commit 441cbc7d0f70c0898129110da75a75ac11233828)
-rw-r--r--README4
-rw-r--r--doc/mediavision_doc.h4
-rw-r--r--include/mv_face.h8
-rw-r--r--include/mv_surveillance.h74
-rw-r--r--mv_barcode/barcode_detector/include/Barcode.h8
-rw-r--r--mv_barcode/barcode_detector/include/BarcodeUtils.h8
-rw-r--r--mv_barcode/barcode_detector/include/mv_barcode_detect_open.h6
-rw-r--r--mv_barcode/barcode_detector/src/Barcode.cpp45
-rw-r--r--mv_barcode/barcode_detector/src/BarcodeUtils.cpp4
-rw-r--r--mv_barcode/barcode_detector/src/mv_barcode_detect_open.cpp26
-rw-r--r--mv_barcode/barcode_detector_lic/include/mv_barcode_detect_lic.h6
-rw-r--r--mv_barcode/barcode_generator/include/BarcodeGenerator.h6
-rw-r--r--mv_barcode/barcode_generator/include/BarcodeOptions.h6
-rw-r--r--mv_barcode/barcode_generator/include/mv_barcode_generate_open.h6
-rw-r--r--mv_barcode/barcode_generator/src/BarcodeGenerator.cpp21
-rw-r--r--mv_barcode/barcode_generator/src/mv_barcode_generate_open.cpp17
-rw-r--r--mv_barcode/barcode_generator_lic/include/mv_barcode_generate_lic.h6
-rw-r--r--mv_common/CMakeLists.txt2
-rw-r--r--mv_common/include/EngineConfig.h8
-rw-r--r--mv_common/include/MediaSource.h14
-rw-r--r--mv_common/include/mv_common_c.h6
-rw-r--r--mv_common/src/EngineConfig.cpp21
-rw-r--r--mv_common/src/MediaSource.cpp27
-rw-r--r--mv_common/src/mv_common_c.cpp106
-rw-r--r--mv_face/face/include/FaceDetector.h6
-rw-r--r--mv_face/face/include/FaceExpressionRecognizer.h12
-rw-r--r--mv_face/face/include/FaceEyeCondition.h6
-rw-r--r--mv_face/face/include/FaceRecognitionModel.h8
-rw-r--r--mv_face/face/include/FaceTrackingModel.h6
-rw-r--r--mv_face/face/include/FaceUtil.h6
-rw-r--r--mv_face/face/include/TrackerMedianFlow.h15
-rw-r--r--mv_face/face/include/mv_face_open.h10
-rw-r--r--mv_face/face/src/FaceDetector.cpp23
-rw-r--r--mv_face/face/src/FaceExpressionRecognizer.cpp51
-rw-r--r--mv_face/face/src/FaceEyeCondition.cpp29
-rw-r--r--mv_face/face/src/FaceRecognitionModel.cpp48
-rw-r--r--mv_face/face/src/FaceTrackingModel.cpp12
-rw-r--r--mv_face/face/src/FaceUtil.cpp1
-rw-r--r--mv_face/face/src/TrackerMedianFlow.cpp101
-rw-r--r--mv_face/face/src/mv_face_open.cpp118
-rw-r--r--mv_face/face_lic/include/mv_face_lic.h10
-rw-r--r--mv_face/face_lic/src/mv_face_lic.c2
-rw-r--r--mv_image/CMakeLists.txt2
-rw-r--r--mv_image/image/CMakeLists.txt4
-rw-r--r--mv_image/image/include/Features/BasicExtractorFactory.h10
-rw-r--r--mv_image/image/include/Features/FeatureExtractor.h18
-rw-r--r--mv_image/image/include/Features/FeatureExtractorFactory.h6
-rw-r--r--mv_image/image/include/Features/FeatureMatcher.h16
-rw-r--r--mv_image/image/include/Features/FeaturePack.h16
-rw-r--r--mv_image/image/include/Features/ORBExtractorFactory.h12
-rw-r--r--mv_image/image/include/ImageConfig.h17
-rw-r--r--mv_image/image/include/ImageMathUtil.h7
-rw-r--r--mv_image/image/include/Recognition/ImageObject.h27
-rw-r--r--mv_image/image/include/Recognition/ImageRecognizer.h22
-rw-r--r--mv_image/image/include/Tracking/AsyncTracker.h28
-rw-r--r--mv_image/image/include/Tracking/CascadeTracker.h17
-rw-r--r--mv_image/image/include/Tracking/FeatureSubstitutionTracker.h22
-rw-r--r--mv_image/image/include/Tracking/ImageContourStabilizator.h32
-rw-r--r--mv_image/image/include/Tracking/ImageTrackingModel.h16
-rw-r--r--mv_image/image/include/Tracking/MFTracker.h42
-rw-r--r--mv_image/image/include/Tracking/ObjectTracker.h6
-rw-r--r--mv_image/image/include/Tracking/RecognitionBasedTracker.h12
-rw-r--r--mv_image/image/include/mv_image_open.h18
-rw-r--r--mv_image/image/src/Features/BasicExtractorFactory.cpp17
-rw-r--r--mv_image/image/src/Features/FeatureExtractor.cpp68
-rw-r--r--mv_image/image/src/Features/FeatureExtractorFactory.cpp2
-rw-r--r--mv_image/image/src/Features/FeatureMatcher.cpp65
-rw-r--r--mv_image/image/src/Features/FeaturePack.cpp30
-rw-r--r--mv_image/image/src/Features/ORBExtractorFactory.cpp58
-rw-r--r--mv_image/image/src/ImageConfig.cpp14
-rw-r--r--mv_image/image/src/ImageMathUtil.cpp7
-rw-r--r--mv_image/image/src/Recognition/ImageObject.cpp154
-rw-r--r--mv_image/image/src/Recognition/ImageRecognizer.cpp40
-rw-r--r--mv_image/image/src/Tracking/AsyncTracker.cpp141
-rw-r--r--mv_image/image/src/Tracking/CascadeTracker.cpp67
-rw-r--r--mv_image/image/src/Tracking/FeatureSubstitutionTracker.cpp96
-rw-r--r--mv_image/image/src/Tracking/ImageContourStabilizator.cpp178
-rw-r--r--mv_image/image/src/Tracking/ImageTrackingModel.cpp130
-rw-r--r--mv_image/image/src/Tracking/MFTracker.cpp180
-rw-r--r--mv_image/image/src/Tracking/RecognitionBasedTracker.cpp21
-rw-r--r--mv_image/image/src/mv_image_open.cpp92
-rw-r--r--mv_image/image_lic/include/mv_image_lic.h84
-rw-r--r--mv_image/image_lic/src/mv_image_lic.c16
-rw-r--r--mv_surveillance/surveillance/include/EventManager.h10
-rw-r--r--mv_surveillance/surveillance/include/EventTrigger.h10
-rw-r--r--mv_surveillance/surveillance/include/EventTriggerMovementDetection.h4
-rw-r--r--mv_surveillance/surveillance/include/EventTriggerPersonAppearance.h26
-rw-r--r--mv_surveillance/surveillance/include/EventTriggerPersonRecognition.h4
-rw-r--r--mv_surveillance/surveillance/include/HoGDetector.h27
-rw-r--r--mv_surveillance/surveillance/include/MFTracker.h137
-rw-r--r--mv_surveillance/surveillance/include/SurveillanceHelper.h1
-rw-r--r--mv_surveillance/surveillance/src/EventManager.cpp21
-rw-r--r--mv_surveillance/surveillance/src/EventTrigger.cpp8
-rw-r--r--mv_surveillance/surveillance/src/EventTriggerMovementDetection.cpp44
-rw-r--r--mv_surveillance/surveillance/src/EventTriggerPersonAppearance.cpp342
-rw-r--r--mv_surveillance/surveillance/src/EventTriggerPersonRecognition.cpp38
-rw-r--r--mv_surveillance/surveillance/src/MFTracker.cpp371
-rw-r--r--mv_surveillance/surveillance/src/mv_surveillance_open.cpp3
-rw-r--r--mv_surveillance/surveillance_lic/include/mv_surveillance_lic.h45
-rw-r--r--mv_surveillance/surveillance_lic/src/mv_surveillance_lic.c6
-rw-r--r--packaging/capi-media-vision.spec4
-rw-r--r--src/mv_barcode.c4
-rw-r--r--src/mv_common.c4
-rw-r--r--src/mv_face.c32
-rw-r--r--src/mv_image.c10
-rw-r--r--src/mv_private.c42
-rw-r--r--test/testsuites/barcode/barcode_test_suite.c45
-rw-r--r--test/testsuites/common/image_helper/include/ImageHelper.h365
-rw-r--r--test/testsuites/common/image_helper/include/image_helper.h65
-rw-r--r--test/testsuites/common/image_helper/src/ImageHelper.cpp87
-rw-r--r--test/testsuites/common/image_helper/src/image_helper.cpp146
-rw-r--r--test/testsuites/common/testsuite_common/mv_testsuite_common.h6
-rw-r--r--test/testsuites/common/video_helper/mv_log_cfg.h6
-rw-r--r--test/testsuites/common/video_helper/mv_video_helper.c119
-rw-r--r--test/testsuites/common/video_helper/mv_video_helper.h60
-rw-r--r--test/testsuites/face/face_test_suite.c279
-rw-r--r--test/testsuites/image/image_test_suite.c404
-rw-r--r--test/testsuites/surveillance/surveillance_test_suite.c36
118 files changed, 3158 insertions, 2366 deletions
diff --git a/README b/README
index 415ddebc..54f37071 100644
--- a/README
+++ b/README
@@ -12,7 +12,7 @@ CONTENT
Media Vision package includes following modules: Common, Media Vision Barcode,
Media Vision Face and Media Vision Image. Common module provides two handles
-(mv_source_h and mv_engine_config_h) and related fuctionality. It used by
+(mv_source_h and mv_engine_config_h) and related functionality. It used by
barcode detector and generator submodules. mv_source_h is used for wrapping raw
image data buffers. mv_engine_config_h is optional. It can be used for fine
tuning of internal libraries which are used by API. mv_engine_config_h handle
@@ -109,7 +109,7 @@ b. Options change will cause CMake to build from different subdirectories of
delete[] types;
}
- return TIZEN_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
c. Change the packaging/capi-media-vision.spec to support any packages required
diff --git a/doc/mediavision_doc.h b/doc/mediavision_doc.h
index 3458f3c0..d205fc0a 100644
--- a/doc/mediavision_doc.h
+++ b/doc/mediavision_doc.h
@@ -328,7 +328,7 @@
* </table>
* Before subscription of the event trigger with
* @ref mv_surveillance_subscribe_event_trigger() call it is possible to create
- * @ref mv_engine_config_h handle and configurate following attributes:
+ * @ref mv_engine_config_h handle and configure following attributes:
* - @ref MV_SURVEILLANCE_SKIP_FRAMES_COUNT to setup number of frames will be
ignored by event trigger;
* - @ref MV_SURVEILLANCE_MOVEMENT_DETECTION_THRESHOLD to specify sensitivity of
@@ -339,7 +339,7 @@
* Created engine config has to be used as a parameter of
* @ref mv_surveillance_subscribe_event_trigger() to apply the configuration. If
* NULL will be passed instead of valid @ref mv_engine_config_h handle, then
- * default attribute values will be used for subsriptions.
+ * default attribute values will be used for subscriptions.
* To make surveillance system work with video sequences
* @ref mv_surveillance_push_source() function has to
* be used for each frame in the sequence in the correct order. Multiple video
diff --git a/include/mv_face.h b/include/mv_face.h
index 18e4772d..c68955a2 100644
--- a/include/mv_face.h
+++ b/include/mv_face.h
@@ -65,7 +65,7 @@ extern "C" {
/**
* @brief Define MV_FACE_DETECTION_ROI_X to set X coordinate of face detection
- * roi as attribute of the engine configuration.
+ * roi (Region of Interest) as attribute of the engine configuration.
* @details Default value is -1 (the roi will be a full image) can be changed
* to specify the roi for face detection
*
@@ -77,7 +77,7 @@ extern "C" {
/**
* @brief Define MV_FACE_DETECTION_ROI_Y to set Y coordinate of face detection
- * roi as attribute of the engine configuration.
+ * roi (Region of Interest) as attribute of the engine configuration.
* @details Default value is -1 (the roi will be a full image) can be changed
* to specify the roi for face detection
*
@@ -89,7 +89,7 @@ extern "C" {
/**
* @brief Define MV_FACE_DETECTION_ROI_WIDTH to set width of face detection
- * roi as attribute of the engine configuration.
+ * roi (Region of Interest) as attribute of the engine configuration.
* @details Default value is -1 (the roi will be a full image) can be changed
* to specify the roi for face detection
*
@@ -101,7 +101,7 @@ extern "C" {
/**
* @brief Define MV_FACE_DETECTION_ROI_HEIGHT to set height of face detection
- * roi as attribute of the engine configuration.
+ * roi (Region of Interest) as attribute of the engine configuration.
* @details Default value is -1 (the roi will be a full image) can be changed
* to specify the roi for face detection
*
diff --git a/include/mv_surveillance.h b/include/mv_surveillance.h
index f17c77b6..ecf57b16 100644
--- a/include/mv_surveillance.h
+++ b/include/mv_surveillance.h
@@ -790,11 +790,11 @@ typedef void *mv_surveillance_result_h;
* @see mv_surveillance_unsubscribe_event_trigger()
*/
typedef void (*mv_surveillance_event_occurred_cb)(
- mv_surveillance_event_trigger_h trigger,
- mv_source_h source,
- int video_stream_id,
- mv_surveillance_result_h event_result,
- void *user_data);
+ mv_surveillance_event_trigger_h trigger,
+ mv_source_h source,
+ int video_stream_id,
+ mv_surveillance_result_h event_result,
+ void *user_data);
/**
* @brief Called to get the information once for each supported event type.
@@ -812,8 +812,8 @@ typedef void (*mv_surveillance_event_occurred_cb)(
* @see mv_surveillance_foreach_supported_event_type()
*/
typedef bool (*mv_surveillance_event_type_cb)(
- const char *event_type,
- void *user_data);
+ const char *event_type,
+ void *user_data);
/**
* @brief Called to get the result name from the triggered event.
@@ -835,8 +835,8 @@ typedef bool (*mv_surveillance_event_type_cb)(
* @see mv_surveillance_foreach_event_result_name()
*/
typedef bool (*mv_surveillance_event_result_name_cb)(
- const char *name,
- void *user_data);
+ const char *name,
+ void *user_data);
/**
* @brief Creates surveillance event trigger handle.
@@ -859,8 +859,8 @@ typedef bool (*mv_surveillance_event_result_name_cb)(
* @see mv_surveillance_foreach_supported_event_type()
*/
int mv_surveillance_event_trigger_create(
- const char *event_type,
- mv_surveillance_event_trigger_h *trigger);
+ const char *event_type,
+ mv_surveillance_event_trigger_h *trigger);
/**
* @brief Destroys the surveillance event trigger handle and releases all its
@@ -876,7 +876,7 @@ int mv_surveillance_event_trigger_create(
* @see mv_surveillance_event_trigger_create
*/
int mv_surveillance_event_trigger_destroy(
- mv_surveillance_event_trigger_h trigger);
+ mv_surveillance_event_trigger_h trigger);
/**
* @brief Gets the surveillance event trigger type as character string.
@@ -895,8 +895,8 @@ int mv_surveillance_event_trigger_destroy(
* @ref mv_surveillance_event_trigger_create() function
*/
int mv_surveillance_get_event_trigger_type(
- mv_surveillance_event_trigger_h trigger,
- char **event_type);
+ mv_surveillance_event_trigger_h trigger,
+ char **event_type);
/**
* @brief Sets ROI (Region Of Interest) to the event trigger.
@@ -925,9 +925,9 @@ int mv_surveillance_get_event_trigger_type(
* @see mv_surveillance_get_event_trigger_roi()
*/
int mv_surveillance_set_event_trigger_roi(
- mv_surveillance_event_trigger_h trigger,
- int number_of_points,
- mv_point_s *roi);
+ mv_surveillance_event_trigger_h trigger,
+ int number_of_points,
+ mv_point_s *roi);
/**
* @brief Gets ROI (Region Of Interest) from the event trigger.
@@ -953,9 +953,9 @@ int mv_surveillance_set_event_trigger_roi(
* @see mv_surveillance_set_event_trigger_roi()
*/
int mv_surveillance_get_event_trigger_roi(
- mv_surveillance_event_trigger_h trigger,
- int *number_of_points,
- mv_point_s **roi);
+ mv_surveillance_event_trigger_h trigger,
+ int *number_of_points,
+ mv_point_s **roi);
/**
* @brief Subscribes @a trigger to process sources pushed from video identified
@@ -992,11 +992,11 @@ int mv_surveillance_get_event_trigger_roi(
* @see mv_surveillance_push_source()
*/
int mv_surveillance_subscribe_event_trigger(
- mv_surveillance_event_trigger_h trigger,
- int video_stream_id,
- mv_engine_config_h engine_cfg,
- mv_surveillance_event_occurred_cb callback,
- void *user_data);
+ mv_surveillance_event_trigger_h trigger,
+ int video_stream_id,
+ mv_engine_config_h engine_cfg,
+ mv_surveillance_event_occurred_cb callback,
+ void *user_data);
/**
* @brief Unsubscribes @a trigger from the event and stop calling @a callback.
@@ -1020,8 +1020,8 @@ int mv_surveillance_subscribe_event_trigger(
* @see mv_surveillance_subscribe_event_trigger()
*/
int mv_surveillance_unsubscribe_event_trigger(
- mv_surveillance_event_trigger_h trigger,
- int video_stream_id);
+ mv_surveillance_event_trigger_h trigger,
+ int video_stream_id);
/**
* @brief Pushes source to the surveillance system to detect events.
@@ -1052,8 +1052,8 @@ int mv_surveillance_unsubscribe_event_trigger(
* @see mv_surveillance_unsubscribe_event_trigger()
*/
int mv_surveillance_push_source(
- mv_source_h source,
- int video_stream_id);
+ mv_source_h source,
+ int video_stream_id);
/**
* @brief Starts traversing through list of supported event types.
@@ -1075,8 +1075,8 @@ int mv_surveillance_push_source(
* @see mv_surveillance_foreach_event_result_name()
*/
int mv_surveillance_foreach_supported_event_type(
- mv_surveillance_event_type_cb callback,
- void *user_data);
+ mv_surveillance_event_type_cb callback,
+ void *user_data);
/**
* @brief Starts traversing through list of supported event result value names.
@@ -1104,9 +1104,9 @@ int mv_surveillance_foreach_supported_event_type(
* @see mv_surveillance_get_result_value()
*/
int mv_surveillance_foreach_event_result_name(
- const char *event_type,
- mv_surveillance_event_result_name_cb callback,
- void *user_data);
+ const char *event_type,
+ mv_surveillance_event_result_name_cb callback,
+ void *user_data);
/**
* @brief Gets result value.
@@ -1139,9 +1139,9 @@ int mv_surveillance_foreach_event_result_name(
* @see mv_surveillance_foreach_event_result_name()
*/
int mv_surveillance_get_result_value(
- mv_surveillance_result_h result,
- const char *name,
- void *value);
+ mv_surveillance_result_h result,
+ const char *name,
+ void *value);
/**
* @}
diff --git a/mv_barcode/barcode_detector/include/Barcode.h b/mv_barcode/barcode_detector/include/Barcode.h
index 5a595c2c..79c76c56 100644
--- a/mv_barcode/barcode_detector/include/Barcode.h
+++ b/mv_barcode/barcode_detector/include/Barcode.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __BARCODE_H__
-#define __BARCODE_H__
+#ifndef __MEDIA_VISION_BARCODE_H__
+#define __MEDIS_VISION_BARCODE_H__
#include "mv_barcode.h"
@@ -75,10 +75,10 @@ public:
int calculateLocation(mv_quadrangle_s& location) const;
private:
- const zbar::Symbol *m_pBarcodeObj; ///< Pointer to zbar barcode handle
+ const zbar::Symbol *__pBarcodeObj; ///< Pointer to zbar barcode handle
};
} /* Barcode */
} /* MediaVision */
-#endif /* __BARCODE_H__ */
+#endif /* __MEDIA_VISION_BARCODE_H__ */
diff --git a/mv_barcode/barcode_detector/include/BarcodeUtils.h b/mv_barcode/barcode_detector/include/BarcodeUtils.h
index 10f3f8e7..58f30c61 100644
--- a/mv_barcode/barcode_detector/include/BarcodeUtils.h
+++ b/mv_barcode/barcode_detector/include/BarcodeUtils.h
@@ -14,13 +14,13 @@
* limitations under the License.
*/
-#ifndef __TIZEN_MEDIAVISION_BARCODE_UTILS_H__
-#define __TIZEN_MEDIAVISION_BARCODE_UTILS_H__
+#ifndef __MEDIA_VISION_BARCODE_UTILS_H__
+#define __MEDIA_VISION_BARCODE_UTILS_H__
#include "mv_common.h"
namespace zbar {
-class Image;
+ class Image;
}
namespace MediaVision {
@@ -40,4 +40,4 @@ int convertSourceMV2Zbar(mv_source_h mvSource, zbar::Image& zbarSource);
} /* Barcode */
} /* MediaVision */
-#endif /* __TIZEN_MEDIAVISION_BARCODE_UTILS_H__ */
+#endif /* __MEDIA_VISION_BARCODE_UTILS_H__ */
diff --git a/mv_barcode/barcode_detector/include/mv_barcode_detect_open.h b/mv_barcode/barcode_detector/include/mv_barcode_detect_open.h
index 9f90e35f..8631b36e 100644
--- a/mv_barcode/barcode_detector/include/mv_barcode_detect_open.h
+++ b/mv_barcode/barcode_detector/include/mv_barcode_detect_open.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __TIZEN_MEDIAVISION_BARCODE_DETECT_OPEN_H__
-#define __TIZEN_MEDIAVISION_BARCODE_DETECT_OPEN_H__
+#ifndef __MEDIA_VISION_BARCODE_DETECT_OPEN_H__
+#define __MEDIA_VISION_BARCODE_DETECT_OPEN_H__
#include "mv_barcode_detect.h"
@@ -63,4 +63,4 @@ int mv_barcode_detect_open(
}
#endif /* __cplusplus */
-#endif /* __TIZEN_MEDIAVISION_BARCODE_DETECT_OPEN_H__ */
+#endif /* __MEDIA_VISION_BARCODE_DETECT_OPEN_H__ */
diff --git a/mv_barcode/barcode_detector/src/Barcode.cpp b/mv_barcode/barcode_detector/src/Barcode.cpp
index 33616102..8523722d 100644
--- a/mv_barcode/barcode_detector/src/Barcode.cpp
+++ b/mv_barcode/barcode_detector/src/Barcode.cpp
@@ -22,44 +22,52 @@ namespace MediaVision {
namespace Barcode {
Barcode::Barcode(const zbar::Symbol& barObj):
- m_pBarcodeObj(new zbar::Symbol(barObj))
+ __pBarcodeObj(new zbar::Symbol(barObj))
{
- ; /* NULL */
+ ; /* NULL */
}
Barcode::~Barcode()
{
LOGI("Delete ZBar object");
- delete m_pBarcodeObj;
+ delete __pBarcodeObj;
}
std::string Barcode::getMessage(void) const
{
LOGI("Retrieve message data from ZBar object");
- return m_pBarcodeObj->get_data();
+ return __pBarcodeObj->get_data();
}
mv_barcode_type_e Barcode::getType(void) const
{
- zbar::zbar_symbol_type_t barcodeType = m_pBarcodeObj->get_type();
+ zbar::zbar_symbol_type_t barcodeType = __pBarcodeObj->get_type();
switch (barcodeType) {
case zbar::ZBAR_QRCODE:
return MV_BARCODE_QR;
+
case zbar::ZBAR_UPCA:
return MV_BARCODE_UPC_A;
+
case zbar::ZBAR_UPCE:
return MV_BARCODE_UPC_E;
+
case zbar::ZBAR_EAN8:
return MV_BARCODE_EAN_8;
+
case zbar::ZBAR_EAN13:
return MV_BARCODE_EAN_13;
+
case zbar::ZBAR_CODE128:
return MV_BARCODE_CODE128;
+
case zbar::ZBAR_CODE39:
return MV_BARCODE_CODE39;
+
case zbar::ZBAR_I25:
return MV_BARCODE_I2_5;
+
default:
LOGE("ZBar symbol colorspace is not supported by media vision");
return MV_BARCODE_UNDEFINED;
@@ -70,25 +78,19 @@ int Barcode::calculateLocation(mv_quadrangle_s& location) const
{
const int numberOfVertexes = 4;
- const int locationPolygonSize = m_pBarcodeObj->get_location_size();
+ const int locationPolygonSize = __pBarcodeObj->get_location_size();
- /*polygon location should contain at least 4 points */
+ /* polygon location should contain at least 4 points */
if (locationPolygonSize < numberOfVertexes) {
- LOGW("Can't compute location of the barcode by %i points (less then %i).", locationPolygonSize, numberOfVertexes);
+ LOGW("Can't compute location of the barcode by %i"
+ " points (less then %i).", locationPolygonSize,
+ numberOfVertexes);
return MEDIA_VISION_ERROR_INVALID_OPERATION;
}
- if (locationPolygonSize == numberOfVertexes) {
- for (int i = 0; i < numberOfVertexes; ++i) {
- location.points[i].x = m_pBarcodeObj->get_location_x(i);
- location.points[i].y = m_pBarcodeObj->get_location_y(i);
- }
-
- return MEDIA_VISION_ERROR_NONE;
- }
-
/* bounding quadrangle is computing by 4 marginal points */
- mv_point_s first = {m_pBarcodeObj->get_location_x(0), m_pBarcodeObj->get_location_y(0)};
+ mv_point_s first = {__pBarcodeObj->get_location_x(0),
+ __pBarcodeObj->get_location_y(0)};
int minX = first.x;
int maxX = first.x;
@@ -96,7 +98,8 @@ int Barcode::calculateLocation(mv_quadrangle_s& location) const
int maxY = first.y;
for (int i = 0; i < locationPolygonSize; ++i) {
- mv_point_s current = {m_pBarcodeObj->get_location_x(i), m_pBarcodeObj->get_location_y(i)};
+ mv_point_s current = {__pBarcodeObj->get_location_x(i),
+ __pBarcodeObj->get_location_y(i)};
if (current.x < minX) {
minX = current.x;
} else if (current.x > maxX) {
@@ -110,6 +113,10 @@ int Barcode::calculateLocation(mv_quadrangle_s& location) const
}
}
+ /* magic number("5") mean minimal size of detected area which will be returned. */
+ if (abs(minX - maxX) < 5 || abs(minY - maxY) < 5)
+ return MEDIA_VISION_ERROR_INVALID_OPERATION;
+
mv_point_s bottomLeft = {minX, maxY};
mv_point_s bottomRight = {maxX, maxY};
mv_point_s topRight = {maxX, minY};
diff --git a/mv_barcode/barcode_detector/src/BarcodeUtils.cpp b/mv_barcode/barcode_detector/src/BarcodeUtils.cpp
index a9da9230..b8c70d97 100644
--- a/mv_barcode/barcode_detector/src/BarcodeUtils.cpp
+++ b/mv_barcode/barcode_detector/src/BarcodeUtils.cpp
@@ -23,6 +23,7 @@
namespace MediaVision {
namespace Barcode {
+
int convertSourceMV2Zbar(mv_source_h mvSource, zbar::Image& zbarSource)
{
int err = MEDIA_VISION_ERROR_NONE;
@@ -34,7 +35,8 @@ int convertSourceMV2Zbar(mv_source_h mvSource, zbar::Image& zbarSource)
err = mv_source_get_colorspace_c(mvSource, &colorspace);
if (err != MEDIA_VISION_ERROR_NONE) {
- LOGW("Can't determine mv_source_h colorspace to convert to ZBar colorspace. Conversion failed");
+ LOGW("Can't determine mv_source_h colorspace to convert"
+ " to ZBar colorspace. Conversion failed");
return err;
}
diff --git a/mv_barcode/barcode_detector/src/mv_barcode_detect_open.cpp b/mv_barcode/barcode_detector/src/mv_barcode_detect_open.cpp
index d6dd4f84..0349d2cf 100644
--- a/mv_barcode/barcode_detector/src/mv_barcode_detect_open.cpp
+++ b/mv_barcode/barcode_detector/src/mv_barcode_detect_open.cpp
@@ -32,9 +32,8 @@ int mv_barcode_detect_open(
mv_barcode_detected_cb detect_cb,
void *user_data)
{
- if (!source || !detect_cb) {
+ if (!source || !detect_cb)
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
zbar::Image image;
int err = convertSourceMV2Zbar(source, image);
@@ -53,7 +52,10 @@ int mv_barcode_detect_open(
zbar::ImageScanner scanner;
int target_val;
- err = mv_engine_config_get_int_attribute(engine_cfg, "MV_BARCODE_DETECT_ATTR_TARGET", &target_val);
+ err = mv_engine_config_get_int_attribute(
+ engine_cfg,
+ "MV_BARCODE_DETECT_ATTR_TARGET",
+ &target_val);
if (err != MEDIA_VISION_ERROR_NONE) {
LOGW("mv_engine_config_get_int_attribute failed");
return err;
@@ -87,16 +89,19 @@ int mv_barcode_detect_open(
}
int numberOfBarcodes = scanner.scan(greyImage);
- LOGI("ZBar scanner has found %i barcodes on the mv_source_h", numberOfBarcodes);
+ LOGI("ZBar scanner has found %i barcodes on the mv_source_h",
+ numberOfBarcodes);
mv_quadrangle_s *barcodeLocations = NULL;
mv_barcode_type_e *types = NULL;
if (numberOfBarcodes == 0) {
LOGI("Call the detect callback for 0 detected barcodes");
- detect_cb(source, engine_cfg, barcodeLocations, NULL, types, numberOfBarcodes, user_data);
+ detect_cb(source, engine_cfg, barcodeLocations, NULL,
+ types, numberOfBarcodes, user_data);
return MEDIA_VISION_ERROR_NONE;
} else if (numberOfBarcodes < 0) {
- LOGW("Incorrect number of barcodes (%i), detection is terminated", numberOfBarcodes);
+ LOGW("Incorrect number of barcodes (%i), detection is terminated",
+ numberOfBarcodes);
return MEDIA_VISION_ERROR_INTERNAL;
}
@@ -122,9 +127,8 @@ int mv_barcode_detect_open(
int err = curBarcode.calculateLocation(barcodeLocations[i]);
if (err != MEDIA_VISION_ERROR_NONE) {
LOGW("Can't determine location for barcode, detection is terminated");
- for (int j = 0; j <= i; ++j) {
+ for (int j = 0; j <= i; ++j)
delete[] messagesArray[j];
- }
delete[] messagesArray;
delete[] barcodeLocations;
delete[] types;
@@ -133,12 +137,12 @@ int mv_barcode_detect_open(
}
LOGI("Call the detect callback for %i detected barcodes", numberOfBarcodes);
- detect_cb(source, engine_cfg, barcodeLocations, messagesArray, types, numberOfBarcodes, user_data);
+ detect_cb(source, engine_cfg, barcodeLocations, messagesArray, types,
+ numberOfBarcodes, user_data);
LOGI("Clean the memory from barcodes messages, locations and types");
- for (int j = 0; j < numberOfBarcodes; ++j) {
+ for (int j = 0; j < numberOfBarcodes; ++j)
delete[] messagesArray[j];
- }
delete[] messagesArray;
delete[] barcodeLocations;
delete[] types;
diff --git a/mv_barcode/barcode_detector_lic/include/mv_barcode_detect_lic.h b/mv_barcode/barcode_detector_lic/include/mv_barcode_detect_lic.h
index 9346f865..37a5b8c7 100644
--- a/mv_barcode/barcode_detector_lic/include/mv_barcode_detect_lic.h
+++ b/mv_barcode/barcode_detector_lic/include/mv_barcode_detect_lic.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __TIZEN_MEDIAVISION_BARCODE_DETECT_LIC_H__
-#define __TIZEN_MEDIAVISION_BARCODE_DETECT_LIC_H__
+#ifndef __MEDIA_VISION_BARCODE_DETECT_LIC_H__
+#define __MEDIA_VISION_BARCODE_DETECT_LIC_H__
#include "mv_barcode_detect.h"
@@ -60,4 +60,4 @@ int mv_barcode_detect_lic(
}
#endif /* __cplusplus */
-#endif /* __TIZEN_MEDIAVISION_BARCODE_DETECT_LIC_H__ */
+#endif /* __MEDIA_VISION_BARCODE_DETECT_LIC_H__ */
diff --git a/mv_barcode/barcode_generator/include/BarcodeGenerator.h b/mv_barcode/barcode_generator/include/BarcodeGenerator.h
index 1625dd66..288e169e 100644
--- a/mv_barcode/barcode_generator/include/BarcodeGenerator.h
+++ b/mv_barcode/barcode_generator/include/BarcodeGenerator.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __BARCODEGENERATOR_H__
-#define __BARCODEGENERATOR_H__
+#ifndef __MEDIA_VISION_BARCODE_GENERATOR_H__
+#define __MEDIA_VISION_BARCODE_GENERATOR_H__
#include "BarcodeOptions.h"
@@ -106,5 +106,5 @@ public:
} /* Barcode */
} /* MediaVision */
-#endif /* __BARCODEGENERATOR_H__ */
+#endif /* __MEDIA_VISION_BARCODE_GENERATOR_H__ */
diff --git a/mv_barcode/barcode_generator/include/BarcodeOptions.h b/mv_barcode/barcode_generator/include/BarcodeOptions.h
index 0eefd4a7..bebfe46c 100644
--- a/mv_barcode/barcode_generator/include/BarcodeOptions.h
+++ b/mv_barcode/barcode_generator/include/BarcodeOptions.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __BARCODEOPTIONS_H__
-#define __BARCODEOPTIONS_H__
+#ifndef __MEDIA_VISION_BARCODE_OPTIONS_H__
+#define __MEDIA_VISION_BARCODE_OPTIONS_H__
/**
* @file BarcodeOptions.h
@@ -111,5 +111,5 @@ enum BarcodeError {
} /* Barcode */
} /* MediaVision */
-#endif /* __BARCODEOPTIONS_H__ */
+#endif /* __MEDIA_VISION_BARCODE_OPTIONS_H__ */
diff --git a/mv_barcode/barcode_generator/include/mv_barcode_generate_open.h b/mv_barcode/barcode_generator/include/mv_barcode_generate_open.h
index 2664ad30..612a6814 100644
--- a/mv_barcode/barcode_generator/include/mv_barcode_generate_open.h
+++ b/mv_barcode/barcode_generator/include/mv_barcode_generate_open.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __TIZEN_MEDIAVISION_BARCODE_GENERATE_OPEN_H__
-#define __TIZEN_MEDIAVISION_BARCODE_GENERATE_OPEN_H__
+#ifndef __MEDIA_VISION_BARCODE_GENERATE_OPEN_H__
+#define __MEDIA_VISION_BARCODE_GENERATE_OPEN_H__
#include "mv_barcode_generate.h"
@@ -106,4 +106,4 @@ int mv_barcode_generate_image_open(
}
#endif /* __cplusplus */
-#endif /* __TIZEN_MEDIAVISION_BARCODE_GENERATE_OPEN_H__ */
+#endif /* __MEDIA_VISION_BARCODE_GENERATE_OPEN_H__ */
diff --git a/mv_barcode/barcode_generator/src/BarcodeGenerator.cpp b/mv_barcode/barcode_generator/src/BarcodeGenerator.cpp
index c4117d1e..d3299460 100644
--- a/mv_barcode/barcode_generator/src/BarcodeGenerator.cpp
+++ b/mv_barcode/barcode_generator/src/BarcodeGenerator.cpp
@@ -32,12 +32,13 @@ namespace MediaVision {
namespace Barcode {
namespace {
+
int getFormatEncodingInfo(
BarcodeImageFormat imageFormat,
std::vector<std::string>& extensions,
std::vector<int>& compressionParams)
{
- static const int PNGCompressionLevel = 3;
+ static const int PNG_COMPRESSION_LEVEL = 3;
compressionParams.clear();
extensions.clear();
@@ -45,7 +46,7 @@ int getFormatEncodingInfo(
switch (imageFormat) {
case BARCODE_IMAGE_PNG:
compressionParams.push_back(CV_IMWRITE_PNG_COMPRESSION);
- compressionParams.push_back(PNGCompressionLevel);
+ compressionParams.push_back(PNG_COMPRESSION_LEVEL);
extensions.push_back(".png");
break;
case BARCODE_IMAGE_JPG:
@@ -179,9 +180,8 @@ int writeBufferToImageFile(
}
}
- if (!rightExtensionFlag) {
+ if (!rightExtensionFlag)
resultFilePath += expectedExtensions[0];
- }
cv::Mat image(symbol->bitmap_height, symbol->bitmap_width, CV_8UC3, symbol->bitmap);
cv::resize(image, image, cv::Size(imageWidth, imageHeight), 0, 0, cv::INTER_AREA);
@@ -191,7 +191,7 @@ int writeBufferToImageFile(
if (BARCODE_ERROR_NONE != error) {
LOGE("Write barcode image to file %s operation failed.",
- resultFilePath.c_str());
+ resultFilePath.c_str());
return error;
}
@@ -244,11 +244,11 @@ int BarcodeGenerator::generateBarcodeToImage(
imageFormat,
imageWidth,
imageHeight);
- if (error != BARCODE_ERROR_NONE) {
+ if (error != BARCODE_ERROR_NONE)
LOGE("Barcode [%s] file write fail, clean memory", imageFileName.c_str());
- } else {
- LOGI("Barcode image [%s] is successfully generated, clean memory", imageFileName.c_str());
- }
+ else
+ LOGI("Barcode image [%s] is successfully generated, clean memory",
+ imageFileName.c_str());
ZBarcode_Delete(symbol);
@@ -298,7 +298,8 @@ int BarcodeGenerator::generateBarcodeToBuffer(
*imageWidth = symbol->bitmap_width;
*imageHeight = symbol->bitmap_height;
*imageChannels = 3;
- const unsigned int imageBufferSize = (*imageWidth) * (*imageHeight) * (*imageChannels);
+ const unsigned int imageBufferSize = (*imageWidth) * (*imageHeight) *
+ (*imageChannels);
*imageBuffer = new unsigned char[imageBufferSize];
memmove(*imageBuffer, symbol->bitmap, imageBufferSize);
diff --git a/mv_barcode/barcode_generator/src/mv_barcode_generate_open.cpp b/mv_barcode/barcode_generator/src/mv_barcode_generate_open.cpp
index 493f68e9..0663866b 100644
--- a/mv_barcode/barcode_generator/src/mv_barcode_generate_open.cpp
+++ b/mv_barcode/barcode_generator/src/mv_barcode_generate_open.cpp
@@ -27,13 +27,15 @@
using namespace MediaVision::Barcode;
namespace {
+
int alphanumToUpper(std::string& strToTransform)
{
std::string tempString = strToTransform;
std::transform(tempString.begin(), tempString.end(),
- tempString.begin(), ::toupper);
+ tempString.begin(), ::toupper);
- if (std::string::npos != tempString.find_first_not_of("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ $%*+-./:")) {
+ if (std::string::npos != tempString.find_first_not_of(
+ "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ $%*+-./:")) {
LOGE("Barcode message can't be converted according to support "
"alphanumeric (0..9, A..Z, space, $, %, *, +, -, ., /, :) "
"mode: %s", strToTransform.c_str());
@@ -122,7 +124,7 @@ BarcodeQRErrorCorrectionLevel convertECC(mv_barcode_qr_ecc_e ecc)
}
LOGI("Media vision ECC level has been converted to "
- "ZInt ECC level (%i -> %i)", ecc, ecclevel);
+ "ZInt ECC level (%i -> %i)", ecc, ecclevel);
return ecclevel;
}
@@ -162,7 +164,7 @@ int convertBarcodeError(int barcodeError)
}
LOGI("ZInt error code has been converted to the media vision error code "
- "(%i -> (0x%08x))", barcodeError, mvError);
+ "(%i -> (0x%08x))", barcodeError, mvError);
return mvError;
}
@@ -182,7 +184,7 @@ BarcodeImageFormat convertImageFormat(mv_barcode_image_format_e format)
}
LOGI("Media vision image format has been converted to "
- "internal image format (%i -> %i)", format, imageFormat);
+ "internal image format (%i -> %i)", format, imageFormat);
return imageFormat;
}
@@ -210,9 +212,8 @@ int mv_barcode_generate_source_open(
if (MV_BARCODE_QR == type &&
MV_BARCODE_QR_MODE_ALPHANUMERIC == qr_enc_mode) {
error = alphanumToUpper(messageStr);
- if (BARCODE_ERROR_NONE != error) {
+ if (BARCODE_ERROR_NONE != error)
return convertBarcodeError(error);
- }
}
unsigned char *imageBuffer = NULL;
@@ -363,7 +364,7 @@ int mv_barcode_generate_image_open(
if (qr_enc_mode == MV_BARCODE_QR_MODE_NUMERIC &&
messageStr.find_first_not_of("0123456789") != std::string::npos) {
LOGE("Barcode message can't be used according to support "
- "numeric (0..9) mode: %s", messageStr.c_str());
+ "numeric (0..9) mode: %s", messageStr.c_str());
return MEDIA_VISION_ERROR_INVALID_DATA;
}
diff --git a/mv_barcode/barcode_generator_lic/include/mv_barcode_generate_lic.h b/mv_barcode/barcode_generator_lic/include/mv_barcode_generate_lic.h
index 76fec39f..3ca79747 100644
--- a/mv_barcode/barcode_generator_lic/include/mv_barcode_generate_lic.h
+++ b/mv_barcode/barcode_generator_lic/include/mv_barcode_generate_lic.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __TIZEN_MEDIAVISION_BARCODE_GENERATE_LIC_H__
-#define __TIZEN_MEDIAVISION_BARCODE_GENERATE_LIC_H__
+#ifndef __MEDIA_VISION_BARCODE_GENERATE_LIC_H__
+#define __MEDIA_VISION_BARCODE_GENERATE_LIC_H__
#include "mv_barcode_generate.h"
@@ -104,4 +104,4 @@ int mv_barcode_generate_image_lic(
}
#endif /* __cplusplus */
-#endif /* __TIZEN_MEDIAVISION_BARCODE_GENERATE_LIC_H__ */
+#endif /* __MEDIA_VISION_BARCODE_GENERATE_LIC_H__ */
diff --git a/mv_common/CMakeLists.txt b/mv_common/CMakeLists.txt
index 05e8310a..f1df1628 100644
--- a/mv_common/CMakeLists.txt
+++ b/mv_common/CMakeLists.txt
@@ -16,7 +16,7 @@ include_directories("${PROJECT_SOURCE_DIR}/include")
file(GLOB MV_COMMON_INCLUDE_LIST "${PROJECT_SOURCE_DIR}/include/*.h")
file(GLOB MV_COMMON_SRC_LIST "${PROJECT_SOURCE_DIR}/src/*.cpp")
-find_package(OpenCV REQUIRED core highgui imgproc)
+find_package(OpenCV REQUIRED core imgproc)
if(NOT OpenCV_FOUND)
message(SEND_ERROR "Failed to find OpenCV")
diff --git a/mv_common/include/EngineConfig.h b/mv_common/include/EngineConfig.h
index 7083910c..5bc586e2 100644
--- a/mv_common/include/EngineConfig.h
+++ b/mv_common/include/EngineConfig.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __ENGINECONFIG_H__
-#define __ENGINECONFIG_H__
+#ifndef __MEDIA_VISION_ENGINE_CONFIG_H__
+#define __MEDIA_VISION_ENGINE_CONFIG_H__
#include <string>
#include <map>
@@ -121,7 +121,7 @@ public:
*/
int getIntegerAttribute(const std::string& key, int *value) const;
-/**
+ /**
* @brief Gets boolean attribute value by attribute name.
*
* @since_tizen @if MOBILE 2.4 @else 3.0 @endif
@@ -176,4 +176,4 @@ private:
} /* Common */
} /* MediaVision */
-#endif /* __ENGINECONFIG_H__ */
+#endif /* __MEDIA_VISION_ENGINE_CONFIG_H__ */
diff --git a/mv_common/include/MediaSource.h b/mv_common/include/MediaSource.h
index 5e3ce581..37e873b3 100644
--- a/mv_common/include/MediaSource.h
+++ b/mv_common/include/MediaSource.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __MEDIASOURCE_H__
-#define __MEDIASOURCE_H__
+#ifndef __MEDIA_VISION_MEDIA_SOURCE_H__
+#define __MEDIA_VISION_MEDIA_SOURCE_H__
// Need for a colorspace
#include <mv_common.h>
@@ -81,18 +81,18 @@ public:
* @see MediaSource::MediaSource()
* @see MediaSource::clear()
*/
- bool fill(const unsigned char *buffer, unsigned int bufferSize, unsigned int
- width, unsigned int height, mv_colorspace_e colorspace);
+ bool fill(const unsigned char *buffer, unsigned int bufferSize,
+ unsigned int width, unsigned int height, mv_colorspace_e colorspace);
/**
* @brief Gets data buffer of the MediaSource.
*
* @since_tizen @if MOBILE 2.4 @else 3.0 @endif
- * @return Pointer to the data buffer.
+ * @return Pointer to the data buffer.
*/
unsigned char *getBuffer(void) const;
-/**
+ /**
* @brief Gets buffer size of the MediaSource.
*
* @since_tizen @if MOBILE 2.4 @else 3.0 @endif
@@ -139,4 +139,4 @@ private:
} /* Common */
} /* MediaVision */
-#endif /* __MEDIASOURCE_H__ */
+#endif /* __MEDIA_VISION_MEDIA_SOURCE_H__ */
diff --git a/mv_common/include/mv_common_c.h b/mv_common/include/mv_common_c.h
index a46a903b..22a8a74c 100644
--- a/mv_common/include/mv_common_c.h
+++ b/mv_common/include/mv_common_c.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __TIZEN_MEDIAVISION_COMMON_C_H__
-#define __TIZEN_MEDIAVISION_COMMON_C_H__
+#ifndef __MEDIA_VISION_COMMON_C_H__
+#define __MEDIA_VISION_COMMON_C_H__
#include "mv_common.h"
@@ -489,4 +489,4 @@ int mv_engine_config_foreach_supported_attribute_c(
}
#endif /* __cplusplus */
-#endif /* __TIZEN_MEDIAVISION_COMMON_C_H__ */
+#endif /* __MEDIA_VISION_COMMON_C_H__ */
diff --git a/mv_common/src/EngineConfig.cpp b/mv_common/src/EngineConfig.cpp
index 7e92ac88..f69db65b 100644
--- a/mv_common/src/EngineConfig.cpp
+++ b/mv_common/src/EngineConfig.cpp
@@ -44,10 +44,10 @@ EngineConfig::EngineConfig()
LOGE("Default Engine config file location is %s", DefConfigFilePath.c_str());
- // Force load default attributes from configuration file
+ /* Force load default attributes from configuration file */
cacheDictionaries(false);
- // Insert default attribute values into creating engine configuration
+ /* Insert default attribute values into creating engine configuration */
m_dblDict.insert(getDefaultDblDict().begin(), getDefaultDblDict().end());
m_intDict.insert(getDefaultIntDict().begin(), getDefaultIntDict().end());
m_boolDict.insert(getDefaultBoolDict().begin(), getDefaultBoolDict().end());
@@ -265,7 +265,7 @@ int EngineConfig::cacheDictionaries(bool isLazyCache, std::string configFilePath
if (!json_object_has_member(jobj, "attributes")) {
LOGW("Can't parse tests configuration file. "
- "No 'attributes' section.");
+ "No 'attributes' section.");
g_object_unref(parser);
return MEDIA_VISION_ERROR_NO_DATA;
}
@@ -275,7 +275,7 @@ int EngineConfig::cacheDictionaries(bool isLazyCache, std::string configFilePath
if (JSON_NODE_ARRAY != json_node_get_node_type(attr_node)) {
LOGW("Can't parse tests configuration file. "
- "'attributes' section isn't array.");
+ "'attributes' section isn't array.");
g_object_unref(parser);
return MEDIA_VISION_ERROR_NO_DATA;
}
@@ -296,8 +296,9 @@ int EngineConfig::cacheDictionaries(bool isLazyCache, std::string configFilePath
JsonObject *attr_obj = json_node_get_object(attr_node);
if (!json_object_has_member(attr_obj, "name") ||
- !json_object_has_member(attr_obj, "type") ||
- !json_object_has_member(attr_obj, "value")) {
+ !json_object_has_member(attr_obj, "type") ||
+ !json_object_has_member(attr_obj, "value")) {
+
LOGW("Attribute %u wasn't parsed from json file.", attrInd);
continue;
}
@@ -309,7 +310,7 @@ int EngineConfig::cacheDictionaries(bool isLazyCache, std::string configFilePath
if (NULL == nameStr || NULL == typeStr) {
LOGW("Attribute %i wasn't parsed from json file. name and/or "
- "type of the attribute are parsed as NULL.", attrInd);
+ "type of the attribute are parsed as NULL.", attrInd);
continue;
} else if (0 == strcmp("double", typeStr)) {
DefDblDict[std::string(nameStr)] =
@@ -325,7 +326,7 @@ int EngineConfig::cacheDictionaries(bool isLazyCache, std::string configFilePath
(char*)json_object_get_string_member(attr_obj, "value");
} else {
LOGW("Attribute %i:%s wasn't parsed from json file. "
- "Type isn't supported.", attrInd, nameStr);
+ "Type isn't supported.", attrInd, nameStr);
continue;
}
}
@@ -337,5 +338,5 @@ int EngineConfig::cacheDictionaries(bool isLazyCache, std::string configFilePath
return MEDIA_VISION_ERROR_NONE;
}
-} /* namespace Common */
-} /* namespace MediaVision */
+} /* Common */
+} /* MediaVision */
diff --git a/mv_common/src/MediaSource.cpp b/mv_common/src/MediaSource.cpp
index bca35c58..e213c114 100644
--- a/mv_common/src/MediaSource.cpp
+++ b/mv_common/src/MediaSource.cpp
@@ -19,9 +19,11 @@
#include <mv_private.h>
#include <cstring>
+#include <new>
namespace MediaVision {
namespace Common {
+
MediaSource::MediaSource() :
m_pBuffer(NULL),
m_bufferSize(0),
@@ -43,8 +45,8 @@ void MediaSource::clear(void)
delete[] m_pBuffer;
}
LOGD("Set defaults for media source %p : buffer = NULL; "
- "bufferSize = 0; width = 0; height = 0; "
- "colorspace = MEDIA_VISION_COLORSPACE_INVALID", this);
+ "bufferSize = 0; width = 0; height = 0; "
+ "colorspace = MEDIA_VISION_COLORSPACE_INVALID", this);
m_pBuffer = NULL;
m_bufferSize = 0;
m_width = 0;
@@ -53,39 +55,36 @@ void MediaSource::clear(void)
}
bool MediaSource::fill(const unsigned char *buffer, unsigned int bufferSize,
- unsigned int width, unsigned int height, mv_colorspace_e colorspace)
+ unsigned int width, unsigned int height, mv_colorspace_e colorspace)
{
- if (bufferSize == 0 || buffer == NULL) {
+ if (bufferSize == 0 || buffer == NULL)
return false;
- }
LOGD("Call clear() first for media source %p", this);
clear();
- try {
- LOGD("Allocate memory for buffer in media source %p", this);
- m_pBuffer = new unsigned char[bufferSize];
- } catch(...) {
+ LOGD("Allocate memory for buffer in media source %p", this);
+ m_pBuffer = new (std::nothrow)unsigned char[bufferSize];
+ if (m_pBuffer == NULL) {
LOGE("Memory allocating for buffer in media source %p failed!", this);
- m_pBuffer = NULL;
return false;
}
LOGD("Copy data from external buffer (%p) to the internal buffer (%p) of "
- "media source %p", buffer, m_pBuffer, this);
+ "media source %p", buffer, m_pBuffer, this);
std::memcpy(m_pBuffer, buffer, bufferSize);
LOGD("Assign new size of the internal buffer of media source %p. "
- "New size is %ui.", this, bufferSize);
+ "New size is %ui.", this, bufferSize);
m_bufferSize = bufferSize;
LOGD("Assign new size (%ui x %ui) of the internal buffer image for "
- "the media source %p", width, height, this);
+ "the media source %p", width, height, this);
m_width = width;
m_height = height;
LOGD("Assign new colorspace (%i) of the internal buffer image for "
- "the media source %p", colorspace, this);
+ "the media source %p", colorspace, this);
m_colorspace = colorspace;
return true;
diff --git a/mv_common/src/mv_common_c.cpp b/mv_common/src/mv_common_c.cpp
index 51e0dbb5..53d16108 100644
--- a/mv_common/src/mv_common_c.cpp
+++ b/mv_common/src/mv_common_c.cpp
@@ -21,19 +21,20 @@
#include <mv_private.h>
+#include <new>
#include <string.h>
#include <stdlib.h>
#include <media_packet.h>
int mv_create_source_c(
- mv_source_h *source_ptr)
+ mv_source_h *source_ptr)
{
- if (source_ptr == NULL) {
+ if (source_ptr == NULL)
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
LOGD("Creating media vision source");
- (*source_ptr) = ((mv_source_h)new MediaVision::Common::MediaSource());
+ (*source_ptr) = (static_cast<mv_source_h>
+ (new (std::nothrow)MediaVision::Common::MediaSource()));
if (*source_ptr == NULL) {
LOGE("Failed to create media vision source");
@@ -45,7 +46,7 @@ int mv_create_source_c(
}
int mv_destroy_source_c(
- mv_source_h source)
+ mv_source_h source)
{
if (!source) {
LOGE("Media source can't be destroyed because handle is NULL");
@@ -53,15 +54,15 @@ int mv_destroy_source_c(
}
LOGD("Destroying media vision source [%p]", source);
- delete ((MediaVision::Common::MediaSource*)source);
+ delete (static_cast<MediaVision::Common::MediaSource*>(source));
LOGD("Media vision source has been destroyed");
return MEDIA_VISION_ERROR_NONE;
}
int mv_source_fill_by_media_packet_c(
- mv_source_h source,
- media_packet_h media_packet)
+ mv_source_h source,
+ media_packet_h media_packet)
{
if (!source || !media_packet) {
LOGE("Media source can't be filled by media_packet handle because "
@@ -141,14 +142,14 @@ int mv_source_fill_by_media_packet_c(
break;
default:
LOGE("Format of the media packet buffer is not supported by media "
- "vision source (media_format_h mimetype=%i)", mimetype);
+ "vision source (media_format_h mimetype=%i)", mimetype);
return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT;
}
ret = media_packet_get_buffer_data_ptr(media_packet, (void**)&data_buffer);
if (ret != MEDIA_PACKET_ERROR_NONE) {
LOGE("media_packet_get_buffer_data_ptr() failed, mv_source_h fill skipped");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
ret = media_packet_get_buffer_size(media_packet, &buffer_size);
@@ -157,8 +158,9 @@ int mv_source_fill_by_media_packet_c(
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
- if (!((MediaVision::Common::MediaSource*)source)->fill(data_buffer, buffer_size,
- (unsigned int)image_width, (unsigned int)image_height, image_colorspace)) {
+ if (!(static_cast<MediaVision::Common::MediaSource*>(source))->fill(data_buffer,
+ buffer_size, static_cast<unsigned int>(image_width),
+ static_cast<unsigned int>(image_height), image_colorspace)) {
LOGE("mv_source_h filling from media_packet_h failed");
return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
}
@@ -183,8 +185,8 @@ int mv_source_fill_by_buffer_c(
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
- if (!((MediaVision::Common::MediaSource*)source)->fill(data_buffer,
- buffer_size, image_width, image_height, image_colorspace)) {
+ if (!(static_cast<MediaVision::Common::MediaSource*>(source))->fill(data_buffer,
+ buffer_size, image_width, image_height, image_colorspace)) {
LOGE("mv_source_h filling from buffer failed");
return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
}
@@ -194,7 +196,7 @@ int mv_source_fill_by_buffer_c(
}
int mv_source_clear_c(
- mv_source_h source)
+ mv_source_h source)
{
if (!source) {
LOGE("Media source can't be cleared because source handle is NULL");
@@ -202,16 +204,16 @@ int mv_source_clear_c(
}
LOGD("Clear media vision source [%p]", source);
- ((MediaVision::Common::MediaSource*)source)->clear();
+ (static_cast<MediaVision::Common::MediaSource*>(source))->clear();
LOGD("Media vision source [%p] has been cleared", source);
return MEDIA_VISION_ERROR_NONE;
}
int mv_source_get_buffer_c(
- mv_source_h source,
- unsigned char **buffer,
- unsigned int *size)
+ mv_source_h source,
+ unsigned char **buffer,
+ unsigned int *size)
{
if (!source) {
LOGE("Impossible to get buffer for NULL mv_source_h handle");
@@ -219,16 +221,16 @@ int mv_source_get_buffer_c(
}
LOGD("Get media vision source [%p] buffer and buffer size to be returned", source);
- *buffer = ((MediaVision::Common::MediaSource*)source)->getBuffer();
- *size = ((MediaVision::Common::MediaSource*)source)->getBufferSize();
+ *buffer = (static_cast<MediaVision::Common::MediaSource*>(source))->getBuffer();
+ *size = (static_cast<MediaVision::Common::MediaSource*>(source))->getBufferSize();
LOGD("Media vision source [%p] buffer (%p) and buffer size (%ui) has been returned", source, buffer, *size);
return MEDIA_VISION_ERROR_NONE;
}
int mv_source_get_height_c(
- mv_source_h source,
- unsigned int *height)
+ mv_source_h source,
+ unsigned int *height)
{
if (!source) {
LOGE("Impossible to get height for NULL mv_source_h handle");
@@ -236,15 +238,15 @@ int mv_source_get_height_c(
}
LOGD("Get media vision source [%p] height to be returned", source);
- *height = ((MediaVision::Common::MediaSource*)source)->getHeight();
+ *height = (static_cast<MediaVision::Common::MediaSource*>(source))->getHeight();
LOGD("Media vision source [%p] height (%ui) has been returned", source, *height);
return MEDIA_VISION_ERROR_NONE;
}
int mv_source_get_width_c(
- mv_source_h source,
- unsigned int *width)
+ mv_source_h source,
+ unsigned int *width)
{
if (!source) {
LOGE("Impossible to get width for NULL mv_source_h handle");
@@ -252,30 +254,30 @@ int mv_source_get_width_c(
}
LOGD("Get media vision source [%p] width to be returned", source);
- *width = ((MediaVision::Common::MediaSource*)source)->getWidth();
+ *width = (static_cast<MediaVision::Common::MediaSource*>(source))->getWidth();
LOGD("Media vision source [%p] width (%ui) has been returned", source, *width);
return MEDIA_VISION_ERROR_NONE;
}
int mv_source_get_colorspace_c(
- mv_source_h source,
- mv_colorspace_e *colorspace)
+ mv_source_h source,
+ mv_colorspace_e *colorspace)
{
if (!source) {
LOGE("Impossible to get colorspace for NULL mv_source_h handle");
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
- LOGD("Get media vision source [%p] colorspace to be returned", source);
- *colorspace = ((MediaVision::Common::MediaSource*)source)->getColorspace();
- LOGD("Media vision source [%p] colorspace (%i) has been returned", source, *colorspace);
+ LOGD("Get media vision source [%p] colorspace to be returned", source);
+ *colorspace = (static_cast<MediaVision::Common::MediaSource*>(source))->getColorspace();
+ LOGD("Media vision source [%p] colorspace (%i) has been returned", source, *colorspace);
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int mv_create_engine_config_c(
- mv_engine_config_h *engine_cfg)
+ mv_engine_config_h *engine_cfg)
{
if (engine_cfg == NULL) {
LOGE("Impossible to create mv_engine_config_h handle");
@@ -283,13 +285,13 @@ int mv_create_engine_config_c(
}
LOGD("Creating media vision engine config");
- (*engine_cfg) = ((mv_engine_config_h)new MediaVision::Common::EngineConfig());
- LOGD("Media vision engine config [%p] has been created", *engine_cfg);
-
+ (*engine_cfg) = static_cast<mv_engine_config_h>
+ (new (std::nothrow) MediaVision::Common::EngineConfig());
if (*engine_cfg == NULL) {
LOGE("Failed to create mv_engine_config_h handle");
return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
}
+ LOGD("Media vision engine config [%p] has been created", *engine_cfg);
return MEDIA_VISION_ERROR_NONE;
}
@@ -303,7 +305,7 @@ int mv_destroy_engine_config_c(
}
LOGD("Destroying media vision engine config [%p]", engine_cfg);
- delete ((MediaVision::Common::EngineConfig*)engine_cfg);
+ delete (static_cast<MediaVision::Common::EngineConfig*>(engine_cfg));
LOGD("Media vision engine config has been destroyed");
return MEDIA_VISION_ERROR_NONE;
@@ -317,11 +319,11 @@ int mv_engine_config_set_double_attribute_c(
if (!engine_cfg || name == NULL) {
LOGE("Impossible to set attribute. One of the required parameters is "
"NULL. engine_cfg = %p; name = %p;",
- engine_cfg, name);
+ engine_cfg, name);
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
- int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->setAttribute(
+ int ret = (static_cast<MediaVision::Common::EngineConfig*>(engine_cfg))->setAttribute(
std::string(name), value);
if (ret != MEDIA_VISION_ERROR_NONE) {
@@ -346,7 +348,7 @@ int mv_engine_config_set_int_attribute_c(
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
- int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->setAttribute(
+ int ret = (static_cast<MediaVision::Common::EngineConfig*>(engine_cfg))->setAttribute(
std::string(name), value);
if (ret != MEDIA_VISION_ERROR_NONE) {
@@ -372,7 +374,7 @@ int mv_engine_config_set_bool_attribute_c(
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
- int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->setAttribute(
+ int ret = (static_cast<MediaVision::Common::EngineConfig*>(engine_cfg))->setAttribute(
std::string(name), value);
if (ret != MEDIA_VISION_ERROR_NONE) {
@@ -398,7 +400,7 @@ int mv_engine_config_set_string_attribute_c(
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
- int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->setAttribute(
+ int ret = (static_cast<MediaVision::Common::EngineConfig*>(engine_cfg))->setAttribute(
std::string(name), std::string(value));
if (ret != MEDIA_VISION_ERROR_NONE) {
@@ -423,7 +425,7 @@ int mv_engine_config_get_double_attribute_c(
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
- int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->getDoubleAttribute(
+ int ret = (static_cast<MediaVision::Common::EngineConfig*>(engine_cfg))->getDoubleAttribute(
std::string(name), value);
if (ret != MEDIA_VISION_ERROR_NONE) {
@@ -449,7 +451,7 @@ int mv_engine_config_get_int_attribute_c(
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
- int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->getIntegerAttribute(
+ int ret = (static_cast<MediaVision::Common::EngineConfig*>(engine_cfg))->getIntegerAttribute(
std::string(name), value);
if (ret != MEDIA_VISION_ERROR_NONE) {
@@ -475,7 +477,7 @@ int mv_engine_config_get_bool_attribute_c(
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
- int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->getBooleanAttribute(
+ int ret = (static_cast<MediaVision::Common::EngineConfig*>(engine_cfg))->getBooleanAttribute(
std::string(name), value);
if (ret != MEDIA_VISION_ERROR_NONE) {
@@ -502,7 +504,7 @@ int mv_engine_config_get_string_attribute_c(
}
std::string attributeValue;
- int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->getStringAttribute(
+ int ret = (static_cast<MediaVision::Common::EngineConfig*>(engine_cfg))->getStringAttribute(
std::string(name), &attributeValue);
if (ret != MEDIA_VISION_ERROR_NONE) {
@@ -513,7 +515,11 @@ int mv_engine_config_get_string_attribute_c(
LOGD("Convert string to char*");
int stringSize = attributeValue.size();
- (*value) = (char *)malloc(sizeof(char) * (stringSize + 1));
+ (*value) = (char*)malloc(sizeof(char) * (stringSize + 1));
+ if ((*value) == NULL) {
+ LOGE("Failed to convert string to char*");
+ return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ }
if (attributeValue.copy(*value, stringSize) != attributeValue.size()) {
LOGE("Conversion from string to char* failed");
@@ -529,8 +535,8 @@ int mv_engine_config_get_string_attribute_c(
}
int mv_engine_config_foreach_supported_attribute_c(
- mv_supported_attribute_cb callback,
- void *user_data)
+ mv_supported_attribute_cb callback,
+ void *user_data)
{
if (NULL == callback) {
LOGE("Impossible to traverse supported by Media Vision engine "
diff --git a/mv_face/face/include/FaceDetector.h b/mv_face/face/include/FaceDetector.h
index b9b28883..48d17305 100644
--- a/mv_face/face/include/FaceDetector.h
+++ b/mv_face/face/include/FaceDetector.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __FACEDETECTOR_H__
-#define __FACEDETECTOR_H__
+#ifndef __MEDIA_VISION_FACE_DETECTOR_H__
+#define __MEDIA_VISION_FACE_DETECTOR_H__
#include <opencv/cv.h>
#include <vector>
@@ -103,4 +103,4 @@ private:
} /* Face */
} /* MediaVision */
-#endif /* __FACEDETECTOR_H__ */
+#endif /* __MEDIA_VISION_FACE_DETECTOR_H__ */
diff --git a/mv_face/face/include/FaceExpressionRecognizer.h b/mv_face/face/include/FaceExpressionRecognizer.h
index 284e7d91..b8948338 100644
--- a/mv_face/face/include/FaceExpressionRecognizer.h
+++ b/mv_face/face/include/FaceExpressionRecognizer.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __FACEEXPRESSIONRECOGNIZER_H__
-#define __FACEEXPRESSIONRECOGNIZER_H__
+#ifndef __MEDIA_VISION_FACE_EXPRESSION_RECOGNIZER_H__
+#define __MEDIA_VISION_FACE_EXPRESSION_RECOGNIZER_H__
#include "mv_common_c.h"
#include "mv_face_open.h"
@@ -23,7 +23,7 @@
#include <string>
namespace cv {
- class Mat;
+ class Mat;
}
/**
@@ -40,8 +40,8 @@ namespace Face {
* @since_tizen 3.0
*/
struct FaceRecognizerConfig {
- FaceRecognizerConfig();
- std::string mHaarcascadeFilepath;
+ FaceRecognizerConfig();
+ std::string mHaarcascadeFilepath;
};
/**
@@ -76,4 +76,4 @@ public:
} /* Face */
} /* MediaVision */
-#endif /* __FACEEXPRESSIONRECOGNIZER_H__ */
+#endif /* __MEDIA_VISION_FACE_EXPRESSION_RECOGNIZER_H__ */
diff --git a/mv_face/face/include/FaceEyeCondition.h b/mv_face/face/include/FaceEyeCondition.h
index 78c09927..7c1ec363 100644
--- a/mv_face/face/include/FaceEyeCondition.h
+++ b/mv_face/face/include/FaceEyeCondition.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __FACEEYECONDITION_H__
-#define __FACEEYECONDITION_H__
+#ifndef __MEDIA_VISION_FACE_EYE_CONDITION_H__
+#define __MEDIA_VISION_FACE_EYE_CONDITION_H__
#include <mv_common_c.h>
#include <mv_face.h>
@@ -67,4 +67,4 @@ private:
} /* Face */
} /* MediaVision */
-#endif /* __FACEEYECONDITION_H__ */
+#endif /* __MEDIA_VISION_FACE_EYE_CONDITION_H__ */
diff --git a/mv_face/face/include/FaceRecognitionModel.h b/mv_face/face/include/FaceRecognitionModel.h
index 15232e17..f89c8466 100644
--- a/mv_face/face/include/FaceRecognitionModel.h
+++ b/mv_face/face/include/FaceRecognitionModel.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __FACERECOGNITIONMODEL_H__
-#define __FACERECOGNITIONMODEL_H__
+#ifndef __MEDIA_VISION_FACE_RECOGNITION_MODEL_H__
+#define __MEDIA_VISION_FACE_RECOGNITION_MODEL_H__
#include "FaceUtil.h"
@@ -108,7 +108,7 @@ struct FaceRecognitionResults {
*/
class FaceRecognitionModel {
public:
-/**
+ /**
* @brief Creates a FaceRecognitionModel class instance.
*
* @since_tizen 3.0
@@ -278,4 +278,4 @@ private:
} /* Face */
} /* MediaVision */
-#endif /* __FACERECOGNITIONMODEL_H__ */
+#endif /* __MEDIA_VISION_FACE_RECOGNITION_MODEL_H__ */
diff --git a/mv_face/face/include/FaceTrackingModel.h b/mv_face/face/include/FaceTrackingModel.h
index 1fb6ccfd..8c73705b 100644
--- a/mv_face/face/include/FaceTrackingModel.h
+++ b/mv_face/face/include/FaceTrackingModel.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __FACETRACKINGMODEL_H__
-#define __FACETRACKINGMODEL_H__
+#ifndef __MEDIA_VISION_FACE_TRACKING_MODEL_H__
+#define __MEDIA_VISION_FACE_TRACKING_MODEL_H__
#include "TrackerMedianFlow.h"
@@ -165,4 +165,4 @@ private:
} /* Face */
} /* MediaVision */
-#endif /* __FACETRACKINGMODEL_H__ */
+#endif /* __MEDIA_VISION_FACE_TRACKING_MODEL_H__ */
diff --git a/mv_face/face/include/FaceUtil.h b/mv_face/face/include/FaceUtil.h
index a6e19137..65c58969 100644
--- a/mv_face/face/include/FaceUtil.h
+++ b/mv_face/face/include/FaceUtil.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __FACEUTIL_H__
-#define __FACEUTIL_H__
+#ifndef __MEDIA_VISION_FACE_UTIL_H__
+#define __MEDIA_VISION_FACE_UTIL_H__
#include <opencv/cv.h>
@@ -67,4 +67,4 @@ int convertSourceMV2GrayCV(mv_source_h mvSource, cv::Mat& cvSource);
} /* Face */
} /* MediaVision */
-#endif /* __FACEUTIL_H__ */
+#endif /* __MEDIA_VISION_FACE_UTIL_H__ */
diff --git a/mv_face/face/include/TrackerMedianFlow.h b/mv_face/face/include/TrackerMedianFlow.h
index 44b46c7c..e8bed92d 100644
--- a/mv_face/face/include/TrackerMedianFlow.h
+++ b/mv_face/face/include/TrackerMedianFlow.h
@@ -39,13 +39,12 @@
//
//M*/
-#ifndef __TRACKERMEDIANFLOW_H__
-#define __TRACKERMEDIANFLOW_H__
+#ifndef __MEDIA_VISION_TRACKER_MEDIAN_FLOW_H__
+#define __MEDIA_VISION_TRACKER_MEDIAN_FLOW_H__
#include "opencv2/core/core.hpp"
-namespace cv
-{
+namespace cv {
class TrackerMedianFlowModel;
@@ -97,7 +96,7 @@ public:
void write(FileStorage& fs) const;
private:
- bool isInit;
+ bool m_isInit;
bool medianFlowImpl(Mat oldImage, Mat newImage, Rect_<float>& oldBox);
@@ -126,10 +125,10 @@ private:
inline float l2distance(Point2f p1, Point2f p2);
- Params params; /**< Parameters used during tracking, see
+ Params m_params; /**< Parameters used during tracking, see
@ref TrackerMedianFlow::Params */
- TermCriteria termcrit; /**< Terminating criteria for OpenCV
+ TermCriteria m_termcrit; /**< Terminating criteria for OpenCV
Lucas–Kanade optical flow algorithm used
during tracking */
@@ -148,4 +147,4 @@ private:
} /* namespace cv */
-#endif /* __TRACKERMEDIANFLOW_H__ */
+#endif /* __MEDIA_VISION_TRACKER_MEDIAN_FLOW_H__ */
diff --git a/mv_face/face/include/mv_face_open.h b/mv_face/face/include/mv_face_open.h
index 8346b4f0..bc8b054f 100644
--- a/mv_face/face/include/mv_face_open.h
+++ b/mv_face/face/include/mv_face_open.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __TIZEN_MEDIAVISION_FACE_OPEN_H__
-#define __TIZEN_MEDIAVISION_FACE_OPEN_H__
+#ifndef __MEDIA_VISION_FACE_OPEN_H__
+#define __MEDIA_VISION_FACE_OPEN_H__
#include "mv_face.h"
@@ -228,7 +228,7 @@ int mv_face_track_open(
* isn't supported
* @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
*
- * @pre Create a source handle by calling @ref mv_create_source_open()
+ * @pre Create a source handle by calling @ref mv_create_source()
*
* @see mv_face_eye_condition_recognized_cb
*/
@@ -268,7 +268,7 @@ int mv_face_eye_condition_recognize_open(
* isn't supported
* @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
*
- * @pre Create a source handle by calling @ref mv_create_source_open()
+ * @pre Create a source handle by calling @ref mv_create_source()
* @pre Create a face engine configuration handle by calling @ref mv_create_engine_config()
*
* @see mv_face_facial_expression_recognized_cb
@@ -788,4 +788,4 @@ int mv_face_tracking_model_load_open(
}
#endif /* __cplusplus */
-#endif /* __TIZEN_MEDIAVISION_FACE_OPEN_H__ */
+#endif /* __MEDIA_VISION_FACE_OPEN_H__ */
diff --git a/mv_face/face/src/FaceDetector.cpp b/mv_face/face/src/FaceDetector.cpp
index d9b4fe5b..f80983d7 100644
--- a/mv_face/face/src/FaceDetector.cpp
+++ b/mv_face/face/src/FaceDetector.cpp
@@ -18,12 +18,13 @@
namespace MediaVision {
namespace Face {
+
FaceDetector::FaceDetector() :
m_faceCascade(),
m_haarcascadeFilepath(),
m_faceCascadeIsLoaded(false)
{
- ; /* NULL */
+ ; /* NULL */
}
FaceDetector::~FaceDetector()
@@ -37,9 +38,8 @@ bool FaceDetector::detectFaces(
const cv::Size& minSize,
std::vector<cv::Rect>& faceLocations)
{
- if (!m_faceCascadeIsLoaded) {
+ if (!m_faceCascadeIsLoaded)
return false;
- }
faceLocations.clear();
@@ -47,14 +47,16 @@ bool FaceDetector::detectFaces(
bool roiIsUsed = false;
if (roi.x >= 0 && roi.y >= 0 && roi.width > 0 && roi.height > 0 &&
- (roi.x + roi.width) <= image.cols &&
- (roi.y + roi.height) <= image.rows) {
+ (roi.x + roi.width) <= image.cols &&
+ (roi.y + roi.height) <= image.rows) {
+
intrestingRegion = intrestingRegion(roi);
roiIsUsed = true;
}
if (minSize.width > 0 && minSize.height > 0 &&
- minSize.width <= image.cols && minSize.height <= image.rows) {
+ minSize.width <= image.cols && minSize.height <= image.rows) {
+
m_faceCascade.detectMultiScale(
intrestingRegion,
faceLocations,
@@ -62,7 +64,9 @@ bool FaceDetector::detectFaces(
3,
0,
minSize);
+
} else {
+
m_faceCascade.detectMultiScale(intrestingRegion, faceLocations);
}
@@ -80,10 +84,11 @@ bool FaceDetector::detectFaces(
bool FaceDetector::loadHaarcascade(const std::string& haarcascadeFilepath)
{
if (!m_faceCascadeIsLoaded ||
- m_haarcascadeFilepath != haarcascadeFilepath) {
- if (!(m_faceCascadeIsLoaded = m_faceCascade.load(haarcascadeFilepath))) {
+ m_haarcascadeFilepath != haarcascadeFilepath) {
+
+ if (!(m_faceCascadeIsLoaded = m_faceCascade.load(haarcascadeFilepath)))
return false;
- }
+
m_haarcascadeFilepath = haarcascadeFilepath;
}
diff --git a/mv_face/face/src/FaceExpressionRecognizer.cpp b/mv_face/face/src/FaceExpressionRecognizer.cpp
index e32ddc09..a1f7b0b7 100644
--- a/mv_face/face/src/FaceExpressionRecognizer.cpp
+++ b/mv_face/face/src/FaceExpressionRecognizer.cpp
@@ -24,25 +24,25 @@
namespace MediaVision {
namespace Face {
-static const int MinDetectionWidth = 30;
-static const int MinDetectionHeight = 30;
+
+static const int MIN_DETECTION_WIDTH = 30;
+static const int MIN_DETECTION_HEIGHT = 30;
FaceRecognizerConfig::FaceRecognizerConfig() :
- mHaarcascadeFilepath(
+ mHaarcascadeFilepath(
"/usr/share/OpenCV/haarcascades/haarcascade_smile.xml")
{
; /* NULL */
}
int FaceExpressionRecognizer::recognizeFaceExpression(
- const cv::Mat& grayImage,
- const mv_rectangle_s& faceLocation,
- mv_face_facial_expression_e *faceExpression,
- const FaceRecognizerConfig& config)
+ const cv::Mat& grayImage,
+ const mv_rectangle_s& faceLocation,
+ mv_face_facial_expression_e *faceExpression,
+ const FaceRecognizerConfig& config)
{
- if (NULL == faceExpression) {
+ if (NULL == faceExpression)
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
const int smileRectHeight = cvRound((float)faceLocation.height / 2);
@@ -52,18 +52,16 @@ int FaceExpressionRecognizer::recognizeFaceExpression(
faceLocation.width,
smileRectHeight);
- if (roi.width < MinDetectionWidth ||
- roi.height < MinDetectionHeight) {
+ if (roi.width < MIN_DETECTION_WIDTH ||
+ roi.height < MIN_DETECTION_HEIGHT) {
(*faceExpression) = MV_FACE_UNKNOWN;
return MEDIA_VISION_ERROR_NONE;
}
- if (0 > roi.x ||
- 0 > roi.y ||
- roi.x + roi.width > grayImage.cols ||
- roi.y + roi.height > grayImage.rows) {
+ if (0 > roi.x || 0 > roi.y ||
+ roi.x + roi.width > grayImage.cols ||
+ roi.y + roi.height > grayImage.rows)
return MEDIA_VISION_ERROR_INVALID_OPERATION;
- }
const cv::Mat mouthImg(grayImage, roi);
@@ -72,22 +70,21 @@ int FaceExpressionRecognizer::recognizeFaceExpression(
cv::CascadeClassifier smileClassifier;
smileClassifier.load(config.mHaarcascadeFilepath);
smileClassifier.detectMultiScale(
- mouthImg,
- areas,
- 1.1,
- 80,
- cv::CASCADE_FIND_BIGGEST_OBJECT |
- cv::CASCADE_DO_CANNY_PRUNING |
- cv::CASCADE_SCALE_IMAGE,
- cv::Size(MinDetectionWidth, MinDetectionHeight));
+ mouthImg,
+ areas,
+ 1.1,
+ 80,
+ cv::CASCADE_FIND_BIGGEST_OBJECT |
+ cv::CASCADE_DO_CANNY_PRUNING |
+ cv::CASCADE_SCALE_IMAGE,
+ cv::Size(MIN_DETECTION_WIDTH, MIN_DETECTION_HEIGHT));
(*faceExpression) = MV_FACE_UNKNOWN;
const size_t smilesFoundSize = areas.size();
- if (smilesFoundSize == 0) {
+ if (smilesFoundSize == 0)
(*faceExpression) = MV_FACE_NEUTRAL;
- } else if (smilesFoundSize == 1) {
+ else if (smilesFoundSize == 1)
(*faceExpression) = MV_FACE_SMILE;
- }
return MEDIA_VISION_ERROR_NONE;
}
diff --git a/mv_face/face/src/FaceEyeCondition.cpp b/mv_face/face/src/FaceEyeCondition.cpp
index 22039405..ad4fab66 100644
--- a/mv_face/face/src/FaceEyeCondition.cpp
+++ b/mv_face/face/src/FaceEyeCondition.cpp
@@ -91,7 +91,7 @@ int FaceEyeCondition::isEyeOpen(const cv::Mat& eye)
cv::Mat eyeEqualized;
cv::equalizeHist(eye, eyeEqualized);
- const int thresold = 8;
+ const int thresold = 20;
eyeEqualized = eyeEqualized < thresold;
std::vector<std::vector<cv::Point> > contours;
@@ -106,9 +106,8 @@ int FaceEyeCondition::isEyeOpen(const cv::Mat& eye)
const size_t contoursSize = contours.size();
- if (!contoursSize) {
+ if (!contoursSize)
return MV_FACE_EYES_NOT_FOUND;
- }
const int xCenter = eyeEqualized.cols / 2;
const int yCenter = eyeEqualized.rows / 2;
@@ -127,20 +126,19 @@ int FaceEyeCondition::isEyeOpen(const cv::Mat& eye)
const double currentArea = cv::contourArea(contours[i]);
if (boundThresold.contains(currentRect.br()) &&
- boundThresold.contains(currentRect.tl()) &&
- currentArea > areaRatio * boundThresold.area() &&
- currentRect.width < widthHeightRatio * currentRect.height) {
+ boundThresold.contains(currentRect.tl()) &&
+ currentArea > areaRatio * boundThresold.area() &&
+ currentRect.width < widthHeightRatio * currentRect.height)
isOpen = MV_FACE_EYES_OPEN;
- } else if (boundThresold.contains(currentRect.br()) &&
- boundThresold.contains(currentRect.tl()) &&
- currentArea > areaSmallRatio * boundThresold.area()) {
+ else if (boundThresold.contains(currentRect.br()) &&
+ boundThresold.contains(currentRect.tl()) &&
+ currentArea > areaSmallRatio * boundThresold.area())
++rectanglesInsideCount;
- }
+
}
- if (rectanglesInsideCount > 8u) {
+ if (rectanglesInsideCount > 8u)
isOpen = MV_FACE_EYES_CLOSED;
- }
return isOpen;
}
@@ -192,13 +190,12 @@ int FaceEyeCondition::recognizeEyeCondition(
const int isOpenRight = isEyeOpen(rightEye);
- if (isOpenRight == MV_FACE_EYES_OPEN) {
+ if (isOpenRight == MV_FACE_EYES_OPEN)
*eyeCondition = MV_FACE_EYES_OPEN;
- } else if (isOpenRight == MV_FACE_EYES_CLOSED) {
+ else if (isOpenRight == MV_FACE_EYES_CLOSED)
*eyeCondition = MV_FACE_EYES_CLOSED;
- } else {
+ else
*eyeCondition = MV_FACE_EYES_NOT_FOUND;
- }
return MEDIA_VISION_ERROR_NONE;
}
diff --git a/mv_face/face/src/FaceRecognitionModel.cpp b/mv_face/face/src/FaceRecognitionModel.cpp
index b8ae24b4..a0754a77 100644
--- a/mv_face/face/src/FaceRecognitionModel.cpp
+++ b/mv_face/face/src/FaceRecognitionModel.cpp
@@ -41,12 +41,11 @@ int CopyOpenCVAlgorithmParameters(const cv::Ptr<cv::FaceRecognizer>& srcAlg,
srcAlg->save(tempPath);
dstAlg->load(tempPath);
- if (0 != remove(tempPath)) {
+ if (0 != remove(tempPath))
LOGW("Error removing serialized FaceRecognizer in %s", tempPath);
- }
/* todo: consider to uncomment this lines if OpenCV will support deep
- / copy of AlgorithmInfo objects: */
+ copy of AlgorithmInfo objects: */
/*std::vector<std::string> paramNames;
srcAlg->getParams(paramNames);
@@ -101,10 +100,11 @@ void ParseOpenCVLabels(
std::set<int>& outLabels)
{
if (!recognizer.empty()) {
+
cv::Mat labels = recognizer->getMat("labels");
- for (int i = 0; i < labels.rows; ++i) {
+
+ for (int i = 0; i < labels.rows; ++i)
outLabels.insert(labels.at<int>(i, 0));
- }
}
}
@@ -135,7 +135,7 @@ FaceRecognitionResults::FaceRecognitionResults() :
bool FaceRecognitionModelConfig::operator!=(
const FaceRecognitionModelConfig& other) const
{
- return mModelType != other.mModelType ||
+ return mModelType != other.mModelType ||
mNumComponents != other.mNumComponents ||
mThreshold != other.mThreshold ||
mRadius != other.mRadius ||
@@ -160,9 +160,8 @@ FaceRecognitionModel::FaceRecognitionModel(const FaceRecognitionModel& origin) :
m_recognizer(CreateRecognitionAlgorithm(origin.m_learnAlgorithmConfig)),
m_learnedLabels(origin.m_learnedLabels)
{
- if (!m_recognizer.empty()) {
+ if (!m_recognizer.empty())
CopyOpenCVAlgorithmParameters(origin.m_recognizer, m_recognizer);
- }
}
FaceRecognitionModel& FaceRecognitionModel::operator=(
@@ -175,9 +174,8 @@ FaceRecognitionModel& FaceRecognitionModel::operator=(
m_recognizer = CreateRecognitionAlgorithm(m_learnAlgorithmConfig);
m_learnedLabels = copy.m_learnedLabels;
- if (!m_recognizer.empty()) {
+ if (!m_recognizer.empty())
CopyOpenCVAlgorithmParameters(copy.m_recognizer, m_recognizer);
- }
}
return *this;
@@ -305,7 +303,7 @@ int FaceRecognitionModel::load(const std::string& fileName)
} else {
tempConfig = FaceRecognitionModelConfig();
LOGE("Failed to load face recognition model from file. File is in "
- "unsupported format");
+ "unsupported format");
storage.release();
@@ -353,7 +351,7 @@ int FaceRecognitionModel::resetFaceExamples(int faceLabel)
{
if (1 > m_faceSamples.erase(faceLabel)) {
LOGD("Failed to remove face image examples for label %i. "
- "No such examples", faceLabel);
+ "No such examples", faceLabel);
return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE;
}
@@ -385,25 +383,22 @@ int FaceRecognitionModel::learn(const FaceRecognitionModelConfig& config)
bool isIncremental = false;
bool isUnisize = false;
- if (MEDIA_VISION_FACE_MODEL_TYPE_LBPH == config.mModelType) {
+ if (MEDIA_VISION_FACE_MODEL_TYPE_LBPH == config.mModelType)
isIncremental = true;
- }
if (MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES == config.mModelType ||
- MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES == config.mModelType) {
+ MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES == config.mModelType)
isUnisize = true;
- }
std::vector<cv::Mat> samples;
std::vector<int> labels;
std::set<int> learnedLabels;
- if (isIncremental) {
+ if (isIncremental)
learnedLabels.insert(m_learnedLabels.begin(), m_learnedLabels.end());
- }
- std::map<int, std::vector<cv::Mat> >::const_iterator it =
- m_faceSamples.begin();
+ std::map<int, std::vector<cv::Mat> >::const_iterator it = m_faceSamples.begin();
+
for (; it != m_faceSamples.end(); ++it) {
const size_t faceClassSamplesSize = it->second.size();
labels.insert(labels.end(), faceClassSamplesSize, it->first);
@@ -417,9 +412,9 @@ int FaceRecognitionModel::learn(const FaceRecognitionModelConfig& config)
for (size_t sampleInd = 0; sampleInd < faceClassSamplesSize; ++sampleInd) {
cv::Mat resizedSample;
cv::resize(it->second[sampleInd],
- resizedSample,
- cv::Size(config.mImgWidth, config.mImgHeight),
- 0.0, 0.0, cv::INTER_CUBIC);
+ resizedSample,
+ cv::Size(config.mImgWidth, config.mImgHeight),
+ 0.0, 0.0, cv::INTER_CUBIC);
samples.push_back(resizedSample);
}
}
@@ -430,15 +425,14 @@ int FaceRecognitionModel::learn(const FaceRecognitionModelConfig& config)
if (0 != samplesSize && samplesSize == labelsSize) {
LOGD("Start to learn the model for %u samples and %u labels",
- samplesSize, labelsSize);
+ samplesSize, labelsSize);
- if (m_learnAlgorithmConfig != config || m_recognizer.empty()) {
+ if (m_learnAlgorithmConfig != config || m_recognizer.empty())
m_recognizer = CreateRecognitionAlgorithm(config);
- }
if (m_recognizer.empty()) {
LOGE("Can't create recognition algorithm for recognition model. "
- "Configuration is not supported by any of known algorithms.");
+ "Configuration is not supported by any of known algorithms.");
return MEDIA_VISION_ERROR_NOT_SUPPORTED;
}
diff --git a/mv_face/face/src/FaceTrackingModel.cpp b/mv_face/face/src/FaceTrackingModel.cpp
index cebbb696..98df8a42 100644
--- a/mv_face/face/src/FaceTrackingModel.cpp
+++ b/mv_face/face/src/FaceTrackingModel.cpp
@@ -41,9 +41,8 @@ FaceTrackingModel::FaceTrackingModel(const FaceTrackingModel& origin) :
m_canTrack(origin.m_canTrack),
m_tracker(new cv::TrackerMedianFlow())
{
- if (!origin.m_tracker.empty()) {
+ if (!origin.m_tracker.empty())
origin.m_tracker->copyTo(*(m_tracker.obj));
- }
}
FaceTrackingModel& FaceTrackingModel::operator=(const FaceTrackingModel& copy)
@@ -51,9 +50,8 @@ FaceTrackingModel& FaceTrackingModel::operator=(const FaceTrackingModel& copy)
if (this != &copy) {
m_canTrack = copy.m_canTrack;
m_tracker = cv::Ptr<cv::TrackerMedianFlow>(new cv::TrackerMedianFlow());
- if (!copy.m_tracker.empty()) {
+ if (!copy.m_tracker.empty())
copy.m_tracker->copyTo(*(m_tracker.obj));
- }
}
return *this;
@@ -137,8 +135,8 @@ int FaceTrackingModel::prepare(const cv::Mat& image)
{
if (m_tracker.empty()) {
LOGE("Failed to prepare tracking model. No tracking algorithm "
- "is available.");
- return MEDIA_VISION_ERROR_INVALID_OPERATION;
+ "is available.");
+ return MEDIA_VISION_ERROR_INVALID_OPERATION;
}
cv::Rect_<float> lastBoundingBox;
@@ -160,7 +158,7 @@ int FaceTrackingModel::prepare(
{
if (m_tracker.empty()) {
LOGE("Failed to prepare tracking model. No tracking algorithm "
- "is available.");
+ "is available.");
return MEDIA_VISION_ERROR_INVALID_OPERATION;
}
diff --git a/mv_face/face/src/FaceUtil.cpp b/mv_face/face/src/FaceUtil.cpp
index 1430fe14..954f82b0 100644
--- a/mv_face/face/src/FaceUtil.cpp
+++ b/mv_face/face/src/FaceUtil.cpp
@@ -19,7 +19,6 @@
#include "mv_private.h"
#include <opencv2/imgproc/types_c.h>
-#include <opencv2/highgui/highgui.hpp>
namespace MediaVision {
namespace Face {
diff --git a/mv_face/face/src/TrackerMedianFlow.cpp b/mv_face/face/src/TrackerMedianFlow.cpp
index ee4bc983..759b6061 100644
--- a/mv_face/face/src/TrackerMedianFlow.cpp
+++ b/mv_face/face/src/TrackerMedianFlow.cpp
@@ -77,18 +77,18 @@ void TrackerMedianFlow::Params::write(cv::FileStorage& fs) const
}
TrackerMedianFlow::TrackerMedianFlow(Params paramsIn) :
- termcrit(TermCriteria::COUNT | TermCriteria::EPS, 20, 0.3),
+ m_termcrit(TermCriteria::COUNT | TermCriteria::EPS, 20, 0.3),
m_confidence(0.0)
{
- params = paramsIn;
- isInit = false;
+ m_params = paramsIn;
+ m_isInit = false;
}
bool TrackerMedianFlow::copyTo(TrackerMedianFlow& copy) const
{
- copy.isInit = isInit;
- copy.params = params;
- copy.termcrit = termcrit;
+ copy.m_isInit = m_isInit;
+ copy.m_params = m_params;
+ copy.m_termcrit = m_termcrit;
copy.m_boundingBox = m_boundingBox;
copy.m_confidence = m_confidence;
m_image.copyTo(copy.m_image);
@@ -97,22 +97,21 @@ bool TrackerMedianFlow::copyTo(TrackerMedianFlow& copy) const
bool TrackerMedianFlow::init(const Mat& image, const Rect_<float>& boundingBox)
{
- if (image.empty()) {
+ if (image.empty())
return false;
- }
image.copyTo(m_image);
buildOpticalFlowPyramid(
- m_image, m_pyramid, params.mWindowSize, params.mPyrMaxLevel);
+ m_image, m_pyramid, m_params.mWindowSize, m_params.mPyrMaxLevel);
m_boundingBox = boundingBox;
- isInit = true;
- return isInit;
+ m_isInit = true;
+ return m_isInit;
}
bool TrackerMedianFlow::update(const Mat& image, Rect_<float>& boundingBox)
{
- if (!isInit || image.empty())
+ if (!m_isInit || image.empty())
return false;
/* Handles such behaviour when preparation frame has the size
@@ -134,9 +133,8 @@ bool TrackerMedianFlow::update(const Mat& image, Rect_<float>& boundingBox)
Mat oldImage = m_image;
Rect_<float> oldBox = m_boundingBox;
- if(!medianFlowImpl(oldImage, image, oldBox)) {
+ if(!medianFlowImpl(oldImage, image, oldBox))
return false;
- }
boundingBox = oldBox;
image.copyTo(m_image);
@@ -146,7 +144,7 @@ bool TrackerMedianFlow::update(const Mat& image, Rect_<float>& boundingBox)
bool TrackerMedianFlow::isInited() const
{
- return isInit;
+ return m_isInit;
}
float TrackerMedianFlow::getLastConfidence() const
@@ -160,29 +158,27 @@ Rect_<float> TrackerMedianFlow::getLastBoundingBox() const
}
bool TrackerMedianFlow::medianFlowImpl(
- Mat oldImage_gray, Mat newImage_gray, Rect_<float>& oldBox)
+ Mat oldGrayImage, Mat newGrayImage, Rect_<float>& oldBox)
{
std::vector<Point2f> pointsToTrackOld, pointsToTrackNew;
- const float gridXStep = oldBox.width / params.mPointsInGrid;
- const float gridYStep = oldBox.height / params.mPointsInGrid;
- for (int i = 0; i < params.mPointsInGrid; i++) {
- for (int j = 0; j < params.mPointsInGrid; j++) {
+ const float gridXStep = oldBox.width / m_params.mPointsInGrid;
+ const float gridYStep = oldBox.height / m_params.mPointsInGrid;
+ for (int i = 0; i < m_params.mPointsInGrid; i++)
+ for (int j = 0; j < m_params.mPointsInGrid; j++)
pointsToTrackOld.push_back(
Point2f(oldBox.x + .5f*gridXStep + 1.f*gridXStep*j,
oldBox.y + .5f*gridYStep + 1.f*gridYStep*i));
- }
- }
std::vector<uchar> status(pointsToTrackOld.size());
std::vector<float> errors(pointsToTrackOld.size());
std::vector<Mat> tempPyramid;
buildOpticalFlowPyramid(
- newImage_gray,
+ newGrayImage,
tempPyramid,
- params.mWindowSize,
- params.mPyrMaxLevel);
+ m_params.mWindowSize,
+ m_params.mPyrMaxLevel);
calcOpticalFlowPyrLK(m_pyramid,
tempPyramid,
@@ -190,16 +186,14 @@ bool TrackerMedianFlow::medianFlowImpl(
pointsToTrackNew,
status,
errors,
- params.mWindowSize,
- params.mPyrMaxLevel,
- termcrit);
+ m_params.mWindowSize,
+ m_params.mPyrMaxLevel,
+ m_termcrit);
std::vector<Point2f> di;
- for (size_t idx = 0u; idx < pointsToTrackOld.size(); idx++) {
- if (status[idx] == 1) {
+ for (size_t idx = 0u; idx < pointsToTrackOld.size(); idx++)
+ if (status[idx] == 1)
di.push_back(pointsToTrackNew[idx] - pointsToTrackOld[idx]);
- }
- }
std::vector<bool> filter_status;
check_FB(tempPyramid,
@@ -207,8 +201,8 @@ bool TrackerMedianFlow::medianFlowImpl(
pointsToTrackNew,
filter_status);
- check_NCC(oldImage_gray,
- newImage_gray,
+ check_NCC(oldGrayImage,
+ newGrayImage,
pointsToTrackOld,
pointsToTrackNew,
filter_status);
@@ -222,9 +216,8 @@ bool TrackerMedianFlow::medianFlowImpl(
}
}
- if (pointsToTrackOld.size() == 0 || di.size() == 0) {
+ if (pointsToTrackOld.size() == 0 || di.size() == 0)
return false;
- }
Point2f mDisplacement;
Rect_<float> boxCandidate =
@@ -259,7 +252,7 @@ Rect_<float> TrackerMedianFlow::vote(
oldRect.y + oldRect.height/2.0);
int n = (int)oldPoints.size();
- std::vector<float>buf(std::max(n*(n-1) / 2, 3), 0.f);
+ std::vector<float> buf(std::max(n*(n-1) / 2, 3), 0.f);
if(oldPoints.size() == 1) {
newRect.x = oldRect.x+newPoints[0].x-oldPoints[0].x;
@@ -271,15 +264,13 @@ Rect_<float> TrackerMedianFlow::vote(
float xshift = 0.f;
float yshift = 0.f;
- for(int i = 0; i < n; i++) {
+ for(int i = 0; i < n; i++)
buf[i] = newPoints[i].x - oldPoints[i].x;
- }
xshift = getMedian(buf, n);
newCenter.x += xshift;
- for(int idx = 0; idx < n; idx++) {
+ for(int idx = 0; idx < n; idx++)
buf[idx] = newPoints[idx].y - oldPoints[idx].y;
- }
yshift = getMedian(buf, n);
newCenter.y += yshift;
@@ -316,9 +307,8 @@ Rect_<float> TrackerMedianFlow::vote(
template<typename T>
T TrackerMedianFlow::getMedian(std::vector<T>& values, int size)
{
- if (size == -1) {
+ if (size == -1)
size = (int)values.size();
- }
std::vector<T> copy(values.begin(), values.begin() + size);
std::sort(copy.begin(), copy.end());
@@ -342,9 +332,8 @@ void TrackerMedianFlow::check_FB(
const std::vector<Point2f>& newPoints,
std::vector<bool>& status)
{
- if(status.size() == 0) {
+ if(status.size() == 0)
status = std::vector<bool>(oldPoints.size(), true);
- }
std::vector<uchar> LKstatus(oldPoints.size());
std::vector<float> errors(oldPoints.size());
@@ -357,19 +346,16 @@ void TrackerMedianFlow::check_FB(
pointsToTrackReprojection,
LKstatus,
errors,
- params.mWindowSize,
- params.mPyrMaxLevel,
- termcrit);
+ m_params.mWindowSize,
+ m_params.mPyrMaxLevel,
+ m_termcrit);
- for (size_t idx = 0u; idx < oldPoints.size(); idx++) {
- FBerror[idx] =
- l2distance(oldPoints[idx], pointsToTrackReprojection[idx]);
- }
+ for (size_t idx = 0u; idx < oldPoints.size(); idx++)
+ FBerror[idx] = l2distance(oldPoints[idx], pointsToTrackReprojection[idx]);
float FBerrorMedian = getMedian(FBerror) + FloatEps;
- for (size_t idx = 0u; idx < oldPoints.size(); idx++) {
+ for (size_t idx = 0u; idx < oldPoints.size(); idx++)
status[idx] = (FBerror[idx] < FBerrorMedian);
- }
}
void TrackerMedianFlow::check_NCC(
@@ -401,14 +387,13 @@ void TrackerMedianFlow::check_NCC(
}
float median = getMedian(NCC) - FloatEps;
- for(size_t idx = 0u; idx < oldPoints.size(); idx++) {
+ for(size_t idx = 0u; idx < oldPoints.size(); idx++)
status[idx] = status[idx] && (NCC[idx] > median);
- }
}
void TrackerMedianFlow::read(cv::FileStorage& fs)
{
- params.read(fs.root());
+ m_params.read(fs.root());
float bbX = 0.f;
float bbY = 0.f;
float bbW = 0.f;
@@ -423,7 +408,7 @@ void TrackerMedianFlow::read(cv::FileStorage& fs)
void TrackerMedianFlow::write(cv::FileStorage& fs) const
{
- params.write(fs);
+ m_params.write(fs);
fs << "lastLocationX" << m_boundingBox.x;
fs << "lastLocationY" << m_boundingBox.y;
fs << "lastLocationW" << m_boundingBox.width;
diff --git a/mv_face/face/src/mv_face_open.cpp b/mv_face/face/src/mv_face_open.cpp
index f1a70ec6..292c10a4 100644
--- a/mv_face/face/src/mv_face_open.cpp
+++ b/mv_face/face/src/mv_face_open.cpp
@@ -31,7 +31,7 @@
using namespace ::MediaVision::Face;
-static const RecognitionParams defaultRecognitionParams = RecognitionParams();
+static const RecognitionParams DEFAULT_RECOGNITION_PARAMS = RecognitionParams();
static void extractRecognitionParams(
mv_engine_config_h engine_cfg,
@@ -51,17 +51,15 @@ static void extractRecognitionParams(
"MV_FACE_RECOGNITION_MODEL_TYPE",
&algType);
- if (0 < algType && 4 > algType) {
+ if (0 < algType && 4 > algType)
recognitionParams.mRecognitionAlgType =
- (FaceRecognitionModelType)algType;
- } else {
- recognitionParams.mRecognitionAlgType =
- defaultRecognitionParams.mRecognitionAlgType;
- }
+ (FaceRecognitionModelType)algType;
+ else
+ recognitionParams.mRecognitionAlgType =
+ DEFAULT_RECOGNITION_PARAMS.mRecognitionAlgType;
- if (NULL == engine_cfg) {
- mv_destroy_engine_config(working_cfg);
- }
+ if (NULL == engine_cfg)
+ mv_destroy_engine_config(working_cfg);
}
inline void convertRectCV2MV(const cv::Rect& src, mv_rectangle_s& dst)
@@ -119,56 +117,50 @@ int mv_face_detect_open(
engine_cfg,
MV_FACE_DETECTION_ROI_X,
&roi.x);
- if (error != MEDIA_VISION_ERROR_NONE) {
+ if (error != MEDIA_VISION_ERROR_NONE)
LOGE("Error occurred during face detection roi (x) receiving."
" (%i)", error);
- }
error = mv_engine_config_get_int_attribute_c(
engine_cfg,
MV_FACE_DETECTION_ROI_Y,
&roi.y);
- if (error != MEDIA_VISION_ERROR_NONE) {
+ if (error != MEDIA_VISION_ERROR_NONE)
LOGE("Error occurred during face detection roi (y) receiving."
" (%i)", error);
-}
error = mv_engine_config_get_int_attribute_c(
engine_cfg,
MV_FACE_DETECTION_ROI_WIDTH,
&roi.width);
- if (error != MEDIA_VISION_ERROR_NONE) {
+ if (error != MEDIA_VISION_ERROR_NONE)
LOGE("Error occurred during face detection roi (width) receiving."
" (%i)", error);
- }
error = mv_engine_config_get_int_attribute_c(
engine_cfg,
MV_FACE_DETECTION_ROI_HEIGHT,
&roi.height);
- if (error != MEDIA_VISION_ERROR_NONE) {
+ if (error != MEDIA_VISION_ERROR_NONE)
LOGE("Error occurred during face detection roi (height) receiving."
" (%i)", error);
- }
cv::Size minSize(-1, -1);
error = mv_engine_config_get_int_attribute_c(
engine_cfg,
MV_FACE_DETECTION_MIN_SIZE_WIDTH,
&minSize.width);
- if (error != MEDIA_VISION_ERROR_NONE) {
+ if (error != MEDIA_VISION_ERROR_NONE)
LOGE("Error occurred during face detection minimum width receiving."
" (%i)", error);
- }
error = mv_engine_config_get_int_attribute_c(
engine_cfg,
MV_FACE_DETECTION_MIN_SIZE_HEIGHT,
&minSize.height);
- if (error != MEDIA_VISION_ERROR_NONE) {
+ if (error != MEDIA_VISION_ERROR_NONE)
LOGE("Error occurred during face detection minimum height receiving."
" (%i)", error);
- }
std::vector<cv::Rect> faceLocations;
if (!faceDetector.detectFaces(image, roi, minSize, faceLocations)) {
@@ -176,17 +168,15 @@ int mv_face_detect_open(
return MEDIA_VISION_ERROR_INVALID_OPERATION;
}
- static const int StartMaxResultsNumber = 50;
- static std::vector<mv_rectangle_s> results(StartMaxResultsNumber);
+ static const int START_MAX_RESULTS_NUMBER = 50;
+ static std::vector<mv_rectangle_s> results(START_MAX_RESULTS_NUMBER);
const int numberOfResults = faceLocations.size();
- if (numberOfResults > StartMaxResultsNumber) {
+ if (numberOfResults > START_MAX_RESULTS_NUMBER)
results.resize(numberOfResults);
- }
- for (int rectNum = 0; rectNum < numberOfResults; ++rectNum) {
+ for (int rectNum = 0; rectNum < numberOfResults; ++rectNum)
convertRectCV2MV(faceLocations[rectNum], results[rectNum]);
- }
LOGI("Call the detect callback for %i detected faces", numberOfResults);
detected_cb(source, engine_cfg, results.data(), numberOfResults, user_data);
@@ -209,13 +199,13 @@ int mv_face_recognize_open(
if (!recognized_cb) {
LOGE("Recognition failed. Can't output recognition results without "
- "callback function");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ "callback function");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
if (!recognition_model) {
- LOGE("Can't recognize for the NULL Media Vision Face recognition model");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ LOGE("Can't recognize for the NULL Media Vision Face recognition model");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
FaceRecognitionModel *pRecModel = static_cast<FaceRecognitionModel*>(recognition_model);
@@ -258,13 +248,13 @@ int mv_face_recognize_open(
if (!results.mIsRecognized) {
recognized_cb(
- source,
- recognition_model,
- engine_cfg,
- NULL,
- NULL,
- 0.0,
- user_data);
+ source,
+ recognition_model,
+ engine_cfg,
+ NULL,
+ NULL,
+ 0.0,
+ user_data);
} else {
mv_rectangle_s location;
location.point.x = results.mFaceLocation.x;
@@ -321,7 +311,7 @@ int mv_face_track_open(
if (!pTrackModel) {
LOGE("Face tracking failed. "
- "Incorrect Media Vision Face tracking model handle is used");
+ "Incorrect Media Vision Face tracking model handle is used");
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
@@ -338,7 +328,7 @@ int mv_face_track_open(
if (MEDIA_VISION_ERROR_NONE != ret) {
LOGE("Tracking can't be performed. "
- "Check that tracking model is prepared when tracking starts");
+ "Check that tracking model is prepared when tracking starts");
return ret;
}
@@ -429,7 +419,7 @@ int mv_face_facial_expression_recognize_open(
mv_face_facial_expression_e expression;
error = FaceExpressionRecognizer::recognizeFaceExpression(
- image, face_location, &expression);
+ image, face_location, &expression);
if (error != MEDIA_VISION_ERROR_NONE) {
LOGE("eye contition recognition failed");
@@ -488,7 +478,7 @@ int mv_face_recognition_model_clone_open(
{
if (!src || !dst) {
LOGE("Can't clone recognition model. Both source and destination"
- "recognition model handles has to be not NULL");
+ "recognition model handles has to be not NULL");
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
@@ -542,13 +532,13 @@ int mv_face_recognition_model_load_open(
{
if (!recognition_model) {
LOGE("Can't load recognition model from the file. "
- "Handle has to be not NULL");
+ "Handle has to be not NULL");
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
if (NULL == file_name) {
LOGE("Can't load recognition model from the file. "
- "File name has to be specified");
+ "File name has to be specified");
return MEDIA_VISION_ERROR_INVALID_PATH;
}
@@ -565,7 +555,7 @@ int mv_face_recognition_model_load_open(
if (!pRecModel) {
LOGE("Loading of the face recognition model from file failed. "
- "Incorrect Media Vision Face recognition model handle is used");
+ "Incorrect Media Vision Face recognition model handle is used");
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
@@ -588,13 +578,13 @@ int mv_face_recognition_model_add_open(
{
if (!source) {
LOGE("Can't add face image example for recognition model. "
- "Media Vision source handle has to be not NULL");
+ "Media Vision source handle has to be not NULL");
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
if (!recognition_model) {
LOGE("Can't add face image example for recognition model. "
- "Model handle has to be not NULL");
+ "Model handle has to be not NULL");
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
@@ -603,7 +593,7 @@ int mv_face_recognition_model_add_open(
if (!pRecModel) {
LOGE("Add face image example to the model failed. "
- "Incorrect Media Vision Face recognition model handle is used");
+ "Incorrect Media Vision Face recognition model handle is used");
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
@@ -631,7 +621,7 @@ int mv_face_recognition_model_add_open(
}
LOGD("The face image example labeled %i has been added "
- "to the Media Vision recognition model", face_label);
+ "to the Media Vision recognition model", face_label);
return ret;
}
@@ -649,7 +639,7 @@ int mv_face_recognition_model_reset_open(
if (!pRecModel) {
LOGE("Loading of the face recognition model from file failed. "
- "Incorrect Media Vision Face recognition model handle is used");
+ "Incorrect Media Vision Face recognition model handle is used");
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
@@ -680,7 +670,7 @@ int mv_face_recognition_model_learn_open(
if (!pRecModel) {
LOGE("Learning of the face recognition model failed. "
- "Incorrect Media Vision Face recognition model handle is used");
+ "Incorrect Media Vision Face recognition model handle is used");
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
@@ -712,7 +702,7 @@ int mv_face_recognition_model_query_labels_open(
if (NULL == labels || NULL == number_of_labels) {
LOGE("Can't get list of labels. labels and number_of_labels out "
- "parameters both has to be not NULL.");
+ "parameters both has to be not NULL.");
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
@@ -721,8 +711,8 @@ int mv_face_recognition_model_query_labels_open(
if (!pRecModel) {
LOGE("Learning of the face recognition model failed. "
- "Incorrect Media Vision Face recognition model handle is used");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ "Incorrect Media Vision Face recognition model handle is used");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
const std::set<int>& learnedLabels = pRecModel->getFaceLabels();
@@ -792,7 +782,7 @@ int mv_face_tracking_model_prepare_open(
if (!source) {
LOGE("Can't prepare tracking model. "
- "Media Vision source handle has to be not NULL");
+ "Media Vision source handle has to be not NULL");
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
@@ -801,7 +791,7 @@ int mv_face_tracking_model_prepare_open(
if (!pTrackModel) {
LOGE("Preparation of the face tracking model failed. "
- "Incorrect Media Vision Face tracking model handle is used");
+ "Incorrect Media Vision Face tracking model handle is used");
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
@@ -850,7 +840,7 @@ int mv_face_tracking_model_clone_open(
{
if (!src || !dst) {
LOGE("Can't clone tracking model. Both source and destination"
- "tracking model handles has to be not NULL");
+ "tracking model handles has to be not NULL");
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
@@ -879,13 +869,13 @@ int mv_face_tracking_model_save_open(
{
if (!tracking_model) {
LOGE("Can't save tracking model to the file. "
- "Handle has to be not NULL");
+ "Handle has to be not NULL");
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
if (NULL == file_name) {
LOGE("Can't save tracking model to the file. "
- "File name has to be specified");
+ "File name has to be specified");
return MEDIA_VISION_ERROR_INVALID_PATH;
}
@@ -893,7 +883,7 @@ int mv_face_tracking_model_save_open(
if (!pTrackModel) {
LOGE("Saving of the face tracking model to file failed. "
- "Incorrect Media Vision Face tracking model handle is used");
+ "Incorrect Media Vision Face tracking model handle is used");
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
@@ -915,13 +905,13 @@ int mv_face_tracking_model_load_open(
{
if (!tracking_model) {
LOGE("Can't load tracking model from the file. "
- "Handle has to be not NULL");
+ "Handle has to be not NULL");
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
if (NULL == file_name) {
LOGE("Can't load tracking model from the file. "
- "File name has to be specified");
+ "File name has to be specified");
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
@@ -938,7 +928,7 @@ int mv_face_tracking_model_load_open(
if (!pTrackModel) {
LOGE("Loading of the face tracking model from file failed. "
- "Incorrect Media Vision Face tracking model handle is used");
+ "Incorrect Media Vision Face tracking model handle is used");
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
diff --git a/mv_face/face_lic/include/mv_face_lic.h b/mv_face/face_lic/include/mv_face_lic.h
index 42a39af0..a0cf0bb6 100644
--- a/mv_face/face_lic/include/mv_face_lic.h
+++ b/mv_face/face_lic/include/mv_face_lic.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __TIZEN_MEDIAVISION_FACE_LIC_H__
-#define __TIZEN_MEDIAVISION_FACE_LIC_H__
+#ifndef __MEDIA_VISION_FACE_LIC_H__
+#define __MEDIA_VISION_FACE_LIC_H__
#include "mv_face.h"
@@ -335,7 +335,7 @@ int mv_face_recognition_model_destroy_lic(
* @since_tizen 3.0
* @remarks Cloning perform not only handle copy, but also copies all internal
* resources of the model. @a dst must be released using
- * @a mv_face_recognition_model_destroy_open_lic().
+ * @a mv_face_recognition_model_destroy_lic().
* @param [in] src The handle to the recognition model to be copied
* @param [out] dst The handle to the copy of existed recognition model
* specified as @a src
@@ -397,7 +397,7 @@ int mv_face_recognition_model_save_lic(
* support reinforcement learning.
* @remarks @a recognition_model is loaded from the application's data directory.
* @a recognition_model must be destroyed using
- * @ref mv_face_recognition_model_destroy_open_lic().
+ * @ref mv_face_recognition_model_destroy_lic().
* @param [in] file_name Name of file to load the model
* @param [out] recognition_model The handle to the recognition model
* to be loaded from the file
@@ -775,4 +775,4 @@ int mv_face_tracking_model_load_lic(
}
#endif /* __cplusplus */
-#endif /* __TIZEN_MEDIAVISION_FACE_LIC_H__ */
+#endif /* __MEDIA_VISION_FACE_LIC_H__ */
diff --git a/mv_face/face_lic/src/mv_face_lic.c b/mv_face/face_lic/src/mv_face_lic.c
index e37f367f..394d9340 100644
--- a/mv_face/face_lic/src/mv_face_lic.c
+++ b/mv_face/face_lic/src/mv_face_lic.c
@@ -188,7 +188,7 @@ int mv_face_tracking_model_prepare_lic(
int mv_face_tracking_model_clone_lic(
mv_face_tracking_model_h src,
- mv_face_tracking_model_h *dst)
+ mv_face_tracking_model_h *dst)
{
return MEDIA_VISION_ERROR_NOT_SUPPORTED;
}
diff --git a/mv_image/CMakeLists.txt b/mv_image/CMakeLists.txt
index 36d20373..ba1a6ce2 100644
--- a/mv_image/CMakeLists.txt
+++ b/mv_image/CMakeLists.txt
@@ -1,7 +1,7 @@
project(mv_image_port)
cmake_minimum_required(VERSION 2.6)
-if(MEDIA_VISION_IMAGE_PORT)
+if(MEDIA_VISION_IMAGE_LICENSE_PORT)
add_subdirectory(${PROJECT_SOURCE_DIR}/image_lic) # Licensed port
else()
add_subdirectory(${PROJECT_SOURCE_DIR}/image) # Open port
diff --git a/mv_image/image/CMakeLists.txt b/mv_image/image/CMakeLists.txt
index 739d5f26..faa50726 100644
--- a/mv_image/image/CMakeLists.txt
+++ b/mv_image/image/CMakeLists.txt
@@ -3,6 +3,10 @@ cmake_minimum_required(VERSION 2.6)
set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS_DEBUG _DEBUG)
+if(NOT SKIP_WARNINGS)
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Werror")
+endif()
+
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/${LIB_INSTALL_DIR})
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/${LIB_INSTALL_DIR})
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
diff --git a/mv_image/image/include/Features/BasicExtractorFactory.h b/mv_image/image/include/Features/BasicExtractorFactory.h
index bbfc824f..ca65f4f5 100644
--- a/mv_image/image/include/Features/BasicExtractorFactory.h
+++ b/mv_image/image/include/Features/BasicExtractorFactory.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __IMAGEFEATURES_BASICEXTRACTORFACTORY_H__
-#define __IMAGEFEATURES_BASICEXTRACTORFACTORY_H__
+#ifndef __MEDIA_VISION_BASICEXTRACTORFACTORY_H__
+#define __MEDIA_VISION_BASICEXTRACTORFACTORY_H__
#include "Features/FeatureExtractorFactory.h"
@@ -29,12 +29,12 @@ public:
virtual cv::Ptr<FeatureExtractor> buildFeatureExtractor();
private:
- KeypointType m_kpType;
+ KeypointType __kpType;
- DescriptorType m_descType;
+ DescriptorType __descType;
};
} /* Image */
} /* MediaVision */
-#endif /* __IMAGEFEATURES_BASICEXTRACTORFACTORY_H__ */
+#endif /* __MEDIA_VISION_BASICEXTRACTORFACTORY_H__ */
diff --git a/mv_image/image/include/Features/FeatureExtractor.h b/mv_image/image/include/Features/FeatureExtractor.h
index ae555037..4a34faea 100644
--- a/mv_image/image/include/Features/FeatureExtractor.h
+++ b/mv_image/image/include/Features/FeatureExtractor.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __IMAGEFEATURES_FEATUREEXTRACTOR_H__
-#define __IMAGEFEATURES_FEATUREEXTRACTOR_H__
+#ifndef __MEDIA_VISION_FEATUREEXTRACTOR_H__
+#define __MEDIA_VISION_FEATUREEXTRACTOR_H__
#include "ImageConfig.h"
@@ -57,18 +57,18 @@ public:
const std::vector<cv::Point2f>& roi = std::vector<cv::Point2f>());
private:
- static const cv::Size MinSize;
+ static const cv::Size __MIN_SIZE;
private:
- KeypointType m_kpType;
+ KeypointType __kpType;
- cv::Ptr<cv::FeatureDetector> m_detector;
+ cv::Ptr<cv::FeatureDetector> __detector;
- DescriptorType m_descType;
+ DescriptorType __descType;
- cv::Ptr<cv::DescriptorExtractor> m_extractor;
+ cv::Ptr<cv::DescriptorExtractor> __extractor;
- float (*m_computeRecognitionRate)(
+ float (*__computeRecognitionRate)(
const cv::Mat&,
const std::vector<cv::KeyPoint>&);
};
@@ -76,4 +76,4 @@ private:
} /* Image */
} /* MediaVision */
-#endif /* __IMAGEFEATURES_FEATUREEXTRACTOR_H__ */
+#endif /* __MEDIA_VISION_FEATUREEXTRACTOR_H__ */
diff --git a/mv_image/image/include/Features/FeatureExtractorFactory.h b/mv_image/image/include/Features/FeatureExtractorFactory.h
index d421478f..837725a3 100644
--- a/mv_image/image/include/Features/FeatureExtractorFactory.h
+++ b/mv_image/image/include/Features/FeatureExtractorFactory.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __IMAGEFEATURES_FEATUREEXTRACTORFACTORY_H__
-#define __IMAGEFEATURES_FEATUREEXTRACTORFACTORY_H__
+#ifndef __MEDIA_VISION_FEATUREEXTRACTORFACTORY_H__
+#define __MEDIA_VISION_FEATUREEXTRACTORFACTORY_H__
#include "Features/FeatureExtractor.h"
@@ -34,4 +34,4 @@ public:
} /* Image */
} /* MediaVision */
-#endif /* __IMAGEFEATURES_FEATUREEXTRACTORFACTORY_H__ */
+#endif /* __MEDIA_VISION_FEATUREEXTRACTORFACTORY_H__ */
diff --git a/mv_image/image/include/Features/FeatureMatcher.h b/mv_image/image/include/Features/FeatureMatcher.h
index f3c24630..37f4508b 100644
--- a/mv_image/image/include/Features/FeatureMatcher.h
+++ b/mv_image/image/include/Features/FeatureMatcher.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __IMAGEFEATURES_FEATUREMATCHER_H__
-#define __IMAGEFEATURES_FEATUREMATCHER_H__
+#ifndef __MEDIA_VISION_FEATUREMATCHER_H__
+#define __MEDIA_VISION_FEATUREMATCHER_H__
#include "Features/FeaturePack.h"
@@ -26,7 +26,7 @@ namespace Image {
class FeatureMatcher {
public:
- enum MatchError{
+ enum MatchError {
InvalidFeaturePackFrom,
InvalidFeaturePackTo,
DisparateTypes,
@@ -58,16 +58,16 @@ public:
void setMinimumMatchesNumber(size_t minimumMatchesNumber);
private:
- cv::BFMatcher m_matcher;
+ cv::BFMatcher __matcher;
- float m_affectingPart;
+ float __affectingPart;
- float m_tolerantError;
+ float __tolerantError;
- size_t m_minimumMatchesNumber;
+ size_t __minimumMatchesNumber;
};
} /* Image */
} /* MediaVision */
-#endif /* __IMAGEFEATURES_FEATUREMATCHER_H__ */
+#endif /* __MEDIA_VISION_FEATUREMATCHER_H__ */
diff --git a/mv_image/image/include/Features/FeaturePack.h b/mv_image/image/include/Features/FeaturePack.h
index a100ba6e..c492bf3a 100644
--- a/mv_image/image/include/Features/FeaturePack.h
+++ b/mv_image/image/include/Features/FeaturePack.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __IMAGEFEATURES_FEATUREPACK_H__
-#define __IMAGEFEATURES_FEATUREPACK_H__
+#ifndef __MEDIA_VISION_FEATUREPACK_H__
+#define __MEDIA_VISION_FEATUREPACK_H__
#include "ImageConfig.h"
@@ -62,18 +62,18 @@ public:
*/
FeaturePack& operator= (const FeaturePack& copy);
- KeypointType m_keypointsType;
+ KeypointType __keypointsType;
- std::vector<cv::KeyPoint> m_objectKeypoints;
+ std::vector<cv::KeyPoint> __objectKeypoints;
- DescriptorType m_descriptorsType;
+ DescriptorType __descriptorsType;
- cv::Mat m_objectDescriptors;
+ cv::Mat __objectDescriptors;
- float m_recognitionRate;
+ float __recognitionRate;
};
} /* Image */
} /* MediaVision */
-#endif /* __IMAGEFEATURES_FEATUREPACK_H__ */
+#endif /* __MEDIA_VISION_FEATUREPACK_H__ */
diff --git a/mv_image/image/include/Features/ORBExtractorFactory.h b/mv_image/image/include/Features/ORBExtractorFactory.h
index 50f6ad6e..335c04b3 100644
--- a/mv_image/image/include/Features/ORBExtractorFactory.h
+++ b/mv_image/image/include/Features/ORBExtractorFactory.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __IMAGEFEATURES_ORBEXTRACTORFACTORY_H__
-#define __IMAGEFEATURES_ORBEXTRACTORFACTORY_H__
+#ifndef __MEDIA_VISION_ORBEXTRACTORFACTORY_H__
+#define __MEDIA_VISION_ORBEXTRACTORFACTORY_H__
#include "Features/FeatureExtractorFactory.h"
@@ -43,13 +43,13 @@ private:
const cv::Mat&,
const std::vector<cv::KeyPoint>&);
- float m_scaleFactor; /**< Recognition scale factor for the ORB detector. */
+ float __scaleFactor; /**< Recognition scale factor for the ORB detector. */
- size_t m_maximumFeaturesNumber; /**< Maximum number of features, which will
- be extracted from object image. */
+ size_t __maximumFeaturesNumber; /**< Maximum number of features, which will
+ be extracted from object image. */
};
} /* Image */
} /* MediaVision */
-#endif /* __IMAGEFEATURES_ORBEXTRACTORFACTORY_H__ */
+#endif /* __MEDIA_VISION_ORBEXTRACTORFACTORY_H__ */
diff --git a/mv_image/image/include/ImageConfig.h b/mv_image/image/include/ImageConfig.h
index e43987d2..5b037a4e 100644
--- a/mv_image/image/include/ImageConfig.h
+++ b/mv_image/image/include/ImageConfig.h
@@ -14,14 +14,14 @@
* limitations under the License.
*/
-#ifndef __IMAGECONFIG_H__
-#define __IMAGECONFIG_H__
+#ifndef __MEDIA_VISION_IMAGECONFIG_H__
+#define __MEDIA_VISION_IMAGECONFIG_H__
#include <string>
/**
* @file ImageConfig.h
- * @brief This file contains Image Module Configuration.
+ * @brief This file contains Image Module configuration.
*/
namespace MediaVision {
@@ -57,11 +57,12 @@ enum DescriptorType {
};
const std::string DescriptorNames[DT_SIZE] = {
- [DT_ORB] = "ORB",
- [DT_BRIEF] = "BRIEF"
+ [DT_ORB] = "ORB",
+ [DT_BRIEF] = "BRIEF"
};
/**
+ * @class FeaturesExtractingParams
* @brief Contains parameters for features extracting from image objects.
*
* @since_tizen 3.0
@@ -73,7 +74,7 @@ struct FeaturesExtractingParams {
DescriptorType mDescriptorType; /**< Descriptor's type. */
- union { /**< Extracting parameters for concretes algorithms */
+ union { /**< Extracting parameters for concrete algorithms. */
struct { /**< Extracting parameters for ORB algorithm. */
double mScaleFactor; /**< Recognition scale factor for the ORB detector. */
int mMaximumFeaturesNumber; /**< Maximum number of features,
@@ -150,7 +151,7 @@ struct StabilizationParams {
};
/**
- * @calss TrackingParams
+ * @class TrackingParams
* @brief Contains parameters for image objects tracking.
*
* @since_tizen 3.0
@@ -182,4 +183,4 @@ struct TrackingParams {
} /* Image */
} /* MediaVision */
-#endif /* __IMAGECONFIG_H__ */
+#endif /* __MEDIA_VISION_IMAGECONFIG_H__ */
diff --git a/mv_image/image/include/ImageMathUtil.h b/mv_image/image/include/ImageMathUtil.h
index 41cdb0ca..f8a8ce11 100644
--- a/mv_image/image/include/ImageMathUtil.h
+++ b/mv_image/image/include/ImageMathUtil.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __IMAGEMATHUTIL_H__
-#define __IMAGEMATHUTIL_H__
+#ifndef __MEDIA_VISION_IMAGEMATHUTIL_H__
+#define __MEDIA_VISION_IMAGEMATHUTIL_H__
#include <opencv/cv.h>
@@ -26,6 +26,7 @@
namespace MediaVision {
namespace Image {
+
const size_t MinimumNumberOfFeatures = 4u; /* Minimum number of features
when perspective transform
parameters calculation
@@ -106,4 +107,4 @@ std::vector<cv::Point2f> contourResize(
} /* Image */
} /* MediaVision */
-#endif /* __IMAGEMATHUTIL_H__ */
+#endif /* __MEDIA_VISION_IMAGEMATHUTIL_H__ */
diff --git a/mv_image/image/include/Recognition/ImageObject.h b/mv_image/image/include/Recognition/ImageObject.h
index a73dcf5e..12e3f260 100644
--- a/mv_image/image/include/Recognition/ImageObject.h
+++ b/mv_image/image/include/Recognition/ImageObject.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __IMAGEOBJECT_H__
-#define __IMAGEOBJECT_H__
+#ifndef __MEDIA_VISION_IMAGEOBJECT_H__
+#define __MEDIA_VISION_IMAGEOBJECT_H__
#include "ImageConfig.h"
@@ -38,6 +38,7 @@ namespace Image {
* @since_tizen 3.0
*/
class ImageObject {
+
public:
/**
* @brief @ref ImageObject default constructor.
@@ -55,7 +56,7 @@ public:
* @param [in] image The image for which instance of @ref ImageObject
* will be created
* @param [in] params Features extracting parameters
- */
+ */
ImageObject(const cv::Mat& image, const FeaturesExtractingParams& params);
/**
@@ -96,9 +97,9 @@ public:
* @param [in] roi Region of interested object on the @a image
*/
void fill(
- const cv::Mat& image,
- const FeaturesExtractingParams& params,
- const std::vector<cv::Point2f>& roi = std::vector<cv::Point2f>());
+ const cv::Mat& image,
+ const FeaturesExtractingParams& params,
+ const std::vector<cv::Point2f>& roi = std::vector<cv::Point2f>());
/**
* @brief Gets a value that determines how well an @ref ImageObject can be recognized.
@@ -173,17 +174,17 @@ private:
const std::vector<cv::Point2f>& roi);
private:
- FeaturesExtractingParams m_featureExtractingParams;
+ FeaturesExtractingParams __featureExtractingParams;
- FeaturePack m_features;
+ FeaturePack __features;
- bool m_isEmpty;
+ bool __isEmpty;
- bool m_isLabeled;
+ bool __isLabeled;
- int m_label;
+ int __label;
- std::vector<cv::Point2f> m_boundingContour;
+ std::vector<cv::Point2f> __boundingContour;
friend class ImageRecognizer;
@@ -195,4 +196,4 @@ private:
} /* Image */
} /* MediaVision */
-#endif /* __IMAGEOBJECT_H__ */
+#endif /* __MEDIA_VISION_IMAGEOBJECT_H__ */
diff --git a/mv_image/image/include/Recognition/ImageRecognizer.h b/mv_image/image/include/Recognition/ImageRecognizer.h
index 37d7e23e..2a925508 100644
--- a/mv_image/image/include/Recognition/ImageRecognizer.h
+++ b/mv_image/image/include/Recognition/ImageRecognizer.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __IMAGERECOGNIZER_H__
-#define __IMAGERECOGNIZER_H__
+#ifndef __MEDIA_VISION_IMAGERECOGNIZER_H__
+#define __MEDIA_VISION_IMAGERECOGNIZER_H__
#include "ImageMathUtil.h"
#include "ImageConfig.h"
@@ -59,12 +59,12 @@ public:
* @brief Recognizes the @a target on the scene.
*
* @since_tizen 3.0
- * @param [in] target @ref ImageObject, which will be recognized
- * @param [in] params Recognition parameters
- * @param [out] contour The result contour of @a target object on the
- * scene
- * @param [out] ignoreFactor Scaling factor of area near the contour
- * of object which will be ignored
+ * @param [in] target @ref ImageObject, which will be recognized
+ * @param [in] params Recognition parameters
+ * @param [out] contour The result contour of @a target object on the
+ * scene
+ * @param [out] ignoreFactor Scaling factor of area near the contour
+ * of object which will be ignored
* @return true if object is found on the scene, otherwise return false
*/
bool recognize(
@@ -95,12 +95,12 @@ private:
private:
/* TODO: Replace to cv::Ptr<ImageObject> */
- ImageObject m_scene;
+ ImageObject __scene;
- cv::BFMatcher m_matcher;
+ cv::BFMatcher __matcher;
};
} /* Image */
} /* MediaVision */
-#endif /* __IMAGERECOGNIZER_H__ */
+#endif /* __MEDIA_VISION_IMAGERECOGNIZER_H__ */
diff --git a/mv_image/image/include/Tracking/AsyncTracker.h b/mv_image/image/include/Tracking/AsyncTracker.h
index 890f655b..a84fa574 100644
--- a/mv_image/image/include/Tracking/AsyncTracker.h
+++ b/mv_image/image/include/Tracking/AsyncTracker.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __IMAGETRACKING_ASYNCTRACKER_H__
-#define __IMAGETRACKING_ASYNCTRACKER_H__
+#ifndef __MEDIA_VISION_ASYNCTRACKER_H__
+#define __MEDIA_VISION_ASYNCTRACKER_H__
#include "Tracking/ObjectTracker.h"
@@ -102,30 +102,30 @@ private:
static void *asyncTrack(void *data);
private:
- cv::Ptr<ObjectTracker> m_baseTracker;
+ cv::Ptr<ObjectTracker> __baseTracker;
- cv::Mat m_frame;
+ cv::Mat __frame;
- std::vector<cv::Point> m_result;
+ std::vector<cv::Point> __result;
- bool m_isRun;
+ bool __isRun;
- bool m_isUpdated;
+ bool __isUpdated;
- bool m_copyingPolicy;
+ bool __copyingPolicy;
- pthread_t m_thread;
+ pthread_t __mvThread;
- mutable pthread_mutex_t m_globalGuard;
+ mutable pthread_mutex_t __globalGuard;
- mutable pthread_spinlock_t m_resultGuard;
+ mutable pthread_spinlock_t __resultGuard;
- mutable pthread_spinlock_t m_isRunGuard;
+ mutable pthread_spinlock_t __isRunGuard;
- mutable pthread_spinlock_t m_isUpdatedGuard;
+ mutable pthread_spinlock_t __isUpdatedGuard;
};
} /* Image */
} /* MediaVision */
-#endif /* __IMAGETRACKING_ASYNCTRACKER_H__ */
+#endif /* __MEDIA_VISION_ASYNCTRACKER_H__ */
diff --git a/mv_image/image/include/Tracking/CascadeTracker.h b/mv_image/image/include/Tracking/CascadeTracker.h
index 4ac0ec32..e28e2944 100644
--- a/mv_image/image/include/Tracking/CascadeTracker.h
+++ b/mv_image/image/include/Tracking/CascadeTracker.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __IMAGETRACKING_CASCADETRACKER_H__
-#define __IMAGETRACKING_CASCADETRACKER_H__
+#ifndef __MEDIA_VISION_CASCADETRACKER_H__
+#define __MEDIA_VISION_CASCADETRACKER_H__
#include "Tracking/ObjectTracker.h"
@@ -118,15 +118,16 @@ private:
mutable std::vector<cv::Point> mResult;
};
- /* don't use m_trackers.find() because
- operator==() and operator<() are independent
- TODO: Fix it with aggregator or something like that */
- std::set<TrackerInfo> m_trackers;
+ /* don't use __trackers.find() because
+ * operator==() and operator<() are independent
+ * TODO: Fix it with aggregator or something like that
+ */
+ std::set<TrackerInfo> __trackers;
- float m_minimumArea;
+ float __minimumArea;
};
} /* Image */
} /* MediaVision */
-#endif /* __IMAGETRACKING_CASCADETRACKER_H__ */
+#endif /* __MEDIA_VISION_CASCADETRACKER_H__ */
diff --git a/mv_image/image/include/Tracking/FeatureSubstitutionTracker.h b/mv_image/image/include/Tracking/FeatureSubstitutionTracker.h
index 010ca89c..29ec6b13 100644
--- a/mv_image/image/include/Tracking/FeatureSubstitutionTracker.h
+++ b/mv_image/image/include/Tracking/FeatureSubstitutionTracker.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __IMAGETRACKING_FEATURESUBSTITUTIONTRACKER_H__
-#define __IMAGETRACKING_FEATURESUBSTITUTIONTRACKER_H__
+#ifndef __MEDIA_VISION_FEATURESUBSTITUTIONTRACKER_H__
+#define __MEDIA_VISION_FEATURESUBSTITUTIONTRACKER_H__
#include "Tracking/ObjectTracker.h"
@@ -85,24 +85,24 @@ private:
std::vector<cv::Point2f> computeExpectedArea();
private:
- bool m_isInit;
+ bool __isInit;
- cv::Ptr<ImageObject> m_target;
+ cv::Ptr<ImageObject> __target;
- std::vector<cv::Point> m_location;
+ std::vector<cv::Point> __location;
- FeaturesExtractingParams m_featureExtractingParams;
+ FeaturesExtractingParams __featureExtractingParams;
- RecognitionParams m_recogParams;
+ RecognitionParams __recogParams;
- float m_expectedOffset;
+ float __expectedOffset;
- float m_sceneScalingFactor;
+ float __sceneScalingFactor;
- float m_objectScalingFactor;
+ float __objectScalingFactor;
};
} /* Image */
} /* MediaVision */
-#endif /* __IMAGETRACKING_FEATURESUBSTITUTIONTRACKER_H__ */
+#endif /* __MEDIA_VISION_FEATURESUBSTITUTIONTRACKER_H__ */
diff --git a/mv_image/image/include/Tracking/ImageContourStabilizator.h b/mv_image/image/include/Tracking/ImageContourStabilizator.h
index 0d1e2956..4163218f 100644
--- a/mv_image/image/include/Tracking/ImageContourStabilizator.h
+++ b/mv_image/image/include/Tracking/ImageContourStabilizator.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __IMAGECONTOURSTABILIZATOR_H__
-#define __IMAGECONTOURSTABILIZATOR_H__
+#ifndef __MEDIA_VISION_IMAGECONTOURSTABILIZATOR_H__
+#define __MEDIA_VISION_IMAGECONTOURSTABILIZATOR_H__
#include "ImageConfig.h"
@@ -46,8 +46,8 @@ public:
enum StabilizationError {
Successfully, /**< Contour is stabilized. */
TooShortMovingHistory, /**< Too short moving history, it's normal
- behavior, you can continue to call
- stabilization in order to accumulate it. */
+ behavior, you can continue to call
+ stabilization in order to accumulate it. */
InvalidSettings, /**< Invalid settings. */
UnsupportedContourType /**< Unsupported contour type. */
};
@@ -90,30 +90,30 @@ private:
std::vector<cv::Point2f> computeStabilizedQuadrangleContour(void);
private:
- float m_tolerantShift;
+ float __tolerantShift;
- float m_tolerantShiftExtra;
+ float __tolerantShiftExtra;
- std::vector<float> m_speeds;
+ std::vector<float> __speeds;
- std::vector<size_t> m_currentCornersSpeed;
+ std::vector<size_t> __currentCornersSpeed;
- std::deque<std::vector<cv::Point2f> > m_movingHistory;
+ std::deque<std::vector<cv::Point2f> > __movingHistory;
- std::vector<cv::Point2f> m_lastStabilizedContour;
+ std::vector<cv::Point2f> __lastStabilizedContour;
- size_t m_historyAmount;
+ size_t __historyAmount;
- size_t m_currentHistoryAmount;
+ size_t __currentHistoryAmount;
- int m_tempContourIndex;
+ int __tempContourIndex;
- std::vector<float> m_priorities;
+ std::vector<float> __priorities;
- bool m_isPrepared;
+ bool __isPrepared;
};
} /* Image */
} /* MediaVision */
-#endif /* __IMAGECONTOURSTABILIZATOR_H__ */
+#endif /* __MEDIA_VISION_IMAGECONTOURSTABILIZATOR_H__ */
diff --git a/mv_image/image/include/Tracking/ImageTrackingModel.h b/mv_image/image/include/Tracking/ImageTrackingModel.h
index 5a971742..ce408252 100644
--- a/mv_image/image/include/Tracking/ImageTrackingModel.h
+++ b/mv_image/image/include/Tracking/ImageTrackingModel.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __IMAGETRACKINGMODEL_H__
-#define __IMAGETRACKINGMODEL_H__
+#ifndef __MEDIA_VISION_IMAGETRACKINGMODEL_H__
+#define __MEDIA_VISION_IMAGETRACKINGMODEL_H__
#include "Recognition/ImageObject.h"
@@ -129,18 +129,18 @@ public:
ImageTrackingModel& obj);
private:
- ImageObject m_target;
+ ImageObject __target;
- cv::Ptr<ObjectTracker> m_tracker;
+ cv::Ptr<ObjectTracker> __tracker;
- ImageContourStabilizator m_stabilizator;
+ ImageContourStabilizator __stabilizator;
- std::vector<cv::Point> m_location;
+ std::vector<cv::Point> __location;
- StabilizationParams m_stabilizationParams;
+ StabilizationParams __stabilizationParams;
};
} /* Image */
} /* MediaVision */
-#endif /* __IMAGETRACKINGMODEL_H__ */
+#endif /* __MEDIA_VISION_IMAGETRACKINGMODEL_H__ */
diff --git a/mv_image/image/include/Tracking/MFTracker.h b/mv_image/image/include/Tracking/MFTracker.h
index 3ff98baf..3fbd9eaf 100644
--- a/mv_image/image/include/Tracking/MFTracker.h
+++ b/mv_image/image/include/Tracking/MFTracker.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __IMAGETRACKING_MFTRACKER_H__
-#define __IMAGETRACKING_MFTRACKER_H__
+#ifndef __MEDIA_VISION_MFTRACKER_H__
+#define __MEDIA_VISION_MFTRACKER_H__
#include "Tracking/ObjectTracker.h"
@@ -33,22 +33,22 @@ public:
/**
* @brief TrackerMedianFlow algorithm parameters constructor
*/
- Params();
+ Params();
- int mPointsInGrid; /**< Square root of number of used keypoints.
- Increase it to trade accurateness for speed.
- Default value is sensible and recommended */
+ int mPointsInGrid; /**< Square root of number of used keypoints.
+ Increase it to trade accurateness for speed.
+ Default value is sensible and recommended */
- cv::Size mWindowSize; /**< Size of the search window at each pyramid level
- for Lucas-Kanade optical flow search used for
- tracking */
+ cv::Size mWindowSize; /**< Size of the search window at each pyramid level
+ for Lucas-Kanade optical flow search used for
+ tracking */
- int mPyrMaxLevel; /**< Number of pyramid levels for Lucas-Kanade optical
- flow search used for tracking */
+ int mPyrMaxLevel; /**< Number of pyramid levels for Lucas-Kanade optical
+ flow search used for tracking */
/* TODO: add lifetime*/
/*time_t mLifetime;*/ /**< Time of tracking without reinforcement. */
- };
+ };
/**
* @brief @ref MFTracker constructor based on tracking algorithm parameters.
@@ -118,29 +118,29 @@ private:
std::vector<bool>& status);
private:
- bool m_isInit; /**< Flag is used to determine the model
+ bool __isInit; /**< Flag is used to determine the model
initialization */
- Params m_params; /**< Parameters used during tracking, see
+ Params __params; /**< Parameters used during tracking, see
@ref TrackerMedianFlow::Params */
- cv::TermCriteria m_termcrit; /**< Terminating criteria for OpenCV
+ cv::TermCriteria __termcrit; /**< Terminating criteria for OpenCV
Lucas–Kanade optical flow algorithm used
during tracking */
- std::vector<cv::Point2f> m_startLocation; /**< Tracking object start
+ std::vector<cv::Point2f> __startLocation; /**< Tracking object start
location with relative values
to the bounding box */
- cv::Rect_<float> m_boundingBox; /**< Tracking object bounding box */
+ cv::Rect_<float> __boundingBox; /**< Tracking object bounding box */
- float m_confidence; /**< Confidence that object was tracked
+ float __confidence; /**< Confidence that object was tracked
correctly at the last tracking iteration */
- cv::Mat m_image; /**< Last image for which tracking was
+ cv::Mat __image; /**< Last image for which tracking was
performed */
- std::vector<cv::Mat> m_pyramid; /**< The pyramid had been calculated for
+ std::vector<cv::Mat> __pyramid; /**< The pyramid had been calculated for
the previous frame(or when
initialize the model) */
};
@@ -148,4 +148,4 @@ private:
} /* Image */
} /* MediaVision */
-#endif /* __IMAGETRACKING_MFTRACKER_H__ */
+#endif /* __MEDIA_VISION_MFTRACKER_H__ */
diff --git a/mv_image/image/include/Tracking/ObjectTracker.h b/mv_image/image/include/Tracking/ObjectTracker.h
index 77e884e1..ffc02c1f 100644
--- a/mv_image/image/include/Tracking/ObjectTracker.h
+++ b/mv_image/image/include/Tracking/ObjectTracker.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __IMAGETRACKING_OBJECTTRACKER_H__
-#define __IMAGETRACKING_OBJECTTRACKER_H__
+#ifndef __MEDIA_VISION_OBJECTTRACKER_H__
+#define __MEDIA_VISION_OBJECTTRACKER_H__
#include <opencv2/core/core.hpp>
@@ -77,4 +77,4 @@ private:
} /* Image */
} /* MediaVision */
-#endif /* __IMAGETRACKING_OBJECTTRACKER_H__ */
+#endif /* __MEDIA_VISION_OBJECTTRACKER_H__ */
diff --git a/mv_image/image/include/Tracking/RecognitionBasedTracker.h b/mv_image/image/include/Tracking/RecognitionBasedTracker.h
index 81068604..3f63b75c 100644
--- a/mv_image/image/include/Tracking/RecognitionBasedTracker.h
+++ b/mv_image/image/include/Tracking/RecognitionBasedTracker.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __IMAGETRACKING_RECOGNITIONBASEDTRACKER_H__
-#define __IMAGETRACKING_RECOGNITIONBASEDTRACKER_H__
+#ifndef __MEDIA_VISION_RECOGNITIONBASEDTRACKER_H__
+#define __MEDIA_VISION_RECOGNITIONBASEDTRACKER_H__
#include "Tracking/ObjectTracker.h"
@@ -80,14 +80,14 @@ public:
virtual cv::Ptr<ObjectTracker> clone() const;
private:
- ImageObject m_target;
+ ImageObject __target;
- FeaturesExtractingParams m_sceneFeatureExtractingParams;
+ FeaturesExtractingParams __sceneFeatureExtractingParams;
- RecognitionParams m_recogParams;
+ RecognitionParams __recogParams;
};
} /* Image */
} /* MediaVision */
-#endif /* __IMAGETRACKING_RECOGNITIONBASEDTRACKER_H__ */
+#endif /* __MEDIA_VISION_RECOGNITIONBASEDTRACKER_H__ */
diff --git a/mv_image/image/include/mv_image_open.h b/mv_image/image/include/mv_image_open.h
index a73df98b..0184b815 100644
--- a/mv_image/image/include/mv_image_open.h
+++ b/mv_image/image/include/mv_image_open.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __TIZEN_MEDIAVISION_IMAGE_OPEN_H__
-#define __TIZEN_MEDIAVISION_IMAGE_OPEN_H__
+#ifndef __MEDIA_VISION_IMAGE_OPEN_H__
+#define __MEDIA_VISION_IMAGE_OPEN_H__
#include "mv_image.h"
@@ -351,7 +351,8 @@ int mv_image_object_clone_open(
* @see mv_image_object_destroy_open()
*/
int mv_image_object_save_open(
- const char *file_name, mv_image_object_h image_object);
+ const char *file_name,
+ mv_image_object_h image_object);
/**
* @brief Loads an image object from the file.
@@ -377,7 +378,8 @@ int mv_image_object_save_open(
* @see mv_image_object_destroy_open()
*/
int mv_image_object_load_open(
- const char *file_name, mv_image_object_h *image_object);
+ const char *file_name,
+ mv_image_object_h *image_object);
/**********************************/
/* Image tracking model behaviour */
@@ -526,7 +528,8 @@ int mv_image_tracking_model_clone_open(
* @see mv_image_tracking_model_destroy_open()
*/
int mv_image_tracking_model_save_open(
- const char *file_name, mv_image_tracking_model_h image_tracking_model);
+ const char *file_name,
+ mv_image_tracking_model_h image_tracking_model);
/**
* @brief Loads an image tracking model from the file.
@@ -552,10 +555,11 @@ int mv_image_tracking_model_save_open(
* @see mv_image_tracking_model_destroy_open()
*/
int mv_image_tracking_model_load_open(
- const char *file_name, mv_image_tracking_model_h *image_tracking_model);
+ const char *file_name,
+ mv_image_tracking_model_h *image_tracking_model);
#ifdef __cplusplus
}
#endif /* __cplusplus */
-#endif /* __TIZEN_MEDIAVISION_IMAGE_OPEN_H__ */
+#endif /* __MEDIA_VISION_IMAGE_OPEN_H__ */
diff --git a/mv_image/image/src/Features/BasicExtractorFactory.cpp b/mv_image/image/src/Features/BasicExtractorFactory.cpp
index 09285da1..9c2d6e61 100644
--- a/mv_image/image/src/Features/BasicExtractorFactory.cpp
+++ b/mv_image/image/src/Features/BasicExtractorFactory.cpp
@@ -20,26 +20,29 @@
namespace MediaVision {
namespace Image {
+
BasicExtractorFactory::BasicExtractorFactory(
KeypointType keypointsType,
DescriptorType descType) :
- m_kpType(keypointsType),
- m_descType(descType)
+ __kpType(keypointsType),
+ __descType(descType)
{
}
cv::Ptr<FeatureExtractor> BasicExtractorFactory::buildFeatureExtractor()
{
- cv::Ptr<FeatureExtractor> featureExtractor(new FeatureExtractor());
+ cv::Ptr<FeatureExtractor> featureExtractor(new (std::nothrow)FeatureExtractor());
+ if (featureExtractor == NULL)
+ return NULL;
cv::Ptr<cv::FeatureDetector> detector =
- cv::FeatureDetector::create(KeypointNames[m_kpType]);
+ cv::FeatureDetector::create(KeypointNames[__kpType]);
cv::Ptr<cv::DescriptorExtractor> extractor =
- cv::DescriptorExtractor::create(DescriptorNames[m_descType]);
+ cv::DescriptorExtractor::create(DescriptorNames[__descType]);
- featureExtractor->setFeatureDetector(detector, m_kpType);
- featureExtractor->setDescriptorExtractor(extractor, m_descType);
+ featureExtractor->setFeatureDetector(detector, __kpType);
+ featureExtractor->setDescriptorExtractor(extractor, __descType);
return featureExtractor;
}
diff --git a/mv_image/image/src/Features/FeatureExtractor.cpp b/mv_image/image/src/Features/FeatureExtractor.cpp
index be9224b7..15c36bec 100644
--- a/mv_image/image/src/Features/FeatureExtractor.cpp
+++ b/mv_image/image/src/Features/FeatureExtractor.cpp
@@ -22,14 +22,15 @@
namespace MediaVision {
namespace Image {
-const cv::Size FeatureExtractor::MinSize = cv::Size(5, 5);
+
+const cv::Size FeatureExtractor::__MIN_SIZE = cv::Size(5, 5);
FeatureExtractor::FeatureExtractor() :
- m_kpType(KT_INVALID),
- m_detector(),
- m_descType(DT_INVALID),
- m_extractor(),
- m_computeRecognitionRate(NULL)
+ __kpType(KT_INVALID),
+ __detector(),
+ __descType(DT_INVALID),
+ __extractor(),
+ __computeRecognitionRate(NULL)
{
}
@@ -37,16 +38,16 @@ void FeatureExtractor::setFeatureDetector(
const cv::Ptr<cv::FeatureDetector> detector,
KeypointType keypointType)
{
- m_detector = detector;
- m_kpType = keypointType;
+ __detector = detector;
+ __kpType = keypointType;
}
void FeatureExtractor::setDescriptorExtractor(
cv::Ptr<cv::DescriptorExtractor> extractor,
DescriptorType descriptorType)
{
- m_extractor = extractor;
- m_descType = descriptorType;
+ __extractor = extractor;
+ __descType = descriptorType;
}
void FeatureExtractor::setRecognitionRateMetric(
@@ -54,7 +55,7 @@ void FeatureExtractor::setRecognitionRateMetric(
const cv::Mat&,
const std::vector<cv::KeyPoint>&))
{
- m_computeRecognitionRate = computeRecognitionRate;
+ __computeRecognitionRate = computeRecognitionRate;
}
bool FeatureExtractor::extract(
@@ -62,7 +63,7 @@ bool FeatureExtractor::extract(
FeaturePack& result,
const std::vector<cv::Point2f>& roi)
{
- if (m_detector.empty() || m_extractor.empty())
+ if (__detector.empty() || __extractor.empty())
return false;
cv::Rect boundingBox;
@@ -80,58 +81,59 @@ bool FeatureExtractor::extract(
catRect(boundingBox, image.size());
}
- if (boundingBox.width < MinSize.width || boundingBox.height < MinSize.height)
+ if (boundingBox.width < __MIN_SIZE.width || boundingBox.height < __MIN_SIZE.height)
return false;
- result.m_objectKeypoints.clear();
+ result.__objectKeypoints.clear();
std::vector<cv::KeyPoint> keypoints;
- m_detector->detect(
+ __detector->detect(
image(boundingBox),
keypoints);
- result.m_objectKeypoints = keypoints;
+ result.__objectKeypoints = keypoints;
+
if (!roi.empty()) {
const size_t numberOfKeypoints = keypoints.size();
- result.m_objectKeypoints.resize(numberOfKeypoints);
+ result.__objectKeypoints.resize(numberOfKeypoints);
for (size_t i = 0; i < numberOfKeypoints; ++i) {
- result.m_objectKeypoints[i].pt.x += boundingBox.x;
- result.m_objectKeypoints[i].pt.y += boundingBox.y;
+ result.__objectKeypoints[i].pt.x += boundingBox.x;
+ result.__objectKeypoints[i].pt.y += boundingBox.y;
}
}
if (!roi.empty()) {
/* TODO: Ecode roi to reduce the boundary effect. Provide new parameter
- / for this action cause roi is a bounding contour for the object. */
+ for this action cause roi is a bounding contour for the object. */
- for (size_t i = 0; i < result.m_objectKeypoints.size(); ++i) {
- if (!checkAccessory(result.m_objectKeypoints[i].pt, roi)) {
- result.m_objectKeypoints.erase(result.m_objectKeypoints.begin() + i);
+ for (size_t i = 0; i < result.__objectKeypoints.size(); ++i) {
+ if (!checkAccessory(result.__objectKeypoints[i].pt, roi)) {
+ result.__objectKeypoints.erase(result.__objectKeypoints.begin() + i);
--i;
}
}
}
- m_extractor->compute(
+ __extractor->compute(
image,
- result.m_objectKeypoints,
- result.m_objectDescriptors);
+ result.__objectKeypoints,
+ result.__objectDescriptors);
- if (NULL != m_computeRecognitionRate) {
- result.m_recognitionRate = m_computeRecognitionRate(
+ if (NULL != __computeRecognitionRate) {
+ result.__recognitionRate = __computeRecognitionRate(
image(boundingBox),
keypoints);
} else {
/* Default recognition rate metric */
- if (result.m_objectKeypoints.size() < MinimumNumberOfFeatures)
- result.m_recognitionRate = 0.f;
+ if (result.__objectKeypoints.size() < MinimumNumberOfFeatures)
+ result.__recognitionRate = 0.f;
else
- result.m_recognitionRate = 0.5f;
+ result.__recognitionRate = 0.5f;
}
- result.m_keypointsType = m_kpType;
- result.m_descriptorsType = m_descType;
+ result.__keypointsType = __kpType;
+ result.__descriptorsType = __descType;
return true;
}
diff --git a/mv_image/image/src/Features/FeatureExtractorFactory.cpp b/mv_image/image/src/Features/FeatureExtractorFactory.cpp
index be022d59..3cda88ee 100644
--- a/mv_image/image/src/Features/FeatureExtractorFactory.cpp
+++ b/mv_image/image/src/Features/FeatureExtractorFactory.cpp
@@ -21,7 +21,7 @@ namespace Image {
FeatureExtractorFactory::~FeatureExtractorFactory()
{
- ; /* NULL */
+ ; /* NULL */
}
} /* Image */
diff --git a/mv_image/image/src/Features/FeatureMatcher.cpp b/mv_image/image/src/Features/FeatureMatcher.cpp
index 74b4312e..891e85f4 100644
--- a/mv_image/image/src/Features/FeatureMatcher.cpp
+++ b/mv_image/image/src/Features/FeatureMatcher.cpp
@@ -24,6 +24,7 @@ namespace MediaVision {
namespace Image {
namespace {
+
float computeLinearSupportElement(
const std::vector<cv::DMatch>& examples,
int requiredNumber,
@@ -77,8 +78,8 @@ size_t matchesSelection(
while (true) {
if (leftLimit >= rightLimit) {
- if (leftLimit < (requiredNumber - (int)allowableError))
- leftLimit = requiredNumber + (int)allowableError;
+ if (leftLimit < (requiredNumber - static_cast<int>(allowableError)))
+ leftLimit = requiredNumber + static_cast<int>(allowableError);
break;
}
@@ -88,29 +89,27 @@ size_t matchesSelection(
/* Iteration similar quicksort */
while (true) {
- /* Search the leftmost element which have bigger confidence than support element */
+ /* Search the leftmost element which have
+ * bigger confidence than support element */
while (examples[leftLimit].distance <= supportElement &&
- leftLimit < startRightLimit) {
+ leftLimit < startRightLimit)
++leftLimit;
- }
- /* Search the rightmost element which have smaller confidence than support element */
+ /* Search the rightmost element which have smaller
+ * confidence than support element */
while (examples[rightLimit].distance >= supportElement &&
- rightLimit >= startLeftLimit) {
+ rightLimit >= startLeftLimit)
--rightLimit;
- }
if (leftLimit >= rightLimit)
break;
/* Swap */
- std::swap(examples[leftLimit], examples[rightLimit]);
+ std::swap(examples[leftLimit], examples[rightLimit]);
}
-
- if (abs(filterAmount - leftLimit) <= (int)allowableError)
+ if (abs(filterAmount - leftLimit) <= static_cast<int>(allowableError))
break;
-
- if ((int)filterAmount > leftLimit) {
+ if (static_cast<int>(filterAmount) > leftLimit) {
requiredNumber -= leftLimit - startLeftLimit;
rightLimit = startRightLimit;
@@ -141,43 +140,43 @@ FeatureMatcher::MatchError FeatureMatcher::match(
const FeaturePack& to,
cv::Mat& homophraphyMatrix) const
{
- if (MinimumNumberOfFeatures > from.m_objectKeypoints.size())
+ if (MinimumNumberOfFeatures > from.__objectKeypoints.size())
return InvalidFeaturePackFrom;
- if (MinimumNumberOfFeatures > to.m_objectKeypoints.size())
+ if (MinimumNumberOfFeatures > to.__objectKeypoints.size())
return InvalidFeaturePackTo;
- if (from.m_descriptorsType != to.m_descriptorsType)
+ if (from.__descriptorsType != to.__descriptorsType)
return DisparateTypes;
std::vector<cv::DMatch> matches;
- m_matcher.match(from.m_objectDescriptors, to.m_objectDescriptors, matches);
+ __matcher.match(from.__objectDescriptors, to.__objectDescriptors, matches);
size_t matchesNumber = matches.size();
if (MinimumNumberOfFeatures > matchesNumber)
return MatchesNotFound;
- size_t requiredMatchesNumber = m_affectingPart * matchesNumber;
- size_t allowableMatchesNumberError = m_tolerantError * requiredMatchesNumber;
+ size_t requiredMatchesNumber = __affectingPart * matchesNumber;
+ size_t allowableMatchesNumberError = __tolerantError * requiredMatchesNumber;
if (matchesNumber - allowableMatchesNumberError > MinimumNumberOfFeatures &&
- requiredMatchesNumber + allowableMatchesNumberError < matchesNumber) {
+ requiredMatchesNumber + allowableMatchesNumberError < matchesNumber) {
if (requiredMatchesNumber - allowableMatchesNumberError <
- m_minimumMatchesNumber) {
+ __minimumMatchesNumber) {
if (requiredMatchesNumber + allowableMatchesNumberError >
- m_minimumMatchesNumber) {
+ __minimumMatchesNumber) {
requiredMatchesNumber = (requiredMatchesNumber +
- m_minimumMatchesNumber + allowableMatchesNumberError) / 2;
+ __minimumMatchesNumber + allowableMatchesNumberError) / 2;
allowableMatchesNumberError = requiredMatchesNumber -
- m_minimumMatchesNumber + allowableMatchesNumberError;
+ __minimumMatchesNumber + allowableMatchesNumberError;
} else {
const size_t minimalAllowableMatchesNumberError = 2u;
requiredMatchesNumber = minimalAllowableMatchesNumberError +
- m_minimumMatchesNumber;
+ __minimumMatchesNumber;
allowableMatchesNumberError = minimalAllowableMatchesNumberError;
}
@@ -199,10 +198,10 @@ FeatureMatcher::MatchError FeatureMatcher::match(
for (size_t matchIdx = 0; matchIdx < matchesNumber; ++matchIdx) {
objectPoints[matchIdx] =
- from.m_objectKeypoints[matches[matchIdx].queryIdx].pt;
+ from.__objectKeypoints[matches[matchIdx].queryIdx].pt;
scenePoints[matchIdx] =
- to.m_objectKeypoints[matches[matchIdx].trainIdx].pt;
+ to.__objectKeypoints[matches[matchIdx].trainIdx].pt;
}
homophraphyMatrix = cv::findHomography(objectPoints, scenePoints, CV_RANSAC);
@@ -212,32 +211,32 @@ FeatureMatcher::MatchError FeatureMatcher::match(
float FeatureMatcher::getAffectingPart() const
{
- return m_affectingPart;
+ return __affectingPart;
}
void FeatureMatcher::setAffectingPart(float affectingPart)
{
- m_affectingPart = std::max(0.f, std::min(1.f, affectingPart));
+ __affectingPart = std::max(0.f, std::min(1.f, affectingPart));
}
float FeatureMatcher::getTolerantError() const
{
- return m_tolerantError;
+ return __tolerantError;
}
void FeatureMatcher::setTolerantError(float tolerantError)
{
- m_tolerantError = std::max(0.f, std::min(1.f, tolerantError));
+ __tolerantError = std::max(0.f, std::min(1.f, tolerantError));
}
size_t FeatureMatcher::getMinimumMatchesNumber() const
{
- return m_minimumMatchesNumber;
+ return __minimumMatchesNumber;
}
void FeatureMatcher::setMinimumMatchesNumber(size_t minimumMatchesNumber)
{
- m_minimumMatchesNumber = minimumMatchesNumber;
+ __minimumMatchesNumber = minimumMatchesNumber;
}
} /* Image */
diff --git a/mv_image/image/src/Features/FeaturePack.cpp b/mv_image/image/src/Features/FeaturePack.cpp
index 61364f5f..57338ff2 100644
--- a/mv_image/image/src/Features/FeaturePack.cpp
+++ b/mv_image/image/src/Features/FeaturePack.cpp
@@ -22,21 +22,21 @@ namespace MediaVision {
namespace Image {
FeaturePack::FeaturePack() :
- m_keypointsType(KT_INVALID),
- m_objectKeypoints(),
- m_descriptorsType(DT_INVALID),
- m_objectDescriptors(),
- m_recognitionRate(0.f)
+ __keypointsType(KT_INVALID),
+ __objectKeypoints(),
+ __descriptorsType(DT_INVALID),
+ __objectDescriptors(),
+ __recognitionRate(0.f)
{
; /* NULL */
}
FeaturePack::FeaturePack(const FeaturePack& copy) :
- m_keypointsType(copy.m_keypointsType),
- m_objectKeypoints(copy.m_objectKeypoints),
- m_descriptorsType(copy.m_descriptorsType),
- m_objectDescriptors(copy.m_objectDescriptors.clone()),
- m_recognitionRate(copy.m_recognitionRate)
+ __keypointsType(copy.__keypointsType),
+ __objectKeypoints(copy.__objectKeypoints),
+ __descriptorsType(copy.__descriptorsType),
+ __objectDescriptors(copy.__objectDescriptors.clone()),
+ __recognitionRate(copy.__recognitionRate)
{
; /* NULL */
}
@@ -44,11 +44,11 @@ FeaturePack::FeaturePack(const FeaturePack& copy) :
FeaturePack& FeaturePack::operator= (const FeaturePack& copy)
{
if (this != &copy) {
- m_keypointsType = copy.m_keypointsType;
- m_objectKeypoints = copy.m_objectKeypoints;
- m_descriptorsType = copy.m_descriptorsType;
- m_objectDescriptors = copy.m_objectDescriptors.clone();
- m_recognitionRate = copy.m_recognitionRate;
+ __keypointsType = copy.__keypointsType;
+ __objectKeypoints = copy.__objectKeypoints;
+ __descriptorsType = copy.__descriptorsType;
+ __objectDescriptors = copy.__objectDescriptors.clone();
+ __recognitionRate = copy.__recognitionRate;
}
return *this;
diff --git a/mv_image/image/src/Features/ORBExtractorFactory.cpp b/mv_image/image/src/Features/ORBExtractorFactory.cpp
index 8ac34df4..cc482cee 100644
--- a/mv_image/image/src/Features/ORBExtractorFactory.cpp
+++ b/mv_image/image/src/Features/ORBExtractorFactory.cpp
@@ -32,12 +32,16 @@ ORBExtractorFactory::ORBExtractorFactory(
cv::Ptr<FeatureExtractor> ORBExtractorFactory::buildFeatureExtractor()
{
- cv::Ptr<FeatureExtractor> featureExtractor(new FeatureExtractor());
+ cv::Ptr<FeatureExtractor> featureExtractor(new (std::nothrow)FeatureExtractor());
+ if (featureExtractor == NULL)
+ return NULL;
cv::Ptr<cv::OrbFeatureDetector> detector(
- new cv::ORB(
- m_maximumFeaturesNumber,
- m_scaleFactor));
+ new (std::nothrow)cv::ORB(
+ __maximumFeaturesNumber,
+ __scaleFactor));
+ if (detector == NULL)
+ return NULL;
cv::Ptr<cv::OrbDescriptorExtractor> extractor = detector;
@@ -50,22 +54,22 @@ cv::Ptr<FeatureExtractor> ORBExtractorFactory::buildFeatureExtractor()
float ORBExtractorFactory::getScaleFactor() const
{
- return m_scaleFactor;
+ return __scaleFactor;
}
void ORBExtractorFactory::setScaleFactor(float scaleFactor)
{
- m_scaleFactor = scaleFactor;
+ __scaleFactor = scaleFactor;
}
size_t ORBExtractorFactory::getMaximumFeaturesNumber() const
{
- return m_scaleFactor;
+ return __scaleFactor;
}
void ORBExtractorFactory::setMaximumFeaturesNumber(size_t maximumFeaturesNumber)
{
- m_maximumFeaturesNumber = maximumFeaturesNumber;
+ __maximumFeaturesNumber = maximumFeaturesNumber;
}
float ORBExtractorFactory::computeRecognitionRate(
@@ -79,17 +83,17 @@ float ORBExtractorFactory::computeRecognitionRate(
if (numberOfKeypoints < MinimumNumberOfFeatures)
return 0.f;
- static const size_t xCellsNumber = 10u;
- static const size_t yCellsNumber = 10u;
+ static const size_t X_CELLS_NUMBER = 10u;
+ static const size_t Y_CELLS_NUMBER = 10u;
- cv::Mat cells[xCellsNumber][yCellsNumber];
- size_t accumulationCounter[xCellsNumber][yCellsNumber];
+ cv::Mat cells[X_CELLS_NUMBER][Y_CELLS_NUMBER];
+ size_t accumulationCounter[X_CELLS_NUMBER][Y_CELLS_NUMBER];
- const size_t cellWidth = image.cols / xCellsNumber;
- const size_t cellHeight = image.rows / yCellsNumber;
+ const size_t cellWidth = image.cols / X_CELLS_NUMBER;
+ const size_t cellHeight = image.rows / Y_CELLS_NUMBER;
- for (size_t x = 0u; x < xCellsNumber; ++x) {
- for (size_t y = 0u; y < yCellsNumber; ++y) {
+ for (size_t x = 0u; x < X_CELLS_NUMBER; ++x) {
+ for (size_t y = 0u; y < Y_CELLS_NUMBER; ++y) {
cells[x][y] = image(cv::Rect(
x * cellWidth,
y * cellHeight,
@@ -102,36 +106,34 @@ float ORBExtractorFactory::computeRecognitionRate(
for (size_t i = 0u; i < numberOfKeypoints; ++i) {
size_t xCellIdx = keypoints[i].pt.x / cellWidth;
- if (xCellIdx >= xCellsNumber)
- xCellIdx = xCellsNumber - 1;
-
+ if (xCellIdx >= X_CELLS_NUMBER)
+ xCellIdx = X_CELLS_NUMBER - 1;
size_t yCellIdx = keypoints[i].pt.y / cellHeight;
- if (yCellIdx >= yCellsNumber)
- yCellIdx = yCellsNumber - 1;
-
+ if (yCellIdx >= Y_CELLS_NUMBER)
+ yCellIdx = Y_CELLS_NUMBER - 1;
++(accumulationCounter[xCellIdx][yCellIdx]);
}
const float exceptedNumber = numberOfKeypoints /
- (float)(xCellsNumber * yCellsNumber);
+ (float)(X_CELLS_NUMBER * Y_CELLS_NUMBER);
float distributedEvaluation = 0.f;
- for (size_t x = 0u; x < xCellsNumber; ++x) {
- for (size_t y = 0u; y < yCellsNumber; ++y) {
+ for (size_t x = 0u; x < X_CELLS_NUMBER; ++x) {
+ for (size_t y = 0u; y < Y_CELLS_NUMBER; ++y) {
distributedEvaluation += (accumulationCounter[x][y] - exceptedNumber) *
(accumulationCounter[x][y] - exceptedNumber) / exceptedNumber;
}
}
- float maximumDistributedEvaluation = (xCellsNumber * yCellsNumber - 1) *
- exceptedNumber;
+ float maximumDistributedEvaluation = (X_CELLS_NUMBER * Y_CELLS_NUMBER - 1) *
+ exceptedNumber;
maximumDistributedEvaluation += (numberOfKeypoints - exceptedNumber) *
(numberOfKeypoints - exceptedNumber) / exceptedNumber;
distributedEvaluation = 1 -
- (distributedEvaluation / maximumDistributedEvaluation);
+ (distributedEvaluation / maximumDistributedEvaluation);
/* Exponentiation to find an approximate confidence value based on the
* number of key points on the image. */
diff --git a/mv_image/image/src/ImageConfig.cpp b/mv_image/image/src/ImageConfig.cpp
index 0f31d665..d90330fc 100644
--- a/mv_image/image/src/ImageConfig.cpp
+++ b/mv_image/image/src/ImageConfig.cpp
@@ -37,9 +37,9 @@ RecognitionParams::RecognitionParams(
}
RecognitionParams::RecognitionParams() :
- mMinMatchesNumber(0),
- mRequiredMatchesPart(1.0),
- mTolerantMatchesPartError(0.0)
+ mMinMatchesNumber(0),
+ mRequiredMatchesPart(1.0),
+ mTolerantMatchesPartError(0.0)
{
; /* NULL */
}
@@ -73,10 +73,10 @@ StabilizationParams::StabilizationParams() :
}
TrackingParams::TrackingParams(
- FeaturesExtractingParams framesFeaturesExtractingParams,
- RecognitionParams recognitionParams,
- StabilizationParams stabilizationParams,
- double expectedOffset) :
+ FeaturesExtractingParams framesFeaturesExtractingParams,
+ RecognitionParams recognitionParams,
+ StabilizationParams stabilizationParams,
+ double expectedOffset) :
mFramesFeaturesExtractingParams(framesFeaturesExtractingParams),
mRecognitionParams(recognitionParams),
mStabilizationParams(stabilizationParams),
diff --git a/mv_image/image/src/ImageMathUtil.cpp b/mv_image/image/src/ImageMathUtil.cpp
index f8d78902..8b53f9f0 100644
--- a/mv_image/image/src/ImageMathUtil.cpp
+++ b/mv_image/image/src/ImageMathUtil.cpp
@@ -69,11 +69,10 @@ bool checkAccessory(
for (size_t i = 0u, j = numberOfContourPoints - 1; i < numberOfContourPoints; j = i++) {
if (((region[i].y > point.y) != (region[j].y > point.y)) &&
- ((float) point.x < (float)
- (region[j].x - region[i].x) * (point.y - region[i].y) /
- (region[j].y - region[i].y) + region[i].x)) {
+ ((float) point.x < (float)
+ (region[j].x - region[i].x) * (point.y - region[i].y) /
+ (region[j].y - region[i].y) + region[i].x))
insideFlag = !insideFlag;
- }
}
return insideFlag;
diff --git a/mv_image/image/src/Recognition/ImageObject.cpp b/mv_image/image/src/Recognition/ImageObject.cpp
index 41d2f8e4..057038eb 100644
--- a/mv_image/image/src/Recognition/ImageObject.cpp
+++ b/mv_image/image/src/Recognition/ImageObject.cpp
@@ -34,31 +34,31 @@
namespace MediaVision {
namespace Image {
ImageObject::ImageObject() :
- m_features(),
- m_isEmpty(true),
- m_isLabeled(false),
- m_label(0)
+ __features(),
+ __isEmpty(true),
+ __isLabeled(false),
+ __label(0)
{
; /* NULL */
}
ImageObject::ImageObject(const cv::Mat& image, const FeaturesExtractingParams& params) :
- m_featureExtractingParams(),
- m_features(),
- m_isEmpty(true),
- m_isLabeled(false),
- m_label(0)
+ __featureExtractingParams(),
+ __features(),
+ __isEmpty(true),
+ __isLabeled(false),
+ __label(0)
{
fill(image, params);
}
ImageObject::ImageObject(const ImageObject& copy) :
- m_featureExtractingParams(copy.m_featureExtractingParams),
- m_features(copy.m_features),
- m_isEmpty(copy.m_isEmpty),
- m_isLabeled(copy.m_isLabeled),
- m_label(copy.m_label),
- m_boundingContour(copy.m_boundingContour)
+ __featureExtractingParams(copy.__featureExtractingParams),
+ __features(copy.__features),
+ __isEmpty(copy.__isEmpty),
+ __isLabeled(copy.__isLabeled),
+ __label(copy.__label),
+ __boundingContour(copy.__boundingContour)
{
; /* NULL */
}
@@ -66,12 +66,12 @@ ImageObject::ImageObject(const ImageObject& copy) :
ImageObject& ImageObject::operator=(const ImageObject& copy)
{
if (this != &copy) {
- m_isEmpty = copy.m_isEmpty;
- m_isLabeled = copy.m_isLabeled;
- m_label = copy.m_label;
- m_boundingContour = copy.m_boundingContour;
+ __isEmpty = copy.__isEmpty;
+ __isLabeled = copy.__isLabeled;
+ __label = copy.__label;
+ __boundingContour = copy.__boundingContour;
- m_features = copy.m_features;
+ __features = copy.__features;
}
return *this;
@@ -87,36 +87,36 @@ void ImageObject::fill(
const FeaturesExtractingParams& params,
const std::vector<cv::Point2f>& roi)
{
- m_isEmpty = false;
+ __isEmpty = false;
if (!roi.empty()) {
- m_boundingContour = roi;
+ __boundingContour = roi;
} else {
- m_boundingContour.resize(NumberOfQuadrangleCorners);
+ __boundingContour.resize(NumberOfQuadrangleCorners);
- m_boundingContour[0].x = 0.f;
- m_boundingContour[0].y = 0.f;
+ __boundingContour[0].x = 0.f;
+ __boundingContour[0].y = 0.f;
- m_boundingContour[1].x = image.cols;
- m_boundingContour[1].y = 0.f;
+ __boundingContour[1].x = image.cols;
+ __boundingContour[1].y = 0.f;
- m_boundingContour[2].x = image.cols;
- m_boundingContour[2].y = image.rows;
+ __boundingContour[2].x = image.cols;
+ __boundingContour[2].y = image.rows;
- m_boundingContour[3].x = 0.f;
- m_boundingContour[3].y = image.rows;
+ __boundingContour[3].x = 0.f;
+ __boundingContour[3].y = image.rows;
}
- extractFeatures(image, params, m_boundingContour);
+ extractFeatures(image, params, __boundingContour);
- m_featureExtractingParams = params;
+ __featureExtractingParams = params;
LOGI("[%s] Image object is filled.", __FUNCTION__);
}
float ImageObject::getRecognitionRate(void) const
{
- return m_features.m_recognitionRate;
+ return __features.__recognitionRate;
}
void ImageObject::extractFeatures(
@@ -145,33 +145,33 @@ void ImageObject::extractFeatures(
}
if (!extractor.empty())
- extractor->extract(image, m_features, roi);
+ extractor->extract(image, __features, roi);
}
bool ImageObject::isEmpty() const
{
- return (m_features.m_objectKeypoints.empty() ||
- m_features.m_objectDescriptors.empty());
+ return (__features.__objectKeypoints.empty() ||
+ __features.__objectDescriptors.empty());
}
void ImageObject::setContour(const std::vector<cv::Point2f>& contour)
{
- m_boundingContour = contour;
+ __boundingContour = contour;
}
void ImageObject::setLabel(int label)
{
- m_isLabeled = true;
- m_label = label;
+ __isLabeled = true;
+ __label = label;
}
bool ImageObject::getLabel(int& label) const
{
- if (!m_isLabeled) {
+ if (!__isLabeled) {
LOGW("[%s] Image hasn't label.", __FUNCTION__);
return false;
}
- label = m_label;
+ label = __label;
return true;
}
@@ -246,40 +246,40 @@ std::ostream& operator << (std::ostream& os, const ImageObject& obj)
{
os << std::setprecision(7);
- os << obj.m_isEmpty << '\n';
- os << obj.m_isLabeled << '\n';
- os << obj.m_label << '\n';
+ os << obj.__isEmpty << '\n';
+ os << obj.__isLabeled << '\n';
+ os << obj.__label << '\n';
- os << obj.m_boundingContour.size() << '\n';
- for (size_t pointNum = 0u; pointNum < obj.m_boundingContour.size(); ++pointNum) {
- os << obj.m_boundingContour[pointNum].x << ' ';
- os << obj.m_boundingContour[pointNum].y << '\n';
+ os << obj.__boundingContour.size() << '\n';
+ for (size_t pointNum = 0u; pointNum < obj.__boundingContour.size(); ++pointNum) {
+ os << obj.__boundingContour[pointNum].x << ' ';
+ os << obj.__boundingContour[pointNum].y << '\n';
}
- const size_t numberOfKeypoints = obj.m_features.m_objectKeypoints.size();
+ const size_t numberOfKeypoints = obj.__features.__objectKeypoints.size();
os << numberOfKeypoints << '\n';
for (size_t keypointNum = 0u; keypointNum < numberOfKeypoints; ++keypointNum) {
- os << obj.m_features.m_objectKeypoints[keypointNum].pt.x << ' ';
- os << obj.m_features.m_objectKeypoints[keypointNum].pt.y << ' ';
- os << obj.m_features.m_objectKeypoints[keypointNum].size << ' ';
- os << obj.m_features.m_objectKeypoints[keypointNum].response << ' ';
- os << obj.m_features.m_objectKeypoints[keypointNum].angle << ' ';
- os << obj.m_features.m_objectKeypoints[keypointNum].octave << ' ';
- os << obj.m_features.m_objectKeypoints[keypointNum].class_id << '\n';
+ os << obj.__features.__objectKeypoints[keypointNum].pt.x << ' ';
+ os << obj.__features.__objectKeypoints[keypointNum].pt.y << ' ';
+ os << obj.__features.__objectKeypoints[keypointNum].size << ' ';
+ os << obj.__features.__objectKeypoints[keypointNum].response << ' ';
+ os << obj.__features.__objectKeypoints[keypointNum].angle << ' ';
+ os << obj.__features.__objectKeypoints[keypointNum].octave << ' ';
+ os << obj.__features.__objectKeypoints[keypointNum].class_id << '\n';
}
- const int numberOfDescriptors = obj.m_features.m_objectDescriptors.rows;
- const int sizeOfDescriptor = obj.m_features.m_objectDescriptors.cols;
+ const int numberOfDescriptors = obj.__features.__objectDescriptors.rows;
+ const int sizeOfDescriptor = obj.__features.__objectDescriptors.cols;
os << numberOfDescriptors << ' ';
os << sizeOfDescriptor << ' ';
- os << obj.m_features.m_objectDescriptors.type() << '\n';
+ os << obj.__features.__objectDescriptors.type() << '\n';
for (int descriptorNum = 0; descriptorNum < numberOfDescriptors;
++descriptorNum, os << '\n') {
for (int featureNum = 0; featureNum < sizeOfDescriptor;
++featureNum) {
- os << (int)obj.m_features.m_objectDescriptors.at<uchar>(
+ os << (int)obj.__features.__objectDescriptors.at<uchar>(
descriptorNum,
featureNum) << ' ';
}
@@ -302,40 +302,40 @@ std::istream& operator >> (std::istream& is, ImageObject& obj)
return is; \
}
- is >> temporal.m_isEmpty;
+ is >> temporal.__isEmpty;
MEDIA_VISION_CHECK_IFSTREAM
- is >> temporal.m_isLabeled;
+ is >> temporal.__isLabeled;
MEDIA_VISION_CHECK_IFSTREAM
- is >> temporal.m_label;
+ is >> temporal.__label;
MEDIA_VISION_CHECK_IFSTREAM
is >> numberOfContourPoints;
MEDIA_VISION_CHECK_IFSTREAM
- temporal.m_boundingContour.resize(numberOfContourPoints);
+ temporal.__boundingContour.resize(numberOfContourPoints);
for (size_t pointNum = 0; pointNum < numberOfContourPoints; ++pointNum) {
- is >> temporal.m_boundingContour[pointNum].x;
+ is >> temporal.__boundingContour[pointNum].x;
MEDIA_VISION_CHECK_IFSTREAM
- is >> temporal.m_boundingContour[pointNum].y;
+ is >> temporal.__boundingContour[pointNum].y;
MEDIA_VISION_CHECK_IFSTREAM
}
is >> numberOfKeypoints;
- temporal.m_features.m_objectKeypoints.resize(numberOfKeypoints);
+ temporal.__features.__objectKeypoints.resize(numberOfKeypoints);
for (size_t keypointNum = 0; keypointNum < numberOfKeypoints; ++keypointNum) {
- is >> temporal.m_features.m_objectKeypoints[keypointNum].pt.x;
+ is >> temporal.__features.__objectKeypoints[keypointNum].pt.x;
MEDIA_VISION_CHECK_IFSTREAM
- is >> temporal.m_features.m_objectKeypoints[keypointNum].pt.y;
+ is >> temporal.__features.__objectKeypoints[keypointNum].pt.y;
MEDIA_VISION_CHECK_IFSTREAM
- is >> temporal.m_features.m_objectKeypoints[keypointNum].size;
+ is >> temporal.__features.__objectKeypoints[keypointNum].size;
MEDIA_VISION_CHECK_IFSTREAM
- is >> temporal.m_features.m_objectKeypoints[keypointNum].response;
+ is >> temporal.__features.__objectKeypoints[keypointNum].response;
MEDIA_VISION_CHECK_IFSTREAM
- is >> temporal.m_features.m_objectKeypoints[keypointNum].angle;
+ is >> temporal.__features.__objectKeypoints[keypointNum].angle;
MEDIA_VISION_CHECK_IFSTREAM
- is >> temporal.m_features.m_objectKeypoints[keypointNum].octave;
+ is >> temporal.__features.__objectKeypoints[keypointNum].octave;
MEDIA_VISION_CHECK_IFSTREAM
- is >> temporal.m_features.m_objectKeypoints[keypointNum].class_id;
+ is >> temporal.__features.__objectKeypoints[keypointNum].class_id;
MEDIA_VISION_CHECK_IFSTREAM
}
@@ -345,14 +345,14 @@ std::istream& operator >> (std::istream& is, ImageObject& obj)
MEDIA_VISION_CHECK_IFSTREAM
is >> descriptorType;
MEDIA_VISION_CHECK_IFSTREAM
- temporal.m_features.m_objectDescriptors = cv::Mat(rows, cols, descriptorType);
+ temporal.__features.__objectDescriptors = cv::Mat(rows, cols, descriptorType);
int value = 0;
for (int descriptorNum = 0; descriptorNum < rows; ++descriptorNum) {
for (int featureNum = 0; featureNum < cols; ++featureNum) {
is >> value;
MEDIA_VISION_CHECK_IFSTREAM
- temporal.m_features.m_objectDescriptors.at<uchar>(descriptorNum, featureNum) =
+ temporal.__features.__objectDescriptors.at<uchar>(descriptorNum, featureNum) =
(uchar)value;
}
}
diff --git a/mv_image/image/src/Recognition/ImageRecognizer.cpp b/mv_image/image/src/Recognition/ImageRecognizer.cpp
index ebfc3869..2088b444 100644
--- a/mv_image/image/src/Recognition/ImageRecognizer.cpp
+++ b/mv_image/image/src/Recognition/ImageRecognizer.cpp
@@ -22,7 +22,7 @@
namespace MediaVision {
namespace Image {
ImageRecognizer::ImageRecognizer(const ImageObject& scene) :
- m_scene(scene)
+ __scene(scene)
{
; /* NULL */
}
@@ -42,12 +42,12 @@ bool ImageRecognizer::recognize(
contour.clear();
- if (MinimumNumberOfFeatures > target.m_features.m_objectKeypoints.size()) {
+ if (MinimumNumberOfFeatures > target.__features.__objectKeypoints.size()) {
LOGW("[%s] Image object can't be recognized (Recognition rate is too small).", __FUNCTION__);
return false;
}
- if (MinimumNumberOfFeatures > m_scene.m_features.m_objectKeypoints.size()) {
+ if (MinimumNumberOfFeatures > __scene.__features.__objectKeypoints.size()) {
LOGW("[%s] Scene image can't be analyzed (Too few features for recognition).", __FUNCTION__);
return false;
}
@@ -57,9 +57,9 @@ bool ImageRecognizer::recognize(
return false;
}
- cv::perspectiveTransform(target.m_boundingContour, contour, homophraphyMatrix);
+ cv::perspectiveTransform(target.__boundingContour, contour, homophraphyMatrix);
- if (target.m_boundingContour.size() == NumberOfQuadrangleCorners) {
+ if (target.__boundingContour.size() == NumberOfQuadrangleCorners) {
if (!isPossibleQuadrangleCorners(contour.data())) {
LOGI("[%s] Image object isn't recognized.", __FUNCTION__);
contour.clear();
@@ -79,9 +79,9 @@ bool ImageRecognizer::findHomophraphyMatrix(
{
std::vector<cv::DMatch> matches;
- m_matcher.match(
- target.m_features.m_objectDescriptors,
- m_scene.m_features.m_objectDescriptors,
+ __matcher.match(
+ target.__features.__objectDescriptors,
+ __scene.__features.__objectDescriptors,
matches);
size_t matchesNumber = matches.size();
@@ -106,7 +106,7 @@ bool ImageRecognizer::findHomophraphyMatrix(
if (requiredMatchesNumber + allowableMatchesNumberError >
(size_t)params.mMinMatchesNumber) {
requiredMatchesNumber = ((size_t)params.mMinMatchesNumber +
- requiredMatchesNumber + allowableMatchesNumberError) / 2;
+ requiredMatchesNumber + allowableMatchesNumberError) / 2;
allowableMatchesNumberError = requiredMatchesNumber-
(size_t)params.mMinMatchesNumber +
@@ -138,15 +138,15 @@ bool ImageRecognizer::findHomophraphyMatrix(
for (size_t matchIdx = 0; matchIdx < matchesNumber; ++matchIdx) {
objectPoints[matchIdx] =
- target.m_features.m_objectKeypoints[matches[matchIdx].queryIdx].pt;
+ target.__features.__objectKeypoints[matches[matchIdx].queryIdx].pt;
scenePoints[matchIdx] =
- m_scene.m_features.m_objectKeypoints[matches[matchIdx].trainIdx].pt;
+ __scene.__features.__objectKeypoints[matches[matchIdx].trainIdx].pt;
}
if (ignoreFactor > FLT_EPSILON) {
const std::vector<cv::Point2f> significantArea = contourResize(
- target.m_boundingContour,
+ target.__boundingContour,
ignoreFactor);
for (size_t matchIdx = 0; matchIdx < objectPoints.size(); ++matchIdx) {
@@ -200,15 +200,13 @@ size_t ImageRecognizer::matchesSelection(
while (true) {
/* Search the leftmost element which have bigger confidence than support element */
while (examples[leftLimit].distance <= supportElement &&
- leftLimit < startRightLimit) {
+ leftLimit < startRightLimit)
++leftLimit;
- }
/* Search the rightmost element which have smaller confidence than support element */
while (examples[rightLimit].distance >= supportElement &&
- rightLimit >= startLeftLimit) {
+ rightLimit >= startLeftLimit)
--rightLimit;
- }
if (leftLimit >= rightLimit)
break;
@@ -267,10 +265,10 @@ float ImageRecognizer::computeLinearSupportElement(const std::vector<cv::DMatch>
bool ImageRecognizer::isPossibleQuadrangleCorners(
const cv::Point2f corners[NumberOfQuadrangleCorners])
{
- static const float Epsilon = 0.1f;
+ static const float __EPSILON = 0.1f;
- /* TODO: move the MinSizeOfDetectedArea out of the ImageRecognizer */
- static const float MinSizeOfDetectedArea = 64.f;
+ /* TODO: move the __MIN_SIZE_OF_DETECTED_AREA out of the ImageRecognizer */
+ static const float __MIN_SIZE_OF_DETECTED_AREA = 64.f;
const float firstSemiArea = getTriangleArea(corners[0], corners[2], corners[1]) +
getTriangleArea(corners[0], corners[2], corners[3]);
@@ -278,8 +276,8 @@ bool ImageRecognizer::isPossibleQuadrangleCorners(
const float secondSemiArea = getTriangleArea(corners[1], corners[3], corners[2]) +
getTriangleArea(corners[1], corners[3], corners[0]);
- if (Epsilon < fabs(firstSemiArea - secondSemiArea) ||
- MinSizeOfDetectedArea > (firstSemiArea + secondSemiArea))
+ if (__EPSILON < fabs(firstSemiArea - secondSemiArea) ||
+ __MIN_SIZE_OF_DETECTED_AREA > (firstSemiArea + secondSemiArea))
return false;
return true;
diff --git a/mv_image/image/src/Tracking/AsyncTracker.cpp b/mv_image/image/src/Tracking/AsyncTracker.cpp
index 389fc4be..ea24f8a4 100644
--- a/mv_image/image/src/Tracking/AsyncTracker.cpp
+++ b/mv_image/image/src/Tracking/AsyncTracker.cpp
@@ -16,78 +16,82 @@
#include "Tracking/AsyncTracker.h"
+#include <new>
+
namespace MediaVision {
namespace Image {
+
AsyncTracker::AsyncTracker(const AsyncTracker& copy) :
- m_baseTracker(copy.m_baseTracker.obj->clone()),
- m_result(copy.m_result),
- m_isRun(false),
- m_isUpdated(copy.m_isUpdated),
- m_copyingPolicy(copy.m_copyingPolicy),
- m_thread(0)
+ __baseTracker(copy.__baseTracker.obj->clone()),
+ __result(copy.__result),
+ __isRun(false),
+ __isUpdated(copy.__isUpdated),
+ __copyingPolicy(copy.__copyingPolicy),
+ __mvThread(0)
{
- pthread_mutex_init(&m_globalGuard, NULL);
- pthread_spin_init(&m_resultGuard, PTHREAD_PROCESS_SHARED);
- pthread_spin_init(&m_isRunGuard, PTHREAD_PROCESS_SHARED);
- pthread_spin_init(&m_isUpdatedGuard, PTHREAD_PROCESS_SHARED);
+ pthread_mutex_init(&__globalGuard, NULL);
+ pthread_spin_init(&__resultGuard, PTHREAD_PROCESS_SHARED);
+ pthread_spin_init(&__isRunGuard, PTHREAD_PROCESS_SHARED);
+ pthread_spin_init(&__isUpdatedGuard, PTHREAD_PROCESS_SHARED);
}
AsyncTracker::AsyncTracker(
cv::Ptr<ObjectTracker> baseTracker,
bool copyingPolicy) :
- m_baseTracker(baseTracker),
- m_result(),
- m_isRun(false),
- m_isUpdated(false),
- m_copyingPolicy(copyingPolicy),
- m_thread(0)
+ __baseTracker(baseTracker),
+ __result(),
+ __isRun(false),
+ __isUpdated(false),
+ __copyingPolicy(copyingPolicy),
+ __mvThread(0)
{
- pthread_mutex_init(&m_globalGuard, NULL);
- pthread_spin_init(&m_resultGuard, PTHREAD_PROCESS_SHARED);
- pthread_spin_init(&m_isRunGuard, PTHREAD_PROCESS_SHARED);
- pthread_spin_init(&m_isUpdatedGuard, PTHREAD_PROCESS_SHARED);
+ pthread_mutex_init(&__globalGuard, NULL);
+ pthread_spin_init(&__resultGuard, PTHREAD_PROCESS_SHARED);
+ pthread_spin_init(&__isRunGuard, PTHREAD_PROCESS_SHARED);
+ pthread_spin_init(&__isUpdatedGuard, PTHREAD_PROCESS_SHARED);
}
AsyncTracker::~AsyncTracker()
{
- if(isRun())
- pthread_join(m_thread, NULL);
+ if(isRun()) {
+ pthread_mutex_lock(&__globalGuard);
+ pthread_mutex_unlock(&__globalGuard);
+ }
- pthread_mutex_destroy(&m_globalGuard);
- pthread_spin_destroy(&m_resultGuard);
- pthread_spin_destroy(&m_isRunGuard);
- pthread_spin_destroy(&m_isUpdatedGuard);
+ pthread_mutex_destroy(&__globalGuard);
+ pthread_spin_destroy(&__resultGuard);
+ pthread_spin_destroy(&__isRunGuard);
+ pthread_spin_destroy(&__isUpdatedGuard);
}
bool AsyncTracker::track(
const cv::Mat& frame,
std::vector<cv::Point>& result)
{
- while (pthread_mutex_trylock(&m_globalGuard) != 0) {
+ while (pthread_mutex_trylock(&__globalGuard) != 0)
return getResult(result);
- }
- pthread_spin_lock(&m_isRunGuard);
- m_isRun = true;
- pthread_spin_unlock(&m_isRunGuard);
+ pthread_spin_lock(&__isRunGuard);
+ __isRun = true;
+ pthread_spin_unlock(&__isRunGuard);
- if (m_copyingPolicy)
- m_frame = frame.clone();
+ if (__copyingPolicy)
+ __frame = frame.clone();
else
- m_frame = frame;
+ __frame = frame;
- const int err = pthread_create(&m_thread, NULL, asyncTrack, this);
+ const int err = pthread_create(&__mvThread, NULL, asyncTrack, this);
if (0 == err) {
- pthread_join(m_thread, NULL);
+ pthread_detach(__mvThread);
return getResult(result);
}
- pthread_spin_lock(&m_isRunGuard);
- m_isRun = false;
- pthread_spin_unlock(&m_isRunGuard);
+ pthread_spin_lock(&__isRunGuard);
+ __isRun = false;
+ pthread_spin_unlock(&__isRunGuard);
- pthread_mutex_unlock(&m_globalGuard);
+ pthread_mutex_unlock(&__globalGuard);
return getResult(result);
}
@@ -95,21 +99,21 @@ bool AsyncTracker::track(
void AsyncTracker::reinforcement(const std::vector<cv::Point>& location)
{
/* TODO: Unsafe. Need to redesign. */
- m_baseTracker->reinforcement(location);
+ __baseTracker->reinforcement(location);
- pthread_spin_lock(&m_resultGuard);
- m_result = location;
- pthread_spin_unlock(&m_resultGuard);
+ pthread_spin_lock(&__resultGuard);
+ __result = location;
+ pthread_spin_unlock(&__resultGuard);
}
cv::Ptr<ObjectTracker> AsyncTracker::clone() const
{
- return cv::Ptr<ObjectTracker>(new AsyncTracker(*this));
+ return cv::Ptr<ObjectTracker>(new (std::nothrow)AsyncTracker(*this));
}
bool AsyncTracker::baseTrack(std::vector<cv::Point>& result)
{
- return m_baseTracker->track(m_frame, result);
+ return __baseTracker->track(__frame, result);
}
void *AsyncTracker::asyncTrack(void *data)
@@ -119,19 +123,19 @@ void *AsyncTracker::asyncTrack(void *data)
std::vector<cv::Point> result;
tracker->baseTrack(result);
- pthread_spin_lock(&tracker->m_resultGuard);
- tracker->m_result = result;
- pthread_spin_unlock(&tracker->m_resultGuard);
+ pthread_spin_lock(&tracker->__resultGuard);
+ tracker->__result = result;
+ pthread_spin_unlock(&tracker->__resultGuard);
- pthread_spin_lock(&tracker->m_isUpdatedGuard);
- tracker->m_isUpdated = true;
- pthread_spin_unlock(&tracker->m_isUpdatedGuard);
+ pthread_spin_lock(&tracker->__isUpdatedGuard);
+ tracker->__isUpdated = true;
+ pthread_spin_unlock(&tracker->__isUpdatedGuard);
- pthread_mutex_unlock(&tracker->m_globalGuard);
+ pthread_mutex_unlock(&tracker->__globalGuard);
- pthread_spin_lock(&tracker->m_isRunGuard);
- tracker->m_isRun = false;
- pthread_spin_unlock(&tracker->m_isRunGuard);
+ pthread_spin_lock(&tracker->__isRunGuard);
+ tracker->__isRun = false;
+ pthread_spin_unlock(&tracker->__isRunGuard);
return NULL;
}
@@ -139,7 +143,8 @@ void *AsyncTracker::asyncTrack(void *data)
bool AsyncTracker::wait()
{
if(isRun()) {
- pthread_join(m_thread, NULL);
+ pthread_mutex_lock(&__globalGuard);
+ pthread_mutex_unlock(&__globalGuard);
return true;
}
return false;
@@ -149,9 +154,9 @@ bool AsyncTracker::isRun()
{
bool result = false;
- pthread_spin_lock(&m_isRunGuard);
- result = m_isRun;
- pthread_spin_unlock(&m_isRunGuard);
+ pthread_spin_lock(&__isRunGuard);
+ result = __isRun;
+ pthread_spin_unlock(&__isRunGuard);
return result;
}
@@ -162,10 +167,10 @@ bool AsyncTracker::isUpdated(std::vector<cv::Point>& result)
getResult(result);
- pthread_spin_lock(&m_isUpdatedGuard);
- isUpdated = m_isUpdated;
- m_isUpdated = false;
- pthread_spin_unlock(&m_isUpdatedGuard);
+ pthread_spin_lock(&__isUpdatedGuard);
+ isUpdated = __isUpdated;
+ __isUpdated = false;
+ pthread_spin_unlock(&__isUpdatedGuard);
return isUpdated;
}
@@ -174,10 +179,10 @@ bool AsyncTracker::getResult(std::vector<cv::Point>& result)
{
bool isTracked = false;
- pthread_spin_lock(&m_resultGuard);
- isTracked = !m_result.empty();
- result = m_result;
- pthread_spin_unlock(&m_resultGuard);
+ pthread_spin_lock(&__resultGuard);
+ isTracked = !__result.empty();
+ result = __result;
+ pthread_spin_unlock(&__resultGuard);
return isTracked;
}
diff --git a/mv_image/image/src/Tracking/CascadeTracker.cpp b/mv_image/image/src/Tracking/CascadeTracker.cpp
index ed56f093..71109394 100644
--- a/mv_image/image/src/Tracking/CascadeTracker.cpp
+++ b/mv_image/image/src/Tracking/CascadeTracker.cpp
@@ -18,19 +18,21 @@
#include "Tracking/AsyncTracker.h"
#include "ImageMathUtil.h"
+#include <new>
namespace MediaVision {
namespace Image {
+
CascadeTracker::CascadeTracker(float minimumArea) :
- m_trackers(),
- m_minimumArea(minimumArea)
+ __trackers(),
+ __minimumArea(minimumArea)
{
; /* NULL */
}
CascadeTracker::CascadeTracker(const CascadeTracker& copy) :
- m_trackers(),
- m_minimumArea(copy.m_minimumArea)
+ __trackers(),
+ __minimumArea(copy.__minimumArea)
{
*this = copy;
}
@@ -44,42 +46,40 @@ bool CascadeTracker::track(const cv::Mat& frame, std::vector<cv::Point>& result)
{
internalReinforcement();
- std::set<TrackerInfo>::iterator it = m_trackers.begin();
+ std::set<TrackerInfo>::iterator it = __trackers.begin();
- for (; it != m_trackers.end(); ++it) {
- if (!it->mTracker.obj->track(frame, it->mResult)) {
+ for (; it != __trackers.end(); ++it)
+ if (!it->mTracker.obj->track(frame, it->mResult))
it->mResult.clear();
- }
- }
return mergeResults(result);
}
void CascadeTracker::reinforcement(const std::vector<cv::Point>& location)
{
- std::set<TrackerInfo>::iterator it = m_trackers.begin();
+ std::set<TrackerInfo>::iterator it = __trackers.begin();
- for (; it != m_trackers.end(); ++it)
+ for (; it != __trackers.end(); ++it)
it->mTracker.obj->reinforcement(location);
}
cv::Ptr<ObjectTracker> CascadeTracker::clone() const
{
- return cv::Ptr<ObjectTracker>(new CascadeTracker(*this));
+ return cv::Ptr<ObjectTracker>(new (std::nothrow)CascadeTracker(*this));
}
CascadeTracker& CascadeTracker::operator=(const CascadeTracker& copy)
{
if (this != &copy) {
- this->m_minimumArea = copy.m_minimumArea;
- this->m_trackers.clear();
+ this->__minimumArea = copy.__minimumArea;
+ this->__trackers.clear();
- std::set<TrackerInfo>::iterator it = copy.m_trackers.begin();
- for (; it != copy.m_trackers.end(); ++it) {
+ std::set<TrackerInfo>::iterator it = copy.__trackers.begin();
+ for (; it != copy.__trackers.end(); ++it) {
TrackerInfo temp(it->mTracker.obj->clone(), it->mPriority);
temp.mResult = it->mResult;
- m_trackers.insert(temp);
+ __trackers.insert(temp);
}
}
@@ -91,12 +91,12 @@ bool CascadeTracker::enableTracker(cv::Ptr<ObjectTracker> tracker, float priorit
TrackerInfo temp(tracker, priority);
std::set<TrackerInfo>::iterator it =
- std::find(m_trackers.begin(), m_trackers.end(), temp);
+ std::find(__trackers.begin(), __trackers.end(), temp);
- if (it != m_trackers.end())
- m_trackers.erase(it);
+ if (it != __trackers.end())
+ __trackers.erase(it);
- return m_trackers.insert(temp).second;
+ return __trackers.insert(temp).second;
}
bool CascadeTracker::disableTracker(cv::Ptr<ObjectTracker> tracker)
@@ -104,20 +104,20 @@ bool CascadeTracker::disableTracker(cv::Ptr<ObjectTracker> tracker)
TrackerInfo target(tracker, 0);
std::set<TrackerInfo>::iterator it =
- std::find(m_trackers.begin(), m_trackers.end(), target);
+ std::find(__trackers.begin(), __trackers.end(), target);
- if (it == m_trackers.end())
+ if (it == __trackers.end())
return false;
- m_trackers.erase(it);
+ __trackers.erase(it);
return true;
}
void CascadeTracker::internalReinforcement()
{
- std::set<TrackerInfo>::iterator it1 = m_trackers.begin();
- for (; it1 != m_trackers.end(); ++it1) {
+ std::set<TrackerInfo>::iterator it1 = __trackers.begin();
+ for (; it1 != __trackers.end(); ++it1) {
bool isUpdated = true;
/* TODO: Redesign without dynamic_cast */
@@ -133,20 +133,17 @@ void CascadeTracker::internalReinforcement()
checkedArea[i].y = it1->mResult[i].y;
}
- if (getQuadrangleArea(checkedArea.data()) < m_minimumArea) {
+ if (getQuadrangleArea(checkedArea.data()) < __minimumArea) {
it1->mResult = std::vector<cv::Point>(0);
it1->mTracker.obj->reinforcement(it1->mResult);
}
float priority = it1->mPriority;
- std::set<TrackerInfo>::iterator it2 = m_trackers.begin();
+ std::set<TrackerInfo>::iterator it2 = __trackers.begin();
- for (; it2 != m_trackers.end(); ++it2) {
- if (it1 != it2 &&
- priority > it2->mPriority) {
+ for (; it2 != __trackers.end(); ++it2)
+ if (it1 != it2 && priority > it2->mPriority)
it2->mTracker.obj->reinforcement(it1->mResult);
- }
- }
}
}
}
@@ -155,10 +152,10 @@ bool CascadeTracker::mergeResults(std::vector<cv::Point>& result) const
{
result.clear();
- std::set<TrackerInfo>::iterator it = m_trackers.begin();
+ std::set<TrackerInfo>::iterator it = __trackers.begin();
float resPriotiry = 0.f;
- for (; it != m_trackers.end(); ++it) {
+ for (; it != __trackers.end(); ++it) {
if (result.empty() || resPriotiry > it->mPriority) {
resPriotiry = it->mPriority;
result = it->mResult;
diff --git a/mv_image/image/src/Tracking/FeatureSubstitutionTracker.cpp b/mv_image/image/src/Tracking/FeatureSubstitutionTracker.cpp
index eaf8bef1..43805a81 100644
--- a/mv_image/image/src/Tracking/FeatureSubstitutionTracker.cpp
+++ b/mv_image/image/src/Tracking/FeatureSubstitutionTracker.cpp
@@ -18,22 +18,25 @@
#include "Recognition/ImageRecognizer.h"
+#include <new>
+
namespace MediaVision {
namespace Image {
+
FeatureSubstitutionTracker::FeatureSubstitutionTracker(
const FeaturesExtractingParams& featuresExtractingParams,
const RecognitionParams& recognitionParams,
float expectedOffset,
float sceneScalingFactor,
float objectScalingFactor) :
- m_isInit(false),
- m_target(),
- m_location(),
- m_featureExtractingParams(featuresExtractingParams),
- m_recogParams(recognitionParams),
- m_expectedOffset(expectedOffset),
- m_sceneScalingFactor(sceneScalingFactor),
- m_objectScalingFactor(objectScalingFactor)
+ __isInit(false),
+ __target(),
+ __location(),
+ __featureExtractingParams(featuresExtractingParams),
+ __recogParams(recognitionParams),
+ __expectedOffset(expectedOffset),
+ __sceneScalingFactor(sceneScalingFactor),
+ __objectScalingFactor(objectScalingFactor)
{
; /* NULL */
}
@@ -43,56 +46,61 @@ bool FeatureSubstitutionTracker::track(
std::vector<cv::Point>& result)
{
std::vector<cv::Point2f> contour;
- size_t numberOfContourPoints = m_location.size();
+ size_t numberOfContourPoints = __location.size();
contour.resize(numberOfContourPoints);
- for(size_t i = 0u; i < numberOfContourPoints; ++i) {
- contour[i].x = m_location[i].x;
- contour[i].y = m_location[i].y;
+ for (size_t i = 0u; i < numberOfContourPoints; ++i) {
+ contour[i].x = __location[i].x;
+ contour[i].y = __location[i].y;
}
- if (!m_isInit) {
- if (m_location.empty()) {
+ if (!__isInit) {
+ if (__location.empty()) {
return false;
} else {
- m_target = new ImageObject;
- m_target->fill(
+ __target = new (std::nothrow)ImageObject;
+ if (__target == NULL)
+ return false;
+
+ __target->fill(
frame,
- m_featureExtractingParams,
- contourResize(contour, m_objectScalingFactor));
- m_target->setContour(contour);
- m_isInit = true;
- result = m_location;
+ __featureExtractingParams,
+ contourResize(contour, __objectScalingFactor));
+ __target->setContour(contour);
+ __isInit = true;
+ result = __location;
return true;
}
}
- cv::Ptr<ImageObject> sceneImageObject = new ImageObject;
+ cv::Ptr<ImageObject> sceneImageObject = new (std::nothrow)ImageObject;
+ if (sceneImageObject == NULL)
+ return false;
- sceneImageObject->fill(frame, m_featureExtractingParams, computeExpectedArea());
+ sceneImageObject->fill(frame, __featureExtractingParams, computeExpectedArea());
ImageRecognizer recognizer(*sceneImageObject.obj);
const bool isTracked =
recognizer.recognize(
- *m_target.obj,
- m_recogParams,
+ *__target.obj,
+ __recogParams,
contour,
- m_objectScalingFactor);
+ __objectScalingFactor);
if (isTracked) {
numberOfContourPoints = contour.size();
- m_location.resize(numberOfContourPoints);
- for(size_t i = 0u; i < numberOfContourPoints; ++i) {
- m_location[i].x = (int)contour[i].x;
- m_location[i].y = (int)contour[i].y;
+ __location.resize(numberOfContourPoints);
+ for (size_t i = 0u; i < numberOfContourPoints; ++i) {
+ __location[i].x = (int)contour[i].x;
+ __location[i].y = (int)contour[i].y;
}
- result = m_location;
- m_target = sceneImageObject;
- m_target->setContour(contour);
+ result = __location;
+ __target = sceneImageObject;
+ __target->setContour(contour);
} else {
- m_location.clear();
- m_isInit = false;
+ __location.clear();
+ __isInit = false;
}
return isTracked;
@@ -100,32 +108,32 @@ bool FeatureSubstitutionTracker::track(
void FeatureSubstitutionTracker::reinforcement(const std::vector<cv::Point>& location)
{
- m_isInit = false;
+ __isInit = false;
if (location.size() < 3) {
- m_location.clear();
+ __location.clear();
return;
}
- m_location = location;
+ __location = location;
}
cv::Ptr<ObjectTracker> FeatureSubstitutionTracker::clone() const
{
- return cv::Ptr<ObjectTracker>(new FeatureSubstitutionTracker(*this));
+ return cv::Ptr<ObjectTracker>(new (std::nothrow)FeatureSubstitutionTracker(*this));
}
std::vector<cv::Point2f> FeatureSubstitutionTracker::computeExpectedArea()
{
std::vector<cv::Point2f> contour;
- const size_t numberOfContourPoints = m_location.size();
+ const size_t numberOfContourPoints = __location.size();
contour.resize(numberOfContourPoints);
- for(size_t i = 0u; i < numberOfContourPoints; ++i) {
- contour[i].x = m_location[i].x;
- contour[i].y = m_location[i].y;
+ for (size_t i = 0u; i < numberOfContourPoints; ++i) {
+ contour[i].x = __location[i].x;
+ contour[i].y = __location[i].y;
}
- return contourResize(contour, m_sceneScalingFactor);
+ return contourResize(contour, __sceneScalingFactor);
}
} /* Image */
diff --git a/mv_image/image/src/Tracking/ImageContourStabilizator.cpp b/mv_image/image/src/Tracking/ImageContourStabilizator.cpp
index a745cecb..cb1ad3ae 100644
--- a/mv_image/image/src/Tracking/ImageContourStabilizator.cpp
+++ b/mv_image/image/src/Tracking/ImageContourStabilizator.cpp
@@ -22,22 +22,23 @@
namespace MediaVision {
namespace Image {
+
ImageContourStabilizator::ImageContourStabilizator() :
- m_movingHistory(),
- m_priorities()
+ __movingHistory(),
+ __priorities()
{
reset();
}
void ImageContourStabilizator::reset(void)
{
- m_tolerantShift = 0.0f;
- m_tolerantShiftExtra = 0.0f;
- m_isPrepared = false;
- m_tempContourIndex = -1;
- m_currentHistoryAmount = 0;
- m_historyAmount = 0;
- m_movingHistory.clear();
+ __tolerantShift = 0.0f;
+ __tolerantShiftExtra = 0.0f;
+ __isPrepared = false;
+ __tempContourIndex = -1;
+ __currentHistoryAmount = 0;
+ __historyAmount = 0;
+ __movingHistory.clear();
}
ImageContourStabilizator::StabilizationError ImageContourStabilizator::stabilize(
@@ -57,22 +58,22 @@ ImageContourStabilizator::StabilizationError ImageContourStabilizator::stabilize
return UnsupportedContourType;
}
- m_currentCornersSpeed.resize(contour.size(), 0);
+ __currentCornersSpeed.resize(contour.size(), 0);
- if (m_lastStabilizedContour.empty())
- m_lastStabilizedContour = contour;
+ if (__lastStabilizedContour.empty())
+ __lastStabilizedContour = contour;
std::vector<cv::Point2f> stabilizedState;
/* history amount < 2 it's no sense */
- if (m_historyAmount >= 2) {
+ if (__historyAmount >= 2) {
/* first sample */
- if (m_tempContourIndex == -1) {
- m_movingHistory.push_back(contour);
- m_movingHistory.push_back(contour);
+ if (__tempContourIndex == -1) {
+ __movingHistory.push_back(contour);
+ __movingHistory.push_back(contour);
- m_tempContourIndex = 1;
- m_currentHistoryAmount = 2;
+ __tempContourIndex = 1;
+ __currentHistoryAmount = 2;
LOGI("Not stabilized. Too small moving history. (the first one)");
@@ -80,10 +81,10 @@ ImageContourStabilizator::StabilizationError ImageContourStabilizator::stabilize
}
/* too short moving history */
- if (m_currentHistoryAmount < m_historyAmount) {
- ++m_currentHistoryAmount;
- ++m_tempContourIndex;
- m_movingHistory.push_back(contour);
+ if (__currentHistoryAmount < __historyAmount) {
+ ++__currentHistoryAmount;
+ ++__tempContourIndex;
+ __movingHistory.push_back(contour);
LOGI("Not stabilized. Too short moving history.");
@@ -91,52 +92,51 @@ ImageContourStabilizator::StabilizationError ImageContourStabilizator::stabilize
}
/* saving into moving history */
- m_movingHistory.pop_front();
- m_movingHistory.push_back(contour);
+ __movingHistory.pop_front();
+ __movingHistory.push_back(contour);
- if (!m_isPrepared) {
- m_lastStabilizedContour = m_movingHistory[m_historyAmount - 2];
+ if (!__isPrepared) {
+ __lastStabilizedContour = __movingHistory[__historyAmount - 2];
LOGI("Not stabilized. Too small moving history. (the last one)");
- m_isPrepared = true;
+ __isPrepared = true;
}
/* stabilization */
stabilizedState = computeStabilizedQuadrangleContour();
if (stabilizedState.empty())
- stabilizedState = m_lastStabilizedContour;
+ stabilizedState = __lastStabilizedContour;
} else {
- stabilizedState = m_lastStabilizedContour;
+ stabilizedState = __lastStabilizedContour;
}
const float tolerantShift = getQuadrangleArea(contour.data()) *
- m_tolerantShift + m_tolerantShiftExtra;
+ __tolerantShift + __tolerantShiftExtra;
const size_t contourSize = stabilizedState.size();
for (size_t i = 0u; i < contourSize; ++i) {
if (fabs(getDistance(stabilizedState[i], contour[i])) > tolerantShift) {
- const float dirX = m_lastStabilizedContour[i].x - contour[i].x;
- const float dirY = m_lastStabilizedContour[i].y - contour[i].y;
+ const float dirX = __lastStabilizedContour[i].x - contour[i].x;
+ const float dirY = __lastStabilizedContour[i].y - contour[i].y;
- const float speedX = dirX * m_speeds[m_currentCornersSpeed[i]];
- const float speedY = dirY * m_speeds[m_currentCornersSpeed[i]];
+ const float speedX = dirX * __speeds[__currentCornersSpeed[i]];
+ const float speedY = dirY * __speeds[__currentCornersSpeed[i]];
/* final moving */
- m_lastStabilizedContour[i].x -= speedX;
- m_lastStabilizedContour[i].y -= speedY;
+ __lastStabilizedContour[i].x -= speedX;
+ __lastStabilizedContour[i].y -= speedY;
- if (m_currentCornersSpeed[i] < m_speeds.size() - 1) {
- ++m_currentCornersSpeed[i];
- }
+ if (__currentCornersSpeed[i] < __speeds.size() - 1)
+ ++__currentCornersSpeed[i];
} else {
- m_currentCornersSpeed[i] = 0;
+ __currentCornersSpeed[i] = 0;
}
}
- /* m_lastStabilizedContour = stabilizedState; */
- contour = m_lastStabilizedContour;
+ /* __lastStabilizedContour = stabilizedState; */
+ contour = __lastStabilizedContour;
LOGI("Contour successfully stabilized.");
@@ -148,64 +148,64 @@ bool ImageContourStabilizator::updateSettings(const StabilizationParams& params)
if (params.mHistoryAmount < 1)
return false;
- m_tolerantShift = (float)params.mTolerantShift;
- m_tolerantShiftExtra = (float)params.mTolerantShiftExtra;
+ __tolerantShift = (float)params.mTolerantShift;
+ __tolerantShiftExtra = (float)params.mTolerantShiftExtra;
- if (m_historyAmount != (size_t)params.mHistoryAmount) {
- m_historyAmount = (size_t)params.mHistoryAmount;
+ if (__historyAmount != (size_t)params.mHistoryAmount) {
+ __historyAmount = (size_t)params.mHistoryAmount;
- m_priorities.resize(m_historyAmount);
+ __priorities.resize(__historyAmount);
/* calculation of priorities for positions in the moving history */
- for (size_t i = 0u; i < m_historyAmount; ++i) {
+ for (size_t i = 0u; i < __historyAmount; ++i) {
/* linear dependence on the elapsed time */
- m_priorities[i] = ((i + 1) * 2.0f) /
- ((m_historyAmount + 1) * m_historyAmount);
+ __priorities[i] = ((i + 1) * 2.0f) /
+ ((__historyAmount + 1) * __historyAmount);
}
}
- while (m_historyAmount > (size_t)params.mHistoryAmount) {
- m_movingHistory.pop_front();
- --m_historyAmount;
+ while (__historyAmount > (size_t)params.mHistoryAmount) {
+ __movingHistory.pop_front();
+ --__historyAmount;
}
- if ((size_t)params.mHistoryAmount > m_historyAmount) {
+ if ((size_t)params.mHistoryAmount > __historyAmount) {
/* TODO: save current moving history */
- m_tempContourIndex = -1;
- m_historyAmount = (size_t)params.mHistoryAmount;
- m_movingHistory.clear();
+ __tempContourIndex = -1;
+ __historyAmount = (size_t)params.mHistoryAmount;
+ __movingHistory.clear();
}
bool speedIsValid = false;
- if (m_speeds.size() > 1) {
- const static float Epsilon = 0.0001f;
- if (fabs(m_speeds[0] - params.mStabilizationSpeed) < Epsilon &&
- fabs((m_speeds[1] - m_speeds[0]) -
- params.mStabilizationAcceleration) < Epsilon) {
- speedIsValid = true;
+ if (__speeds.size() > 1) {
+ const static float __EPSILON = 0.0001f;
+ if (fabs(__speeds[0] - params.mStabilizationSpeed) < __EPSILON &&
+ fabs((__speeds[1] - __speeds[0]) -
+ params.mStabilizationAcceleration) < __EPSILON) {
+ speedIsValid = true;
}
}
if (!speedIsValid) {
- m_speeds.clear();
+ __speeds.clear();
int speedsSize = (int)((1 - params.mStabilizationSpeed) /
params.mStabilizationAcceleration) + 1;
if (speedsSize < 1) {
- m_speeds.push_back(1.0f);
+ __speeds.push_back(1.0f);
} else {
- static const int MaxSpeedsSize = 25;
+ static const int MAX_SPEED_SIZE = 25;
- if (speedsSize > MaxSpeedsSize)
- speedsSize = MaxSpeedsSize;
+ if (speedsSize > MAX_SPEED_SIZE)
+ speedsSize = MAX_SPEED_SIZE;
float speed = std::max(0.f,
std::min((float)params.mStabilizationSpeed, 1.0f));
for (int i = 0; i < speedsSize; ++i) {
- m_speeds.push_back(speed);
+ __speeds.push_back(speed);
speed += params.mStabilizationAcceleration;
}
}
@@ -231,25 +231,25 @@ std::vector<cv::Point2f> ImageContourStabilizator::computeStabilizedQuadrangleCo
std::vector<cv::Point2f> directionsToLastPos(NumberOfQuadrangleCorners);
for (size_t j = 0u; j < NumberOfQuadrangleCorners; ++j) {
/* calculation the moving directions and computing average direction */
- std::vector<cv::Point2f> trackDirections(m_historyAmount - 1);
+ std::vector<cv::Point2f> trackDirections(__historyAmount - 1);
cv::Point2f averageDirections(0.f, 0.f);
- for (size_t i = 0u; i < m_historyAmount - 1; ++i) {
+ for (size_t i = 0u; i < __historyAmount - 1; ++i) {
averageDirections.x += (trackDirections[i].x =
- m_movingHistory[i+1][j].x - m_movingHistory[i][j].x) /
- (m_historyAmount - 1);
+ __movingHistory[i+1][j].x - __movingHistory[i][j].x) /
+ (__historyAmount - 1);
averageDirections.y += (trackDirections[i].y =
- m_movingHistory[i+1][j].y - m_movingHistory[i][j].y) /
- (m_historyAmount - 1);
+ __movingHistory[i+1][j].y - __movingHistory[i][j].y) /
+ (__historyAmount - 1);
}
/* calculation a deviations and select outlier */
- std::vector<float> directionDistances(m_historyAmount - 1);
+ std::vector<float> directionDistances(__historyAmount - 1);
float maxDistance = 0.f, prevMaxDistance = 0.f;
int idxWithMaxDistance = 0;
int numExpressiveDirection = -1;
- for (size_t i = 0u; i < m_historyAmount - 1; ++i) {
+ for (size_t i = 0u; i < __historyAmount - 1; ++i) {
directionDistances[i] = getDistance(
trackDirections[i],
averageDirections);
@@ -274,28 +274,28 @@ std::vector<cv::Point2f> ImageContourStabilizator::computeStabilizedQuadrangleCo
/* final direction computing */
float summPriority = 0.f;
- for (size_t i = 0u; i < m_historyAmount - 1; ++i) {
+ for (size_t i = 0u; i < __historyAmount - 1; ++i) {
if ((int)i != numExpressiveDirection) {
- directions[j].x += trackDirections[i].x * m_priorities[i];
- directions[j].y += trackDirections[i].y * m_priorities[i];
- summPriority += m_priorities[i];
+ directions[j].x += trackDirections[i].x * __priorities[i];
+ directions[j].y += trackDirections[i].y * __priorities[i];
+ summPriority += __priorities[i];
}
}
- if (numExpressiveDirection == (int)(m_historyAmount - 1))
+ if (numExpressiveDirection == (int)(__historyAmount - 1))
expressiveTime = true;
summPriorityWithoutToLastPos[j] = summPriority;
- priorityToLastPos[j] = m_priorities[m_historyAmount - 1];
+ priorityToLastPos[j] = __priorities[__historyAmount - 1];
directions[j].x -= directionsToLastPos[j].x =
- (m_lastStabilizedContour[j].x -
- m_movingHistory[m_historyAmount - 1][j].x) *
+ (__lastStabilizedContour[j].x -
+ __movingHistory[__historyAmount - 1][j].x) *
priorityToLastPos[j];
directions[j].y -= directionsToLastPos[j].y =
- (m_lastStabilizedContour[j].y -
- m_movingHistory[m_historyAmount - 1][j].y) *
+ (__lastStabilizedContour[j].y -
+ __movingHistory[__historyAmount - 1][j].y) *
priorityToLastPos[j];
summPriority += priorityToLastPos[j];
@@ -318,8 +318,8 @@ std::vector<cv::Point2f> ImageContourStabilizator::computeStabilizedQuadrangleCo
directions[j].y /= summPriorityWithoutToLastPos[j];
}
- stabilizedState[j].x = m_lastStabilizedContour[j].x + directions[j].x;
- stabilizedState[j].y = m_lastStabilizedContour[j].y + directions[j].y;
+ stabilizedState[j].x = __lastStabilizedContour[j].x + directions[j].x;
+ stabilizedState[j].y = __lastStabilizedContour[j].y + directions[j].y;
}
return stabilizedState;
diff --git a/mv_image/image/src/Tracking/ImageTrackingModel.cpp b/mv_image/image/src/Tracking/ImageTrackingModel.cpp
index 8f4c0f90..3d78550e 100644
--- a/mv_image/image/src/Tracking/ImageTrackingModel.cpp
+++ b/mv_image/image/src/Tracking/ImageTrackingModel.cpp
@@ -27,25 +27,27 @@
#include <fstream>
#include <unistd.h>
+#include <new>
namespace MediaVision {
namespace Image {
+
ImageTrackingModel::ImageTrackingModel() :
- m_target(),
- m_tracker(),
- m_stabilizator(),
- m_location(),
- m_stabilizationParams()
+ __target(),
+ __tracker(),
+ __stabilizator(),
+ __location(),
+ __stabilizationParams()
{
; /* NULL */
}
ImageTrackingModel::ImageTrackingModel(const ImageTrackingModel& copy) :
- m_target(copy.m_target),
- m_tracker(copy.m_tracker.empty()? NULL: copy.m_tracker->clone()),
- m_stabilizator(copy.m_stabilizator),
- m_location(copy.m_location),
- m_stabilizationParams(copy.m_stabilizationParams)
+ __target(copy.__target),
+ __tracker(copy.__tracker.empty()? NULL: copy.__tracker->clone()),
+ __stabilizator(copy.__stabilizator),
+ __location(copy.__location),
+ __stabilizationParams(copy.__stabilizationParams)
{
; /* NULL */
}
@@ -102,31 +104,37 @@ void ImageTrackingModel::setTarget(const ImageObject& target)
/* Parameters of stabilization */
- m_stabilizationParams.mIsEnabled = true;
- m_stabilizationParams.mHistoryAmount = 3;
- m_stabilizationParams.mTolerantShift = 0.00006;
- m_stabilizationParams.mTolerantShiftExtra = 1.3;
- m_stabilizationParams.mStabilizationSpeed = 0.3;
- m_stabilizationParams.mStabilizationAcceleration = 0.1;
+ __stabilizationParams.mIsEnabled = true;
+ __stabilizationParams.mHistoryAmount = 3;
+ __stabilizationParams.mTolerantShift = 0.00006;
+ __stabilizationParams.mTolerantShiftExtra = 1.3;
+ __stabilizationParams.mStabilizationSpeed = 0.3;
+ __stabilizationParams.mStabilizationAcceleration = 0.1;
/* Parameters definition is finished */
/* Creating a basic tracker which will have other trackers */
- cv::Ptr<CascadeTracker> mainTracker = new CascadeTracker;
+ cv::Ptr<CascadeTracker> mainTracker = new (std::nothrow)CascadeTracker;
+ if (mainTracker == NULL)
+ LOGE("Failed to create mainTracker");
/* Adding asynchronous recognition based tracker */
cv::Ptr<RecognitionBasedTracker> recogTracker =
- new RecognitionBasedTracker(
+ new (std::nothrow)RecognitionBasedTracker(
target,
orbFeatureExtractingParams,
orbRecogParams);
+ if (recogTracker == NULL)
+ LOGE("Failed to create Recognition Tracker");
cv::Ptr<AsyncTracker> asyncRecogTracker =
- new AsyncTracker(
+ new (std::nothrow)AsyncTracker(
recogTracker,
true);
+ if (asyncRecogTracker == NULL)
+ LOGE("Failed to create Async Recognition Tracker");
mainTracker->enableTracker(
asyncRecogTracker,
@@ -135,15 +143,19 @@ void ImageTrackingModel::setTarget(const ImageObject& target)
/* Adding asynchronous feature substitution based tracker */
cv::Ptr<FeatureSubstitutionTracker> substitutionTracker =
- new FeatureSubstitutionTracker(
+ new (std::nothrow)FeatureSubstitutionTracker(
gfttWbriefFeatureExtractingParams,
gfttWbriefRecogParams,
expectedOffset);
+ if (substitutionTracker == NULL)
+ LOGE("Failed to create Substitution Tracker");
cv::Ptr<AsyncTracker> asyncSubstitutionTracker =
- new AsyncTracker(
+ new (std::nothrow)AsyncTracker(
substitutionTracker,
true);
+ if (asyncSubstitutionTracker == NULL)
+ LOGE("Failed to create Async Substitution Tracker");
mainTracker->enableTracker(
asyncSubstitutionTracker,
@@ -151,68 +163,71 @@ void ImageTrackingModel::setTarget(const ImageObject& target)
/* Adding median flow tracker */
- cv::Ptr<MFTracker> mfTracker = new MFTracker(medianflowTrackingParams);
+ cv::Ptr<MFTracker> mfTracker = new (std::nothrow)MFTracker(
+ medianflowTrackingParams);
+ if (mfTracker == NULL)
+ LOGE("Failed to create MFTracker");
mainTracker->enableTracker(
mfTracker,
medianFlowTrackerPriotity);
- m_tracker = mainTracker;
- m_target = target;
+ __tracker = mainTracker;
+ __target = target;
}
bool ImageTrackingModel::isValid() const
{
- return !(m_target.isEmpty());
+ return !(__target.isEmpty());
}
bool ImageTrackingModel::track(const cv::Mat& frame, std::vector<cv::Point>& result)
{
result.clear();
- if (m_tracker.empty())
+ if (__tracker.empty())
return false;
- if (!(m_tracker->track(frame, m_location))) {
- m_stabilizator.reset();
+ if (!(__tracker->track(frame, __location))) {
+ __stabilizator.reset();
return false;
}
- const size_t numberOfContourPoints = m_location.size();
+ const size_t numberOfContourPoints = __location.size();
std::vector<cv::Point2f> stabilizedContour(numberOfContourPoints);
for (size_t i = 0; i < numberOfContourPoints; ++i) {
- stabilizedContour[i].x = (float)m_location[i].x;
- stabilizedContour[i].y = (float)m_location[i].y;
+ stabilizedContour[i].x = (float)__location[i].x;
+ stabilizedContour[i].y = (float)__location[i].y;
}
- m_stabilizator.stabilize(stabilizedContour, m_stabilizationParams);
+ __stabilizator.stabilize(stabilizedContour, __stabilizationParams);
for (size_t i = 0; i < numberOfContourPoints; ++i) {
- m_location[i].x = (int)stabilizedContour[i].x;
- m_location[i].y = (int)stabilizedContour[i].y;
+ __location[i].x = (int)stabilizedContour[i].x;
+ __location[i].y = (int)stabilizedContour[i].y;
}
- result = m_location;
+ result = __location;
return true;
}
void ImageTrackingModel::refresh(void)
{
- m_location.clear();
+ __location.clear();
}
ImageTrackingModel& ImageTrackingModel::operator=(const ImageTrackingModel& copy)
{
if (this != &copy) {
- m_target = copy.m_target;
- if (!copy.m_tracker.empty())
- m_tracker = copy.m_tracker->clone();
+ __target = copy.__target;
+ if (!copy.__tracker.empty())
+ __tracker = copy.__tracker->clone();
else
- m_tracker.release();
+ __tracker.release();
- m_stabilizator = copy.m_stabilizator;
- m_location = copy.m_location;
- m_stabilizationParams = copy.m_stabilizationParams;
+ __stabilizator = copy.__stabilizator;
+ __location = copy.__location;
+ __stabilizationParams = copy.__stabilizationParams;
}
return *this;
@@ -287,18 +302,18 @@ std::ostream& operator << (std::ostream& os, const ImageTrackingModel& obj)
{
os << std::setprecision(7);
- os << obj.m_target;
- os << obj.m_stabilizationParams.mIsEnabled << '\n';
- os << obj.m_stabilizationParams.mHistoryAmount << '\n';
- os << obj.m_stabilizationParams.mStabilizationSpeed << '\n';
- os << obj.m_stabilizationParams.mStabilizationAcceleration << '\n';
- os << obj.m_stabilizationParams.mTolerantShift << '\n';
- os << obj.m_stabilizationParams.mTolerantShiftExtra << '\n';
+ os << obj.__target;
+ os << obj.__stabilizationParams.mIsEnabled << '\n';
+ os << obj.__stabilizationParams.mHistoryAmount << '\n';
+ os << obj.__stabilizationParams.mStabilizationSpeed << '\n';
+ os << obj.__stabilizationParams.mStabilizationAcceleration << '\n';
+ os << obj.__stabilizationParams.mTolerantShift << '\n';
+ os << obj.__stabilizationParams.mTolerantShiftExtra << '\n';
- const size_t numberOfContourPoints = obj.m_location.size();
+ const size_t numberOfContourPoints = obj.__location.size();
os << numberOfContourPoints << '\n';
for (size_t pointNum = 0u; pointNum < numberOfContourPoints; ++pointNum)
- os << ' ' << obj.m_location[pointNum].x << ' ' << obj.m_location[pointNum].y;
+ os << ' ' << obj.__location[pointNum].x << ' ' << obj.__location[pointNum].y;
os << '\n';
@@ -308,9 +323,8 @@ std::ostream& operator << (std::ostream& os, const ImageTrackingModel& obj)
std::istream& operator >> (std::istream& is, ImageTrackingModel& obj)
{
#define MEDIA_VISION_CHECK_IFSTREAM \
- if (!is.good()) { \
- return is; \
- }
+ if (!is.good()) \
+ return is;
ImageObject target;
std::vector<cv::Point> location;
@@ -340,11 +354,11 @@ std::istream& operator >> (std::istream& is, ImageTrackingModel& obj)
#undef MEDIA_VISION_CHECK_IFSTREAM
- obj.m_stabilizationParams = params;
- obj.m_location = location;
+ obj.__stabilizationParams = params;
+ obj.__location = location;
if (!(target.isEmpty())) {
obj.setTarget(target);
- obj.m_tracker->reinforcement(location);
+ obj.__tracker->reinforcement(location);
}
return is;
diff --git a/mv_image/image/src/Tracking/MFTracker.cpp b/mv_image/image/src/Tracking/MFTracker.cpp
index df7f2a73..fd99019f 100644
--- a/mv_image/image/src/Tracking/MFTracker.cpp
+++ b/mv_image/image/src/Tracking/MFTracker.cpp
@@ -20,27 +20,31 @@
namespace MediaVision {
namespace Image {
+
namespace {
- const float FloatEps = 10e-6f;
-
- template<typename T>
- T getMedian(std::vector<T>& values, int size = -1) {
- if (size == -1)
- size = (int)values.size();
-
- std::vector<T> copy(values.begin(), values.begin() + size);
- std::sort(copy.begin(), copy.end());
- if (size%2 == 0)
- return (copy[size/2-1]+copy[size/2])/((T)2.0);
- else
- return copy[(size - 1) / 2];
- }
+const float FLOATEPS = 10e-6f;
+
+template<typename T>
+T getMedian(std::vector<T>& values, int size = -1)
+{
+ if (size == -1)
+ size = (int)values.size();
- inline float l2distance(cv::Point2f p1, cv::Point2f p2) {
- const float dx = p1.x - p2.x;
- const float dy = p1.y - p2.y;
- return sqrtf(dx * dx + dy * dy);
+ std::vector<T> copy(values.begin(), values.begin() + size);
+ std::sort(copy.begin(), copy.end());
+ if (size%2 == 0) {
+ return (copy[size / 2 - 1] + copy[size/2]) / ((T)2.0);
+ } else {
+ return copy[(size - 1) / 2];
}
+}
+
+inline float l2distance(cv::Point2f p1, cv::Point2f p2)
+{
+ const float dx = p1.x - p2.x;
+ const float dy = p1.y - p2.y;
+ return sqrtf(dx * dx + dy * dy);
+}
} /* anonymous namespace */
MFTracker::Params::Params()
@@ -51,10 +55,10 @@ MFTracker::Params::Params()
}
MFTracker::MFTracker(Params params) :
- m_isInit(false),
- m_params(params),
- m_termcrit(cv::TermCriteria::COUNT | cv::TermCriteria::EPS, 20, 0.3),
- m_confidence(0.0f)
+ __isInit(false),
+ __params(params),
+ __termcrit(cv::TermCriteria::COUNT | cv::TermCriteria::EPS, 20, 0.3),
+ __confidence(0.0f)
{
}
@@ -62,28 +66,28 @@ bool MFTracker::track(const cv::Mat& frame, std::vector<cv::Point>& result)
{
result.clear();
- if (!m_isInit) {
- if (m_startLocation.empty())
+ if (!__isInit) {
+ if (__startLocation.empty())
return false;
if (!init(frame))
return false;
} else {
if (!update(frame)) {
- m_isInit = false;
- m_startLocation.clear();
+ __isInit = false;
+ __startLocation.clear();
return false;
}
}
- const size_t numberOfContourPoints = m_startLocation.size();
+ const size_t numberOfContourPoints = __startLocation.size();
result.resize(numberOfContourPoints);
for (size_t i = 0; i < numberOfContourPoints; ++i) {
- result[i].x = (int)(m_boundingBox.x +
- m_startLocation[i].x * m_boundingBox.width);
- result[i].y = (int)(m_boundingBox.y +
- m_startLocation[i].y * m_boundingBox.height);
+ result[i].x = static_cast<int>(__boundingBox.x +
+ __startLocation[i].x * __boundingBox.width);
+ result[i].y = static_cast<int>(__boundingBox.y +
+ __startLocation[i].y * __boundingBox.height);
}
return true;
@@ -91,32 +95,32 @@ bool MFTracker::track(const cv::Mat& frame, std::vector<cv::Point>& result)
void MFTracker::reinforcement(const std::vector<cv::Point>& location)
{
- m_isInit = false;
+ __isInit = false;
if (location.size() < 3) {
- m_startLocation.clear();
- m_boundingBox.x = 0;
- m_boundingBox.y = 0;
- m_boundingBox.width = 0;
- m_boundingBox.height = 0;
+ __startLocation.clear();
+ __boundingBox.x = 0;
+ __boundingBox.y = 0;
+ __boundingBox.width = 0;
+ __boundingBox.height = 0;
return;
}
const cv::Rect_<float>& boundingBox = cv::boundingRect(location);
- m_boundingBox = boundingBox;
+ __boundingBox = boundingBox;
const size_t numberOfContourPoints = location.size();
- m_startLocation.resize(numberOfContourPoints);
+ __startLocation.resize(numberOfContourPoints);
for (size_t i = 0; i < numberOfContourPoints; ++i) {
- m_startLocation[i].x = (location[i].x - boundingBox.x) / boundingBox.width;
- m_startLocation[i].y = (location[i].y - boundingBox.y) / boundingBox.height;
+ __startLocation[i].x = (location[i].x - boundingBox.x) / boundingBox.width;
+ __startLocation[i].y = (location[i].y - boundingBox.y) / boundingBox.height;
}
}
cv::Ptr<ObjectTracker> MFTracker::clone() const
{
- return cv::Ptr<ObjectTracker>(new MFTracker(*this));
+ return cv::Ptr<ObjectTracker>(new (std::nothrow)MFTracker(*this));
}
bool MFTracker::init(const cv::Mat& image)
@@ -124,63 +128,63 @@ bool MFTracker::init(const cv::Mat& image)
if (image.empty())
return false;
- image.copyTo(m_image);
+ image.copyTo(__image);
buildOpticalFlowPyramid(
- m_image,
- m_pyramid,
- m_params.mWindowSize,
- m_params.mPyrMaxLevel);
+ __image,
+ __pyramid,
+ __params.mWindowSize,
+ __params.mPyrMaxLevel);
- m_isInit = true;
- return m_isInit;
+ __isInit = true;
+ return __isInit;
}
bool MFTracker::update(const cv::Mat& image)
{
- if (!m_isInit || image.empty())
+ if (!__isInit || image.empty())
return false;
/* Handles such behaviour when preparation frame has the size
* different to the tracking frame size. In such case, we resize preparation
*frame and bounding box. Then, track as usually:
*/
- if (m_image.rows != image.rows || m_image.cols != image.cols) {
- const float xFactor = (float) image.cols / m_image.cols;
- const float yFactor = (float) image.rows / m_image.rows;
+ if (__image.rows != image.rows || __image.cols != image.cols) {
+ const float xFactor = static_cast<float>(image.cols) / __image.cols;
+ const float yFactor = static_cast<float>(image.rows) / __image.rows;
- resize(m_image, m_image, cv::Size(), xFactor, yFactor);
+ resize(__image, __image, cv::Size(), xFactor, yFactor);
- m_boundingBox.x *= xFactor;
- m_boundingBox.y *= yFactor;
- m_boundingBox.width *= xFactor;
- m_boundingBox.height *= yFactor;
+ __boundingBox.x *= xFactor;
+ __boundingBox.y *= yFactor;
+ __boundingBox.width *= xFactor;
+ __boundingBox.height *= yFactor;
}
- cv::Mat oldImage = m_image;
+ cv::Mat oldImage = __image;
- cv::Rect_<float> oldBox = m_boundingBox;
+ cv::Rect_<float> oldBox = __boundingBox;
if (!medianFlowImpl(oldImage, image, oldBox))
return false;
- image.copyTo(m_image);
- m_boundingBox = oldBox;
+ image.copyTo(__image);
+ __boundingBox = oldBox;
return true;
}
bool MFTracker::isInited() const
{
- return m_isInit;
+ return __isInit;
}
float MFTracker::getLastConfidence() const
{
- return m_confidence;
+ return __confidence;
}
cv::Rect_<float> MFTracker::getLastBoundingBox() const
{
- return m_boundingBox;
+ return __boundingBox;
}
bool MFTracker::medianFlowImpl(
@@ -188,15 +192,13 @@ bool MFTracker::medianFlowImpl(
{
std::vector<cv::Point2f> pointsToTrackOld, pointsToTrackNew;
- const float gridXStep = oldBox.width / m_params.mPointsInGrid;
- const float gridYStep = oldBox.height / m_params.mPointsInGrid;
- for (int i = 0; i < m_params.mPointsInGrid; i++) {
- for (int j = 0; j < m_params.mPointsInGrid; j++) {
+ const float gridXStep = oldBox.width / __params.mPointsInGrid;
+ const float gridYStep = oldBox.height / __params.mPointsInGrid;
+ for (int i = 0; i < __params.mPointsInGrid; i++)
+ for (int j = 0; j < __params.mPointsInGrid; j++)
pointsToTrackOld.push_back(
cv::Point2f(oldBox.x + .5f*gridXStep + 1.f*gridXStep*j,
oldBox.y + .5f*gridYStep + 1.f*gridYStep*i));
- }
- }
const size_t numberOfPointsToTrackOld = pointsToTrackOld.size();
std::vector<uchar> status(numberOfPointsToTrackOld);
@@ -206,24 +208,23 @@ bool MFTracker::medianFlowImpl(
cv::buildOpticalFlowPyramid(
newImage_gray,
tempPyramid,
- m_params.mWindowSize,
- m_params.mPyrMaxLevel);
+ __params.mWindowSize,
+ __params.mPyrMaxLevel);
- cv::calcOpticalFlowPyrLK(m_pyramid,
+ cv::calcOpticalFlowPyrLK(__pyramid,
tempPyramid,
pointsToTrackOld,
pointsToTrackNew,
status,
errors,
- m_params.mWindowSize,
- m_params.mPyrMaxLevel,
- m_termcrit);
+ __params.mWindowSize,
+ __params.mPyrMaxLevel,
+ __termcrit);
std::vector<cv::Point2f> di;
- for (size_t idx = 0u; idx < numberOfPointsToTrackOld; idx++) {
+ for (size_t idx = 0u; idx < numberOfPointsToTrackOld; idx++)
if (status[idx] == 1)
di.push_back(pointsToTrackNew[idx] - pointsToTrackOld[idx]);
- }
std::vector<bool> filter_status;
check_FB(tempPyramid,
@@ -259,14 +260,15 @@ bool MFTracker::medianFlowImpl(
displacements.push_back(sqrt(di[idx].ddot(di[idx])));
}
- m_confidence = (10.f - getMedian(displacements, (int)displacements.size())) / 10.f;
+ __confidence =
+ (10.f - getMedian(displacements, static_cast<int>(displacements.size()))) / 10.f;
- if (m_confidence < 0.f) {
- m_confidence = 0.f;
+ if (__confidence < 0.f) {
+ __confidence = 0.f;
return false;
}
- m_pyramid.swap(tempPyramid);
+ __pyramid.swap(tempPyramid);
oldBox = boxCandidate;
return true;
}
@@ -354,19 +356,19 @@ void MFTracker::check_FB(
std::vector<cv::Point2f> pointsToTrackReprojection;
calcOpticalFlowPyrLK(newPyramid,
- m_pyramid,
+ __pyramid,
newPoints,
pointsToTrackReprojection,
LKstatus,
errors,
- m_params.mWindowSize,
- m_params.mPyrMaxLevel,
- m_termcrit);
+ __params.mWindowSize,
+ __params.mPyrMaxLevel,
+ __termcrit);
for (size_t idx = 0u; idx < numberOfOldPoints; idx++)
FBerror[idx] = l2distance(oldPoints[idx], pointsToTrackReprojection[idx]);
- float FBerrorMedian = getMedian(FBerror) + FloatEps;
+ float FBerrorMedian = getMedian(FBerror) + FLOATEPS;
for (size_t idx = 0u; idx < numberOfOldPoints; idx++)
status[idx] = (FBerror[idx] < FBerrorMedian);
}
@@ -398,7 +400,7 @@ void MFTracker::check_NCC(
NCC[idx] = (sq2 == 0 ? sq1 / std::abs(sq1) : (prod - s1 * s2 / N) / sq1 / sq2);
}
- float median = getMedian(NCC) - FloatEps;
+ float median = getMedian(NCC) - FLOATEPS;
for (size_t idx = 0u; idx < oldPoints.size(); idx++)
status[idx] = status[idx] && (NCC[idx] > median);
}
diff --git a/mv_image/image/src/Tracking/RecognitionBasedTracker.cpp b/mv_image/image/src/Tracking/RecognitionBasedTracker.cpp
index 218ac16f..d3cbfbc6 100644
--- a/mv_image/image/src/Tracking/RecognitionBasedTracker.cpp
+++ b/mv_image/image/src/Tracking/RecognitionBasedTracker.cpp
@@ -18,15 +18,18 @@
#include "Recognition/ImageRecognizer.h"
+#include <new>
+
namespace MediaVision {
namespace Image {
+
RecognitionBasedTracker::RecognitionBasedTracker(
const ImageObject& target,
const FeaturesExtractingParams& sceneFeaturesExtractingParams,
const RecognitionParams& recognitionParams) :
- m_target(target),
- m_sceneFeatureExtractingParams(sceneFeaturesExtractingParams),
- m_recogParams(recognitionParams)
+ __target(target),
+ __sceneFeatureExtractingParams(sceneFeaturesExtractingParams),
+ __recogParams(recognitionParams)
{
; /* NULL */
}
@@ -42,18 +45,18 @@ bool RecognitionBasedTracker::track(
{
result.clear();
- ImageObject scene(frame, m_sceneFeatureExtractingParams);
+ ImageObject scene(frame, __sceneFeatureExtractingParams);
ImageRecognizer recognizer(scene);
std::vector<cv::Point2f> contour;
- bool isRecognized = recognizer.recognize(m_target, m_recogParams, contour);
+ bool isRecognized = recognizer.recognize(__target, __recogParams, contour);
if (isRecognized) {
size_t numberOfContourPoints = contour.size();
result.resize(numberOfContourPoints);
- for(size_t i = 0u; i < numberOfContourPoints; ++i) {
+ for (size_t i = 0u; i < numberOfContourPoints; ++i) {
result[i].x = (int)contour[i].x;
result[i].y = (int)contour[i].y;
}
@@ -62,15 +65,15 @@ bool RecognitionBasedTracker::track(
return isRecognized;
}
-void RecognitionBasedTracker::reinforcement(const std::vector<cv::Point>& location)
+void RecognitionBasedTracker::reinforcement(const std::vector<cv::Point>& /*location*/)
{
; /* The tracker is based on the recognition on the entire image.
- *The reinforcement does not make a sense.*/
+ * The reinforcement does not make sense. */
}
cv::Ptr<ObjectTracker> RecognitionBasedTracker::clone() const
{
- return cv::Ptr<ObjectTracker>(new RecognitionBasedTracker(*this));
+ return cv::Ptr<ObjectTracker>(new (std::nothrow)RecognitionBasedTracker(*this));
}
} /* Image */
diff --git a/mv_image/image/src/mv_image_open.cpp b/mv_image/image/src/mv_image_open.cpp
index 53d46bb9..42a5ce30 100644
--- a/mv_image/image/src/mv_image_open.cpp
+++ b/mv_image/image/src/mv_image_open.cpp
@@ -29,6 +29,7 @@
#include <opencv/cv.h>
namespace {
+
class DefaultConfiguration {
public:
static const DefaultConfiguration& getInstance();
@@ -49,40 +50,40 @@ private:
private:
static DefaultConfiguration instance;
- MediaVision::Image::FeaturesExtractingParams m_objectFeaturesExtractingParams;
+ MediaVision::Image::FeaturesExtractingParams __objectFeaturesExtractingParams;
- MediaVision::Image::FeaturesExtractingParams m_sceneFeaturesExtractingParams;
+ MediaVision::Image::FeaturesExtractingParams __sceneFeaturesExtractingParams;
- MediaVision::Image::RecognitionParams m_recognitionParams;
+ MediaVision::Image::RecognitionParams __recognitionParams;
- MediaVision::Image::StabilizationParams m_stabilizationParams;
+ MediaVision::Image::StabilizationParams __stabilizationParams;
- MediaVision::Image::TrackingParams m_trackingParams;
+ MediaVision::Image::TrackingParams __trackingParams;
};
DefaultConfiguration DefaultConfiguration::instance;
DefaultConfiguration::DefaultConfiguration() :
- m_objectFeaturesExtractingParams(),
- m_sceneFeaturesExtractingParams(),
- m_recognitionParams(15, 0.33, 0.1),
- m_stabilizationParams(true, 3, 0.00006, 1.3, 2, 0.001),
- m_trackingParams()
+ __objectFeaturesExtractingParams(),
+ __sceneFeaturesExtractingParams(),
+ __recognitionParams(15, 0.33, 0.1),
+ __stabilizationParams(true, 3, 0.00006, 1.3, 2, 0.001),
+ __trackingParams()
{
- m_objectFeaturesExtractingParams.mKeypointType = MediaVision::Image::KT_ORB;
- m_objectFeaturesExtractingParams.mDescriptorType = MediaVision::Image::DT_ORB;
- m_objectFeaturesExtractingParams.ORB.mScaleFactor = 1.2;
- m_objectFeaturesExtractingParams.ORB.mMaximumFeaturesNumber = 1000;
-
- m_sceneFeaturesExtractingParams.mKeypointType = MediaVision::Image::KT_ORB;
- m_sceneFeaturesExtractingParams.mDescriptorType = MediaVision::Image::DT_ORB;
- m_sceneFeaturesExtractingParams.ORB.mScaleFactor = 1.2;
- m_sceneFeaturesExtractingParams.ORB.mMaximumFeaturesNumber = 5000;
-
- m_trackingParams.mFramesFeaturesExtractingParams = m_sceneFeaturesExtractingParams;
- m_trackingParams.mRecognitionParams = m_recognitionParams;
- m_trackingParams.mStabilizationParams = m_stabilizationParams;
- m_trackingParams.mExpectedOffset = 0.0;
+ __objectFeaturesExtractingParams.mKeypointType = MediaVision::Image::KT_ORB;
+ __objectFeaturesExtractingParams.mDescriptorType = MediaVision::Image::DT_ORB;
+ __objectFeaturesExtractingParams.ORB.mScaleFactor = 1.2;
+ __objectFeaturesExtractingParams.ORB.mMaximumFeaturesNumber = 1000;
+
+ __sceneFeaturesExtractingParams.mKeypointType = MediaVision::Image::KT_ORB;
+ __sceneFeaturesExtractingParams.mDescriptorType = MediaVision::Image::DT_ORB;
+ __sceneFeaturesExtractingParams.ORB.mScaleFactor = 1.2;
+ __sceneFeaturesExtractingParams.ORB.mMaximumFeaturesNumber = 5000;
+
+ __trackingParams.mFramesFeaturesExtractingParams = __sceneFeaturesExtractingParams;
+ __trackingParams.mRecognitionParams = __recognitionParams;
+ __trackingParams.mStabilizationParams = __stabilizationParams;
+ __trackingParams.mExpectedOffset = 0.0;
}
const DefaultConfiguration& DefaultConfiguration::getInstance()
@@ -93,31 +94,31 @@ const DefaultConfiguration& DefaultConfiguration::getInstance()
MediaVision::Image::FeaturesExtractingParams
DefaultConfiguration::getObjectFeaturesExtractingParams() const
{
- return m_objectFeaturesExtractingParams;
+ return __objectFeaturesExtractingParams;
}
MediaVision::Image::FeaturesExtractingParams
DefaultConfiguration::getSceneFeaturesExtractingParams() const
{
- return m_sceneFeaturesExtractingParams;
+ return __sceneFeaturesExtractingParams;
}
MediaVision::Image::RecognitionParams
DefaultConfiguration::getRecognitionParams() const
{
- return m_recognitionParams;
+ return __recognitionParams;
}
MediaVision::Image::StabilizationParams
DefaultConfiguration::getStabilizationParams() const
{
- return m_stabilizationParams;
+ return __stabilizationParams;
}
MediaVision::Image::TrackingParams
DefaultConfiguration::getTrackingParams() const
{
- return m_trackingParams;
+ return __trackingParams;
}
void extractTargetFeaturesExtractingParams(
@@ -145,9 +146,8 @@ void extractTargetFeaturesExtractingParams(
MV_IMAGE_RECOGNITION_OBJECT_MAX_KEYPOINTS_NUM,
&featuresExtractingParams.ORB.mMaximumFeaturesNumber);
- if (NULL == engine_cfg) {
+ if (NULL == engine_cfg)
mv_destroy_engine_config(working_cfg);
- }
}
void extractSceneFeaturesExtractingParams(
@@ -175,9 +175,8 @@ void extractSceneFeaturesExtractingParams(
MV_IMAGE_RECOGNITION_SCENE_MAX_KEYPOINTS_NUM,
&featuresExtractingParams.ORB.mMaximumFeaturesNumber);
- if (NULL == engine_cfg) {
+ if (NULL == engine_cfg)
mv_destroy_engine_config(working_cfg);
- }
}
void extractRecognitionParams(
@@ -210,9 +209,8 @@ void extractRecognitionParams(
MV_IMAGE_RECOGNITION_TOLERANT_MATCH_PART_ERR,
&recognitionParams.mTolerantMatchesPartError);
- if (NULL == engine_cfg) {
+ if (NULL == engine_cfg)
mv_destroy_engine_config(working_cfg);
- }
}
void extractStabilizationParams(
@@ -255,9 +253,8 @@ void extractStabilizationParams(
MV_IMAGE_TRACKING_STABLIZATION_ACCELERATION,
&stabilizationParams.mStabilizationAcceleration);
- if (NULL == engine_cfg) {
+ if (NULL == engine_cfg)
mv_destroy_engine_config(working_cfg);
- }
}
void extractTrackingParams(
@@ -292,9 +289,8 @@ void extractTrackingParams(
MV_IMAGE_TRACKING_EXPECTED_OFFSET,
&trackingParams.mExpectedOffset);
- if (NULL == engine_cfg) {
+ if (NULL == engine_cfg)
mv_destroy_engine_config(working_cfg);
- }
}
int convertSourceMV2GrayCV(mv_source_h mvSource, cv::Mat& cvSource)
@@ -401,9 +397,8 @@ int mv_image_recognize_open(
{
MEDIA_VISION_INSTANCE_CHECK(source);
MEDIA_VISION_NULL_ARG_CHECK(image_objects);
- for (int objectNum = 0; objectNum < number_of_objects; ++objectNum) {
+ for (int objectNum = 0; objectNum < number_of_objects; ++objectNum)
MEDIA_VISION_INSTANCE_CHECK(image_objects[objectNum]);
- }
MEDIA_VISION_NULL_ARG_CHECK(recognized_cb);
cv::Mat scene;
@@ -524,9 +519,8 @@ int mv_image_object_create_open(
MEDIA_VISION_NULL_ARG_CHECK(image_object);
(*image_object) = (mv_image_object_h)new (std::nothrow)MediaVision::Image::ImageObject();
- if (*image_object == NULL) {
+ if (*image_object == NULL)
return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
- }
return MEDIA_VISION_ERROR_NONE;
}
@@ -629,9 +623,8 @@ int mv_image_object_clone_open(
MEDIA_VISION_NULL_ARG_CHECK(dst);
(*dst) = (mv_image_object_h)new (std::nothrow)MediaVision::Image::ImageObject();
- if (*dst == NULL) {
+ if (*dst == NULL)
return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
- }
*(MediaVision::Image::ImageObject*)(*dst) =
*(MediaVision::Image::ImageObject*)src;
@@ -669,9 +662,8 @@ int mv_image_object_load_open(
}
(*image_object) = (mv_image_object_h)new (std::nothrow)MediaVision::Image::ImageObject();
- if (*image_object == NULL) {
+ if (*image_object == NULL)
return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
- }
int ret = ((MediaVision::Image::ImageObject*)(*image_object))->load(file_name);
if (ret != MEDIA_VISION_ERROR_NONE) {
@@ -689,9 +681,8 @@ int mv_image_tracking_model_create_open(
(*image_tracking_model) = (mv_image_tracking_model_h)
new (std::nothrow)MediaVision::Image::ImageTrackingModel();
- if (*image_tracking_model == NULL) {
+ if (*image_tracking_model == NULL)
return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
- }
return MEDIA_VISION_ERROR_NONE;
}
@@ -792,9 +783,8 @@ int mv_image_tracking_model_load_open(
(*image_tracking_model) = (mv_image_tracking_model_h)
new (std::nothrow)MediaVision::Image::ImageTrackingModel();
- if (*image_tracking_model == NULL) {
+ if (*image_tracking_model == NULL)
return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
- }
int ret = ((MediaVision::Image::ImageTrackingModel*)(*image_tracking_model))->load(file_name);
if (ret != MEDIA_VISION_ERROR_NONE) {
diff --git a/mv_image/image_lic/include/mv_image_lic.h b/mv_image/image_lic/include/mv_image_lic.h
index 29e1c1dc..d84b52e0 100644
--- a/mv_image/image_lic/include/mv_image_lic.h
+++ b/mv_image/image_lic/include/mv_image_lic.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __TIZEN_MEDIAVISION_IMAGE_LIC_H__
-#define __TIZEN_MEDIAVISION_IMAGE_LIC_H__
+#ifndef __MEDIA_VISION_IMAGE_LIC_H__
+#define __MEDIA_VISION_IMAGE_LIC_H__
#include "mv_image.h"
@@ -78,12 +78,12 @@ extern "C" {
* @see mv_engine_config_h
*/
int mv_image_recognize_lic(
- mv_source_h source,
- const mv_image_object_h *image_objects,
- int number_of_objects,
- mv_engine_config_h engine_cfg,
- mv_image_recognized_cb recognized_cb,
- void *user_data);
+ mv_source_h source,
+ const mv_image_object_h *image_objects,
+ int number_of_objects,
+ mv_engine_config_h engine_cfg,
+ mv_image_recognized_cb recognized_cb,
+ void *user_data);
/*************************/
/* Image object tracking */
@@ -142,11 +142,11 @@ int mv_image_recognize_lic(
* @see mv_image_tracking_model_destroy_lic()
*/
int mv_image_track_lic(
- mv_source_h source,
- mv_image_tracking_model_h image_tracking_model,
- mv_engine_config_h engine_cfg,
- mv_image_tracked_cb tracked_cb,
- void *user_data);
+ mv_source_h source,
+ mv_image_tracking_model_h image_tracking_model,
+ mv_engine_config_h engine_cfg,
+ mv_image_tracked_cb tracked_cb,
+ void *user_data);
/**************************/
/* Image object behaviour */
@@ -167,7 +167,7 @@ int mv_image_track_lic(
* @see mv_image_object_destroy_lic()
*/
int mv_image_object_create_lic(
- mv_image_object_h *image_object);
+ mv_image_object_h *image_object);
/**
* @brief Destroys the image object.
@@ -182,7 +182,7 @@ int mv_image_object_create_lic(
* @see mv_image_object_create_lic()
*/
int mv_image_object_destroy_lic(
- mv_image_object_h image_object);
+ mv_image_object_h image_object);
/**
* @brief Fills the image object.
@@ -223,10 +223,10 @@ int mv_image_object_destroy_lic(
* @see mv_engine_config_h
*/
int mv_image_object_fill_lic(
- mv_image_object_h image_object,
- mv_engine_config_h engine_cfg,
- mv_source_h source,
- mv_rectangle_s *location);
+ mv_image_object_h image_object,
+ mv_engine_config_h engine_cfg,
+ mv_source_h source,
+ mv_rectangle_s *location);
/**
* @brief Gets a value that determines how well an image object can be recognized.
@@ -260,8 +260,8 @@ int mv_image_object_fill_lic(
* @see mv_engine_config_h
*/
int mv_image_object_get_recognition_rate_lic(
- mv_image_object_h image_object,
- double *recognition_rate);
+ mv_image_object_h image_object,
+ double *recognition_rate);
/**
* @brief Sets a label for the image object.
@@ -287,8 +287,8 @@ int mv_image_object_get_recognition_rate_lic(
* @see mv_image_object_destroy_lic()
*/
int mv_image_object_set_label_lic(
- mv_image_object_h image_object,
- int label);
+ mv_image_object_h image_object,
+ int label);
/**
* @brief Gets a label of image object.
@@ -316,8 +316,8 @@ int mv_image_object_set_label_lic(
* @see mv_image_object_destroy_lic()
*/
int mv_image_object_get_label_lic(
- mv_image_object_h image_object,
- int *label);
+ mv_image_object_h image_object,
+ int *label);
/**
* @brief Clones the image object.
@@ -336,8 +336,8 @@ int mv_image_object_get_label_lic(
* @see mv_image_object_destroy_lic()
*/
int mv_image_object_clone_lic(
- mv_image_object_h src,
- mv_image_object_h *dst);
+ mv_image_object_h src,
+ mv_image_object_h *dst);
/**
* @brief Saves the image object.
@@ -356,7 +356,8 @@ int mv_image_object_clone_lic(
* @see mv_image_object_destroy_lic()
*/
int mv_image_object_save_lic(
- const char *file_name, mv_image_object_h image_object);
+ const char *file_name,
+ mv_image_object_h image_object);
/**
* @brief Loads an image object from the file.
@@ -380,7 +381,8 @@ int mv_image_object_save_lic(
* @see mv_image_object_destroy_lic()
*/
int mv_image_object_load_lic(
- const char *file_name, mv_image_object_h image_object);
+ const char *file_name,
+ mv_image_object_h *image_object);
/**********************************/
/* Image tracking model behaviour */
@@ -401,7 +403,7 @@ int mv_image_object_load_lic(
* @see mv_image_tracking_model_destroy_lic()
*/
int mv_image_tracking_model_create_lic(
- mv_image_tracking_model_h *image_tracking_model);
+ mv_image_tracking_model_h *image_tracking_model);
/**
* @brief Sets target of image tracking model.
@@ -435,8 +437,8 @@ int mv_image_tracking_model_create_lic(
* @see mv_image_tracking_model_destroy_lic()
*/
int mv_image_tracking_model_set_target_lic(
- mv_image_object_h image_object,
- mv_image_tracking_model_h image_tracking_model);
+ mv_image_object_h image_object,
+ mv_image_tracking_model_h image_tracking_model);
/**
* @brief Destroys the image tracking model.
@@ -454,7 +456,7 @@ int mv_image_tracking_model_set_target_lic(
* @see mv_image_tracking_model_create_lic()
*/
int mv_image_tracking_model_destroy_lic(
- mv_image_tracking_model_h image_tracking_model);
+ mv_image_tracking_model_h image_tracking_model);
/**
* @brief Refreshes the state of image tracking model.
@@ -486,8 +488,8 @@ int mv_image_tracking_model_destroy_lic(
* @see mv_image_tracking_model_destroy_lic()
*/
int mv_image_tracking_model_refresh_lic(
- mv_image_tracking_model_h image_tracking_model,
- mv_engine_config_h engine_cfg);
+ mv_image_tracking_model_h image_tracking_model,
+ mv_engine_config_h engine_cfg);
/**
* @brief Clones the image tracking model.
@@ -504,8 +506,8 @@ int mv_image_tracking_model_refresh_lic(
* @see mv_image_tracking_model_destroy_lic()
*/
int mv_image_tracking_model_clone_lic(
- mv_image_tracking_model_h src,
- mv_image_tracking_model_h *dst);
+ mv_image_tracking_model_h src,
+ mv_image_tracking_model_h *dst);
/**
* @brief Saves the image tracking model.
@@ -531,7 +533,8 @@ int mv_image_tracking_model_clone_lic(
* @see mv_image_tracking_model_destroy_lic()
*/
int mv_image_tracking_model_save_lic(
- const char *file_name, mv_image_tracking_model_h image_tracking_model);
+ const char *file_name,
+ mv_image_tracking_model_h image_tracking_model);
/**
* @brief Loads an image tracking model from the file.
@@ -555,10 +558,11 @@ int mv_image_tracking_model_save_lic(
* @see mv_image_tracking_model_destroy_lic()
*/
int mv_image_tracking_model_load_lic(
- const char *file_name, mv_image_tracking_model_h *image_tracking_model);
+ const char *file_name,
+ mv_image_tracking_model_h *image_tracking_model);
#ifdef __cplusplus
}
#endif /* __cplusplus */
-#endif /* __TIZEN_MEDIAVISION_IMAGE_LIC_H__ */
+#endif /* __MEDIA_VISION_IMAGE_LIC_H__ */
diff --git a/mv_image/image_lic/src/mv_image_lic.c b/mv_image/image_lic/src/mv_image_lic.c
index 00e898b4..7e2a56d9 100644
--- a/mv_image/image_lic/src/mv_image_lic.c
+++ b/mv_image/image_lic/src/mv_image_lic.c
@@ -88,15 +88,15 @@ int mv_image_object_clone_lic(
}
int mv_image_object_save_lic(
- mv_image_object_h image_object,
- const char *file_name)
+ const char *file_name,
+ mv_image_object_h image_object)
{
return MEDIA_VISION_ERROR_NOT_SUPPORTED;
}
int mv_image_object_load_lic(
- mv_image_object_h image_object,
- const char *file_name)
+ const char *file_name,
+ mv_image_object_h image_object)
{
return MEDIA_VISION_ERROR_NOT_SUPPORTED;
}
@@ -135,15 +135,15 @@ int mv_image_tracking_model_clone_lic(
}
int mv_image_tracking_model_save_lic(
- mv_image_tracking_model_h image_tracking_model,
- const char *file_name)
+ const char *file_name,
+ mv_image_tracking_model_h image_tracking_model)
{
return MEDIA_VISION_ERROR_NOT_SUPPORTED;
}
int mv_image_tracking_model_load_lic(
- mv_image_tracking_model_h image_tracking_model,
- const char *file_name)
+ const char *file_name,
+ mv_image_tracking_model_h *image_tracking_model)
{
return MEDIA_VISION_ERROR_NOT_SUPPORTED;
}
diff --git a/mv_surveillance/surveillance/include/EventManager.h b/mv_surveillance/surveillance/include/EventManager.h
index 540957b0..dc802a7a 100644
--- a/mv_surveillance/surveillance/include/EventManager.h
+++ b/mv_surveillance/surveillance/include/EventManager.h
@@ -38,6 +38,7 @@ class EventManager;
*/
class EventManagerDestroyer {
public:
+
/**
* @brief Default destructor.
*
@@ -54,6 +55,7 @@ public:
void initialize(EventManager *pointer);
private:
+
EventManager *__pInstance;
};
@@ -66,6 +68,7 @@ private:
class EventManager {
public:
+
/**
* @brief Gets EventManager instance.
*
@@ -86,6 +89,7 @@ public:
* @param [in] user_data The user data to be passed to the callback function
* @param [in] numberOfPoints The number of ROI points
 * @param [in] roi              The input array with ROI points
+ * @param [in] isInternal       Whether the event is interpreted as internal to the surveillance module
* @return @c 0 on success, otherwise a negative error value
*/
int registerEvent(
@@ -97,7 +101,8 @@ public:
mv_surveillance_event_occurred_cb callback,
void *user_data,
int numberOfPoints,
- mv_point_s *roi);
+ mv_point_s *roi,
+ bool isInternal);
/**
* @brief Unregisters event.
@@ -153,6 +158,7 @@ public:
StringVector& eventResValNames);
private:
+
EventManager();
EventManager(const EventManager&);
@@ -168,6 +174,7 @@ private:
friend class EventManagerDestroyer;
private:
+
static EventManager *__pInstance;
static EventManagerDestroyer Destroyer;
@@ -175,6 +182,7 @@ private:
static EventTypesMap SupportedEventTypes;
private:
+
EventTriggersMap __eventTriggers;
};
diff --git a/mv_surveillance/surveillance/include/EventTrigger.h b/mv_surveillance/surveillance/include/EventTrigger.h
index d5851957..f2fbd2d5 100644
--- a/mv_surveillance/surveillance/include/EventTrigger.h
+++ b/mv_surveillance/surveillance/include/EventTrigger.h
@@ -53,6 +53,7 @@ public:
* @param [in] user_data The user data to be passed to the callback function
* @param [in] numberOfPoints The number of ROI points
 * @param [in] roi              The input array with ROI points
+ * @param [in] isInternal       Whether the event is interpreted as internal to the surveillance module
*/
EventTrigger(
mv_surveillance_event_trigger_h eventTrigger,
@@ -61,7 +62,8 @@ public:
mv_surveillance_event_occurred_cb callback,
void *userData,
int numberOfPoints,
- mv_point_s *roi);
+ mv_point_s *roi,
+ bool isInternal);
/**
* @brief Default destructor.
@@ -127,6 +129,7 @@ public:
* @param [in] user_data The user data to be passed to the callback function
* @param [in] numberOfPoints The number of ROI points
 * @param [in] roi              The input array with ROI points
+ * @param [in] isInternal       Whether the event is interpreted as internal to the surveillance module
* @return @c true on success, false otherwise
*/
bool subscribeCallback(
@@ -135,7 +138,8 @@ public:
mv_surveillance_event_occurred_cb callback,
void *userData,
int numberOfPoints,
- mv_point_s *roi);
+ mv_point_s *roi,
+ bool isInternal);
/**
* @brief Unsubscibes callback with unique identifier.
@@ -197,6 +201,8 @@ protected:
mv_surveillance_event_occurred_cb callback;
void *userData;
+
+ bool isInternal;
};
typedef std::map<long int, CallbackData> CallbackDataMap;
diff --git a/mv_surveillance/surveillance/include/EventTriggerMovementDetection.h b/mv_surveillance/surveillance/include/EventTriggerMovementDetection.h
index e3b57a9c..2f113a19 100644
--- a/mv_surveillance/surveillance/include/EventTriggerMovementDetection.h
+++ b/mv_surveillance/surveillance/include/EventTriggerMovementDetection.h
@@ -76,6 +76,7 @@ public:
* @param [in] user_data The user data to be passed to the callback function
* @param [in] numberOfPoints The number of ROI points
* @param [in] roi The intput array with ROI points
+ * @param [in] isInternal Whether the event is treated as internal by the surveillance module
*/
EventTriggerMovementDetection(
mv_surveillance_event_trigger_h eventTrigger,
@@ -84,7 +85,8 @@ public:
mv_surveillance_event_occurred_cb callback,
void *userData,
int numberOfPoints,
- mv_point_s *roi);
+ mv_point_s *roi,
+ bool isInternal);
/**
* @brief Default destructor.
diff --git a/mv_surveillance/surveillance/include/EventTriggerPersonAppearance.h b/mv_surveillance/surveillance/include/EventTriggerPersonAppearance.h
index 3e50e374..b19b0bc5 100644
--- a/mv_surveillance/surveillance/include/EventTriggerPersonAppearance.h
+++ b/mv_surveillance/surveillance/include/EventTriggerPersonAppearance.h
@@ -23,6 +23,7 @@
*/
#include "EventTrigger.h"
+#include "MFTracker.h"
#include "EventResult.h"
#include "EventDefs.h"
@@ -30,6 +31,8 @@
#include <opencv/cv.h>
+#include <sys/time.h>
+
namespace mediavision {
namespace surveillance {
@@ -82,6 +85,7 @@ public:
* @param [in] user_data The user data to be passed to the callback function
* @param [in] numberOfPoints The number of ROI points
* @param [in] roi The intput array with ROI points
+ * @param [in] isInternal Whether the event is treated as internal by the surveillance module
*/
EventTriggerPersonAppearance(
mv_surveillance_event_trigger_h eventTrigger,
@@ -90,7 +94,8 @@ public:
mv_surveillance_event_occurred_cb callback,
void *userData,
int numberOfPoints,
- mv_point_s *roi);
+ mv_point_s *roi,
+ bool isInternal);
/**
* @brief Default destructor.
@@ -155,19 +160,22 @@ private:
void *user_data);
private:
+
void runCallbacks(mv_source_h source);
+ std::vector<bool> reinforceTrackedPersons(const CVRectangles& appearedPersons);
+
private:
- struct TrackedRectangle {
- cv::Rect rect;
- int framesCount;
+ class TrackedRectangle {
+ public:
+ TrackedRectangle(cv::Rect _rect, struct timeval _appearanceTime);
+
+ cv::Rect_<float> rect;
+
+ struct timeval appearanceTime;
- TrackedRectangle(cv::Rect _rect, int _framesCount)
- {
- rect = _rect;
- framesCount = _framesCount;
- }
+ MFTracker tracker;
};
typedef std::list<TrackedRectangle> TrackedRectangles;
diff --git a/mv_surveillance/surveillance/include/EventTriggerPersonRecognition.h b/mv_surveillance/surveillance/include/EventTriggerPersonRecognition.h
index cd3448ea..78233863 100644
--- a/mv_surveillance/surveillance/include/EventTriggerPersonRecognition.h
+++ b/mv_surveillance/surveillance/include/EventTriggerPersonRecognition.h
@@ -80,6 +80,7 @@ public:
* @param [in] user_data The user data to be passed to the callback function
* @param [in] numberOfPoints The number of ROI points
* @param [in] roi The intput array with ROI points
+ * @param [in] isInternal Whether the event is treated as internal by the surveillance module
*/
EventTriggerPersonRecognition(
mv_surveillance_event_trigger_h eventTrigger,
@@ -88,7 +89,8 @@ public:
mv_surveillance_event_occurred_cb callback,
void *userData,
int numberOfPoints,
- mv_point_s *roi);
+ mv_point_s *roi,
+ bool isInternal);
/**
* @brief Default destructor.
diff --git a/mv_surveillance/surveillance/include/HoGDetector.h b/mv_surveillance/surveillance/include/HoGDetector.h
index 76b0f2f2..b4fd68f7 100644
--- a/mv_surveillance/surveillance/include/HoGDetector.h
+++ b/mv_surveillance/surveillance/include/HoGDetector.h
@@ -61,7 +61,7 @@ struct HOGDescriptor {
enum { L2Hys = 0 };
enum { DEFAULT_NLEVELS = 64 };
- // default constructor
+ /* default constructor */
HOGDescriptor() :
winSize(64, 128),
blockSize(16, 16),
@@ -73,9 +73,10 @@ struct HOGDescriptor {
histogramNormType(HOGDescriptor::L2Hys),
L2HysThreshold(0.2),
gammaCorrection(true),
- nlevels(HOGDescriptor::DEFAULT_NLEVELS) {}
+ nlevels(HOGDescriptor::DEFAULT_NLEVELS)
+ {}
- // constructor
+ /* constructor */
HOGDescriptor(
Size _winSize,
Size _blockSize,
@@ -98,9 +99,10 @@ struct HOGDescriptor {
histogramNormType(_histogramNormType),
L2HysThreshold(_L2HysThreshold),
gammaCorrection(_gammaCorrection),
- nlevels(_nlevels) {}
+ nlevels(_nlevels)
+ {}
- // default destructor
+ /* default destructor */
virtual ~HOGDescriptor() {}
size_t getDescriptorSize() const;
@@ -118,7 +120,7 @@ struct HOGDescriptor {
Size padding = Size(),
const vector<Point>& locations = vector<Point>()) const;
- //with found weights output
+ /* with found weights output */
virtual void detect(
const Mat& img,
CV_OUT vector<Point>& foundLocations,
@@ -128,7 +130,7 @@ struct HOGDescriptor {
Size padding = Size(),
const vector<Point>& searchLocations = vector<Point>()) const;
- //without found weights output
+ /* without found weights output */
virtual void detect(
const Mat& img,
CV_OUT vector<Point>& foundLocations,
@@ -137,7 +139,7 @@ struct HOGDescriptor {
Size padding = Size(),
const vector<Point>& searchLocations = vector<Point>()) const;
- //with result weights output
+ /* with result weights output */
virtual void detectMultiScale(
const Mat& img,
CV_OUT vector<Rect>& foundLocations,
@@ -149,7 +151,7 @@ struct HOGDescriptor {
double finalThreshold = 2.0,
bool useMeanshiftGrouping = false) const;
- //without found weights output
+ /* without found weights output */
virtual void detectMultiScale(
const Mat& img,
CV_OUT vector<Rect>& foundLocations,
@@ -167,6 +169,13 @@ struct HOGDescriptor {
Size paddingTL = Size(),
Size paddingBR = Size()) const;
+ static vector<float> getDefaultPeopleDetector();
+
+ static vector<float> getDaimlerPeopleDetector();
+
+ /* read/parse Dalal's alt model file */
+ void readALTModel(std::string modelfile);
+
void groupRectangles(
vector<cv::Rect>& rectList,
vector<double>& weights,
diff --git a/mv_surveillance/surveillance/include/MFTracker.h b/mv_surveillance/surveillance/include/MFTracker.h
new file mode 100644
index 00000000..947f6d4c
--- /dev/null
+++ b/mv_surveillance/surveillance/include/MFTracker.h
@@ -0,0 +1,137 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MEDIA_VISION_MFTRACKER_H__
+#define __MEDIA_VISION_MFTRACKER_H__
+
+#include <opencv2/core/core.hpp>
+
+namespace mediavision {
+namespace surveillance {
+
+/**
+ * @class MFTracker
+ * @brief Median Flow tracker implementation.
+ *
+ * @since_tizen 3.0
+ */
+class MFTracker {
+public:
+ struct Params {
+ /**
+ * @brief TrackerMedianFlow algorithm parameters constructor
+ */
+ Params();
+
+ int mPointsInGrid; /**< Square root of number of used keypoints.
+ Increase it to trade accuracy for speed.
+ Default value is sensible and recommended */
+
+ cv::Size mWindowSize; /**< Size of the search window at each pyramid level
+ for Lucas-Kanade optical flow search used for
+ tracking */
+
+ int mPyrMaxLevel; /**< Number of pyramid levels for Lucas-Kanade optical
+ flow search used for tracking */
+ };
+
+ /**
+ * @brief @ref MFTracker constructor based on tracking algorithm parameters.
+ *
+ * @since_tizen 3.0
+ * @param [in] params Parameters for objects tracking
+ */
+ MFTracker(Params params = Params());
+
+ /**
+ * @brief Tracks the target for the video stream consisting of frames.
+ *
+ * @since_tizen 3.0
+ * @remarks Call this function sequentially for each consecutive frame of the video stream
+ * @param [in] frame Current frame of the video stream
+ * @param [out] result Resulting bounding box of the tracked object
+ * @return true if object is tracked, otherwise return false
+ */
+ bool track(const cv::Mat& frame, cv::Rect_<float>& result);
+
+ /**
+ * @brief Reinforces the tracker with the current location of a target.
+ *
+ * @since_tizen 3.0
+ * @param [in] location Current location of a target
+ */
+ void reinforcement(const cv::Rect_<float>& location);
+
+private:
+ bool isInited() const;
+
+ bool init(const cv::Mat& image);
+
+ bool update(const cv::Mat& image);
+
+ float getLastConfidence() const;
+
+ cv::Rect_<float> getLastBoundingBox() const;
+
+ bool medianFlowImpl(cv::Mat oldImage, cv::Mat newImage, cv::Rect_<float>& oldBox);
+
+ cv::Rect_<float> vote(
+ const std::vector<cv::Point2f>& oldPoints,
+ const std::vector<cv::Point2f>& newPoints,
+ const cv::Rect_<float>& oldRect,
+ cv::Point2f& mD);
+
+ void check_FB(
+ std::vector<cv::Mat> newPyramid,
+ const std::vector<cv::Point2f>& oldPoints,
+ const std::vector<cv::Point2f>& newPoints,
+ std::vector<bool>& status);
+
+ void check_NCC(
+ const cv::Mat& oldImage,
+ const cv::Mat& newImage,
+ const std::vector<cv::Point2f>& oldPoints,
+ const std::vector<cv::Point2f>& newPoints,
+ std::vector<bool>& status);
+
+private:
+ bool __isInit; /**< Flag is used to determine the model
+ initialization */
+
+ Params __params; /**< Parameters used during tracking, see
+ @ref MFTracker::Params */
+
+ cv::TermCriteria __termcrit; /**< Terminating criteria for OpenCV
+ Lucas–Kanade optical flow algorithm used
+ during tracking */
+
+ cv::Rect_<float> __boundingBox; /**< Tracking object bounding box */
+
+ float __confidence; /**< Confidence that object was tracked
+ correctly at the last tracking iteration */
+
+ cv::Mat __image; /**< Last image for which tracking was
+ performed */
+
+ std::vector<cv::Mat> __pyramid; /**< The pyramid had been calculated for
+ the previous frame (or when
+ initialize the model) */
+};
+
+} /* surveillance */
+} /* mediavision */
+
+#endif /* __MEDIA_VISION_MFTRACKER_H__ */
diff --git a/mv_surveillance/surveillance/include/SurveillanceHelper.h b/mv_surveillance/surveillance/include/SurveillanceHelper.h
index 1ad303a1..ffb6302e 100644
--- a/mv_surveillance/surveillance/include/SurveillanceHelper.h
+++ b/mv_surveillance/surveillance/include/SurveillanceHelper.h
@@ -60,6 +60,7 @@ public:
*/
static int convertSourceMVRGB2GrayCVNeon(mv_source_h mvSource, cv::Mat& cvSource);
#endif
+
};
diff --git a/mv_surveillance/surveillance/src/EventManager.cpp b/mv_surveillance/surveillance/src/EventManager.cpp
index 0a3a05b2..f3830926 100644
--- a/mv_surveillance/surveillance/src/EventManager.cpp
+++ b/mv_surveillance/surveillance/src/EventManager.cpp
@@ -104,7 +104,8 @@ int EventManager::registerEvent(
mv_surveillance_event_occurred_cb callback,
void *user_data,
int numberOfPoints,
- mv_point_s *roi)
+ mv_point_s *roi,
+ bool isInternal)
{
if (NULL == callback || NULL == eventType) {
LOGE("Input event trigger or callback is NULL. Event registering failed.");
@@ -132,7 +133,8 @@ int EventManager::registerEvent(
callback,
user_data,
numberOfPoints,
- roi);
+ roi,
+ isInternal);
const int error = trigger->parseEngineConfig(engineCfg);
if (error != MEDIA_VISION_ERROR_NONE) {
@@ -151,7 +153,8 @@ int EventManager::registerEvent(
callback,
user_data,
numberOfPoints,
- roi);
+ roi,
+ isInternal);
delete trigger;
} else {
@@ -167,7 +170,8 @@ int EventManager::registerEvent(
callback,
user_data,
numberOfPoints,
- roi);
+ roi,
+ isInternal);
const int error = trigger->parseEngineConfig(engineCfg);
if (error != MEDIA_VISION_ERROR_NONE) {
@@ -186,7 +190,8 @@ int EventManager::registerEvent(
callback,
user_data,
numberOfPoints,
- roi);
+ roi,
+ isInternal);
delete trigger;
} else {
@@ -202,7 +207,8 @@ int EventManager::registerEvent(
callback,
user_data,
numberOfPoints,
- roi);
+ roi,
+ isInternal);
const int error = trigger->parseEngineConfig(engineCfg);
@@ -222,7 +228,8 @@ int EventManager::registerEvent(
callback,
user_data,
numberOfPoints,
- roi);
+ roi,
+ isInternal);
delete trigger;
} else {
diff --git a/mv_surveillance/surveillance/src/EventTrigger.cpp b/mv_surveillance/surveillance/src/EventTrigger.cpp
index c2fec7c2..48a64da2 100644
--- a/mv_surveillance/surveillance/src/EventTrigger.cpp
+++ b/mv_surveillance/surveillance/src/EventTrigger.cpp
@@ -32,7 +32,8 @@ EventTrigger::EventTrigger(
mv_surveillance_event_occurred_cb callback,
void *userData,
int numberOfPoints,
- mv_point_s *roi):
+ mv_point_s *roi,
+ bool isInternal):
__videoStreamId(videoStreamId),
__roi(numberOfPoints)
{
@@ -40,6 +41,7 @@ EventTrigger::EventTrigger(
callbackData.eventTrigger = eventTrigger;
callbackData.callback = callback;
callbackData.userData = userData;
+ callbackData.isInternal = isInternal;
__callbackDataMap.insert(CallbackDataPair(triggerId, callbackData));
@@ -68,7 +70,8 @@ bool EventTrigger::subscribeCallback(
mv_surveillance_event_occurred_cb callback,
void *userData,
int numberOfPoints,
- mv_point_s *roi)
+ mv_point_s *roi,
+ bool isInternal)
{
if (isCallbackSubscribed(triggerId)) {
LOGE("Callback with id %d is already subscribed. "
@@ -80,6 +83,7 @@ bool EventTrigger::subscribeCallback(
callbackData.eventTrigger = eventTrigger;
callbackData.callback = callback;
callbackData.userData = userData;
+ callbackData.isInternal = isInternal;
__callbackDataMap.insert(CallbackDataPair(triggerId, callbackData));
diff --git a/mv_surveillance/surveillance/src/EventTriggerMovementDetection.cpp b/mv_surveillance/surveillance/src/EventTriggerMovementDetection.cpp
index cc6ee91d..71d15b4b 100644
--- a/mv_surveillance/surveillance/src/EventTriggerMovementDetection.cpp
+++ b/mv_surveillance/surveillance/src/EventTriggerMovementDetection.cpp
@@ -62,8 +62,9 @@ void mergeOverlappedRects(CVRectangles& rects)
if (intersectionArea != 0 &&
intersectionArea > std::min(area1, area2) / 2) {
- rects[i] |= rects[j];
- rects[j] = DEFAULT_RECT;
+ rects[j] |= rects[i];
+ rects[i] = DEFAULT_RECT;
+ break;
}
}
}
@@ -113,14 +114,16 @@ EventTriggerMovementDetection::EventTriggerMovementDetection(
mv_surveillance_event_occurred_cb callback,
void *userData,
int numberOfPoints,
- mv_point_s *roi) : EventTrigger(
+ mv_point_s *roi,
+ bool isInternal) : EventTrigger(
eventTrigger,
triggerId,
videoStreamId,
callback,
userData,
numberOfPoints,
- roi),
+ roi,
+ isInternal),
__previousImage(),
__eventResult(new EventResultMovementDetection()),
__diffThreshold(DEFAULT_DIFF_THRESHOLD)
@@ -189,6 +192,7 @@ int EventTriggerMovementDetection::pushSource(
diffBuffer);
if (error != MEDIA_VISION_ERROR_NONE) {
+ free(diffBuffer);
LOGE("Absolute difference calculation failed. Pushing source failed.");
return error;
}
@@ -196,6 +200,7 @@ int EventTriggerMovementDetection::pushSource(
error = applyROIToImage(diffBuffer, image.cols, image.rows);
if (error != MEDIA_VISION_ERROR_NONE || image.empty()) {
+ free(diffBuffer);
LOGE("Applying ROI failed with error %d.", error);
return error;
}
@@ -218,49 +223,36 @@ int EventTriggerMovementDetection::pushSource(
CVRectangles rects(contoursSize);
for (size_t i = 0u; i < contoursSize; ++i)
- rects[i] = cv::boundingRect(cv::Mat(contours[i]));
+ rects[i] = cv::boundingRect(contours[i]);
mergeOverlappedRects(rects);
- const size_t roiSize = __roi.size();
- CVPoints roi(roiSize);
-
- cv::Rect roiRect(0, 0, imgDiff.cols, imgDiff.rows);
-
- if (roiSize >= 3u) {
- for (size_t i = 0u; i < roiSize; ++i)
- roi[i] = cv::Point(__roi[i].x, __roi[i].y);
-
- roiRect = cv::boundingRect(roi);
- }
-
const size_t rectsSize = rects.size();
- for (size_t i = 0u; i < rectsSize; ++i)
- if (rects[i] != DEFAULT_RECT &&
- roiRect.contains(rects[i].tl()) &&
- roiRect.contains(rects[i].br())) {
+ for (size_t i = 0u; i < rectsSize; ++i) {
+ if (rects[i] != DEFAULT_RECT) {
mv_rectangle_s rectMV;
convertRectCV2MV(rects[i], rectMV);
__eventResult->__movementRegions.push_back(rectMV);
}
+ }
__previousImage = image;
__eventResult->__grayImage = __previousImage;
// Don't invoke the callback if movement wasn't detected at the frame
- if (__eventResult->__movementRegions.size() > 0) {
- CallbackDataMapConstIter iter = __callbackDataMap.begin();
+ CallbackDataMapConstIter iter = __callbackDataMap.begin();
- for (; iter != __callbackDataMap.end(); ++iter) {
- mv_surveillance_event_occurred_cb callback = iter->second.callback;
+ for (; iter != __callbackDataMap.end(); ++iter) {
+ mv_surveillance_event_occurred_cb callback = iter->second.callback;
+
+ if (__eventResult->__movementRegions.size() > 0 || iter->second.isInternal)
callback(
iter->second.eventTrigger,
source,
__videoStreamId,
__eventResult,
iter->second.userData);
- }
}
return MEDIA_VISION_ERROR_NONE;
diff --git a/mv_surveillance/surveillance/src/EventTriggerPersonAppearance.cpp b/mv_surveillance/surveillance/src/EventTriggerPersonAppearance.cpp
index 298776be..88ef012f 100644
--- a/mv_surveillance/surveillance/src/EventTriggerPersonAppearance.cpp
+++ b/mv_surveillance/surveillance/src/EventTriggerPersonAppearance.cpp
@@ -38,9 +38,11 @@ static const int MAX_VALUE_NAME_LENGHT = 255;
static const int DEFAULT_SKIP_FRAMES_COUNT = 6;
-static const int DEFAULT_FRAME_WIDTH = 640;
+static const int DEFAULT_FRAME_WIDTH = 320;
-static const int DEFAULT_FRAME_HEIGHT = 480;
+static const int DEFAULT_FRAME_HEIGHT = 240;
+
+static const time_t PERSON_LIFETIME = 2000000; /* usec */
static const cv::Size DEFAULT_FRAME_SIZE(DEFAULT_FRAME_WIDTH, DEFAULT_FRAME_HEIGHT);
@@ -48,11 +50,79 @@ static const cv::Rect ALL_IMAGE_RECT(0, 0, DEFAULT_FRAME_WIDTH, DEFAULT_FRAME_HE
static const cv::Size DEFAULT_DETECTION_STEPS = cv::Size(8, 8);
+static const float POSSIBLE_SIZE_DIFFERENCE = 0.3f;
+
+static const float MINIMAL_INTERSECTION = 0.1f;
+
+static const float TRACKING_MARGIN = 0.2f;
+
static const std::vector<float> DEFAULT_SVM_PEOPLE_DETECTOR =
cv::HOGDescriptor::getDefaultPeopleDetector();
namespace {
+cv::Rect operator&(cv::Rect r1, cv::Rect_<float> r2)
+{
+ cv::Rect r2int((int)r2.x, (int)r2.y, (int)r2.width, (int)r2.height);
+
+ return r1 & r2int;
+}
+
+cv::Rect operator|=(cv::Rect r1, cv::Rect_<float> r2)
+{
+ cv::Rect r2int((int)r2.x, (int)r2.y, (int)r2.width, (int)r2.height);
+
+ return r1 |= r2int;
+}
+
+cv::Rect_<float> cutRectForTracking(const cv::Rect_<float>& rect)
+{
+ cv::Rect_<float> res;
+
+ float widthCoof = rect.width * TRACKING_MARGIN;
+ float heightCoof = rect.height * TRACKING_MARGIN;
+
+ res.x = rect.x + widthCoof;
+ res.y = rect.y + heightCoof;
+ res.width = rect.width - widthCoof * 2;
+ res.height = rect.height - heightCoof * 2;
+
+ return res;
+}
+
+cv::Rect_<float> supplementRectAfterTracking(const cv::Rect_<float>& rect)
+{
+ cv::Rect_<float> res;
+
+ res.width = rect.width / (1 - TRACKING_MARGIN * 2);
+ res.height = rect.height / (1 - TRACKING_MARGIN * 2);
+ res.x = rect.x - (res.width - rect.width) / 2;
+ res.y = rect.y - (res.height - rect.height) / 2;
+
+ return res;
+}
+
+float sizeDifferenceFactor(const cv::Rect& r1, const cv::Rect& r2)
+{
+ float widthDiffFactor = r1.width / (float)r2.width;
+ float heightDiffFactor = r1.height / (float)r2.height;
+
+ if (widthDiffFactor > 1.f)
+ widthDiffFactor = 1.f / widthDiffFactor;
+ if (heightDiffFactor > 1.f)
+ heightDiffFactor = 1.f / heightDiffFactor;
+
+ return widthDiffFactor * heightDiffFactor *
+ (1.f - fabs(widthDiffFactor - heightDiffFactor));
+}
+
+bool isPossibleToMerge(const cv::Rect& r1, const cv::Rect& r2)
+{
+ return sizeDifferenceFactor(r1, r2) > POSSIBLE_SIZE_DIFFERENCE &&
+ ((r1.area() + r2.area()) * MINIMAL_INTERSECTION / 2.f) <
+ (r1 & r2).area();
+}
+
inline void convertRectMV2CV(const mv_rectangle_s& src, cv::Rect& dst)
{
dst.x = src.point.x;
@@ -71,6 +141,14 @@ inline void convertRectCV2MV(const cv::Rect& src, mv_rectangle_s& dst)
} /* Anonymous namespace*/
+EventTriggerPersonAppearance::TrackedRectangle::TrackedRectangle(cv::Rect _rect,
+ struct timeval _appearanceTime)
+{
+ rect = _rect;
+ appearanceTime = _appearanceTime;
+ tracker.reinforcement(cutRectForTracking(rect));
+}
+
int EventResultPersonAppearance::getResultValue(const char *valueName,
void *value) const
{
@@ -131,9 +209,10 @@ int EventResultPersonAppearance::getResultValue(const char *valueName,
EventTriggerPersonAppearance::EventTriggerPersonAppearance(
mv_surveillance_event_trigger_h eventTrigger, long int triggerId,
int videoStreamId, mv_surveillance_event_occurred_cb callback,
- void *userData, int numberOfPoints, mv_point_s *roi) :
+ void *userData, int numberOfPoints, mv_point_s *roi, bool isInternal) :
EventTrigger(eventTrigger, triggerId, videoStreamId, callback, userData,
- numberOfPoints, roi), __skipFramesCount(DEFAULT_SKIP_FRAMES_COUNT),
+ numberOfPoints, roi, isInternal),
+ __skipFramesCount(DEFAULT_SKIP_FRAMES_COUNT),
__frameCounter(0), __movementDetectedEventId(InternalTriggersCounter--),
__factorX(1.f), __factorY(1.f), __rectToDetect(ALL_IMAGE_RECT),
__rectToDetectPrevious(ALL_IMAGE_RECT), __trackedRects(),
@@ -142,10 +221,9 @@ EventTriggerPersonAppearance::EventTriggerPersonAppearance(
{
__hogClassifier.setSVMDetector(DEFAULT_SVM_PEOPLE_DETECTOR);
- EventManager::getInstance().registerEvent(
- NULL, __movementDetectedEventId,
- MV_SURVEILLANCE_EVENT_TYPE_MOVEMENT_DETECTED, videoStreamId,
- NULL, movementDetectedCB, this, numberOfPoints, roi);
+ EventManager::getInstance().registerEvent(NULL, __movementDetectedEventId,
+ MV_SURVEILLANCE_EVENT_TYPE_MOVEMENT_DETECTED, videoStreamId, NULL,
+ movementDetectedCB, this, numberOfPoints, roi, true);
}
EventTriggerPersonAppearance::~EventTriggerPersonAppearance()
@@ -256,25 +334,23 @@ void EventTriggerPersonAppearance::movementDetectedCB(
for (size_t j = 1u; j < numberOfMovementRegions; ++j)
trigger->__rectToDetect |= movementRegions[j];
- if (trigger->__rectToDetect.width
- < trigger->__hogClassifier.winSize.width
- || trigger->__rectToDetect.height
- < trigger->__hogClassifier.winSize.height)
+ if (trigger->__rectToDetect.width < trigger->__hogClassifier.winSize.width ||
+ trigger->__rectToDetect.height < trigger->__hogClassifier.winSize.height)
trigger->__rectToDetect |= trigger->__rectToDetectPrevious;
}
trigger->__rectToDetect &= ALL_IMAGE_RECT;
-
/* 4. Perform Hog detector or try to track using movement regions */
- if ((trigger->__skipFramesCount == 0 ||
- trigger->__frameCounter % trigger->__skipFramesCount == 0) &&
- (trigger->__rectToDetect != ALL_IMAGE_RECT)) {
+ if (trigger->__frameCounter % (trigger->__skipFramesCount + 1) == 0) {
+
+ trigger->__frameCounter = 0;
+
/* 4.1 Perform Hog detector */
TrackedRectanglesConstIter iter = trigger->__trackedRects.begin();
for (; iter != trigger->__trackedRects.end(); ++iter)
trigger->__rectToDetect |= iter->rect;
- // Slightly extend detection area...
+ /* Slightly extend detection area... */
const int xShift = .25f * trigger->__rectToDetect.width;
const int yShift = .25f * trigger->__rectToDetect.height;
trigger->__rectToDetect.x -= xShift / 2;
@@ -282,7 +358,7 @@ void EventTriggerPersonAppearance::movementDetectedCB(
trigger->__rectToDetect.width += xShift;
trigger->__rectToDetect.height += yShift;
trigger->__rectToDetect &= ALL_IMAGE_RECT;
- // and fit it to the HOG cell size
+ /* and fit it to the HOG cell size */
const int xRest = trigger->__rectToDetect.width % 8;
const int yRest = trigger->__rectToDetect.height % 8;
trigger->__rectToDetect.x += xRest / 2;
@@ -294,7 +370,7 @@ void EventTriggerPersonAppearance::movementDetectedCB(
trigger->__hogClassifier.detectMultiScale(
resizedImage(trigger->__rectToDetect), hogRects, 0,
- DEFAULT_DETECTION_STEPS, cv::Size(32, 32), 1.059, 2);
+ DEFAULT_DETECTION_STEPS, cv::Size(32, 32), 1.059, 5);
const size_t hogRectsSize = hogRects.size();
@@ -303,93 +379,178 @@ void EventTriggerPersonAppearance::movementDetectedCB(
hogRects[i].y += trigger->__rectToDetect.y;
}
- std::vector<bool> trackChecks(hogRectsSize, false);
- TrackedRectanglesIter trackRectIter = trigger->__trackedRects.begin();
- for (; trackRectIter != trigger->__trackedRects.end();
- ++trackRectIter) {
- size_t bestArea = 0;
- size_t bestIdx = 0;
- for (size_t idx = 0u; idx < hogRectsSize; ++idx) {
- if (trackChecks[idx])
- continue;
- const size_t curArea =
- (hogRects[idx] & trackRectIter->rect).area();
- if (bestArea < curArea) {
- bestArea = curArea;
- bestIdx = idx;
- }
- }
- if (bestArea > 10) {
- trackChecks[bestIdx] = true;
- trackRectIter->rect = hogRects[bestIdx];
- }
- }
+ /* Merge appearance and tracked rectangles */
+ std::vector<bool> appearedBusyRects =
+ trigger->reinforceTrackedPersons(hogRects);
+ /* Person appearance */
trigger->__appearedRects.clear();
for (size_t idx = 0u; idx < hogRectsSize; ++idx)
- if (!trackChecks[idx])
+ if (!appearedBusyRects[idx])
trigger->__appearedRects.push_back(hogRects[idx]);
- } else {
- /* 4.2 Try to track */
- CVRectanglesConstIter appearedIter = trigger->__appearedRects.begin();
- for (; appearedIter != trigger->__appearedRects.end(); ++appearedIter)
- trigger->__trackedRects.push_back(
- TrackedRectangle(*appearedIter, 7));
- trigger->__appearedRects.clear();
+ /* Person disappearance */
+ struct timeval time;
+ gettimeofday(&time, NULL);
+ TrackedRectanglesIter trackRectIter = trigger->__trackedRects.begin();
+ for (; trackRectIter != trigger->__trackedRects.end(); ++trackRectIter) {
+
+ time_t currentLifetime = 1000000 *
+ (time.tv_sec - trackRectIter->appearanceTime.tv_sec) +
+ (time.tv_usec - trackRectIter->appearanceTime.tv_usec);
+ if (currentLifetime > PERSON_LIFETIME) {
+
+ trigger->__disappearedRects.push_back(trackRectIter->rect);
+ trackRectIter = trigger->__trackedRects.erase(trackRectIter);
+ }
+ }
+ } else {
+ /* 4.2 Try to track */
TrackedRectanglesIter iter = trigger->__trackedRects.begin();
while (iter != trigger->__trackedRects.end()) {
- bool tracked = false;
- for (size_t j = 0u; j < numberOfMovementRegions; ++j) {
- cv::Rect rect = iter->rect;
- if ((rect & movementRegions[j]).area() != 0 &&
- movementRegions[j].area() <= 3 * rect.area() / 2) {
- cv::Rect r1 = rect | movementRegions[j];
- const int dx = r1.width - rect.width;
- const int dy = r1.height - rect.height;
+ cv::Rect_<float> trackingResult;
+ if (iter->tracker.track(result->__grayImage, trackingResult))
+ iter->rect = supplementRectAfterTracking(trackingResult);
- if (r1.x < movementRegions[j].x)
- r1.x += dx;
- else if (r1.x > movementRegions[j].x)
- r1.x -= dx;
+ ++iter;
+ }
+ }
- if (r1.y < movementRegions[j].y)
- r1.y += dy;
- else if (r1.y > movementRegions[j].y)
- r1.y -= dy;
+ trigger->__rectToDetectPrevious = trigger->__rectToDetect;
+ ++trigger->__frameCounter;
- r1.height = rect.height;
- r1.width = rect.width;
+ /* 5. Update event result and run callbacks */
+ trigger->runCallbacks(source);
+
+ /* 6. Migrate appeared persons to array of tracking persons */
+ struct timeval time;
+ gettimeofday(&time, NULL);
+
+ CVRectanglesConstIter appearedIter = trigger->__appearedRects.begin();
+ for (; appearedIter != trigger->__appearedRects.end(); ++appearedIter)
+ trigger->__trackedRects.push_back(
+ TrackedRectangle(*appearedIter, time));
+ trigger->__appearedRects.clear();
- iter->rect = r1;
+ /* 7. Clear array of disappeared persons */
+ trigger->__disappearedRects.clear();
+}
- tracked = true;
+std::vector<bool> EventTriggerPersonAppearance::reinforceTrackedPersons(
+ const CVRectangles& appearedPersons)
+{
+ const size_t hogRectsSize = appearedPersons.size();
+ std::vector<bool> appearedBusyRects(hogRectsSize, false);
+ if (__trackedRects.size() > 0u && hogRectsSize > 0u) {
+
+ TrackedRectanglesIter trackRectIter = __trackedRects.begin();
+ std::vector<std::vector<size_t> > intersectionAreas(
+ __trackedRects.size(), std::vector<size_t>(hogRectsSize, 0u));
+ std::vector<std::vector<size_t> > confidence(
+ __trackedRects.size(), std::vector<size_t>(hogRectsSize, 0u));
+
+ /* Merge tracked -> appearance */
+ for (size_t trIdx = 0u; trackRectIter != __trackedRects.end();
+ ++trackRectIter, ++trIdx) {
+
+ size_t bestIdx = 0u;
+ bool haveRes = false;
+ for (size_t apIdx = 0u; apIdx < hogRectsSize; ++apIdx) {
+
+ intersectionAreas[trIdx][apIdx] =
+ (appearedPersons[apIdx] & trackRectIter->rect).area();
+
+ if (intersectionAreas[trIdx][apIdx] > 0 &&
+ (intersectionAreas[trIdx][apIdx] >
+ intersectionAreas[trIdx][bestIdx] || !haveRes)) {
+
+ bestIdx = apIdx;
+ haveRes = true;
}
}
- if (tracked) {
- ++iter;
- } else {
- if (iter->framesCount == 0) {
- trigger->__disappearedRects.push_back(iter->rect);
- iter = trigger->__trackedRects.erase(iter);
- } else {
- --(iter->framesCount);
- ++iter;
+ if (haveRes)
+ confidence[trIdx][bestIdx] += intersectionAreas[trIdx][bestIdx] *
+ sizeDifferenceFactor(trackRectIter->rect, appearedPersons[bestIdx]);
+ }
+
+ /* Merge appearance -> tracked */
+ for (size_t apIdx = 0u; apIdx < hogRectsSize; ++apIdx) {
+
+ trackRectIter = __trackedRects.begin();
+ size_t bestIdx = 0u;
+ bool haveRes = false;
+ cv::Rect bestTrackedRect = trackRectIter->rect;
+
+ for (size_t trIdx = 0u; trackRectIter != __trackedRects.end();
+ ++trackRectIter, ++trIdx) {
+
+ if (intersectionAreas[trIdx][apIdx] > 0 &&
+ (intersectionAreas[trIdx][apIdx] >
+ intersectionAreas[bestIdx][apIdx] || !haveRes)) {
+
+ bestIdx = trIdx;
+ bestTrackedRect = trackRectIter->rect;
+ haveRes = true;
}
}
+
+ if (haveRes)
+ confidence[bestIdx][apIdx] += intersectionAreas[bestIdx][apIdx] *
+ sizeDifferenceFactor(bestTrackedRect, appearedPersons[apIdx]);
}
- }
- trigger->__rectToDetectPrevious = trigger->__rectToDetect;
- ++trigger->__frameCounter;
+ /* Final merge */
+ trackRectIter = __trackedRects.begin();
+ for (size_t trIdx = 0u; trackRectIter != __trackedRects.end();
+ ++trackRectIter, ++trIdx) {
- /* 5. Update event result and run callbacks */
- trigger->runCallbacks(source);
+ bool haveRes = false;
+ size_t bestIdx = 0u;
- trigger->__disappearedRects.clear();
+ for (size_t apIdx = 0u; apIdx < hogRectsSize; ++apIdx) {
+ if (!appearedBusyRects[apIdx] && (!haveRes ||
+ confidence[trIdx][apIdx] > confidence[trIdx][bestIdx])) {
+
+ bestIdx = apIdx;
+ haveRes = true;
+ }
+ }
+
+ if (isPossibleToMerge(trackRectIter->rect, appearedPersons[bestIdx]) &&
+ haveRes) {
+
+ appearedBusyRects[bestIdx] = true;
+
+ struct timeval time;
+ gettimeofday(&time, NULL);
+
+ trackRectIter->appearanceTime = time;
+ trackRectIter->rect = appearedPersons[bestIdx];
+ trackRectIter->tracker.reinforcement(
+ cutRectForTracking(trackRectIter->rect));
+ }
+ }
+
+ for (size_t apIdx = 0u; apIdx < hogRectsSize; ++apIdx) {
+ if (!appearedBusyRects[apIdx]) {
+ trackRectIter = __trackedRects.begin();
+ for (;trackRectIter != __trackedRects.end();
+ ++trackRectIter) {
+ if (isPossibleToMerge(trackRectIter->rect,
+ appearedPersons[apIdx]) && (appearedPersons[apIdx].area() / 2.f) <
+ (appearedPersons[apIdx] & trackRectIter->rect).area()) {
+ appearedBusyRects[apIdx] = true;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ return appearedBusyRects;
}
void EventTriggerPersonAppearance::runCallbacks(mv_source_h source)
@@ -435,16 +596,17 @@ void EventTriggerPersonAppearance::runCallbacks(mv_source_h source)
__eventResult->__trackedLocations[i].height /= __factorY;
}
- // Don't invoke the callback if no appearance, disappearance or tracking
- if (appearedLocationsSize > 0 || disappearedLocationsSize > 0
- || trackedLocationsSize > 0) {
- CallbackDataMapConstIter iter = __callbackDataMap.begin();
+ /* Don't invoke the callback if no appearance, disappearance or tracking */
+ CallbackDataMapConstIter iter = __callbackDataMap.begin();
- for (; iter != __callbackDataMap.end(); ++iter) {
- mv_surveillance_event_occurred_cb callback = iter->second.callback;
+ for (; iter != __callbackDataMap.end(); ++iter) {
+ mv_surveillance_event_occurred_cb callback = iter->second.callback;
+
+
+ if (appearedLocationsSize > 0 || disappearedLocationsSize > 0
+ || trackedLocationsSize > 0 || iter->second.isInternal)
callback(iter->second.eventTrigger, source, __videoStreamId,
__eventResult, iter->second.userData);
- }
}
}
diff --git a/mv_surveillance/surveillance/src/EventTriggerPersonRecognition.cpp b/mv_surveillance/surveillance/src/EventTriggerPersonRecognition.cpp
index 7fff0945..caab6775 100644
--- a/mv_surveillance/surveillance/src/EventTriggerPersonRecognition.cpp
+++ b/mv_surveillance/surveillance/src/EventTriggerPersonRecognition.cpp
@@ -97,13 +97,15 @@ EventTriggerPersonRecognition::EventTriggerPersonRecognition(
mv_surveillance_event_occurred_cb callback,
void *userData,
int numberOfPoints,
- mv_point_s *roi) : EventTrigger(eventTrigger,
+ mv_point_s *roi,
+ bool isInternal) : EventTrigger(eventTrigger,
triggerId,
videoStreamId,
callback,
userData,
numberOfPoints,
- roi),
+ roi,
+ isInternal),
__faceRecognitionModel(NULL),
__lastFrame(NULL),
__eventResult(new EventResultPersonRecognition())
@@ -344,7 +346,7 @@ void EventTriggerPersonRecognition::faceDetectedCB(
return;
}
- EventTriggerPersonRecognition *recognitionTrigger =
+ EventTriggerPersonRecognition *trigger =
(EventTriggerPersonRecognition*)user_data;
int location_idx = 0;
@@ -353,11 +355,11 @@ void EventTriggerPersonRecognition::faceDetectedCB(
const int error = mv_face_recognize(
source,
- recognitionTrigger->__faceRecognitionModel,
+ trigger->__faceRecognitionModel,
NULL,
&faces_locations[location_idx],
faceRecognizedCB,
- recognitionTrigger);
+ trigger);
if (error != MEDIA_VISION_ERROR_NONE) {
LOGW("Face recognition for one model failed. Continue");
@@ -366,6 +368,20 @@ void EventTriggerPersonRecognition::faceDetectedCB(
LOGI("Face has been successfully recognized");
}
+
+ CallbackDataMapConstIter iter = trigger->__callbackDataMap.begin();
+
+ for (; iter != trigger->__callbackDataMap.end(); ++iter) {
+ mv_surveillance_event_occurred_cb callback = iter->second.callback;
+
+ if (trigger->__eventResult->__locations.size() > 0 || iter->second.isInternal)
+ callback(
+ iter->second.eventTrigger,
+ trigger->__lastFrame,
+ trigger->__videoStreamId,
+ trigger->__eventResult,
+ iter->second.userData);
+ }
}
void EventTriggerPersonRecognition::faceRecognizedCB(
@@ -386,18 +402,6 @@ void EventTriggerPersonRecognition::faceRecognizedCB(
(EventTriggerPersonRecognition*) user_data;
trigger->setEventResults(*face_location, *face_label, confidence);
-
- CallbackDataMapConstIter iter = trigger->__callbackDataMap.begin();
-
- for (; iter != trigger->__callbackDataMap.end(); ++iter) {
- mv_surveillance_event_occurred_cb callback = iter->second.callback;
- callback(
- iter->second.eventTrigger,
- trigger->__lastFrame,
- trigger->__videoStreamId,
- trigger->__eventResult,
- iter->second.userData);
- }
}
} /* surveillance */
diff --git a/mv_surveillance/surveillance/src/MFTracker.cpp b/mv_surveillance/surveillance/src/MFTracker.cpp
new file mode 100644
index 00000000..b5e6da59
--- /dev/null
+++ b/mv_surveillance/surveillance/src/MFTracker.cpp
@@ -0,0 +1,371 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "MFTracker.h"
+
+#include <opencv/cv.h>
+
+namespace mediavision {
+namespace surveillance {
+
+namespace {
+const float FLOATEPS = 10e-6f;
+
+/*
+ * Returns the median of the first @size elements of @values
+ * (the whole vector when size == -1). Sorts a local copy, so the
+ * caller's vector is left unmodified. For an even count the two
+ * middle elements are averaged.
+ */
+template<typename T>
+T getMedian(std::vector<T>& values, int size = -1)
+{
+ if (size == -1)
+ size = (int)values.size();
+
+ /* Sort only the prefix that was requested. */
+ std::vector<T> copy(values.begin(), values.begin() + size);
+ std::sort(copy.begin(), copy.end());
+ if (size%2 == 0) {
+ return (copy[size / 2 - 1] + copy[size/2]) / ((T)2.0);
+ } else {
+ return copy[(size - 1) / 2];
+ }
+}
+
+/* Euclidean (L2) distance between two 2D points. */
+inline float l2distance(cv::Point2f p1, cv::Point2f p2)
+{
+ const float dx = p1.x - p2.x;
+ const float dy = p1.y - p2.y;
+ return sqrtf(dx * dx + dy * dy);
+}
+} /* anonymous namespace */
+
+/*
+ * Default Median Flow parameters: a 10x10 grid of tracked points
+ * inside the bounding box, a 3x3 Lucas-Kanade search window, and
+ * 5 pyramid levels for the optical flow.
+ */
+MFTracker::Params::Params()
+{
+ mPointsInGrid = 10;
+ mWindowSize = cv::Size(3, 3);
+ mPyrMaxLevel = 5;
+}
+
+/*
+ * Constructs an uninitialized tracker. The LK termination criteria
+ * (max 20 iterations or epsilon 0.3) are fixed here; the tracker
+ * initializes lazily on the first track() call.
+ */
+MFTracker::MFTracker(Params params) :
+ __isInit(false),
+ __params(params),
+ __termcrit(cv::TermCriteria::COUNT | cv::TermCriteria::EPS, 20, 0.3),
+ __confidence(0.0f)
+{
+}
+
+/*
+ * Tracks the bounding box into @frame and writes it to @result.
+ * On the first call (or after reinforcement()) the tracker is
+ * (re)initialized from the current __boundingBox; a degenerate box
+ * aborts with false. On a failed update the init flag is dropped so
+ * the next call re-initializes. Returns true on success.
+ */
+bool MFTracker::track(const cv::Mat& frame, cv::Rect_<float>& result)
+{
+ if (!__isInit) {
+ if (__boundingBox.width <= 0 || __boundingBox.height <= 0)
+ return false;
+ if (!init(frame))
+ return false;
+ } else {
+ if (!update(frame)) {
+ /* Tracking lost: force re-initialization next time. */
+ __isInit = false;
+ return false;
+ }
+ }
+
+ result = __boundingBox;
+
+ return true;
+}
+
+/*
+ * Overrides the tracked region with an externally detected
+ * @location and clears the init flag, so the next track() call
+ * rebuilds the tracker state from this box.
+ */
+void MFTracker::reinforcement(const cv::Rect_<float>& location)
+{
+ __isInit = false;
+
+ __boundingBox = location;
+}
+
+/*
+ * Initializes the tracker from @image: stores a copy as the
+ * reference frame and builds its optical-flow pyramid. Returns
+ * false for an empty image, true otherwise.
+ */
+bool MFTracker::init(const cv::Mat& image)
+{
+ if (image.empty())
+ return false;
+
+ image.copyTo(__image);
+ buildOpticalFlowPyramid(
+ __image,
+ __pyramid,
+ __params.mWindowSize,
+ __params.mPyrMaxLevel);
+
+ __isInit = true;
+ return __isInit;
+}
+
+/*
+ * Advances tracking from the stored reference frame to @image.
+ * On success the reference frame and __boundingBox are replaced by
+ * the new frame/box; on failure the previous state is kept and
+ * false is returned.
+ */
+bool MFTracker::update(const cv::Mat& image)
+{
+ if (!__isInit || image.empty())
+ return false;
+
+ /* Handles such behaviour when preparation frame has the size
+ * different to the tracking frame size. In such case, we resize preparation
+ * frame and bounding box. Then, track as usually: */
+ if (__image.rows != image.rows || __image.cols != image.cols) {
+ const float xFactor = static_cast<float>(image.cols) / __image.cols;
+ const float yFactor = static_cast<float>(image.rows) / __image.rows;
+
+ resize(__image, __image, cv::Size(), xFactor, yFactor);
+
+ __boundingBox.x *= xFactor;
+ __boundingBox.y *= yFactor;
+ __boundingBox.width *= xFactor;
+ __boundingBox.height *= yFactor;
+
+ /* NOTE(review): __pyramid is not rebuilt here, so medianFlowImpl()
+ * below still uses the pyramid of the pre-resize frame — confirm
+ * this is intended (looks like a latent scaling mismatch). */
+ }
+
+ cv::Mat oldImage = __image;
+
+ cv::Rect_<float> oldBox = __boundingBox;
+ if(!medianFlowImpl(oldImage, image, oldBox))
+ return false;
+
+ /* Commit new reference frame and box only on success. */
+ image.copyTo(__image);
+ __boundingBox = oldBox;
+ return true;
+}
+
+/* True when the tracker holds a valid reference frame/pyramid. */
+bool MFTracker::isInited() const
+{
+ return __isInit;
+}
+
+/* Confidence of the last medianFlowImpl() run, clamped to [0, 1]. */
+float MFTracker::getLastConfidence() const
+{
+ return __confidence;
+}
+
+/* Most recent tracked bounding box (frame coordinates). */
+cv::Rect_<float> MFTracker::getLastBoundingBox() const
+{
+ return __boundingBox;
+}
+
+/*
+ * Core Median Flow step: seeds a regular grid of points inside
+ * @oldBox, tracks them with pyramidal Lucas-Kanade into
+ * @newImage_gray, filters unreliable points by forward-backward
+ * error and normalized cross-correlation, then votes a new box via
+ * median shift/scale. On success writes the candidate box to
+ * @oldBox, updates __confidence, swaps in the new pyramid, and
+ * returns true.
+ */
+bool MFTracker::medianFlowImpl(
+ cv::Mat oldImage_gray, cv::Mat newImage_gray, cv::Rect_<float>& oldBox)
+{
+ std::vector<cv::Point2f> pointsToTrackOld, pointsToTrackNew;
+
+ /* Seed points at cell centers of a mPointsInGrid^2 grid. */
+ const float gridXStep = oldBox.width / __params.mPointsInGrid;
+ const float gridYStep = oldBox.height / __params.mPointsInGrid;
+ for (int i = 0; i < __params.mPointsInGrid; i++)
+ for (int j = 0; j < __params.mPointsInGrid; j++) {
+ pointsToTrackOld.push_back(
+ cv::Point2f(oldBox.x + .5f*gridXStep + 1.f*gridXStep*j,
+ oldBox.y + .5f*gridYStep + 1.f*gridYStep*i));
+ }
+
+ const size_t numberOfPointsToTrackOld = pointsToTrackOld.size();
+ std::vector<uchar> status(numberOfPointsToTrackOld);
+ std::vector<float> errors(numberOfPointsToTrackOld);
+
+ std::vector<cv::Mat> tempPyramid;
+ cv::buildOpticalFlowPyramid(
+ newImage_gray,
+ tempPyramid,
+ __params.mWindowSize,
+ __params.mPyrMaxLevel);
+
+ /* Forward flow: reference pyramid -> new frame pyramid. */
+ cv::calcOpticalFlowPyrLK(__pyramid,
+ tempPyramid,
+ pointsToTrackOld,
+ pointsToTrackNew,
+ status,
+ errors,
+ __params.mWindowSize,
+ __params.mPyrMaxLevel,
+ __termcrit);
+
+ /* Raw displacements of points LK considers successfully tracked.
+ * NOTE(review): di is collected BEFORE the FB/NCC filtering below,
+ * so the confidence statistic includes points that are later
+ * discarded — confirm this matches the intended algorithm. */
+ std::vector<cv::Point2f> di;
+ for (size_t idx = 0u; idx < numberOfPointsToTrackOld; idx++)
+ if (status[idx] == 1)
+ di.push_back(pointsToTrackNew[idx] - pointsToTrackOld[idx]);
+
+ /* Keep only points passing both forward-backward and NCC checks. */
+ std::vector<bool> filter_status;
+ check_FB(tempPyramid,
+ pointsToTrackOld,
+ pointsToTrackNew,
+ filter_status);
+ check_NCC(oldImage_gray,
+ newImage_gray,
+ pointsToTrackOld,
+ pointsToTrackNew,
+ filter_status);
+
+ /* In-place compaction; the idx--/idx++ pair re-examines the slot
+ * just erased (size_t wraparound at idx==0 is well-defined and
+ * cancels out with the loop increment). */
+ for (size_t idx = 0u; idx < pointsToTrackOld.size(); idx++)
+ if (!filter_status[idx]) {
+ pointsToTrackOld.erase(pointsToTrackOld.begin() + idx);
+ pointsToTrackNew.erase(pointsToTrackNew.begin() + idx);
+ filter_status.erase(filter_status.begin() + idx);
+ idx--;
+ }
+
+ if (pointsToTrackOld.empty() || di.empty())
+ return false;
+
+ cv::Point2f mDisplacement;
+ cv::Rect_<float> boxCandidate =
+ vote(pointsToTrackOld, pointsToTrackNew, oldBox, mDisplacement);
+
+ /* Residual displacement magnitudes around the median motion. */
+ std::vector<float> displacements;
+ for (size_t idx = 0u; idx < di.size(); idx++) {
+ di[idx] -= mDisplacement;
+ displacements.push_back(sqrt(di[idx].ddot(di[idx])));
+ }
+
+ /* Heuristic: median residual of 10 px or more => zero confidence. */
+ __confidence =
+ (10.f - getMedian(displacements, static_cast<int>(displacements.size()))) / 10.f;
+
+ if (__confidence < 0.f) {
+ __confidence = 0.f;
+ return false;
+ }
+
+ __pyramid.swap(tempPyramid);
+ oldBox = boxCandidate;
+ return true;
+}
+
+/*
+ * Votes the new bounding box from point correspondences: translates
+ * the box center by the median x/y shift (also returned via @mD)
+ * and rescales it by the median ratio of pairwise point distances.
+ * A single correspondence yields a pure translation.
+ */
+cv::Rect_<float> MFTracker::vote(
+ const std::vector<cv::Point2f>& oldPoints,
+ const std::vector<cv::Point2f>& newPoints,
+ const cv::Rect_<float>& oldRect,
+ cv::Point2f& mD)
+{
+ cv::Rect_<float> newRect;
+ cv::Point2f newCenter(
+ oldRect.x + oldRect.width / 2.f,
+ oldRect.y + oldRect.height / 2.f);
+
+ const int n = (int)oldPoints.size();
+ /* Scratch buffer large enough for n*(n-1)/2 pairwise ratios. */
+ std::vector<float> buf(std::max(n*(n-1) / 2, 3), 0.f);
+
+ if(oldPoints.size() == 1) {
+ newRect.x = oldRect.x+newPoints[0].x-oldPoints[0].x;
+ newRect.y = oldRect.y+newPoints[0].y-oldPoints[0].y;
+ newRect.width = oldRect.width;
+ newRect.height = oldRect.height;
+
+ return newRect;
+ }
+
+ float xshift = 0.f;
+ float yshift = 0.f;
+ for (int i = 0; i < n; i++)
+ buf[i] = newPoints[i].x - oldPoints[i].x;
+
+ xshift = getMedian(buf, n);
+ newCenter.x += xshift;
+ for (int idx = 0; idx < n; idx++)
+ buf[idx] = newPoints[idx].y - oldPoints[idx].y;
+
+ yshift = getMedian(buf, n);
+ newCenter.y += yshift;
+ mD = cv::Point2f(xshift, yshift);
+
+ /* NOTE(review): this branch is unreachable — the size()==1 case
+ * already returned above. Dead code kept as committed. */
+ if (oldPoints.size() == 1) {
+ newRect.x = newCenter.x - oldRect.width / 2.f;
+ newRect.y = newCenter.y - oldRect.height / 2.f;
+ newRect.width = oldRect.width;
+ newRect.height = oldRect.height;
+
+ return newRect;
+ }
+
+ /* Scale = median of new/old pairwise distance ratios. */
+ float nd = 0.f;
+ float od = 0.f;
+ for (int i = 0, ctr = 0; i < n; i++)
+ for (int j = 0; j < i; j++) {
+ nd = l2distance(newPoints[i], newPoints[j]);
+ od = l2distance(oldPoints[i], oldPoints[j]);
+ buf[ctr] = (od == 0.f ? 0.f : nd / od);
+ ctr++;
+ }
+
+ float scale = getMedian(buf, n*(n-1) / 2);
+ newRect.x = newCenter.x - scale * oldRect.width / 2.f;
+ newRect.y = newCenter.y-scale * oldRect.height / 2.f;
+ newRect.width = scale * oldRect.width;
+ newRect.height = scale * oldRect.height;
+
+ return newRect;
+}
+
+/*
+ * Forward-backward check: tracks @newPoints back into the reference
+ * pyramid and marks in @status only those points whose reprojection
+ * lands closer to the original location than the median FB error.
+ * @status is created all-true when empty, otherwise AND-combined.
+ */
+void MFTracker::check_FB(
+ std::vector<cv::Mat> newPyramid,
+ const std::vector<cv::Point2f>& oldPoints,
+ const std::vector<cv::Point2f>& newPoints,
+ std::vector<bool>& status)
+{
+ const size_t numberOfOldPoints = oldPoints.size();
+
+ if (status.empty())
+ status = std::vector<bool>(numberOfOldPoints, true);
+
+ std::vector<uchar> LKstatus(numberOfOldPoints);
+ std::vector<float> errors(numberOfOldPoints);
+ std::vector<float> FBerror(numberOfOldPoints);
+ std::vector<cv::Point2f> pointsToTrackReprojection;
+
+ /* Backward flow: new frame -> reference frame. */
+ calcOpticalFlowPyrLK(newPyramid,
+ __pyramid,
+ newPoints,
+ pointsToTrackReprojection,
+ LKstatus,
+ errors,
+ __params.mWindowSize,
+ __params.mPyrMaxLevel,
+ __termcrit);
+
+ /* NOTE(review): FBerror is computed for every point, ignoring
+ * LKstatus — points whose backward track failed still contribute
+ * to the median. Confirm this is intended. */
+ for (size_t idx = 0u; idx < numberOfOldPoints; idx++)
+ FBerror[idx] = l2distance(oldPoints[idx], pointsToTrackReprojection[idx]);
+
+ /* FLOATEPS keeps a strict "<" from rejecting everything when all
+ * errors are equal. */
+ float FBerrorMedian = getMedian(FBerror) + FLOATEPS;
+ for (size_t idx = 0u; idx < numberOfOldPoints; idx++)
+ status[idx] = (FBerror[idx] < FBerrorMedian);
+}
+
+/*
+ * Normalized cross-correlation check: compares 30x30 patches around
+ * each old/new point pair and keeps (ANDs into @status) only points
+ * whose NCC exceeds the median NCC. Expects @status to be sized by
+ * a preceding check_FB() call.
+ */
+void MFTracker::check_NCC(
+ const cv::Mat& oldImage,
+ const cv::Mat& newImage,
+ const std::vector<cv::Point2f>& oldPoints,
+ const std::vector<cv::Point2f>& newPoints,
+ std::vector<bool>& status)
+{
+ std::vector<float> NCC(oldPoints.size(), 0.f);
+ cv::Size patch(30, 30);
+ cv::Mat p1;
+ cv::Mat p2;
+
+ for (size_t idx = 0u; idx < oldPoints.size(); idx++) {
+ getRectSubPix(oldImage, patch, oldPoints[idx], p1);
+ getRectSubPix(newImage, patch, newPoints[idx], p2);
+
+ /* N = 900 = patch area (30*30). */
+ const int N = 900;
+ const float s1 = sum(p1)(0);
+ const float s2 = sum(p2)(0);
+ const float n1 = norm(p1);
+ const float n2 = norm(p2);
+ const float prod = p1.dot(p2);
+ const float sq1 = sqrt(n1 * n1 - s1 * s1 / N);
+ const float sq2 = sqrt(n2 * n2 - s2 * s2 / N);
+ /* NOTE(review): when sq2 == 0 and sq1 == 0 (two flat patches)
+ * this divides 0/0 and yields NaN — confirm acceptable. */
+ NCC[idx] = (sq2 == 0 ? sq1 / std::abs(sq1)
+ : (prod - s1 * s2 / N) / sq1 / sq2);
+ }
+
+ float median = getMedian(NCC) - FLOATEPS;
+ for(size_t idx = 0u; idx < oldPoints.size(); idx++)
+ status[idx] = status[idx] && (NCC[idx] > median);
+}
+
+} /* surveillance */
+} /* mediavision */
diff --git a/mv_surveillance/surveillance/src/mv_surveillance_open.cpp b/mv_surveillance/surveillance/src/mv_surveillance_open.cpp
index 4d4c3281..0f303e49 100644
--- a/mv_surveillance/surveillance/src/mv_surveillance_open.cpp
+++ b/mv_surveillance/surveillance/src/mv_surveillance_open.cpp
@@ -42,7 +42,8 @@ int mv_surveillance_subscribe_event_trigger_open(
callback,
user_data,
handle->number_of_roi_points,
- handle->roi);
+ handle->roi,
+ false);
}
int mv_surveillance_unsubscribe_event_trigger_open(
diff --git a/mv_surveillance/surveillance_lic/include/mv_surveillance_lic.h b/mv_surveillance/surveillance_lic/include/mv_surveillance_lic.h
index 734a5cb1..4ad91599 100644
--- a/mv_surveillance/surveillance_lic/include/mv_surveillance_lic.h
+++ b/mv_surveillance/surveillance_lic/include/mv_surveillance_lic.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __TIZEN_MEDIAVISION_SURVEILLANCE_LIC_H__
-#define __TIZEN_MEDIAVISION_SURVEILLANCE_LIC_H__
+#ifndef __MEDIA_VISION_SURVEILLANCE_LIC_H__
+#define __MEDIA_VISION_SURVEILLANCE_LIC_H__
#include <mv_surveillance.h>
@@ -38,6 +38,8 @@ extern "C" {
* @ref mv_surveillance_unsubscribe_event_trigger_lic().
* @param [in] event_trigger The event trigger activating calls of the
* @a callback function
+ * @param [in] video_stream_id The identifier of the video stream for which
+ * event trigger activation will be checked
* @param [in] engine_cfg The engine configuration of the event
* @param [in] callback Callback to be called each time when event
* occurrence is detected
@@ -53,10 +55,10 @@ extern "C" {
* @see mv_surveillance_unsubscribe_event_trigger_lic()
*/
int mv_surveillance_subscribe_event_trigger_lic(
- mv_surveillance_event_trigger_h event_trigger,
- mv_engine_config_h engine_cfg,
- mv_surveillance_event_occurred_cb callback,
- void *user_data);
+ mv_surveillance_event_trigger_h event_trigger,
+ int video_stream_id, mv_engine_config_h engine_cfg,
+ mv_surveillance_event_occurred_cb callback,
+ void *user_data);
/**
* @brief Allows to unsubscribe from the event and stop calling @a callback.
@@ -64,8 +66,10 @@ int mv_surveillance_subscribe_event_trigger_lic(
* @since_tizen 3.0
* @remarks To start handling trigger activation use
@ref mv_surveillance_subscribe_event_trigger_lic().
- * @param [in] event_trigger The event trigger for which subscription will be
- * stopped
+ * @param [in] event_trigger The event trigger for which subscription will
+ * be stopped
+ * @param [in] video_stream_id The identifier of the video source for which
+ * subscription will be stopped
* @return @c 0 on success, otherwise a negative error value
* @retval #MEDIA_VISION_ERROR_NONE Successful
* @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
@@ -77,7 +81,8 @@ int mv_surveillance_subscribe_event_trigger_lic(
* @see mv_surveillance_subscribe_event_trigger_lic()
*/
int mv_surveillance_unsubscribe_event_trigger_lic(
- mv_surveillance_event_trigger_h event_trigger);
+ mv_surveillance_event_trigger_h event_trigger,
+ int video_stream_id);
/**
* @brief Allows to push source to the event trigger and start calling @a callback.
@@ -96,8 +101,8 @@ int mv_surveillance_unsubscribe_event_trigger_lic(
* @see mv_surveillance_unsubscribe_event_trigger_lic()
*/
int mv_surveillance_push_source_lic(
- mv_source_h source,
- int video_stream_id);
+ mv_source_h source,
+ int video_stream_id);
/**
* @brief Starts traversing through list of supported event types.
@@ -119,8 +124,8 @@ int mv_surveillance_push_source_lic(
* @see mv_surveillance_foreach_event_result_value_name_lic()
*/
int mv_surveillance_foreach_event_type_lic(
- mv_surveillance_event_type_cb callback,
- void *user_data);
+ mv_surveillance_event_type_cb callback,
+ void *user_data);
/**
* @brief Starts traversing through list of supported event result value names.
@@ -148,9 +153,9 @@ int mv_surveillance_foreach_event_type_lic(
* @see mv_surveillance_get_result_value_lic()
*/
int mv_surveillance_foreach_event_result_value_name_lic(
- const char *event_type,
- mv_surveillance_event_result_value_name_cb callback,
- void *user_data);
+ const char *event_type,
+ mv_surveillance_event_result_value_name_cb callback,
+ void *user_data);
/**
* @brief Gets result value.
@@ -176,12 +181,12 @@ int mv_surveillance_foreach_event_result_value_name_lic(
* @see mv_surveillance_query_events_lic()
*/
int mv_surveillance_get_result_value_lic(
- mv_surveillance_result_h result,
- const char *value_name,
- void *value);
+ mv_surveillance_result_h result,
+ const char *value_name,
+ void *value);
#ifdef __cplusplus
}
#endif /* __cplusplus */
-#endif /* __TIZEN_MEDIAVISION_SURVEILLANCE_LIC_H__ */
+#endif /* __MEDIA_VISION_SURVEILLANCE_LIC_H__ */
diff --git a/mv_surveillance/surveillance_lic/src/mv_surveillance_lic.c b/mv_surveillance/surveillance_lic/src/mv_surveillance_lic.c
index 299a87d5..0a9c126c 100644
--- a/mv_surveillance/surveillance_lic/src/mv_surveillance_lic.c
+++ b/mv_surveillance/surveillance_lic/src/mv_surveillance_lic.c
@@ -18,6 +18,7 @@
int mv_surveillance_subscribe_event_trigger_lic(
mv_surveillance_event_trigger_h event_trigger,
+ int video_stream_id,
mv_engine_config_h engine_cfg,
mv_surveillance_event_occurred_cb callback,
void *user_data)
@@ -26,7 +27,8 @@ int mv_surveillance_subscribe_event_trigger_lic(
}
int mv_surveillance_unsubscribe_event_trigger_lic(
- mv_surveillance_event_trigger_h event_trigger)
+ mv_surveillance_event_trigger_h event_trigger,
+ int video_stream_id)
{
return MEDIA_VISION_ERROR_NOT_SUPPORTED;
}
@@ -47,7 +49,7 @@ int mv_surveillance_foreach_event_type_lic(
int mv_surveillance_foreach_event_result_value_name_lic(
const char *event_type,
- mv_surveillance_event_result_value_name_cb callback,
+ mv_surveillance_event_result_name_cb callback,
void *user_data)
{
return MEDIA_VISION_ERROR_NOT_SUPPORTED;
diff --git a/packaging/capi-media-vision.spec b/packaging/capi-media-vision.spec
index ea2be680..3f8bf781 100644
--- a/packaging/capi-media-vision.spec
+++ b/packaging/capi-media-vision.spec
@@ -1,7 +1,7 @@
Name: capi-media-vision
Summary: Media Vision library for Tizen Native API
-Version: 0.3.20
-Release: 4
+Version: 0.3.21
+Release: 1
Group: Multimedia/Framework
License: Apache-2.0 and BSD-2.0
Source0: %{name}-%{version}.tar.gz
diff --git a/src/mv_barcode.c b/src/mv_barcode.c
index 74ebee96..460c6243 100644
--- a/src/mv_barcode.c
+++ b/src/mv_barcode.c
@@ -181,8 +181,8 @@ int mv_barcode_generate_image(
MEDIA_VISION_FUNCTION_ENTER();
if (image_path == NULL) {
- LOGE("image_path is NULL\n");
- return MEDIA_VISION_ERROR_INVALID_PATH;
+ LOGE("image_path is NULL");
+ return MEDIA_VISION_ERROR_INVALID_PATH;
}
if (type < MV_BARCODE_QR ||
diff --git a/src/mv_common.c b/src/mv_common.c
index 24e0c43b..8ba3b834 100644
--- a/src/mv_common.c
+++ b/src/mv_common.c
@@ -330,8 +330,8 @@ int mv_engine_config_foreach_supported_attribute(
MEDIA_VISION_NULL_ARG_CHECK(callback);
MEDIA_VISION_FUNCTION_ENTER();
- int ret =
- mv_engine_config_foreach_supported_attribute_c(callback, user_data);
+ int ret = mv_engine_config_foreach_supported_attribute_c(
+ callback, user_data);
MEDIA_VISION_FUNCTION_LEAVE();
return ret;
diff --git a/src/mv_face.c b/src/mv_face.c
index 3bbb0f85..ffde256e 100644
--- a/src/mv_face.c
+++ b/src/mv_face.c
@@ -473,7 +473,7 @@ int mv_face_recognition_model_add(
face_label);
#else
-ret = mv_face_recognition_model_add_open(
+ ret = mv_face_recognition_model_add_open(
source,
recognition_model,
example_location,
@@ -492,7 +492,7 @@ int mv_face_recognition_model_reset(
MEDIA_VISION_SUPPORT_CHECK(__mv_face_check_system_info_feature_supported());
MEDIA_VISION_INSTANCE_CHECK(recognition_model);
- MEDIA_VISION_FUNCTION_ENTER();
+ MEDIA_VISION_FUNCTION_ENTER();
int ret = MEDIA_VISION_ERROR_NONE;
@@ -504,7 +504,7 @@ int mv_face_recognition_model_reset(
#else
-ret = mv_face_recognition_model_reset_open(
+ ret = mv_face_recognition_model_reset_open(
recognition_model,
face_label);
@@ -563,8 +563,8 @@ int mv_face_recognition_model_query_labels(
#endif /* MEDIA_VISION_FACE_LICENSE_PORT */
-MEDIA_VISION_FUNCTION_LEAVE();
-return ret;
+ MEDIA_VISION_FUNCTION_LEAVE();
+ return ret;
}
int mv_face_tracking_model_create(
@@ -684,7 +684,7 @@ int mv_face_tracking_model_save(
if (file_name == NULL) {
LOGE("File name is NULL. The file name has to be specified");
- return MEDIA_VISION_ERROR_INVALID_PATH;
+ return MEDIA_VISION_ERROR_INVALID_PATH;
}
MEDIA_VISION_FUNCTION_ENTER();
@@ -693,15 +693,15 @@ int mv_face_tracking_model_save(
#ifdef MEDIA_VISION_FACE_LICENSE_PORT
-ret = mv_face_tracking_model_save_lic(
- file_name,
- tracking_model);
+ ret = mv_face_tracking_model_save_lic(
+ file_name,
+ tracking_model);
#else
ret = mv_face_tracking_model_save_open(
- file_name,
- tracking_model);
+ file_name,
+ tracking_model);
#endif /* MEDIA_VISION_FACE_LICENSE_PORT */
@@ -728,14 +728,14 @@ int mv_face_tracking_model_load(
#ifdef MEDIA_VISION_FACE_LICENSE_PORT
ret = mv_face_tracking_model_load_lic(
- file_name,
- tracking_model);
+ file_name,
+ tracking_model);
#else
-ret = mv_face_tracking_model_load_open(
- file_name,
- tracking_model);
+ ret = mv_face_tracking_model_load_open(
+ file_name,
+ tracking_model);
#endif /* MEDIA_VISION_FACE_LICENSE_PORT */
diff --git a/src/mv_image.c b/src/mv_image.c
index f6a4000c..3b9a1c57 100644
--- a/src/mv_image.c
+++ b/src/mv_image.c
@@ -56,13 +56,13 @@ int mv_image_recognize(
#ifdef MEDIA_VISION_IMAGE_LICENSE_PORT
- /* Use licensed image functionality here. */
+ /* Use licensed image functionality here. */
int ret = mv_image_recognize_lic(source, image_objects,
number_of_objects, engine_cfg, recognized_cb, user_data);
#else
-/* Use open image functionality here. */
+ /* Use open image functionality here. */
int ret = mv_image_recognize_open(source, image_objects,
number_of_objects, engine_cfg, recognized_cb, user_data);
@@ -89,7 +89,7 @@ int mv_image_track(
#ifdef MEDIA_VISION_IMAGE_LICENSE_PORT
/* Use licensed image functionality here. */
- int ret = mv_image_track_lic(source, image_tracking_model, engine_cfg, tracked_cb, user_data);
+ int ret = mv_image_track_lic(source, image_tracking_model, engine_cfg, tracked_cb, user_data);
#else
@@ -204,10 +204,10 @@ int mv_image_object_get_recognition_rate(
int mv_image_object_set_label(
mv_image_object_h image_object,
- int label)
+ int label)
{
MEDIA_VISION_SUPPORT_CHECK(__mv_image_check_system_info_feature_supported());
- MEDIA_VISION_INSTANCE_CHECK(image_object);
+ MEDIA_VISION_INSTANCE_CHECK(image_object);
MEDIA_VISION_FUNCTION_ENTER();
diff --git a/src/mv_private.c b/src/mv_private.c
index b46d5e72..796f8f91 100644
--- a/src/mv_private.c
+++ b/src/mv_private.c
@@ -25,28 +25,36 @@ bool __mv_check_system_info_feature_supported()
bool isFaceRecognitionSupported = false;
bool isImageRecognitionSupported = false;
- const int nRetVal1 = system_info_get_platform_bool("http://tizen.org/feature/vision.barcode_detection", &isBarcodeDetectionSupported);
+ const int nRetVal1 = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.barcode_detection",
+ &isBarcodeDetectionSupported);
if (nRetVal1 != SYSTEM_INFO_ERROR_NONE) {
LOGE("SYSTEM_INFO_ERROR: vision.barcode_detection");
return false;
}
- const int nRetVal2 = system_info_get_platform_bool("http://tizen.org/feature/vision.barcode_generation", &isBarcodeGenerationSupported);
+ const int nRetVal2 = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.barcode_generation",
+ &isBarcodeGenerationSupported);
if (nRetVal2 != SYSTEM_INFO_ERROR_NONE) {
LOGE("SYSTEM_INFO_ERROR: vision.barcode_generation");
return false;
}
- const int nRetVal3 = system_info_get_platform_bool("http://tizen.org/feature/vision.face_recognition", &isFaceRecognitionSupported);
+ const int nRetVal3 = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.face_recognition",
+ &isFaceRecognitionSupported);
if (nRetVal3 != SYSTEM_INFO_ERROR_NONE) {
LOGE("SYSTEM_INFO_ERROR: vision.face_recognition");
return false;
}
- const int nRetVal4 = system_info_get_platform_bool("http://tizen.org/feature/vision.image_recognition", &isImageRecognitionSupported);
+ const int nRetVal4 = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.image_recognition",
+ &isImageRecognitionSupported);
if (nRetVal4 != SYSTEM_INFO_ERROR_NONE) {
LOGE("SYSTEM_INFO_ERROR: vision.image_recognition");
@@ -72,10 +80,12 @@ bool __mv_barcode_detect_check_system_info_feature_supported()
{
bool isBarcodeDetectionSupported = false;
- const int nRetVal = system_info_get_platform_bool("http://tizen.org/feature/vision.barcode_detection", &isBarcodeDetectionSupported);
+ const int nRetVal = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.barcode_detection",
+ &isBarcodeDetectionSupported);
if (nRetVal != SYSTEM_INFO_ERROR_NONE) {
- LOGE("SYSTEM_INFO_ERROR: vision.barcode_detectio");
+ LOGE("SYSTEM_INFO_ERROR: vision.barcode_detection");
return false;
}
@@ -92,7 +102,9 @@ bool __mv_barcode_generate_check_system_info_feature_supported()
{
bool isBarcodeGenerationSupported = false;
- const int nRetVal = system_info_get_platform_bool("http://tizen.org/feature/vision.barcode_generation", &isBarcodeGenerationSupported);
+ const int nRetVal = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.barcode_generation",
+ &isBarcodeGenerationSupported);
if (nRetVal != SYSTEM_INFO_ERROR_NONE) {
LOGE("SYSTEM_INFO_ERROR: vision.barcode_generation");
@@ -112,7 +124,9 @@ bool __mv_face_check_system_info_feature_supported()
{
bool isFaceRecognitionSupported = false;
- const int nRetVal = system_info_get_platform_bool("http://tizen.org/feature/vision.face_recognition", &isFaceRecognitionSupported);
+ const int nRetVal = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.face_recognition",
+ &isFaceRecognitionSupported);
if (nRetVal != SYSTEM_INFO_ERROR_NONE) {
LOGE("SYSTEM_INFO_ERROR: vision.face_recognition");
@@ -121,9 +135,9 @@ bool __mv_face_check_system_info_feature_supported()
isFaceRecognitionSupported ?
LOGI("system_info_get_platform_bool returned "
- "Supported face recognition feature capability\n") :
+ "Supported face recognition feature capability\n") :
LOGE("system_info_get_platform_bool returned "
- "Unsupported face recognition feature capability\n");
+ "Unsupported face recognition feature capability\n");
return isFaceRecognitionSupported;
}
@@ -132,7 +146,9 @@ bool __mv_image_check_system_info_feature_supported()
{
bool isImageRecognitionSupported = false;
- const int nRetVal = system_info_get_platform_bool("http://tizen.org/feature/vision.image_recognition", &isImageRecognitionSupported);
+ const int nRetVal = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.image_recognition",
+ &isImageRecognitionSupported);
if (nRetVal != SYSTEM_INFO_ERROR_NONE) {
LOGE("SYSTEM_INFO_ERROR: vision.image_recognition");
@@ -141,9 +157,9 @@ bool __mv_image_check_system_info_feature_supported()
isImageRecognitionSupported ?
LOGI("system_info_get_platform_bool returned "
- "Supported image recognition feature capability\n") :
+ "Supported image recognition feature capability\n") :
LOGE("system_info_get_platform_bool returned "
- "Unsupported image recognition feature capability\n");
+ "Unsupported image recognition feature capability\n");
return isImageRecognitionSupported;
}
diff --git a/test/testsuites/barcode/barcode_test_suite.c b/test/testsuites/barcode/barcode_test_suite.c
index 9dacb103..d6ef9de3 100644
--- a/test/testsuites/barcode/barcode_test_suite.c
+++ b/test/testsuites/barcode/barcode_test_suite.c
@@ -370,25 +370,23 @@ void barcode_detected_cb(
const int drawing_color[] = {255, 0, 0};
if (MEDIA_VISION_ERROR_NONE != draw_rectangle_on_buffer(
- minX,
- minY,
- maxX,
- maxY,
- 6,
- drawing_color,
- &image_data,
- draw_buffer)) {
+ minX,
+ minY,
+ maxX,
+ maxY,
+ 6,
+ drawing_color,
+ &image_data,
+ draw_buffer))
continue;
- }
}
}
if (file_name != NULL &&
- MEDIA_VISION_ERROR_NONE == save_image_from_buffer(file_name, draw_buffer, &image_data, 100)) {
+ MEDIA_VISION_ERROR_NONE == save_image_from_buffer(file_name, draw_buffer, &image_data, 100))
printf("Image was generated as %s\n", file_name);
- } else {
+ else
printf("ERROR: Failed to generate output file. Check file name and permissions. \n");
- }
printf("\n");
}
@@ -401,7 +399,7 @@ int generate_barcode_to_image(barcode_model_s model)
MEDIA_VISION_FUNCTION_ENTER();
if (model.message == NULL ||
- model.file_name == NULL) {
+ model.file_name == NULL) {
MEDIA_VISION_FUNCTION_LEAVE();
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
@@ -630,14 +628,18 @@ int generate_barcode_to_source(barcode_model_s model)
jpeg_file_name[strlen(model.file_name) + 4] = '\0';
}
- save_image_from_buffer(jpeg_file_name, data_buffer, &image_data, 100);
+ err = save_image_from_buffer(jpeg_file_name, data_buffer, &image_data, 100);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("ERROR: Error occurred when try to save image from buffer."
+ "Error code: %i\n", err);
+ }
free(jpeg_file_name);
const int err2 = mv_destroy_source(source);
if (MEDIA_VISION_ERROR_NONE != err2) {
printf("ERROR: Error occurred when try to destroy Media Vision source."
- "Error code: %i\n", err2);
+ "Error code: %i\n", err2);
}
const int err3 = mv_destroy_engine_config(mv_engine_config);
@@ -689,7 +691,14 @@ int detect_barcode(barcode_model_s model, mv_rectangle_s roi)
mv_engine_config_foreach_supported_attribute(_mv_engine_config_supported_attribute, mv_engine_config);
- mv_engine_config_set_int_attribute(mv_engine_config, MV_BARCODE_DETECT_ATTR_TARGET, MV_BARCODE_DETECT_ATTR_TARGET_2D_BARCODE);
+ err = mv_engine_config_set_int_attribute(
+ mv_engine_config,
+ MV_BARCODE_DETECT_ATTR_TARGET,
+ MV_BARCODE_DETECT_ATTR_TARGET_2D_BARCODE);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("ERROR: Errors were occurred during target attribute"
+ "configuration: %i\n", err);
+ }
mv_source_h source;
err = mv_create_source(&source);
@@ -1139,9 +1148,9 @@ int perform_detect()
while (true) {
int sel_opt = show_menu("Select colorspace to test detector on:", options, names, 11);
if (sel_opt < MEDIA_VISION_COLORSPACE_Y800 ||
- sel_opt > MEDIA_VISION_COLORSPACE_RGBA) {
+ sel_opt > MEDIA_VISION_COLORSPACE_RGBA)
continue;
- }
+
detect_model.colorspace = (mv_colorspace_e)sel_opt;
LOGI("User selection is %i", sel_opt);
break;
diff --git a/test/testsuites/common/image_helper/include/ImageHelper.h b/test/testsuites/common/image_helper/include/ImageHelper.h
index 673f4adf..7cd7e888 100644
--- a/test/testsuites/common/image_helper/include/ImageHelper.h
+++ b/test/testsuites/common/image_helper/include/ImageHelper.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __IMAGEHELPER_H__
-#define __IMAGEHELPER_H__
+#ifndef __MEDIA_VISION_IMAGEHELPER_H__
+#define __MEDIA_VISION_IMAGEHELPER_H__
#include "mv_common.h"
@@ -27,201 +27,196 @@
* @brief ImageHelper class definition.
*/
-namespace cv
-{
- template<typename _Tp> class Scalar_;
- typedef Scalar_<double> Scalar;
+namespace cv {
+ template<typename _Tp> class Scalar_;
+ typedef Scalar_<double> Scalar;
- class VideoCapture;
- class VideoWriter;
+ class VideoCapture;
+ class VideoWriter;
}
-namespace MediaVision
-{
-namespace Common
-{
+namespace MediaVision {
+namespace Common {
/**
* @class ImageHelper
* @brief Helper class that provides set of useful methods
* for image management.
*/
-class ImageHelper
-{
+class ImageHelper {
public:
- /**
- * @brief Structure to keep information about width, height and colorspace of an image.
- */
- struct ImageData
- {
- unsigned int imageWidth; /**< Image width */
- unsigned int imageHeight; /**< Image height */
- mv_colorspace_e imageColorspace; /**< Image colorspace */
- };
-
- /**
- * @brief Loads image from file to the buffer of unsigned chars.
- *
- * @since_tizen 3.0
- * @param [in] filePath Path to the image file to be loaded to the
- * @a pDataBuffer
- * @param [out] pDataBuffer The buffer of unsigned chars where image data
- * will be stored
- * @param [out] pBufferSize The size of the @a pDataBuffer
- * @param [out] pImageData The image data (structure that keeps
- * information about image width, height,
- * and colorspace)
- * @return @c 0 on success, otherwise a negative error value
- *
- * @see ImageHelper::saveImageFromBuffer()
- * @see ImageHelper::destroyLoadedBuffer()
- */
- static int loadImageToBuffer(
- const char *filePath,
- unsigned char **pDataBuffer,
- unsigned long *pBufferSize,
- ImageData *pImageData);
-
- /**
- * @brief Saves image stored into @a pDataBuffer to the file in jpeg format.
- *
- * @since_tizen 3.0
- * @param [in] filePath The path to the file where image will be saved
- * @param [in] pDataBuffer Data buffer that contains image data
- * @param [in] imageData The image data (structure that keeps
- * information about image width, height,
- * and colorspace)
- * @param [in] quality Quality for the output jpeg file (0..100)
- * @return @c 0 on success, otherwise a negative error value
- *
- * @see ImageHelper::loadImageToBuffer()
- */
- static int saveImageFromBuffer(
- const char *filePath,
- unsigned char *pDataBuffer,
- const ImageData& imageData,
- int quality = 100);
-
- /**
- * @brief Destroys loaded buffer by loadImageToBuffer().
- *
- * @since_tizen 3.0
- * @param [out] pDataBuffer The buffer of unsigned chars where image data
- * will be stored
- * @return @c 0 on success, otherwise a negative error value
- *
- * @see ImageHelper::loadImageToBuffer()
- */
- static int destroyLoadedBuffer(
- unsigned char *pDataBuffer);
-
- /**
- * @brief Draws the rectangle of specified size on the image data buffer.
- *
- * @since_tizen 3.0
- * @param [in] topLeftVertexX The rectangle top left corner
- * x coordinate
- * @param [in] topLeftVertexY The rectangle top left corner
- * y coordinate
- * @param [in] bottomRightVertexX The rectangle bottom right corner
- * x coordinate
- * @param [in] bottomRightVertexY The rectangle bottom right corner
- * y coordinate
- * @param [in] thickness The thickness of the rectangle border
- * (negative value to draw filled rectangle)
- * @param [in] color The color of the rectangle border
- * @param [in] imageData The image data (structure that keeps
- * information about image width, height,
- * and colorspace). Colorspace has to be
- * @c MEDIA_VISION_COLORSPACE_RGB888
- * @param [in, out] pDataBuffer The pointer to the image data buffer
- * which will be used for rectangle drawing
- * @return @c 0 on success, otherwise a negative error value
- */
- static int drawRectangleOnBuffer(
- int topLeftVertexX,
- int topLeftVertexY,
- int bottomRightVertexX,
- int bottomRightVertexY,
- int thickness,
- const cv::Scalar& color,
- const ImageData& imageData,
- unsigned char *pDataBuffer);
-
- /**
- * @brief Draws the quadrangle of specified size on the image data buffer.
- *
- * @since_tizen 3.0
- * @param [in] location The quadrangle structure
- * @param [in] thickness The thickness of the quadrangle border
- * @param [in] color The color of the quadrangle border
- * @param [in] imageData The image data (structure that keeps
- * information about image width, height,
- * and colorspace). Colorspace has to be
- * @c MEDIA_VISION_COLORSPACE_RGB888
- * @param [in, out] pDataBuffer The pointer to the image data buffer
- * which will be used for quadrangle drawing
- * @return @c 0 on success, otherwise a negative error value
- */
- static int drawQuadrangleOnBuffer(
- mv_quadrangle_s location,
- int thickness,
- const cv::Scalar& color,
- const ImageData& imageData,
- unsigned char *pDataBuffer);
-
- /**
- * @brief Converts image data to the image data of RGB888 colorspace.
- *
- * @since_tizen 3.0
- * @param [in] pInBuffer Buffer with image data to be converted to
- * RGB888 colorspace
- * @param [in] imageData The image data (structure that keeps
- * information about image width, height,
- * and colorspace) for source image
- * @param [out] pOutBuffer Buffer with image data to be generated as
- * a result of the conversion
- * @return @c 0 on success, otherwise a negative error value
- */
- static int convertBufferToRGB888(
- const unsigned char *pInBuffer,
- const ImageData& imageData,
- unsigned char **pOutBuffer);
-
- /**
- * @brief Determines number of channels (components) for the colorspace.
- *
- * @since_tizen 3.0
- * @param [in] colorspace Colorspace for which number of
- * components will be determined
- * @param [out] pComponentsNumber Number of components to be determined
- * @return @c 0 on success, otherwise a negative error value
- */
- static int getNumberOfComponents(
- mv_colorspace_e colorspace,
- int *pComponentsNumber);
+ /**
+ * @brief Structure to keep information about width, height and colorspace of an image.
+ */
+ struct ImageData {
+ unsigned int imageWidth; /**< Image width */
+ unsigned int imageHeight; /**< Image height */
+ mv_colorspace_e imageColorspace; /**< Image colorspace */
+ };
+
+ /**
+ * @brief Loads image from file to the buffer of unsigned chars.
+ *
+ * @since_tizen 3.0
+ * @param [in] filePath Path to the image file to be loaded to the
+ * @a pDataBuffer
+ * @param [out] pDataBuffer The buffer of unsigned chars where image data
+ * will be stored
+ * @param [out] pBufferSize The size of the @a pDataBuffer
+ * @param [out] pImageData The image data (structure that keeps
+ * information about image width, height,
+ * and colorspace)
+ * @return @c 0 on success, otherwise a negative error value
+ *
+ * @see ImageHelper::saveImageFromBuffer()
+ * @see ImageHelper::destroyLoadedBuffer()
+ */
+ static int loadImageToBuffer(
+ const char *filePath,
+ unsigned char **pDataBuffer,
+ unsigned long *pBufferSize,
+ ImageData *pImageData);
+
+ /**
+ * @brief Saves image stored into @a pDataBuffer to the file in jpeg format.
+ *
+ * @since_tizen 3.0
+ * @param [in] filePath The path to the file where image will be saved
+ * @param [in] pDataBuffer Data buffer that contains image data
+ * @param [in] imageData The image data (structure that keeps
+ * information about image width, height,
+ * and colorspace)
+ * @param [in] quality Quality for the output jpeg file (0..100)
+ * @return @c 0 on success, otherwise a negative error value
+ *
+ * @see ImageHelper::loadImageToBuffer()
+ */
+ static int saveImageFromBuffer(
+ const char *filePath,
+ unsigned char *pDataBuffer,
+ const ImageData& imageData,
+ int quality = 100);
+
+ /**
+ * @brief Destroys loaded buffer by loadImageToBuffer().
+ *
+ * @since_tizen 3.0
+ * @param [out] pDataBuffer The buffer of unsigned chars where image data
+ * will be stored
+ * @return @c 0 on success, otherwise a negative error value
+ *
+ * @see ImageHelper::loadImageToBuffer()
+ */
+ static int destroyLoadedBuffer(unsigned char *pDataBuffer);
+
+ /**
+ * @brief Draws the rectangle of specified size on the image data buffer.
+ *
+ * @since_tizen 3.0
+ * @param [in] topLeftVertexX The rectangle top left corner
+ * x coordinate
+ * @param [in] topLeftVertexY The rectangle top left corner
+ * y coordinate
+ * @param [in] bottomRightVertexX The rectangle bottom right corner
+ * x coordinate
+ * @param [in] bottomRightVertexY The rectangle bottom right corner
+ * y coordinate
+ * @param [in] thickness The thickness of the rectangle border
+ * (negative value to draw filled rectangle)
+ * @param [in] color The color of the rectangle border
+ * @param [in] imageData The image data (structure that keeps
+ * information about image width, height,
+ * and colorspace). Colorspace has to be
+ * @c MEDIA_VISION_COLORSPACE_RGB888
+ * @param [in, out] pDataBuffer The pointer to the image data buffer
+ * which will be used for rectangle drawing
+ * @return @c 0 on success, otherwise a negative error value
+ */
+ static int drawRectangleOnBuffer(
+ int topLeftVertexX,
+ int topLeftVertexY,
+ int bottomRightVertexX,
+ int bottomRightVertexY,
+ int thickness,
+ const cv::Scalar& color,
+ const ImageData& imageData,
+ unsigned char *pDataBuffer);
+
+ /**
+ * @brief Draws the quadrangle of specified size on the image data buffer.
+ *
+ * @since_tizen 3.0
+ * @param [in] location The quadrangle structure
+ * @param [in] thickness The thickness of the quadrangle border
+ * @param [in] color The color of the quadrangle border
+ * @param [in] imageData The image data (structure that keeps
+ * information about image width, height,
+ * and colorspace). Colorspace has to be
+ * @c MEDIA_VISION_COLORSPACE_RGB888
+ * @param [in, out] pDataBuffer The pointer to the image data buffer
+ * which will be used for quadrangle drawing
+ * @return @c 0 on success, otherwise a negative error value
+ */
+ static int drawQuadrangleOnBuffer(
+ mv_quadrangle_s location,
+ int thickness,
+ const cv::Scalar& color,
+ const ImageData& imageData,
+ unsigned char *pDataBuffer);
+
+ /**
+ * @brief Converts image data to the image data of RGB888 colorspace.
+ *
+ * @since_tizen 3.0
+ * @param [in] pInBuffer Buffer with image data to be converted to
+ * RGB888 colorspace
+ * @param [in] imageData The image data (structure that keeps
+ * information about image width, height,
+ * and colorspace) for source image
+ * @param [out] pOutBuffer Buffer with image data to be generated as
+ * a result of the conversion
+ * @return @c 0 on success, otherwise a negative error value
+ */
+ static int convertBufferToRGB888(
+ const unsigned char *pInBuffer,
+ const ImageData& imageData,
+ unsigned char **pOutBuffer);
+
+ /**
+ * @brief Determines number of channels (components) for the colorspace.
+ *
+ * @since_tizen 3.0
+ * @param [in] colorspace Colorspace for which number of
+ * components will be determined
+ * @param [out] pComponentsNumber Number of components to be determined
+ * @return @c 0 on success, otherwise a negative error value
+ */
+ static int getNumberOfComponents(
+ mv_colorspace_e colorspace,
+ int *pComponentsNumber);
private:
- /**
- * @brief Converts buffer with image data in Y800 colorspace format
- * to the buffer with image data in RGB888 colorspace format.
- *
- * @since_tizen 3.0
- * @param [in] pInBuffer The buffer with data in Y800 colorspace format
- * @param [in] imageData The image data (structure that keeps
- * information about image width, height, and
- * colorspace) for source buffer
- * @param [out] pOutBuffer The buffer that will contain converted image
- * data in RGB888 colorspace format
- * @return @c 0 on success, otherwise a negative error value
- */
- static int convertY800ToRGB(
- const unsigned char *pInBuffer,
- const ImageData& imageData,
- unsigned char **pOutBuffer);
+ /**
+ * @brief Converts buffer with image data in Y800 colorspace format
+ * to the buffer with image data in RGB888 colorspace format.
+ *
+ * @since_tizen 3.0
+ * @param [in] pInBuffer The buffer with data in Y800 colorspace format
+ * @param [in] imageData The image data (structure that keeps
+ * information about image width, height, and
+ * colorspace) for source buffer
+ * @param [out] pOutBuffer The buffer that will contain converted image
+ * data in RGB888 colorspace format
+ * @return @c 0 on success, otherwise a negative error value
+ */
+ static int convertY800ToRGB(
+ const unsigned char *pInBuffer,
+ const ImageData& imageData,
+ unsigned char **pOutBuffer);
+
};
-} /* namespace Common */
-} /* namespace MediaVision */
+} /* Common */
+} /* MediaVision */
-#endif /* __IMAGEHELPER_H__ */
+#endif /* __MEDIA_VISION_IMAGEHELPER_H__ */
diff --git a/test/testsuites/common/image_helper/include/image_helper.h b/test/testsuites/common/image_helper/include/image_helper.h
index f74c1639..fe4ce922 100644
--- a/test/testsuites/common/image_helper/include/image_helper.h
+++ b/test/testsuites/common/image_helper/include/image_helper.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __IMAGE_HELPER_H__
-#define __IMAGE_HELPER_H__
+#ifndef __MEDIA_VISION_IMAGE_HELPER_H__
+#define __MEDIA_VISION_IMAGE_HELPER_H__
#ifdef __cplusplus
extern "C" {
@@ -42,11 +42,10 @@ extern "C" {
*
* @since_tizen 3.0
*/
-typedef struct
-{
- unsigned int image_width; /**< Image width */
- unsigned int image_height; /**< Image height */
- mv_colorspace_e image_colorspace; /**< Image colorspace */
+typedef struct {
+ unsigned int image_width; /**< Image width */
+ unsigned int image_height; /**< Image height */
+ mv_colorspace_e image_colorspace; /**< Image colorspace */
} image_data_s;
/**
@@ -67,10 +66,10 @@ typedef struct
* @see destroy_loaded_buffer()
*/
int load_image_to_buffer(
- const char *file_path,
- unsigned char **data_buffer,
- unsigned long *buffer_size,
- image_data_s *image_data);
+ const char *file_path,
+ unsigned char **data_buffer,
+ unsigned long *buffer_size,
+ image_data_s *image_data);
/**
* @brief Saves image stored into @a pDataBuffer to the file in jpeg format.
@@ -87,10 +86,10 @@ int load_image_to_buffer(
* @see load_image_to_buffer()
*/
int save_image_from_buffer(
- const char *file_path,
- unsigned char *data_buffer,
- const image_data_s *image_data,
- int quality);
+ const char *file_path,
+ unsigned char *data_buffer,
+ const image_data_s *image_data,
+ int quality);
/**
* @brief Destroys loaded buffer by load_image_to_buffer().
@@ -122,14 +121,14 @@ int destroy_loaded_buffer(unsigned char *data_buffer);
* @return @c 0 on success, otherwise a negative error value
*/
int draw_rectangle_on_buffer(
- int tl_vertex_x,
- int tl_vertex_y,
- int br_vertex_x,
- int br_vertex_y,
- int thickness,
- const int rgb_color[3],
- const image_data_s *image_data,
- unsigned char *data_buffer);
+ int tl_vertex_x,
+ int tl_vertex_y,
+ int br_vertex_x,
+ int br_vertex_y,
+ int thickness,
+ const int rgb_color[3],
+ const image_data_s *image_data,
+ unsigned char *data_buffer);
/**
* @brief Draws the quadrangle of specified size on the image data buffer.
@@ -146,11 +145,11 @@ int draw_rectangle_on_buffer(
* @return @c 0 on success, otherwise a negative error value
*/
int draw_quadrangle_on_buffer(
- mv_quadrangle_s location,
- int thickness,
- const int rgb_color[3],
- const image_data_s *image_data,
- unsigned char *data_buffer);
+ mv_quadrangle_s location,
+ int thickness,
+ const int rgb_color[3],
+ const image_data_s *image_data,
+ unsigned char *data_buffer);
/**
* @brief Converts image data to the image data of RGB888 colorspace.
@@ -166,9 +165,9 @@ int draw_quadrangle_on_buffer(
* @return @c 0 on success, otherwise a negative error value
*/
int convert_buffer_to_RGB888(
- const unsigned char *in_buffer,
- const image_data_s *image_data,
- unsigned char **out_buffer);
+ const unsigned char *in_buffer,
+ const image_data_s *image_data,
+ unsigned char **out_buffer);
/**
* @brief Determines number of channels (components) for the colorspace.
@@ -180,8 +179,8 @@ int convert_buffer_to_RGB888(
* @return @c 0 on success, otherwise a negative error value
*/
int get_number_of_components(
- mv_colorspace_e colorspace,
- int *components_number);
+ mv_colorspace_e colorspace,
+ int *components_number);
#ifdef __cplusplus
}
diff --git a/test/testsuites/common/image_helper/src/ImageHelper.cpp b/test/testsuites/common/image_helper/src/ImageHelper.cpp
index 6d03b9d1..1be08fa0 100644
--- a/test/testsuites/common/image_helper/src/ImageHelper.cpp
+++ b/test/testsuites/common/image_helper/src/ImageHelper.cpp
@@ -32,17 +32,14 @@
* @brief The ImageHelper class methods implementation.
*/
-namespace MediaVision
-{
-namespace Common
-{
+namespace MediaVision {
+namespace Common {
-namespace
-{
+namespace {
-static const int OpenCVChannels = 3;
-static const mv_colorspace_e OpenCVColor = MEDIA_VISION_COLORSPACE_RGB888;
-static const int QuadrangleVertices = 4;
+const int OPEN_CV_CHANNELS = 3;
+const mv_colorspace_e OPEN_CV_COLOR = MEDIA_VISION_COLORSPACE_RGB888;
+const int QUADRANGLE_VERTICES = 4;
std::vector<std::string> getJPGExtensions()
{
@@ -79,7 +76,7 @@ int ImageHelper::loadImageToBuffer(
pImageData->imageWidth = image.cols;
pImageData->imageHeight = image.rows;
- pImageData->imageColorspace = OpenCVColor;
+ pImageData->imageColorspace = OPEN_CV_COLOR;
return MEDIA_VISION_ERROR_NONE;
}
@@ -93,26 +90,26 @@ int ImageHelper::saveImageFromBuffer(
if (filePath == NULL || pDataBuffer == NULL)
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- static const std::string defaultFilePath = "out";
- static const std::vector<std::string> jpgExtensions = getJPGExtensions();
+ static const std::string DEFAULT_FILE_PATH = "out";
+ static const std::vector<std::string> JPG_EXTENSIONS = getJPGExtensions();
bool rightExtensionFlag = false;
std::string resultFilePath(filePath);
if (resultFilePath.empty()) {
- resultFilePath = defaultFilePath;
+ resultFilePath = DEFAULT_FILE_PATH;
} else {
- for (size_t extNum = 0; extNum < jpgExtensions.size(); ++extNum) {
- if (resultFilePath.size() >= jpgExtensions[extNum].size()) {
+ for (size_t extNum = 0; extNum < JPG_EXTENSIONS.size(); ++extNum) {
+ if (resultFilePath.size() >= JPG_EXTENSIONS[extNum].size()) {
std::string givenExtension = resultFilePath.substr(
- resultFilePath.length() - jpgExtensions[extNum].size(),
- jpgExtensions[extNum].size());
+ resultFilePath.length() - JPG_EXTENSIONS[extNum].size(),
+ JPG_EXTENSIONS[extNum].size());
std::transform(
- givenExtension.begin(), givenExtension.end(),
- givenExtension.begin(), ::tolower);
+ givenExtension.begin(), givenExtension.end(),
+ givenExtension.begin(), ::tolower);
- if (givenExtension == jpgExtensions[extNum]) {
+ if (givenExtension == JPG_EXTENSIONS[extNum]) {
rightExtensionFlag = true;
break;
}
@@ -120,7 +117,7 @@ int ImageHelper::saveImageFromBuffer(
}
}
if (!rightExtensionFlag)
- resultFilePath += jpgExtensions[0];
+ resultFilePath += JPG_EXTENSIONS[0];
if (quality <= 0 || quality > 100)
quality = 100;
@@ -128,8 +125,7 @@ int ImageHelper::saveImageFromBuffer(
unsigned int width = imageData.imageWidth;
unsigned int height = imageData.imageHeight;
- /* Type of conversion from given colorspace to BGR */
- int conversionType = -1;
+ int conversionType = -1; // Type of conversion from given colorspace to BGR
unsigned int channelsNumber = 0;
switch (imageData.imageColorspace) {
case MEDIA_VISION_COLORSPACE_INVALID:
@@ -204,8 +200,9 @@ int ImageHelper::saveImageFromBuffer(
int ImageHelper::destroyLoadedBuffer(unsigned char *pDataBuffer)
{
- if (!pDataBuffer)
+ if (!pDataBuffer) {
return MEDIA_VISION_ERROR_NONE;
+ }
delete [] pDataBuffer;
pDataBuffer = NULL;
@@ -226,36 +223,36 @@ int ImageHelper::drawRectangleOnBuffer(
if (NULL == pDataBuffer)
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- cv::Mat cvImage(imageData.imageHeight, imageData.imageWidth, CV_8UC(OpenCVChannels), pDataBuffer);
+ cv::Mat cvImage(imageData.imageHeight, imageData.imageWidth, CV_8UC(OPEN_CV_CHANNELS), pDataBuffer);
cv::rectangle(
- cvImage,
- cv::Point(topLeftVertexX, topLeftVertexY),
- cv::Point(bottomRightVertexX, bottomRightVertexY),
- color,
- thickness);
+ cvImage,
+ cv::Point(topLeftVertexX, topLeftVertexY),
+ cv::Point(bottomRightVertexX, bottomRightVertexY),
+ color,
+ thickness);
return MEDIA_VISION_ERROR_NONE;
}
int ImageHelper::drawQuadrangleOnBuffer(
- mv_quadrangle_s location,
- int thickness,
- const cv::Scalar& color,
- const ImageData& imageData,
- unsigned char *pDataBuffer)
+ mv_quadrangle_s location,
+ int thickness,
+ const cv::Scalar& color,
+ const ImageData& imageData,
+ unsigned char *pDataBuffer)
{
if (NULL == pDataBuffer)
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- cv::Mat cvImage(imageData.imageHeight, imageData.imageWidth, CV_8UC(OpenCVChannels), pDataBuffer);
- for (int i = 0; i < QuadrangleVertices; ++i) {
+ cv::Mat cvImage(imageData.imageHeight, imageData.imageWidth, CV_8UC(OPEN_CV_CHANNELS), pDataBuffer);
+ for (int i = 0; i < QUADRANGLE_VERTICES; ++i) {
cv::line(
- cvImage,
- cv::Point(location.points[i].x, location.points[i].y),
- cv::Point(location.points[(i + 1) % QuadrangleVertices].x,
- location.points[(i + 1) % QuadrangleVertices].y),
- color,
- thickness);
+ cvImage,
+ cv::Point(location.points[i].x, location.points[i].y),
+ cv::Point(location.points[(i + 1) % QUADRANGLE_VERTICES].x,
+ location.points[(i + 1) % QUADRANGLE_VERTICES].y),
+ color,
+ thickness);
}
return MEDIA_VISION_ERROR_NONE;
}
@@ -328,5 +325,5 @@ int ImageHelper::convertY800ToRGB(
return MEDIA_VISION_ERROR_NONE;
}
-} /* namespace Common */
-} /* namespace MediaVision */
+} /* Common */
+} /* MediaVision */
diff --git a/test/testsuites/common/image_helper/src/image_helper.cpp b/test/testsuites/common/image_helper/src/image_helper.cpp
index ad7cb886..616e4117 100644
--- a/test/testsuites/common/image_helper/src/image_helper.cpp
+++ b/test/testsuites/common/image_helper/src/image_helper.cpp
@@ -30,53 +30,53 @@ using namespace MediaVision::Common;
image_data_s convertToCData(ImageHelper::ImageData data)
{
- image_data_s ret;
- ret.image_width = data.imageWidth;
- ret.image_height = data.imageHeight;
- ret.image_colorspace = data.imageColorspace;
- return ret;
+ image_data_s ret;
+ ret.image_width = data.imageWidth;
+ ret.image_height = data.imageHeight;
+ ret.image_colorspace = data.imageColorspace;
+ return ret;
}
ImageHelper::ImageData convertToCppData(image_data_s data)
{
- ImageHelper::ImageData ret;
- ret.imageWidth = data.image_width;
- ret.imageHeight = data.image_height;
- ret.imageColorspace = data.image_colorspace;
- return ret;
+ ImageHelper::ImageData ret;
+ ret.imageWidth = data.image_width;
+ ret.imageHeight = data.image_height;
+ ret.imageColorspace = data.image_colorspace;
+ return ret;
}
int load_image_to_buffer(
- const char *file_path,
- unsigned char **data_buffer,
- unsigned long *buffer_size,
- image_data_s *image_data)
+ const char *file_path,
+ unsigned char **data_buffer,
+ unsigned long *buffer_size,
+ image_data_s *image_data)
{
- if (image_data == NULL)
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ if (image_data == NULL)
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- int err;
- ImageHelper::ImageData imageData;
- err = ImageHelper::loadImageToBuffer(file_path, data_buffer, buffer_size, &imageData);
+ int err;
+ ImageHelper::ImageData imageData;
+ err = ImageHelper::loadImageToBuffer(file_path, data_buffer, buffer_size, &imageData);
- if (err == MEDIA_VISION_ERROR_NONE)
- *image_data = convertToCData(imageData);
+ if (err == MEDIA_VISION_ERROR_NONE)
+ *image_data = convertToCData(imageData);
- return err;
+ return err;
}
int save_image_from_buffer(
- const char *file_path,
- unsigned char *data_buffer,
- const image_data_s *image_data,
- int quality)
+ const char *file_path,
+ unsigned char *data_buffer,
+ const image_data_s *image_data,
+ int quality)
{
- if (image_data == NULL)
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ if (image_data == NULL)
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- ImageHelper::ImageData imageData = convertToCppData(*image_data);
- return ImageHelper::saveImageFromBuffer(file_path, data_buffer, imageData, quality);
+ ImageHelper::ImageData imageData = convertToCppData(*image_data);
+ return ImageHelper::saveImageFromBuffer(file_path, data_buffer, imageData, quality);
}
int destroy_loaded_buffer(unsigned char *data_buffer)
@@ -85,67 +85,67 @@ int destroy_loaded_buffer(unsigned char *data_buffer)
}
int draw_rectangle_on_buffer(
- int tl_vertex_x,
- int tl_vertex_y,
- int br_vertex_x,
- int br_vertex_y,
- int thickness,
- const int rgb_color[3],
- const image_data_s *image_data,
- unsigned char *data_buffer)
+ int tl_vertex_x,
+ int tl_vertex_y,
+ int br_vertex_x,
+ int br_vertex_y,
+ int thickness,
+ const int rgb_color[3],
+ const image_data_s *image_data,
+ unsigned char *data_buffer)
{
- if (image_data == NULL)
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ if (image_data == NULL)
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- ImageHelper::ImageData imageData = convertToCppData(*image_data);
+ ImageHelper::ImageData imageData = convertToCppData(*image_data);
- cv::Scalar color(rgb_color[2], rgb_color[1], rgb_color[0]);
+ cv::Scalar color(rgb_color[2], rgb_color[1], rgb_color[0]);
- return ImageHelper::drawRectangleOnBuffer(
- tl_vertex_x, tl_vertex_y,
- br_vertex_x, br_vertex_y,
- thickness,
- color,
- imageData, data_buffer);
+ return ImageHelper::drawRectangleOnBuffer(
+ tl_vertex_x, tl_vertex_y,
+ br_vertex_x, br_vertex_y,
+ thickness,
+ color,
+ imageData, data_buffer);
}
int draw_quadrangle_on_buffer(
- mv_quadrangle_s location,
- int thickness,
- const int rgb_color[3],
- const image_data_s *image_data,
- unsigned char *data_buffer)
+ mv_quadrangle_s location,
+ int thickness,
+ const int rgb_color[3],
+ const image_data_s *image_data,
+ unsigned char *data_buffer)
{
- if (image_data == NULL)
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ if (image_data == NULL)
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- ImageHelper::ImageData imageData = convertToCppData(*image_data);
+ ImageHelper::ImageData imageData = convertToCppData(*image_data);
- cv::Scalar color(rgb_color[2], rgb_color[1], rgb_color[0]);
+ cv::Scalar color(rgb_color[2], rgb_color[1], rgb_color[0]);
- return ImageHelper::drawQuadrangleOnBuffer(
- location,
- thickness,
- color,
- imageData,
- data_buffer);
+ return ImageHelper::drawQuadrangleOnBuffer(
+ location,
+ thickness,
+ color,
+ imageData,
+ data_buffer);
}
int convert_buffer_to_RGB888(
- const unsigned char *in_buffer,
- const image_data_s *image_data,
- unsigned char **out_buffer)
+ const unsigned char *in_buffer,
+ const image_data_s *image_data,
+ unsigned char **out_buffer)
{
- if (image_data == NULL)
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ if (image_data == NULL)
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- ImageHelper::ImageData imageData = convertToCppData(*image_data);
- return ImageHelper::convertBufferToRGB888(in_buffer, imageData, out_buffer);
+ ImageHelper::ImageData imageData = convertToCppData(*image_data);
+ return ImageHelper::convertBufferToRGB888(in_buffer, imageData, out_buffer);
}
int get_number_of_components(
- mv_colorspace_e colorspace,
- int *components_number)
+ mv_colorspace_e colorspace,
+ int *components_number)
{
- return ImageHelper::getNumberOfComponents(colorspace, components_number);
+ return ImageHelper::getNumberOfComponents(colorspace, components_number);
}
diff --git a/test/testsuites/common/testsuite_common/mv_testsuite_common.h b/test/testsuites/common/testsuite_common/mv_testsuite_common.h
index ab676360..903f3134 100644
--- a/test/testsuites/common/testsuite_common/mv_testsuite_common.h
+++ b/test/testsuites/common/testsuite_common/mv_testsuite_common.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __MV_TESTSUITE_COMMON_H__
-#define __MV_TESTSUITE_COMMON_H__
+#ifndef __MEDIA_VISION_MV_TESTSUITE_COMMON_H__
+#define __MEDIA_VISION_MV_TESTSUITE_COMMON_H__
#include "mv_common.h"
@@ -154,4 +154,4 @@ int load_mv_source_from_file(
const char *path_to_image,
mv_source_h source);
-#endif /* __MV_TESTSUITE_COMMON_H__ */
+#endif /* __MEDIA_VISION_MV_TESTSUITE_COMMON_H__ */
diff --git a/test/testsuites/common/video_helper/mv_log_cfg.h b/test/testsuites/common/video_helper/mv_log_cfg.h
index 392b5c2d..2af638bb 100644
--- a/test/testsuites/common/video_helper/mv_log_cfg.h
+++ b/test/testsuites/common/video_helper/mv_log_cfg.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __MV_LOG_CFG_H__
-#define __MV_LOG_CFG_H__
+#ifndef __MEDIA_VISION_MV_LOG_CFG_H__
+#define __MEDIA_VISION_MV_LOG_CFG_H__
#include <dlog.h>
@@ -65,4 +65,4 @@ do { \
#endif
-#endif /* __MV_LOG_CFG_H__ */
+#endif /* __MEDIA_VISION_MV_LOG_CFG_H__ */
diff --git a/test/testsuites/common/video_helper/mv_video_helper.c b/test/testsuites/common/video_helper/mv_video_helper.c
index c46fcb7b..c5293ec0 100644
--- a/test/testsuites/common/video_helper/mv_video_helper.c
+++ b/test/testsuites/common/video_helper/mv_video_helper.c
@@ -166,7 +166,7 @@ int mv_video_reader_load(
GstVideoInfo info;
if (reader == NULL || path == NULL ||
- image_data == NULL || fps == NULL) {
+ image_data == NULL || fps == NULL) {
LOGE("NULL pointer passed");
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
@@ -175,8 +175,8 @@ int mv_video_reader_load(
/* Set input file location from path */
g_object_set(G_OBJECT(handle->filesrc),
- "location", path,
- NULL);
+ "location", path,
+ NULL);
/* Start playback */
if (_mv_video_reader_state_change(handle, GST_STATE_PLAYING)) {
@@ -410,8 +410,8 @@ int mv_video_writer_init(
handle->fps = fps;
g_object_set(G_OBJECT(handle->filesink),
- "location", path,
- NULL);
+ "location", path,
+ NULL);
err = _mv_video_writer_link_internals(handle);
if (MEDIA_VISION_ERROR_NONE != err) {
@@ -449,7 +449,7 @@ int mv_video_writer_write_frame(
gst_buffer_unmap(buffer, &info);
if (GST_FLOW_OK !=
- gst_app_src_push_buffer(handle->appsrc, buffer)) {
+ gst_app_src_push_buffer(handle->appsrc, buffer)) {
LOGE("Failed to push buffer to appsrc");
return MEDIA_VISION_ERROR_INVALID_OPERATION;
}
@@ -473,22 +473,22 @@ static int _mv_video_reader_create_internals(
reader->appsink = gst_element_factory_make("appsink", "appsink");
if ((!reader->pl) ||
- (!reader->filesrc) ||
- (!reader->decodebin) ||
- (!reader->videoconvert) ||
- (!reader->queue) ||
- (!reader->appsink)) {
+ (!reader->filesrc) ||
+ (!reader->decodebin) ||
+ (!reader->videoconvert) ||
+ (!reader->queue) ||
+ (!reader->appsink)) {
LOGE("Unable to create video read pipeline elements");
return MEDIA_VISION_ERROR_INVALID_OPERATION;
}
gst_bin_add_many(GST_BIN(reader->pl),
- reader->filesrc,
- reader->decodebin,
- reader->videoconvert,
- reader->queue,
- reader->appsink,
- NULL);
+ reader->filesrc,
+ reader->decodebin,
+ reader->videoconvert,
+ reader->queue,
+ reader->appsink,
+ NULL);
return MEDIA_VISION_ERROR_NONE;
}
@@ -508,21 +508,20 @@ static int _mv_video_reader_link_internals(
/* Decodebin pad will be linked during state change */
g_signal_connect(reader->decodebin,
- "pad-added",
- G_CALLBACK(cb_newpad),
- reader);
+ "pad-added",
+ G_CALLBACK(cb_newpad),
+ reader);
if (!gst_element_link_many(reader->videoconvert,
- reader->queue,
- reader->appsink,
- NULL)) {
+ reader->queue, reader->appsink, NULL)) {
+
LOGE("Unable to link videocovnert-queue-appsink");
return MEDIA_VISION_ERROR_INVALID_OPERATION;
}
caps = gst_caps_new_simple("video/x-raw",
- "format", G_TYPE_STRING, "RGB",
- NULL);
+ "format", G_TYPE_STRING, "RGB",
+ NULL);
gst_app_sink_set_caps(GST_APP_SINK(reader->appsink), caps);
gst_caps_unref(caps);
@@ -530,25 +529,25 @@ static int _mv_video_reader_link_internals(
/* Configure appsink */
gst_app_sink_set_emit_signals(GST_APP_SINK(reader->appsink), TRUE);
g_signal_connect(reader->appsink,
- "new-sample",
- G_CALLBACK(appsink_newsample),
- reader);
+ "new-sample",
+ G_CALLBACK(appsink_newsample),
+ reader);
g_signal_connect(reader->appsink,
- "eos",
- G_CALLBACK(appsink_eos),
- reader);
+ "eos",
+ G_CALLBACK(appsink_eos),
+ reader);
g_object_set(G_OBJECT(reader->appsink),
- "drop", TRUE,
- "enable-last-sample", TRUE,
- "sync", FALSE,
- NULL);
+ "drop", TRUE,
+ "enable-last-sample", TRUE,
+ "sync", FALSE,
+ NULL);
/* pad probe */
pad = gst_element_get_static_pad(reader->queue, "src");
gst_pad_add_probe(pad, GST_PAD_PROBE_TYPE_BUFFER,
- (GstPadProbeCallback)pad_probe_data_cb, reader, NULL);
+ (GstPadProbeCallback)pad_probe_data_cb, reader, NULL);
gst_object_unref(pad);
return MEDIA_VISION_ERROR_NONE;
@@ -563,7 +562,7 @@ static int _mv_video_reader_state_change(
GstState pipeline_state = GST_STATE_NULL;
state_ret = gst_element_set_state(handle->pl,
- state);
+ state);
if (GST_STATE_CHANGE_FAILURE == state_ret) {
LOGE("Set state failure");
@@ -571,12 +570,12 @@ static int _mv_video_reader_state_change(
}
LOGI("Set state [%d], change return [%d]",
- state, state_ret);
+ state, state_ret);
state_ret = gst_element_get_state(handle->pl,
- &pipeline_state,
- NULL,
- GST_CLOCK_TIME_NONE);
+ &pipeline_state,
+ NULL,
+ GST_CLOCK_TIME_NONE);
if (GST_STATE_CHANGE_FAILURE == state_ret) {
LOGE("get state failure");
@@ -600,13 +599,13 @@ static int _mv_video_writer_create_internals(
writer->filesink = gst_element_factory_make("filesink", "filesink");
if ((!writer->pl) ||
- (!writer->appsrc) ||
- (!writer->capsfilter) ||
- (!writer->videoconvert) ||
- (!writer->encoder) ||
- (!writer->queue) ||
- (!writer->muxer) ||
- (!writer->filesink)) {
+ (!writer->appsrc) ||
+ (!writer->capsfilter) ||
+ (!writer->videoconvert) ||
+ (!writer->encoder) ||
+ (!writer->queue) ||
+ (!writer->muxer) ||
+ (!writer->filesink)) {
LOGE("Unable to create video read pipeline elements\n");
return MEDIA_VISION_ERROR_INVALID_OPERATION;
}
@@ -709,16 +708,16 @@ static int _mv_video_writer_link_internals(
}
g_object_set(G_OBJECT(writer->appsrc),
- "max-bytes", 0,
- "blocksize", writer->buffer_size,
- "stream-type", 0,
- "format", GST_FORMAT_BYTES,
- NULL);
+ "max-bytes", 0,
+ "blocksize", writer->buffer_size,
+ "stream-type", 0,
+ "format", GST_FORMAT_BYTES,
+ NULL);
if (_mv_video_writer_state_change(writer,
- GST_STATE_PLAYING)) {
- LOGE("Unable to change video writer state");
- return MEDIA_VISION_ERROR_INVALID_OPERATION;
+ GST_STATE_PLAYING)) {
+ LOGE("Unable to change video writer state");
+ return MEDIA_VISION_ERROR_INVALID_OPERATION;
}
return MEDIA_VISION_ERROR_NONE;
@@ -733,7 +732,7 @@ static int _mv_video_writer_state_change(
GstState pipeline_state = GST_STATE_NULL;
state_ret = gst_element_set_state(handle->pl,
- state);
+ state);
if (GST_STATE_CHANGE_FAILURE == state_ret) {
LOGE("Set state failure");
@@ -893,9 +892,9 @@ static void cb_newpad(
}
static GstPadProbeReturn pad_probe_data_cb(
- GstPad *pad,
- GstPadProbeInfo *info,
- gpointer user_data)
+ GstPad *pad,
+ GstPadProbeInfo *info,
+ gpointer user_data)
{
if (user_data == NULL)
return GST_PAD_PROBE_PASS;
diff --git a/test/testsuites/common/video_helper/mv_video_helper.h b/test/testsuites/common/video_helper/mv_video_helper.h
index b22c11e2..17574d8e 100644
--- a/test/testsuites/common/video_helper/mv_video_helper.h
+++ b/test/testsuites/common/video_helper/mv_video_helper.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __MV_VIDEO_HELPER_H__
-#define __MV_VIDEO_HELPER_H__
+#ifndef __MEDIA_VISION_MV_VIDEO_HELPER_H__
+#define __MEDIA_VISION_MV_VIDEO_HELPER_H__
#include "mv_common.h"
#include "image_helper.h"
@@ -50,10 +50,10 @@ typedef void *mv_video_writer_h;
* @see mv_video_reader_set_new_sample_cb()
*/
typedef void (*mv_video_reader_new_sample_cb) (
- char *buffer,
- unsigned int buffer_size,
- image_data_s image_data,
- void *user_data);
+ char *buffer,
+ unsigned int buffer_size,
+ image_data_s image_data,
+ void *user_data);
/**
* @brief Called when stream from video reader is finished.
@@ -67,7 +67,7 @@ typedef void (*mv_video_reader_new_sample_cb) (
* @see mv_video_reader_set_eos_cb()
*/
typedef void (*mv_video_reader_eos_cb) (
- void *user_data);
+ void *user_data);
/**
* @brief Creates a video reader handle.
@@ -83,7 +83,7 @@ typedef void (*mv_video_reader_eos_cb) (
* @see mv_destroy_video_reader()
*/
int mv_create_video_reader(
- mv_video_reader_h *reader);
+ mv_video_reader_h *reader);
/**
* @brief Destroys the video reader handle and releases all its resources.
@@ -97,7 +97,7 @@ int mv_create_video_reader(
* @see mv_create_video_reader()
*/
int mv_destroy_video_reader(
- mv_video_reader_h reader);
+ mv_video_reader_h reader);
/**
* @brief Loads video from file.
@@ -116,10 +116,10 @@ int mv_destroy_video_reader(
* @pre Create a video reader handle by calling @ref mv_create_video_reader()
*/
int mv_video_reader_load(
- mv_video_reader_h reader,
- const char *path,
- image_data_s *image_data,
- unsigned int *fps);
+ mv_video_reader_h reader,
+ const char *path,
+ image_data_s *image_data,
+ unsigned int *fps);
/**
* @brief Starts reader playback.
@@ -137,7 +137,7 @@ int mv_video_reader_load(
* @post Stop reader playback by calling @ref mv_video_reader_stop()
*/
int mv_video_reader_start(
- mv_video_reader_h reader);
+ mv_video_reader_h reader);
/**
* @brief Stops reader playback.
@@ -153,7 +153,7 @@ int mv_video_reader_start(
* and call @ref mv_video_reader_load()
*/
int mv_video_reader_stop(
- mv_video_reader_h reader);
+ mv_video_reader_h reader);
/**
* @brief Sets new sample callback to video reader.
@@ -174,9 +174,9 @@ int mv_video_reader_stop(
*
*/
int mv_video_reader_set_new_sample_cb(
- mv_video_reader_h reader,
- mv_video_reader_new_sample_cb callback,
- void *user_data);
+ mv_video_reader_h reader,
+ mv_video_reader_new_sample_cb callback,
+ void *user_data);
/**
* @brief Sets end of stream callback to video reader.
@@ -197,9 +197,9 @@ int mv_video_reader_set_new_sample_cb(
*
*/
int mv_video_reader_set_eos_cb(
- mv_video_reader_h reader,
- mv_video_reader_eos_cb callback,
- void *user_data);
+ mv_video_reader_h reader,
+ mv_video_reader_eos_cb callback,
+ void *user_data);
/**
* @brief Creates a video writer handle.
@@ -215,7 +215,7 @@ int mv_video_reader_set_eos_cb(
* @see mv_destroy_video_writer()
*/
int mv_create_video_writer(
- mv_video_writer_h *writer);
+ mv_video_writer_h *writer);
/**
* @brief Destroys the video writer handle and releases all its resources.
@@ -230,7 +230,7 @@ int mv_create_video_writer(
* @see mv_create_video_writer()
*/
int mv_destroy_video_writer(
- mv_video_writer_h writer);
+ mv_video_writer_h writer);
/**
* @brief Sets path and frame size for video file to be stored.
@@ -249,10 +249,10 @@ int mv_destroy_video_writer(
* @pre Create a video writer handle by calling @ref mv_create_video_writer()
*/
int mv_video_writer_init(
- mv_video_writer_h writer,
- const char *path,
- image_data_s image_data,
- unsigned int fps);
+ mv_video_writer_h writer,
+ const char *path,
+ image_data_s image_data,
+ unsigned int fps);
/**
* @brief Writes consequently video frame to the file.
@@ -271,7 +271,7 @@ int mv_video_writer_init(
* and initialize video with @ref mv_video_writer_init()
*/
int mv_video_writer_write_frame(
- mv_video_writer_h writer,
- unsigned char *frame);
+ mv_video_writer_h writer,
+ unsigned char *frame);
-#endif /* __MV_VIDEO_HELPER_H__ */
+#endif /* __MEDIA_VISION_MV_VIDEO_HELPER_H__ */
diff --git a/test/testsuites/face/face_test_suite.c b/test/testsuites/face/face_test_suite.c
index 5cfd431b..824bafa6 100644
--- a/test/testsuites/face/face_test_suite.c
+++ b/test/testsuites/face/face_test_suite.c
@@ -95,10 +95,10 @@ void on_face_detected_cb(
unsigned int buf_size = 0;
image_data_s image_data = { 0, 0, MEDIA_VISION_COLORSPACE_INVALID };
if (MEDIA_VISION_ERROR_NONE != mv_source_get_buffer(source, &out_buffer, &buf_size) ||
- MEDIA_VISION_ERROR_NONE != mv_source_get_width(source, &(image_data.image_width)) ||
- MEDIA_VISION_ERROR_NONE != mv_source_get_height(source, &(image_data.image_height)) ||
- MEDIA_VISION_ERROR_NONE != mv_source_get_colorspace(source, &(image_data.image_colorspace)) ||
- user_data == NULL) {
+ MEDIA_VISION_ERROR_NONE != mv_source_get_width(source, &(image_data.image_width)) ||
+ MEDIA_VISION_ERROR_NONE != mv_source_get_height(source, &(image_data.image_height)) ||
+ MEDIA_VISION_ERROR_NONE != mv_source_get_colorspace(source, &(image_data.image_colorspace)) ||
+ user_data == NULL) {
printf("ERROR: Creating out image is impossible.\n");
} else {
file_name = (char*)user_data;
@@ -108,16 +108,16 @@ void on_face_detected_cb(
int i = 0;
for (i = 0; i < number_of_faces; ++i) {
printf("\Face %i : x - %i, y - %i, width - %i, height - %i ", i,
- faces_locations[i].point.x, faces_locations[i].point.y,
+ faces_locations[i].point.x, faces_locations[i].point.y,
faces_locations[i].width, faces_locations[i].height);
if (Perform_eye_condition_recognize) {
if (MEDIA_VISION_ERROR_NONE != mv_face_eye_condition_recognize(
- source,
- engine_cfg,
- faces_locations[i],
- eye_condition_cb,
- user_data)) {
+ source,
+ engine_cfg,
+ faces_locations[i],
+ eye_condition_cb,
+ user_data)) {
printf(TEXT_RED "\nEye condition recognition for %i face failed"
TEXT_RESET "\n", i);
}
@@ -125,11 +125,11 @@ void on_face_detected_cb(
if (Perform_facial_expression_recognize) {
if (MEDIA_VISION_ERROR_NONE != mv_face_facial_expression_recognize(
- source,
- engine_cfg,
- faces_locations[i],
- face_expression_cb,
- user_data)) {
+ source,
+ engine_cfg,
+ faces_locations[i],
+ face_expression_cb,
+ user_data)) {
printf(TEXT_RED "\nFacial expression recognition for %i "
"face failed" TEXT_RESET "\n", i);
}
@@ -141,14 +141,14 @@ void on_face_detected_cb(
const int rectangle_thickness = 3;
const int drawing_color[] = {255, 0, 0};
if (MEDIA_VISION_ERROR_NONE != draw_rectangle_on_buffer(
- faces_locations[i].point.x,
- faces_locations[i].point.y,
- faces_locations[i].point.x + faces_locations[i].width,
- faces_locations[i].point.y + faces_locations[i].height,
- rectangle_thickness,
- drawing_color,
- &image_data,
- out_buffer)) {
+ faces_locations[i].point.x,
+ faces_locations[i].point.y,
+ faces_locations[i].point.x + faces_locations[i].width,
+ faces_locations[i].point.y + faces_locations[i].height,
+ rectangle_thickness,
+ drawing_color,
+ &image_data,
+ out_buffer)) {
continue;
}
}
@@ -350,11 +350,10 @@ int perform_mv_face_recognize(mv_face_recognition_model_h model)
"faces from images with face detection functionality.\n"
TEXT_RESET);
while (-1 == input_string(
- "Input file name with the face to be recognized:",
- 1024,
- &(in_file_name))) {
+ "Input file name with the face to be recognized:",
+ 1024,
+ &(in_file_name)))
printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n");
- }
err = load_mv_source_from_file(in_file_name, source);
@@ -431,24 +430,20 @@ int add_single_example(
TEXT_RESET);
while (-1 == input_int("Specify top left ROI x coordinate:",
- INT_MIN, INT_MAX, &(roi->point.x))) {
+ INT_MIN, INT_MAX, &(roi->point.x)))
printf("Incorrect input! Try again.\n");
- }
while (-1 == input_int("Specify top left ROI y coordinate:",
- INT_MIN, INT_MAX, &(roi->point.y))) {
+ INT_MIN, INT_MAX, &(roi->point.y)))
printf("Incorrect input! Try again.\n");
- }
while (-1 == input_int("Specify top left ROI width:",
- INT_MIN, INT_MAX, &(roi->width))) {
+ INT_MIN, INT_MAX, &(roi->width)))
printf("Incorrect input! Try again.\n");
- }
while (-1 == input_int("Specify top left ROI height:",
- INT_MIN, INT_MAX, &(roi->height))) {
+ INT_MIN, INT_MAX, &(roi->height)))
printf("Incorrect input! Try again.\n");
- }
} else {
roi = NULL;
}
@@ -470,7 +465,7 @@ int add_single_example(
printf("Incorrect input! You can use %i-%i labels only. Try again.\n",
MIN_ALLOWED_LABEL,
MAX_ALLOWED_LABEL);
- }
+ }
}
err = mv_face_recognition_model_add(source, model, roi, *face_label);
@@ -633,9 +628,8 @@ int perform_mv_face_recognition_model_save(mv_face_recognition_model_h model)
char *out_file_name = NULL;
while (input_string("Input file name to save the model:",
- 1024, &(out_file_name)) == -1) {
+ 1024, &(out_file_name)) == -1)
printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n");
- }
const int err = mv_face_recognition_model_save(out_file_name, model);
@@ -649,9 +643,8 @@ int perform_mv_face_recognition_model_load(mv_face_recognition_model_h *model)
char *in_file_name = NULL;
while (input_string("Input file name to load model from:",
- 1024, &(in_file_name)) == -1) {
+ 1024, &(in_file_name)) == -1)
printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n");
- }
const int err = mv_face_recognition_model_load(in_file_name, model);
@@ -822,7 +815,7 @@ int perform_model_evaluation(mv_face_recognition_model_h model)
int dir_n = 0;
int label_count = 0;
while (show_confirm_dialog("Add test images directory?") &&
- dir_n < max_dir_allowed) {
+ dir_n < max_dir_allowed) {
char *in_file_name = NULL;
while (-1 == input_string("Specify path to the test images directory:", 1024, &(in_file_name)))
printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n");
@@ -1234,7 +1227,7 @@ int load_source_from_first_video_frame(const char *video_file, mv_source_h sourc
if (MEDIA_VISION_ERROR_NONE != err) {
printf(TEXT_RED "ERROR: Errors were occurred during creating the video "
"reader! Error code: %i\n" TEXT_RESET, err);
- return err;
+ return err;
}
err = mv_video_reader_set_new_sample_cb(
@@ -1423,15 +1416,14 @@ int perform_mv_face_tracking_model_prepare(mv_face_tracking_model_h model)
snprintf(str_prompt, 100, "Specify point %i x coordinate: x%i = ",
idx - 1, idx);
while (-1 == input_int(str_prompt, INT_MIN, INT_MAX,
- &(roi.points[idx - 1].x))) {
+ &(roi.points[idx - 1].x)))
printf("Incorrect input! Try again.\n");
- }
+
snprintf(str_prompt, 100, "Specify point %i y coordinate: y%i = ",
idx - 1, idx);
while (-1 == input_int(str_prompt, INT_MIN, INT_MAX,
- &(roi.points[idx - 1].y))) {
+ &(roi.points[idx - 1].y)))
printf("Incorrect input! Try again.\n");
- }
}
err = mv_face_tracking_model_prepare(
@@ -1457,9 +1449,11 @@ int perform_mv_face_tracking_model_prepare(mv_face_tracking_model_h model)
return err;
}
-static char *track_output_dir = NULL;
-
-static int track_frame_counter = 0;
+typedef struct {
+ mv_face_tracking_model_h target;
+ mv_video_writer_h writer;
+ int frame_number;
+} tracking_cb_data;
void track_cb(
mv_source_h source,
@@ -1469,21 +1463,20 @@ void track_cb(
double confidence,
void *user_data)
{
+ tracking_cb_data *cb_data = user_data;
static bool track_catch_face = false;
- ++track_frame_counter;
-
unsigned char *out_buffer = NULL;
unsigned int buf_size = 0;
image_data_s image_data = { 0, 0, MEDIA_VISION_COLORSPACE_INVALID };
if (MEDIA_VISION_ERROR_NONE !=
- mv_source_get_buffer(source, &out_buffer, &buf_size) ||
- MEDIA_VISION_ERROR_NONE !=
- mv_source_get_width(source, &(image_data.image_width)) ||
- MEDIA_VISION_ERROR_NONE !=
- mv_source_get_height(source, &(image_data.image_height)) ||
- MEDIA_VISION_ERROR_NONE !=
- mv_source_get_colorspace(source, &(image_data.image_colorspace))) {
+ mv_source_get_buffer(source, &out_buffer, &buf_size) ||
+ MEDIA_VISION_ERROR_NONE !=
+ mv_source_get_width(source, &(image_data.image_width)) ||
+ MEDIA_VISION_ERROR_NONE !=
+ mv_source_get_height(source, &(image_data.image_height)) ||
+ MEDIA_VISION_ERROR_NONE !=
+ mv_source_get_colorspace(source, &(image_data.image_colorspace))) {
printf("ERROR: Creating out image is impossible.\n");
return;
@@ -1492,11 +1485,11 @@ void track_cb(
if (NULL != location) {
if (!track_catch_face) {
printf(TEXT_GREEN "Frame %i : Tracked object is appeared" TEXT_RESET "\n",
- track_frame_counter);
+ cb_data->frame_number);
track_catch_face = true;
} else {
printf(TEXT_YELLOW "Frame %i : Tracked object is tracked" TEXT_RESET "\n",
- track_frame_counter);
+ cb_data->frame_number);
}
const int rectangle_thickness = 3;
@@ -1531,22 +1524,21 @@ void track_cb(
} else {
if (track_catch_face) {
printf(TEXT_RED "Frame %i : Tracked object is lost" TEXT_RESET "\n",
- track_frame_counter);
+ cb_data->frame_number);
track_catch_face = false;
} else {
printf(TEXT_YELLOW "Frame %i : Tracked object isn't detected" TEXT_RESET "\n",
- track_frame_counter);
+ cb_data->frame_number);
}
}
- char file_path[1024];
- snprintf(file_path, 1024, "%s/%05d.jpg", track_output_dir, track_frame_counter);
- if (MEDIA_VISION_ERROR_NONE == save_image_from_buffer(
- file_path, out_buffer, &image_data, 100)) {
- printf("Frame %i was outputted as %s\n", track_frame_counter, file_path);
- } else {
- printf(TEXT_RED "ERROR: Failed to generate output file %s. "
- "Check file name and permissions.\n" TEXT_RESET, file_path);
+ const int err = mv_video_writer_write_frame(cb_data->writer, out_buffer);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED "ERROR: Errors were occurred during writing frame #%i"
+ "to the result video file; code %i" TEXT_RESET "\n",
+ cb_data->frame_number,
+ err);
+ return;
}
}
@@ -1581,17 +1573,25 @@ void track_on_sample_cb(
return;
}
- mv_face_tracking_model_h tracking_model =
- (mv_face_tracking_model_h)user_data;
+ tracking_cb_data *cb_data = (tracking_cb_data*)user_data;
+ ++(cb_data->frame_number);
- err = mv_face_track(source, tracking_model, NULL, track_cb, false, NULL);
+ err = mv_face_track(
+ source,
+ cb_data->target,
+ NULL,
+ track_cb,
+ false,
+ cb_data);
if (MEDIA_VISION_ERROR_NONE != err) {
printf(TEXT_RED "ERROR: Errors were occurred during tracking the face "
- TEXT_RESET "on the video frame! Error code: %i\n", err);
-
- return;
+ TEXT_RESET "on the video frame #%i! Error code: %i\n",
+ cb_data->frame_number,
+ err);
}
+
+ return;
}
/* end of stream callback */
@@ -1607,16 +1607,56 @@ void eos_cb(void *user_data)
pthread_mutex_unlock((pthread_mutex_t*)user_data);
}
-int generate_image_sequence(
- mv_face_tracking_model_h tracking_model,
- const char *track_target_file_name)
+inline void release_resources_for_process_video_file_function(
+ mv_video_reader_h *reader,
+ mv_video_writer_h *writer)
+{
+ int err = MEDIA_VISION_ERROR_NONE;
+ if (*reader) {
+ err = mv_destroy_video_reader(*reader);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED "ERROR: Errors were occurred during video reader"
+ "destroying; code %i" TEXT_RESET "\n",
+ err);
+ }
+ *reader = NULL;
+ }
+ if (*writer) {
+ err = mv_destroy_video_writer(*writer);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED "ERROR: Errors were occurred during video writer"
+ "destroying; code %i" TEXT_RESET "\n",
+ err);
+ }
+ *writer = NULL;
+ }
+}
+
+int process_video_file(
+ mv_face_tracking_model_h tracking_model,
+ const char *track_target_file_name,
+ const char *track_result_file_name)
{
mv_video_reader_h reader = NULL;
+ mv_video_writer_h writer = NULL;
+
int err = mv_create_video_reader(&reader);
if (MEDIA_VISION_ERROR_NONE != err) {
printf(TEXT_RED
"ERROR: Errors were occurred during creating the video "
"reader! Error code: %i" TEXT_RESET "\n", err);
+ release_resources_for_process_video_file_function(&reader, &writer);
+
+ return err;
+ }
+
+ err = mv_create_video_writer(&writer);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED
+ "ERROR: Errors were occurred during creating the video "
+ "writer! Error code: %i" TEXT_RESET "\n", err);
+ release_resources_for_process_video_file_function(&reader, &writer);
+
return err;
}
@@ -1627,31 +1667,39 @@ int generate_image_sequence(
if (MEDIA_VISION_ERROR_NONE != err) {
printf(TEXT_RED "ERROR: Errors were occurred during loading the video "
"by reader! Error code: %i" TEXT_RESET "\n", err);
+ release_resources_for_process_video_file_function(&reader, &writer);
- const int err2 = mv_destroy_video_reader(reader);
- if (MEDIA_VISION_ERROR_NONE != err2) {
- printf(TEXT_RED "ERROR: Errors were occurred during video reader "
- "destroy! Error code: %i" TEXT_RESET "\n", err);
- }
+ return err;
+ }
- return err;
+ video_info.image_colorspace = MEDIA_VISION_COLORSPACE_RGB888;
+
+ err = mv_video_writer_init(
+ writer,
+ track_result_file_name,
+ video_info,
+ fps);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED "\nERROR: Errors were occurred during video writer"
+ "initializing; code %i" TEXT_RESET "\n", err);
+ release_resources_for_process_video_file_function(&reader, &writer);
+
+ return err;
}
+ tracking_cb_data cb_data;
+ cb_data.target = tracking_model;
+ cb_data.writer = writer;
+ cb_data.frame_number = 0;
err = mv_video_reader_set_new_sample_cb(
- reader,
- track_on_sample_cb,
- tracking_model);
-
+ reader,
+ track_on_sample_cb,
+ &cb_data);
if (MEDIA_VISION_ERROR_NONE != err) {
printf(TEXT_RED
"ERROR: Errors were occurred during new sample callback set!"
" Error code: %i" TEXT_RESET "\n", err);
-
- const int err2 = mv_destroy_video_reader(reader);
- if (MEDIA_VISION_ERROR_NONE != err2) {
- printf(TEXT_RED "ERROR: Errors were occurred during video reader "
- "destroy! Error code: %i" TEXT_RESET "\n", err);
- }
+ release_resources_for_process_video_file_function(&reader, &writer);
return err;
}
@@ -1666,13 +1714,7 @@ int generate_image_sequence(
printf(TEXT_RED
"ERROR: Errors were occurred during setting the eos "
"callback for reader! Error code: %i" TEXT_RESET "\n", err);
-
- const int err2 = mv_destroy_video_reader(reader);
- if (MEDIA_VISION_ERROR_NONE != err2) {
- printf(TEXT_RED
- "ERROR: Errors were occurred during video reader destroy!"
- " Error code: %i" TEXT_RESET "\n", err);
- }
+ release_resources_for_process_video_file_function(&reader, &writer);
pthread_mutex_unlock(&block_during_tracking_mutex);
pthread_mutex_destroy(&block_during_tracking_mutex);
@@ -1684,13 +1726,7 @@ int generate_image_sequence(
if (MEDIA_VISION_ERROR_NONE != err) {
printf(TEXT_RED "ERROR: Errors were occurred during starting the "
"video reader! Error code: %i" TEXT_RESET "\n", err);
-
- const int err2 = mv_destroy_video_reader(reader);
- if (MEDIA_VISION_ERROR_NONE != err2) {
- printf(TEXT_RED
- "ERROR: Errors were occurred during video reader destroy!"
- " Error code: %i" TEXT_RESET "\n", err);
- }
+ release_resources_for_process_video_file_function(&reader, &writer);
pthread_mutex_unlock(&block_during_tracking_mutex);
pthread_mutex_destroy(&block_during_tracking_mutex);
@@ -1711,11 +1747,7 @@ int generate_image_sequence(
TEXT_RESET, err);
}
- err = mv_destroy_video_reader(reader);
- if (MEDIA_VISION_ERROR_NONE != err) {
- printf(TEXT_RED "ERROR: Errors were occurred during video "
- "reader destroy! Error code: %i\n" TEXT_RESET, err);
- }
+ release_resources_for_process_video_file_function(&reader, &writer);
return MEDIA_VISION_ERROR_NONE;
}
@@ -1732,20 +1764,25 @@ int perform_mv_face_track(mv_face_tracking_model_h tracking_model)
TEXT_RESET "\n");
char *track_target_file_name = NULL;
+ char *track_result_file_name = NULL;
while (input_string("Input video file name to track on:",
- 1024, &(track_target_file_name)) == -1) {
+ 1024, &(track_target_file_name)) == -1)
printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n");
- }
- while (input_string("Input directory to save tracking results:",
- 1024, &(track_output_dir)) == -1) {
+ while (input_string("Input video file name to save tracking results:",
+ 1024, &(track_result_file_name)) == -1)
printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n");
- }
- track_frame_counter = 0;
+ const int res = process_video_file(
+ tracking_model,
+ track_target_file_name,
+ track_result_file_name);
+
+ free(track_target_file_name);
+ free(track_result_file_name);
- return generate_image_sequence(tracking_model, track_target_file_name);
+ return res;
}
int perform_track()
diff --git a/test/testsuites/image/image_test_suite.c b/test/testsuites/image/image_test_suite.c
index 4e35f816..4b54e913 100644
--- a/test/testsuites/image/image_test_suite.c
+++ b/test/testsuites/image/image_test_suite.c
@@ -240,11 +240,11 @@ void handle_recognition_result(
image_data_s image_data = { 0, 0, MEDIA_VISION_COLORSPACE_INVALID };
if (MEDIA_VISION_ERROR_NONE != mv_source_get_buffer(source, &(out_buffer), &buffer_size) ||
- MEDIA_VISION_ERROR_NONE != mv_source_get_width(source, &(image_data.image_width)) ||
- MEDIA_VISION_ERROR_NONE != mv_source_get_height(source, &(image_data.image_height)) ||
- MEDIA_VISION_ERROR_NONE != mv_source_get_colorspace(source, &(image_data.image_colorspace)) ||
- NULL == file_name) {
- printf("ERROR: Creating out image is impossible.\n");
+ MEDIA_VISION_ERROR_NONE != mv_source_get_width(source, &(image_data.image_width)) ||
+ MEDIA_VISION_ERROR_NONE != mv_source_get_height(source, &(image_data.image_height)) ||
+ MEDIA_VISION_ERROR_NONE != mv_source_get_colorspace(source, &(image_data.image_colorspace)) ||
+ NULL == file_name) {
+ printf("ERROR: Creating out image is impossible.\n");
} else {
is_source_data_loaded = 1;
}
@@ -342,7 +342,7 @@ int generate_image_object_from_file(const char *path_to_image,
int err2 = mv_destroy_source(source);
if (MEDIA_VISION_ERROR_NONE != err2) {
- printf("\nERROR: Errors were occurred during source "
+ printf("\nERROR: Errors were occurred during source "
"destroying; code %i\n", err2);
}
@@ -601,17 +601,15 @@ int perform_recognize(mv_image_object_h *targets, int number_of_targets)
char *path_to_generated_image = NULL;
while (input_string("Input file name with image for recognizing:",
- 1024, &path_to_image) == -1) {
+ 1024, &path_to_image) == -1)
printf("Incorrect input! Try again.\n");
- }
while (input_string("Input file name for generated image:",
- 1024, &path_to_generated_image) == -1) {
+ 1024, &path_to_generated_image) == -1)
printf("Incorrect input! Try again.\n");
- }
const int err = recognize_image(path_to_image, path_to_generated_image, targets,
- number_of_targets);
+ number_of_targets);
free(path_to_image);
free(path_to_generated_image);
@@ -631,13 +629,12 @@ int perform_load_image_object(char **path_to_object, mv_image_object_h *result)
}
while (input_string("Input file name with image object to be loaded:",
- 1024, path_to_object) == -1) {
+ 1024, path_to_object) == -1)
printf("Incorrect input! Try again.\n");
- }
- int err = mv_image_object_load(result, *path_to_object);
+ int err = mv_image_object_load(*path_to_object, result);
- if (MEDIA_VISION_ERROR_NONE != err && NULL != (*result)) {
+ if (MEDIA_VISION_ERROR_NONE != err || NULL == (*result)) {
printf("Error: object isn't loaded with error code %i\n", err);
return err;
}
@@ -656,9 +653,8 @@ int perform_save_image_object(mv_image_object_h object)
char *path_to_object = NULL;
while (input_string("Input file name to be generated for image object storing:",
- 1024, &path_to_object) == -1) {
+ 1024, &path_to_object) == -1)
printf("Incorrect input! Try again.\n");
- }
err = mv_image_object_save(path_to_object, object);
@@ -685,33 +681,28 @@ int perform_generate_image_object(mv_image_object_h *result, char **path_to_imag
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
while (input_string("Input file name with image to be analyzed:",
- 1024, path_to_image) == -1) {
+ 1024, path_to_image) == -1)
printf("Incorrect input! Try again.\n");
- }
mv_rectangle_s roi;
const bool sel_roi = show_confirm_dialog("Select if you want to set ROI");
if (sel_roi) {
printf("\nInput ROI coordinates\n");
while (input_int("Input x coordinate:", INT_MIN, INT_MAX,
- &(roi.point.x)) == -1) {
+ &(roi.point.x)) == -1)
printf("Incorrect input! Try again.\n");
- }
while (input_int("Input y coordinate:", INT_MIN, INT_MAX,
- &(roi.point.y)) == -1) {
+ &(roi.point.y)) == -1)
printf("Incorrect input! Try again.\n");
- }
while (input_int("Input ROI width:", INT_MIN, INT_MAX,
- &(roi.width)) == -1) {
+ &(roi.width)) == -1)
printf("Incorrect input! Try again.\n");
- }
while (input_int("Input ROI height:", INT_MIN, INT_MAX,
- &(roi.height)) == -1) {
+ &(roi.height)) == -1)
printf("Incorrect input! Try again.\n");
- }
}
@@ -825,7 +816,7 @@ int handle_tracking_result(
return err;
}
} else {
- usleep(1000000);
+ usleep(33000);
printf("Frame #%i: object isn't found.\n", frame_number);
}
@@ -1342,10 +1333,11 @@ void perform_recognition_cases(GArray *image_objects)
break;
}
- testing_object_h added_object;
+ testing_object_h added_object = NULL;
testing_object_create(&added_object);
testing_object_fill(added_object, temporary,
- OBJECT_TYPE_IMAGE_OBJECT, SOURCE_TYPE_GENERATION, path_to_image);
+ OBJECT_TYPE_IMAGE_OBJECT, SOURCE_TYPE_GENERATION,
+ path_to_image);
if (NULL != path_to_image)
free(path_to_image);
@@ -1555,216 +1547,220 @@ void perform_tracking_cases(GArray *image_objects, GArray *image_tracking_models
int sel_opt = show_menu("Select action:", options, names, number_of_options);
switch (sel_opt) {
- case 1: {
- /* Show created set of tracking models */
- show_testing_objects("Set of image tracking models", image_tracking_models);
- break;
- }
- case 2: {
- /* Create empty tracking model (mv_image_tracking_model_create) */
- mv_image_tracking_model_h temporary_image_tracking_model = NULL;
-
- int err = mv_image_tracking_model_create(&temporary_image_tracking_model);
+ case 1: {
+ /* Show created set of tracking models */
+ show_testing_objects("Set of image tracking models", image_tracking_models);
+ break;
+ }
+ case 2: {
+ /* Create empty tracking model (mv_image_tracking_model_create) */
+ mv_image_tracking_model_h temporary_image_tracking_model = NULL;
- if (MEDIA_VISION_ERROR_NONE != err) {
- printf("ERROR: tracking model creation is failed with code %i\n", err);
- break;
- }
+ int err = mv_image_tracking_model_create(&temporary_image_tracking_model);
- testing_object_h added_object = NULL;
- testing_object_create(&added_object);
- testing_object_fill(
- added_object,
- temporary_image_tracking_model,
- OBJECT_TYPE_IMAGE_TRACKING_MODEL,
- SOURCE_TYPE_EMPTY,
- NULL);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("ERROR: tracking model creation is failed with code %i\n", err);
+ break;
+ }
- add_testing_object(image_tracking_models, added_object);
- printf("\nTracking model successfully created\n");
+ testing_object_h added_object = NULL;
+ testing_object_create(&added_object);
+ testing_object_fill(
+ added_object,
+ temporary_image_tracking_model,
+ OBJECT_TYPE_IMAGE_TRACKING_MODEL,
+ SOURCE_TYPE_EMPTY,
+ NULL);
+
+ add_testing_object(image_tracking_models, added_object);
+ printf("\nTracking model successfully created\n");
+ break;
+ }
+ case 3: {
+ /* Generate model based on image object (mv_image_tracking_model_set_target) */
+ if (image_objects->len <= 0) {
+ printf("\nFirstly you must create at least one image object.\n");
break;
}
- case 3: {
- /* Generate model based on image object (mv_image_tracking_model_set_target) */
- if (image_objects->len <= 0) {
- printf("\nFirstly you must create at least one image object.\n");
- break;
- }
- mv_image_tracking_model_h temporary_image_tracking_model = NULL;
- err = mv_image_tracking_model_create(&temporary_image_tracking_model);
- if (MEDIA_VISION_ERROR_NONE != err) {
- printf("Error: tracking model isn't created with error code %i\n", err);
- break;
- }
+ mv_image_tracking_model_h temporary_image_tracking_model = NULL;
+ err = mv_image_tracking_model_create(&temporary_image_tracking_model);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("Error: tracking model isn't created with error code %i\n", err);
+ break;
+ }
- testing_object_h temporary_testing_object = NULL;
- select_testing_object(
- image_objects,
- &temporary_testing_object,
- "Select the image object for tracking");
+ testing_object_h temporary_testing_object = NULL;
+ select_testing_object(
+ image_objects,
+ &temporary_testing_object,
+ "Select the image object for tracking");
- err = mv_image_tracking_model_set_target(
- (mv_image_object_h)(temporary_testing_object->entity),
+ err = mv_image_tracking_model_set_target(
+ (mv_image_object_h)(temporary_testing_object->entity),
+ temporary_image_tracking_model);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("Error: target isn't set with error code %i\n", err);
+ int err2 = mv_image_tracking_model_destroy(
temporary_image_tracking_model);
- if (MEDIA_VISION_ERROR_NONE != err) {
- printf("Error: target isn't set with error code %i\n", err);
- break;
- }
-
- testing_object_h added_object = NULL;
- testing_object_create(&added_object);
- testing_object_fill(
- added_object,
- temporary_image_tracking_model,
- OBJECT_TYPE_IMAGE_TRACKING_MODEL,
- SOURCE_TYPE_GENERATION,
- temporary_testing_object);
-
- add_testing_object(image_tracking_models, added_object);
- printf("\nTracking model successfully generated\n");
+ if (MEDIA_VISION_ERROR_NONE != err2)
+ printf("Error: tracking model destroying return"
+ "error code %i\n", err);
break;
}
- case 4: {
- /* Load existed tracking model from file (mv_image_tracking_model_load) */
- mv_image_tracking_model_h temporary_image_tracking_model = NULL;
- char *path_to_object = NULL;
- err = perform_load_image_tracking_model(
- &path_to_object, &temporary_image_tracking_model);
+ testing_object_h added_object = NULL;
+ testing_object_create(&added_object);
+ testing_object_fill(
+ added_object,
+ temporary_image_tracking_model,
+ OBJECT_TYPE_IMAGE_TRACKING_MODEL,
+ SOURCE_TYPE_GENERATION,
+ temporary_testing_object);
+
+ add_testing_object(image_tracking_models, added_object);
+ printf("\nTracking model successfully generated\n");
+ break;
+ }
+ case 4: {
+	case 4: {
+		/* Load an existing tracking model from file (mv_image_tracking_model_load) */
+ mv_image_tracking_model_h temporary_image_tracking_model = NULL;
+ char *path_to_object = NULL;
- if (MEDIA_VISION_ERROR_NONE != err) {
- printf("Loading failed (error code - %i)\n", err);
- break;
- }
+ err = perform_load_image_tracking_model(
+ &path_to_object, &temporary_image_tracking_model);
- testing_object_h added_object = NULL;
- testing_object_create(&added_object);
- testing_object_fill(
- added_object,
- temporary_image_tracking_model,
- OBJECT_TYPE_IMAGE_TRACKING_MODEL,
- SOURCE_TYPE_LOADING,
- path_to_object);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("Loading failed (error code - %i)\n", err);
+ break;
+ }
- free(path_to_object);
+ testing_object_h added_object = NULL;
+ testing_object_create(&added_object);
+ testing_object_fill(
+ added_object,
+ temporary_image_tracking_model,
+ OBJECT_TYPE_IMAGE_TRACKING_MODEL,
+ SOURCE_TYPE_LOADING,
+ path_to_object);
+
+ free(path_to_object);
- add_testing_object(image_tracking_models, added_object);
+ add_testing_object(image_tracking_models, added_object);
+ break;
+ }
+ case 5: {
+		/* Clone an existing tracking model (mv_image_tracking_model_clone) */
+ if (image_tracking_models->len <= 0) {
+ printf(
+ "\nFirstly you must create at least one image "
+ "tracking model.\n");
break;
}
- case 5: {
- /* Clone existed tracking model (mv_image_tracking_model_clone) */
- if (image_tracking_models->len <= 0) {
- printf(
- "\nFirstly you must create at least one image "
- "tracking model.\n");
- break;
- }
- testing_object_h temporary_testing_object = NULL;
- select_testing_object(
- image_tracking_models,
- &temporary_testing_object,
- "Select the tracking model you want to clone");
+ testing_object_h temporary_testing_object = NULL;
+ select_testing_object(
+ image_tracking_models,
+ &temporary_testing_object,
+ "Select the tracking model you want to clone");
- mv_image_tracking_model_h temporary_image_tracking_model = NULL;
- perform_clone_image_tracking_model(
- temporary_testing_object->entity,
- &temporary_image_tracking_model);
+ mv_image_tracking_model_h temporary_image_tracking_model = NULL;
+ perform_clone_image_tracking_model(
+ temporary_testing_object->entity,
+ &temporary_image_tracking_model);
- testing_object_h added_object = NULL;
- testing_object_create(&added_object);
- testing_object_fill(
- added_object,
- temporary_image_tracking_model,
- OBJECT_TYPE_IMAGE_TRACKING_MODEL,
- SOURCE_TYPE_CLONING,
- temporary_testing_object);
+ testing_object_h added_object = NULL;
+ testing_object_create(&added_object);
+ testing_object_fill(
+ added_object,
+ temporary_image_tracking_model,
+ OBJECT_TYPE_IMAGE_TRACKING_MODEL,
+ SOURCE_TYPE_CLONING,
+ temporary_testing_object);
- add_testing_object(image_tracking_models, added_object);
- break;
+ add_testing_object(image_tracking_models, added_object);
+ break;
+ }
+ case 6: {
+		/* Save an existing tracking model to a file (mv_image_tracking_model_save) */
+ if (image_tracking_models->len <= 0) {
+ printf(
+ "\nFirstly you must create at least one image "
+ "tracking model.\n");
+ break;
}
- case 6: {
- /* Save existed tracking model to the file (mv_image_tracking_model_save) */
- if (image_tracking_models->len <= 0) {
- printf(
- "\nFirstly you must create at least one image "
- "tracking model.\n");
- break;
- }
- testing_object_h temporary_testing_object = NULL;
- select_testing_object(
- image_tracking_models,
- &temporary_testing_object,
- "Select the tracking model you want to save");
+ testing_object_h temporary_testing_object = NULL;
+ select_testing_object(
+ image_tracking_models,
+ &temporary_testing_object,
+ "Select the tracking model you want to save");
- perform_save_image_tracking_model(temporary_testing_object->entity);
+ perform_save_image_tracking_model(temporary_testing_object->entity);
+ break;
+ }
+ case 7: {
+ /* Remove tracking model from created set (mv_image_tracking_model_destroy) */
+ if (image_tracking_models->len <= 0) {
+ printf(
+ "\nFirstly you must create at least one image "
+ "tracking model.\n");
break;
}
- case 7: {
- /* Remove tracking model from created set (mv_image_tracking_model_destroy) */
- if (image_tracking_models->len <= 0) {
- printf(
- "\nFirstly you must create at least one image "
- "tracking model.\n");
- break;
- }
- guint selected_index;
- err = select_testing_object_index(
- image_tracking_models,
- &selected_index,
- "Select the object you want to remove");
+ guint selected_index;
+ err = select_testing_object_index(
+ image_tracking_models,
+ &selected_index,
+ "Select the object you want to remove");
- if (MEDIA_VISION_ERROR_NONE == err) {
- remove_testing_object(image_tracking_models, selected_index);
- printf("\nTracking model successfully removed\n");
- }
- break;
+ if (MEDIA_VISION_ERROR_NONE == err) {
+ remove_testing_object(image_tracking_models, selected_index);
+ printf("\nTracking model successfully removed\n");
+ }
+ break;
+ }
+ case 8: {
+ /* Refresh tracking model (mv_image_tracking_model_refresh) */
+ if (image_tracking_models->len <= 0) {
+ printf(
+ "\nFirstly you must create at least one image "
+ "tracking model.\n");
+ break;
}
- case 8: {
- /* Refresh tracking model (mv_image_tracking_model_refresh) */
- if (image_tracking_models->len <= 0) {
- printf(
- "\nFirstly you must create at least one image "
- "tracking model.\n");
- break;
- }
- testing_object_h temporary_testing_object = NULL;
- select_testing_object(
- image_tracking_models,
- &temporary_testing_object,
- "Select the tracking model you want to refresh");
+ testing_object_h temporary_testing_object = NULL;
+ select_testing_object(
+ image_tracking_models,
+ &temporary_testing_object,
+ "Select the tracking model you want to refresh");
- perform_refresh_image_tracking_model(temporary_testing_object->entity);
+ perform_refresh_image_tracking_model(temporary_testing_object->entity);
+ break;
+ }
+ case 9: {
+ /* Track (mv_image_track) */
+ if (image_tracking_models->len <= 0) {
+ printf(
+ "\nFirstly you must create at least one image "
+ "tracking model.\n");
break;
}
- case 9: {
- /* Track (mv_image_track) */
- if (image_tracking_models->len <= 0) {
- printf(
- "\nFirstly you must create at least one image "
- "tracking model.\n");
- break;
- }
-
- testing_object_h temporary_testing_object = NULL;
- err = select_testing_object(
- image_tracking_models,
- &temporary_testing_object,
- "Select the object which you want to track on video");
- if (MEDIA_VISION_ERROR_NONE == err)
- perform_track(temporary_testing_object->entity);
+ testing_object_h temporary_testing_object = NULL;
+ err = select_testing_object(
+ image_tracking_models,
+ &temporary_testing_object,
+ "Select the object which you want to track on video");
- break;
- }
- case 10: {
- /* Back to the main menu */
- return;
- }
+ if (MEDIA_VISION_ERROR_NONE == err)
+ perform_track(temporary_testing_object->entity);
+ break;
+ }
+ case 10: {
+ /* Back to the main menu */
+ return;
+ }
}
}
}
diff --git a/test/testsuites/surveillance/surveillance_test_suite.c b/test/testsuites/surveillance/surveillance_test_suite.c
index b89301a6..6bf6fa2e 100644
--- a/test/testsuites/surveillance/surveillance_test_suite.c
+++ b/test/testsuites/surveillance/surveillance_test_suite.c
@@ -651,12 +651,12 @@ void detect_person_appeared_cb(
image_data_s image_data = { 0, 0, MEDIA_VISION_COLORSPACE_INVALID };
if (save_results_to_image &&
- (mv_source_get_buffer(source, &out_buffer, &buf_size) ||
- mv_source_get_width(source, &(image_data.image_width)) ||
- mv_source_get_height(source, &(image_data.image_height)) ||
- mv_source_get_colorspace(source, &(image_data.image_colorspace)) ||
- out_buffer == NULL ||
- buf_size == 0)) {
+ (mv_source_get_buffer(source, &out_buffer, &buf_size) ||
+ mv_source_get_width(source, &(image_data.image_width)) ||
+ mv_source_get_height(source, &(image_data.image_height)) ||
+ mv_source_get_colorspace(source, &(image_data.image_colorspace)) ||
+ out_buffer == NULL ||
+ buf_size == 0)) {
PRINT_R("ERROR: Creating out image is impossible.");
return;
@@ -972,12 +972,12 @@ void person_recognized_cb(
image_data_s image_data = { 0, 0, MEDIA_VISION_COLORSPACE_INVALID };
if (save_results_to_image &&
- (mv_source_get_buffer(source, &out_buffer, &buf_size) ||
- mv_source_get_width(source, &(image_data.image_width)) ||
- mv_source_get_height(source, &(image_data.image_height)) ||
- mv_source_get_colorspace(source, &(image_data.image_colorspace)) ||
- out_buffer == NULL ||
- buf_size == 0)) {
+ (mv_source_get_buffer(source, &out_buffer, &buf_size) ||
+ mv_source_get_width(source, &(image_data.image_width)) ||
+ mv_source_get_height(source, &(image_data.image_height)) ||
+ mv_source_get_colorspace(source, &(image_data.image_colorspace)) ||
+ out_buffer == NULL ||
+ buf_size == 0)) {
PRINT_R("ERROR: Creating out image is impossible.");
return;
@@ -1080,12 +1080,12 @@ void movement_detected_cb(
image_data_s image_data = { 0, 0, MEDIA_VISION_COLORSPACE_INVALID };
if (save_results_to_image &&
- (mv_source_get_buffer(source, &out_buffer, &buf_size) ||
- mv_source_get_width(source, &(image_data.image_width)) ||
- mv_source_get_height(source, &(image_data.image_height)) ||
- mv_source_get_colorspace(source, &(image_data.image_colorspace)) ||
- out_buffer == NULL ||
- buf_size == 0)) {
+ (mv_source_get_buffer(source, &out_buffer, &buf_size) ||
+ mv_source_get_width(source, &(image_data.image_width)) ||
+ mv_source_get_height(source, &(image_data.image_height)) ||
+ mv_source_get_colorspace(source, &(image_data.image_colorspace)) ||
+ out_buffer == NULL ||
+ buf_size == 0)) {
PRINT_R("ERROR: Creating out image is impossible.");
if (movement_regions != NULL)