author     Tae-Young Chung <ty83.chung@samsung.com>    2015-10-21 21:05:38 +0900
committer  Tae-Young Chung <ty83.chung@samsung.com>    2015-10-21 21:05:43 +0900
commit     b54972ca08e90f8c31dc86c6c59ae059d9010294 (patch)
tree       621519421650f44d9bb2c9d371059203832be281
parent     cd96fa4795772731a74030246c307a127dfe0eb6 (diff)
download   mediavision-b54972ca08e90f8c31dc86c6c59ae059d9010294.tar.gz
           mediavision-b54972ca08e90f8c31dc86c6c59ae059d9010294.tar.bz2
           mediavision-b54972ca08e90f8c31dc86c6c59ae059d9010294.zip
Change-Id: Ifd9f6e2e312eea7735e17aaf3bd4c416c845fd91
Signed-off-by: Tae-Young Chung <ty83.chung@samsung.com>
-rw-r--r--  mv_barcode/barcode_detector/include/Barcode.h | 90
-rw-r--r--  mv_barcode/barcode_detector/include/BarcodeUtils.h | 11
-rw-r--r--  mv_barcode/barcode_detector/include/mv_barcode_detect_open.h | 11
-rw-r--r--  mv_barcode/barcode_detector/src/Barcode.cpp | 187
-rw-r--r--  mv_barcode/barcode_detector/src/BarcodeUtils.cpp | 146
-rw-r--r--  mv_barcode/barcode_detector/src/mv_barcode_detect_open.cpp | 235
-rw-r--r--  mv_barcode/barcode_detector_lic/include/mv_barcode_detect_lic.h | 10
-rw-r--r--  mv_barcode/barcode_detector_lic/src/mv_barcode_detect_lic.c | 12
-rw-r--r--  mv_barcode/barcode_generator/include/BarcodeGenerator.h | 123
-rw-r--r--  mv_barcode/barcode_generator/include/BarcodeOptions.h | 90
-rw-r--r--  mv_barcode/barcode_generator/include/mv_barcode_generate_open.h | 32
-rw-r--r--  mv_barcode/barcode_generator/src/BarcodeGenerator.cpp | 479
-rw-r--r--  mv_barcode/barcode_generator/src/mv_barcode_generate_open.cpp | 605
-rw-r--r--  mv_barcode/barcode_generator_lic/include/mv_barcode_generate_lic.h | 34
-rw-r--r--  mv_barcode/barcode_generator_lic/src/mv_barcode_generate_lic.c | 39
-rw-r--r--  mv_common/include/EngineConfig.h | 262
-rw-r--r--  mv_common/include/MediaSource.h | 196
-rw-r--r--  mv_common/include/mv_common_c.h | 96
-rw-r--r--  mv_common/src/EngineConfig.cpp | 436
-rw-r--r--  mv_common/src/MediaSource.cpp | 124
-rw-r--r--  mv_common/src/mv_common_c.cpp | 973
-rw-r--r--  mv_face/face/include/FaceDetector.h | 118
-rw-r--r--  mv_face/face/include/FaceExpressionRecognizer.h | 52
-rw-r--r--  mv_face/face/include/FaceEyeCondition.h | 52
-rw-r--r--  mv_face/face/include/FaceRecognitionModel.h | 411
-rw-r--r--  mv_face/face/include/FaceTrackingModel.h | 241
-rw-r--r--  mv_face/face/include/FaceUtil.h | 31
-rw-r--r--  mv_face/face/include/TrackerMedianFlow.h | 126
-rw-r--r--  mv_face/face/include/mv_face_open.h | 114
-rw-r--r--  mv_face/face/src/FaceDetector.cpp | 111
-rw-r--r--  mv_face/face/src/FaceExpressionRecognizer.cpp | 127
-rw-r--r--  mv_face/face/src/FaceEyeCondition.cpp | 364
-rw-r--r--  mv_face/face/src/FaceRecognitionModel.cpp | 823
-rw-r--r--  mv_face/face/src/FaceTrackingModel.cpp | 244
-rw-r--r--  mv_face/face/src/FaceUtil.cpp | 183
-rw-r--r--  mv_face/face/src/TrackerMedianFlow.cpp | 652
-rw-r--r--  mv_face/face/src/mv_face_open.cpp | 1705
-rw-r--r--  mv_face/face_lic/include/mv_face_lic.h | 114
-rw-r--r--  mv_face/face_lic/src/mv_face_lic.c | 154
-rw-r--r--  mv_image/image/include/ImageConfig.h | 133
-rw-r--r--  mv_image/image/include/ImageContourStabilizator.h | 87
-rw-r--r--  mv_image/image/include/ImageMathUtil.h | 25
-rw-r--r--  mv_image/image/include/ImageObject.h | 315
-rw-r--r--  mv_image/image/include/ImageRecognizer.h | 121
-rw-r--r--  mv_image/image/include/ImageTracker.h | 86
-rw-r--r--  mv_image/image/include/ImageTrackingModel.h | 300
-rw-r--r--  mv_image/image/include/mv_image_open.h | 74
-rw-r--r--  mv_image/image/src/ImageConfig.cpp | 95
-rw-r--r--  mv_image/image/src/ImageContourStabilizator.cpp | 487
-rw-r--r--  mv_image/image/src/ImageMathUtil.cpp | 45
-rw-r--r--  mv_image/image/src/ImageObject.cpp | 635
-rw-r--r--  mv_image/image/src/ImageRecognizer.cpp | 476
-rw-r--r--  mv_image/image/src/ImageTracker.cpp | 572
-rw-r--r--  mv_image/image/src/ImageTrackingModel.cpp | 407
-rw-r--r--  mv_image/image/src/mv_image_open.cpp | 1186
-rw-r--r--  packaging/capi-media-vision.spec | 2
-rw-r--r--  test/testsuites/barcode/barcode_test_suite.c | 2083
-rw-r--r--  test/testsuites/face/face_test_suite.c | 3620
-rw-r--r--  test/testsuites/image/image_test_suite.c | 3414
59 files changed, 11324 insertions, 12652 deletions
diff --git a/mv_barcode/barcode_detector/include/Barcode.h b/mv_barcode/barcode_detector/include/Barcode.h
index 6003fb1b..b47cacd2 100644
--- a/mv_barcode/barcode_detector/include/Barcode.h
+++ b/mv_barcode/barcode_detector/include/Barcode.h
@@ -22,64 +22,60 @@
#include <stddef.h>
#include <zbar.h>
-namespace MediaVision
-{
-namespace Barcode
-{
-
+namespace MediaVision {
+namespace Barcode {
/**
* @class Barcode
* @brief Handle to barcode object.
*/
-class Barcode
-{
+class Barcode {
public:
- /**
- * @brief Barcode constructor.
- *
- * @since_tizen 2.4
- * @remarks create copy of bar_obj
- * @param [in] barObj zbar barcode handle
- *
- */
- Barcode(const zbar::Symbol& barObj);
+ /**
+ * @brief Barcode constructor.
+ *
+ * @since_tizen 2.4
+ * @remarks create copy of bar_obj
+ * @param [in] barObj zbar barcode handle
+ *
+ */
+ Barcode(const zbar::Symbol& barObj);
- /**
- * @brief Barcode destructor.
- *
- * @since_tizen 2.4
- */
- ~Barcode();
+ /**
+ * @brief Barcode destructor.
+ *
+ * @since_tizen 2.4
+ */
+ ~Barcode();
- /**
- * @brief Gets encoded message from barcode object.
- *
- * @since_tizen 2.4
- * @return Encoded message
- */
- std::string getMessage(void) const;
+ /**
+ * @brief Gets encoded message from barcode object.
+ *
+ * @since_tizen 2.4
+ * @return Encoded message
+ */
+ std::string getMessage(void) const;
- /**
- * @brief Gets the type of the barcode.
- *
- * @since_tizen 2.4
- * @return Enumeration value corresponding to the barcode type
- */
- mv_barcode_type_e getType(void) const;
+ /**
+ * @brief Gets the type of the barcode.
+ *
+ * @since_tizen 2.4
+ * @return Enumeration value corresponding to the barcode type
+ */
+ mv_barcode_type_e getType(void) const;
- /**
- * @brief Calculates location of barcode handle from zbar.
- * location polygon.
- *
- * @since_tizen 2.4
- * @param [out] location Quadrangle that contains barcode on image
- * @return @c MEDIA_VISION_ERROR_NONE on success,
- * otherwise a negative error value
- */
- int calculateLocation(mv_quadrangle_s& location) const;
+ /**
+ * @brief Calculates location of barcode handle from zbar.
+ * location polygon.
+ *
+ * @since_tizen 2.4
+ * @param [out] location Quadrangle that contains barcode on image
+ * @return @c MEDIA_VISION_ERROR_NONE on success,
+ * otherwise a negative error value
+ */
+ int calculateLocation(mv_quadrangle_s& location) const;
private:
- const zbar::Symbol *m_pBarcodeObj; ///< Pointer to zbar barcode handle
+ const zbar::Symbol *m_pBarcodeObj; ///< Pointer to zbar barcode handle
};
} /* Barcode */
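
The reindented Barcode class is a thin owning wrapper around a copied zbar::Symbol. A minimal consumption sketch follows; the scanner setup and includes are illustrative, not part of this change, and printf stands in for the project's dlog macros:

#include <cstdio>
#include <zbar.h>
#include "mv_common.h"
#include "Barcode.h"

/* Print every symbol found by a previously run zbar::ImageScanner. */
static void printSymbols(zbar::Image& greyImage)
{
    for (zbar::SymbolIterator symbol = greyImage.symbol_begin();
            symbol != greyImage.symbol_end(); ++symbol) {
        MediaVision::Barcode::Barcode barcode(*symbol); /* copies the zbar handle */
        mv_quadrangle_s location;
        if (barcode.calculateLocation(location) == MEDIA_VISION_ERROR_NONE)
            printf("type=%d message=%s\n", (int)barcode.getType(),
                    barcode.getMessage().c_str());
    }
}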
diff --git a/mv_barcode/barcode_detector/include/BarcodeUtils.h b/mv_barcode/barcode_detector/include/BarcodeUtils.h
index 4dea3653..d386cc08 100644
--- a/mv_barcode/barcode_detector/include/BarcodeUtils.h
+++ b/mv_barcode/barcode_detector/include/BarcodeUtils.h
@@ -19,15 +19,12 @@
#include "mv_common.h"
-namespace zbar
-{
- class Image;
+namespace zbar {
+class Image;
}
-namespace MediaVision
-{
-namespace Barcode
-{
+namespace MediaVision {
+namespace Barcode {
/**
* @brief This function converts media vision image handle to zbar image handle.
diff --git a/mv_barcode/barcode_detector/include/mv_barcode_detect_open.h b/mv_barcode/barcode_detector/include/mv_barcode_detect_open.h
index e13d8e7f..c6a34956 100644
--- a/mv_barcode/barcode_detector/include/mv_barcode_detect_open.h
+++ b/mv_barcode/barcode_detector/include/mv_barcode_detect_open.h
@@ -53,12 +53,11 @@ extern "C" {
* @see mv_barcode_detected_cb()
*/
int mv_barcode_detect_open(
- mv_source_h source,
- mv_engine_config_h engine_cfg,
- mv_rectangle_s roi,
- mv_barcode_detected_cb detect_cb,
- void *user_data);
-
+ mv_source_h source,
+ mv_engine_config_h engine_cfg,
+ mv_rectangle_s roi,
+ mv_barcode_detected_cb detect_cb,
+ void *user_data);
#ifdef __cplusplus
}
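
For context on the re-wrapped prototype, a hedged call sketch; the callback parameter order mirrors the detect_cb(...) invocation in mv_barcode_detect_open.cpp below, and the exact const qualifiers of mv_barcode_detected_cb in mv_barcode.h may differ:

#include <cstdio>
#include "mv_common.h"
#include "mv_barcode_detect_open.h"

static void on_detected(mv_source_h source, mv_engine_config_h cfg,
        const mv_quadrangle_s *locations, const char *messages[],
        const mv_barcode_type_e *types, int number, void *user_data)
{
    (void)source; (void)cfg; (void)locations; (void)user_data; /* unused here */
    for (int i = 0; i < number; ++i)
        printf("barcode %d: type=%d message=%s\n",
                i, (int)types[i], messages[i]);
}

/* roi selects the sub-rectangle of the source that gets scanned */
static int detect_all(mv_source_h source, mv_engine_config_h cfg,
        mv_rectangle_s roi)
{
    return mv_barcode_detect_open(source, cfg, roi, on_detected, NULL);
}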
diff --git a/mv_barcode/barcode_detector/src/Barcode.cpp b/mv_barcode/barcode_detector/src/Barcode.cpp
index a35f8bd1..33616102 100644
--- a/mv_barcode/barcode_detector/src/Barcode.cpp
+++ b/mv_barcode/barcode_detector/src/Barcode.cpp
@@ -18,130 +18,109 @@
#include <mv_private.h>
-namespace MediaVision
-{
-namespace Barcode
-{
+namespace MediaVision {
+namespace Barcode {
Barcode::Barcode(const zbar::Symbol& barObj):
- m_pBarcodeObj(new zbar::Symbol(barObj))
+ m_pBarcodeObj(new zbar::Symbol(barObj))
{
; /* NULL */
}
Barcode::~Barcode()
{
- LOGI("Delete ZBar object");
- delete m_pBarcodeObj;
+ LOGI("Delete ZBar object");
+ delete m_pBarcodeObj;
}
std::string Barcode::getMessage(void) const
{
- LOGI("Retrieve message data from ZBar object");
- return m_pBarcodeObj->get_data();
+ LOGI("Retrieve message data from ZBar object");
+ return m_pBarcodeObj->get_data();
}
mv_barcode_type_e Barcode::getType(void) const
{
- zbar::zbar_symbol_type_t barcodeType = m_pBarcodeObj->get_type();
-
- switch (barcodeType)
- {
- case zbar::ZBAR_QRCODE:
- return MV_BARCODE_QR;
-
- case zbar::ZBAR_UPCA:
- return MV_BARCODE_UPC_A;
-
- case zbar::ZBAR_UPCE:
- return MV_BARCODE_UPC_E;
-
- case zbar::ZBAR_EAN8:
- return MV_BARCODE_EAN_8;
-
- case zbar::ZBAR_EAN13:
- return MV_BARCODE_EAN_13;
-
- case zbar::ZBAR_CODE128:
- return MV_BARCODE_CODE128;
-
- case zbar::ZBAR_CODE39:
- return MV_BARCODE_CODE39;
-
- case zbar::ZBAR_I25:
- return MV_BARCODE_I2_5;
-
- default:
- LOGE("ZBar symbol colorspace is not supported by media vision");
- return MV_BARCODE_UNDEFINED;
- }
+ zbar::zbar_symbol_type_t barcodeType = m_pBarcodeObj->get_type();
+
+ switch (barcodeType) {
+ case zbar::ZBAR_QRCODE:
+ return MV_BARCODE_QR;
+ case zbar::ZBAR_UPCA:
+ return MV_BARCODE_UPC_A;
+ case zbar::ZBAR_UPCE:
+ return MV_BARCODE_UPC_E;
+ case zbar::ZBAR_EAN8:
+ return MV_BARCODE_EAN_8;
+ case zbar::ZBAR_EAN13:
+ return MV_BARCODE_EAN_13;
+ case zbar::ZBAR_CODE128:
+ return MV_BARCODE_CODE128;
+ case zbar::ZBAR_CODE39:
+ return MV_BARCODE_CODE39;
+ case zbar::ZBAR_I25:
+ return MV_BARCODE_I2_5;
+ default:
+ LOGE("ZBar symbol colorspace is not supported by media vision");
+ return MV_BARCODE_UNDEFINED;
+ }
}
int Barcode::calculateLocation(mv_quadrangle_s& location) const
{
- const int numberOfVertexes = 4;
-
- const int locationPolygonSize = m_pBarcodeObj->get_location_size();
-
- //polygon location should contain at least 4 points
- if (locationPolygonSize < numberOfVertexes)
- {
- LOGW("Can't compute location of the barcode by %i points (less then %i).", locationPolygonSize, numberOfVertexes);
- return MEDIA_VISION_ERROR_INVALID_OPERATION;
- }
-
- if (locationPolygonSize == numberOfVertexes)
- {
- for (int i = 0; i < numberOfVertexes; ++i)
- {
- location.points[i].x = m_pBarcodeObj->get_location_x(i);
- location.points[i].y = m_pBarcodeObj->get_location_y(i);
- }
-
- return MEDIA_VISION_ERROR_NONE;
- }
-
- //bounding quadrangle is computing by 4 marginal points
- mv_point_s first = {m_pBarcodeObj->get_location_x(0), m_pBarcodeObj->get_location_y(0)};
-
- int minX = first.x;
- int maxX = first.x;
- int minY = first.y;
- int maxY = first.y;
-
- for (int i = 0; i < locationPolygonSize; ++i)
- {
- mv_point_s current = {m_pBarcodeObj->get_location_x(i), m_pBarcodeObj->get_location_y(i)};
- if (current.x < minX)
- {
- minX = current.x;
- }
- else if (current.x > maxX)
- {
- maxX = current.x;
- }
-
- if (current.y < minY)
- {
- minY = current.y;
- }
- else if (current.y > maxY)
- {
- maxY = current.y;
- }
- }
-
- mv_point_s bottomLeft = {minX, maxY};
- mv_point_s bottomRight = {maxX, maxY};
- mv_point_s topRight = {maxX, minY};
- mv_point_s topLeft = {minX, minY};
-
- location.points[0] = topLeft;
- location.points[1] = topRight;
- location.points[2] = bottomRight;
- location.points[3] = bottomLeft;
-
- return MEDIA_VISION_ERROR_NONE;
+ const int numberOfVertexes = 4;
+
+ const int locationPolygonSize = m_pBarcodeObj->get_location_size();
+
+ /*polygon location should contain at least 4 points */
+ if (locationPolygonSize < numberOfVertexes) {
+ LOGW("Can't compute location of the barcode by %i points (less then %i).", locationPolygonSize, numberOfVertexes);
+ return MEDIA_VISION_ERROR_INVALID_OPERATION;
+ }
+
+ if (locationPolygonSize == numberOfVertexes) {
+ for (int i = 0; i < numberOfVertexes; ++i) {
+ location.points[i].x = m_pBarcodeObj->get_location_x(i);
+ location.points[i].y = m_pBarcodeObj->get_location_y(i);
+ }
+
+ return MEDIA_VISION_ERROR_NONE;
+ }
+
+ /* bounding quadrangle is computing by 4 marginal points */
+ mv_point_s first = {m_pBarcodeObj->get_location_x(0), m_pBarcodeObj->get_location_y(0)};
+
+ int minX = first.x;
+ int maxX = first.x;
+ int minY = first.y;
+ int maxY = first.y;
+
+ for (int i = 0; i < locationPolygonSize; ++i) {
+ mv_point_s current = {m_pBarcodeObj->get_location_x(i), m_pBarcodeObj->get_location_y(i)};
+ if (current.x < minX) {
+ minX = current.x;
+ } else if (current.x > maxX) {
+ maxX = current.x;
+ }
+
+ if (current.y < minY) {
+ minY = current.y;
+ } else if (current.y > maxY) {
+ maxY = current.y;
+ }
+ }
+
+ mv_point_s bottomLeft = {minX, maxY};
+ mv_point_s bottomRight = {maxX, maxY};
+ mv_point_s topRight = {maxX, minY};
+ mv_point_s topLeft = {minX, minY};
+
+ location.points[0] = topLeft;
+ location.points[1] = topRight;
+ location.points[2] = bottomRight;
+ location.points[3] = bottomLeft;
+
+ return MEDIA_VISION_ERROR_NONE;
}
} /* Barcode */
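
The fallback branch in calculateLocation() reduces a location polygon with more than four vertices to its axis-aligned bounding box. The same min/max pass in isolation, functionally equivalent to the loop above (standalone sketch, not project code):

#include <vector>
#include <algorithm>

struct Point { int x; int y; };
struct Quad  { Point points[4]; };

/* Bounding box corners in the order used by calculateLocation():
 * top-left, top-right, bottom-right, bottom-left.
 * Assumes a non-empty polygon, as the original code does. */
static Quad boundingQuadrangle(const std::vector<Point>& polygon)
{
    int minX = polygon[0].x, maxX = polygon[0].x;
    int minY = polygon[0].y, maxY = polygon[0].y;
    for (size_t i = 1; i < polygon.size(); ++i) {
        minX = std::min(minX, polygon[i].x);
        maxX = std::max(maxX, polygon[i].x);
        minY = std::min(minY, polygon[i].y);
        maxY = std::max(maxY, polygon[i].y);
    }
    Quad quad = {{ {minX, minY}, {maxX, minY}, {maxX, maxY}, {minX, maxY} }};
    return quad;
}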
diff --git a/mv_barcode/barcode_detector/src/BarcodeUtils.cpp b/mv_barcode/barcode_detector/src/BarcodeUtils.cpp
index 57219eb6..a9da9230 100644
--- a/mv_barcode/barcode_detector/src/BarcodeUtils.cpp
+++ b/mv_barcode/barcode_detector/src/BarcodeUtils.cpp
@@ -21,92 +21,84 @@
#include <zbar.h>
-namespace MediaVision
-{
-namespace Barcode
-{
-
+namespace MediaVision {
+namespace Barcode {
int convertSourceMV2Zbar(mv_source_h mvSource, zbar::Image& zbarSource)
{
- int err = MEDIA_VISION_ERROR_NONE;
- unsigned char *buffer = NULL;
- unsigned int height = 0;
- unsigned int width = 0;
- unsigned int size = 0;
- mv_colorspace_e colorspace = MEDIA_VISION_COLORSPACE_INVALID;
+ int err = MEDIA_VISION_ERROR_NONE;
+ unsigned char *buffer = NULL;
+ unsigned int height = 0;
+ unsigned int width = 0;
+ unsigned int size = 0;
+ mv_colorspace_e colorspace = MEDIA_VISION_COLORSPACE_INVALID;
- err = mv_source_get_colorspace_c(mvSource, &colorspace);
- if (err != MEDIA_VISION_ERROR_NONE)
- {
- LOGW("Can't determine mv_source_h colorspace to convert to ZBar colorspace. Conversion failed");
- return err;
- }
+ err = mv_source_get_colorspace_c(mvSource, &colorspace);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ LOGW("Can't determine mv_source_h colorspace to convert to ZBar colorspace. Conversion failed");
+ return err;
+ }
- switch(colorspace)
- {
- case MEDIA_VISION_COLORSPACE_Y800:
- zbarSource.set_format("Y800");
- break;
- case MEDIA_VISION_COLORSPACE_I420:
- zbarSource.set_format("I420");
- break;
- case MEDIA_VISION_COLORSPACE_NV12:
- zbarSource.set_format("NV12");
- break;
- case MEDIA_VISION_COLORSPACE_YV12:
- zbarSource.set_format("YV12");
- break;
- case MEDIA_VISION_COLORSPACE_NV21:
- zbarSource.set_format("NV21");
- break;
- case MEDIA_VISION_COLORSPACE_YUYV:
- zbarSource.set_format("YUYV");
- break;
- case MEDIA_VISION_COLORSPACE_UYVY:
- zbarSource.set_format("UYVY");
- break;
- case MEDIA_VISION_COLORSPACE_422P:
- zbarSource.set_format("422P");
- break;
- case MEDIA_VISION_COLORSPACE_RGB565:
- zbarSource.set_format("RGBP");
- break;
- case MEDIA_VISION_COLORSPACE_RGB888:
- zbarSource.set_format("RGB3");
- break;
- case MEDIA_VISION_COLORSPACE_RGBA:
- zbarSource.set_format("RGB4");
- break;
- default:
- LOGE("Media vision colorspace is not supported by ZBar symbol");
- return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT;
- }
+ switch(colorspace) {
+ case MEDIA_VISION_COLORSPACE_Y800:
+ zbarSource.set_format("Y800");
+ break;
+ case MEDIA_VISION_COLORSPACE_I420:
+ zbarSource.set_format("I420");
+ break;
+ case MEDIA_VISION_COLORSPACE_NV12:
+ zbarSource.set_format("NV12");
+ break;
+ case MEDIA_VISION_COLORSPACE_YV12:
+ zbarSource.set_format("YV12");
+ break;
+ case MEDIA_VISION_COLORSPACE_NV21:
+ zbarSource.set_format("NV21");
+ break;
+ case MEDIA_VISION_COLORSPACE_YUYV:
+ zbarSource.set_format("YUYV");
+ break;
+ case MEDIA_VISION_COLORSPACE_UYVY:
+ zbarSource.set_format("UYVY");
+ break;
+ case MEDIA_VISION_COLORSPACE_422P:
+ zbarSource.set_format("422P");
+ break;
+ case MEDIA_VISION_COLORSPACE_RGB565:
+ zbarSource.set_format("RGBP");
+ break;
+ case MEDIA_VISION_COLORSPACE_RGB888:
+ zbarSource.set_format("RGB3");
+ break;
+ case MEDIA_VISION_COLORSPACE_RGBA:
+ zbarSource.set_format("RGB4");
+ break;
+ default:
+ LOGE("Media vision colorspace is not supported by ZBar symbol");
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT;
+ }
- err = mv_source_get_buffer_c(mvSource, &buffer, &size);
- if (err != MEDIA_VISION_ERROR_NONE)
- {
- LOGW("Can't get mv_source_h buffer to convert to ZBar image. Conversion failed");
- return err;
- }
+ err = mv_source_get_buffer_c(mvSource, &buffer, &size);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ LOGW("Can't get mv_source_h buffer to convert to ZBar image. Conversion failed");
+ return err;
+ }
- err = mv_source_get_height_c(mvSource, &height);
- if (err != MEDIA_VISION_ERROR_NONE)
- {
- LOGW("Can't get mv_source_h height for conversion. Conversion failed");
- return err;
- }
+ err = mv_source_get_height_c(mvSource, &height);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ LOGW("Can't get mv_source_h height for conversion. Conversion failed");
+ return err;
+ }
- err = mv_source_get_width_c(mvSource, &width);
- if (err != MEDIA_VISION_ERROR_NONE)
- {
- LOGW("Can't get mv_source_h width for conversion. Conversion failed");
- return err;
- }
+ err = mv_source_get_width_c(mvSource, &width);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ LOGW("Can't get mv_source_h width for conversion. Conversion failed");
+ return err;
+ }
- zbarSource.set_size(width, height);
- zbarSource.set_data(buffer, size);
+ zbarSource.set_size(width, height);
+ zbarSource.set_data(buffer, size);
- return err;
+ return err;
}
} /* Barcode */
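
A caller-side sketch of the converter above, mirroring how mv_barcode_detect_open() uses it (includes and error choices are indicative only):

#include <zbar.h>
#include "mv_common.h"
#include "BarcodeUtils.h"

static int scanSource(mv_source_h source)
{
    zbar::Image image;
    int err = MediaVision::Barcode::convertSourceMV2Zbar(source, image);
    if (err != MEDIA_VISION_ERROR_NONE)
        return err;

    /* ZBar scans 8-bit grey data, so convert to the "Y800" fourcc first */
    zbar::Image grey = image.convert("Y800");
    zbar::ImageScanner scanner;
    scanner.set_config(zbar::ZBAR_NONE, zbar::ZBAR_CFG_ENABLE, 1);

    int found = scanner.scan(grey);
    return found < 0 ? MEDIA_VISION_ERROR_INTERNAL : MEDIA_VISION_ERROR_NONE;
}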
diff --git a/mv_barcode/barcode_detector/src/mv_barcode_detect_open.cpp b/mv_barcode/barcode_detector/src/mv_barcode_detect_open.cpp
index dd5e557d..b2357f7a 100644
--- a/mv_barcode/barcode_detector/src/mv_barcode_detect_open.cpp
+++ b/mv_barcode/barcode_detector/src/mv_barcode_detect_open.cpp
@@ -26,128 +26,117 @@
using namespace MediaVision::Barcode;
int mv_barcode_detect_open(
- mv_source_h source,
- mv_engine_config_h engine_cfg,
- mv_rectangle_s roi,
- mv_barcode_detected_cb detect_cb,
- void *user_data)
+ mv_source_h source,
+ mv_engine_config_h engine_cfg,
+ mv_rectangle_s roi,
+ mv_barcode_detected_cb detect_cb,
+ void *user_data)
{
- if (!source || !detect_cb)
- {
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- zbar::Image image;
- int err = convertSourceMV2Zbar(source, image);
- if (err != MEDIA_VISION_ERROR_NONE)
- {
- LOGW("convertSourceMV2Zbar failed");
- return err;
- }
-
- zbar::Image greyImage = image.convert("Y800");
- greyImage.set_crop(roi.point.x, roi.point.y, roi.width, roi.height);
- zbar::ImageScanner scanner;
-
- int target_val;
- err = mv_engine_config_get_int_attribute(engine_cfg, "MV_BARCODE_DETECT_ATTR_TARGET", &target_val);
- if (err != MEDIA_VISION_ERROR_NONE)
- {
- LOGW("mv_engine_config_get_int_attribute failed");
- return err;
- }
-
- /**
- * 0 - linear barcodes and QR codes
- * 1 - only linear barcodes
- * 2 - only QR codes
- */
- switch (target_val)
- {
- case 0:
- scanner.set_config(zbar::ZBAR_NONE, zbar::ZBAR_CFG_ENABLE, 1);
- break;
- case 1:
- scanner.set_config(zbar::ZBAR_NONE, zbar::ZBAR_CFG_ENABLE, 0);
- scanner.set_config(zbar::ZBAR_UPCA, zbar::ZBAR_CFG_ENABLE, 1);
- scanner.set_config(zbar::ZBAR_UPCE, zbar::ZBAR_CFG_ENABLE, 1);
- scanner.set_config(zbar::ZBAR_EAN8, zbar::ZBAR_CFG_ENABLE, 1);
- scanner.set_config(zbar::ZBAR_EAN13, zbar::ZBAR_CFG_ENABLE, 1);
- scanner.set_config(zbar::ZBAR_CODE128, zbar::ZBAR_CFG_ENABLE, 1);
- scanner.set_config(zbar::ZBAR_CODE39, zbar::ZBAR_CFG_ENABLE, 1);
- scanner.set_config(zbar::ZBAR_I25, zbar::ZBAR_CFG_ENABLE, 1);
- break;
- case 2:
- scanner.set_config(zbar::ZBAR_NONE, zbar::ZBAR_CFG_ENABLE, 0);
- scanner.set_config(zbar::ZBAR_QRCODE, zbar::ZBAR_CFG_ENABLE, 1);
- break;
- default:
- LOGW("Unavailabe target value %d", target_val);
- }
-
- int numberOfBarcodes = scanner.scan(greyImage);
- LOGI("ZBar scanner has found %i barcodes on the mv_source_h", numberOfBarcodes);
- mv_quadrangle_s *barcodeLocations = NULL;
- mv_barcode_type_e *types = NULL;
-
- if (numberOfBarcodes == 0)
- {
- LOGI("Call the detect callback for 0 detected barcodes");
- detect_cb(source, engine_cfg, barcodeLocations, NULL, types, numberOfBarcodes, user_data);
- return MEDIA_VISION_ERROR_NONE;
- }
- else if (numberOfBarcodes < 0)
- {
- LOGW("Incorrect number of barcodes (%i), detection is terminated", numberOfBarcodes);
- return MEDIA_VISION_ERROR_INTERNAL;
- }
-
- const char **messagesArray = new const char*[numberOfBarcodes];
- barcodeLocations = new mv_quadrangle_s[numberOfBarcodes];
- types = new mv_barcode_type_e[numberOfBarcodes];
-
- int i = 0;
- //extract results and prepare them for callback passing
- for (zbar::SymbolIterator symbol = greyImage.symbol_begin();
- symbol != greyImage.symbol_end();
- ++symbol, ++i)
- {
- Barcode curBarcode(*symbol);
-
- size_t messageLength = curBarcode.getMessage().size();
- char *curMessage = new char[messageLength + 1];
- curBarcode.getMessage().copy(curMessage, messageLength);
- curMessage[messageLength] = '\0';
- messagesArray[i] = curMessage;
-
- types[i] = curBarcode.getType();
-
- int err = curBarcode.calculateLocation(barcodeLocations[i]);
- if (err != MEDIA_VISION_ERROR_NONE)
- {
- LOGW("Can't determine location for barcode, detection is terminated");
- for (int j = 0; j <= i; ++j)
- {
- delete[] messagesArray[j];
- }
- delete[] messagesArray;
- delete[] barcodeLocations;
- delete[] types;
- return err;
- }
- }
-
- LOGI("Call the detect callback for %i detected barcodes", numberOfBarcodes);
- detect_cb(source, engine_cfg, barcodeLocations, messagesArray, types, numberOfBarcodes, user_data);
-
- LOGI("Clean the memory from barcodes messages, locations and types");
- for (int j = 0; j < numberOfBarcodes; ++j)
- {
- delete[] messagesArray[j];
- }
- delete[] messagesArray;
- delete[] barcodeLocations;
- delete[] types;
-
- return MEDIA_VISION_ERROR_NONE;
+ if (!source || !detect_cb) {
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ zbar::Image image;
+ int err = convertSourceMV2Zbar(source, image);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ LOGW("convertSourceMV2Zbar failed");
+ return err;
+ }
+
+ zbar::Image greyImage = image.convert("Y800");
+ greyImage.set_crop(roi.point.x, roi.point.y, roi.width, roi.height);
+ zbar::ImageScanner scanner;
+
+ int target_val;
+ err = mv_engine_config_get_int_attribute(engine_cfg, "MV_BARCODE_DETECT_ATTR_TARGET", &target_val);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ LOGW("mv_engine_config_get_int_attribute failed");
+ return err;
+ }
+
+ /**
+ * 0 - linear barcodes and QR codes
+ * 1 - only linear barcodes
+ * 2 - only QR codes
+ */
+ switch (target_val) {
+ case 0:
+ scanner.set_config(zbar::ZBAR_NONE, zbar::ZBAR_CFG_ENABLE, 1);
+ break;
+ case 1:
+ scanner.set_config(zbar::ZBAR_NONE, zbar::ZBAR_CFG_ENABLE, 0);
+ scanner.set_config(zbar::ZBAR_UPCA, zbar::ZBAR_CFG_ENABLE, 1);
+ scanner.set_config(zbar::ZBAR_UPCE, zbar::ZBAR_CFG_ENABLE, 1);
+ scanner.set_config(zbar::ZBAR_EAN8, zbar::ZBAR_CFG_ENABLE, 1);
+ scanner.set_config(zbar::ZBAR_EAN13, zbar::ZBAR_CFG_ENABLE, 1);
+ scanner.set_config(zbar::ZBAR_CODE128, zbar::ZBAR_CFG_ENABLE, 1);
+ scanner.set_config(zbar::ZBAR_CODE39, zbar::ZBAR_CFG_ENABLE, 1);
+ scanner.set_config(zbar::ZBAR_I25, zbar::ZBAR_CFG_ENABLE, 1);
+ break;
+ case 2:
+ scanner.set_config(zbar::ZBAR_NONE, zbar::ZBAR_CFG_ENABLE, 0);
+ scanner.set_config(zbar::ZBAR_QRCODE, zbar::ZBAR_CFG_ENABLE, 1);
+ break;
+ default:
+ LOGW("Unavailabe target value %d", target_val);
+ }
+
+ int numberOfBarcodes = scanner.scan(greyImage);
+ LOGI("ZBar scanner has found %i barcodes on the mv_source_h", numberOfBarcodes);
+ mv_quadrangle_s *barcodeLocations = NULL;
+ mv_barcode_type_e *types = NULL;
+
+ if (numberOfBarcodes == 0) {
+ LOGI("Call the detect callback for 0 detected barcodes");
+ detect_cb(source, engine_cfg, barcodeLocations, NULL, types, numberOfBarcodes, user_data);
+ return MEDIA_VISION_ERROR_NONE;
+ } else if (numberOfBarcodes < 0) {
+ LOGW("Incorrect number of barcodes (%i), detection is terminated", numberOfBarcodes);
+ return MEDIA_VISION_ERROR_INTERNAL;
+ }
+
+ const char **messagesArray = new const char*[numberOfBarcodes];
+ barcodeLocations = new mv_quadrangle_s[numberOfBarcodes];
+ types = new mv_barcode_type_e[numberOfBarcodes];
+
+ int i = 0;
+ /* extract results and prepare them for callback passing */
+ for (zbar::SymbolIterator symbol = greyImage.symbol_begin();
+ symbol != greyImage.symbol_end();
+ ++symbol, ++i) {
+ Barcode curBarcode(*symbol);
+
+ size_t messageLength = curBarcode.getMessage().size();
+ char *curMessage = new char[messageLength + 1];
+ curBarcode.getMessage().copy(curMessage, messageLength);
+ curMessage[messageLength] = '\0';
+ messagesArray[i] = curMessage;
+
+ types[i] = curBarcode.getType();
+
+ int err = curBarcode.calculateLocation(barcodeLocations[i]);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ LOGW("Can't determine location for barcode, detection is terminated");
+ for (int j = 0; j <= i; ++j) {
+ delete[] messagesArray[j];
+ }
+ delete[] messagesArray;
+ delete[] barcodeLocations;
+ delete[] types;
+ return err;
+ }
+ }
+
+ LOGI("Call the detect callback for %i detected barcodes", numberOfBarcodes);
+ detect_cb(source, engine_cfg, barcodeLocations, messagesArray, types, numberOfBarcodes, user_data);
+
+ LOGI("Clean the memory from barcodes messages, locations and types");
+ for (int j = 0; j < numberOfBarcodes; ++j) {
+ delete[] messagesArray[j];
+ }
+ delete[] messagesArray;
+ delete[] barcodeLocations;
+ delete[] types;
+
+ return MEDIA_VISION_ERROR_NONE;
}
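
The target switch above is driven by the MV_BARCODE_DETECT_ATTR_TARGET integer attribute (0 = linear barcodes and QR codes, 1 = linear only, 2 = QR only). A hedged caller-side sketch, assuming the standard mv_common engine-config helpers (mv_create_engine_config, mv_engine_config_set_int_attribute, mv_destroy_engine_config), which are not shown in this diff:

static int detect_qr_only(mv_source_h source, mv_rectangle_s roi,
        mv_barcode_detected_cb cb, void *user_data)
{
    mv_engine_config_h cfg = NULL;
    int err = mv_create_engine_config(&cfg);
    if (err != MEDIA_VISION_ERROR_NONE)
        return err;

    /* 2 narrows the ZBar scanner configuration to ZBAR_QRCODE only */
    err = mv_engine_config_set_int_attribute(cfg,
            "MV_BARCODE_DETECT_ATTR_TARGET", 2);
    if (err == MEDIA_VISION_ERROR_NONE)
        err = mv_barcode_detect_open(source, cfg, roi, cb, user_data);

    mv_destroy_engine_config(cfg);
    return err;
}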
diff --git a/mv_barcode/barcode_detector_lic/include/mv_barcode_detect_lic.h b/mv_barcode/barcode_detector_lic/include/mv_barcode_detect_lic.h
index b1197239..40d71636 100644
--- a/mv_barcode/barcode_detector_lic/include/mv_barcode_detect_lic.h
+++ b/mv_barcode/barcode_detector_lic/include/mv_barcode_detect_lic.h
@@ -50,11 +50,11 @@ extern "C" {
* @see mv_barcode_detected_cb()
*/
int mv_barcode_detect_lic(
- mv_source_h source,
- mv_engine_config_h engine_cfg,
- mv_rectangle_s roi,
- mv_barcode_detected_cb detect_cb,
- void *user_data);
+ mv_source_h source,
+ mv_engine_config_h engine_cfg,
+ mv_rectangle_s roi,
+ mv_barcode_detected_cb detect_cb,
+ void *user_data);
#ifdef __cplusplus
}
diff --git a/mv_barcode/barcode_detector_lic/src/mv_barcode_detect_lic.c b/mv_barcode/barcode_detector_lic/src/mv_barcode_detect_lic.c
index 5dc9fc02..e54a69ee 100644
--- a/mv_barcode/barcode_detector_lic/src/mv_barcode_detect_lic.c
+++ b/mv_barcode/barcode_detector_lic/src/mv_barcode_detect_lic.c
@@ -17,11 +17,11 @@
#include "mv_barcode_detect_lic.h"
int mv_barcode_detect_lic(
- mv_source_h source,
- mv_engine_config_h engine_cfg,
- mv_rectangle_s roi,
- mv_barcode_detected_cb detect_cb,
- void *user_data)
+ mv_source_h source,
+ mv_engine_config_h engine_cfg,
+ mv_rectangle_s roi,
+ mv_barcode_detected_cb detect_cb,
+ void *user_data)
{
- return MEDIA_VISION_ERROR_NOT_SUPPORTED;
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED;
}
diff --git a/mv_barcode/barcode_generator/include/BarcodeGenerator.h b/mv_barcode/barcode_generator/include/BarcodeGenerator.h
index 40659aea..9fb0104b 100644
--- a/mv_barcode/barcode_generator/include/BarcodeGenerator.h
+++ b/mv_barcode/barcode_generator/include/BarcodeGenerator.h
@@ -26,78 +26,73 @@
* @brief This file contains the BarcodeGenerator class.
*/
-namespace MediaVision
-{
-namespace Barcode
-{
-
+namespace MediaVision {
+namespace Barcode {
/**
* @brief This class implements barcode generation.
* @details 1D Barcodes and 2D QR codes are supported.
*
* @since_tizen 2.4
*/
-class BarcodeGenerator
-{
+class BarcodeGenerator {
public:
+ /**
+ * @brief This method generates Barcodes image according to options.
+ *
+ * @since_tizen 2.4
+ * @param [in] imageFileName Image file name which will be generated
+ * @param [in] imageFormat Image file format which will be generated
+ * @param [in] imageWidth Image file width which will be generated
+ * @param [in] imageHeight Image file height which will be generated
+ * @param [in] message Input message to be encoded
+ * @param [in] type Barcode type (1D barcode or 2D QR code)
+ * @param [in] encodingMode Encoding mode (for QR codes only)
+ * @param [in] correctionLevel Error correction level (for QR codes only)
+ * @param [in] qrVersion QR code version (1 ~ 40, 0 for 1D barcodes)
+ * @param [in] showText Show text or not
+ * @return BARCODE_ERROR_NONE from BarcodeError which is 0 if success,
+ * BarcodeError value otherwise
+ */
+ static int generateBarcodeToImage(
+ const std::string& imageFileName,
+ BarcodeImageFormat imageFormat,
+ const int imageWidth,
+ const int imageHeight,
+ const std::string& message,
+ BarcodeType type,
+ BarcodeQREncodingMode encodingMode = BARCODE_QR_MODE_UNAVAILABLE,
+ BarcodeQRErrorCorrectionLevel correctionLevel = BARCODE_QR_ECC_UNAVAILABLE,
+ int qrVersion = 0,
+ int showText = 0);
- /**
- * @brief This method generates Barcodes image according to options.
- *
- * @since_tizen 2.4
- * @param [in] imageFileName Image file name which will be generated
- * @param [in] imageFormat Image file format which will be generated
- * @param [in] imageWidth Image file width which will be generated
- * @param [in] imageHeight Image file height which will be generated
- * @param [in] message Input message to be encoded
- * @param [in] type Barcode type (1D barcode or 2D QR code)
- * @param [in] encodingMode Encoding mode (for QR codes only)
- * @param [in] correctionLevel Error correction level (for QR codes only)
- * @param [in] qrVersion QR code version (1 ~ 40, 0 for 1D barcodes)
- * @param [in] showText Show text or not
- * @return BARCODE_ERROR_NONE from BarcodeError which is 0 if success,
- * BarcodeError value otherwise
- */
- static int generateBarcodeToImage(
- const std::string& imageFileName,
- BarcodeImageFormat imageFormat,
- const int imageWidth,
- const int imageHeight,
- const std::string& message,
- BarcodeType type,
- BarcodeQREncodingMode encodingMode = BARCODE_QR_MODE_UNAVAILABLE,
- BarcodeQRErrorCorrectionLevel correctionLevel = BARCODE_QR_ECC_UNAVAILABLE,
- int qrVersion = 0,
- int showText = 0);
-
- /**
- * @brief This method generates Barcodes image buffer according to options.
- *
- * @since_tizen 2.4
- * @param [out] imageBuffer Image buffer with image to be generated
- * @param [out] imageWidth Image buffer width which will be generated
- * @param [out] imageHeight Image buffer height which will be generated
- * @param [out] imageChannels Image buffer channels number which will be generated
- * @param [in] message Input message to be encoded
- * @param [in] type Barcode type (1D barcode or 2D QR code)
- * @param [in] encodingMode Encoding mode (for QR codes only)
- * @param [in] correctionLevel Error correction level (for QR codes only)
- * @param [in] qrVersion QR code version (1 ~ 40, 0 for 1D barcodes)
- * @param [in] showText Show text or not
- * @return BARCODE_ERROR_NONE from BarcodeError which is 0 if success,
- * BarcodeError value otherwise
- */
- static int generateBarcodeToBuffer(
- unsigned char **imageBuffer,
- unsigned int *imageWidth,
- unsigned int *imageHeight,
- unsigned int *imageChannels,
- const std::string& message,
- BarcodeType type,
- BarcodeQREncodingMode encodingMode = BARCODE_QR_MODE_UNAVAILABLE,
- BarcodeQRErrorCorrectionLevel correctionLevel = BARCODE_QR_ECC_UNAVAILABLE,
- int qrVersion = 0,
- int showText = 0);
+ /**
+ * @brief This method generates Barcodes image buffer according to options.
+ *
+ * @since_tizen 2.4
+ * @param [out] imageBuffer Image buffer with image to be generated
+ * @param [out] imageWidth Image buffer width which will be generated
+ * @param [out] imageHeight Image buffer height which will be generated
+ * @param [out] imageChannels Image buffer channels number which will be generated
+ * @param [in] message Input message to be encoded
+ * @param [in] type Barcode type (1D barcode or 2D QR code)
+ * @param [in] encodingMode Encoding mode (for QR codes only)
+ * @param [in] correctionLevel Error correction level (for QR codes only)
+ * @param [in] qrVersion QR code version (1 ~ 40, 0 for 1D barcodes)
+ * @param [in] showText Show text or not
+ * @return BARCODE_ERROR_NONE from BarcodeError which is 0 if success,
+ * BarcodeError value otherwise
+ */
+ static int generateBarcodeToBuffer(
+ unsigned char **imageBuffer,
+ unsigned int *imageWidth,
+ unsigned int *imageHeight,
+ unsigned int *imageChannels,
+ const std::string& message,
+ BarcodeType type,
+ BarcodeQREncodingMode encodingMode = BARCODE_QR_MODE_UNAVAILABLE,
+ BarcodeQRErrorCorrectionLevel correctionLevel = BARCODE_QR_ECC_UNAVAILABLE,
+ int qrVersion = 0,
+ int showText = 0);
};
} /* Barcode */
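
A minimal usage sketch for the static generator declared above (output path, message, and dimensions are placeholders):

#include "BarcodeGenerator.h"

using namespace MediaVision::Barcode;

static int writeSampleQR(void)
{
    /* Encodes "TIZEN" into a 300x300 PNG; the generator appends the
     * extension if the file name lacks one. */
    return BarcodeGenerator::generateBarcodeToImage(
            "/tmp/sample_qr",        /* placeholder path */
            BARCODE_IMAGE_PNG,
            300, 300,
            "TIZEN",
            BARCODE_QR,
            BARCODE_QR_MODE_ALPHANUMERIC,
            BARCODE_QR_ECC_MEDIUM,
            4,                       /* qrVersion */
            0);                      /* showText */
}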
diff --git a/mv_barcode/barcode_generator/include/BarcodeOptions.h b/mv_barcode/barcode_generator/include/BarcodeOptions.h
index c6435500..2ebf4025 100644
--- a/mv_barcode/barcode_generator/include/BarcodeOptions.h
+++ b/mv_barcode/barcode_generator/include/BarcodeOptions.h
@@ -22,26 +22,22 @@
* @brief This file contains the Barcode options.
*/
-namespace MediaVision
-{
-namespace Barcode
-{
-
+namespace MediaVision {
+namespace Barcode {
/**
* @brief The Barcode type enumeration.
*
* @since_tizen 2.4
*/
-enum BarcodeType
-{
- BARCODE_QR = 58,
- BARCODE_UPCA = 34,
- BARCODE_UPCE = 37,
- BARCODE_EAN8 = 13,
- BARCODE_EAN13 = BARCODE_EAN8,
- BARCODE_CODE39 = 8,
- BARCODE_CODE128 = 20,
- BARCODE_INTERLEAVE_2_5 = 3
+enum BarcodeType {
+ BARCODE_QR = 58,
+ BARCODE_UPCA = 34,
+ BARCODE_UPCE = 37,
+ BARCODE_EAN8 = 13,
+ BARCODE_EAN13 = BARCODE_EAN8,
+ BARCODE_CODE39 = 8,
+ BARCODE_CODE128 = 20,
+ BARCODE_INTERLEAVE_2_5 = 3
};
/**
@@ -50,30 +46,27 @@ enum BarcodeType
* @since_tizen 2.4
* @remarks This is unavailable for 1D barcodes.
*/
-enum BarcodeQRErrorCorrectionLevel
-{
- BARCODE_QR_ECC_UNAVAILABLE = 0,
- BARCODE_QR_ECC_LOW = 1,
- BARCODE_QR_ECC_MEDIUM = 2,
- BARCODE_QR_ECC_QUARTILE = 3,
- BARCODE_QR_ECC_HIGH = 4
+enum BarcodeQRErrorCorrectionLevel {
+ BARCODE_QR_ECC_UNAVAILABLE = 0,
+ BARCODE_QR_ECC_LOW = 1,
+ BARCODE_QR_ECC_MEDIUM = 2,
+ BARCODE_QR_ECC_QUARTILE = 3,
+ BARCODE_QR_ECC_HIGH = 4
};
-
/**
* @brief The Barcode encoding mode enumeration.
*
* @since_tizen 2.4
* @remarks This is unavailable for 1D barcodes.
*/
-enum BarcodeQREncodingMode
-{
- BARCODE_QR_MODE_NUMERIC = 1,
- BARCODE_QR_MODE_ALPHANUMERIC = 1,
- BARCODE_QR_MODE_BYTE = 0,
- BARCODE_QR_MODE_UTF8 = 1,
- BARCODE_QR_MODE_KANJI = 3,
- BARCODE_QR_MODE_UNAVAILABLE
+enum BarcodeQREncodingMode {
+ BARCODE_QR_MODE_NUMERIC = 1,
+ BARCODE_QR_MODE_ALPHANUMERIC = 1,
+ BARCODE_QR_MODE_BYTE = 0,
+ BARCODE_QR_MODE_UTF8 = 1,
+ BARCODE_QR_MODE_KANJI = 3,
+ BARCODE_QR_MODE_UNAVAILABLE
};
/**
@@ -81,11 +74,10 @@ enum BarcodeQREncodingMode
*
* @since_tizen 2.4
*/
-enum BarcodeImageFormat
-{
- BARCODE_IMAGE_JPG,
- BARCODE_IMAGE_PNG,
- BARCODE_IMAGE_BMP
+enum BarcodeImageFormat {
+ BARCODE_IMAGE_JPG,
+ BARCODE_IMAGE_PNG,
+ BARCODE_IMAGE_BMP
};
/**
@@ -93,8 +85,7 @@ enum BarcodeImageFormat
*
* @since_tizen 2.4
*/
-enum BarcodeGenTextOpt
-{
+enum BarcodeGenTextOpt {
BARCODE_GEN_TEXT_INVISIBLE,
BARCODE_GEN_TEXT_VISIBLE
};
@@ -104,18 +95,17 @@ enum BarcodeGenTextOpt
*
* @since_tizen 2.4
*/
-enum BarcodeError
-{
- BARCODE_ERROR_NONE = 0,
- BARCODE_WARNING_INVALID_OPTION = 2,
- BARCODE_ERROR_TOO_LONG = 5,
- BARCODE_ERROR_INVALID_DATA = 6,
- BARCODE_ERROR_INVALID_CHECK = 7,
- BARCODE_ERROR_INVALID_OPTION = 8,
- BARCODE_ERROR_ENCODING_PROBLEM = 9,
- BARCODE_ERROR_FILE_ACCESS = 10,
- BARCODE_ERROR_MEMORY = 11,
- BARCODE_ERROR_INVALID_PATH =12,
+enum BarcodeError {
+ BARCODE_ERROR_NONE = 0,
+ BARCODE_WARNING_INVALID_OPTION = 2,
+ BARCODE_ERROR_TOO_LONG = 5,
+ BARCODE_ERROR_INVALID_DATA = 6,
+ BARCODE_ERROR_INVALID_CHECK = 7,
+ BARCODE_ERROR_INVALID_OPTION = 8,
+ BARCODE_ERROR_ENCODING_PROBLEM = 9,
+ BARCODE_ERROR_FILE_ACCESS = 10,
+ BARCODE_ERROR_MEMORY = 11,
+ BARCODE_ERROR_INVALID_PATH = 12,
};
} /* Barcode */
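
The BarcodeError values mirror ZInt's return codes (0 success, 2 warning, 5..12 errors). A small helper sketch distinguishing recoverable warnings from hard failures, assuming ZInt's convention that a warning means encoding still succeeded with an adjusted option:

#include "BarcodeOptions.h"

/* True when a ZInt return code reported by createBarcode() is fatal. */
static bool isHardError(int zintReturn)
{
    using namespace MediaVision::Barcode;
    return zintReturn != BARCODE_ERROR_NONE &&
            zintReturn != BARCODE_WARNING_INVALID_OPTION;
}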
diff --git a/mv_barcode/barcode_generator/include/mv_barcode_generate_open.h b/mv_barcode/barcode_generator/include/mv_barcode_generate_open.h
index d3134ace..bb1e8b6b 100644
--- a/mv_barcode/barcode_generator/include/mv_barcode_generate_open.h
+++ b/mv_barcode/barcode_generator/include/mv_barcode_generate_open.h
@@ -55,12 +55,12 @@ extern "C" {
* @see mv_barcode_generate_image_open()
*/
int mv_barcode_generate_source_open(mv_engine_config_h engine_cfg,
- const char *message,
- mv_barcode_type_e type,
- mv_barcode_qr_mode_e qr_enc_mode,
- mv_barcode_qr_ecc_e qr_ecc,
- int qr_version,
- mv_source_h image);
+ const char *message,
+ mv_barcode_type_e type,
+ mv_barcode_qr_mode_e qr_enc_mode,
+ mv_barcode_qr_ecc_e qr_ecc,
+ int qr_version,
+ mv_source_h image);
/**
* @brief Generates image file with barcode.
@@ -91,16 +91,16 @@ int mv_barcode_generate_source_open(mv_engine_config_h engine_cfg,
* @see mv_barcode_generate_source_open()
*/
int mv_barcode_generate_image_open(
- mv_engine_config_h engine_cfg,
- const char *message,
- int image_width,
- int image_height,
- mv_barcode_type_e type,
- mv_barcode_qr_mode_e qr_enc_mode,
- mv_barcode_qr_ecc_e qr_ecc,
- int qr_version,
- const char *image_path,
- mv_barcode_image_format_e image_format);
+ mv_engine_config_h engine_cfg,
+ const char *message,
+ int image_width,
+ int image_height,
+ mv_barcode_type_e type,
+ mv_barcode_qr_mode_e qr_enc_mode,
+ mv_barcode_qr_ecc_e qr_ecc,
+ int qr_version,
+ const char *image_path,
+ mv_barcode_image_format_e image_format);
#ifdef __cplusplus
}
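
A hedged call sketch for mv_barcode_generate_image_open(); the output path is a placeholder, and the MV_BARCODE_QR_MODE_UNAVAILABLE, MV_BARCODE_QR_ECC_UNAVAILABLE, and MV_BARCODE_IMAGE_FORMAT_PNG enumerators are assumed from mv_barcode_type.h, since they are not shown in this diff:

static int generate_sample_code128(mv_engine_config_h cfg)
{
    return mv_barcode_generate_image_open(
            cfg,
            "0123456789",                    /* message */
            400, 200,                        /* image_width, image_height */
            MV_BARCODE_CODE128,
            MV_BARCODE_QR_MODE_UNAVAILABLE,  /* QR-only options are */
            MV_BARCODE_QR_ECC_UNAVAILABLE,   /* ignored for 1D types */
            0,                               /* qr_version: unused for 1D */
            "/tmp/sample_code128",           /* placeholder path */
            MV_BARCODE_IMAGE_FORMAT_PNG);
}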
diff --git a/mv_barcode/barcode_generator/src/BarcodeGenerator.cpp b/mv_barcode/barcode_generator/src/BarcodeGenerator.cpp
index 624a3f51..522f65f0 100644
--- a/mv_barcode/barcode_generator/src/BarcodeGenerator.cpp
+++ b/mv_barcode/barcode_generator/src/BarcodeGenerator.cpp
@@ -28,277 +28,256 @@
#include <vector>
#include <unistd.h>
-namespace MediaVision
-{
-namespace Barcode
-{
-
-namespace
-{
+namespace MediaVision {
+namespace Barcode {
+namespace {
int getFormatEncodingInfo(
- BarcodeImageFormat imageFormat,
- std::vector<std::string>& extensions,
- std::vector<int>& compressionParams)
+ BarcodeImageFormat imageFormat,
+ std::vector<std::string>& extensions,
+ std::vector<int>& compressionParams)
{
- static const int PNGCompressionLevel = 3;
-
- compressionParams.clear();
- extensions.clear();
-
- switch (imageFormat)
- {
- case BARCODE_IMAGE_PNG:
- compressionParams.push_back(CV_IMWRITE_PNG_COMPRESSION);
- compressionParams.push_back(PNGCompressionLevel);
- extensions.push_back(".png");
- break;
- case BARCODE_IMAGE_JPG:
- extensions.push_back(".jpg");
- extensions.push_back(".jpeg");
- extensions.push_back(".jpe");
- break;
- case BARCODE_IMAGE_BMP:
- extensions.push_back(".bmp");
- extensions.push_back(".dib");
- break;
- default:
- return BARCODE_ERROR_INVALID_OPTION;
- }
- return BARCODE_ERROR_NONE;
+ static const int PNGCompressionLevel = 3;
+
+ compressionParams.clear();
+ extensions.clear();
+
+ switch (imageFormat) {
+ case BARCODE_IMAGE_PNG:
+ compressionParams.push_back(CV_IMWRITE_PNG_COMPRESSION);
+ compressionParams.push_back(PNGCompressionLevel);
+ extensions.push_back(".png");
+ break;
+ case BARCODE_IMAGE_JPG:
+ extensions.push_back(".jpg");
+ extensions.push_back(".jpeg");
+ extensions.push_back(".jpe");
+ break;
+ case BARCODE_IMAGE_BMP:
+ extensions.push_back(".bmp");
+ extensions.push_back(".dib");
+ break;
+ default:
+ return BARCODE_ERROR_INVALID_OPTION;
+ }
+ return BARCODE_ERROR_NONE;
}
int createBarcode(
- const std::string& message,
- BarcodeType type,
- BarcodeQREncodingMode encodingMode,
- BarcodeQRErrorCorrectionLevel correctionLevel,
- int qrVersion,
- int showText,
- zint_symbol *symbol)
+ const std::string& message,
+ BarcodeType type,
+ BarcodeQREncodingMode encodingMode,
+ BarcodeQRErrorCorrectionLevel correctionLevel,
+ int qrVersion,
+ int showText,
+ zint_symbol *symbol)
{
- // set input values
- symbol->symbology = type;
- symbol->input_mode = encodingMode;
- symbol->option_1 = correctionLevel;
- symbol->option_2 = qrVersion;
- symbol->scale = 1;
- symbol->show_hrt = showText;
-
- // set default values
- std::strncpy(symbol->fgcolour, "000000", 10);
- std::strncpy(symbol->bgcolour, "ffffff", 10);
- symbol->border_width = 1;
- symbol->height = 50;
-
- if (type == BARCODE_QR) {
- symbol->whitespace_width = 0;
- } else {
- symbol->whitespace_width = 10;
- }
-
- // create barcode
- const int rotationAngle = 0;
- int error = ZBarcode_Encode_and_Buffer(
- symbol,
- (unsigned char*)(message.c_str()),
- message.length(),
- rotationAngle);
-
- return error;
+ /* set input values */
+ symbol->symbology = type;
+ symbol->input_mode = encodingMode;
+ symbol->option_1 = correctionLevel;
+ symbol->option_2 = qrVersion;
+ symbol->scale = 1;
+ symbol->show_hrt = showText;
+
+ /* set default values */
+ std::strncpy(symbol->fgcolour, "000000", 10);
+ std::strncpy(symbol->bgcolour, "ffffff", 10);
+ symbol->border_width = 1;
+ symbol->height = 50;
+
+ if (type == BARCODE_QR) {
+ symbol->whitespace_width = 0;
+ } else {
+ symbol->whitespace_width = 10;
+ }
+
+ /* create barcode */
+ const int rotationAngle = 0;
+ int error = ZBarcode_Encode_and_Buffer(
+ symbol,
+ (unsigned char*)(message.c_str()),
+ message.length(),
+ rotationAngle);
+
+ return error;
}
int writeBufferToImageFile(
- zint_symbol *symbol,
- const std::string& imageFileName,
- BarcodeImageFormat imageFormat,
- const int imageWidth,
- const int imageHeight)
+ zint_symbol *symbol,
+ const std::string& imageFileName,
+ BarcodeImageFormat imageFormat,
+ const int imageWidth,
+ const int imageHeight)
{
- if (imageWidth <= 0 || imageHeight <= 0)
- {
- LOGE("Barcode image size is invalid: %i x %i. Terminate write to "
- "the image operation", imageWidth, imageHeight);
- return BARCODE_ERROR_INVALID_DATA;
- }
-
- /* find directory */
- std::string prefix_imageFileName = imageFileName.substr(0, imageFileName.find_last_of('/'));
- LOGD("prefix_path: %s", prefix_imageFileName.c_str());
-
- /* check the directory is available */
- if (access(prefix_imageFileName.c_str(),F_OK))
- {
- LOGE("Can't save barcode image to the path. The path[%s] doesn't existed.", prefix_imageFileName.c_str());
- return BARCODE_ERROR_INVALID_PATH;
- }
-
- // check current extension
- std::vector<std::string> expectedExtensions;
- std::vector<int> compressionParams;
-
- int error = getFormatEncodingInfo(imageFormat,
- expectedExtensions, compressionParams);
-
- if (BARCODE_ERROR_NONE != error || expectedExtensions.empty())
- {
- LOGE("Image format is incorrectly specified or not supported");
- return error;
- }
-
- bool rightExtensionFlag = false;
-
- std::string resultFilePath(imageFileName);
-
- for (size_t extNum = 0; extNum < expectedExtensions.size(); ++extNum)
- {
- if (resultFilePath.size() >= expectedExtensions[extNum].size())
- {
- std::string givenExtension = resultFilePath.substr(
- resultFilePath.length() - expectedExtensions[extNum].size(),
- expectedExtensions[extNum].size());
-
- std::transform(
- givenExtension.begin(), givenExtension.end(),
- givenExtension.begin(), ::tolower);
-
- if (givenExtension == expectedExtensions[extNum])
- {
- rightExtensionFlag = true;
- break;
- }
- }
- }
-
- if (!rightExtensionFlag)
- {
- resultFilePath += expectedExtensions[0];
- }
-
- cv::Mat image(symbol->bitmap_height, symbol->bitmap_width, CV_8UC3, symbol->bitmap);
- cv::resize(image, image, cv::Size(imageWidth, imageHeight), 0, 0, cv::INTER_AREA);
-
- error = cv::imwrite(resultFilePath, image, compressionParams) ?
- BARCODE_ERROR_NONE : BARCODE_ERROR_INVALID_DATA;
-
- if (BARCODE_ERROR_NONE != error)
- {
- LOGE("Write barcode image to file %s operation failed.",
- resultFilePath.c_str());
- return error;
- }
-
- return error;
+ if (imageWidth <= 0 || imageHeight <= 0) {
+ LOGE("Barcode image size is invalid: %i x %i. Terminate write to "
+ "the image operation", imageWidth, imageHeight);
+ return BARCODE_ERROR_INVALID_DATA;
+ }
+
+ /* find directory */
+ std::string prefix_imageFileName = imageFileName.substr(0, imageFileName.find_last_of('/'));
+ LOGD("prefix_path: %s", prefix_imageFileName.c_str());
+
+ /* check the directory is available */
+ if (access(prefix_imageFileName.c_str(), F_OK)) {
+ LOGE("Can't save barcode image to the path. The path[%s] doesn't existed.", prefix_imageFileName.c_str());
+ return BARCODE_ERROR_INVALID_PATH;
+ }
+
+ /* check current extension */
+ std::vector<std::string> expectedExtensions;
+ std::vector<int> compressionParams;
+
+ int error = getFormatEncodingInfo(imageFormat,
+ expectedExtensions, compressionParams);
+
+ if (BARCODE_ERROR_NONE != error || expectedExtensions.empty()) {
+ LOGE("Image format is incorrectly specified or not supported");
+ return error;
+ }
+
+ bool rightExtensionFlag = false;
+
+ std::string resultFilePath(imageFileName);
+
+ for (size_t extNum = 0; extNum < expectedExtensions.size(); ++extNum) {
+ if (resultFilePath.size() >= expectedExtensions[extNum].size()) {
+ std::string givenExtension = resultFilePath.substr(
+ resultFilePath.length() - expectedExtensions[extNum].size(),
+ expectedExtensions[extNum].size());
+
+ std::transform(
+ givenExtension.begin(), givenExtension.end(),
+ givenExtension.begin(), ::tolower);
+
+ if (givenExtension == expectedExtensions[extNum]) {
+ rightExtensionFlag = true;
+ break;
+ }
+ }
+ }
+
+ if (!rightExtensionFlag) {
+ resultFilePath += expectedExtensions[0];
+ }
+
+ cv::Mat image(symbol->bitmap_height, symbol->bitmap_width, CV_8UC3, symbol->bitmap);
+ cv::resize(image, image, cv::Size(imageWidth, imageHeight), 0, 0, cv::INTER_AREA);
+
+ error = cv::imwrite(resultFilePath, image, compressionParams) ?
+ BARCODE_ERROR_NONE : BARCODE_ERROR_INVALID_DATA;
+
+ if (BARCODE_ERROR_NONE != error) {
+ LOGE("Write barcode image to file %s operation failed.",
+ resultFilePath.c_str());
+ return error;
+ }
+
+ return error;
}
} /* anonymous namespace */
int BarcodeGenerator::generateBarcodeToImage(
- const std::string& imageFileName,
- BarcodeImageFormat imageFormat,
- const int imageWidth,
- const int imageHeight,
- const std::string& message,
- BarcodeType type,
- BarcodeQREncodingMode encodingMode,
- BarcodeQRErrorCorrectionLevel correctionLevel,
- int qrVersion,
- int showText)
+ const std::string& imageFileName,
+ BarcodeImageFormat imageFormat,
+ const int imageWidth,
+ const int imageHeight,
+ const std::string& message,
+ BarcodeType type,
+ BarcodeQREncodingMode encodingMode,
+ BarcodeQRErrorCorrectionLevel correctionLevel,
+ int qrVersion,
+ int showText)
{
- zint_symbol *symbol = ZBarcode_Create();
-
- if(symbol == NULL)
- {
- LOGE("ZBarcode creation failed");
-
- return BARCODE_ERROR_ENCODING_PROBLEM;
- }
-
- int error = createBarcode(
- message,
- type,
- encodingMode,
- correctionLevel,
- qrVersion,
- showText,
- symbol);
-
- if (error != BARCODE_ERROR_NONE)
- {
- LOGE("Barcode creation failed, clean memory");
- ZBarcode_Delete(symbol);
- return error;
- }
-
- error = writeBufferToImageFile(
- symbol,
- imageFileName,
- imageFormat,
- imageWidth,
- imageHeight);
- if (error != BARCODE_ERROR_NONE)
- {
- LOGE("Barcode [%s] file write fail, clean memory", imageFileName.c_str());
- }
- else
- {
- LOGI("Barcode image [%s] is successfully generated, clean memory", imageFileName.c_str());
- }
-
- ZBarcode_Delete(symbol);
-
- return error;
+ zint_symbol *symbol = ZBarcode_Create();
+
+ if(symbol == NULL) {
+ LOGE("ZBarcode creation failed");
+ return BARCODE_ERROR_ENCODING_PROBLEM;
+ }
+
+ int error = createBarcode(
+ message,
+ type,
+ encodingMode,
+ correctionLevel,
+ qrVersion,
+ showText,
+ symbol);
+
+ if (error != BARCODE_ERROR_NONE) {
+ LOGE("Barcode creation failed, clean memory");
+ ZBarcode_Delete(symbol);
+ return error;
+ }
+
+ error = writeBufferToImageFile(
+ symbol,
+ imageFileName,
+ imageFormat,
+ imageWidth,
+ imageHeight);
+ if (error != BARCODE_ERROR_NONE) {
+ LOGE("Barcode [%s] file write fail, clean memory", imageFileName.c_str());
+ } else {
+ LOGI("Barcode image [%s] is successfully generated, clean memory", imageFileName.c_str());
+ }
+
+ ZBarcode_Delete(symbol);
+
+ return error;
}
int BarcodeGenerator::generateBarcodeToBuffer(
- unsigned char **imageBuffer,
- unsigned int *imageWidth,
- unsigned int *imageHeight,
- unsigned int *imageChannels,
- const std::string& message,
- BarcodeType type,
- BarcodeQREncodingMode encodingMode,
- BarcodeQRErrorCorrectionLevel correctionLevel,
- int qrVersion,
- int showText)
+ unsigned char **imageBuffer,
+ unsigned int *imageWidth,
+ unsigned int *imageHeight,
+ unsigned int *imageChannels,
+ const std::string& message,
+ BarcodeType type,
+ BarcodeQREncodingMode encodingMode,
+ BarcodeQRErrorCorrectionLevel correctionLevel,
+ int qrVersion,
+ int showText)
{
- zint_symbol *symbol = ZBarcode_Create();
-
- if(symbol == NULL)
- {
- LOGE("ZBarcode creation failed");
-
- return BARCODE_ERROR_ENCODING_PROBLEM;
- }
-
- int error = createBarcode(
- message,
- type,
- encodingMode,
- correctionLevel,
- qrVersion,
- showText,
- symbol);
-
- if (error != BARCODE_ERROR_NONE)
- {
- LOGE("Barcode creation failed, clean memory");
- ZBarcode_Delete(symbol);
- return error;
- }
-
- // fill output buffer
- *imageWidth = symbol->bitmap_width;
- *imageHeight = symbol->bitmap_height;
- *imageChannels = 3;
- const unsigned int imageBufferSize = (*imageWidth) * (*imageHeight) * (*imageChannels);
- *imageBuffer = new unsigned char [imageBufferSize];
- memmove(*imageBuffer, symbol->bitmap, imageBufferSize);
-
- LOGI("Barcode buffer has been successfully generated, clean memory");
- ZBarcode_Delete(symbol);
-
- return BARCODE_ERROR_NONE;
+ zint_symbol *symbol = ZBarcode_Create();
+
+ if(symbol == NULL) {
+ LOGE("ZBarcode creation failed");
+
+ return BARCODE_ERROR_ENCODING_PROBLEM;
+ }
+
+ int error = createBarcode(
+ message,
+ type,
+ encodingMode,
+ correctionLevel,
+ qrVersion,
+ showText,
+ symbol);
+
+ if (error != BARCODE_ERROR_NONE) {
+ LOGE("Barcode creation failed, clean memory");
+ ZBarcode_Delete(symbol);
+ return error;
+ }
+
+ /* fill output buffer */
+ *imageWidth = symbol->bitmap_width;
+ *imageHeight = symbol->bitmap_height;
+ *imageChannels = 3;
+ const unsigned int imageBufferSize = (*imageWidth) * (*imageHeight) * (*imageChannels);
+ *imageBuffer = new unsigned char[imageBufferSize];
+ memmove(*imageBuffer, symbol->bitmap, imageBufferSize);
+
+ LOGI("Barcode buffer has been successfully generated, clean memory");
+ ZBarcode_Delete(symbol);
+
+ return BARCODE_ERROR_NONE;
}
} /* Barcode */
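
generateBarcodeToBuffer() above transfers ownership of a new[]-allocated packed RGB buffer to the caller; a usage sketch that releases it correctly (message and options are placeholders):

#include "BarcodeGenerator.h"

using namespace MediaVision::Barcode;

static int bufferExample(void)
{
    unsigned char *buffer = NULL;
    unsigned int width = 0, height = 0, channels = 0;

    int error = BarcodeGenerator::generateBarcodeToBuffer(
            &buffer, &width, &height, &channels,
            "HELLO", BARCODE_QR,
            BARCODE_QR_MODE_ALPHANUMERIC, BARCODE_QR_ECC_LOW,
            3 /* qrVersion */, 0 /* showText */);
    if (error != BARCODE_ERROR_NONE)
        return error;

    /* width * height * channels bytes of RGB pixels are now valid */
    /* ... consume the buffer ... */

    delete[] buffer;  /* the generator allocated it with new[] */
    return BARCODE_ERROR_NONE;
}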
diff --git a/mv_barcode/barcode_generator/src/mv_barcode_generate_open.cpp b/mv_barcode/barcode_generator/src/mv_barcode_generate_open.cpp
index 19657d3e..44c68b22 100644
--- a/mv_barcode/barcode_generator/src/mv_barcode_generate_open.cpp
+++ b/mv_barcode/barcode_generator/src/mv_barcode_generate_open.cpp
@@ -26,353 +26,326 @@
using namespace MediaVision::Barcode;
-namespace
-{
-
+namespace {
int alphanumToUpper(std::string& strToTransform)
{
- std::string tempString = strToTransform;
- std::transform(tempString.begin(), tempString.end(),
- tempString.begin(), ::toupper);
-
- if (std::string::npos != tempString.find_first_not_of("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ $%*+-./:"))
- {
- LOGE("Barcode message can't be converted according to support "
- "alphanumeric (0..9, A..Z, space, $, %, *, +, -, ., /, :) "
- "mode: %s", strToTransform.c_str());
- return BARCODE_ERROR_INVALID_DATA;
- }
-
- LOGI("Barcode message was converted according to support alphanumeric "
- "mode: %s -> %s", strToTransform.c_str(), tempString.c_str());
- strToTransform = tempString;
- return BARCODE_ERROR_NONE;
+ std::string tempString = strToTransform;
+ std::transform(tempString.begin(), tempString.end(),
+ tempString.begin(), ::toupper);
+
+ if (std::string::npos != tempString.find_first_not_of("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ $%*+-./:")) {
+ LOGE("Barcode message can't be converted according to support "
+ "alphanumeric (0..9, A..Z, space, $, %, *, +, -, ., /, :) "
+ "mode: %s", strToTransform.c_str());
+ return BARCODE_ERROR_INVALID_DATA;
+ }
+
+ LOGI("Barcode message was converted according to support alphanumeric "
+ "mode: %s -> %s", strToTransform.c_str(), tempString.c_str());
+ strToTransform = tempString;
+ return BARCODE_ERROR_NONE;
}
BarcodeType convertBarcodeType(mv_barcode_type_e type)
{
- BarcodeType barcodeType = BARCODE_QR;
- switch (type)
- {
- case MV_BARCODE_UPC_A:
- barcodeType = BARCODE_UPCA;
- break;
- case MV_BARCODE_UPC_E:
- barcodeType = BARCODE_UPCE;
- break;
- case MV_BARCODE_EAN_8:
- barcodeType = BARCODE_EAN8;
- break;
- case MV_BARCODE_EAN_13:
- barcodeType = BARCODE_EAN13;
- break;
- case MV_BARCODE_CODE128:
- barcodeType = BARCODE_CODE128;
- break;
- case MV_BARCODE_CODE39:
- barcodeType = BARCODE_CODE39;
- break;
- case MV_BARCODE_I2_5:
- barcodeType = BARCODE_INTERLEAVE_2_5;
- break;
- default:
- break;
- }
-
- LOGI("Media vision barcode type has been converted to ZInt barcode type "
- "(%i -> %i)", type, barcodeType);
- return barcodeType;
+ BarcodeType barcodeType = BARCODE_QR;
+ switch (type) {
+ case MV_BARCODE_UPC_A:
+ barcodeType = BARCODE_UPCA;
+ break;
+ case MV_BARCODE_UPC_E:
+ barcodeType = BARCODE_UPCE;
+ break;
+ case MV_BARCODE_EAN_8:
+ barcodeType = BARCODE_EAN8;
+ break;
+ case MV_BARCODE_EAN_13:
+ barcodeType = BARCODE_EAN13;
+ break;
+ case MV_BARCODE_CODE128:
+ barcodeType = BARCODE_CODE128;
+ break;
+ case MV_BARCODE_CODE39:
+ barcodeType = BARCODE_CODE39;
+ break;
+ case MV_BARCODE_I2_5:
+ barcodeType = BARCODE_INTERLEAVE_2_5;
+ break;
+ default:
+ break;
+ }
+
+ LOGI("Media vision barcode type has been converted to ZInt barcode type "
+ "(%i -> %i)", type, barcodeType);
+ return barcodeType;
}
BarcodeQREncodingMode convertEncodingMode(mv_barcode_qr_mode_e mode)
{
- BarcodeQREncodingMode encodingMode = BARCODE_QR_MODE_ALPHANUMERIC;
-
- switch (mode)
- {
- case MV_BARCODE_QR_MODE_NUMERIC:
- encodingMode = BARCODE_QR_MODE_NUMERIC;
- break;
- case MV_BARCODE_QR_MODE_BYTE:
- encodingMode = BARCODE_QR_MODE_BYTE;
- break;
- case MV_BARCODE_QR_MODE_UTF8:
- encodingMode = BARCODE_QR_MODE_UTF8;
- break;
- default:
- break;
- }
-
- LOGI("Media vision QRCode encoding mode has been converted to "
- "ZInt encoding mode (%i -> %i)", mode, encodingMode);
- return encodingMode;
+ BarcodeQREncodingMode encodingMode = BARCODE_QR_MODE_ALPHANUMERIC;
+
+ switch (mode) {
+ case MV_BARCODE_QR_MODE_NUMERIC:
+ encodingMode = BARCODE_QR_MODE_NUMERIC;
+ break;
+ case MV_BARCODE_QR_MODE_BYTE:
+ encodingMode = BARCODE_QR_MODE_BYTE;
+ break;
+ case MV_BARCODE_QR_MODE_UTF8:
+ encodingMode = BARCODE_QR_MODE_UTF8;
+ break;
+ default:
+ break;
+ }
+
+ LOGI("Media vision QRCode encoding mode has been converted to "
+ "ZInt encoding mode (%i -> %i)", mode, encodingMode);
+ return encodingMode;
}
BarcodeQRErrorCorrectionLevel convertECC(mv_barcode_qr_ecc_e ecc)
{
- BarcodeQRErrorCorrectionLevel ecclevel = BARCODE_QR_ECC_LOW;
-
- switch (ecc)
- {
- case MV_BARCODE_QR_ECC_MEDIUM:
- ecclevel = BARCODE_QR_ECC_MEDIUM;
- break;
- case MV_BARCODE_QR_ECC_QUARTILE:
- ecclevel = BARCODE_QR_ECC_QUARTILE;
- break;
- case MV_BARCODE_QR_ECC_HIGH:
- ecclevel = BARCODE_QR_ECC_HIGH;
- break;
- default:
- break;
- }
-
- LOGI("Media vision ECC level has been converted to "
- "ZInt ECC level (%i -> %i)", ecc, ecclevel);
- return ecclevel;
+ BarcodeQRErrorCorrectionLevel ecclevel = BARCODE_QR_ECC_LOW;
+
+ switch (ecc) {
+ case MV_BARCODE_QR_ECC_MEDIUM:
+ ecclevel = BARCODE_QR_ECC_MEDIUM;
+ break;
+ case MV_BARCODE_QR_ECC_QUARTILE:
+ ecclevel = BARCODE_QR_ECC_QUARTILE;
+ break;
+ case MV_BARCODE_QR_ECC_HIGH:
+ ecclevel = BARCODE_QR_ECC_HIGH;
+ break;
+ default:
+ break;
+ }
+
+ LOGI("Media vision ECC level has been converted to "
+ "ZInt ECC level (%i -> %i)", ecc, ecclevel);
+ return ecclevel;
}
int convertBarcodeError(int barcodeError)
{
- int mvError = MEDIA_VISION_ERROR_NONE;
-
- switch (barcodeError)
- {
- case BARCODE_WARNING_INVALID_OPTION:
- mvError = MEDIA_VISION_ERROR_INVALID_PARAMETER;
- break;
- case BARCODE_ERROR_TOO_LONG:
- mvError = MEDIA_VISION_ERROR_MSG_TOO_LONG;
- break;
- case BARCODE_ERROR_INVALID_DATA:
- mvError = MEDIA_VISION_ERROR_INVALID_DATA;
- break;
- case BARCODE_ERROR_INVALID_CHECK:
- mvError = MEDIA_VISION_ERROR_INVALID_PARAMETER;
- break;
- case BARCODE_ERROR_INVALID_OPTION:
- mvError = MEDIA_VISION_ERROR_INVALID_PARAMETER;
- break;
- case BARCODE_ERROR_ENCODING_PROBLEM:
- mvError = MEDIA_VISION_ERROR_INTERNAL;
- break;
- case BARCODE_ERROR_FILE_ACCESS:
- mvError = MEDIA_VISION_ERROR_PERMISSION_DENIED;
- break;
- case BARCODE_ERROR_MEMORY:
- mvError = MEDIA_VISION_ERROR_OUT_OF_MEMORY;
- break;
- case BARCODE_ERROR_INVALID_PATH:
- mvError = MEDIA_VISION_ERROR_INVALID_PATH;
- default:
- break;
- }
-
- LOGI("ZInt error code has been converted to the media vision error code "
- "(%i -> (0x%08x))", barcodeError, mvError);
- return mvError;
+ int mvError = MEDIA_VISION_ERROR_NONE;
+
+ switch (barcodeError) {
+ case BARCODE_WARNING_INVALID_OPTION:
+ mvError = MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ break;
+ case BARCODE_ERROR_TOO_LONG:
+ mvError = MEDIA_VISION_ERROR_MSG_TOO_LONG;
+ break;
+ case BARCODE_ERROR_INVALID_DATA:
+ mvError = MEDIA_VISION_ERROR_INVALID_DATA;
+ break;
+ case BARCODE_ERROR_INVALID_CHECK:
+ mvError = MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ break;
+ case BARCODE_ERROR_INVALID_OPTION:
+ mvError = MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ break;
+ case BARCODE_ERROR_ENCODING_PROBLEM:
+ mvError = MEDIA_VISION_ERROR_INTERNAL;
+ break;
+ case BARCODE_ERROR_FILE_ACCESS:
+ mvError = MEDIA_VISION_ERROR_PERMISSION_DENIED;
+ break;
+ case BARCODE_ERROR_MEMORY:
+ mvError = MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ break;
+ case BARCODE_ERROR_INVALID_PATH:
+ mvError = MEDIA_VISION_ERROR_INVALID_PATH;
+ break;
+ default:
+ break;
+ }
+
+ LOGI("ZInt error code has been converted to the media vision error code "
+ "(%i -> (0x%08x))", barcodeError, mvError);
+ return mvError;
}
BarcodeImageFormat convertImageFormat(mv_barcode_image_format_e format)
{
- BarcodeImageFormat imageFormat = BARCODE_IMAGE_PNG;
-
- switch (format)
- {
- case MV_BARCODE_IMAGE_FORMAT_JPG:
- imageFormat = BARCODE_IMAGE_JPG;
- break;
- case MV_BARCODE_IMAGE_FORMAT_BMP:
- imageFormat = BARCODE_IMAGE_BMP;
- break;
- default:
- break;
- }
-
- LOGI("Media vision image format has been converted to "
- "internal image format (%i -> %i)", format, imageFormat);
- return imageFormat;
+ BarcodeImageFormat imageFormat = BARCODE_IMAGE_PNG;
+
+ switch (format) {
+ case MV_BARCODE_IMAGE_FORMAT_JPG:
+ imageFormat = BARCODE_IMAGE_JPG;
+ break;
+ case MV_BARCODE_IMAGE_FORMAT_BMP:
+ imageFormat = BARCODE_IMAGE_BMP;
+ break;
+ default:
+ break;
+ }
+
+ LOGI("Media vision image format has been converted to "
+ "internal image format (%i -> %i)", format, imageFormat);
+ return imageFormat;
}
} /* anonymous namespace */
int mv_barcode_generate_source_open(
- mv_engine_config_h engine_cfg,
- const char *message,
- mv_barcode_type_e type,
- mv_barcode_qr_mode_e qr_enc_mode,
- mv_barcode_qr_ecc_e qr_ecc,
- int qr_version,
- mv_source_h image)
+ mv_engine_config_h engine_cfg,
+ const char *message,
+ mv_barcode_type_e type,
+ mv_barcode_qr_mode_e qr_enc_mode,
+ mv_barcode_qr_ecc_e qr_ecc,
+ int qr_version,
+ mv_source_h image)
{
- std::string messageStr = std::string(message);
-
- if (qr_enc_mode == MV_BARCODE_QR_MODE_NUMERIC &&
- messageStr.find_first_not_of("0123456789") != std::string::npos)
- {
- LOGE("Barcode message can't be used according to support "
- "numeric (0..9) mode: %s", messageStr.c_str());
- return MEDIA_VISION_ERROR_INVALID_DATA;
- }
-
- int error = BARCODE_ERROR_NONE;
- if (MV_BARCODE_QR == type &&
- MV_BARCODE_QR_MODE_ALPHANUMERIC == qr_enc_mode)
- {
- error = alphanumToUpper(messageStr);
- if (BARCODE_ERROR_NONE != error)
- {
- return convertBarcodeError(error);
- }
- }
-
- unsigned char *imageBuffer = NULL;
- unsigned int imageWidth = 0u;
- unsigned int imageHeight = 0u;
- unsigned int imageChannels = 0u;
-
- int showText = 0;
- error = mv_engine_config_get_int_attribute(engine_cfg, "MV_BARCODE_GENERATE_ATTR_TEXT", &showText);
- if (error != MEDIA_VISION_ERROR_NONE)
- {
- LOGW("mv_engine_config_get_int_attribute failed");
- return error;
- }
-
- if (showText == BARCODE_GEN_TEXT_VISIBLE && type == MV_BARCODE_QR)
- {
- LOGW("QR code generation with visible text is not supported");
- return MEDIA_VISION_ERROR_INVALID_OPERATION;
- }
-
- error = BarcodeGenerator::generateBarcodeToBuffer(
- &imageBuffer,
- &imageWidth,
- &imageHeight,
- &imageChannels,
- messageStr,
- convertBarcodeType(type),
- convertEncodingMode(qr_enc_mode),
- convertECC(qr_ecc),
- qr_version,
- showText);
-
- if (error != BARCODE_ERROR_NONE)
- {
- LOGE("Barcode generation to the buffer failed");
- if (NULL != imageBuffer)
- {
- LOGI("Delete temporal buffer");
- delete[] imageBuffer;
- }
- return convertBarcodeError(error);
- }
-
- const unsigned int imageBufferSize = imageWidth * imageHeight * imageChannels;
-
- LOGI("Barcode has been generated to the buffer: "
- "Buffer size = %ui x %ui; Channels = %ui; Message = %s",
- imageWidth, imageHeight, imageChannels, messageStr.c_str());
-
- error = mv_source_fill_by_buffer_c(
- image,
- imageBuffer,
- imageBufferSize,
- imageWidth,
- imageHeight,
- MEDIA_VISION_COLORSPACE_RGB888);
-
- if (error != MEDIA_VISION_ERROR_NONE)
- {
- LOGE("Meidiavision source fill by generated buffer failed");
- }
-
- if (NULL != imageBuffer)
- {
- LOGI("Delete temporal buffer");
- delete[] imageBuffer;
- }
-
- return error;
+ std::string messageStr = std::string(message);
+
+ if (qr_enc_mode == MV_BARCODE_QR_MODE_NUMERIC &&
+ messageStr.find_first_not_of("0123456789") != std::string::npos) {
+ LOGE("Barcode message can't be used according to support "
+ "numeric (0..9) mode: %s", messageStr.c_str());
+ return MEDIA_VISION_ERROR_INVALID_DATA;
+ }
+
+ int error = BARCODE_ERROR_NONE;
+ if (MV_BARCODE_QR == type &&
+ MV_BARCODE_QR_MODE_ALPHANUMERIC == qr_enc_mode) {
+ error = alphanumToUpper(messageStr);
+ if (BARCODE_ERROR_NONE != error) {
+ return convertBarcodeError(error);
+ }
+ }
+
+ unsigned char *imageBuffer = NULL;
+ unsigned int imageWidth = 0u;
+ unsigned int imageHeight = 0u;
+ unsigned int imageChannels = 0u;
+
+ int showText = 0;
+ error = mv_engine_config_get_int_attribute(engine_cfg, "MV_BARCODE_GENERATE_ATTR_TEXT", &showText);
+ if (error != MEDIA_VISION_ERROR_NONE) {
+ LOGW("mv_engine_config_get_int_attribute failed");
+ return error;
+ }
+
+ if (showText == BARCODE_GEN_TEXT_VISIBLE && type == MV_BARCODE_QR) {
+ LOGW("QR code generation with visible text is not supported");
+ return MEDIA_VISION_ERROR_INVALID_OPERATION;
+ }
+
+ error = BarcodeGenerator::generateBarcodeToBuffer(
+ &imageBuffer,
+ &imageWidth,
+ &imageHeight,
+ &imageChannels,
+ messageStr,
+ convertBarcodeType(type),
+ convertEncodingMode(qr_enc_mode),
+ convertECC(qr_ecc),
+ qr_version,
+ showText);
+
+ if (error != BARCODE_ERROR_NONE) {
+ LOGE("Barcode generation to the buffer failed");
+ if (NULL != imageBuffer) {
+ LOGI("Delete temporal buffer");
+ delete[] imageBuffer;
+ }
+ return convertBarcodeError(error);
+ }
+
+ const unsigned int imageBufferSize = imageWidth * imageHeight * imageChannels;
+
+ LOGI("Barcode has been generated to the buffer: "
+ "Buffer size = %ui x %ui; Channels = %ui; Message = %s",
+ imageWidth, imageHeight, imageChannels, messageStr.c_str());
+
+ error = mv_source_fill_by_buffer_c(
+ image,
+ imageBuffer,
+ imageBufferSize,
+ imageWidth,
+ imageHeight,
+ MEDIA_VISION_COLORSPACE_RGB888);
+
+ if (error != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Meidiavision source fill by generated buffer failed");
+ }
+
+ if (NULL != imageBuffer) {
+ LOGI("Delete temporal buffer");
+ delete[] imageBuffer;
+ }
+
+ return error;
}
int mv_barcode_generate_image_open(
- mv_engine_config_h engine_cfg,
- const char *message,
- int image_width,
- int image_height,
- mv_barcode_type_e type,
- mv_barcode_qr_mode_e qr_enc_mode,
- mv_barcode_qr_ecc_e qr_ecc,
- int qr_version,
- const char *image_path,
- mv_barcode_image_format_e image_format)
+ mv_engine_config_h engine_cfg,
+ const char *message,
+ int image_width,
+ int image_height,
+ mv_barcode_type_e type,
+ mv_barcode_qr_mode_e qr_enc_mode,
+ mv_barcode_qr_ecc_e qr_ecc,
+ int qr_version,
+ const char *image_path,
+ mv_barcode_image_format_e image_format)
{
- std::string messageStr = std::string(message);
-
- if (qr_enc_mode == MV_BARCODE_QR_MODE_NUMERIC &&
- messageStr.find_first_not_of("0123456789") != std::string::npos)
- {
- LOGE("Barcode message can't be used according to support "
- "numeric (0..9) mode: %s", messageStr.c_str());
- return MEDIA_VISION_ERROR_INVALID_DATA;
- }
-
- if (NULL == image_path)
- {
- LOGE("Can't save barcode image to the path[%p]. The path has to be specified", image_path);
- return MEDIA_VISION_ERROR_INVALID_PATH;
- }
-
- int error = BARCODE_ERROR_NONE;
- if (MV_BARCODE_QR == type &&
- MV_BARCODE_QR_MODE_ALPHANUMERIC == qr_enc_mode)
- {
- error = alphanumToUpper(messageStr);
- if (BARCODE_ERROR_NONE != error)
- {
- return convertBarcodeError(error);
- }
- }
-
- int showText = 0;
- error = mv_engine_config_get_int_attribute(engine_cfg, "MV_BARCODE_GENERATE_ATTR_TEXT", &showText);
- if (error != MEDIA_VISION_ERROR_NONE)
- {
- LOGW("mv_engine_config_get_int_attribute failed");
- return error;
- }
-
- if (showText == BARCODE_GEN_TEXT_VISIBLE && type == MV_BARCODE_QR)
- {
- LOGW("QR code generation with visible text is not supported");
- return MEDIA_VISION_ERROR_INVALID_OPERATION;
- }
-
- error = BarcodeGenerator::generateBarcodeToImage(
- std::string(image_path),
- convertImageFormat(image_format),
- image_width,
- image_height,
- messageStr,
- convertBarcodeType(type),
- convertEncodingMode(qr_enc_mode),
- convertECC(qr_ecc),
- qr_version,
- showText);
-
- if (error != BARCODE_ERROR_NONE)
- {
- LOGE("Barcode generation to the image file failed");
- }
- else
- {
- LOGI("Barcode has been generated to the image: "
- "Image size = %ui x %ui; Message = %s",
- image_width, image_height, messageStr.c_str());
- }
-
- return convertBarcodeError(error);
+ std::string messageStr = std::string(message);
+
+ if (qr_enc_mode == MV_BARCODE_QR_MODE_NUMERIC &&
+ messageStr.find_first_not_of("0123456789") != std::string::npos) {
+ LOGE("Barcode message can't be used according to support "
+ "numeric (0..9) mode: %s", messageStr.c_str());
+ return MEDIA_VISION_ERROR_INVALID_DATA;
+ }
+
+ if (NULL == image_path) {
+ LOGE("Can't save barcode image to the path[%p]. The path has to be specified", image_path);
+ return MEDIA_VISION_ERROR_INVALID_PATH;
+ }
+
+ int error = BARCODE_ERROR_NONE;
+ if (MV_BARCODE_QR == type &&
+ MV_BARCODE_QR_MODE_ALPHANUMERIC == qr_enc_mode) {
+ error = alphanumToUpper(messageStr);
+ if (BARCODE_ERROR_NONE != error) {
+ return convertBarcodeError(error);
+ }
+ }
+
+ int showText = 0;
+ error = mv_engine_config_get_int_attribute(engine_cfg, "MV_BARCODE_GENERATE_ATTR_TEXT", &showText);
+ if (error != MEDIA_VISION_ERROR_NONE) {
+ LOGW("mv_engine_config_get_int_attribute failed");
+ return error;
+ }
+
+ if (showText == BARCODE_GEN_TEXT_VISIBLE && type == MV_BARCODE_QR) {
+ LOGW("QR code generation with visible text is not supported");
+ return MEDIA_VISION_ERROR_INVALID_OPERATION;
+ }
+
+ error = BarcodeGenerator::generateBarcodeToImage(
+ std::string(image_path),
+ convertImageFormat(image_format),
+ image_width,
+ image_height,
+ messageStr,
+ convertBarcodeType(type),
+ convertEncodingMode(qr_enc_mode),
+ convertECC(qr_ecc),
+ qr_version,
+ showText);
+
+ if (error != BARCODE_ERROR_NONE) {
+ LOGE("Barcode generation to the image file failed");
+ } else {
+ LOGI("Barcode has been generated to the image: "
+ "Image size = %ui x %ui; Message = %s",
+ image_width, image_height, messageStr.c_str());
+ }
+
+ return convertBarcodeError(error);
}
-
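Illustrative caller sketch for the reindented mv_barcode_generate_image_open() above; it is not part of this patch. It assumes the declaring header mv_barcode_generate_open.h is on the include path, that engine_cfg was created and configured elsewhere (it must carry the MV_BARCODE_GENERATE_ATTR_TEXT attribute read above), and that the message, size, version and output path are placeholders.

#include "mv_barcode_generate_open.h"

/* Hypothetical helper, not part of this change: writes a QR code as a JPEG. */
static int generate_sample_qr(mv_engine_config_h engine_cfg)
{
	int err = mv_barcode_generate_image_open(
			engine_cfg,
			"HTTP://EXAMPLE.ORG",         /* message (alphanumeric-safe) */
			300,                          /* image_width */
			300,                          /* image_height */
			MV_BARCODE_QR,
			MV_BARCODE_QR_MODE_UTF8,
			MV_BARCODE_QR_ECC_MEDIUM,
			5,                            /* qr_version */
			"/tmp/sample_qr.jpg",         /* image_path (illustrative) */
			MV_BARCODE_IMAGE_FORMAT_JPG);

	/* MEDIA_VISION_ERROR_NONE on success; any other value is a converted
	 * ZInt or engine-config error from the function above. */
	return err;
}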
diff --git a/mv_barcode/barcode_generator_lic/include/mv_barcode_generate_lic.h b/mv_barcode/barcode_generator_lic/include/mv_barcode_generate_lic.h
index 2076b0c1..2fbd9352 100644
--- a/mv_barcode/barcode_generator_lic/include/mv_barcode_generate_lic.h
+++ b/mv_barcode/barcode_generator_lic/include/mv_barcode_generate_lic.h
@@ -55,13 +55,13 @@ extern "C" {
* @see mv_barcode_generate_image_lic()
*/
int mv_barcode_generate_source_lic(
- mv_engine_config_h engine_cfg,
- const char *message,
- mv_barcode_type_e type,
- mv_barcode_qr_mode_e qr_enc_mode,
- mv_barcode_qr_ecc_e qr_ecc,
- int qr_version,
- mv_source_h image);
+ mv_engine_config_h engine_cfg,
+ const char *message,
+ mv_barcode_type_e type,
+ mv_barcode_qr_mode_e qr_enc_mode,
+ mv_barcode_qr_ecc_e qr_ecc,
+ int qr_version,
+ mv_source_h image);
/**
* @brief Generates image file with barcode.
@@ -89,16 +89,16 @@ int mv_barcode_generate_source_lic(
* @see mv_barcode_generate_source_lic()
*/
int mv_barcode_generate_image_lic(
- mv_engine_config_h engine_cfg,
- const char *message,
- int image_width,
- int image_height,
- mv_barcode_type_e type,
- mv_barcode_qr_mode_e qr_enc_mode,
- mv_barcode_qr_ecc_e qr_ecc,
- int qr_version,
- const char *image_path,
- mv_barcode_image_format_e image_format);
+ mv_engine_config_h engine_cfg,
+ const char *message,
+ int image_width,
+ int image_height,
+ mv_barcode_type_e type,
+ mv_barcode_qr_mode_e qr_enc_mode,
+ mv_barcode_qr_ecc_e qr_ecc,
+ int qr_version,
+ const char *image_path,
+ mv_barcode_image_format_e image_format);
#ifdef __cplusplus
}
diff --git a/mv_barcode/barcode_generator_lic/src/mv_barcode_generate_lic.c b/mv_barcode/barcode_generator_lic/src/mv_barcode_generate_lic.c
index d57621fc..fada9e79 100644
--- a/mv_barcode/barcode_generator_lic/src/mv_barcode_generate_lic.c
+++ b/mv_barcode/barcode_generator_lic/src/mv_barcode_generate_lic.c
@@ -17,29 +17,28 @@
#include "mv_barcode_generate_lic.h"
int mv_barcode_generate_source_lic(
- mv_engine_config_h engine_cfg,
- const char *message,
- mv_barcode_type_e type,
- mv_barcode_qr_mode_e qr_enc_mode,
- mv_barcode_qr_ecc_e qr_ecc,
- int qr_version,
- mv_source_h image)
+ mv_engine_config_h engine_cfg,
+ const char *message,
+ mv_barcode_type_e type,
+ mv_barcode_qr_mode_e qr_enc_mode,
+ mv_barcode_qr_ecc_e qr_ecc,
+ int qr_version,
+ mv_source_h image)
{
- return MEDIA_VISION_ERROR_NOT_SUPPORTED;;
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED;
}
int mv_barcode_generate_image_lic(
- mv_engine_config_h engine_cfg,
- const char *message,
- int image_width,
- int image_height,
- mv_barcode_type_e type,
- mv_barcode_qr_mode_e qr_enc_mode,
- mv_barcode_qr_ecc_e qr_ecc,
- int qr_version,
- const char *image_path,
- mv_barcode_image_format_e image_format)
+ mv_engine_config_h engine_cfg,
+ const char *message,
+ int image_width,
+ int image_height,
+ mv_barcode_type_e type,
+ mv_barcode_qr_mode_e qr_enc_mode,
+ mv_barcode_qr_ecc_e qr_ecc,
+ int qr_version,
+ const char *image_path,
+ mv_barcode_image_format_e image_format)
{
- return MEDIA_VISION_ERROR_NOT_SUPPORTED;
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED;
}
-
diff --git a/mv_common/include/EngineConfig.h b/mv_common/include/EngineConfig.h
index ac580c9d..5b7e871e 100644
--- a/mv_common/include/EngineConfig.h
+++ b/mv_common/include/EngineConfig.h
@@ -27,154 +27,150 @@
* @brief Engine Configuration class definition.
*/
-namespace MediaVision
-{
-namespace Common
-{
+namespace MediaVision {
+namespace Common {
typedef std::map<std::string, double>::const_iterator DictDblConstIter;
typedef std::map<std::string, int>::const_iterator DictIntConstIter;
typedef std::map<std::string, bool>::const_iterator DictBoolConstIter;
typedef std::map<std::string, std::string>::const_iterator DictStrConstIter;
-class EngineConfig
-{
+class EngineConfig {
public:
- /**
- * @brief Engine configuration constructor.
- * @details Create new engine configuration dictionary and set default
- * attributes values.
- *
- * @since_tizen 2.4
- */
- EngineConfig();
-
- /**
- * @brief Engine configuration destructor.
- */
- virtual ~EngineConfig();
-
- /**
- * @brief Sets attribute with double value.
- *
- * @since_tizen 2.4
- * @param [in] key The string name of the attribute
- * @param [in] value The double attribute value to be set
- * @return @c MEDIA_VISION_ERROR_NONE on success,\n
- * otherwise a negative error value
- */
- int setAttribute(const std::string& key, const double value);
-
- /**
- * @brief Sets attribute with integer value.
- *
- * @since_tizen 2.4
- * @param [in] key The string name of the attribute
- * @param [in] value The integer attribute value to be set
- * @return @c MEDIA_VISION_ERROR_NONE on success,\n
- * otherwise a negative error value
- */
- int setAttribute(const std::string& key, const int value);
-
- /**
- * @brief Sets attribute with boolean value.
- *
- * @since_tizen 2.4
- * @param [in] key The string name of the attribute
- * @param [in] value The boolean attribute value to be set
- * @return @c MEDIA_VISION_ERROR_NONE on success,\n
- * otherwise a negative error value
- */
- int setAttribute(const std::string& key, const bool value);
-
- /**
- * @brief Sets attribute with string value.
- *
- * @since_tizen 2.4
- * @param [in] key The string name of the attribute
- * @param [in] value The string attribute value to be set
- * @return @c MEDIA_VISION_ERROR_NONE on success,\n
- * otherwise a negative error value
- */
- int setAttribute(const std::string& key, const std::string& value);
-
- /**
- * @brief Gets double attribute value by attribute name.
- *
- * @since_tizen 2.4
- * @param [in] key The string name of the attribute
- * @param [out] value r The double attribute value to be obtained
- * @return @c MEDIA_VISION_ERROR_NONE on success,\n
- * otherwise a negative error value
- * @retval #MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE If attribute with name @a key
- * doesn't exist in the engine configuration dictionary
- */
- int getDoubleAttribute(const std::string& key, double *value) const;
-
- /**
- * @brief Gets integer attribute value by attribute name.
- *
- * @since_tizen 2.4
- * @param [in] key The string name of the attribute
- * @param [out] value The integer attribute value to be obtained
- * @return @c MEDIA_VISION_ERROR_NONE on success,\n
- * otherwise a negative error value
- * @retval #MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE If attribute with name @a key
- * doesn't exist in the engine configuration dictionary
- */
- int getIntegerAttribute(const std::string& key, int *value) const;
-
- /**
- * @brief Gets boolean attribute value by attribute name.
- *
- * @since_tizen 2.4
- * @param [in] key The string name of the attribute
- * @param [out] value The boolean attribute value to be obtained
- * @return @c MEDIA_VISION_ERROR_NONE on success,\n
- * otherwise a negative error value
- * @retval #MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE If attribute with name @a key
- * doesn't exist in the engine configuration dictionary
- */
- int getBooleanAttribute(const std::string& key, bool *value) const;
-
- /**
- * @brief Gets string attribute value by attribute name.
- *
- * @since_tizen 2.4
- * @param [in] key The string name of the attribute
- * @param [out] value The string attribute value to be obtained
- * @return @c MEDIA_VISION_ERROR_NONE on success,\n
- * otherwise a negative error value
- * @retval #MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE If attribute with name @a key
- * doesn't exist in the engine configuration dictionary
- */
- int getStringAttribute(const std::string& key, std::string *value) const;
+ /**
+ * @brief Engine configuration constructor.
+ * @details Creates a new engine configuration dictionary and sets default
+ * attribute values.
+ *
+ * @since_tizen 2.4
+ */
+ EngineConfig();
+
+ /**
+ * @brief Engine configuration destructor.
+ */
+ virtual ~EngineConfig();
+
+ /**
+ * @brief Sets attribute with double value.
+ *
+ * @since_tizen 2.4
+ * @param [in] key The string name of the attribute
+ * @param [in] value The double attribute value to be set
+ * @return @c MEDIA_VISION_ERROR_NONE on success,\n
+ * otherwise a negative error value
+ */
+ int setAttribute(const std::string& key, const double value);
+
+ /**
+ * @brief Sets attribute with integer value.
+ *
+ * @since_tizen 2.4
+ * @param [in] key The string name of the attribute
+ * @param [in] value The integer attribute value to be set
+ * @return @c MEDIA_VISION_ERROR_NONE on success,\n
+ * otherwise a negative error value
+ */
+ int setAttribute(const std::string& key, const int value);
+
+ /**
+ * @brief Sets attribute with boolean value.
+ *
+ * @since_tizen 2.4
+ * @param [in] key The string name of the attribute
+ * @param [in] value The boolean attribute value to be set
+ * @return @c MEDIA_VISION_ERROR_NONE on success,\n
+ * otherwise a negative error value
+ */
+ int setAttribute(const std::string& key, const bool value);
+
+ /**
+ * @brief Sets attribute with string value.
+ *
+ * @since_tizen 2.4
+ * @param [in] key The string name of the attribute
+ * @param [in] value The string attribute value to be set
+ * @return @c MEDIA_VISION_ERROR_NONE on success,\n
+ * otherwise a negative error value
+ */
+ int setAttribute(const std::string& key, const std::string& value);
+
+ /**
+ * @brief Gets double attribute value by attribute name.
+ *
+ * @since_tizen 2.4
+ * @param [in] key The string name of the attribute
+ * @param [out] value The double attribute value to be obtained
+ * @return @c MEDIA_VISION_ERROR_NONE on success,\n
+ * otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE If attribute with name @a key
+ * doesn't exist in the engine configuration dictionary
+ */
+ int getDoubleAttribute(const std::string& key, double *value) const;
+
+ /**
+ * @brief Gets integer attribute value by attribute name.
+ *
+ * @since_tizen 2.4
+ * @param [in] key The string name of the attribute
+ * @param [out] value The integer attribute value to be obtained
+ * @return @c MEDIA_VISION_ERROR_NONE on success,\n
+ * otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE If attribute with name @a key
+ * doesn't exist in the engine configuration dictionary
+ */
+ int getIntegerAttribute(const std::string& key, int *value) const;
+
+ /**
+ * @brief Gets boolean attribute value by attribute name.
+ *
+ * @since_tizen 2.4
+ * @param [in] key The string name of the attribute
+ * @param [out] value The boolean attribute value to be obtained
+ * @return @c MEDIA_VISION_ERROR_NONE on success,\n
+ * otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE If attribute with name @a key
+ * doesn't exist in the engine configuration dictionary
+ */
+ int getBooleanAttribute(const std::string& key, bool *value) const;
+
+ /**
+ * @brief Gets string attribute value by attribute name.
+ *
+ * @since_tizen 2.4
+ * @param [in] key The string name of the attribute
+ * @param [out] value The string attribute value to be obtained
+ * @return @c MEDIA_VISION_ERROR_NONE on success,\n
+ * otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE If attribute with name @a key
+ * doesn't exist in the engine configuration dictionary
+ */
+ int getStringAttribute(const std::string& key, std::string *value) const;
public:
- static bool setDefaultConfigFilePath(const std::string& confFilePath);
+ static bool setDefaultConfigFilePath(const std::string& confFilePath);
- static const std::map<std::string, double>& getDefaultDblDict();
- static const std::map<std::string, int>& getDefaultIntDict();
- static const std::map<std::string, bool>& getDefaultBoolDict();
- static const std::map<std::string, std::string>& getDefaultStrDict();
- static int cacheDictionaries(
- bool isLazyCache = true,
- std::string configFilePath = DefConfigFilePath);
+ static const std::map<std::string, double>& getDefaultDblDict();
+ static const std::map<std::string, int>& getDefaultIntDict();
+ static const std::map<std::string, bool>& getDefaultBoolDict();
+ static const std::map<std::string, std::string>& getDefaultStrDict();
+ static int cacheDictionaries(
+ bool isLazyCache = true,
+ std::string configFilePath = DefConfigFilePath);
private:
- std::map<std::string, double> m_dblDict;
- std::map<std::string, int> m_intDict;
- std::map<std::string, bool> m_boolDict;
- std::map<std::string, std::string> m_strDict;
+ std::map<std::string, double> m_dblDict;
+ std::map<std::string, int> m_intDict;
+ std::map<std::string, bool> m_boolDict;
+ std::map<std::string, std::string> m_strDict;
private:
- static std::string DefConfigFilePath;
-
- static std::map<std::string, double> DefDblDict;
- static std::map<std::string, int> DefIntDict;
- static std::map<std::string, bool> DefBoolDict;
- static std::map<std::string, std::string> DefStrDict;
+ static std::string DefConfigFilePath;
+ static std::map<std::string, double> DefDblDict;
+ static std::map<std::string, int> DefIntDict;
+ static std::map<std::string, bool> DefBoolDict;
+ static std::map<std::string, std::string> DefStrDict;
};
} /* Common */
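A brief usage sketch for the EngineConfig dictionary declared above (illustrative only, not part of this patch). It assumes MEDIA_VISION_ERROR_NONE is visible via the common headers and that MV_BARCODE_GENERATE_ATTR_TEXT is present in the default dictionaries loaded from the JSON configuration file; unknown keys are rejected with MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE.

#include "EngineConfig.h"

using MediaVision::Common::EngineConfig;

// Hypothetical helper: set an integer attribute, then read it back.
int readShowTextAttribute(int *showText)
{
	EngineConfig config;   // constructor caches defaults from the config file

	int err = config.setAttribute("MV_BARCODE_GENERATE_ATTR_TEXT", 1);
	if (err != MEDIA_VISION_ERROR_NONE)
		return err;

	return config.getIntegerAttribute("MV_BARCODE_GENERATE_ATTR_TEXT", showText);
}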
diff --git a/mv_common/include/MediaSource.h b/mv_common/include/MediaSource.h
index 454f69fd..cc79f740 100644
--- a/mv_common/include/MediaSource.h
+++ b/mv_common/include/MediaSource.h
@@ -25,121 +25,115 @@
* @brief This file contains the MediaSource class.
*/
-namespace MediaVision
-{
-namespace Common
-{
-
+namespace MediaVision {
+namespace Common {
/**
* @class MediaSource
* @brief The Media Source container
* @details It is class which contains Media Source information. This class
* will be use in the Media Vision as simple image.
*/
-class MediaSource
-{
+class MediaSource {
public:
+ /**
+ * @brief Creates a MediaSource.
+ * @details Default parameter values of the MediaSource will be: zero for
+ * width, height and buffer size; NULL for buffer;
+ * MEDIA_VISION_COLORSPACE_INVALID for colorspace.
+ *
+ * @since_tizen 2.4
+ *
+ * @see MediaSource::~MediaSource()
+ */
+ MediaSource();
+
+ /**
+ * @brief Destroys the MediaSource and releases all its resources.
+ *
+ * @since_tizen 2.4
+ *
+ * @see MediaSource::MediaSource()
+ */
+ virtual ~MediaSource();
+
+ /**
+ * @brief Clears the MediaSource.
+ * @details Releases all internal resources and sets parameters to default values.
+ *
+ * @since_tizen 2.4
+ *
+ * @see MediaSource::MediaSource()
+ * @see MediaSource::fill()
+ */
+ void clear(void);
+
+ /**
+ * @brief Fills the MediaSource based on the buffer and metadata.
+ *
+ * @since_tizen 2.4
+ * @param [in] buffer The buffer of image data
+ * @param [in] bufferSize The buffer size
+ * @param [in] width The image width
+ * @param [in] height The image height
+ * @param [in] colorspace The image colorspace
+ * @return true if the buffer was filled successfully, false otherwise.
+ *
+ * @see MediaSource::MediaSource()
+ * @see MediaSource::clear()
+ */
+ bool fill(const unsigned char *buffer, unsigned int bufferSize, unsigned int
+ width, unsigned int height, mv_colorspace_e colorspace);
+
+ /**
+ * @brief Gets data buffer of the MediaSource.
+ *
+ * @since_tizen 2.4
+ * @return Pointer to the data buffer.
+ */
+ unsigned char *getBuffer(void) const;
- /**
- * @brief Creates a MediaSource.
- * @details Default parameters values of the MediaSource will be: zero for
- * width, height and buffer size; NULL for buffer;
- * MEDIA_VISION_COLORSPACE_INVALID for colorspace.
- *
- * @since_tizen 2.4
- *
- * @see MediaSource::~MediaSource()
- */
- MediaSource();
-
- /**
- * @brief Destroys the MediaSource and releases all its resources.
- *
- * @since_tizen 2.4
- *
- * @see MediaSource::MediaSource()
- */
- virtual ~MediaSource();
-
- /**
- * @brief Clears the MediaSource.
- * @details Releases all internal resources and set parameters to default values.
- *
- * @since_tizen 2.4
- *
- * @see MediaSource::MediaSource()
- * @see MediaSource::fill()
- */
- void clear(void);
-
- /**
- * @brief Fills the MediaSource based on the buffer and metadata.
- *
- * @since_tizen 2.4
- * @param [in] buffer The buffer of image data
- * @param [in] bufferSize The buffer size
- * @param [in] width The image width
- * @param [in] height The image height
- * @param [in] colorspace The image colorspace
- * @return true if filled process is ok. Otherwise return false.
- *
- * @see MediaSource::MediaSource()
- * @see MediaSource::clear()
- */
- bool fill(const unsigned char *buffer, unsigned int bufferSize, unsigned int
- width, unsigned int height, mv_colorspace_e colorspace);
-
- /**
- * @brief Gets data buffer of the MediaSource.
- *
- * @since_tizen 2.4
- * @return Pointer to the data buffer.
- */
- unsigned char *getBuffer(void) const;
-
- /**
- * @brief Gets buffer size of the MediaSource.
- *
- * @since_tizen 2.4
- * @return Size of data buffer.
- */
- unsigned int getBufferSize(void) const;
-
- /**
- * @brief Gets image width of the MediaSource.
- *
- * @since_tizen 2.4
- * @return Width of image.
- */
- unsigned int getWidth(void) const;
-
- /**
- * @brief Gets image height of the MediaSource.
- *
- * @since_tizen 2.4
- * @return Height of image.
- */
- unsigned int getHeight(void) const;
-
- /**
- * @brief Gets image colorspace of the MediaSource.
- *
- * @since_tizen 2.4
- * @return Colorspace of image.
- */
- mv_colorspace_e getColorspace(void) const;
+ /**
+ * @brief Gets buffer size of the MediaSource.
+ *
+ * @since_tizen 2.4
+ * @return Size of data buffer.
+ */
+ unsigned int getBufferSize(void) const;
+
+ /**
+ * @brief Gets image width of the MediaSource.
+ *
+ * @since_tizen 2.4
+ * @return Width of image.
+ */
+ unsigned int getWidth(void) const;
+
+ /**
+ * @brief Gets image height of the MediaSource.
+ *
+ * @since_tizen 2.4
+ * @return Height of image.
+ */
+ unsigned int getHeight(void) const;
+
+ /**
+ * @brief Gets image colorspace of the MediaSource.
+ *
+ * @since_tizen 2.4
+ * @return Colorspace of image.
+ */
+ mv_colorspace_e getColorspace(void) const;
private:
+ unsigned char *m_pBuffer; /**< The data buffer */
- unsigned char *m_pBuffer; /**< The data buffer */
-
- unsigned int m_bufferSize; /**< The buffer size */
+ unsigned int m_bufferSize; /**< The buffer size */
- unsigned int m_width; /**< The image width */
+ unsigned int m_width; /**< The image width */
- unsigned int m_height; /**< The image height */
+ unsigned int m_height; /**< The image height */
- mv_colorspace_e m_colorspace; /**< The image colorspace */
+ mv_colorspace_e m_colorspace; /**< The image colorspace */
};
} /* Common */
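A minimal sketch of the MediaSource fill/get round trip described above (illustrative, not part of this patch). It assumes a caller-owned RGB888 buffer; fill() copies the data into an internal buffer, so the caller keeps ownership of its own memory.

#include "MediaSource.h"

using MediaVision::Common::MediaSource;

// Hypothetical helper: copy a raw RGB888 frame into a MediaSource.
bool copyIntoMediaSource(const unsigned char *rgbBuffer,
		unsigned int width, unsigned int height)
{
	MediaSource source;
	const unsigned int bufferSize = width * height * 3;   /* RGB888: 3 bytes/pixel */

	if (!source.fill(rgbBuffer, bufferSize, width, height,
			MEDIA_VISION_COLORSPACE_RGB888))
		return false;

	/* The data now lives in a buffer owned and later freed by source. */
	return source.getBufferSize() == bufferSize &&
			source.getColorspace() == MEDIA_VISION_COLORSPACE_RGB888;
}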
diff --git a/mv_common/include/mv_common_c.h b/mv_common/include/mv_common_c.h
index a7fa1bb0..9afa2d14 100644
--- a/mv_common/include/mv_common_c.h
+++ b/mv_common/include/mv_common_c.h
@@ -42,7 +42,7 @@ extern "C" {
* @see mv_destroy_source_c()
*/
int mv_create_source_c(
- mv_source_h *source);
+ mv_source_h *source);
/**
* @brief Destroys the source handle and releases all its resources.
@@ -56,7 +56,7 @@ int mv_create_source_c(
* @see mv_create_source_c()
*/
int mv_destroy_source_c(
- mv_source_h source);
+ mv_source_h source);
/**
* @brief Fills the media source based on the media packet.
@@ -78,8 +78,8 @@ int mv_destroy_source_c(
* @see mv_destroy_source_c()
*/
int mv_source_fill_by_media_packet_c(
- mv_source_h source,
- media_packet_h media_packet);
+ mv_source_h source,
+ media_packet_h media_packet);
/**
* @brief Fills the media source based on the buffer and metadata.
@@ -101,12 +101,12 @@ int mv_source_fill_by_media_packet_c(
* @see mv_source_clear_c()
*/
int mv_source_fill_by_buffer_c(
- mv_source_h source,
- unsigned char *data_buffer,
- unsigned int buffer_size,
- unsigned int image_width,
- unsigned int image_height,
- mv_colorspace_e image_colorspace);
+ mv_source_h source,
+ unsigned char *data_buffer,
+ unsigned int buffer_size,
+ unsigned int image_width,
+ unsigned int image_height,
+ mv_colorspace_e image_colorspace);
/**
* @brief Clears the buffer of the media source.
@@ -120,7 +120,7 @@ int mv_source_fill_by_buffer_c(
* @see mv_source_fill_by_buffer_c()
*/
int mv_source_clear_c(
- mv_source_h source);
+ mv_source_h source);
/**
* @brief Gets buffer of the media source.
@@ -142,9 +142,9 @@ int mv_source_clear_c(
* @see mv_source_get_colorspace_c()
*/
int mv_source_get_buffer_c(
- mv_source_h source,
- unsigned char **data_buffer,
- unsigned int *buffer_size);
+ mv_source_h source,
+ unsigned char **data_buffer,
+ unsigned int *buffer_size);
/**
* @brief Gets height of the media source.
@@ -161,8 +161,8 @@ int mv_source_get_buffer_c(
* @see mv_source_get_buffer_c()
*/
int mv_source_get_height_c(
- mv_source_h source,
- unsigned int *image_height);
+ mv_source_h source,
+ unsigned int *image_height);
/**
* @brief Gets width of the media source.
@@ -179,8 +179,8 @@ int mv_source_get_height_c(
* @see mv_source_get_buffer_c()
*/
int mv_source_get_width_c(
- mv_source_h source,
- unsigned int *image_width);
+ mv_source_h source,
+ unsigned int *image_width);
/**
* @brief Gets colorspace of the media source.
@@ -197,8 +197,8 @@ int mv_source_get_width_c(
* @see mv_source_get_buffer_c()
*/
int mv_source_get_colorspace_c(
- mv_source_h source,
- mv_colorspace_e *image_colorspace);
+ mv_source_h source,
+ mv_colorspace_e *image_colorspace);
/**
* @brief Creates the handle to the configuration of engine.
@@ -222,7 +222,7 @@ int mv_source_get_colorspace_c(
* @see mv_engine_config_get_string_attribute_c()
*/
int mv_create_engine_config_c(
- mv_engine_config_h *engine_cfg);
+ mv_engine_config_h *engine_cfg);
/**
* @brief Destroys the engine configuration handle and releases all its
@@ -239,7 +239,7 @@ int mv_create_engine_config_c(
* @see mv_create_engine_config_c()
*/
int mv_destroy_engine_config_c(
- mv_engine_config_h engine_cfg);
+ mv_engine_config_h engine_cfg);
/**
* @brief Sets the double attribute to the configuration.
@@ -261,9 +261,9 @@ int mv_destroy_engine_config_c(
* @see mv_engine_config_set_string_attribute_c()
*/
int mv_engine_config_set_double_attribute_c(
- mv_engine_config_h engine_cfg,
- const char *name,
- double value);
+ mv_engine_config_h engine_cfg,
+ const char *name,
+ double value);
/**
* @brief Sets the integer attribute to the configuration.
@@ -285,9 +285,9 @@ int mv_engine_config_set_double_attribute_c(
* @see mv_engine_config_set_string_attribute_c()
*/
int mv_engine_config_set_int_attribute_c(
- mv_engine_config_h engine_cfg,
- const char *name,
- int value);
+ mv_engine_config_h engine_cfg,
+ const char *name,
+ int value);
/**
* @brief Sets the boolean attribute to the configuration.
@@ -309,9 +309,9 @@ int mv_engine_config_set_int_attribute_c(
* @see mv_engine_config_set_string_attribute_c()
*/
int mv_engine_config_set_bool_attribute_c(
- mv_engine_config_h engine_cfg,
- const char *name,
- bool attribute);
+ mv_engine_config_h engine_cfg,
+ const char *name,
+ bool attribute);
/**
* @brief Sets the string attribute to the configuration.
@@ -333,9 +333,9 @@ int mv_engine_config_set_bool_attribute_c(
* @see mv_engine_config_set_bool_attribute_c()
*/
int mv_engine_config_set_string_attribute_c(
- mv_engine_config_h engine_cfg,
- const char *name,
- const char *value);
+ mv_engine_config_h engine_cfg,
+ const char *name,
+ const char *value);
/**
* @brief Gets the double attribute from the configuration dictionary.
@@ -359,9 +359,9 @@ int mv_engine_config_set_string_attribute_c(
* @see mv_engine_config_get_string_attribute_c()
*/
int mv_engine_config_get_double_attribute_c(
- mv_engine_config_h engine_cfg,
- const char *name,
- double *value);
+ mv_engine_config_h engine_cfg,
+ const char *name,
+ double *value);
/**
* @brief Gets the integer attribute from the configuration dictionary.
@@ -385,9 +385,9 @@ int mv_engine_config_get_double_attribute_c(
* @see mv_engine_config_get_string_attribute_c()
*/
int mv_engine_config_get_int_attribute_c(
- mv_engine_config_h engine_cfg,
- const char *name,
- int *value);
+ mv_engine_config_h engine_cfg,
+ const char *name,
+ int *value);
/**
* @brief Gets the boolean attribute from the configuration dictionary.
@@ -411,9 +411,9 @@ int mv_engine_config_get_int_attribute_c(
* @see mv_engine_config_get_string_attribute_c()
*/
int mv_engine_config_get_bool_attribute_c(
- mv_engine_config_h engine_cfg,
- const char *name,
- bool *value);
+ mv_engine_config_h engine_cfg,
+ const char *name,
+ bool *value);
/**
* @brief Gets the string attribute from the configuration dictionary.
@@ -439,9 +439,9 @@ int mv_engine_config_get_bool_attribute_c(
* @see mv_engine_config_get_bool_attribute_c()
*/
int mv_engine_config_get_string_attribute_c(
- mv_engine_config_h engine_cfg,
- const char *name,
- char **value);
+ mv_engine_config_h engine_cfg,
+ const char *name,
+ char **value);
/**
* @brief Traverses the list of supported attribute names and types.
@@ -482,8 +482,8 @@ int mv_engine_config_get_string_attribute_c(
* @see mv_engine_config_get_string_attribute_c()
*/
int mv_engine_config_foreach_supported_attribute_c(
- mv_supported_attribute_cb callback,
- void *user_data);
+ mv_supported_attribute_cb callback,
+ void *user_data);
#ifdef __cplusplus
}
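A hedged end-to-end sketch of the C wrappers declared above (not part of this patch); the buffer, dimensions and colorspace are illustrative, and error handling is reduced to early returns.

#include "mv_common_c.h"

/* Hypothetical helper: create a source, fill it from a raw RGB888 buffer,
 * read a property back, and always destroy the handle. */
static int fill_and_query_source(unsigned char *rgb_buffer,
		unsigned int width, unsigned int height)
{
	mv_source_h source = NULL;
	unsigned int stored_width = 0;

	int err = mv_create_source_c(&source);
	if (err != MEDIA_VISION_ERROR_NONE)
		return err;

	err = mv_source_fill_by_buffer_c(source, rgb_buffer,
			width * height * 3, width, height,
			MEDIA_VISION_COLORSPACE_RGB888);
	if (err == MEDIA_VISION_ERROR_NONE)
		err = mv_source_get_width_c(source, &stored_width);

	/* Destroy the handle regardless of the fill/query result. */
	mv_destroy_source_c(source);
	return err;
}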
diff --git a/mv_common/src/EngineConfig.cpp b/mv_common/src/EngineConfig.cpp
index 64bf1d99..38e545ff 100644
--- a/mv_common/src/EngineConfig.cpp
+++ b/mv_common/src/EngineConfig.cpp
@@ -25,13 +25,11 @@
* @brief Engine Configuration class methods implementation.
*/
-namespace MediaVision
-{
-namespace Common
-{
+namespace MediaVision {
+namespace Common {
std::string EngineConfig::DefConfigFilePath =
- std::string("/usr/share/config/capi-media-vision/media-vision-config.json");
+ std::string("/usr/share/config/capi-media-vision/media-vision-config.json");
std::map<std::string, double> EngineConfig::DefDblDict;
std::map<std::string, int> EngineConfig::DefIntDict;
@@ -40,323 +38,297 @@ std::map<std::string, std::string> EngineConfig::DefStrDict;
EngineConfig::EngineConfig()
{
- // Force load default attributes from configuration file
- cacheDictionaries(false);
-
- // Insert default attribute values into creating engine configuration
- m_dblDict.insert(getDefaultDblDict().begin(), getDefaultDblDict().end());
- m_intDict.insert(getDefaultIntDict().begin(), getDefaultIntDict().end());
- m_boolDict.insert(getDefaultBoolDict().begin(), getDefaultBoolDict().end());
- m_strDict.insert(getDefaultStrDict().begin(), getDefaultStrDict().end());
+ // Force load default attributes from configuration file
+ cacheDictionaries(false);
+
+ // Insert default attribute values into creating engine configuration
+ m_dblDict.insert(getDefaultDblDict().begin(), getDefaultDblDict().end());
+ m_intDict.insert(getDefaultIntDict().begin(), getDefaultIntDict().end());
+ m_boolDict.insert(getDefaultBoolDict().begin(), getDefaultBoolDict().end());
+ m_strDict.insert(getDefaultStrDict().begin(), getDefaultStrDict().end());
}
EngineConfig::~EngineConfig()
{
- ; /* NULL */
+ ; /* NULL */
}
int EngineConfig::setAttribute(const std::string& key, const double value)
{
- LOGI("Set double attribute for the engine config %p. [%s] = %f",
- this, key.c_str(), value);
+ LOGI("Set double attribute for the engine config %p. [%s] = %f",
+ this, key.c_str(), value);
- if (m_dblDict.find(key) == m_dblDict.end())
- {
- LOGE("Double attribute [%s] can't be set because isn't supported", key.c_str());
- return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE;
- }
+ if (m_dblDict.find(key) == m_dblDict.end()) {
+ LOGE("Double attribute [%s] can't be set because isn't supported", key.c_str());
+ return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE;
+ }
- m_dblDict[key] = value;
+ m_dblDict[key] = value;
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int EngineConfig::setAttribute(const std::string& key, const int value)
{
- LOGI("Set integer attribute for the engine config %p. [%s] = %i",
- this, key.c_str(), value);
+ LOGI("Set integer attribute for the engine config %p. [%s] = %i",
+ this, key.c_str(), value);
- if (m_intDict.find(key) == m_intDict.end())
- {
- LOGE("Integer attribute [%s] can't be set because isn't supported", key.c_str());
- return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE;
- }
+ if (m_intDict.find(key) == m_intDict.end()) {
+ LOGE("Integer attribute [%s] can't be set because isn't supported", key.c_str());
+ return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE;
+ }
- m_intDict[key] = value;
+ m_intDict[key] = value;
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int EngineConfig::setAttribute(const std::string& key, const bool value)
{
- LOGI("Set boolean attribute for the engine config %p. [%s] = %s",
- this, key.c_str(), value ? "TRUE" : "FALSE");
+ LOGI("Set boolean attribute for the engine config %p. [%s] = %s",
+ this, key.c_str(), value ? "TRUE" : "FALSE");
- if (m_boolDict.find(key) == m_boolDict.end())
- {
- LOGE("Boolean attribute [%s] can't be set because isn't supported", key.c_str());
- return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE;
- }
+ if (m_boolDict.find(key) == m_boolDict.end()) {
+ LOGE("Boolean attribute [%s] can't be set because isn't supported", key.c_str());
+ return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE;
+ }
- m_boolDict[key] = value;
+ m_boolDict[key] = value;
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int EngineConfig::setAttribute(const std::string& key, const std::string& value)
{
- LOGI("Set string attribute for the engine config %p. [%s] = %s",
- this, key.c_str(), value.c_str());
+ LOGI("Set string attribute for the engine config %p. [%s] = %s",
+ this, key.c_str(), value.c_str());
- if (m_strDict.find(key) == m_strDict.end())
- {
- LOGE("String attribute [%s] can't be set because isn't supported", key.c_str());
- return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE;
- }
+ if (m_strDict.find(key) == m_strDict.end()) {
+ LOGE("String attribute [%s] can't be set because isn't supported", key.c_str());
+ return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE;
+ }
- m_strDict[key] = value;
+ m_strDict[key] = value;
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int EngineConfig::getDoubleAttribute(const std::string& key, double *value) const
{
- DictDblConstIter dictIter = m_dblDict.find(key);
- if (dictIter == m_dblDict.end())
- {
- LOGE("Attempt to access to the unsupported double attribute [%s] "
- "of the engine config %p", key.c_str(), this);
- return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE;
- }
+ DictDblConstIter dictIter = m_dblDict.find(key);
+ if (dictIter == m_dblDict.end()) {
+ LOGE("Attempt to access to the unsupported double attribute [%s] "
+ "of the engine config %p", key.c_str(), this);
+ return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE;
+ }
- LOGD("Get double attribute from the engine config %p. [%s] = %f",
- this, dictIter->first.c_str(), dictIter->second);
+ LOGD("Get double attribute from the engine config %p. [%s] = %f",
+ this, dictIter->first.c_str(), dictIter->second);
- *value = dictIter->second;
+ *value = dictIter->second;
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int EngineConfig::getIntegerAttribute(const std::string& key, int *value) const
{
- DictIntConstIter dictIter = m_intDict.find(key);
- if (dictIter == m_intDict.end())
- {
- LOGE("Attempt to access to the unsupported integer attribute [%s] "
- "of the engine config %p", key.c_str(), this);
- return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE;
- }
+ DictIntConstIter dictIter = m_intDict.find(key);
+ if (dictIter == m_intDict.end()) {
+ LOGE("Attempt to access to the unsupported integer attribute [%s] "
+ "of the engine config %p", key.c_str(), this);
+ return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE;
+ }
- LOGD("Get integer attribute from the engine config %p. [%s] = %i",
- this, dictIter->first.c_str(), dictIter->second);
+ LOGD("Get integer attribute from the engine config %p. [%s] = %i",
+ this, dictIter->first.c_str(), dictIter->second);
- *value = dictIter->second;
+ *value = dictIter->second;
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int EngineConfig::getBooleanAttribute(const std::string& key, bool *value) const
{
- DictBoolConstIter dictIter = m_boolDict.find(key);
- if (dictIter == m_boolDict.end())
- {
- LOGE("Attempt to access to the unsupported boolean attribute [%s] "
- "of the engine config %p", key.c_str(), this);
- return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE;
- }
+ DictBoolConstIter dictIter = m_boolDict.find(key);
+ if (dictIter == m_boolDict.end()) {
+ LOGE("Attempt to access to the unsupported boolean attribute [%s] "
+ "of the engine config %p", key.c_str(), this);
+ return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE;
+ }
- LOGD("Get boolean attribute from the engine config %p. [%s] = %s",
- this, dictIter->first.c_str(), dictIter->second ? "TRUE" : "FALSE");
+ LOGD("Get boolean attribute from the engine config %p. [%s] = %s",
+ this, dictIter->first.c_str(), dictIter->second ? "TRUE" : "FALSE");
- *value = dictIter->second;
+ *value = dictIter->second;
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int EngineConfig::getStringAttribute(const std::string& key, std::string *value) const
{
- DictStrConstIter dictIter = m_strDict.find(key);
- if (dictIter == m_strDict.end())
- {
- LOGE("Attempt to access to the unsupported string attribute [%s] "
- "of the engine config %p", key.c_str(), this);
- return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE;
- }
+ DictStrConstIter dictIter = m_strDict.find(key);
+ if (dictIter == m_strDict.end()) {
+ LOGE("Attempt to access to the unsupported string attribute [%s] "
+ "of the engine config %p", key.c_str(), this);
+ return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE;
+ }
- LOGD("Get string attribute from the engine config %p. [%s] = %s",
- this, dictIter->first.c_str(), dictIter->second.c_str());
+ LOGD("Get string attribute from the engine config %p. [%s] = %s",
+ this, dictIter->first.c_str(), dictIter->second.c_str());
- *value = dictIter->second;
+ *value = dictIter->second;
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
-// static
+/*
+ * static
+ */
bool EngineConfig::setDefaultConfigFilePath(const std::string& confFilePath)
{
- if (0 != DefConfigFilePath.compare(confFilePath))
- {
- DefConfigFilePath = confFilePath;
- return true;
- }
+ if (0 != DefConfigFilePath.compare(confFilePath)) {
+ DefConfigFilePath = confFilePath;
+ return true;
+ }
- return false;
+ return false;
}
const std::map<std::string, double>& EngineConfig::getDefaultDblDict()
{
- cacheDictionaries();
+ cacheDictionaries();
- return DefDblDict;
+ return DefDblDict;
}
const std::map<std::string, int>& EngineConfig::getDefaultIntDict()
{
- cacheDictionaries();
+ cacheDictionaries();
- return DefIntDict;
+ return DefIntDict;
}
const std::map<std::string, bool>& EngineConfig::getDefaultBoolDict()
{
- cacheDictionaries();
+ cacheDictionaries();
- return DefBoolDict;
+ return DefBoolDict;
}
const std::map<std::string, std::string>& EngineConfig::getDefaultStrDict()
{
- cacheDictionaries();
+ cacheDictionaries();
- return DefStrDict;
+ return DefStrDict;
}
int EngineConfig::cacheDictionaries(bool isLazyCache, std::string configFilePath)
{
- static bool isCached = false;
- if (!isLazyCache || !isCached)
- {
- LOGI("Start to cache default attributes from engine configuration file.");
-
- DefDblDict.clear();
- DefIntDict.clear();
- DefBoolDict.clear();
- DefStrDict.clear();
-
- const char *conf_file = configFilePath.c_str();
- JsonParser *parser;
- GError *error = NULL;
-
- parser = json_parser_new();
- json_parser_load_from_file(parser, conf_file, &error);
- if (error)
- {
- LOGW("Unable to parse file '%s': %s\n", conf_file, error->message);
- g_error_free(error);
- g_object_unref(parser);
- return MEDIA_VISION_ERROR_NO_DATA;
- }
-
- JsonNode *root = json_parser_get_root(parser);
- if (JSON_NODE_OBJECT != json_node_get_node_type(root))
- {
- LOGW("Can't parse tests configuration file. "
- "Incorrect json markup.");
- g_object_unref(parser);
- return MEDIA_VISION_ERROR_NO_DATA;
- }
-
- JsonObject *jobj = json_node_get_object(root);
-
- if (!json_object_has_member(jobj, "attributes"))
- {
- LOGW("Can't parse tests configuration file. "
- "No 'attributes' section.");
- g_object_unref(parser);
- return MEDIA_VISION_ERROR_NO_DATA;
- }
-
- JsonNode *attr_node =
- json_object_get_member(jobj, "attributes");
-
- if (JSON_NODE_ARRAY != json_node_get_node_type(attr_node))
- {
- LOGW("Can't parse tests configuration file. "
- "'attributes' section isn't array.");
- g_object_unref(parser);
- return MEDIA_VISION_ERROR_NO_DATA;
- }
-
- JsonArray *attr_array = json_node_get_array(attr_node);
-
- const guint attr_num = json_array_get_length(attr_array);
-
- guint attrInd = 0;
- for (; attrInd < attr_num; ++attrInd)
- {
- JsonNode *attr_node = json_array_get_element(attr_array, attrInd);
-
- if (JSON_NODE_OBJECT != json_node_get_node_type(attr_node))
- {
- LOGW("Attribute %u wasn't parsed from json file.", attrInd);
- continue;
- }
-
- JsonObject *attr_obj = json_node_get_object(attr_node);
-
- if (!json_object_has_member(attr_obj, "name") ||
- !json_object_has_member(attr_obj, "type") ||
- !json_object_has_member(attr_obj, "value"))
- {
- LOGW("Attribute %u wasn't parsed from json file.", attrInd);
- continue;
- }
-
- const char *nameStr =
- (char*)json_object_get_string_member(attr_obj, "name");
- const char *typeStr =
- (char*)json_object_get_string_member(attr_obj, "type");
-
- if (NULL == nameStr || NULL == typeStr)
- {
- LOGW("Attribute %i wasn't parsed from json file. name and/or "
- "type of the attribute are parsed as NULL.", attrInd);
- continue;
- }
- else if (0 == strcmp("double", typeStr))
- {
- DefDblDict[std::string(nameStr)] =
- (double)json_object_get_double_member(attr_obj, "value");
- }
- else if (0 == strcmp("integer", typeStr))
- {
- DefIntDict[std::string(nameStr)] =
- (int)json_object_get_int_member(attr_obj, "value");
- }
- else if (0 == strcmp("boolean", typeStr))
- {
- DefBoolDict[std::string(nameStr)] =
- json_object_get_boolean_member(attr_obj, "value") ? true : false;
- }
- else if (0 == strcmp("string", typeStr))
- {
- DefStrDict[std::string(nameStr)] =
- (char*)json_object_get_string_member(attr_obj, "value");
- }
- else
- {
- LOGW("Attribute %i:%s wasn't parsed from json file. "
- "Type isn't supported.", attrInd, nameStr);
- continue;
- }
- }
-
- g_object_unref(parser);
- isCached = true;
- }
-
- return MEDIA_VISION_ERROR_NONE;
+ static bool isCached = false;
+ if (!isLazyCache || !isCached) {
+ LOGI("Start to cache default attributes from engine configuration file.");
+
+ DefDblDict.clear();
+ DefIntDict.clear();
+ DefBoolDict.clear();
+ DefStrDict.clear();
+
+ const char *conf_file = configFilePath.c_str();
+ JsonParser *parser;
+ GError *error = NULL;
+
+ parser = json_parser_new();
+ json_parser_load_from_file(parser, conf_file, &error);
+ if (error) {
+ LOGW("Unable to parse file '%s': %s\n", conf_file, error->message);
+ g_error_free(error);
+ g_object_unref(parser);
+ return MEDIA_VISION_ERROR_NO_DATA;
+ }
+
+ JsonNode *root = json_parser_get_root(parser);
+ if (JSON_NODE_OBJECT != json_node_get_node_type(root)) {
+ LOGW("Can't parse tests configuration file. "
+ "Incorrect json markup.");
+ g_object_unref(parser);
+ return MEDIA_VISION_ERROR_NO_DATA;
+ }
+
+ JsonObject *jobj = json_node_get_object(root);
+
+ if (!json_object_has_member(jobj, "attributes")) {
+ LOGW("Can't parse tests configuration file. "
+ "No 'attributes' section.");
+ g_object_unref(parser);
+ return MEDIA_VISION_ERROR_NO_DATA;
+ }
+
+ JsonNode *attr_node =
+ json_object_get_member(jobj, "attributes");
+
+ if (JSON_NODE_ARRAY != json_node_get_node_type(attr_node)) {
+ LOGW("Can't parse tests configuration file. "
+ "'attributes' section isn't array.");
+ g_object_unref(parser);
+ return MEDIA_VISION_ERROR_NO_DATA;
+ }
+
+ JsonArray *attr_array = json_node_get_array(attr_node);
+
+ const guint attr_num = json_array_get_length(attr_array);
+
+ guint attrInd = 0;
+ for (; attrInd < attr_num; ++attrInd) {
+ JsonNode *attr_node = json_array_get_element(attr_array, attrInd);
+
+ if (JSON_NODE_OBJECT != json_node_get_node_type(attr_node)) {
+ LOGW("Attribute %u wasn't parsed from json file.", attrInd);
+ continue;
+ }
+
+ JsonObject *attr_obj = json_node_get_object(attr_node);
+
+ if (!json_object_has_member(attr_obj, "name") ||
+ !json_object_has_member(attr_obj, "type") ||
+ !json_object_has_member(attr_obj, "value")) {
+ LOGW("Attribute %u wasn't parsed from json file.", attrInd);
+ continue;
+ }
+
+ const char *nameStr =
+ (char*)json_object_get_string_member(attr_obj, "name");
+ const char *typeStr =
+ (char*)json_object_get_string_member(attr_obj, "type");
+
+ if (NULL == nameStr || NULL == typeStr) {
+ LOGW("Attribute %i wasn't parsed from json file. name and/or "
+ "type of the attribute are parsed as NULL.", attrInd);
+ continue;
+ } else if (0 == strcmp("double", typeStr)) {
+ DefDblDict[std::string(nameStr)] =
+ (double)json_object_get_double_member(attr_obj, "value");
+ } else if (0 == strcmp("integer", typeStr)) {
+ DefIntDict[std::string(nameStr)] =
+ (int)json_object_get_int_member(attr_obj, "value");
+ } else if (0 == strcmp("boolean", typeStr)) {
+ DefBoolDict[std::string(nameStr)] =
+ json_object_get_boolean_member(attr_obj, "value") ? true : false;
+ } else if (0 == strcmp("string", typeStr)) {
+ DefStrDict[std::string(nameStr)] =
+ (char*)json_object_get_string_member(attr_obj, "value");
+ } else {
+ LOGW("Attribute %i:%s wasn't parsed from json file. "
+ "Type isn't supported.", attrInd, nameStr);
+ continue;
+ }
+ }
+
+ g_object_unref(parser);
+ isCached = true;
+ }
+
+ return MEDIA_VISION_ERROR_NONE;
}
} /* namespace Common */
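For context, a sketch of the JSON layout that cacheDictionaries() above expects at the default path /usr/share/config/capi-media-vision/media-vision-config.json, inferred from the parsing code (a root object holding an "attributes" array of name/type/value objects). MV_BARCODE_GENERATE_ATTR_TEXT appears elsewhere in this patch; the remaining attribute names and values are purely illustrative.

{
	"attributes": [
		{ "name": "MV_BARCODE_GENERATE_ATTR_TEXT", "type": "integer", "value": 0 },
		{ "name": "MV_EXAMPLE_THRESHOLD",          "type": "double",  "value": 0.5 },
		{ "name": "MV_EXAMPLE_ENABLED",            "type": "boolean", "value": true },
		{ "name": "MV_EXAMPLE_MODEL_PATH",         "type": "string",  "value": "/usr/share/example" }
	]
}

Entries whose "type" is not one of double, integer, boolean or string are skipped with a warning, as the loop above shows.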
diff --git a/mv_common/src/MediaSource.cpp b/mv_common/src/MediaSource.cpp
index 12886253..bca35c58 100644
--- a/mv_common/src/MediaSource.cpp
+++ b/mv_common/src/MediaSource.cpp
@@ -20,104 +20,100 @@
#include <cstring>
-namespace MediaVision
-{
-namespace Common
-{
-
-MediaSource::MediaSource() : m_pBuffer (NULL), m_bufferSize (0), m_width (0),
- m_height (0), m_colorspace (MEDIA_VISION_COLORSPACE_INVALID)
+namespace MediaVision {
+namespace Common {
+MediaSource::MediaSource() :
+ m_pBuffer(NULL),
+ m_bufferSize(0),
+ m_width(0),
+ m_height(0),
+ m_colorspace(MEDIA_VISION_COLORSPACE_INVALID)
{
}
MediaSource::~MediaSource()
{
- clear();
+ clear();
}
void MediaSource::clear(void)
{
- if (m_pBuffer != NULL)
- {
- LOGD("Delete internal buffer for media source %p", this);
- delete[] m_pBuffer;
- }
- LOGD("Set defaults for media source %p : buffer = NULL; "
- "bufferSize = 0; width = 0; height = 0; "
- "colorspace = MEDIA_VISION_COLORSPACE_INVALID", this);
- m_pBuffer = NULL;
- m_bufferSize = 0;
- m_width = 0;
- m_height = 0;
- m_colorspace = MEDIA_VISION_COLORSPACE_INVALID;
+ if (m_pBuffer != NULL) {
+ LOGD("Delete internal buffer for media source %p", this);
+ delete[] m_pBuffer;
+ }
+ LOGD("Set defaults for media source %p : buffer = NULL; "
+ "bufferSize = 0; width = 0; height = 0; "
+ "colorspace = MEDIA_VISION_COLORSPACE_INVALID", this);
+ m_pBuffer = NULL;
+ m_bufferSize = 0;
+ m_width = 0;
+ m_height = 0;
+ m_colorspace = MEDIA_VISION_COLORSPACE_INVALID;
}
bool MediaSource::fill(const unsigned char *buffer, unsigned int bufferSize,
unsigned int width, unsigned int height, mv_colorspace_e colorspace)
{
- if (bufferSize == 0 || buffer == NULL)
- {
- return false;
- }
-
- LOGD("Call clear() first for media source %p", this);
- clear();
-
- try
- {
- LOGD("Allocate memory for buffer in media source %p", this);
- m_pBuffer = new unsigned char[bufferSize];
- }
- catch(...)
- {
- LOGE("Memory allocating for buffer in media source %p failed!", this);
- m_pBuffer = NULL;
- return false;
- }
-
- LOGD("Copy data from external buffer (%p) to the internal buffer (%p) of "
- "media source %p", buffer, m_pBuffer, this);
- std::memcpy(m_pBuffer, buffer, bufferSize);
-
- LOGD("Assign new size of the internal buffer of media source %p. "
- "New size is %ui.", this, bufferSize);
- m_bufferSize = bufferSize;
-
- LOGD("Assign new size (%ui x %ui) of the internal buffer image for "
- "the media source %p", width, height, this);
- m_width = width;
- m_height = height;
-
- LOGD("Assign new colorspace (%i) of the internal buffer image for "
- "the media source %p", colorspace, this);
- m_colorspace = colorspace;
-
- return true;
+ if (bufferSize == 0 || buffer == NULL) {
+ return false;
+ }
+
+ LOGD("Call clear() first for media source %p", this);
+ clear();
+
+ try {
+ LOGD("Allocate memory for buffer in media source %p", this);
+ m_pBuffer = new unsigned char[bufferSize];
+ } catch(...) {
+ LOGE("Memory allocating for buffer in media source %p failed!", this);
+ m_pBuffer = NULL;
+ return false;
+ }
+
+ LOGD("Copy data from external buffer (%p) to the internal buffer (%p) of "
+ "media source %p", buffer, m_pBuffer, this);
+ std::memcpy(m_pBuffer, buffer, bufferSize);
+
+ LOGD("Assign new size of the internal buffer of media source %p. "
+ "New size is %ui.", this, bufferSize);
+ m_bufferSize = bufferSize;
+
+ LOGD("Assign new size (%ui x %ui) of the internal buffer image for "
+ "the media source %p", width, height, this);
+ m_width = width;
+ m_height = height;
+
+ LOGD("Assign new colorspace (%i) of the internal buffer image for "
+ "the media source %p", colorspace, this);
+ m_colorspace = colorspace;
+
+ return true;
}
unsigned char *MediaSource::getBuffer(void) const
{
- return m_pBuffer;
+ return m_pBuffer;
}
unsigned int MediaSource::getBufferSize(void) const
{
- return m_bufferSize;
+ return m_bufferSize;
}
unsigned int MediaSource::getWidth(void) const
{
- return m_width;
+ return m_width;
}
unsigned int MediaSource::getHeight(void) const
{
- return m_height;
+ return m_height;
}
mv_colorspace_e MediaSource::getColorspace(void) const
{
- return m_colorspace;
+ return m_colorspace;
}
} /* Common */
diff --git a/mv_common/src/mv_common_c.cpp b/mv_common/src/mv_common_c.cpp
index ececf203..9242a56d 100644
--- a/mv_common/src/mv_common_c.cpp
+++ b/mv_common/src/mv_common_c.cpp
@@ -25,266 +25,246 @@
#include <media_packet.h>
int mv_create_source_c(
- mv_source_h *source_ptr)
+ mv_source_h *source_ptr)
{
- if (source_ptr == NULL)
- {
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
+ if (source_ptr == NULL) {
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
- LOGD("Creating media vision source");
- (*source_ptr) = ((mv_source_h)new MediaVision::Common::MediaSource());
+ LOGD("Creating media vision source");
+ (*source_ptr) = ((mv_source_h)new MediaVision::Common::MediaSource());
- if (*source_ptr == NULL)
- {
- LOGE("Failed to create media vision source");
- return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
- }
- LOGD("Media vision source [%p] has been created", *source_ptr);
+ if (*source_ptr == NULL) {
+ LOGE("Failed to create media vision source");
+ return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ }
+ LOGD("Media vision source [%p] has been created", *source_ptr);
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int mv_destroy_source_c(
- mv_source_h source)
+ mv_source_h source)
{
- if (!source)
- {
- LOGE("Media source can't be destroyed because handle is NULL");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
+ if (!source) {
+ LOGE("Media source can't be destroyed because handle is NULL");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
- LOGD("Destroying media vision source [%p]", source);
- delete ((MediaVision::Common::MediaSource*)source);
- LOGD("Media vision source has been destroyed");
+ LOGD("Destroying media vision source [%p]", source);
+ delete ((MediaVision::Common::MediaSource*)source);
+ LOGD("Media vision source has been destroyed");
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int mv_source_fill_by_media_packet_c(
- mv_source_h source,
- media_packet_h media_packet)
+ mv_source_h source,
+ media_packet_h media_packet)
{
- if (!source || !media_packet)
- {
- LOGE("Media source can't be filled by media_packet handle because "
- "one of the source or media_packet handles is NULL. "
- "source = %p; media_packet = %p", source, media_packet);
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- bool is_video = false;
- int image_width = 0;
- int image_height = 0;
- media_format_h format = NULL;
- media_format_mimetype_e mimetype = MEDIA_FORMAT_I420;
- unsigned char *data_buffer = NULL;
- uint64_t buffer_size = 0;
- mv_colorspace_e image_colorspace = MEDIA_VISION_COLORSPACE_INVALID;
-
- int ret = media_packet_is_video(media_packet, &is_video);
- if (ret != MEDIA_PACKET_ERROR_NONE)
- {
- LOGE("media_packet_is_video() failed, mv_source_h fill skipped");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- if (!is_video)
- {
- LOGE("Media packet isn't video, mv_source_h fill skipped");
- return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT;
- }
-
- ret = media_packet_get_format(media_packet, &format);
- if (ret != MEDIA_PACKET_ERROR_NONE)
- {
- LOGE("media_packet_get_format() failed, mv_source_h fill skipped");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- ret = media_format_get_video_info(
- format, &mimetype, &image_width, &image_height, NULL, NULL);
- if (ret != MEDIA_PACKET_ERROR_NONE)
- {
- LOGE("media_format_get_video_info() failed, mv_source_h fill skipped");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- if (mimetype > MEDIA_FORMAT_H261 && mimetype <= MEDIA_FORMAT_MPEG4_ASP)
- {
- LOGE("Media format mimetype is not the raw video, mv_source_h fill skipped");
- return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT;
- }
-
- switch (mimetype)
- {
- case MEDIA_FORMAT_I420:
- image_colorspace = MEDIA_VISION_COLORSPACE_I420;
- break;
- case MEDIA_FORMAT_NV12:
- image_colorspace = MEDIA_VISION_COLORSPACE_NV12;
- break;
- case MEDIA_FORMAT_YV12:
- image_colorspace = MEDIA_VISION_COLORSPACE_YV12;
- break;
- case MEDIA_FORMAT_NV21:
- image_colorspace = MEDIA_VISION_COLORSPACE_NV21;
- break;
- case MEDIA_FORMAT_YUYV:
- image_colorspace = MEDIA_VISION_COLORSPACE_YUYV;
- break;
- case MEDIA_FORMAT_UYVY:
- image_colorspace = MEDIA_VISION_COLORSPACE_UYVY;
- break;
- case MEDIA_FORMAT_422P:
- image_colorspace = MEDIA_VISION_COLORSPACE_422P;
- break;
- case MEDIA_FORMAT_RGB565:
- image_colorspace = MEDIA_VISION_COLORSPACE_RGB565;
- break;
- case MEDIA_FORMAT_RGB888:
- image_colorspace = MEDIA_VISION_COLORSPACE_RGB888;
- break;
- case MEDIA_FORMAT_RGBA:
- image_colorspace = MEDIA_VISION_COLORSPACE_RGBA;
- break;
- default:
- LOGE("Format of the media packet buffer is not supported by media "
- "vision source (media_format_h mimetype=%i)", mimetype);
- return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT;
- }
-
- ret = media_packet_get_buffer_data_ptr(media_packet, (void**)&data_buffer);
- if (ret != MEDIA_PACKET_ERROR_NONE)
- {
- LOGE("media_packet_get_buffer_data_ptr() failed, mv_source_h fill skipped");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- ret = media_packet_get_buffer_size(media_packet, &buffer_size);
- if (ret != MEDIA_PACKET_ERROR_NONE)
- {
- LOGE("media_packet_get_buffer_size() failed, mv_source_h fill skipped");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- if (!((MediaVision::Common::MediaSource*)source)->fill(data_buffer,
- buffer_size, (unsigned int)image_width, (unsigned int)image_height, image_colorspace))
- {
- LOGE("mv_source_h filling from media_packet_h failed");
- return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
- }
-
- LOGD("Media source has been filled from media packet");
- return MEDIA_VISION_ERROR_NONE;
+ if (!source || !media_packet) {
+ LOGE("Media source can't be filled by media_packet handle because "
+ "one of the source or media_packet handles is NULL. "
+ "source = %p; media_packet = %p", source, media_packet);
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ bool is_video = false;
+ int image_width = 0;
+ int image_height = 0;
+ media_format_h format = NULL;
+ media_format_mimetype_e mimetype = MEDIA_FORMAT_I420;
+ unsigned char *data_buffer = NULL;
+ uint64_t buffer_size = 0;
+ mv_colorspace_e image_colorspace = MEDIA_VISION_COLORSPACE_INVALID;
+
+ int ret = media_packet_is_video(media_packet, &is_video);
+ if (ret != MEDIA_PACKET_ERROR_NONE) {
+ LOGE("media_packet_is_video() failed, mv_source_h fill skipped");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ if (!is_video) {
+ LOGE("Media packet isn't video, mv_source_h fill skipped");
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT;
+ }
+
+ ret = media_packet_get_format(media_packet, &format);
+ if (ret != MEDIA_PACKET_ERROR_NONE) {
+ LOGE("media_packet_get_format() failed, mv_source_h fill skipped");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ ret = media_format_get_video_info(
+ format, &mimetype, &image_width, &image_height, NULL, NULL);
+ if (ret != MEDIA_PACKET_ERROR_NONE) {
+ LOGE("media_format_get_video_info() failed, mv_source_h fill skipped");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ if (mimetype > MEDIA_FORMAT_H261 && mimetype <= MEDIA_FORMAT_MPEG4_ASP) {
+ LOGE("Media format mimetype is not the raw video, mv_source_h fill skipped");
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT;
+ }
+
+ switch (mimetype) {
+ case MEDIA_FORMAT_I420:
+ image_colorspace = MEDIA_VISION_COLORSPACE_I420;
+ break;
+ case MEDIA_FORMAT_NV12:
+ image_colorspace = MEDIA_VISION_COLORSPACE_NV12;
+ break;
+ case MEDIA_FORMAT_YV12:
+ image_colorspace = MEDIA_VISION_COLORSPACE_YV12;
+ break;
+ case MEDIA_FORMAT_NV21:
+ image_colorspace = MEDIA_VISION_COLORSPACE_NV21;
+ break;
+ case MEDIA_FORMAT_YUYV:
+ image_colorspace = MEDIA_VISION_COLORSPACE_YUYV;
+ break;
+ case MEDIA_FORMAT_UYVY:
+ image_colorspace = MEDIA_VISION_COLORSPACE_UYVY;
+ break;
+ case MEDIA_FORMAT_422P:
+ image_colorspace = MEDIA_VISION_COLORSPACE_422P;
+ break;
+ case MEDIA_FORMAT_RGB565:
+ image_colorspace = MEDIA_VISION_COLORSPACE_RGB565;
+ break;
+ case MEDIA_FORMAT_RGB888:
+ image_colorspace = MEDIA_VISION_COLORSPACE_RGB888;
+ break;
+ case MEDIA_FORMAT_RGBA:
+ image_colorspace = MEDIA_VISION_COLORSPACE_RGBA;
+ break;
+ default:
+ LOGE("Format of the media packet buffer is not supported by media "
+ "vision source (media_format_h mimetype=%i)", mimetype);
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT;
+ }
+
+ ret = media_packet_get_buffer_data_ptr(media_packet, (void**)&data_buffer);
+ if (ret != MEDIA_PACKET_ERROR_NONE) {
+ LOGE("media_packet_get_buffer_data_ptr() failed, mv_source_h fill skipped");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ ret = media_packet_get_buffer_size(media_packet, &buffer_size);
+ if (ret != MEDIA_PACKET_ERROR_NONE) {
+ LOGE("media_packet_get_buffer_size() failed, mv_source_h fill skipped");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ if (!((MediaVision::Common::MediaSource*)source)->fill(data_buffer, buffer_size,
+ (unsigned int)image_width, (unsigned int)image_height, image_colorspace)) {
+ LOGE("mv_source_h filling from media_packet_h failed");
+ return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ }
+
+ LOGD("Media source has been filled from media packet");
+ return MEDIA_VISION_ERROR_NONE;
}
int mv_source_fill_by_buffer_c(
- mv_source_h source,
- unsigned char *data_buffer,
- unsigned int buffer_size,
- unsigned int image_width,
- unsigned int image_height,
- mv_colorspace_e image_colorspace)
+ mv_source_h source,
+ unsigned char *data_buffer,
+ unsigned int buffer_size,
+ unsigned int image_width,
+ unsigned int image_height,
+ mv_colorspace_e image_colorspace)
{
- if (!source || buffer_size == 0 || data_buffer == NULL)
- {
- LOGE("Media source can't be filled by buffer because "
- "one of the source or data_buffer is NULL or buffer_size = 0. "
- "source = %p; data_buffer = %p; buffer_size = %u",
- source, data_buffer, buffer_size);
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- if (!((MediaVision::Common::MediaSource*)source)->fill(data_buffer,
- buffer_size, image_width, image_height, image_colorspace))
- {
- LOGE("mv_source_h filling from buffer failed");
- return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
- }
-
- LOGD("Media source has been filled from buffer");
- return MEDIA_VISION_ERROR_NONE;
+ if (!source || buffer_size == 0 || data_buffer == NULL) {
+ LOGE("Media source can't be filled by buffer because "
+ "one of the source or data_buffer is NULL or buffer_size = 0. "
+ "source = %p; data_buffer = %p; buffer_size = %u",
+ source, data_buffer, buffer_size);
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ if (!((MediaVision::Common::MediaSource*)source)->fill(data_buffer,
+ buffer_size, image_width, image_height, image_colorspace)) {
+ LOGE("mv_source_h filling from buffer failed");
+ return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ }
+
+ LOGD("Media source has been filled from buffer");
+ return MEDIA_VISION_ERROR_NONE;
}
int mv_source_clear_c(
- mv_source_h source)
+ mv_source_h source)
{
- if (!source)
- {
- LOGE("Media source can't be cleared because source handle is NULL");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
+ if (!source) {
+ LOGE("Media source can't be cleared because source handle is NULL");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
- LOGD("Clear media vision source [%p]", source);
- ((MediaVision::Common::MediaSource*)source)->clear();
- LOGD("Media vision source [%p] has been cleared", source);
+ LOGD("Clear media vision source [%p]", source);
+ ((MediaVision::Common::MediaSource*)source)->clear();
+ LOGD("Media vision source [%p] has been cleared", source);
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int mv_source_get_buffer_c(
- mv_source_h source,
- unsigned char **buffer,
- unsigned int *size)
+ mv_source_h source,
+ unsigned char **buffer,
+ unsigned int *size)
{
- if (!source)
- {
- LOGE("Impossible to get buffer for NULL mv_source_h handle");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
+ if (!source) {
+ LOGE("Impossible to get buffer for NULL mv_source_h handle");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
- LOGD("Get media vision source [%p] buffer and buffer size to be returned", source);
- *buffer = ((MediaVision::Common::MediaSource*)source)->getBuffer();
- *size = ((MediaVision::Common::MediaSource*)source)->getBufferSize();
- LOGD("Media vision source [%p] buffer (%p) and buffer size (%ui) has been returned", source, buffer, *size);
+ LOGD("Get media vision source [%p] buffer and buffer size to be returned", source);
+ *buffer = ((MediaVision::Common::MediaSource*)source)->getBuffer();
+ *size = ((MediaVision::Common::MediaSource*)source)->getBufferSize();
+	LOGD("Media vision source [%p] buffer (%p) and buffer size (%u) has been returned", source, *buffer, *size);
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int mv_source_get_height_c(
- mv_source_h source,
- unsigned int *height)
+ mv_source_h source,
+ unsigned int *height)
{
- if (!source)
- {
- LOGE("Impossible to get height for NULL mv_source_h handle");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
+ if (!source) {
+ LOGE("Impossible to get height for NULL mv_source_h handle");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
- LOGD("Get media vision source [%p] height to be returned", source);
- *height = ((MediaVision::Common::MediaSource*)source)->getHeight();
- LOGD("Media vision source [%p] height (%ui) has been returned", source, *height);
+ LOGD("Get media vision source [%p] height to be returned", source);
+ *height = ((MediaVision::Common::MediaSource*)source)->getHeight();
+	LOGD("Media vision source [%p] height (%u) has been returned", source, *height);
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int mv_source_get_width_c(
- mv_source_h source,
- unsigned int *width)
+ mv_source_h source,
+ unsigned int *width)
{
- if (!source)
- {
- LOGE("Impossible to get width for NULL mv_source_h handle");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
+ if (!source) {
+ LOGE("Impossible to get width for NULL mv_source_h handle");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
- LOGD("Get media vision source [%p] width to be returned", source);
- *width = ((MediaVision::Common::MediaSource*)source)->getWidth();
- LOGD("Media vision source [%p] width (%ui) has been returned", source, *width);
+ LOGD("Get media vision source [%p] width to be returned", source);
+ *width = ((MediaVision::Common::MediaSource*)source)->getWidth();
+	LOGD("Media vision source [%p] width (%u) has been returned", source, *width);
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int mv_source_get_colorspace_c(
- mv_source_h source,
- mv_colorspace_e *colorspace)
+ mv_source_h source,
+ mv_colorspace_e *colorspace)
{
- if (!source)
- {
- LOGE("Impossible to get colorspace for NULL mv_source_h handle");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
+ if (!source) {
+ LOGE("Impossible to get colorspace for NULL mv_source_h handle");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
LOGD("Get media vision source [%p] colorspace to be returned", source);
*colorspace = ((MediaVision::Common::MediaSource*)source)->getColorspace();
@@ -296,349 +276,318 @@ int mv_source_get_colorspace_c(
int mv_create_engine_config_c(
mv_engine_config_h *engine_cfg)
{
- if (engine_cfg == NULL)
- {
- LOGE("Impossible to create mv_engine_config_h handle");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
+ if (engine_cfg == NULL) {
+ LOGE("Impossible to create mv_engine_config_h handle");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+ LOGD("Creating media vision engine config");
+ (*engine_cfg) = ((mv_engine_config_h)new MediaVision::Common::EngineConfig());
+ LOGD("Media vision engine config [%p] has been created", *engine_cfg);
- LOGD("Creating media vision engine config");
- (*engine_cfg) = ((mv_engine_config_h)new MediaVision::Common::EngineConfig());
- LOGD("Media vision engine config [%p] has been created", *engine_cfg);
+ if (*engine_cfg == NULL) {
+ LOGE("Failed to create mv_engine_config_h handle");
+ return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ }
- if (*engine_cfg == NULL)
- {
- LOGE("Failed to create mv_engine_config_h handle");
- return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
- }
-
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int mv_destroy_engine_config_c(
- mv_engine_config_h engine_cfg)
+ mv_engine_config_h engine_cfg)
{
- if (!engine_cfg)
- {
- LOGE("Impossible to destroy NULL mv_engine_config_h handle");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
+ if (!engine_cfg) {
+ LOGE("Impossible to destroy NULL mv_engine_config_h handle");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
- LOGD("Destroying media vision engine config [%p]", engine_cfg);
- delete ((MediaVision::Common::EngineConfig*)engine_cfg);
- LOGD("Media vision engine config has been destroyed");
+ LOGD("Destroying media vision engine config [%p]", engine_cfg);
+ delete ((MediaVision::Common::EngineConfig*)engine_cfg);
+ LOGD("Media vision engine config has been destroyed");
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int mv_engine_config_set_double_attribute_c(
- mv_engine_config_h engine_cfg,
- const char *name,
- double value)
+ mv_engine_config_h engine_cfg,
+ const char *name,
+ double value)
{
- if (!engine_cfg || name == NULL)
- {
- LOGE("Impossible to set attribute. One of the required parameters is "
- "NULL. engine_cfg = %p; name = %p;",
- engine_cfg, name);
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->setAttribute(
- std::string(name), value);
-
- if (ret != MEDIA_VISION_ERROR_NONE)
- {
- LOGE("Failed to set attribute [%s] with value %f. Error code (0x%08x)",
- name, value, ret);
- return ret;
- }
-
- LOGD("Attribute [%s] (value %f) has been set", name, value);
- return ret;
+ if (!engine_cfg || name == NULL) {
+ LOGE("Impossible to set attribute. One of the required parameters is "
+ "NULL. engine_cfg = %p; name = %p;",
+ engine_cfg, name);
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->setAttribute(
+ std::string(name), value);
+
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Failed to set attribute [%s] with value %f. Error code (0x%08x)",
+ name, value, ret);
+ return ret;
+ }
+
+ LOGD("Attribute [%s] (value %f) has been set", name, value);
+ return ret;
}
int mv_engine_config_set_int_attribute_c(
- mv_engine_config_h engine_cfg,
- const char *name,
- int value)
+ mv_engine_config_h engine_cfg,
+ const char *name,
+ int value)
{
- if (!engine_cfg || name == NULL)
- {
- LOGE("Impossible to set attribute. One of the required parameters is "
- "NULL. engine_cfg = %p; name = %p;",
- engine_cfg, name);
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->setAttribute(
- std::string(name), value);
-
- if (ret != MEDIA_VISION_ERROR_NONE)
- {
- LOGE("Failed to set attribute [%s] with value %i. Error code (0x%08x)",
- name, value, ret);
- return ret;
- }
-
- LOGD("Attribute [%s] (value %i) has been set", name, value);
-
- return ret;
+ if (!engine_cfg || name == NULL) {
+ LOGE("Impossible to set attribute. One of the required parameters is "
+ "NULL. engine_cfg = %p; name = %p;",
+ engine_cfg, name);
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->setAttribute(
+ std::string(name), value);
+
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Failed to set attribute [%s] with value %i. Error code (0x%08x)",
+ name, value, ret);
+ return ret;
+ }
+
+ LOGD("Attribute [%s] (value %i) has been set", name, value);
+
+ return ret;
}
int mv_engine_config_set_bool_attribute_c(
- mv_engine_config_h engine_cfg,
- const char *name,
- bool value)
+ mv_engine_config_h engine_cfg,
+ const char *name,
+ bool value)
{
- if (!engine_cfg || name == NULL)
- {
- LOGE("Impossible to set attribute. One of the required parameters is "
- "NULL. engine_cfg = %p; name = %p;",
- engine_cfg, name);
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->setAttribute(
- std::string(name), value);
-
- if (ret != MEDIA_VISION_ERROR_NONE)
- {
- LOGE("Failed to set attribute [%s] with value %s. Error code (0x%08x)",
- name, value ? "TRUE" : "FALSE", ret);
- return ret;
- }
-
- LOGD("Attribute [%s] (value %s) has been set",
- name, value ? "TRUE" : "FALSE");
- return ret;
+ if (!engine_cfg || name == NULL) {
+ LOGE("Impossible to set attribute. One of the required parameters is "
+ "NULL. engine_cfg = %p; name = %p;",
+ engine_cfg, name);
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->setAttribute(
+ std::string(name), value);
+
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Failed to set attribute [%s] with value %s. Error code (0x%08x)",
+ name, value ? "TRUE" : "FALSE", ret);
+ return ret;
+ }
+
+ LOGD("Attribute [%s] (value %s) has been set",
+ name, value ? "TRUE" : "FALSE");
+ return ret;
}
int mv_engine_config_set_string_attribute_c(
- mv_engine_config_h engine_cfg,
- const char *name,
- const char *value)
+ mv_engine_config_h engine_cfg,
+ const char *name,
+ const char *value)
{
- if (!engine_cfg || name == NULL || value == NULL)
- {
- LOGE("Impossible to set attribute. One of the required parameters is "
- "NULL. engine_cfg = %p; name = %p; value = %p;",
- engine_cfg, name, value);
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->setAttribute(
- std::string(name), std::string(value));
-
- if (ret != MEDIA_VISION_ERROR_NONE)
- {
- LOGE("Failed to set attribute [%s] with value %s. Error code (0x%08x)",
- name, value, ret);
- return ret;
- }
-
- LOGD("Attribute [%s] (value %s) has been set", name, value);
- return ret;
+ if (!engine_cfg || name == NULL || value == NULL) {
+ LOGE("Impossible to set attribute. One of the required parameters is "
+ "NULL. engine_cfg = %p; name = %p; value = %p;",
+ engine_cfg, name, value);
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->setAttribute(
+ std::string(name), std::string(value));
+
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Failed to set attribute [%s] with value %s. Error code (0x%08x)",
+ name, value, ret);
+ return ret;
+ }
+
+ LOGD("Attribute [%s] (value %s) has been set", name, value);
+ return ret;
}
int mv_engine_config_get_double_attribute_c(
- mv_engine_config_h engine_cfg,
- const char *name,
- double *value)
+ mv_engine_config_h engine_cfg,
+ const char *name,
+ double *value)
{
- if (!engine_cfg || name == NULL || value == NULL)
- {
- LOGE("Impossible to get attribute. One of the required parameters is "
- "NULL. engine_cfg = %p; name = %p; value = %p;",
- engine_cfg, name, value);
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->getDoubleAttribute(
- std::string(name), value);
-
- if (ret != MEDIA_VISION_ERROR_NONE)
- {
- LOGE("Failed to get attribute [%s]. Error code (0x%08x)",
- name, ret);
- return ret;
- }
-
- LOGD("Attribute [%s] (value %f) has been gotten",
- name, *value);
- return ret;
+ if (!engine_cfg || name == NULL || value == NULL) {
+ LOGE("Impossible to get attribute. One of the required parameters is "
+ "NULL. engine_cfg = %p; name = %p; value = %p;",
+ engine_cfg, name, value);
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->getDoubleAttribute(
+ std::string(name), value);
+
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Failed to get attribute [%s]. Error code (0x%08x)",
+ name, ret);
+ return ret;
+ }
+
+ LOGD("Attribute [%s] (value %f) has been gotten",
+ name, *value);
+ return ret;
}
int mv_engine_config_get_int_attribute_c(
- mv_engine_config_h engine_cfg,
- const char *name,
- int *value)
+ mv_engine_config_h engine_cfg,
+ const char *name,
+ int *value)
{
- if (!engine_cfg || name == NULL || value == NULL)
- {
- LOGE("Impossible to get attribute. One of the required parameters is "
- "NULL. engine_cfg = %p; name = %p; value = %p;",
- engine_cfg, name, value);
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->getIntegerAttribute(
- std::string(name), value);
-
- if (ret != MEDIA_VISION_ERROR_NONE)
- {
- LOGE("Failed to get attribute [%s]. Error code (0x%08x)",
- name, ret);
- return ret;
- }
-
- LOGD("Attribute [%s] (value %i) has been gotten",
- name, *value);
- return ret;
+ if (!engine_cfg || name == NULL || value == NULL) {
+ LOGE("Impossible to get attribute. One of the required parameters is "
+ "NULL. engine_cfg = %p; name = %p; value = %p;",
+ engine_cfg, name, value);
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->getIntegerAttribute(
+ std::string(name), value);
+
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Failed to get attribute [%s]. Error code (0x%08x)",
+ name, ret);
+ return ret;
+ }
+
+ LOGD("Attribute [%s] (value %i) has been gotten",
+ name, *value);
+ return ret;
}
int mv_engine_config_get_bool_attribute_c(
- mv_engine_config_h engine_cfg,
- const char *name,
- bool *value)
+ mv_engine_config_h engine_cfg,
+ const char *name,
+ bool *value)
{
- if (!engine_cfg || name == NULL || value == NULL)
- {
- LOGE("Impossible to get attribute. One of the required parameters is "
- "NULL. engine_cfg = %p; name = %p; value = %p;",
- engine_cfg, name, value);
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->getBooleanAttribute(
- std::string(name), value);
-
- if (ret != MEDIA_VISION_ERROR_NONE)
- {
- LOGE("Failed to get attribute [%s]. Error code (0x%08x)",
- name, ret);
- return ret;
- }
-
- LOGD("Attribute [%s] (value %s) has been gotten",
- name, *value ? "TRUE" : "FALSE");
- return ret;
+ if (!engine_cfg || name == NULL || value == NULL) {
+ LOGE("Impossible to get attribute. One of the required parameters is "
+ "NULL. engine_cfg = %p; name = %p; value = %p;",
+ engine_cfg, name, value);
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->getBooleanAttribute(
+ std::string(name), value);
+
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Failed to get attribute [%s]. Error code (0x%08x)",
+ name, ret);
+ return ret;
+ }
+
+ LOGD("Attribute [%s] (value %s) has been gotten",
+ name, *value ? "TRUE" : "FALSE");
+ return ret;
}
int mv_engine_config_get_string_attribute_c(
- mv_engine_config_h engine_cfg,
- const char *name,
- char **value)
+ mv_engine_config_h engine_cfg,
+ const char *name,
+ char **value)
{
- if (!engine_cfg || name == NULL || value == NULL)
- {
- LOGE("Impossible to get attribute. One of the required parameters is "
- "NULL. engine_cfg = %p; name = %p; value = %p;",
- engine_cfg, name, value);
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- std::string attributeValue;
- int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->getStringAttribute(
- std::string(name), &attributeValue);
-
- if (ret != MEDIA_VISION_ERROR_NONE)
- {
- LOGE("Failed to get attribute [%s]. Error code (0x%08x)",
- name, ret);
- return ret;
- }
-
- LOGD("Convert string to char*");
- int stringSize = attributeValue.size();
- (*value) = new char[stringSize + 1];
-
- if (attributeValue.copy(*value, stringSize) != attributeValue.size())
- {
- LOGE("Conversion from string to char* failed");
- delete[] (*value);
- (*value) = NULL;
- return MEDIA_VISION_ERROR_INVALID_OPERATION;
- }
- (*value)[stringSize] = '\0';
-
- LOGD("Attribute [%s] (value %s) has been gotten",
- name, *value);
- return ret;
+ if (!engine_cfg || name == NULL || value == NULL) {
+ LOGE("Impossible to get attribute. One of the required parameters is "
+ "NULL. engine_cfg = %p; name = %p; value = %p;",
+ engine_cfg, name, value);
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ std::string attributeValue;
+ int ret = ((MediaVision::Common::EngineConfig*)engine_cfg)->getStringAttribute(
+ std::string(name), &attributeValue);
+
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Failed to get attribute [%s]. Error code (0x%08x)",
+ name, ret);
+ return ret;
+ }
+
+ LOGD("Convert string to char*");
+ int stringSize = attributeValue.size();
+ (*value) = new char[stringSize + 1];
+
+ if (attributeValue.copy(*value, stringSize) != attributeValue.size()) {
+ LOGE("Conversion from string to char* failed");
+ delete[] (*value);
+ (*value) = NULL;
+ return MEDIA_VISION_ERROR_INVALID_OPERATION;
+ }
+ (*value)[stringSize] = '\0';
+
+ LOGD("Attribute [%s] (value %s) has been gotten",
+ name, *value);
+ return ret;
}
int mv_engine_config_foreach_supported_attribute_c(
mv_supported_attribute_cb callback,
void *user_data)
{
- if (NULL == callback)
- {
- LOGE("Impossible to traverse supported by Media Vision engine "
- "configuration attributes. Callback is NULL");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- using namespace MediaVision::Common;
-
- int err = EngineConfig::cacheDictionaries();
-
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- LOGE("Failed to get attribute names/types. "
- "Failed to cache attributes from file");
- return err;
- }
-
- DictDblConstIter dblDictIter = EngineConfig::getDefaultDblDict().begin();
- DictIntConstIter intDictIter = EngineConfig::getDefaultIntDict().begin();
- DictBoolConstIter boolDictIter = EngineConfig::getDefaultBoolDict().begin();
- DictStrConstIter strDictIter = EngineConfig::getDefaultStrDict().begin();
-
- while (dblDictIter != EngineConfig::getDefaultDblDict().end())
- {
- if (!callback(MV_ENGINE_CONFIG_ATTR_TYPE_DOUBLE,
- dblDictIter->first.c_str(), user_data))
- {
- LOGD("Attribute names/types traverse has been stopped by the user");
- return MEDIA_VISION_ERROR_NONE;
- }
- ++dblDictIter;
- }
-
- while (intDictIter != EngineConfig::getDefaultIntDict().end())
- {
- if (!callback(MV_ENGINE_CONFIG_ATTR_TYPE_INTEGER,
- intDictIter->first.c_str(), user_data))
- {
- LOGD("Attribute names/types traverse has been stopped by the user");
- return MEDIA_VISION_ERROR_NONE;
- }
- ++intDictIter;
- }
-
- while (boolDictIter != EngineConfig::getDefaultBoolDict().end())
- {
- if (!callback(MV_ENGINE_CONFIG_ATTR_TYPE_BOOLEAN,
- boolDictIter->first.c_str(), user_data))
- {
- LOGD("Attribute names/types traverse has been stopped by the user");
- return MEDIA_VISION_ERROR_NONE;
- }
- ++boolDictIter;
- }
-
- while (strDictIter != EngineConfig::getDefaultStrDict().end())
- {
- if (!callback(MV_ENGINE_CONFIG_ATTR_TYPE_STRING,
- strDictIter->first.c_str(), user_data))
- {
- LOGD("Attribute names/types traverse has been stopped by the user");
- return MEDIA_VISION_ERROR_NONE;
- }
- ++strDictIter;
- }
-
- LOGD("Attribute names/types has been gotten");
- return MEDIA_VISION_ERROR_NONE;
+ if (NULL == callback) {
+ LOGE("Impossible to traverse supported by Media Vision engine "
+ "configuration attributes. Callback is NULL");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ using namespace MediaVision::Common;
+
+ int err = EngineConfig::cacheDictionaries();
+
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ LOGE("Failed to get attribute names/types. "
+ "Failed to cache attributes from file");
+ return err;
+ }
+
+ DictDblConstIter dblDictIter = EngineConfig::getDefaultDblDict().begin();
+ DictIntConstIter intDictIter = EngineConfig::getDefaultIntDict().begin();
+ DictBoolConstIter boolDictIter = EngineConfig::getDefaultBoolDict().begin();
+ DictStrConstIter strDictIter = EngineConfig::getDefaultStrDict().begin();
+
+ while (dblDictIter != EngineConfig::getDefaultDblDict().end()) {
+ if (!callback(MV_ENGINE_CONFIG_ATTR_TYPE_DOUBLE,
+ dblDictIter->first.c_str(), user_data)) {
+ LOGD("Attribute names/types traverse has been stopped by the user");
+ return MEDIA_VISION_ERROR_NONE;
+ }
+ ++dblDictIter;
+ }
+
+ while (intDictIter != EngineConfig::getDefaultIntDict().end()) {
+ if (!callback(MV_ENGINE_CONFIG_ATTR_TYPE_INTEGER,
+ intDictIter->first.c_str(), user_data)) {
+ LOGD("Attribute names/types traverse has been stopped by the user");
+ return MEDIA_VISION_ERROR_NONE;
+ }
+ ++intDictIter;
+ }
+
+ while (boolDictIter != EngineConfig::getDefaultBoolDict().end()) {
+ if (!callback(MV_ENGINE_CONFIG_ATTR_TYPE_BOOLEAN,
+ boolDictIter->first.c_str(), user_data)) {
+ LOGD("Attribute names/types traverse has been stopped by the user");
+ return MEDIA_VISION_ERROR_NONE;
+ }
+ ++boolDictIter;
+ }
+
+ while (strDictIter != EngineConfig::getDefaultStrDict().end()) {
+ if (!callback(MV_ENGINE_CONFIG_ATTR_TYPE_STRING,
+ strDictIter->first.c_str(), user_data)) {
+ LOGD("Attribute names/types traverse has been stopped by the user");
+ return MEDIA_VISION_ERROR_NONE;
+ }
+ ++strDictIter;
+ }
+
+ LOGD("Attribute names/types has been gotten");
+ return MEDIA_VISION_ERROR_NONE;
}
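A minimal sketch of how these engine-config wrappers fit together follows; the header name and attribute name are assumptions for illustration, and the attribute must exist in the cached dictionary for the calls to succeed:

	#include "mv_common_c.h"

	/* Hypothetical round-trip through the _c entry points. */
	static int exampleEngineConfig(void)
	{
		mv_engine_config_h cfg = NULL;
		int err = mv_create_engine_config_c(&cfg);
		if (err != MEDIA_VISION_ERROR_NONE)
			return err;

		/* setting fails with an error code if the attribute name is unknown */
		err = mv_engine_config_set_double_attribute_c(cfg,
				"MV_EXAMPLE_THRESHOLD", 0.5);
		if (err == MEDIA_VISION_ERROR_NONE) {
			double value = 0.0;
			err = mv_engine_config_get_double_attribute_c(cfg,
					"MV_EXAMPLE_THRESHOLD", &value);
		}

		mv_destroy_engine_config_c(cfg);
		return err;
	}
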
diff --git a/mv_face/face/include/FaceDetector.h b/mv_face/face/include/FaceDetector.h
index f014a542..b9b28883 100644
--- a/mv_face/face/include/FaceDetector.h
+++ b/mv_face/face/include/FaceDetector.h
@@ -27,11 +27,8 @@
* detection functionality.
*/
-namespace MediaVision
-{
-namespace Face
-{
-
+namespace MediaVision {
+namespace Face {
/**
* @class FaceDetector
* @brief The Face Detector container.
@@ -39,71 +36,68 @@ namespace Face
*
* @since_tizen 3.0
*/
-class FaceDetector
-{
+class FaceDetector {
public:
+ /**
+ * @brief Creates a FaceDetector.
+ *
+ * @since_tizen 3.0
+ */
+ FaceDetector();
- /**
- * @brief Creates a FaceDetector.
- *
- * @since_tizen 3.0
- */
- FaceDetector();
-
- /**
- * @brief Destroys the FaceDetector and releases all its resources.
- *
- * @since_tizen 3.0
- */
- virtual ~FaceDetector();
+ /**
+ * @brief Destroys the FaceDetector and releases all its resources.
+ *
+ * @since_tizen 3.0
+ */
+ virtual ~FaceDetector();
- /**
- * @brief Performs face detection functionality.
- * @details Use this function to launch face detection algorithm which
- * used the haarcascade set by setHaarcascadeFilepath().
- *
- * @since_tizen 3.0
- * @param [in] image The image where faces will be detected
- * @param [in] roi Region of image where faces will be detected
- * @param [in] minSize Minimum size of faces which will be detected
- * @param [out] faceLocations The result locations of detected faces.
- * @return true if detect process is completely finished. Otherwise return false.
- *
- * @pre Set a face haarcascade by calling setHaarcascadeFilepath()
- *
- * @see setHaarcascadeFilepath()
- */
- bool detectFaces(
- const cv::Mat& image,
- const cv::Rect& roi,
- const cv::Size& minSize,
- std::vector<cv::Rect>& faceLocations);
+ /**
+ * @brief Performs face detection functionality.
+	 * @details Use this function to launch the face detection algorithm, which
+	 *          uses the haarcascade loaded by loadHaarcascade().
+ *
+ * @since_tizen 3.0
+ * @param [in] image The image where faces will be detected
+ * @param [in] roi Region of image where faces will be detected
+ * @param [in] minSize Minimum size of faces which will be detected
+ * @param [out] faceLocations The result locations of detected faces.
+	 * @return true if the detection process completed successfully, otherwise false.
+ *
+	 * @pre Load a face haarcascade by calling loadHaarcascade()
+ *
+	 * @see loadHaarcascade()
+ */
+ bool detectFaces(
+ const cv::Mat& image,
+ const cv::Rect& roi,
+ const cv::Size& minSize,
+ std::vector<cv::Rect>& faceLocations);
- /**
- * @brief Loads haar cascade classifier for detection process.
- * @details This method is mandatory for normally detecting process.
- *
- * @since_tizen 3.0
- * @param [in] haarcascadeFilepath The path to the file, which contains haar
- * cascade classifier information for
- * detection process.
- * @return true if cascade is loaded from file and ready for detecting
- * process. Otherwise is false.
- */
- bool loadHaarcascade(const std::string& haarcascadeFilepath);
+ /**
+ * @brief Loads haar cascade classifier for detection process.
+	 * @details This method is mandatory for the detection process to work.
+ *
+ * @since_tizen 3.0
+ * @param [in] haarcascadeFilepath The path to the file, which contains haar
+ * cascade classifier information for
+ * detection process.
+	 * @return true if the cascade is loaded from the file and ready for the
+	 *         detection process, otherwise false.
+ */
+ bool loadHaarcascade(const std::string& haarcascadeFilepath);
private:
+ cv::CascadeClassifier m_faceCascade; /**< Cascade classifier of the face
+ detecting process. */
- cv::CascadeClassifier m_faceCascade; /**< Cascade classifier of the face
- detecting process. */
-
- std::string m_haarcascadeFilepath; /**< Path to the file, which contains
- cascade classifier information. */
+ std::string m_haarcascadeFilepath; /**< Path to the file, which contains
+ cascade classifier information. */
- bool m_faceCascadeIsLoaded; /**< Flag to determine the state of the
- m_faceCascade class. true if cascade is loaded
- from file and is ready to detecting process.
- Otherwise is false. */
+	bool m_faceCascadeIsLoaded; /**< Flag holding the state of m_faceCascade:
+	                              true if the cascade is loaded from the file
+	                              and ready for the detection process,
+	                              otherwise false. */
};
} /* Face */
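A hedged usage sketch of this interface is shown below; the cascade path and the minimum face size are assumptions for the example only:

	#include "FaceDetector.h"
	#include <opencv2/core/core.hpp>
	#include <vector>

	/* Hypothetical helper: detect all faces over a whole grayscale frame. */
	static std::vector<cv::Rect> detectAll(const cv::Mat &gray)
	{
		MediaVision::Face::FaceDetector detector;
		std::vector<cv::Rect> faces;

		if (!detector.loadHaarcascade(
				"/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml"))
			return faces;

		/* search the full frame, ignore faces smaller than 30x30 pixels */
		detector.detectFaces(gray, cv::Rect(0, 0, gray.cols, gray.rows),
				cv::Size(30, 30), faces);
		return faces;
	}
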
diff --git a/mv_face/face/include/FaceExpressionRecognizer.h b/mv_face/face/include/FaceExpressionRecognizer.h
index fb445a41..284e7d91 100644
--- a/mv_face/face/include/FaceExpressionRecognizer.h
+++ b/mv_face/face/include/FaceExpressionRecognizer.h
@@ -22,8 +22,7 @@
#include <string>
-namespace cv
-{
+namespace cv {
class Mat;
}
@@ -33,18 +32,14 @@ namespace cv
* the facial expressions recognition functionality.
*/
-namespace MediaVision
-{
-namespace Face
-{
-
+namespace MediaVision {
+namespace Face {
/**
* @brief Face expression recognition configuration.
*
* @since_tizen 3.0
*/
-struct FaceRecognizerConfig
-{
+struct FaceRecognizerConfig {
FaceRecognizerConfig();
std::string mHaarcascadeFilepath;
};
@@ -56,27 +51,26 @@ struct FaceRecognizerConfig
*
* @since_tizen 3.0
*/
-class FaceExpressionRecognizer
-{
+class FaceExpressionRecognizer {
public:
- /**
- * @brief Recognizes facial expression on the image with known face location.
- *
- * @since_tizen 3.0
- * @param [in] grayImage The grayscale image with face
- * @param [in] faceLocation The location of the face on the @a image
- * @param [out] faceExpression Expression recognized for the face at
- * @a faceLocation
- * @param [in] config The configuration will be used for
- * facial expression recognition
- *
- * @see MediaVision::Face::FaceRecognizerConfig
- */
- static int recognizeFaceExpression(
- const cv::Mat& grayImage,
- const mv_rectangle_s& faceLocation,
- mv_face_facial_expression_e *faceExpression,
- const FaceRecognizerConfig& config = FaceRecognizerConfig());
+ /**
+ * @brief Recognizes facial expression on the image with known face location.
+ *
+ * @since_tizen 3.0
+ * @param [in] grayImage The grayscale image with face
+ * @param [in] faceLocation The location of the face on the @a image
+ * @param [out] faceExpression Expression recognized for the face at
+ * @a faceLocation
+ * @param [in] config The configuration will be used for
+ * facial expression recognition
+ *
+ * @see MediaVision::Face::FaceRecognizerConfig
+ */
+ static int recognizeFaceExpression(
+ const cv::Mat& grayImage,
+ const mv_rectangle_s& faceLocation,
+ mv_face_facial_expression_e *faceExpression,
+ const FaceRecognizerConfig& config = FaceRecognizerConfig());
};
} /* Face */
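Because recognizeFaceExpression() is static, no recognizer instance is needed; a hedged call-site sketch, where the face rectangle is assumed to come from a prior detection step:

	#include "FaceExpressionRecognizer.h"

	/* Hypothetical wrapper; relies on the default FaceRecognizerConfig, whose
	 * haarcascade path is set by its constructor. */
	static int classifyExpression(const cv::Mat &gray,
			const mv_rectangle_s &faceLocation,
			mv_face_facial_expression_e *expression)
	{
		return MediaVision::Face::FaceExpressionRecognizer::recognizeFaceExpression(
				gray, faceLocation, expression);
	}
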
diff --git a/mv_face/face/include/FaceEyeCondition.h b/mv_face/face/include/FaceEyeCondition.h
index 56e10389..78c09927 100644
--- a/mv_face/face/include/FaceEyeCondition.h
+++ b/mv_face/face/include/FaceEyeCondition.h
@@ -28,11 +28,8 @@
* eye condition recognition functionality.
*/
-namespace MediaVision
-{
-namespace Face
-{
-
+namespace MediaVision {
+namespace Face {
/**
* @class FaceEyeCondition
* @brief The FaceEyeCondition implements the face
@@ -40,34 +37,31 @@ namespace Face
*
* @since_tizen 3.0
*/
-class FaceEyeCondition
-{
+class FaceEyeCondition {
public:
-
- /**
- * @brief Recognizes eye condition on the image with face location.
- *
- * @since_tizen 3.0
- * @param [in] grayImage The image in gray scale with face where
- * eye condition will be recognized
- * @param [in] faceLocation The rectangle with face location
- * @param [out] eyeCondition The eye condition which was recognized
- * @return @c 0 on success, otherwise a negative error value
- */
- static int recognizeEyeCondition(
- const cv::Mat& grayImage,
- mv_rectangle_s faceLocation,
- mv_face_eye_condition_e *eyeCondition);
+ /**
+ * @brief Recognizes eye condition on the image with face location.
+ *
+ * @since_tizen 3.0
+ * @param [in] grayImage The image in gray scale with face where
+ * eye condition will be recognized
+ * @param [in] faceLocation The rectangle with face location
+ * @param [out] eyeCondition The eye condition which was recognized
+ * @return @c 0 on success, otherwise a negative error value
+ */
+ static int recognizeEyeCondition(
+ const cv::Mat& grayImage,
+ mv_rectangle_s faceLocation,
+ mv_face_eye_condition_e *eyeCondition);
private:
+ static void splitEyes(
+ /*[in]*/ const cv::Mat& grayImage,
+ /*[in]*/ mv_rectangle_s faceLocation,
+ /*[out]*/ cv::Mat& leftEye,
+ /*[out]*/ cv::Mat& rightEye);
- static void splitEyes(
- /*[in]*/ const cv::Mat& grayImage,
- /*[in]*/ mv_rectangle_s faceLocation,
- /*[out]*/ cv::Mat& leftEye,
- /*[out]*/ cv::Mat& rightEye);
-
- static int isEyeOpen(/*[in]*/const cv::Mat& eye);
+ static int isEyeOpen(/*[in]*/const cv::Mat& eye);
};
} /* Face */
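Similarly, recognizeEyeCondition() is a static entry point; a minimal hedged sketch of a call site:

	#include "FaceEyeCondition.h"

	/* Hypothetical call site: query the eye condition for an already-located face. */
	static int checkEyes(const cv::Mat &gray, mv_rectangle_s faceLocation,
			mv_face_eye_condition_e *eyeCondition)
	{
		return MediaVision::Face::FaceEyeCondition::recognizeEyeCondition(
				gray, faceLocation, eyeCondition);
	}
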
diff --git a/mv_face/face/include/FaceRecognitionModel.h b/mv_face/face/include/FaceRecognitionModel.h
index b4888f2a..15232e17 100644
--- a/mv_face/face/include/FaceRecognitionModel.h
+++ b/mv_face/face/include/FaceRecognitionModel.h
@@ -31,54 +31,50 @@
* provides face recognition model interface.
*/
-namespace MediaVision
-{
-namespace Face
-{
-
+namespace MediaVision {
+namespace Face {
/**
* @brief Structure containing supported recognition algorithms settings.
*
* @since_tizen 3.0
*/
-struct FaceRecognitionModelConfig
-{
- /**
- * @brief Default constructor for the @ref FaceRecognitionModelConfig
- *
- * @since_tizen 3.0
- */
- FaceRecognitionModelConfig();
+struct FaceRecognitionModelConfig {
+ /**
+ * @brief Default constructor for the @ref FaceRecognitionModelConfig
+ *
+ * @since_tizen 3.0
+ */
+ FaceRecognitionModelConfig();
- bool operator!=(
- const FaceRecognitionModelConfig& other) const;
+ bool operator!=(
+ const FaceRecognitionModelConfig& other) const;
- FaceRecognitionModelType mModelType; /**<
- Type of the recognition algorithm */
+ FaceRecognitionModelType mModelType; /**<
+ Type of the recognition algorithm */
- int mNumComponents; /**< How many principal components will be included
- to the Eigenvectors */
+	int mNumComponents; /**< How many principal components will be included
+	                       in the Eigenvectors */
- double mThreshold; /**< Minimal distance between principal components of
- the model allowed */
+ double mThreshold; /**< Minimal distance between principal components of
+ the model allowed */
- int mRadius; /**< Radius of the local features for LBHP algorithm */
+	int mRadius; /**< Radius of the local features for the LBPH algorithm */
- int mNeighbors; /**< How many neighboring pixels has to be analyzed
- when LBHP learning applied. Usually set as
- 8*radius */
+	int mNeighbors; /**< How many neighboring pixels have to be analyzed
+	                   when LBPH learning is applied. Usually set to
+	                   8*radius */
- int mGridX; /**< X size of the spatial histogram (LBPH) */
+ int mGridX; /**< X size of the spatial histogram (LBPH) */
- int mGridY; /**< Y size of the spatial histogram (LBPH) */
+ int mGridY; /**< Y size of the spatial histogram (LBPH) */
- int mImgWidth; /**< Width of the image to resize the samples for
- algorithms working on the samples of the same
- size (Eigenfaces, Fisherfaces) */
+ int mImgWidth; /**< Width of the image to resize the samples for
+ Eigenfaces and Fisherfaces algorithms working
+ on the samples of the same size */
- int mImgHeight; /**< Height of the image to resize the samples for
- algorithms working on the samples of the same
- size (Eigenfaces, Fisherfaces) */
+ int mImgHeight; /**< Height of the image to resize the samples for
+ Eigenfaces and Fisherfaces algorithms working
+ on the samples of the same size */
};
/**
@@ -88,21 +84,20 @@ struct FaceRecognitionModelConfig
*
* @since_tizen 3.0
*/
-struct FaceRecognitionResults
-{
- /**
- * @brief Default constructor for the @ref FaceRecognitionResults
- *
- * @since_tizen 3.0
- */
- FaceRecognitionResults();
-
- bool mIsRecognized; /**< The flag indication success of the
- recognition */
- cv::Rect_<int> mFaceLocation; /**< Location of the face where face has
- been recognized */
- int mFaceLabel; /**< Unique label of the face */
- double mConfidence; /**< Recognition confidence level */
+struct FaceRecognitionResults {
+ /**
+ * @brief Default constructor for the @ref FaceRecognitionResults
+ *
+ * @since_tizen 3.0
+ */
+ FaceRecognitionResults();
+
+	bool mIsRecognized; /**< The flag indicating success of the
+	                       recognition */
+ cv::Rect_<int> mFaceLocation; /**< Location of the face where face has
+ been recognized */
+ int mFaceLabel; /**< Unique label of the face */
+ double mConfidence; /**< Recognition confidence level */
};
/**
@@ -111,177 +106,173 @@ struct FaceRecognitionResults
*
* @since_tizen 3.0
*/
-class FaceRecognitionModel
-{
+class FaceRecognitionModel {
public:
-
- /**
- * @brief Creates a FaceRecognitionModel class instance.
- *
- * @since_tizen 3.0
- */
- FaceRecognitionModel();
-
- /**
- * @brief Creates a FaceRecognitionModel class instance based on existed
- * instance.
- *
- * @since_tizen 3.0
- * @param [in] origin The FaceRecognitionModel object that will be used
- * for creation of new one
- */
- FaceRecognitionModel(const FaceRecognitionModel& origin);
-
- /**
- * @brief @ref FaceRecognitionModel copy assignment operator.
- * @details Fills the information based on the @a copy
- *
- * @since_tizen 3.0
- * @param [in] copy @ref FaceRecognitionModel object which will be
- * copied
- */
- FaceRecognitionModel& operator=(const FaceRecognitionModel& copy);
-
- /**
- * @brief Destroys the FaceRecognitionModel class instance including all
- * its resources.
- *
- * @since_tizen 3.0
- */
- ~FaceRecognitionModel();
-
- /**
- * @brief Serializes FaceRecognitionModel object to the file.
- *
- * @since_tizen 3.0
- * @param [in] fileName The name of the file to which serialized
- * FaceRecognitionModel object will be saved
- * @return @c 0 on success, otherwise a negative error value
- *
- * @see FaceRecognitionModel::load()
- */
- int save(const std::string& fileName);
-
- /**
- * @brief Deserializes FaceRecognitionModel object from the file.
- *
- * @since_tizen 3.0
- * @param [in] fileName The name to the file from which serialized
- * FaceRecognitionModel object will be deserialized
- * @return @c 0 on success, otherwise a negative error value
- *
- * @see FaceRecognitionModel::save()
- */
- int load(const std::string& fileName);
-
- /**
- * @brief Adds face image example for face labeled by @a faceLabel
- *
- * @since_tizen 3.0
- * @param [in] faceImage Face image to be added to the training set
- * @param [in] faceLabel Label that defines class of the face
- * @return @c 0 on success, otherwise a negative error value
- *
- * @see FaceRecognitionModel::resetFaceExamples()
- */
- int addFaceExample(const cv::Mat& faceImage, int faceLabel);
-
- /**
- * @brief Clears the internal set of face image examples.
- *
- * @since_tizen 3.0
- * @remarks Internal set of face image examples contains all samples
- * collected with @ref FaceRecognitionModel::addPositiveExample()
- * method.
- * @return @c 0 on success, otherwise a negative error value
- *
- * @see FaceRecognitionModel::addFaceExample()
- */
- int resetFaceExamples(void);
-
- /**
- * @brief Clears the internal set of face image examples labeled with
- * @a faceLabel.
- *
- * @since_tizen 3.0
- * @remarks Internal set of face image examples contains all samples
- * collected with @ref FaceRecognitionModel::addPositiveExample()
- * method.
- * @param faceLabel Unique for the model face label
- * @return @c 0 on success, otherwise a negative error value
- *
- * @see FaceRecognitionModel::addFaceExample()
- */
- int resetFaceExamples(int faceLabel);
-
- /**
- * @brief Getter for the face labels learned by the model.
- *
- * @since_tizen 3.0
- * @remarks Returning vector will contain only labels had been learned by
- * FaceRecognitionModel::learn() method.
- * @return Vector of the face labels known by the model
- *
- * @see FaceRecognitionModel::addFaceExample()
- * @see FaceRecognitionModel::learn()
- */
- const std::set<int>& getFaceLabels(void) const;
-
- /**
- * @brief Learns recognition model based on the set of collected face image
- * examples.
- *
- * @since_tizen 3.0
- * @param [in] config Configuration of the algorithm to be used for
- * learning the model
- * @return @c 0 on success, otherwise a negative error value
- *
- * @see FaceRecognitionModel::addFaceExample()
- */
- int learn(const FaceRecognitionModelConfig& config = FaceRecognitionModelConfig());
-
- /**
- * @brief Recognizes faces in the image and outputs recognition results to
- * the @a results structure.
- *
- * @since_tizen 3.0
- * @param [in] config Configuration of the algorithm to be used for
- * face recognition
- * @param [out] results Structure that will contain recognition results
- * @return @c 0 on success, otherwise a negative error value
- *
- * @see FaceRecognitionModel::learn()
- */
- int recognize(const cv::Mat& image, FaceRecognitionResults& results);
+	/**
+	 * @brief Creates a FaceRecognitionModel class instance.
+	 *
+	 * @since_tizen 3.0
+	 */
+ FaceRecognitionModel();
+
+ /**
+	 * @brief Creates a FaceRecognitionModel class instance based on an
+	 *        existing instance.
+ *
+ * @since_tizen 3.0
+ * @param [in] origin The FaceRecognitionModel object that will be used
+ * for creation of new one
+ */
+ FaceRecognitionModel(const FaceRecognitionModel& origin);
+
+ /**
+ * @brief @ref FaceRecognitionModel copy assignment operator.
+ * @details Fills the information based on the @a copy
+ *
+ * @since_tizen 3.0
+ * @param [in] copy @ref FaceRecognitionModel object which will be
+ * copied
+ */
+ FaceRecognitionModel& operator=(const FaceRecognitionModel& copy);
+
+ /**
+ * @brief Destroys the FaceRecognitionModel class instance including all
+ * its resources.
+ *
+ * @since_tizen 3.0
+ */
+ ~FaceRecognitionModel();
+
+ /**
+ * @brief Serializes FaceRecognitionModel object to the file.
+ *
+ * @since_tizen 3.0
+ * @param [in] fileName The name of the file to which serialized
+ * FaceRecognitionModel object will be saved
+ * @return @c 0 on success, otherwise a negative error value
+ *
+ * @see FaceRecognitionModel::load()
+ */
+ int save(const std::string& fileName);
+
+ /**
+ * @brief Deserializes FaceRecognitionModel object from the file.
+ *
+ * @since_tizen 3.0
+	 * @param [in] fileName The name of the file from which serialized
+ * FaceRecognitionModel object will be deserialized
+ * @return @c 0 on success, otherwise a negative error value
+ *
+ * @see FaceRecognitionModel::save()
+ */
+ int load(const std::string& fileName);
+
+ /**
+ * @brief Adds face image example for face labeled by @a faceLabel
+ *
+ * @since_tizen 3.0
+ * @param [in] faceImage Face image to be added to the training set
+ * @param [in] faceLabel Label that defines class of the face
+ * @return @c 0 on success, otherwise a negative error value
+ *
+ * @see FaceRecognitionModel::resetFaceExamples()
+ */
+ int addFaceExample(const cv::Mat& faceImage, int faceLabel);
+
+ /**
+ * @brief Clears the internal set of face image examples.
+ *
+ * @since_tizen 3.0
+ * @remarks Internal set of face image examples contains all samples
+	 *          collected with @ref FaceRecognitionModel::addFaceExample()
+ * method.
+ * @return @c 0 on success, otherwise a negative error value
+ *
+ * @see FaceRecognitionModel::addFaceExample()
+ */
+ int resetFaceExamples(void);
+
+ /**
+ * @brief Clears the internal set of face image examples labeled with
+ * @a faceLabel.
+ *
+ * @since_tizen 3.0
+ * @remarks Internal set of face image examples contains all samples
+	 *          collected with @ref FaceRecognitionModel::addFaceExample()
+ * method.
+ * @param faceLabel Unique for the model face label
+ * @return @c 0 on success, otherwise a negative error value
+ *
+ * @see FaceRecognitionModel::addFaceExample()
+ */
+ int resetFaceExamples(int faceLabel);
+
+ /**
+ * @brief Getter for the face labels learned by the model.
+ *
+ * @since_tizen 3.0
+	 * @remarks The returned set will contain only labels that have been learned
+	 *          by the FaceRecognitionModel::learn() method.
+	 * @return Set of the face labels known by the model
+ *
+ * @see FaceRecognitionModel::addFaceExample()
+ * @see FaceRecognitionModel::learn()
+ */
+ const std::set<int>& getFaceLabels(void) const;
+
+ /**
+ * @brief Learns recognition model based on the set of collected face image
+ * examples.
+ *
+ * @since_tizen 3.0
+ * @param [in] config Configuration of the algorithm to be used for
+ * learning the model
+ * @return @c 0 on success, otherwise a negative error value
+ *
+ * @see FaceRecognitionModel::addFaceExample()
+ */
+ int learn(const FaceRecognitionModelConfig& config = FaceRecognitionModelConfig());
+
+ /**
+ * @brief Recognizes faces in the image and outputs recognition results to
+ * the @a results structure.
+ *
+ * @since_tizen 3.0
+	 * @param [in] image Image where faces will be recognized
+ * @param [out] results Structure that will contain recognition results
+ * @return @c 0 on success, otherwise a negative error value
+ *
+ * @see FaceRecognitionModel::learn()
+ */
+ int recognize(const cv::Mat& image, FaceRecognitionResults& results);
private:
-
- /**
- * Factory method for creating of the recognition algorithm based on input
- * configuration:
- */
- static cv::Ptr<cv::FaceRecognizer> CreateRecognitionAlgorithm(
- const FaceRecognitionModelConfig& config =
- FaceRecognitionModelConfig());
+ /**
+ * Factory method for creating of the recognition algorithm based on input
+ * configuration:
+ */
+ static cv::Ptr<cv::FaceRecognizer> CreateRecognitionAlgorithm(
+ const FaceRecognitionModelConfig& config =
+ FaceRecognitionModelConfig());
private:
+	bool m_canRecognize; /**< Flag showing whether recognition is possible
+	                        with the face recognition model */
- bool m_canRecognize; /**< The flag showing possibility to recognize with
- the face recognition model */
-
- std::map<int, std::vector<cv::Mat> > m_faceSamples; /**< Samples of the
- images which
- will be used for
- the learning */
+ std::map<int, std::vector<cv::Mat> > m_faceSamples; /**< Samples of the
+ images which
+ will be used for
+ the learning */
- FaceRecognitionModelConfig m_learnAlgorithmConfig; /**< Configuration of the
- learning method */
+ FaceRecognitionModelConfig m_learnAlgorithmConfig; /**< Configuration of the
+ learning method */
- cv::Ptr<cv::FaceRecognizer> m_recognizer; /**< Recognizer associated with
- the current model */
+ cv::Ptr<cv::FaceRecognizer> m_recognizer; /**< Recognizer associated with
+ the current model */
- std::set<int> m_learnedLabels; /**< Vector of the labels had been learned
- by the model */
+	std::set<int> m_learnedLabels; /**< Set of the labels that have been
+	                                  learned by the model */
};
} /* Face */
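The typical train-then-recognize flow through this interface looks roughly like the sketch below; the label value and the assumption that sample images are already cropped grayscale faces are illustrative only, and LOGD is the logging macro used throughout these sources:

	#include "FaceRecognitionModel.h"
	#include <vector>

	/* Hypothetical round-trip: collect examples, learn with the default
	 * configuration, then recognize a probe image. */
	static int trainAndRecognize(const std::vector<cv::Mat> &samplesOfPerson,
			const cv::Mat &probeImage)
	{
		using namespace MediaVision::Face;
		FaceRecognitionModel model;

		for (size_t i = 0; i < samplesOfPerson.size(); ++i)
			model.addFaceExample(samplesOfPerson[i], 1 /* assumed face label */);

		int err = model.learn();    /* default FaceRecognitionModelConfig */
		if (err != 0)
			return err;

		FaceRecognitionResults results;
		err = model.recognize(probeImage, results);
		if (err == 0 && results.mIsRecognized)
			LOGD("recognized label %d (confidence %f)",
					results.mFaceLabel, results.mConfidence);
		return err;
	}
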
diff --git a/mv_face/face/include/FaceTrackingModel.h b/mv_face/face/include/FaceTrackingModel.h
index daa56c75..1fb6ccfd 100644
--- a/mv_face/face/include/FaceTrackingModel.h
+++ b/mv_face/face/include/FaceTrackingModel.h
@@ -25,33 +25,29 @@
* provides face tracking model interface.
*/
-namespace MediaVision
-{
-namespace Face
-{
-
+namespace MediaVision {
+namespace Face {
/**
* @brief Structure where results of
* @ref MediaVision::Face::FaceTrackingModel::track() call are stored.
*
* @since_tizen 3.0
*/
-struct FaceTrackingResults
-{
- /**
- * @brief Default constructor for the @ref FaceTrackingResults
- *
- * @since_tizen 3.0
- */
- FaceTrackingResults();
-
- bool mIsTracked; /**< The flag indication success of the
- tracking */
- cv::Rect_<float> mFaceLocation; /**< Location of the face at the current
- track iteration where face position
- is predicted */
- float mConfidence; /**< Tracking confidence level
- (0.0 .. 1.0) */
+struct FaceTrackingResults {
+ /**
+ * @brief Default constructor for the @ref FaceTrackingResults
+ *
+ * @since_tizen 3.0
+ */
+ FaceTrackingResults();
+
+	bool mIsTracked; /**< The flag indicating success of the
+	                    tracking */
+ cv::Rect_<float> mFaceLocation; /**< Location of the face at the current
+ track iteration where face position
+ is predicted */
+ float mConfidence; /**< Tracking confidence level
+ (0.0 .. 1.0) */
};
/**
@@ -60,113 +56,110 @@ struct FaceTrackingResults
*
* @since_tizen 3.0
*/
-class FaceTrackingModel
-{
+class FaceTrackingModel {
public:
- /**
- * @brief Creates a FaceTrackingModel class instance.
- *
- * @since_tizen 3.0
- */
- FaceTrackingModel();
-
- /**
- * @brief Creates a FaceTrackingModel class instance based on existed
- * instance.
- *
- * @since_tizen 3.0
- * @param [in] origin The FaceTrackingModel object that will be used
- * for creation of new one
- */
- FaceTrackingModel(const FaceTrackingModel& origin);
-
- /**
- * @brief @ref FaceTrackingModel copy assignment operator.
- * @details Fills the information based on the @a copy
- *
- * @since_tizen 3.0
- * @param [in] copy @ref FaceTrackingModel object which will be
- * copied
- */
- FaceTrackingModel& operator=(const FaceTrackingModel& copy);
-
- /**
- * @brief Destroys the FaceTrackingModel class instance including all
- * its resources.
- *
- * @since_tizen 3.0
- */
- ~FaceTrackingModel();
-
- /**
- * @brief Serializes FaceTrackingModel object to the file.
- *
- * @since_tizen 3.0
- * @param [in] fileName The name to the file to which serialized
- * FaceTrackingModel object will be saved
- * @return @c 0 on success, otherwise a negative error value
- *
- * @see FaceTrackingModel::load()
- */
- int save(const std::string& fileName);
-
- /**
- * @brief Deserializes FaceTrackingModel object from the file.
- *
- * @since_tizen 3.0
- * @param [in] fileName The name of the file from which serialized
- * FaceTrackingModel object will be deserialized
- * @return @c 0 on success, otherwise a negative error value
- *
- * @see FaceTrackingModel::save()
- */
- int load(const std::string& fileName);
-
- /**
- * @brief Prepares FaceTrackingModel object to the next tracking session.
- *
- * @since_tizen 3.0
- * @param [in] image First frame of the video or image sequence for
- * which tracking will be started
- * @return @c 0 on success, otherwise a negative error value
- *
- * @see FaceTrackingModel::save()
- */
- int prepare(const cv::Mat& image);
-
- /**
- * @brief Prepares FaceTrackingModel object to the next tracking session.
- *
- * @since_tizen 3.0
- * @param [in] image First frame of the video or image sequence for
- * which tracking will be started
- * @param [in] boundingBox Rectangular location of the face on the @a
- * image
- * @return @c 0 on success, otherwise a negative error value
- *
- * @see FaceTrackingModel::save()
- */
- int prepare(const cv::Mat& image, const cv::Rect_<float>& boundingBox);
-
- /**
- * @brief Performs one tracking iteration for the video frame or image
- * from the continuous sequence of images.
- *
- * @since_tizen 3.0
- * @param [in] image
- * @param [out] boundingBox
- */
- int track(const cv::Mat& image, FaceTrackingResults& results);
+ /**
+ * @brief Creates a FaceTrackingModel class instance.
+ *
+ * @since_tizen 3.0
+ */
+ FaceTrackingModel();
+
+ /**
+ * @brief Creates a FaceTrackingModel class instance based on an existing
+ * instance.
+ *
+ * @since_tizen 3.0
+ * @param [in] origin The FaceTrackingModel object that will be used
+ * for creation of the new one
+ */
+ FaceTrackingModel(const FaceTrackingModel& origin);
+
+ /**
+ * @brief @ref FaceTrackingModel copy assignment operator.
+ * @details Fills the information based on the @a copy
+ *
+ * @since_tizen 3.0
+ * @param [in] copy @ref FaceTrackingModel object which will be
+ * copied
+ */
+ FaceTrackingModel& operator=(const FaceTrackingModel& copy);
+
+ /**
+ * @brief Destroys the FaceTrackingModel class instance including all
+ * its resources.
+ *
+ * @since_tizen 3.0
+ */
+ ~FaceTrackingModel();
+
+ /**
+ * @brief Serializes FaceTrackingModel object to the file.
+ *
+ * @since_tizen 3.0
+ * @param [in] fileName The name of the file to which the serialized
+ * FaceTrackingModel object will be saved
+ * @return @c 0 on success, otherwise a negative error value
+ *
+ * @see FaceTrackingModel::load()
+ */
+ int save(const std::string& fileName);
+
+ /**
+ * @brief Deserializes FaceTrackingModel object from the file.
+ *
+ * @since_tizen 3.0
+ * @param [in] fileName The name of the file from which the serialized
+ * FaceTrackingModel object will be deserialized
+ * @return @c 0 on success, otherwise a negative error value
+ *
+ * @see FaceTrackingModel::save()
+ */
+ int load(const std::string& fileName);
+
+ /**
+ * @brief Prepares FaceTrackingModel object for the next tracking session.
+ *
+ * @since_tizen 3.0
+ * @param [in] image First frame of the video or image sequence for
+ * which tracking will be started
+ * @return @c 0 on success, otherwise a negative error value
+ *
+ * @see FaceTrackingModel::save()
+ */
+ int prepare(const cv::Mat& image);
+
+ /**
+ * @brief Prepares FaceTrackingModel object for the next tracking session.
+ *
+ * @since_tizen 3.0
+ * @param [in] image First frame of the video or image sequence for
+ * which tracking will be started
+ * @param [in] boundingBox Rectangular location of the face on the @a
+ * image
+ * @return @c 0 on success, otherwise a negative error value
+ *
+ * @see FaceTrackingModel::save()
+ */
+ int prepare(const cv::Mat& image, const cv::Rect_<float>& boundingBox);
+
+ /**
+ * @brief Performs one tracking iteration for the video frame or image
+ * from the continuous sequence of images.
+ *
+ * @since_tizen 3.0
+ * @param [in] image Frame on which the next tracking iteration
+ * will be performed
+ * @param [out] results Structure where the tracking results will
+ * be stored
+ * @return @c 0 on success, otherwise a negative error value
+ */
+ int track(const cv::Mat& image, FaceTrackingResults& results);
private:
+ bool m_canTrack; /**< The flag showing whether the
+ tracking model is able to
+ perform tracking */
- bool m_canTrack; /**< The flag showing possibility
- of the tracking model to
- perform track */
-
- cv::Ptr<cv::TrackerMedianFlow> m_tracker; /**< Underlying OpenCV tracking
- model */
-
+ cv::Ptr<cv::TrackerMedianFlow> m_tracker; /**< Underlying OpenCV tracking
+ model */
};
} /* Face */
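
A minimal sketch of how the FaceTrackingModel interface above is intended to be driven (illustration only, not part of the patch); frame acquisition and error reporting are assumed to happen elsewhere, and "0 on success" is taken from the Doxygen comments.

	#include <vector>
	#include <opencv2/core/core.hpp>

	#include "FaceTrackingModel.h"

	using namespace MediaVision::Face;

	int trackSequence(const std::vector<cv::Mat>& frames,
	                  const cv::Rect_<float>& initialFaceLocation)
	{
		if (frames.empty())
			return -1;

		FaceTrackingModel model;

		/* Initialize the tracking session on the first frame */
		int err = model.prepare(frames[0], initialFaceLocation);
		if (err != 0)
			return err;

		FaceTrackingResults results;
		for (size_t i = 1; i < frames.size(); ++i) {
			err = model.track(frames[i], results);
			if (err != 0 || !results.mIsTracked)
				break;

			/* results.mFaceLocation is the predicted face rectangle,
			   results.mConfidence the confidence in [0.0, 1.0] */
		}

		return err;
	}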
diff --git a/mv_face/face/include/FaceUtil.h b/mv_face/face/include/FaceUtil.h
index d79757df..a6e19137 100644
--- a/mv_face/face/include/FaceUtil.h
+++ b/mv_face/face/include/FaceUtil.h
@@ -25,23 +25,19 @@
* @file FaceUtil.h
* @brief This file contains the useful functionality for Face module.
*/
-namespace MediaVision
-{
-namespace Face
-{
-
+namespace MediaVision {
+namespace Face {
/**
* @brief Enumeration of supported learning algorithms.
*
* @since_tizen 3.0
*/
-enum FaceRecognitionModelType
-{
- MEDIA_VISION_FACE_MODEL_TYPE_UNKNOWN = 0, /**< Unknown algorithm type */
- MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES = 1, /**< Eigenfaces algorithm */
- MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES = 2, /**< Fisherfaces algorithm */
- MEDIA_VISION_FACE_MODEL_TYPE_LBPH = 3 /**< Local Binary Patterns
- Histograms algorithm */
+enum FaceRecognitionModelType {
+ MEDIA_VISION_FACE_MODEL_TYPE_UNKNOWN = 0, /**< Unknown algorithm type */
+ MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES = 1, /**< Eigenfaces algorithm */
+ MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES = 2, /**< Fisherfaces algorithm */
+ MEDIA_VISION_FACE_MODEL_TYPE_LBPH = 3 /**< Local Binary Patterns
+ Histograms algorithm */
};
/**
@@ -49,14 +45,13 @@ enum FaceRecognitionModelType
*
* @since_tizen 3.0
*/
-struct RecognitionParams
-{
- RecognitionParams(FaceRecognitionModelType algType);
+struct RecognitionParams {
+ RecognitionParams(FaceRecognitionModelType algType);
- RecognitionParams();
+ RecognitionParams();
- FaceRecognitionModelType mRecognitionAlgType;
- /**< The type of the learning algorithm. */
+ FaceRecognitionModelType mRecognitionAlgType; /**< The type of
+ the learning algorithm */
};
/**
diff --git a/mv_face/face/include/TrackerMedianFlow.h b/mv_face/face/include/TrackerMedianFlow.h
index 7112a146..44b46c7c 100644
--- a/mv_face/face/include/TrackerMedianFlow.h
+++ b/mv_face/face/include/TrackerMedianFlow.h
@@ -59,95 +59,91 @@ by authors to outperform MIL). During the implementation period the code at
<http://www.aonsquared.co.uk/node/5>, the courtesy of the author Arthur Amarra, was used for the
reference purpose.
*/
-class TrackerMedianFlow : public virtual Algorithm
-{
+class TrackerMedianFlow : public virtual Algorithm {
public:
+ struct Params {
+ /**
+ * @brief TrackerMedianFlow algorithm parameters constructor
+ */
+ Params();
+ void read(const FileNode& fn);
+ void write(FileStorage& fs) const;
- struct Params
- {
- /**
- * @brief TrackerMedianFlow algorithm parameters constructor
- */
- Params();
- void read(const FileNode& fn);
- void write(FileStorage& fs) const;
-
- int mPointsInGrid; /**< Square root of number of keypoints used.
- Increase it to trade accurateness for speed.
- Default value is sensible and recommended */
+ int mPointsInGrid; /**< Square root of the number of keypoints used.
+ Increase it to trade accuracy for speed.
+ Default value is sensible and recommended */
- Size mWindowSize; /**< Size of the search window at each pyramid level
- for Lucas-Kanade optical flow search used for
- tracking */
+ Size mWindowSize; /**< Size of the search window at each pyramid level
+ for Lucas-Kanade optical flow search used for
+ tracking */
- int mPyrMaxLevel; /**< Number of pyramid levels for Lucas-Kanade optical
- flow search used for tracking */
- };
+ int mPyrMaxLevel; /**< Number of pyramid levels for Lucas-Kanade optical
+ flow search used for tracking */
+ };
- TrackerMedianFlow(Params paramsIn = Params());
+ TrackerMedianFlow(Params paramsIn = Params());
- bool copyTo(TrackerMedianFlow& copy) const;
+ bool copyTo(TrackerMedianFlow& copy) const;
- bool init(const Mat& image, const Rect_<float>& boundingBox);
- bool update(const Mat& image, Rect_<float>& boundingBox);
+ bool init(const Mat& image, const Rect_<float>& boundingBox);
+ bool update(const Mat& image, Rect_<float>& boundingBox);
- bool isInited() const;
+ bool isInited() const;
- float getLastConfidence() const;
- Rect_<float> getLastBoundingBox() const;
+ float getLastConfidence() const;
+ Rect_<float> getLastBoundingBox() const;
- void read(FileStorage& fn);
- void write(FileStorage& fs) const;
+ void read(FileStorage& fn);
+ void write(FileStorage& fs) const;
private:
+ bool isInit;
- bool isInit;
-
- bool medianFlowImpl(Mat oldImage, Mat newImage, Rect_<float>& oldBox);
+ bool medianFlowImpl(Mat oldImage, Mat newImage, Rect_<float>& oldBox);
- Rect_<float> vote(
- const std::vector<Point2f>& oldPoints,
- const std::vector<Point2f>& newPoints,
- const Rect_<float>& oldRect,
- Point2f& mD);
+ Rect_<float> vote(
+ const std::vector<Point2f>& oldPoints,
+ const std::vector<Point2f>& newPoints,
+ const Rect_<float>& oldRect,
+ Point2f& mD);
- template<typename T>
- T getMedian(
- std::vector<T>& values, int size = -1);
+ template<typename T>
+ T getMedian(
+ std::vector<T>& values, int size = -1);
- void check_FB(
- std::vector<Mat> newPyramid,
- const std::vector<Point2f>& oldPoints,
- const std::vector<Point2f>& newPoints,
- std::vector<bool>& status);
+ void check_FB(
+ std::vector<Mat> newPyramid,
+ const std::vector<Point2f>& oldPoints,
+ const std::vector<Point2f>& newPoints,
+ std::vector<bool>& status);
- void check_NCC(
- const Mat& oldImage,
- const Mat& newImage,
- const std::vector<Point2f>& oldPoints,
- const std::vector<Point2f>& newPoints,
- std::vector<bool>& status);
+ void check_NCC(
+ const Mat& oldImage,
+ const Mat& newImage,
+ const std::vector<Point2f>& oldPoints,
+ const std::vector<Point2f>& newPoints,
+ std::vector<bool>& status);
- inline float l2distance(Point2f p1, Point2f p2);
+ inline float l2distance(Point2f p1, Point2f p2);
- Params params; /**< Parameters used during tracking, see
- @ref TrackerMedianFlow::Params */
+ Params params; /**< Parameters used during tracking, see
+ @ref TrackerMedianFlow::Params */
- TermCriteria termcrit; /**< Terminating criteria for OpenCV
- Lucas–Kanade optical flow algorithm used
- during tracking */
+ TermCriteria termcrit; /**< Terminating criteria for OpenCV
+ Lucas–Kanade optical flow algorithm used
+ during tracking */
- Rect_<float> m_boundingBox; /**< Tracking object bounding box */
+ Rect_<float> m_boundingBox; /**< Tracking object bounding box */
- float m_confidence; /**< Confidence that face was tracked correctly
- at the last tracking iteration */
+ float m_confidence; /**< Confidence that face was tracked correctly
+ at the last tracking iteration */
- Mat m_image; /**< Last image for which tracking was
- performed */
+ Mat m_image; /**< Last image for which tracking was
+ performed */
- std::vector<Mat> m_pyramid; /**< The pyramid had been calculated for
- the previous frame (or when
- initialize the model) */
+ std::vector<Mat> m_pyramid; /**< The pyramid calculated for the
+ previous frame (or at model
+ initialization) */
};
} /* namespace cv */
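
An illustrative sketch (not part of the patch) of the project-local cv::TrackerMedianFlow declared above; note that this is the repository's own class placed in namespace cv, not an upstream OpenCV tracker. The mPointsInGrid value and the confidence threshold are arbitrary example choices.

	#include "TrackerMedianFlow.h"

	bool trackOneStep(const cv::Mat& firstFrame,
	                  const cv::Mat& nextFrame,
	                  cv::Rect_<float>& box)
	{
		cv::TrackerMedianFlow::Params params;
		params.mPointsInGrid = 10; /* example value; the default is also sensible */

		cv::TrackerMedianFlow tracker(params);

		/* Bind the tracker to the initial bounding box */
		if (!tracker.init(firstFrame, box))
			return false;

		/* One tracking iteration; box is updated in place on success */
		if (!tracker.update(nextFrame, box))
			return false;

		/* Confidence of the last iteration, 0.0 .. 1.0 */
		const float confidence = tracker.getLastConfidence();
		return confidence > 0.5f; /* example acceptance threshold */
	}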
diff --git a/mv_face/face/include/mv_face_open.h b/mv_face/face/include/mv_face_open.h
index a127d5ab..8346b4f0 100644
--- a/mv_face/face/include/mv_face_open.h
+++ b/mv_face/face/include/mv_face_open.h
@@ -65,10 +65,10 @@ extern "C" {
* @see mv_face_detected_cb
*/
int mv_face_detect_open(
- mv_source_h source,
- mv_engine_config_h engine_cfg,
- mv_face_detected_cb detected_cb,
- void *user_data);
+ mv_source_h source,
+ mv_engine_config_h engine_cfg,
+ mv_face_detected_cb detected_cb,
+ void *user_data);
/********************/
@@ -126,12 +126,12 @@ int mv_face_detect_open(
* @see mv_face_recognized_cb
*/
int mv_face_recognize_open(
- mv_source_h source,
- mv_face_recognition_model_h recognition_model,
- mv_engine_config_h engine_cfg,
- mv_rectangle_s *face_location,
- mv_face_recognized_cb recognized_cb,
- void *user_data);
+ mv_source_h source,
+ mv_face_recognition_model_h recognition_model,
+ mv_engine_config_h engine_cfg,
+ mv_rectangle_s *face_location,
+ mv_face_recognized_cb recognized_cb,
+ void *user_data);
/*****************/
@@ -190,12 +190,12 @@ int mv_face_recognize_open(
* @see mv_face_tracked_cb
*/
int mv_face_track_open(
- mv_source_h source,
- mv_face_tracking_model_h tracking_model,
- mv_engine_config_h engine_cfg,
- mv_face_tracked_cb tracked_cb,
- bool do_learn,
- void *user_data);
+ mv_source_h source,
+ mv_face_tracking_model_h tracking_model,
+ mv_engine_config_h engine_cfg,
+ mv_face_tracked_cb tracked_cb,
+ bool do_learn,
+ void *user_data);
/********************************/
@@ -233,11 +233,11 @@ int mv_face_track_open(
* @see mv_face_eye_condition_recognized_cb
*/
int mv_face_eye_condition_recognize_open(
- mv_source_h source,
- mv_engine_config_h engine_cfg,
- mv_rectangle_s face_location,
- mv_face_eye_condition_recognized_cb eye_condition_recognized_cb,
- void *user_data);
+ mv_source_h source,
+ mv_engine_config_h engine_cfg,
+ mv_rectangle_s face_location,
+ mv_face_eye_condition_recognized_cb eye_condition_recognized_cb,
+ void *user_data);
/************************************/
@@ -274,11 +274,11 @@ int mv_face_eye_condition_recognize_open(
* @see mv_face_facial_expression_recognized_cb
*/
int mv_face_facial_expression_recognize_open(
- mv_source_h source,
- mv_engine_config_h engine_cfg,
- mv_rectangle_s face_location,
- mv_face_facial_expression_recognized_cb expression_recognized_cb,
- void *user_data);
+ mv_source_h source,
+ mv_engine_config_h engine_cfg,
+ mv_rectangle_s face_location,
+ mv_face_facial_expression_recognized_cb expression_recognized_cb,
+ void *user_data);
/*******************************/
/* Recognition model behavior */
@@ -313,7 +313,7 @@ int mv_face_facial_expression_recognize_open(
* @see mv_face_recognition_model_destroy_open()
*/
int mv_face_recognition_model_create_open(
- mv_face_recognition_model_h *recognition_model);
+ mv_face_recognition_model_h *recognition_model);
/**
* @brief Destroys the face recognition model handle and releases all its
@@ -329,7 +329,7 @@ int mv_face_recognition_model_create_open(
* @see mv_face_recognition_model_create_open()
*/
int mv_face_recognition_model_destroy_open(
- mv_face_recognition_model_h recognition_model);
+ mv_face_recognition_model_h recognition_model);
/**
* @brief Creates a copy of existed recognition model handle and clones all its
@@ -353,8 +353,8 @@ int mv_face_recognition_model_destroy_open(
* @see mv_face_recognition_model_create_open()
*/
int mv_face_recognition_model_clone_open(
- mv_face_recognition_model_h src,
- mv_face_recognition_model_h *dst);
+ mv_face_recognition_model_h src,
+ mv_face_recognition_model_h *dst);
/**
* @brief Saves recognition model to the file.
@@ -387,8 +387,8 @@ int mv_face_recognition_model_clone_open(
* @see mv_face_recognition_model_create_open()
*/
int mv_face_recognition_model_save_open(
- const char *file_name,
- mv_face_recognition_model_h recognition_model);
+ const char *file_name,
+ mv_face_recognition_model_h recognition_model);
/**
* @brief Loads recognition model from file.
@@ -420,8 +420,8 @@ int mv_face_recognition_model_save_open(
* @see mv_face_recognition_model_destroy_open()
*/
int mv_face_recognition_model_load_open(
- const char *file_name,
- mv_face_recognition_model_h *recognition_model);
+ const char *file_name,
+ mv_face_recognition_model_h *recognition_model);
/**
* @brief Adds face image example to be used for face recognition model learning
@@ -460,10 +460,10 @@ int mv_face_recognition_model_load_open(
* @see mv_face_recognition_model_learn_open()
*/
int mv_face_recognition_model_add_open(
- const mv_source_h source,
- mv_face_recognition_model_h recognition_model,
- const mv_rectangle_s *example_location,
- int face_label);
+ const mv_source_h source,
+ mv_face_recognition_model_h recognition_model,
+ const mv_rectangle_s *example_location,
+ int face_label);
/**
* @brief Remove from @a recognition_model all collected with
@@ -498,8 +498,8 @@ int mv_face_recognition_model_add_open(
* @see mv_face_recognition_model_learn_open()
*/
int mv_face_recognition_model_reset_open(
- mv_face_recognition_model_h recognition_model,
- const int *face_label);
+ mv_face_recognition_model_h recognition_model,
+ const int *face_label);
/**
* @brief Learns face recognition model.
@@ -555,8 +555,8 @@ int mv_face_recognition_model_reset_open(
* @see mv_face_recognize_open()
*/
int mv_face_recognition_model_learn_open(
- mv_engine_config_h engine_cfg,
- mv_face_recognition_model_h recognition_model);
+ mv_engine_config_h engine_cfg,
+ mv_face_recognition_model_h recognition_model);
/**
* @brief Queries labels list and number of labels had been learned by the model.
@@ -585,9 +585,9 @@ int mv_face_recognition_model_learn_open(
* @see mv_face_recognition_model_learn_open()
*/
int mv_face_recognition_model_query_labels_open(
- mv_face_recognition_model_h recognition_model,
- int **labels,
- unsigned int *number_of_labels);
+ mv_face_recognition_model_h recognition_model,
+ int **labels,
+ unsigned int *number_of_labels);
/***************************/
/* Tracking model behavior */
@@ -628,7 +628,7 @@ int mv_face_recognition_model_query_labels_open(
* @see mv_face_tracking_model_load_open()
*/
int mv_face_tracking_model_create_open(
- mv_face_tracking_model_h *tracking_model);
+ mv_face_tracking_model_h *tracking_model);
/**
* @brief Call this function to destroy the face tracking model handle and
@@ -645,7 +645,7 @@ int mv_face_tracking_model_create_open(
* @see mv_face_tracking_model_create_open()
*/
int mv_face_tracking_model_destroy_open(
- mv_face_tracking_model_h tracking_model);
+ mv_face_tracking_model_h tracking_model);
/**
* @brief Call this function to initialize tracking model by the location of the
@@ -689,10 +689,10 @@ int mv_face_tracking_model_destroy_open(
* @see mv_face_track_open()
*/
int mv_face_tracking_model_prepare_open(
- mv_face_tracking_model_h tracking_model,
- mv_engine_config_h engine_cfg,
- mv_source_h source,
- mv_quadrangle_s */*location*/);
+ mv_face_tracking_model_h tracking_model,
+ mv_engine_config_h engine_cfg,
+ mv_source_h source,
+ mv_quadrangle_s */*location*/);
/**
* @brief Call this function to make a copy of existed tracking model handle and
@@ -717,8 +717,8 @@ int mv_face_tracking_model_prepare_open(
* @see mv_face_tracking_model_create_open()
*/
int mv_face_tracking_model_clone_open(
- mv_face_tracking_model_h src,
- mv_face_tracking_model_h *dst);
+ mv_face_tracking_model_h src,
+ mv_face_tracking_model_h *dst);
/**
* @brief Call this method to save tracking model to the file.
@@ -747,8 +747,8 @@ int mv_face_tracking_model_clone_open(
* @see mv_face_tracking_model_create_open()
*/
int mv_face_tracking_model_save_open(
- const char *file_name,
- mv_face_tracking_model_h tracking_model);
+ const char *file_name,
+ mv_face_tracking_model_h tracking_model);
/**
* @brief Call this method to load a tracking model from file.
@@ -781,8 +781,8 @@ int mv_face_tracking_model_save_open(
* @see mv_face_tracking_model_destroy_open()
*/
int mv_face_tracking_model_load_open(
- const char *file_name,
- mv_face_tracking_model_h *tracking_model);
+ const char *file_name,
+ mv_face_tracking_model_h *tracking_model);
#ifdef __cplusplus
}
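
A hedged sketch of the recognition-model lifecycle through the *_open() entry points declared above (illustration only, not part of the patch). Passing NULL for engine_cfg and example_location to request default behaviour, and the availability of MEDIA_VISION_ERROR_NONE through the included header, are assumptions.

	#include "mv_face_open.h"

	static int build_and_save_model(mv_source_h *samples,
	                                unsigned int number_of_samples,
	                                int face_label,
	                                const char *file_name)
	{
		mv_face_recognition_model_h model = NULL;
		int err = mv_face_recognition_model_create_open(&model);
		if (err != MEDIA_VISION_ERROR_NONE)
			return err;

		/* Collect examples; NULL location assumed to mean "use the whole source" */
		for (unsigned int i = 0; i < number_of_samples; ++i) {
			err = mv_face_recognition_model_add_open(
					samples[i], model, NULL, face_label);
			if (err != MEDIA_VISION_ERROR_NONE)
				goto cleanup;
		}

		/* Learn with the default engine configuration, then persist the model */
		err = mv_face_recognition_model_learn_open(NULL, model);
		if (err != MEDIA_VISION_ERROR_NONE)
			goto cleanup;

		err = mv_face_recognition_model_save_open(file_name, model);

	cleanup:
		mv_face_recognition_model_destroy_open(model);
		return err;
	}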
diff --git a/mv_face/face/src/FaceDetector.cpp b/mv_face/face/src/FaceDetector.cpp
index 21d81958..d9b4fe5b 100644
--- a/mv_face/face/src/FaceDetector.cpp
+++ b/mv_face/face/src/FaceDetector.cpp
@@ -16,89 +16,78 @@
#include "FaceDetector.h"
-namespace MediaVision
-{
-namespace Face
-{
-
+namespace MediaVision {
+namespace Face {
FaceDetector::FaceDetector() :
- m_faceCascade(),
- m_haarcascadeFilepath(),
- m_faceCascadeIsLoaded(false)
+ m_faceCascade(),
+ m_haarcascadeFilepath(),
+ m_faceCascadeIsLoaded(false)
{
; /* NULL */
}
FaceDetector::~FaceDetector()
{
- ; /* NULL */
+ ; /* NULL */
}
bool FaceDetector::detectFaces(
- const cv::Mat& image,
- const cv::Rect& roi,
- const cv::Size& minSize,
- std::vector<cv::Rect>& faceLocations)
+ const cv::Mat& image,
+ const cv::Rect& roi,
+ const cv::Size& minSize,
+ std::vector<cv::Rect>& faceLocations)
{
- if (!m_faceCascadeIsLoaded)
- {
- return false;
- }
+ if (!m_faceCascadeIsLoaded) {
+ return false;
+ }
- faceLocations.clear();
+ faceLocations.clear();
- cv::Mat intrestingRegion = image;
+ cv::Mat intrestingRegion = image;
- bool roiIsUsed = false;
- if (roi.x >= 0 && roi.y >= 0 && roi.width > 0 && roi.height > 0 &&
- (roi.x + roi.width) <= image.cols && (roi.y + roi.height) <= image.rows)
- {
- intrestingRegion = intrestingRegion(roi);
- roiIsUsed = true;
- }
+ bool roiIsUsed = false;
+ if (roi.x >= 0 && roi.y >= 0 && roi.width > 0 && roi.height > 0 &&
+ (roi.x + roi.width) <= image.cols &&
+ (roi.y + roi.height) <= image.rows) {
+ intrestingRegion = intrestingRegion(roi);
+ roiIsUsed = true;
+ }
- if (minSize.width > 0 && minSize.height > 0 &&
- minSize.width <= image.cols && minSize.height <= image.rows)
- {
- m_faceCascade.detectMultiScale(
- intrestingRegion,
- faceLocations,
- 1.1,
- 3,
- 0,
- minSize);
- }
- else
- {
- m_faceCascade.detectMultiScale(intrestingRegion, faceLocations);
- }
+ if (minSize.width > 0 && minSize.height > 0 &&
+ minSize.width <= image.cols && minSize.height <= image.rows) {
+ m_faceCascade.detectMultiScale(
+ intrestingRegion,
+ faceLocations,
+ 1.1,
+ 3,
+ 0,
+ minSize);
+ } else {
+ m_faceCascade.detectMultiScale(intrestingRegion, faceLocations);
+ }
- if (roiIsUsed)
- {
- const size_t numberOfLocations = faceLocations.size();
- for (size_t i = 0u; i < numberOfLocations; ++i)
- {
- faceLocations[i].x += roi.x;
- faceLocations[i].y += roi.y;
- }
- }
+ if (roiIsUsed) {
+ const size_t numberOfLocations = faceLocations.size();
+ for (size_t i = 0u; i < numberOfLocations; ++i) {
+ faceLocations[i].x += roi.x;
+ faceLocations[i].y += roi.y;
+ }
+ }
- return true;
+ return true;
}
bool FaceDetector::loadHaarcascade(const std::string& haarcascadeFilepath)
{
+ if (!m_faceCascadeIsLoaded ||
+ m_haarcascadeFilepath != haarcascadeFilepath) {
+ if (!(m_faceCascadeIsLoaded = m_faceCascade.load(haarcascadeFilepath))) {
+ return false;
+ }
+ m_haarcascadeFilepath = haarcascadeFilepath;
+ }
- if (!m_faceCascadeIsLoaded || m_haarcascadeFilepath != haarcascadeFilepath)
- {
- if (!(m_faceCascadeIsLoaded = m_faceCascade.load(haarcascadeFilepath)))
- {
- return false;
- }
- m_haarcascadeFilepath = haarcascadeFilepath;
- }
-
- return true;
+ return true;
}
} /* Face */
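
A minimal sketch of the FaceDetector usage implied by the implementation above (not part of the patch). The cascade path is an assumption; any frontal-face Haar cascade installed with OpenCV works.

	#include <vector>
	#include <opencv2/core/core.hpp>

	#include "FaceDetector.h"

	bool findFaces(const cv::Mat& grayFrame, std::vector<cv::Rect>& faces)
	{
		MediaVision::Face::FaceDetector detector;

		/* Example cascade path; adjust to the cascade actually installed */
		if (!detector.loadHaarcascade(
				"/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml"))
			return false;

		/* A zero-sized ROI leaves the region restriction disabled (see
		   detectFaces above); a 30x30 minSize skips very small detections */
		return detector.detectFaces(grayFrame,
				cv::Rect(0, 0, 0, 0),
				cv::Size(30, 30),
				faces);
	}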
diff --git a/mv_face/face/src/FaceExpressionRecognizer.cpp b/mv_face/face/src/FaceExpressionRecognizer.cpp
index 51d9d05e..e32ddc09 100644
--- a/mv_face/face/src/FaceExpressionRecognizer.cpp
+++ b/mv_face/face/src/FaceExpressionRecognizer.cpp
@@ -22,83 +22,74 @@
#include <opencv/cv.h>
-namespace MediaVision
-{
-namespace Face
-{
-
+namespace MediaVision {
+namespace Face {
static const int MinDetectionWidth = 30;
static const int MinDetectionHeight = 30;
FaceRecognizerConfig::FaceRecognizerConfig() :
- mHaarcascadeFilepath(
- "/usr/share/OpenCV/haarcascades/haarcascade_smile.xml")
+ mHaarcascadeFilepath(
+ "/usr/share/OpenCV/haarcascades/haarcascade_smile.xml")
{
- ; /* NULL */
+ ; /* NULL */
}
int FaceExpressionRecognizer::recognizeFaceExpression(
- const cv::Mat& grayImage,
- const mv_rectangle_s& faceLocation,
- mv_face_facial_expression_e *faceExpression,
- const FaceRecognizerConfig& config)
+ const cv::Mat& grayImage,
+ const mv_rectangle_s& faceLocation,
+ mv_face_facial_expression_e *faceExpression,
+ const FaceRecognizerConfig& config)
{
- if (NULL == faceExpression)
- {
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- const int smileRectHeight = cvRound((float)faceLocation.height / 2);
-
- const cv::Rect roi(
- faceLocation.point.x,
- faceLocation.point.y + faceLocation.height - smileRectHeight,
- faceLocation.width,
- smileRectHeight);
-
- if (roi.width < MinDetectionWidth ||
- roi.height < MinDetectionHeight)
- {
- (*faceExpression) = MV_FACE_UNKNOWN;
- return MEDIA_VISION_ERROR_NONE;
- }
-
- if (0 > roi.x ||
- 0 > roi.y ||
- roi.x + roi.width > grayImage.cols ||
- roi.y + roi.height > grayImage.rows)
- {
- return MEDIA_VISION_ERROR_INVALID_OPERATION;
- }
-
- const cv::Mat mouthImg(grayImage, roi);
-
- std::vector<cv::Rect> areas;
-
- cv::CascadeClassifier smileClassifier;
- smileClassifier.load(config.mHaarcascadeFilepath);
- smileClassifier.detectMultiScale(
- mouthImg,
- areas,
- 1.1,
- 80,
- cv::CASCADE_FIND_BIGGEST_OBJECT |
- cv::CASCADE_DO_CANNY_PRUNING |
- cv::CASCADE_SCALE_IMAGE,
- cv::Size(MinDetectionWidth, MinDetectionHeight));
-
- (*faceExpression) = MV_FACE_UNKNOWN;
- const size_t smilesFoundSize = areas.size();
- if (smilesFoundSize == 0)
- {
- (*faceExpression) = MV_FACE_NEUTRAL;
- }
- else if (smilesFoundSize == 1)
- {
- (*faceExpression) = MV_FACE_SMILE;
- }
-
- return MEDIA_VISION_ERROR_NONE;
+ if (NULL == faceExpression) {
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ const int smileRectHeight = cvRound((float)faceLocation.height / 2);
+
+ const cv::Rect roi(
+ faceLocation.point.x,
+ faceLocation.point.y + faceLocation.height - smileRectHeight,
+ faceLocation.width,
+ smileRectHeight);
+
+ if (roi.width < MinDetectionWidth ||
+ roi.height < MinDetectionHeight) {
+ (*faceExpression) = MV_FACE_UNKNOWN;
+ return MEDIA_VISION_ERROR_NONE;
+ }
+
+ if (0 > roi.x ||
+ 0 > roi.y ||
+ roi.x + roi.width > grayImage.cols ||
+ roi.y + roi.height > grayImage.rows) {
+ return MEDIA_VISION_ERROR_INVALID_OPERATION;
+ }
+
+ const cv::Mat mouthImg(grayImage, roi);
+
+ std::vector<cv::Rect> areas;
+
+ cv::CascadeClassifier smileClassifier;
+ smileClassifier.load(config.mHaarcascadeFilepath);
+ smileClassifier.detectMultiScale(
+ mouthImg,
+ areas,
+ 1.1,
+ 80,
+ cv::CASCADE_FIND_BIGGEST_OBJECT |
+ cv::CASCADE_DO_CANNY_PRUNING |
+ cv::CASCADE_SCALE_IMAGE,
+ cv::Size(MinDetectionWidth, MinDetectionHeight));
+
+ (*faceExpression) = MV_FACE_UNKNOWN;
+ const size_t smilesFoundSize = areas.size();
+ if (smilesFoundSize == 0) {
+ (*faceExpression) = MV_FACE_NEUTRAL;
+ } else if (smilesFoundSize == 1) {
+ (*faceExpression) = MV_FACE_SMILE;
+ }
+
+ return MEDIA_VISION_ERROR_NONE;
}
} /* Face */
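
An illustrative call into the expression recognizer above (not part of the patch); it assumes recognizeFaceExpression is a static member, as the implementation suggests, and that the default FaceRecognizerConfig smile-cascade path is valid on the target.

	#include "FaceExpressionRecognizer.h"

	mv_face_facial_expression_e detectSmile(const cv::Mat& grayImage,
	                                        mv_rectangle_s faceLocation)
	{
		mv_face_facial_expression_e expression = MV_FACE_UNKNOWN;

		/* Default config points at the bundled haarcascade_smile.xml */
		MediaVision::Face::FaceRecognizerConfig config;

		const int err =
				MediaVision::Face::FaceExpressionRecognizer::recognizeFaceExpression(
						grayImage, faceLocation, &expression, config);

		/* On success expression is MV_FACE_NEUTRAL, MV_FACE_SMILE or MV_FACE_UNKNOWN */
		return err == 0 ? expression : MV_FACE_UNKNOWN;
	}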
diff --git a/mv_face/face/src/FaceEyeCondition.cpp b/mv_face/face/src/FaceEyeCondition.cpp
index 9432d1e1..10d9e6e8 100644
--- a/mv_face/face/src/FaceEyeCondition.cpp
+++ b/mv_face/face/src/FaceEyeCondition.cpp
@@ -20,209 +20,189 @@
#include <vector>
-namespace MediaVision
-{
-namespace Face
-{
-
+namespace MediaVision {
+namespace Face {
void FaceEyeCondition::splitEyes(
- const cv::Mat& grayImage,
- mv_rectangle_s faceLocation,
- cv::Mat& leftEye,
- cv::Mat& rightEye)
+ const cv::Mat& grayImage,
+ mv_rectangle_s faceLocation,
+ cv::Mat& leftEye,
+ cv::Mat& rightEye)
{
- leftEye = grayImage.rowRange(0, grayImage.rows / 2 - grayImage.rows / 10)
- .colRange(grayImage.cols / 2 + grayImage.cols / 10,
- grayImage.cols)
- .clone();
-
- rightEye = grayImage.rowRange(grayImage.rows / 2 + grayImage.rows / 10,
- grayImage.rows)
- .colRange(grayImage.cols / 2 + grayImage.cols / 10,
- grayImage.cols)
- .clone();
-
- const cv::Rect faceRect(
- faceLocation.point.x,
- faceLocation.point.y,
- faceLocation.width,
- faceLocation.height);
-
- const cv::Rect eyeAreaRight(
- faceRect.x + faceRect.width / 16,
- (int) (faceRect.y + (faceRect.height / 4.5)),
- (faceRect.width - 2 * faceRect.width / 16) / 2,
- (int) (faceRect.height / 3.0));
-
- const cv::Rect eyeAreaLeft(
- faceRect.x + faceRect.width / 16
- + (faceRect.width - 2 * faceRect.width / 16) / 2,
- (int) (faceRect.y + (faceRect.height / 4.5)),
- (faceRect.width - 2 * faceRect.width / 16) / 2,
- (int) (faceRect.height / 3.0));
-
- const double xLeftEyeCenter = (2 * eyeAreaLeft.x + eyeAreaLeft.width) / 2.;
- const double yLeftEyeCenter = (2 * eyeAreaLeft.y + eyeAreaLeft.height) / 2.;
-
- const double xRightEyeCenter = (2 * eyeAreaRight.x + eyeAreaRight.width) / 2.;
- const double yRightEyeCenter = (2 * eyeAreaRight.y + eyeAreaRight.height) / 2.;
-
- const cv::Rect leftEyeRect(xLeftEyeCenter - eyeAreaLeft.width / 4,
- yLeftEyeCenter - eyeAreaLeft.height / 4,
- eyeAreaLeft.width / 2,
- eyeAreaLeft.height / 2);
-
- const cv::Rect rightEyeRect(xRightEyeCenter - eyeAreaRight.width / 4,
- yRightEyeCenter - eyeAreaRight.height / 4,
- eyeAreaRight.width / 2,
- eyeAreaRight.height / 2);
-
- cv::resize(
- grayImage(leftEyeRect),
- leftEye,
- leftEye.size());
+ leftEye = grayImage.rowRange(0, grayImage.rows / 2 - grayImage.rows / 10)
+ .colRange(grayImage.cols / 2 + grayImage.cols / 10,
+ grayImage.cols)
+ .clone();
+
+ rightEye = grayImage.rowRange(grayImage.rows / 2 + grayImage.rows / 10,
+ grayImage.rows)
+ .colRange(grayImage.cols / 2 + grayImage.cols / 10,
+ grayImage.cols)
+ .clone();
+
+ const cv::Rect faceRect(
+ faceLocation.point.x,
+ faceLocation.point.y,
+ faceLocation.width,
+ faceLocation.height);
+
+ const cv::Rect eyeAreaRight(
+ faceRect.x + faceRect.width / 16,
+ (int) (faceRect.y + (faceRect.height / 4.5)),
+ (faceRect.width - 2 * faceRect.width / 16) / 2,
+ (int) (faceRect.height / 3.0));
+
+ const cv::Rect eyeAreaLeft(
+ faceRect.x + faceRect.width / 16
+ + (faceRect.width - 2 * faceRect.width / 16) / 2,
+ (int) (faceRect.y + (faceRect.height / 4.5)),
+ (faceRect.width - 2 * faceRect.width / 16) / 2,
+ (int) (faceRect.height / 3.0));
+
+ const double xLeftEyeCenter = (2 * eyeAreaLeft.x + eyeAreaLeft.width) / 2.;
+ const double yLeftEyeCenter = (2 * eyeAreaLeft.y + eyeAreaLeft.height) / 2.;
+
+ const double xRightEyeCenter = (2 * eyeAreaRight.x + eyeAreaRight.width) / 2.;
+ const double yRightEyeCenter = (2 * eyeAreaRight.y + eyeAreaRight.height) / 2.;
+
+ const cv::Rect leftEyeRect(xLeftEyeCenter - eyeAreaLeft.width / 4,
+ yLeftEyeCenter - eyeAreaLeft.height / 4,
+ eyeAreaLeft.width / 2,
+ eyeAreaLeft.height / 2);
+
+ const cv::Rect rightEyeRect(xRightEyeCenter - eyeAreaRight.width / 4,
+ yRightEyeCenter - eyeAreaRight.height / 4,
+ eyeAreaRight.width / 2,
+ eyeAreaRight.height / 2);
+
+ cv::resize(
+ grayImage(leftEyeRect),
+ leftEye,
+ leftEye.size());
cv::resize(
- grayImage(rightEyeRect),
- rightEye,
- rightEye.size());
+ grayImage(rightEyeRect),
+ rightEye,
+ rightEye.size());
}
int FaceEyeCondition::isEyeOpen(const cv::Mat& eye)
{
- int isOpen = MV_FACE_EYES_CLOSED;
-
- cv::Mat eyeEqualized;
- cv::equalizeHist(eye, eyeEqualized);
-
- const int thresold = 8;
- eyeEqualized = eyeEqualized < thresold;
-
- std::vector<std::vector<cv::Point> > contours;
- std::vector<cv::Vec4i> hierarchy;
-
- cv::findContours(
- eyeEqualized,
- contours,
- hierarchy,
- CV_RETR_CCOMP,
- CV_CHAIN_APPROX_SIMPLE);
-
- const size_t contoursSize = contours.size();
-
- if (!contoursSize)
- {
- return MV_FACE_EYES_NOT_FOUND;
- }
-
- const int xCenter = eyeEqualized.cols / 2;
- const int yCenter = eyeEqualized.rows / 2;
- const int width = eyeEqualized.cols / 2.5;
- const int height = eyeEqualized.rows / 2.5;
-
- const cv::Rect boundThresold(xCenter - width, yCenter - height, 2 * width, 2 * height);
-
- const int widthHeightRatio = 3;
- const double areaRatio = 0.005;
- const double areaSmallRatio = 0.0005;
- size_t rectanglesInsideCount = 0u;
-
- for (size_t i = 0; i < contoursSize; ++i)
- {
- const cv::Rect currentRect = cv::boundingRect(contours[i]);
- const double currentArea = cv::contourArea(contours[i]);
-
- if (boundThresold.contains(currentRect.br()) &&
- boundThresold.contains(currentRect.tl()) &&
- currentArea > areaRatio * boundThresold.area() &&
- currentRect.width < widthHeightRatio * currentRect.height)
- {
- isOpen = MV_FACE_EYES_OPEN;
- }
- else if (boundThresold.contains(currentRect.br()) &&
- boundThresold.contains(currentRect.tl()) &&
- currentArea > areaSmallRatio * boundThresold.area())
- {
- ++rectanglesInsideCount;
- }
- }
-
- if (rectanglesInsideCount > 8u)
- {
- isOpen = MV_FACE_EYES_CLOSED;
- }
-
- return isOpen;
+ int isOpen = MV_FACE_EYES_CLOSED;
+
+ cv::Mat eyeEqualized;
+ cv::equalizeHist(eye, eyeEqualized);
+
+ const int thresold = 8;
+ eyeEqualized = eyeEqualized < thresold;
+
+ std::vector<std::vector<cv::Point> > contours;
+ std::vector<cv::Vec4i> hierarchy;
+
+ cv::findContours(
+ eyeEqualized,
+ contours,
+ hierarchy,
+ CV_RETR_CCOMP,
+ CV_CHAIN_APPROX_SIMPLE);
+
+ const size_t contoursSize = contours.size();
+
+ if (!contoursSize) {
+ return MV_FACE_EYES_NOT_FOUND;
+ }
+
+ const int xCenter = eyeEqualized.cols / 2;
+ const int yCenter = eyeEqualized.rows / 2;
+ const int width = eyeEqualized.cols / 2.5;
+ const int height = eyeEqualized.rows / 2.5;
+
+ const cv::Rect boundThresold(xCenter - width, yCenter - height, 2 * width, 2 * height);
+
+ const int widthHeightRatio = 3;
+ const double areaRatio = 0.005;
+ const double areaSmallRatio = 0.0005;
+ size_t rectanglesInsideCount = 0u;
+
+ for (size_t i = 0; i < contoursSize; ++i) {
+ const cv::Rect currentRect = cv::boundingRect(contours[i]);
+ const double currentArea = cv::contourArea(contours[i]);
+
+ if (boundThresold.contains(currentRect.br()) &&
+ boundThresold.contains(currentRect.tl()) &&
+ currentArea > areaRatio * boundThresold.area() &&
+ currentRect.width < widthHeightRatio * currentRect.height) {
+ isOpen = MV_FACE_EYES_OPEN;
+ } else if (boundThresold.contains(currentRect.br()) &&
+ boundThresold.contains(currentRect.tl()) &&
+ currentArea > areaSmallRatio * boundThresold.area()) {
+ ++rectanglesInsideCount;
+ }
+ }
+
+ if (rectanglesInsideCount > 8u) {
+ isOpen = MV_FACE_EYES_CLOSED;
+ }
+
+ return isOpen;
}
int FaceEyeCondition::recognizeEyeCondition(
- const cv::Mat& grayImage,
- mv_rectangle_s faceLocation,
- mv_face_eye_condition_e *eyeCondition)
+ const cv::Mat& grayImage,
+ mv_rectangle_s faceLocation,
+ mv_face_eye_condition_e *eyeCondition)
{
- if (grayImage.empty())
- {
- *eyeCondition = MV_FACE_EYES_NOT_FOUND;
-
- LOGE("Input image is empty. Eye condition recognition failed.");
- return MEDIA_VISION_ERROR_NO_DATA;
- }
-
- if (faceLocation.height <= 0 || faceLocation.width <= 0 ||
- faceLocation.point.x < 0 || faceLocation.point.y < 0 ||
- (faceLocation.point.x + faceLocation.width) > grayImage.cols ||
- (faceLocation.point.y + faceLocation.height) > grayImage.rows)
- {
- *eyeCondition = MV_FACE_EYES_NOT_FOUND;
-
- LOGE("Input face location is wrong. Eye condition recognition failed.");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- if (NULL == eyeCondition)
- {
- *eyeCondition = MV_FACE_EYES_NOT_FOUND;
-
- LOGE("Output eye condition is NULL. Eye condition recognition failed.");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- // split left and right eyes
- cv::Mat leftEye;
- cv::Mat rightEye;
- splitEyes(grayImage, faceLocation, leftEye, rightEye);
-
- // recognize eyes conditions
- const int isOpenLeft = isEyeOpen(leftEye);
-
- if (isOpenLeft == MV_FACE_EYES_CLOSED)
- {
- *eyeCondition = MV_FACE_EYES_CLOSED;
-
- return MEDIA_VISION_ERROR_NONE;
- }
- else if (isOpenLeft == MV_FACE_EYES_NOT_FOUND)
- {
- *eyeCondition = MV_FACE_EYES_NOT_FOUND;
-
- return MEDIA_VISION_ERROR_NONE;
- }
-
- const int isOpenRight = isEyeOpen(rightEye);
-
- if (isOpenRight == MV_FACE_EYES_OPEN)
- {
- *eyeCondition = MV_FACE_EYES_OPEN;
- }
- else if (isOpenRight == MV_FACE_EYES_CLOSED)
- {
- *eyeCondition = MV_FACE_EYES_CLOSED;
- }
- else
- {
- *eyeCondition = MV_FACE_EYES_NOT_FOUND;
- }
-
- return MEDIA_VISION_ERROR_NONE;
+ if (grayImage.empty()) {
+ *eyeCondition = MV_FACE_EYES_NOT_FOUND;
+
+ LOGE("Input image is empty. Eye condition recognition failed.");
+ return MEDIA_VISION_ERROR_NO_DATA;
+ }
+
+ if (faceLocation.height <= 0 || faceLocation.width <= 0 ||
+ faceLocation.point.x < 0 || faceLocation.point.y < 0 ||
+ (faceLocation.point.x + faceLocation.width) > grayImage.cols ||
+ (faceLocation.point.y + faceLocation.height) > grayImage.rows) {
+ *eyeCondition = MV_FACE_EYES_NOT_FOUND;
+
+ LOGE("Input face location is wrong. Eye condition recognition failed.");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ if (NULL == eyeCondition) {
+ *eyeCondition = MV_FACE_EYES_NOT_FOUND;
+
+ LOGE("Output eye condition is NULL. Eye condition recognition failed.");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ /* split left and right eyes */
+ cv::Mat leftEye;
+ cv::Mat rightEye;
+ splitEyes(grayImage, faceLocation, leftEye, rightEye);
+
+ /* recognize eyes conditions */
+ const int isOpenLeft = isEyeOpen(leftEye);
+
+ if (isOpenLeft == MV_FACE_EYES_CLOSED) {
+ *eyeCondition = MV_FACE_EYES_CLOSED;
+
+ return MEDIA_VISION_ERROR_NONE;
+ } else if (isOpenLeft == MV_FACE_EYES_NOT_FOUND) {
+ *eyeCondition = MV_FACE_EYES_NOT_FOUND;
+
+ return MEDIA_VISION_ERROR_NONE;
+ }
+
+ const int isOpenRight = isEyeOpen(rightEye);
+
+ if (isOpenRight == MV_FACE_EYES_OPEN) {
+ *eyeCondition = MV_FACE_EYES_OPEN;
+ } else if (isOpenRight == MV_FACE_EYES_CLOSED) {
+ *eyeCondition = MV_FACE_EYES_CLOSED;
+ } else {
+ *eyeCondition = MV_FACE_EYES_NOT_FOUND;
+ }
+
+ return MEDIA_VISION_ERROR_NONE;
}
} /* Face */
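
A short sketch of calling the eye-condition recognizer above (illustration only, not part of the patch); it assumes the methods are static, as the implementation suggests.

	#include "FaceEyeCondition.h"

	mv_face_eye_condition_e checkEyes(const cv::Mat& grayImage,
	                                  mv_rectangle_s faceLocation)
	{
		mv_face_eye_condition_e condition = MV_FACE_EYES_NOT_FOUND;

		const int err =
				MediaVision::Face::FaceEyeCondition::recognizeEyeCondition(
						grayImage, faceLocation, &condition);

		/* On success condition is MV_FACE_EYES_OPEN, MV_FACE_EYES_CLOSED
		   or MV_FACE_EYES_NOT_FOUND */
		return err == 0 ? condition : MV_FACE_EYES_NOT_FOUND;
	}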
diff --git a/mv_face/face/src/FaceRecognitionModel.cpp b/mv_face/face/src/FaceRecognitionModel.cpp
index 1887cea8..c18de177 100644
--- a/mv_face/face/src/FaceRecognitionModel.cpp
+++ b/mv_face/face/src/FaceRecognitionModel.cpp
@@ -26,520 +26,473 @@
#include <stdio.h>
#include <unistd.h>
-namespace MediaVision
-{
-namespace Face
-{
-
-namespace
-{
-
+namespace MediaVision {
+namespace Face {
+namespace {
int CopyOpenCVAlgorithmParameters(const cv::Ptr<cv::FaceRecognizer>& srcAlg,
- cv::Ptr<cv::FaceRecognizer>& dstAlg)
+ cv::Ptr<cv::FaceRecognizer>& dstAlg)
{
- char tempPath[1024];
-
- sprintf(tempPath, "/tmp/alg_copy_%p_%p", srcAlg.obj, dstAlg.obj);
-
- srcAlg->save(tempPath);
- dstAlg->load(tempPath);
-
- if (0 != remove(tempPath))
- {
- LOGW("Error removing serialized FaceRecognizer in %s", tempPath);
- }
-
- // todo: consider to uncomment this lines if OpenCV will support deep
- // copy of AlgorithmInfo objects:
-
- /*std::vector<std::string> paramNames;
- srcAlg->getParams(paramNames);
- size_t paramSize = paramNames.size();
- for (size_t i = 0; i < paramSize; ++i)
- {
- int pType = srcAlg->paramType(paramNames[i]);
-
- switch(pType)
- {
- case cv::Param::INT:
- case cv::Param::UNSIGNED_INT:
- case cv::Param::UINT64:
- case cv::Param::SHORT:
- case cv::Param::UCHAR:
- dstAlg->set(paramNames[i], srcAlg->getInt(paramNames[i]));
- break;
- case cv::Param::BOOLEAN:
- dstAlg->set(paramNames[i], srcAlg->getBool(paramNames[i]));
- break;
- case cv::Param::REAL:
- case cv::Param::FLOAT:
- dstAlg->set(paramNames[i], srcAlg->getDouble(paramNames[i]));
- break;
- case cv::Param::STRING:
- dstAlg->set(paramNames[i], srcAlg->getString(paramNames[i]));
- break;
- case cv::Param::MAT:
- dstAlg->set(paramNames[i], srcAlg->getMat(paramNames[i]));
- break;
- case cv::Param::MAT_VECTOR:
- {
- //std::vector<cv::Mat> value = srcAlg->getMatVector(paramNames[i]);
- //dstAlg->info()->addParam(*(dstAlg.obj), paramNames[i].c_str(), value);
- dstAlg->set(paramNames[i], srcAlg->getMatVector(paramNames[i]));
- break;
- }
- case cv::Param::ALGORITHM:
- dstAlg->set(paramNames[i], srcAlg->getAlgorithm(paramNames[i]));
- break;
- default:
- LOGE("While copying algorothm parameters unsupported parameter "
- "%s was found.", paramNames[i].c_str());
-
- return MEDIA_VISION_ERROR_NOT_SUPPORTED;
- break;
- }
- }*/
-
- return MEDIA_VISION_ERROR_NONE;
+ char tempPath[1024];
+
+ sprintf(tempPath, "/tmp/alg_copy_%p_%p", srcAlg.obj, dstAlg.obj);
+
+ srcAlg->save(tempPath);
+ dstAlg->load(tempPath);
+
+ if (0 != remove(tempPath)) {
+ LOGW("Error removing serialized FaceRecognizer in %s", tempPath);
+ }
+
+ /* TODO: consider uncommenting these lines once OpenCV supports deep
+ copy of AlgorithmInfo objects: */
+
+ /*std::vector<std::string> paramNames;
+ srcAlg->getParams(paramNames);
+ size_t paramSize = paramNames.size();
+ for (size_t i = 0; i < paramSize; ++i) {
+ int pType = srcAlg->paramType(paramNames[i]);
+
+ switch(pType) {
+ case cv::Param::INT:
+ case cv::Param::UNSIGNED_INT:
+ case cv::Param::UINT64:
+ case cv::Param::SHORT:
+ case cv::Param::UCHAR:
+ dstAlg->set(paramNames[i], srcAlg->getInt(paramNames[i]));
+ break;
+ case cv::Param::BOOLEAN:
+ dstAlg->set(paramNames[i], srcAlg->getBool(paramNames[i]));
+ break;
+ case cv::Param::REAL:
+ case cv::Param::FLOAT:
+ dstAlg->set(paramNames[i], srcAlg->getDouble(paramNames[i]));
+ break;
+ case cv::Param::STRING:
+ dstAlg->set(paramNames[i], srcAlg->getString(paramNames[i]));
+ break;
+ case cv::Param::MAT:
+ dstAlg->set(paramNames[i], srcAlg->getMat(paramNames[i]));
+ break;
+ case cv::Param::MAT_VECTOR:
+ {
+ //std::vector<cv::Mat> value = srcAlg->getMatVector(paramNames[i]);
+ //dstAlg->info()->addParam(*(dstAlg.obj), paramNames[i].c_str(), value);
+ dstAlg->set(paramNames[i], srcAlg->getMatVector(paramNames[i]));
+ break;
+ }
+ case cv::Param::ALGORITHM:
+ dstAlg->set(paramNames[i], srcAlg->getAlgorithm(paramNames[i]));
+ break;
+ default:
+ LOGE("While copying algorothm parameters unsupported parameter "
+ "%s was found.", paramNames[i].c_str());
+
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED;
+ }
+ }*/
+
+ return MEDIA_VISION_ERROR_NONE;
}
void ParseOpenCVLabels(
- const cv::Ptr<cv::FaceRecognizer>& recognizer,
- std::set<int>& outLabels)
+ const cv::Ptr<cv::FaceRecognizer>& recognizer,
+ std::set<int>& outLabels)
{
- if (!recognizer.empty())
- {
- cv::Mat labels = recognizer->getMat("labels");
- for(int i = 0; i < labels.rows; ++i)
- {
- outLabels.insert(labels.at<int>(i, 0));
- }
- }
+ if (!recognizer.empty()) {
+ cv::Mat labels = recognizer->getMat("labels");
+ for (int i = 0; i < labels.rows; ++i) {
+ outLabels.insert(labels.at<int>(i, 0));
+ }
+ }
}
} /* anonymous namespace */
FaceRecognitionModelConfig::FaceRecognitionModelConfig() :
- mModelType(MEDIA_VISION_FACE_MODEL_TYPE_UNKNOWN),
- mNumComponents(0),
- mThreshold(DBL_MAX),
- mRadius(1),
- mNeighbors(8),
- mGridX(8),
- mGridY(8),
- mImgWidth(150),
- mImgHeight(150)
+ mModelType(MEDIA_VISION_FACE_MODEL_TYPE_UNKNOWN),
+ mNumComponents(0),
+ mThreshold(DBL_MAX),
+ mRadius(1),
+ mNeighbors(8),
+ mGridX(8),
+ mGridY(8),
+ mImgWidth(150),
+ mImgHeight(150)
{
- ; /* NULL */
+ ; /* NULL */
}
FaceRecognitionResults::FaceRecognitionResults() :
- mIsRecognized(false),
- mFaceLabel(-1),
- mConfidence(0.0)
+ mIsRecognized(false),
+ mFaceLabel(-1),
+ mConfidence(0.0)
{
- ; /* NULL */
+ ; /* NULL */
}
bool FaceRecognitionModelConfig::operator!=(
- const FaceRecognitionModelConfig& other) const
+ const FaceRecognitionModelConfig& other) const
{
- return mModelType != other.mModelType ||
- mNumComponents != other.mNumComponents ||
- mThreshold != other.mThreshold ||
- mRadius != other.mRadius ||
- mNeighbors != other.mNeighbors ||
- mGridX != other.mGridX ||
- mGridY != other.mGridY ||
- mImgWidth != other.mImgWidth ||
- mImgHeight != other.mImgHeight;
+ return mModelType != other.mModelType ||
+ mNumComponents != other.mNumComponents ||
+ mThreshold != other.mThreshold ||
+ mRadius != other.mRadius ||
+ mNeighbors != other.mNeighbors ||
+ mGridX != other.mGridX ||
+ mGridY != other.mGridY ||
+ mImgWidth != other.mImgWidth ||
+ mImgHeight != other.mImgHeight;
}
FaceRecognitionModel::FaceRecognitionModel() :
- m_canRecognize(false),
- m_recognizer(NULL)
+ m_canRecognize(false),
+ m_recognizer(NULL)
{
- ; /* NULL */
+ ; /* NULL */
}
FaceRecognitionModel::FaceRecognitionModel(const FaceRecognitionModel& origin) :
- m_canRecognize(origin.m_canRecognize),
- m_faceSamples(origin.m_faceSamples),
- m_learnAlgorithmConfig(origin.m_learnAlgorithmConfig),
- m_recognizer(CreateRecognitionAlgorithm(origin.m_learnAlgorithmConfig)),
- m_learnedLabels(origin.m_learnedLabels)
+ m_canRecognize(origin.m_canRecognize),
+ m_faceSamples(origin.m_faceSamples),
+ m_learnAlgorithmConfig(origin.m_learnAlgorithmConfig),
+ m_recognizer(CreateRecognitionAlgorithm(origin.m_learnAlgorithmConfig)),
+ m_learnedLabels(origin.m_learnedLabels)
{
- if (!m_recognizer.empty())
- {
- CopyOpenCVAlgorithmParameters(origin.m_recognizer, m_recognizer);
- }
+ if (!m_recognizer.empty()) {
+ CopyOpenCVAlgorithmParameters(origin.m_recognizer, m_recognizer);
+ }
}
FaceRecognitionModel& FaceRecognitionModel::operator=(
- const FaceRecognitionModel& copy)
+ const FaceRecognitionModel& copy)
{
- if (this != &copy)
- {
- m_canRecognize = copy.m_canRecognize;
- m_faceSamples = copy.m_faceSamples;
- m_learnAlgorithmConfig = copy.m_learnAlgorithmConfig;
- m_recognizer = CreateRecognitionAlgorithm(m_learnAlgorithmConfig);
- m_learnedLabels = copy.m_learnedLabels;
-
- if (!m_recognizer.empty())
- {
- CopyOpenCVAlgorithmParameters(copy.m_recognizer, m_recognizer);
- }
- }
-
- return *this;
+ if (this != &copy) {
+ m_canRecognize = copy.m_canRecognize;
+ m_faceSamples = copy.m_faceSamples;
+ m_learnAlgorithmConfig = copy.m_learnAlgorithmConfig;
+ m_recognizer = CreateRecognitionAlgorithm(m_learnAlgorithmConfig);
+ m_learnedLabels = copy.m_learnedLabels;
+
+ if (!m_recognizer.empty()) {
+ CopyOpenCVAlgorithmParameters(copy.m_recognizer, m_recognizer);
+ }
+ }
+
+ return *this;
}
FaceRecognitionModel::~FaceRecognitionModel()
{
- ; /* NULL */
+ ; /* NULL */
}
int FaceRecognitionModel::save(const std::string& fileName)
{
- if (!m_recognizer.empty())
- {
- /* find directory */
- std::string prefix_path = std::string(app_get_data_path());
- LOGD("prefix_path: %s", prefix_path.c_str());
-
- std::string filePath;
- filePath += prefix_path;
- filePath += fileName;
-
- /* check the directory is available */
- std::string prefix_path_check = filePath.substr(0, filePath.find_last_of('/'));
- if (access(prefix_path_check.c_str(),F_OK))
- {
- LOGE("Can't save recognition model. Path[%s] doesn't existed.", prefix_path_check.c_str());
-
- return MEDIA_VISION_ERROR_INVALID_PATH;
- }
-
- cv::FileStorage storage(filePath, cv::FileStorage::WRITE);
- if (!storage.isOpened())
- {
- LOGE("Can't save recognition model. Write to file permission denied.");
- return MEDIA_VISION_ERROR_PERMISSION_DENIED;
- }
-
- switch (m_learnAlgorithmConfig.mModelType)
- {
- case MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES:
- storage << "algorithm" << "Eigenfaces";
- break;
- case MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES:
- storage << "algorithm" << "Fisherfaces";
- break;
- case MEDIA_VISION_FACE_MODEL_TYPE_LBPH:
- storage << "algorithm" << "LBPH";
- break;
- default:
- storage.release();
- return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT;
- }
-
- storage << "can_recognize" << m_canRecognize;
- m_recognizer->save(storage);
-
- storage.release();
- }
- else
- {
- LOGE("Attempt to save recognition model before learn");
- return MEDIA_VISION_ERROR_INVALID_OPERATION;
- }
-
- return MEDIA_VISION_ERROR_NONE;
+ if (!m_recognizer.empty()) {
+ /* find directory */
+ std::string prefix_path = std::string(app_get_data_path());
+ LOGD("prefix_path: %s", prefix_path.c_str());
+
+ std::string filePath;
+ filePath += prefix_path;
+ filePath += fileName;
+
+ /* check the directory is available */
+ std::string prefix_path_check = filePath.substr(0, filePath.find_last_of('/'));
+ if (access(prefix_path_check.c_str(), F_OK)) {
+ LOGE("Can't save recognition model. Path[%s] doesn't existed.", prefix_path_check.c_str());
+
+ return MEDIA_VISION_ERROR_INVALID_PATH;
+ }
+
+ cv::FileStorage storage(filePath, cv::FileStorage::WRITE);
+ if (!storage.isOpened()) {
+ LOGE("Can't save recognition model. Write to file permission denied.");
+ return MEDIA_VISION_ERROR_PERMISSION_DENIED;
+ }
+
+ switch (m_learnAlgorithmConfig.mModelType) {
+ case MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES:
+ storage << "algorithm" << "Eigenfaces";
+ break;
+ case MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES:
+ storage << "algorithm" << "Fisherfaces";
+ break;
+ case MEDIA_VISION_FACE_MODEL_TYPE_LBPH:
+ storage << "algorithm" << "LBPH";
+ break;
+ default:
+ storage.release();
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT;
+ }
+
+ storage << "can_recognize" << m_canRecognize;
+ m_recognizer->save(storage);
+
+ storage.release();
+ } else {
+ LOGE("Attempt to save recognition model before learn");
+ return MEDIA_VISION_ERROR_INVALID_OPERATION;
+ }
+
+ return MEDIA_VISION_ERROR_NONE;
}
int FaceRecognitionModel::load(const std::string& fileName)
{
- /* find directory */
- std::string prefix_path = std::string(app_get_data_path());
- LOGD("prefix_path: %s", prefix_path.c_str());
-
- std::string filePath;
- filePath += prefix_path;
- filePath += fileName;
-
- if (access(filePath.c_str(),F_OK))
- {
- LOGE("Can't load face recognition model. File[%s] doesn't exist.", filePath.c_str());
-
- return MEDIA_VISION_ERROR_INVALID_PATH;
- }
-
- cv::FileStorage storage(filePath, cv::FileStorage::READ);
- if (!storage.isOpened())
- {
- LOGE("Can't load recognition model. Read from file permission denied.");
-
- return MEDIA_VISION_ERROR_PERMISSION_DENIED;
- }
-
- LOGD("Loading recognition model from file.");
-
- std::string algName;
- int canRecognize = 0;
- storage["algorithm"] >> algName;
- storage["can_recognize"] >> canRecognize;
-
- cv::Ptr<cv::FaceRecognizer> tempRecognizer;
- FaceRecognitionModelConfig tempConfig;
- std::set<int> tempLearnedLabels;
-
- if (algName == "Eigenfaces")
- {
- tempRecognizer = cv::createEigenFaceRecognizer();
- tempRecognizer->load(storage);
- tempConfig.mModelType =
- MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES;
- tempConfig.mNumComponents =
- tempRecognizer->getInt("ncomponents");
- ParseOpenCVLabels(tempRecognizer, tempLearnedLabels);
- }
- else if (algName == "Fisherfaces")
- {
- tempRecognizer = cv::createFisherFaceRecognizer();
- tempRecognizer->load(storage);
- tempConfig.mModelType =
- MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES;
- tempConfig.mNumComponents =
- tempRecognizer->getInt("ncomponents");
- ParseOpenCVLabels(tempRecognizer, tempLearnedLabels);
- }
- else if (algName == "LBPH")
- {
- tempRecognizer = cv::createLBPHFaceRecognizer();
- tempRecognizer->load(storage);
- tempConfig.mModelType =
- MEDIA_VISION_FACE_MODEL_TYPE_LBPH;
- tempConfig.mGridX = tempRecognizer->getInt("grid_x");
- tempConfig.mGridY = tempRecognizer->getInt("grid_y");
- tempConfig.mNeighbors = tempRecognizer->getInt("neighbors");
- tempConfig.mRadius = tempRecognizer->getInt("radius");
- ParseOpenCVLabels(tempRecognizer, tempLearnedLabels);
- }
- else
- {
- tempConfig = FaceRecognitionModelConfig();
- LOGE("Failed to load face recognition model from file. File is in "
- "unsupported format");
-
- storage.release();
-
- return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT;
- }
-
- tempConfig.mThreshold = tempRecognizer->getDouble("threshold");
-
- LOGD("Recognition model of [%s] type has been loaded from file",
- algName.c_str());
-
- storage.release();
-
- m_recognizer = tempRecognizer;
- m_learnAlgorithmConfig = tempConfig;
- m_canRecognize = (canRecognize == 1);
- m_learnedLabels.clear();
- m_learnedLabels = tempLearnedLabels;
-
- return MEDIA_VISION_ERROR_NONE;
+ /* find directory */
+ std::string prefix_path = std::string(app_get_data_path());
+ LOGD("prefix_path: %s", prefix_path.c_str());
+
+ std::string filePath;
+ filePath += prefix_path;
+ filePath += fileName;
+
+ if (access(filePath.c_str(), F_OK)) {
+ LOGE("Can't load face recognition model. File[%s] doesn't exist.", filePath.c_str());
+
+ return MEDIA_VISION_ERROR_INVALID_PATH;
+ }
+
+ cv::FileStorage storage(filePath, cv::FileStorage::READ);
+ if (!storage.isOpened()) {
+ LOGE("Can't load recognition model. Read from file permission denied.");
+
+ return MEDIA_VISION_ERROR_PERMISSION_DENIED;
+ }
+
+ LOGD("Loading recognition model from file.");
+
+ std::string algName;
+ int canRecognize = 0;
+ storage["algorithm"] >> algName;
+ storage["can_recognize"] >> canRecognize;
+
+ cv::Ptr<cv::FaceRecognizer> tempRecognizer;
+ FaceRecognitionModelConfig tempConfig;
+ std::set<int> tempLearnedLabels;
+
+ if (algName == "Eigenfaces") {
+ tempRecognizer = cv::createEigenFaceRecognizer();
+ tempRecognizer->load(storage);
+ tempConfig.mModelType =
+ MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES;
+ tempConfig.mNumComponents =
+ tempRecognizer->getInt("ncomponents");
+ ParseOpenCVLabels(tempRecognizer, tempLearnedLabels);
+ } else if (algName == "Fisherfaces") {
+ tempRecognizer = cv::createFisherFaceRecognizer();
+ tempRecognizer->load(storage);
+ tempConfig.mModelType =
+ MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES;
+ tempConfig.mNumComponents =
+ tempRecognizer->getInt("ncomponents");
+ ParseOpenCVLabels(tempRecognizer, tempLearnedLabels);
+ } else if (algName == "LBPH") {
+ tempRecognizer = cv::createLBPHFaceRecognizer();
+ tempRecognizer->load(storage);
+ tempConfig.mModelType =
+ MEDIA_VISION_FACE_MODEL_TYPE_LBPH;
+ tempConfig.mGridX = tempRecognizer->getInt("grid_x");
+ tempConfig.mGridY = tempRecognizer->getInt("grid_y");
+ tempConfig.mNeighbors = tempRecognizer->getInt("neighbors");
+ tempConfig.mRadius = tempRecognizer->getInt("radius");
+ ParseOpenCVLabels(tempRecognizer, tempLearnedLabels);
+ } else {
+ tempConfig = FaceRecognitionModelConfig();
+ LOGE("Failed to load face recognition model from file. File is in "
+ "unsupported format");
+
+ storage.release();
+
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT;
+ }
+
+ tempConfig.mThreshold = tempRecognizer->getDouble("threshold");
+
+ LOGD("Recognition model of [%s] type has been loaded from file",
+ algName.c_str());
+
+ storage.release();
+
+ m_recognizer = tempRecognizer;
+ m_learnAlgorithmConfig = tempConfig;
+ m_canRecognize = (canRecognize == 1);
+ m_learnedLabels.clear();
+ m_learnedLabels = tempLearnedLabels;
+
+ return MEDIA_VISION_ERROR_NONE;
}
int FaceRecognitionModel::addFaceExample(
- const cv::Mat& faceImage,
- int faceLabel)
+ const cv::Mat& faceImage,
+ int faceLabel)
{
- m_faceSamples[faceLabel].push_back(faceImage);
+ m_faceSamples[faceLabel].push_back(faceImage);
- LOGD("Added face image example for label %i for recognition model",
- faceLabel);
+ LOGD("Added face image example for label %i for recognition model",
+ faceLabel);
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int FaceRecognitionModel::resetFaceExamples(void)
{
- m_faceSamples.clear();
+ m_faceSamples.clear();
- LOGD("All face image examples have been removed from recognition model");
+ LOGD("All face image examples have been removed from recognition model");
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int FaceRecognitionModel::resetFaceExamples(int faceLabel)
{
- if (1 > m_faceSamples.erase(faceLabel))
- {
- LOGD("Failed to remove face image examples for label %i. "
- "No such examples", faceLabel);
+ if (1 > m_faceSamples.erase(faceLabel)) {
+ LOGD("Failed to remove face image examples for label %i. "
+ "No such examples", faceLabel);
- return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE;
- }
+ return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE;
+ }
- LOGD("Face image examples for label %i have been removed from "
- "recognition model", faceLabel);
+ LOGD("Face image examples for label %i have been removed from "
+ "recognition model", faceLabel);
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
const std::set<int>& FaceRecognitionModel::getFaceLabels(void) const
{
- return m_learnedLabels;
+ return m_learnedLabels;
}
int FaceRecognitionModel::learn(const FaceRecognitionModelConfig& config)
{
- bool isIncremental = false;
- bool isUnisize = false;
-
- if (MEDIA_VISION_FACE_MODEL_TYPE_LBPH == config.mModelType)
- {
- isIncremental = true;
- }
-
- if (MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES == config.mModelType ||
- MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES == config.mModelType)
- {
- isUnisize = true;
- }
-
- std::vector<cv::Mat> samples;
- std::vector<int> labels;
- std::set<int> learnedLabels;
-
- if (isIncremental)
- {
- learnedLabels.insert(m_learnedLabels.begin(), m_learnedLabels.end());
- }
-
- std::map<int, std::vector<cv::Mat> >::const_iterator it =
- m_faceSamples.begin();
- for (; it != m_faceSamples.end(); ++it)
- {
- const size_t faceClassSamplesSize = it->second.size();
- labels.insert(labels.end(), faceClassSamplesSize, it->first);
- learnedLabels.insert(it->first);
-
- if (!isUnisize)
- {
- LOGD("%u examples has been added with label %i",
- it->second.size(), it->first);
- samples.insert(samples.end(), it->second.begin(), it->second.end());
- }
- else
- {
- for (size_t sampleInd = 0; sampleInd < faceClassSamplesSize; ++sampleInd)
- {
- cv::Mat resizedSample;
- cv::resize(it->second[sampleInd],
- resizedSample,
- cv::Size(config.mImgWidth, config.mImgHeight),
- 1.0, 1.0, cv::INTER_CUBIC);
- samples.push_back(resizedSample);
- }
- }
- }
-
- const size_t samplesSize = samples.size();
- const size_t labelsSize = labels.size();
-
- if (0 != samplesSize && samplesSize == labelsSize)
- {
- LOGD("Start to learn the model for %u samples and %u labels",
- samplesSize, labelsSize);
-
- if (m_learnAlgorithmConfig != config || m_recognizer.empty())
- {
- m_recognizer = CreateRecognitionAlgorithm(config);
- }
-
- if (m_recognizer.empty())
- {
- LOGE("Can't create recognition algorithm for recognition model. "
- "Configuration is not supported by any of known algorithms.");
-
- return MEDIA_VISION_ERROR_NOT_SUPPORTED;
- }
-
- isIncremental ? m_recognizer->update(samples, labels) :
- m_recognizer->train(samples, labels);
- m_canRecognize = true;
- m_learnedLabels.clear();
- m_learnedLabels = learnedLabels;
- }
- else
- {
- LOGE("Can't create recognition algorithm for no examples. Try to add "
- "some face examples before learning");
-
- return MEDIA_VISION_ERROR_NO_DATA;
- }
-
- m_learnAlgorithmConfig = config;
-
- LOGD("Recognition model has been learned");
-
- return MEDIA_VISION_ERROR_NONE;
+ bool isIncremental = false;
+ bool isUnisize = false;
+
+ if (MEDIA_VISION_FACE_MODEL_TYPE_LBPH == config.mModelType) {
+ isIncremental = true;
+ }
+
+ if (MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES == config.mModelType ||
+ MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES == config.mModelType) {
+ isUnisize = true;
+ }
+
+ std::vector<cv::Mat> samples;
+ std::vector<int> labels;
+ std::set<int> learnedLabels;
+
+ if (isIncremental) {
+ learnedLabels.insert(m_learnedLabels.begin(), m_learnedLabels.end());
+ }
+
+ std::map<int, std::vector<cv::Mat> >::const_iterator it =
+ m_faceSamples.begin();
+ for (; it != m_faceSamples.end(); ++it) {
+ const size_t faceClassSamplesSize = it->second.size();
+ labels.insert(labels.end(), faceClassSamplesSize, it->first);
+ learnedLabels.insert(it->first);
+
+ if (!isUnisize) {
+ LOGD("%u examples has been added with label %i",
+ it->second.size(), it->first);
+ samples.insert(samples.end(), it->second.begin(), it->second.end());
+ } else {
+ for (size_t sampleInd = 0; sampleInd < faceClassSamplesSize; ++sampleInd) {
+ cv::Mat resizedSample;
+ cv::resize(it->second[sampleInd],
+ resizedSample,
+ cv::Size(config.mImgWidth, config.mImgHeight),
+ 1.0, 1.0, cv::INTER_CUBIC);
+ samples.push_back(resizedSample);
+ }
+ }
+ }
+
+ const size_t samplesSize = samples.size();
+ const size_t labelsSize = labels.size();
+
+ if (0 != samplesSize && samplesSize == labelsSize) {
+ LOGD("Start to learn the model for %u samples and %u labels",
+ samplesSize, labelsSize);
+
+ if (m_learnAlgorithmConfig != config || m_recognizer.empty()) {
+ m_recognizer = CreateRecognitionAlgorithm(config);
+ }
+
+ if (m_recognizer.empty()) {
+ LOGE("Can't create recognition algorithm for recognition model. "
+ "Configuration is not supported by any of known algorithms.");
+
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED;
+ }
+
+ isIncremental ? m_recognizer->update(samples, labels) :
+ m_recognizer->train(samples, labels);
+ m_canRecognize = true;
+ m_learnedLabels.clear();
+ m_learnedLabels = learnedLabels;
+ } else {
+ LOGE("Can't create recognition algorithm for no examples. Try to add "
+ "some face examples before learning");
+
+ return MEDIA_VISION_ERROR_NO_DATA;
+ }
+
+ m_learnAlgorithmConfig = config;
+
+ LOGD("Recognition model has been learned");
+
+ return MEDIA_VISION_ERROR_NONE;
}
int FaceRecognitionModel::recognize(const cv::Mat& image, FaceRecognitionResults& results)
{
- if (!m_recognizer.empty() && m_canRecognize)
- {
- double absConf = 0.0;
- m_recognizer->predict(image, results.mFaceLabel, absConf);
- // Normalize the absolute value of the confidence
- absConf = exp(7.5 - (0.05 * absConf));
- results.mConfidence = absConf / (1 + absConf);
- results.mIsRecognized = true;
- results.mFaceLocation = cv::Rect(0, 0, image.cols, image.rows);
- }
- else
- {
- LOGE("Attempt to recognize faces with untrained model");
- return MEDIA_VISION_ERROR_INVALID_OPERATION;
- }
-
- return MEDIA_VISION_ERROR_NONE;
+ if (!m_recognizer.empty() && m_canRecognize) {
+ double absConf = 0.0;
+ m_recognizer->predict(image, results.mFaceLabel, absConf);
+ /* Normalize the absolute value of the confidence */
+ absConf = exp(7.5 - (0.05 * absConf));
+ results.mConfidence = absConf / (1 + absConf);
+ results.mIsRecognized = true;
+ results.mFaceLocation = cv::Rect(0, 0, image.cols, image.rows);
+ } else {
+ LOGE("Attempt to recognize faces with untrained model");
+ return MEDIA_VISION_ERROR_INVALID_OPERATION;
+ }
+
+ return MEDIA_VISION_ERROR_NONE;
}
cv::Ptr<cv::FaceRecognizer> FaceRecognitionModel::CreateRecognitionAlgorithm(
- const FaceRecognitionModelConfig& config)
+ const FaceRecognitionModelConfig& config)
{
- cv::Ptr<cv::FaceRecognizer> tempRecognizer;
- switch (config.mModelType)
- {
- case MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES:
- tempRecognizer = cv::createEigenFaceRecognizer(
- config.mNumComponents,
- config.mThreshold);
- break;
- case MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES:
- tempRecognizer = cv::createFisherFaceRecognizer(
- config.mNumComponents,
- config.mThreshold);
- break;
- case MEDIA_VISION_FACE_MODEL_TYPE_LBPH:
- tempRecognizer = cv::createLBPHFaceRecognizer(
- config.mRadius,
- config.mNeighbors,
- config.mGridX,
- config.mGridY,
- config.mThreshold);
- break;
- default:
- return NULL;
- }
-
- return tempRecognizer;
+ cv::Ptr<cv::FaceRecognizer> tempRecognizer;
+ switch (config.mModelType) {
+ case MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES:
+ tempRecognizer = cv::createEigenFaceRecognizer(
+ config.mNumComponents,
+ config.mThreshold);
+ break;
+ case MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES:
+ tempRecognizer = cv::createFisherFaceRecognizer(
+ config.mNumComponents,
+ config.mThreshold);
+ break;
+ case MEDIA_VISION_FACE_MODEL_TYPE_LBPH:
+ tempRecognizer = cv::createLBPHFaceRecognizer(
+ config.mRadius,
+ config.mNeighbors,
+ config.mGridX,
+ config.mGridY,
+ config.mThreshold);
+ break;
+ default:
+ return NULL;
+ }
+
+ return tempRecognizer;
}
} /* Face */
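
A note on the confidence normalization in FaceRecognitionModel::recognize() above: the raw distance d returned by cv::FaceRecognizer::predict() is squashed through a logistic curve. Restating the two code lines in formula form (my reading of the code, not part of the patch):

	\mathrm{conf}(d) = \frac{e^{7.5 - 0.05\,d}}{1 + e^{7.5 - 0.05\,d}} = \sigma(7.5 - 0.05\,d)

so the reported confidence is 0.5 at d = 150, approaches 1 as d tends to 0, and decays towards 0 for large distances.
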
diff --git a/mv_face/face/src/FaceTrackingModel.cpp b/mv_face/face/src/FaceTrackingModel.cpp
index 2c4fdd6b..25fdcb8e 100644
--- a/mv_face/face/src/FaceTrackingModel.cpp
+++ b/mv_face/face/src/FaceTrackingModel.cpp
@@ -23,194 +23,174 @@
#include <unistd.h>
-namespace MediaVision
-{
-namespace Face
-{
-
+namespace MediaVision {
+namespace Face {
FaceTrackingResults::FaceTrackingResults() :
- mIsTracked(false),
- mConfidence(0.f)
+ mIsTracked(false),
+ mConfidence(0.f)
{
- ; /* NULL */
+ ; /* NULL */
}
FaceTrackingModel::FaceTrackingModel() :
- m_canTrack(false),
- m_tracker(new cv::TrackerMedianFlow())
+ m_canTrack(false),
+ m_tracker(new cv::TrackerMedianFlow())
{
- ; /* NULL */
+ ; /* NULL */
}
FaceTrackingModel::FaceTrackingModel(const FaceTrackingModel& origin) :
- m_canTrack(origin.m_canTrack),
- m_tracker(new cv::TrackerMedianFlow())
+ m_canTrack(origin.m_canTrack),
+ m_tracker(new cv::TrackerMedianFlow())
{
- if (!origin.m_tracker.empty())
- {
- origin.m_tracker->copyTo(*(m_tracker.obj));
- }
+ if (!origin.m_tracker.empty()) {
+ origin.m_tracker->copyTo(*(m_tracker.obj));
+ }
}
FaceTrackingModel& FaceTrackingModel::operator=(const FaceTrackingModel& copy)
{
- if (this != &copy)
- {
- m_canTrack = copy.m_canTrack;
- m_tracker = cv::Ptr<cv::TrackerMedianFlow>(new cv::TrackerMedianFlow());
- if (!copy.m_tracker.empty())
- {
- copy.m_tracker->copyTo(*(m_tracker.obj));
- }
- }
-
- return *this;
+ if (this != &copy) {
+ m_canTrack = copy.m_canTrack;
+ m_tracker = cv::Ptr<cv::TrackerMedianFlow>(new cv::TrackerMedianFlow());
+ if (!copy.m_tracker.empty()) {
+ copy.m_tracker->copyTo(*(m_tracker.obj));
+ }
+ }
+
+ return *this;
}
FaceTrackingModel::~FaceTrackingModel()
{
- ; /* NULL */
+ ; /* NULL */
}
int FaceTrackingModel::save(const std::string& fileName)
{
- if (m_tracker.empty())
- {
- LOGE("Can't save tracking model. No tracking algorithm is used");
- return MEDIA_VISION_ERROR_INVALID_OPERATION;
- }
+ if (m_tracker.empty()) {
+ LOGE("Can't save tracking model. No tracking algorithm is used");
+ return MEDIA_VISION_ERROR_INVALID_OPERATION;
+ }
- std::string prefix_path = std::string(app_get_data_path());
- LOGD("prefix_path: %s", prefix_path.c_str());
+ std::string prefix_path = std::string(app_get_data_path());
+ LOGD("prefix_path: %s", prefix_path.c_str());
- std::string filePath;
- filePath += prefix_path;
- filePath += fileName;
+ std::string filePath;
+ filePath += prefix_path;
+ filePath += fileName;
- /* check the directory is available */
- std::string prefix_path_check = filePath.substr(0, filePath.find_last_of('/'));
- if (access(prefix_path_check.c_str(),F_OK))
- {
- LOGE("Can't save tracking model. Path[%s] doesn't existed.", prefix_path_check.c_str());
+ /* check the directory is available */
+ std::string prefix_path_check = filePath.substr(0, filePath.find_last_of('/'));
+ if (access(prefix_path_check.c_str(), F_OK)) {
+ LOGE("Can't save tracking model. Path[%s] doesn't existed.", prefix_path_check.c_str());
- return MEDIA_VISION_ERROR_INVALID_PATH;
- }
+ return MEDIA_VISION_ERROR_INVALID_PATH;
+ }
- cv::FileStorage storage(filePath, cv::FileStorage::WRITE);
- if (!storage.isOpened())
- {
- LOGE("Can't save tracking model. Write to file permission denied.");
- return MEDIA_VISION_ERROR_PERMISSION_DENIED;
- }
+ cv::FileStorage storage(filePath, cv::FileStorage::WRITE);
+ if (!storage.isOpened()) {
+ LOGE("Can't save tracking model. Write to file permission denied.");
+ return MEDIA_VISION_ERROR_PERMISSION_DENIED;
+ }
- LOGD("Storing tracking model to the file started.");
+ LOGD("Storing tracking model to the file started.");
- storage << "canTrack" << (m_canTrack ? 1 : 0);
- m_tracker->write(storage);
+ storage << "canTrack" << (m_canTrack ? 1 : 0);
+ m_tracker->write(storage);
- LOGD("Storing tracking model to the file finished.");
+ LOGD("Storing tracking model to the file finished.");
- storage.release();
+ storage.release();
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int FaceTrackingModel::load(const std::string& fileName)
{
- /* find directory */
- std::string prefix_path = std::string(app_get_data_path());
- LOGD("prefix_path: %s", prefix_path.c_str());
+ /* find directory */
+ std::string prefix_path = std::string(app_get_data_path());
+ LOGD("prefix_path: %s", prefix_path.c_str());
- std::string filePath;
- filePath += prefix_path;
- filePath += fileName;
+ std::string filePath;
+ filePath += prefix_path;
+ filePath += fileName;
- if (access(filePath.c_str(), F_OK))
- {
- LOGE("Can't load face tracking model. File[%s] doesn't exist.", filePath.c_str());
+ if (access(filePath.c_str(), F_OK)) {
+ LOGE("Can't load face tracking model. File[%s] doesn't exist.", filePath.c_str());
- return MEDIA_VISION_ERROR_INVALID_PATH;
- }
+ return MEDIA_VISION_ERROR_INVALID_PATH;
+ }
- cv::FileStorage storage(filePath, cv::FileStorage::READ);
- if (!storage.isOpened())
- {
- LOGE("Can't load tracking model. Read from file permission denied.");
- return MEDIA_VISION_ERROR_PERMISSION_DENIED;
- }
+ cv::FileStorage storage(filePath, cv::FileStorage::READ);
+ if (!storage.isOpened()) {
+ LOGE("Can't load tracking model. Read from file permission denied.");
+ return MEDIA_VISION_ERROR_PERMISSION_DENIED;
+ }
- int canTrack = 0;
- storage["canTrack"] >> canTrack;
- m_canTrack = (0 != canTrack);
- m_tracker->read(storage);
+ int canTrack = 0;
+ storage["canTrack"] >> canTrack;
+ m_canTrack = (0 != canTrack);
+ m_tracker->read(storage);
- LOGD("Loading tracking model from file.");
+ LOGD("Loading tracking model from file.");
- storage.release();
+ storage.release();
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int FaceTrackingModel::prepare(const cv::Mat& image)
{
- if (m_tracker.empty())
- {
- LOGE("Failed to prepare tracking model. No tracking algorithm "
- "is available.");
- return MEDIA_VISION_ERROR_INVALID_OPERATION;
- }
-
- cv::Rect_<float> lastBoundingBox;
- if (!m_tracker->isInited())
- {
- lastBoundingBox.x = 0;
- lastBoundingBox.y = 0;
- lastBoundingBox.width = image.cols;
- lastBoundingBox.height = image.rows;
- }
- else
- {
- lastBoundingBox = m_tracker->getLastBoundingBox();
- }
-
- return prepare(image, lastBoundingBox);
+ if (m_tracker.empty()) {
+ LOGE("Failed to prepare tracking model. No tracking algorithm "
+ "is available.");
+ return MEDIA_VISION_ERROR_INVALID_OPERATION;
+ }
+
+ cv::Rect_<float> lastBoundingBox;
+ if (!m_tracker->isInited()) {
+ lastBoundingBox.x = 0;
+ lastBoundingBox.y = 0;
+ lastBoundingBox.width = image.cols;
+ lastBoundingBox.height = image.rows;
+ } else {
+ lastBoundingBox = m_tracker->getLastBoundingBox();
+ }
+
+ return prepare(image, lastBoundingBox);
}
int FaceTrackingModel::prepare(
- const cv::Mat& image,
- const cv::Rect_<float>& boundingBox)
+ const cv::Mat& image,
+ const cv::Rect_<float>& boundingBox)
{
- if (m_tracker.empty())
- {
- LOGE("Failed to prepare tracking model. No tracking algorithm "
- "is available.");
- return MEDIA_VISION_ERROR_INVALID_OPERATION;
- }
-
- if (!m_tracker->init(image, boundingBox))
- {
- LOGE("Failed to prepare tracking model.");
- return MEDIA_VISION_ERROR_INVALID_OPERATION;
- }
-
- m_canTrack = true;
- return MEDIA_VISION_ERROR_NONE;
+ if (m_tracker.empty()) {
+ LOGE("Failed to prepare tracking model. No tracking algorithm "
+ "is available.");
+ return MEDIA_VISION_ERROR_INVALID_OPERATION;
+ }
+
+ if (!m_tracker->init(image, boundingBox)) {
+ LOGE("Failed to prepare tracking model.");
+ return MEDIA_VISION_ERROR_INVALID_OPERATION;
+ }
+
+ m_canTrack = true;
+ return MEDIA_VISION_ERROR_NONE;
}
int FaceTrackingModel::track(const cv::Mat& image, FaceTrackingResults& results)
{
- if (!m_tracker.empty() && m_canTrack)
- {
- results.mIsTracked = m_tracker->update(image, results.mFaceLocation);
- results.mConfidence = m_tracker->getLastConfidence();
- }
- else
- {
- LOGE("Attempt to track face with not prepared model");
- return MEDIA_VISION_ERROR_INVALID_OPERATION;
- }
-
- return MEDIA_VISION_ERROR_NONE;
+ if (!m_tracker.empty() && m_canTrack) {
+ results.mIsTracked = m_tracker->update(image, results.mFaceLocation);
+ results.mConfidence = m_tracker->getLastConfidence();
+ } else {
+ LOGE("Attempt to track face with not prepared model");
+ return MEDIA_VISION_ERROR_INVALID_OPERATION;
+ }
+
+ return MEDIA_VISION_ERROR_NONE;
}
} /* Face */
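
For reference, a minimal usage sketch of the tracking model API shown above (hypothetical caller code, not part of the patch; the include paths and the mv_common.h error-code header are assumptions):

	#include <opencv2/core/core.hpp>
	#include <mv_common.h>              /* assumed, for MEDIA_VISION_ERROR_NONE */
	#include "FaceTrackingModel.h"      /* assumed include path */

	int trackOnce(const cv::Mat& firstGrayFrame,
			const cv::Rect_<float>& faceBox,
			const cv::Mat& nextGrayFrame)
	{
		MediaVision::Face::FaceTrackingModel model;

		/* Initializes the underlying cv::TrackerMedianFlow with the first
		 * frame and the detected face bounding box. */
		int err = model.prepare(firstGrayFrame, faceBox);
		if (err != MEDIA_VISION_ERROR_NONE)
			return err;

		/* Predicts the face location on the next frame; mIsTracked,
		 * mFaceLocation and mConfidence are filled on success. */
		MediaVision::Face::FaceTrackingResults results;
		err = model.track(nextGrayFrame, results);
		return err;
	}
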
diff --git a/mv_face/face/src/FaceUtil.cpp b/mv_face/face/src/FaceUtil.cpp
index 7d49dd3e..1430fe14 100644
--- a/mv_face/face/src/FaceUtil.cpp
+++ b/mv_face/face/src/FaceUtil.cpp
@@ -21,117 +21,110 @@
#include <opencv2/imgproc/types_c.h>
#include <opencv2/highgui/highgui.hpp>
-namespace MediaVision
-{
-namespace Face
-{
-
+namespace MediaVision {
+namespace Face {
RecognitionParams::RecognitionParams(FaceRecognitionModelType algType) :
- mRecognitionAlgType(algType)
+ mRecognitionAlgType(algType)
{
- ; /* NULL */
+ ; /* NULL */
}
RecognitionParams::RecognitionParams() :
- mRecognitionAlgType(MEDIA_VISION_FACE_MODEL_TYPE_LBPH)
+ mRecognitionAlgType(MEDIA_VISION_FACE_MODEL_TYPE_LBPH)
{
- ; /* NULL */
+ ; /* NULL */
}
int convertSourceMV2GrayCV(mv_source_h mvSource, cv::Mat& cvSource)
{
- MEDIA_VISION_INSTANCE_CHECK(mvSource);
+ MEDIA_VISION_INSTANCE_CHECK(mvSource);
- int depth = CV_8U; // Default depth. 1 byte for channel.
- unsigned int channelsNumber = 0;
- unsigned int width = 0, height = 0;
- unsigned int bufferSize = 0;
- unsigned char *buffer = NULL;
+ int depth = CV_8U; /* Default depth: 1 byte per channel. */
+ unsigned int channelsNumber = 0;
+ unsigned int width = 0, height = 0;
+ unsigned int bufferSize = 0;
+ unsigned char *buffer = NULL;
- mv_colorspace_e colorspace = MEDIA_VISION_COLORSPACE_INVALID;
+ mv_colorspace_e colorspace = MEDIA_VISION_COLORSPACE_INVALID;
- MEDIA_VISION_ASSERT(mv_source_get_width(mvSource, &width),
- "Failed to get the width.");
- MEDIA_VISION_ASSERT(mv_source_get_height(mvSource, &height),
- "Failed to get the height.");
- MEDIA_VISION_ASSERT(mv_source_get_colorspace(mvSource, &colorspace),
- "Failed to get the colorspace.");
- MEDIA_VISION_ASSERT(mv_source_get_buffer(mvSource, &buffer, &bufferSize),
- "Failed to get the buffer size.");
+ MEDIA_VISION_ASSERT(mv_source_get_width(mvSource, &width),
+ "Failed to get the width.");
+ MEDIA_VISION_ASSERT(mv_source_get_height(mvSource, &height),
+ "Failed to get the height.");
+ MEDIA_VISION_ASSERT(mv_source_get_colorspace(mvSource, &colorspace),
+ "Failed to get the colorspace.");
+ MEDIA_VISION_ASSERT(mv_source_get_buffer(mvSource, &buffer, &bufferSize),
+ "Failed to get the buffer size.");
- int conversionType = -1; // Type of conversion from given colorspace to gray
- switch(colorspace)
- {
- case MEDIA_VISION_COLORSPACE_INVALID:
- LOGE("Error: mv_source has invalid colorspace.");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- case MEDIA_VISION_COLORSPACE_Y800:
- channelsNumber = 1;
- // Without convertion
- break;
- case MEDIA_VISION_COLORSPACE_I420:
- channelsNumber = 1;
- height *= 1.5;
- conversionType = CV_YUV2GRAY_I420;
- break;
- case MEDIA_VISION_COLORSPACE_NV12:
- channelsNumber = 1;
- height *= 1.5;
- conversionType = CV_YUV2GRAY_NV12;
- break;
- case MEDIA_VISION_COLORSPACE_YV12:
- channelsNumber = 1;
- height *= 1.5;
- conversionType = CV_YUV2GRAY_YV12;
- break;
- case MEDIA_VISION_COLORSPACE_NV21:
- channelsNumber = 1;
- height *= 1.5;
- conversionType = CV_YUV2GRAY_NV21;
- break;
- case MEDIA_VISION_COLORSPACE_YUYV:
- channelsNumber = 2;
- conversionType = CV_YUV2GRAY_YUYV;
- break;
- case MEDIA_VISION_COLORSPACE_UYVY:
- channelsNumber = 2;
- conversionType = CV_YUV2GRAY_UYVY;
- break;
- case MEDIA_VISION_COLORSPACE_422P:
- channelsNumber = 2;
- conversionType = CV_YUV2GRAY_Y422;
- break;
- case MEDIA_VISION_COLORSPACE_RGB565:
- channelsNumber = 2;
- conversionType = CV_BGR5652GRAY;
- break;
- case MEDIA_VISION_COLORSPACE_RGB888:
- channelsNumber = 3;
- conversionType = CV_RGB2GRAY;
- break;
- case MEDIA_VISION_COLORSPACE_RGBA:
- channelsNumber = 4;
- conversionType = CV_RGBA2GRAY;
- break;
- default:
- LOGE("Error: mv_source has unsupported colorspace.");
- return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT;
- }
+ int conversionType = -1; /* Type of conversion from given colorspace to gray */
+ switch(colorspace) {
+ case MEDIA_VISION_COLORSPACE_INVALID:
+ LOGE("Error: mv_source has invalid colorspace.");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ case MEDIA_VISION_COLORSPACE_Y800:
+ channelsNumber = 1;
+ /* Without conversion */
+ break;
+ case MEDIA_VISION_COLORSPACE_I420:
+ channelsNumber = 1;
+ height *= 1.5;
+ conversionType = CV_YUV2GRAY_I420;
+ break;
+ case MEDIA_VISION_COLORSPACE_NV12:
+ channelsNumber = 1;
+ height *= 1.5;
+ conversionType = CV_YUV2GRAY_NV12;
+ break;
+ case MEDIA_VISION_COLORSPACE_YV12:
+ channelsNumber = 1;
+ height *= 1.5;
+ conversionType = CV_YUV2GRAY_YV12;
+ break;
+ case MEDIA_VISION_COLORSPACE_NV21:
+ channelsNumber = 1;
+ height *= 1.5;
+ conversionType = CV_YUV2GRAY_NV21;
+ break;
+ case MEDIA_VISION_COLORSPACE_YUYV:
+ channelsNumber = 2;
+ conversionType = CV_YUV2GRAY_YUYV;
+ break;
+ case MEDIA_VISION_COLORSPACE_UYVY:
+ channelsNumber = 2;
+ conversionType = CV_YUV2GRAY_UYVY;
+ break;
+ case MEDIA_VISION_COLORSPACE_422P:
+ channelsNumber = 2;
+ conversionType = CV_YUV2GRAY_Y422;
+ break;
+ case MEDIA_VISION_COLORSPACE_RGB565:
+ channelsNumber = 2;
+ conversionType = CV_BGR5652GRAY;
+ break;
+ case MEDIA_VISION_COLORSPACE_RGB888:
+ channelsNumber = 3;
+ conversionType = CV_RGB2GRAY;
+ break;
+ case MEDIA_VISION_COLORSPACE_RGBA:
+ channelsNumber = 4;
+ conversionType = CV_RGBA2GRAY;
+ break;
+ default:
+ LOGE("Error: mv_source has unsupported colorspace.");
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT;
+ }
- if (conversionType == -1) // Without conversion
- {
- cvSource = cv::Mat(cv::Size(width, height),
- CV_MAKETYPE(depth, channelsNumber), buffer).clone();
- }
- else // Conversion
- {
- // Class for representation the given image as cv::Mat before conversion
- cv::Mat origin(cv::Size(width, height),
- CV_MAKETYPE(depth, channelsNumber), buffer);
- cv::cvtColor(origin, cvSource, conversionType);
- }
+ if (conversionType == -1) { /* Without conversion */
+ cvSource = cv::Mat(cv::Size(width, height),
+ CV_MAKETYPE(depth, channelsNumber), buffer).clone();
+ } else { /* With conversion */
+ /* Represents the given image as cv::Mat before conversion */
+ cv::Mat origin(cv::Size(width, height),
+ CV_MAKETYPE(depth, channelsNumber), buffer);
+ cv::cvtColor(origin, cvSource, conversionType);
+ }
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
} /* Face */
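
As an aside, the single-plane trick used above for the planar YUV formats (the height is grown to 3/2 before conversion) can be reproduced with plain OpenCV. A minimal sketch for the NV12 branch, with placeholder buffer/width/height arguments (illustrative only, not part of the patch):

	#include <opencv2/core/core.hpp>
	#include <opencv2/imgproc/imgproc.hpp>
	#include <opencv2/imgproc/types_c.h>

	cv::Mat nv12ToGray(unsigned char *buffer, int width, int height)
	{
		/* NV12 keeps a full-resolution Y plane followed by interleaved UV
		 * at quarter resolution, so the raw buffer is viewed as a single
		 * 8-bit channel with height * 3/2 rows before conversion. */
		cv::Mat yuv(cv::Size(width, height * 3 / 2), CV_8UC1, buffer);

		cv::Mat gray;
		cv::cvtColor(yuv, gray, CV_YUV2GRAY_NV12); /* allocates a new matrix */
		return gray;
	}
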
diff --git a/mv_face/face/src/TrackerMedianFlow.cpp b/mv_face/face/src/TrackerMedianFlow.cpp
index a7a3b4f0..ee4bc983 100644
--- a/mv_face/face/src/TrackerMedianFlow.cpp
+++ b/mv_face/face/src/TrackerMedianFlow.cpp
@@ -47,414 +47,388 @@
#include <algorithm>
#include <cmath>
-namespace
-{
- float FloatEps = 10e-6f;
+namespace {
+ float FloatEps = 10e-6f;
} /* anonymous namespace */
-namespace cv
-{
-
+namespace cv {
TrackerMedianFlow::Params::Params()
{
- mPointsInGrid = 10;
- mWindowSize = Size(3, 3);
- mPyrMaxLevel = 5;
+ mPointsInGrid = 10;
+ mWindowSize = Size(3, 3);
+ mPyrMaxLevel = 5;
}
-void TrackerMedianFlow::Params::read( const cv::FileNode& fn )
+void TrackerMedianFlow::Params::read(const cv::FileNode& fn)
{
- mPointsInGrid = fn["pointsInGrid"];
- int winSizeHeight = fn["windowSizeHeight"];
- int winSizeWidth = fn["windowSizeWidth"];
- mWindowSize = Size(winSizeHeight, winSizeWidth);
- mPyrMaxLevel = fn["pyrMaxLevel"];
+ mPointsInGrid = fn["pointsInGrid"];
+ int winSizeHeight = fn["windowSizeHeight"];
+ int winSizeWidth = fn["windowSizeWidth"];
+ mWindowSize = Size(winSizeHeight, winSizeWidth);
+ mPyrMaxLevel = fn["pyrMaxLevel"];
}
-void TrackerMedianFlow::Params::write( cv::FileStorage& fs ) const
+void TrackerMedianFlow::Params::write(cv::FileStorage& fs) const
{
- fs << "pointsInGrid" << mPointsInGrid;
- fs << "windowSizeHeight" << mWindowSize.height;
- fs << "windowSizeWidth" << mWindowSize.width;
- fs << "pyrMaxLevel" << mPyrMaxLevel;
+ fs << "pointsInGrid" << mPointsInGrid;
+ fs << "windowSizeHeight" << mWindowSize.height;
+ fs << "windowSizeWidth" << mWindowSize.width;
+ fs << "pyrMaxLevel" << mPyrMaxLevel;
}
TrackerMedianFlow::TrackerMedianFlow(Params paramsIn) :
- termcrit(TermCriteria::COUNT | TermCriteria::EPS,20,0.3),
- m_confidence(0.0)
+ termcrit(TermCriteria::COUNT | TermCriteria::EPS, 20, 0.3),
+ m_confidence(0.0)
{
- params = paramsIn;
- isInit = false;
+ params = paramsIn;
+ isInit = false;
}
bool TrackerMedianFlow::copyTo(TrackerMedianFlow& copy) const
{
- copy.isInit = isInit;
- copy.params = params;
- copy.termcrit = termcrit;
- copy.m_boundingBox = m_boundingBox;
- copy.m_confidence = m_confidence;
- m_image.copyTo(copy.m_image);
- return true;
+ copy.isInit = isInit;
+ copy.params = params;
+ copy.termcrit = termcrit;
+ copy.m_boundingBox = m_boundingBox;
+ copy.m_confidence = m_confidence;
+ m_image.copyTo(copy.m_image);
+ return true;
}
bool TrackerMedianFlow::init(const Mat& image, const Rect_<float>& boundingBox)
{
- if (image.empty())
- {
- return false;
- }
-
- image.copyTo(m_image);
- buildOpticalFlowPyramid(
- m_image, m_pyramid, params.mWindowSize, params.mPyrMaxLevel);
- m_boundingBox = boundingBox;
-
- isInit = true;
- return isInit;
+ if (image.empty()) {
+ return false;
+ }
+
+ image.copyTo(m_image);
+ buildOpticalFlowPyramid(
+ m_image, m_pyramid, params.mWindowSize, params.mPyrMaxLevel);
+ m_boundingBox = boundingBox;
+
+ isInit = true;
+ return isInit;
}
bool TrackerMedianFlow::update(const Mat& image, Rect_<float>& boundingBox)
{
- if (!isInit || image.empty()) return false;
-
- // Handles such behaviour when preparation frame has the size
- // different to the tracking frame size. In such case, we resize preparation
- // frame and bounding box. Then, track as usually:
- if (m_image.rows != image.rows || m_image.cols != image.cols)
- {
- const float xFactor = (float) image.cols / m_image.cols;
- const float yFactor = (float) image.rows / m_image.rows;
-
- resize(m_image, m_image, Size(), xFactor, yFactor);
-
- m_boundingBox.x *= xFactor;
- m_boundingBox.y *= yFactor;
- m_boundingBox.width *= xFactor;
- m_boundingBox.height *= yFactor;
- }
-
- Mat oldImage = m_image;
-
- Rect_<float> oldBox = m_boundingBox;
- if(!medianFlowImpl(oldImage, image, oldBox))
- {
- return false;
- }
-
- boundingBox = oldBox;
- image.copyTo(m_image);
- m_boundingBox = boundingBox;
- return true;
+ if (!isInit || image.empty())
+ return false;
+
+ /* Handles the case when the preparation frame size differs from the
+ * tracking frame size. In that case, we resize the preparation frame
+ * and the bounding box, then track as usual:
+ */
+ if (m_image.rows != image.rows || m_image.cols != image.cols) {
+ const float xFactor = (float) image.cols / m_image.cols;
+ const float yFactor = (float) image.rows / m_image.rows;
+
+ resize(m_image, m_image, Size(), xFactor, yFactor);
+
+ m_boundingBox.x *= xFactor;
+ m_boundingBox.y *= yFactor;
+ m_boundingBox.width *= xFactor;
+ m_boundingBox.height *= yFactor;
+ }
+
+ Mat oldImage = m_image;
+
+ Rect_<float> oldBox = m_boundingBox;
+ if(!medianFlowImpl(oldImage, image, oldBox)) {
+ return false;
+ }
+
+ boundingBox = oldBox;
+ image.copyTo(m_image);
+ m_boundingBox = boundingBox;
+ return true;
}
bool TrackerMedianFlow::isInited() const
{
- return isInit;
+ return isInit;
}
float TrackerMedianFlow::getLastConfidence() const
{
- return m_confidence;
+ return m_confidence;
}
Rect_<float> TrackerMedianFlow::getLastBoundingBox() const
{
- return m_boundingBox;
+ return m_boundingBox;
}
bool TrackerMedianFlow::medianFlowImpl(
- Mat oldImage_gray, Mat newImage_gray, Rect_<float>& oldBox)
+ Mat oldImage_gray, Mat newImage_gray, Rect_<float>& oldBox)
{
- std::vector<Point2f> pointsToTrackOld, pointsToTrackNew;
-
- const float gridXStep = oldBox.width / params.mPointsInGrid;
- const float gridYStep = oldBox.height / params.mPointsInGrid;
- for (int i = 0; i < params.mPointsInGrid; i++)
- {
- for (int j = 0; j < params.mPointsInGrid; j++)
- {
- pointsToTrackOld.push_back(
- Point2f(oldBox.x + .5f*gridXStep + 1.f*gridXStep*j,
- oldBox.y + .5f*gridYStep + 1.f*gridYStep*i));
- }
- }
-
- std::vector<uchar> status(pointsToTrackOld.size());
- std::vector<float> errors(pointsToTrackOld.size());
-
- std::vector<Mat> tempPyramid;
- buildOpticalFlowPyramid(
- newImage_gray,
- tempPyramid,
- params.mWindowSize,
- params.mPyrMaxLevel);
-
- calcOpticalFlowPyrLK(m_pyramid,
- tempPyramid,
- pointsToTrackOld,
- pointsToTrackNew,
- status,
- errors,
- params.mWindowSize,
- params.mPyrMaxLevel,
- termcrit);
-
- std::vector<Point2f> di;
- for (size_t idx = 0u; idx < pointsToTrackOld.size(); idx++)
- {
- if (status[idx] == 1)
- {
- di.push_back(pointsToTrackNew[idx] - pointsToTrackOld[idx]);
- }
- }
-
- std::vector<bool> filter_status;
- check_FB(tempPyramid,
- pointsToTrackOld,
- pointsToTrackNew,
- filter_status);
- check_NCC(oldImage_gray,
- newImage_gray,
- pointsToTrackOld,
- pointsToTrackNew,
- filter_status);
-
- for (size_t idx = 0u; idx < pointsToTrackOld.size(); idx++)
- {
- if (!filter_status[idx])
- {
- pointsToTrackOld.erase(pointsToTrackOld.begin() + idx);
- pointsToTrackNew.erase(pointsToTrackNew.begin() + idx);
- filter_status.erase(filter_status.begin() + idx);
- idx--;
- }
- }
-
- if (pointsToTrackOld.size() == 0 || di.size() == 0)
- {
- return false;
- }
-
- Point2f mDisplacement;
- Rect_<float> boxCandidate =
- vote(pointsToTrackOld, pointsToTrackNew, oldBox, mDisplacement);
-
- std::vector<float> displacements;
- for (size_t idx = 0u; idx < di.size(); idx++)
- {
- di[idx] -= mDisplacement;
- displacements.push_back(sqrt(di[idx].ddot(di[idx])));
- }
-
- m_confidence =
- (10.f - getMedian(displacements,(int)displacements.size())) / 10.f;
- if (m_confidence <= 0.f)
- {
- m_confidence = 0.f;
- return false;
- }
-
- m_pyramid.swap(tempPyramid);
- oldBox = boxCandidate;
- return true;
+ std::vector<Point2f> pointsToTrackOld, pointsToTrackNew;
+
+ const float gridXStep = oldBox.width / params.mPointsInGrid;
+ const float gridYStep = oldBox.height / params.mPointsInGrid;
+ for (int i = 0; i < params.mPointsInGrid; i++) {
+ for (int j = 0; j < params.mPointsInGrid; j++) {
+ pointsToTrackOld.push_back(
+ Point2f(oldBox.x + .5f*gridXStep + 1.f*gridXStep*j,
+ oldBox.y + .5f*gridYStep + 1.f*gridYStep*i));
+ }
+ }
+
+ std::vector<uchar> status(pointsToTrackOld.size());
+ std::vector<float> errors(pointsToTrackOld.size());
+
+ std::vector<Mat> tempPyramid;
+ buildOpticalFlowPyramid(
+ newImage_gray,
+ tempPyramid,
+ params.mWindowSize,
+ params.mPyrMaxLevel);
+
+ calcOpticalFlowPyrLK(m_pyramid,
+ tempPyramid,
+ pointsToTrackOld,
+ pointsToTrackNew,
+ status,
+ errors,
+ params.mWindowSize,
+ params.mPyrMaxLevel,
+ termcrit);
+
+ std::vector<Point2f> di;
+ for (size_t idx = 0u; idx < pointsToTrackOld.size(); idx++) {
+ if (status[idx] == 1) {
+ di.push_back(pointsToTrackNew[idx] - pointsToTrackOld[idx]);
+ }
+ }
+
+ std::vector<bool> filter_status;
+ check_FB(tempPyramid,
+ pointsToTrackOld,
+ pointsToTrackNew,
+ filter_status);
+
+ check_NCC(oldImage_gray,
+ newImage_gray,
+ pointsToTrackOld,
+ pointsToTrackNew,
+ filter_status);
+
+ for (size_t idx = 0u; idx < pointsToTrackOld.size(); idx++) {
+ if (!filter_status[idx]) {
+ pointsToTrackOld.erase(pointsToTrackOld.begin() + idx);
+ pointsToTrackNew.erase(pointsToTrackNew.begin() + idx);
+ filter_status.erase(filter_status.begin() + idx);
+ idx--;
+ }
+ }
+
+ if (pointsToTrackOld.size() == 0 || di.size() == 0) {
+ return false;
+ }
+
+ Point2f mDisplacement;
+ Rect_<float> boxCandidate =
+ vote(pointsToTrackOld, pointsToTrackNew, oldBox, mDisplacement);
+
+ std::vector<float> displacements;
+ for (size_t idx = 0u; idx < di.size(); idx++) {
+ di[idx] -= mDisplacement;
+ displacements.push_back(sqrt(di[idx].ddot(di[idx])));
+ }
+
+ m_confidence =
+ (10.f - getMedian(displacements, (int)displacements.size())) / 10.f;
+ if (m_confidence <= 0.f) {
+ m_confidence = 0.f;
+ return false;
+ }
+
+ m_pyramid.swap(tempPyramid);
+ oldBox = boxCandidate;
+ return true;
}
Rect_<float> TrackerMedianFlow::vote(
- const std::vector<Point2f>& oldPoints,
- const std::vector<Point2f>& newPoints,
- const Rect_<float>& oldRect,
- Point2f& mD)
+ const std::vector<Point2f>& oldPoints,
+ const std::vector<Point2f>& newPoints,
+ const Rect_<float>& oldRect,
+ Point2f& mD)
{
- Rect_<float> newRect;
- Point2d newCenter(oldRect.x + oldRect.width/2.0,
- oldRect.y + oldRect.height/2.0);
-
- int n = (int)oldPoints.size();
- std::vector<float> buf(std::max( n*(n-1) / 2, 3), 0.f);
-
- if(oldPoints.size() == 1)
- {
- newRect.x = oldRect.x+newPoints[0].x-oldPoints[0].x;
- newRect.y = oldRect.y+newPoints[0].y-oldPoints[0].y;
- newRect.width=oldRect.width;
- newRect.height=oldRect.height;
- return newRect;
- }
-
- float xshift = 0.f;
- float yshift = 0.f;
- for(int i = 0; i < n; i++)
- {
- buf[i] = newPoints[i].x - oldPoints[i].x;
- }
-
- xshift = getMedian(buf, n);
- newCenter.x += xshift;
- for(int idx = 0; idx < n; idx++)
- {
- buf[idx] = newPoints[idx].y - oldPoints[idx].y;
- }
-
- yshift = getMedian(buf, n);
- newCenter.y += yshift;
- mD = Point2f(xshift, yshift);
-
- if(oldPoints.size() == 1)
- {
- newRect.x = newCenter.x - oldRect.width / 2.0;
- newRect.y = newCenter.y - oldRect.height / 2.0;
- newRect.width = oldRect.width;
- newRect.height = oldRect.height;
- return newRect;
- }
-
- float nd = 0.f;
- float od = 0.f;
- for (int i = 0, ctr = 0; i < n; i++)
- {
- for(int j = 0; j < i; j++)
- {
- nd = l2distance(newPoints[i], newPoints[j]);
- od = l2distance(oldPoints[i], oldPoints[j]);
- buf[ctr] = (od == 0.f ? 0.f : nd / od);
- ctr++;
- }
- }
-
- float scale = getMedian(buf, n*(n-1) / 2);
- newRect.x = newCenter.x - scale * oldRect.width / 2.f;
- newRect.y = newCenter.y-scale * oldRect.height / 2.f;
- newRect.width = scale * oldRect.width;
- newRect.height = scale * oldRect.height;
-
- return newRect;
+ Rect_<float> newRect;
+ Point2d newCenter(oldRect.x + oldRect.width/2.0,
+ oldRect.y + oldRect.height/2.0);
+
+ int n = (int)oldPoints.size();
+ std::vector<float>buf(std::max(n*(n-1) / 2, 3), 0.f);
+
+ if(oldPoints.size() == 1) {
+ newRect.x = oldRect.x+newPoints[0].x-oldPoints[0].x;
+ newRect.y = oldRect.y+newPoints[0].y-oldPoints[0].y;
+ newRect.width = oldRect.width;
+ newRect.height = oldRect.height;
+ return newRect;
+ }
+
+ float xshift = 0.f;
+ float yshift = 0.f;
+ for(int i = 0; i < n; i++) {
+ buf[i] = newPoints[i].x - oldPoints[i].x;
+ }
+
+ xshift = getMedian(buf, n);
+ newCenter.x += xshift;
+ for(int idx = 0; idx < n; idx++) {
+ buf[idx] = newPoints[idx].y - oldPoints[idx].y;
+ }
+
+ yshift = getMedian(buf, n);
+ newCenter.y += yshift;
+ mD = Point2f(xshift, yshift);
+
+ if(oldPoints.size() == 1) {
+ newRect.x = newCenter.x - oldRect.width / 2.0;
+ newRect.y = newCenter.y - oldRect.height / 2.0;
+ newRect.width = oldRect.width;
+ newRect.height = oldRect.height;
+ return newRect;
+ }
+
+ float nd = 0.f;
+ float od = 0.f;
+ for (int i = 0, ctr = 0; i < n; i++) {
+ for(int j = 0; j < i; j++) {
+ nd = l2distance(newPoints[i], newPoints[j]);
+ od = l2distance(oldPoints[i], oldPoints[j]);
+ buf[ctr] = (od == 0.f ? 0.f : nd / od);
+ ctr++;
+ }
+ }
+
+ float scale = getMedian(buf, n*(n-1) / 2);
+ newRect.x = newCenter.x - scale * oldRect.width / 2.f;
+ newRect.y = newCenter.y-scale * oldRect.height / 2.f;
+ newRect.width = scale * oldRect.width;
+ newRect.height = scale * oldRect.height;
+
+ return newRect;
}
template<typename T>
T TrackerMedianFlow::getMedian(std::vector<T>& values, int size)
{
- if (size == -1)
- {
- size = (int)values.size();
- }
-
- std::vector<T> copy(values.begin(), values.begin() + size);
- std::sort(copy.begin(),copy.end());
- if(size%2==0)
- {
- return (copy[size/2-1]+copy[size/2])/((T)2.0);
- }
- else
- {
- return copy[(size - 1) / 2];
- }
+ if (size == -1) {
+ size = (int)values.size();
+ }
+
+ std::vector<T> copy(values.begin(), values.begin() + size);
+ std::sort(copy.begin(), copy.end());
+ if(size%2 == 0) {
+ return (copy[size/2-1]+copy[size/2])/((T)2.0);
+ } else {
+ return copy[(size - 1) / 2];
+ }
}
float TrackerMedianFlow::l2distance(Point2f p1, Point2f p2)
{
- float dx = p1.x - p2.x;
- float dy = p1.y - p2.y;
- return sqrt(dx * dx + dy * dy);
+ float dx = p1.x - p2.x;
+ float dy = p1.y - p2.y;
+ return sqrt(dx * dx + dy * dy);
}
void TrackerMedianFlow::check_FB(
- std::vector<Mat> newPyramid,
- const std::vector<Point2f>& oldPoints,
- const std::vector<Point2f>& newPoints,
- std::vector<bool>& status)
+ std::vector<Mat> newPyramid,
+ const std::vector<Point2f>& oldPoints,
+ const std::vector<Point2f>& newPoints,
+ std::vector<bool>& status)
{
- if(status.size() == 0)
- {
- status = std::vector<bool>(oldPoints.size(), true);
- }
-
- std::vector<uchar> LKstatus(oldPoints.size());
- std::vector<float> errors(oldPoints.size());
- std::vector<float> FBerror(oldPoints.size());
- std::vector<Point2f> pointsToTrackReprojection;
-
- calcOpticalFlowPyrLK(newPyramid,
- m_pyramid,
- newPoints,
- pointsToTrackReprojection,
- LKstatus,
- errors,
- params.mWindowSize,
- params.mPyrMaxLevel,
- termcrit);
-
- for (size_t idx = 0u; idx < oldPoints.size(); idx++)
- {
- FBerror[idx] = l2distance(oldPoints[idx], pointsToTrackReprojection[idx]);
- }
-
- float FBerrorMedian = getMedian(FBerror) + FloatEps;
- for (size_t idx = 0u; idx < oldPoints.size(); idx++)
- {
- status[idx] = (FBerror[idx] < FBerrorMedian);
- }
+ if(status.size() == 0) {
+ status = std::vector<bool>(oldPoints.size(), true);
+ }
+
+ std::vector<uchar> LKstatus(oldPoints.size());
+ std::vector<float> errors(oldPoints.size());
+ std::vector<float> FBerror(oldPoints.size());
+ std::vector<Point2f> pointsToTrackReprojection;
+
+ calcOpticalFlowPyrLK(newPyramid,
+ m_pyramid,
+ newPoints,
+ pointsToTrackReprojection,
+ LKstatus,
+ errors,
+ params.mWindowSize,
+ params.mPyrMaxLevel,
+ termcrit);
+
+ for (size_t idx = 0u; idx < oldPoints.size(); idx++) {
+ FBerror[idx] =
+ l2distance(oldPoints[idx], pointsToTrackReprojection[idx]);
+ }
+
+ float FBerrorMedian = getMedian(FBerror) + FloatEps;
+ for (size_t idx = 0u; idx < oldPoints.size(); idx++) {
+ status[idx] = (FBerror[idx] < FBerrorMedian);
+ }
}
void TrackerMedianFlow::check_NCC(
- const Mat& oldImage,
- const Mat& newImage,
- const std::vector<Point2f>& oldPoints,
- const std::vector<Point2f>& newPoints,
- std::vector<bool>& status)
+ const Mat& oldImage,
+ const Mat& newImage,
+ const std::vector<Point2f>& oldPoints,
+ const std::vector<Point2f>& newPoints,
+ std::vector<bool>& status)
{
- std::vector<float> NCC(oldPoints.size(), 0.f);
- Size patch(30, 30);
- Mat p1;
- Mat p2;
-
- for (size_t idx = 0u; idx < oldPoints.size(); idx++)
- {
- getRectSubPix(oldImage, patch, oldPoints[idx], p1);
- getRectSubPix(newImage, patch, newPoints[idx], p2);
-
- const int N = 900;
- const float s1 = sum(p1)(0);
- const float s2 = sum(p2)(0);
- const float n1 = norm(p1);
- const float n2 = norm(p2);
- const float prod = p1.dot(p2);
- const float sq1 = sqrt(n1 * n1 - s1 * s1 / N);
- const float sq2 = sqrt(n2 * n2 - s2 * s2 / N);
- NCC[idx] = (sq2==0 ? sq1 / std::abs(sq1)
- : (prod - s1 * s2 / N) / sq1 / sq2);
- }
-
- float median = getMedian(NCC) - FloatEps;
- for(size_t idx = 0u; idx < oldPoints.size(); idx++)
- {
- status[idx] = status[idx] && (NCC[idx] > median);
- }
+ std::vector<float> NCC(oldPoints.size(), 0.f);
+ Size patch(30, 30);
+ Mat p1;
+ Mat p2;
+
+ for (size_t idx = 0u; idx < oldPoints.size(); idx++) {
+ getRectSubPix(oldImage, patch, oldPoints[idx], p1);
+ getRectSubPix(newImage, patch, newPoints[idx], p2);
+
+ const int N = 900;
+ const float s1 = sum(p1)(0);
+ const float s2 = sum(p2)(0);
+ const float n1 = norm(p1);
+ const float n2 = norm(p2);
+ const float prod = p1.dot(p2);
+ const float sq1 = sqrt(n1 * n1 - s1 * s1 / N);
+ const float sq2 = sqrt(n2 * n2 - s2 * s2 / N);
+ NCC[idx] = (sq2 == 0 ? sq1 / std::abs(sq1)
+ : (prod - s1 * s2 / N) / sq1 / sq2);
+ }
+
+ float median = getMedian(NCC) - FloatEps;
+ for(size_t idx = 0u; idx < oldPoints.size(); idx++) {
+ status[idx] = status[idx] && (NCC[idx] > median);
+ }
}
-void TrackerMedianFlow::read( cv::FileStorage& fs )
+void TrackerMedianFlow::read(cv::FileStorage& fs)
{
- params.read(fs.root());
- float bbX = 0.f;
- float bbY = 0.f;
- float bbW = 0.f;
- float bbH = 0.f;
- fs["lastLocationX"] >> bbX;
- fs["lastLocationY"] >> bbY;
- fs["lastLocationW"] >> bbW;
- fs["lastLocationH"] >> bbH;
- m_boundingBox = Rect_<float>(bbX, bbY, bbW, bbH);
- fs["lastImage"] >> m_image;
+ params.read(fs.root());
+ float bbX = 0.f;
+ float bbY = 0.f;
+ float bbW = 0.f;
+ float bbH = 0.f;
+ fs["lastLocationX"] >> bbX;
+ fs["lastLocationY"] >> bbY;
+ fs["lastLocationW"] >> bbW;
+ fs["lastLocationH"] >> bbH;
+ m_boundingBox = Rect_<float>(bbX, bbY, bbW, bbH);
+ fs["lastImage"] >> m_image;
}
-void TrackerMedianFlow::write( cv::FileStorage& fs ) const
+void TrackerMedianFlow::write(cv::FileStorage& fs) const
{
- params.write(fs);
- fs << "lastLocationX" << m_boundingBox.x;
- fs << "lastLocationY" << m_boundingBox.y;
- fs << "lastLocationW" << m_boundingBox.width;
- fs << "lastLocationH" << m_boundingBox.height;
- fs << "lastImage" << m_image;
+ params.write(fs);
+ fs << "lastLocationX" << m_boundingBox.x;
+ fs << "lastLocationY" << m_boundingBox.y;
+ fs << "lastLocationW" << m_boundingBox.width;
+ fs << "lastLocationH" << m_boundingBox.height;
+ fs << "lastImage" << m_image;
}
} /* namespace cv */
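
For the record, the two point filters above can be stated compactly (my restatement, not part of the patch). check_NCC() computes, for every pair of 30x30 patches p1, p2 (N = 900 pixels, s_k the patch sums):

	\mathrm{NCC}(p_1, p_2) =
	  \frac{\langle p_1, p_2 \rangle - s_1 s_2 / N}
	       {\sqrt{\lVert p_1 \rVert^2 - s_1^2 / N}\,
	        \sqrt{\lVert p_2 \rVert^2 - s_2^2 / N}}

Points whose NCC falls below the median (minus FloatEps) are dropped, and check_FB() applies the same median rule to the forward-backward reprojection error, so roughly half of the grid points survive each filter before the displacement vote.
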
diff --git a/mv_face/face/src/mv_face_open.cpp b/mv_face/face/src/mv_face_open.cpp
index 41f2398e..0412f85b 100644
--- a/mv_face/face/src/mv_face_open.cpp
+++ b/mv_face/face/src/mv_face_open.cpp
@@ -34,1015 +34,918 @@ using namespace ::MediaVision::Face;
static const RecognitionParams defaultRecognitionParams = RecognitionParams();
static void extractRecognitionParams(
- mv_engine_config_h engine_cfg,
- RecognitionParams& recognitionParams)
+ mv_engine_config_h engine_cfg,
+ RecognitionParams& recognitionParams)
{
- mv_engine_config_h working_cfg = NULL;
-
- if (NULL == engine_cfg)
- {
- mv_create_engine_config(&working_cfg);
- }
- else
- {
- working_cfg = engine_cfg;
- }
-
- int algType = 0;
- mv_engine_config_get_int_attribute_c(
- working_cfg,
- "MV_FACE_RECOGNITION_MODEL_TYPE",
- &algType);
-
- if (0 < algType && 4 > algType)
- {
- recognitionParams.mRecognitionAlgType =
- (FaceRecognitionModelType)algType;
- }
- else
- {
- recognitionParams.mRecognitionAlgType =
- defaultRecognitionParams.mRecognitionAlgType;
- }
-
- if (NULL == engine_cfg)
- {
- mv_destroy_engine_config(working_cfg);
- }
+ mv_engine_config_h working_cfg = NULL;
+
+ if (NULL == engine_cfg) {
+ mv_create_engine_config(&working_cfg);
+ } else {
+ working_cfg = engine_cfg;
+ }
+
+ int algType = 0;
+ mv_engine_config_get_int_attribute_c(
+ working_cfg,
+ "MV_FACE_RECOGNITION_MODEL_TYPE",
+ &algType);
+
+ if (0 < algType && 4 > algType) {
+ recognitionParams.mRecognitionAlgType =
+ (FaceRecognitionModelType)algType;
+ } else {
+ recognitionParams.mRecognitionAlgType =
+ defaultRecognitionParams.mRecognitionAlgType;
+ }
+
+ if (NULL == engine_cfg) {
+ mv_destroy_engine_config(working_cfg);
+ }
}
inline void convertRectCV2MV(const cv::Rect& src, mv_rectangle_s& dst)
{
- dst.point.x = src.x;
- dst.point.y = src.y;
- dst.width = src.width;
- dst.height = src.height;
+ dst.point.x = src.x;
+ dst.point.y = src.y;
+ dst.width = src.width;
+ dst.height = src.height;
}
int mv_face_detect_open(
- mv_source_h source,
- mv_engine_config_h engine_cfg,
- mv_face_detected_cb detected_cb,
- void *user_data)
+ mv_source_h source,
+ mv_engine_config_h engine_cfg,
+ mv_face_detected_cb detected_cb,
+ void *user_data)
{
- cv::Mat image;
-
- int error = convertSourceMV2GrayCV(source, image);
- if (error != MEDIA_VISION_ERROR_NONE)
- {
- LOGE("Convertion mv_source_h to gray failed");
- return error;
- }
-
- char *haarcascadeFilepath;
- error = mv_engine_config_get_string_attribute_c(
- engine_cfg,
- "MV_FACE_DETECTION_MODEL_FILE_PATH",
- &haarcascadeFilepath);
-
- //default path
- std::string haarcascadeFilePathStr =
- "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt2.xml";
-
- if (error == MEDIA_VISION_ERROR_NONE)
- {
- LOGI("Haarcascade file was set as default");
- haarcascadeFilePathStr = std::string(haarcascadeFilepath);
-
- delete[] haarcascadeFilepath;
- }
- else
- {
- LOGE("Error occurred during face detection haarcascade file receiving."
- " (%i)", error);
- }
-
- static FaceDetector faceDetector;
-
- if (!faceDetector.loadHaarcascade(haarcascadeFilePathStr))
- {
- LOGE("Loading Haarcascade failed");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- cv::Rect roi(-1, -1, -1, -1);
- error = mv_engine_config_get_int_attribute_c(
- engine_cfg,
- MV_FACE_DETECTION_ROI_X,
- &roi.x);
- if (error != MEDIA_VISION_ERROR_NONE)
- {
- LOGE("Error occurred during face detection roi (x) receiving."
- " (%i)", error);
- }
-
- error = mv_engine_config_get_int_attribute_c(
- engine_cfg,
- MV_FACE_DETECTION_ROI_Y,
- &roi.y);
- if (error != MEDIA_VISION_ERROR_NONE)
- {
- LOGE("Error occurred during face detection roi (y) receiving."
- " (%i)", error);
- }
-
- error = mv_engine_config_get_int_attribute_c(
- engine_cfg,
- MV_FACE_DETECTION_ROI_WIDTH,
- &roi.width);
- if (error != MEDIA_VISION_ERROR_NONE)
- {
- LOGE("Error occurred during face detection roi (width) receiving."
- " (%i)", error);
- }
-
- error = mv_engine_config_get_int_attribute_c(
- engine_cfg,
- MV_FACE_DETECTION_ROI_HEIGHT,
- &roi.height);
- if (error != MEDIA_VISION_ERROR_NONE)
- {
- LOGE("Error occurred during face detection roi (height) receiving."
- " (%i)", error);
- }
-
- cv::Size minSize(-1, -1);
- error = mv_engine_config_get_int_attribute_c(
- engine_cfg,
- MV_FACE_DETECTION_MIN_SIZE_WIDTH,
- &minSize.width);
- if (error != MEDIA_VISION_ERROR_NONE)
- {
- LOGE("Error occurred during face detection minimum width receiving."
- " (%i)", error);
- }
-
- error = mv_engine_config_get_int_attribute_c(
- engine_cfg,
- MV_FACE_DETECTION_MIN_SIZE_HEIGHT,
- &minSize.height);
- if (error != MEDIA_VISION_ERROR_NONE)
- {
- LOGE("Error occurred during face detection minimum height receiving."
- " (%i)", error);
- }
-
- std::vector<cv::Rect> faceLocations;
- if (!faceDetector.detectFaces(image, roi, minSize, faceLocations))
- {
- LOGE("Face detection in OpenCV failed");
- return MEDIA_VISION_ERROR_INVALID_OPERATION;
- }
-
- static const int StartMaxResultsNumber = 50;
- static std::vector<mv_rectangle_s> results(StartMaxResultsNumber);
-
- const int numberOfResults = faceLocations.size();
- if (numberOfResults > StartMaxResultsNumber)
- {
- results.resize(numberOfResults);
- }
-
- for(int rectNum = 0; rectNum < numberOfResults; ++rectNum)
- {
- convertRectCV2MV(faceLocations[rectNum], results[rectNum]);
- }
-
- LOGI("Call the detect callback for %i detected faces", numberOfResults);
- detected_cb(source, engine_cfg, results.data(), numberOfResults, user_data);
-
- return MEDIA_VISION_ERROR_NONE;
+ cv::Mat image;
+
+ int error = convertSourceMV2GrayCV(source, image);
+ if (error != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Convertion mv_source_h to gray failed");
+ return error;
+ }
+
+ char *haarcascadeFilepath;
+ error = mv_engine_config_get_string_attribute_c(
+ engine_cfg,
+ "MV_FACE_DETECTION_MODEL_FILE_PATH",
+ &haarcascadeFilepath);
+
+ /* default path */
+ std::string haarcascadeFilePathStr =
+ "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt2.xml";
+
+ if (error == MEDIA_VISION_ERROR_NONE) {
+ LOGI("Haarcascade file was set as default");
+ haarcascadeFilePathStr = std::string(haarcascadeFilepath);
+
+ delete[] haarcascadeFilepath;
+ } else {
+ LOGE("Error occurred during face detection haarcascade file receiving."
+ " (%i)", error);
+ }
+
+ static FaceDetector faceDetector;
+
+ if (!faceDetector.loadHaarcascade(haarcascadeFilePathStr)) {
+ LOGE("Loading Haarcascade failed");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ cv::Rect roi(-1, -1, -1, -1);
+ error = mv_engine_config_get_int_attribute_c(
+ engine_cfg,
+ MV_FACE_DETECTION_ROI_X,
+ &roi.x);
+ if (error != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Error occurred during face detection roi (x) receiving."
+ " (%i)", error);
+ }
+
+ error = mv_engine_config_get_int_attribute_c(
+ engine_cfg,
+ MV_FACE_DETECTION_ROI_Y,
+ &roi.y);
+ if (error != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Error occurred during face detection roi (y) receiving."
+ " (%i)", error);
+ }
+
+ error = mv_engine_config_get_int_attribute_c(
+ engine_cfg,
+ MV_FACE_DETECTION_ROI_WIDTH,
+ &roi.width);
+ if (error != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Error occurred during face detection roi (width) receiving."
+ " (%i)", error);
+ }
+
+ error = mv_engine_config_get_int_attribute_c(
+ engine_cfg,
+ MV_FACE_DETECTION_ROI_HEIGHT,
+ &roi.height);
+ if (error != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Error occurred during face detection roi (height) receiving."
+ " (%i)", error);
+ }
+
+ cv::Size minSize(-1, -1);
+ error = mv_engine_config_get_int_attribute_c(
+ engine_cfg,
+ MV_FACE_DETECTION_MIN_SIZE_WIDTH,
+ &minSize.width);
+ if (error != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Error occurred during face detection minimum width receiving."
+ " (%i)", error);
+ }
+
+ error = mv_engine_config_get_int_attribute_c(
+ engine_cfg,
+ MV_FACE_DETECTION_MIN_SIZE_HEIGHT,
+ &minSize.height);
+ if (error != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Error occurred during face detection minimum height receiving."
+ " (%i)", error);
+ }
+
+ std::vector<cv::Rect> faceLocations;
+ if (!faceDetector.detectFaces(image, roi, minSize, faceLocations)) {
+ LOGE("Face detection in OpenCV failed");
+ return MEDIA_VISION_ERROR_INVALID_OPERATION;
+ }
+
+ static const int StartMaxResultsNumber = 50;
+ static std::vector<mv_rectangle_s> results(StartMaxResultsNumber);
+
+ const int numberOfResults = faceLocations.size();
+ if (numberOfResults > StartMaxResultsNumber) {
+ results.resize(numberOfResults);
+ }
+
+ for (int rectNum = 0; rectNum < numberOfResults; ++rectNum) {
+ convertRectCV2MV(faceLocations[rectNum], results[rectNum]);
+ }
+
+ LOGI("Call the detect callback for %i detected faces", numberOfResults);
+ detected_cb(source, engine_cfg, results.data(), numberOfResults, user_data);
+
+ return MEDIA_VISION_ERROR_NONE;
}
int mv_face_recognize_open(
- mv_source_h source,
- mv_face_recognition_model_h recognition_model,
- mv_engine_config_h engine_cfg,
- mv_rectangle_s *face_location,
- mv_face_recognized_cb recognized_cb,
- void *user_data)
+ mv_source_h source,
+ mv_face_recognition_model_h recognition_model,
+ mv_engine_config_h engine_cfg,
+ mv_rectangle_s *face_location,
+ mv_face_recognized_cb recognized_cb,
+ void *user_data)
{
- if (!source)
- {
- LOGE("Can't recognize for the NULL Media Vision source handle");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
- if (!recognized_cb)
- {
- LOGE("Recognition failed. Can't output recognition results without "
- "callback function");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
- if (!recognition_model)
- {
- LOGE("Can't recognize for the NULL Media Vision Face recognition model");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- FaceRecognitionModel *pRecModel = static_cast<FaceRecognitionModel*>(recognition_model);
-
- if (!pRecModel)
- {
- LOGE("Face recognition failed. Incorrect Media Vision Face recognition model handle is used");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- cv::Mat grayImage;
- int ret = convertSourceMV2GrayCV(source, grayImage);
-
- if (MEDIA_VISION_ERROR_NONE != ret)
- {
- LOGE("Convertion mv_source_h to gray failed");
- return ret;
- }
-
- cv::Mat image;
- if (NULL == face_location)
- {
- image = grayImage;
- }
- else
- {
- cv::Rect_<int> roi;
- roi.x = face_location->point.x;
- roi.y = face_location->point.y;
- roi.width = face_location->width;
- roi.height = face_location->height;
- image = grayImage(roi);
- }
-
- FaceRecognitionResults results;
-
- LOGD("Face recognition is started");
-
- ret = pRecModel->recognize(image, results);
-
- if (MEDIA_VISION_ERROR_NONE != ret)
- {
- LOGE("Error occurred during the recognition. Failed");
- return ret;
- }
-
- if (!results.mIsRecognized)
- {
- recognized_cb(
- source,
- recognition_model,
- engine_cfg,
- NULL,
- NULL,
- 0.0,
- user_data);
- }
- else
- {
- mv_rectangle_s location;
- location.point.x = results.mFaceLocation.x;
- location.point.y = results.mFaceLocation.y;
- location.width = results.mFaceLocation.width;
- location.height = results.mFaceLocation.height;
-
- if (face_location != NULL)
- {
- location.point.x += face_location->point.x;
- location.point.y += face_location->point.y;
- }
-
- recognized_cb(
- source,
- recognition_model,
- engine_cfg,
- &location,
- &(results.mFaceLabel),
- results.mConfidence,
- user_data);
- }
-
- LOGD("Face recognition is finished");
-
- return ret;
+ if (!source) {
+ LOGE("Can't recognize for the NULL Media Vision source handle");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ if (!recognized_cb) {
+ LOGE("Recognition failed. Can't output recognition results without "
+ "callback function");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ if (!recognition_model) {
+ LOGE("Can't recognize for the NULL Media Vision Face recognition model");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ FaceRecognitionModel *pRecModel = static_cast<FaceRecognitionModel*>(recognition_model);
+
+ if (!pRecModel) {
+ LOGE("Face recognition failed. Incorrect Media Vision Face recognition model handle is used");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ cv::Mat grayImage;
+ int ret = convertSourceMV2GrayCV(source, grayImage);
+
+ if (MEDIA_VISION_ERROR_NONE != ret) {
+ LOGE("Convertion mv_source_h to gray failed");
+ return ret;
+ }
+
+ cv::Mat image;
+ if (NULL == face_location) {
+ image = grayImage;
+ } else {
+ cv::Rect_<int> roi;
+ roi.x = face_location->point.x;
+ roi.y = face_location->point.y;
+ roi.width = face_location->width;
+ roi.height = face_location->height;
+ image = grayImage(roi);
+ }
+
+ FaceRecognitionResults results;
+
+ LOGD("Face recognition is started");
+
+ ret = pRecModel->recognize(image, results);
+
+ if (MEDIA_VISION_ERROR_NONE != ret) {
+ LOGE("Error occurred during the recognition. Failed");
+ return ret;
+ }
+
+ if (!results.mIsRecognized) {
+ recognized_cb(
+ source,
+ recognition_model,
+ engine_cfg,
+ NULL,
+ NULL,
+ 0.0,
+ user_data);
+ } else {
+ mv_rectangle_s location;
+ location.point.x = results.mFaceLocation.x;
+ location.point.y = results.mFaceLocation.y;
+ location.width = results.mFaceLocation.width;
+ location.height = results.mFaceLocation.height;
+
+ if (face_location != NULL) {
+ location.point.x += face_location->point.x;
+ location.point.y += face_location->point.y;
+ }
+
+ recognized_cb(
+ source,
+ recognition_model,
+ engine_cfg,
+ &location,
+ &(results.mFaceLabel),
+ results.mConfidence,
+ user_data);
+ }
+
+ LOGD("Face recognition is finished");
+
+ return ret;
}
int mv_face_track_open(
- mv_source_h source,
- mv_face_tracking_model_h tracking_model,
- mv_engine_config_h engine_cfg,
- mv_face_tracked_cb tracked_cb,
- bool /*do_learn*/,
- void *user_data)
+ mv_source_h source,
+ mv_face_tracking_model_h tracking_model,
+ mv_engine_config_h engine_cfg,
+ mv_face_tracked_cb tracked_cb,
+ bool /*do_learn*/,
+ void *user_data)
{
- if (!source)
- {
- LOGE("Can't track for the NULL Media Vision source handle");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
- if (!tracked_cb)
- {
- LOGE("Tracking failed. Can't output tracking results without "
- "callback function");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
- if (!tracking_model)
- {
- LOGE("Can't track for the NULL Media Vision Face tracking model");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- FaceTrackingModel *pTrackModel =
- static_cast<FaceTrackingModel*>(tracking_model);
-
- if (!pTrackModel)
- {
- LOGE("Face tracking failed. "
- "Incorrect Media Vision Face tracking model handle is used");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- cv::Mat grayImage;
- int ret = convertSourceMV2GrayCV(source, grayImage);
-
- if (MEDIA_VISION_ERROR_NONE != ret)
- {
- LOGE("Convertion mv_source_h to gray failed");
- return ret;
- }
-
- FaceTrackingResults results;
- ret = pTrackModel->track(grayImage, results);
-
- if (MEDIA_VISION_ERROR_NONE != ret)
- {
- LOGE("Tracking can't be performed. "
- "Check that tracking model is prepared when tracking starts");
- return ret;
- }
-
- if (results.mIsTracked)
- {
- mv_quadrangle_s predictedLocation;
- predictedLocation.points[0].x = results.mFaceLocation.x;
- predictedLocation.points[0].y = results.mFaceLocation.y;
- predictedLocation.points[1].x =
- results.mFaceLocation.x + results.mFaceLocation.width;
- predictedLocation.points[1].y = results.mFaceLocation.y;
- predictedLocation.points[2].x =
- results.mFaceLocation.x + results.mFaceLocation.width;
- predictedLocation.points[2].y =
- results.mFaceLocation.y + results.mFaceLocation.height;
- predictedLocation.points[3].x = results.mFaceLocation.x;
- predictedLocation.points[3].y =
- results.mFaceLocation.y + results.mFaceLocation.height;
- tracked_cb(
- source,
- tracking_model,
- engine_cfg,
- &predictedLocation,
- results.mConfidence,
- user_data);
- }
- else
- {
- tracked_cb(
- source,
- tracking_model,
- engine_cfg,
- NULL,
- results.mConfidence,
- user_data);
- }
-
- return ret;
+ if (!source) {
+ LOGE("Can't track for the NULL Media Vision source handle");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ if (!tracked_cb) {
+ LOGE("Tracking failed. Can't output tracking results without "
+ "callback function");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ if (!tracking_model) {
+ LOGE("Can't track for the NULL Media Vision Face tracking model");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ FaceTrackingModel *pTrackModel =
+ static_cast<FaceTrackingModel*>(tracking_model);
+
+ if (!pTrackModel) {
+ LOGE("Face tracking failed. "
+ "Incorrect Media Vision Face tracking model handle is used");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ cv::Mat grayImage;
+ int ret = convertSourceMV2GrayCV(source, grayImage);
+
+ if (MEDIA_VISION_ERROR_NONE != ret) {
+ LOGE("Convertion mv_source_h to gray failed");
+ return ret;
+ }
+
+ FaceTrackingResults results;
+ ret = pTrackModel->track(grayImage, results);
+
+ if (MEDIA_VISION_ERROR_NONE != ret) {
+ LOGE("Tracking can't be performed. "
+ "Check that tracking model is prepared when tracking starts");
+ return ret;
+ }
+
+ if (results.mIsTracked) {
+ mv_quadrangle_s predictedLocation;
+ predictedLocation.points[0].x = results.mFaceLocation.x;
+ predictedLocation.points[0].y = results.mFaceLocation.y;
+ predictedLocation.points[1].x =
+ results.mFaceLocation.x + results.mFaceLocation.width;
+ predictedLocation.points[1].y = results.mFaceLocation.y;
+ predictedLocation.points[2].x =
+ results.mFaceLocation.x + results.mFaceLocation.width;
+ predictedLocation.points[2].y =
+ results.mFaceLocation.y + results.mFaceLocation.height;
+ predictedLocation.points[3].x = results.mFaceLocation.x;
+ predictedLocation.points[3].y =
+ results.mFaceLocation.y + results.mFaceLocation.height;
+ tracked_cb(
+ source,
+ tracking_model,
+ engine_cfg,
+ &predictedLocation,
+ results.mConfidence,
+ user_data);
+ } else {
+ tracked_cb(
+ source,
+ tracking_model,
+ engine_cfg,
+ NULL,
+ results.mConfidence,
+ user_data);
+ }
+
+ return ret;
}
int mv_face_eye_condition_recognize_open(
- mv_source_h source,
- mv_engine_config_h engine_cfg,
- mv_rectangle_s face_location,
- mv_face_eye_condition_recognized_cb eye_condition_recognized_cb,
- void *user_data)
+ mv_source_h source,
+ mv_engine_config_h engine_cfg,
+ mv_rectangle_s face_location,
+ mv_face_eye_condition_recognized_cb eye_condition_recognized_cb,
+ void *user_data)
{
- cv::Mat image;
-
- int error = convertSourceMV2GrayCV(source, image);
- if (error != MEDIA_VISION_ERROR_NONE)
- {
- LOGE("Convertion mv_source_h to gray failed");
- return error;
- }
-
- mv_face_eye_condition_e eye_condition;
- error = FaceEyeCondition::recognizeEyeCondition(
- image,
- face_location,
- &eye_condition);
-
- if (error != MEDIA_VISION_ERROR_NONE)
- {
- LOGE("eye contition recognition failed");
- return error;
- }
-
- eye_condition_recognized_cb(
- source,
- engine_cfg,
- face_location,
- eye_condition,
- user_data);
-
- return MEDIA_VISION_ERROR_NONE;
+ cv::Mat image;
+
+ int error = convertSourceMV2GrayCV(source, image);
+ if (error != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Convertion mv_source_h to gray failed");
+ return error;
+ }
+
+ mv_face_eye_condition_e eye_condition;
+ error = FaceEyeCondition::recognizeEyeCondition(
+ image,
+ face_location,
+ &eye_condition);
+
+ if (error != MEDIA_VISION_ERROR_NONE) {
+ LOGE("eye contition recognition failed");
+ return error;
+ }
+
+ eye_condition_recognized_cb(
+ source,
+ engine_cfg,
+ face_location,
+ eye_condition,
+ user_data);
+
+ return MEDIA_VISION_ERROR_NONE;
}
int mv_face_facial_expression_recognize_open(
- mv_source_h source,
- mv_engine_config_h engine_cfg,
- mv_rectangle_s face_location,
- mv_face_facial_expression_recognized_cb expression_recognized_cb,
- void *user_data)
+ mv_source_h source,
+ mv_engine_config_h engine_cfg,
+ mv_rectangle_s face_location,
+ mv_face_facial_expression_recognized_cb expression_recognized_cb,
+ void *user_data)
{
- cv::Mat image;
-
- int error = convertSourceMV2GrayCV(source, image);
- if (error != MEDIA_VISION_ERROR_NONE)
- {
- LOGE("Convertion mv_source_h to gray failed");
- return error;
- }
-
- mv_face_facial_expression_e expression;
- error = FaceExpressionRecognizer::recognizeFaceExpression(
- image, face_location, &expression);
-
- if (error != MEDIA_VISION_ERROR_NONE)
- {
- LOGE("eye contition recognition failed");
- return error;
- }
-
- expression_recognized_cb(
- source,
- engine_cfg,
- face_location,
- expression,
- user_data);
-
- return MEDIA_VISION_ERROR_NONE;
+ cv::Mat image;
+
+ int error = convertSourceMV2GrayCV(source, image);
+ if (error != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Convertion mv_source_h to gray failed");
+ return error;
+ }
+
+ mv_face_facial_expression_e expression;
+ error = FaceExpressionRecognizer::recognizeFaceExpression(
+ image, face_location, &expression);
+
+ if (error != MEDIA_VISION_ERROR_NONE) {
+ LOGE("eye contition recognition failed");
+ return error;
+ }
+
+ expression_recognized_cb(
+ source,
+ engine_cfg,
+ face_location,
+ expression,
+ user_data);
+
+ return MEDIA_VISION_ERROR_NONE;
}
int mv_face_recognition_model_create_open(
- mv_face_recognition_model_h *recognition_model)
+ mv_face_recognition_model_h *recognition_model)
{
- if (recognition_model == NULL)
- {
- LOGE("Recognition model can't be created because handle pointer is NULL");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
+ if (recognition_model == NULL) {
+ LOGE("Recognition model can't be created because handle pointer is NULL");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
- (*recognition_model) =
- static_cast<mv_face_recognition_model_h>(new (std::nothrow)FaceRecognitionModel());
+ (*recognition_model) =
+ static_cast<mv_face_recognition_model_h>(new (std::nothrow)FaceRecognitionModel());
- if (*recognition_model == NULL)
- {
- LOGE("Failed to create media vision recognition model");
- return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
- }
+ if (*recognition_model == NULL) {
+ LOGE("Failed to create media vision recognition model");
+ return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ }
- LOGD("Recognition model [%p] has been created", *recognition_model);
+ LOGD("Recognition model [%p] has been created", *recognition_model);
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int mv_face_recognition_model_destroy_open(
- mv_face_recognition_model_h recognition_model)
+ mv_face_recognition_model_h recognition_model)
{
- if (!recognition_model)
- {
- LOGE("Recognition model can't be destroyed because handle is NULL");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
+ if (!recognition_model) {
+ LOGE("Recognition model can't be destroyed because handle is NULL");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
- LOGD("Destroying media vision recognition model [%p]", recognition_model);
- delete static_cast<FaceRecognitionModel*>(recognition_model);
- LOGD("Media vision recognition model has been destroyed");
+ LOGD("Destroying media vision recognition model [%p]", recognition_model);
+ delete static_cast<FaceRecognitionModel*>(recognition_model);
+ LOGD("Media vision recognition model has been destroyed");
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int mv_face_recognition_model_clone_open(
- mv_face_recognition_model_h src,
- mv_face_recognition_model_h *dst)
+ mv_face_recognition_model_h src,
+ mv_face_recognition_model_h *dst)
{
- if (!src || !dst)
- {
- LOGE("Can't clone recognition model. Both source and destination"
- "recognition model handles has to be not NULL");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
+ if (!src || !dst) {
+ LOGE("Can't clone recognition model. Both source and destination"
+ "recognition model handles has to be not NULL");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
- (*dst) = static_cast<mv_face_recognition_model_h>(new (std::nothrow)FaceRecognitionModel());
+ (*dst) = static_cast<mv_face_recognition_model_h>(new (std::nothrow)FaceRecognitionModel());
- if (*dst == NULL)
- {
- LOGE("Failed to create media vision recognition model");
- return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
- }
+ if (*dst == NULL) {
+ LOGE("Failed to create media vision recognition model");
+ return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ }
- LOGD("Recognition model [%p] has been created", *dst);
+ LOGD("Recognition model [%p] has been created", *dst);
- const FaceRecognitionModel *pSrcModel = static_cast<FaceRecognitionModel*>(src);
- FaceRecognitionModel *pDstModel = static_cast<FaceRecognitionModel*>(*dst);
+ const FaceRecognitionModel *pSrcModel = static_cast<FaceRecognitionModel*>(src);
+ FaceRecognitionModel *pDstModel = static_cast<FaceRecognitionModel*>(*dst);
- *pDstModel = *pSrcModel;
+ *pDstModel = *pSrcModel;
- LOGD("Media vision recognition model has been cloned");
- return MEDIA_VISION_ERROR_NONE;
+ LOGD("Media vision recognition model has been cloned");
+ return MEDIA_VISION_ERROR_NONE;
}
int mv_face_recognition_model_save_open(
- const char *file_name,
- mv_face_recognition_model_h recognition_model)
+ const char *file_name,
+ mv_face_recognition_model_h recognition_model)
{
- if (!recognition_model)
- {
- LOGE("Can't save recognition model to the file. Handle has to be not NULL");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- if (NULL == file_name)
- {
- LOGE("Can't save recognition model to the file. File name has to be specified");
- return MEDIA_VISION_ERROR_INVALID_PATH;
- }
-
- FaceRecognitionModel *pRecModel = static_cast<FaceRecognitionModel*>(recognition_model);
- const int ret = pRecModel->save(std::string(file_name));
-
- if (MEDIA_VISION_ERROR_NONE != ret)
- {
- LOGE("Error occurred when save recognition model to the file");
- return ret;
- }
-
- LOGD("Media vision recognition model has been saved to the file [%s]", file_name);
- return ret;
+ if (!recognition_model) {
+ LOGE("Can't save recognition model to the file. Handle has to be not NULL");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ if (NULL == file_name) {
+ LOGE("Can't save recognition model to the file. File name has to be specified");
+ return MEDIA_VISION_ERROR_INVALID_PATH;
+ }
+
+ FaceRecognitionModel *pRecModel = static_cast<FaceRecognitionModel*>(recognition_model);
+ const int ret = pRecModel->save(std::string(file_name));
+
+ if (MEDIA_VISION_ERROR_NONE != ret) {
+ LOGE("Error occurred when save recognition model to the file");
+ return ret;
+ }
+
+ LOGD("Media vision recognition model has been saved to the file [%s]", file_name);
+ return ret;
}
int mv_face_recognition_model_load_open(
- const char *file_name,
- mv_face_recognition_model_h *recognition_model)
+ const char *file_name,
+ mv_face_recognition_model_h *recognition_model)
{
- if (!recognition_model)
- {
- LOGE("Can't load recognition model from the file. "
- "Handle has to be not NULL");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- if (NULL == file_name)
- {
- LOGE("Can't load recognition model from the file. "
- "File name has to be specified");
- return MEDIA_VISION_ERROR_INVALID_PATH;
- }
-
- (*recognition_model) =
- static_cast<mv_face_recognition_model_h>(new (std::nothrow)FaceRecognitionModel());
-
- if (*recognition_model == NULL)
- {
- LOGE("Failed to create media vision recognition model");
- return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
- }
-
- FaceRecognitionModel *pRecModel =
- static_cast<FaceRecognitionModel*>(*recognition_model);
-
- if (!pRecModel)
- {
- LOGE("Loading of the face recognition model from file failed. "
- "Incorrect Media Vision Face recognition model handle is used");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- const int ret = pRecModel->load(std::string(file_name));
-
- if (MEDIA_VISION_ERROR_NONE != ret)
- {
- LOGE("Error occurred when loading recognition model to the file");
- return ret;
- }
-
- LOGD("Media vision recognition model has been loaded from the file [%s]", file_name);
- return ret;
+ if (!recognition_model) {
+ LOGE("Can't load recognition model from the file. "
+ "Handle has to be not NULL");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ if (NULL == file_name) {
+ LOGE("Can't load recognition model from the file. "
+ "File name has to be specified");
+ return MEDIA_VISION_ERROR_INVALID_PATH;
+ }
+
+ (*recognition_model) =
+ static_cast<mv_face_recognition_model_h>(new (std::nothrow)FaceRecognitionModel());
+
+ if (*recognition_model == NULL) {
+ LOGE("Failed to create media vision recognition model");
+ return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ }
+
+ FaceRecognitionModel *pRecModel =
+ static_cast<FaceRecognitionModel*>(*recognition_model);
+
+ if (!pRecModel) {
+ LOGE("Loading of the face recognition model from file failed. "
+ "Incorrect Media Vision Face recognition model handle is used");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ const int ret = pRecModel->load(std::string(file_name));
+
+ if (MEDIA_VISION_ERROR_NONE != ret) {
+ LOGE("Error occurred when loading recognition model to the file");
+ return ret;
+ }
+
+ LOGD("Media vision recognition model has been loaded from the file [%s]", file_name);
+ return ret;
}
int mv_face_recognition_model_add_open(
- const mv_source_h source,
- mv_face_recognition_model_h recognition_model,
- const mv_rectangle_s *example_location,
- int face_label)
+ const mv_source_h source,
+ mv_face_recognition_model_h recognition_model,
+ const mv_rectangle_s *example_location,
+ int face_label)
{
- if (!source)
- {
- LOGE("Can't add face image example for recognition model. "
- "Media Vision source handle has to be not NULL");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- if (!recognition_model)
- {
- LOGE("Can't add face image example for recognition model. "
- "Model handle has to be not NULL");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- FaceRecognitionModel *pRecModel =
- static_cast<FaceRecognitionModel*>(recognition_model);
-
- if (!pRecModel)
- {
- LOGE("Add face image example to the model failed. "
- "Incorrect Media Vision Face recognition model handle is used");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- cv::Mat image;
- int ret = convertSourceMV2GrayCV(source, image);
- if (MEDIA_VISION_ERROR_NONE != ret)
- {
- LOGE("Convertion mv_source_h to gray failed");
- return ret;
- }
-
- if (!example_location)
- {
- ret = pRecModel->addFaceExample(image, face_label);
- }
- else
- {
- cv::Rect_<int> roi;
- roi.x = example_location->point.x;
- roi.y = example_location->point.y;
- roi.width = example_location->width;
- roi.height = example_location->height;
- ret = pRecModel->addFaceExample(image(roi).clone(), face_label);
- }
-
- if (MEDIA_VISION_ERROR_NONE != ret)
- {
- LOGE("Error occurred when adding face image example to the recognition model");
- return ret;
- }
-
- LOGD("The face image example labeled %i has been added "
- "to the Media Vision recognition model", face_label);
- return ret;
+ if (!source) {
+ LOGE("Can't add face image example for recognition model. "
+ "Media Vision source handle has to be not NULL");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ if (!recognition_model) {
+ LOGE("Can't add face image example for recognition model. "
+ "Model handle has to be not NULL");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ FaceRecognitionModel *pRecModel =
+ static_cast<FaceRecognitionModel*>(recognition_model);
+
+ if (!pRecModel) {
+ LOGE("Add face image example to the model failed. "
+ "Incorrect Media Vision Face recognition model handle is used");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ cv::Mat image;
+ int ret = convertSourceMV2GrayCV(source, image);
+ if (MEDIA_VISION_ERROR_NONE != ret) {
+ LOGE("Convertion mv_source_h to gray failed");
+ return ret;
+ }
+
+ if (!example_location) {
+ ret = pRecModel->addFaceExample(image, face_label);
+ } else {
+ cv::Rect_<int> roi;
+ roi.x = example_location->point.x;
+ roi.y = example_location->point.y;
+ roi.width = example_location->width;
+ roi.height = example_location->height;
+ ret = pRecModel->addFaceExample(image(roi).clone(), face_label);
+ }
+
+ if (MEDIA_VISION_ERROR_NONE != ret) {
+ LOGE("Error occurred when adding face image example to the recognition model");
+ return ret;
+ }
+
+ LOGD("The face image example labeled %i has been added "
+ "to the Media Vision recognition model", face_label);
+ return ret;
}
int mv_face_recognition_model_reset_open(
- mv_face_recognition_model_h recognition_model,
- const int *face_label)
+ mv_face_recognition_model_h recognition_model,
+ const int *face_label)
{
- if (!recognition_model)
- {
- LOGE("Can't reset positive examples for NULL recognition model");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- FaceRecognitionModel *pRecModel =
- static_cast<FaceRecognitionModel*>(recognition_model);
-
- if (!pRecModel)
- {
- LOGE("Loading of the face recognition model from file failed. "
- "Incorrect Media Vision Face recognition model handle is used");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- int ret = (NULL != face_label ?
- pRecModel->resetFaceExamples(*face_label) :
- pRecModel->resetFaceExamples());
-
- if (MEDIA_VISION_ERROR_NONE != ret)
- {
- LOGE("Error occurred when reset positive examples of the recognition model");
- return ret;
- }
-
- LOGD("The positive examples has been removed from recognition model");
- return ret;
+ if (!recognition_model) {
+ LOGE("Can't reset positive examples for NULL recognition model");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ FaceRecognitionModel *pRecModel =
+ static_cast<FaceRecognitionModel*>(recognition_model);
+
+ if (!pRecModel) {
+ LOGE("Loading of the face recognition model from file failed. "
+ "Incorrect Media Vision Face recognition model handle is used");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ int ret = (NULL != face_label ?
+ pRecModel->resetFaceExamples(*face_label) :
+ pRecModel->resetFaceExamples());
+
+ if (MEDIA_VISION_ERROR_NONE != ret) {
+ LOGE("Error occurred when reset positive examples of the recognition model");
+ return ret;
+ }
+
+ LOGD("The positive examples has been removed from recognition model");
+ return ret;
}
int mv_face_recognition_model_learn_open(
- mv_engine_config_h engine_cfg,
- mv_face_recognition_model_h recognition_model)
+ mv_engine_config_h engine_cfg,
+ mv_face_recognition_model_h recognition_model)
{
- if (!recognition_model)
- {
- LOGE("Can't learn recognition model. Model handle has to be not NULL");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- FaceRecognitionModel *pRecModel =
- static_cast<FaceRecognitionModel*>(recognition_model);
-
- if (!pRecModel)
- {
- LOGE("Learning of the face recognition model failed. "
- "Incorrect Media Vision Face recognition model handle is used");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- RecognitionParams recognitionParams;
- extractRecognitionParams(engine_cfg, recognitionParams);
- FaceRecognitionModelConfig learnConfig;
- learnConfig.mModelType = recognitionParams.mRecognitionAlgType;
-
- const int ret = pRecModel->learn(learnConfig);
-
- if (MEDIA_VISION_ERROR_NONE != ret)
- {
- LOGE("Error occurred when learn face recognition model");
- return ret;
- }
-
- LOGD("Face recognition model has been learned");
- return ret;
+ if (!recognition_model) {
+ LOGE("Can't learn recognition model. Model handle has to be not NULL");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ FaceRecognitionModel *pRecModel =
+ static_cast<FaceRecognitionModel*>(recognition_model);
+
+ if (!pRecModel) {
+ LOGE("Learning of the face recognition model failed. "
+ "Incorrect Media Vision Face recognition model handle is used");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ RecognitionParams recognitionParams;
+ extractRecognitionParams(engine_cfg, recognitionParams);
+ FaceRecognitionModelConfig learnConfig;
+ learnConfig.mModelType = recognitionParams.mRecognitionAlgType;
+
+ const int ret = pRecModel->learn(learnConfig);
+
+ if (MEDIA_VISION_ERROR_NONE != ret) {
+ LOGE("Error occurred when learn face recognition model");
+ return ret;
+ }
+
+ LOGD("Face recognition model has been learned");
+ return ret;
}
int mv_face_recognition_model_query_labels_open(
- mv_face_recognition_model_h recognition_model,
- int **labels,
- unsigned int *number_of_labels)
+ mv_face_recognition_model_h recognition_model,
+ int **labels,
+ unsigned int *number_of_labels)
{
- if (!recognition_model)
- {
- LOGE("Can't get list of labels for NULL recognition model");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- if (NULL == labels || NULL == number_of_labels)
- {
- LOGE("Can't get list of labels. labels and number_of_labels out "
- "parameters both has to be not NULL.");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- FaceRecognitionModel *pRecModel =
- static_cast<FaceRecognitionModel*>(recognition_model);
-
- if (!pRecModel)
- {
- LOGE("Learning of the face recognition model failed. "
- "Incorrect Media Vision Face recognition model handle is used");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- const std::set<int>& learnedLabels = pRecModel->getFaceLabels();
- *number_of_labels = learnedLabels.size();
- (*labels) = new int[*number_of_labels];
-
- std::set<int>::const_iterator it = learnedLabels.begin();
- int i = 0;
- for (; it != learnedLabels.end(); ++it)
- {
- (*labels)[i] = *it;
- ++i;
- }
-
- LOGD("List of the labels learned by the recognition model has been retrieved");
- return MEDIA_VISION_ERROR_NONE;
+ if (!recognition_model) {
+ LOGE("Can't get list of labels for NULL recognition model");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ if (NULL == labels || NULL == number_of_labels) {
+ LOGE("Can't get list of labels. labels and number_of_labels out "
+ "parameters both has to be not NULL.");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ FaceRecognitionModel *pRecModel =
+ static_cast<FaceRecognitionModel*>(recognition_model);
+
+ if (!pRecModel) {
+ LOGE("Learning of the face recognition model failed. "
+ "Incorrect Media Vision Face recognition model handle is used");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ const std::set<int>& learnedLabels = pRecModel->getFaceLabels();
+ *number_of_labels = learnedLabels.size();
+ (*labels) = new int[*number_of_labels];
+
+ std::set<int>::const_iterator it = learnedLabels.begin();
+ int i = 0;
+ for (; it != learnedLabels.end(); ++it) {
+ (*labels)[i] = *it;
+ ++i;
+ }
+
+ LOGD("List of the labels learned by the recognition model has been retrieved");
+ return MEDIA_VISION_ERROR_NONE;
}
int mv_face_tracking_model_create_open(
- mv_face_tracking_model_h *tracking_model)
+ mv_face_tracking_model_h *tracking_model)
{
- if (tracking_model == NULL)
- {
- LOGE("Tracking model can't be created because handle pointer is NULL");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
+ if (tracking_model == NULL) {
+ LOGE("Tracking model can't be created because handle pointer is NULL");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
- (*tracking_model) =
- static_cast<mv_face_tracking_model_h>(new (std::nothrow)FaceTrackingModel());
+ (*tracking_model) =
+ static_cast<mv_face_tracking_model_h>(new (std::nothrow)FaceTrackingModel());
- if (*tracking_model == NULL)
- {
- LOGE("Failed to create media vision tracking model");
- return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
- }
+ if (*tracking_model == NULL) {
+ LOGE("Failed to create media vision tracking model");
+ return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ }
- LOGD("Tracking model [%p] has been created", *tracking_model);
+ LOGD("Tracking model [%p] has been created", *tracking_model);
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int mv_face_tracking_model_destroy_open(
- mv_face_tracking_model_h tracking_model)
+ mv_face_tracking_model_h tracking_model)
{
- if (!tracking_model)
- {
- LOGE("Tracking model can't be destroyed because handle is NULL");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
+ if (!tracking_model) {
+ LOGE("Tracking model can't be destroyed because handle is NULL");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
- LOGD("Destroying media vision tracking model [%p]", tracking_model);
- delete static_cast<FaceTrackingModel*>(tracking_model);
- LOGD("Media vision tracking model has been destroyed");
+ LOGD("Destroying media vision tracking model [%p]", tracking_model);
+ delete static_cast<FaceTrackingModel*>(tracking_model);
+ LOGD("Media vision tracking model has been destroyed");
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int mv_face_tracking_model_prepare_open(
- mv_face_tracking_model_h tracking_model,
- mv_engine_config_h /*engine_cfg*/,
- mv_source_h source,
- mv_quadrangle_s *location)
+ mv_face_tracking_model_h tracking_model,
+ mv_engine_config_h /*engine_cfg*/,
+ mv_source_h source,
+ mv_quadrangle_s *location)
{
- if (!tracking_model)
- {
- LOGE("Can't prepare tracking model. Handle has to be not NULL");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- if (!source)
- {
- LOGE("Can't prepare tracking model. "
- "Media Vision source handle has to be not NULL");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- FaceTrackingModel *pTrackModel =
- static_cast<FaceTrackingModel*>(tracking_model);
-
- if (!pTrackModel)
- {
- LOGE("Preparation of the face tracking model failed. "
- "Incorrect Media Vision Face tracking model handle is used");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- cv::Mat image;
- int ret = convertSourceMV2GrayCV(source, image);
- if (MEDIA_VISION_ERROR_NONE != ret)
- {
- LOGE("Convertion mv_source_h to gray failed");
- return ret;
- }
-
- cv::Rect_<double> roi;
- if (!location)
- {
- ret = pTrackModel->prepare(image);
- }
- else
- {
- int minX = image.cols;
- int minY = image.rows;
- int maxX = 0.0;
- int maxY = 0.0;
- for (unsigned i = 0; i < 4; ++i)
- {
- minX = minX > location->points[i].x ? location->points[i].x : minX;
- minY = minY > location->points[i].y ? location->points[i].y : minY;
- maxX = maxX < location->points[i].x ? location->points[i].x : maxX;
- maxY = maxY < location->points[i].y ? location->points[i].y : maxY;
- }
-
- roi.x = minX;
- roi.y = minY;
- roi.width = maxX - minX;
- roi.height = maxY - minY;
- ret = pTrackModel->prepare(image, roi);
- }
-
- if (MEDIA_VISION_ERROR_NONE != ret)
- {
- LOGE("Error occurred when prepare face tracking model");
- return ret;
- }
-
- LOGD("Face tracking model has been prepared");
-
- return ret;
+ if (!tracking_model) {
+ LOGE("Can't prepare tracking model. Handle has to be not NULL");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ if (!source) {
+ LOGE("Can't prepare tracking model. "
+ "Media Vision source handle has to be not NULL");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ FaceTrackingModel *pTrackModel =
+ static_cast<FaceTrackingModel*>(tracking_model);
+
+ if (!pTrackModel) {
+ LOGE("Preparation of the face tracking model failed. "
+ "Incorrect Media Vision Face tracking model handle is used");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ cv::Mat image;
+ int ret = convertSourceMV2GrayCV(source, image);
+ if (MEDIA_VISION_ERROR_NONE != ret) {
+ LOGE("Convertion mv_source_h to gray failed");
+ return ret;
+ }
+
+ cv::Rect_<double> roi;
+ if (!location) {
+ ret = pTrackModel->prepare(image);
+ } else {
+ int minX = image.cols;
+ int minY = image.rows;
+ int maxX = 0;
+ int maxY = 0;
+ for (unsigned i = 0; i < 4; ++i) {
+ minX = minX > location->points[i].x ? location->points[i].x : minX;
+ minY = minY > location->points[i].y ? location->points[i].y : minY;
+ maxX = maxX < location->points[i].x ? location->points[i].x : maxX;
+ maxY = maxY < location->points[i].y ? location->points[i].y : maxY;
+ }
+
+ roi.x = minX;
+ roi.y = minY;
+ roi.width = maxX - minX;
+ roi.height = maxY - minY;
+ ret = pTrackModel->prepare(image, roi);
+ }
+
+ if (MEDIA_VISION_ERROR_NONE != ret) {
+ LOGE("Error occurred when prepare face tracking model");
+ return ret;
+ }
+
+ LOGD("Face tracking model has been prepared");
+
+ return ret;
}
int mv_face_tracking_model_clone_open(
- mv_face_tracking_model_h src,
- mv_face_tracking_model_h *dst)
+ mv_face_tracking_model_h src,
+ mv_face_tracking_model_h *dst)
{
- if (!src || !dst)
- {
- LOGE("Can't clone tracking model. Both source and destination"
- "tracking model handles has to be not NULL");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
+ if (!src || !dst) {
+ LOGE("Can't clone tracking model. Both source and destination"
+ "tracking model handles has to be not NULL");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
- (*dst) = static_cast<mv_face_tracking_model_h>(new (std::nothrow)FaceTrackingModel());
+ (*dst) = static_cast<mv_face_tracking_model_h>(new (std::nothrow)FaceTrackingModel());
- if (*dst == NULL)
- {
- LOGE("Failed to create media vision tracking model");
- return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
- }
+ if (*dst == NULL) {
+ LOGE("Failed to create media vision tracking model");
+ return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ }
- LOGD("Tracking model [%p] has been created", *dst);
+ LOGD("Tracking model [%p] has been created", *dst);
- const FaceTrackingModel *pSrcModel = static_cast<FaceTrackingModel*>(src);
- FaceTrackingModel *pDstModel = static_cast<FaceTrackingModel*>(*dst);
+ const FaceTrackingModel *pSrcModel = static_cast<FaceTrackingModel*>(src);
+ FaceTrackingModel *pDstModel = static_cast<FaceTrackingModel*>(*dst);
- *pDstModel = *pSrcModel;
+ *pDstModel = *pSrcModel;
- LOGD("Media vision tracking model has been cloned");
+ LOGD("Media vision tracking model has been cloned");
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int mv_face_tracking_model_save_open(
- const char *file_name,
- mv_face_tracking_model_h tracking_model)
+ const char *file_name,
+ mv_face_tracking_model_h tracking_model)
{
- if (!tracking_model)
- {
- LOGE("Can't save tracking model to the file. "
- "Handle has to be not NULL");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- if (NULL == file_name)
- {
- LOGE("Can't save tracking model to the file. "
- "File name has to be specified");
- return MEDIA_VISION_ERROR_INVALID_PATH;
- }
-
- FaceTrackingModel *pTrackModel = static_cast<FaceTrackingModel*>(tracking_model);
-
- if (!pTrackModel)
- {
- LOGE("Saving of the face tracking model to file failed. "
- "Incorrect Media Vision Face tracking model handle is used");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- const int ret = pTrackModel->save(std::string(file_name));
-
- if (MEDIA_VISION_ERROR_NONE != ret)
- {
- LOGE("Error occurred when save tracking model to the file");
- return ret;
- }
-
- LOGD("Media vision tracking model has been saved to the file [%s]", file_name);
-
- return ret;
+ if (!tracking_model) {
+ LOGE("Can't save tracking model to the file. "
+ "Handle has to be not NULL");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ if (NULL == file_name) {
+ LOGE("Can't save tracking model to the file. "
+ "File name has to be specified");
+ return MEDIA_VISION_ERROR_INVALID_PATH;
+ }
+
+ FaceTrackingModel *pTrackModel = static_cast<FaceTrackingModel*>(tracking_model);
+
+ if (!pTrackModel) {
+ LOGE("Saving of the face tracking model to file failed. "
+ "Incorrect Media Vision Face tracking model handle is used");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ const int ret = pTrackModel->save(std::string(file_name));
+
+ if (MEDIA_VISION_ERROR_NONE != ret) {
+ LOGE("Error occurred when save tracking model to the file");
+ return ret;
+ }
+
+ LOGD("Media vision tracking model has been saved to the file [%s]", file_name);
+
+ return ret;
}
int mv_face_tracking_model_load_open(
- const char *file_name,
- mv_face_tracking_model_h *tracking_model)
+ const char *file_name,
+ mv_face_tracking_model_h *tracking_model)
{
- if (!tracking_model)
- {
- LOGE("Can't load tracking model from the file. "
- "Handle has to be not NULL");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- if (NULL == file_name)
- {
- LOGE("Can't load tracking model from the file. "
- "File name has to be specified");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- (*tracking_model) =
- static_cast<mv_face_tracking_model_h>(new (std::nothrow)FaceTrackingModel());
-
- if (*tracking_model == NULL)
- {
- LOGE("Failed to create media vision tracking model");
- return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
- }
-
- FaceTrackingModel *pTrackModel =
- static_cast<FaceTrackingModel*>(*tracking_model);
-
- if (!pTrackModel)
- {
- LOGE("Loading of the face tracking model from file failed. "
- "Incorrect Media Vision Face tracking model handle is used");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- const int ret = pTrackModel->load(std::string(file_name));
-
- if (MEDIA_VISION_ERROR_NONE != ret)
- {
- LOGE("Error occurred when save recognition model to the file");
- return ret;
- }
-
- LOGD("Media vision recognition model has been loaded from the file [%s]", file_name);
-
- return ret;
+ if (!tracking_model) {
+ LOGE("Can't load tracking model from the file. "
+ "Handle has to be not NULL");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ if (NULL == file_name) {
+ LOGE("Can't load tracking model from the file. "
+ "File name has to be specified");
+ return MEDIA_VISION_ERROR_INVALID_PATH;
+ }
+
+ (*tracking_model) =
+ static_cast<mv_face_tracking_model_h>(new (std::nothrow)FaceTrackingModel());
+
+ if (*tracking_model == NULL) {
+ LOGE("Failed to create media vision tracking model");
+ return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ }
+
+ FaceTrackingModel *pTrackModel =
+ static_cast<FaceTrackingModel*>(*tracking_model);
+
+ if (!pTrackModel) {
+ LOGE("Loading of the face tracking model from file failed. "
+ "Incorrect Media Vision Face tracking model handle is used");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ const int ret = pTrackModel->load(std::string(file_name));
+
+ if (MEDIA_VISION_ERROR_NONE != ret) {
+ LOGE("Error occurred when save recognition model to the file");
+ return ret;
+ }
+
+ LOGD("Media vision recognition model has been loaded from the file [%s]", file_name);
+
+ return ret;
}
diff --git a/mv_face/face_lic/include/mv_face_lic.h b/mv_face/face_lic/include/mv_face_lic.h
index dec74b48..42a39af0 100644
--- a/mv_face/face_lic/include/mv_face_lic.h
+++ b/mv_face/face_lic/include/mv_face_lic.h
@@ -62,10 +62,10 @@ extern "C" {
* @see mv_face_detected_cb
*/
int mv_face_detect_lic(
- mv_source_h source,
- mv_engine_config_h engine_cfg,
- mv_face_detected_cb detected_cb,
- void *user_data);
+ mv_source_h source,
+ mv_engine_config_h engine_cfg,
+ mv_face_detected_cb detected_cb,
+ void *user_data);
/********************/
@@ -120,12 +120,12 @@ int mv_face_detect_lic(
* @see mv_face_recognized_cb
*/
int mv_face_recognize_lic(
- mv_source_h source,
- mv_face_recognition_model_h recognition_model,
- mv_engine_config_h engine_cfg,
- mv_rectangle_s *face_location,
- mv_face_recognized_cb recognized_cb,
- void *user_data);
+ mv_source_h source,
+ mv_face_recognition_model_h recognition_model,
+ mv_engine_config_h engine_cfg,
+ mv_rectangle_s *face_location,
+ mv_face_recognized_cb recognized_cb,
+ void *user_data);
/*****************/
@@ -181,12 +181,12 @@ int mv_face_recognize_lic(
* @see mv_face_tracked_cb
*/
int mv_face_track_lic(
- mv_source_h source,
- mv_face_tracking_model_h tracking_model,
- mv_engine_config_h engine_cfg,
- mv_face_tracked_cb tracked_cb,
- bool do_learn,
- void *user_data);
+ mv_source_h source,
+ mv_face_tracking_model_h tracking_model,
+ mv_engine_config_h engine_cfg,
+ mv_face_tracked_cb tracked_cb,
+ bool do_learn,
+ void *user_data);
/********************************/
@@ -222,11 +222,11 @@ int mv_face_track_lic(
* @see mv_face_eye_condition_recognized_cb
*/
int mv_face_eye_condition_recognize_lic(
- mv_source_h source,
- mv_engine_config_h engine_cfg,
- mv_rectangle_s face_location,
- mv_face_eye_condition_recognized_cb eye_condition_recognized_cb,
- void *user_data);
+ mv_source_h source,
+ mv_engine_config_h engine_cfg,
+ mv_rectangle_s face_location,
+ mv_face_eye_condition_recognized_cb eye_condition_recognized_cb,
+ void *user_data);
/************************************/
@@ -261,11 +261,11 @@ int mv_face_eye_condition_recognize_lic(
* @see mv_face_facial_expression_recognized_cb
*/
int mv_face_facial_expression_recognize_lic(
- mv_source_h source,
- mv_engine_config_h engine_cfg,
- mv_rectangle_s face_location,
- mv_face_facial_expression_recognized_cb expression_recognized_cb,
- void *user_data);
+ mv_source_h source,
+ mv_engine_config_h engine_cfg,
+ mv_rectangle_s face_location,
+ mv_face_facial_expression_recognized_cb expression_recognized_cb,
+ void *user_data);
/*******************************/
/* Recognition model behavior */
@@ -307,7 +307,7 @@ int mv_face_facial_expression_recognize_lic(
* @see mv_face_recognition_model_destroy_lic()
*/
int mv_face_recognition_model_create_lic(
- mv_face_recognition_model_h *recognition_model);
+ mv_face_recognition_model_h *recognition_model);
/**
* @brief Destroys the face recognition model handle and releases all its
@@ -326,7 +326,7 @@ int mv_face_recognition_model_create_lic(
* @see mv_face_recognition_model_create_lic()
*/
int mv_face_recognition_model_destroy_lic(
- mv_face_recognition_model_h recognition_model);
+ mv_face_recognition_model_h recognition_model);
/**
* @brief Creates a copy of existed recognition model handle and clones all its
@@ -350,8 +350,8 @@ int mv_face_recognition_model_destroy_lic(
* @see mv_face_recognition_model_create_lic()
*/
int mv_face_recognition_model_clone_lic(
- mv_face_recognition_model_h src,
- mv_face_recognition_model_h *dst);
+ mv_face_recognition_model_h src,
+ mv_face_recognition_model_h *dst);
/**
* @brief Saves recognition model to the file.
@@ -381,8 +381,8 @@ int mv_face_recognition_model_clone_lic(
* @see mv_face_recognition_model_create_lic()
*/
int mv_face_recognition_model_save_lic(
- const char *file_name,
- mv_face_recognition_model_h recognition_model);
+ const char *file_name,
+ mv_face_recognition_model_h recognition_model);
/**
* @brief Loads recognition model from file.
@@ -413,8 +413,8 @@ int mv_face_recognition_model_save_lic(
* @see mv_face_recognition_model_create_lic()
*/
int mv_face_recognition_model_load_lic(
- const char *file_name,
- mv_face_recognition_model_h *recognition_model);
+ const char *file_name,
+ mv_face_recognition_model_h *recognition_model);
/**
* @brief Adds face image example to be used for face recognition model learning
@@ -455,10 +455,10 @@ int mv_face_recognition_model_load_lic(
* @see mv_face_recognition_model_learn_lic()
*/
int mv_face_recognition_model_add_lic(
- const mv_source_h source,
- mv_face_recognition_model_h recognition_model,
- const mv_rectangle_s *example_location,
- int face_label);
+ const mv_source_h source,
+ mv_face_recognition_model_h recognition_model,
+ const mv_rectangle_s *example_location,
+ int face_label);
/**
* @brief Remove from @a recognition_model all collected with
@@ -493,8 +493,8 @@ int mv_face_recognition_model_add_lic(
* @see mv_face_recognition_model_learn_lic()
*/
int mv_face_recognition_model_reset_lic(
- mv_face_recognition_model_h recognition_model,
- const int *face_label);
+ mv_face_recognition_model_h recognition_model,
+ const int *face_label);
/**
* @brief Learns face recognition model.
@@ -549,8 +549,8 @@ int mv_face_recognition_model_reset_lic(
* @see mv_face_recognize_lic()
*/
int mv_face_recognition_model_learn_lic(
- mv_engine_config_h engine_cfg,
- mv_face_recognition_model_h recognition_model);
+ mv_engine_config_h engine_cfg,
+ mv_face_recognition_model_h recognition_model);
/**
* @brief Queries labels list and number of labels had been learned by the model.
@@ -580,9 +580,9 @@ int mv_face_recognition_model_learn_lic(
* @see mv_face_recognition_model_learn_lic()
*/
int mv_face_recognition_model_query_labels_lic(
- mv_face_recognition_model_h recognition_model,
- int **labels,
- unsigned int *number_of_labels);
+ mv_face_recognition_model_h recognition_model,
+ int **labels,
+ unsigned int *number_of_labels);
/***************************/
/* Tracking model behavior */
@@ -622,7 +622,7 @@ int mv_face_recognition_model_query_labels_lic(
* @see mv_face_tracking_model_load_lic()
*/
int mv_face_tracking_model_create_lic(
- mv_face_tracking_model_h *tracking_model);
+ mv_face_tracking_model_h *tracking_model);
/**
* @brief Calls this function to destroy the face tracking model handle and
@@ -641,7 +641,7 @@ int mv_face_tracking_model_create_lic(
* @see mv_face_tracking_model_create_lic()
*/
int mv_face_tracking_model_destroy_lic(
- mv_face_tracking_model_h tracking_model);
+ mv_face_tracking_model_h tracking_model);
/**
* @brief Calls this function to initialize tracking model by the location of the
@@ -686,10 +686,10 @@ int mv_face_tracking_model_destroy_lic(
* @see mv_face_track_lic()
*/
int mv_face_tracking_model_prepare_lic(
- mv_face_tracking_model_h tracking_model,
- mv_engine_config_h engine_cfg,
- mv_source_h source,
- mv_quadrangle_s *location);
+ mv_face_tracking_model_h tracking_model,
+ mv_engine_config_h engine_cfg,
+ mv_source_h source,
+ mv_quadrangle_s *location);
/**
* @brief Calls this function to make a copy of existed tracking model handle and
@@ -712,8 +712,8 @@ int mv_face_tracking_model_prepare_lic(
* @see mv_face_tracking_model_create_lic()
*/
int mv_face_tracking_model_clone_lic(
- mv_face_tracking_model_h src,
- mv_face_tracking_model_h *dst);
+ mv_face_tracking_model_h src,
+ mv_face_tracking_model_h *dst);
/**
* @brief Calls this method to save tracking model to the file.
@@ -739,8 +739,8 @@ int mv_face_tracking_model_clone_lic(
* @see mv_face_tracking_model_create_lic()
*/
int mv_face_tracking_model_save_lic(
- const char *file_name,
- mv_face_tracking_model_h tracking_model);
+ const char *file_name,
+ mv_face_tracking_model_h tracking_model);
/**
* @brief Calls this method to load a tracking model from file.
@@ -768,8 +768,8 @@ int mv_face_tracking_model_save_lic(
* @see mv_face_tracking_model_create_lic()
*/
int mv_face_tracking_model_load_lic(
- const char *file_name,
- mv_face_tracking_model_h *tracking_model);
+ const char *file_name,
+ mv_face_tracking_model_h *tracking_model);
#ifdef __cplusplus
}
diff --git a/mv_face/face_lic/src/mv_face_lic.c b/mv_face/face_lic/src/mv_face_lic.c
index 00a516eb..e37f367f 100644
--- a/mv_face/face_lic/src/mv_face_lic.c
+++ b/mv_face/face_lic/src/mv_face_lic.c
@@ -21,12 +21,12 @@
/******************/
int mv_face_detect_lic(
- mv_source_h source,
- mv_engine_config_h engine_cfg,
- mv_face_detected_cb detected_cb,
- void *user_data)
+ mv_source_h source,
+ mv_engine_config_h engine_cfg,
+ mv_face_detected_cb detected_cb,
+ void *user_data)
{
- return MEDIA_VISION_ERROR_NOT_SUPPORTED;
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED;
}
@@ -35,14 +35,14 @@ int mv_face_detect_lic(
/********************/
int mv_face_recognize_lic(
- mv_source_h source,
- mv_face_recognition_model_h recognition_model,
- mv_engine_config_h engine_cfg,
- mv_rectangle_s *face_location,
- mv_face_recognized_cb recognized_cb,
- void *user_data)
+ mv_source_h source,
+ mv_face_recognition_model_h recognition_model,
+ mv_engine_config_h engine_cfg,
+ mv_rectangle_s *face_location,
+ mv_face_recognized_cb recognized_cb,
+ void *user_data)
{
- return MEDIA_VISION_ERROR_NOT_SUPPORTED;
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED;
}
@@ -51,14 +51,14 @@ int mv_face_recognize_lic(
/*****************/
int mv_face_track_lic(
- mv_source_h source,
- mv_face_tracking_model_h tracking_model,
- mv_engine_config_h engine_cfg,
- mv_face_tracked_cb tracked_cb,
- bool do_learn,
- void *user_data)
+ mv_source_h source,
+ mv_face_tracking_model_h tracking_model,
+ mv_engine_config_h engine_cfg,
+ mv_face_tracked_cb tracked_cb,
+ bool do_learn,
+ void *user_data)
{
- return MEDIA_VISION_ERROR_NOT_SUPPORTED;
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED;
}
@@ -67,13 +67,13 @@ int mv_face_track_lic(
/********************************/
int mv_face_eye_condition_recognize_lic(
- mv_source_h source,
- mv_engine_config_h engine_cfg,
- mv_rectangle_s face_location,
- mv_face_eye_condition_recognized_cb eye_condition_recognized_cb,
- void *user_data)
+ mv_source_h source,
+ mv_engine_config_h engine_cfg,
+ mv_rectangle_s face_location,
+ mv_face_eye_condition_recognized_cb eye_condition_recognized_cb,
+ void *user_data)
{
- return MEDIA_VISION_ERROR_NOT_SUPPORTED;
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED;
}
@@ -82,13 +82,13 @@ int mv_face_eye_condition_recognize_lic(
/************************************/
int mv_face_facial_expression_recognize_lic(
- mv_source_h source,
- mv_engine_config_h engine_cfg,
- mv_rectangle_s face_location,
- mv_face_facial_expression_recognized_cb expression_recognized_cb,
- void *user_data)
+ mv_source_h source,
+ mv_engine_config_h engine_cfg,
+ mv_rectangle_s face_location,
+ mv_face_facial_expression_recognized_cb expression_recognized_cb,
+ void *user_data)
{
- return MEDIA_VISION_ERROR_NOT_SUPPORTED;
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED;
}
@@ -97,67 +97,67 @@ int mv_face_facial_expression_recognize_lic(
/******************************/
int mv_face_recognition_model_create_lic(
- mv_face_recognition_model_h *recognition_model)
+ mv_face_recognition_model_h *recognition_model)
{
- return MEDIA_VISION_ERROR_NOT_SUPPORTED;
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED;
}
int mv_face_recognition_model_destroy_lic(
- mv_face_recognition_model_h recognition_model)
+ mv_face_recognition_model_h recognition_model)
{
- return MEDIA_VISION_ERROR_NOT_SUPPORTED;
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED;
}
int mv_face_recognition_model_clone_lic(
- mv_face_recognition_model_h src,
- mv_face_recognition_model_h *dst)
+ mv_face_recognition_model_h src,
+ mv_face_recognition_model_h *dst)
{
- return MEDIA_VISION_ERROR_NOT_SUPPORTED;
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED;
}
int mv_face_recognition_model_save_lic(
- const char *file_name,
- mv_face_recognition_model_h recognition_model)
+ const char *file_name,
+ mv_face_recognition_model_h recognition_model)
{
- return MEDIA_VISION_ERROR_NOT_SUPPORTED;
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED;
}
int mv_face_recognition_model_load_lic(
- const char *file_name,
- mv_face_recognition_model_h recognition_model_h)
+ const char *file_name,
+ mv_face_recognition_model_h recognition_model_h)
{
- return MEDIA_VISION_ERROR_NOT_SUPPORTED;
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED;
}
int mv_face_recognition_model_add_lic(
- const mv_source_h source,
- mv_face_recognition_model_h recognition_model,
- const mv_rectangle_s *example_location,
- int face_label)
+ const mv_source_h source,
+ mv_face_recognition_model_h recognition_model,
+ const mv_rectangle_s *example_location,
+ int face_label)
{
- return MEDIA_VISION_ERROR_NOT_SUPPORTED;
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED;
}
int mv_face_recognition_model_reset_lic(
- mv_face_recognition_model_h recognition_model,
- const int *face_label)
+ mv_face_recognition_model_h recognition_model,
+ const int *face_label)
{
- return MEDIA_VISION_ERROR_NOT_SUPPORTED;
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED;
}
int mv_face_recognition_model_learn_lic(
- mv_engine_config_h engine_cfg,
- mv_face_recognition_model_h recognition_model)
+ mv_engine_config_h engine_cfg,
+ mv_face_recognition_model_h recognition_model)
{
- return MEDIA_VISION_ERROR_NOT_SUPPORTED;
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED;
}
int mv_face_recognition_model_query_labels_lic(
- mv_face_recognition_model_h recognition_model,
- int **labels,
- unsigned int *number_of_labels)
+ mv_face_recognition_model_h recognition_model,
+ int **labels,
+ unsigned int *number_of_labels)
{
- return MEDIA_VISION_ERROR_NOT_SUPPORTED;
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED;
}
@@ -166,43 +166,43 @@ int mv_face_recognition_model_query_labels_lic(
/***************************/
int mv_face_tracking_model_create_lic(
- mv_face_tracking_model_h *tracking_model)
+ mv_face_tracking_model_h *tracking_model)
{
- return MEDIA_VISION_ERROR_NOT_SUPPORTED;
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED;
}
int mv_face_tracking_model_destroy_lic(
- mv_face_tracking_model_h tracking_model)
+ mv_face_tracking_model_h tracking_model)
{
- return MEDIA_VISION_ERROR_NOT_SUPPORTED;
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED;
}
int mv_face_tracking_model_prepare_lic(
- mv_face_tracking_model_h tracking_model,
- mv_engine_config_h engine_cfg,
- mv_source_h source,
- mv_quadrangle_s *location)
+ mv_face_tracking_model_h tracking_model,
+ mv_engine_config_h engine_cfg,
+ mv_source_h source,
+ mv_quadrangle_s *location)
{
- return MEDIA_VISION_ERROR_NOT_SUPPORTED;
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED;
}
int mv_face_tracking_model_clone_lic(
- mv_face_tracking_model_h src,
- mv_face_tracking_model_h *dst)
+ mv_face_tracking_model_h src,
+ mv_face_tracking_model_h *dst)
{
- return MEDIA_VISION_ERROR_NOT_SUPPORTED;
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED;
}
int mv_face_tracking_model_save_lic(
- const char *file_name,
- mv_face_tracking_model_h tracking_model)
+ const char *file_name,
+ mv_face_tracking_model_h tracking_model)
{
- return MEDIA_VISION_ERROR_NOT_SUPPORTED;
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED;
}
int mv_face_tracking_model_load_lic(
- const char *file_name,
- mv_face_tracking_model_h tracking_model)
+ const char *file_name,
+ mv_face_tracking_model_h tracking_model)
{
- return MEDIA_VISION_ERROR_NOT_SUPPORTED;
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED;
}
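All of the *_lic stubs above return MEDIA_VISION_ERROR_NOT_SUPPORTED. Below is a hypothetical sketch of how a caller could fall back to the open backend, assuming both sets of entry points (mv_face_track_lic() as declared in this patch and the matching mv_face_track_open()) are visible to it and that C99 bool is available; the dispatch itself is not part of this change:

	static int track_with_fallback(
			mv_source_h source,
			mv_face_tracking_model_h tracking_model,
			mv_engine_config_h engine_cfg,
			mv_face_tracked_cb tracked_cb,
			void *user_data)
	{
		/* Try the licensed backend first; it is currently a stub. */
		int err = mv_face_track_lic(source, tracking_model, engine_cfg,
				tracked_cb, false /* do_learn */, user_data);

		if (err == MEDIA_VISION_ERROR_NOT_SUPPORTED)
			err = mv_face_track_open(source, tracking_model, engine_cfg,
					tracked_cb, false /* do_learn */, user_data);

		return err;
	}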
diff --git a/mv_image/image/include/ImageConfig.h b/mv_image/image/include/ImageConfig.h
index 2d43430f..8f1b3480 100644
--- a/mv_image/image/include/ImageConfig.h
+++ b/mv_image/image/include/ImageConfig.h
@@ -24,28 +24,24 @@
* @brief This file contains Image Module utility.
*/
-namespace MediaVision
-{
-namespace Image
-{
-
+namespace MediaVision {
+namespace Image {
/**
* @brief Contains parameters for features extracting from image objects.
*
* @since_tizen 3.0
*/
-struct FeaturesExtractingParams
-{
- FeaturesExtractingParams(
- double scaleFactor,
- int maximumFeaturesNumber);
+struct FeaturesExtractingParams {
+ FeaturesExtractingParams(
+ double scaleFactor,
+ int maximumFeaturesNumber);
- FeaturesExtractingParams();
+ FeaturesExtractingParams();
- double mScaleFactor; /**< Recognition scale factor for the ORB detector. */
+ double mScaleFactor; /**< Recognition scale factor for the ORB detector. */
- int mMaximumFeaturesNumber; /**< Maximum number of features, which will be
- extracted from object image. */
+ int mMaximumFeaturesNumber; /**< Maximum number of features, which will be
+ extracted from object image. */
};
/**
@@ -53,24 +49,23 @@ struct FeaturesExtractingParams
*
* @since_tizen 3.0
*/
-struct RecognitionParams
-{
- RecognitionParams(
- int minMatchesNumber,
- double requiredMatchesPart,
- double allowableMatchesPartError);
+struct RecognitionParams {
+ RecognitionParams(
+ int minMatchesNumber,
+ double requiredMatchesPart,
+ double allowableMatchesPartError);
- RecognitionParams();
+ RecognitionParams();
- int mMinMatchesNumber; /**< The minimum matches number, which
- will be taken into account for image objects recognition. */
+ int mMinMatchesNumber; /**< The minimum matches number, which
+ will be taken into account for image objects recognition. */
- double mRequiredMatchesPart; /**< The part of matches, which will be taken
- into account for image objects recognition. Too low value will
- result in unsustainable behavior, but effect of object overlapping
- will be reduced. Value can be from 0 to 1.*/
+ double mRequiredMatchesPart; /**< The part of matches, which will be taken
+ into account for image objects recognition. Too low value will
+ result in unsustainable behavior, but effect of object overlapping
+ will be reduced. Value can be from 0 to 1.*/
- double mAllowableMatchesPartError; /**< Allowable error of matches number. */
+ double mAllowableMatchesPartError; /**< Allowable error of matches number. */
};
/**
@@ -79,29 +74,31 @@ struct RecognitionParams
*
* @since_tizen 3.0
*/
-struct StabilizationParams
-{
- StabilizationParams(
- int historyAmount,
- double allowableShift,
- double stabilizationSpeed,
- double stabilizationAcceleration);
-
- StabilizationParams();
-
- int mHistoryAmount; /**< Number of previous recognition results, which
- will influence the stabilization. */
-
- double mAllowableShift; /**< Relative value of maximum shift per one frame,
- which will be ignored by stabilization (relative to the object size
- in the current frame). */
-
- double mStabilizationSpeed; /**< Start speed with which the object will be
- stabilized. */
-
- double mStabilizationAcceleration; /**< Acceleration with which the object
- will be stabilized. (relative to the distance from current location
- to stabilized location). Value can be from 0 to 1.*/
+struct StabilizationParams {
+ StabilizationParams(
+ int historyAmount,
+ double allowableShift,
+ double stabilizationSpeed,
+ double stabilizationAcceleration);
+
+ StabilizationParams();
+
+ int mHistoryAmount; /**< Number of previous recognition results, which
+ will influence the stabilization. */
+
+ double mAllowableShift; /**< Relative value of maximum shift per one frame,
+ which will be ignored by stabilization.
+ It is relative to the object size
+ in the current frame. */
+
+ double mStabilizationSpeed; /**< Start speed with which the object will be
+ stabilized. */
+
+ double mStabilizationAcceleration; /**< Acceleration with which the object
+ will be stabilized.
+ It is relative to the distance from
+ current location to stabilized location.
+ Value can be from 0 to 1.*/
};
/**
@@ -109,28 +106,28 @@ struct StabilizationParams
*
* @since_tizen 3.0
*/
-struct TrackingParams
-{
- TrackingParams(
- FeaturesExtractingParams framesFeaturesExtractingParams,
- RecognitionParams recognitionParams,
- StabilizationParams stabilizationParams,
- double expectedOffset);
+struct TrackingParams {
+ TrackingParams(
+ FeaturesExtractingParams framesFeaturesExtractingParams,
+ RecognitionParams recognitionParams,
+ StabilizationParams stabilizationParams,
+ double expectedOffset);
- TrackingParams();
+ TrackingParams();
- FeaturesExtractingParams mFramesFeaturesExtractingParams; /**< Parameters
- for extracting features from frames. */
+ FeaturesExtractingParams mFramesFeaturesExtractingParams; /**< Parameters
+ for extracting features
+ from frames. */
- RecognitionParams mRecognitionParams; /**< Parameters for intermediate
- recognition. */
+ RecognitionParams mRecognitionParams; /**< Parameters for intermediate
+ recognition. */
- StabilizationParams mStabilizationParams; /**< Parameters for contour
- stabilization during tracking. */
+ StabilizationParams mStabilizationParams; /**< Parameters for contour
+ stabilization during tracking. */
- double mExpectedOffset; /**< Relative offset value, for which expected the
- object offset. (relative to the object size in the current
- frame). */
+ double mExpectedOffset; /**< Expected relative offset of the object
+ between consecutive frames. The value is
+ relative to the object size in the current frame. */
};
} /* Image */
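For orientation, a brief sketch of filling the tracking configuration above through its public members (plain structs with default constructors). The field names come from the declarations in this patch; the numeric values are placeholders, not recommended defaults:

	MediaVision::Image::TrackingParams params;

	// Feature extraction for each incoming frame
	params.mFramesFeaturesExtractingParams.mScaleFactor = 1.2;
	params.mFramesFeaturesExtractingParams.mMaximumFeaturesNumber = 800;

	// Intermediate recognition thresholds
	params.mRecognitionParams.mMinMatchesNumber = 30;
	params.mRecognitionParams.mRequiredMatchesPart = 0.5;
	params.mRecognitionParams.mAllowableMatchesPartError = 0.1;

	// Contour stabilization behaviour
	params.mStabilizationParams.mHistoryAmount = 3;
	params.mStabilizationParams.mAllowableShift = 0.02;
	params.mStabilizationParams.mStabilizationSpeed = 0.3;
	params.mStabilizationParams.mStabilizationAcceleration = 0.1;

	// Expected per-frame offset, relative to the object size
	params.mExpectedOffset = 0.1;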
diff --git a/mv_image/image/include/ImageContourStabilizator.h b/mv_image/image/include/ImageContourStabilizator.h
index 1fae7979..4d159cd3 100644
--- a/mv_image/image/include/ImageContourStabilizator.h
+++ b/mv_image/image/include/ImageContourStabilizator.h
@@ -27,11 +27,8 @@
* during tracking.
*/
-namespace MediaVision
-{
-namespace Image
-{
-
+namespace MediaVision {
+namespace Image {
/**
* @class ImageContourStabilizator
* @brief This class contains functionality for image contour stabilization
@@ -39,61 +36,57 @@ namespace Image
*
* @since_tizen 3.0
*/
-class ImageContourStabilizator
-{
+class ImageContourStabilizator {
public:
-
- /**
- * @brief @ref ImageContourStabilizator default constructor.
- *
- * @since_tizen 3.0
- */
- ImageContourStabilizator();
-
- /**
- * @brief Stabilizes @a contour.
- *
- * @since_tizen 3.0
- * @remarks Call this function alternately for each contour from sequence
- * @param [in,out] contour @ref contour, which will be stabilized
- * @param [in] params configuration parameters
- * @return true if contour is stabilized, otherwise return false
- */
- bool stabilize(
- std::vector<cv::Point2f>& contour,
- const StabilizationParams& params);
-
- /**
- * @brief Resets stabilization process.
- *
- * @since_tizen 3.0
- * @remarks Call it before starting track on the new sequence of contours.
- */
- void reset(void);
+ /**
+ * @brief @ref ImageContourStabilizator default constructor.
+ *
+ * @since_tizen 3.0
+ */
+ ImageContourStabilizator();
+
+ /**
+ * @brief Stabilizes @a contour.
+ *
+ * @since_tizen 3.0
+ * @remarks Call this function alternately for each contour from sequence
+ * @param [in,out] contour @ref contour, which will be stabilized
+ * @param [in] params configuration parameters
+ * @return true if contour is stabilized, otherwise return false
+ */
+ bool stabilize(
+ std::vector<cv::Point2f>& contour,
+ const StabilizationParams& params);
+
+ /**
+ * @brief Resets stabilization process.
+ *
+ * @since_tizen 3.0
+ * @remarks Call it before starting track on the new sequence of contours.
+ */
+ void reset(void);
private:
-
- std::vector<cv::Point2f> computeStabilizedQuadrangleContour(void);
+ std::vector<cv::Point2f> computeStabilizedQuadrangleContour(void);
private:
+ static const size_t MovingHistoryAmount = 3u;
- static const size_t MovingHistoryAmount = 3u;
-
- std::vector<float> m_speeds;
+ std::vector<float> m_speeds;
- std::vector<size_t> m_currentCornersSpeed;
+ std::vector<size_t> m_currentCornersSpeed;
- std::deque<std::vector<cv::Point2f> > m_movingHistory;
+ std::deque<std::vector<cv::Point2f> > m_movingHistory;
- std::vector<cv::Point2f> m_lastStabilizedContour;
+ std::vector<cv::Point2f> m_lastStabilizedContour;
- size_t m_currentHistoryAmount;
+ size_t m_currentHistoryAmount;
- int m_tempContourIndex;
+ int m_tempContourIndex;
- std::vector<float> m_priorities;
+ std::vector<float> m_priorities;
- bool m_isPrepared;
+ bool m_isPrepared;
};
} /* Image */
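A minimal sketch of the intended call pattern for the stabilizer declared above, assuming one contour per frame. As the implementation later in this patch shows, stabilize() returns false until the internal moving history is filled (or when the contour is not a quadrangle), in which case the caller simply keeps the raw contour.

#include "ImageContourStabilizator.h"
#include "ImageConfig.h"

#include <vector>

using namespace MediaVision::Image;

void stabilizeSequence(std::vector<std::vector<cv::Point2f> >& contours,
		const StabilizationParams& params)
{
	ImageContourStabilizator stabilizator;

	/* start of a new sequence of contours */
	stabilizator.reset();

	for (size_t i = 0u; i < contours.size(); ++i) {
		/* contours[i] is replaced by the stabilized contour once
		   enough history has been accumulated; otherwise the raw
		   contour is kept as-is */
		stabilizator.stabilize(contours[i], params);
	}
}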
diff --git a/mv_image/image/include/ImageMathUtil.h b/mv_image/image/include/ImageMathUtil.h
index ebc95d70..f839ac97 100644
--- a/mv_image/image/include/ImageMathUtil.h
+++ b/mv_image/image/include/ImageMathUtil.h
@@ -24,15 +24,12 @@
* @brief This file contains math utility for Image Module.
*/
-namespace MediaVision
-{
-namespace Image
-{
-
+namespace MediaVision {
+namespace Image {
const size_t MinimumNumberOfFeatures = 4u; /* Minimum number of features
- when perspective transform
- parameters calculation
- have sense */
+ when perspective transform
+ parameters calculation
+ have sense */
const size_t NumberOfQuadrangleCorners = 4u; /* Number of quadrangle corneres */
@@ -45,8 +42,8 @@ const size_t NumberOfQuadrangleCorners = 4u; /* Number of quadrangle corneres */
* @return distance between two points
*/
float getDistance(
- const cv::Point2f& point1,
- const cv::Point2f& point2);
+ const cv::Point2f& point1,
+ const cv::Point2f& point2);
/**
* @brief Calculates area of triangle.
@@ -58,9 +55,9 @@ float getDistance(
* @return area of triangle
*/
float getTriangleArea(
- const cv::Point2f& point1,
- const cv::Point2f& point2,
- const cv::Point2f& point3);
+ const cv::Point2f& point1,
+ const cv::Point2f& point2,
+ const cv::Point2f& point3);
/**
* @brief Calculates area of quadrangle.
@@ -70,7 +67,7 @@ float getTriangleArea(
* @return area of quadrangle
*/
float getQuadrangleArea(
- const cv::Point2f points[NumberOfQuadrangleCorners]);
+ const cv::Point2f points[NumberOfQuadrangleCorners]);
} /* Image */
} /* MediaVision */
diff --git a/mv_image/image/include/ImageObject.h b/mv_image/image/include/ImageObject.h
index a4945540..4e33e558 100644
--- a/mv_image/image/include/ImageObject.h
+++ b/mv_image/image/include/ImageObject.h
@@ -26,11 +26,8 @@
* @brief This file contains the @ref ImageObject class.
*/
-namespace MediaVision
-{
-namespace Image
-{
-
+namespace MediaVision {
+namespace Image {
/**
* @class ImageObject
* @brief This class contains the image information, which will
@@ -38,180 +35,174 @@ namespace Image
*
* @since_tizen 3.0
*/
-class ImageObject
-{
-
+class ImageObject {
public:
-
- /**
- * @brief @ref ImageObject default constructor.
- *
- * @since_tizen 3.0
- */
- ImageObject();
-
- /**
- * @brief @ref ImageObject constructor based on image.
- *
- * @since_tizen 3.0
- * @remarks Detects keypoints and extracts features from image and creates
- * new @ref ImageObject
- * @param [in] image The image for which instance of @ref ImageObject
- * will be created
- * @param [in] params Features extracting parameters
- */
- ImageObject(const cv::Mat& image, const FeaturesExtractingParams& params);
-
- /**
- * @brief @ref ImageObject copy constructor.
- * @details Creates copy of @ref ImageObject
- *
- * @since_tizen 3.0
- * @param [in] copy @ref ImageObject which will be copied
- */
- ImageObject(const ImageObject& copy);
-
- /**
- * @brief @ref ImageObject copy assignment operator.
- * @details Fills the information based on the @a copy
- *
- * @since_tizen 3.0
- * @param [in] copy @ref ImageObject which will be copied
- *
- */
- ImageObject& operator=(const ImageObject& copy);
-
- /**
- * @brief @ref ImageObject destructor.
- *
- * @since_tizen 3.0
- */
- virtual ~ImageObject();
-
- /**
- * @brief Fills @ref ImageObject class based on image.
- * @details Detects keypoints and extracts features from image and creates
- * new @ref ImageObject
- *
- * @since_tizen 3.0
- * @param [in] image The image for which instance of @ref ImageObject
- * will be created
- * @param [in] params Features extracting parameters
- */
- void fill(const cv::Mat& image, const FeaturesExtractingParams& params);
-
- /**
- * @brief Fills @ref ImageObject class based on image.
- * @details Detects keypoints and extracts features from image and creates
- * new @ref ImageObject
- *
- * @since_tizen 3.0
- * @param [in] image The image for which instance of @ref
- * ImageObject will be created
- * @param [in] boundingBox Bounding box of the object being analyzed in
- * the @a image
- * @param [in] params Features extracting parameters
- * @return @a true on success, otherwise a @a false value
- * @retval true Successful
- * @retval false Invalid ROI (bounding box)
- */
- bool fill(
- const cv::Mat& image,
- const cv::Rect& boundingBox,
- const FeaturesExtractingParams& params);
-
- /**
- * @brief Gets a value that determines how well an @ref ImageObject can be recognized.
- * @details Confidence can be from 0 to 1. If the recognition rate is 0 object can
- * not be recognized
- *
- * @since_tizen 3.0
- * @return A value that determines how well an @ref ImageObject can be recognized.
- */
- float getRecognitionRate(void) const;
-
- /**
- * @brief Check whether the object is filled.
- * @details Image object is empty if it wasn't filled.
- *
- * @since_tizen 3.0
- * @remarks Empty object can not be recognized or tracked. Fill the object
- * by using corresponding constructor or function @ref fill() to
- * make image object valid. Also you can load image object which is
- * not empty by using @ref load().
- * @return @c false if object is filled, otherwise return @c true
- */
- bool isEmpty() const;
-
- /**
- * @brief Sets a label for the image object.
- *
- * @since_tizen 3.0
- * @param [in] label The label which will be assigned to the image object
- */
- void setLabel(int label);
-
- /**
- * @brief Gets a label of object.
- *
- * @since_tizen 3.0
- * @param [out] label The label of image object
- * @return @c true if object is labeled, otherwise return @c false
- */
- bool getLabel(int& label) const;
-
- /**
- * @brief Stores the @ref ImageObject in a file.
- *
- * @since_tizen 3.0
- * @param [in] fileName File name which will be generated
- * @return @a 0 on success, otherwise a negative error value
- */
- int save(const char *fileName) const;
-
- /**
- * @brief Loads the @ref ImageObject from the file.
- *
- * @since_tizen 3.0
- * @param [in] fileName File name from which will be loaded an @ref ImageObject
- * @return @a 0 on success, otherwise a negative error value
- */
- int load(const char *fileName);
+ /**
+ * @brief @ref ImageObject default constructor.
+ *
+ * @since_tizen 3.0
+ */
+ ImageObject();
+
+ /**
+ * @brief @ref ImageObject constructor based on image.
+ *
+ * @since_tizen 3.0
+ * @remarks Detects keypoints and extracts features from image and creates
+ * new @ref ImageObject
+ * @param [in] image The image for which instance of @ref ImageObject
+ * will be created
+ * @param [in] params Features extracting parameters
+ */
+ ImageObject(const cv::Mat& image, const FeaturesExtractingParams& params);
+
+ /**
+ * @brief @ref ImageObject copy constructor.
+ * @details Creates copy of @ref ImageObject
+ *
+ * @since_tizen 3.0
+ * @param [in] copy @ref ImageObject which will be copied
+ */
+ ImageObject(const ImageObject& copy);
+
+ /**
+ * @brief @ref ImageObject copy assignment operator.
+ * @details Fills the information based on the @a copy
+ *
+ * @since_tizen 3.0
+ * @param [in] copy @ref ImageObject which will be copied
+ *
+ */
+ ImageObject& operator=(const ImageObject& copy);
+
+ /**
+ * @brief @ref ImageObject destructor.
+ *
+ * @since_tizen 3.0
+ */
+ virtual ~ImageObject();
+
+ /**
+ * @brief Fills @ref ImageObject class based on image.
+ * @details Detects keypoints and extracts features from image and creates
+ * new @ref ImageObject
+ *
+ * @since_tizen 3.0
+ * @param [in] image The image for which instance of @ref ImageObject
+ * will be created
+ * @param [in] params Features extracting parameters
+ */
+ void fill(const cv::Mat& image, const FeaturesExtractingParams& params);
+
+ /**
+ * @brief Fills @ref ImageObject class based on image.
+ * @details Detects keypoints and extracts features from image and creates
+ * new @ref ImageObject
+ *
+ * @since_tizen 3.0
+ * @param [in] image The image for which instance of @ref
+ * ImageObject will be created
+ * @param [in] boundingBox Bounding box of the object being analyzed in
+ * the @a image
+ * @param [in] params Features extracting parameters
+ * @return @a true on success, otherwise a @a false value
+ * @retval true Successful
+ * @retval false Invalid ROI (bounding box)
+ */
+ bool fill(
+ const cv::Mat& image,
+ const cv::Rect& boundingBox,
+ const FeaturesExtractingParams& params);
+
+ /**
+ * @brief Gets a value that determines how well an @ref ImageObject can be recognized.
+ * @details Confidence can be from 0 to 1. If the recognition rate is 0, the
+ * object can not be recognized
+ *
+ * @since_tizen 3.0
+ * @return A value that determines how well an @ref ImageObject can be recognized.
+ */
+ float getRecognitionRate(void) const;
+
+ /**
+ * @brief Check whether the object is filled.
+ * @details Image object is empty if it wasn't filled.
+ *
+ * @since_tizen 3.0
+ * @remarks Empty object can not be recognized or tracked. Fill the object
+ * by using corresponding constructor or function @ref fill() to
+ * make image object valid. Also you can load image object which is
+ * not empty by using @ref load().
+ * @return @c false if object is filled, otherwise return @c true
+ */
+ bool isEmpty() const;
+
+ /**
+ * @brief Sets a label for the image object.
+ *
+ * @since_tizen 3.0
+ * @param [in] label The label which will be assigned to the image object
+ */
+ void setLabel(int label);
+
+ /**
+ * @brief Gets a label of object.
+ *
+ * @since_tizen 3.0
+ * @param [out] label The label of image object
+ * @return @c true if object is labeled, otherwise return @c false
+ */
+ bool getLabel(int& label) const;
+
+ /**
+ * @brief Stores the @ref ImageObject in a file.
+ *
+ * @since_tizen 3.0
+ * @param [in] fileName File name which will be generated
+ * @return @a 0 on success, otherwise a negative error value
+ */
+ int save(const char *fileName) const;
+
+ /**
+ * @brief Loads the @ref ImageObject from the file.
+ *
+ * @since_tizen 3.0
+ * @param [in] fileName File name from which will be loaded an @ref ImageObject
+ * @return @a 0 on success, otherwise a negative error value
+ */
+ int load(const char *fileName);
private:
-
- static const int MinWidth = 5;
- static const int MinHeight = 5;
+ static const int MinWidth = 5;
+ static const int MinHeight = 5;
private:
+ void extractFeatures(
+ const cv::Mat& image,
+ const FeaturesExtractingParams& params);
- void extractFeatures(
- const cv::Mat& image,
- const FeaturesExtractingParams& params);
-
- void computeRecognitionRate(const cv::Mat& image);
+ void computeRecognitionRate(const cv::Mat& image);
private:
+ bool m_isEmpty;
- bool m_isEmpty;
-
- bool m_isLabeled;
+ bool m_isLabeled;
- int m_label;
+ int m_label;
- std::vector<cv::Point2f> m_boundingContour;
+ std::vector<cv::Point2f> m_boundingContour;
- std::vector<cv::KeyPoint> m_objectKeypoints;
+ std::vector<cv::KeyPoint> m_objectKeypoints;
- cv::Mat m_objectDescriptors;
+ cv::Mat m_objectDescriptors;
- float m_recognitionRate;
+ float m_recognitionRate;
- friend class ImageRecognizer;
+ friend class ImageRecognizer;
- friend std::ostream& operator << (std::ostream& os, const ImageObject& obj);
+ friend std::ostream& operator << (std::ostream& os, const ImageObject& obj);
- friend std::istream& operator >> (std::istream& is, ImageObject& obj);
+ friend std::istream& operator >> (std::istream& is, ImageObject& obj);
};
} /* Image */
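A minimal sketch of the ImageObject lifecycle declared above, assuming an 8-bit scene image is already available as a cv::Mat; the label and file path are illustrative only.

#include "ImageObject.h"

using namespace MediaVision::Image;

int buildAndSaveTarget(const cv::Mat& image, const cv::Rect& roi)
{
	FeaturesExtractingParams params;	/* defaults: scale 1.2, 800 features */
	ImageObject target;

	/* detect keypoints and extract descriptors from the ROI only */
	if (!target.fill(image, roi, params))
		return -1;	/* invalid bounding box */

	if (target.isEmpty() || target.getRecognitionRate() == 0.f)
		return -1;	/* not enough features to recognize the object */

	target.setLabel(1);	/* illustrative label */

	/* 0 on success, otherwise a negative error value */
	return target.save("/tmp/image_object.dat");
}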
diff --git a/mv_image/image/include/ImageRecognizer.h b/mv_image/image/include/ImageRecognizer.h
index 8494e3aa..5117a279 100644
--- a/mv_image/image/include/ImageRecognizer.h
+++ b/mv_image/image/include/ImageRecognizer.h
@@ -28,86 +28,79 @@
* @brief This file contains functionality for image object recognition.
*/
-namespace MediaVision
-{
-namespace Image
-{
-
+namespace MediaVision {
+namespace Image {
/**
* @class ImageRecognizer
* @brief This class contains functionality for image object recognition.
*
* @since_tizen 3.0
*/
-class ImageRecognizer
-{
+class ImageRecognizer {
public:
-
- /**
- * @brief @ref ImageRecognizer constructor based on scene image.
- *
- * @since_tizen 3.0
- * @param [in] sceneImage The scene in which image objects will be recognized
- * @param [in] params Scene features extracting parameters
- */
- ImageRecognizer(const cv::Mat& sceneImage,
- const FeaturesExtractingParams& params);
-
- /**
- * @brief @ref ImageRecognizer constructor based on thes scene @ref ImageObject.
- *
- * @since_tizen 3.0
- * @param [in] scene The scene for which the objects will be recognized by
- * calling method recognize()
- */
- ImageRecognizer(const ImageObject& scene);
-
- /**
- * @brief @ref ImageRecognizer destructor.
- *
- * @since_tizen 3.0
- */
- virtual ~ImageRecognizer();
-
- /**
- * @brief Recognizes the @a target on the scene.
- *
- * @since_tizen 3.0
- * @param [in] target @ref ImageObject, which will be recognized
- * @param [in] params Recognition parameters
- * @param [out] contour The result contour of @a target object on the scene
- * @return true if object is found on the scene, otherwise return false
- */
- bool recognize(
- const ImageObject& target,
- const RecognitionParams& params,
- std::vector<cv::Point2f>& contour) const;
+ /**
+ * @brief @ref ImageRecognizer constructor based on scene image.
+ *
+ * @since_tizen 3.0
+ * @param [in] sceneImage The scene in which image objects will be recognized
+ * @param [in] params Scene features extracting parameters
+ */
+ ImageRecognizer(const cv::Mat& sceneImage,
+ const FeaturesExtractingParams& params);
+
+ /**
+ * @brief @ref ImageRecognizer constructor based on the scene @ref ImageObject.
+ *
+ * @since_tizen 3.0
+ * @param [in] scene The scene for which the objects will be recognized by
+ * calling method recognize()
+ */
+ ImageRecognizer(const ImageObject& scene);
+
+ /**
+ * @brief @ref ImageRecognizer destructor.
+ *
+ * @since_tizen 3.0
+ */
+ virtual ~ImageRecognizer();
+
+ /**
+ * @brief Recognizes the @a target on the scene.
+ *
+ * @since_tizen 3.0
+ * @param [in] target @ref ImageObject, which will be recognized
+ * @param [in] params Recognition parameters
+ * @param [out] contour The result contour of @a target object on the scene
+ * @return true if object is found on the scene, otherwise return false
+ */
+ bool recognize(
+ const ImageObject& target,
+ const RecognitionParams& params,
+ std::vector<cv::Point2f>& contour) const;
private:
+ ImageRecognizer();
- ImageRecognizer();
+ bool findHomophraphyMatrix(
+ const ImageObject& target,
+ const RecognitionParams& params,
+ cv::Mat& homophraphyMatrix) const;
- bool findHomophraphyMatrix(
- const ImageObject& target,
- const RecognitionParams& params,
- cv::Mat& homophraphyMatrix) const;
+ size_t matchesSelection(
+ std::vector<cv::DMatch>& examples,
+ unsigned int filterAmount, unsigned int allowableError) const;
- size_t matchesSelection(
- std::vector<cv::DMatch>& examples,
- unsigned int filterAmount, unsigned int allowableError) const;
+ float computeLinearSupportElement(
+ const std::vector<cv::DMatch>& examples,
+ int requiredNumber, int leftLimit, int rightLimit) const;
- float computeLinearSupportElement(
- const std::vector<cv::DMatch>& examples,
- int requiredNumber, int leftLimit, int rightLimit) const;
-
- static bool isPossibleQuadrangleCorners(
- const cv::Point2f corners[NumberOfQuadrangleCorners]);
+ static bool isPossibleQuadrangleCorners(
+ const cv::Point2f corners[NumberOfQuadrangleCorners]);
private:
+ ImageObject m_scene;
- ImageObject m_scene;
-
- cv::BFMatcher m_matcher;
+ cv::BFMatcher m_matcher;
};
} /* Image */
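A minimal recognition sketch for the class above, assuming the target ImageObject has already been filled (for instance as in the ImageObject sketch earlier); the recognition thresholds are illustrative.

#include "ImageRecognizer.h"

#include <vector>

using namespace MediaVision::Image;

bool findTarget(const cv::Mat& sceneImage, const ImageObject& target,
		std::vector<cv::Point2f>& contour)
{
	FeaturesExtractingParams sceneParams;	/* library defaults */
	RecognitionParams recognitionParams(30, 0.5, 0.2);	/* illustrative */

	/* scene features are extracted once in the constructor */
	ImageRecognizer recognizer(sceneImage, sceneParams);

	/* on success, contour holds the target quadrangle on the scene */
	return recognizer.recognize(target, recognitionParams, contour);
}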
diff --git a/mv_image/image/include/ImageTracker.h b/mv_image/image/include/ImageTracker.h
index 2bfd5b1b..ea577f48 100644
--- a/mv_image/image/include/ImageTracker.h
+++ b/mv_image/image/include/ImageTracker.h
@@ -26,75 +26,65 @@
* @brief This file contains functionality for image object tracking.
*/
-namespace MediaVision
-{
-namespace Image
-{
-
+namespace MediaVision {
+namespace Image {
class ImageRecognizer;
class ImageTrackingModel;
-
/**
* @class ImageTracker
* @brief This class contains functionality for image object tracking.
*
* @since_tizen 3.0
*/
-class ImageTracker
-{
+class ImageTracker {
private:
+ struct RecognitionInfo {
+ cv::Mat mFrame;
- struct RecognitionInfo
- {
- cv::Mat mFrame;
-
- RecognitionParams mRecognitionParams;
+ RecognitionParams mRecognitionParams;
- FeaturesExtractingParams mSceneFeaturesExtractingParams;
+ FeaturesExtractingParams mSceneFeaturesExtractingParams;
- ImageTrackingModel *mpTarget;
- };
+ ImageTrackingModel *mpTarget;
+ };
- static void *recognitionThreadFunc(void *recognitionInfo);
+ static void *recognitionThreadFunc(void *recognitionInfo);
public:
-
- /**
- * @brief @ref ImageTracker constructor based on tracking algorithm
- * parameters.
- *
- * @since_tizen 3.0
- * @param [in] trackingParams Parameters for image objects tracking
- */
- ImageTracker(const TrackingParams& trackingParams);
-
- /**
- * @brief Tracks the @a target for the video stream consisting of frames.
- *
- * @since_tizen 3.0
- * @remarks Call this function alternately for each frame
- * @param [in] frame Current frame of the video stream
- * @param [in,out] target @ref ImageTrackingModel, which will be tracked
- */
- void track(const cv::Mat& frame, ImageTrackingModel& target);
+ /**
+ * @brief @ref ImageTracker constructor based on tracking algorithm
+ * parameters.
+ *
+ * @since_tizen 3.0
+ * @param [in] trackingParams Parameters for image objects tracking
+ */
+ ImageTracker(const TrackingParams& trackingParams);
+
+ /**
+ * @brief Tracks the @a target for the video stream consisting of frames.
+ *
+ * @since_tizen 3.0
+ * @remarks Call this function alternately for each frame
+ * @param [in] frame Current frame of the video stream
+ * @param [in,out] target @ref ImageTrackingModel, which will be tracked
+ */
+ void track(const cv::Mat& frame, ImageTrackingModel& target);
private:
+ void trackDetectedObject(
+ const cv::Mat& frame,
+ ImageTrackingModel& target);
- void trackDetectedObject(
- const cv::Mat& frame,
- ImageTrackingModel& target);
-
- void trackUndetectedObject(
- const cv::Mat& frame,
- ImageTrackingModel& target);
+ void trackUndetectedObject(
+ const cv::Mat& frame,
+ ImageTrackingModel& target);
- cv::Rect computeExpectedArea(
- const ImageTrackingModel& target,
- const cv::Size& frameSize);
+ cv::Rect computeExpectedArea(
+ const ImageTrackingModel& target,
+ const cv::Size& frameSize);
private:
-
- TrackingParams m_trackingParams;
+ TrackingParams m_trackingParams;
};
} /* Image */
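A minimal per-frame tracking loop for the class above; trackingParams can be built as in the ImageConfig sketch earlier, and the vector of frames stands in for whatever frame source the caller actually uses.

#include "ImageTracker.h"
#include "ImageTrackingModel.h"

#include <vector>

using namespace MediaVision::Image;

void runTracking(const std::vector<cv::Mat>& frames,
		ImageTrackingModel& model,
		const TrackingParams& trackingParams)
{
	ImageTracker tracker(trackingParams);

	/* track() has to be called once per frame, in order */
	for (size_t i = 0u; i < frames.size(); ++i)
		tracker.track(frames[i], model);
}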
diff --git a/mv_image/image/include/ImageTrackingModel.h b/mv_image/image/include/ImageTrackingModel.h
index 2f55c2d1..2c07b990 100644
--- a/mv_image/image/include/ImageTrackingModel.h
+++ b/mv_image/image/include/ImageTrackingModel.h
@@ -33,184 +33,176 @@
* @brief This file contains the @ref ImageTrackingModel class.
*/
-namespace MediaVision
-{
-namespace Image
-{
-
+namespace MediaVision {
+namespace Image {
class ImageContourStabilizator;
-
/**
* @class ImageTrackingModel
* @brief This class contains the tracking functionality for image objects.
*
* @since_tizen 3.0
*/
-class ImageTrackingModel
-{
+class ImageTrackingModel {
private:
- /**
- * @brief @ref ImageTrackingModel state enumeration.
- *
- * @since_tizen 3.0
- */
- enum State
- {
- Invalid, /**< Invalid tracking model can not be tracked. Set not
- empty image object as target by using function
- @ref setTarget() to make tracking model valid, also
- you can load valid tracking model by using @ref load() */
- Undetected, /**< The object was not recognized on the last frame. Ready
- for further recognition */
- Appeared, /**< The object was recognized on one of the last frames
- after its absence */
- Tracked, /**< The object was recognized on the last frame. Its
- location can be obtained by calling method getLocation() */
- InProcess /**< The object is in the recognition process */
- };
+ /**
+ * @brief @ref ImageTrackingModel state enumeration.
+ *
+ * @since_tizen 3.0
+ */
+ enum State {
+ Invalid, /**< Invalid tracking model can not be tracked. Set not
+ empty image object as target by using function
+ @ref setTarget() to make tracking model valid, also
+ you can load valid tracking model by using @ref load() */
+ Undetected, /**< The object was not recognized on the last frame. Ready
+ for further recognition */
+ Appeared, /**< The object was recognized on one of the last frames
+ after its absence */
+ Tracked, /**< The object was recognized on the last frame. Its
+ location can be obtained by calling method getLocation() */
+ InProcess /**< The object is in the recognition process */
+ };
public:
-
- /**
- * @brief @ref ImageTrackingModel default constructor
- *
- * @since_tizen 3.0
- */
- ImageTrackingModel();
-
- /**
- * @brief @ref ImageTrackingModel constructor based on tracking algorithm
- * parameters.
- *
- * @since_tizen 3.0
- * @param[in] recognitionObject @ref ImageObject which will be tracked
- */
- ImageTrackingModel(const ImageObject& recognitionObject);
-
- /**
- * @brief @ref ImageTrackingModel copy constructor.
- * @details Creates copy of @ref ImageTrackingModel
- *
- * @since_tizen 3.0
- * @param [in] copy @ref ImageTrackingModel which will be copied
- */
- ImageTrackingModel(const ImageTrackingModel& copy);
-
- /**
- * @brief @ref ImageTrackingModel destructor.
- *
- * @since_tizen 3.0
- */
- ~ImageTrackingModel();
-
- /**
- * @brief Sets @ref ImageObject as target which will be tracked.
- *
- * @since_tizen 3.0
- * @param [in] target @ref ImageObject which will be tracked
- */
- void setTarget(const ImageObject& target);
-
- /**
- * @brief Checks whether the tracking model is valid for tracking.
- * @details Image tracking model is valid if its target is set and not empty.
- *
- * @since_tizen 3.0
- * @remarks Invalid tracking model can not be tracked. Set not empty target
- * by using corresponding constructor or function @ref setTarget()
- * to make tracking model valid. Also you can load valid tracking
- * model by using @ref load().
- * @return @c true if tracking model is valid, otherwise return @c false
- */
- bool isValid() const;
-
- /**
- * @brief Refreshes tracking model.
- *
- * @since_tizen 3.0
- * @remarks Call it before starting track on the new video stream.
- */
- void refresh(void);
-
- /**
- * @brief @ref ImageTrackingModel copy assignment operator.
- * @details Fills the information based on the @a copy
- *
- * @since_tizen 3.0
- * @param [in] copy @ref ImageTrackingModel which will be copied
- */
- ImageTrackingModel& operator=(const ImageTrackingModel& copy);
-
- /**
- * @brief Stores the @ref ImageTrackingModel in a file.
- *
- * @since_tizen 3.0
- * @param [in] filepath File name which will be generated
- * @return @a 0 on success, otherwise a negative error value
- */
- int save(const char *filepath) const;
-
- /**
- * @brief Loads the @ref ImageTrackingModel from the file.
- *
- * @since_tizen 3.0
- * @param [in] filepath File name from which will be loaded a model
- * @return @a 0 on success, otherwise a negative error value
- */
- int load(const char *filepath);
-
- /**
- * @brief Checks state of the @ref ImageTrackingModel.
- *
- * @since_tizen 3.0
- * @return @a true if object was detected on the last processed frame,
- * otherwise a @a false value
- */
- bool isDetected() const;
-
- /**
- * @brief Gets last location of the @ref ImageTrackingModel.
- *
- * @since_tizen 3.0
- * @return Last detected location
- */
- std::vector<cv::Point2f> getLastlocation() const;
+ /**
+ * @brief @ref ImageTrackingModel default constructor
+ *
+ * @since_tizen 3.0
+ */
+ ImageTrackingModel();
+
+ /**
+ * @brief @ref ImageTrackingModel constructor based on the image object
+ * which will be tracked.
+ *
+ * @since_tizen 3.0
+ * @param[in] recognitionObject @ref ImageObject which will be tracked
+ */
+ ImageTrackingModel(const ImageObject& recognitionObject);
+
+ /**
+ * @brief @ref ImageTrackingModel copy constructor.
+ * @details Creates copy of @ref ImageTrackingModel
+ *
+ * @since_tizen 3.0
+ * @param [in] copy @ref ImageTrackingModel which will be copied
+ */
+ ImageTrackingModel(const ImageTrackingModel& copy);
+
+ /**
+ * @brief @ref ImageTrackingModel destructor.
+ *
+ * @since_tizen 3.0
+ */
+ ~ImageTrackingModel();
+
+ /**
+ * @brief Sets @ref ImageObject as target which will be tracked.
+ *
+ * @since_tizen 3.0
+ * @param [in] target @ref ImageObject which will be tracked
+ */
+ void setTarget(const ImageObject& target);
+
+ /**
+ * @brief Checks whether the tracking model is valid for tracking.
+ * @details Image tracking model is valid if its target is set and not empty.
+ *
+ * @since_tizen 3.0
+ * @remarks Invalid tracking model can not be tracked. Set not empty target
+ * by using corresponding constructor or function @ref setTarget()
+ * to make tracking model valid. Also you can load valid tracking
+ * model by using @ref load().
+ * @return @c true if tracking model is valid, otherwise return @c false
+ */
+ bool isValid() const;
+
+ /**
+ * @brief Refreshes tracking model.
+ *
+ * @since_tizen 3.0
+ * @remarks Call it before starting track on the new video stream.
+ */
+ void refresh(void);
+
+ /**
+ * @brief @ref ImageTrackingModel copy assignment operator.
+ * @details Fills the information based on the @a copy
+ *
+ * @since_tizen 3.0
+ * @param [in] copy @ref ImageTrackingModel which will be copied
+ */
+ ImageTrackingModel& operator=(const ImageTrackingModel& copy);
+
+ /**
+ * @brief Stores the @ref ImageTrackingModel in a file.
+ *
+ * @since_tizen 3.0
+ * @param [in] filepath File name which will be generated
+ * @return @a 0 on success, otherwise a negative error value
+ */
+ int save(const char *filepath) const;
+
+ /**
+ * @brief Loads the @ref ImageTrackingModel from the file.
+ *
+ * @since_tizen 3.0
+ * @param [in] filepath File name from which will be loaded a model
+ * @return @a 0 on success, otherwise a negative error value
+ */
+ int load(const char *filepath);
+
+ /**
+ * @brief Checks state of the @ref ImageTrackingModel.
+ *
+ * @since_tizen 3.0
+ * @return @a true if object was detected on the last processed frame,
+ * otherwise a @a false value
+ */
+ bool isDetected() const;
+
+ /**
+ * @brief Gets last location of the @ref ImageTrackingModel.
+ *
+ * @since_tizen 3.0
+ * @return Last detected location
+ */
+ std::vector<cv::Point2f> getLastlocation() const;
private:
+ ImageObject m_recognitionObject;
- ImageObject m_recognitionObject;
-
- ImageContourStabilizator m_stabilizator;
+ ImageContourStabilizator m_stabilizator;
- std::vector<cv::Point2f> m_lastLocation;
+ std::vector<cv::Point2f> m_lastLocation;
- State m_state;
+ State m_state;
- pthread_t m_recognitionThread;
+ pthread_t m_recognitionThread;
- mutable pthread_mutex_t m_globalGuard;
+ mutable pthread_mutex_t m_globalGuard;
- mutable pthread_spinlock_t m_lastLocationGuard;
+ mutable pthread_spinlock_t m_lastLocationGuard;
- mutable pthread_spinlock_t m_stateGuard;
+ mutable pthread_spinlock_t m_stateGuard;
- friend std::ostream& operator << (
- std::ostream& os,
- const ImageTrackingModel::State& state);
+ friend std::ostream& operator << (
+ std::ostream& os,
+ const ImageTrackingModel::State& state);
- friend std::istream& operator >> (
- std::istream& is,
- ImageTrackingModel::State& state);
+ friend std::istream& operator >> (
+ std::istream& is,
+ ImageTrackingModel::State& state);
- friend std::ostream& operator << (
- std::ostream& os,
- const ImageTrackingModel& obj);
+ friend std::ostream& operator << (
+ std::ostream& os,
+ const ImageTrackingModel& obj);
- friend std::istream& operator >> (
- std::istream& is,
- ImageTrackingModel& obj);
+ friend std::istream& operator >> (
+ std::istream& is,
+ ImageTrackingModel& obj);
- friend class ImageTracker;
+ friend class ImageTracker;
};
} /* Image */
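A sketch of the tracking model lifecycle declared above: set a non-empty ImageObject as the target, refresh the model before a new video stream, and query the last result after each tracked frame. The helper function names here are illustrative.

#include "ImageTrackingModel.h"

#include <vector>

using namespace MediaVision::Image;

bool prepareModel(const ImageObject& target, ImageTrackingModel& model)
{
	model.setTarget(target);
	if (!model.isValid())
		return false;	/* empty target, the model can not be tracked */

	/* call before starting tracking on a new video stream */
	model.refresh();
	return true;
}

void reportLastLocation(const ImageTrackingModel& model)
{
	if (model.isDetected()) {
		/* quadrangle found on the last processed frame */
		std::vector<cv::Point2f> location = model.getLastlocation();
		/* ... draw or log the contour ... */
		(void)location;
	}
}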
diff --git a/mv_image/image/include/mv_image_open.h b/mv_image/image/include/mv_image_open.h
index f6128fd5..a73df98b 100644
--- a/mv_image/image/include/mv_image_open.h
+++ b/mv_image/image/include/mv_image_open.h
@@ -77,12 +77,12 @@ extern "C" {
* @see mv_engine_config_h
*/
int mv_image_recognize_open(
- mv_source_h source,
- const mv_image_object_h *image_objects,
- int number_of_objects,
- mv_engine_config_h engine_cfg,
- mv_image_recognized_cb recognized_cb,
- void *user_data);
+ mv_source_h source,
+ const mv_image_object_h *image_objects,
+ int number_of_objects,
+ mv_engine_config_h engine_cfg,
+ mv_image_recognized_cb recognized_cb,
+ void *user_data);
/*************************/
/* Image object tracking */
@@ -140,11 +140,11 @@ int mv_image_recognize_open(
* @see mv_image_tracking_model_destroy_open()
*/
int mv_image_track_open(
- mv_source_h source,
- mv_image_tracking_model_h image_tracking_model,
- mv_engine_config_h engine_cfg,
- mv_image_tracked_cb tracked_cb,
- void *user_data);
+ mv_source_h source,
+ mv_image_tracking_model_h image_tracking_model,
+ mv_engine_config_h engine_cfg,
+ mv_image_tracked_cb tracked_cb,
+ void *user_data);
/**************************/
/* Image object behaviour */
@@ -165,7 +165,7 @@ int mv_image_track_open(
* @see mv_image_object_destroy_open()
*/
int mv_image_object_create_open(
- mv_image_object_h *image_object);
+ mv_image_object_h *image_object);
/**
* @brief Destroys the image object.
@@ -179,7 +179,7 @@ int mv_image_object_create_open(
* @see mv_image_object_create_open()
*/
int mv_image_object_destroy_open(
- mv_image_object_h image_object);
+ mv_image_object_h image_object);
/**
* @brief Fills the image object.
@@ -219,10 +219,10 @@ int mv_image_object_destroy_open(
* @see mv_engine_config_h
*/
int mv_image_object_fill_open(
- mv_image_object_h image_object,
- mv_engine_config_h engine_cfg,
- mv_source_h source,
- mv_rectangle_s *location);
+ mv_image_object_h image_object,
+ mv_engine_config_h engine_cfg,
+ mv_source_h source,
+ mv_rectangle_s *location);
/**
* @brief Gets a value that determines how well an image object can be recognized.
@@ -255,8 +255,8 @@ int mv_image_object_fill_open(
* @see mv_engine_config_h
*/
int mv_image_object_get_recognition_rate_open(
- mv_image_object_h image_object,
- double *recognition_rate);
+ mv_image_object_h image_object,
+ double *recognition_rate);
/**
* @brief Sets a label for the image object.
@@ -281,8 +281,8 @@ int mv_image_object_get_recognition_rate_open(
* @see mv_image_object_destroy_open()
*/
int mv_image_object_set_label_open(
- mv_image_object_h image_object,
- int label);
+ mv_image_object_h image_object,
+ int label);
/**
* @brief Gets a label of image object.
@@ -309,8 +309,8 @@ int mv_image_object_set_label_open(
* @see mv_image_object_destroy_open()
*/
int mv_image_object_get_label_open(
- mv_image_object_h image_object,
- int *label);
+ mv_image_object_h image_object,
+ int *label);
/**
* @brief Clones the image object.
@@ -330,8 +330,8 @@ int mv_image_object_get_label_open(
* @see mv_image_object_destroy_open()
*/
int mv_image_object_clone_open(
- mv_image_object_h src,
- mv_image_object_h *dst);
+ mv_image_object_h src,
+ mv_image_object_h *dst);
/**
* @brief Saves the image object.
@@ -351,7 +351,7 @@ int mv_image_object_clone_open(
* @see mv_image_object_destroy_open()
*/
int mv_image_object_save_open(
- const char *file_name, mv_image_object_h image_object);
+ const char *file_name, mv_image_object_h image_object);
/**
* @brief Loads an image object from the file.
@@ -377,7 +377,7 @@ int mv_image_object_save_open(
* @see mv_image_object_destroy_open()
*/
int mv_image_object_load_open(
- const char *file_name, mv_image_object_h *image_object);
+ const char *file_name, mv_image_object_h *image_object);
/**********************************/
/* Image tracking model behaviour */
@@ -398,7 +398,7 @@ int mv_image_object_load_open(
* @see mv_image_tracking_model_destroy_open()
*/
int mv_image_tracking_model_create_open(
- mv_image_tracking_model_h *image_tracking_model);
+ mv_image_tracking_model_h *image_tracking_model);
/**
* @brief Sets target of image tracking model.
@@ -431,8 +431,8 @@ int mv_image_tracking_model_create_open(
* @see mv_image_tracking_model_destroy_open()
*/
int mv_image_tracking_model_set_target_open(
- mv_image_object_h image_object,
- mv_image_tracking_model_h image_tracking_model);
+ mv_image_object_h image_object,
+ mv_image_tracking_model_h image_tracking_model);
/**
* @brief Destroys the image tracking model.
@@ -449,7 +449,7 @@ int mv_image_tracking_model_set_target_open(
* @see mv_image_tracking_model_create_open()
*/
int mv_image_tracking_model_destroy_open(
- mv_image_tracking_model_h image_tracking_model);
+ mv_image_tracking_model_h image_tracking_model);
/**
* @brief Refreshes the state of image tracking model.
@@ -480,8 +480,8 @@ int mv_image_tracking_model_destroy_open(
* @see mv_image_tracking_model_destroy_open()
*/
int mv_image_tracking_model_refresh_open(
- mv_image_tracking_model_h image_tracking_model,
- mv_engine_config_h engine_cfg);
+ mv_image_tracking_model_h image_tracking_model,
+ mv_engine_config_h engine_cfg);
/**
* @brief Clones the image tracking model.
@@ -499,8 +499,8 @@ int mv_image_tracking_model_refresh_open(
* @see mv_image_tracking_model_destroy_open()
*/
int mv_image_tracking_model_clone_open(
- mv_image_tracking_model_h src,
- mv_image_tracking_model_h *dst);
+ mv_image_tracking_model_h src,
+ mv_image_tracking_model_h *dst);
/**
* @brief Saves the image tracking model.
@@ -526,7 +526,7 @@ int mv_image_tracking_model_clone_open(
* @see mv_image_tracking_model_destroy_open()
*/
int mv_image_tracking_model_save_open(
- const char *file_name, mv_image_tracking_model_h image_tracking_model);
+ const char *file_name, mv_image_tracking_model_h image_tracking_model);
/**
* @brief Loads an image tracking model from the file.
@@ -552,7 +552,7 @@ int mv_image_tracking_model_save_open(
* @see mv_image_tracking_model_destroy_open()
*/
int mv_image_tracking_model_load_open(
- const char *file_name, mv_image_tracking_model_h *image_tracking_model);
+ const char *file_name, mv_image_tracking_model_h *image_tracking_model);
#ifdef __cplusplus
}
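A minimal sketch of the C-level flow declared above for creating, filling and saving an image object. It assumes that a valid mv_source_h and mv_engine_config_h have already been prepared through the mv_common API, that mv_image_open.h pulls in the common Media Vision types, and that these calls follow the 0-on-success convention stated for the save/load functions; the label and file path are illustrative.

#include "mv_image_open.h"

int create_fill_save(mv_source_h source, mv_engine_config_h engine_cfg,
		mv_rectangle_s *location)
{
	mv_image_object_h image_object = NULL;

	int err = mv_image_object_create_open(&image_object);
	if (err != 0)
		return err;

	/* extract features from the given location of the source image */
	err = mv_image_object_fill_open(image_object, engine_cfg, source, location);

	if (err == 0)
		err = mv_image_object_set_label_open(image_object, 1);

	if (err == 0)
		err = mv_image_object_save_open("/tmp/image_object.dat", image_object);

	mv_image_object_destroy_open(image_object);
	return err;
}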
diff --git a/mv_image/image/src/ImageConfig.cpp b/mv_image/image/src/ImageConfig.cpp
index 47fdaef4..a058965f 100644
--- a/mv_image/image/src/ImageConfig.cpp
+++ b/mv_image/image/src/ImageConfig.cpp
@@ -16,36 +16,33 @@
#include "ImageConfig.h"
-namespace MediaVision
-{
-namespace Image
-{
-
+namespace MediaVision {
+namespace Image {
FeaturesExtractingParams::FeaturesExtractingParams(
- double scaleFactor,
- int maximumFeaturesNumber) :
- mScaleFactor(scaleFactor),
- mMaximumFeaturesNumber(maximumFeaturesNumber)
+ double scaleFactor,
+ int maximumFeaturesNumber) :
+ mScaleFactor(scaleFactor),
+ mMaximumFeaturesNumber(maximumFeaturesNumber)
{
- ; /* NULL */
+ ; /* NULL */
}
FeaturesExtractingParams::FeaturesExtractingParams() :
- mScaleFactor(1.2),
- mMaximumFeaturesNumber(800)
+ mScaleFactor(1.2),
+ mMaximumFeaturesNumber(800)
{
- ; /* NULL */
+ ; /* NULL */
}
RecognitionParams::RecognitionParams(
- int minMatchesNumber,
- double requiredMatchesPart,
- double allowableMatchesPartError) :
- mMinMatchesNumber(minMatchesNumber),
- mRequiredMatchesPart(requiredMatchesPart),
- mAllowableMatchesPartError(allowableMatchesPartError)
+ int minMatchesNumber,
+ double requiredMatchesPart,
+ double allowableMatchesPartError) :
+ mMinMatchesNumber(minMatchesNumber),
+ mRequiredMatchesPart(requiredMatchesPart),
+ mAllowableMatchesPartError(allowableMatchesPartError)
{
- ; /* NULL */
+ ; /* NULL */
}
RecognitionParams::RecognitionParams() :
@@ -53,51 +50,51 @@ RecognitionParams::RecognitionParams() :
mRequiredMatchesPart(1.0),
mAllowableMatchesPartError(0.0)
{
- ; /* NULL */
+ ; /* NULL */
}
StabilizationParams::StabilizationParams(
- int historyAmount,
- double allowableShift,
- double stabilizationSpeed,
- double stabilizationAcceleration) :
- mHistoryAmount(historyAmount),
- mAllowableShift(allowableShift),
- mStabilizationSpeed(stabilizationSpeed),
- mStabilizationAcceleration(stabilizationAcceleration)
+ int historyAmount,
+ double allowableShift,
+ double stabilizationSpeed,
+ double stabilizationAcceleration) :
+ mHistoryAmount(historyAmount),
+ mAllowableShift(allowableShift),
+ mStabilizationSpeed(stabilizationSpeed),
+ mStabilizationAcceleration(stabilizationAcceleration)
{
- ; /* NULL */
+ ; /* NULL */
}
StabilizationParams::StabilizationParams() :
- mHistoryAmount(1),
- mAllowableShift(0.0),
- mStabilizationSpeed(0.0),
- mStabilizationAcceleration(1.0)
+ mHistoryAmount(1),
+ mAllowableShift(0.0),
+ mStabilizationSpeed(0.0),
+ mStabilizationAcceleration(1.0)
{
- ; /* NULL */
+ ; /* NULL */
}
TrackingParams::TrackingParams(
- FeaturesExtractingParams framesFeaturesExtractingParams,
- RecognitionParams recognitionParams,
- StabilizationParams stabilizationParams,
- double expectedOffset) :
- mFramesFeaturesExtractingParams(framesFeaturesExtractingParams),
- mRecognitionParams(recognitionParams),
- mStabilizationParams(stabilizationParams),
- mExpectedOffset(expectedOffset)
+ FeaturesExtractingParams framesFeaturesExtractingParams,
+ RecognitionParams recognitionParams,
+ StabilizationParams stabilizationParams,
+ double expectedOffset) :
+ mFramesFeaturesExtractingParams(framesFeaturesExtractingParams),
+ mRecognitionParams(recognitionParams),
+ mStabilizationParams(stabilizationParams),
+ mExpectedOffset(expectedOffset)
{
- ; /* NULL */
+ ; /* NULL */
}
TrackingParams::TrackingParams() :
- mFramesFeaturesExtractingParams(),
- mRecognitionParams(),
- mStabilizationParams(),
- mExpectedOffset(0.0)
+ mFramesFeaturesExtractingParams(),
+ mRecognitionParams(),
+ mStabilizationParams(),
+ mExpectedOffset(0.0)
{
- ; /* NULL */
+ ; /* NULL */
}
} /* Image */
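For quick reference, the default constructors above leave tracking essentially un-tuned; the members of a default-constructed TrackingParams take the values from the initializer lists shown in this file.

#include "ImageConfig.h"

using namespace MediaVision::Image;

void defaultTrackingParamsOverview()
{
	TrackingParams params;	/* all members default-constructed */

	/* params.mFramesFeaturesExtractingParams: scale factor 1.2,
	   at most 800 features per frame */
	/* params.mRecognitionParams: the full matches part is required
	   (1.0) with no allowable error */
	/* params.mStabilizationParams: history of 1, no allowable shift,
	   zero stabilization speed, acceleration 1.0 */
	/* params.mExpectedOffset: 0.0 */
	(void)params;
}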
diff --git a/mv_image/image/src/ImageContourStabilizator.cpp b/mv_image/image/src/ImageContourStabilizator.cpp
index 1c486593..2b6dddc4 100644
--- a/mv_image/image/src/ImageContourStabilizator.cpp
+++ b/mv_image/image/src/ImageContourStabilizator.cpp
@@ -19,281 +19,250 @@
#include "mv_private.h"
-namespace MediaVision
-{
-namespace Image
-{
-
+namespace MediaVision {
+namespace Image {
ImageContourStabilizator::ImageContourStabilizator() :
- m_movingHistory(MovingHistoryAmount),
- m_priorities(MovingHistoryAmount)
+ m_movingHistory(MovingHistoryAmount),
+ m_priorities(MovingHistoryAmount)
{
- reset();
-
- // increasing the stabilization rate
- m_speeds.push_back(0.3f);
- m_speeds.push_back(0.4f);
- m_speeds.push_back(0.5f);
- m_speeds.push_back(0.6f);
- m_speeds.push_back(0.8f);
- m_speeds.push_back(1.f);
-
- // calculation of priorities for positions in the moving history
- for (size_t i = 0u; i < MovingHistoryAmount; ++i)
- {
- // linear dependence on the elapsed time
- m_priorities[i] = (i + 1) / ((MovingHistoryAmount + 1) * MovingHistoryAmount / 2.0f);
- }
+ reset();
+
+ /* increasing the stabilization rate */
+ m_speeds.push_back(0.3f);
+ m_speeds.push_back(0.4f);
+ m_speeds.push_back(0.5f);
+ m_speeds.push_back(0.6f);
+ m_speeds.push_back(0.8f);
+ m_speeds.push_back(1.f);
+
+ /* calculation of priorities for positions in the moving history */
+ for (size_t i = 0u; i < MovingHistoryAmount; ++i) {
+ /* linear dependence on the elapsed time */
+ m_priorities[i] = (i + 1) / ((MovingHistoryAmount + 1) * MovingHistoryAmount / 2.0f);
+ }
}
void ImageContourStabilizator::reset(void)
{
- m_isPrepared = false;
- m_tempContourIndex = -1;
- m_currentHistoryAmount = 0;
+ m_isPrepared = false;
+ m_tempContourIndex = -1;
+ m_currentHistoryAmount = 0;
- LOGI("Outlier is detected.");
+ LOGI("Outlier is detected.");
}
bool ImageContourStabilizator::stabilize(
- std::vector<cv::Point2f>& contour,
- const StabilizationParams& /*params*/)
+ std::vector<cv::Point2f>& contour,
+ const StabilizationParams& /*params*/)
{
- // current implementation stabilizes quadrangles only
- if (contour.size() != NumberOfQuadrangleCorners)
- {
- LOGW("Not stabilized. Empty contour.");
-
- return false;
- }
-
- m_currentCornersSpeed.resize(contour.size(), 0);
-
- if (contour[0].x == contour[1].x && contour[0].y == contour[1].y)
- {
- LOGW("Not stabilized. Invalid contour.");
-
- return false;
- }
-
- if (m_lastStabilizedContour.empty())
- {
- m_lastStabilizedContour = contour;
- }
-
- std::vector<cv::Point2f> stabilizedState;
-
- // history amount < 2 it's no sense
- if (MovingHistoryAmount >= 2)
- {
- // first sample
- if (m_tempContourIndex == -1)
- {
- m_movingHistory[1] = contour;
- m_tempContourIndex = 1;
- m_currentHistoryAmount = 1;
-
- LOGI("Not stabilized. Too small moving history. (the first one)");
-
- return false;
- }
-
- // too short moving history
- if (m_currentHistoryAmount < MovingHistoryAmount - 1)
- {
- ++m_currentHistoryAmount;
- ++m_tempContourIndex;
- m_movingHistory[m_tempContourIndex] = contour;
-
- LOGI("Not stabilized. Too small moving history.");
-
- return false;
- }
-
- // saving into moving history
- m_movingHistory.pop_front();
- m_movingHistory.push_back(contour);
-
- if (!m_isPrepared)
- {
- m_lastStabilizedContour = m_movingHistory[MovingHistoryAmount - 2];
-
- LOGI("Not stabilized. Too small moving history. (the last one)");
-
- m_isPrepared = true;
- }
-
- // stabilization
- stabilizedState = computeStabilizedQuadrangleContour();
-
- if (stabilizedState.empty())
- {
- stabilizedState = m_lastStabilizedContour;
- }
- }
- else
- {
- stabilizedState = m_lastStabilizedContour;
- }
-
- const float tolerantShift = getQuadrangleArea(contour.data()) * 0.00006f + 1.3f;
-
- const size_t contourSize = stabilizedState.size();
- for (size_t i = 0u; i < contourSize; ++i)
- {
- if (fabs(getDistance(stabilizedState[i], contour[i])) > tolerantShift)
- {
- const float dirX = m_lastStabilizedContour[i].x - contour[i].x;
- const float dirY = m_lastStabilizedContour[i].y - contour[i].y;
-
- const float speedX = dirX * m_speeds[m_currentCornersSpeed[i]];
- const float speedY = dirY * m_speeds[m_currentCornersSpeed[i]];
-
- // final moving
- m_lastStabilizedContour[i].x -= speedX;
- m_lastStabilizedContour[i].y -= speedY;
-
- if (m_currentCornersSpeed[i] < m_speeds.size() - 1)
- {
- ++m_currentCornersSpeed[i];
- }
- }
- else
- {
- m_currentCornersSpeed[i] = 0;
- }
- }
+ /* current implementation stabilizes quadrangles only */
+ if (contour.size() != NumberOfQuadrangleCorners) {
+ LOGW("Not stabilized. Empty contour.");
+
+ return false;
+ }
+
+ m_currentCornersSpeed.resize(contour.size(), 0);
+
+ if (contour[0].x == contour[1].x && contour[0].y == contour[1].y) {
+ LOGW("Not stabilized. Invalid contour.");
+
+ return false;
+ }
+
+ if (m_lastStabilizedContour.empty()) {
+ m_lastStabilizedContour = contour;
+ }
+
+ std::vector<cv::Point2f> stabilizedState;
+
+	/* a history amount < 2 makes no sense */
+ if (MovingHistoryAmount >= 2) {
+ /* first sample */
+ if (m_tempContourIndex == -1) {
+ m_movingHistory[1] = contour;
+ m_tempContourIndex = 1;
+ m_currentHistoryAmount = 1;
+
+ LOGI("Not stabilized. Too small moving history. (the first one)");
+
+ return false;
+ }
+
+ /* too short moving history */
+ if (m_currentHistoryAmount < MovingHistoryAmount - 1) {
+ ++m_currentHistoryAmount;
+ ++m_tempContourIndex;
+ m_movingHistory[m_tempContourIndex] = contour;
+
+ LOGI("Not stabilized. Too small moving history.");
+
+ return false;
+ }
+
+ /* saving into moving history */
+ m_movingHistory.pop_front();
+ m_movingHistory.push_back(contour);
+
+ if (!m_isPrepared) {
+ m_lastStabilizedContour = m_movingHistory[MovingHistoryAmount - 2];
+
+ LOGI("Not stabilized. Too small moving history. (the last one)");
+
+ m_isPrepared = true;
+ }
+
+ /* stabilization */
+ stabilizedState = computeStabilizedQuadrangleContour();
+
+ if (stabilizedState.empty()) {
+ stabilizedState = m_lastStabilizedContour;
+ }
+ } else {
+ stabilizedState = m_lastStabilizedContour;
+ }
+
+ const float tolerantShift = getQuadrangleArea(contour.data()) * 0.00006f + 1.3f;
+
+ const size_t contourSize = stabilizedState.size();
+ for (size_t i = 0u; i < contourSize; ++i) {
+ if (fabs(getDistance(stabilizedState[i], contour[i])) > tolerantShift) {
+ const float dirX = m_lastStabilizedContour[i].x - contour[i].x;
+ const float dirY = m_lastStabilizedContour[i].y - contour[i].y;
+
+ const float speedX = dirX * m_speeds[m_currentCornersSpeed[i]];
+ const float speedY = dirY * m_speeds[m_currentCornersSpeed[i]];
+
+ /* final moving */
+ m_lastStabilizedContour[i].x -= speedX;
+ m_lastStabilizedContour[i].y -= speedY;
+
+ if (m_currentCornersSpeed[i] < m_speeds.size() - 1) {
+ ++m_currentCornersSpeed[i];
+ }
+ } else {
+ m_currentCornersSpeed[i] = 0;
+ }
+ }
+
+ /* m_lastStabilizedContour = stabilizedState; */
+ contour = m_lastStabilizedContour;
+
+ LOGI("Contour successfully stabilized.");
- // m_lastStabilizedContour = stabilizedState;
- contour = m_lastStabilizedContour;
-
- LOGI("Contour successfully stabilized.");
-
- return true;
+ return true;
}
std::vector<cv::Point2f> ImageContourStabilizator::computeStabilizedQuadrangleContour(void)
{
- // final contour
- std::vector<cv::Point2f> stabilizedState(
- NumberOfQuadrangleCorners, cv::Point2f(0.f, 0.f));
-
- // calculation the direction of contour corners to a new location
- std::vector<cv::Point2f> directions(
- NumberOfQuadrangleCorners, cv::Point2f(0.f, 0.f));
-
- // computing expected directions and outliers searching
- bool expressiveTime = false;
- float summPriorityWithoutToLastPos[NumberOfQuadrangleCorners];
- float priorityToLastPos[NumberOfQuadrangleCorners];
- std::vector<cv::Point2f> directionsToLastPos(NumberOfQuadrangleCorners);
- for (size_t j = 0u; j < NumberOfQuadrangleCorners; ++j)
- {
- // calculation the moving directions and computing average direction
- std::vector<cv::Point2f> trackDirections(MovingHistoryAmount - 1);
- cv::Point2f averageDirections(0.f, 0.f);
-
- for (size_t i = 0u; i < MovingHistoryAmount - 1; ++i)
- {
- averageDirections.x += (trackDirections[i].x =
- m_movingHistory[i+1][j].x - m_movingHistory[i][j].x) /
- (MovingHistoryAmount - 1);
-
- averageDirections.y += (trackDirections[i].y =
- m_movingHistory[i+1][j].y - m_movingHistory[i][j].y) /
- (MovingHistoryAmount - 1);
- }
-
- // calculation a deviations and select outlier
- std::vector<float> directionDistances(MovingHistoryAmount - 1);
- float maxDistance = 0.f, prevMaxDistance = 0.f;
- int idxWithMaxDistance = 0;
- int numExpressiveDirection = -1;
- for (size_t i = 0u; i < MovingHistoryAmount - 1; ++i)
- {
- directionDistances[i] = getDistance(
- trackDirections[i],
- averageDirections);
-
- if (directionDistances[i] > prevMaxDistance)
- {
- if (directionDistances[i] > maxDistance)
- {
- prevMaxDistance = maxDistance;
- maxDistance = directionDistances[i];
- idxWithMaxDistance = i;
- }
- else
- {
- prevMaxDistance = directionDistances[i];
- }
- }
- }
-
- // check outlier
- if (0.6f * maxDistance > prevMaxDistance)
- {
- LOGI("Outlier is detected.");
-
- numExpressiveDirection = idxWithMaxDistance;
- }
-
- // final direction computing
- float summPriority = 0.f;
- for (size_t i = 0u; i < MovingHistoryAmount - 1; ++i)
- {
- if ((int)i != numExpressiveDirection)
- {
- directions[j].x += trackDirections[i].x * m_priorities[i];
- directions[j].y += trackDirections[i].y * m_priorities[i];
- summPriority += m_priorities[i];
- }
- }
- if (numExpressiveDirection == MovingHistoryAmount - 1)
- {
- expressiveTime = true;
- }
-
- summPriorityWithoutToLastPos[j] = summPriority;
- priorityToLastPos[j] = m_priorities[MovingHistoryAmount - 1];
-
- directions[j].x -= directionsToLastPos[j].x =
- (m_lastStabilizedContour[j].x -
- m_movingHistory[MovingHistoryAmount - 1][j].x) *
- priorityToLastPos[j];
-
- directions[j].y -= directionsToLastPos[j].y =
- (m_lastStabilizedContour[j].y -
- m_movingHistory[MovingHistoryAmount - 1][j].y) *
- priorityToLastPos[j];
-
- summPriority += priorityToLastPos[j];
-
- directions[j].x /= summPriority;
- directions[j].y /= summPriority;
- }
-
- // final corners computing
- for (size_t j = 0u; j < NumberOfQuadrangleCorners; ++j)
- {
- if (expressiveTime)
- {
- directions[j].x *= (summPriorityWithoutToLastPos[j] +
- priorityToLastPos[j]);
- directions[j].x -= directionsToLastPos[j].x;
- directions[j].x /= summPriorityWithoutToLastPos[j];
-
- directions[j].y *= (summPriorityWithoutToLastPos[j] +
- priorityToLastPos[j]);
- directions[j].y -= directionsToLastPos[j].y;
- directions[j].y /= summPriorityWithoutToLastPos[j];
- }
-
- stabilizedState[j].x = m_lastStabilizedContour[j].x + directions[j].x;
- stabilizedState[j].y = m_lastStabilizedContour[j].y + directions[j].y;
- }
-
- return stabilizedState;
+ /* final contour */
+ std::vector<cv::Point2f> stabilizedState(
+ NumberOfQuadrangleCorners, cv::Point2f(0.f, 0.f));
+
+	/* calculation of the direction of contour corners to a new location */
+ std::vector<cv::Point2f> directions(
+ NumberOfQuadrangleCorners, cv::Point2f(0.f, 0.f));
+
+	/* computing expected directions and searching for outliers */
+ bool expressiveTime = false;
+ float summPriorityWithoutToLastPos[NumberOfQuadrangleCorners];
+ float priorityToLastPos[NumberOfQuadrangleCorners];
+ std::vector<cv::Point2f> directionsToLastPos(NumberOfQuadrangleCorners);
+ for (size_t j = 0u; j < NumberOfQuadrangleCorners; ++j) {
+		/* calculating the moving directions and computing the average direction */
+ std::vector<cv::Point2f> trackDirections(MovingHistoryAmount - 1);
+ cv::Point2f averageDirections(0.f, 0.f);
+
+ for (size_t i = 0u; i < MovingHistoryAmount - 1; ++i) {
+ averageDirections.x += (trackDirections[i].x =
+ m_movingHistory[i+1][j].x - m_movingHistory[i][j].x) /
+ (MovingHistoryAmount - 1);
+
+ averageDirections.y += (trackDirections[i].y =
+ m_movingHistory[i+1][j].y - m_movingHistory[i][j].y) /
+ (MovingHistoryAmount - 1);
+ }
+
+		/* calculating deviations and selecting the outlier */
+ std::vector<float> directionDistances(MovingHistoryAmount - 1);
+ float maxDistance = 0.f, prevMaxDistance = 0.f;
+ int idxWithMaxDistance = 0;
+ int numExpressiveDirection = -1;
+ for (size_t i = 0u; i < MovingHistoryAmount - 1; ++i) {
+ directionDistances[i] = getDistance(
+ trackDirections[i],
+ averageDirections);
+
+ if (directionDistances[i] > prevMaxDistance) {
+ if (directionDistances[i] > maxDistance) {
+ prevMaxDistance = maxDistance;
+ maxDistance = directionDistances[i];
+ idxWithMaxDistance = i;
+ } else {
+ prevMaxDistance = directionDistances[i];
+ }
+ }
+ }
+
+ /* check outlier */
+ if (0.6f * maxDistance > prevMaxDistance) {
+ LOGI("Outlier is detected.");
+
+ numExpressiveDirection = idxWithMaxDistance;
+ }
+
+ /* final direction computing */
+ float summPriority = 0.f;
+ for (size_t i = 0u; i < MovingHistoryAmount - 1; ++i) {
+ if ((int)i != numExpressiveDirection) {
+ directions[j].x += trackDirections[i].x * m_priorities[i];
+ directions[j].y += trackDirections[i].y * m_priorities[i];
+ summPriority += m_priorities[i];
+ }
+ }
+
+ if (numExpressiveDirection == MovingHistoryAmount - 1) {
+ expressiveTime = true;
+ }
+
+ summPriorityWithoutToLastPos[j] = summPriority;
+ priorityToLastPos[j] = m_priorities[MovingHistoryAmount - 1];
+
+ directions[j].x -= directionsToLastPos[j].x =
+ (m_lastStabilizedContour[j].x -
+ m_movingHistory[MovingHistoryAmount - 1][j].x) *
+ priorityToLastPos[j];
+
+ directions[j].y -= directionsToLastPos[j].y =
+ (m_lastStabilizedContour[j].y -
+ m_movingHistory[MovingHistoryAmount - 1][j].y) *
+ priorityToLastPos[j];
+
+ summPriority += priorityToLastPos[j];
+
+ directions[j].x /= summPriority;
+ directions[j].y /= summPriority;
+ }
+
+ /* final corners computing */
+ for (size_t j = 0u; j < NumberOfQuadrangleCorners; ++j) {
+ if (expressiveTime) {
+ directions[j].x *= (summPriorityWithoutToLastPos[j] +
+ priorityToLastPos[j]);
+ directions[j].x -= directionsToLastPos[j].x;
+ directions[j].x /= summPriorityWithoutToLastPos[j];
+
+ directions[j].y *= (summPriorityWithoutToLastPos[j] +
+ priorityToLastPos[j]);
+ directions[j].y -= directionsToLastPos[j].y;
+ directions[j].y /= summPriorityWithoutToLastPos[j];
+ }
+
+ stabilizedState[j].x = m_lastStabilizedContour[j].x + directions[j].x;
+ stabilizedState[j].y = m_lastStabilizedContour[j].y + directions[j].y;
+ }
+
+ return stabilizedState;
}
} /* Image */
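To make the constants above concrete: with MovingHistoryAmount = 3 the priorities evaluate to (i + 1) / ((3 + 1) * 3 / 2) = (i + 1) / 6, i.e. 1/6, 1/3 and 1/2, which sum to 1 and weight the most recent motion the strongest; and the tolerant shift for a roughly 200x200-pixel quadrangle (area 40000) is 40000 * 0.00006 + 1.3 = 3.7 pixels. A small self-contained check of both computations:

#include <cstddef>
#include <cstdio>

int main(void)
{
	const size_t historyAmount = 3u;	/* MovingHistoryAmount */

	/* linear recency weights, as in the constructor above */
	for (size_t i = 0u; i < historyAmount; ++i) {
		float priority = (i + 1) /
				((historyAmount + 1) * historyAmount / 2.0f);
		printf("priority[%u] = %.3f\n", (unsigned)i, priority);	/* 0.167 0.333 0.500 */
	}

	/* tolerant shift for a 200x200 px quadrangle, as in stabilize() */
	const float area = 200.f * 200.f;
	printf("tolerantShift = %.1f px\n", area * 0.00006f + 1.3f);	/* 3.7 */

	return 0;
}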
diff --git a/mv_image/image/src/ImageMathUtil.cpp b/mv_image/image/src/ImageMathUtil.cpp
index 8bf5ba83..0da2dbc0 100644
--- a/mv_image/image/src/ImageMathUtil.cpp
+++ b/mv_image/image/src/ImageMathUtil.cpp
@@ -16,43 +16,40 @@
#include "ImageMathUtil.h"
-namespace MediaVision
-{
-namespace Image
-{
-
+namespace MediaVision {
+namespace Image {
float getDistance(
- const cv::Point2f& point1,
- const cv::Point2f& point2)
+ const cv::Point2f& point1,
+ const cv::Point2f& point2)
{
- return sqrt(
- (point1.x - point2.x) * (point1.x - point2.x) +
- (point1.y - point2.y) * (point1.y - point2.y));
+ return sqrt(
+ (point1.x - point2.x) * (point1.x - point2.x) +
+ (point1.y - point2.y) * (point1.y - point2.y));
}
float getTriangleArea(
- const cv::Point2f& point1,
- const cv::Point2f& point2,
- const cv::Point2f& point3)
+ const cv::Point2f& point1,
+ const cv::Point2f& point2,
+ const cv::Point2f& point3)
{
- float distances[3];
+ float distances[3];
- distances[0] = getDistance(point1, point2);
- distances[1] = getDistance(point2, point3);
- distances[2] = getDistance(point3, point1);
+ distances[0] = getDistance(point1, point2);
+ distances[1] = getDistance(point2, point3);
+ distances[2] = getDistance(point3, point1);
- const float semiperimeter = (distances[0] + distances[1] + distances[2]) / 2.0f;
+ const float semiperimeter = (distances[0] + distances[1] + distances[2]) / 2.0f;
- return sqrt(semiperimeter *
- (semiperimeter - distances[0]) *
- (semiperimeter - distances[1]) *
- (semiperimeter - distances[2]));
+ return sqrt(semiperimeter *
+ (semiperimeter - distances[0]) *
+ (semiperimeter - distances[1]) *
+ (semiperimeter - distances[2]));
}
float getQuadrangleArea(const cv::Point2f points[NumberOfQuadrangleCorners])
{
- return getTriangleArea(points[0], points[1], points[2]) +
- getTriangleArea(points[0], points[3], points[2]);
+ return getTriangleArea(points[0], points[1], points[2]) +
+ getTriangleArea(points[0], points[3], points[2]);
}
} /* Image */
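As a quick sanity check of the formulas above (Heron's formula for a triangle and the split of the quadrangle into the triangles (points[0], points[1], points[2]) and (points[0], points[3], points[2])): for an axis-aligned rectangle with corners (0,0), (4,0), (4,3) and (0,3), each triangle is a 3-4-5 right triangle with semiperimeter 6 and area sqrt(6 * 2 * 3 * 1) = 6, so the quadrangle area is 12, i.e. width * height. A minimal sketch exercising the functions:

#include "ImageMathUtil.h"

#include <cstdio>

using namespace MediaVision::Image;

int main(void)
{
	const cv::Point2f corners[NumberOfQuadrangleCorners] = {
		cv::Point2f(0.f, 0.f), cv::Point2f(4.f, 0.f),
		cv::Point2f(4.f, 3.f), cv::Point2f(0.f, 3.f)
	};

	printf("diagonal   = %.1f\n", getDistance(corners[0], corners[2]));	/* 5.0 */
	printf("triangle   = %.1f\n",
			getTriangleArea(corners[0], corners[1], corners[2]));	/* 6.0 */
	printf("quadrangle = %.1f\n", getQuadrangleArea(corners));	/* 12.0 */

	return 0;
}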
diff --git a/mv_image/image/src/ImageObject.cpp b/mv_image/image/src/ImageObject.cpp
index 531ec62d..a5626056 100644
--- a/mv_image/image/src/ImageObject.cpp
+++ b/mv_image/image/src/ImageObject.cpp
@@ -29,446 +29,417 @@
#include <fstream>
#include <unistd.h>
-namespace MediaVision
-{
-namespace Image
-{
-
+namespace MediaVision {
+namespace Image {
ImageObject::ImageObject() :
- m_isEmpty(true),
- m_isLabeled(false),
- m_label(0),
- m_recognitionRate(0.f)
+ m_isEmpty(true),
+ m_isLabeled(false),
+ m_label(0),
+ m_recognitionRate(0.f)
{
- ; /* NULL */
+ ; /* NULL */
}
ImageObject::ImageObject(const cv::Mat& image, const FeaturesExtractingParams& params) :
- m_isEmpty(true),
- m_isLabeled(false),
- m_label(0),
- m_recognitionRate(0.f)
+ m_isEmpty(true),
+ m_isLabeled(false),
+ m_label(0),
+ m_recognitionRate(0.f)
{
- fill(image, params);
+ fill(image, params);
}
ImageObject::ImageObject(const ImageObject& copy) :
- m_isEmpty(copy.m_isEmpty),
- m_isLabeled(copy.m_isLabeled),
- m_label(copy.m_label),
- m_boundingContour(copy.m_boundingContour),
- m_objectKeypoints(copy.m_objectKeypoints),
- m_objectDescriptors(copy.m_objectDescriptors.clone()),
- m_recognitionRate(copy.m_recognitionRate)
+ m_isEmpty(copy.m_isEmpty),
+ m_isLabeled(copy.m_isLabeled),
+ m_label(copy.m_label),
+ m_boundingContour(copy.m_boundingContour),
+ m_objectKeypoints(copy.m_objectKeypoints),
+ m_objectDescriptors(copy.m_objectDescriptors.clone()),
+ m_recognitionRate(copy.m_recognitionRate)
{
- ; /* NULL */
+ ; /* NULL */
}
ImageObject& ImageObject::operator=(const ImageObject& copy)
{
- if (this != &copy)
- {
- m_isEmpty = copy.m_isEmpty;
- m_isLabeled = copy.m_isLabeled;
- m_label = copy.m_label;
- m_boundingContour = copy.m_boundingContour;
- m_objectKeypoints = copy.m_objectKeypoints;
- m_objectDescriptors = copy.m_objectDescriptors.clone();
- m_recognitionRate = copy.m_recognitionRate;
- }
- return *this;
+ if (this != &copy) {
+ m_isEmpty = copy.m_isEmpty;
+ m_isLabeled = copy.m_isLabeled;
+ m_label = copy.m_label;
+ m_boundingContour = copy.m_boundingContour;
+ m_objectKeypoints = copy.m_objectKeypoints;
+ m_objectDescriptors = copy.m_objectDescriptors.clone();
+ m_recognitionRate = copy.m_recognitionRate;
+ }
+ return *this;
}
ImageObject::~ImageObject()
{
- ; /* NULL */
+ ; /* NULL */
}
void ImageObject::fill(const cv::Mat& image, const FeaturesExtractingParams& params)
{
- m_isEmpty = false;
- m_boundingContour.resize(NumberOfQuadrangleCorners);
+ m_isEmpty = false;
+ m_boundingContour.resize(NumberOfQuadrangleCorners);
- m_boundingContour[0].x = 0.f;
- m_boundingContour[0].y = 0.f;
+ m_boundingContour[0].x = 0.f;
+ m_boundingContour[0].y = 0.f;
- m_boundingContour[1].x = image.cols;
- m_boundingContour[1].y = 0.f;
+ m_boundingContour[1].x = image.cols;
+ m_boundingContour[1].y = 0.f;
- m_boundingContour[2].x = image.cols;
- m_boundingContour[2].y = image.rows;
+ m_boundingContour[2].x = image.cols;
+ m_boundingContour[2].y = image.rows;
- m_boundingContour[3].x = 0.f;
- m_boundingContour[3].y = image.rows;
+ m_boundingContour[3].x = 0.f;
+ m_boundingContour[3].y = image.rows;
- extractFeatures(image, params);
+ extractFeatures(image, params);
- computeRecognitionRate(image);
+ computeRecognitionRate(image);
- LOGI("[%s] Image object is filled.", __FUNCTION__);
+ LOGI("[%s] Image object is filled.", __FUNCTION__);
}
bool ImageObject::fill(const cv::Mat& image, const cv::Rect& boundingBox,
- const FeaturesExtractingParams& params)
+ const FeaturesExtractingParams& params)
{
- if ((0 > boundingBox.x) || (0 >= boundingBox.width) ||
- (0 > boundingBox.y) || (0 >= boundingBox.height) ||
- (image.cols < (boundingBox.x + boundingBox.width)) ||
- (image.rows < (boundingBox.y + boundingBox.height)))
- {
- LOGE("[%s] Invalid ROI.", __FUNCTION__);
- return false;
- }
+ if ((0 > boundingBox.x) || (0 >= boundingBox.width) ||
+ (0 > boundingBox.y) || (0 >= boundingBox.height) ||
+ (image.cols < (boundingBox.x + boundingBox.width)) ||
+ (image.rows < (boundingBox.y + boundingBox.height))) {
+ LOGE("[%s] Invalid ROI.", __FUNCTION__);
+ return false;
+ }
- m_isEmpty = false;
- m_boundingContour.resize(NumberOfQuadrangleCorners);
+ m_isEmpty = false;
+ m_boundingContour.resize(NumberOfQuadrangleCorners);
- m_boundingContour[0].x = 0.f;
- m_boundingContour[0].y = 0.f;
+ m_boundingContour[0].x = 0.f;
+ m_boundingContour[0].y = 0.f;
- m_boundingContour[1].x = boundingBox.width;
- m_boundingContour[1].y = 0.f;
+ m_boundingContour[1].x = boundingBox.width;
+ m_boundingContour[1].y = 0.f;
- m_boundingContour[2].x = boundingBox.width;
- m_boundingContour[2].y = boundingBox.height;
+ m_boundingContour[2].x = boundingBox.width;
+ m_boundingContour[2].y = boundingBox.height;
- m_boundingContour[3].x = 0.f;
- m_boundingContour[3].y = boundingBox.height;
+ m_boundingContour[3].x = 0.f;
+ m_boundingContour[3].y = boundingBox.height;
- cv::Mat objectImage(image, boundingBox);
+ cv::Mat objectImage(image, boundingBox);
- extractFeatures(objectImage, params);
+ extractFeatures(objectImage, params);
- computeRecognitionRate(image);
+ computeRecognitionRate(image);
- LOGI("[%s] Image object is filled.", __FUNCTION__);
+ LOGI("[%s] Image object is filled.", __FUNCTION__);
- return true;
+ return true;
}
void ImageObject::extractFeatures(const cv::Mat& image,
- const FeaturesExtractingParams& params)
+ const FeaturesExtractingParams& params)
{
- cv::ORB orb(params.mMaximumFeaturesNumber, params.mScaleFactor);
-
- if (image.cols < MinWidth || image.rows < MinHeight)
- {
- LOGW("[%s] Area is too small, recognition rate is 0.", __FUNCTION__);
- m_objectKeypoints.clear();
- m_objectDescriptors = cv::Mat();
- }
- else
- {
- orb.detect(image, m_objectKeypoints);
- orb.compute(image, m_objectKeypoints, m_objectDescriptors);
- }
+ cv::ORB orb(params.mMaximumFeaturesNumber, params.mScaleFactor);
+
+ if (image.cols < MinWidth || image.rows < MinHeight) {
+ LOGW("[%s] Area is too small, recognition rate is 0.", __FUNCTION__);
+ m_objectKeypoints.clear();
+ m_objectDescriptors = cv::Mat();
+ } else {
+ orb.detect(image, m_objectKeypoints);
+ orb.compute(image, m_objectKeypoints, m_objectDescriptors);
+ }
}
void ImageObject::computeRecognitionRate(const cv::Mat& image)
{
- const size_t numberOfKeypoints = m_objectKeypoints.size();
-
- // it is impossible to calculate the perspective transformation parameters
- // if number of key points less than MinimumNumberOfFeatures (4)
- if (numberOfKeypoints < MinimumNumberOfFeatures)
- {
- m_recognitionRate = 0.f;
- return;
- }
-
- static const size_t xCellsNumber = 10u;
- static const size_t yCellsNumber = 10u;
-
- cv::Mat cells[xCellsNumber][yCellsNumber];
- size_t accumulationCounter[xCellsNumber][yCellsNumber];
-
- const size_t cellWidth = image.cols / xCellsNumber;
- const size_t cellHeight = image.rows / yCellsNumber;
-
- for (size_t x = 0u; x < xCellsNumber; ++x)
- {
- for (size_t y = 0u; y < yCellsNumber; ++y)
- {
- cells[x][y] = image(cv::Rect(
- x * cellWidth,
- y * cellHeight,
- cellWidth,
- cellHeight));
-
- accumulationCounter[x][y] = 0;
- }
- }
-
- for (size_t i = 0u; i < numberOfKeypoints; ++i)
- {
- size_t xCellIdx = m_objectKeypoints[i].pt.x / cellWidth;
- if (xCellIdx >= xCellsNumber)
- {
- xCellIdx = xCellsNumber - 1;
- }
- size_t yCellIdx = m_objectKeypoints[i].pt.y / cellHeight;
- if (yCellIdx >= yCellsNumber)
- {
- yCellIdx = yCellsNumber - 1;
- }
- ++(accumulationCounter[xCellIdx][yCellIdx]);
- }
-
- const float exceptedNumber = numberOfKeypoints /
- (float)(xCellsNumber * yCellsNumber);
-
- float distributedEvaluation = 0.f;
-
- for (size_t x = 0u; x < xCellsNumber; ++x)
- {
- for (size_t y = 0u; y < yCellsNumber; ++y)
- {
- distributedEvaluation += (accumulationCounter[x][y] - exceptedNumber) *
- (accumulationCounter[x][y] - exceptedNumber) / exceptedNumber;
- }
- }
-
- float maximumDistributedEvaluation = (xCellsNumber * yCellsNumber - 1) *
- exceptedNumber;
-
- maximumDistributedEvaluation += (numberOfKeypoints - exceptedNumber) *
- (numberOfKeypoints - exceptedNumber) / exceptedNumber;
-
- distributedEvaluation = 1 -
- (distributedEvaluation / maximumDistributedEvaluation);
-
- // Exponentiation to find an approximate confidence value based on the
- // number of key points on the image.
- const float cardinalityEvaluation = pow(-0.9, numberOfKeypoints - 3) + 1.0f;
-
- m_recognitionRate =
- distributedEvaluation *
- cardinalityEvaluation;
+ const size_t numberOfKeypoints = m_objectKeypoints.size();
+
+ /* it is impossible to calculate the perspective transformation parameters
+ * if the number of key points is less than MinimumNumberOfFeatures (4)
+ */
+ if (numberOfKeypoints < MinimumNumberOfFeatures) {
+ m_recognitionRate = 0.f;
+ return;
+ }
+
+ static const size_t xCellsNumber = 10u;
+ static const size_t yCellsNumber = 10u;
+
+ cv::Mat cells[xCellsNumber][yCellsNumber];
+ size_t accumulationCounter[xCellsNumber][yCellsNumber];
+
+ const size_t cellWidth = image.cols / xCellsNumber;
+ const size_t cellHeight = image.rows / yCellsNumber;
+
+ for (size_t x = 0u; x < xCellsNumber; ++x) {
+ for (size_t y = 0u; y < yCellsNumber; ++y) {
+ cells[x][y] = image(cv::Rect(
+ x * cellWidth,
+ y * cellHeight,
+ cellWidth,
+ cellHeight));
+
+ accumulationCounter[x][y] = 0;
+ }
+ }
+
+ for (size_t i = 0u; i < numberOfKeypoints; ++i) {
+ size_t xCellIdx = m_objectKeypoints[i].pt.x / cellWidth;
+ if (xCellIdx >= xCellsNumber) {
+ xCellIdx = xCellsNumber - 1;
+ }
+ size_t yCellIdx = m_objectKeypoints[i].pt.y / cellHeight;
+ if (yCellIdx >= yCellsNumber) {
+ yCellIdx = yCellsNumber - 1;
+ }
+ ++(accumulationCounter[xCellIdx][yCellIdx]);
+ }
+
+ const float exceptedNumber = numberOfKeypoints /
+ (float)(xCellsNumber * yCellsNumber);
+
+ float distributedEvaluation = 0.f;
+
+ for (size_t x = 0u; x < xCellsNumber; ++x) {
+ for (size_t y = 0u; y < yCellsNumber; ++y) {
+ distributedEvaluation += (accumulationCounter[x][y] - exceptedNumber) *
+ (accumulationCounter[x][y] - exceptedNumber) / exceptedNumber;
+ }
+ }
+
+ float maximumDistributedEvaluation = (xCellsNumber * yCellsNumber - 1) *
+ exceptedNumber;
+
+ maximumDistributedEvaluation += (numberOfKeypoints - exceptedNumber) *
+ (numberOfKeypoints - exceptedNumber) / exceptedNumber;
+
+ distributedEvaluation = 1 -
+ (distributedEvaluation / maximumDistributedEvaluation);
+
+ /* Exponentiation to find an approximate confidence value based on the
+ * number of key points on the image.
+ */
+ const float cardinalityEvaluation = pow(-0.9, numberOfKeypoints - 3) + 1.0f;
+
+ m_recognitionRate =
+ distributedEvaluation *
+ cardinalityEvaluation;
}
float ImageObject::getRecognitionRate(void) const
{
- return m_recognitionRate;
+ return m_recognitionRate;
}
bool ImageObject::isEmpty() const
{
- return m_isEmpty;
+ return m_isEmpty;
}
void ImageObject::setLabel(int label)
{
- m_isLabeled = true;
- m_label = label;
+ m_isLabeled = true;
+ m_label = label;
}
bool ImageObject::getLabel(int& label) const
{
- if (!m_isLabeled)
- {
- LOGW("[%s] Image hasn't label.", __FUNCTION__);
- return false;
- }
- label = m_label;
- return true;
+ if (!m_isLabeled) {
+ LOGW("[%s] Image hasn't label.", __FUNCTION__);
+ return false;
+ }
+ label = m_label;
+ return true;
}
int ImageObject::save(const char *fileName) const
{
- std::string prefix_path = std::string(app_get_data_path());
- LOGD("prefix_path: %s", prefix_path.c_str());
+ std::string prefix_path = std::string(app_get_data_path());
+ LOGD("prefix_path: %s", prefix_path.c_str());
- std::string filePath;
- filePath += prefix_path;
- filePath += fileName;
+ std::string filePath;
+ filePath += prefix_path;
+ filePath += fileName;
- /* check the directory is available */
- std::string prefix_path_check = filePath.substr(0, filePath.find_last_of('/'));
- if (access(prefix_path_check.c_str(), F_OK))
- {
- LOGE("Can't save image object. Path[%s] doesn't existed.", prefix_path_check.c_str());
+ /* check the directory is available */
+ std::string prefix_path_check = filePath.substr(0, filePath.find_last_of('/'));
+ if (access(prefix_path_check.c_str(), F_OK)) {
+ LOGE("Can't save image object. Path[%s] doesn't existed.", prefix_path_check.c_str());
- return MEDIA_VISION_ERROR_INVALID_PATH;
- }
+ return MEDIA_VISION_ERROR_INVALID_PATH;
+ }
- std::ofstream out;
+ std::ofstream out;
- out.open(filePath.c_str());
+ out.open(filePath.c_str());
- if (!out.is_open())
- {
- LOGE("[%s] Can't create/open file.", __FUNCTION__);
- return MEDIA_VISION_ERROR_PERMISSION_DENIED;
- }
+ if (!out.is_open()) {
+ LOGE("[%s] Can't create/open file.", __FUNCTION__);
+ return MEDIA_VISION_ERROR_PERMISSION_DENIED;
+ }
- out<<(*this);
+ out << (*this);
- out.close();
- LOGI("[%s] Image object is saved.", __FUNCTION__);
+ out.close();
+ LOGI("[%s] Image object is saved.", __FUNCTION__);
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int ImageObject::load(const char *fileName)
{
- /* find directory */
- std::string prefix_path = std::string(app_get_data_path());
- LOGD("prefix_path: %s", prefix_path.c_str());
+ /* find directory */
+ std::string prefix_path = std::string(app_get_data_path());
+ LOGD("prefix_path: %s", prefix_path.c_str());
- std::string filePath;
- filePath += prefix_path;
- filePath += fileName;
+ std::string filePath;
+ filePath += prefix_path;
+ filePath += fileName;
- if (access(filePath.c_str(), F_OK))
- {
- LOGE("Can't load image object model. Path[%s] doesn't existed.", filePath.c_str());
+ if (access(filePath.c_str(), F_OK)) {
+ LOGE("Can't load image object model. Path[%s] doesn't existed.", filePath.c_str());
- return MEDIA_VISION_ERROR_INVALID_PATH;
- }
+ return MEDIA_VISION_ERROR_INVALID_PATH;
+ }
- std::ifstream in;
- in.open(filePath.c_str());
+ std::ifstream in;
+ in.open(filePath.c_str());
- if (!in.is_open())
- {
- LOGE("[%s] Can't open file.", __FUNCTION__);
- return MEDIA_VISION_ERROR_PERMISSION_DENIED;
- }
+ if (!in.is_open()) {
+ LOGE("[%s] Can't open file.", __FUNCTION__);
+ return MEDIA_VISION_ERROR_PERMISSION_DENIED;
+ }
- in>>(*this);
+ in >> (*this);
- if (!in.good())
- {
- LOGE("[%s] Unexpected end of file.", __FUNCTION__);
- return MEDIA_VISION_ERROR_PERMISSION_DENIED;
- }
+ if (!in.good()) {
+ LOGE("[%s] Unexpected end of file.", __FUNCTION__);
+ return MEDIA_VISION_ERROR_PERMISSION_DENIED;
+ }
- in.close();
- LOGI("[%s] Image object is loaded.", __FUNCTION__);
+ in.close();
+ LOGI("[%s] Image object is loaded.", __FUNCTION__);
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
std::ostream& operator << (std::ostream& os, const ImageObject& obj)
{
- os<<std::setprecision(7);
-
- os<<obj.m_isEmpty<<'\n';
- os<<obj.m_isLabeled<<'\n';
- os<<obj.m_label<<'\n';
-
- os<<obj.m_boundingContour.size()<<'\n';
- for (size_t pointNum = 0u; pointNum < obj.m_boundingContour.size(); ++pointNum)
- {
- os<<obj.m_boundingContour[pointNum].x<<' ';
- os<<obj.m_boundingContour[pointNum].y<<'\n';
- }
-
- os<<obj.m_objectKeypoints.size()<<'\n';
- for (size_t keypointNum = 0u; keypointNum < obj.m_objectKeypoints.size();++keypointNum)
- {
- os<<obj.m_objectKeypoints[keypointNum].pt.x<<' ';
- os<<obj.m_objectKeypoints[keypointNum].pt.y<<' ';
- os<<obj.m_objectKeypoints[keypointNum].size<<' ';
- os<<obj.m_objectKeypoints[keypointNum].response<<' ';
- os<<obj.m_objectKeypoints[keypointNum].angle<<' ';
- os<<obj.m_objectKeypoints[keypointNum].octave<<' ';
- os<<obj.m_objectKeypoints[keypointNum].class_id<<'\n';
- }
-
- os<<obj.m_objectDescriptors.rows<<' ';
- os<<obj.m_objectDescriptors.cols<<' ';
- os<<obj.m_objectDescriptors.type()<<'\n';
- for (int descriptorNum = 0; descriptorNum < obj.m_objectDescriptors.rows;
- ++descriptorNum)
- {
- for (int featureNum = 0; featureNum < obj.m_objectDescriptors.cols;
- ++featureNum, os<<'\n')
- {
- os<<(int)obj.m_objectDescriptors.at<uchar>(descriptorNum, featureNum)<<' ';
- }
- }
-
- return os;
+ os << std::setprecision(7);
+
+ os << obj.m_isEmpty << '\n';
+ os << obj.m_isLabeled << '\n';
+ os << obj.m_label << '\n';
+
+ os << obj.m_boundingContour.size() << '\n';
+ for (size_t pointNum = 0u; pointNum < obj.m_boundingContour.size(); ++pointNum) {
+ os << obj.m_boundingContour[pointNum].x << ' ';
+ os << obj.m_boundingContour[pointNum].y << '\n';
+ }
+
+ os << obj.m_objectKeypoints.size() << '\n';
+ for (size_t keypointNum = 0u; keypointNum < obj.m_objectKeypoints.size(); ++keypointNum) {
+ os << obj.m_objectKeypoints[keypointNum].pt.x << ' ';
+ os << obj.m_objectKeypoints[keypointNum].pt.y << ' ';
+ os << obj.m_objectKeypoints[keypointNum].size << ' ';
+ os << obj.m_objectKeypoints[keypointNum].response << ' ';
+ os << obj.m_objectKeypoints[keypointNum].angle << ' ';
+ os << obj.m_objectKeypoints[keypointNum].octave << ' ';
+ os << obj.m_objectKeypoints[keypointNum].class_id << '\n';
+ }
+
+ os << obj.m_objectDescriptors.rows << ' ';
+ os << obj.m_objectDescriptors.cols << ' ';
+ os << obj.m_objectDescriptors.type() << '\n';
+ for (int descriptorNum = 0; descriptorNum < obj.m_objectDescriptors.rows;
+ ++descriptorNum) {
+ for (int featureNum = 0; featureNum < obj.m_objectDescriptors.cols;
+ ++featureNum, os << '\n') {
+ os << (int)obj.m_objectDescriptors.at<uchar>(descriptorNum, featureNum) << ' ';
+ }
+ }
+
+ return os;
}
std::istream& operator >> (std::istream& is, ImageObject& obj)
{
- size_t numberOfContourPoints = 0u;
- size_t numberOfKeyPoints = 0u;
- int rows = 0, cols = 0;
- int descriptorType = 0;
+ size_t numberOfContourPoints = 0u;
+ size_t numberOfKeyPoints = 0u;
+ int rows = 0, cols = 0;
+ int descriptorType = 0;
- ImageObject temporal;
+ ImageObject temporal;
#define MEDIA_VISION_CHECK_IFSTREAM \
- if (!is.good()) \
- { \
- return is; \
- }
-
- is>>temporal.m_isEmpty;
- MEDIA_VISION_CHECK_IFSTREAM
- is>>temporal.m_isLabeled;
- MEDIA_VISION_CHECK_IFSTREAM
- is>>temporal.m_label;
- MEDIA_VISION_CHECK_IFSTREAM
-
- is>>numberOfContourPoints;
- MEDIA_VISION_CHECK_IFSTREAM
-
- temporal.m_boundingContour.resize(numberOfContourPoints);
- for (size_t pointNum = 0; pointNum < temporal.m_boundingContour.size(); ++pointNum)
- {
- is>>temporal.m_boundingContour[pointNum].x;
- MEDIA_VISION_CHECK_IFSTREAM
- is>>temporal.m_boundingContour[pointNum].y;
- MEDIA_VISION_CHECK_IFSTREAM
- }
-
- is>>numberOfKeyPoints;
- temporal.m_objectKeypoints.resize(numberOfKeyPoints);
- for (size_t keypointNum = 0; keypointNum < temporal.m_objectKeypoints.size(); ++keypointNum)
- {
- is>>temporal.m_objectKeypoints[keypointNum].pt.x;
- MEDIA_VISION_CHECK_IFSTREAM
- is>>temporal.m_objectKeypoints[keypointNum].pt.y;
- MEDIA_VISION_CHECK_IFSTREAM
- is>>temporal.m_objectKeypoints[keypointNum].size;
- MEDIA_VISION_CHECK_IFSTREAM
- is>>temporal.m_objectKeypoints[keypointNum].response;
- MEDIA_VISION_CHECK_IFSTREAM
- is>>temporal.m_objectKeypoints[keypointNum].angle;
- MEDIA_VISION_CHECK_IFSTREAM
- is>>temporal.m_objectKeypoints[keypointNum].octave;
- MEDIA_VISION_CHECK_IFSTREAM
- is>>temporal.m_objectKeypoints[keypointNum].class_id;
- MEDIA_VISION_CHECK_IFSTREAM
- }
-
- is>>rows;
- MEDIA_VISION_CHECK_IFSTREAM
- is>>cols;
- MEDIA_VISION_CHECK_IFSTREAM
- is>>descriptorType;
- MEDIA_VISION_CHECK_IFSTREAM
- temporal.m_objectDescriptors = cv::Mat(rows, cols, descriptorType);
- int value = 0;
- for (int descriptorNum = 0; descriptorNum < temporal.m_objectDescriptors.rows; ++descriptorNum)
- {
- for (int featureNum = 0; featureNum < temporal.m_objectDescriptors.cols; ++featureNum)
- {
- is>>value;
- MEDIA_VISION_CHECK_IFSTREAM
- temporal.m_objectDescriptors.at<uchar>(descriptorNum, featureNum) = (uchar)value;
- }
- }
+ if (!is.good()) { \
+ return is; \
+ }
+
+ is >> temporal.m_isEmpty;
+ MEDIA_VISION_CHECK_IFSTREAM
+ is >> temporal.m_isLabeled;
+ MEDIA_VISION_CHECK_IFSTREAM
+ is >> temporal.m_label;
+ MEDIA_VISION_CHECK_IFSTREAM
+
+ is >> numberOfContourPoints;
+ MEDIA_VISION_CHECK_IFSTREAM
+
+ temporal.m_boundingContour.resize(numberOfContourPoints);
+ for (size_t pointNum = 0; pointNum < temporal.m_boundingContour.size(); ++pointNum) {
+ is >> temporal.m_boundingContour[pointNum].x;
+ MEDIA_VISION_CHECK_IFSTREAM
+ is >> temporal.m_boundingContour[pointNum].y;
+ MEDIA_VISION_CHECK_IFSTREAM
+ }
+
+ is >> numberOfKeyPoints;
+ temporal.m_objectKeypoints.resize(numberOfKeyPoints);
+ for (size_t keypointNum = 0; keypointNum < temporal.m_objectKeypoints.size(); ++keypointNum) {
+ is >> temporal.m_objectKeypoints[keypointNum].pt.x;
+ MEDIA_VISION_CHECK_IFSTREAM
+ is >> temporal.m_objectKeypoints[keypointNum].pt.y;
+ MEDIA_VISION_CHECK_IFSTREAM
+ is >> temporal.m_objectKeypoints[keypointNum].size;
+ MEDIA_VISION_CHECK_IFSTREAM
+ is >> temporal.m_objectKeypoints[keypointNum].response;
+ MEDIA_VISION_CHECK_IFSTREAM
+ is >> temporal.m_objectKeypoints[keypointNum].angle;
+ MEDIA_VISION_CHECK_IFSTREAM
+ is >> temporal.m_objectKeypoints[keypointNum].octave;
+ MEDIA_VISION_CHECK_IFSTREAM
+ is >> temporal.m_objectKeypoints[keypointNum].class_id;
+ MEDIA_VISION_CHECK_IFSTREAM
+ }
+
+ is >> rows;
+ MEDIA_VISION_CHECK_IFSTREAM
+ is >> cols;
+ MEDIA_VISION_CHECK_IFSTREAM
+ is >> descriptorType;
+ MEDIA_VISION_CHECK_IFSTREAM
+ temporal.m_objectDescriptors = cv::Mat(rows, cols, descriptorType);
+ int value = 0;
+ for (int descriptorNum = 0; descriptorNum < temporal.m_objectDescriptors.rows; ++descriptorNum) {
+ for (int featureNum = 0; featureNum < temporal.m_objectDescriptors.cols; ++featureNum) {
+ is >> value;
+ MEDIA_VISION_CHECK_IFSTREAM
+ temporal.m_objectDescriptors.at<uchar>(descriptorNum, featureNum) = (uchar)value;
+ }
+ }
#undef MEDIA_VISION_CHECK_IFSTREAM
- obj = temporal;
+ obj = temporal;
- return is;
+ return is;
}
} /* Image */
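The recognition rate computed above is, in effect, a normalized chi-square uniformity score over a 10x10 grid of keypoint counts, multiplied by a keypoint-count term pow(-0.9, n - 3) + 1. The sketch below isolates just the uniformity score, with the cell counts passed in directly instead of being binned from cv::KeyPoint positions; the function name is illustrative and C++11 plus the standard library are assumed:

#include <cstdio>
#include <vector>

/* Returns a value in [0, 1]: 1 when the counts are perfectly uniform,
 * 0 when all keypoints fall into a single cell. */
static float distributionScore(const std::vector<size_t>& cellCounts, size_t keypoints)
{
    const float expected = keypoints / (float)cellCounts.size();

    float chi2 = 0.f;
    for (size_t i = 0; i < cellCounts.size(); ++i)
        chi2 += (cellCounts[i] - expected) * (cellCounts[i] - expected) / expected;

    /* Worst case: every keypoint in one cell, the rest empty */
    const float maxChi2 = (cellCounts.size() - 1) * expected +
            (keypoints - expected) * (keypoints - expected) / expected;

    return 1.f - chi2 / maxChi2;
}

int main()
{
    /* 4 cells, 8 keypoints: spread evenly vs. clumped in one cell */
    std::vector<size_t> even = {2, 2, 2, 2};
    std::vector<size_t> clumped = {8, 0, 0, 0};

    std::printf("even:    %f\n", distributionScore(even, 8));    /* 1.000000 */
    std::printf("clumped: %f\n", distributionScore(clumped, 8)); /* 0.000000 */
    return 0;
}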
diff --git a/mv_image/image/src/ImageRecognizer.cpp b/mv_image/image/src/ImageRecognizer.cpp
index c9215556..73dd3353 100644
--- a/mv_image/image/src/ImageRecognizer.cpp
+++ b/mv_image/image/src/ImageRecognizer.cpp
@@ -19,293 +19,263 @@
#include "mv_private.h"
-namespace MediaVision
-{
-namespace Image
-{
-
+namespace MediaVision {
+namespace Image {
ImageRecognizer::ImageRecognizer(
- const cv::Mat& sceneImage,
- const FeaturesExtractingParams& params) :
- m_scene(sceneImage, params)
+ const cv::Mat& sceneImage,
+ const FeaturesExtractingParams& params) :
+ m_scene(sceneImage, params)
{
- ; /* NULL */
+ ; /* NULL */
}
ImageRecognizer::ImageRecognizer(const ImageObject& scene) :
- m_scene(scene)
+ m_scene(scene)
{
- ; /* NULL */
+ ; /* NULL */
}
ImageRecognizer::~ImageRecognizer()
{
- ; /* NULL */
+ ; /* NULL */
}
bool ImageRecognizer::recognize(
- const ImageObject& target,
- const RecognitionParams& params,
- std::vector<cv::Point2f>& contour) const
+ const ImageObject& target,
+ const RecognitionParams& params,
+ std::vector<cv::Point2f>& contour) const
{
- cv::Mat homophraphyMatrix;
-
- contour.clear();
-
- if (MinimumNumberOfFeatures > target.m_objectKeypoints.size())
- {
- LOGW("[%s] Image object can't be recognized (Recognition rate is too small).", __FUNCTION__);
- return false;
- }
- if (MinimumNumberOfFeatures > m_scene.m_objectKeypoints.size())
- {
- LOGW("[%s] Scene image can't be analyzed (Too few features for recognition).", __FUNCTION__);
- return false;
- }
-
- if(!findHomophraphyMatrix(target, params, homophraphyMatrix))
- {
- LOGE("[%s] Can't match the features.", __FUNCTION__);
- return false;
- }
-
- cv::perspectiveTransform(target.m_boundingContour, contour, homophraphyMatrix);
-
- if (target.m_boundingContour.size() == NumberOfQuadrangleCorners)
- {
- if (!isPossibleQuadrangleCorners(contour.data()))
- {
- LOGI("[%s] Image object isn't recognized.", __FUNCTION__);
- contour.clear();
- return false;
- }
- }
-
- LOGI("[%s] Image object is recognized.", __FUNCTION__);
- return true;
+ cv::Mat homophraphyMatrix;
+
+ contour.clear();
+
+ if (MinimumNumberOfFeatures > target.m_objectKeypoints.size()) {
+ LOGW("[%s] Image object can't be recognized (Recognition rate is too small).", __FUNCTION__);
+ return false;
+ }
+
+ if (MinimumNumberOfFeatures > m_scene.m_objectKeypoints.size()) {
+ LOGW("[%s] Scene image can't be analyzed (Too few features for recognition).", __FUNCTION__);
+ return false;
+ }
+
+ if (!findHomophraphyMatrix(target, params, homophraphyMatrix)) {
+ LOGE("[%s] Can't match the features.", __FUNCTION__);
+ return false;
+ }
+
+ cv::perspectiveTransform(target.m_boundingContour, contour, homophraphyMatrix);
+
+ if (target.m_boundingContour.size() == NumberOfQuadrangleCorners) {
+ if (!isPossibleQuadrangleCorners(contour.data())) {
+ LOGI("[%s] Image object isn't recognized.", __FUNCTION__);
+ contour.clear();
+ return false;
+ }
+ }
+
+ LOGI("[%s] Image object is recognized.", __FUNCTION__);
+ return true;
}
bool ImageRecognizer::findHomophraphyMatrix(
- const ImageObject& target,
- const RecognitionParams& params,
- cv::Mat& homophraphyMatrix) const
+ const ImageObject& target,
+ const RecognitionParams& params,
+ cv::Mat& homophraphyMatrix) const
{
- std::vector<cv::DMatch> matches;
-
- m_matcher.match(target.m_objectDescriptors, m_scene.m_objectDescriptors, matches);
-
- size_t matchesNumber = matches.size();
-
- if (MinimumNumberOfFeatures > matchesNumber)
- {
- LOGE("[%s] Can't match the features.", __FUNCTION__);
- return false;
- }
-
- size_t requiredMatchesNumber =
- params.mRequiredMatchesPart * matchesNumber;
-
- size_t allowableMatchesNumberError =
- params.mAllowableMatchesPartError * requiredMatchesNumber;
-
- if (matchesNumber - allowableMatchesNumberError >
- (size_t)params.mMinMatchesNumber &&
- requiredMatchesNumber + allowableMatchesNumberError <
- matchesNumber)
- {
- if (requiredMatchesNumber - allowableMatchesNumberError <
- (size_t)params.mMinMatchesNumber)
- {
- if (requiredMatchesNumber + allowableMatchesNumberError >
- (size_t)params.mMinMatchesNumber)
- {
- requiredMatchesNumber = ((size_t)params.mMinMatchesNumber +
- requiredMatchesNumber + allowableMatchesNumberError) / 2;
-
- allowableMatchesNumberError = requiredMatchesNumber-
- (size_t)params.mMinMatchesNumber +
- allowableMatchesNumberError;
- }
- else
- {
- const size_t minimalAllowableMatchesNumberError = 2u;
-
- requiredMatchesNumber = params.mMinMatchesNumber +
- minimalAllowableMatchesNumberError;
-
- allowableMatchesNumberError = minimalAllowableMatchesNumberError;
- }
- }
-
- const size_t filterAmount = matchesSelection(matches,
- requiredMatchesNumber,
- allowableMatchesNumberError);
-
- if (filterAmount >= MinimumNumberOfFeatures)
- {
- matches.resize(filterAmount);
- }
- else
- {
- LOGW("[%s] Wrong filtration of feature matches.", __FUNCTION__);
- }
-
- matchesNumber = matches.size();
- }
-
- std::vector<cv::Point2f> objectPoints(matchesNumber);
- std::vector<cv::Point2f> scenePoints(matchesNumber);
-
- for (size_t matchIdx = 0; matchIdx < matchesNumber; ++matchIdx)
- {
- objectPoints[matchIdx] =
- target.m_objectKeypoints[matches[matchIdx].queryIdx].pt;
-
- scenePoints[matchIdx] =
- m_scene.m_objectKeypoints[matches[matchIdx].trainIdx].pt;
- }
-
- homophraphyMatrix = cv::findHomography(objectPoints, scenePoints, CV_RANSAC);
-
- return true;
+ std::vector<cv::DMatch> matches;
+
+ m_matcher.match(target.m_objectDescriptors, m_scene.m_objectDescriptors, matches);
+
+ size_t matchesNumber = matches.size();
+
+ if (MinimumNumberOfFeatures > matchesNumber) {
+ LOGE("[%s] Can't match the features.", __FUNCTION__);
+ return false;
+ }
+
+ size_t requiredMatchesNumber =
+ params.mRequiredMatchesPart * matchesNumber;
+
+ size_t allowableMatchesNumberError =
+ params.mAllowableMatchesPartError * requiredMatchesNumber;
+
+ if ((matchesNumber - allowableMatchesNumberError) >
+ (size_t)params.mMinMatchesNumber &&
+ (requiredMatchesNumber + allowableMatchesNumberError) <
+ matchesNumber) {
+ if ((requiredMatchesNumber - allowableMatchesNumberError) <
+ (size_t)params.mMinMatchesNumber) {
+ if ((requiredMatchesNumber + allowableMatchesNumberError) >
+ (size_t)params.mMinMatchesNumber) {
+ requiredMatchesNumber = ((size_t)params.mMinMatchesNumber +
+ requiredMatchesNumber + allowableMatchesNumberError) / 2;
+
+ allowableMatchesNumberError = requiredMatchesNumber -
+ (size_t)params.mMinMatchesNumber +
+ allowableMatchesNumberError;
+ } else {
+ const size_t minimalAllowableMatchesNumberError = 2u;
+
+ requiredMatchesNumber = params.mMinMatchesNumber +
+ minimalAllowableMatchesNumberError;
+
+ allowableMatchesNumberError = minimalAllowableMatchesNumberError;
+ }
+ }
+
+ const size_t filterAmount = matchesSelection(matches,
+ requiredMatchesNumber,
+ allowableMatchesNumberError);
+
+ if (filterAmount >= MinimumNumberOfFeatures) {
+ matches.resize(filterAmount);
+ } else {
+ LOGW("[%s] Wrong filtration of feature matches.", __FUNCTION__);
+ }
+
+ matchesNumber = matches.size();
+ }
+
+ std::vector<cv::Point2f> objectPoints(matchesNumber);
+ std::vector<cv::Point2f> scenePoints(matchesNumber);
+
+ for (size_t matchIdx = 0; matchIdx < matchesNumber; ++matchIdx) {
+ objectPoints[matchIdx] =
+ target.m_objectKeypoints[matches[matchIdx].queryIdx].pt;
+
+ scenePoints[matchIdx] =
+ m_scene.m_objectKeypoints[matches[matchIdx].trainIdx].pt;
+ }
+
+ homophraphyMatrix = cv::findHomography(objectPoints, scenePoints, CV_RANSAC);
+
+ return true;
}
size_t ImageRecognizer::matchesSelection(
- std::vector<cv::DMatch>& examples,
- unsigned int filterAmount, unsigned int allowableError) const
+ std::vector<cv::DMatch>& examples,
+ unsigned int filterAmount, unsigned int allowableError) const
{
- size_t sizeOfExamples = examples.size();
-
- if ((filterAmount + allowableError) > sizeOfExamples)
- {
- return examples.size();
- }
-
- int startLeftLimit = 0;
- int startRightLimit = sizeOfExamples - 1;
-
- int leftLimit = startLeftLimit;
- int rightLimit = startRightLimit;
-
- int requiredNumber = filterAmount;
-
- float supportElement = 0.f;
-
- while (true)
- {
- if (leftLimit >= rightLimit)
- {
- if (leftLimit < (requiredNumber - (int)allowableError))
- {
- leftLimit = requiredNumber + (int)allowableError;
- }
-
- break;
- }
-
- supportElement = computeLinearSupportElement(examples, requiredNumber,
- leftLimit, rightLimit);
-
- // Iteration similar quicksort
- while (true)
- {
- // Search the leftmost element which have bigger confidence than support element
- while (examples[leftLimit].distance <= supportElement &&
- leftLimit < startRightLimit)
- {
- ++leftLimit;
- }
-
- // Search the rightmost element which have smaller confidence than support element
- while (examples[rightLimit].distance >= supportElement &&
- rightLimit >= startLeftLimit)
- {
- --rightLimit;
- }
-
- if (leftLimit >= rightLimit)
- {
- break;
- }
-
- // Swap
- std::swap(examples[leftLimit], examples[rightLimit]);
- }
- if (abs(filterAmount - leftLimit) <= (int)allowableError)
- {
- break;
- }
- if ((int)filterAmount > leftLimit)
- {
- requiredNumber -= leftLimit - startLeftLimit;
-
- rightLimit = startRightLimit;
- startLeftLimit = leftLimit;
- }
- else
- {
- leftLimit = startLeftLimit;
- startRightLimit = rightLimit;
- }
- }
-
- return (size_t)leftLimit;
+ size_t sizeOfExamples = examples.size();
+
+ if ((filterAmount + allowableError) > sizeOfExamples) {
+ return examples.size();
+ }
+
+ int startLeftLimit = 0;
+ int startRightLimit = sizeOfExamples - 1;
+
+ int leftLimit = startLeftLimit;
+ int rightLimit = startRightLimit;
+
+ int requiredNumber = filterAmount;
+
+ float supportElement = 0.f;
+
+ while (true) {
+ if (leftLimit >= rightLimit) {
+ if (leftLimit < (requiredNumber - (int)allowableError)) {
+ leftLimit = requiredNumber + (int)allowableError;
+ }
+
+ break;
+ }
+
+ supportElement = computeLinearSupportElement(examples, requiredNumber,
+ leftLimit, rightLimit);
+
+ /* Iteration similar to quicksort */
+ while (true) {
+ /* Search the leftmost element
+ * which has a bigger confidence than the support element
+ */
+ while (examples[leftLimit].distance <= supportElement &&
+ leftLimit < startRightLimit) {
+ ++leftLimit;
+ }
+
+ /* Search the rightmost element
+ * which has a smaller confidence than the support element
+ */
+ while (examples[rightLimit].distance >= supportElement &&
+ rightLimit >= startLeftLimit) {
+ --rightLimit;
+ }
+
+ if (leftLimit >= rightLimit) {
+ break;
+ }
+
+ /* Swap */
+ std::swap(examples[leftLimit], examples[rightLimit]);
+ }
+ if (abs(filterAmount - leftLimit) <= (int)allowableError) {
+ break;
+ }
+ if ((int)filterAmount > leftLimit) {
+ requiredNumber -= leftLimit - startLeftLimit;
+
+ rightLimit = startRightLimit;
+ startLeftLimit = leftLimit;
+ } else {
+ leftLimit = startLeftLimit;
+ startRightLimit = rightLimit;
+ }
+ }
+
+ return (size_t)leftLimit;
}
float ImageRecognizer::computeLinearSupportElement(const std::vector<cv::DMatch>& examples,
- int requiredNumber, int leftLimit, int rightLimit) const
+ int requiredNumber, int leftLimit, int rightLimit) const
{
- int sizeOfExamples = rightLimit - leftLimit + 1;
-
- if (sizeOfExamples <= 1)
- {
- return examples[leftLimit].distance;
- }
-
- float minValue = examples[leftLimit].distance;
- float maxValue = examples[leftLimit].distance;
-
- // Finding the maximum and minimum values
- for (int i = leftLimit + 1; i <= rightLimit; ++i)
- {
- if (minValue > examples[i].distance)
- {
- minValue = examples[i].distance;
- }
- else if (maxValue < examples[i].distance)
- {
- maxValue = examples[i].distance;
- }
- }
-
- // Linear approximation. f(x) = k*x + b
- // f(sizeOfExamples) = maxValue; f(1) = minValue;
- const float b = (maxValue - minValue * sizeOfExamples) / (1 - sizeOfExamples);
- const float k = minValue - b;
-
- // Calculation of the support element
- return k * requiredNumber + b;
+ int sizeOfExamples = rightLimit - leftLimit + 1;
+
+ if (sizeOfExamples <= 1) {
+ return examples[leftLimit].distance;
+ }
+
+ float minValue = examples[leftLimit].distance;
+ float maxValue = examples[leftLimit].distance;
+
+ /* Finding the maximum and minimum values */
+ for (int i = leftLimit + 1; i <= rightLimit; ++i) {
+ if (minValue > examples[i].distance) {
+ minValue = examples[i].distance;
+ } else if (maxValue < examples[i].distance) {
+ maxValue = examples[i].distance;
+ }
+ }
+
+ /* Linear approximation. f(x) = k*x + b
+ * f(sizeOfExamples) = maxValue; f(1) = minValue;
+ */
+ const float b = (maxValue - minValue * sizeOfExamples) / (1 - sizeOfExamples);
+ const float k = minValue - b;
+
+ /* Calculation of the support element */
+ return k * requiredNumber + b;
}
bool ImageRecognizer::isPossibleQuadrangleCorners(
- const cv::Point2f corners[NumberOfQuadrangleCorners])
+ const cv::Point2f corners[NumberOfQuadrangleCorners])
{
- static const float Epsilon = cv::TermCriteria::EPS;
- static const float MinSizeOfDetectedArea = 30.f;
+ static const float Epsilon = cv::TermCriteria::EPS;
+ static const float MinSizeOfDetectedArea = 30.f;
- const float firstSemiArea = getTriangleArea(corners[0], corners[2], corners[1]) +
- getTriangleArea(corners[0], corners[2], corners[3]);
+ const float firstSemiArea = getTriangleArea(corners[0], corners[2], corners[1]) +
+ getTriangleArea(corners[0], corners[2], corners[3]);
- const float secondSemiArea = getTriangleArea(corners[1], corners[3], corners[2]) +
- getTriangleArea(corners[1], corners[3], corners[0]);
+ const float secondSemiArea = getTriangleArea(corners[1], corners[3], corners[2]) +
+ getTriangleArea(corners[1], corners[3], corners[0]);
- if (Epsilon < fabs(firstSemiArea - secondSemiArea) ||
- MinSizeOfDetectedArea > (firstSemiArea + secondSemiArea))
- {
- return false;
- }
+ if (Epsilon < fabs(firstSemiArea - secondSemiArea) ||
+ MinSizeOfDetectedArea > (firstSemiArea + secondSemiArea)) {
+ return false;
+ }
- return true;
+ return true;
}
} /* Image */
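matchesSelection() above is a quickselect-style partition that, instead of picking a pivot element, interpolates one: the match distances between leftLimit and rightLimit are modeled as a line with f(1) = minValue and f(n) = maxValue, and the pivot is f(requiredNumber). A small sketch of that pivot computation, using plain floats instead of cv::DMatch and an illustrative function name:

#include <algorithm>
#include <cstdio>
#include <vector>

/* Linear approximation f(x) = k*x + b with f(1) = min, f(n) = max,
 * evaluated at x = requiredNumber, as in computeLinearSupportElement(). */
static float linearSupportElement(const std::vector<float>& distances, int requiredNumber)
{
    const int n = (int)distances.size();
    if (n <= 1)
        return distances.empty() ? 0.f : distances[0];

    const float minValue = *std::min_element(distances.begin(), distances.end());
    const float maxValue = *std::max_element(distances.begin(), distances.end());

    const float b = (maxValue - minValue * n) / (1 - n);
    const float k = minValue - b;

    return k * requiredNumber + b;
}

int main()
{
    /* Distances 10..50: asking for the best 3 of 5 gives a pivot of 30 */
    std::vector<float> d = {10.f, 20.f, 30.f, 40.f, 50.f};
    std::printf("pivot = %f\n", linearSupportElement(d, 3));
    return 0;
}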
diff --git a/mv_image/image/src/ImageTracker.cpp b/mv_image/image/src/ImageTracker.cpp
index 9c114f58..400205c7 100644
--- a/mv_image/image/src/ImageTracker.cpp
+++ b/mv_image/image/src/ImageTracker.cpp
@@ -25,347 +25,307 @@
#include <pthread.h>
-namespace MediaVision
-{
-namespace Image
-{
-
+namespace MediaVision {
+namespace Image {
ImageTracker::ImageTracker(const TrackingParams& trackingParams) :
- m_trackingParams(trackingParams)
+ m_trackingParams(trackingParams)
{
- ; /* NULL */
+ ; /* NULL */
}
void ImageTracker::track(const cv::Mat& frame, ImageTrackingModel& target)
{
- ImageTrackingModel::State currentState = ImageTrackingModel::Undetected;
-
- while (pthread_mutex_trylock(&target.m_globalGuard) != 0)
- {
- pthread_spin_lock(&target.m_stateGuard);
- currentState = target.m_state;
- pthread_spin_unlock(&target.m_stateGuard);
-
- if (ImageTrackingModel::InProcess == currentState)
- {
- LOGI("[%s] Calling is skipped. Object is recognizing.", __FUNCTION__);
- return;
- }
- }
-
- pthread_spin_lock(&target.m_stateGuard);
- currentState = target.m_state;
- pthread_spin_unlock(&target.m_stateGuard);
-
- if (ImageTrackingModel::Invalid == currentState)
- {
- pthread_mutex_unlock(&target.m_globalGuard);
- LOGE("[%s] Tracking model is invalid.", __FUNCTION__);
- return;
- }
-
- switch (target.m_state)
- {
- case ImageTrackingModel::Appeared:
- case ImageTrackingModel::Tracked:
- {
- pthread_spin_lock(&target.m_stateGuard);
- target.m_state = ImageTrackingModel::InProcess;
- pthread_spin_unlock(&target.m_stateGuard);
-
- trackDetectedObject(frame, target);
- break;
- }
- case ImageTrackingModel::Undetected:
- {
- pthread_spin_lock(&target.m_stateGuard);
- target.m_state = ImageTrackingModel::InProcess;
- pthread_spin_unlock(&target.m_stateGuard);
-
- trackUndetectedObject(frame, target);
-
- // Recognition thread is started. Don't use target here, just exit!
- return;
- }
- case ImageTrackingModel::InProcess:
- default:
- {
- // Abnormal behaviour:
- // tracking model state is InProcess but globalGuard is not locked
- LOGE("[%s] Abnormal behaviour. Tracking model status is"
- "\"InProgress\" but it is not in progress.", __FUNCTION__);
-
- pthread_spin_lock(&target.m_stateGuard);
- if (target.m_recognitionObject.isEmpty())
- {
- target.m_state = ImageTrackingModel::Invalid;
- LOGI("[%s] Tracking model status is changed on \"Invalid\"", __FUNCTION__);
- }
- else
- {
- target.m_state = ImageTrackingModel::Undetected;
- LOGI("[%s] Tracking model status is changed on \"Undetected\"", __FUNCTION__);
- }
- pthread_spin_unlock(&target.m_stateGuard);
-
- pthread_mutex_unlock(&target.m_globalGuard);
- break;
- }
- }
+ ImageTrackingModel::State currentState = ImageTrackingModel::Undetected;
+
+ while (pthread_mutex_trylock(&target.m_globalGuard) != 0) {
+ pthread_spin_lock(&target.m_stateGuard);
+ currentState = target.m_state;
+ pthread_spin_unlock(&target.m_stateGuard);
+
+ if (ImageTrackingModel::InProcess == currentState) {
+ LOGI("[%s] Calling is skipped. Object is recognizing.", __FUNCTION__);
+ return;
+ }
+ }
+
+ pthread_spin_lock(&target.m_stateGuard);
+ currentState = target.m_state;
+ pthread_spin_unlock(&target.m_stateGuard);
+
+ if (ImageTrackingModel::Invalid == currentState) {
+ pthread_mutex_unlock(&target.m_globalGuard);
+ LOGE("[%s] Tracking model is invalid.", __FUNCTION__);
+ return;
+ }
+
+ switch (target.m_state) {
+ case ImageTrackingModel::Appeared:
+ case ImageTrackingModel::Tracked: {
+ pthread_spin_lock(&target.m_stateGuard);
+ target.m_state = ImageTrackingModel::InProcess;
+ pthread_spin_unlock(&target.m_stateGuard);
+
+ trackDetectedObject(frame, target);
+ break;
+ }
+ case ImageTrackingModel::Undetected: {
+ pthread_spin_lock(&target.m_stateGuard);
+ target.m_state = ImageTrackingModel::InProcess;
+ pthread_spin_unlock(&target.m_stateGuard);
+
+ trackUndetectedObject(frame, target);
+
+ /* Recognition thread is started. Don't use target here, just exit! */
+ return;
+ }
+ case ImageTrackingModel::InProcess:
+ default: {
+ /* Abnormal behaviour:
+ * tracking model state is InProcess but globalGuard is not locked
+ */
+ LOGE("[%s] Abnormal behaviour. Tracking model status is"
+ "\"InProgress\" but it is not in progress.", __FUNCTION__);
+
+ pthread_spin_lock(&target.m_stateGuard);
+ if (target.m_recognitionObject.isEmpty()) {
+ target.m_state = ImageTrackingModel::Invalid;
+ LOGI("[%s] Tracking model status is changed on \"Invalid\"", __FUNCTION__);
+ } else {
+ target.m_state = ImageTrackingModel::Undetected;
+ LOGI("[%s] Tracking model status is changed on \"Undetected\"", __FUNCTION__);
+ }
+ pthread_spin_unlock(&target.m_stateGuard);
+
+ pthread_mutex_unlock(&target.m_globalGuard);
+ break;
+ }
+ }
}
void ImageTracker::trackDetectedObject(
- const cv::Mat& frame,
- ImageTrackingModel& target)
+ const cv::Mat& frame,
+ ImageTrackingModel& target)
{
- cv::Rect expectedArea = computeExpectedArea(target, frame.size());
-
- std::vector<cv::Point2f> resultContour;
-
- ImageRecognizer recognizer(
- frame(expectedArea),
- m_trackingParams.mFramesFeaturesExtractingParams);
-
- const bool isRecognized = recognizer.recognize(
- target.m_recognitionObject,
- m_trackingParams.mRecognitionParams,
- resultContour);
-
- if (isRecognized)
- {
- for (size_t pointIdx = 0; pointIdx < resultContour.size(); ++pointIdx)
- {
- resultContour[pointIdx].x += expectedArea.x;
- resultContour[pointIdx].y += expectedArea.y;
- }
-
- if (m_trackingParams.mStabilizationParams.mHistoryAmount > 0)
- {
- target.m_stabilizator.stabilize(
- resultContour,
- m_trackingParams.mStabilizationParams);
- }
-
- target.m_stabilizator.stabilize(
- resultContour,
- m_trackingParams.mStabilizationParams);
-
- pthread_spin_lock(&target.m_lastLocationGuard);
- target.m_lastLocation = resultContour;
- pthread_spin_unlock(&target.m_lastLocationGuard);
-
- pthread_spin_lock(&target.m_stateGuard);
- target.m_state = ImageTrackingModel::Tracked;
- pthread_spin_unlock(&target.m_stateGuard);
-
- LOGI("[%s] Object is successfully tracked.", __FUNCTION__);
- }
- else
- {
- target.m_stabilizator.reset();
-
- pthread_spin_lock(&target.m_stateGuard);
- target.m_state = ImageTrackingModel::Undetected;
- pthread_spin_unlock(&target.m_stateGuard);
-
- LOGI("[%s] Object is lost.", __FUNCTION__);
- }
-
- pthread_mutex_unlock(&target.m_globalGuard);
+ cv::Rect expectedArea = computeExpectedArea(target, frame.size());
+
+ std::vector<cv::Point2f> resultContour;
+
+ ImageRecognizer recognizer(
+ frame(expectedArea),
+ m_trackingParams.mFramesFeaturesExtractingParams);
+
+ const bool isRecognized = recognizer.recognize(
+ target.m_recognitionObject,
+ m_trackingParams.mRecognitionParams,
+ resultContour);
+
+ if (isRecognized) {
+ for (size_t pointIdx = 0; pointIdx < resultContour.size(); ++pointIdx) {
+ resultContour[pointIdx].x += expectedArea.x;
+ resultContour[pointIdx].y += expectedArea.y;
+ }
+
+ if (m_trackingParams.mStabilizationParams.mHistoryAmount > 0) {
+ target.m_stabilizator.stabilize(
+ resultContour,
+ m_trackingParams.mStabilizationParams);
+ }
+
+ target.m_stabilizator.stabilize(
+ resultContour,
+ m_trackingParams.mStabilizationParams);
+
+ pthread_spin_lock(&target.m_lastLocationGuard);
+ target.m_lastLocation = resultContour;
+ pthread_spin_unlock(&target.m_lastLocationGuard);
+
+ pthread_spin_lock(&target.m_stateGuard);
+ target.m_state = ImageTrackingModel::Tracked;
+ pthread_spin_unlock(&target.m_stateGuard);
+
+ LOGI("[%s] Object is successfully tracked.", __FUNCTION__);
+ } else {
+ target.m_stabilizator.reset();
+
+ pthread_spin_lock(&target.m_stateGuard);
+ target.m_state = ImageTrackingModel::Undetected;
+ pthread_spin_unlock(&target.m_stateGuard);
+
+ LOGI("[%s] Object is lost.", __FUNCTION__);
+ }
+
+ pthread_mutex_unlock(&target.m_globalGuard);
}
void *ImageTracker::recognitionThreadFunc(void *recognitionInfo)
{
- if (NULL == recognitionInfo)
- {
- return NULL;
- }
+ if (NULL == recognitionInfo) {
+ return NULL;
+ }
- RecognitionInfo *recogInfo = (RecognitionInfo*)recognitionInfo;
+ RecognitionInfo *recogInfo = (RecognitionInfo*)recognitionInfo;
- std::vector<cv::Point2f> resultContour;
+ std::vector<cv::Point2f> resultContour;
- ImageRecognizer recognizer(
- recogInfo->mFrame,
- recogInfo->mSceneFeaturesExtractingParams);
+ ImageRecognizer recognizer(
+ recogInfo->mFrame,
+ recogInfo->mSceneFeaturesExtractingParams);
- bool isRecognized = recognizer.recognize(
- recogInfo->mpTarget->m_recognitionObject,
- recogInfo->mRecognitionParams,
- resultContour);
+ bool isRecognized = recognizer.recognize(
+ recogInfo->mpTarget->m_recognitionObject,
+ recogInfo->mRecognitionParams,
+ resultContour);
- if (isRecognized)
- {
- recogInfo->mpTarget->m_stabilizator.reset();
+ if (isRecognized) {
+ recogInfo->mpTarget->m_stabilizator.reset();
- pthread_spin_lock(&(recogInfo->mpTarget->m_lastLocationGuard));
- recogInfo->mpTarget->m_lastLocation = resultContour;
- pthread_spin_unlock(&(recogInfo->mpTarget->m_lastLocationGuard));
+ pthread_spin_lock(&(recogInfo->mpTarget->m_lastLocationGuard));
+ recogInfo->mpTarget->m_lastLocation = resultContour;
+ pthread_spin_unlock(&(recogInfo->mpTarget->m_lastLocationGuard));
- pthread_spin_lock(&(recogInfo->mpTarget->m_stateGuard));
- recogInfo->mpTarget->m_state = ImageTrackingModel::Appeared;
- pthread_spin_unlock(&(recogInfo->mpTarget->m_stateGuard));
- }
- else
- {
- pthread_spin_lock(&(recogInfo->mpTarget->m_stateGuard));
- recogInfo->mpTarget->m_state = ImageTrackingModel::Undetected;
- pthread_spin_unlock(&(recogInfo->mpTarget->m_stateGuard));
- }
+ pthread_spin_lock(&(recogInfo->mpTarget->m_stateGuard));
+ recogInfo->mpTarget->m_state = ImageTrackingModel::Appeared;
+ pthread_spin_unlock(&(recogInfo->mpTarget->m_stateGuard));
+ } else {
+ pthread_spin_lock(&(recogInfo->mpTarget->m_stateGuard));
+ recogInfo->mpTarget->m_state = ImageTrackingModel::Undetected;
+ pthread_spin_unlock(&(recogInfo->mpTarget->m_stateGuard));
+ }
- recogInfo->mpTarget->m_recognitionThread = 0;
+ recogInfo->mpTarget->m_recognitionThread = 0;
- pthread_mutex_unlock(&(recogInfo->mpTarget->m_globalGuard));
+ pthread_mutex_unlock(&(recogInfo->mpTarget->m_globalGuard));
- delete recogInfo;
+ delete recogInfo;
- return NULL;
+ return NULL;
}
void ImageTracker::trackUndetectedObject(
- const cv::Mat& frame,
- ImageTrackingModel& target)
+ const cv::Mat& frame,
+ ImageTrackingModel& target)
{
- RecognitionInfo *recognitionInfo = new RecognitionInfo;
-
- recognitionInfo->mFrame = frame.clone();
- recognitionInfo->mpTarget = &target;
-
- recognitionInfo->mRecognitionParams =
- m_trackingParams.mRecognitionParams;
- recognitionInfo->mSceneFeaturesExtractingParams =
- m_trackingParams.mFramesFeaturesExtractingParams;
-
- if (target.m_recognitionThread)
- {
- // Abnormal behaviour:
- // Recognition thread isn't finished but guardian mutex is unlocked
- LOGE("[%s] Abnormal behaviour. Recognition thread isn't finished but"
- "guardian mutex is unlocked.", __FUNCTION__);
-
- LOGI("[%s] Try to wait recognition thread.", __FUNCTION__);
- pthread_join(target.m_recognitionThread, NULL);
- target.m_recognitionThread = 0;
- LOGI("[%s] Recognition thread is finished.", __FUNCTION__);
- }
-
- const int err = pthread_create(
- &target.m_recognitionThread,
- NULL,
- recognitionThreadFunc,
- recognitionInfo);
-
- if (0 == err)
- {
- LOGI("[%s] Recognition thread is started.", __FUNCTION__);
- // Recognition thread is started. Don't use target here, just exit!
- return;
- }
- LOGE("[%s] Recognition thread creation is failed.", __FUNCTION__);
-
- pthread_spin_lock(&target.m_stateGuard);
- if (target.m_recognitionObject.isEmpty())
- {
- target.m_state = ImageTrackingModel::Invalid;
- LOGI("[%s] Tracking model status is changed on \"Invalid\"", __FUNCTION__);
- }
- else
- {
- target.m_state = ImageTrackingModel::Undetected;
- LOGI("[%s] Tracking model status is changed on \"Undetected\"", __FUNCTION__);
- }
- pthread_spin_unlock(&target.m_stateGuard);
-
- pthread_mutex_unlock(&target.m_globalGuard);
+ RecognitionInfo *recognitionInfo = new RecognitionInfo;
+
+ recognitionInfo->mFrame = frame.clone();
+ recognitionInfo->mpTarget = &target;
+
+ recognitionInfo->mRecognitionParams =
+ m_trackingParams.mRecognitionParams;
+ recognitionInfo->mSceneFeaturesExtractingParams =
+ m_trackingParams.mFramesFeaturesExtractingParams;
+
+ if (target.m_recognitionThread) {
+ /* Abnormal behaviour:
+ * Recognition thread isn't finished but guardian mutex is unlocked
+ */
+ LOGE("[%s] Abnormal behaviour. Recognition thread isn't finished but"
+ "guardian mutex is unlocked.", __FUNCTION__);
+
+ LOGI("[%s] Try to wait recognition thread.", __FUNCTION__);
+ pthread_join(target.m_recognitionThread, NULL);
+ target.m_recognitionThread = 0;
+ LOGI("[%s] Recognition thread is finished.", __FUNCTION__);
+ }
+
+ const int err = pthread_create(
+ &target.m_recognitionThread,
+ NULL,
+ recognitionThreadFunc,
+ recognitionInfo);
+
+ if (0 == err) {
+ LOGI("[%s] Recognition thread is started.", __FUNCTION__);
+ /* Recognition thread is started. Don't use target here, just exit! */
+ return;
+ }
+ LOGE("[%s] Recognition thread creation is failed.", __FUNCTION__);
+
+ pthread_spin_lock(&target.m_stateGuard);
+ if (target.m_recognitionObject.isEmpty()) {
+ target.m_state = ImageTrackingModel::Invalid;
+ LOGI("[%s] Tracking model status is changed on \"Invalid\"", __FUNCTION__);
+ } else {
+ target.m_state = ImageTrackingModel::Undetected;
+ LOGI("[%s] Tracking model status is changed on \"Undetected\"", __FUNCTION__);
+ }
+ pthread_spin_unlock(&target.m_stateGuard);
+
+ pthread_mutex_unlock(&target.m_globalGuard);
}
cv::Rect ImageTracker::computeExpectedArea(
const ImageTrackingModel& target,
const cv::Size& frameSize)
{
- if (target.m_state == ImageTrackingModel::Appeared)
- {
- LOGI("[%s] Expected area for appeared object is full frame.", __FUNCTION__);
- return cv::Rect(0, 0, frameSize.width, frameSize.height);
- }
-
- if (target.m_lastLocation.empty())
- {
- LOGW("[%s] Can't compute expected area for object without last"
- "location.",__FUNCTION__);
- return cv::Rect(0, 0, 0, 0);
- }
-
- cv::Point2f ltCorner(target.m_lastLocation[0]);
- cv::Point2f rbCorner(target.m_lastLocation[0]);
-
- const size_t contourPointsNumber = target.m_lastLocation.size();
-
- for (size_t pointNum = 1; pointNum < contourPointsNumber; ++pointNum)
- {
- if (ltCorner.x > target.m_lastLocation[pointNum].x)
- {
- ltCorner.x = target.m_lastLocation[pointNum].x;
- }
- else if (rbCorner.x < target.m_lastLocation[pointNum].x)
- {
- rbCorner.x = target.m_lastLocation[pointNum].x;
- }
-
- if (ltCorner.y > target.m_lastLocation[pointNum].y)
- {
- ltCorner.y = target.m_lastLocation[pointNum].y;
- }
- else if (rbCorner.y < target.m_lastLocation[pointNum].y)
- {
- rbCorner.y = target.m_lastLocation[pointNum].y;
- }
- }
-
- cv::Point2f center(
- (ltCorner.x + rbCorner.x) / 2.0f,
- (ltCorner.y + rbCorner.y) / 2.0f);
-
- cv::Size2f halfSize(
- (center.x - ltCorner.x) * (1 + m_trackingParams.mExpectedOffset),
- (center.y - ltCorner.y) * (1 + m_trackingParams.mExpectedOffset));
-
-
- cv::Rect expectedArea(
- center.x - halfSize.width, center.y - halfSize.height,
- halfSize.width * 2, halfSize.height * 2);
-
- if (expectedArea.x < 0)
- {
- expectedArea.width += expectedArea.x;
- expectedArea.x = 0;
- }
-
- if (expectedArea.y < 0)
- {
- expectedArea.height += expectedArea.y;
- expectedArea.y = 0;
- }
-
- if (expectedArea.x + expectedArea.width > frameSize.width)
- {
- expectedArea.width = frameSize.width - expectedArea.x;
- }
-
- if (expectedArea.y + expectedArea.height > frameSize.height)
- {
- expectedArea.height = frameSize.height - expectedArea.y;
- }
-
- if (expectedArea.width <= 0 || expectedArea.height <= 0)
- {
- expectedArea.x = 0;
- expectedArea.y = 0;
- expectedArea.width = 0;
- expectedArea.height = 0;
- }
-
- return expectedArea;
+ if (target.m_state == ImageTrackingModel::Appeared) {
+ LOGI("[%s] Expected area for appeared object is full frame.", __FUNCTION__);
+ return cv::Rect(0, 0, frameSize.width, frameSize.height);
+ }
+
+ if (target.m_lastLocation.empty()) {
+ LOGW("[%s] Can't compute expected area for object without last"
+ "location.", __FUNCTION__);
+ return cv::Rect(0, 0, 0, 0);
+ }
+
+ cv::Point2f ltCorner(target.m_lastLocation[0]);
+ cv::Point2f rbCorner(target.m_lastLocation[0]);
+
+ const size_t contourPointsNumber = target.m_lastLocation.size();
+
+ for (size_t pointNum = 1; pointNum < contourPointsNumber; ++pointNum) {
+ if (ltCorner.x > target.m_lastLocation[pointNum].x) {
+ ltCorner.x = target.m_lastLocation[pointNum].x;
+ } else if (rbCorner.x < target.m_lastLocation[pointNum].x) {
+ rbCorner.x = target.m_lastLocation[pointNum].x;
+ }
+
+ if (ltCorner.y > target.m_lastLocation[pointNum].y) {
+ ltCorner.y = target.m_lastLocation[pointNum].y;
+ } else if (rbCorner.y < target.m_lastLocation[pointNum].y) {
+ rbCorner.y = target.m_lastLocation[pointNum].y;
+ }
+ }
+
+ cv::Point2f center(
+ (ltCorner.x + rbCorner.x) / 2.0f,
+ (ltCorner.y + rbCorner.y) / 2.0f);
+
+ cv::Size2f halfSize(
+ (center.x - ltCorner.x) * (1 + m_trackingParams.mExpectedOffset),
+ (center.y - ltCorner.y) * (1 + m_trackingParams.mExpectedOffset));
+
+ cv::Rect expectedArea(
+ center.x - halfSize.width, center.y - halfSize.height,
+ halfSize.width * 2, halfSize.height * 2);
+
+ if (expectedArea.x < 0) {
+ expectedArea.width += expectedArea.x;
+ expectedArea.x = 0;
+ }
+
+ if (expectedArea.y < 0) {
+ expectedArea.height += expectedArea.y;
+ expectedArea.y = 0;
+ }
+
+ if (expectedArea.x + expectedArea.width > frameSize.width) {
+ expectedArea.width = frameSize.width - expectedArea.x;
+ }
+
+ if (expectedArea.y + expectedArea.height > frameSize.height) {
+ expectedArea.height = frameSize.height - expectedArea.y;
+ }
+
+ if (expectedArea.width <= 0 || expectedArea.height <= 0) {
+ expectedArea.x = 0;
+ expectedArea.y = 0;
+ expectedArea.width = 0;
+ expectedArea.height = 0;
+ }
+
+ return expectedArea;
}
} /* Image */
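computeExpectedArea() above takes the axis-aligned bounding box of the last known contour, inflates it by mExpectedOffset around its center, and then clips it to the frame before the tracker crops the region of interest. A minimal sketch of that inflate-and-clip step, using a plain struct and illustrative names rather than cv::Rect:

#include <algorithm>
#include <cstdio>

struct Rect { int x, y, width, height; };

/* Grow a box by `offset` (e.g. 0.5 = +50% per side) and clip it to the frame. */
static Rect expandAndClip(Rect box, float offset, int frameWidth, int frameHeight)
{
    const float cx = box.x + box.width / 2.0f;
    const float cy = box.y + box.height / 2.0f;
    const float halfW = (box.width / 2.0f) * (1 + offset);
    const float halfH = (box.height / 2.0f) * (1 + offset);

    Rect r;
    r.x = (int)(cx - halfW);
    r.y = (int)(cy - halfH);
    r.width = (int)(halfW * 2);
    r.height = (int)(halfH * 2);

    /* Clip to the frame, as the tracker does before cropping */
    if (r.x < 0) { r.width += r.x; r.x = 0; }
    if (r.y < 0) { r.height += r.y; r.y = 0; }
    r.width = std::min(r.width, frameWidth - r.x);
    r.height = std::min(r.height, frameHeight - r.y);
    if (r.width <= 0 || r.height <= 0) {
        r.x = r.y = r.width = r.height = 0;
    }

    return r;
}

int main()
{
    Rect last = {600, 400, 200, 100};
    Rect roi = expandAndClip(last, 0.5f, 640, 480); /* clipped at the right edge */
    std::printf("roi = (%d, %d, %d, %d)\n", roi.x, roi.y, roi.width, roi.height);
    return 0;
}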
diff --git a/mv_image/image/src/ImageTrackingModel.cpp b/mv_image/image/src/ImageTrackingModel.cpp
index e0a75c97..014a6296 100644
--- a/mv_image/image/src/ImageTrackingModel.cpp
+++ b/mv_image/image/src/ImageTrackingModel.cpp
@@ -24,242 +24,227 @@
#include <fstream>
#include <unistd.h>
-namespace MediaVision
-{
-namespace Image
-{
-
+namespace MediaVision {
+namespace Image {
ImageTrackingModel::ImageTrackingModel() :
- m_recognitionObject(),
- m_lastLocation(0),
- m_state(Invalid),
- m_recognitionThread(0)
+ m_recognitionObject(),
+ m_lastLocation(0),
+ m_state(Invalid),
+ m_recognitionThread(0)
{
- pthread_mutex_init(&m_globalGuard, NULL);
- pthread_spin_init(&m_lastLocationGuard, PTHREAD_PROCESS_SHARED);
- pthread_spin_init(&m_stateGuard, PTHREAD_PROCESS_SHARED);
+ pthread_mutex_init(&m_globalGuard, NULL);
+ pthread_spin_init(&m_lastLocationGuard, PTHREAD_PROCESS_SHARED);
+ pthread_spin_init(&m_stateGuard, PTHREAD_PROCESS_SHARED);
}
ImageTrackingModel::ImageTrackingModel(const ImageObject& recognitionObject) :
- m_recognitionObject(recognitionObject),
- m_lastLocation(0),
- m_state(Invalid),
- m_recognitionThread(0)
+ m_recognitionObject(recognitionObject),
+ m_lastLocation(0),
+ m_state(Invalid),
+ m_recognitionThread(0)
{
- if (!recognitionObject.isEmpty())
- {
- m_state = Undetected;
- }
- pthread_mutex_init(&m_globalGuard, NULL);
- pthread_spin_init(&m_lastLocationGuard, PTHREAD_PROCESS_SHARED);
- pthread_spin_init(&m_stateGuard, PTHREAD_PROCESS_SHARED);
+ if (!recognitionObject.isEmpty()) {
+ m_state = Undetected;
+ }
+ pthread_mutex_init(&m_globalGuard, NULL);
+ pthread_spin_init(&m_lastLocationGuard, PTHREAD_PROCESS_SHARED);
+ pthread_spin_init(&m_stateGuard, PTHREAD_PROCESS_SHARED);
}
ImageTrackingModel::ImageTrackingModel(const ImageTrackingModel& copy) :
- m_recognitionThread(0)
+ m_recognitionThread(0)
{
- pthread_mutex_init(&m_globalGuard, NULL);
- pthread_spin_init(&m_lastLocationGuard, PTHREAD_PROCESS_SHARED);
- pthread_spin_init(&m_stateGuard, PTHREAD_PROCESS_SHARED);
+ pthread_mutex_init(&m_globalGuard, NULL);
+ pthread_spin_init(&m_lastLocationGuard, PTHREAD_PROCESS_SHARED);
+ pthread_spin_init(&m_stateGuard, PTHREAD_PROCESS_SHARED);
- *this = copy;
+ *this = copy;
}
ImageTrackingModel::~ImageTrackingModel()
{
- if (m_recognitionThread)
- {
- pthread_join(m_recognitionThread, NULL);
- }
-
- pthread_mutex_destroy(&m_globalGuard);
- pthread_spin_destroy(&m_lastLocationGuard);
- pthread_spin_destroy(&m_stateGuard);
+ if (m_recognitionThread) {
+ pthread_join(m_recognitionThread, NULL);
+ }
+
+ pthread_mutex_destroy(&m_globalGuard);
+ pthread_spin_destroy(&m_lastLocationGuard);
+ pthread_spin_destroy(&m_stateGuard);
}
void ImageTrackingModel::setTarget(const ImageObject& target)
{
- pthread_mutex_lock(&m_globalGuard);
+ pthread_mutex_lock(&m_globalGuard);
- pthread_spin_lock(&m_stateGuard);
- m_state = target.isEmpty() ? Invalid : Undetected;
- pthread_spin_unlock(&m_stateGuard);
+ pthread_spin_lock(&m_stateGuard);
+ m_state = target.isEmpty() ? Invalid : Undetected;
+ pthread_spin_unlock(&m_stateGuard);
- pthread_spin_lock(&m_lastLocationGuard);
- m_lastLocation.clear();
- pthread_spin_unlock(&m_lastLocationGuard);
+ pthread_spin_lock(&m_lastLocationGuard);
+ m_lastLocation.clear();
+ pthread_spin_unlock(&m_lastLocationGuard);
- LOGI("[%s] Target is set into tracking model.", __FUNCTION__);
+ LOGI("[%s] Target is set into tracking model.", __FUNCTION__);
- m_recognitionObject = target;
+ m_recognitionObject = target;
- pthread_mutex_unlock(&m_globalGuard);
+ pthread_mutex_unlock(&m_globalGuard);
}
void ImageTrackingModel::refresh(void)
{
- pthread_mutex_lock(&m_globalGuard);
+ pthread_mutex_lock(&m_globalGuard);
- pthread_spin_lock(&m_stateGuard);
- m_state = m_recognitionObject.isEmpty() ? Invalid : Undetected;
- pthread_spin_unlock(&m_stateGuard);
+ pthread_spin_lock(&m_stateGuard);
+ m_state = m_recognitionObject.isEmpty() ? Invalid : Undetected;
+ pthread_spin_unlock(&m_stateGuard);
- pthread_spin_lock(&m_lastLocationGuard);
- m_lastLocation.clear();
- pthread_spin_unlock(&m_lastLocationGuard);
+ pthread_spin_lock(&m_lastLocationGuard);
+ m_lastLocation.clear();
+ pthread_spin_unlock(&m_lastLocationGuard);
- LOGI("[%s] Image tracking model is refreshed.", __FUNCTION__);
+ LOGI("[%s] Image tracking model is refreshed.", __FUNCTION__);
- pthread_mutex_unlock(&m_globalGuard);
+ pthread_mutex_unlock(&m_globalGuard);
}
bool ImageTrackingModel::isValid() const
{
- bool result = false;
+ bool result = false;
- pthread_spin_lock(&m_stateGuard);
- result = (m_state != Invalid);
- pthread_spin_unlock(&m_stateGuard);
+ pthread_spin_lock(&m_stateGuard);
+ result = (m_state != Invalid);
+ pthread_spin_unlock(&m_stateGuard);
- return result;
+ return result;
}
ImageTrackingModel& ImageTrackingModel::operator=(const ImageTrackingModel& copy)
{
- if (this != &copy)
- {
- pthread_mutex_t *higherMutex = &m_globalGuard;
- pthread_mutex_t *lowerMutex = &copy.m_globalGuard;
-
- if (higherMutex < lowerMutex)
- {
- std::swap(higherMutex, lowerMutex);
- }
-
- pthread_mutex_lock(higherMutex);
- pthread_mutex_lock(lowerMutex);
-
- m_recognitionObject = copy.m_recognitionObject;
-
- pthread_spin_lock(&m_lastLocationGuard);
- m_lastLocation = copy.m_lastLocation;
- pthread_spin_unlock(&m_lastLocationGuard);
-
- if (copy.m_state == InProcess)
- {
- pthread_spin_lock(&m_stateGuard);
- m_state = m_recognitionObject.isEmpty() ? Invalid : Undetected;
- pthread_spin_unlock(&m_stateGuard);
- }
- else
- {
- pthread_spin_lock(&m_stateGuard);
- m_state = copy.m_state;
- pthread_spin_unlock(&m_stateGuard);
- }
-
- pthread_mutex_unlock(lowerMutex);
- pthread_mutex_unlock(higherMutex);
- }
-
- return *this;
+ if (this != &copy) {
+ pthread_mutex_t *higherMutex = &m_globalGuard;
+ pthread_mutex_t *lowerMutex = &copy.m_globalGuard;
+
+ if (higherMutex < lowerMutex) {
+ std::swap(higherMutex, lowerMutex);
+ }
+
+ pthread_mutex_lock(higherMutex);
+ pthread_mutex_lock(lowerMutex);
+
+ m_recognitionObject = copy.m_recognitionObject;
+
+ pthread_spin_lock(&m_lastLocationGuard);
+ m_lastLocation = copy.m_lastLocation;
+ pthread_spin_unlock(&m_lastLocationGuard);
+
+ if (copy.m_state == InProcess) {
+ pthread_spin_lock(&m_stateGuard);
+ m_state = m_recognitionObject.isEmpty() ? Invalid : Undetected;
+ pthread_spin_unlock(&m_stateGuard);
+ } else {
+ pthread_spin_lock(&m_stateGuard);
+ m_state = copy.m_state;
+ pthread_spin_unlock(&m_stateGuard);
+ }
+
+ pthread_mutex_unlock(lowerMutex);
+ pthread_mutex_unlock(higherMutex);
+ }
+
+ return *this;
}
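The copy assignment above avoids deadlock by always taking the two global mutexes in a fixed, address-based order, so two threads assigning models to each other can never each hold one lock while waiting for the other. A minimal standalone sketch of the same rule, with illustrative helper names that are not part of this code base:

#include <pthread.h>

static void lock_both(pthread_mutex_t *a, pthread_mutex_t *b)
{
	pthread_mutex_t *higher = a;
	pthread_mutex_t *lower = b;

	if (higher < lower) {
		pthread_mutex_t *tmp = higher;
		higher = lower;
		lower = tmp;
	}

	/* Every caller locks the higher-address mutex first, mirroring operator=. */
	pthread_mutex_lock(higher);
	pthread_mutex_lock(lower);
}

static void unlock_both(pthread_mutex_t *a, pthread_mutex_t *b)
{
	/* Unlock order does not affect deadlock freedom. */
	pthread_mutex_unlock(a);
	pthread_mutex_unlock(b);
}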
int ImageTrackingModel::save(const char *fileName) const
{
- std::string prefix_path = std::string(app_get_data_path());
- LOGD("prefix_path: %s", prefix_path.c_str());
+ std::string prefix_path = std::string(app_get_data_path());
+ LOGD("prefix_path: %s", prefix_path.c_str());
- std::string filePath;
- filePath += prefix_path;
- filePath += fileName;
+ std::string filePath;
+ filePath += prefix_path;
+ filePath += fileName;
- /* check the directory is available */
- std::string prefix_path_check = filePath.substr(0, filePath.find_last_of('/'));
- if (access(prefix_path_check.c_str(),F_OK))
- {
- LOGE("Can't save tracking model. Path[%s] doesn't existed.", prefix_path_check.c_str());
+ /* check the directory is available */
+ std::string prefix_path_check = filePath.substr(0, filePath.find_last_of('/'));
+ if (access(prefix_path_check.c_str(), F_OK)) {
+ LOGE("Can't save tracking model. Path[%s] doesn't existed.", prefix_path_check.c_str());
- return MEDIA_VISION_ERROR_INVALID_PATH;
- }
+ return MEDIA_VISION_ERROR_INVALID_PATH;
+ }
- std::ofstream out;
- out.open(filePath.c_str());
+ std::ofstream out;
+ out.open(filePath.c_str());
- if (!out.is_open())
- {
- LOGE("[%s] Can't create/open file.", __FUNCTION__);
- return MEDIA_VISION_ERROR_PERMISSION_DENIED;
- }
+ if (!out.is_open()) {
+ LOGE("[%s] Can't create/open file.", __FUNCTION__);
+ return MEDIA_VISION_ERROR_PERMISSION_DENIED;
+ }
- out<<(*this);
+ out << (*this);
- out.close();
- LOGI("[%s] Image tracking model is saved.", __FUNCTION__);
+ out.close();
+ LOGI("[%s] Image tracking model is saved.", __FUNCTION__);
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int ImageTrackingModel::load(const char *fileName)
{
- /* find directory */
- std::string prefix_path = std::string(app_get_data_path());
- LOGD("prefix_path: %s", prefix_path.c_str());
+ /* find directory */
+ std::string prefix_path = std::string(app_get_data_path());
+ LOGD("prefix_path: %s", prefix_path.c_str());
- std::string filePath;
- filePath += prefix_path;
- filePath += fileName;
+ std::string filePath;
+ filePath += prefix_path;
+ filePath += fileName;
- if (access(filePath.c_str(),F_OK))
- {
- LOGE("Can't load tracking model. Path[%s] doesn't existed.", filePath.c_str());
+ if (access(filePath.c_str(), F_OK)) {
+ LOGE("Can't load tracking model. Path[%s] doesn't existed.", filePath.c_str());
- return MEDIA_VISION_ERROR_INVALID_PATH;
- }
+ return MEDIA_VISION_ERROR_INVALID_PATH;
+ }
- std::ifstream in;
- in.open(filePath.c_str());
+ std::ifstream in;
+ in.open(filePath.c_str());
- if (!in.is_open())
- {
- LOGE("[%s] Can't open file.", __FUNCTION__);
- return MEDIA_VISION_ERROR_PERMISSION_DENIED;
- }
+ if (!in.is_open()) {
+ LOGE("[%s] Can't open file.", __FUNCTION__);
+ return MEDIA_VISION_ERROR_PERMISSION_DENIED;
+ }
- in>>(*this);
+ in >> (*this);
- if (!in.good())
- {
- LOGE("[%s] Unexpected end of file.", __FUNCTION__);
- return MEDIA_VISION_ERROR_PERMISSION_DENIED;
- }
+ if (!in.good()) {
+ LOGE("[%s] Unexpected end of file.", __FUNCTION__);
+ return MEDIA_VISION_ERROR_PERMISSION_DENIED;
+ }
- in.close();
- LOGI("[%s] Image tracking model is loaded.", __FUNCTION__);
+ in.close();
+ LOGI("[%s] Image tracking model is loaded.", __FUNCTION__);
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
bool ImageTrackingModel::isDetected() const
{
- bool result = false;
+ bool result = false;
- pthread_spin_lock(&m_stateGuard);
- result = (m_state == Tracked);
- pthread_spin_unlock(&m_stateGuard);
+ pthread_spin_lock(&m_stateGuard);
+ result = (m_state == Tracked);
+ pthread_spin_unlock(&m_stateGuard);
- return result;
+ return result;
}
std::vector<cv::Point2f> ImageTrackingModel::getLastlocation() const
{
- std::vector<cv::Point2f> result;
+ std::vector<cv::Point2f> result;
- pthread_spin_lock(&m_lastLocationGuard);
- result = m_lastLocation;
- pthread_spin_unlock(&m_lastLocationGuard);
+ pthread_spin_lock(&m_lastLocationGuard);
+ result = m_lastLocation;
+ pthread_spin_unlock(&m_lastLocationGuard);
- return result;
+ return result;
}
#define STATE_UNSEEN_IO_ID 0
@@ -267,34 +252,28 @@ std::vector<cv::Point2f> ImageTrackingModel::getLastlocation() const
std::ostream& operator << (std::ostream& os, const ImageTrackingModel::State& state)
{
- if (ImageTrackingModel::Tracked == state)
- {
- os<<STATE_VISIBLE_IO_ID;
- }
- else
- {
- os<<STATE_UNSEEN_IO_ID;
- }
-
- return os;
+ if (ImageTrackingModel::Tracked == state) {
+ os << STATE_VISIBLE_IO_ID;
+ } else {
+ os << STATE_UNSEEN_IO_ID;
+ }
+
+ return os;
}
std::istream& operator >> (std::istream& is, ImageTrackingModel::State& state)
{
- int stateId = -1;
+ int stateId = -1;
- is>>stateId;
+ is >> stateId;
- if (STATE_VISIBLE_IO_ID == stateId)
- {
- state = ImageTrackingModel::Tracked;
- }
- else
- {
- state = ImageTrackingModel::Undetected;
- }
+ if (STATE_VISIBLE_IO_ID == stateId) {
+ state = ImageTrackingModel::Tracked;
+ } else {
+ state = ImageTrackingModel::Undetected;
+ }
- return is;
+ return is;
}
#undef STATE_UNSEEN_IO_ID
@@ -302,63 +281,59 @@ std::istream& operator >> (std::istream& is, ImageTrackingModel::State& state)
std::ostream& operator << (std::ostream& os, const ImageTrackingModel& obj)
{
- os<<std::setprecision(7);
+ os << std::setprecision(7);
- pthread_mutex_lock(&obj.m_globalGuard);
+ pthread_mutex_lock(&obj.m_globalGuard);
- os<<obj.m_recognitionObject;
+ os << obj.m_recognitionObject;
- os<<obj.m_lastLocation.size();
- for (size_t pointNum = 0u; pointNum < obj.m_lastLocation.size(); ++pointNum)
- {
- os<<' '<<obj.m_lastLocation[pointNum].x<<' '<<obj.m_lastLocation[pointNum].y;
- }
- os<<'\n';
+ os << obj.m_lastLocation.size();
+ for (size_t pointNum = 0u; pointNum < obj.m_lastLocation.size(); ++pointNum) {
+ os << ' ' << obj.m_lastLocation[pointNum].x << ' ' << obj.m_lastLocation[pointNum].y;
+ }
+ os << '\n';
- os<<obj.m_state<<'\n';
+ os << obj.m_state << '\n';
- pthread_mutex_unlock(&obj.m_globalGuard);
+ pthread_mutex_unlock(&obj.m_globalGuard);
- return os;
+ return os;
}
std::istream& operator >> (std::istream& is, ImageTrackingModel& obj)
{
#define MEDIA_VISION_CHECK_IFSTREAM \
- if (!is.good()) \
- { \
- return is; \
- }
+ if (!is.good()) { \
+ return is; \
+ }
- ImageTrackingModel temporal;
+ ImageTrackingModel temporal;
- is>>obj.m_recognitionObject;
- MEDIA_VISION_CHECK_IFSTREAM
+ is >> obj.m_recognitionObject;
+ MEDIA_VISION_CHECK_IFSTREAM
- size_t lastLocationAmount = 0u;
- is>>lastLocationAmount;
- MEDIA_VISION_CHECK_IFSTREAM
+ size_t lastLocationAmount = 0u;
+ is >> lastLocationAmount;
+ MEDIA_VISION_CHECK_IFSTREAM
- temporal.m_lastLocation.resize(lastLocationAmount);
- for (size_t pointNum = 0u; pointNum < lastLocationAmount; ++pointNum)
- {
- is>>temporal.m_lastLocation[pointNum].x;
- MEDIA_VISION_CHECK_IFSTREAM
- is>>temporal.m_lastLocation[pointNum].y;
- MEDIA_VISION_CHECK_IFSTREAM
- }
+ temporal.m_lastLocation.resize(lastLocationAmount);
+ for (size_t pointNum = 0u; pointNum < lastLocationAmount; ++pointNum) {
+ is >> temporal.m_lastLocation[pointNum].x;
+ MEDIA_VISION_CHECK_IFSTREAM
+ is >> temporal.m_lastLocation[pointNum].y;
+ MEDIA_VISION_CHECK_IFSTREAM
+ }
- is>>temporal.m_state;
- MEDIA_VISION_CHECK_IFSTREAM
+ is >> temporal.m_state;
+ MEDIA_VISION_CHECK_IFSTREAM
- if (temporal.m_recognitionObject.isEmpty())
- {
- temporal.m_state = ImageTrackingModel::Invalid;
- }
+ if (temporal.m_recognitionObject.isEmpty()) {
+ temporal.m_state = ImageTrackingModel::Invalid;
+ }
- obj = temporal;
+ obj = temporal;
- return is;
+ return is;
}
} /* Image */
diff --git a/mv_image/image/src/mv_image_open.cpp b/mv_image/image/src/mv_image_open.cpp
index 1d351a44..8c811688 100644
--- a/mv_image/image/src/mv_image_open.cpp
+++ b/mv_image/image/src/mv_image_open.cpp
@@ -26,759 +26,697 @@
#include <opencv/cv.h>
-namespace
-{
-
+namespace {
const MediaVision::Image::FeaturesExtractingParams
- defaultObjectFeaturesExtractingParams(1.2, 1000);
+ defaultObjectFeaturesExtractingParams(1.2, 1000);
const MediaVision::Image::FeaturesExtractingParams
- defaultSceneFeaturesExtractingParams(1.2, 5000);
+ defaultSceneFeaturesExtractingParams(1.2, 5000);
const MediaVision::Image::RecognitionParams
- defaultRecognitionParams(15, 0.33, 0.1);
+ defaultRecognitionParams(15, 0.33, 0.1);
const MediaVision::Image::StabilizationParams
- defaultStabilizationParams(3, 0.006, 2, 0.001);
+ defaultStabilizationParams(3, 0.006, 2, 0.001);
const MediaVision::Image::TrackingParams
- defaultTrackingParams(
- defaultSceneFeaturesExtractingParams,
- defaultRecognitionParams,
- defaultStabilizationParams,
- 0.0);
+ defaultTrackingParams(
+ defaultSceneFeaturesExtractingParams,
+ defaultRecognitionParams,
+ defaultStabilizationParams,
+ 0.0);
void extractTargetFeaturesExtractingParams(
- mv_engine_config_h engine_cfg,
- MediaVision::Image::FeaturesExtractingParams& featuresExtractingParams)
+ mv_engine_config_h engine_cfg,
+ MediaVision::Image::FeaturesExtractingParams& featuresExtractingParams)
{
- mv_engine_config_h working_cfg = NULL;
-
- if (NULL == engine_cfg)
- {
- mv_create_engine_config(&working_cfg);
- }
- else
- {
- working_cfg = engine_cfg;
- }
-
- featuresExtractingParams = defaultObjectFeaturesExtractingParams;
-
- mv_engine_config_get_double_attribute_c(
- working_cfg,
- "MV_IMAGE_RECOGNITION_OBJECT_SCALE_FACTOR",
- &featuresExtractingParams.mScaleFactor);
-
- mv_engine_config_get_int_attribute_c(
- working_cfg,
- "MV_IMAGE_RECOGNITION_OBJECT_MAX_KEYPOINTS_NUM",
- &featuresExtractingParams.mMaximumFeaturesNumber);
-
- if (NULL == engine_cfg)
- {
- mv_destroy_engine_config(working_cfg);
- }
+ mv_engine_config_h working_cfg = NULL;
+
+ if (NULL == engine_cfg) {
+ mv_create_engine_config(&working_cfg);
+ } else {
+ working_cfg = engine_cfg;
+ }
+
+ featuresExtractingParams = defaultObjectFeaturesExtractingParams;
+
+ mv_engine_config_get_double_attribute_c(
+ working_cfg,
+ "MV_IMAGE_RECOGNITION_OBJECT_SCALE_FACTOR",
+ &featuresExtractingParams.mScaleFactor);
+
+ mv_engine_config_get_int_attribute_c(
+ working_cfg,
+ "MV_IMAGE_RECOGNITION_OBJECT_MAX_KEYPOINTS_NUM",
+ &featuresExtractingParams.mMaximumFeaturesNumber);
+
+ if (NULL == engine_cfg) {
+ mv_destroy_engine_config(working_cfg);
+ }
}
void extractSceneFeaturesExtractingParams(
- mv_engine_config_h engine_cfg,
- MediaVision::Image::FeaturesExtractingParams& featuresExtractingParams)
+ mv_engine_config_h engine_cfg,
+ MediaVision::Image::FeaturesExtractingParams& featuresExtractingParams)
{
- mv_engine_config_h working_cfg = NULL;
-
- if (NULL == engine_cfg)
- {
- mv_create_engine_config(&working_cfg);
- }
- else
- {
- working_cfg = engine_cfg;
- }
-
- featuresExtractingParams = defaultSceneFeaturesExtractingParams;
-
- mv_engine_config_get_double_attribute_c(
- working_cfg,
- "MV_IMAGE_RECOGNITION_SCENE_SCALE_FACTOR",
- &featuresExtractingParams.mScaleFactor);
-
- mv_engine_config_get_int_attribute_c(
- working_cfg,
- "MV_IMAGE_RECOGNITION_SCENE_MAX_KEYPOINTS_NUM",
- &featuresExtractingParams.mMaximumFeaturesNumber);
-
- if (NULL == engine_cfg)
- {
- mv_destroy_engine_config(working_cfg);
- }
+ mv_engine_config_h working_cfg = NULL;
+
+ if (NULL == engine_cfg) {
+ mv_create_engine_config(&working_cfg);
+ } else {
+ working_cfg = engine_cfg;
+ }
+
+ featuresExtractingParams = defaultSceneFeaturesExtractingParams;
+
+ mv_engine_config_get_double_attribute_c(
+ working_cfg,
+ "MV_IMAGE_RECOGNITION_SCENE_SCALE_FACTOR",
+ &featuresExtractingParams.mScaleFactor);
+
+ mv_engine_config_get_int_attribute_c(
+ working_cfg,
+ "MV_IMAGE_RECOGNITION_SCENE_MAX_KEYPOINTS_NUM",
+ &featuresExtractingParams.mMaximumFeaturesNumber);
+
+ if (NULL == engine_cfg) {
+ mv_destroy_engine_config(working_cfg);
+ }
}
void extractRecognitionParams(
- mv_engine_config_h engine_cfg,
- MediaVision::Image::RecognitionParams& recognitionParams)
+ mv_engine_config_h engine_cfg,
+ MediaVision::Image::RecognitionParams& recognitionParams)
{
- mv_engine_config_h working_cfg = NULL;
-
- if (NULL == engine_cfg)
- {
- mv_create_engine_config(&working_cfg);
- }
- else
- {
- working_cfg = engine_cfg;
- }
-
- recognitionParams = defaultRecognitionParams;
-
- mv_engine_config_get_int_attribute_c(
- working_cfg,
- "MV_IMAGE_RECOGNITION_MIN_MATCH_NUM",
- &recognitionParams.mMinMatchesNumber);
-
- mv_engine_config_get_double_attribute_c(
- working_cfg,
- "MV_IMAGE_RECOGNITION_REQ_MATCH_PART",
- &recognitionParams.mRequiredMatchesPart);
-
- mv_engine_config_get_double_attribute_c(
- working_cfg,
- "MV_IMAGE_RECOGNITION_TOLERANT_MATCH_PART_ERR",
- &recognitionParams.mAllowableMatchesPartError);
-
- if (NULL == engine_cfg)
- {
- mv_destroy_engine_config(working_cfg);
- }
+ mv_engine_config_h working_cfg = NULL;
+
+ if (NULL == engine_cfg) {
+ mv_create_engine_config(&working_cfg);
+ } else {
+ working_cfg = engine_cfg;
+ }
+
+ recognitionParams = defaultRecognitionParams;
+
+ mv_engine_config_get_int_attribute_c(
+ working_cfg,
+ "MV_IMAGE_RECOGNITION_MIN_MATCH_NUM",
+ &recognitionParams.mMinMatchesNumber);
+
+ mv_engine_config_get_double_attribute_c(
+ working_cfg,
+ "MV_IMAGE_RECOGNITION_REQ_MATCH_PART",
+ &recognitionParams.mRequiredMatchesPart);
+
+ mv_engine_config_get_double_attribute_c(
+ working_cfg,
+ "MV_IMAGE_RECOGNITION_TOLERANT_MATCH_PART_ERR",
+ &recognitionParams.mAllowableMatchesPartError);
+
+ if (NULL == engine_cfg) {
+ mv_destroy_engine_config(working_cfg);
+ }
}
void extractStabilizationParams(
- mv_engine_config_h engine_cfg,
- MediaVision::Image::StabilizationParams& stabilizationParams)
+ mv_engine_config_h engine_cfg,
+ MediaVision::Image::StabilizationParams& stabilizationParams)
{
- mv_engine_config_h working_cfg = NULL;
-
- if (NULL == engine_cfg)
- {
- mv_create_engine_config(&working_cfg);
- }
- else
- {
- working_cfg = engine_cfg;
- }
-
- stabilizationParams = defaultStabilizationParams;
-
- bool useStabilization = true;
- mv_engine_config_get_bool_attribute_c(
- working_cfg,
- "MV_IMAGE_TRACKING_USE_STABLIZATION",
- &useStabilization);
-
- if (!useStabilization)
- {
- stabilizationParams.mHistoryAmount = 0;
- if (NULL == engine_cfg)
- {
- mv_destroy_engine_config(working_cfg);
- }
- return;
- }
-
- mv_engine_config_get_int_attribute_c(
- working_cfg,
- "MV_IMAGE_TRACKING_HISTORY_AMOUNT",
- &stabilizationParams.mHistoryAmount);
-
- mv_engine_config_get_double_attribute_c(
- working_cfg,
- "MV_IMAGE_TRACKING_STABLIZATION_TOLERANT_SHIFT",
- &stabilizationParams.mAllowableShift);
-
- mv_engine_config_get_double_attribute_c(
- working_cfg,
- "MV_IMAGE_TRACKING_STABLIZATION_SPEED",
- &stabilizationParams.mStabilizationSpeed);
-
- mv_engine_config_get_double_attribute_c(
- working_cfg,
- "MV_IMAGE_TRACKING_STABLIZATION_ACCELERATION",
- &stabilizationParams.mStabilizationAcceleration);
-
- if (NULL == engine_cfg)
- {
- mv_destroy_engine_config(working_cfg);
- }
+ mv_engine_config_h working_cfg = NULL;
+
+ if (NULL == engine_cfg) {
+ mv_create_engine_config(&working_cfg);
+ } else {
+ working_cfg = engine_cfg;
+ }
+
+ stabilizationParams = defaultStabilizationParams;
+
+ bool useStabilization = true;
+ mv_engine_config_get_bool_attribute_c(
+ working_cfg,
+ "MV_IMAGE_TRACKING_USE_STABLIZATION",
+ &useStabilization);
+
+ if (!useStabilization) {
+ stabilizationParams.mHistoryAmount = 0;
+ if (NULL == engine_cfg) {
+ mv_destroy_engine_config(working_cfg);
+ }
+ return;
+ }
+
+ mv_engine_config_get_int_attribute_c(
+ working_cfg,
+ "MV_IMAGE_TRACKING_HISTORY_AMOUNT",
+ &stabilizationParams.mHistoryAmount);
+
+ mv_engine_config_get_double_attribute_c(
+ working_cfg,
+ "MV_IMAGE_TRACKING_STABLIZATION_TOLERANT_SHIFT",
+ &stabilizationParams.mAllowableShift);
+
+ mv_engine_config_get_double_attribute_c(
+ working_cfg,
+ "MV_IMAGE_TRACKING_STABLIZATION_SPEED",
+ &stabilizationParams.mStabilizationSpeed);
+
+ mv_engine_config_get_double_attribute_c(
+ working_cfg,
+ "MV_IMAGE_TRACKING_STABLIZATION_ACCELERATION",
+ &stabilizationParams.mStabilizationAcceleration);
+
+ if (NULL == engine_cfg) {
+ mv_destroy_engine_config(working_cfg);
+ }
}
void extractTrackingParams(
- mv_engine_config_h engine_cfg,
- MediaVision::Image::TrackingParams& trackingParams)
+ mv_engine_config_h engine_cfg,
+ MediaVision::Image::TrackingParams& trackingParams)
{
- mv_engine_config_h working_cfg = NULL;
-
- if (NULL == engine_cfg)
- {
- mv_create_engine_config(&working_cfg);
- }
- else
- {
- working_cfg = engine_cfg;
- }
-
- trackingParams = defaultTrackingParams;
-
- extractSceneFeaturesExtractingParams(
- working_cfg,
- trackingParams.mFramesFeaturesExtractingParams);
-
- extractRecognitionParams(
- working_cfg,
- trackingParams.mRecognitionParams);
-
- extractStabilizationParams(
- working_cfg,
- trackingParams.mStabilizationParams);
-
- mv_engine_config_get_double_attribute_c(
- working_cfg,
- "MV_IMAGE_TRACKING_EXPECTED_OFFSET",
- &trackingParams.mExpectedOffset);
-
- if (NULL == engine_cfg)
- {
- mv_destroy_engine_config(working_cfg);
- }
+ mv_engine_config_h working_cfg = NULL;
+
+ if (NULL == engine_cfg) {
+ mv_create_engine_config(&working_cfg);
+ } else {
+ working_cfg = engine_cfg;
+ }
+
+ trackingParams = defaultTrackingParams;
+
+ extractSceneFeaturesExtractingParams(
+ working_cfg,
+ trackingParams.mFramesFeaturesExtractingParams);
+
+ extractRecognitionParams(
+ working_cfg,
+ trackingParams.mRecognitionParams);
+
+ extractStabilizationParams(
+ working_cfg,
+ trackingParams.mStabilizationParams);
+
+ mv_engine_config_get_double_attribute_c(
+ working_cfg,
+ "MV_IMAGE_TRACKING_EXPECTED_OFFSET",
+ &trackingParams.mExpectedOffset);
+
+ if (NULL == engine_cfg) {
+ mv_destroy_engine_config(working_cfg);
+ }
}
int convertSourceMV2GrayCV(mv_source_h mvSource, cv::Mat& cvSource)
{
- MEDIA_VISION_INSTANCE_CHECK(mvSource);
-
- int depth = CV_8U; // Default depth. 1 byte for channel.
- unsigned int channelsNumber = 0u;
- unsigned int width = 0u, height = 0u;
- unsigned int bufferSize = 0u;
- unsigned char *buffer = NULL;
-
- mv_colorspace_e colorspace = MEDIA_VISION_COLORSPACE_INVALID;
-
- MEDIA_VISION_ASSERT(mv_source_get_width(mvSource, &width),
- "Failed to get the width.");
- MEDIA_VISION_ASSERT(mv_source_get_height(mvSource, &height),
- "Failed to get the height.");
- MEDIA_VISION_ASSERT(mv_source_get_colorspace(mvSource, &colorspace),
- "Failed to get the colorspace.");
- MEDIA_VISION_ASSERT(mv_source_get_buffer(mvSource, &buffer, &bufferSize),
- "Failed to get the buffer size.");
-
- int conversionType = -1; // Type of conversion from given colorspace to gray
- switch(colorspace)
- {
- case MEDIA_VISION_COLORSPACE_INVALID:
- LOGE("Error: mv_source has invalid colorspace.");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- case MEDIA_VISION_COLORSPACE_Y800:
- channelsNumber = 1;
- // Without convertion
- break;
- case MEDIA_VISION_COLORSPACE_I420:
- channelsNumber = 1;
- height *= 1.5;
- conversionType = CV_YUV2GRAY_I420;
- break;
- case MEDIA_VISION_COLORSPACE_NV12:
- channelsNumber = 1;
- height *= 1.5;
- conversionType = CV_YUV2GRAY_NV12;
- break;
- case MEDIA_VISION_COLORSPACE_YV12:
- channelsNumber = 1;
- height *= 1.5;
- conversionType = CV_YUV2GRAY_YV12;
- break;
- case MEDIA_VISION_COLORSPACE_NV21:
- channelsNumber = 1;
- height *= 1.5;
- conversionType = CV_YUV2GRAY_NV21;
- break;
- case MEDIA_VISION_COLORSPACE_YUYV:
- channelsNumber = 2;
- conversionType = CV_YUV2GRAY_YUYV;
- break;
- case MEDIA_VISION_COLORSPACE_UYVY:
- channelsNumber = 2;
- conversionType = CV_YUV2GRAY_UYVY;
- break;
- case MEDIA_VISION_COLORSPACE_422P:
- channelsNumber = 2;
- conversionType = CV_YUV2GRAY_Y422;
- break;
- case MEDIA_VISION_COLORSPACE_RGB565:
- channelsNumber = 2;
- conversionType = CV_BGR5652GRAY;
- break;
- case MEDIA_VISION_COLORSPACE_RGB888:
- channelsNumber = 3;
- conversionType = CV_RGB2GRAY;
- break;
- case MEDIA_VISION_COLORSPACE_RGBA:
- channelsNumber = 4;
- conversionType = CV_RGBA2GRAY;
- break;
- default:
- LOGE("Error: mv_source has unsupported colorspace.");
- return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT;
- }
-
- if (conversionType == -1) // Without conversion
- {
- cvSource = cv::Mat(cv::Size(width, height),
- CV_MAKETYPE(depth, channelsNumber), buffer).clone();
- }
- else // Conversion
- {
- // Class for representation the given image as cv::Mat before conversion
- cv::Mat origin(cv::Size(width, height),
- CV_MAKETYPE(depth, channelsNumber), buffer);
- cv::cvtColor(origin, cvSource, conversionType);
- }
-
- return MEDIA_VISION_ERROR_NONE;
+ MEDIA_VISION_INSTANCE_CHECK(mvSource);
+
+ int depth = CV_8U; // Default depth. 1 byte for channel.
+ unsigned int channelsNumber = 0u;
+ unsigned int width = 0u, height = 0u;
+ unsigned int bufferSize = 0u;
+ unsigned char *buffer = NULL;
+
+ mv_colorspace_e colorspace = MEDIA_VISION_COLORSPACE_INVALID;
+
+ MEDIA_VISION_ASSERT(mv_source_get_width(mvSource, &width),
+ "Failed to get the width.");
+ MEDIA_VISION_ASSERT(mv_source_get_height(mvSource, &height),
+ "Failed to get the height.");
+ MEDIA_VISION_ASSERT(mv_source_get_colorspace(mvSource, &colorspace),
+ "Failed to get the colorspace.");
+ MEDIA_VISION_ASSERT(mv_source_get_buffer(mvSource, &buffer, &bufferSize),
+ "Failed to get the buffer size.");
+
+ int conversionType = -1; /* Type of conversion from given colorspace to gray */
+ switch (colorspace) {
+ case MEDIA_VISION_COLORSPACE_INVALID:
+ LOGE("Error: mv_source has invalid colorspace.");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ case MEDIA_VISION_COLORSPACE_Y800:
+ channelsNumber = 1;
+ /* Without conversion */
+ break;
+ case MEDIA_VISION_COLORSPACE_I420:
+ channelsNumber = 1;
+ height *= 1.5;
+ conversionType = CV_YUV2GRAY_I420;
+ break;
+ case MEDIA_VISION_COLORSPACE_NV12:
+ channelsNumber = 1;
+ height *= 1.5;
+ conversionType = CV_YUV2GRAY_NV12;
+ break;
+ case MEDIA_VISION_COLORSPACE_YV12:
+ channelsNumber = 1;
+ height *= 1.5;
+ conversionType = CV_YUV2GRAY_YV12;
+ break;
+ case MEDIA_VISION_COLORSPACE_NV21:
+ channelsNumber = 1;
+ height *= 1.5;
+ conversionType = CV_YUV2GRAY_NV21;
+ break;
+ case MEDIA_VISION_COLORSPACE_YUYV:
+ channelsNumber = 2;
+ conversionType = CV_YUV2GRAY_YUYV;
+ break;
+ case MEDIA_VISION_COLORSPACE_UYVY:
+ channelsNumber = 2;
+ conversionType = CV_YUV2GRAY_UYVY;
+ break;
+ case MEDIA_VISION_COLORSPACE_422P:
+ channelsNumber = 2;
+ conversionType = CV_YUV2GRAY_Y422;
+ break;
+ case MEDIA_VISION_COLORSPACE_RGB565:
+ channelsNumber = 2;
+ conversionType = CV_BGR5652GRAY;
+ break;
+ case MEDIA_VISION_COLORSPACE_RGB888:
+ channelsNumber = 3;
+ conversionType = CV_RGB2GRAY;
+ break;
+ case MEDIA_VISION_COLORSPACE_RGBA:
+ channelsNumber = 4;
+ conversionType = CV_RGBA2GRAY;
+ break;
+ default:
+ LOGE("Error: mv_source has unsupported colorspace.");
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT;
+ }
+
+ if (conversionType == -1) { /* Without conversion */
+ cvSource = cv::Mat(cv::Size(width, height),
+ CV_MAKETYPE(depth, channelsNumber), buffer).clone();
+ } else { /* With conversion */
+ /* Represent the given image as cv::Mat before conversion */
+ cv::Mat origin(cv::Size(width, height),
+ CV_MAKETYPE(depth, channelsNumber), buffer);
+ cv::cvtColor(origin, cvSource, conversionType);
+ }
+
+ return MEDIA_VISION_ERROR_NONE;
}
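convertSourceMV2GrayCV() above hands planar 4:2:0 sources to cv::cvtColor as a single 8-bit channel whose height is 1.5 times the image height (the Y plane followed by the chroma data). A small sketch of just the NV12 branch, reusing the same OpenCV constants; the function name and buffer handling here are illustrative only:

#include <opencv/cv.h>

static cv::Mat nv12ToGray(const unsigned char *buffer, int width, int height)
{
	/* NV12: one 8-bit channel, height * 1.5 rows (Y plane + interleaved UV). */
	cv::Mat origin(cv::Size(width, height * 3 / 2), CV_8UC1,
			const_cast<unsigned char*>(buffer));

	cv::Mat gray;
	cv::cvtColor(origin, gray, CV_YUV2GRAY_NV12); /* gray owns its own data */
	return gray;
}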
} /* anonymous namespace */
int mv_image_recognize_open(
- mv_source_h source,
- const mv_image_object_h *image_objects,
- int number_of_objects,
- mv_engine_config_h engine_cfg,
- mv_image_recognized_cb recognized_cb,
- void *user_data)
+ mv_source_h source,
+ const mv_image_object_h *image_objects,
+ int number_of_objects,
+ mv_engine_config_h engine_cfg,
+ mv_image_recognized_cb recognized_cb,
+ void *user_data)
{
- MEDIA_VISION_INSTANCE_CHECK(source);
- MEDIA_VISION_NULL_ARG_CHECK(image_objects);
- for (int objectNum = 0; objectNum < number_of_objects; ++objectNum)
- {
- MEDIA_VISION_INSTANCE_CHECK(image_objects[objectNum]);
- }
- MEDIA_VISION_NULL_ARG_CHECK(recognized_cb);
-
- cv::Mat scene;
- MEDIA_VISION_ASSERT(
- convertSourceMV2GrayCV(source, scene),
- "Failed to convert mv_source.");
-
- MediaVision::Image::FeaturesExtractingParams featuresExtractingParams;
- extractSceneFeaturesExtractingParams(engine_cfg, featuresExtractingParams);
-
- MediaVision::Image::RecognitionParams recognitionParams;
- extractRecognitionParams(engine_cfg, recognitionParams);
-
- MediaVision::Image::ImageRecognizer recognizer(scene,
- featuresExtractingParams);
-
- mv_quadrangle_s *resultLocations[number_of_objects];
-
- for (int objectNum = 0; objectNum < number_of_objects; ++objectNum)
- {
- std::vector<cv::Point2f> resultContour;
- bool isRecognized = recognizer.recognize(
- *((MediaVision::Image::ImageObject*)image_objects[objectNum]),
- recognitionParams, resultContour);
- if (isRecognized && (resultContour.size() ==
- MediaVision::Image::NumberOfQuadrangleCorners))
- {
- resultLocations[objectNum] = new mv_quadrangle_s;
- for (size_t pointNum = 0u;
- pointNum < MediaVision::Image::NumberOfQuadrangleCorners;
- ++pointNum)
- {
- resultLocations[objectNum]->points[pointNum].x =
- resultContour[pointNum].x;
- resultLocations[objectNum]->points[pointNum].y =
- resultContour[pointNum].y;
- }
- }
- else
- {
- resultLocations[objectNum] = NULL;
- }
- }
-
- recognized_cb(
- source,
- engine_cfg,
- image_objects,
- resultLocations,
- number_of_objects,
- user_data);
-
- for (int objectNum = 0; objectNum < number_of_objects; ++objectNum)
- {
- if (resultLocations[objectNum] != NULL)
- {
- delete resultLocations[objectNum];
- resultLocations[objectNum] = NULL;
- }
- }
-
- return MEDIA_VISION_ERROR_NONE;
+ MEDIA_VISION_INSTANCE_CHECK(source);
+ MEDIA_VISION_NULL_ARG_CHECK(image_objects);
+ for (int objectNum = 0; objectNum < number_of_objects; ++objectNum) {
+ MEDIA_VISION_INSTANCE_CHECK(image_objects[objectNum]);
+ }
+ MEDIA_VISION_NULL_ARG_CHECK(recognized_cb);
+
+ cv::Mat scene;
+ MEDIA_VISION_ASSERT(
+ convertSourceMV2GrayCV(source, scene),
+ "Failed to convert mv_source.");
+
+ MediaVision::Image::FeaturesExtractingParams featuresExtractingParams;
+ extractSceneFeaturesExtractingParams(engine_cfg, featuresExtractingParams);
+
+ MediaVision::Image::RecognitionParams recognitionParams;
+ extractRecognitionParams(engine_cfg, recognitionParams);
+
+ MediaVision::Image::ImageRecognizer recognizer(scene,
+ featuresExtractingParams);
+
+ mv_quadrangle_s *resultLocations[number_of_objects];
+
+ for (int objectNum = 0; objectNum < number_of_objects; ++objectNum) {
+ std::vector<cv::Point2f> resultContour;
+ bool isRecognized = recognizer.recognize(
+ *((MediaVision::Image::ImageObject*)image_objects[objectNum]),
+ recognitionParams, resultContour);
+ if (isRecognized && (resultContour.size() ==
+ MediaVision::Image::NumberOfQuadrangleCorners)) {
+ resultLocations[objectNum] = new mv_quadrangle_s;
+ for (size_t pointNum = 0u;
+ pointNum < MediaVision::Image::NumberOfQuadrangleCorners;
+ ++pointNum) {
+ resultLocations[objectNum]->points[pointNum].x =
+ resultContour[pointNum].x;
+ resultLocations[objectNum]->points[pointNum].y =
+ resultContour[pointNum].y;
+ }
+ } else {
+ resultLocations[objectNum] = NULL;
+ }
+ }
+
+ recognized_cb(
+ source,
+ engine_cfg,
+ image_objects,
+ resultLocations,
+ number_of_objects,
+ user_data);
+
+ for (int objectNum = 0; objectNum < number_of_objects; ++objectNum) {
+ if (resultLocations[objectNum] != NULL) {
+ delete resultLocations[objectNum];
+ resultLocations[objectNum] = NULL;
+ }
+ }
+
+ return MEDIA_VISION_ERROR_NONE;
}
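For reference, a hypothetical caller of the recognition entry point above. The public callback typedef is not part of this diff, so the signature below is inferred from the recognized_cb() invocation and may differ; the mv_* headers are assumed to be included:

#include <stdio.h>

static void on_recognized(mv_source_h source, mv_engine_config_h engine_cfg,
		const mv_image_object_h *image_objects, mv_quadrangle_s **locations,
		int number_of_objects, void *user_data)
{
	for (int i = 0; i < number_of_objects; ++i) {
		if (locations[i] == NULL) {
			printf("object %d: not recognized\n", i);
			continue;
		}
		/* locations[i] holds the four corners of the matched quadrangle. */
		printf("object %d found at (%d, %d)\n", i,
				locations[i]->points[0].x, locations[i]->points[0].y);
	}
}

static int recognize_single_target(mv_source_h scene, mv_image_object_h target)
{
	/* A NULL engine config falls back to the default parameters extracted above. */
	return mv_image_recognize_open(scene, &target, 1, NULL, on_recognized, NULL);
}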
int mv_image_track_open(
- mv_source_h source,
- mv_image_tracking_model_h image_tracking_model,
- mv_engine_config_h engine_cfg,
- mv_image_tracked_cb tracked_cb,
- void *user_data)
+ mv_source_h source,
+ mv_image_tracking_model_h image_tracking_model,
+ mv_engine_config_h engine_cfg,
+ mv_image_tracked_cb tracked_cb,
+ void *user_data)
{
- MEDIA_VISION_INSTANCE_CHECK(source);
- MEDIA_VISION_INSTANCE_CHECK(image_tracking_model);
- MEDIA_VISION_NULL_ARG_CHECK(tracked_cb);
-
- if (!((MediaVision::Image::ImageTrackingModel*)image_tracking_model)->isValid())
- {
- LOGE("[%s] Image tracking model is invalid.", __FUNCTION__);
- return MEDIA_VISION_ERROR_INVALID_DATA;
- }
-
- MediaVision::Image::TrackingParams trackingParams;
- extractTrackingParams(engine_cfg, trackingParams);
-
- cv::Mat frame;
- MEDIA_VISION_ASSERT(
- convertSourceMV2GrayCV(source, frame),
- "Failed to convert mv_source.");
-
- MediaVision::Image::ImageTracker tracker(trackingParams);
-
- MediaVision::Image::ImageTrackingModel *trackingModel =
- (MediaVision::Image::ImageTrackingModel*)image_tracking_model;
-
- tracker.track(frame, *trackingModel);
-
- std::vector<cv::Point2f> resultContour = trackingModel->getLastlocation();
-
- if (trackingModel->isDetected() &&
- MediaVision::Image::NumberOfQuadrangleCorners == resultContour.size())
- {
- mv_quadrangle_s result;
- for (size_t pointNum = 0u;
- pointNum < MediaVision::Image::NumberOfQuadrangleCorners;
- ++pointNum)
- {
- result.points[pointNum].x = resultContour[pointNum].x;
- result.points[pointNum].y = resultContour[pointNum].y;
- }
- tracked_cb(source, image_tracking_model, engine_cfg, &result, user_data);
- }
- else
- {
- tracked_cb(source, image_tracking_model, engine_cfg, NULL, user_data);
- }
-
- return MEDIA_VISION_ERROR_NONE;
+ MEDIA_VISION_INSTANCE_CHECK(source);
+ MEDIA_VISION_INSTANCE_CHECK(image_tracking_model);
+ MEDIA_VISION_NULL_ARG_CHECK(tracked_cb);
+
+ if (!((MediaVision::Image::ImageTrackingModel*)image_tracking_model)->isValid()) {
+ LOGE("[%s] Image tracking model is invalid.", __FUNCTION__);
+ return MEDIA_VISION_ERROR_INVALID_DATA;
+ }
+
+ MediaVision::Image::TrackingParams trackingParams;
+ extractTrackingParams(engine_cfg, trackingParams);
+
+ cv::Mat frame;
+ MEDIA_VISION_ASSERT(
+ convertSourceMV2GrayCV(source, frame),
+ "Failed to convert mv_source.");
+
+ MediaVision::Image::ImageTracker tracker(trackingParams);
+
+ MediaVision::Image::ImageTrackingModel *trackingModel =
+ (MediaVision::Image::ImageTrackingModel*)image_tracking_model;
+
+ tracker.track(frame, *trackingModel);
+
+ std::vector<cv::Point2f> resultContour = trackingModel->getLastlocation();
+
+ if (trackingModel->isDetected() &&
+ MediaVision::Image::NumberOfQuadrangleCorners == resultContour.size()) {
+ mv_quadrangle_s result;
+ for (size_t pointNum = 0u;
+ pointNum < MediaVision::Image::NumberOfQuadrangleCorners;
+ ++pointNum) {
+ result.points[pointNum].x = resultContour[pointNum].x;
+ result.points[pointNum].y = resultContour[pointNum].y;
+ }
+ tracked_cb(source, image_tracking_model, engine_cfg, &result, user_data);
+ } else {
+ tracked_cb(source, image_tracking_model, engine_cfg, NULL, user_data);
+ }
+
+ return MEDIA_VISION_ERROR_NONE;
}
int mv_image_object_create_open(
- mv_image_object_h *image_object)
+ mv_image_object_h *image_object)
{
- MEDIA_VISION_NULL_ARG_CHECK(image_object);
+ MEDIA_VISION_NULL_ARG_CHECK(image_object);
- (*image_object) = (mv_image_object_h)new (std::nothrow)MediaVision::Image::ImageObject();
- if (*image_object == NULL)
- {
- return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
- }
+ (*image_object) = (mv_image_object_h)new (std::nothrow)MediaVision::Image::ImageObject();
+ if (*image_object == NULL) {
+ return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ }
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int mv_image_object_destroy_open(
- mv_image_object_h image_object)
+ mv_image_object_h image_object)
{
- MEDIA_VISION_INSTANCE_CHECK(image_object);
+ MEDIA_VISION_INSTANCE_CHECK(image_object);
- delete (MediaVision::Image::ImageObject*)image_object;
+ delete (MediaVision::Image::ImageObject*)image_object;
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int mv_image_object_fill_open(
- mv_image_object_h image_object,
- mv_engine_config_h engine_cfg,
- mv_source_h source,
- mv_rectangle_s *location)
+ mv_image_object_h image_object,
+ mv_engine_config_h engine_cfg,
+ mv_source_h source,
+ mv_rectangle_s *location)
{
- MEDIA_VISION_INSTANCE_CHECK(image_object);
- MEDIA_VISION_INSTANCE_CHECK(source);
-
- cv::Mat image;
- MEDIA_VISION_ASSERT(
- convertSourceMV2GrayCV(source, image),
- "Failed to convert mv_source.");
-
- MediaVision::Image::FeaturesExtractingParams featuresExtractingParams;
- extractTargetFeaturesExtractingParams(engine_cfg, featuresExtractingParams);
-
- if (NULL == location)
- {
- ((MediaVision::Image::ImageObject*)image_object)->fill(image,
- featuresExtractingParams);
- }
- else
- {
- if (!((MediaVision::Image::ImageObject*)image_object)->fill(image,
- cv::Rect(location->point.x, location->point.y,
- location->width, location->height),
- featuresExtractingParams))
- {
- // Wrong ROI (bounding box)
- LOGE("[%s] Wrong ROI.", __FUNCTION__);
- return MEDIA_VISION_ERROR_INVALID_DATA;
- }
- }
-
- return MEDIA_VISION_ERROR_NONE;
+ MEDIA_VISION_INSTANCE_CHECK(image_object);
+ MEDIA_VISION_INSTANCE_CHECK(source);
+
+ cv::Mat image;
+ MEDIA_VISION_ASSERT(
+ convertSourceMV2GrayCV(source, image),
+ "Failed to convert mv_source.");
+
+ MediaVision::Image::FeaturesExtractingParams featuresExtractingParams;
+ extractTargetFeaturesExtractingParams(engine_cfg, featuresExtractingParams);
+
+ if (NULL == location) {
+ ((MediaVision::Image::ImageObject*)image_object)->fill(image,
+ featuresExtractingParams);
+ } else {
+ if (!((MediaVision::Image::ImageObject*)image_object)->fill(image,
+ cv::Rect(location->point.x, location->point.y,
+ location->width, location->height),
+ featuresExtractingParams)) {
+ /* Wrong ROI (bounding box) */
+ LOGE("[%s] Wrong ROI.", __FUNCTION__);
+ return MEDIA_VISION_ERROR_INVALID_DATA;
+ }
+ }
+
+ return MEDIA_VISION_ERROR_NONE;
}
int mv_image_object_get_recognition_rate_open(
- mv_image_object_h image_object,
- double *recognition_rate)
+ mv_image_object_h image_object,
+ double *recognition_rate)
{
- MEDIA_VISION_INSTANCE_CHECK(image_object);
- MEDIA_VISION_NULL_ARG_CHECK(recognition_rate);
+ MEDIA_VISION_INSTANCE_CHECK(image_object);
+ MEDIA_VISION_NULL_ARG_CHECK(recognition_rate);
- (*recognition_rate) =
- ((MediaVision::Image::ImageObject*)image_object)->getRecognitionRate();
+ (*recognition_rate) =
+ ((MediaVision::Image::ImageObject*)image_object)->getRecognitionRate();
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int mv_image_object_set_label_open(
- mv_image_object_h image_object,
- int label)
+ mv_image_object_h image_object,
+ int label)
{
- MEDIA_VISION_INSTANCE_CHECK(image_object);
+ MEDIA_VISION_INSTANCE_CHECK(image_object);
- ((MediaVision::Image::ImageObject*)image_object)->setLabel(label);
+ ((MediaVision::Image::ImageObject*)image_object)->setLabel(label);
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int mv_image_object_get_label_open(
- mv_image_object_h image_object,
- int *label)
+ mv_image_object_h image_object,
+ int *label)
{
- MEDIA_VISION_INSTANCE_CHECK(image_object);
- MEDIA_VISION_NULL_ARG_CHECK(label);
+ MEDIA_VISION_INSTANCE_CHECK(image_object);
+ MEDIA_VISION_NULL_ARG_CHECK(label);
- if (!((MediaVision::Image::ImageObject*)image_object)->getLabel(*label))
- {
- LOGW("[%s] Image object haven't a label.", __FUNCTION__);
- return MEDIA_VISION_ERROR_NO_DATA;
- }
+ if (!((MediaVision::Image::ImageObject*)image_object)->getLabel(*label)) {
+ LOGW("[%s] Image object haven't a label.", __FUNCTION__);
+ return MEDIA_VISION_ERROR_NO_DATA;
+ }
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int mv_image_object_clone_open(
- mv_image_object_h src,
- mv_image_object_h *dst)
+ mv_image_object_h src,
+ mv_image_object_h *dst)
{
- MEDIA_VISION_INSTANCE_CHECK(src);
- MEDIA_VISION_NULL_ARG_CHECK(dst);
+ MEDIA_VISION_INSTANCE_CHECK(src);
+ MEDIA_VISION_NULL_ARG_CHECK(dst);
- (*dst) = (mv_image_object_h)new (std::nothrow)MediaVision::Image::ImageObject();
- if (*dst == NULL)
- {
- return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
- }
+ (*dst) = (mv_image_object_h)new (std::nothrow)MediaVision::Image::ImageObject();
+ if (*dst == NULL) {
+ return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ }
- *(MediaVision::Image::ImageObject*)(*dst) =
- *(MediaVision::Image::ImageObject*)src;
+ *(MediaVision::Image::ImageObject*)(*dst) =
+ *(MediaVision::Image::ImageObject*)src;
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int mv_image_object_save_open(
- const char *file_name, mv_image_object_h image_object)
+ const char *file_name, mv_image_object_h image_object)
{
- MEDIA_VISION_INSTANCE_CHECK(image_object);
-
- if (file_name == NULL)
- {
- LOGE("File name is NULL. The file name has to be specified");
- return MEDIA_VISION_ERROR_INVALID_PATH;
- }
-
- int ret = ((MediaVision::Image::ImageObject*)image_object)->save(file_name);
- if (ret != MEDIA_VISION_ERROR_NONE)
- {
- LOGE("Fail to save image object.");
- return ret;
- }
-
- return MEDIA_VISION_ERROR_NONE;
+ MEDIA_VISION_INSTANCE_CHECK(image_object);
+
+ if (file_name == NULL) {
+ LOGE("File name is NULL. The file name has to be specified");
+ return MEDIA_VISION_ERROR_INVALID_PATH;
+ }
+
+ int ret = ((MediaVision::Image::ImageObject*)image_object)->save(file_name);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to save image object.");
+ return ret;
+ }
+
+ return MEDIA_VISION_ERROR_NONE;
}
int mv_image_object_load_open(
- const char *file_name, mv_image_object_h *image_object)
+ const char *file_name, mv_image_object_h *image_object)
{
- MEDIA_VISION_NULL_ARG_CHECK(image_object);
-
- if (file_name == NULL)
- {
- LOGE("File name is NULL. The file name has to be specified");
- return MEDIA_VISION_ERROR_INVALID_PATH;
- }
-
- (*image_object) = (mv_image_object_h)new (std::nothrow)MediaVision::Image::ImageObject();
- if (*image_object == NULL)
- {
- return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
- }
-
- int ret = ((MediaVision::Image::ImageObject*)(*image_object))->load(file_name);
- if (ret != MEDIA_VISION_ERROR_NONE)
- {
- LOGE("Fail to save image object.");
- return ret;
- }
-
- return MEDIA_VISION_ERROR_NONE;
+ MEDIA_VISION_NULL_ARG_CHECK(image_object);
+
+ if (file_name == NULL) {
+ LOGE("File name is NULL. The file name has to be specified");
+ return MEDIA_VISION_ERROR_INVALID_PATH;
+ }
+
+ (*image_object) = (mv_image_object_h)new (std::nothrow)MediaVision::Image::ImageObject();
+ if (*image_object == NULL) {
+ return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ }
+
+ int ret = ((MediaVision::Image::ImageObject*)(*image_object))->load(file_name);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to save image object.");
+ return ret;
+ }
+
+ return MEDIA_VISION_ERROR_NONE;
}
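Taken together, the image object calls above let a target be captured once and reused later. A sketch of that round trip, assuming source already holds valid image data; the file name and label are arbitrary:

static int persist_and_restore_target(mv_source_h source)
{
	mv_image_object_h object = NULL;
	mv_image_object_h restored = NULL;

	int err = mv_image_object_create_open(&object);
	if (err != MEDIA_VISION_ERROR_NONE)
		return err;

	/* A NULL location makes the whole source image the target (see fill above). */
	err = mv_image_object_fill_open(object, NULL, source, NULL);
	if (err == MEDIA_VISION_ERROR_NONE)
		err = mv_image_object_set_label_open(object, 1);
	if (err == MEDIA_VISION_ERROR_NONE)
		err = mv_image_object_save_open("target.dat", object);
	if (err == MEDIA_VISION_ERROR_NONE)
		err = mv_image_object_load_open("target.dat", &restored);

	if (restored != NULL)
		mv_image_object_destroy_open(restored);
	mv_image_object_destroy_open(object);

	return err;
}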
int mv_image_tracking_model_create_open(
- mv_image_tracking_model_h *image_tracking_model)
+ mv_image_tracking_model_h *image_tracking_model)
{
- MEDIA_VISION_NULL_ARG_CHECK(image_tracking_model);
+ MEDIA_VISION_NULL_ARG_CHECK(image_tracking_model);
- (*image_tracking_model) = (mv_image_tracking_model_h)
- new (std::nothrow)MediaVision::Image::ImageTrackingModel();
- if (*image_tracking_model == NULL)
- {
- return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
- }
+ (*image_tracking_model) = (mv_image_tracking_model_h)
+ new (std::nothrow)MediaVision::Image::ImageTrackingModel();
+ if (*image_tracking_model == NULL) {
+ return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ }
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int mv_image_tracking_model_set_target_open(
- mv_image_object_h image_object,
- mv_image_tracking_model_h image_tracking_model)
+ mv_image_object_h image_object,
+ mv_image_tracking_model_h image_tracking_model)
{
- MEDIA_VISION_INSTANCE_CHECK(image_tracking_model);
- MEDIA_VISION_INSTANCE_CHECK(image_object);
+ MEDIA_VISION_INSTANCE_CHECK(image_tracking_model);
+ MEDIA_VISION_INSTANCE_CHECK(image_object);
- if (((MediaVision::Image::ImageObject*)image_object)->isEmpty())
- {
- LOGE("[%s] Target is empty and can't be set as target of tracking"
- "model.", __FUNCTION__);
- return MEDIA_VISION_ERROR_INVALID_DATA;
- }
+ if (((MediaVision::Image::ImageObject*)image_object)->isEmpty()) {
+ LOGE("[%s] Target is empty and can't be set as target of tracking"
+ "model.", __FUNCTION__);
+ return MEDIA_VISION_ERROR_INVALID_DATA;
+ }
- ((MediaVision::Image::ImageTrackingModel*)image_tracking_model)->setTarget(
- *(MediaVision::Image::ImageObject*)image_object);
+ ((MediaVision::Image::ImageTrackingModel*)image_tracking_model)->setTarget(
+ *(MediaVision::Image::ImageObject*)image_object);
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int mv_image_tracking_model_destroy_open(
- mv_image_tracking_model_h image_tracking_model)
+ mv_image_tracking_model_h image_tracking_model)
{
- MEDIA_VISION_INSTANCE_CHECK(image_tracking_model);
+ MEDIA_VISION_INSTANCE_CHECK(image_tracking_model);
- delete (MediaVision::Image::ImageTrackingModel*)image_tracking_model;
+ delete (MediaVision::Image::ImageTrackingModel*)image_tracking_model;
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int mv_image_tracking_model_refresh_open(
- mv_image_tracking_model_h image_tracking_model,
- mv_engine_config_h /*engine_cfg*/)
+ mv_image_tracking_model_h image_tracking_model,
+ mv_engine_config_h /*engine_cfg*/)
{
- MEDIA_VISION_INSTANCE_CHECK(image_tracking_model);
+ MEDIA_VISION_INSTANCE_CHECK(image_tracking_model);
- if (!((MediaVision::Image::ImageTrackingModel*)image_tracking_model)->isValid())
- {
- LOGE("[%s] Image tracking model is invalid.", __FUNCTION__);
- return MEDIA_VISION_ERROR_INVALID_DATA;
- }
+ if (!((MediaVision::Image::ImageTrackingModel*)image_tracking_model)->isValid()) {
+ LOGE("[%s] Image tracking model is invalid.", __FUNCTION__);
+ return MEDIA_VISION_ERROR_INVALID_DATA;
+ }
- ((MediaVision::Image::ImageTrackingModel*)image_tracking_model)->refresh();
+ ((MediaVision::Image::ImageTrackingModel*)image_tracking_model)->refresh();
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int mv_image_tracking_model_clone_open(
- mv_image_tracking_model_h src,
- mv_image_tracking_model_h *dst)
+ mv_image_tracking_model_h src,
+ mv_image_tracking_model_h *dst)
{
- MEDIA_VISION_INSTANCE_CHECK(src);
- MEDIA_VISION_NULL_ARG_CHECK(dst);
+ MEDIA_VISION_INSTANCE_CHECK(src);
+ MEDIA_VISION_NULL_ARG_CHECK(dst);
- (*dst) = (mv_image_tracking_model_h)new (std::nothrow)MediaVision::Image::ImageTrackingModel();
- if (*dst == NULL)
- {
- return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
- }
+ (*dst) = (mv_image_tracking_model_h)new (std::nothrow)MediaVision::Image::ImageTrackingModel();
+ if (*dst == NULL) {
+ return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ }
- *(MediaVision::Image::ImageObject*)(*dst) = *(MediaVision::Image::ImageObject*)src;
+ *(MediaVision::Image::ImageTrackingModel*)(*dst) = *(MediaVision::Image::ImageTrackingModel*)src;
- LOGD("Image tracking model has been successfully cloned");
- return MEDIA_VISION_ERROR_NONE;
+ LOGD("Image tracking model has been successfully cloned");
+ return MEDIA_VISION_ERROR_NONE;
}
int mv_image_tracking_model_save_open(
- const char *file_name, mv_image_tracking_model_h image_tracking_model)
+ const char *file_name, mv_image_tracking_model_h image_tracking_model)
{
- MEDIA_VISION_INSTANCE_CHECK(image_tracking_model);
-
- if (file_name == NULL)
- {
- LOGE("File name is NULL. The file name has to be specified");
- return MEDIA_VISION_ERROR_INVALID_PATH;
- }
-
- int ret = ((MediaVision::Image::ImageTrackingModel*)image_tracking_model)->save(file_name);
- if (ret != MEDIA_VISION_ERROR_NONE)
- {
- LOGE("Failed to save image tracking model");
- return ret;
- }
-
- LOGD("Image tracking model has been successfully saved");
- return MEDIA_VISION_ERROR_NONE;
+ MEDIA_VISION_INSTANCE_CHECK(image_tracking_model);
+
+ if (file_name == NULL) {
+ LOGE("File name is NULL. The file name has to be specified");
+ return MEDIA_VISION_ERROR_INVALID_PATH;
+ }
+
+ int ret = ((MediaVision::Image::ImageTrackingModel*)image_tracking_model)->save(file_name);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Failed to save image tracking model");
+ return ret;
+ }
+
+ LOGD("Image tracking model has been successfully saved");
+ return MEDIA_VISION_ERROR_NONE;
}
int mv_image_tracking_model_load_open(
- const char *file_name, mv_image_tracking_model_h *image_tracking_model)
+ const char *file_name, mv_image_tracking_model_h *image_tracking_model)
{
- MEDIA_VISION_NULL_ARG_CHECK(image_tracking_model);
-
- if (file_name == NULL)
- {
- LOGE("File path is NULL. The file name has to be specified");
- return MEDIA_VISION_ERROR_INVALID_PATH;
- }
-
- (*image_tracking_model) =
- (mv_image_tracking_model_h) new (std::nothrow)MediaVision::Image::ImageTrackingModel();
-
- if (*image_tracking_model == NULL)
- {
- return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
- }
-
- int ret = ((MediaVision::Image::ImageTrackingModel*)(*image_tracking_model))->load(file_name);
- if (ret != MEDIA_VISION_ERROR_NONE)
- {
- LOGE("Failed to load image tracking model");
- return ret;
- }
-
- LOGD("Image tracking model has been successfully loaded");
- return MEDIA_VISION_ERROR_NONE;
+ MEDIA_VISION_NULL_ARG_CHECK(image_tracking_model);
+
+ if (file_name == NULL) {
+ LOGE("File path is NULL. The file name has to be specified");
+ return MEDIA_VISION_ERROR_INVALID_PATH;
+ }
+
+ (*image_tracking_model) = (mv_image_tracking_model_h)
+ new (std::nothrow)MediaVision::Image::ImageTrackingModel();
+
+ if (*image_tracking_model == NULL) {
+ return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ }
+
+ int ret = ((MediaVision::Image::ImageTrackingModel*)(*image_tracking_model))->load(file_name);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Failed to load image tracking model");
+ return ret;
+ }
+
+ LOGD("Image tracking model has been successfully loaded");
+ return MEDIA_VISION_ERROR_NONE;
}
diff --git a/packaging/capi-media-vision.spec b/packaging/capi-media-vision.spec
index adf0cee5..9d68686f 100644
--- a/packaging/capi-media-vision.spec
+++ b/packaging/capi-media-vision.spec
@@ -1,6 +1,6 @@
Name: capi-media-vision
Summary: Media Vision library for Tizen Native API
-Version: 0.2.3
+Version: 0.2.4
Release: 0
Group: Multimedia/Framework
License: Apache-2.0 and BSD-2.0
diff --git a/test/testsuites/barcode/barcode_test_suite.c b/test/testsuites/barcode/barcode_test_suite.c
index c9d2980f..b4e8606c 100644
--- a/test/testsuites/barcode/barcode_test_suite.c
+++ b/test/testsuites/barcode/barcode_test_suite.c
@@ -28,1252 +28,1133 @@
#include <libavcodec/avcodec.h>
#include <libavutil/pixfmt.h>
-typedef struct
-{
- mv_barcode_type_e type;
- mv_barcode_qr_ecc_e ecc;
- mv_barcode_qr_mode_e mode;
- int version;
- size_t width;
- size_t height;
- mv_barcode_image_format_e out_image_format;
- mv_colorspace_e colorspace;
- char *message;
- char *file_name;
- char *out_file_name;
- unsigned char *out_buffer_ptr;
+typedef struct {
+ mv_barcode_type_e type;
+ mv_barcode_qr_ecc_e ecc;
+ mv_barcode_qr_mode_e mode;
+ int version;
+ size_t width;
+ size_t height;
+ mv_barcode_image_format_e out_image_format;
+ mv_colorspace_e colorspace;
+ char *message;
+ char *file_name;
+ char *out_file_name;
+ unsigned char *out_buffer_ptr;
} barcode_model_s;
-typedef enum
-{
- MV_TS_GENERATE_TO_IMAGE_FCN,
- MV_TS_GENERATE_TO_SOURCE_FCN
+typedef enum {
+ MV_TS_GENERATE_TO_IMAGE_FCN,
+ MV_TS_GENERATE_TO_SOURCE_FCN
} generation_fcn_e;
int convert_rgb_to(unsigned char *src_buffer, unsigned char **dst_buffer,
- image_data_s image_data, mv_colorspace_e dst_colorspace,
- unsigned long *cvt_buffer_size)
+ image_data_s image_data, mv_colorspace_e dst_colorspace,
+ unsigned long *cvt_buffer_size)
{
- enum PixelFormat pixel_format = PIX_FMT_NONE;
-
- MEDIA_VISION_FUNCTION_ENTER();
-
- switch (dst_colorspace)
- {
- case MEDIA_VISION_COLORSPACE_Y800:
- pixel_format = PIX_FMT_GRAY8;
- break;
- case MEDIA_VISION_COLORSPACE_I420:
- pixel_format = PIX_FMT_YUV420P;
- break;
- case MEDIA_VISION_COLORSPACE_NV12:
- pixel_format = PIX_FMT_NV12;
- break;
- case MEDIA_VISION_COLORSPACE_YV12:
- pixel_format = PIX_FMT_YUV420P; // the same as I420 with inversed U and V
- break;
- case MEDIA_VISION_COLORSPACE_NV21:
- pixel_format = PIX_FMT_NV21;
- break;
- case MEDIA_VISION_COLORSPACE_YUYV:
- pixel_format = PIX_FMT_YUYV422;
- break;
- case MEDIA_VISION_COLORSPACE_UYVY:
- pixel_format = PIX_FMT_UYVY422;
- break;
- case MEDIA_VISION_COLORSPACE_422P:
- pixel_format = PIX_FMT_YUV422P;
- break;
- case MEDIA_VISION_COLORSPACE_RGB565:
- pixel_format = PIX_FMT_RGB565BE;
- break;
- case MEDIA_VISION_COLORSPACE_RGBA:
- pixel_format = PIX_FMT_RGBA;
- break;
- case MEDIA_VISION_COLORSPACE_RGB888:
- *cvt_buffer_size = image_data.image_width * image_data.image_height * 3;
- (*dst_buffer) = (unsigned char*)malloc(*cvt_buffer_size);
- memcpy(*dst_buffer, src_buffer, *cvt_buffer_size);
-
- MEDIA_VISION_FUNCTION_LEAVE();
- return MEDIA_VISION_ERROR_NONE;
- default:
- MEDIA_VISION_FUNCTION_LEAVE();
- return MEDIA_VISION_ERROR_NOT_SUPPORTED;
- }
-
- AVPicture src_picture;
- AVPicture dst_picture;
-
- avpicture_fill(&src_picture, (uint8_t*)src_buffer, PIX_FMT_RGB24,
- image_data.image_width, image_data.image_height);
-
- avpicture_alloc(&dst_picture, pixel_format,
- image_data.image_width, image_data.image_height);
-
- struct SwsContext *context = sws_getContext(
- image_data.image_width, image_data.image_height, PIX_FMT_RGB24,
- image_data.image_width, image_data.image_height, pixel_format,
- SWS_FAST_BILINEAR, 0, 0, 0);
-
- sws_scale(context, (const uint8_t* const*)src_picture.data,
- src_picture.linesize, 0, image_data.image_height,
- dst_picture.data, dst_picture.linesize);
-
- *cvt_buffer_size = avpicture_get_size(pixel_format,
- image_data.image_width, image_data.image_height);
- (*dst_buffer) = (unsigned char*)malloc(*cvt_buffer_size);
- memcpy(*dst_buffer, dst_picture.data[0], *cvt_buffer_size);
-
- avpicture_free(&dst_picture);
-
- MEDIA_VISION_FUNCTION_LEAVE();
-
- return MEDIA_VISION_ERROR_NONE;
+ enum PixelFormat pixel_format = PIX_FMT_NONE;
+
+ MEDIA_VISION_FUNCTION_ENTER();
+
+ switch (dst_colorspace) {
+ case MEDIA_VISION_COLORSPACE_Y800:
+ pixel_format = PIX_FMT_GRAY8;
+ break;
+ case MEDIA_VISION_COLORSPACE_I420:
+ pixel_format = PIX_FMT_YUV420P;
+ break;
+ case MEDIA_VISION_COLORSPACE_NV12:
+ pixel_format = PIX_FMT_NV12;
+ break;
+ case MEDIA_VISION_COLORSPACE_YV12:
+ /* the same as I420 but with the U and V planes swapped */
+ pixel_format = PIX_FMT_YUV420P;
+ break;
+ case MEDIA_VISION_COLORSPACE_NV21:
+ pixel_format = PIX_FMT_NV21;
+ break;
+ case MEDIA_VISION_COLORSPACE_YUYV:
+ pixel_format = PIX_FMT_YUYV422;
+ break;
+ case MEDIA_VISION_COLORSPACE_UYVY:
+ pixel_format = PIX_FMT_UYVY422;
+ break;
+ case MEDIA_VISION_COLORSPACE_422P:
+ pixel_format = PIX_FMT_YUV422P;
+ break;
+ case MEDIA_VISION_COLORSPACE_RGB565:
+ pixel_format = PIX_FMT_RGB565BE;
+ break;
+ case MEDIA_VISION_COLORSPACE_RGBA:
+ pixel_format = PIX_FMT_RGBA;
+ break;
+ case MEDIA_VISION_COLORSPACE_RGB888:
+ *cvt_buffer_size = image_data.image_width * image_data.image_height * 3;
+ (*dst_buffer) = (unsigned char*)malloc(*cvt_buffer_size);
+ memcpy(*dst_buffer, src_buffer, *cvt_buffer_size);
+
+ MEDIA_VISION_FUNCTION_LEAVE();
+ return MEDIA_VISION_ERROR_NONE;
+ default:
+ MEDIA_VISION_FUNCTION_LEAVE();
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED;
+ }
+
+ AVPicture src_picture;
+ AVPicture dst_picture;
+
+ avpicture_fill(&src_picture, (uint8_t*)src_buffer, PIX_FMT_RGB24,
+ image_data.image_width, image_data.image_height);
+
+ avpicture_alloc(&dst_picture, pixel_format,
+ image_data.image_width, image_data.image_height);
+
+ struct SwsContext *context = sws_getContext(
+ image_data.image_width, image_data.image_height, PIX_FMT_RGB24,
+ image_data.image_width, image_data.image_height, pixel_format,
+ SWS_FAST_BILINEAR, 0, 0, 0);
+
+ sws_scale(context, (const uint8_t * const *)src_picture.data,
+ src_picture.linesize, 0, image_data.image_height,
+ dst_picture.data, dst_picture.linesize);
+
+ *cvt_buffer_size = avpicture_get_size(pixel_format,
+ image_data.image_width, image_data.image_height);
+ (*dst_buffer) = (unsigned char*)malloc(*cvt_buffer_size);
+ memcpy(*dst_buffer, dst_picture.data[0], *cvt_buffer_size);
+
+ avpicture_free(&dst_picture);
+
+ MEDIA_VISION_FUNCTION_LEAVE();
+
+ return MEDIA_VISION_ERROR_NONE;
}
int find_min_x(const mv_quadrangle_s *quadrangle, int *minX)
{
- MEDIA_VISION_FUNCTION_ENTER();
+ MEDIA_VISION_FUNCTION_ENTER();
- if (NULL == quadrangle)
- {
- MEDIA_VISION_FUNCTION_LEAVE();
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
+ if (NULL == quadrangle) {
+ MEDIA_VISION_FUNCTION_LEAVE();
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
- *minX = quadrangle->points[0].x;
- *minX = quadrangle->points[1].x < *minX ? quadrangle->points[1].x : *minX;
- *minX = quadrangle->points[2].x < *minX ? quadrangle->points[2].x : *minX;
- *minX = quadrangle->points[3].x < *minX ? quadrangle->points[3].x : *minX;
+ *minX = quadrangle->points[0].x;
+ *minX = quadrangle->points[1].x < *minX ? quadrangle->points[1].x : *minX;
+ *minX = quadrangle->points[2].x < *minX ? quadrangle->points[2].x : *minX;
+ *minX = quadrangle->points[3].x < *minX ? quadrangle->points[3].x : *minX;
- MEDIA_VISION_FUNCTION_LEAVE();
+ MEDIA_VISION_FUNCTION_LEAVE();
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int find_min_y(const mv_quadrangle_s *quadrangle, int *minY)
{
- MEDIA_VISION_FUNCTION_ENTER();
+ MEDIA_VISION_FUNCTION_ENTER();
- if (NULL == quadrangle)
- {
- MEDIA_VISION_FUNCTION_LEAVE();
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
+ if (NULL == quadrangle) {
+ MEDIA_VISION_FUNCTION_LEAVE();
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
- *minY = quadrangle->points[0].y;
- *minY = quadrangle->points[1].y < *minY ? quadrangle->points[1].y : *minY;
- *minY = quadrangle->points[2].y < *minY ? quadrangle->points[2].y : *minY;
- *minY = quadrangle->points[3].y < *minY ? quadrangle->points[3].y : *minY;
+ *minY = quadrangle->points[0].y;
+ *minY = quadrangle->points[1].y < *minY ? quadrangle->points[1].y : *minY;
+ *minY = quadrangle->points[2].y < *minY ? quadrangle->points[2].y : *minY;
+ *minY = quadrangle->points[3].y < *minY ? quadrangle->points[3].y : *minY;
- MEDIA_VISION_FUNCTION_LEAVE();
+ MEDIA_VISION_FUNCTION_LEAVE();
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int find_max_x(const mv_quadrangle_s *quadrangle, int *maxX)
{
- MEDIA_VISION_FUNCTION_ENTER();
+ MEDIA_VISION_FUNCTION_ENTER();
- if (NULL == quadrangle)
- {
- MEDIA_VISION_FUNCTION_LEAVE();
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
+ if (NULL == quadrangle) {
+ MEDIA_VISION_FUNCTION_LEAVE();
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
- *maxX = quadrangle->points[0].x;
- *maxX = quadrangle->points[1].x > *maxX ? quadrangle->points[1].x : *maxX;
- *maxX = quadrangle->points[2].x > *maxX ? quadrangle->points[2].x : *maxX;
- *maxX = quadrangle->points[3].x > *maxX ? quadrangle->points[3].x : *maxX;
+ *maxX = quadrangle->points[0].x;
+ *maxX = quadrangle->points[1].x > *maxX ? quadrangle->points[1].x : *maxX;
+ *maxX = quadrangle->points[2].x > *maxX ? quadrangle->points[2].x : *maxX;
+ *maxX = quadrangle->points[3].x > *maxX ? quadrangle->points[3].x : *maxX;
- MEDIA_VISION_FUNCTION_LEAVE();
+ MEDIA_VISION_FUNCTION_LEAVE();
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int find_max_y(const mv_quadrangle_s *quadrangle, int *maxY)
{
- MEDIA_VISION_FUNCTION_ENTER();
+ MEDIA_VISION_FUNCTION_ENTER();
- if (NULL == quadrangle)
- {
- MEDIA_VISION_FUNCTION_LEAVE();
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
+ if (NULL == quadrangle) {
+ MEDIA_VISION_FUNCTION_LEAVE();
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
- *maxY = quadrangle->points[0].y;
- *maxY = quadrangle->points[1].y > *maxY ? quadrangle->points[1].y : *maxY;
- *maxY = quadrangle->points[2].y > *maxY ? quadrangle->points[2].y : *maxY;
- *maxY = quadrangle->points[3].y > *maxY ? quadrangle->points[3].y : *maxY;
+ *maxY = quadrangle->points[0].y;
+ *maxY = quadrangle->points[1].y > *maxY ? quadrangle->points[1].y : *maxY;
+ *maxY = quadrangle->points[2].y > *maxY ? quadrangle->points[2].y : *maxY;
+ *maxY = quadrangle->points[3].y > *maxY ? quadrangle->points[3].y : *maxY;
- MEDIA_VISION_FUNCTION_LEAVE();
+ MEDIA_VISION_FUNCTION_LEAVE();
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
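The four helpers above can be combined into an axis-aligned bounding box around a detected barcode quadrangle; the wrapper name below is illustrative only:

int quadrangle_to_bounding_rect(const mv_quadrangle_s *quadrangle,
		mv_rectangle_s *rect)
{
	int min_x = 0, min_y = 0, max_x = 0, max_y = 0;

	if (find_min_x(quadrangle, &min_x) != MEDIA_VISION_ERROR_NONE ||
			find_min_y(quadrangle, &min_y) != MEDIA_VISION_ERROR_NONE ||
			find_max_x(quadrangle, &max_x) != MEDIA_VISION_ERROR_NONE ||
			find_max_y(quadrangle, &max_y) != MEDIA_VISION_ERROR_NONE)
		return MEDIA_VISION_ERROR_INVALID_PARAMETER;

	/* Top-left corner plus extents of the enclosing rectangle. */
	rect->point.x = min_x;
	rect->point.y = min_y;
	rect->width = max_x - min_x;
	rect->height = max_y - min_y;

	return MEDIA_VISION_ERROR_NONE;
}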
bool _mv_engine_config_supported_attribute(mv_config_attribute_type_e attribute_type,
- const char *attribute_name, void *user_data)
+ const char *attribute_name, void *user_data)
{
- printf("Callback call for engine configuration attribute\n");
-
- if (user_data == NULL)
- {
- return false;
- }
-
- mv_engine_config_h mv_engine_config = (mv_engine_config_h *)user_data;
-
- int int_value = 0;
- double double_value = 0.0;
- bool bool_value = false;
- char str_value[1024];
- switch (attribute_type)
- {
- case MV_ENGINE_CONFIG_ATTR_TYPE_DOUBLE:
- if (MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE ==
- mv_engine_config_get_double_attribute(
- mv_engine_config, attribute_name, &double_value))
- {
- printf("Default double attribute %s wasn't set in engine\n",
- attribute_name);
- return false;
- }
- printf("Default double attribute %s was set to %f in engine\n",
- attribute_name, double_value);
- break;
- case MV_ENGINE_CONFIG_ATTR_TYPE_INTEGER:
- if (MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE ==
- mv_engine_config_get_int_attribute(
- mv_engine_config, attribute_name, &int_value))
- {
- printf("Default integer attribute %s wasn't set in engine\n",
- attribute_name);
- return false;
- }
- printf("Default interget attribute %s was set to %d in engine\n",
- attribute_name, int_value);
- break;
- case MV_ENGINE_CONFIG_ATTR_TYPE_BOOLEAN:
- if (MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE ==
- mv_engine_config_get_bool_attribute(
- mv_engine_config, attribute_name, &bool_value))
- {
- printf("Default bool attribute %s wasn't set in engine\n",
- attribute_name);
- return false;
- }
- printf("Default bool attribute %s was set to %s in engine\n",
- attribute_name, bool_value ? "TRUE" : "FALSE");
- break;
- case MV_ENGINE_CONFIG_ATTR_TYPE_STRING:
- if (MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE ==
- mv_engine_config_get_string_attribute(
- mv_engine_config, attribute_name, &str_value))
- {
- printf("Default string ttribute %s wasn't set in engine\n",
- attribute_name);
- return false;
- }
- printf("Default string attribute %s was set to %s in engine\n",
- attribute_name, str_value);
- break;
- default:
- printf("Not supported attribute type\n");
- return false;
- }
-
-
- return true;
+ printf("Callback call for engine configuration attribute\n");
+
+ if (user_data == NULL)
+ return false;
+
+ mv_engine_config_h mv_engine_config = (mv_engine_config_h)user_data;
+
+ int int_value = 0;
+ double double_value = 0.0;
+ bool bool_value = false;
+ char str_value[1024];
+ switch (attribute_type) {
+ case MV_ENGINE_CONFIG_ATTR_TYPE_DOUBLE:
+ if (MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE ==
+ mv_engine_config_get_double_attribute(
+ mv_engine_config, attribute_name, &double_value)) {
+ printf("Default double attribute %s wasn't set in engine\n",
+ attribute_name);
+ return false;
+ }
+ printf("Default double attribute %s was set to %f in engine\n",
+ attribute_name, double_value);
+ break;
+ case MV_ENGINE_CONFIG_ATTR_TYPE_INTEGER:
+ if (MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE ==
+ mv_engine_config_get_int_attribute(
+ mv_engine_config, attribute_name, &int_value)) {
+ printf("Default integer attribute %s wasn't set in engine\n",
+ attribute_name);
+ return false;
+ }
+ printf("Default integer attribute %s was set to %d in engine\n",
+ attribute_name, int_value);
+ break;
+ case MV_ENGINE_CONFIG_ATTR_TYPE_BOOLEAN:
+ if (MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE ==
+ mv_engine_config_get_bool_attribute(
+ mv_engine_config, attribute_name, &bool_value)) {
+ printf("Default bool attribute %s wasn't set in engine\n",
+ attribute_name);
+ return false;
+ }
+ printf("Default bool attribute %s was set to %s in engine\n",
+ attribute_name, bool_value ? "TRUE" : "FALSE");
+ break;
+ case MV_ENGINE_CONFIG_ATTR_TYPE_STRING:
+ if (MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE ==
+ mv_engine_config_get_string_attribute(
+ mv_engine_config, attribute_name, &str_value)) {
+ printf("Default string attribute %s wasn't set in engine\n",
+ attribute_name);
+ return false;
+ }
+ printf("Default string attribute %s was set to %s in engine\n",
+ attribute_name, str_value);
+ break;
+ default:
+ printf("Unsupported attribute type\n");
+ return false;
+ }
+
+ return true;
}
void barcode_detected_cb(
- mv_source_h source,
- mv_engine_config_h engine_cfg,
- const mv_quadrangle_s *barcodes_locations,
- const char *messages[],
- const mv_barcode_type_e *types,
- int number_of_barcodes,
- void *user_data)
+ mv_source_h source,
+ mv_engine_config_h engine_cfg,
+ const mv_quadrangle_s *barcodes_locations,
+ const char *messages[],
+ const mv_barcode_type_e *types,
+ int number_of_barcodes,
+ void *user_data)
{
- MEDIA_VISION_FUNCTION_ENTER();
-
- printf("%i barcodes were detected on the image.\n", number_of_barcodes);
- if (number_of_barcodes > 0)
- {
- int is_source_data_loaded = 0;
-
- char *file_name = NULL;
- unsigned char *out_buffer = NULL;
- unsigned char *draw_buffer = NULL;
- unsigned int buf_size = 0;
- image_data_s image_data = { 0, 0, MEDIA_VISION_COLORSPACE_INVALID };
- // Check Media Vision source:
- if (MEDIA_VISION_ERROR_NONE != mv_source_get_buffer(source, &out_buffer, &buf_size) ||
- MEDIA_VISION_ERROR_NONE != mv_source_get_width(source, &(image_data.image_width)) ||
- MEDIA_VISION_ERROR_NONE != mv_source_get_height(source, &(image_data.image_height)) ||
- MEDIA_VISION_ERROR_NONE != mv_source_get_colorspace(source, &(image_data.image_colorspace)) ||
- user_data == NULL)
- {
- printf("ERROR: Creating out image is impossible.\n");
- }
- else
- {
- file_name = ((barcode_model_s *)user_data)->out_file_name;
- draw_buffer = ((barcode_model_s *)user_data)->out_buffer_ptr;
- image_data.image_colorspace = MEDIA_VISION_COLORSPACE_RGB888;
- is_source_data_loaded = 1;
- }
-
- int i = 0;
- for (i = 0; i < number_of_barcodes; ++i)
- {
- const char *cur_message = messages[i];
- mv_barcode_type_e cur_type = types[i];
- const char *str_type = NULL;
- switch (cur_type)
- {
- case MV_BARCODE_QR:
- str_type = "QR";
- break;
- case MV_BARCODE_UPC_A:
- str_type = "UPC-A";
- break;
- case MV_BARCODE_UPC_E:
- str_type = "UPC-E";
- break;
- case MV_BARCODE_EAN_8:
- case MV_BARCODE_EAN_13:
- str_type = "EAN-8/13";
- break;
- case MV_BARCODE_CODE128:
- str_type = "CODE128";
- break;
- case MV_BARCODE_CODE39:
- str_type = "CODE39";
- break;
- case MV_BARCODE_I2_5:
- str_type = "I25";
- break;
- default:
- str_type = "Undetected";
- break;
- }
- printf("\tBarcode %i : type is %s\n", i, str_type);
- if (cur_message != NULL)
- {
- printf("\t message is %s\n", cur_message);
- }
- else
- {
- printf("\t message wasn't detected\n");
- }
-
- if (is_source_data_loaded == 1)
- {
- int minX = 0;
- int minY = 0;
- int maxX = 0;
- int maxY = 0;
- if (MEDIA_VISION_ERROR_NONE != find_min_x(&barcodes_locations[i], &minX) ||
- MEDIA_VISION_ERROR_NONE != find_min_y(&barcodes_locations[i], &minY) ||
- MEDIA_VISION_ERROR_NONE != find_max_x(&barcodes_locations[i], &maxX) ||
- MEDIA_VISION_ERROR_NONE != find_max_y(&barcodes_locations[i], &maxY))
- {
- continue;
- }
-
- const int rectangle_thickness = 6;
- const int drawing_color[] = {255, 0, 0};
- if (MEDIA_VISION_ERROR_NONE != draw_rectangle_on_buffer(
- minX,
- minY,
- maxX,
- maxY,
- drawing_color,
- rectangle_thickness,
- &image_data,
- draw_buffer))
- {
- continue;
- }
- }
- }
-
- if (file_name != NULL &&
- MEDIA_VISION_ERROR_NONE == save_image_from_buffer(file_name, draw_buffer, &image_data, 100))
- {
- printf("Image was generated as %s\n", file_name);
- }
- else
- {
- printf("ERROR: Failed to generate output file. Check file name and permissions. \n");
- }
-
- printf("\n");
- }
-
- MEDIA_VISION_FUNCTION_LEAVE();
+ MEDIA_VISION_FUNCTION_ENTER();
+
+ printf("%i barcodes were detected on the image.\n", number_of_barcodes);
+ if (number_of_barcodes > 0) {
+ int is_source_data_loaded = 0;
+
+ char *file_name = NULL;
+ unsigned char *out_buffer = NULL;
+ unsigned char *draw_buffer = NULL;
+ unsigned int buf_size = 0;
+ image_data_s image_data = { 0, 0, MEDIA_VISION_COLORSPACE_INVALID };
+ /* Check Media Vision source: */
+ if (MEDIA_VISION_ERROR_NONE != mv_source_get_buffer(source, &out_buffer, &buf_size) ||
+ MEDIA_VISION_ERROR_NONE != mv_source_get_width(source, &(image_data.image_width)) ||
+ MEDIA_VISION_ERROR_NONE != mv_source_get_height(source, &(image_data.image_height)) ||
+ MEDIA_VISION_ERROR_NONE != mv_source_get_colorspace(source, &(image_data.image_colorspace)) ||
+ user_data == NULL) {
+ printf("ERROR: Creating output image is impossible.\n");
+ } else {
+ file_name = ((barcode_model_s *)user_data)->out_file_name;
+ draw_buffer = ((barcode_model_s *)user_data)->out_buffer_ptr;
+ image_data.image_colorspace = MEDIA_VISION_COLORSPACE_RGB888;
+ is_source_data_loaded = 1;
+ }
+
+ int i = 0;
+ for (i = 0; i < number_of_barcodes; ++i) {
+ const char *cur_message = messages[i];
+ mv_barcode_type_e cur_type = types[i];
+ const char *str_type = NULL;
+ switch (cur_type) {
+ case MV_BARCODE_QR:
+ str_type = "QR";
+ break;
+ case MV_BARCODE_UPC_A:
+ str_type = "UPC-A";
+ break;
+ case MV_BARCODE_UPC_E:
+ str_type = "UPC-E";
+ break;
+ case MV_BARCODE_EAN_8:
+ case MV_BARCODE_EAN_13:
+ str_type = "EAN-8/13";
+ break;
+ case MV_BARCODE_CODE128:
+ str_type = "CODE128";
+ break;
+ case MV_BARCODE_CODE39:
+ str_type = "CODE39";
+ break;
+ case MV_BARCODE_I2_5:
+ str_type = "I25";
+ break;
+ default:
+ str_type = "Undetected";
+ break;
+ }
+ printf("\tBarcode %i : type is %s\n", i, str_type);
+ if (cur_message != NULL)
+ printf("\t message is %s\n", cur_message);
+ else
+ printf("\t message wasn't detected\n");
+
+ if (is_source_data_loaded == 1) {
+ int minX = 0;
+ int minY = 0;
+ int maxX = 0;
+ int maxY = 0;
+ if (MEDIA_VISION_ERROR_NONE != find_min_x(&barcodes_locations[i], &minX) ||
+ MEDIA_VISION_ERROR_NONE != find_min_y(&barcodes_locations[i], &minY) ||
+ MEDIA_VISION_ERROR_NONE != find_max_x(&barcodes_locations[i], &maxX) ||
+ MEDIA_VISION_ERROR_NONE != find_max_y(&barcodes_locations[i], &maxY)) {
+ continue;
+ }
+
+ const int rectangle_thickness = 6;
+ const int drawing_color[] = {255, 0, 0};
+ if (MEDIA_VISION_ERROR_NONE != draw_rectangle_on_buffer(
+ minX,
+ minY,
+ maxX,
+ maxY,
+ drawing_color,
+ rectangle_thickness,
+ &image_data,
+ draw_buffer)) {
+ continue;
+ }
+ }
+ }
+
+ if (file_name != NULL &&
+ MEDIA_VISION_ERROR_NONE == save_image_from_buffer(file_name, draw_buffer, &image_data, 100)) {
+ printf("Image was generated as %s\n", file_name);
+ } else {
+ printf("ERROR: Failed to generate output file. Check file name and permissions. \n");
+ }
+
+ printf("\n");
+ }
+
+ MEDIA_VISION_FUNCTION_LEAVE();
}
int generate_barcode_to_image(barcode_model_s model)
{
- MEDIA_VISION_FUNCTION_ENTER();
-
- if (model.message == NULL ||
- model.file_name == NULL)
- {
- MEDIA_VISION_FUNCTION_LEAVE();
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- LOGI("Call the mv_barcode_generate_image() function");
-
- const int err = mv_barcode_generate_image(
- NULL,
- model.message,
- model.width,
- model.height,
- model.type,
- model.mode,
- model.ecc,
- model.version,
- model.file_name,
- model.out_image_format);
-
- MEDIA_VISION_FUNCTION_LEAVE();
-
- return err;
+ MEDIA_VISION_FUNCTION_ENTER();
+
+ if (model.message == NULL ||
+ model.file_name == NULL) {
+ MEDIA_VISION_FUNCTION_LEAVE();
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ LOGI("Call the mv_barcode_generate_image() function");
+
+ const int err = mv_barcode_generate_image(
+ NULL,
+ model.message,
+ model.width,
+ model.height,
+ model.type,
+ model.mode,
+ model.ecc,
+ model.version,
+ model.file_name,
+ model.out_image_format);
+
+ MEDIA_VISION_FUNCTION_LEAVE();
+
+ return err;
}
int generate_barcode_to_source(barcode_model_s model)
{
- MEDIA_VISION_FUNCTION_ENTER();
-
- if (model.message == NULL ||
- model.file_name == NULL)
- {
- MEDIA_VISION_FUNCTION_LEAVE();
-
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- LOGI("mv_source_h creation started");
-
- mv_source_h source = NULL;
- int err = mv_create_source(&source);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("ERROR: Error occurred when trying to create Media Vision "
- "source. Error code: %i\n", err);
-
- MEDIA_VISION_FUNCTION_LEAVE();
-
- return err;
- }
-
- LOGI("mv_source_h creation finished");
-
- LOGI("Call the mv_barcode_generate_source() function");
-
- err = mv_barcode_generate_source(
- NULL,
- model.message,
- model.type,
- model.mode,
- model.ecc,
- model.version,
- source);
-
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("ERROR: Error occurred during generation barcode to the "
- "Media Vision source. Error code: %i\n", err);
-
- const int err2 = mv_destroy_source(source);
- if (MEDIA_VISION_ERROR_NONE != err2)
- {
- printf("ERROR: Error occurred when try to destroy Media Vision source."
- "Error code: %i\n", err2);
- }
-
- MEDIA_VISION_FUNCTION_LEAVE();
-
- return err;
- }
-
- unsigned char *data_buffer = NULL;
- unsigned int buffer_size = 0;
- unsigned int image_width = 0;
- unsigned int image_height = 0;
- mv_colorspace_e image_colorspace = MEDIA_VISION_COLORSPACE_INVALID;
-
- bool is_source_corrupted = false;
- err = mv_source_get_buffer(source, &data_buffer, &buffer_size);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("ERROR: Error occurred when trying to get buffer from "
- "Media Vision source. Error code: %i\n", err);
- is_source_corrupted = true;
- }
-
- err = mv_source_get_width(source, &image_width);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("ERROR: Error occurred when trying to get width of "
- "Media Vision source. Error code: %i\n", err);
- is_source_corrupted = true;
- }
-
- err = mv_source_get_height(source, &image_height);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("ERROR: Error occurred when trying to get height of "
- "Media Vision source. Error code: %i\n", err);
- is_source_corrupted = true;
- }
-
- err = mv_source_get_colorspace(source, &image_colorspace);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("ERROR: Error occurred when trying to get colorspace of "
- "Media Vision source. Error code: %i\n", err);
- is_source_corrupted = true;
- }
-
- if (is_source_corrupted)
- {
- err = mv_destroy_source(source);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("ERROR: Error occurred when trying to destroy Media Vision "
- "source. Error code: %i\n", err);
- }
-
- MEDIA_VISION_FUNCTION_LEAVE();
-
- return MEDIA_VISION_ERROR_INTERNAL;
- }
-
- const image_data_s image_data = { image_width, image_height, image_colorspace };
-
- char *jpeg_file_name = "";
- if (0 == strcmp(model.file_name + strlen(model.file_name) - 4, ".jpg") ||
- 0 == strcmp(model.file_name + strlen(model.file_name) - 5, ".jpeg"))
- {
- jpeg_file_name = (char*)malloc(strlen(model.file_name) + 1);
- strcpy(jpeg_file_name, model.file_name);
- jpeg_file_name[strlen(model.file_name)] = '\0';
- }
- else
- {
- jpeg_file_name = (char*)malloc(strlen(model.file_name) + 5);
- strcpy(jpeg_file_name, model.file_name);
- strcpy(jpeg_file_name + strlen(model.file_name), ".jpg");
- jpeg_file_name[strlen(model.file_name) + 4] = '\0';
- }
-
- save_image_from_buffer(jpeg_file_name, data_buffer, &image_data, 100);
-
- free(jpeg_file_name);
-
- const int err2 = mv_destroy_source(source);
- if (MEDIA_VISION_ERROR_NONE != err2)
- {
- printf("ERROR: Error occurred when try to destroy Media Vision source."
- "Error code: %i\n", err2);
- }
-
- MEDIA_VISION_FUNCTION_LEAVE();
-
- return err;
+ MEDIA_VISION_FUNCTION_ENTER();
+
+ if (model.message == NULL ||
+ model.file_name == NULL) {
+ MEDIA_VISION_FUNCTION_LEAVE();
+
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ LOGI("mv_source_h creation started");
+
+ mv_source_h source = NULL;
+ int err = mv_create_source(&source);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("ERROR: Error occurred when trying to create Media Vision "
+ "source. Error code: %i\n", err);
+
+ MEDIA_VISION_FUNCTION_LEAVE();
+
+ return err;
+ }
+
+ LOGI("mv_source_h creation finished");
+
+ LOGI("Call the mv_barcode_generate_source() function");
+
+ err = mv_barcode_generate_source(
+ NULL,
+ model.message,
+ model.type,
+ model.mode,
+ model.ecc,
+ model.version,
+ source);
+
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("ERROR: Error occurred during barcode generation to the "
+ "Media Vision source. Error code: %i\n", err);
+
+ const int err2 = mv_destroy_source(source);
+ if (MEDIA_VISION_ERROR_NONE != err2) {
+ printf("ERROR: Error occurred when trying to destroy Media Vision source. "
+ "Error code: %i\n", err2);
+ }
+
+ MEDIA_VISION_FUNCTION_LEAVE();
+
+ return err;
+ }
+
+ unsigned char *data_buffer = NULL;
+ unsigned int buffer_size = 0;
+ unsigned int image_width = 0;
+ unsigned int image_height = 0;
+ mv_colorspace_e image_colorspace = MEDIA_VISION_COLORSPACE_INVALID;
+
+ bool is_source_corrupted = false;
+ err = mv_source_get_buffer(source, &data_buffer, &buffer_size);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("ERROR: Error occurred when trying to get buffer from "
+ "Media Vision source. Error code: %i\n", err);
+ is_source_corrupted = true;
+ }
+
+ err = mv_source_get_width(source, &image_width);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("ERROR: Error occurred when trying to get width of "
+ "Media Vision source. Error code: %i\n", err);
+ is_source_corrupted = true;
+ }
+
+ err = mv_source_get_height(source, &image_height);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("ERROR: Error occurred when trying to get height of "
+ "Media Vision source. Error code: %i\n", err);
+ is_source_corrupted = true;
+ }
+
+ err = mv_source_get_colorspace(source, &image_colorspace);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("ERROR: Error occurred when trying to get colorspace of "
+ "Media Vision source. Error code: %i\n", err);
+ is_source_corrupted = true;
+ }
+
+ if (is_source_corrupted) {
+ err = mv_destroy_source(source);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("ERROR: Error occurred when trying to destroy Media Vision "
+ "source. Error code: %i\n", err);
+ }
+
+ MEDIA_VISION_FUNCTION_LEAVE();
+
+ return MEDIA_VISION_ERROR_INTERNAL;
+ }
+
+ const image_data_s image_data = { image_width, image_height, image_colorspace };
+
+ char *jpeg_file_name = NULL;
+ if (0 == strcmp(model.file_name + strlen(model.file_name) - 4, ".jpg") ||
+ 0 == strcmp(model.file_name + strlen(model.file_name) - 5, ".jpeg")) {
+ jpeg_file_name = (char*)malloc(strlen(model.file_name) + 1);
+ strcpy(jpeg_file_name, model.file_name);
+ jpeg_file_name[strlen(model.file_name)] = '\0';
+ } else {
+ jpeg_file_name = (char*)malloc(strlen(model.file_name) + 5);
+ strcpy(jpeg_file_name, model.file_name);
+ strcpy(jpeg_file_name + strlen(model.file_name), ".jpg");
+ jpeg_file_name[strlen(model.file_name) + 4] = '\0';
+ }
+
+ save_image_from_buffer(jpeg_file_name, data_buffer, &image_data, 100);
+
+ free(jpeg_file_name);
+
+ const int err2 = mv_destroy_source(source);
+ if (MEDIA_VISION_ERROR_NONE != err2) {
+ printf("ERROR: Error occurred when trying to destroy Media Vision source. "
+ "Error code: %i\n", err2);
+ }
+
+ MEDIA_VISION_FUNCTION_LEAVE();
+
+ return err;
}
int detect_barcode(barcode_model_s model, mv_rectangle_s roi)
{
- MEDIA_VISION_FUNCTION_ENTER();
+ MEDIA_VISION_FUNCTION_ENTER();
- unsigned char *data_buffer = NULL;
- unsigned long buffer_size = 0;
- image_data_s image_data;
+ unsigned char *data_buffer = NULL;
+ unsigned long buffer_size = 0;
+ image_data_s image_data;
- int err = load_image_to_buffer(
- model.file_name, &data_buffer, &buffer_size, &image_data);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("ERROR: Errors were occurred during opening the file!!! code: %i\n", err);
+ int err = load_image_to_buffer(
+ model.file_name, &data_buffer, &buffer_size, &image_data);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("ERROR: Errors occurred while opening the file!!! code: %i\n", err);
- MEDIA_VISION_FUNCTION_LEAVE();
+ MEDIA_VISION_FUNCTION_LEAVE();
- return err;
- }
+ return err;
+ }
- unsigned char *converted_buffer = NULL;
- unsigned long converted_buffer_size = 0;
- err = convert_rgb_to(data_buffer, &converted_buffer, image_data, model.colorspace, &converted_buffer_size);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("ERROR: Can't convert to the selected colorspace!!! code: %i\n", err);
+ unsigned char *converted_buffer = NULL;
+ unsigned long converted_buffer_size = 0;
+ err = convert_rgb_to(data_buffer, &converted_buffer, image_data, model.colorspace, &converted_buffer_size);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("ERROR: Can't convert to the selected colorspace!!! code: %i\n", err);
- MEDIA_VISION_FUNCTION_LEAVE();
+ MEDIA_VISION_FUNCTION_LEAVE();
- return err;
- }
+ return err;
+ }
- model.out_buffer_ptr = data_buffer;
+ model.out_buffer_ptr = data_buffer;
- mv_engine_config_h mv_engine_config;
- err = mv_create_engine_config(&mv_engine_config);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("ERROR: Errors were occurred during creating the media engine config: %i\n", err);
- }
+ mv_engine_config_h mv_engine_config;
+ err = mv_create_engine_config(&mv_engine_config);
+ if (MEDIA_VISION_ERROR_NONE != err)
+ printf("ERROR: Errors occurred while creating the media engine config: %i\n", err);
- mv_engine_config_foreach_supported_attribute(_mv_engine_config_supported_attribute, mv_engine_config);
+ mv_engine_config_foreach_supported_attribute(_mv_engine_config_supported_attribute, mv_engine_config);
- mv_engine_config_set_int_attribute(mv_engine_config, MV_BARCODE_DETECT_ATTR_TARGET, MV_BARCODE_DETECT_ATTR_TARGET_2D_BARCODE);
+ mv_engine_config_set_int_attribute(mv_engine_config, MV_BARCODE_DETECT_ATTR_TARGET, MV_BARCODE_DETECT_ATTR_TARGET_2D_BARCODE);
- mv_source_h source;
- err = mv_create_source(&source);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("ERROR: Errors were occurred during creating the source!!! code: %i\n", err);
+ mv_source_h source;
+ err = mv_create_source(&source);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("ERROR: Errors occurred while creating the source!!! code: %i\n", err);
- MEDIA_VISION_FUNCTION_LEAVE();
+ MEDIA_VISION_FUNCTION_LEAVE();
- return err;
- }
+ return err;
+ }
- err = mv_source_fill_by_buffer(source, converted_buffer, converted_buffer_size,
- image_data.image_width, image_data.image_height, model.colorspace);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("ERROR: Errors were occurred during filling the source!!! code: %i\n", err);
+ err = mv_source_fill_by_buffer(source, converted_buffer, converted_buffer_size,
+ image_data.image_width, image_data.image_height, model.colorspace);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("ERROR: Errors occurred while filling the source!!! code: %i\n", err);
- MEDIA_VISION_FUNCTION_LEAVE();
+ MEDIA_VISION_FUNCTION_LEAVE();
- return err;
- }
+ return err;
+ }
- if (converted_buffer != NULL)
- {
- free(converted_buffer);
- }
+ if (converted_buffer != NULL)
+ free(converted_buffer);
- err = mv_barcode_detect(source, mv_engine_config, roi, barcode_detected_cb, &model);
+ err = mv_barcode_detect(source, mv_engine_config, roi, barcode_detected_cb, &model);
- if (data_buffer != NULL)
- {
- free(data_buffer);
- }
+ if (data_buffer != NULL)
+ free(data_buffer);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("ERROR: Errors were occurred during barcode detection!!! code: %i\n", err);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("ERROR: Errors occurred during barcode detection!!! code: %i\n", err);
- MEDIA_VISION_FUNCTION_LEAVE();
+ MEDIA_VISION_FUNCTION_LEAVE();
- return err;
- }
+ return err;
+ }
- err = mv_destroy_source(source);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("ERROR: Errors were occurred during destroying the source!!! code: %i\n", err);
- }
+ err = mv_destroy_source(source);
+ if (MEDIA_VISION_ERROR_NONE != err)
+ printf("ERROR: Errors occurred while destroying the source!!! code: %i\n", err);
- err = mv_destroy_engine_config(mv_engine_config);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("ERROR: Error were occurred during destroying the source!!! code: %i\n", err);
- }
+ err = mv_destroy_engine_config(mv_engine_config);
+ if (MEDIA_VISION_ERROR_NONE != err)
+ printf("ERROR: Errors occurred while destroying the engine config!!! code: %i\n", err);
- MEDIA_VISION_FUNCTION_LEAVE();
+ MEDIA_VISION_FUNCTION_LEAVE();
- return err;
+ return err;
}
int input_string(const char *prompt, size_t max_len, char **string)
{
- MEDIA_VISION_FUNCTION_ENTER();
-
- printf("\n");
- printf("%s ", prompt);
-
- if (scanf("\n") != 0)
- {
- MEDIA_VISION_FUNCTION_LEAVE();
- return -1;
- }
-
- char buffer[max_len];
- int last_char = 0;
- buffer[last_char] = '\0';
- buffer[sizeof(buffer) - 1] = ~'\0';
- if (fgets(buffer, sizeof(buffer), stdin) == NULL)
- {
- MEDIA_VISION_FUNCTION_LEAVE();
- return -1;
- }
- size_t real_string_len = strlen(buffer);
- buffer[real_string_len - 1] = '\0';
- *string = (char*)malloc(real_string_len * sizeof(char));
- strcpy(*string, buffer);
-
- size_t str_len = strlen(*string);
-
- MEDIA_VISION_FUNCTION_LEAVE();
-
- return str_len;
+ MEDIA_VISION_FUNCTION_ENTER();
+
+ printf("\n");
+ printf("%s ", prompt);
+
+ if (scanf("\n") != 0) {
+ MEDIA_VISION_FUNCTION_LEAVE();
+ return -1;
+ }
+
+ char buffer[max_len];
+ int last_char = 0;
+ buffer[last_char] = '\0';
+ buffer[sizeof(buffer) - 1] = ~'\0';
+ if (fgets(buffer, sizeof(buffer), stdin) == NULL) {
+ MEDIA_VISION_FUNCTION_LEAVE();
+ return -1;
+ }
+ size_t real_string_len = strlen(buffer);
+ buffer[real_string_len - 1] = '\0';
+ *string = (char*)malloc(real_string_len * sizeof(char));
+ strcpy(*string, buffer);
+
+ size_t str_len = strlen(*string);
+
+ MEDIA_VISION_FUNCTION_LEAVE();
+
+ return str_len;
}
int input_size(const char *prompt, size_t max_size, size_t *size)
{
- MEDIA_VISION_FUNCTION_ENTER();
+ MEDIA_VISION_FUNCTION_ENTER();
- printf("\n");
- printf("%s ", prompt);
+ printf("\n");
+ printf("%s ", prompt);
- if (scanf("%20zu", size) == 0)
- {
- if (scanf("%*[^\n]%*c") != 0)
- {
- printf("ERROR: Reading the input line error.\n");
- MEDIA_VISION_FUNCTION_LEAVE();
- return -1;
- }
- printf("ERROR: Incorrect input.\n");
- MEDIA_VISION_FUNCTION_LEAVE();
- return -1;
- }
+ if (scanf("%20zu", size) == 0) {
+ if (scanf("%*[^\n]%*c") != 0) {
+ printf("ERROR: Reading the input line error.\n");
+ MEDIA_VISION_FUNCTION_LEAVE();
+ return -1;
+ }
+ printf("ERROR: Incorrect input.\n");
+ MEDIA_VISION_FUNCTION_LEAVE();
+ return -1;
+ }
- int ret = (*size > max_size ? -1 : 0);
+ int ret = (*size > max_size ? -1 : 0);
- MEDIA_VISION_FUNCTION_LEAVE();
+ MEDIA_VISION_FUNCTION_LEAVE();
- return ret;
+ return ret;
}
int input_int(const char *prompt, int min_value, int max_value, int *value)
{
- MEDIA_VISION_FUNCTION_ENTER();
+ MEDIA_VISION_FUNCTION_ENTER();
- printf("\n");
- printf("%s ", prompt);
+ printf("\n");
+ printf("%s ", prompt);
- if (scanf("%20i", value) == 0)
- {
- if (scanf("%*[^\n]%*c") != 0)
- {
- printf("ERROR: Reading the input line error.\n");
- MEDIA_VISION_FUNCTION_LEAVE();
- return -1;
- }
- printf("ERROR: Incorrect input.\n");
- MEDIA_VISION_FUNCTION_LEAVE();
- return -1;
- }
+ if (scanf("%20i", value) == 0) {
+ if (scanf("%*[^\n]%*c") != 0) {
+ printf("ERROR: Reading the input line error.\n");
+ MEDIA_VISION_FUNCTION_LEAVE();
+ return -1;
+ }
+ printf("ERROR: Incorrect input.\n");
+ MEDIA_VISION_FUNCTION_LEAVE();
+ return -1;
+ }
- int ret = (*value < min_value || *value > max_value ? -1 : 0);
+ int ret = (*value < min_value || *value > max_value ? -1 : 0);
- MEDIA_VISION_FUNCTION_LEAVE();
+ MEDIA_VISION_FUNCTION_LEAVE();
- return ret;
+ return ret;
}
int show_menu(const char *title, const int *options, const char **names, int cnt)
{
- MEDIA_VISION_FUNCTION_ENTER();
-
- printf("***************************\n");
- printf("* %23s *\n", title);
- printf("*-------------------------*\n");
- int i = 0;
- for (i = 0; i < cnt; ++i)
- {
- printf("* %2i. %19s *\n", options[i], names[i]);
- }
- printf("***************************\n\n");
- int selection = 0;
- printf("Your choise: ");
- if (scanf("%20i", &selection) == 0)
- {
- if (scanf("%*[^\n]%*c") != 0)
- {
- printf("ERROR: Reading the input line error.\n");
- MEDIA_VISION_FUNCTION_LEAVE();
- return -1;
- }
- printf("ERROR: Incorrect input.\n");
- }
-
- MEDIA_VISION_FUNCTION_LEAVE();
-
- return selection;
+ MEDIA_VISION_FUNCTION_ENTER();
+
+ printf("***************************\n");
+ printf("* %23s *\n", title);
+ printf("*-------------------------*\n");
+ int i = 0;
+ for (i = 0; i < cnt; ++i)
+ printf("* %2i. %19s *\n", options[i], names[i]);
+
+ printf("***************************\n\n");
+ int selection = 0;
+ printf("Your choice: ");
+ if (scanf("%20i", &selection) == 0) {
+ if (scanf("%*[^\n]%*c") != 0) {
+ printf("ERROR: Reading the input line error.\n");
+ MEDIA_VISION_FUNCTION_LEAVE();
+ return -1;
+ }
+ printf("ERROR: Incorrect input.\n");
+ }
+
+ MEDIA_VISION_FUNCTION_LEAVE();
+
+ return selection;
}
mv_barcode_type_e select_type(void)
{
- mv_barcode_type_e selected_type = MV_BARCODE_UNDEFINED;
- int sel_opt = 0;
- const int options[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
- const char *names[8] = { "qr", "upca", "upce", "ean8", "ean13", "code39", "code128", "interleave25" };
-
- MEDIA_VISION_FUNCTION_ENTER();
-
- while (sel_opt == 0)
- {
- sel_opt = show_menu("Select barcode type:", options, names, 8);
-
- switch (sel_opt)
- {
- case 1:
- selected_type = MV_BARCODE_QR;
- break;
- case 2:
- selected_type = MV_BARCODE_UPC_A;
- break;
- case 3:
- selected_type = MV_BARCODE_UPC_E;
- break;
- case 4:
- selected_type = MV_BARCODE_EAN_8;
- break;
- case 5:
- selected_type = MV_BARCODE_EAN_13;
- break;
- case 6:
- selected_type = MV_BARCODE_CODE39;
- break;
- case 7:
- selected_type = MV_BARCODE_CODE128;
- break;
- case 8:
- selected_type = MV_BARCODE_I2_5;
- break;
- default:
- sel_opt = 0;
- break;
- }
- }
-
- MEDIA_VISION_FUNCTION_LEAVE();
-
- return selected_type;
+ mv_barcode_type_e selected_type = MV_BARCODE_UNDEFINED;
+ int sel_opt = 0;
+ const int options[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
+ const char *names[8] = { "qr", "upca", "upce", "ean8", "ean13", "code39", "code128", "interleave25" };
+
+ MEDIA_VISION_FUNCTION_ENTER();
+
+ while (sel_opt == 0) {
+ sel_opt = show_menu("Select barcode type:", options, names, 8);
+
+ switch (sel_opt) {
+ case 1:
+ selected_type = MV_BARCODE_QR;
+ break;
+ case 2:
+ selected_type = MV_BARCODE_UPC_A;
+ break;
+ case 3:
+ selected_type = MV_BARCODE_UPC_E;
+ break;
+ case 4:
+ selected_type = MV_BARCODE_EAN_8;
+ break;
+ case 5:
+ selected_type = MV_BARCODE_EAN_13;
+ break;
+ case 6:
+ selected_type = MV_BARCODE_CODE39;
+ break;
+ case 7:
+ selected_type = MV_BARCODE_CODE128;
+ break;
+ case 8:
+ selected_type = MV_BARCODE_I2_5;
+ break;
+ default:
+ sel_opt = 0;
+ break;
+ }
+ }
+
+ MEDIA_VISION_FUNCTION_LEAVE();
+
+ return selected_type;
}
mv_barcode_qr_mode_e select_mode(void)
{
- mv_barcode_qr_mode_e selected_mode = MV_BARCODE_QR_MODE_UNAVAILABLE;
- int sel_opt = 0;
- const int options[4] = { 1, 2, 3, 4 };
- const char *names[4] = { "numeric", "alphanumeric", "byte", "utf8" };
-
- MEDIA_VISION_FUNCTION_ENTER();
-
- while (sel_opt == 0)
- {
- sel_opt = show_menu("Select encoding mode:", options, names, 4);
- switch (sel_opt)
- {
- case 1:
- selected_mode = MV_BARCODE_QR_MODE_NUMERIC;
- break;
- case 2:
- selected_mode = MV_BARCODE_QR_MODE_ALPHANUMERIC;
- break;
- case 3:
- selected_mode = MV_BARCODE_QR_MODE_BYTE;
- break;
- case 4:
- selected_mode = MV_BARCODE_QR_MODE_UTF8;
- break;
- default:
- sel_opt = 0;
- break;
- }
- }
-
- MEDIA_VISION_FUNCTION_LEAVE();
-
- return selected_mode;
+ mv_barcode_qr_mode_e selected_mode = MV_BARCODE_QR_MODE_UNAVAILABLE;
+ int sel_opt = 0;
+ const int options[4] = { 1, 2, 3, 4 };
+ const char *names[4] = { "numeric", "alphanumeric", "byte", "utf8" };
+
+ MEDIA_VISION_FUNCTION_ENTER();
+
+ while (sel_opt == 0) {
+ sel_opt = show_menu("Select encoding mode:", options, names, 4);
+ switch (sel_opt) {
+ case 1:
+ selected_mode = MV_BARCODE_QR_MODE_NUMERIC;
+ break;
+ case 2:
+ selected_mode = MV_BARCODE_QR_MODE_ALPHANUMERIC;
+ break;
+ case 3:
+ selected_mode = MV_BARCODE_QR_MODE_BYTE;
+ break;
+ case 4:
+ selected_mode = MV_BARCODE_QR_MODE_UTF8;
+ break;
+ default:
+ sel_opt = 0;
+ break;
+ }
+ }
+
+ MEDIA_VISION_FUNCTION_LEAVE();
+
+ return selected_mode;
}
mv_barcode_qr_ecc_e select_ecc(void)
{
- mv_barcode_qr_ecc_e selected_ecc = MV_BARCODE_QR_ECC_UNAVAILABLE;
- int sel_opt = 0;
- const int options[4] = { 1, 2, 3, 4 };
- const char *names[4] = { "low", "medium", "quartile", "high" };
-
- MEDIA_VISION_FUNCTION_ENTER();
-
- while (sel_opt == 0)
- {
- sel_opt = show_menu("Select ECC level:", options, names, 4);
- switch (sel_opt)
- {
- case 1:
- selected_ecc = MV_BARCODE_QR_ECC_LOW;
- break;
- case 2:
- selected_ecc = MV_BARCODE_QR_ECC_MEDIUM;
- break;
- case 3:
- selected_ecc = MV_BARCODE_QR_ECC_QUARTILE;
- break;
- case 4:
- selected_ecc = MV_BARCODE_QR_ECC_HIGH;
- break;
- default:
- sel_opt = 0;
- break;
- }
- }
-
- MEDIA_VISION_FUNCTION_LEAVE();
-
- return selected_ecc;
+ mv_barcode_qr_ecc_e selected_ecc = MV_BARCODE_QR_ECC_UNAVAILABLE;
+ int sel_opt = 0;
+ const int options[4] = { 1, 2, 3, 4 };
+ const char *names[4] = { "low", "medium", "quartile", "high" };
+
+ MEDIA_VISION_FUNCTION_ENTER();
+
+ while (sel_opt == 0) {
+ sel_opt = show_menu("Select ECC level:", options, names, 4);
+ switch (sel_opt) {
+ case 1:
+ selected_ecc = MV_BARCODE_QR_ECC_LOW;
+ break;
+ case 2:
+ selected_ecc = MV_BARCODE_QR_ECC_MEDIUM;
+ break;
+ case 3:
+ selected_ecc = MV_BARCODE_QR_ECC_QUARTILE;
+ break;
+ case 4:
+ selected_ecc = MV_BARCODE_QR_ECC_HIGH;
+ break;
+ default:
+ sel_opt = 0;
+ break;
+ }
+ }
+
+ MEDIA_VISION_FUNCTION_LEAVE();
+
+ return selected_ecc;
}
int select_version(void)
{
- MEDIA_VISION_FUNCTION_ENTER();
-
- int sel_opt = 0;
- while (sel_opt == 0)
- {
- const int options[2] = {1, 40};
- const char *names[2] = { "1..", "..40" };
- sel_opt = show_menu("Select QR version:", options, names, 2);
- if (sel_opt < 1 || sel_opt > 40)
- {
- sel_opt = 0;
- }
- }
-
- MEDIA_VISION_FUNCTION_LEAVE();
-
- return sel_opt;
+ MEDIA_VISION_FUNCTION_ENTER();
+
+ int sel_opt = 0;
+ while (sel_opt == 0) {
+ const int options[2] = {1, 40};
+ const char *names[2] = { "1..", "..40" };
+ sel_opt = show_menu("Select QR version:", options, names, 2);
+ if (sel_opt < 1 || sel_opt > 40)
+ sel_opt = 0;
+ }
+
+ MEDIA_VISION_FUNCTION_LEAVE();
+
+ return sel_opt;
}
generation_fcn_e select_gen_function(void)
{
- generation_fcn_e ret_fcn_type = MV_TS_GENERATE_TO_IMAGE_FCN;
- int sel_opt = 0;
- const int options[2] = { 1, 2 };
- const char *names[2] = { "Generate to file", "Generate to source" };
-
- MEDIA_VISION_FUNCTION_ENTER();
-
- while (sel_opt == 0)
- {
- sel_opt = show_menu("Select API function:", options, names, 2);
- switch (sel_opt)
- {
- case 1:
- ret_fcn_type = MV_TS_GENERATE_TO_IMAGE_FCN;
- break;
- case 2:
- ret_fcn_type = MV_TS_GENERATE_TO_SOURCE_FCN;
- break;
- default:
- sel_opt = 0;
- break;
- }
- }
-
- MEDIA_VISION_FUNCTION_LEAVE();
-
- return ret_fcn_type;
+ generation_fcn_e ret_fcn_type = MV_TS_GENERATE_TO_IMAGE_FCN;
+ int sel_opt = 0;
+ const int options[2] = { 1, 2 };
+ const char *names[2] = { "Generate to file", "Generate to source" };
+
+ MEDIA_VISION_FUNCTION_ENTER();
+
+ while (sel_opt == 0) {
+ sel_opt = show_menu("Select API function:", options, names, 2);
+ switch (sel_opt) {
+ case 1:
+ ret_fcn_type = MV_TS_GENERATE_TO_IMAGE_FCN;
+ break;
+ case 2:
+ ret_fcn_type = MV_TS_GENERATE_TO_SOURCE_FCN;
+ break;
+ default:
+ sel_opt = 0;
+ break;
+ }
+ }
+
+ MEDIA_VISION_FUNCTION_LEAVE();
+
+ return ret_fcn_type;
}
mv_barcode_image_format_e select_file_format(void)
{
- mv_barcode_image_format_e image_format = MV_BARCODE_IMAGE_FORMAT_JPG;
- int sel_opt = 0;
- const int options[3] = { 1, 2, 3 };
- const char *names[3] = { "BMP", "JPG", "PNG" };
-
- MEDIA_VISION_FUNCTION_ENTER();
-
- while (sel_opt == 0)
- {
- sel_opt = show_menu("Select file format:", options, names, 3);
- switch (sel_opt)
- {
- case 1:
- image_format = MV_BARCODE_IMAGE_FORMAT_BMP;
- break;
- case 2:
- image_format = MV_BARCODE_IMAGE_FORMAT_JPG;
- break;
- case 3:
- image_format = MV_BARCODE_IMAGE_FORMAT_PNG;
- break;
- default:
- sel_opt = 0;
- break;
- }
- }
-
- MEDIA_VISION_FUNCTION_LEAVE();
-
- return image_format;
+ mv_barcode_image_format_e image_format = MV_BARCODE_IMAGE_FORMAT_JPG;
+ int sel_opt = 0;
+ const int options[3] = { 1, 2, 3 };
+ const char *names[3] = { "BMP", "JPG", "PNG" };
+
+ MEDIA_VISION_FUNCTION_ENTER();
+
+ while (sel_opt == 0) {
+ sel_opt = show_menu("Select file format:", options, names, 3);
+ switch (sel_opt) {
+ case 1:
+ image_format = MV_BARCODE_IMAGE_FORMAT_BMP;
+ break;
+ case 2:
+ image_format = MV_BARCODE_IMAGE_FORMAT_JPG;
+ break;
+ case 3:
+ image_format = MV_BARCODE_IMAGE_FORMAT_PNG;
+ break;
+ default:
+ sel_opt = 0;
+ break;
+ }
+ }
+
+ MEDIA_VISION_FUNCTION_LEAVE();
+
+ return image_format;
}
int perform_detect()
{
- MEDIA_VISION_FUNCTION_ENTER();
-
- barcode_model_s detect_model = {
- MV_BARCODE_UNDEFINED,
- MV_BARCODE_QR_ECC_UNAVAILABLE,
- MV_BARCODE_QR_MODE_UNAVAILABLE,
- 0, 0, 0,
- MV_BARCODE_IMAGE_FORMAT_PNG,
- MEDIA_VISION_COLORSPACE_INVALID,
- NULL, NULL, NULL, NULL };
-
- while (input_string("Input file name to be analyzed:", 1024, &(detect_model.file_name)) == -1)
- {
- printf("Incorrect input! Try again.\n");
- }
- LOGI("Barcode input image has been specified");
-
- mv_rectangle_s roi = { {0, 0}, 0, 0 };
-
- while (input_int("Input x coordinate for ROI top left vertex:", 0, 10000, &(roi.point.x)) == -1)
- {
- printf("Incorrect input! Try again.\n");
- }
-
- while (input_int("Input y coordinate for ROI top left vertex:", 0, 10000, &(roi.point.y)) == -1)
- {
- printf("Incorrect input! Try again.\n");
- }
-
- while (input_int("Input ROI width:", 0, 10000, &(roi.width)) == -1)
- {
- printf("Incorrect input! Try again.\n");
- }
-
- while (input_int("Input ROI height:", 0, 10000, &(roi.height)) == -1)
- {
- printf("Incorrect input! Try again.\n");
- }
- LOGI("Region of interest (ROI) to detect barcode into has been specified");
-
- while (input_string("Input file name to be generated:", 1024, &(detect_model.out_file_name)) == -1)
- {
- printf("Incorrect input! Try again.\n");
- }
- LOGI("Barcode output image has been specified");
-
- const int options[11] = { MEDIA_VISION_COLORSPACE_Y800,
- MEDIA_VISION_COLORSPACE_I420,
- MEDIA_VISION_COLORSPACE_NV12,
- MEDIA_VISION_COLORSPACE_YV12,
- MEDIA_VISION_COLORSPACE_NV21,
- MEDIA_VISION_COLORSPACE_YUYV,
- MEDIA_VISION_COLORSPACE_UYVY,
- MEDIA_VISION_COLORSPACE_422P,
- MEDIA_VISION_COLORSPACE_RGB565,
- MEDIA_VISION_COLORSPACE_RGB888,
- MEDIA_VISION_COLORSPACE_RGBA };
- const char *names[11] = { "Y800", "I420", "NV12", "YV12", "NV21",
- "YUYV", "UYVY", "422P", "RGB565",
- "RGB888", "RGBA" };
-
- while (true)
- {
- int sel_opt = show_menu("Select colorspace to test detector on:", options, names, 11);
- if (sel_opt < MEDIA_VISION_COLORSPACE_Y800 ||
- sel_opt > MEDIA_VISION_COLORSPACE_RGBA)
- {
- continue;
- }
- detect_model.colorspace = (mv_colorspace_e)sel_opt;
- LOGI("User selection is %i", sel_opt);
- break;
- }
-
- int err = detect_barcode(detect_model, roi);
-
- if (detect_model.file_name != NULL)
- {
- free(detect_model.file_name);
- }
-
- if (detect_model.out_file_name != NULL)
- {
- free(detect_model.out_file_name);
- }
-
- if (err != MEDIA_VISION_ERROR_NONE)
- {
- LOGE("Barcode detection failed with error code (0x%08x)", err);
- }
-
-
-
- MEDIA_VISION_FUNCTION_LEAVE();
-
- return err;
+ MEDIA_VISION_FUNCTION_ENTER();
+
+ barcode_model_s detect_model = {
+ MV_BARCODE_UNDEFINED,
+ MV_BARCODE_QR_ECC_UNAVAILABLE,
+ MV_BARCODE_QR_MODE_UNAVAILABLE,
+ 0, 0, 0,
+ MV_BARCODE_IMAGE_FORMAT_PNG,
+ MEDIA_VISION_COLORSPACE_INVALID,
+ NULL, NULL, NULL, NULL };
+
+ while (input_string("Input file name to be analyzed:", 1024, &(detect_model.file_name)) == -1)
+ printf("Incorrect input! Try again.\n");
+
+ LOGI("Barcode input image has been specified");
+
+ mv_rectangle_s roi = { {0, 0}, 0, 0 };
+
+ while (input_int("Input x coordinate for ROI top left vertex:", 0, 10000, &(roi.point.x)) == -1)
+ printf("Incorrect input! Try again.\n");
+
+ while (input_int("Input y coordinate for ROI top left vertex:", 0, 10000, &(roi.point.y)) == -1)
+ printf("Incorrect input! Try again.\n");
+
+ while (input_int("Input ROI width:", 0, 10000, &(roi.width)) == -1)
+ printf("Incorrect input! Try again.\n");
+
+ while (input_int("Input ROI height:", 0, 10000, &(roi.height)) == -1)
+ printf("Incorrect input! Try again.\n");
+
+ LOGI("Region of interest (ROI) to detect barcode into has been specified");
+
+ while (input_string("Input file name to be generated:", 1024, &(detect_model.out_file_name)) == -1)
+ printf("Incorrect input! Try again.\n");
+
+ LOGI("Barcode output image has been specified");
+
+ const int options[11] = { MEDIA_VISION_COLORSPACE_Y800,
+ MEDIA_VISION_COLORSPACE_I420,
+ MEDIA_VISION_COLORSPACE_NV12,
+ MEDIA_VISION_COLORSPACE_YV12,
+ MEDIA_VISION_COLORSPACE_NV21,
+ MEDIA_VISION_COLORSPACE_YUYV,
+ MEDIA_VISION_COLORSPACE_UYVY,
+ MEDIA_VISION_COLORSPACE_422P,
+ MEDIA_VISION_COLORSPACE_RGB565,
+ MEDIA_VISION_COLORSPACE_RGB888,
+ MEDIA_VISION_COLORSPACE_RGBA };
+ const char *names[11] = { "Y800", "I420", "NV12", "YV12", "NV21",
+ "YUYV", "UYVY", "422P", "RGB565",
+ "RGB888", "RGBA" };
+
+ while (true) {
+ int sel_opt = show_menu("Select colorspace to test detector on:", options, names, 11);
+ if (sel_opt < MEDIA_VISION_COLORSPACE_Y800 ||
+ sel_opt > MEDIA_VISION_COLORSPACE_RGBA) {
+ continue;
+ }
+ detect_model.colorspace = (mv_colorspace_e)sel_opt;
+ LOGI("User selection is %i", sel_opt);
+ break;
+ }
+
+ int err = detect_barcode(detect_model, roi);
+
+ if (detect_model.file_name != NULL)
+ free(detect_model.file_name);
+
+ if (detect_model.out_file_name != NULL)
+ free(detect_model.out_file_name);
+
+ if (err != MEDIA_VISION_ERROR_NONE)
+ LOGE("Barcode detection failed with error code (0x%08x)", err);
+
+ MEDIA_VISION_FUNCTION_LEAVE();
+
+ return err;
}
int perform_generate(void)
{
- MEDIA_VISION_FUNCTION_ENTER();
-
- barcode_model_s generate_model = {
- MV_BARCODE_UNDEFINED,
- MV_BARCODE_QR_ECC_UNAVAILABLE,
- MV_BARCODE_QR_MODE_UNAVAILABLE,
- 0, 0, 0,
- MV_BARCODE_IMAGE_FORMAT_PNG,
- MEDIA_VISION_COLORSPACE_INVALID,
- NULL, NULL, NULL, NULL };
-
- generation_fcn_e gen_fcn = select_gen_function();
- generate_model.type = select_type();
- LOGI("Barcode type has been selected");
-
- if (generate_model.type == MV_BARCODE_QR)
- {
- generate_model.mode = select_mode();
- LOGI("Barcode encoding mode has been selected");
- generate_model.ecc = select_ecc();
- LOGI("Barcode ecc level has been selected");
- generate_model.version = select_version();
- LOGI("Barcode version has been selected");
- }
-
- if (gen_fcn == MV_TS_GENERATE_TO_IMAGE_FCN)
- {
- generate_model.out_image_format = select_file_format();
- LOGI("Barcode output image format has been selected");
- }
-
- while (input_string("Input message:", 7089, &generate_model.message) == -1)
- {
- printf("Incorrect input! Try again.\n");
- }
- LOGI("Barcode message has been specified");
-
- while (input_string("Input file name:", 1024, &generate_model.file_name) == -1)
- {
- printf("Incorrect input! Try again.\n");
- }
- LOGI("Barcode output file name has been specified");
-
- if (gen_fcn == MV_TS_GENERATE_TO_IMAGE_FCN)
- {
- while (input_size("Input image width:", 10000, &generate_model.width) == -1)
- {
- printf("Incorrect input! Try again.\n");
- }
- LOGI("Barcode output file width has been specified");
-
- while (input_size("Input image height:", 10000, &generate_model.height) == -1)
- {
- printf("Incorrect input! Try again.\n");
- }
- LOGI("Barcode output file height has been specified");
- }
-
- const int err =
- gen_fcn == MV_TS_GENERATE_TO_IMAGE_FCN ?
- generate_barcode_to_image(generate_model) :
- generate_barcode_to_source(generate_model);
-
- if (generate_model.message != NULL)
- {
- free(generate_model.message);
- }
-
- if (generate_model.file_name != NULL)
- {
- free(generate_model.file_name);
- }
-
- if (err != MEDIA_VISION_ERROR_NONE)
- {
- LOGE("Barcode generation failed with error code (0x%08x)", err);
- printf("ERROR: Errors were occurred during barcode generation!!!\n");
- MEDIA_VISION_FUNCTION_LEAVE();
- return err;
- }
-
- LOGI("Barcode output file has been generated");
- printf("\nBarcode image was successfully generated.\n");
-
- MEDIA_VISION_FUNCTION_LEAVE();
-
- return 0;
+ MEDIA_VISION_FUNCTION_ENTER();
+
+ barcode_model_s generate_model = {
+ MV_BARCODE_UNDEFINED,
+ MV_BARCODE_QR_ECC_UNAVAILABLE,
+ MV_BARCODE_QR_MODE_UNAVAILABLE,
+ 0, 0, 0,
+ MV_BARCODE_IMAGE_FORMAT_PNG,
+ MEDIA_VISION_COLORSPACE_INVALID,
+ NULL, NULL, NULL, NULL };
+
+ generation_fcn_e gen_fcn = select_gen_function();
+ generate_model.type = select_type();
+ LOGI("Barcode type has been selected");
+
+ if (generate_model.type == MV_BARCODE_QR) {
+ generate_model.mode = select_mode();
+ LOGI("Barcode encoding mode has been selected");
+ generate_model.ecc = select_ecc();
+ LOGI("Barcode ecc level has been selected");
+ generate_model.version = select_version();
+ LOGI("Barcode version has been selected");
+ }
+
+ if (gen_fcn == MV_TS_GENERATE_TO_IMAGE_FCN) {
+ generate_model.out_image_format = select_file_format();
+ LOGI("Barcode output image format has been selected");
+ }
+
+ while (input_string("Input message:", 7089, &generate_model.message) == -1)
+ printf("Incorrect input! Try again.\n");
+
+ LOGI("Barcode message has been specified");
+
+ while (input_string("Input file name:", 1024, &generate_model.file_name) == -1)
+ printf("Incorrect input! Try again.\n");
+
+ LOGI("Barcode output file name has been specified");
+
+ if (gen_fcn == MV_TS_GENERATE_TO_IMAGE_FCN) {
+ while (input_size("Input image width:", 10000, &generate_model.width) == -1)
+ printf("Incorrect input! Try again.\n");
+
+ LOGI("Barcode output file width has been specified");
+
+ while (input_size("Input image height:", 10000, &generate_model.height) == -1)
+ printf("Incorrect input! Try again.\n");
+
+ LOGI("Barcode output file height has been specified");
+ }
+
+ const int err =
+ gen_fcn == MV_TS_GENERATE_TO_IMAGE_FCN ?
+ generate_barcode_to_image(generate_model) :
+ generate_barcode_to_source(generate_model);
+
+ if (generate_model.message != NULL)
+ free(generate_model.message);
+
+ if (generate_model.file_name != NULL)
+ free(generate_model.file_name);
+
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Barcode generation failed with error code (0x%08x)", err);
+ printf("ERROR: Errors occurred during barcode generation!!!\n");
+ MEDIA_VISION_FUNCTION_LEAVE();
+ return err;
+ }
+
+ LOGI("Barcode output file has been generated");
+ printf("\nBarcode image was successfully generated.\n");
+
+ MEDIA_VISION_FUNCTION_LEAVE();
+
+ return 0;
}
int main(void)
{
- LOGI("Media Vision Testsuite is launched.");
-
- int err = MEDIA_VISION_ERROR_NONE;
-
- int sel_opt = 0;
- const int options[2] = { 1, 2 };
- const char *names[2] = { "Generate", "Detect" };
-
- while (sel_opt == 0)
- {
- sel_opt = show_menu("Select action:", options, names, 2);
- switch (sel_opt)
- {
- case 1:
- LOGI("Start the barcode generation flow");
- err = perform_generate();
- break;
- case 2:
- LOGI("Start the barcode detection flow");
- err = perform_detect();
- break;
- default:
- sel_opt = 0;
- continue;
- }
-
- int do_another = 0;
-
- if (err != MEDIA_VISION_ERROR_NONE)
- {
- printf("ERROR: Action is finished with error code: %i\n", err);
- }
-
- sel_opt = 0;
- const int options_last[2] = { 1, 2 };
- const char *names_last[2] = { "YES", "NO" };
-
- while (sel_opt == 0)
- {
- sel_opt = show_menu("Perform another action?", options_last, names_last, 2);
- switch (sel_opt)
- {
- case 1:
- do_another = 1;
- break;
- case 2:
- do_another = 0;
- break;
- default:
- sel_opt = 0;
- break;
- }
- }
- LOGI("User selection is %i", sel_opt);
-
- sel_opt = (do_another == 1 ? 0 : sel_opt);
- }
-
- LOGI("Media Vision Testsuite is closed.");
-
- return err;
+ LOGI("Media Vision Testsuite is launched.");
+
+ int err = MEDIA_VISION_ERROR_NONE;
+
+ int sel_opt = 0;
+ const int options[2] = { 1, 2 };
+ const char *names[2] = { "Generate", "Detect" };
+
+ while (sel_opt == 0) {
+ sel_opt = show_menu("Select action:", options, names, 2);
+ switch (sel_opt) {
+ case 1:
+ LOGI("Start the barcode generation flow");
+ err = perform_generate();
+ break;
+ case 2:
+ LOGI("Start the barcode detection flow");
+ err = perform_detect();
+ break;
+ default:
+ sel_opt = 0;
+ continue;
+ }
+
+ int do_another = 0;
+
+ if (err != MEDIA_VISION_ERROR_NONE)
+ printf("ERROR: Action finished with error code: %i\n", err);
+
+ sel_opt = 0;
+ const int options_last[2] = { 1, 2 };
+ const char *names_last[2] = { "YES", "NO" };
+
+ while (sel_opt == 0) {
+ sel_opt = show_menu("Perform another action?", options_last, names_last, 2);
+ switch (sel_opt) {
+ case 1:
+ do_another = 1;
+ break;
+ case 2:
+ do_another = 0;
+ break;
+ default:
+ sel_opt = 0;
+ break;
+ }
+ }
+ LOGI("User selection is %i", sel_opt);
+
+ sel_opt = (do_another == 1 ? 0 : sel_opt);
+ }
+
+ LOGI("Media Vision Testsuite is closed.");
+
+ return err;
}
diff --git a/test/testsuites/face/face_test_suite.c b/test/testsuites/face/face_test_suite.c
index c26fce76..e7ca50a6 100644
--- a/test/testsuites/face/face_test_suite.c
+++ b/test/testsuites/face/face_test_suite.c
@@ -40,816 +40,727 @@ static bool Perform_eye_condition_recognize = false;
static bool Perform_facial_expression_recognize = false;
void eye_condition_cb(
- mv_source_h source,
- mv_engine_config_h engine_cfg,
- mv_rectangle_s face_location,
- mv_face_eye_condition_e eye_condition,
- void *user_data)
+ mv_source_h source,
+ mv_engine_config_h engine_cfg,
+ mv_rectangle_s face_location,
+ mv_face_eye_condition_e eye_condition,
+ void *user_data)
{
- switch (eye_condition)
- {
- case MV_FACE_EYES_NOT_FOUND:
- printf("Eyes not found");
- break;
- case MV_FACE_EYES_OPEN:
- printf("Eyes are open");
- break;
- case MV_FACE_EYES_CLOSED:
- printf("Eyes are closed");
- break;
- }
+ switch (eye_condition) {
+ case MV_FACE_EYES_NOT_FOUND:
+ printf("Eyes not found");
+ break;
+ case MV_FACE_EYES_OPEN:
+ printf("Eyes are open");
+ break;
+ case MV_FACE_EYES_CLOSED:
+ printf("Eyes are closed");
+ break;
+ }
}
void face_expression_cb(
- mv_source_h source,
- mv_engine_config_h engine_cfg,
- mv_rectangle_s face_location,
- mv_face_facial_expression_e facial_expression,
- void *user_data)
+ mv_source_h source,
+ mv_engine_config_h engine_cfg,
+ mv_rectangle_s face_location,
+ mv_face_facial_expression_e facial_expression,
+ void *user_data)
{
- switch (facial_expression)
- {
- case MV_FACE_NEUTRAL:
- printf("Face expression is neutral");
- break;
- case MV_FACE_SMILE:
- printf("Face expression is smiling");
- break;
- case MV_FACE_UNKNOWN:
- printf("Face expression isn't recognized");
- break;
- }
+ switch (facial_expression) {
+ case MV_FACE_NEUTRAL:
+ printf("Face expression is neutral");
+ break;
+ case MV_FACE_SMILE:
+ printf("Face expression is smiling");
+ break;
+ case MV_FACE_UNKNOWN:
+ printf("Face expression isn't recognized");
+ break;
+ }
}
void on_face_detected_cb(
- mv_source_h source,
- mv_engine_config_h engine_cfg,
- mv_rectangle_s *faces_locations,
- int number_of_faces,
- void *user_data)
+ mv_source_h source,
+ mv_engine_config_h engine_cfg,
+ mv_rectangle_s *faces_locations,
+ int number_of_faces,
+ void *user_data)
{
- printf("%i faces were detected on the image.\n", number_of_faces);
- if (number_of_faces > 0)
- {
- int is_source_data_loaded = 0;
-
- char *file_name = NULL;
- unsigned char *out_buffer = NULL;
- unsigned int buf_size = 0;
- image_data_s image_data = { 0, 0, MEDIA_VISION_COLORSPACE_INVALID };
- if (MEDIA_VISION_ERROR_NONE != mv_source_get_buffer(source, &out_buffer, &buf_size) ||
- MEDIA_VISION_ERROR_NONE != mv_source_get_width(source, &(image_data.image_width)) ||
- MEDIA_VISION_ERROR_NONE != mv_source_get_height(source, &(image_data.image_height)) ||
- MEDIA_VISION_ERROR_NONE != mv_source_get_colorspace(source, &(image_data.image_colorspace)) ||
- user_data == NULL)
- {
- printf("ERROR: Creating out image is impossible.\n");
- }
- else
- {
- file_name = (char*)user_data;
- is_source_data_loaded = 1;
- }
-
- int i = 0;
- for (i = 0; i < number_of_faces; ++i)
- {
- printf("\Face %i : x - %i, y - %i, width - %i, height - %i ", i,
- faces_locations[i].point.x, faces_locations[i].point.y,
- faces_locations[i].width, faces_locations[i].height);
-
- if (Perform_eye_condition_recognize)
- {
- if (MEDIA_VISION_ERROR_NONE != mv_face_eye_condition_recognize(
- source,
- engine_cfg,
- faces_locations[i],
- eye_condition_cb,
- user_data))
- {
- printf(TEXT_RED "\nEye condition recognition for %i face failed"
- TEXT_RESET "\n", i);
- }
- }
-
- if (Perform_facial_expression_recognize)
- {
- if (MEDIA_VISION_ERROR_NONE != mv_face_facial_expression_recognize(
- source,
- engine_cfg,
- faces_locations[i],
- face_expression_cb,
- user_data))
- {
- printf(TEXT_RED "\nFacial expression recognition for %i "
- "face failed" TEXT_RESET "\n", i);
- }
- }
-
- printf("\n");
-
- if ((is_source_data_loaded == 1) && !Perform_eye_condition_recognize)
- {
- const int rectangle_thickness = 3;
- const int drawing_color[] = {255, 0, 0};
- if (MEDIA_VISION_ERROR_NONE != draw_rectangle_on_buffer(
- faces_locations[i].point.x,
- faces_locations[i].point.y,
- faces_locations[i].point.x + faces_locations[i].width,
- faces_locations[i].point.y + faces_locations[i].height,
- rectangle_thickness,
- drawing_color,
- &image_data,
- out_buffer))
- {
- continue;
- }
- }
- }
-
- if (!Perform_eye_condition_recognize)
- {
- if (file_name != NULL &&
- MEDIA_VISION_ERROR_NONE == save_image_from_buffer(
- file_name,
- out_buffer,
- &image_data,
- 100))
- {
- printf("Image was generated as %s\n", file_name);
- }
- else
- {
- printf("ERROR: Failed to generate output file. Check file name and permissions. \n");
- }
- }
-
- printf("\n");
- }
+ printf("%i faces were detected on the image.\n", number_of_faces);
+ if (number_of_faces > 0) {
+ int is_source_data_loaded = 0;
+
+ char *file_name = NULL;
+ unsigned char *out_buffer = NULL;
+ unsigned int buf_size = 0;
+ image_data_s image_data = { 0, 0, MEDIA_VISION_COLORSPACE_INVALID };
+ if (MEDIA_VISION_ERROR_NONE != mv_source_get_buffer(source, &out_buffer, &buf_size) ||
+ MEDIA_VISION_ERROR_NONE != mv_source_get_width(source, &(image_data.image_width)) ||
+ MEDIA_VISION_ERROR_NONE != mv_source_get_height(source, &(image_data.image_height)) ||
+ MEDIA_VISION_ERROR_NONE != mv_source_get_colorspace(source, &(image_data.image_colorspace)) ||
+ user_data == NULL) {
+ printf("ERROR: Creating output image is impossible.\n");
+ } else {
+ file_name = (char*)user_data;
+ is_source_data_loaded = 1;
+ }
+
+ int i = 0;
+ for (i = 0; i < number_of_faces; ++i) {
+ printf("Face %i : x - %i, y - %i, width - %i, height - %i ", i,
+ faces_locations[i].point.x, faces_locations[i].point.y,
+ faces_locations[i].width, faces_locations[i].height);
+
+ if (Perform_eye_condition_recognize) {
+ if (MEDIA_VISION_ERROR_NONE != mv_face_eye_condition_recognize(
+ source,
+ engine_cfg,
+ faces_locations[i],
+ eye_condition_cb,
+ user_data)) {
+ printf(TEXT_RED "\nEye condition recognition for %i face failed"
+ TEXT_RESET "\n", i);
+ }
+ }
+
+ if (Perform_facial_expression_recognize) {
+ if (MEDIA_VISION_ERROR_NONE != mv_face_facial_expression_recognize(
+ source,
+ engine_cfg,
+ faces_locations[i],
+ face_expression_cb,
+ user_data)) {
+ printf(TEXT_RED "\nFacial expression recognition for %i "
+ "face failed" TEXT_RESET "\n", i);
+ }
+ }
+
+ printf("\n");
+
+ if ((is_source_data_loaded == 1) && !Perform_eye_condition_recognize) {
+ const int rectangle_thickness = 3;
+ const int drawing_color[] = {255, 0, 0};
+ if (MEDIA_VISION_ERROR_NONE != draw_rectangle_on_buffer(
+ faces_locations[i].point.x,
+ faces_locations[i].point.y,
+ faces_locations[i].point.x + faces_locations[i].width,
+ faces_locations[i].point.y + faces_locations[i].height,
+ rectangle_thickness,
+ drawing_color,
+ &image_data,
+ out_buffer)) {
+ continue;
+ }
+ }
+ }
+
+ if (!Perform_eye_condition_recognize) {
+ if (file_name != NULL &&
+ MEDIA_VISION_ERROR_NONE == save_image_from_buffer(
+ file_name,
+ out_buffer,
+ &image_data,
+ 100)) {
+ printf("Image was generated as %s\n", file_name);
+ } else {
+ printf("ERROR: Failed to generate output file. Check file name and permissions. \n");
+ }
+ }
+
+ printf("\n");
+ }
}
void on_face_recognized_cb(
- mv_source_h source,
- mv_face_recognition_model_h recognition_model,
- mv_engine_config_h engine_cfg,
- mv_rectangle_s *face_location,
- const int *face_label,
- double confidence,
- void *user_data)
+ mv_source_h source,
+ mv_face_recognition_model_h recognition_model,
+ mv_engine_config_h engine_cfg,
+ mv_rectangle_s *face_location,
+ const int *face_label,
+ double confidence,
+ void *user_data)
{
- if (NULL == face_location)
- {
- printf(TEXT_YELLOW "No faces were recognized in the source"
- TEXT_RESET "\n");
- }
- else
- {
- printf(TEXT_GREEN "Face labeled %i was recognized in the source with "
- "recognition confidence of %.2f"
- TEXT_RESET "\n", *face_label, confidence);
- }
+ if (NULL == face_location) {
+ printf(TEXT_YELLOW "No faces were recognized in the source"
+ TEXT_RESET "\n");
+ } else {
+ printf(TEXT_GREEN "Face labeled %i was recognized in the source with "
+ "recognition confidence of %.2f"
+ TEXT_RESET "\n", *face_label, confidence);
+ }
}
int perform_detect()
{
- char *in_file_name = NULL;
- char *out_file_name = NULL;
-
- // 1. Loading media source
- while (input_string("Input file name to be analyzed:", 1024, &(in_file_name)) == -1)
- {
- printf("Incorrect input! Try again.\n");
- }
-
- mv_source_h source;
- int err = mv_create_source(&source);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED
- "ERROR: Errors were occurred during creating the source!!! code: %i"
- TEXT_RESET "\n", err);
-
- free(in_file_name);
-
- return err;
- }
-
- err = load_mv_source_from_file(in_file_name, source);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- const int err2 = mv_destroy_source(source);
- if (MEDIA_VISION_ERROR_NONE != err2)
- {
- printf(TEXT_RED
- "ERROR: Errors were occurred during destroying the source!!! code: %i"
- TEXT_RESET "\n", err2);
-
- free(in_file_name);
-
- return err2;
- }
-
- free(in_file_name);
-
- return err;
- }
-
- free(in_file_name);
-
- // 2. Select output file to be generated
- while (input_string("Input file name to be generated:", 1024, &(out_file_name)) == -1)
- {
- printf("Incorrect input! Try again.\n");
- }
-
- // 3. Select Haar cascade
- const int options[3] = { 1, 2, 3 };
- const char *names[3] = { "haarcascade_frontalface_alt.xml",
- "haarcascade_frontalface_alt2.xml",
- "haarcascade_frontalface_alt_tree.xml"};
-
- const int haarcascade = show_menu("Select Haarcascade:", options, names, 3);
-
- mv_engine_config_h eng_config;
- err = mv_create_engine_config(&eng_config);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED
- "ERROR: Errors were occurred during creating the engine config!!! code: %i"
- TEXT_RESET "\n", err);
-
- free(out_file_name);
-
- return err;
- }
-
- switch (haarcascade)
- {
- case 1:
- mv_engine_config_set_string_attribute(
- eng_config,
- MV_FACE_DETECTION_MODEL_FILE_PATH,
- "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml");
- break;
- case 2:
- mv_engine_config_set_string_attribute(
- eng_config,
- MV_FACE_DETECTION_MODEL_FILE_PATH,
- "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt2.xml");
- break;
- case 3:
- mv_engine_config_set_string_attribute(
- eng_config,
- MV_FACE_DETECTION_MODEL_FILE_PATH,
- "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt_tree.xml");
- break;
- default:
- printf(TEXT_YELLOW "Default Haar cascade was set.\n" TEXT_RESET);
- }
-
- // 4. Perform detect
- err = mv_face_detect(source, eng_config, on_face_detected_cb, out_file_name);
-
- free(out_file_name);
-
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED
- "ERROR: Errors were occurred during face detection!!! code: %i"
- TEXT_RESET "\n", err);
-
- int err2 = mv_destroy_source(source);
- if (MEDIA_VISION_ERROR_NONE != err2)
- {
- printf(TEXT_RED
- "ERROR: Errors were occurred during destroying the source!!! code: %i"
- TEXT_RESET "\n", err2);
- return err2;
- }
-
- err2 = mv_destroy_engine_config(eng_config);
- if (MEDIA_VISION_ERROR_NONE != err2)
- {
- printf(TEXT_RED
- "ERROR: Errors were occurred during destroying the engine config!!! code: %i"
- TEXT_RESET "\n", err2);
- return err2;
- }
-
- return err;
- }
-
- err = mv_destroy_source(source);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED
- "ERROR: Errors were occurred during destroying the source!!! code: %i"
- TEXT_RESET "\n", err);
- return err;
- }
-
- err = mv_destroy_engine_config(eng_config);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED
- "ERROR: Errors were occurred during destroying the engine config!!! code: %i"
- TEXT_RESET "\n", err);
- return err;
- }
-
- return err;
+ char *in_file_name = NULL;
+ char *out_file_name = NULL;
+
+ /* 1. Loading media source */
+ while (input_string("Input file name to be analyzed:", 1024, &(in_file_name)) == -1)
+ printf("Incorrect input! Try again.\n");
+
+ mv_source_h source;
+ int err = mv_create_source(&source);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED
+ "ERROR: Errors were occurred during creating the source!!! code: %i"
+ TEXT_RESET "\n", err);
+
+ free(in_file_name);
+
+ return err;
+ }
+
+ err = load_mv_source_from_file(in_file_name, source);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ const int err2 = mv_destroy_source(source);
+ if (MEDIA_VISION_ERROR_NONE != err2) {
+ printf(TEXT_RED
+ "ERROR: Errors were occurred during destroying the source!!! code: %i"
+ TEXT_RESET "\n", err2);
+
+ free(in_file_name);
+
+ return err2;
+ }
+
+ free(in_file_name);
+
+ return err;
+ }
+
+ free(in_file_name);
+
+ /* 2. Select output file to be generated */
+ while (input_string("Input file name to be generated:", 1024, &(out_file_name)) == -1)
+ printf("Incorrect input! Try again.\n");
+
+ /* 3. Select Haar cascade */
+ const int options[3] = { 1, 2, 3 };
+ const char *names[3] = { "haarcascade_frontalface_alt.xml",
+ "haarcascade_frontalface_alt2.xml",
+ "haarcascade_frontalface_alt_tree.xml"};
+
+ const int haarcascade = show_menu("Select Haarcascade:", options, names, 3);
+
+ mv_engine_config_h eng_config;
+ err = mv_create_engine_config(&eng_config);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED
+ "ERROR: Errors were occurred during creating the engine config!!! code: %i"
+ TEXT_RESET "\n", err);
+
+ free(out_file_name);
+
+ return err;
+ }
+
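+ /* Point the engine config at the selected cascade file; any other choice keeps the default model */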
+ switch (haarcascade) {
+ case 1:
+ mv_engine_config_set_string_attribute(
+ eng_config,
+ MV_FACE_DETECTION_MODEL_FILE_PATH,
+ "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml");
+ break;
+ case 2:
+ mv_engine_config_set_string_attribute(
+ eng_config,
+ MV_FACE_DETECTION_MODEL_FILE_PATH,
+ "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt2.xml");
+ break;
+ case 3:
+ mv_engine_config_set_string_attribute(
+ eng_config,
+ MV_FACE_DETECTION_MODEL_FILE_PATH,
+ "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt_tree.xml");
+ break;
+ default:
+ printf(TEXT_YELLOW "Default Haar cascade was set.\n" TEXT_RESET);
+ }
+
+ /* 4. Perform detect */
+ err = mv_face_detect(source, eng_config, on_face_detected_cb, out_file_name);
+
+ free(out_file_name);
+
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED
+ "ERROR: Errors were occurred during face detection!!! code: %i"
+ TEXT_RESET "\n", err);
+
+ int err2 = mv_destroy_source(source);
+ if (MEDIA_VISION_ERROR_NONE != err2) {
+ printf(TEXT_RED
+ "ERROR: Errors were occurred during destroying the source!!! code: %i"
+ TEXT_RESET "\n", err2);
+ return err2;
+ }
+
+ err2 = mv_destroy_engine_config(eng_config);
+ if (MEDIA_VISION_ERROR_NONE != err2) {
+ printf(TEXT_RED
+ "ERROR: Errors were occurred during destroying the engine config!!! code: %i"
+ TEXT_RESET "\n", err2);
+ return err2;
+ }
+
+ return err;
+ }
+
+ err = mv_destroy_source(source);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED
+ "ERROR: Errors were occurred during destroying the source!!! code: %i"
+ TEXT_RESET "\n", err);
+ return err;
+ }
+
+ err = mv_destroy_engine_config(eng_config);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED
+ "ERROR: Errors were occurred during destroying the engine config!!! code: %i"
+ TEXT_RESET "\n", err);
+ return err;
+ }
+
+ return err;
}
int perform_mv_face_recognize(mv_face_recognition_model_h model)
{
- char *in_file_name = NULL;
-
- mv_source_h source = NULL;
- int err = mv_create_source(&source);
-
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED
- "ERROR: Errors were occurred during creating the source!!! code: %i"
- TEXT_RESET "\n", err);
- return err;
- }
-
- printf(TEXT_GREEN "HINT:" TEXT_RESET "\n"
- TEXT_YELLOW "To achieve appropriate accuracy of recognition,\n"
- "choose images with only faces. I.e. face has to cover\n"
- "approximately 95-100%% of the image (passport photos\n"
- "are the best example :)). Note that if this value is\n"
- "less than 95%, accuracy can be significantly reduced.\n"
- "In real code such images can be achieved by cropping\n"
- "faces from images with face detection functionality.\n"
- TEXT_RESET);
- while (-1 == input_string(
- "Input file name with the face to be recognized:",
- 1024,
- &(in_file_name)))
- {
- printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n");
- }
-
- err = load_mv_source_from_file(in_file_name, source);
-
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- free(in_file_name);
-
- const int err2 = mv_destroy_source(source);
- if (MEDIA_VISION_ERROR_NONE != err2)
- {
- printf(TEXT_RED
- "ERROR: Errors were occurred during destroying the source!!! code: %i"
- TEXT_RESET "\n", err2);
- return err2;
- }
-
- return err;
- }
-
- err = mv_face_recognize(source, model, NULL, NULL, on_face_recognized_cb, NULL);
-
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- free(in_file_name);
-
- printf(TEXT_RED
- "ERROR: Errors were occurred during face recognition!!! code: %i"
- TEXT_RESET "\n", err);
-
- int err2 = mv_destroy_source(source);
- if (MEDIA_VISION_ERROR_NONE != err2)
- {
- printf(TEXT_RED
- "ERROR: Errors were occurred during destroying the source!!! code: %i"
- TEXT_RESET "\n", err2);
- return err2;
- }
-
- return err;
- }
-
- free(in_file_name);
-
- return err;
+ char *in_file_name = NULL;
+
+ mv_source_h source = NULL;
+ int err = mv_create_source(&source);
+
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED
+ "ERROR: Errors were occurred during creating the source!!! code: %i"
+ TEXT_RESET "\n", err);
+ return err;
+ }
+
+ printf(TEXT_GREEN "HINT:" TEXT_RESET "\n"
+ TEXT_YELLOW "To achieve appropriate accuracy of recognition,\n"
+ "choose images with only faces. I.e. face has to cover\n"
+ "approximately 95-100%% of the image (passport photos\n"
+ "are the best example :)). Note that if this value is\n"
+ "less than 95%, accuracy can be significantly reduced.\n"
+ "In real code such images can be achieved by cropping\n"
+ "faces from images with face detection functionality.\n"
+ TEXT_RESET);
+ while (-1 == input_string(
+ "Input file name with the face to be recognized:",
+ 1024,
+ &(in_file_name))) {
+ printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n");
+ }
+
+ err = load_mv_source_from_file(in_file_name, source);
+
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ free(in_file_name);
+
+ const int err2 = mv_destroy_source(source);
+ if (MEDIA_VISION_ERROR_NONE != err2) {
+ printf(TEXT_RED
+ "ERROR: Errors were occurred during destroying the source!!! code: %i"
+ TEXT_RESET "\n", err2);
+ return err2;
+ }
+
+ return err;
+ }
+
+ err = mv_face_recognize(source, model, NULL, NULL, on_face_recognized_cb, NULL);
+
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ free(in_file_name);
+
+ printf(TEXT_RED
+ "ERROR: Errors were occurred during face recognition!!! code: %i"
+ TEXT_RESET "\n", err);
+
+ int err2 = mv_destroy_source(source);
+ if (MEDIA_VISION_ERROR_NONE != err2) {
+ printf(TEXT_RED
+ "ERROR: Errors were occurred during destroying the source!!! code: %i"
+ TEXT_RESET "\n", err2);
+ return err2;
+ }
+
+ return err;
+ }
+
+ free(in_file_name);
+
+ return err;
}
int add_single_example(
- mv_face_recognition_model_h model, const char *in_file_name,
- mv_rectangle_s *roi, int *face_label)
+ mv_face_recognition_model_h model, const char *in_file_name,
+ mv_rectangle_s *roi, int *face_label)
{
- mv_source_h source;
- int err = mv_create_source(&source);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED
- "ERROR: Errors were occurred during creating the source!!! code: %i"
- TEXT_RESET "\n", err);
-
- return err;
- }
-
- err = load_mv_source_from_file(in_file_name, source);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- const int err2 = mv_destroy_source(source);
- if (MEDIA_VISION_ERROR_NONE != err2)
- {
- printf(TEXT_RED
- "ERROR: Errors were occurred during destroying the source!!! code: %i"
- TEXT_RESET "\n", err2);
- return err2;
- }
-
- return err;
- }
-
- if (NULL != roi && !show_confirm_dialog("Do you want to use full image?"))
- {
- printf(TEXT_YELLOW "Specify the ROI as rectangle where face is located.\n"
- "Use negative values if you want to check correctness\n"
- "of error handling.\n"
- TEXT_RESET);
-
- while (-1 == input_int("Specify top left ROI x coordinate:",
- INT_MIN, INT_MAX, &(roi->point.x)))
- {
- printf("Incorrect input! Try again.\n");
- }
-
- while (-1 == input_int("Specify top left ROI y coordinate:",
- INT_MIN, INT_MAX, &(roi->point.y)))
- {
- printf("Incorrect input! Try again.\n");
- }
-
- while (-1 == input_int("Specify top left ROI width:",
- INT_MIN, INT_MAX, &(roi->width)))
- {
- printf("Incorrect input! Try again.\n");
- }
-
- while (-1 == input_int("Specify top left ROI height:",
- INT_MIN, INT_MAX, &(roi->height)))
- {
- printf("Incorrect input! Try again.\n");
- }
- }
- else
- {
- roi = NULL;
- }
-
- int real_label = 0;
- if (NULL == face_label)
- {
- printf(TEXT_YELLOW "Also, you has to assign label for the face in the\n"
- "image. You has assign the same labels for the same\n"
- "persons. For example, always assign label '1' for\n"
- "images with Alice's face; label '2' for Bob's faces,\n"
- "'3' for Ann's faces and so on...\n"
- TEXT_RESET);
-
- face_label = &real_label;
- while (-1 == input_int("Specify label as integer:",
- MIN_ALLOWED_LABEL,
- MAX_ALLOWED_LABEL,
- face_label))
- {
- printf("Incorrect input! You can use %i-%i labels only. Try again.\n",
- MIN_ALLOWED_LABEL,
- MAX_ALLOWED_LABEL);
- }
- }
-
- err = mv_face_recognition_model_add(source, model, roi, *face_label);
-
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED
- "ERROR: Errors were occurred during adding the sample image from "
- "[%s] to the face recognition model!!! code: %i"
- TEXT_RESET "\n", in_file_name, err);
- }
-
- const int err2 = mv_destroy_source(source);
- if (MEDIA_VISION_ERROR_NONE != err2)
- {
- printf(TEXT_RED
- "ERROR: Errors were occurred during destroying the source!!! code: %i"
- TEXT_RESET "\n", err2);
- }
-
- return err;
+ mv_source_h source;
+ int err = mv_create_source(&source);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED
+ "ERROR: Errors were occurred during creating the source!!! code: %i"
+ TEXT_RESET "\n", err);
+
+ return err;
+ }
+
+ err = load_mv_source_from_file(in_file_name, source);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ const int err2 = mv_destroy_source(source);
+ if (MEDIA_VISION_ERROR_NONE != err2) {
+ printf(TEXT_RED
+ "ERROR: Errors were occurred during destroying the source!!! code: %i"
+ TEXT_RESET "\n", err2);
+ return err2;
+ }
+
+ return err;
+ }
+
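+ /* Optionally ask for a face ROI; if the full image is used, roi is reset to NULL */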
+ if (NULL != roi && !show_confirm_dialog("Do you want to use full image?")) {
+ printf(TEXT_YELLOW "Specify the ROI as rectangle where face is located.\n"
+ "Use negative values if you want to check correctness\n"
+ "of error handling.\n"
+ TEXT_RESET);
+
+ while (-1 == input_int("Specify top left ROI x coordinate:",
+ INT_MIN, INT_MAX, &(roi->point.x))) {
+ printf("Incorrect input! Try again.\n");
+ }
+
+ while (-1 == input_int("Specify top left ROI y coordinate:",
+ INT_MIN, INT_MAX, &(roi->point.y))) {
+ printf("Incorrect input! Try again.\n");
+ }
+
+ while (-1 == input_int("Specify top left ROI width:",
+ INT_MIN, INT_MAX, &(roi->width))) {
+ printf("Incorrect input! Try again.\n");
+ }
+
+ while (-1 == input_int("Specify top left ROI height:",
+ INT_MIN, INT_MAX, &(roi->height))) {
+ printf("Incorrect input! Try again.\n");
+ }
+ } else {
+ roi = NULL;
+ }
+
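+ /* If the caller did not supply a label, ask the user for one (the same label must be reused for the same person) */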
+ int real_label = 0;
+ if (NULL == face_label) {
+ printf(TEXT_YELLOW "Also, you has to assign label for the face in the\n"
+ "image. You has assign the same labels for the same\n"
+ "persons. For example, always assign label '1' for\n"
+ "images with Alice's face; label '2' for Bob's faces,\n"
+ "'3' for Ann's faces and so on...\n"
+ TEXT_RESET);
+
+ face_label = &real_label;
+ while (-1 == input_int("Specify label as integer:",
+ MIN_ALLOWED_LABEL,
+ MAX_ALLOWED_LABEL,
+ face_label)) {
+ printf("Incorrect input! You can use %i-%i labels only. Try again.\n",
+ MIN_ALLOWED_LABEL,
+ MAX_ALLOWED_LABEL);
+ }
+ }
+
+ err = mv_face_recognition_model_add(source, model, roi, *face_label);
+
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED
+ "ERROR: Errors were occurred during adding the sample image from "
+ "[%s] to the face recognition model!!! code: %i"
+ TEXT_RESET "\n", in_file_name, err);
+ }
+
+ const int err2 = mv_destroy_source(source);
+ if (MEDIA_VISION_ERROR_NONE != err2) {
+ printf(TEXT_RED
+ "ERROR: Errors were occurred during destroying the source!!! code: %i"
+ TEXT_RESET "\n", err2);
+ }
+
+ return err;
}
int perform_mv_face_recognition_model_add_face_example(
- mv_face_recognition_model_h model,
- notification_type_e *notification_type)
+ mv_face_recognition_model_h model,
+ notification_type_e *notification_type)
{
- char *in_file_name = NULL;
-
- printf(TEXT_GREEN "HINT:" TEXT_RESET "\n"
- TEXT_YELLOW "To achieve appropriate accuracy of recognition,\n"
- "choose images with only faces. I.e. face has to cover\n"
- "approximately 95-100%% of the image (passport photos\n"
- "are the best example :)). Note that if this value is\n"
- "less than 95%, accuracy can be significantly reduced.\n"
- "In real code such images can be achieved by cropping\n"
- "faces from images with face detection functionality.\n"
- TEXT_RESET);
-
- const bool from_dir = show_confirm_dialog("Do add images from directory?");
- const char *input_path_msg =
- from_dir ? "Input path to the directory with the face images to be "
- "loaded to the model:"
- : "Input file name with the face to be loaded to the model:";
-
- while (-1 == input_string(input_path_msg, 1024, &(in_file_name)))
- {
- printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n");
- }
-
- int err = MEDIA_VISION_ERROR_NONE;
-
- if (from_dir)
- {
- *notification_type = FAIL_OR_DONE;
- int face_label = 0;
- while (-1 == input_int("Specify label as integer:",
- MIN_ALLOWED_LABEL,
- MAX_ALLOWED_LABEL,
- &face_label))
- {
- printf("Incorrect input! You can use %i-%i labels only. Try again.\n",
- MIN_ALLOWED_LABEL,
- MAX_ALLOWED_LABEL);
- }
-
- DIR *dir;
- struct dirent *ent;
- if ((dir = opendir(in_file_name)) != NULL)
- {
- char file_path[1024] = "";
-
- // Traverses all the files and directories within source directory
- while ((ent = readdir(dir)) != NULL)
- {
- // Determine current entry name
- const char *file_name = ent->d_name;
-
- // If current entry is directory, or hidden object, skip the step:
- if (file_name[0] == '.')
- {
- continue;
- }
-
- sprintf(file_path, "%s/%s", in_file_name, file_name);
- err = add_single_example(model, file_path, NULL, &face_label);
-
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED "Failed to add example from %s. "
- "Error code: %i\n" TEXT_RESET,
- file_path, err);
- }
- else
- {
- printf(TEXT_GREEN "Example labeled [%i] added from " TEXT_RESET
- TEXT_YELLOW "%s\n" TEXT_RESET, face_label, file_path);
- }
- }
-
- closedir(dir);
- }
- else
- {
- printf(TEXT_RED "Can't read from specified directory (%s)\n"
- TEXT_RESET, in_file_name);
- }
- }
- else
- {
- *notification_type = FAIL_OR_SUCCESSS;
- mv_rectangle_s roi;
- err = add_single_example(model, in_file_name, &roi, NULL);
- }
-
- free(in_file_name);
-
- return err;
+ char *in_file_name = NULL;
+
+ printf(TEXT_GREEN "HINT:" TEXT_RESET "\n"
+ TEXT_YELLOW "To achieve appropriate accuracy of recognition,\n"
+ "choose images with only faces. I.e. face has to cover\n"
+ "approximately 95-100%% of the image (passport photos\n"
+ "are the best example :)). Note that if this value is\n"
+ "less than 95%, accuracy can be significantly reduced.\n"
+ "In real code such images can be achieved by cropping\n"
+ "faces from images with face detection functionality.\n"
+ TEXT_RESET);
+
+ const bool from_dir = show_confirm_dialog("Do add images from directory?");
+ const char *input_path_msg =
+ from_dir ? "Input path to the directory with the face images to be "
+ "loaded to the model:"
+ : "Input file name with the face to be loaded to the model:";
+
+ while (-1 == input_string(input_path_msg, 1024, &(in_file_name)))
+ printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n");
+
+ int err = MEDIA_VISION_ERROR_NONE;
+
+ if (from_dir) {
+ *notification_type = FAIL_OR_DONE;
+ int face_label = 0;
+ while (-1 == input_int("Specify label as integer:",
+ MIN_ALLOWED_LABEL,
+ MAX_ALLOWED_LABEL,
+ &face_label)) {
+ printf("Incorrect input! You can use %i-%i labels only. Try again.\n",
+ MIN_ALLOWED_LABEL,
+ MAX_ALLOWED_LABEL);
+ }
+
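+ /* Add every visible file in the directory as an example under the chosen label */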
+ DIR *dir;
+ struct dirent *ent;
+ if ((dir = opendir(in_file_name)) != NULL) {
+ char file_path[1024] = "";
+
+ /* Traverses all the files and directories within source directory */
+ while ((ent = readdir(dir)) != NULL) {
+ /* Determine current entry name */
+ const char *file_name = ent->d_name;
+
+ /* If current entry is directory, or hidden object, skip the step: */
+ if (file_name[0] == '.')
+ continue;
+
+ sprintf(file_path, "%s/%s", in_file_name, file_name);
+ err = add_single_example(model, file_path, NULL, &face_label);
+
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED "Failed to add example from %s. "
+ "Error code: %i\n" TEXT_RESET,
+ file_path, err);
+ } else {
+ printf(TEXT_GREEN "Example labeled [%i] added from " TEXT_RESET
+ TEXT_YELLOW "%s\n" TEXT_RESET, face_label, file_path);
+ }
+ }
+
+ closedir(dir);
+ } else {
+ printf(TEXT_RED "Can't read from specified directory (%s)\n"
+ TEXT_RESET, in_file_name);
+ }
+ } else {
+ *notification_type = FAIL_OR_SUCCESSS;
+ mv_rectangle_s roi;
+ err = add_single_example(model, in_file_name, &roi, NULL);
+ }
+
+ free(in_file_name);
+
+ return err;
}
int perform_mv_face_recognition_model_reset_face_examples(
- mv_face_recognition_model_h model,
- bool full_reset)
+ mv_face_recognition_model_h model,
+ bool full_reset)
{
- printf(TEXT_GREEN "HINT:" TEXT_RESET "\n"
- TEXT_YELLOW "Reset of the examples will affect only examples has\n"
- "been collected via mv_face_recognition_model_add()\n"
- "function calls (i.e. through 'Add image example' menu\n"
- "item). Previously learned model will be not affected,\n"
- "so it is possible to recognize faces with this model\n"
- "after examples reset. Reset of the examples can be\n"
- "useful to erase a class of faces (i.e. all examples\n"
- "related to this class) before learning the model.\n"
- "Or, if it is needed to reset all collected previously\n"
- "examples as an alternative to the creating the new\n"
- "model.\n"
- TEXT_RESET);
-
- int err = MEDIA_VISION_ERROR_NONE;
-
- if (full_reset)
- {
- err = mv_face_recognition_model_reset(model, NULL);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED
- "ERROR: Errors were occurred during reset of all examples!!!"
- " code: %i" TEXT_RESET "\n", err);
- return err;
- }
- }
- else
- {
- int reset_label = 0;
-
- while (-1 == input_int("Specify label for the examples to be reset:",
- MIN_ALLOWED_LABEL,
- MAX_ALLOWED_LABEL,
- &reset_label))
- {
- printf("Incorrect input! You can use %i-%i labels only. Try again.\n",
- MIN_ALLOWED_LABEL,
- MAX_ALLOWED_LABEL);
- }
-
- err = mv_face_recognition_model_reset(model, &reset_label);
-
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED
- "ERROR: Errors were occurred during reset of examples labeled"
- " with %i!!! code: %i" TEXT_RESET "\n", reset_label, err);
- return err;
- }
- }
-
- return err;
+ printf(TEXT_GREEN "HINT:" TEXT_RESET "\n"
+ TEXT_YELLOW "Reset of the examples will affect only examples has\n"
+ "been collected via mv_face_recognition_model_add()\n"
+ "function calls (i.e. through 'Add image example' menu\n"
+ "item). Previously learned model will be not affected,\n"
+ "so it is possible to recognize faces with this model\n"
+ "after examples reset. Reset of the examples can be\n"
+ "useful to erase a class of faces (i.e. all examples\n"
+ "related to this class) before learning the model.\n"
+ "Or, if it is needed to reset all collected previously\n"
+ "examples as an alternative to the creating the new\n"
+ "model.\n"
+ TEXT_RESET);
+
+ int err = MEDIA_VISION_ERROR_NONE;
+
+ if (full_reset) {
+ err = mv_face_recognition_model_reset(model, NULL);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED
+ "ERROR: Errors were occurred during reset of all examples!!!"
+ " code: %i" TEXT_RESET "\n", err);
+ return err;
+ }
+ } else {
+ int reset_label = 0;
+
+ while (-1 == input_int("Specify label for the examples to be reset:",
+ MIN_ALLOWED_LABEL,
+ MAX_ALLOWED_LABEL,
+ &reset_label)) {
+ printf("Incorrect input! You can use %i-%i labels only. Try again.\n",
+ MIN_ALLOWED_LABEL,
+ MAX_ALLOWED_LABEL);
+ }
+
+ err = mv_face_recognition_model_reset(model, &reset_label);
+
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED
+ "ERROR: Errors were occurred during reset of examples labeled"
+ " with %i!!! code: %i" TEXT_RESET "\n", reset_label, err);
+ return err;
+ }
+ }
+
+ return err;
}
int perform_mv_face_recognition_model_save(mv_face_recognition_model_h model)
{
- char *out_file_name = NULL;
+ char *out_file_name = NULL;
- while (input_string("Input file name to save the model:",
- 1024, &(out_file_name)) == -1)
- {
- printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n");
- }
+ while (input_string("Input file name to save the model:",
+ 1024, &(out_file_name)) == -1) {
+ printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n");
+ }
- const int err = mv_face_recognition_model_save(out_file_name, model);
+ const int err = mv_face_recognition_model_save(out_file_name, model);
- free(out_file_name);
+ free(out_file_name);
- return err;
+ return err;
}
int perform_mv_face_recognition_model_load(mv_face_recognition_model_h *model)
{
- char *in_file_name = NULL;
+ char *in_file_name = NULL;
- while (input_string("Input file name to load model from:",
- 1024, &(in_file_name)) == -1)
- {
- printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n");
- }
+ while (input_string("Input file name to load model from:",
+ 1024, &(in_file_name)) == -1) {
+ printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n");
+ }
- const int err = mv_face_recognition_model_load(in_file_name,model);
+ const int err = mv_face_recognition_model_load(in_file_name, model);
- free(in_file_name);
+ free(in_file_name);
- return err;
+ return err;
}
int perform_mv_face_recognition_model_clone(
- mv_face_recognition_model_h model_to_clone)
+ mv_face_recognition_model_h model_to_clone)
{
- int err = MEDIA_VISION_ERROR_NONE;
-
- mv_face_recognition_model_h cloned_model = NULL;
-
- printf(TEXT_GREEN "Perform clone of the recognition model..."
- TEXT_RESET "\n");
-
- err = mv_face_recognition_model_clone(model_to_clone, &cloned_model);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED "Errors were occurred during model clone. Error code %i"
- TEXT_RESET "\n", err);
- return err;
- }
-
- printf(TEXT_YELLOW "Model cloning is done." TEXT_RESET "\n");
-
- if (show_confirm_dialog("Save " TEXT_YELLOW "source model" TEXT_RESET
- " to file?"))
- {
- const int serr = perform_mv_face_recognition_model_save(model_to_clone);
- if (MEDIA_VISION_ERROR_NONE != serr)
- {
- printf(TEXT_RED
- "Errors were occurred when trying to save "
- "source model to file. Error code %i" TEXT_RESET "\n", serr);
- }
- }
-
- if (show_confirm_dialog("Save " TEXT_YELLOW "destination model" TEXT_RESET
- " to file?"))
- {
- const int serr = perform_mv_face_recognition_model_save(cloned_model);
- if (MEDIA_VISION_ERROR_NONE != serr)
- {
- printf(TEXT_RED
- "Errors were occurred when trying to save destination model "
- "to file. Error code %i" TEXT_RESET "\n", serr);
- }
- }
-
- if (cloned_model)
- {
- const int dest_err = mv_face_recognition_model_destroy(cloned_model);
- if (MEDIA_VISION_ERROR_NONE != dest_err)
- {
- printf(TEXT_RED
- "Errors were occurred when destroying destination model ."
- "Error code %i" TEXT_RESET "\n", dest_err);
- }
- }
-
- return err;
+ int err = MEDIA_VISION_ERROR_NONE;
+
+ mv_face_recognition_model_h cloned_model = NULL;
+
+ printf(TEXT_GREEN "Perform clone of the recognition model..."
+ TEXT_RESET "\n");
+
+ err = mv_face_recognition_model_clone(model_to_clone, &cloned_model);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED "Errors were occurred during model clone. Error code %i"
+ TEXT_RESET "\n", err);
+ return err;
+ }
+
+ printf(TEXT_YELLOW "Model cloning is done." TEXT_RESET "\n");
+
+ if (show_confirm_dialog("Save " TEXT_YELLOW "source model" TEXT_RESET
+ " to file?")) {
+ const int serr = perform_mv_face_recognition_model_save(model_to_clone);
+ if (MEDIA_VISION_ERROR_NONE != serr) {
+ printf(TEXT_RED
+ "Errors were occurred when trying to save "
+ "source model to file. Error code %i" TEXT_RESET "\n", serr);
+ }
+ }
+
+ if (show_confirm_dialog("Save " TEXT_YELLOW "destination model" TEXT_RESET
+ " to file?")) {
+ const int serr = perform_mv_face_recognition_model_save(cloned_model);
+ if (MEDIA_VISION_ERROR_NONE != serr) {
+ printf(TEXT_RED
+ "Errors were occurred when trying to save destination model "
+ "to file. Error code %i" TEXT_RESET "\n", serr);
+ }
+ }
+
+ if (cloned_model) {
+ const int dest_err = mv_face_recognition_model_destroy(cloned_model);
+ if (MEDIA_VISION_ERROR_NONE != dest_err) {
+ printf(TEXT_RED
+ "Errors were occurred when destroying destination model ."
+ "Error code %i" TEXT_RESET "\n", dest_err);
+ }
+ }
+
+ return err;
}
int perform_mv_face_recognition_model_learn(mv_face_recognition_model_h model)
{
- printf(TEXT_YELLOW "Learning the model has to be performed after\n"
- "adding some amount of examples to the model.\n"
- "If you learn without examples, you will get useless\n"
- "model, which will be unavailable to recognize. Anyway,\n"
- "you can add examples and launch this method again to\n"
- "get the appropriate recognition model suitable for\n"
- "recognition."
- TEXT_RESET "\n");
-
- printf(TEXT_GREEN "Start learning process..." TEXT_RESET "\n");
-
- const int err = mv_face_recognition_model_learn(NULL, model);
-
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED "Learning the model failed. Error code: %i. "
- "But you still can test with this model.\n"
- TEXT_RESET "\n", err);
- }
- else
- {
- printf(TEXT_YELLOW "Recognition model has been learned."
- TEXT_RESET "\n");
- }
-
- return err;
+ printf(TEXT_YELLOW "Learning the model has to be performed after\n"
+ "adding some amount of examples to the model.\n"
+ "If you learn without examples, you will get useless\n"
+ "model, which will be unavailable to recognize. Anyway,\n"
+ "you can add examples and launch this method again to\n"
+ "get the appropriate recognition model suitable for\n"
+ "recognition."
+ TEXT_RESET "\n");
+
+ printf(TEXT_GREEN "Start learning process..." TEXT_RESET "\n");
+
+ const int err = mv_face_recognition_model_learn(NULL, model);
+
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED "Learning the model failed. Error code: %i. "
+ "But you still can test with this model.\n"
+ TEXT_RESET "\n", err);
+ } else {
+ printf(TEXT_YELLOW "Recognition model has been learned."
+ TEXT_RESET "\n");
+ }
+
+ return err;
}
int perform_mv_face_recognition_model_query_labels(mv_face_recognition_model_h model)
{
- int *learned_labels = NULL;
- int learned_labels_n = 0;
+ int *learned_labels = NULL;
+ int learned_labels_n = 0;
- const int err = mv_face_recognition_model_query_labels(model, &learned_labels, &learned_labels_n);
+ const int err = mv_face_recognition_model_query_labels(model, &learned_labels, &learned_labels_n);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- free(learned_labels);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ free(learned_labels);
- return err;
- }
+ return err;
+ }
- int i = 0;
- printf(TEXT_YELLOW "Recognition model had been learned for the following labels: "
- TEXT_RESET "\n" TEXT_GREEN);
- for (i = 0; i < learned_labels_n; ++i)
- {
- printf("%i, ", learned_labels[i]);
- }
- printf(TEXT_RESET "\n");
+ int i = 0;
+ printf(TEXT_YELLOW "Recognition model had been learned for the following labels: "
+ TEXT_RESET "\n" TEXT_GREEN);
+ for (i = 0; i < learned_labels_n; ++i)
+ printf("%i, ", learned_labels[i]);
- free(learned_labels);
+ printf(TEXT_RESET "\n");
- return MEDIA_VISION_ERROR_NONE;
+ free(learned_labels);
+
+ return MEDIA_VISION_ERROR_NONE;
}
static int TP = 0;
@@ -859,777 +770,683 @@ static int FN = 0;
static double THRESHOLD = 0.75;
void evaluation_cb(
- mv_source_h source,
- mv_face_recognition_model_h recognition_model,
- mv_engine_config_h engine_cfg,
- mv_rectangle_s *face_location,
- const int *face_label,
- double confidence,
- void *user_data)
+ mv_source_h source,
+ mv_face_recognition_model_h recognition_model,
+ mv_engine_config_h engine_cfg,
+ mv_rectangle_s *face_location,
+ const int *face_label,
+ double confidence,
+ void *user_data)
{
- if (NULL != user_data)
- {
- const int real_label = *((int*)user_data);
- const int rec_label = (NULL != face_label ? *face_label : -1);
- if (real_label == -1)
- {
- confidence >= THRESHOLD ? ++FP : ++TN;
- }
- else if (real_label == rec_label)
- {
- confidence >= THRESHOLD ? ++TP : ++FN;
- }
- else
- {
- if (confidence >= THRESHOLD) { ++FP; }
- ++FN;
- }
- }
+ if (NULL != user_data) {
+ const int real_label = *((int*)user_data);
+ const int rec_label = (NULL != face_label ? *face_label : -1);
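+ /* real_label == -1 marks test images the model was not trained for: */
+ /* a confident recognition of them counts as a false positive, a rejection as a true negative */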
+ if (real_label == -1) {
+ confidence >= THRESHOLD ? ++FP : ++TN;
+ } else if (real_label == rec_label) {
+ confidence >= THRESHOLD ? ++TP : ++FN;
+ } else {
+ if (confidence >= THRESHOLD)
+ ++FP;
+
+ ++FN;
+ }
+ }
}
int perform_model_evaluation(mv_face_recognition_model_h model)
{
- int *learned_labels = NULL;
- int learned_labels_n = 0;
-
- mv_face_recognition_model_query_labels(model, &learned_labels, &learned_labels_n);
-
- int i = 0;
-
- printf(TEXT_YELLOW "Evaluating model had been learned for the following labels: "
- TEXT_RESET "\n" TEXT_GREEN);
- for (i = 0; i < learned_labels_n; ++i)
- {
- printf("%i, ", learned_labels[i]);
- }
- printf(TEXT_RESET "\n");
-
- // 100 directories are allowed:
- const int max_dir_allowed = 100;
- char (*directories)[1024] = malloc(sizeof *directories * max_dir_allowed);
- int labels[max_dir_allowed];
- int unique_checks[MAX_ALLOWED_LABEL + 1];
- for (i = 0; i < MAX_ALLOWED_LABEL + 1; ++i)
- {
- unique_checks[i] = 0;
- }
-
- int dir_n = 0;
- int label_count = 0;
- while (show_confirm_dialog("Add test images directory?") &&
- dir_n < max_dir_allowed)
- {
- char *in_file_name = NULL;
- while (-1 == input_string("Specify path to the test images directory:", 1024, &(in_file_name)))
- {
- printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n");
- }
-
- DIR *dir;
- if ((dir = opendir(in_file_name)) == NULL)
- {
- printf(TEXT_RED "Incorrect input! Directory %s can't be read.\n"
- TEXT_RESET, in_file_name);
- free(in_file_name);
- in_file_name = NULL;
- continue;
- }
- else
- {
- closedir(dir);
- }
-
- int face_label = 0;
- if (-1 == input_int("Specify label as integer:",
- MIN_ALLOWED_LABEL,
- MAX_ALLOWED_LABEL,
- &face_label))
- {
- printf(TEXT_RED "Incorrect input! You can use %i-%i labels only.\n"
- TEXT_RESET,
- MIN_ALLOWED_LABEL,
- MAX_ALLOWED_LABEL);
- free(in_file_name);
- in_file_name = NULL;
- continue;
- }
-
- bool known_label = false;
- for (i = 0; i < learned_labels_n; ++i)
- {
- if (learned_labels[i] == face_label)
- {
- known_label = true;
- break;
- }
- }
-
- if (!known_label)
- {
- printf(TEXT_YELLOW "Recognition model didn't learn with specified label.\n"
- "Images will be marked as unknown (-1)\n" TEXT_RESET);
- }
-
- labels[dir_n] = known_label ? face_label : -1;
- strcpy(directories[dir_n], in_file_name);
- label_count += (0 == unique_checks[face_label] ? 1 : 0);
- if (labels[dir_n] >= 0)
- {
- unique_checks[labels[dir_n]] += 1;
- }
-
- free(in_file_name);
-
- ++dir_n;
-
- printf(TEXT_GREEN "Current test set for %i unique labels:\n" TEXT_RESET, label_count);
- for (i = 0; i < dir_n; ++i)
- {
- printf(TEXT_YELLOW "Label %i: " TEXT_RESET "%s\n", labels[i], directories[i]);
- }
- }
-
- free(learned_labels);
-
- int rec_threshold = 0;
- while (-1 == input_int("Specify recognition confidence threshold (0-100%):", 0, 100, &rec_threshold))
- {
- printf(TEXT_RED "Incorrect input! You can use 0-100 values only." TEXT_RESET "\n");
- }
- THRESHOLD = (double) rec_threshold / 100.0;
-
- TP = 0;
- FP = 0;
- TN = 0;
- FN = 0;
-
- mv_source_h source = NULL;
- int err = mv_create_source(&source);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED
- "ERROR: Errors were occurred during creating the source!!! code: %i"
- TEXT_RESET "\n", err);
- return err;
- }
-
- for (i = 0; i < dir_n; ++i)
- {
- DIR *dir;
- struct dirent *ent;
- printf("Processing %s...\n", directories[i]);
- if ((dir = opendir(directories[i])) != NULL)
- {
- char file_path[1024] = "";
-
- // Traverses all the files and directories within source directory
- while ((ent = readdir(dir)) != NULL)
- {
- // Determine current entry name
- const char *file_name = ent->d_name;
-
- // If current entry is directory, or hidden object, skip the step:
- if (file_name[0] == '.')
- {
- continue;
- }
-
- sprintf(file_path, "%s/%s", directories[i], file_name);
- err = load_mv_source_from_file(file_path, source);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED "Failed to test on example from %s. "
- "Example will not affect the evaluation. "
- "Error code: %i.\n" TEXT_RESET,
- file_path, err);
- }
- else
- {
- err = mv_face_recognize(source, model, NULL, NULL, evaluation_cb, &(labels[i]));
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED "Failed to recognize on example from %s. "
- "Example will not affect the evaluation. "
- "Error code: %i\n" TEXT_RESET,
- file_path, err);
- }
- }
- }
-
- closedir(dir);
- }
- else
- {
- printf(TEXT_RED "Can't read from directory [%s]\n"
- TEXT_RESET, directories[i]);
- }
- }
-
- int err2 = mv_destroy_source(source);
- if (MEDIA_VISION_ERROR_NONE != err2)
- {
- if (MEDIA_VISION_ERROR_NONE != err2)
- {
- printf(TEXT_RED
- "ERROR: Errors were occurred during destroying the source!!! code: %i"
- TEXT_RESET "\n", err2);
- }
- }
-
- double accuracy = (TP + TN) / (double) (TP + FP + TN + FN);
- double precision = TP / (double) (TP + FP);
- double recall = TP / (double) (TP + FN);
- double f1 = 2 * precision * recall / (precision + recall);
-
- printf(TEXT_GREEN "Evaluation results:\n" TEXT_RESET);
- printf(TEXT_YELLOW "\tTRUE POSITIVE : " TEXT_RESET "%5i\n", TP);
- printf(TEXT_YELLOW "\tFALSE POSITIVE : " TEXT_RESET "%5i\n", FP);
- printf(TEXT_YELLOW "\tTRUE NEGATIVE : " TEXT_RESET "%5i\n", TN);
- printf(TEXT_YELLOW "\tFALSE NEGATIVE : " TEXT_RESET "%5i\n", FN);
- printf(TEXT_YELLOW "\tAccuracy : " TEXT_RESET "%f\n", accuracy);
- printf(TEXT_YELLOW "\tPrecision : " TEXT_RESET "%f\n", precision);
- printf(TEXT_YELLOW "\tRecall : " TEXT_RESET "%f\n", recall);
- printf(TEXT_YELLOW "\tF1 score : " TEXT_RESET "%f\n", f1);
-
- free(directories);
-
- return err;
+ int *learned_labels = NULL;
+ int learned_labels_n = 0;
+
+ mv_face_recognition_model_query_labels(model, &learned_labels, &learned_labels_n);
+
+ int i = 0;
+
+ printf(TEXT_YELLOW "Evaluating model had been learned for the following labels: "
+ TEXT_RESET "\n" TEXT_GREEN);
+ for (i = 0; i < learned_labels_n; ++i)
+ printf("%i, ", learned_labels[i]);
+
+ printf(TEXT_RESET "\n");
+
+ /* 100 directories are allowed: */
+ const int max_dir_allowed = 100;
+ char (*directories)[1024] = malloc(sizeof *directories * max_dir_allowed);
+ int labels[max_dir_allowed];
+ int unique_checks[MAX_ALLOWED_LABEL + 1];
+ for (i = 0; i < MAX_ALLOWED_LABEL + 1; ++i)
+ unique_checks[i] = 0;
+
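+ /* Collect up to max_dir_allowed test directories, each tagged with its expected label (-1 for faces unknown to the model) */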
+ int dir_n = 0;
+ int label_count = 0;
+ while (show_confirm_dialog("Add test images directory?") &&
+ dir_n < max_dir_allowed) {
+ char *in_file_name = NULL;
+ while (-1 == input_string("Specify path to the test images directory:", 1024, &(in_file_name)))
+ printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n");
+
+ DIR *dir;
+ if ((dir = opendir(in_file_name)) == NULL) {
+ printf(TEXT_RED "Incorrect input! Directory %s can't be read.\n"
+ TEXT_RESET, in_file_name);
+ free(in_file_name);
+ in_file_name = NULL;
+ continue;
+ } else {
+ closedir(dir);
+ }
+
+ int face_label = 0;
+ if (-1 == input_int("Specify label as integer:",
+ MIN_ALLOWED_LABEL,
+ MAX_ALLOWED_LABEL,
+ &face_label)) {
+ printf(TEXT_RED "Incorrect input! You can use %i-%i labels only.\n"
+ TEXT_RESET,
+ MIN_ALLOWED_LABEL,
+ MAX_ALLOWED_LABEL);
+ free(in_file_name);
+ in_file_name = NULL;
+ continue;
+ }
+
+ bool known_label = false;
+ for (i = 0; i < learned_labels_n; ++i) {
+ if (learned_labels[i] == face_label) {
+ known_label = true;
+ break;
+ }
+ }
+
+ if (!known_label) {
+ printf(TEXT_YELLOW "Recognition model didn't learn with specified label.\n"
+ "Images will be marked as unknown (-1)\n" TEXT_RESET);
+ }
+
+ labels[dir_n] = known_label ? face_label : -1;
+ strcpy(directories[dir_n], in_file_name);
+ label_count += (0 == unique_checks[face_label] ? 1 : 0);
+ if (labels[dir_n] >= 0)
+ unique_checks[labels[dir_n]] += 1;
+
+ free(in_file_name);
+
+ ++dir_n;
+
+ printf(TEXT_GREEN "Current test set for %i unique labels:\n" TEXT_RESET, label_count);
+ for (i = 0; i < dir_n; ++i)
+ printf(TEXT_YELLOW "Label %i: " TEXT_RESET "%s\n", labels[i], directories[i]);
+ }
+
+ free(learned_labels);
+
+ int rec_threshold = 0;
+ while (-1 == input_int("Specify recognition confidence threshold (0-100%):", 0, 100, &rec_threshold))
+ printf(TEXT_RED "Incorrect input! You can use 0-100 values only." TEXT_RESET "\n");
+
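+ /* Convert the entered percentage into the [0, 1] threshold compared against recognition confidence in evaluation_cb() */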
+ THRESHOLD = (double) rec_threshold / 100.0;
+
+ TP = 0;
+ FP = 0;
+ TN = 0;
+ FN = 0;
+
+ mv_source_h source = NULL;
+ int err = mv_create_source(&source);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED
+ "ERROR: Errors were occurred during creating the source!!! code: %i"
+ TEXT_RESET "\n", err);
+ return err;
+ }
+
+ for (i = 0; i < dir_n; ++i) {
+ DIR *dir;
+ struct dirent *ent;
+ printf("Processing %s...\n", directories[i]);
+ if ((dir = opendir(directories[i])) != NULL) {
+ char file_path[1024] = "";
+
+ /* Traverses all the files and directories within source directory */
+ while ((ent = readdir(dir)) != NULL) {
+ /* Determine current entry name */
+ const char *file_name = ent->d_name;
+
+ /* If current entry is directory, or hidden object, skip the step: */
+ if (file_name[0] == '.')
+ continue;
+
+ sprintf(file_path, "%s/%s", directories[i], file_name);
+ err = load_mv_source_from_file(file_path, source);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED "Failed to test on example from %s. "
+ "Example will not affect the evaluation. "
+ "Error code: %i.\n" TEXT_RESET,
+ file_path, err);
+ } else {
+ err = mv_face_recognize(source, model, NULL, NULL, evaluation_cb, &(labels[i]));
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED "Failed to recognize on example from %s. "
+ "Example will not affect the evaluation. "
+ "Error code: %i\n" TEXT_RESET,
+ file_path, err);
+ }
+ }
+ }
+
+ closedir(dir);
+ } else {
+ printf(TEXT_RED "Can't read from directory [%s]\n"
+ TEXT_RESET, directories[i]);
+ }
+ }
+
+ int err2 = mv_destroy_source(source);
+ if (MEDIA_VISION_ERROR_NONE != err2) {
+ printf(TEXT_RED
+ "ERROR: Errors were occurred during destroying the source!!! code: %i"
+ TEXT_RESET "\n", err2);
+ }
+
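+ /* Derive the standard confusion-matrix metrics from the counters accumulated in evaluation_cb() */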
+ double accuracy = (TP + TN) / (double) (TP + FP + TN + FN);
+ double precision = TP / (double) (TP + FP);
+ double recall = TP / (double) (TP + FN);
+ double f1 = 2 * precision * recall / (precision + recall);
+
+ printf(TEXT_GREEN "Evaluation results:\n" TEXT_RESET);
+ printf(TEXT_YELLOW "\tTRUE POSITIVE : " TEXT_RESET "%5i\n", TP);
+ printf(TEXT_YELLOW "\tFALSE POSITIVE : " TEXT_RESET "%5i\n", FP);
+ printf(TEXT_YELLOW "\tTRUE NEGATIVE : " TEXT_RESET "%5i\n", TN);
+ printf(TEXT_YELLOW "\tFALSE NEGATIVE : " TEXT_RESET "%5i\n", FN);
+ printf(TEXT_YELLOW "\tAccuracy : " TEXT_RESET "%f\n", accuracy);
+ printf(TEXT_YELLOW "\tPrecision : " TEXT_RESET "%f\n", precision);
+ printf(TEXT_YELLOW "\tRecall : " TEXT_RESET "%f\n", recall);
+ printf(TEXT_YELLOW "\tF1 score : " TEXT_RESET "%f\n", f1);
+
+ free(directories);
+
+ return err;
}
int perform_recognize()
{
- printf("\n" TEXT_YELLOW
- "Recognition model isn't now created.\n"
- "You may create it to perform positive \n"
- "testing, or don't create to check the \n"
- "functionality behaviour for uncreated model."
- TEXT_RESET
- "\n");
-
- int err = MEDIA_VISION_ERROR_NONE;
- mv_face_recognition_model_h recognition_model = NULL;
- const bool do_create = show_confirm_dialog("Do Create Recognition Model?");
- if (do_create)
- {
- printf(TEXT_GREEN "Creating recognition model..." TEXT_RESET "\n");
-
- err = mv_face_recognition_model_create(&recognition_model);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED "Creating the model failed. Error code: %i. "
- "But you still can test with uncreated model.\n"
- TEXT_RESET "\n", err);
- }
- else
- {
- printf(TEXT_YELLOW "Recognition model has been created."
- TEXT_RESET "\n");
- }
- }
-
- int sel_opt = 0;
- const int options[11] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 };
- const char *names[11] = { "Add image example",
- "Reset examples by id",
- "Reset all examples",
- "Clone the model",
- "Learn the model",
- "Show learned labels",
- "Save model to file",
- "Load model from file",
- "Recognize with model",
- "Evaluate the model",
- "Destroy model and exit" };
-
- while(!sel_opt)
- {
- sel_opt = show_menu("Select action:", options, names, 11);
- notification_type_e notification_type = FAIL_OR_SUCCESSS;
-
- switch (sel_opt)
- {
- case 1:
- err = perform_mv_face_recognition_model_add_face_example(recognition_model, &notification_type);
- break;
- case 2:
- err = perform_mv_face_recognition_model_reset_face_examples(recognition_model, false);
- break;
- case 3:
- err = perform_mv_face_recognition_model_reset_face_examples(recognition_model, true);
- break;
- case 4:
- err = perform_mv_face_recognition_model_clone(recognition_model);
- break;
- case 5:
- err = perform_mv_face_recognition_model_learn(recognition_model);
- break;
- case 6:
- err = perform_mv_face_recognition_model_query_labels(recognition_model);
- break;
- case 7:
- err = perform_mv_face_recognition_model_save(recognition_model);
- break;
- case 8:
- err = perform_mv_face_recognition_model_load(&recognition_model);
- break;
- case 9:
- err = perform_mv_face_recognize(recognition_model);
- break;
- case 10:
- err = perform_model_evaluation(recognition_model);
- break;
- case 11:
- if (do_create)
- {
- err = mv_face_recognition_model_destroy(recognition_model);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED
- "Error with code %i was occurred during destoy"
- TEXT_RESET "\n", err);
- }
-
- return err;
- }
- else
- {
- return MEDIA_VISION_ERROR_NONE;
- }
- default:
- sel_opt = 0;
- printf("ERROR: Incorrect option was selected.\n");
- continue;
- }
-
- print_action_result(names[sel_opt - 1], err, notification_type);
-
- sel_opt = 0;
- }
+ printf("\n" TEXT_YELLOW
+ "Recognition model isn't now created.\n"
+ "You may create it to perform positive \n"
+ "testing, or don't create to check the \n"
+ "functionality behaviour for uncreated model."
+ TEXT_RESET
+ "\n");
+
+ int err = MEDIA_VISION_ERROR_NONE;
+ mv_face_recognition_model_h recognition_model = NULL;
+ const bool do_create = show_confirm_dialog("Do Create Recognition Model?");
+ if (do_create) {
+ printf(TEXT_GREEN "Creating recognition model..." TEXT_RESET "\n");
+
+ err = mv_face_recognition_model_create(&recognition_model);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED "Creating the model failed. Error code: %i. "
+ "But you still can test with uncreated model.\n"
+ TEXT_RESET "\n", err);
+ } else {
+ printf(TEXT_YELLOW "Recognition model has been created."
+ TEXT_RESET "\n");
+ }
+ }
+
+ int sel_opt = 0;
+ const int options[11] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 };
+ const char *names[11] = { "Add image example",
+ "Reset examples by id",
+ "Reset all examples",
+ "Clone the model",
+ "Learn the model",
+ "Show learned labels",
+ "Save model to file",
+ "Load model from file",
+ "Recognize with model",
+ "Evaluate the model",
+ "Destroy model and exit" };
+
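+ /* Menu loop: run the selected action, report its result and show the menu again until 'Destroy model and exit' */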
+ while (!sel_opt) {
+ sel_opt = show_menu("Select action:", options, names, 11);
+ notification_type_e notification_type = FAIL_OR_SUCCESSS;
+
+ switch (sel_opt) {
+ case 1:
+ err = perform_mv_face_recognition_model_add_face_example(recognition_model, &notification_type);
+ break;
+ case 2:
+ err = perform_mv_face_recognition_model_reset_face_examples(recognition_model, false);
+ break;
+ case 3:
+ err = perform_mv_face_recognition_model_reset_face_examples(recognition_model, true);
+ break;
+ case 4:
+ err = perform_mv_face_recognition_model_clone(recognition_model);
+ break;
+ case 5:
+ err = perform_mv_face_recognition_model_learn(recognition_model);
+ break;
+ case 6:
+ err = perform_mv_face_recognition_model_query_labels(recognition_model);
+ break;
+ case 7:
+ err = perform_mv_face_recognition_model_save(recognition_model);
+ break;
+ case 8:
+ err = perform_mv_face_recognition_model_load(&recognition_model);
+ break;
+ case 9:
+ err = perform_mv_face_recognize(recognition_model);
+ break;
+ case 10:
+ err = perform_model_evaluation(recognition_model);
+ break;
+ case 11:
+ if (do_create) {
+ err = mv_face_recognition_model_destroy(recognition_model);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED
+ "Error with code %i was occurred during destoy"
+ TEXT_RESET "\n", err);
+ }
+
+ return err;
+ } else {
+ return MEDIA_VISION_ERROR_NONE;
+ }
+ default:
+ sel_opt = 0;
+ printf("ERROR: Incorrect option was selected.\n");
+ continue;
+ }
+
+ print_action_result(names[sel_opt - 1], err, notification_type);
+
+ sel_opt = 0;
+ }
}
int perform_mv_face_tracking_model_save(mv_face_tracking_model_h model)
{
- char *out_file_name = NULL;
+ char *out_file_name = NULL;
- while (input_string("Input file name to save the model:",
- 1024, &(out_file_name)) == -1)
- {
- printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n");
- }
+ while (input_string("Input file name to save the model:",
+ 1024, &(out_file_name)) == -1) {
+ printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n");
+ }
- const int err = mv_face_tracking_model_save(out_file_name, model);
+ const int err = mv_face_tracking_model_save(out_file_name, model);
- free(out_file_name);
+ free(out_file_name);
- return err;
+ return err;
}
int perform_mv_face_tracking_model_load(mv_face_tracking_model_h *model)
{
- char *in_file_name = NULL;
+ char *in_file_name = NULL;
- while (input_string("Input file name to load model from:",
- 1024, &(in_file_name)) == -1)
- {
- printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n");
- }
+ while (input_string("Input file name to load model from:",
+ 1024, &(in_file_name)) == -1) {
+ printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n");
+ }
- const int err = mv_face_tracking_model_load(in_file_name, model);
+ const int err = mv_face_tracking_model_load(in_file_name, model);
- free(in_file_name);
+ free(in_file_name);
- return err;
+ return err;
}
int perform_mv_face_tracking_model_clone(
- mv_face_tracking_model_h model_to_clone)
+ mv_face_tracking_model_h model_to_clone)
{
- int err = MEDIA_VISION_ERROR_NONE;
-
- mv_face_tracking_model_h cloned_model = NULL;
-
- printf(TEXT_GREEN "Perform clone of the tracking model..."
- TEXT_RESET "\n");
-
- err = mv_face_tracking_model_clone(model_to_clone, &cloned_model);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED "Errors were occurred during model clone. Error code %i"
- TEXT_RESET "\n", err);
- return err;
- }
-
- printf(TEXT_YELLOW "Model cloning is done." TEXT_RESET "\n");
-
- if (show_confirm_dialog("Save " TEXT_YELLOW "source model" TEXT_RESET
- " to file?"))
- {
- const int serr = perform_mv_face_tracking_model_save(model_to_clone);
- if (MEDIA_VISION_ERROR_NONE != serr)
- {
- printf(TEXT_RED
- "Errors were occurred when trying to save "
- "source model to file. Error code %i" TEXT_RESET "\n", serr);
- }
- }
-
- if (show_confirm_dialog("Save " TEXT_YELLOW "destination model" TEXT_RESET
- " to file?"))
- {
- const int serr = perform_mv_face_tracking_model_save(cloned_model);
- if (MEDIA_VISION_ERROR_NONE != serr)
- {
- printf(TEXT_RED
- "Errors were occurred when trying to save destination model "
- "to file. Error code %i" TEXT_RESET "\n", serr);
- }
- }
-
- if (cloned_model)
- {
- const int dest_err = mv_face_tracking_model_destroy(cloned_model);
- if (MEDIA_VISION_ERROR_NONE != dest_err)
- {
- printf(TEXT_RED
- "Errors were occurred when destroying destination model ."
- "Error code %i" TEXT_RESET "\n", dest_err);
- }
- }
-
- return err;
+ int err = MEDIA_VISION_ERROR_NONE;
+
+ mv_face_tracking_model_h cloned_model = NULL;
+
+ printf(TEXT_GREEN "Perform clone of the tracking model..."
+ TEXT_RESET "\n");
+
+ err = mv_face_tracking_model_clone(model_to_clone, &cloned_model);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED "Errors were occurred during model clone. Error code %i"
+ TEXT_RESET "\n", err);
+ return err;
+ }
+
+ printf(TEXT_YELLOW "Model cloning is done." TEXT_RESET "\n");
+
+ if (show_confirm_dialog("Save " TEXT_YELLOW "source model" TEXT_RESET
+ " to file?")) {
+ const int serr = perform_mv_face_tracking_model_save(model_to_clone);
+ if (MEDIA_VISION_ERROR_NONE != serr) {
+ printf(TEXT_RED
+ "Errors were occurred when trying to save "
+ "source model to file. Error code %i" TEXT_RESET "\n", serr);
+ }
+ }
+
+ if (show_confirm_dialog("Save " TEXT_YELLOW "destination model" TEXT_RESET
+ " to file?")) {
+ const int serr = perform_mv_face_tracking_model_save(cloned_model);
+ if (MEDIA_VISION_ERROR_NONE != serr) {
+ printf(TEXT_RED
+ "Errors were occurred when trying to save destination model "
+ "to file. Error code %i" TEXT_RESET "\n", serr);
+ }
+ }
+
+ if (cloned_model) {
+ const int dest_err = mv_face_tracking_model_destroy(cloned_model);
+ if (MEDIA_VISION_ERROR_NONE != dest_err) {
+ printf(TEXT_RED
+ "Errors were occurred when destroying destination model ."
+ "Error code %i" TEXT_RESET "\n", dest_err);
+ }
+ }
+
+ return err;
}
static volatile bool frame_read = false;
void video_1_sample_cb(
- char *buffer,
- unsigned int buffer_size,
- image_data_s image_data,
- void *user_data)
+ char *buffer,
+ unsigned int buffer_size,
+ image_data_s image_data,
+ void *user_data)
{
- if (!frame_read)
- {
- mv_source_h source = (mv_source_h)user_data;
-
- const int err = mv_source_fill_by_buffer(
- source,
- buffer,
- buffer_size,
- image_data.image_width,
- image_data.image_height,
- image_data.image_colorspace);
-
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED "ERROR: Errors were occurred during filling the "
- "source based on the video frame! Error code: %i"
- TEXT_RESET, err);
- }
-
- frame_read = true;
- }
+ if (!frame_read) {
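+ /* Only the first frame delivered by the reader is copied into the media vision source */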
+ mv_source_h source = (mv_source_h)user_data;
+
+ const int err = mv_source_fill_by_buffer(
+ source,
+ buffer,
+ buffer_size,
+ image_data.image_width,
+ image_data.image_height,
+ image_data.image_colorspace);
+
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED "ERROR: Errors were occurred during filling the "
+ "source based on the video frame! Error code: %i"
+ TEXT_RESET, err);
+ }
+
+ frame_read = true;
+ }
}
void face_detected_for_tracking_cb(
- mv_source_h source,
- mv_engine_config_h engine_cfg,
- mv_rectangle_s *faces_locations,
- int number_of_faces,
- void *user_data)
+ mv_source_h source,
+ mv_engine_config_h engine_cfg,
+ mv_rectangle_s *faces_locations,
+ int number_of_faces,
+ void *user_data)
{
- if (number_of_faces < 1)
- {
- printf(TEXT_RED "Unfortunatly, no faces were detected on the\n"
- "preparation frame. You has to specify bounding\n"
- "quadrangles for tracking without advices."
- TEXT_RESET "\n");
- return;
- }
-
- printf(TEXT_YELLOW "%i face(s) were detected at the preparation frame.\n"
- "Following list includes information on faces bounding\n"
- "boxes coordinates:"
- TEXT_RESET "\n", number_of_faces);
-
- int idx = 0;
- while (idx < number_of_faces)
- {
- printf(TEXT_MAGENTA "Face %i bounding box: " TEXT_RESET "\n", ++idx);
- printf(TEXT_CYAN "\tTop left point: x1: %4i; y1: %4i\n" TEXT_RESET,
- faces_locations[idx - 1].point.x,
- faces_locations[idx - 1].point.y);
- printf(TEXT_CYAN "\tTop right point: x2: %4i; y2: %4i\n" TEXT_RESET,
- faces_locations[idx - 1].point.x + faces_locations[idx - 1].width,
- faces_locations[idx - 1].point.y);
- printf(TEXT_CYAN "\tBottom right point: x3: %4i; y3: %4i\n" TEXT_RESET,
- faces_locations[idx - 1].point.x + faces_locations[idx - 1].width,
- faces_locations[idx - 1].point.y + faces_locations[idx - 1].height);
- printf(TEXT_CYAN "\tBottom right point: x4: %4i; y4: %4i\n" TEXT_RESET,
- faces_locations[idx - 1].point.x,
- faces_locations[idx - 1].point.y + faces_locations[idx - 1].height);
- }
+ if (number_of_faces < 1) {
+ printf(TEXT_RED "Unfortunatly, no faces were detected on the\n"
+ "preparation frame. You has to specify bounding\n"
+ "quadrangles for tracking without advices."
+ TEXT_RESET "\n");
+ return;
+ }
+
+ printf(TEXT_YELLOW "%i face(s) were detected at the preparation frame.\n"
+ "Following list includes information on faces bounding\n"
+ "boxes coordinates:"
+ TEXT_RESET "\n", number_of_faces);
+
+ int idx = 0;
+ while (idx < number_of_faces) {
+ printf(TEXT_MAGENTA "Face %i bounding box: " TEXT_RESET "\n", ++idx);
+ printf(TEXT_CYAN "\tTop left point: x1: %4i; y1: %4i\n" TEXT_RESET,
+ faces_locations[idx - 1].point.x,
+ faces_locations[idx - 1].point.y);
+ printf(TEXT_CYAN "\tTop right point: x2: %4i; y2: %4i\n" TEXT_RESET,
+ faces_locations[idx - 1].point.x + faces_locations[idx - 1].width,
+ faces_locations[idx - 1].point.y);
+ printf(TEXT_CYAN "\tBottom right point: x3: %4i; y3: %4i\n" TEXT_RESET,
+ faces_locations[idx - 1].point.x + faces_locations[idx - 1].width,
+ faces_locations[idx - 1].point.y + faces_locations[idx - 1].height);
+ printf(TEXT_CYAN "\tBottom right point: x4: %4i; y4: %4i\n" TEXT_RESET,
+ faces_locations[idx - 1].point.x,
+ faces_locations[idx - 1].point.y + faces_locations[idx - 1].height);
+ }
}
int load_source_from_first_video_frame(const char *video_file, mv_source_h source)
{
- mv_video_reader_h reader = NULL;
- int err = mv_create_video_reader(&reader);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED "ERROR: Errors were occurred during creating the video "
- "reader! Error code: %i\n" TEXT_RESET, err);
- return err;
- }
-
- err = mv_video_reader_set_new_sample_cb(
- reader,
- video_1_sample_cb,
- source);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED "ERROR: Errors were occurred during new sample "
- "callback set! Error code: %i\n" TEXT_RESET, err);
-
- const int err2 = mv_destroy_video_reader(reader);
- if (MEDIA_VISION_ERROR_NONE != err2)
- {
- printf(TEXT_RED "ERROR: Errors were occurred during video reader "
- "destroy! Error code: %i\n" TEXT_RESET, err);
- }
-
- return err;
- }
-
- frame_read = false;
- image_data_s video_info;
- unsigned int fps;
- err = mv_video_reader_load(reader, video_file, &video_info, &fps);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED "ERROR: Errors were occurred during loading the video "
- "by reader! Error code: %i\n" TEXT_RESET, err);
-
- const int err2 = mv_destroy_video_reader(reader);
- if (MEDIA_VISION_ERROR_NONE != err2)
- {
- printf(TEXT_RED "ERROR: Errors were occurred during video reader "
- "destroy! Error code: %i\n" TEXT_RESET, err);
- }
-
- return err;
- }
-
- //wait for the video reading thread
- while (true)
- {
- if (frame_read)
- {
- int err2 = mv_video_reader_stop(reader);
- if (MEDIA_VISION_ERROR_NONE != err2)
- {
- printf(TEXT_RED "ERROR: Errors were occurred during attempt to "
- "stop video reader! Error code: %i\n" TEXT_RESET, err2);
- }
-
- err2 = mv_destroy_video_reader(reader);
- if (MEDIA_VISION_ERROR_NONE != err2)
- {
- printf(TEXT_RED "ERROR: Errors were occurred during video "
- "reader destroy! Error code: %i\n" TEXT_RESET, err2);
- }
-
- break;
- }
- }
-
- return MEDIA_VISION_ERROR_NONE;
+ mv_video_reader_h reader = NULL;
+ int err = mv_create_video_reader(&reader);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED "ERROR: Errors were occurred during creating the video "
+ "reader! Error code: %i\n" TEXT_RESET, err);
+ return err;
+ }
+
+ err = mv_video_reader_set_new_sample_cb(
+ reader,
+ video_1_sample_cb,
+ source);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED "ERROR: Errors were occurred during new sample "
+ "callback set! Error code: %i\n" TEXT_RESET, err);
+
+ const int err2 = mv_destroy_video_reader(reader);
+ if (MEDIA_VISION_ERROR_NONE != err2) {
+ printf(TEXT_RED "ERROR: Errors were occurred during video reader "
+ "destroy! Error code: %i\n" TEXT_RESET, err);
+ }
+
+ return err;
+ }
+
+ frame_read = false;
+ image_data_s video_info;
+ unsigned int fps;
+ err = mv_video_reader_load(reader, video_file, &video_info, &fps);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED "ERROR: Errors were occurred during loading the video "
+ "by reader! Error code: %i\n" TEXT_RESET, err);
+
+ const int err2 = mv_destroy_video_reader(reader);
+ if (MEDIA_VISION_ERROR_NONE != err2) {
+ printf(TEXT_RED "ERROR: Errors were occurred during video reader "
+ "destroy! Error code: %i\n" TEXT_RESET, err);
+ }
+
+ return err;
+ }
+
+ /* wait for the video reading thread */
+ while (true) {
+ if (frame_read) {
+ int err2 = mv_video_reader_stop(reader);
+ if (MEDIA_VISION_ERROR_NONE != err2) {
+ printf(TEXT_RED "ERROR: Errors were occurred during attempt to "
+ "stop video reader! Error code: %i\n" TEXT_RESET, err2);
+ }
+
+ err2 = mv_destroy_video_reader(reader);
+ if (MEDIA_VISION_ERROR_NONE != err2) {
+ printf(TEXT_RED "ERROR: Errors were occurred during video "
+ "reader destroy! Error code: %i\n" TEXT_RESET, err2);
+ }
+
+ break;
+ }
+ }
+
+ return MEDIA_VISION_ERROR_NONE;
}
int perform_mv_face_tracking_model_prepare(mv_face_tracking_model_h model)
{
- printf(TEXT_YELLOW "Before any tracking session the tracking model\n"
- "preparation is required. Exception is the case when\n"
- "the next tracking session will be performed with the\n"
- "video which is the direct continuation of the video\n"
- "has been used at the previous tracking session.\n"
- "Preparation has to be done with the first frame of\n"
- "the video or first image from continuous image\n"
- "sequence for which next tracking session plan to be\n"
- "performed.\nTracking model preparation includes\n"
- "specifying the location of the face to be tracked on\n"
- "the first frame. Face tracking algorithm will try to\n"
- "grab the face image significant features and\n"
- "optionally will try to determine the background.\n"
- "Actually, preparation is model-dependent and may\n"
- "differs in respect to used tracking algorithm."
- TEXT_RESET "\n");
-
- int sel_opt = 0;
- const int options[2] = { 1, 2 };
- const char *names[2] = { "Prepare with the video file",
- "Prepare with the image file" };
-
- bool is_video = false;
-
- while(!sel_opt)
- {
- sel_opt = show_menu("Select action:", options, names, 2);
- switch (sel_opt)
- {
- case 1:
- is_video = true;
- break;
- case 2:
- is_video = false;
- break;
- default:
- sel_opt = 0;
- continue;
- }
- }
-
- mv_source_h preparation_frame = NULL;
- int err = mv_create_source(&preparation_frame);
-
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED
- "ERROR: Errors were occurred during creating the source!!! code: %i"
- TEXT_RESET "\n", err);
-
- return err;
- }
-
- char *init_frame_file_name = NULL;
- const char *prompt_str =
- (is_video ? "Input video file name to prepare the model:"
- : "Input image file name to prepare the model:");
-
- while (input_string(prompt_str, 1024, &(init_frame_file_name)) == -1)
- {
- printf(TEXT_RED "Incorrect input! Try again.\n" TEXT_RESET);
- }
-
- if (is_video)
- {
- err = load_source_from_first_video_frame(init_frame_file_name, preparation_frame);
- }
- else
- {
- err = load_mv_source_from_file(init_frame_file_name, preparation_frame);
- }
-
- free(init_frame_file_name);
-
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED "ERROR: Errors were occurred during preparation "
- "frame/image load! Error code: %i\n" TEXT_RESET, err);
-
- int err2 = mv_destroy_source(preparation_frame);
- if (MEDIA_VISION_ERROR_NONE != err2)
- {
- printf(TEXT_RED "ERROR: Errors were occurred during destroying the "
- "source! Error code: %i\n" TEXT_RESET, err2);
- }
-
- return err;
- }
-
- mv_engine_config_h eng_config = NULL;
- err = mv_create_engine_config(&eng_config);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED "ERROR: Errors were occurred during creating the "
- "engine config! Error code: %i\n" TEXT_RESET, err);
- }
- else
- {
- err = mv_engine_config_set_string_attribute(
- eng_config,
- MV_FACE_DETECTION_MODEL_FILE_PATH,
- "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt2.xml");
-
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED "ERROR: Errors were occurred during setting of the "
- "the 'MV_FACE_DETECTION_MODEL_FILE_PATH' attribute "
- "for engine configuration! Check media-vision-config.json "
- "file existence. Error code: %i" TEXT_RESET, err);
- }
- }
-
- err = mv_face_detect(
- preparation_frame,
- eng_config,
- face_detected_for_tracking_cb,
- NULL);
-
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED "ERROR: Errors were occurred during face detection! "
- "Error code: %i\n" TEXT_RESET, err);
-
- int err2 = mv_destroy_engine_config(eng_config);
- if (MEDIA_VISION_ERROR_NONE != err2)
- {
- printf(TEXT_RED "ERROR: Errors were occurred during destroying the "
- "engine configuration! Error code: %i\n" TEXT_RESET, err2);
- }
-
- return err;
- }
-
- mv_quadrangle_s roi;
-
- if (show_confirm_dialog("Do specify the face location?"))
- {
- printf(TEXT_YELLOW "Specify the coordinates of the quadrangle to be used\n"
- "for tracking model preparation:" TEXT_RESET "\n");
- int idx = 0;
- char str_prompt[100];
- while (idx < 4)
- {
- ++idx;
- sprintf(str_prompt, "Specify point %i x coordinate: x%i = ",
- idx - 1, idx);
- while (-1 == input_int(str_prompt, INT_MIN, INT_MAX,
- &(roi.points[idx - 1].x)))
- {
- printf("Incorrect input! Try again.\n");
- }
- sprintf(str_prompt, "Specify point %i y coordinate: y%i = ",
- idx - 1, idx);
- while (-1 == input_int(str_prompt, INT_MIN, INT_MAX,
- &(roi.points[idx - 1].y)))
- {
- printf("Incorrect input! Try again.\n");
- }
- }
-
- err = mv_face_tracking_model_prepare(
- model, eng_config, preparation_frame, &roi);
- }
- else
- {
- err = mv_face_tracking_model_prepare(
- model, eng_config, preparation_frame, NULL);
- }
-
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED "ERROR: Errors were occurred during the tracking model "
- "preparation! Error code: %i\n" TEXT_RESET, err);
- }
-
- const int err2 = mv_destroy_source(preparation_frame);
- if (MEDIA_VISION_ERROR_NONE != err2)
- {
- printf(TEXT_RED "ERROR: Errors were occurred during destroying the "
- "source! Error code: %i\n" TEXT_RESET, err2);
- }
-
- return err;
+ printf(TEXT_YELLOW "Before any tracking session the tracking model\n"
+ "preparation is required. Exception is the case when\n"
+ "the next tracking session will be performed with the\n"
+ "video which is the direct continuation of the video\n"
+ "has been used at the previous tracking session.\n"
+ "Preparation has to be done with the first frame of\n"
+ "the video or first image from continuous image\n"
+ "sequence for which next tracking session plan to be\n"
+ "performed.\nTracking model preparation includes\n"
+ "specifying the location of the face to be tracked on\n"
+ "the first frame. Face tracking algorithm will try to\n"
+ "grab the face image significant features and\n"
+ "optionally will try to determine the background.\n"
+ "Actually, preparation is model-dependent and may\n"
+ "differs in respect to used tracking algorithm."
+ TEXT_RESET "\n");
+
+ int sel_opt = 0;
+ const int options[2] = { 1, 2 };
+ const char *names[2] = { "Prepare with the video file",
+ "Prepare with the image file" };
+ bool is_video = false;
+
+ while (!sel_opt) {
+ sel_opt = show_menu("Select action:", options, names, 2);
+ switch (sel_opt) {
+ case 1:
+ is_video = true;
+ break;
+ case 2:
+ is_video = false;
+ break;
+ default:
+ sel_opt = 0;
+ continue;
+ }
+ }
+
+ mv_source_h preparation_frame = NULL;
+ int err = mv_create_source(&preparation_frame);
+
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED
+ "ERROR: Errors were occurred during creating the source!!! code: %i"
+ TEXT_RESET "\n", err);
+
+ return err;
+ }
+
+ char *init_frame_file_name = NULL;
+ const char *prompt_str =
+ (is_video ? "Input video file name to prepare the model:"
+ : "Input image file name to prepare the model:");
+
+ while (input_string(prompt_str, 1024, &(init_frame_file_name)) == -1)
+ printf(TEXT_RED "Incorrect input! Try again.\n" TEXT_RESET);
+
+ if (is_video)
+ err = load_source_from_first_video_frame(init_frame_file_name, preparation_frame);
+ else
+ err = load_mv_source_from_file(init_frame_file_name, preparation_frame);
+
+ free(init_frame_file_name);
+
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED "ERROR: Errors were occurred during preparation "
+ "frame/image load! Error code: %i\n" TEXT_RESET, err);
+
+ int err2 = mv_destroy_source(preparation_frame);
+ if (MEDIA_VISION_ERROR_NONE != err2) {
+ printf(TEXT_RED "ERROR: Errors were occurred during destroying the "
+ "source! Error code: %i\n" TEXT_RESET, err2);
+ }
+
+ return err;
+ }
+
+ mv_engine_config_h eng_config = NULL;
+ err = mv_create_engine_config(&eng_config);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED "ERROR: Errors were occurred during creating the "
+ "engine config! Error code: %i\n" TEXT_RESET, err);
+ } else {
+ err = mv_engine_config_set_string_attribute(
+ eng_config,
+ MV_FACE_DETECTION_MODEL_FILE_PATH,
+ "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt2.xml");
+
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED "ERROR: Errors were occurred during setting of the "
+ "the 'MV_FACE_DETECTION_MODEL_FILE_PATH' attribute "
+ "for engine configuration! Check media-vision-config.json "
+ "file existence. Error code: %i" TEXT_RESET, err);
+ }
+ }
+
+ err = mv_face_detect(
+ preparation_frame,
+ eng_config,
+ face_detected_for_tracking_cb,
+ NULL);
+
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED "ERROR: Errors were occurred during face detection! "
+ "Error code: %i\n" TEXT_RESET, err);
+
+ int err2 = mv_destroy_engine_config(eng_config);
+ if (MEDIA_VISION_ERROR_NONE != err2) {
+ printf(TEXT_RED "ERROR: Errors were occurred during destroying the "
+ "engine configuration! Error code: %i\n" TEXT_RESET, err2);
+ }
+
+ return err;
+ }
+
+ mv_quadrangle_s roi;
+
+ if (show_confirm_dialog("Do specify the face location?")) {
+ printf(TEXT_YELLOW "Specify the coordinates of the quadrangle to be used\n"
+ "for tracking model preparation:" TEXT_RESET "\n");
+ int idx = 0;
+ char str_prompt[100];
+ while (idx < 4) {
+ ++idx;
+ sprintf(str_prompt, "Specify point %i x coordinate: x%i = ",
+ idx - 1, idx);
+ while (-1 == input_int(str_prompt, INT_MIN, INT_MAX,
+ &(roi.points[idx - 1].x))) {
+ printf("Incorrect input! Try again.\n");
+ }
+ sprintf(str_prompt, "Specify point %i y coordinate: y%i = ",
+ idx - 1, idx);
+ while (-1 == input_int(str_prompt, INT_MIN, INT_MAX,
+ &(roi.points[idx - 1].y))) {
+ printf("Incorrect input! Try again.\n");
+ }
+ }
+
+ err = mv_face_tracking_model_prepare(
+ model, eng_config, preparation_frame, &roi);
+ } else {
+ err = mv_face_tracking_model_prepare(
+ model, eng_config, preparation_frame, NULL);
+ }
+
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED "ERROR: Errors were occurred during the tracking model "
+ "preparation! Error code: %i\n" TEXT_RESET, err);
+ }
+
+ const int err2 = mv_destroy_source(preparation_frame);
+ if (MEDIA_VISION_ERROR_NONE != err2) {
+ printf(TEXT_RED "ERROR: Errors were occurred during destroying the "
+ "source! Error code: %i\n" TEXT_RESET, err2);
+ }
+
+ return err;
}
static char *track_output_dir = NULL;
@@ -1637,510 +1454,463 @@ static char *track_output_dir = NULL;
static int track_frame_counter = 0;
void track_cb(
- mv_source_h source,
- mv_face_tracking_model_h tracking_model,
- mv_engine_config_h engine_cfg,
- mv_quadrangle_s *location,
- double confidence,
- void *user_data)
+ mv_source_h source,
+ mv_face_tracking_model_h tracking_model,
+ mv_engine_config_h engine_cfg,
+ mv_quadrangle_s *location,
+ double confidence,
+ void *user_data)
{
- static bool track_catch_face = false;
-
- ++track_frame_counter;
-
- unsigned char *out_buffer = NULL;
- unsigned int buf_size = 0;
- image_data_s image_data = { 0, 0, MEDIA_VISION_COLORSPACE_INVALID };
- if (MEDIA_VISION_ERROR_NONE !=
- mv_source_get_buffer(source, &out_buffer, &buf_size) ||
- MEDIA_VISION_ERROR_NONE !=
- mv_source_get_width(source, &(image_data.image_width)) ||
- MEDIA_VISION_ERROR_NONE !=
- mv_source_get_height(source, &(image_data.image_height)) ||
- MEDIA_VISION_ERROR_NONE !=
- mv_source_get_colorspace(source, &(image_data.image_colorspace)))
- {
- printf("ERROR: Creating out image is impossible.\n");
-
- return;
- }
-
- if (NULL != location)
- {
- if (!track_catch_face)
- {
- printf(TEXT_GREEN "Frame %i : Tracked object is appeared" TEXT_RESET "\n",
- track_frame_counter);
- track_catch_face = true;
- }
- else
- {
- printf(TEXT_YELLOW "Frame %i : Tracked object is tracked" TEXT_RESET "\n",
- track_frame_counter);
- }
-
- const int rectangle_thickness = 3;
- const int drawing_color[] = {255, 0, 0};
-
- printf(TEXT_YELLOW
- "Location: (%i,%i) -> (%i,%i) -> (%i,%i) -> (%i,%i)\n"
- TEXT_RESET,
- location->points[0].x,
- location->points[0].y,
- location->points[1].x,
- location->points[1].y,
- location->points[2].x,
- location->points[2].y,
- location->points[3].x,
- location->points[3].y);
- printf(TEXT_YELLOW "Track confidence: %f" TEXT_RESET "\n", confidence);
-
- const int err = draw_quadrangle_on_buffer(
- *location,
- rectangle_thickness,
- drawing_color,
- &image_data,
- out_buffer);
-
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED "ERROR: Quadrangle wasn't drew on frame buffer! "
- "Error code: %i\n" TEXT_RESET, err);
-
- return;
- }
- }
- else
- {
- if (track_catch_face)
- {
- printf(TEXT_RED "Frame %i : Tracked object is lost" TEXT_RESET "\n",
- track_frame_counter);
- track_catch_face = false;
- }
- else
- {
- printf(TEXT_YELLOW "Frame %i : Tracked object isn't detected" TEXT_RESET "\n",
- track_frame_counter);
- }
- }
-
- char file_path[1024];
- sprintf(file_path, "%s/%05d.jpg", track_output_dir, track_frame_counter);
- if (MEDIA_VISION_ERROR_NONE == save_image_from_buffer(
- file_path, out_buffer, &image_data, 100))
- {
- printf("Frame %i was outputted as %s\n", track_frame_counter, file_path);
- }
- else
- {
- printf(TEXT_RED "ERROR: Failed to generate output file %s. "
- "Check file name and permissions.\n" TEXT_RESET, file_path);
- }
+ static bool track_catch_face = false;
+
+ ++track_frame_counter;
+
+ unsigned char *out_buffer = NULL;
+ unsigned int buf_size = 0;
+ image_data_s image_data = { 0, 0, MEDIA_VISION_COLORSPACE_INVALID };
+ if (MEDIA_VISION_ERROR_NONE !=
+ mv_source_get_buffer(source, &out_buffer, &buf_size) ||
+ MEDIA_VISION_ERROR_NONE !=
+ mv_source_get_width(source, &(image_data.image_width)) ||
+ MEDIA_VISION_ERROR_NONE !=
+ mv_source_get_height(source, &(image_data.image_height)) ||
+ MEDIA_VISION_ERROR_NONE !=
+ mv_source_get_colorspace(source, &(image_data.image_colorspace))) {
+ printf("ERROR: Creating out image is impossible.\n");
+
+ return;
+ }
+
+ if (NULL != location) {
+ if (!track_catch_face) {
+ printf(TEXT_GREEN "Frame %i : Tracked object is appeared" TEXT_RESET "\n",
+ track_frame_counter);
+ track_catch_face = true;
+ } else {
+ printf(TEXT_YELLOW "Frame %i : Tracked object is tracked" TEXT_RESET "\n",
+ track_frame_counter);
+ }
+
+ const int rectangle_thickness = 3;
+ const int drawing_color[] = {255, 0, 0};
+
+ printf(TEXT_YELLOW
+ "Location: (%i,%i) -> (%i,%i) -> (%i,%i) -> (%i,%i)\n"
+ TEXT_RESET,
+ location->points[0].x,
+ location->points[0].y,
+ location->points[1].x,
+ location->points[1].y,
+ location->points[2].x,
+ location->points[2].y,
+ location->points[3].x,
+ location->points[3].y);
+ printf(TEXT_YELLOW "Track confidence: %f" TEXT_RESET "\n", confidence);
+
+ const int err = draw_quadrangle_on_buffer(
+ *location,
+ rectangle_thickness,
+ drawing_color,
+ &image_data,
+ out_buffer);
+
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED "ERROR: Quadrangle wasn't drew on frame buffer! "
+ "Error code: %i\n" TEXT_RESET, err);
+
+ return;
+ }
+ } else {
+ if (track_catch_face) {
+ printf(TEXT_RED "Frame %i : Tracked object is lost" TEXT_RESET "\n",
+ track_frame_counter);
+ track_catch_face = false;
+ } else {
+ printf(TEXT_YELLOW "Frame %i : Tracked object isn't detected" TEXT_RESET "\n",
+ track_frame_counter);
+ }
+ }
+
+ char file_path[1024];
+ sprintf(file_path, "%s/%05d.jpg", track_output_dir, track_frame_counter);
+ if (MEDIA_VISION_ERROR_NONE == save_image_from_buffer(
+ file_path, out_buffer, &image_data, 100)) {
+ printf("Frame %i was outputted as %s\n", track_frame_counter, file_path);
+ } else {
+ printf(TEXT_RED "ERROR: Failed to generate output file %s. "
+ "Check file name and permissions.\n" TEXT_RESET, file_path);
+ }
}
void track_on_sample_cb(
- char *buffer,
- unsigned int buffer_size,
- image_data_s image_data,
- void *user_data)
+ char *buffer,
+ unsigned int buffer_size,
+ image_data_s image_data,
+ void *user_data)
{
- mv_source_h source = NULL;
- int err = mv_create_source(&source);
+ mv_source_h source = NULL;
+ int err = mv_create_source(&source);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED "ERROR: Errors were occurred during creating the source "
- "based on the video frame! Error code: %i\n" TEXT_RESET, err);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED "ERROR: Errors were occurred during creating the source "
+ "based on the video frame! Error code: %i\n" TEXT_RESET, err);
- return;
- }
+ return;
+ }
- err = mv_source_fill_by_buffer(
- source,
- buffer,
- buffer_size,
- image_data.image_width,
- image_data.image_height,
- image_data.image_colorspace);
+ err = mv_source_fill_by_buffer(
+ source,
+ buffer,
+ buffer_size,
+ image_data.image_width,
+ image_data.image_height,
+ image_data.image_colorspace);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED "ERROR: Errors were occurred during filling the source "
- "based on the video frame! Error code: %i\n" TEXT_RESET , err);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED "ERROR: Errors were occurred during filling the source "
+ "based on the video frame! Error code: %i\n" TEXT_RESET , err);
- return;
- }
+ return;
+ }
- mv_face_tracking_model_h tracking_model =
- (mv_face_tracking_model_h)user_data;
+ mv_face_tracking_model_h tracking_model =
+ (mv_face_tracking_model_h)user_data;
- err = mv_face_track(source, tracking_model, NULL, track_cb, false, NULL);
+ err = mv_face_track(source, tracking_model, NULL, track_cb, false, NULL);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED "ERROR: Errors were occurred during tracking the face "
- TEXT_RESET "on the video frame! Error code: %i\n", err);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED "ERROR: Errors were occurred during tracking the face "
+ TEXT_RESET "on the video frame! Error code: %i\n", err);
- return;
- }
+ return;
+ }
}
-// end of stream callback
+/* end of stream callback */
void eos_cb(void *user_data)
{
- printf("Video was fully processed\n");
- if (NULL == user_data)
- {
- printf(TEXT_RED
- "ERROR: eos callback can't stop tracking process."TEXT_RESET);
- return;
- }
-
- pthread_mutex_unlock((pthread_mutex_t*)user_data);
+ printf("Video was fully processed\n");
+ if (NULL == user_data) {
+ printf(TEXT_RED
+ "ERROR: eos callback can't stop tracking process."TEXT_RESET);
+ return;
+ }
+
+ pthread_mutex_unlock((pthread_mutex_t*)user_data);
}
int generate_image_sequence(
- mv_face_tracking_model_h tracking_model,
- const char *track_target_file_name)
+ mv_face_tracking_model_h tracking_model,
+ const char *track_target_file_name)
{
- mv_video_reader_h reader = NULL;
- int err = mv_create_video_reader(&reader);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED
- "ERROR: Errors were occurred during creating the video "
- "reader! Error code: %i" TEXT_RESET "\n", err);
- return err;
- }
-
- image_data_s video_info;
- unsigned int fps;
- // init_frame_file_name
- err = mv_video_reader_load(reader, track_target_file_name, &video_info, &fps);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED "ERROR: Errors were occurred during loading the video "
- "by reader! Error code: %i" TEXT_RESET "\n", err);
-
- const int err2 = mv_destroy_video_reader(reader);
- if (MEDIA_VISION_ERROR_NONE != err2)
- {
- printf(TEXT_RED "ERROR: Errors were occurred during video reader "
- "destroy! Error code: %i" TEXT_RESET "\n", err);
- }
-
- return err;
- }
-
- err = mv_video_reader_set_new_sample_cb(
- reader,
- track_on_sample_cb,
- tracking_model);
-
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED
- "ERROR: Errors were occurred during new sample callback set!"
- " Error code: %i" TEXT_RESET "\n", err);
-
- const int err2 = mv_destroy_video_reader(reader);
- if (MEDIA_VISION_ERROR_NONE != err2)
- {
- printf(TEXT_RED "ERROR: Errors were occurred during video reader "
- "destroy! Error code: %i" TEXT_RESET "\n", err);
- }
-
- return err;
- }
-
- pthread_mutex_t block_during_tracking_mutex;
- pthread_mutex_init(&block_during_tracking_mutex, NULL);
- pthread_mutex_lock(&block_during_tracking_mutex);
-
- // set end of stream callback
- err = mv_video_reader_set_eos_cb(reader, eos_cb, &block_during_tracking_mutex);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED
- "ERROR: Errors were occurred during setting the eos "
- "callback for reader! Error code: %i" TEXT_RESET "\n", err);
-
- const int err2 = mv_destroy_video_reader(reader);
- if (MEDIA_VISION_ERROR_NONE != err2)
- {
- printf(TEXT_RED
- "ERROR: Errors were occurred during video reader destroy!"
- " Error code: %i" TEXT_RESET "\n", err);
- }
-
- pthread_mutex_unlock(&block_during_tracking_mutex);
- pthread_mutex_destroy(&block_during_tracking_mutex);
-
- return err;
- }
-
- err = mv_video_reader_start(reader);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED "ERROR: Errors were occurred during starting the "
- "video reader! Error code: %i" TEXT_RESET "\n", err);
-
- const int err2 = mv_destroy_video_reader(reader);
- if (MEDIA_VISION_ERROR_NONE != err2)
- {
- printf(TEXT_RED
- "ERROR: Errors were occurred during video reader destroy!"
- " Error code: %i" TEXT_RESET "\n", err);
- }
-
- pthread_mutex_unlock(&block_during_tracking_mutex);
- pthread_mutex_destroy(&block_during_tracking_mutex);
-
- return err;
- }
-
- //wait for the video reading thread
-
- pthread_mutex_lock(&block_during_tracking_mutex);
- pthread_mutex_unlock(&block_during_tracking_mutex);
- pthread_mutex_destroy(&block_during_tracking_mutex);
-
- err = mv_video_reader_stop(reader);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED "ERROR: Errors were occurred during "
- "attempt to stop video reader! Error code: %i\n"
- TEXT_RESET, err);
- }
-
- err = mv_destroy_video_reader(reader);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED "ERROR: Errors were occurred during video "
- "reader destroy! Error code: %i\n" TEXT_RESET, err);
- }
-
- return MEDIA_VISION_ERROR_NONE;
+ mv_video_reader_h reader = NULL;
+ int err = mv_create_video_reader(&reader);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED
+ "ERROR: Errors were occurred during creating the video "
+ "reader! Error code: %i" TEXT_RESET "\n", err);
+ return err;
+ }
+
+ image_data_s video_info;
+ unsigned int fps;
+ /* init_frame_file_name */
+ err = mv_video_reader_load(reader, track_target_file_name, &video_info, &fps);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED "ERROR: Errors were occurred during loading the video "
+ "by reader! Error code: %i" TEXT_RESET "\n", err);
+
+ const int err2 = mv_destroy_video_reader(reader);
+ if (MEDIA_VISION_ERROR_NONE != err2) {
+ printf(TEXT_RED "ERROR: Errors were occurred during video reader "
+ "destroy! Error code: %i" TEXT_RESET "\n", err);
+ }
+
+ return err;
+ }
+
+ err = mv_video_reader_set_new_sample_cb(
+ reader,
+ track_on_sample_cb,
+ tracking_model);
+
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED
+ "ERROR: Errors were occurred during new sample callback set!"
+ " Error code: %i" TEXT_RESET "\n", err);
+
+ const int err2 = mv_destroy_video_reader(reader);
+ if (MEDIA_VISION_ERROR_NONE != err2) {
+ printf(TEXT_RED "ERROR: Errors were occurred during video reader "
+ "destroy! Error code: %i" TEXT_RESET "\n", err);
+ }
+
+ return err;
+ }
+
+ pthread_mutex_t block_during_tracking_mutex;
+ pthread_mutex_init(&block_during_tracking_mutex, NULL);
+ pthread_mutex_lock(&block_during_tracking_mutex);
+
+ /* set end of stream callback */
+ err = mv_video_reader_set_eos_cb(reader, eos_cb, &block_during_tracking_mutex);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED
+ "ERROR: Errors were occurred during setting the eos "
+ "callback for reader! Error code: %i" TEXT_RESET "\n", err);
+
+ const int err2 = mv_destroy_video_reader(reader);
+ if (MEDIA_VISION_ERROR_NONE != err2) {
+ printf(TEXT_RED
+ "ERROR: Errors were occurred during video reader destroy!"
+ " Error code: %i" TEXT_RESET "\n", err);
+ }
+
+ pthread_mutex_unlock(&block_during_tracking_mutex);
+ pthread_mutex_destroy(&block_during_tracking_mutex);
+
+ return err;
+ }
+
+ err = mv_video_reader_start(reader);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED "ERROR: Errors were occurred during starting the "
+ "video reader! Error code: %i" TEXT_RESET "\n", err);
+
+ const int err2 = mv_destroy_video_reader(reader);
+ if (MEDIA_VISION_ERROR_NONE != err2) {
+ printf(TEXT_RED
+ "ERROR: Errors were occurred during video reader destroy!"
+ " Error code: %i" TEXT_RESET "\n", err);
+ }
+
+ pthread_mutex_unlock(&block_during_tracking_mutex);
+ pthread_mutex_destroy(&block_during_tracking_mutex);
+
+ return err;
+ }
+
+ /* wait for the video reading thread */
+
+ pthread_mutex_lock(&block_during_tracking_mutex);
+ pthread_mutex_unlock(&block_during_tracking_mutex);
+ pthread_mutex_destroy(&block_during_tracking_mutex);
+
+ err = mv_video_reader_stop(reader);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED "ERROR: Errors were occurred during "
+ "attempt to stop video reader! Error code: %i\n"
+ TEXT_RESET, err);
+ }
+
+ err = mv_destroy_video_reader(reader);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED "ERROR: Errors were occurred during video "
+ "reader destroy! Error code: %i\n" TEXT_RESET, err);
+ }
+
+ return MEDIA_VISION_ERROR_NONE;
}
int perform_mv_face_track(mv_face_tracking_model_h tracking_model)
{
- printf(TEXT_YELLOW "Before any tracking session the tracking model\n"
- "preparation is required. Exception is the case when\n"
- "the next tracking session will be performed with the\n"
- "video which is the direct continuation of the video\n"
- "has been used at the previous tracking session.\n"
- "If you want to test correct tracking case, don't\n"
- "forget to perform preparation before tracking."
- TEXT_RESET "\n");
-
- char *track_target_file_name = NULL;
-
- while (input_string("Input video file name to track on:",
- 1024, &(track_target_file_name)) == -1)
- {
- printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n");
- }
-
- while (input_string("Input directory to save tracking results:",
- 1024, &(track_output_dir)) == -1)
- {
- printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n");
- }
-
- track_frame_counter = 0;
-
- return generate_image_sequence(tracking_model, track_target_file_name);
+ printf(TEXT_YELLOW "Before any tracking session the tracking model\n"
+ "preparation is required. Exception is the case when\n"
+ "the next tracking session will be performed with the\n"
+ "video which is the direct continuation of the video\n"
+ "has been used at the previous tracking session.\n"
+ "If you want to test correct tracking case, don't\n"
+ "forget to perform preparation before tracking."
+ TEXT_RESET "\n");
+
+ char *track_target_file_name = NULL;
+
+ while (input_string("Input video file name to track on:",
+ 1024, &(track_target_file_name)) == -1) {
+ printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n");
+ }
+
+ while (input_string("Input directory to save tracking results:",
+ 1024, &(track_output_dir)) == -1) {
+ printf(TEXT_RED "Incorrect input! Try again." TEXT_RESET "\n");
+ }
+
+ track_frame_counter = 0;
+
+ return generate_image_sequence(tracking_model, track_target_file_name);
}
int perform_track()
{
- printf("\n" TEXT_YELLOW
- "Tracking model isn't now created.\n"
- "You may create it to perform positive \n"
- "testing, or don't create to check the \n"
- "functionality behaviour for uncreated model."
- TEXT_RESET
- "\n");
-
- int err = MEDIA_VISION_ERROR_NONE;
- mv_face_tracking_model_h tracking_model = NULL;
- const bool do_create = show_confirm_dialog("Do Create Tracking Model?");
- if (do_create)
- {
- printf(TEXT_GREEN "Creating tracking model..." TEXT_RESET "\n");
-
- err = mv_face_tracking_model_create(&tracking_model);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED "Creating the model failed. Error code: %i. "
- "But you still can test with uncreated model.\n"
- TEXT_RESET "\n", err);
- }
- else
- {
- printf(TEXT_YELLOW "Tracking model has been created."
- TEXT_RESET "\n");
- }
- }
-
- int sel_opt = 0;
- const int options[6] = { 1, 2, 3, 4, 5, 6 };
- const char *names[6] = { "Prepare the model",
- "Clone the model",
- "Save model to file",
- "Load model from file",
- "Track with model",
- "Destroy model and exit" };
-
- while(!sel_opt)
- {
- sel_opt = show_menu("Select action:", options, names, 6);
- notification_type_e notification_type = FAIL_OR_SUCCESSS;
-
- switch (sel_opt)
- {
- case 1:
- err = perform_mv_face_tracking_model_prepare(tracking_model);
- break;
- case 2:
- err = perform_mv_face_tracking_model_clone(tracking_model);
- break;
- case 3:
- err = perform_mv_face_tracking_model_save(tracking_model);
- break;
- case 4:
- err = perform_mv_face_tracking_model_load(&tracking_model);
- break;
- case 5:
- err = perform_mv_face_track(tracking_model);
- notification_type = FAIL_OR_DONE;
- break;
- case 6:
- if (do_create)
- {
- err = mv_face_tracking_model_destroy(tracking_model);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(TEXT_RED
- "Error with code %i was occurred during destroy"
- TEXT_RESET "\n", err);
- }
-
- return err;
- }
- else
- {
- return MEDIA_VISION_ERROR_NONE;
- }
- default:
- sel_opt = 0;
- printf("ERROR: Incorrect input.\n");
- continue;
- }
-
- print_action_result(names[sel_opt - 1], err, notification_type);
-
- sel_opt = 0;
- }
+ printf("\n" TEXT_YELLOW
+ "Tracking model isn't now created.\n"
+ "You may create it to perform positive \n"
+ "testing, or don't create to check the \n"
+ "functionality behaviour for uncreated model."
+ TEXT_RESET
+ "\n");
+
+ int err = MEDIA_VISION_ERROR_NONE;
+ mv_face_tracking_model_h tracking_model = NULL;
+ const bool do_create = show_confirm_dialog("Do Create Tracking Model?");
+ if (do_create) {
+ printf(TEXT_GREEN "Creating tracking model..." TEXT_RESET "\n");
+
+ err = mv_face_tracking_model_create(&tracking_model);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED "Creating the model failed. Error code: %i. "
+ "But you still can test with uncreated model.\n"
+ TEXT_RESET "\n", err);
+ } else {
+ printf(TEXT_YELLOW "Tracking model has been created."
+ TEXT_RESET "\n");
+ }
+ }
+
+ int sel_opt = 0;
+ const int options[6] = { 1, 2, 3, 4, 5, 6 };
+ const char *names[6] = { "Prepare the model",
+ "Clone the model",
+ "Save model to file",
+ "Load model from file",
+ "Track with model",
+ "Destroy model and exit" };
+
+ while (!sel_opt) {
+ sel_opt = show_menu("Select action:", options, names, 6);
+ notification_type_e notification_type = FAIL_OR_SUCCESSS;
+
+ switch (sel_opt) {
+ case 1:
+ err = perform_mv_face_tracking_model_prepare(tracking_model);
+ break;
+ case 2:
+ err = perform_mv_face_tracking_model_clone(tracking_model);
+ break;
+ case 3:
+ err = perform_mv_face_tracking_model_save(tracking_model);
+ break;
+ case 4:
+ err = perform_mv_face_tracking_model_load(&tracking_model);
+ break;
+ case 5:
+ err = perform_mv_face_track(tracking_model);
+ notification_type = FAIL_OR_DONE;
+ break;
+ case 6:
+ if (do_create) {
+ err = mv_face_tracking_model_destroy(tracking_model);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(TEXT_RED
+ "Error with code %i was occurred during destroy"
+ TEXT_RESET "\n", err);
+ }
+
+ return err;
+ } else {
+ return MEDIA_VISION_ERROR_NONE;
+ }
+ default:
+ sel_opt = 0;
+ printf("ERROR: Incorrect input.\n");
+ continue;
+ }
+
+ print_action_result(names[sel_opt - 1], err, notification_type);
+
+ sel_opt = 0;
+ }
}
int perform_eye_condition_recognize()
{
- Perform_eye_condition_recognize = true;
+ Perform_eye_condition_recognize = true;
- const int err = perform_detect();
+ const int err = perform_detect();
- Perform_eye_condition_recognize = false;
+ Perform_eye_condition_recognize = false;
- return err;
+ return err;
}
int perform_face_expression_recognize()
{
- Perform_facial_expression_recognize = true;
+ Perform_facial_expression_recognize = true;
- const int err = perform_detect();
+ const int err = perform_detect();
- Perform_facial_expression_recognize = false;
+ Perform_facial_expression_recognize = false;
- return err;
+ return err;
}
int main(void)
{
- int err = MEDIA_VISION_ERROR_NONE;
-
- int sel_opt = 0;
- const int options[6] = { 1, 2, 3, 4, 5, 6 };
- const char *names[6] = { "Detect",
- "Track",
- "Recognize",
- "Eye condition",
- "Face expression",
- "Exit" };
-
- while (sel_opt == 0)
- {
- sel_opt = show_menu("Select action:", options, names, 6);
- switch (sel_opt)
- {
- case 1:
- err = perform_detect();
- break;
- case 2:
- err = perform_track();
- break;
- case 3:
- err = perform_recognize();
- break;
- case 4:
- err = perform_eye_condition_recognize();
- break;
- case 5:
- err = perform_face_expression_recognize();
- break;
- case 6:
- return 0;
- default:
- sel_opt = 0;
- printf("Invalid option.\n");
- continue;
- }
-
- int do_another = 0;
-
- if (err != MEDIA_VISION_ERROR_NONE)
- {
- printf("ERROR: Action is finished with error code: %i\n", err);
- }
-
- sel_opt = 0;
- const int options_last[2] = { 1, 2 };
- const char *names_last[2] = { "YES", "NO" };
-
- while (sel_opt == 0)
- {
- sel_opt = show_menu("Perform another action?", options_last, names_last, 2);
-
- switch (sel_opt)
- {
- case 1:
- do_another = 1;
- break;
- case 2:
- do_another = 0;
- break;
- default:
- sel_opt = 0;
- printf("Invalid option.\n");
- break;
- }
- }
-
- sel_opt = (do_another == 1 ? 0 : sel_opt);
- }
-
- return 0;
+ int err = MEDIA_VISION_ERROR_NONE;
+
+ int sel_opt = 0;
+ const int options[6] = { 1, 2, 3, 4, 5, 6 };
+ const char *names[6] = { "Detect",
+ "Track",
+ "Recognize",
+ "Eye condition",
+ "Face expression",
+ "Exit" };
+
+ while (sel_opt == 0) {
+ sel_opt = show_menu("Select action:", options, names, 6);
+ switch (sel_opt) {
+ case 1:
+ err = perform_detect();
+ break;
+ case 2:
+ err = perform_track();
+ break;
+ case 3:
+ err = perform_recognize();
+ break;
+ case 4:
+ err = perform_eye_condition_recognize();
+ break;
+ case 5:
+ err = perform_face_expression_recognize();
+ break;
+ case 6:
+ return 0;
+ default:
+ sel_opt = 0;
+ printf("Invalid option.\n");
+ continue;
+ }
+
+ int do_another = 0;
+
+ if (err != MEDIA_VISION_ERROR_NONE)
+ printf("ERROR: Action is finished with error code: %i\n", err);
+
+ sel_opt = 0;
+ const int options_last[2] = { 1, 2 };
+ const char *names_last[2] = { "YES", "NO" };
+
+ while (sel_opt == 0) {
+ sel_opt = show_menu("Perform another action?", options_last, names_last, 2);
+
+ switch (sel_opt) {
+ case 1:
+ do_another = 1;
+ break;
+ case 2:
+ do_another = 0;
+ break;
+ default:
+ sel_opt = 0;
+ printf("Invalid option.\n");
+ break;
+ }
+ }
+
+ sel_opt = (do_another == 1 ? 0 : sel_opt);
+ }
+
+ return 0;
}
diff --git a/test/testsuites/image/image_test_suite.c b/test/testsuites/image/image_test_suite.c
index a0d1a11d..1cd9ba78 100644
--- a/test/testsuites/image/image_test_suite.c
+++ b/test/testsuites/image/image_test_suite.c
@@ -26,2051 +26,1827 @@
#include <pthread.h>
-typedef enum
-{
- SOURCE_TYPE_GENERATION,
- SOURCE_TYPE_LOADING,
- SOURCE_TYPE_CLONING,
- SOURCE_TYPE_EMPTY,
- SOURCE_TYPE_INVALID
+typedef enum {
+ SOURCE_TYPE_GENERATION,
+ SOURCE_TYPE_LOADING,
+ SOURCE_TYPE_CLONING,
+ SOURCE_TYPE_EMPTY,
+ SOURCE_TYPE_INVALID
} source_type_e;
-typedef enum
-{
- OBJECT_TYPE_IMAGE_OBJECT,
- OBJECT_TYPE_IMAGE_TRACKING_MODEL,
- OBJECT_TYPE_INVALID
+typedef enum {
+ OBJECT_TYPE_IMAGE_OBJECT,
+ OBJECT_TYPE_IMAGE_TRACKING_MODEL,
+ OBJECT_TYPE_INVALID
} testing_object_type_e;
#define testing_object_maximum_label_length 300
-typedef struct testing_object_s
-{
- void *entity;
+typedef struct testing_object_s {
+ void *entity;
- char origin_label[testing_object_maximum_label_length];
+ char origin_label[testing_object_maximum_label_length];
- char actual_label[testing_object_maximum_label_length];
+ char actual_label[testing_object_maximum_label_length];
- testing_object_type_e object_type;
+ testing_object_type_e object_type;
- source_type_e source_type;
+ source_type_e source_type;
- int cloning_counter;
+ int cloning_counter;
} testing_object;
typedef testing_object *testing_object_h;
void testing_object_create(testing_object_h *result)
{
- (*result) = malloc(sizeof(testing_object));
-
- (*result)->entity = (void*)NULL;
- (*result)->object_type = OBJECT_TYPE_INVALID;
- (*result)->source_type = SOURCE_TYPE_INVALID;
- (*result)->cloning_counter = 0;
- (*result)->origin_label[0] = '\0';
- (*result)->actual_label[0] = '\0';
+ (*result) = malloc(sizeof(testing_object));
+
+ (*result)->entity = (void*)NULL;
+ (*result)->object_type = OBJECT_TYPE_INVALID;
+ (*result)->source_type = SOURCE_TYPE_INVALID;
+ (*result)->cloning_counter = 0;
+ (*result)->origin_label[0] = '\0';
+ (*result)->actual_label[0] = '\0';
}
void testing_object_fill(
- testing_object_h target,
- void *entity,
- testing_object_type_e object_type,
- source_type_e source_type,
- void *source)
+ testing_object_h target,
+ void *entity,
+ testing_object_type_e object_type,
+ source_type_e source_type,
+ void *source)
{
- target->entity = entity;
- target->object_type = object_type;
- target->source_type = source_type;
- target->cloning_counter = 0;
-
- switch (source_type)
- {
- case SOURCE_TYPE_GENERATION:
- {
- if (OBJECT_TYPE_IMAGE_OBJECT == object_type)
- {
- sprintf(
- target->origin_label,
- "generated from \"%s\"",
- (char*)source);
- }
- else if (OBJECT_TYPE_IMAGE_TRACKING_MODEL == object_type)
- {
- sprintf(
- target->origin_label,
- "generated from image object which is %s",
- ((testing_object_h)source)->actual_label);
- }
- else
- {
- sprintf(
- target->origin_label,
- "generated unknown type of testing object");
- }
-
- strcpy(target->actual_label, target->origin_label);
- break;
- }
- case SOURCE_TYPE_LOADING:
- {
- sprintf(target->origin_label, "loaded from \"%s\"", (char*)source);
- strcpy(target->actual_label, target->origin_label);
- break;
- }
- case SOURCE_TYPE_CLONING:
- {
- testing_object_h source_object = (testing_object_h)source;
- strcpy(target->origin_label, source_object->origin_label);
- target->cloning_counter = source_object->cloning_counter + 1;
-
- char number_of_cloning[10];
- number_of_cloning[0] = '\0';
- if (1 < target->cloning_counter)
- {
- sprintf(number_of_cloning, "%s%i%s",
- "(x", target->cloning_counter, ")");
- }
-
- char type_name[20];
- if (OBJECT_TYPE_IMAGE_OBJECT == object_type)
- {
- sprintf(type_name, "image object");
- }
- else if (OBJECT_TYPE_IMAGE_TRACKING_MODEL == object_type)
- {
- sprintf(type_name, "tracking model");
- }
- else
- {
- sprintf(type_name, "unknown object");
- }
- sprintf(target->actual_label, "%s%s%s%s%s%s",
- "cloned ", number_of_cloning,
- " from ", type_name,
- " which is ", target->origin_label);
- break;
- }
- case SOURCE_TYPE_EMPTY:
- {
- strcpy(target->origin_label, "created an empty");
- strcpy(target->actual_label, target->origin_label);
- break;
- }
- default:
- {
- strcpy(target->origin_label, "having unknown source");
- break;
- }
- }
+ target->entity = entity;
+ target->object_type = object_type;
+ target->source_type = source_type;
+ target->cloning_counter = 0;
+
+ switch (source_type) {
+ case SOURCE_TYPE_GENERATION: {
+ if (OBJECT_TYPE_IMAGE_OBJECT == object_type) {
+ sprintf(
+ target->origin_label,
+ "generated from \"%s\"",
+ (char*)source);
+ } else if (OBJECT_TYPE_IMAGE_TRACKING_MODEL == object_type) {
+ sprintf(
+ target->origin_label,
+ "generated from image object which is %s",
+ ((testing_object_h)source)->actual_label);
+ } else {
+ sprintf(
+ target->origin_label,
+ "generated unknown type of testing object");
+ }
+
+ strcpy(target->actual_label, target->origin_label);
+ break;
+ }
+ case SOURCE_TYPE_LOADING: {
+ sprintf(target->origin_label, "loaded from \"%s\"", (char*)source);
+ strcpy(target->actual_label, target->origin_label);
+ break;
+ }
+ case SOURCE_TYPE_CLONING: {
+ testing_object_h source_object = (testing_object_h)source;
+ strcpy(target->origin_label, source_object->origin_label);
+ target->cloning_counter = source_object->cloning_counter + 1;
+
+ char number_of_cloning[10];
+ number_of_cloning[0] = '\0';
+ if (1 < target->cloning_counter) {
+ sprintf(number_of_cloning, "%s%i%s",
+ "(x", target->cloning_counter, ")");
+ }
+
+ char type_name[20];
+ if (OBJECT_TYPE_IMAGE_OBJECT == object_type)
+ sprintf(type_name, "image object");
+ else if (OBJECT_TYPE_IMAGE_TRACKING_MODEL == object_type)
+ sprintf(type_name, "tracking model");
+ else
+ sprintf(type_name, "unknown object");
+
+ sprintf(target->actual_label, "%s%s%s%s%s%s",
+ "cloned ", number_of_cloning,
+ " from ", type_name,
+ " which is ", target->origin_label);
+ break;
+ }
+ case SOURCE_TYPE_EMPTY: {
+ strcpy(target->origin_label, "created an empty");
+ strcpy(target->actual_label, target->origin_label);
+ break;
+ }
+ default: {
+ strcpy(target->origin_label, "having unknown source");
+ break;
+ }
+ }
}
void testing_object_destroy(testing_object_h *target)
{
- switch ((*target)->object_type)
- {
- case OBJECT_TYPE_IMAGE_OBJECT:
- {
- int err = mv_image_object_destroy((mv_image_object_h)((*target)->entity));
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("\nERROR: Errors were occurred during image object "
- "destroying; code %i\n", err);
- }
- break;
- }
- case OBJECT_TYPE_IMAGE_TRACKING_MODEL:
- {
- int err = mv_image_tracking_model_destroy(
- (mv_image_tracking_model_h)((*target)->entity));
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("\nERROR: Errors were occurred during image tracking "
- "model destroying; code %i\n", err);
- }
- break;
- }
- }
- free(*target);
- (*target) = NULL;
+ switch ((*target)->object_type) {
+ case OBJECT_TYPE_IMAGE_OBJECT: {
+ int err = mv_image_object_destroy((mv_image_object_h)((*target)->entity));
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("\nERROR: Errors were occurred during image object "
+ "destroying; code %i\n", err);
+ }
+ break;
+ }
+ case OBJECT_TYPE_IMAGE_TRACKING_MODEL: {
+ int err = mv_image_tracking_model_destroy(
+ (mv_image_tracking_model_h)((*target)->entity));
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("\nERROR: Errors were occurred during image tracking "
+ "model destroying; code %i\n", err);
+ }
+ break;
+ }
+ }
+ free(*target);
+ (*target) = NULL;
}
-typedef struct
-{
- mv_quadrangle_s **locations;
- unsigned int locations_size;
- unsigned int currently_number;
+typedef struct {
+ mv_quadrangle_s **locations;
+ unsigned int locations_size;
+ unsigned int currently_number;
} recognition_result;
void destroy_recognition_result(recognition_result *result)
{
- if (result->locations_size == 0)
- {
- return;
- }
-
- int i = 0;
- for (; i < result->locations_size; ++i)
- {
- if (NULL != result->locations[i])
- {
- free(result->locations[i]);
- }
- }
- free(result->locations);
+ if (result->locations_size == 0)
+ return;
+
+ int i = 0;
+ for (; i < result->locations_size; ++i) {
+ if (NULL != result->locations[i])
+ free(result->locations[i]);
+ }
+ free(result->locations);
}
void recognized_cb(
- mv_source_h source,
- mv_engine_config_h engine_cfg,
- const mv_image_object_h *image_objects,
- mv_quadrangle_s **locations,
- unsigned int number_of_objects,
- void *user_data)
+ mv_source_h source,
+ mv_engine_config_h engine_cfg,
+ const mv_image_object_h *image_objects,
+ mv_quadrangle_s **locations,
+ unsigned int number_of_objects,
+ void *user_data)
{
- MEDIA_VISION_FUNCTION_ENTER();
-
- if (NULL == user_data)
- {
- return;
- }
-
- recognition_result *result = (recognition_result*)user_data;
-
- int object_num = 0;
- for(; object_num < number_of_objects; ++object_num)
- {
- if (result->currently_number >= result->locations_size)
- {
- return;
- }
-
- if (NULL == locations[object_num])
- {
- result->locations[result->currently_number] = NULL;
- }
- else
- {
- result->locations[result->currently_number] = malloc(sizeof(mv_quadrangle_s));
- *(result->locations[result->currently_number]) = *(locations[object_num]);
- }
-
- ++result->currently_number;
- }
-
- MEDIA_VISION_FUNCTION_LEAVE();
+ MEDIA_VISION_FUNCTION_ENTER();
+
+ if (NULL == user_data)
+ return;
+
+ recognition_result *result = (recognition_result*)user_data;
+
+ int object_num = 0;
+ for (; object_num < number_of_objects; ++object_num) {
+ if (result->currently_number >= result->locations_size)
+ return;
+
+ if (NULL == locations[object_num]) {
+ result->locations[result->currently_number] = NULL;
+ } else {
+ result->locations[result->currently_number] = malloc(sizeof(mv_quadrangle_s));
+ *(result->locations[result->currently_number]) = *(locations[object_num]);
+ }
+
+ ++result->currently_number;
+ }
+
+ MEDIA_VISION_FUNCTION_LEAVE();
}
void handle_recognition_result(
- const recognition_result *result,
- int number_of_objects,
- mv_source_h *source,
- char *file_name)
+ const recognition_result *result,
+ int number_of_objects,
+ mv_source_h *source,
+ char *file_name)
{
- int is_source_data_loaded = 0;
-
- unsigned char *out_buffer = NULL;
- int buffer_size = 0;
- image_data_s image_data = { 0, 0, MEDIA_VISION_COLORSPACE_INVALID };
-
- if (MEDIA_VISION_ERROR_NONE != mv_source_get_buffer(source, &(out_buffer), &buffer_size) ||
- MEDIA_VISION_ERROR_NONE != mv_source_get_width(source, &(image_data.image_width)) ||
- MEDIA_VISION_ERROR_NONE != mv_source_get_height(source, &(image_data.image_height)) ||
- MEDIA_VISION_ERROR_NONE != mv_source_get_colorspace(source, &(image_data.image_colorspace)) ||
- NULL == file_name)
- {
- printf("ERROR: Creating out image is impossible.\n");
- }
- else
- {
- is_source_data_loaded = 1;
- }
-
- int object_num = 0;
-
-
- for (; object_num < number_of_objects; ++object_num)
- {
- if (NULL == result->locations[object_num])
- {
- printf("\nImage #%i is not recognized\n", object_num);
- continue;
- }
-
- printf("\nImage #%i is recognized\n", object_num);
- printf("Recognized image coordinates:\n");
-
- int point_num = 0;
- for (; point_num < 4; ++point_num)
- {
- printf("%d point - x = %d, y = %d\n", point_num + 1,
- result->locations[object_num]->points[point_num].x,
- result->locations[object_num]->points[point_num].y);
- }
-
- if (is_source_data_loaded)
- {
- const int thickness = 2;
- const int color[] = {0, 255, 0};
-
- const int err = draw_quadrangle_on_buffer(
- *(result->locations[object_num]),
- thickness,
- color,
- &image_data,
- out_buffer);
-
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("ERROR: Impossible to draw quadrangle\n");
- }
- }
- }
- if (save_image_from_buffer(file_name, out_buffer,
- &image_data, 100) != MEDIA_VISION_ERROR_NONE)
- {
- printf("\nERROR: Failed to generate output file\n");
- }
- else
- {
- printf("\nImage was generated as %s\n", file_name);
- }
+ int is_source_data_loaded = 0;
+
+ unsigned char *out_buffer = NULL;
+ int buffer_size = 0;
+ image_data_s image_data = { 0, 0, MEDIA_VISION_COLORSPACE_INVALID };
+
+ if (MEDIA_VISION_ERROR_NONE != mv_source_get_buffer(source, &(out_buffer), &buffer_size) ||
+ MEDIA_VISION_ERROR_NONE != mv_source_get_width(source, &(image_data.image_width)) ||
+ MEDIA_VISION_ERROR_NONE != mv_source_get_height(source, &(image_data.image_height)) ||
+ MEDIA_VISION_ERROR_NONE != mv_source_get_colorspace(source, &(image_data.image_colorspace)) ||
+ NULL == file_name) {
+ printf("ERROR: Creating out image is impossible.\n");
+ } else {
+ is_source_data_loaded = 1;
+ }
+
+ int object_num = 0;
+
+ for (; object_num < number_of_objects; ++object_num) {
+ if (NULL == result->locations[object_num]) {
+ printf("\nImage #%i is not recognized\n", object_num);
+ continue;
+ }
+
+ printf("\nImage #%i is recognized\n", object_num);
+ printf("Recognized image coordinates:\n");
+
+ int point_num = 0;
+ for (; point_num < 4; ++point_num) {
+ printf("%d point - x = %d, y = %d\n", point_num + 1,
+ result->locations[object_num]->points[point_num].x,
+ result->locations[object_num]->points[point_num].y);
+ }
+
+ if (is_source_data_loaded) {
+ const int thickness = 2;
+ const int color[] = {0, 255, 0};
+
+ const int err = draw_quadrangle_on_buffer(
+ *(result->locations[object_num]),
+ thickness,
+ color,
+ &image_data,
+ out_buffer);
+
+ if (MEDIA_VISION_ERROR_NONE != err)
+ printf("ERROR: Impossible to draw quadrangle\n");
+ }
+ }
+
+ if (save_image_from_buffer(file_name, out_buffer,
+ &image_data, 100) != MEDIA_VISION_ERROR_NONE) {
+ printf("\nERROR: Failed to generate output file\n");
+ } else {
+ printf("\nImage was generated as %s\n", file_name);
+ }
}
int generate_image_object_from_file(const char *path_to_image,
- bool roi_selected,
- mv_rectangle_s roi,
- mv_image_object_h *result)
+ bool roi_selected,
+ mv_rectangle_s roi,
+ mv_image_object_h *result)
{
- MEDIA_VISION_FUNCTION_ENTER();
-
- mv_source_h source;
- int err = mv_create_source(&source);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("ERROR: Errors were occurred during source creating!!! code %i\n", err);
- }
-
- err = load_mv_source_from_file(path_to_image, source);
-
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("ERROR: image is not loaded; code %i\n", err);
-
- int err2 = mv_destroy_source(source);
- if (MEDIA_VISION_ERROR_NONE != err2)
- {
- printf("\nERROR: Errors were occurred during source "
- "destroying; code %i\n", err2);
- }
-
- MEDIA_VISION_FUNCTION_LEAVE();
-
- return err;
- }
-
- mv_engine_config_h config;
- err = mv_create_engine_config(&config);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("ERROR: engine configuration is not created; code %i\n", err);
-
- int err2 = mv_destroy_source(source);
- if (MEDIA_VISION_ERROR_NONE != err2)
- {
- printf("\nERROR: Errors were occurred during source "
- "destroying; code %i\n", err2);
- }
-
- MEDIA_VISION_FUNCTION_LEAVE();
-
- return err;
- }
-
- err = mv_image_object_create(result);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("ERROR: Errors were occurred during creating image object; "
- "code %i\n", err);
-
- int err2 = mv_destroy_source(source);
- if (MEDIA_VISION_ERROR_NONE != err2)
- {
- printf("\nERROR: Errors were occurred during source "
- "destroying; code %i\n", err2);
- }
-
- err2 = mv_destroy_engine_config(config);
- if (MEDIA_VISION_ERROR_NONE != err2)
- {
- printf("\nERROR: Errors were occurred during engine config "
- "destroying; code %i\n", err2);
- }
-
- MEDIA_VISION_FUNCTION_LEAVE();
-
- return err;
- }
-
- if (roi_selected)
- {
- err = mv_image_object_fill(*result, config, source, &roi);
- }
- else
- {
- err = mv_image_object_fill(*result, config, source, NULL);
- }
-
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("ERROR: Errors were occurred during filling image object; "
- "code %i\n", err);
-
- int err2 = mv_destroy_source(source);
- if (MEDIA_VISION_ERROR_NONE != err2)
- {
- printf("\nERROR: Errors were occurred during source "
- "destroying; code %i\n", err2);
- }
-
- err2 = mv_image_object_destroy(*result);
- if (MEDIA_VISION_ERROR_NONE != err2)
- {
- printf("\nERROR: Errors were occurred during image object "
- "destroying; code %i\n", err2);
- }
-
- err2 = mv_destroy_engine_config(config);
- if (MEDIA_VISION_ERROR_NONE != err2)
- {
- printf("\nERROR: Errors were occurred during engine config "
- "destroying; code %i\n", err2);
- }
-
- *result = NULL;
-
- MEDIA_VISION_FUNCTION_LEAVE();
-
- return err;
- }
-
- err = mv_destroy_source(source);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("\nERROR: Errors were occurred during source "
- "destroying; code %i\n", err);
-
- int err2 = mv_destroy_engine_config(config);
- if (MEDIA_VISION_ERROR_NONE != err2)
- {
- printf("\nERROR: Errors were occurred during engine config "
- "destroying; code %i\n", err2);
- }
-
- MEDIA_VISION_FUNCTION_LEAVE();
-
- return err;
- }
-
- err = mv_destroy_engine_config(config);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("\nERROR: Errors were occurred during engine config "
- "destroying; code %i\n", err);
-
- MEDIA_VISION_FUNCTION_LEAVE();
-
- return err;
- }
-
- MEDIA_VISION_FUNCTION_LEAVE();
- return err;
+ MEDIA_VISION_FUNCTION_ENTER();
+
+ mv_source_h source;
+ int err = mv_create_source(&source);
+ if (MEDIA_VISION_ERROR_NONE != err)
+ printf("ERROR: Errors were occurred during source creating!!! code %i\n", err);
+
+ err = load_mv_source_from_file(path_to_image, source);
+
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("ERROR: image is not loaded; code %i\n", err);
+
+ int err2 = mv_destroy_source(source);
+ if (MEDIA_VISION_ERROR_NONE != err2) {
+ printf("\nERROR: Errors were occurred during source "
+ "destroying; code %i\n", err2);
+ }
+
+ MEDIA_VISION_FUNCTION_LEAVE();
+
+ return err;
+ }
+
+ mv_engine_config_h config;
+ err = mv_create_engine_config(&config);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("ERROR: engine configuration is not created; code %i\n", err);
+
+ int err2 = mv_destroy_source(source);
+ if (MEDIA_VISION_ERROR_NONE != err2) {
+ printf("\nERROR: Errors were occurred during source "
+ "destroying; code %i\n", err2);
+ }
+
+ MEDIA_VISION_FUNCTION_LEAVE();
+
+ return err;
+ }
+
+ err = mv_image_object_create(result);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("ERROR: Errors were occurred during creating image object; "
+ "code %i\n", err);
+
+ int err2 = mv_destroy_source(source);
+ if (MEDIA_VISION_ERROR_NONE != err2) {
+ printf("\nERROR: Errors were occurred during source "
+ "destroying; code %i\n", err2);
+ }
+
+ err2 = mv_destroy_engine_config(config);
+ if (MEDIA_VISION_ERROR_NONE != err2) {
+ printf("\nERROR: Errors were occurred during engine config "
+ "destroying; code %i\n", err2);
+ }
+
+ MEDIA_VISION_FUNCTION_LEAVE();
+
+ return err;
+ }
+
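+	/* Fill the image object either from the whole source or only from the selected ROI */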
+ if (roi_selected)
+ err = mv_image_object_fill(*result, config, source, &roi);
+ else
+ err = mv_image_object_fill(*result, config, source, NULL);
+
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("ERROR: Errors were occurred during filling image object; "
+ "code %i\n", err);
+
+ int err2 = mv_destroy_source(source);
+ if (MEDIA_VISION_ERROR_NONE != err2) {
+ printf("\nERROR: Errors were occurred during source "
+ "destroying; code %i\n", err2);
+ }
+
+ err2 = mv_image_object_destroy(*result);
+ if (MEDIA_VISION_ERROR_NONE != err2) {
+ printf("\nERROR: Errors were occurred during image object "
+ "destroying; code %i\n", err2);
+ }
+
+ err2 = mv_destroy_engine_config(config);
+ if (MEDIA_VISION_ERROR_NONE != err2) {
+ printf("\nERROR: Errors were occurred during engine config "
+ "destroying; code %i\n", err2);
+ }
+
+ *result = NULL;
+
+ MEDIA_VISION_FUNCTION_LEAVE();
+
+ return err;
+ }
+
+ err = mv_destroy_source(source);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("\nERROR: Errors were occurred during source "
+ "destroying; code %i\n", err);
+
+ int err2 = mv_destroy_engine_config(config);
+ if (MEDIA_VISION_ERROR_NONE != err2) {
+ printf("\nERROR: Errors were occurred during engine config "
+ "destroying; code %i\n", err2);
+ }
+
+ MEDIA_VISION_FUNCTION_LEAVE();
+
+ return err;
+ }
+
+ err = mv_destroy_engine_config(config);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("\nERROR: Errors were occurred during engine config "
+ "destroying; code %i\n", err);
+
+ MEDIA_VISION_FUNCTION_LEAVE();
+
+ return err;
+ }
+
+ MEDIA_VISION_FUNCTION_LEAVE();
+ return err;
}
int recognize_image(const char *path_to_image,
- const char *path_to_generated_image,
- mv_image_object_h *targets,
- int number_of_targets)
+ const char *path_to_generated_image,
+ mv_image_object_h *targets,
+ int number_of_targets)
{
- MEDIA_VISION_FUNCTION_ENTER();
-
-
- if (NULL == targets)
- {
- printf("\nYou must create at least one model for recognition\n");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- mv_source_h source;
- int err = mv_create_source(&source);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("\nERROR: Errors were occurred during source creating; code %i\n", err);
- return err;
- }
-
- err = load_mv_source_from_file(path_to_image, source);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("ERROR: image is not loaded; code %i\n", err);
- int err2 = mv_destroy_source(source);
- if (MEDIA_VISION_ERROR_NONE != err2)
- {
- printf("\nERROR: Errors were occurred during source destroying; "
- "code %i\n", err2);
- }
- MEDIA_VISION_FUNCTION_LEAVE();
- return err;
- }
-
- recognition_result result;
- result.currently_number = 0;
- if (0 < number_of_targets)
- {
- result.locations = malloc(sizeof(mv_quadrangle_s*) * number_of_targets);
- result.locations_size = number_of_targets;
- }
- else
- {
- result.locations = NULL;
- result.locations_size = 0;
- }
-
- mv_engine_config_h config;
- err = mv_create_engine_config(&config);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("ERROR: engine configuration is not created; code %i\n", err);
- int err2 = mv_destroy_source(source);
- if (MEDIA_VISION_ERROR_NONE != err2)
- {
- printf("\nERROR: Errors were occurred during source destroying;"
- "code %i\n", err2);
- }
- MEDIA_VISION_FUNCTION_LEAVE();
- return err;
- }
-
- err = mv_image_recognize(source, targets, number_of_targets, config,
- recognized_cb, &result);
-
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("\nERROR: Image is not recognized; code %i\n", err);
-
- destroy_recognition_result(&result);
-
- int err2 = mv_destroy_source(source);
- if (MEDIA_VISION_ERROR_NONE != err2)
- {
- printf("\nERROR: Errors were occurred during source "
- "destroying; code %i\n", err2);
- }
- err2 = mv_destroy_engine_config(config);
- if (MEDIA_VISION_ERROR_NONE != err2)
- {
- printf("\nERROR: Errors were occurred during engine config "
- "destroying; code %i\n", err2);
- }
-
- MEDIA_VISION_FUNCTION_LEAVE();
-
- return err;
- }
-
- handle_recognition_result(&result, number_of_targets, source,
- path_to_generated_image);
-
- destroy_recognition_result(&result);
-
- err = mv_destroy_source(source);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("\nERROR: Errors were occurred during source destroying; code %i\n",
- err);
-
- int err2 = mv_destroy_engine_config(config);
- if (MEDIA_VISION_ERROR_NONE != err2)
- {
- printf("\nERROR: Errors were occurred during engine config "
- "destroying; code %i\n", err2);
- }
-
- MEDIA_VISION_FUNCTION_LEAVE();
-
- return err;
- }
-
- err = mv_destroy_engine_config(config);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("\nERROR: Errors were occurred during engine config destroying; "
- "code %i\n", err);
-
- MEDIA_VISION_FUNCTION_LEAVE();
-
- return err;
- }
-
- MEDIA_VISION_FUNCTION_LEAVE();
-
- return err;
+ MEDIA_VISION_FUNCTION_ENTER();
+
+ if (NULL == targets) {
+ printf("\nYou must create at least one model for recognition\n");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ mv_source_h source;
+ int err = mv_create_source(&source);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("\nERROR: Errors were occurred during source creating; code %i\n", err);
+ return err;
+ }
+
+ err = load_mv_source_from_file(path_to_image, source);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("ERROR: image is not loaded; code %i\n", err);
+ int err2 = mv_destroy_source(source);
+ if (MEDIA_VISION_ERROR_NONE != err2) {
+ printf("\nERROR: Errors were occurred during source destroying; "
+ "code %i\n", err2);
+ }
+ MEDIA_VISION_FUNCTION_LEAVE();
+ return err;
+ }
+
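+	/* Prepare the recognition result with room for one location per target */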
+ recognition_result result;
+ result.currently_number = 0;
+ if (0 < number_of_targets) {
+ result.locations = malloc(sizeof(mv_quadrangle_s*) * number_of_targets);
+ result.locations_size = number_of_targets;
+ } else {
+ result.locations = NULL;
+ result.locations_size = 0;
+ }
+
+ mv_engine_config_h config;
+ err = mv_create_engine_config(&config);
+	if (MEDIA_VISION_ERROR_NONE != err) {
+		printf("ERROR: engine configuration is not created; code %i\n", err);
+		destroy_recognition_result(&result);
+		int err2 = mv_destroy_source(source);
+		if (MEDIA_VISION_ERROR_NONE != err2) {
+			printf("\nERROR: Errors were occurred during source destroying; "
+					"code %i\n", err2);
+		}
+		MEDIA_VISION_FUNCTION_LEAVE();
+		return err;
+	}
+
+ err = mv_image_recognize(source, targets, number_of_targets, config,
+ recognized_cb, &result);
+
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("\nERROR: Image is not recognized; code %i\n", err);
+
+ destroy_recognition_result(&result);
+
+ int err2 = mv_destroy_source(source);
+ if (MEDIA_VISION_ERROR_NONE != err2) {
+ printf("\nERROR: Errors were occurred during source "
+ "destroying; code %i\n", err2);
+ }
+ err2 = mv_destroy_engine_config(config);
+ if (MEDIA_VISION_ERROR_NONE != err2) {
+ printf("\nERROR: Errors were occurred during engine config "
+ "destroying; code %i\n", err2);
+ }
+
+ MEDIA_VISION_FUNCTION_LEAVE();
+
+ return err;
+ }
+
+ handle_recognition_result(&result, number_of_targets, source,
+ path_to_generated_image);
+
+ destroy_recognition_result(&result);
+
+ err = mv_destroy_source(source);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("\nERROR: Errors were occurred during source destroying; code %i\n",
+ err);
+
+ int err2 = mv_destroy_engine_config(config);
+ if (MEDIA_VISION_ERROR_NONE != err2) {
+ printf("\nERROR: Errors were occurred during engine config "
+ "destroying; code %i\n", err2);
+ }
+
+ MEDIA_VISION_FUNCTION_LEAVE();
+
+ return err;
+ }
+
+ err = mv_destroy_engine_config(config);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("\nERROR: Errors were occurred during engine config destroying; "
+ "code %i\n", err);
+
+ MEDIA_VISION_FUNCTION_LEAVE();
+
+ return err;
+ }
+
+ MEDIA_VISION_FUNCTION_LEAVE();
+
+ return err;
}
int perform_get_confidence(mv_image_object_h target)
{
- if (NULL == target)
- {
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- double confidence = 0;
- const int err = mv_image_object_get_recognition_rate(target, &confidence);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("\nError: confidence hasn't been received with error code %i\n", err);
- return err;
- }
-
- printf("\nConfidence has been successfully received. Its value equal %f.\n", confidence);
-
- return MEDIA_VISION_ERROR_NONE;
+ if (NULL == target)
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+
+ double confidence = 0;
+ const int err = mv_image_object_get_recognition_rate(target, &confidence);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("\nError: confidence hasn't been received with error code %i\n", err);
+ return err;
+ }
+
+ printf("\nConfidence has been successfully received. Its value equal %f.\n", confidence);
+
+ return MEDIA_VISION_ERROR_NONE;
}
int perform_set_label(mv_image_object_h target)
{
- if (NULL == target)
- {
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
+ if (NULL == target)
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- int label = 0;
+ int label = 0;
- while (input_int("Input label (int):", INT_MIN, INT_MAX,
- &label) == -1)
- {
- printf("Incorrect input! Try again.\n");
- }
+ while (input_int("Input label (int):", INT_MIN, INT_MAX, &label) == -1)
+ printf("Incorrect input! Try again.\n");
- const int err = mv_image_object_set_label(target, label);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("\nError: the label hasn't been set with error code %i\n", err);
- return err;
- }
+ const int err = mv_image_object_set_label(target, label);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("\nError: the label hasn't been set with error code %i\n", err);
+ return err;
+ }
- printf("\nLabel has been successfully set.\n");
+ printf("\nLabel has been successfully set.\n");
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
int perform_get_label(mv_image_object_h target)
{
- if (NULL == target)
- {
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- int label = 0;
- const int err = mv_image_object_get_label(target, &label);
- if (MEDIA_VISION_ERROR_NO_DATA == err)
- {
- printf("\nSelected image object haven't label.\n");
- return MEDIA_VISION_ERROR_NONE;
- }
- else if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("\nError: label hasn't been received with error code %i\n", err);
- return err;
- }
-
- printf("\nLabel has been successfully received. Its equal %i.\n", label);
-
- return MEDIA_VISION_ERROR_NONE;
+ if (NULL == target)
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+
+ int label = 0;
+ const int err = mv_image_object_get_label(target, &label);
+ if (MEDIA_VISION_ERROR_NO_DATA == err) {
+ printf("\nSelected image object haven't label.\n");
+ return MEDIA_VISION_ERROR_NONE;
+ } else if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("\nError: label hasn't been received with error code %i\n", err);
+ return err;
+ }
+
+ printf("\nLabel has been successfully received. Its equal %i.\n", label);
+
+ return MEDIA_VISION_ERROR_NONE;
}
int perform_recognize(mv_image_object_h *targets, int number_of_targets)
{
- MEDIA_VISION_FUNCTION_ENTER();
+ MEDIA_VISION_FUNCTION_ENTER();
- char *path_to_image = NULL;
- char *path_to_generated_image = NULL;
+ char *path_to_image = NULL;
+ char *path_to_generated_image = NULL;
- while (input_string("Input file name with image for recognizing:",
- 1024, &path_to_image) == -1)
- {
- printf("Incorrect input! Try again.\n");
- }
+ while (input_string("Input file name with image for recognizing:",
+ 1024, &path_to_image) == -1) {
+ printf("Incorrect input! Try again.\n");
+ }
- while (input_string("Input file name for generated image:",
- 1024, &path_to_generated_image) == -1)
- {
- printf("Incorrect input! Try again.\n");
- }
+ while (input_string("Input file name for generated image:",
+ 1024, &path_to_generated_image) == -1) {
+ printf("Incorrect input! Try again.\n");
+ }
- const int err = recognize_image(path_to_image, path_to_generated_image, targets,
- number_of_targets);
+ const int err = recognize_image(path_to_image, path_to_generated_image, targets,
+ number_of_targets);
- free(path_to_image);
- free(path_to_generated_image);
+ free(path_to_image);
+ free(path_to_generated_image);
- MEDIA_VISION_FUNCTION_LEAVE();
+ MEDIA_VISION_FUNCTION_LEAVE();
- return err;
+ return err;
}
int perform_load_image_object(char **path_to_object, mv_image_object_h *result)
{
- MEDIA_VISION_FUNCTION_ENTER();
+ MEDIA_VISION_FUNCTION_ENTER();
- if (NULL != (*result))
- {
- mv_image_object_destroy(*result);
- *result = NULL;
- }
+ if (NULL != (*result)) {
+ mv_image_object_destroy(*result);
+ *result = NULL;
+ }
- while (input_string("Input file name with image object to be loaded:",
- 1024, path_to_object) == -1)
- {
- printf("Incorrect input! Try again.\n");
- }
+ while (input_string("Input file name with image object to be loaded:",
+ 1024, path_to_object) == -1) {
+ printf("Incorrect input! Try again.\n");
+ }
- int err = mv_image_object_load(result, *path_to_object);
+ int err = mv_image_object_load(result, *path_to_object);
- if (MEDIA_VISION_ERROR_NONE != err && NULL != (*result))
- {
- printf("Error: object isn't loaded with error code %i\n", err);
- return err;
- }
+ if (MEDIA_VISION_ERROR_NONE != err && NULL != (*result)) {
+ printf("Error: object isn't loaded with error code %i\n", err);
+ return err;
+ }
- printf("\nObject successfully loaded\n");
+ printf("\nObject successfully loaded\n");
- MEDIA_VISION_FUNCTION_LEAVE();
- return err;
+ MEDIA_VISION_FUNCTION_LEAVE();
+ return err;
}
int perform_save_image_object(mv_image_object_h object)
{
- MEDIA_VISION_FUNCTION_ENTER();
+ MEDIA_VISION_FUNCTION_ENTER();
- int err = MEDIA_VISION_ERROR_NONE;
- char *path_to_object = NULL;
+ int err = MEDIA_VISION_ERROR_NONE;
+ char *path_to_object = NULL;
- while (input_string("Input file name to be generated for image object storing:",
- 1024, &path_to_object) == -1)
- {
- printf("Incorrect input! Try again.\n");
- }
+ while (input_string("Input file name to be generated for image object storing:",
+ 1024, &path_to_object) == -1) {
+ printf("Incorrect input! Try again.\n");
+ }
- err = mv_image_object_save(path_to_object, object);
+ err = mv_image_object_save(path_to_object, object);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("\nError during saving the image object. Error code is %i\n", err);
- free(path_to_object);
- return err;
- }
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("\nError during saving the image object. Error code is %i\n", err);
+ free(path_to_object);
+ return err;
+ }
- printf("\nObject successfully saved\n");
+ printf("\nObject successfully saved\n");
- free(path_to_object);
+ free(path_to_object);
- MEDIA_VISION_FUNCTION_LEAVE();
+ MEDIA_VISION_FUNCTION_LEAVE();
- return err;
+ return err;
}
int perform_generate_image_object(mv_image_object_h *result, char **path_to_image)
{
- MEDIA_VISION_FUNCTION_ENTER();
-
- if (NULL == path_to_image || NULL == result)
- {
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
-
- while (input_string("Input file name with image to be analyzed:",
- 1024, path_to_image) == -1)
- {
- printf("Incorrect input! Try again.\n");
- }
-
- mv_rectangle_s roi;
- const bool sel_roi = show_confirm_dialog("Select if you want to set ROI");
- if (sel_roi)
- {
- printf("\nInput ROI coordinates\n");
- while (input_int("Input x coordinate:", INT_MIN, INT_MAX,
- &(roi.point.x)) == -1)
- {
- printf("Incorrect input! Try again.\n");
- }
-
- while (input_int("Input y coordinate:", INT_MIN, INT_MAX,
- &(roi.point.y)) == -1)
- {
- printf("Incorrect input! Try again.\n");
- }
-
- while (input_int("Input ROI width:", INT_MIN, INT_MAX,
- &(roi.width)) == -1)
- {
- printf("Incorrect input! Try again.\n");
- }
-
- while (input_int("Input ROI height:", INT_MIN, INT_MAX,
- &(roi.height)) == -1)
- {
- printf("Incorrect input! Try again.\n");
- }
-
- }
-
- int err = generate_image_object_from_file(*path_to_image, sel_roi, roi, result);
-
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("\nError in generation image object. Error code is %i\n", err);
-
- if (NULL != (*result))
- {
- mv_image_object_destroy(*result);
- (*result) = NULL;
- }
-
- return err;
- }
-
- printf("\nObject successfully generated\n");
-
- MEDIA_VISION_FUNCTION_LEAVE();
-
- return err;
+ MEDIA_VISION_FUNCTION_ENTER();
+
+ if (NULL == path_to_image || NULL == result)
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+
+ while (input_string("Input file name with image to be analyzed:",
+ 1024, path_to_image) == -1) {
+ printf("Incorrect input! Try again.\n");
+ }
+
+ mv_rectangle_s roi;
+ const bool sel_roi = show_confirm_dialog("Select if you want to set ROI");
+ if (sel_roi) {
+ printf("\nInput ROI coordinates\n");
+ while (input_int("Input x coordinate:", INT_MIN, INT_MAX,
+ &(roi.point.x)) == -1) {
+ printf("Incorrect input! Try again.\n");
+ }
+
+ while (input_int("Input y coordinate:", INT_MIN, INT_MAX,
+ &(roi.point.y)) == -1) {
+ printf("Incorrect input! Try again.\n");
+ }
+
+ while (input_int("Input ROI width:", INT_MIN, INT_MAX,
+ &(roi.width)) == -1) {
+ printf("Incorrect input! Try again.\n");
+ }
+
+ while (input_int("Input ROI height:", INT_MIN, INT_MAX,
+ &(roi.height)) == -1) {
+ printf("Incorrect input! Try again.\n");
+ }
+
+ }
+
+ int err = generate_image_object_from_file(*path_to_image, sel_roi, roi, result);
+
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("\nError in generation image object. Error code is %i\n", err);
+
+ if (NULL != (*result)) {
+ mv_image_object_destroy(*result);
+ (*result) = NULL;
+ }
+
+ return err;
+ }
+
+ printf("\nObject successfully generated\n");
+
+ MEDIA_VISION_FUNCTION_LEAVE();
+
+ return err;
}
int perform_clone_image_object(mv_image_object_h src, mv_image_object_h *result)
{
- int err = mv_image_object_clone(src, result);
+ int err = mv_image_object_clone(src, result);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("\nError: object isn't cloned with error code %i\n", err);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("\nError: object isn't cloned with error code %i\n", err);
- int err2 = mv_image_object_destroy(*result);
- if (MEDIA_VISION_ERROR_NONE != err2)
- {
- printf("\nERROR: Errors were occurred during image object "
- "destroying; code %i\n", err);
- }
+ int err2 = mv_image_object_destroy(*result);
+ if (MEDIA_VISION_ERROR_NONE != err2) {
+ printf("\nERROR: Errors were occurred during image object "
+					"destroying; code %i\n", err2);
+ }
- (*result) = NULL;
+ (*result) = NULL;
- return err;
- }
+ return err;
+ }
- printf("\nObject successfully cloned\n");
+ printf("\nObject successfully cloned\n");
- return err;
+ return err;
}
int handle_tracking_result(
- mv_video_writer_h writer,
- mv_source_h frame,
- int frame_number,
- mv_quadrangle_s *location)
+ mv_video_writer_h writer,
+ mv_source_h frame,
+ int frame_number,
+ mv_quadrangle_s *location)
{
- unsigned char *data_buffer = NULL;
- unsigned int buffer_size = 0;
- image_data_s image_data;
-
- int err = mv_source_get_buffer(frame, &data_buffer, &buffer_size);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(
- "ERROR: Errors were occurred during getting buffer from the "
- "source; code %i\n",
- err);
- return err;
- }
-
- err = mv_source_get_width(frame, &image_data.image_width);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(
- "ERROR: Errors were occurred during getting width from the "
- "source; code %i\n",
- err);
- return err;
- }
-
- err = mv_source_get_height(frame, &image_data.image_height);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(
- "ERROR: Errors were occurred during getting height from the "
- "source; code %i\n",
- err);
- return err;
- }
-
- if (location)
- {
- printf(
- "Frame #%i: object is found."
- "Location: {%i, %i}; {%i, %i}; {%i, %i}; {%i, %i}.\n",
- frame_number,
- location->points[0].x,
- location->points[0].y,
- location->points[1].x,
- location->points[1].y,
- location->points[2].x,
- location->points[2].y,
- location->points[3].x,
- location->points[3].y);
- const int thickness = 2;
- const int color[] = {0, 255, 0};
-
- err = draw_quadrangle_on_buffer(
- *location,
- thickness,
- color,
- &image_data,
- data_buffer);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(
- "ERROR: Errors were occurred during drawing quadrangle on "
- "the frame; code %i\n",
- err);
- return err;
- }
- }
- else
- {
- usleep(1000000);
- printf("Frame #%i: object isn't found.\n", frame_number);
- }
-
- err = mv_video_writer_write_frame(writer, data_buffer);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(
- "ERROR: Errors were occurred during writing frame to the "
- "result video file; code %i\n",
- err);
- return err;
- }
-
- return err;
+ unsigned char *data_buffer = NULL;
+ unsigned int buffer_size = 0;
+ image_data_s image_data;
+
+ int err = mv_source_get_buffer(frame, &data_buffer, &buffer_size);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(
+ "ERROR: Errors were occurred during getting buffer from the "
+ "source; code %i\n",
+ err);
+ return err;
+ }
+
+ err = mv_source_get_width(frame, &image_data.image_width);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(
+ "ERROR: Errors were occurred during getting width from the "
+ "source; code %i\n",
+ err);
+ return err;
+ }
+
+ err = mv_source_get_height(frame, &image_data.image_height);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(
+ "ERROR: Errors were occurred during getting height from the "
+ "source; code %i\n",
+ err);
+ return err;
+ }
+
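+	/* A non-NULL location means the object was found in this frame: draw the quadrangle before writing */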
+ if (location) {
+ printf(
+ "Frame #%i: object is found."
+ "Location: {%i, %i}; {%i, %i}; {%i, %i}; {%i, %i}.\n",
+ frame_number,
+ location->points[0].x,
+ location->points[0].y,
+ location->points[1].x,
+ location->points[1].y,
+ location->points[2].x,
+ location->points[2].y,
+ location->points[3].x,
+ location->points[3].y);
+ const int thickness = 2;
+ const int color[] = {0, 255, 0};
+
+ err = draw_quadrangle_on_buffer(
+ *location,
+ thickness,
+ color,
+ &image_data,
+ data_buffer);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(
+ "ERROR: Errors were occurred during drawing quadrangle on "
+ "the frame; code %i\n",
+ err);
+ return err;
+ }
+ } else {
+ usleep(1000000);
+ printf("Frame #%i: object isn't found.\n", frame_number);
+ }
+
+ err = mv_video_writer_write_frame(writer, data_buffer);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(
+ "ERROR: Errors were occurred during writing frame to the "
+ "result video file; code %i\n",
+ err);
+ return err;
+ }
+
+ return err;
}
-typedef struct
-{
- mv_image_tracking_model_h target;
- mv_video_writer_h writer;
- int frame_number;
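+/* Context shared by the video reader callbacks: tracking model, output writer and current frame number */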
+typedef struct {
+ mv_image_tracking_model_h target;
+ mv_video_writer_h writer;
+ int frame_number;
} tracking_cb_data;
void tracked_cb(
- mv_source_h source,
- mv_image_object_h image_object,
- mv_engine_config_h engine_cfg,
- mv_quadrangle_s *location,
- void *user_data)
+ mv_source_h source,
+ mv_image_object_h image_object,
+ mv_engine_config_h engine_cfg,
+ mv_quadrangle_s *location,
+ void *user_data)
{
- MEDIA_VISION_FUNCTION_ENTER();
+ MEDIA_VISION_FUNCTION_ENTER();
- if (NULL == user_data)
- {
- return;
- }
+ if (NULL == user_data)
+ return;
- tracking_cb_data *cb_data = (tracking_cb_data*)user_data;
+ tracking_cb_data *cb_data = (tracking_cb_data*)user_data;
- handle_tracking_result(cb_data->writer, source, cb_data->frame_number, location);
+ handle_tracking_result(cb_data->writer, source, cb_data->frame_number, location);
- MEDIA_VISION_FUNCTION_LEAVE();
+ MEDIA_VISION_FUNCTION_LEAVE();
}
void new_frame_cb(
- char *buffer,
- unsigned int buffer_size,
- image_data_s image_data,
- void *user_data)
+ char *buffer,
+ unsigned int buffer_size,
+ image_data_s image_data,
+ void *user_data)
{
- if (NULL == user_data)
- {
- return;
- }
+ if (NULL == user_data)
+ return;
- mv_source_h frame = NULL;
+ mv_source_h frame = NULL;
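+/* Local cleanup helper: destroys the per-frame source if it was created */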
#define release_resources() \
- if (frame) \
- { \
- const int err2 = mv_destroy_source(frame); \
- if (MEDIA_VISION_ERROR_NONE != err2) \
- { \
- printf( \
- "\nERROR: Errors were occurred during source destroying; " \
- "code %i\n", \
- err2); \
- } \
- }
-
- tracking_cb_data *cb_data = (tracking_cb_data*)user_data;
-
- ++(cb_data->frame_number);
-
- int err = mv_create_source(&frame);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(
- "\nERROR: Errors were occurred during source creating; "
- "code %i\n",
- err);
- release_resources();
- return;
- }
-
- err = mv_source_fill_by_buffer(
- frame,
- buffer,
- buffer_size,
- image_data.image_width,
- image_data.image_height,
- image_data.image_colorspace);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("ERROR: mv_source_h for frame is not filled; code %i\n", err);
- release_resources();
- return;
- }
-
- err = mv_image_track(
- frame,
- cb_data->target,
- NULL,
- tracked_cb,
- cb_data);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(
- "ERROR: Errors were occurred during tracking object on "
- "the video; code %i\n",
- err);
- release_resources();
- return;
- }
-
- release_resources()
+ if (frame) { \
+ const int err2 = mv_destroy_source(frame); \
+ if (MEDIA_VISION_ERROR_NONE != err2) { \
+ printf( \
+ "\nERROR: Errors were occurred during source destroying; " \
+ "code %i\n", \
+ err2); \
+ } \
+ }
+
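+	/* Wrap the raw frame buffer in an mv_source_h and run tracking on it; tracked_cb handles the result */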
+ tracking_cb_data *cb_data = (tracking_cb_data*)user_data;
+
+ ++(cb_data->frame_number);
+
+ int err = mv_create_source(&frame);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(
+ "\nERROR: Errors were occurred during source creating; "
+ "code %i\n",
+ err);
+ release_resources();
+ return;
+ }
+
+ err = mv_source_fill_by_buffer(
+ frame,
+ buffer,
+ buffer_size,
+ image_data.image_width,
+ image_data.image_height,
+ image_data.image_colorspace);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("ERROR: mv_source_h for frame is not filled; code %i\n", err);
+ release_resources();
+ return;
+ }
+
+ err = mv_image_track(
+ frame,
+ cb_data->target,
+ NULL,
+ tracked_cb,
+ cb_data);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(
+ "ERROR: Errors were occurred during tracking object on "
+ "the video; code %i\n",
+ err);
+ release_resources();
+ return;
+ }
+
+ release_resources()
#undef release_resources()
}
void eos_frame_cb(
- void *user_data)
+ void *user_data)
{
- if (NULL == user_data)
- {
- printf("ERROR: eos callback can't stop tracking process.");
- return;
- }
+ if (NULL == user_data) {
+ printf("ERROR: eos callback can't stop tracking process.");
+ return;
+ }
- pthread_mutex_unlock((pthread_mutex_t*)user_data);
+ pthread_mutex_unlock((pthread_mutex_t*)user_data);
}
int perform_track(mv_image_tracking_model_h target)
{
- if (NULL == target)
- {
- printf("\nTarget is invalid. It is impossible to track of this target.\n");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
+ if (NULL == target) {
+ printf("\nTarget is invalid. It is impossible to track of this target.\n");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
- MEDIA_VISION_FUNCTION_ENTER();
+ MEDIA_VISION_FUNCTION_ENTER();
- mv_video_reader_h reader = NULL;
- mv_video_writer_h writer = NULL;
- char *path_to_video = NULL;
- char *path_to_generated_video = NULL;
- image_data_s image_data = {0};
- unsigned int fps = 0;
+ mv_video_reader_h reader = NULL;
+ mv_video_writer_h writer = NULL;
+ char *path_to_video = NULL;
+ char *path_to_generated_video = NULL;
+ image_data_s image_data = {0};
+ unsigned int fps = 0;
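+/* Local cleanup helper: releases the video reader, the video writer and the path strings, if they were created */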
#define release_resources() \
- int err2 = MEDIA_VISION_ERROR_NONE; \
- if (reader) \
- { \
- err2 = mv_destroy_video_reader(reader); \
- if (MEDIA_VISION_ERROR_NONE != err2) \
- { \
- printf( \
- "\nERROR: Errors were occurred during video reader destroying; " \
- "code %i\n", \
- err2); \
- } \
- } \
- if (writer) \
- { \
- err2 = mv_destroy_video_writer(writer); \
- if (MEDIA_VISION_ERROR_NONE != err2) \
- { \
- printf( \
- "\nERROR: Errors were occurred during video writer destroying; " \
- "code %i\n", \
- err2); \
- } \
- } \
- if (path_to_video) \
- { \
- free(path_to_video); \
- } \
- if (path_to_generated_video) \
- { \
- free(path_to_generated_video); \
- }
-
- while (input_string("Input file name with video for tracking:",
- 1024, &path_to_video) == -1)
- {
- printf("Incorrect input! Try again.\n");
- }
-
- while (input_string("Input file name for generated video:",
- 1024, &path_to_generated_video) == -1)
- {
- printf("Incorrect input! Try again.\n");
- }
-
- int err = mv_create_video_reader(&reader);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("\nERROR: Errors were occurred during video reader creating; "
- "code %i\n", err);
- release_resources();
- MEDIA_VISION_FUNCTION_LEAVE();
- return err;
- }
-
- err = mv_create_video_writer(&writer);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(
- "\nERROR: Errors were occurred during video writer creating; "
- "code %i\n",
- err);
- release_resources();
- MEDIA_VISION_FUNCTION_LEAVE();
- return err;
- }
-
- err = mv_video_reader_load(
- reader,
- path_to_video,
- &image_data,
- &fps);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("\nERROR: Errors were occurred during video loading; code %i\n", err);
- release_resources();
- MEDIA_VISION_FUNCTION_LEAVE();
- return err;
- }
-
- printf("Receive frame metadata: wxh - %ux%u, fps - %u, format - %d\n",
- image_data.image_width, image_data.image_height, fps, image_data.image_colorspace);
-
- // Temporary we accept only RGB888
- image_data.image_colorspace = MEDIA_VISION_COLORSPACE_RGB888;
-
- err = mv_video_writer_init(
- writer,
- path_to_generated_video,
- image_data,
- fps);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(
- "\nERROR: Errors were occurred during video writer initializing; "
- "code %i\n",
- err);
- release_resources();
- MEDIA_VISION_FUNCTION_LEAVE();
- return err;
- }
-
- tracking_cb_data cb_data;
- cb_data.target = target;
- cb_data.writer = writer;
- cb_data.frame_number = 0;
- err = mv_video_reader_set_new_sample_cb(reader, new_frame_cb, &cb_data);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(
- "\nERROR: Errors were occurred during set new frame callback; "
- "code %i\n",
- err);
- release_resources();
- MEDIA_VISION_FUNCTION_LEAVE();
- return err;
- }
-
- pthread_mutex_t block_during_tracking_mutex;
- pthread_mutex_init(&block_during_tracking_mutex, NULL);
- pthread_mutex_lock(&block_during_tracking_mutex);
- err = mv_video_reader_set_eos_cb(reader, eos_frame_cb, &block_during_tracking_mutex);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(
- "\nERROR: Errors were occurred during set new frame callback; "
- "code %i\n",
- err);
- release_resources();
- pthread_mutex_unlock(&block_during_tracking_mutex);
- pthread_mutex_destroy(&block_during_tracking_mutex);
- MEDIA_VISION_FUNCTION_LEAVE();
- return err;
- }
-
- err = mv_video_reader_start(reader);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(
- "\nERROR: Errors were occurred during video reading starts; "
- "code %i\n",
- err);
- release_resources();
- pthread_mutex_unlock(&block_during_tracking_mutex);
- pthread_mutex_destroy(&block_during_tracking_mutex);
- MEDIA_VISION_FUNCTION_LEAVE();
- return err;
- }
-
- pthread_mutex_lock(&block_during_tracking_mutex);
- pthread_mutex_unlock(&block_during_tracking_mutex);
- pthread_mutex_destroy(&block_during_tracking_mutex);
- release_resources();
+ int err2 = MEDIA_VISION_ERROR_NONE; \
+ if (reader) { \
+ err2 = mv_destroy_video_reader(reader); \
+ if (MEDIA_VISION_ERROR_NONE != err2) { \
+ printf( \
+ "\nERROR: Errors were occurred during video reader destroying; " \
+ "code %i\n", \
+ err2); \
+ } \
+ } \
+ if (writer) { \
+ err2 = mv_destroy_video_writer(writer); \
+ if (MEDIA_VISION_ERROR_NONE != err2) { \
+ printf( \
+ "\nERROR: Errors were occurred during video writer destroying; " \
+ "code %i\n", \
+ err2); \
+ } \
+ } \
+ if (path_to_video) { \
+ free(path_to_video); \
+ } \
+ if (path_to_generated_video) { \
+ free(path_to_generated_video); \
+ }
+
+ while (input_string("Input file name with video for tracking:",
+ 1024, &path_to_video) == -1) {
+ printf("Incorrect input! Try again.\n");
+ }
+
+ while (input_string("Input file name for generated video:",
+ 1024, &path_to_generated_video) == -1) {
+ printf("Incorrect input! Try again.\n");
+ }
+
+ int err = mv_create_video_reader(&reader);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("\nERROR: Errors were occurred during video reader creating; "
+ "code %i\n", err);
+ release_resources();
+ MEDIA_VISION_FUNCTION_LEAVE();
+ return err;
+ }
+
+ err = mv_create_video_writer(&writer);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(
+ "\nERROR: Errors were occurred during video writer creating; "
+ "code %i\n",
+ err);
+ release_resources();
+ MEDIA_VISION_FUNCTION_LEAVE();
+ return err;
+ }
+
+ err = mv_video_reader_load(
+ reader,
+ path_to_video,
+ &image_data,
+ &fps);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("\nERROR: Errors were occurred during video loading; code %i\n", err);
+ release_resources();
+ MEDIA_VISION_FUNCTION_LEAVE();
+ return err;
+ }
+
+ printf("Receive frame metadata: wxh - %ux%u, fps - %u, format - %d\n",
+ image_data.image_width, image_data.image_height, fps, image_data.image_colorspace);
+
+	/* Temporarily we accept only RGB888 */
+ image_data.image_colorspace = MEDIA_VISION_COLORSPACE_RGB888;
+
+ err = mv_video_writer_init(
+ writer,
+ path_to_generated_video,
+ image_data,
+ fps);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(
+ "\nERROR: Errors were occurred during video writer initializing; "
+ "code %i\n",
+ err);
+ release_resources();
+ MEDIA_VISION_FUNCTION_LEAVE();
+ return err;
+ }
+
+ tracking_cb_data cb_data;
+ cb_data.target = target;
+ cb_data.writer = writer;
+ cb_data.frame_number = 0;
+ err = mv_video_reader_set_new_sample_cb(reader, new_frame_cb, &cb_data);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(
+ "\nERROR: Errors were occurred during set new frame callback; "
+ "code %i\n",
+ err);
+ release_resources();
+ MEDIA_VISION_FUNCTION_LEAVE();
+ return err;
+ }
+
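+	/* Lock the mutex now; eos_frame_cb unlocks it, so the second lock below
+	 * blocks until the whole video has been processed */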
+ pthread_mutex_t block_during_tracking_mutex;
+ pthread_mutex_init(&block_during_tracking_mutex, NULL);
+ pthread_mutex_lock(&block_during_tracking_mutex);
+ err = mv_video_reader_set_eos_cb(reader, eos_frame_cb, &block_during_tracking_mutex);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(
+ "\nERROR: Errors were occurred during set new frame callback; "
+ "code %i\n",
+ err);
+ release_resources();
+ pthread_mutex_unlock(&block_during_tracking_mutex);
+ pthread_mutex_destroy(&block_during_tracking_mutex);
+ MEDIA_VISION_FUNCTION_LEAVE();
+ return err;
+ }
+
+ err = mv_video_reader_start(reader);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(
+ "\nERROR: Errors were occurred during video reading starts; "
+ "code %i\n",
+ err);
+ release_resources();
+ pthread_mutex_unlock(&block_during_tracking_mutex);
+ pthread_mutex_destroy(&block_during_tracking_mutex);
+ MEDIA_VISION_FUNCTION_LEAVE();
+ return err;
+ }
+
+ pthread_mutex_lock(&block_during_tracking_mutex);
+ pthread_mutex_unlock(&block_during_tracking_mutex);
+ pthread_mutex_destroy(&block_during_tracking_mutex);
+ release_resources();
#undef release_resources()
- printf("\nTracking process is finished\n");
+ printf("\nTracking process is finished\n");
- MEDIA_VISION_FUNCTION_LEAVE();
+ MEDIA_VISION_FUNCTION_LEAVE();
- return err;
+ return err;
}
int perform_save_image_tracking_model(mv_image_tracking_model_h model)
{
- MEDIA_VISION_FUNCTION_ENTER();
+ MEDIA_VISION_FUNCTION_ENTER();
- int err = MEDIA_VISION_ERROR_NONE;
- char *path_to_file = NULL;
+ int err = MEDIA_VISION_ERROR_NONE;
+ char *path_to_file = NULL;
- while (input_string(
- "Input file name to be generated for image tracking model storing:",
- 1024,
- &path_to_file) == -1)
- {
- printf("Incorrect input! Try again.\n");
- }
+ while (input_string(
+ "Input file name to be generated for image tracking model storing:",
+ 1024, &path_to_file) == -1) {
+ printf("Incorrect input! Try again.\n");
+ }
- err = mv_image_tracking_model_save(path_to_file, model);
+ err = mv_image_tracking_model_save(path_to_file, model);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf(
- "\nError during saving the image tracking model. "
- "Error code is %i\n",
- err);
- free(path_to_file);
- return err;
- }
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf(
+ "\nError during saving the image tracking model. "
+ "Error code is %i\n",
+ err);
+ free(path_to_file);
+ return err;
+ }
- printf("\nTracking model successfully saved\n");
+ printf("\nTracking model successfully saved\n");
- free(path_to_file);
+ free(path_to_file);
- MEDIA_VISION_FUNCTION_LEAVE();
+ MEDIA_VISION_FUNCTION_LEAVE();
- return err;
+ return err;
}
int perform_load_image_tracking_model(
- char **path_to_file, mv_image_tracking_model_h *result)
+ char **path_to_file, mv_image_tracking_model_h *result)
{
- MEDIA_VISION_FUNCTION_ENTER();
+ MEDIA_VISION_FUNCTION_ENTER();
- while (input_string(
- "Input file name with image tracking model to be loaded:",
- 1024,
- path_to_file) == -1)
- {
- printf("Incorrect input! Try again.\n");
- }
+ while (input_string(
+ "Input file name with image tracking model to be loaded:",
+ 1024, path_to_file) == -1) {
+ printf("Incorrect input! Try again.\n");
+ }
- int err = mv_image_tracking_model_load(*path_to_file, result);
+ int err = mv_image_tracking_model_load(*path_to_file, result);
- if (MEDIA_VISION_ERROR_NONE != err && NULL != (*result))
- {
- printf("Error: tracking model isn't loaded with error code %i\n", err);
+ if (MEDIA_VISION_ERROR_NONE != err && NULL != (*result)) {
+ printf("Error: tracking model isn't loaded with error code %i\n", err);
- MEDIA_VISION_FUNCTION_LEAVE();
+ MEDIA_VISION_FUNCTION_LEAVE();
- return err;
- }
+ return err;
+ }
- printf("\nTracking model successfully loaded\n");
+ printf("\nTracking model successfully loaded\n");
- MEDIA_VISION_FUNCTION_LEAVE();
+ MEDIA_VISION_FUNCTION_LEAVE();
- return err;
+ return err;
}
int perform_clone_image_tracking_model(
- mv_image_tracking_model_h src,
- mv_image_tracking_model_h *result)
+ mv_image_tracking_model_h src,
+ mv_image_tracking_model_h *result)
{
- MEDIA_VISION_FUNCTION_ENTER();
+ MEDIA_VISION_FUNCTION_ENTER();
- int err = mv_image_tracking_model_clone(src, result);
+ int err = mv_image_tracking_model_clone(src, result);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("\nError: tracking model isn't cloned with error code %i\n", err);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("\nError: tracking model isn't cloned with error code %i\n", err);
- int err2 = mv_image_tracking_model_destroy(*result);
- if (MEDIA_VISION_ERROR_NONE != err2)
- {
- printf("\nERROR: Errors were occurred during tracking model "
- "destroying; code %i\n", err);
- }
+ int err2 = mv_image_tracking_model_destroy(*result);
+ if (MEDIA_VISION_ERROR_NONE != err2) {
+ printf("\nERROR: Errors were occurred during tracking model "
+					"destroying; code %i\n", err2);
+ }
- (*result) = NULL;
+ (*result) = NULL;
- MEDIA_VISION_FUNCTION_LEAVE();
+ MEDIA_VISION_FUNCTION_LEAVE();
- return err;
- }
+ return err;
+ }
- printf("\nTracking model successfully cloned\n");
+ printf("\nTracking model successfully cloned\n");
- MEDIA_VISION_FUNCTION_LEAVE();
+ MEDIA_VISION_FUNCTION_LEAVE();
- return err;
+ return err;
}
int perform_refresh_image_tracking_model(mv_image_tracking_model_h target)
{
- if (NULL == target)
- {
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
+ if (NULL == target)
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- const int err = mv_image_tracking_model_refresh(target, NULL);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("\nError: tracking model isn't refreshed with error code %i\n", err);
- return err;
- }
+ const int err = mv_image_tracking_model_refresh(target, NULL);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("\nError: tracking model isn't refreshed with error code %i\n", err);
+ return err;
+ }
- printf("\nTracking model is successfully refreshed.\n");
+ printf("\nTracking model is successfully refreshed.\n");
- return MEDIA_VISION_ERROR_NONE;
+ return MEDIA_VISION_ERROR_NONE;
}
void show_testing_objects(const char *title, GArray *testing_objects)
{
- printf("\n");
- int i = 0;
- if (1 > testing_objects->len)
- {
- printf("There are no created objects.\n");
- }
- else
- {
- printf("%s:\n", title);
- printf("-------------------------------------------------------------------------------------\n");
- for (i = 0; i < testing_objects->len; ++i)
- {
- testing_object_h temp = g_array_index (testing_objects, testing_object_h, i);
- if (OBJECT_TYPE_IMAGE_OBJECT == temp->object_type)
- {
- printf("Image object ");
- }
- else if (OBJECT_TYPE_IMAGE_TRACKING_MODEL == temp->object_type)
- {
- printf("Image tracking model ");
- }
- else
- {
- printf("Unknown testing object ");
- }
- printf("#%i. %s\n", i, temp->actual_label);
- }
- printf("-------------------------------------------------------------------------------------\n");
- }
+ printf("\n");
+ int i = 0;
+ if (1 > testing_objects->len) {
+ printf("There are no created objects.\n");
+ } else {
+ printf("%s:\n", title);
+ printf("-------------------------------------------------------------------------------------\n");
+ for (i = 0; i < testing_objects->len; ++i) {
+ testing_object_h temp = g_array_index(testing_objects, testing_object_h, i);
+ if (OBJECT_TYPE_IMAGE_OBJECT == temp->object_type)
+ printf("Image object ");
+ else if (OBJECT_TYPE_IMAGE_TRACKING_MODEL == temp->object_type)
+ printf("Image tracking model ");
+ else
+ printf("Unknown testing object ");
+
+ printf("#%i. %s\n", i, temp->actual_label);
+ }
+ printf("-------------------------------------------------------------------------------------\n");
+ }
}
int select_testing_object(GArray *testing_objects, testing_object_h *result, char *title)
{
- if (0 == testing_objects->len)
- {
- printf("Firstly you must create at least one testing object.\n");
- return MEDIA_VISION_ERROR_INVALID_OPERATION;
- }
-
- show_testing_objects(title, testing_objects);
- int sel_index = 0;
- while (input_int("Input number of element:", 0,
- testing_objects->len - 1, &sel_index) == -1)
- {
- printf("Incorrect input! Try again.\n");
- }
- (*result) = g_array_index(testing_objects, testing_object_h, sel_index);
- return MEDIA_VISION_ERROR_NONE;
+ if (0 == testing_objects->len) {
+ printf("Firstly you must create at least one testing object.\n");
+ return MEDIA_VISION_ERROR_INVALID_OPERATION;
+ }
+
+ show_testing_objects(title, testing_objects);
+ int sel_index = 0;
+ while (input_int("Input number of element:", 0,
+ testing_objects->len - 1, &sel_index) == -1) {
+ printf("Incorrect input! Try again.\n");
+ }
+ (*result) = g_array_index(testing_objects, testing_object_h, sel_index);
+ return MEDIA_VISION_ERROR_NONE;
}
int select_testing_object_index(GArray *testing_objects, guint *result_index, char *title)
{
- if (0 == testing_objects->len)
- {
- printf("Firstly you must create at least one testing object.\n");
- return MEDIA_VISION_ERROR_INVALID_OPERATION;
- }
-
- show_testing_objects(title, testing_objects);
-
- int sel_index = 0;
- while (input_int("Input number of element:", 0,
- testing_objects->len - 1, &sel_index) == -1)
- {
- printf("Incorrect input! Try again.\n");
- }
- (*result_index) = sel_index;
- return MEDIA_VISION_ERROR_NONE;
+ if (0 == testing_objects->len) {
+ printf("Firstly you must create at least one testing object.\n");
+ return MEDIA_VISION_ERROR_INVALID_OPERATION;
+ }
+
+ show_testing_objects(title, testing_objects);
+
+ int sel_index = 0;
+ while (input_int("Input number of element:", 0,
+ testing_objects->len - 1, &sel_index) == -1) {
+ printf("Incorrect input! Try again.\n");
+ }
+ (*result_index) = sel_index;
+ return MEDIA_VISION_ERROR_NONE;
}
int add_testing_object(GArray *testing_objects, testing_object_h object)
{
- if (NULL == object)
- {
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
- g_array_append_val(testing_objects, object);
- return MEDIA_VISION_ERROR_NONE;
+ if (NULL == object)
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+
+ g_array_append_val(testing_objects, object);
+ return MEDIA_VISION_ERROR_NONE;
}
int remove_testing_object(GArray *testing_objects, guint index)
{
- if (index >= testing_objects->len)
- {
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
- g_array_remove_index(testing_objects, index);
- return MEDIA_VISION_ERROR_NONE;
+ if (index >= testing_objects->len)
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+
+ g_array_remove_index(testing_objects, index);
+ return MEDIA_VISION_ERROR_NONE;
}
void perform_recognition_cases(GArray *image_objects)
{
- const char *names[] = {
- "Show created set of image objects",
- "Generate new image object from source image (mv_image_object_fill )",
- "Load existed image object from file (mv_image_object_load)",
- "Clone existed image object (mv_image_object_clone)",
- "Create empty image object (mv_image_object_create)",
- "Save existed image object to the file (mv_image_object_save)",
- "Remove image object from created set (mv_image_object_destroy)",
- "Get confidence from existed image object (mv_image_object_get_recognition_rate)",
- "Recognize all image objects on image (mv_image_recognize)",
- "Set label for existed image object (mv_image_set_label_of_object)",
- "Get label from existed image object (mv_image_get_label_of_object)",
- "Back to the main menu"};
-
- int number_of_options = sizeof(names) / sizeof(names[0]);
- int options[number_of_options];
- int index = 0;
- for (; index < number_of_options; ++index)
- {
- options[index] = index + 1;
- }
-
- while (1)
- {
- int err = MEDIA_VISION_ERROR_NONE;
-
- int sel_opt = show_menu("Select action:", options, names, number_of_options);
-
- switch (sel_opt)
- {
- case 1: // Show created set of image objects
- {
- show_testing_objects("Set of image objects", image_objects);
- break;
- }
- case 2: // Generate new image object from source image (mv_image_object_fill)
- {
- mv_image_object_h temporary = NULL;
- char *path_to_image = NULL;
-
- err = perform_generate_image_object(&temporary, &path_to_image);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("Generation failed (error code - %i)\n", err);
- if (NULL != path_to_image)
- {
- free(path_to_image);
- }
- break;
- }
-
- testing_object_h added_object;
- testing_object_create(&added_object);
- testing_object_fill(added_object, temporary,
- OBJECT_TYPE_IMAGE_OBJECT, SOURCE_TYPE_GENERATION, path_to_image);
-
- if (NULL != path_to_image)
- {
- free(path_to_image);
- }
-
- add_testing_object(image_objects, added_object);
- break;
- }
- case 3: // Load existed image object from file (mv_image_object_load)
- {
- mv_image_object_h temporary_image_object = NULL;
- char *path_to_object = NULL;
-
- err = perform_load_image_object(
- &path_to_object, &temporary_image_object);
-
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("Loading failed (error code - %i)\n", err);
- break;
- }
-
- testing_object_h added_object = NULL;
- testing_object_create(&added_object);
- testing_object_fill(
- added_object,
- temporary_image_object,
- OBJECT_TYPE_IMAGE_OBJECT,
- SOURCE_TYPE_LOADING,
- path_to_object);
-
- free(path_to_object);
-
- add_testing_object(image_objects, added_object);
- break;
- }
- case 4: // Clone existed image object (mv_image_object_clone)
- {
- if (image_objects->len <= 0)
- {
- printf("\nFirstly you must create at least one image object.\n");
- break;
- }
-
- testing_object_h temporary_testing_object = NULL;
- select_testing_object(
- image_objects,
- &temporary_testing_object,
- "Select the object you want to clone");
-
- mv_image_object_h temporary_image_object = NULL;
- perform_clone_image_object(
- temporary_testing_object->entity,
- &temporary_image_object);
-
- testing_object_h added_object = NULL;
- testing_object_create(&added_object);
- testing_object_fill(
- added_object,
- temporary_image_object,
- OBJECT_TYPE_IMAGE_OBJECT,
- SOURCE_TYPE_CLONING,
- temporary_testing_object);
-
- add_testing_object(image_objects, added_object);
- break;
- }
- case 5: // Create empty image object (mv_image_object_create)
- {
- mv_image_object_h temporary_image_object = NULL;
- int err = mv_image_object_create(&temporary_image_object);
-
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("ERROR: image object creation is failed with code %i\n", err);
- break;
- }
-
- testing_object_h added_object = NULL;
- testing_object_create(&added_object);
- testing_object_fill(
- added_object,
- temporary_image_object,
- OBJECT_TYPE_IMAGE_OBJECT,
- SOURCE_TYPE_EMPTY,
- NULL);
-
- add_testing_object(image_objects, added_object);
- printf("\nImage object successfully created\n");
- break;
- }
- case 6: // Save existed image object to the file (mv_image_object_save)
- {
- if (image_objects->len <= 0)
- {
- printf("\nFirstly you must create at least one image object.\n");
- break;
- }
-
- testing_object_h temporary_testing_object = NULL;
- select_testing_object(image_objects, &temporary_testing_object,
- "Select the object you want to save");
- perform_save_image_object(temporary_testing_object->entity);
- break;
- }
- case 7: // Remove image object from created set (mv_image_object_destroy)
- {
- if (image_objects->len <= 0)
- {
- printf("\nFirstly you must create at least one image object.\n");
- break;
- }
-
- guint selected_index;
- int err = select_testing_object_index(
- image_objects,
- &selected_index,
- "Select the object you want to remove");
- if (MEDIA_VISION_ERROR_NONE == err)
- {
- remove_testing_object(image_objects, selected_index);
- printf("\nImage object successfully removed\n");
- }
- break;
- }
- case 8: // Get confidence from existed image object (mv_image_object_get_recognition_rate)
- {
- if (image_objects->len <= 0)
- {
- printf("\nFirstly you must create at least one image object.\n");
- break;
- }
-
- testing_object_h temporary_testing_object = NULL;
- select_testing_object(image_objects, &temporary_testing_object,
- "Select the object from which you want getting confidence");
- perform_get_confidence(temporary_testing_object->entity);
- break;
- }
- case 9: // Recognize all image objects on image (mv_image_recognize)
- {
- if (image_objects->len <= 0)
- {
- printf("\nFirstly you must create at least one image object.\n");
- break;
- }
-
- mv_image_object_h *objects_pool = malloc(sizeof(mv_image_object_h) * image_objects->len);
- int index = 0;
- for (;index < image_objects->len; ++index)
- {
- objects_pool[index] = g_array_index(image_objects, testing_object_h, index)->entity;
- }
- perform_recognize(objects_pool, image_objects->len);
- free(objects_pool);
- break;
- }
- case 10: // Set label for existed image object (mv_image_object_set_label)
- {
- if (image_objects->len <= 0)
- {
- printf("\nFirstly you must create at least one image object.\n");
- break;
- }
-
- testing_object_h temporary_testing_object = NULL;
- select_testing_object(image_objects, &temporary_testing_object,
- "Select the object for which you want setting label");
- perform_set_label(temporary_testing_object->entity);
- break;
- }
- case 11: // Get label from existed image object (mv_image_object_get_label)
- {
- if (image_objects->len <= 0)
- {
- printf("\nFirstly you must create at least one image object.\n");
- break;
- }
-
- testing_object_h temporary_testing_object = NULL;
- select_testing_object(image_objects, &temporary_testing_object,
- "Select the object from which you want getting label");
- perform_get_label(temporary_testing_object->entity);
- break;
- }
- case 12: // Back to the main menu
- {
- return;
- }
- default:
- printf("Invalid option.\n");
- }
- }
+ const char *names[] = {
+ "Show created set of image objects",
+ "Generate new image object from source image (mv_image_object_fill )",
+ "Load existed image object from file (mv_image_object_load)",
+ "Clone existed image object (mv_image_object_clone)",
+ "Create empty image object (mv_image_object_create)",
+ "Save existed image object to the file (mv_image_object_save)",
+ "Remove image object from created set (mv_image_object_destroy)",
+ "Get confidence from existed image object (mv_image_object_get_recognition_rate)",
+ "Recognize all image objects on image (mv_image_recognize)",
+ "Set label for existed image object (mv_image_set_label_of_object)",
+ "Get label from existed image object (mv_image_get_label_of_object)",
+ "Back to the main menu"};
+
+ int number_of_options = sizeof(names) / sizeof(names[0]);
+ int options[number_of_options];
+ int index = 0;
+ for (; index < number_of_options; ++index)
+ options[index] = index + 1;
+
+ while (1) {
+ int err = MEDIA_VISION_ERROR_NONE;
+
+ int sel_opt = show_menu("Select action:", options, names, number_of_options);
+
+ switch (sel_opt) {
+ case 1: {
+ /* Show created set of image objects */
+ show_testing_objects("Set of image objects", image_objects);
+ break;
+ }
+ case 2: {
+ /* Generate new image object from source image (mv_image_object_fill) */
+ mv_image_object_h temporary = NULL;
+ char *path_to_image = NULL;
+
+ err = perform_generate_image_object(&temporary, &path_to_image);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("Generation failed (error code - %i)\n", err);
+ if (NULL != path_to_image)
+ free(path_to_image);
+
+ break;
+ }
+
+ testing_object_h added_object;
+ testing_object_create(&added_object);
+ testing_object_fill(added_object, temporary,
+ OBJECT_TYPE_IMAGE_OBJECT, SOURCE_TYPE_GENERATION, path_to_image);
+
+ if (NULL != path_to_image)
+ free(path_to_image);
+
+ add_testing_object(image_objects, added_object);
+ break;
+ }
+ case 3: {
+ /* Load existed image object from file (mv_image_object_load) */
+ mv_image_object_h temporary_image_object = NULL;
+ char *path_to_object = NULL;
+
+ err = perform_load_image_object(
+ &path_to_object, &temporary_image_object);
+
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("Loading failed (error code - %i)\n", err);
+ break;
+ }
+
+ testing_object_h added_object = NULL;
+ testing_object_create(&added_object);
+ testing_object_fill(
+ added_object,
+ temporary_image_object,
+ OBJECT_TYPE_IMAGE_OBJECT,
+ SOURCE_TYPE_LOADING,
+ path_to_object);
+
+ free(path_to_object);
+
+ add_testing_object(image_objects, added_object);
+ break;
+ }
+ case 4: {
+ /* Clone existed image object (mv_image_object_clone) */
+ if (image_objects->len <= 0) {
+ printf("\nFirstly you must create at least one image object.\n");
+ break;
+ }
+
+ testing_object_h temporary_testing_object = NULL;
+ select_testing_object(
+ image_objects,
+ &temporary_testing_object,
+ "Select the object you want to clone");
+
+ mv_image_object_h temporary_image_object = NULL;
+ perform_clone_image_object(
+ temporary_testing_object->entity,
+ &temporary_image_object);
+
+ testing_object_h added_object = NULL;
+ testing_object_create(&added_object);
+ testing_object_fill(
+ added_object,
+ temporary_image_object,
+ OBJECT_TYPE_IMAGE_OBJECT,
+ SOURCE_TYPE_CLONING,
+ temporary_testing_object);
+
+ add_testing_object(image_objects, added_object);
+ break;
+ }
+ case 5: {
+ /* Create empty image object (mv_image_object_create) */
+ mv_image_object_h temporary_image_object = NULL;
+ int err = mv_image_object_create(&temporary_image_object);
+
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("ERROR: image object creation is failed with code %i\n", err);
+ break;
+ }
+
+ testing_object_h added_object = NULL;
+ testing_object_create(&added_object);
+ testing_object_fill(
+ added_object,
+ temporary_image_object,
+ OBJECT_TYPE_IMAGE_OBJECT,
+ SOURCE_TYPE_EMPTY,
+ NULL);
+
+ add_testing_object(image_objects, added_object);
+ printf("\nImage object successfully created\n");
+ break;
+ }
+ case 6: {
+ /* Save existed image object to the file (mv_image_object_save) */
+ if (image_objects->len <= 0) {
+ printf("\nFirstly you must create at least one image object.\n");
+ break;
+ }
+
+ testing_object_h temporary_testing_object = NULL;
+ select_testing_object(image_objects, &temporary_testing_object,
+ "Select the object you want to save");
+ perform_save_image_object(temporary_testing_object->entity);
+ break;
+ }
+ case 7: {
+ /* Remove image object from created set (mv_image_object_destroy) */
+ if (image_objects->len <= 0) {
+ printf("\nFirstly you must create at least one image object.\n");
+ break;
+ }
+
+ guint selected_index;
+		err = select_testing_object_index(
+ image_objects,
+ &selected_index,
+ "Select the object you want to remove");
+ if (MEDIA_VISION_ERROR_NONE == err) {
+ remove_testing_object(image_objects, selected_index);
+ printf("\nImage object successfully removed\n");
+ }
+ break;
+ }
+ case 8: {
+		/* Get confidence from existing image object (mv_image_object_get_recognition_rate) */
+ if (image_objects->len <= 0) {
+			printf("\nFirst, create at least one image object.\n");
+ break;
+ }
+
+ testing_object_h temporary_testing_object = NULL;
+ select_testing_object(image_objects, &temporary_testing_object,
+				"Select the object whose confidence you want to get");
+ perform_get_confidence(temporary_testing_object->entity);
+ break;
+ }
+ case 9: {
+ /* Recognize all image objects on image (mv_image_recognize) */
+ if (image_objects->len <= 0) {
+			printf("\nFirst, create at least one image object.\n");
+ break;
+ }
+
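+			/* Gather the raw mv_image_object_h handles into one array so all objects can be recognized in a single call */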
+			mv_image_object_h *objects_pool = malloc(sizeof(mv_image_object_h) * image_objects->len);
+			if (NULL == objects_pool) {
+				printf("ERROR: out of memory\n");
+				break;
+			}
+			guint index = 0;
+			for (; index < image_objects->len; ++index)
+				objects_pool[index] = g_array_index(image_objects, testing_object_h, index)->entity;
+
+ perform_recognize(objects_pool, image_objects->len);
+ free(objects_pool);
+ break;
+ }
+ case 10: {
+		/* Set label for existing image object (mv_image_object_set_label) */
+ if (image_objects->len <= 0) {
+			printf("\nFirst, create at least one image object.\n");
+ break;
+ }
+
+ testing_object_h temporary_testing_object = NULL;
+ select_testing_object(image_objects, &temporary_testing_object,
+				"Select the object whose label you want to set");
+ perform_set_label(temporary_testing_object->entity);
+ break;
+ }
+ case 11: {
+		/* Get label from existing image object (mv_image_object_get_label) */
+ if (image_objects->len <= 0) {
+			printf("\nFirst, create at least one image object.\n");
+ break;
+ }
+
+ testing_object_h temporary_testing_object = NULL;
+ select_testing_object(image_objects, &temporary_testing_object,
+				"Select the object whose label you want to get");
+ perform_get_label(temporary_testing_object->entity);
+ break;
+ }
+ case 12: {
+ /* Back to the main menu */
+ return;
+ }
+ default:
+ printf("Invalid option.\n");
+ }
+ }
}
void perform_tracking_cases(GArray *image_objects, GArray *image_tracking_models)
{
- const char *names[] = {
- "Show created set of tracking models",
- "Create empty tracking model (mv_image_tracking_model_create)",
- "Generate model based on image object (mv_image_tracking_model_set_target)",
- "Load existed tracking model from file (mv_image_tracking_model_load)",
- "Clone existed tracking model (mv_image_tracking_model_clone)",
- "Save existed tracking model to the file (mv_image_tracking_model_save)",
- "Remove tracking model from created set (mv_image_tracking_model_destroy)",
- "Refresh tracking model (mv_image_tracking_model_refresh)",
- "Track (mv_image_track)",
- "Back to the main menu"};
-
- int number_of_options = sizeof(names) / sizeof(names[0]);
- int options[number_of_options];
- int index = 0;
- for (; index < number_of_options; ++index)
- {
- options[index] = index + 1;
- }
-
- while (1)
- {
- int err = MEDIA_VISION_ERROR_NONE;
-
- int sel_opt = show_menu("Select action:", options, names, number_of_options);
-
- switch (sel_opt)
- {
- case 1: // Show created set of tracking models
- {
- show_testing_objects("Set of image tracking models", image_tracking_models);
- break;
- }
- case 2: // Create empty tracking model (mv_image_tracking_model_create)
- {
- mv_image_tracking_model_h temporary_image_tracking_model = NULL;
-
- int err = mv_image_tracking_model_create(&temporary_image_tracking_model);
-
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("ERROR: tracking model creation is failed with code %i\n", err);
- break;
- }
-
- testing_object_h added_object = NULL;
- testing_object_create(&added_object);
- testing_object_fill(
- added_object,
- temporary_image_tracking_model,
- OBJECT_TYPE_IMAGE_TRACKING_MODEL,
- SOURCE_TYPE_EMPTY,
- NULL);
-
- add_testing_object(image_tracking_models, added_object);
- printf("\nTracking model successfully created\n");
- break;
- }
- case 3: // Generate model based on image object (mv_image_tracking_model_set_target)
- {
- if (image_objects->len <= 0)
- {
- printf("\nFirstly you must create at least one image object.\n");
- break;
- }
-
- mv_image_tracking_model_h temporary_image_tracking_model = NULL;
- err = mv_image_tracking_model_create(&temporary_image_tracking_model);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("Error: tracking model isn't created with error code %i\n", err);
- break;
- }
-
- testing_object_h temporary_testing_object = NULL;
- select_testing_object(
- image_objects,
- &temporary_testing_object,
- "Select the image object for tracking");
-
- err = mv_image_tracking_model_set_target(
- (mv_image_object_h)(temporary_testing_object->entity),
- temporary_image_tracking_model);
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("Error: target isn't set with error code %i\n", err);
- break;
- }
-
- testing_object_h added_object = NULL;
- testing_object_create(&added_object);
- testing_object_fill(
- added_object,
- temporary_image_tracking_model,
- OBJECT_TYPE_IMAGE_TRACKING_MODEL,
- SOURCE_TYPE_GENERATION,
- temporary_testing_object);
-
- add_testing_object(image_tracking_models, added_object);
- printf("\nTracking model successfully generated\n");
- break;
- }
- case 4: // Load existed tracking model from file (mv_image_tracking_model_load)
- {
- mv_image_tracking_model_h temporary_image_tracking_model = NULL;
- char *path_to_object = NULL;
-
- err = perform_load_image_tracking_model(
- &path_to_object, &temporary_image_tracking_model);
-
- if (MEDIA_VISION_ERROR_NONE != err)
- {
- printf("Loading failed (error code - %i)\n", err);
- break;
- }
-
- testing_object_h added_object = NULL;
- testing_object_create(&added_object);
- testing_object_fill(
- added_object,
- temporary_image_tracking_model,
- OBJECT_TYPE_IMAGE_TRACKING_MODEL,
- SOURCE_TYPE_LOADING,
- path_to_object);
-
- free(path_to_object);
-
- add_testing_object(image_tracking_models, added_object);
- break;
- }
- case 5: // Clone existed tracking model (mv_image_tracking_model_clone)
- {
- if (image_tracking_models->len <= 0)
- {
- printf(
- "\nFirstly you must create at least one image "
- "tracking model.\n");
- break;
- }
-
- testing_object_h temporary_testing_object = NULL;
- select_testing_object(
- image_tracking_models,
- &temporary_testing_object,
- "Select the tracking model you want to clone");
-
- mv_image_tracking_model_h temporary_image_tracking_model = NULL;
- perform_clone_image_tracking_model(
- temporary_testing_object->entity,
- &temporary_image_tracking_model);
-
- testing_object_h added_object = NULL;
- testing_object_create(&added_object);
- testing_object_fill(
- added_object,
- temporary_image_tracking_model,
- OBJECT_TYPE_IMAGE_TRACKING_MODEL,
- SOURCE_TYPE_CLONING,
- temporary_testing_object);
-
- add_testing_object(image_tracking_models, added_object);
- break;
- }
- case 6: // Save existed tracking model to the file (mv_image_tracking_model_save)
- {
- if (image_tracking_models->len <= 0)
- {
- printf(
- "\nFirstly you must create at least one image "
- "tracking model.\n");
- break;
- }
-
- testing_object_h temporary_testing_object = NULL;
- select_testing_object(
- image_tracking_models,
- &temporary_testing_object,
- "Select the tracking model you want to save");
-
- perform_save_image_tracking_model(temporary_testing_object->entity);
- break;
- }
- case 7: // Remove tracking model from created set (mv_image_tracking_model_destroy)
- {
- if (image_tracking_models->len <= 0)
- {
- printf(
- "\nFirstly you must create at least one image "
- "tracking model.\n");
- break;
- }
-
- guint selected_index;
- err = select_testing_object_index(
- image_tracking_models,
- &selected_index,
- "Select the object you want to remove");
-
- if (MEDIA_VISION_ERROR_NONE == err)
- {
- remove_testing_object(image_tracking_models, selected_index);
- printf("\nTracking model successfully removed\n");
- }
- break;
- }
- case 8: // Refresh tracking model (mv_image_tracking_model_refresh)
- {
- if (image_tracking_models->len <= 0)
- {
- printf(
- "\nFirstly you must create at least one image "
- "tracking model.\n");
- break;
- }
-
- testing_object_h temporary_testing_object = NULL;
- select_testing_object(
- image_tracking_models,
- &temporary_testing_object,
- "Select the tracking model you want to refresh");
-
- perform_refresh_image_tracking_model(temporary_testing_object->entity);
- break;
- }
- case 9: // Track (mv_image_track)
- {
- if (image_tracking_models->len <= 0)
- {
- printf(
- "\nFirstly you must create at least one image "
- "tracking model.\n");
- break;
- }
-
- testing_object_h temporary_testing_object = NULL;
- err = select_testing_object(
- image_tracking_models,
- &temporary_testing_object,
- "Select the object which you want to track on video");
-
- if (MEDIA_VISION_ERROR_NONE == err)
- {
- perform_track(temporary_testing_object->entity);
- }
- break;
- }
- case 10: // Back to the main menu
- {
- return;
- }
- }
- }
+ const char *names[] = {
+ "Show created set of tracking models",
+ "Create empty tracking model (mv_image_tracking_model_create)",
+ "Generate model based on image object (mv_image_tracking_model_set_target)",
+		"Load existing tracking model from file (mv_image_tracking_model_load)",
+		"Clone existing tracking model (mv_image_tracking_model_clone)",
+		"Save existing tracking model to file (mv_image_tracking_model_save)",
+ "Remove tracking model from created set (mv_image_tracking_model_destroy)",
+ "Refresh tracking model (mv_image_tracking_model_refresh)",
+ "Track (mv_image_track)",
+ "Back to the main menu"};
+
+ int number_of_options = sizeof(names) / sizeof(names[0]);
+ int options[number_of_options];
+ int index = 0;
+ for (; index < number_of_options; ++index)
+ options[index] = index + 1;
+
+ while (1) {
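+		/* Menu loop: show the list of tracking actions and handle one selection per iteration */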
+ int err = MEDIA_VISION_ERROR_NONE;
+
+ int sel_opt = show_menu("Select action:", options, names, number_of_options);
+
+ switch (sel_opt) {
+ case 1: {
+ /* Show created set of tracking models */
+ show_testing_objects("Set of image tracking models", image_tracking_models);
+ break;
+ }
+ case 2: {
+ /* Create empty tracking model (mv_image_tracking_model_create) */
+ mv_image_tracking_model_h temporary_image_tracking_model = NULL;
+
+			err = mv_image_tracking_model_create(&temporary_image_tracking_model);
+
+ if (MEDIA_VISION_ERROR_NONE != err) {
+				printf("ERROR: tracking model creation failed with code %i\n", err);
+ break;
+ }
+
+ testing_object_h added_object = NULL;
+ testing_object_create(&added_object);
+ testing_object_fill(
+ added_object,
+ temporary_image_tracking_model,
+ OBJECT_TYPE_IMAGE_TRACKING_MODEL,
+ SOURCE_TYPE_EMPTY,
+ NULL);
+
+ add_testing_object(image_tracking_models, added_object);
+ printf("\nTracking model successfully created\n");
+ break;
+ }
+ case 3: {
+ /* Generate model based on image object (mv_image_tracking_model_set_target) */
+ if (image_objects->len <= 0) {
+				printf("\nFirst, create at least one image object.\n");
+ break;
+ }
+
+ mv_image_tracking_model_h temporary_image_tracking_model = NULL;
+ err = mv_image_tracking_model_create(&temporary_image_tracking_model);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+				printf("Error: tracking model creation failed with error code %i\n", err);
+ break;
+ }
+
+ testing_object_h temporary_testing_object = NULL;
+ select_testing_object(
+ image_objects,
+ &temporary_testing_object,
+ "Select the image object for tracking");
+
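+			/* Bind the selected image object to the new tracking model as its tracking target */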
+ err = mv_image_tracking_model_set_target(
+ (mv_image_object_h)(temporary_testing_object->entity),
+ temporary_image_tracking_model);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+				printf("Error: failed to set target (error code %i)\n", err);
+ break;
+ }
+
+ testing_object_h added_object = NULL;
+ testing_object_create(&added_object);
+ testing_object_fill(
+ added_object,
+ temporary_image_tracking_model,
+ OBJECT_TYPE_IMAGE_TRACKING_MODEL,
+ SOURCE_TYPE_GENERATION,
+ temporary_testing_object);
+
+ add_testing_object(image_tracking_models, added_object);
+ printf("\nTracking model successfully generated\n");
+ break;
+ }
+ case 4: {
+			/* Load existing tracking model from file (mv_image_tracking_model_load) */
+ mv_image_tracking_model_h temporary_image_tracking_model = NULL;
+ char *path_to_object = NULL;
+
+ err = perform_load_image_tracking_model(
+ &path_to_object, &temporary_image_tracking_model);
+
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ printf("Loading failed (error code - %i)\n", err);
+ break;
+ }
+
+ testing_object_h added_object = NULL;
+ testing_object_create(&added_object);
+ testing_object_fill(
+ added_object,
+ temporary_image_tracking_model,
+ OBJECT_TYPE_IMAGE_TRACKING_MODEL,
+ SOURCE_TYPE_LOADING,
+ path_to_object);
+
+ free(path_to_object);
+
+ add_testing_object(image_tracking_models, added_object);
+ break;
+ }
+ case 5: {
+			/* Clone existing tracking model (mv_image_tracking_model_clone) */
+ if (image_tracking_models->len <= 0) {
+ printf(
+					"\nFirst, create at least one image "
+ "tracking model.\n");
+ break;
+ }
+
+ testing_object_h temporary_testing_object = NULL;
+ select_testing_object(
+ image_tracking_models,
+ &temporary_testing_object,
+ "Select the tracking model you want to clone");
+
+ mv_image_tracking_model_h temporary_image_tracking_model = NULL;
+ perform_clone_image_tracking_model(
+ temporary_testing_object->entity,
+ &temporary_image_tracking_model);
+
+ testing_object_h added_object = NULL;
+ testing_object_create(&added_object);
+ testing_object_fill(
+ added_object,
+ temporary_image_tracking_model,
+ OBJECT_TYPE_IMAGE_TRACKING_MODEL,
+ SOURCE_TYPE_CLONING,
+ temporary_testing_object);
+
+ add_testing_object(image_tracking_models, added_object);
+ break;
+ }
+ case 6: {
+			/* Save existing tracking model to file (mv_image_tracking_model_save) */
+ if (image_tracking_models->len <= 0) {
+ printf(
+					"\nFirst, create at least one image "
+ "tracking model.\n");
+ break;
+ }
+
+ testing_object_h temporary_testing_object = NULL;
+ select_testing_object(
+ image_tracking_models,
+ &temporary_testing_object,
+ "Select the tracking model you want to save");
+
+ perform_save_image_tracking_model(temporary_testing_object->entity);
+ break;
+ }
+ case 7: {
+ /* Remove tracking model from created set (mv_image_tracking_model_destroy) */
+ if (image_tracking_models->len <= 0) {
+ printf(
+					"\nFirst, create at least one image "
+ "tracking model.\n");
+ break;
+ }
+
+ guint selected_index;
+ err = select_testing_object_index(
+ image_tracking_models,
+ &selected_index,
+ "Select the object you want to remove");
+
+ if (MEDIA_VISION_ERROR_NONE == err) {
+ remove_testing_object(image_tracking_models, selected_index);
+ printf("\nTracking model successfully removed\n");
+ }
+ break;
+ }
+ case 8: {
+ /* Refresh tracking model (mv_image_tracking_model_refresh) */
+ if (image_tracking_models->len <= 0) {
+ printf(
+					"\nFirst, create at least one image "
+ "tracking model.\n");
+ break;
+ }
+
+ testing_object_h temporary_testing_object = NULL;
+ select_testing_object(
+ image_tracking_models,
+ &temporary_testing_object,
+ "Select the tracking model you want to refresh");
+
+ perform_refresh_image_tracking_model(temporary_testing_object->entity);
+ break;
+ }
+ case 9: {
+ /* Track (mv_image_track) */
+ if (image_tracking_models->len <= 0) {
+ printf(
+					"\nFirst, create at least one image "
+ "tracking model.\n");
+ break;
+ }
+
+ testing_object_h temporary_testing_object = NULL;
+ err = select_testing_object(
+ image_tracking_models,
+ &temporary_testing_object,
+ "Select the object which you want to track on video");
+
+ if (MEDIA_VISION_ERROR_NONE == err)
+ perform_track(temporary_testing_object->entity);
+
+ break;
+ }
+ case 10: {
+ /* Back to the main menu */
+ return;
+		}
+		default:
+			printf("Invalid option.\n");
+ }
+ }
}
int main(void)
{
- LOGI("Image Media Vision Testsuite is launched.");
-
- GArray *image_objects = g_array_new(FALSE, FALSE, sizeof(testing_object_h));
- GArray *image_tracking_models = g_array_new(FALSE, FALSE,sizeof(testing_object_h));
-
- const int options[3] = { 1, 2, 3 };
- const char *names[3] = {
- "Recognition cases",
- "Tracking cases",
- "Exit" };
-
- mv_image_object_h current_object = NULL;
-
- while(1)
- {
- char exit = 'n';
- int sel_opt = show_menu("Select action:", options, names, 3);
- switch (sel_opt)
- {
- case 1: // Recognition cases
- perform_recognition_cases(image_objects);
- break;
- case 2: // Tracking cases
- perform_tracking_cases(image_objects, image_tracking_models);
- break;
- case 3: // Exit
- exit = 'y';
- break;
- default:
- printf("Invalid option.\n");
- sel_opt = 0;
- continue;
- }
- if ('y' == exit)
- {
- sel_opt = 0;
- const int options_last[2] = { 1, 2 };
- const char *names_last[2] = { "No", "Yes" };
-
- while (sel_opt == 0)
- {
- sel_opt = show_menu("Are you sure?",
- options_last, names_last, 2);
- switch (sel_opt)
- {
- case 1:
- exit = 'n';
- break;
- case 2:
- exit = 'y';
- break;
- default:
- printf("Invalid option. Back to the main menu.");
- sel_opt = 0;
- break;
- }
- }
-
- if ('y' == exit)
- {
- break;
- }
- }
-
- }
-
- guint i = 0;
- for (i = 0; i < image_objects->len; ++i)
- {
- testing_object_h temp = g_array_index(
- image_objects,
- testing_object_h,
- i);
- testing_object_destroy(&temp);
- }
- g_array_free(image_objects, TRUE);
-
- for (i = 0; i < image_tracking_models->len; ++i)
- {
- testing_object_h temp = g_array_index(
- image_tracking_models,
- testing_object_h,
- i);
- testing_object_destroy(&temp);
- }
- g_array_free(image_tracking_models, TRUE);
-
- LOGI("Image Media Vision Testsuite is closed");
-
- return 0;
+ LOGI("Image Media Vision Testsuite is launched.");
+
+ GArray *image_objects = g_array_new(FALSE, FALSE, sizeof(testing_object_h));
+ GArray *image_tracking_models = g_array_new(FALSE, FALSE, sizeof(testing_object_h));
+
+ const int options[3] = { 1, 2, 3 };
+ const char *names[3] = {
+ "Recognition cases",
+ "Tracking cases",
+ "Exit" };
+
+ mv_image_object_h current_object = NULL;
+
+ while (1) {
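+		/* Main menu loop: dispatch to the recognition or tracking scenarios until the user chooses to exit */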
+ char exit = 'n';
+ int sel_opt = show_menu("Select action:", options, names, 3);
+ switch (sel_opt) {
+ case 1:
+ /* Recognition cases */
+ perform_recognition_cases(image_objects);
+ break;
+ case 2:
+ /* Tracking cases */
+ perform_tracking_cases(image_objects, image_tracking_models);
+ break;
+ case 3:
+ /* Exit */
+ exit = 'y';
+ break;
+ default:
+ printf("Invalid option.\n");
+ sel_opt = 0;
+ continue;
+ }
+
+ if ('y' == exit) {
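+			/* Ask for confirmation before leaving the main menu loop */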
+ sel_opt = 0;
+ const int options_last[2] = { 1, 2 };
+ const char *names_last[2] = { "No", "Yes" };
+
+ while (sel_opt == 0) {
+ sel_opt = show_menu("Are you sure?",
+ options_last, names_last, 2);
+ switch (sel_opt) {
+ case 1:
+ exit = 'n';
+ break;
+ case 2:
+ exit = 'y';
+ break;
+ default:
+					printf("Invalid option. Back to the main menu.\n");
+ sel_opt = 0;
+ break;
+ }
+ }
+
+ if ('y' == exit)
+ break;
+ }
+ }
+
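+	/* Destroy all created testing objects and free both containers before exit */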
+ guint i = 0;
+ for (i = 0; i < image_objects->len; ++i) {
+ testing_object_h temp = g_array_index(
+ image_objects,
+ testing_object_h, i);
+ testing_object_destroy(&temp);
+ }
+ g_array_free(image_objects, TRUE);
+
+ for (i = 0; i < image_tracking_models->len; ++i) {
+ testing_object_h temp = g_array_index(
+ image_tracking_models,
+ testing_object_h, i);
+ testing_object_destroy(&temp);
+ }
+ g_array_free(image_tracking_models, TRUE);
+
+ LOGI("Image Media Vision Testsuite is closed");
+
+ return 0;
}