summaryrefslogtreecommitdiff
path: root/mv_face/face
diff options
context:
space:
mode:
Diffstat (limited to 'mv_face/face')
-rw-r--r--mv_face/face/CMakeLists.txt36
-rw-r--r--mv_face/face/include/FaceDetector.h112
-rw-r--r--mv_face/face/include/FaceExpressionRecognizer.h85
-rw-r--r--mv_face/face/include/FaceEyeCondition.h76
-rw-r--r--mv_face/face/include/FaceRecognitionModel.h290
-rw-r--r--mv_face/face/include/FaceTrackingModel.h175
-rw-r--r--mv_face/face/include/FaceUtil.h75
-rw-r--r--mv_face/face/include/TrackerMedianFlow.h155
-rw-r--r--mv_face/face/include/mv_face_open.h791
-rw-r--r--mv_face/face/src/FaceDetector.cpp105
-rw-r--r--mv_face/face/src/FaceExpressionRecognizer.cpp105
-rw-r--r--mv_face/face/src/FaceEyeCondition.cpp229
-rw-r--r--mv_face/face/src/FaceRecognitionModel.cpp546
-rw-r--r--mv_face/face/src/FaceTrackingModel.cpp217
-rw-r--r--mv_face/face/src/FaceUtil.cpp138
-rw-r--r--mv_face/face/src/TrackerMedianFlow.cpp460
-rw-r--r--mv_face/face/src/mv_face_open.cpp1048
17 files changed, 4643 insertions, 0 deletions
diff --git a/mv_face/face/CMakeLists.txt b/mv_face/face/CMakeLists.txt
new file mode 100644
index 00000000..6cc11999
--- /dev/null
+++ b/mv_face/face/CMakeLists.txt
@@ -0,0 +1,36 @@
+project(${MV_FACE_LIB_NAME})
+cmake_minimum_required(VERSION 2.6)
+
+set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS_DEBUG _DEBUG)
+
+if(NOT SKIP_WARNINGS)
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Werror")
+endif()
+
+set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/${LIB_INSTALL_DIR})
+set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/${LIB_INSTALL_DIR})
+set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
+
+include_directories("${PROJECT_SOURCE_DIR}/include")
+include_directories("${PROJECT_SOURCE_DIR}/src")
+
+file(GLOB MV_FACE_INCLUDE_LIST "${PROJECT_SOURCE_DIR}/include/*.h")
+file(GLOB MV_FACE_SRC_LIST "${PROJECT_SOURCE_DIR}/src/*.cpp")
+
+find_package(OpenCV REQUIRED core objdetect contrib)
+if(NOT OpenCV_FOUND)
+ message(SEND_ERROR "Failed to find OpenCV")
+ return()
+else()
+ include_directories(${OpenCV_INCLUDE_DIRS})
+endif()
+
+if(FORCED_STATIC_BUILD)
+ add_library(${PROJECT_NAME} STATIC ${MV_FACE_INCLUDE_LIST} ${MV_FACE_SRC_LIST})
+else()
+ add_library(${PROJECT_NAME} SHARED ${MV_FACE_INCLUDE_LIST} ${MV_FACE_SRC_LIST})
+endif()
+
+target_link_libraries(${PROJECT_NAME} ${OpenCV_LIBS} ${MV_COMMON_LIB_NAME} dlog)
+
+INSTALL(TARGETS ${PROJECT_NAME} DESTINATION ${LIB_INSTALL_DIR})
diff --git a/mv_face/face/include/FaceDetector.h b/mv_face/face/include/FaceDetector.h
new file mode 100644
index 00000000..f014a542
--- /dev/null
+++ b/mv_face/face/include/FaceDetector.h
@@ -0,0 +1,112 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __FACEDETECTOR_H__
+#define __FACEDETECTOR_H__
+
+#include <opencv/cv.h>
+#include <vector>
+#include <string>
+
+/**
+ * @file FaceDetector.h
+ * @brief This file contains the FaceDetector class which implement the face
+ * detection functionality.
+ */
+
+namespace MediaVision
+{
+namespace Face
+{
+
+/**
+ * @class FaceDetector
+ * @brief The Face Detector container.
+ * @details It is class which contains face detection functionality.
+ *
+ * @since_tizen 3.0
+ */
+class FaceDetector
+{
+public:
+
+ /**
+ * @brief Creates a FaceDetector.
+ *
+ * @since_tizen 3.0
+ */
+ FaceDetector();
+
+ /**
+ * @brief Destroys the FaceDetector and releases all its resources.
+ *
+ * @since_tizen 3.0
+ */
+ virtual ~FaceDetector();
+
+ /**
+ * @brief Performs face detection functionality.
+ * @details Use this function to launch face detection algorithm which
+ * used the haarcascade set by setHaarcascadeFilepath().
+ *
+ * @since_tizen 3.0
+ * @param [in] image The image where faces will be detected
+ * @param [in] roi Region of image where faces will be detected
+ * @param [in] minSize Minimum size of faces which will be detected
+ * @param [out] faceLocations The result locations of detected faces.
+ * @return true if detect process is completely finished. Otherwise return false.
+ *
+ * @pre Set a face haarcascade by calling setHaarcascadeFilepath()
+ *
+ * @see setHaarcascadeFilepath()
+ */
+ bool detectFaces(
+ const cv::Mat& image,
+ const cv::Rect& roi,
+ const cv::Size& minSize,
+ std::vector<cv::Rect>& faceLocations);
+
+ /**
+ * @brief Loads haar cascade classifier for detection process.
+ * @details This method is mandatory for normally detecting process.
+ *
+ * @since_tizen 3.0
+ * @param [in] haarcascadeFilepath The path to the file, which contains haar
+ * cascade classifier information for
+ * detection process.
+ * @return true if cascade is loaded from file and ready for detecting
+ * process. Otherwise returns false.
+ */
+ bool loadHaarcascade(const std::string& haarcascadeFilepath);
+
+private:
+
+ cv::CascadeClassifier m_faceCascade; /**< Cascade classifier of the face
+ detecting process. */
+
+ std::string m_haarcascadeFilepath; /**< Path to the file, which contains
+ cascade classifier information. */
+
+ bool m_faceCascadeIsLoaded; /**< Flag to determine the state of the
+ m_faceCascade class. true if cascade is loaded
+ from file and is ready for the detecting process.
+ Otherwise is false. */
+};
+
+} /* Face */
+} /* MediaVision */
+
+#endif /* __FACEDETECTOR_H__ */
diff --git a/mv_face/face/include/FaceExpressionRecognizer.h b/mv_face/face/include/FaceExpressionRecognizer.h
new file mode 100644
index 00000000..fb445a41
--- /dev/null
+++ b/mv_face/face/include/FaceExpressionRecognizer.h
@@ -0,0 +1,85 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __FACEEXPRESSIONRECOGNIZER_H__
+#define __FACEEXPRESSIONRECOGNIZER_H__
+
+#include "mv_common_c.h"
+#include "mv_face_open.h"
+
+#include <string>
+
+namespace cv
+{
+ class Mat;
+}
+
+/**
+ * @file FaceExpressionRecognizer.h
+ * @brief This file contains the FaceExpressionRecognizer class which implements
+ * the facial expressions recognition functionality.
+ */
+
+namespace MediaVision
+{
+namespace Face
+{
+
+/**
+ * @brief Face expression recognition configuration.
+ *
+ * @since_tizen 3.0
+ */
+struct FaceRecognizerConfig
+{
+ FaceRecognizerConfig();
+ std::string mHaarcascadeFilepath;
+};
+
+/**
+ * @class FaceExpressionRecognizer
+ * @brief The FaceExpressionRecognizer implements the facial expressions
+ * recognition functionality.
+ *
+ * @since_tizen 3.0
+ */
+class FaceExpressionRecognizer
+{
+public:
+ /**
+ * @brief Recognizes facial expression on the image with known face location.
+ *
+ * @since_tizen 3.0
+ * @param [in] grayImage The grayscale image with face
+ * @param [in] faceLocation The location of the face on the @a image
+ * @param [out] faceExpression Expression recognized for the face at
+ * @a faceLocation
+ * @param [in] config The configuration will be used for
+ * facial expression recognition
+ *
+ * @see MediaVision::Face::FaceRecognizerConfig
+ */
+ static int recognizeFaceExpression(
+ const cv::Mat& grayImage,
+ const mv_rectangle_s& faceLocation,
+ mv_face_facial_expression_e *faceExpression,
+ const FaceRecognizerConfig& config = FaceRecognizerConfig());
+};
+
+} /* Face */
+} /* MediaVision */
+
+#endif /* __FACEEXPRESSIONRECOGNIZER_H__ */
diff --git a/mv_face/face/include/FaceEyeCondition.h b/mv_face/face/include/FaceEyeCondition.h
new file mode 100644
index 00000000..56e10389
--- /dev/null
+++ b/mv_face/face/include/FaceEyeCondition.h
@@ -0,0 +1,76 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __FACEEYECONDITION_H__
+#define __FACEEYECONDITION_H__
+
+#include <mv_common_c.h>
+#include <mv_face.h>
+
+#include <opencv/cv.h>
+
+/**
+ * @file FaceEyeCondition.h
+ * @brief This file contains the FaceEyeCondition class which implements the face
+ * eye condition recognition functionality.
+ */
+
+namespace MediaVision
+{
+namespace Face
+{
+
+/**
+ * @class FaceEyeCondition
+ * @brief The FaceEyeCondition implements the face
+ * eye condition recognition functionality.
+ *
+ * @since_tizen 3.0
+ */
+class FaceEyeCondition
+{
+public:
+
+ /**
+ * @brief Recognizes eye condition on the image with face location.
+ *
+ * @since_tizen 3.0
+ * @param [in] grayImage The image in gray scale with face where
+ * eye condition will be recognized
+ * @param [in] faceLocation The rectangle with face location
+ * @param [out] eyeCondition The eye condition which was recognized
+ * @return @c 0 on success, otherwise a negative error value
+ */
+ static int recognizeEyeCondition(
+ const cv::Mat& grayImage,
+ mv_rectangle_s faceLocation,
+ mv_face_eye_condition_e *eyeCondition);
+
+private:
+
+ static void splitEyes(
+ /*[in]*/ const cv::Mat& grayImage,
+ /*[in]*/ mv_rectangle_s faceLocation,
+ /*[out]*/ cv::Mat& leftEye,
+ /*[out]*/ cv::Mat& rightEye);
+
+ static int isEyeOpen(/*[in]*/const cv::Mat& eye);
+};
+
+} /* Face */
+} /* MediaVision */
+
+#endif /* __FACEEYECONDITION_H__ */
diff --git a/mv_face/face/include/FaceRecognitionModel.h b/mv_face/face/include/FaceRecognitionModel.h
new file mode 100644
index 00000000..b4888f2a
--- /dev/null
+++ b/mv_face/face/include/FaceRecognitionModel.h
@@ -0,0 +1,290 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __FACERECOGNITIONMODEL_H__
+#define __FACERECOGNITIONMODEL_H__
+
+#include "FaceUtil.h"
+
+#include <opencv2/core/core.hpp>
+#include <opencv2/contrib/contrib.hpp>
+
+#include <cstring>
+#include <vector>
+
+/**
+ * @file FaceRecognitionModel.h
+ * @brief This file contains the FaceRecognitionModel class definition which
+ * provides face recognition model interface.
+ */
+
+namespace MediaVision
+{
+namespace Face
+{
+
+/**
+ * @brief Structure containing supported recognition algorithms settings.
+ *
+ * @since_tizen 3.0
+ */
+struct FaceRecognitionModelConfig
+{
+ /**
+ * @brief Default constructor for the @ref FaceRecognitionModelConfig
+ *
+ * @since_tizen 3.0
+ */
+ FaceRecognitionModelConfig();
+
+ bool operator!=(
+ const FaceRecognitionModelConfig& other) const;
+
+ FaceRecognitionModelType mModelType; /**<
+ Type of the recognition algorithm */
+
+ int mNumComponents; /**< How many principal components will be included
+ to the Eigenvectors */
+
+ double mThreshold; /**< Minimal distance between principal components of
+ the model allowed */
+
+ int mRadius; /**< Radius of the local features for LBPH algorithm */
+
+ int mNeighbors; /**< How many neighboring pixels have to be analyzed
+ when LBPH learning is applied. Usually set as
+ 8*radius */
+
+ int mGridX; /**< X size of the spatial histogram (LBPH) */
+
+ int mGridY; /**< Y size of the spatial histogram (LBPH) */
+
+ int mImgWidth; /**< Width of the image to resize the samples for
+ algorithms working on the samples of the same
+ size (Eigenfaces, Fisherfaces) */
+
+ int mImgHeight; /**< Height of the image to resize the samples for
+ algorithms working on the samples of the same
+ size (Eigenfaces, Fisherfaces) */
+};
+
+/**
+ * @brief Structure where results of
+ * @ref MediaVision::Face::FaceRecognitionModel::recognize() call are
+ * stored.
+ *
+ * @since_tizen 3.0
+ */
+struct FaceRecognitionResults
+{
+ /**
+ * @brief Default constructor for the @ref FaceRecognitionResults
+ *
+ * @since_tizen 3.0
+ */
+ FaceRecognitionResults();
+
+ bool mIsRecognized; /**< The flag indication success of the
+ recognition */
+ cv::Rect_<int> mFaceLocation; /**< Location of the face where face has
+ been recognized */
+ int mFaceLabel; /**< Unique label of the face */
+ double mConfidence; /**< Recognition confidence level */
+};
+
+/**
+ * @class FaceRecognitionModel
+ * @brief Class providing interface for management of face recognition model.
+ *
+ * @since_tizen 3.0
+ */
+class FaceRecognitionModel
+{
+public:
+
+ /**
+ * @brief Creates a FaceRecognitionModel class instance.
+ *
+ * @since_tizen 3.0
+ */
+ FaceRecognitionModel();
+
+ /**
+ * @brief Creates a FaceRecognitionModel class instance based on existed
+ * instance.
+ *
+ * @since_tizen 3.0
+ * @param [in] origin The FaceRecognitionModel object that will be used
+ * for creation of new one
+ */
+ FaceRecognitionModel(const FaceRecognitionModel& origin);
+
+ /**
+ * @brief @ref FaceRecognitionModel copy assignment operator.
+ * @details Fills the information based on the @a copy
+ *
+ * @since_tizen 3.0
+ * @param [in] copy @ref FaceRecognitionModel object which will be
+ * copied
+ */
+ FaceRecognitionModel& operator=(const FaceRecognitionModel& copy);
+
+ /**
+ * @brief Destroys the FaceRecognitionModel class instance including all
+ * its resources.
+ *
+ * @since_tizen 3.0
+ */
+ ~FaceRecognitionModel();
+
+ /**
+ * @brief Serializes FaceRecognitionModel object to the file.
+ *
+ * @since_tizen 3.0
+ * @param [in] fileName The name of the file to which serialized
+ * FaceRecognitionModel object will be saved
+ * @return @c 0 on success, otherwise a negative error value
+ *
+ * @see FaceRecognitionModel::load()
+ */
+ int save(const std::string& fileName);
+
+ /**
+ * @brief Deserializes FaceRecognitionModel object from the file.
+ *
+ * @since_tizen 3.0
+ * @param [in] fileName The name of the file from which serialized
+ * FaceRecognitionModel object will be deserialized
+ * @return @c 0 on success, otherwise a negative error value
+ *
+ * @see FaceRecognitionModel::save()
+ */
+ int load(const std::string& fileName);
+
+ /**
+ * @brief Adds face image example for face labeled by @a faceLabel
+ *
+ * @since_tizen 3.0
+ * @param [in] faceImage Face image to be added to the training set
+ * @param [in] faceLabel Label that defines class of the face
+ * @return @c 0 on success, otherwise a negative error value
+ *
+ * @see FaceRecognitionModel::resetFaceExamples()
+ */
+ int addFaceExample(const cv::Mat& faceImage, int faceLabel);
+
+ /**
+ * @brief Clears the internal set of face image examples.
+ *
+ * @since_tizen 3.0
+ * @remarks Internal set of face image examples contains all samples
+ * collected with @ref FaceRecognitionModel::addFaceExample()
+ * method.
+ * @return @c 0 on success, otherwise a negative error value
+ *
+ * @see FaceRecognitionModel::addFaceExample()
+ */
+ int resetFaceExamples(void);
+
+ /**
+ * @brief Clears the internal set of face image examples labeled with
+ * @a faceLabel.
+ *
+ * @since_tizen 3.0
+ * @remarks Internal set of face image examples contains all samples
+ * collected with @ref FaceRecognitionModel::addFaceExample()
+ * method.
+ * @param faceLabel Unique for the model face label
+ * @return @c 0 on success, otherwise a negative error value
+ *
+ * @see FaceRecognitionModel::addFaceExample()
+ */
+ int resetFaceExamples(int faceLabel);
+
+ /**
+ * @brief Getter for the face labels learned by the model.
+ *
+ * @since_tizen 3.0
+ * @remarks Returned set will contain only labels that have been learned by
+ * FaceRecognitionModel::learn() method.
+ * @return Set of the face labels known by the model
+ *
+ * @see FaceRecognitionModel::addFaceExample()
+ * @see FaceRecognitionModel::learn()
+ */
+ const std::set<int>& getFaceLabels(void) const;
+
+ /**
+ * @brief Learns recognition model based on the set of collected face image
+ * examples.
+ *
+ * @since_tizen 3.0
+ * @param [in] config Configuration of the algorithm to be used for
+ * learning the model
+ * @return @c 0 on success, otherwise a negative error value
+ *
+ * @see FaceRecognitionModel::addFaceExample()
+ */
+ int learn(const FaceRecognitionModelConfig& config = FaceRecognitionModelConfig());
+
+ /**
+ * @brief Recognizes faces in the image and outputs recognition results to
+ * the @a results structure.
+ *
+ * @since_tizen 3.0
+ * @param [in] image Image containing the face to be recognized
+ * by the learned model
+ * @param [out] results Structure that will contain recognition results
+ * @return @c 0 on success, otherwise a negative error value
+ *
+ * @see FaceRecognitionModel::learn()
+ */
+ int recognize(const cv::Mat& image, FaceRecognitionResults& results);
+
+private:
+
+ /**
+ * Factory method for creating of the recognition algorithm based on input
+ * configuration:
+ */
+ static cv::Ptr<cv::FaceRecognizer> CreateRecognitionAlgorithm(
+ const FaceRecognitionModelConfig& config =
+ FaceRecognitionModelConfig());
+
+private:
+
+ bool m_canRecognize; /**< The flag showing possibility to recognize with
+ the face recognition model */
+
+ std::map<int, std::vector<cv::Mat> > m_faceSamples; /**< Samples of the
+ images which
+ will be used for
+ the learning */
+
+ FaceRecognitionModelConfig m_learnAlgorithmConfig; /**< Configuration of the
+ learning method */
+
+ cv::Ptr<cv::FaceRecognizer> m_recognizer; /**< Recognizer associated with
+ the current model */
+
+ std::set<int> m_learnedLabels; /**< Vector of the labels had been learned
+ by the model */
+};
+
+} /* Face */
+} /* MediaVision */
+
+#endif /* __FACERECOGNITIONMODEL_H__ */
diff --git a/mv_face/face/include/FaceTrackingModel.h b/mv_face/face/include/FaceTrackingModel.h
new file mode 100644
index 00000000..daa56c75
--- /dev/null
+++ b/mv_face/face/include/FaceTrackingModel.h
@@ -0,0 +1,175 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __FACETRACKINGMODEL_H__
+#define __FACETRACKINGMODEL_H__
+
+#include "TrackerMedianFlow.h"
+
+/**
+ * @file FaceTrackingModel.h
+ * @brief This file contains the FaceTrackingModel class definition which
+ * provides face tracking model interface.
+ */
+
+namespace MediaVision
+{
+namespace Face
+{
+
+/**
+ * @brief Structure where results of
+ * @ref MediaVision::Face::FaceTrackingModel::track() call are stored.
+ *
+ * @since_tizen 3.0
+ */
+struct FaceTrackingResults
+{
+ /**
+ * @brief Default constructor for the @ref FaceTrackingResults
+ *
+ * @since_tizen 3.0
+ */
+ FaceTrackingResults();
+
+ bool mIsTracked; /**< The flag indication success of the
+ tracking */
+ cv::Rect_<float> mFaceLocation; /**< Location of the face at the current
+ track iteration where face position
+ is predicted */
+ float mConfidence; /**< Tracking confidence level
+ (0.0 .. 1.0) */
+};
+
+/**
+ * @class FaceTrackingModel
+ * @brief Class providing interface for management of face tracking model.
+ *
+ * @since_tizen 3.0
+ */
+class FaceTrackingModel
+{
+public:
+ /**
+ * @brief Creates a FaceTrackingModel class instance.
+ *
+ * @since_tizen 3.0
+ */
+ FaceTrackingModel();
+
+ /**
+ * @brief Creates a FaceTrackingModel class instance based on existed
+ * instance.
+ *
+ * @since_tizen 3.0
+ * @param [in] origin The FaceTrackingModel object that will be used
+ * for creation of new one
+ */
+ FaceTrackingModel(const FaceTrackingModel& origin);
+
+ /**
+ * @brief @ref FaceTrackingModel copy assignment operator.
+ * @details Fills the information based on the @a copy
+ *
+ * @since_tizen 3.0
+ * @param [in] copy @ref FaceTrackingModel object which will be
+ * copied
+ */
+ FaceTrackingModel& operator=(const FaceTrackingModel& copy);
+
+ /**
+ * @brief Destroys the FaceTrackingModel class instance including all
+ * its resources.
+ *
+ * @since_tizen 3.0
+ */
+ ~FaceTrackingModel();
+
+ /**
+ * @brief Serializes FaceTrackingModel object to the file.
+ *
+ * @since_tizen 3.0
+ * @param [in] fileName The name of the file to which serialized
+ * FaceTrackingModel object will be saved
+ * @return @c 0 on success, otherwise a negative error value
+ *
+ * @see FaceTrackingModel::load()
+ */
+ int save(const std::string& fileName);
+
+ /**
+ * @brief Deserializes FaceTrackingModel object from the file.
+ *
+ * @since_tizen 3.0
+ * @param [in] fileName The name of the file from which serialized
+ * FaceTrackingModel object will be deserialized
+ * @return @c 0 on success, otherwise a negative error value
+ *
+ * @see FaceTrackingModel::save()
+ */
+ int load(const std::string& fileName);
+
+ /**
+ * @brief Prepares FaceTrackingModel object to the next tracking session.
+ *
+ * @since_tizen 3.0
+ * @param [in] image First frame of the video or image sequence for
+ * which tracking will be started
+ * @return @c 0 on success, otherwise a negative error value
+ *
+ * @see FaceTrackingModel::save()
+ */
+ int prepare(const cv::Mat& image);
+
+ /**
+ * @brief Prepares FaceTrackingModel object to the next tracking session.
+ *
+ * @since_tizen 3.0
+ * @param [in] image First frame of the video or image sequence for
+ * which tracking will be started
+ * @param [in] boundingBox Rectangular location of the face on the @a
+ * image
+ * @return @c 0 on success, otherwise a negative error value
+ *
+ * @see FaceTrackingModel::save()
+ */
+ int prepare(const cv::Mat& image, const cv::Rect_<float>& boundingBox);
+
+ /**
+ * @brief Performs one tracking iteration for the video frame or image
+ * from the continuous sequence of images.
+ *
+ * @since_tizen 3.0
+ * @param [in] image Frame of the sequence for which the tracking iteration is performed
+ * @param [out] results Structure that will be filled with tracking results
+ */
+ int track(const cv::Mat& image, FaceTrackingResults& results);
+
+private:
+
+ bool m_canTrack; /**< The flag showing possibility
+ of the tracking model to
+ perform track */
+
+ cv::Ptr<cv::TrackerMedianFlow> m_tracker; /**< Underlying OpenCV tracking
+ model */
+
+};
+
+} /* Face */
+} /* MediaVision */
+
+#endif /* __FACETRACKINGMODEL_H__ */
diff --git a/mv_face/face/include/FaceUtil.h b/mv_face/face/include/FaceUtil.h
new file mode 100644
index 00000000..d79757df
--- /dev/null
+++ b/mv_face/face/include/FaceUtil.h
@@ -0,0 +1,75 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __FACEUTIL_H__
+#define __FACEUTIL_H__
+
+#include <opencv/cv.h>
+
+#include "mv_common_c.h"
+
+/**
+ * @file FaceUtil.h
+ * @brief This file contains the useful functionality for Face module.
+ */
+namespace MediaVision
+{
+namespace Face
+{
+
+/**
+ * @brief Enumeration of supported learning algorithms.
+ *
+ * @since_tizen 3.0
+ */
+enum FaceRecognitionModelType
+{
+ MEDIA_VISION_FACE_MODEL_TYPE_UNKNOWN = 0, /**< Unknown algorithm type */
+ MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES = 1, /**< Eigenfaces algorithm */
+ MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES = 2, /**< Fisherfaces algorithm */
+ MEDIA_VISION_FACE_MODEL_TYPE_LBPH = 3 /**< Local Binary Patterns
+ Histograms algorithm */
+};
+
+/**
+ * @brief Contains parameters for face recognition.
+ *
+ * @since_tizen 3.0
+ */
+struct RecognitionParams
+{
+ RecognitionParams(FaceRecognitionModelType algType);
+
+ RecognitionParams();
+
+ FaceRecognitionModelType mRecognitionAlgType;
+ /**< The type of the learning algorithm. */
+};
+
+/**
+ * @brief Converts mv_source_h to cv::Mat class with grayscale type.
+ *
+ * @since_tizen 3.0
+ * @param [in] mvSource The handle to the image from Media Vision API.
+ * @param [out] cvSource The cv::Mat class, which will be filled.
+ * @return @c 0 on success, otherwise a negative error value
+ */
+int convertSourceMV2GrayCV(mv_source_h mvSource, cv::Mat& cvSource);
+
+} /* Face */
+} /* MediaVision */
+
+#endif /* __FACEUTIL_H__ */
diff --git a/mv_face/face/include/TrackerMedianFlow.h b/mv_face/face/include/TrackerMedianFlow.h
new file mode 100644
index 00000000..7112a146
--- /dev/null
+++ b/mv_face/face/include/TrackerMedianFlow.h
@@ -0,0 +1,155 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+ //
+ // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+ //
+ // By downloading, copying, installing or using the software you agree to this license.
+ // If you do not agree to this license, do not download, install,
+ // copy or use the software.
+ //
+ //
+ // License Agreement
+ // For Open Source Computer Vision Library
+ //
+ // Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+ // Third party copyrights are property of their respective owners.
+ //
+ // Redistribution and use in source and binary forms, with or without modification,
+ // are permitted provided that the following conditions are met:
+ //
+ // * Redistribution's of source code must retain the above copyright notice,
+ // this list of conditions and the following disclaimer.
+ //
+ // * Redistribution's in binary form must reproduce the above copyright notice,
+ // this list of conditions and the following disclaimer in the documentation
+ // and/or other materials provided with the distribution.
+ //
+ // * The name of the copyright holders may not be used to endorse or promote products
+ // derived from this software without specific prior written permission.
+ //
+ // This software is provided by the copyright holders and contributors "as is" and
+ // any express or implied warranties, including, but not limited to, the implied
+ // warranties of merchantability and fitness for a particular purpose are disclaimed.
+ // In no event shall the Intel Corporation or contributors be liable for any direct,
+ // indirect, incidental, special, exemplary, or consequential damages
+ // (including, but not limited to, procurement of substitute goods or services;
+ // loss of use, data, or profits; or business interruption) however caused
+ // and on any theory of liability, whether in contract, strict liability,
+ // or tort (including negligence or otherwise) arising in any way out of
+ // the use of this software, even if advised of the possibility of such damage.
+ //
+ //M*/
+
+#ifndef __TRACKERMEDIANFLOW_H__
+#define __TRACKERMEDIANFLOW_H__
+
+#include "opencv2/core/core.hpp"
+
+namespace cv
+{
+
+class TrackerMedianFlowModel;
+
+/** @brief Median Flow tracker implementation.
+
+Implementation of a paper @cite MedianFlow .
+
+The tracker is suitable for very smooth and predictable movements when object is visible throughout
+ the whole sequence. It's quite fast and accurate for this type of problems (in particular, it was shown
+by authors to outperform MIL). During the implementation period the code at
+<http://www.aonsquared.co.uk/node/5>, the courtesy of the author Arthur Amarra, was used for the
+reference purpose.
+ */
+class TrackerMedianFlow : public virtual Algorithm
+{
+public:
+
+ struct Params
+ {
+ /**
+ * @brief TrackerMedianFlow algorithm parameters constructor
+ */
+ Params();
+ void read(const FileNode& fn);
+ void write(FileStorage& fs) const;
+
+ int mPointsInGrid; /**< Square root of number of keypoints used.
+ Increase it to trade accurateness for speed.
+ Default value is sensible and recommended */
+
+ Size mWindowSize; /**< Size of the search window at each pyramid level
+ for Lucas-Kanade optical flow search used for
+ tracking */
+
+ int mPyrMaxLevel; /**< Number of pyramid levels for Lucas-Kanade optical
+ flow search used for tracking */
+ };
+
+ TrackerMedianFlow(Params paramsIn = Params());
+
+ bool copyTo(TrackerMedianFlow& copy) const;
+
+ bool init(const Mat& image, const Rect_<float>& boundingBox);
+ bool update(const Mat& image, Rect_<float>& boundingBox);
+
+ bool isInited() const;
+
+ float getLastConfidence() const;
+ Rect_<float> getLastBoundingBox() const;
+
+ void read(FileStorage& fn);
+ void write(FileStorage& fs) const;
+
+private:
+
+ bool isInit;
+
+ bool medianFlowImpl(Mat oldImage, Mat newImage, Rect_<float>& oldBox);
+
+ Rect_<float> vote(
+ const std::vector<Point2f>& oldPoints,
+ const std::vector<Point2f>& newPoints,
+ const Rect_<float>& oldRect,
+ Point2f& mD);
+
+ template<typename T>
+ T getMedian(
+ std::vector<T>& values, int size = -1);
+
+ void check_FB(
+ std::vector<Mat> newPyramid,
+ const std::vector<Point2f>& oldPoints,
+ const std::vector<Point2f>& newPoints,
+ std::vector<bool>& status);
+
+ void check_NCC(
+ const Mat& oldImage,
+ const Mat& newImage,
+ const std::vector<Point2f>& oldPoints,
+ const std::vector<Point2f>& newPoints,
+ std::vector<bool>& status);
+
+ inline float l2distance(Point2f p1, Point2f p2);
+
+ Params params; /**< Parameters used during tracking, see
+ @ref TrackerMedianFlow::Params */
+
+ TermCriteria termcrit; /**< Terminating criteria for OpenCV
+ Lucas–Kanade optical flow algorithm used
+ during tracking */
+
+ Rect_<float> m_boundingBox; /**< Tracking object bounding box */
+
+ float m_confidence; /**< Confidence that face was tracked correctly
+ at the last tracking iteration */
+
+ Mat m_image; /**< Last image for which tracking was
+ performed */
+
+ std::vector<Mat> m_pyramid; /**< The pyramid had been calculated for
+ the previous frame (or when
+ initialize the model) */
+};
+
+} /* namespace cv */
+
+#endif /* __TRACKERMEDIANFLOW_H__ */
diff --git a/mv_face/face/include/mv_face_open.h b/mv_face/face/include/mv_face_open.h
new file mode 100644
index 00000000..a127d5ab
--- /dev/null
+++ b/mv_face/face/include/mv_face_open.h
@@ -0,0 +1,791 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TIZEN_MEDIAVISION_FACE_OPEN_H__
+#define __TIZEN_MEDIAVISION_FACE_OPEN_H__
+
+#include "mv_face.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @file mv_face_open.h
+ * @brief This file contains the Media Vision Face Open API.
+ */
+
+/******************/
+/* Face detection */
+/******************/
+
+/**
+ * @brief Performs face detection on the @a source for the @a engine_conf.
+ * @details Use this function to launch face detection algorithm configured by
+ * @a engine_conf configuration. Each time when mv_face_detect_open is
+ * called, @a detected_cb will receive a set of the detected
+ * faces at the media source.
+ *
+ * @since_tizen 3.0
+ * @param [in] source The handle to the source of the media where faces
+ * will be detected
+ * @param [in] engine_cfg The handle to the configuration of engine will be
+ * used for detecting. If NULL, then default settings
+ * will be used.
+ * @param [in] detected_cb The callback which will be called for all face
+ * locations detected on media source. This callback
+ * will receive detecting results
+ * @param [in] user_data The user data passed from the code where
+ * @ref mv_face_detect_open() is invoked. This data will be
+ * accessible from @a detected_cb callback.
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_INVALID_OPERATION Invalid operation
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT Source colorspace
+ * isn't supported
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ *
+ * @pre Create a source handle by calling @ref mv_create_source()
+ * @post @a detected_cb will be called to process detection results
+ *
+ * @see mv_face_detected_cb
+ */
+int mv_face_detect_open(
+ mv_source_h source,
+ mv_engine_config_h engine_cfg,
+ mv_face_detected_cb detected_cb,
+ void *user_data);
+
+
+/********************/
+/* Face recognition */
+/********************/
+
+/**
+ * @brief Performs face recognition on the @a source image.
+ * @details Use this function to launch face recognition algorithm configured by
+ * @a engine_conf configuration using @a recognition_model recognition
+ * model. Each time when @ref mv_face_recognize_open() is called,
+ * @a recognized_cb will receive recognition results:\n
+ * - Location in the @a source of the face has been recognized;
+ * - Label of the face has been recognized;
+ * - Confidence of the @a recognition_model that face has been
+ * recognized correctly (value from 0.0 to 1.0).
+ *
+ * @since_tizen 3.0
+ * @remarks Using of untrained or weakly trained recognition models will cause
+ * not accurate results even if resulting confidence will be high.
+ * Use @ref mv_face_recognition_model_learn_open() function before
+ * @ref mv_face_recognize_open() call. Best results can be achieved
+ * when big set of face image examples were added by
+ * @ref mv_face_recognition_model_add_open() before
+ * @ref mv_face_recognition_model_learn_open() call.
+ * @param [in] source The handle to the source of the media to
+ * recognize face(s) for
+ * @param [in] recognition_model The handle to the model will be used for
+ * recognition
+ * @param [in] engine_cfg The handle to the configuration of engine
+ * will be used for recognition. If NULL, then
+ * default settings will be used
+ * @param [in] face_location Rectangular box bounding face image on the
+ * @a source. If NULL, then full source will be
+ * analyzed
+ * @param [in] recognized_cb The callback which will be called for the
+ * face recognition results on the @a source.
+ * @param [in] user_data The user data passed from the code where
+ * @ref mv_face_recognize_open() is invoked.
+ * This data will be accessible from
+ * @a recognized_cb callback.
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_INVALID_OPERATION Invalid operation
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT Source colorspace
+ * isn't supported
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ *
+ * @pre Create a source handle by calling @ref mv_create_source()
+ * @pre Create a face recognition model handle by calling
+ * @ref mv_face_recognition_model_create_open()
+ * @post @a recognized_cb will be called to process recognition results
+ *
+ * @see mv_face_recognized_cb
+ */
+int mv_face_recognize_open(
+ mv_source_h source,
+ mv_face_recognition_model_h recognition_model,
+ mv_engine_config_h engine_cfg,
+ mv_rectangle_s *face_location,
+ mv_face_recognized_cb recognized_cb,
+ void *user_data);
+
+
+/*****************/
+/* Face tracking */
+/*****************/
+
+/**
+ * @brief Performs face tracking on the @a source for the @a tracking_model.
+ * @details Use this function to launch face tracking algorithm configured by
+ * @a engine_conf configuration using @a tracking_model tracking
+ * model. Each time when this function is called, @a tracked_cb
+ * will receive updated @a tracking_model, new location determined for
+ * the tracked face and model confidence that location is determined
+ * correctly.
+ *
+ * @since_tizen 3.0
+ * @remarks To allow correct tracking @a tracking_model has to be already used
+ * in previous tracking process(es) or prepared with
+ * @ref mv_face_tracking_model_prepare_open(). Preparation requires
+ * specifying the face location for the @a source on which tracking was
+ * started. I.e. @ref mv_face_tracking_model_prepare_open() function
+ * has to be called at least once before this method call.
+ * @param [in] source The handle to the source of the media to
+ * recognize face for
+ * @param [in] tracking_model The handle to the model will be used for
+ * tracking
+ * @param [in] engine_cfg The handle to the configuration of engine will
+ * be used for tracking. If NULL, the default
+ * configuration will be used.
+ * @param [in] tracked_cb The callback which will be called for tracking
+ * event on the @a source where face would be
+ * tracked. This callback will receive tracking
+ * results
+ * @param [in] do_learn The model learning flag. If it is set @c true
+ * then model will try to learn (if it supports
+ * learning feature), otherwise model will be not
+ * learned during the invoking tracking iteration.
+ * Learning process improves tracking correctness,
+ * but can decrease tracking performance
+ * @param [in] user_data The user data passed from the code where
+ * @ref mv_face_track_open() is invoked. This data
+ * will be accessible from @a tracked_cb callback
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_INVALID_OPERATION Invalid operation
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT Source colorspace
+ * isn't supported
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ *
+ * @pre Create a source handle by calling @ref mv_create_source()
+ * @pre Create a face tracking model handle by calling
+ * @ref mv_face_tracking_model_create_open()
+ * @post @a tracked_cb will be called to process tracking results
+ *
+ * @see mv_face_tracked_cb
+ */
+int mv_face_track_open(
+ mv_source_h source,
+ mv_face_tracking_model_h tracking_model,
+ mv_engine_config_h engine_cfg,
+ mv_face_tracked_cb tracked_cb,
+ bool do_learn,
+ void *user_data);
+
+
+/********************************/
+/* Recognition of eye condition */
+/********************************/
+
+/**
+ * @brief Determines eye-blink condition for @a face_location on media @a source.
+ * @details Use this function to recognize eye-blink condition for the face
+ * bounded by @a face_location at @a source.
+ *
+ * @since_tizen 3.0
+ * @param [in] source The handle to the source of the media to
+ * recognize eye-blink condition for
+ * @param [in] engine_cfg The handle to the configuration of engine
+ * will be used for eye-blink condition
+ * recognition. If NULL, the default configuration
+ * will be used.
+ * @param [in] face_location The location bounding the face at the @a source
+ * @param [in] eye_condition_recognized_cb The callback for processing result
+ * of eye-blink condition recognition
+ * @param [in] user_data The user data passed from the code where
+ * @ref mv_face_eye_condition_recognize_open() is
+ * invoked. This data will be accessible from
+ * @a eye_condition_recognized_cb callback
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT Source colorspace
+ * isn't supported
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ *
+ * @pre Create a source handle by calling @ref mv_create_source_open()
+ *
+ * @see mv_face_eye_condition_recognized_cb
+ */
+int mv_face_eye_condition_recognize_open(
+ mv_source_h source,
+ mv_engine_config_h engine_cfg,
+ mv_rectangle_s face_location,
+ mv_face_eye_condition_recognized_cb eye_condition_recognized_cb,
+ void *user_data);
+
+
+/************************************/
+/* Recognition of facial expression */
+/************************************/
+
+/**
+ * @brief Determines facial expression for @a face_location on media @a source.
+ * @details Use this function to determine facial expression for the face
+ * bounded by @a face_location at @a source.
+ *
+ * @since_tizen 3.0
+ * @param [in] source The handle to the source of the media
+ * to recognize facial expression for
+ * @param [in] engine_cfg The handle to the configuration of
+ * engine will be used for expression recognition
+ * @param [in] face_location The location bounding the face at the @a source
+ * @param [in] expression_recognized_cb The callback for processing result
+ * of facial expression determining
+ * @param [in] user_data The user data passed from the code where
+ * @ref mv_face_facial_expression_recognize_open() is
+ * invoked. This data will be accessible from
+ * @a expression_recognized_cb callback.
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT Source colorspace
+ * isn't supported
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ *
+ * @pre Create a source handle by calling @ref mv_create_source_open()
+ * @pre Create a face engine configuration handle by calling @ref mv_create_engine_config()
+ *
+ * @see mv_face_facial_expression_recognized_cb
+ */
+int mv_face_facial_expression_recognize_open(
+ mv_source_h source,
+ mv_engine_config_h engine_cfg,
+ mv_rectangle_s face_location,
+ mv_face_facial_expression_recognized_cb expression_recognized_cb,
+ void *user_data);
+
+/*******************************/
+/* Recognition model behavior */
+/*******************************/
+
+/**
+ * @brief Creates a face recognition model handle.
+ * @details Use this function to create default face recognition model. Creating
+ * process is defined by concrete face engine library. After creation
+ * recognition model has to be learned with
+ * @ref mv_face_recognition_model_learn_open() function to provide
+ * appropriate results of face recognition functionality. Or learned
+ * earlier model can be loaded by
+ * @ref mv_face_recognition_model_load_open() function.
+ *
+ * @since_tizen 3.0
+ * @remarks It can cause incompatibility issues when saved models (see
+ * @ref mv_face_recognition_model_save_open(),
+ * @ref mv_face_recognition_model_load_open() functions documentation)
+ * are used in applications for different platforms which use different
+ * computer vision libraries underlying this API.
+ * @remarks You must release @a recognition_model by using
+ * @ref mv_face_recognition_model_destroy_open() function.
+ * @param [out] recognition_model The handle to the recognition model to be
+ * created
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ *
+ * @see mv_face_recognition_model_destroy_open()
+ */
+int mv_face_recognition_model_create_open(
+ mv_face_recognition_model_h *recognition_model);
+
+/**
+ * @brief Destroys the face recognition model handle and releases all its
+ * resources.
+ *
+ * @since_tizen 3.0
+ * @param [in] recognition_model The handle to the face recognition model to
+ * be destroyed
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ *
+ * @see mv_face_recognition_model_create_open()
+ */
+int mv_face_recognition_model_destroy_open(
+ mv_face_recognition_model_h recognition_model);
+
+/**
+ * @brief Creates a copy of existed recognition model handle and clones all its
+ * resources.
+ *
+ * @since_tizen 3.0
+ * @remarks Cloning performs not only handle copy, but also copies all internal
+ * resources of the model. @a dst must be released using
+ * @a mv_face_recognition_model_destroy_open().
+ * @param [in] src The handle to the recognition model to be copied
+ * @param [out] dst The handle to the copy of existed recognition model
+ * specified as @a src
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ *
+ * @pre Create face recognition handles by calling
+ * @ref mv_face_recognition_model_create_open()
+ *
+ * @see mv_face_recognition_model_create_open()
+ */
+int mv_face_recognition_model_clone_open(
+ mv_face_recognition_model_h src,
+ mv_face_recognition_model_h *dst);
+
+/**
+ * @brief Saves recognition model to the file.
+ *
+ * @since_tizen 3.0
+ * @remarks This function doesn't save added by
+ * @ref mv_face_recognition_model_add_open() function face
+ * image examples. These examples can be removed by
+ * @ref mv_face_recognition_model_reset_open() function
+ * if it is needed to clear the memory.
+ * @remarks After model is saved to the file, it can be loaded from this file
+ * by @ref mv_face_recognition_model_load_open() function.
+ * @param [in] file_name Name of the file to save the model
+ * @param [in] recognition_model The handle to the recognition model to be
+ * saved to the file
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path
+ * @retval #MEDIA_VISION_ERROR_PERMISSION_DENIED Not permitted
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT Not supported format
+ * @retval #MEDIA_VISION_ERROR_INVALID_OPERATION Invalid operation
+ *
+ * @pre Create a face recognition handle by calling
+ * @ref mv_face_recognition_model_create_open() function
+ * @post Saved model can be loaded later by calling
+ * @ref mv_face_recognition_model_load_open() function
+ *
+ * @see mv_face_recognition_model_load_open()
+ * @see mv_face_recognition_model_create_open()
+ */
+int mv_face_recognition_model_save_open(
+ const char *file_name,
+ mv_face_recognition_model_h recognition_model);
+
+/**
+ * @brief Loads recognition model from file.
+ *
+ * @since_tizen 3.0
+ * @remarks This function doesn't modify the set of face image examples added
+ * with @ref mv_face_recognition_model_add_open() function.
+ * Model will be loaded from file without loss of collected examples.
+ * If you want to free memory from examples, use
+ * @ref mv_face_recognition_model_reset_open() function.
+ * It is recommended to clear the memory if learning algorithm doesn't
+ * support reinforcement learning.
+ * @remarks Recognition models can be loaded from files saved with
+ * mv_face_recognition_model_save_open() function.
+ * @remarks Recognition model must be destroyed using
+ * @ref mv_face_recognition_model_destroy_open().
+ * @param [in] file_name Name of the file to load the model from
+ * @param [out] recognition_model The handle to the recognition model
+ * to be loaded from the file
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path
+ * @retval #MEDIA_VISION_ERROR_PERMISSION_DENIED Not permitted
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT Not supported format
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ *
+ * @see mv_face_recognition_model_save_open()
+ * @see mv_face_recognition_model_destroy_open()
+ */
+int mv_face_recognition_model_load_open(
+ const char *file_name,
+ mv_face_recognition_model_h *recognition_model);
+
+/**
+ * @brief Adds face image example to be used for face recognition model learning
+ * with @ref mv_face_recognition_model_learn_open().
+ *
+ * @since_tizen 3.0
+ * @remarks It is possible to destroy @a source after calling this method.
+ * Source isn't used for learning directly.
+ * @remarks Face image @a example_location location can be determined using
+ * @ref mv_face_detect_open() function.
+ * @param [in] source The handle to @a source that contains face
+ * image
+ * @param [in] recognition_model The handle to the recognition model which
+ * could be learned based on example
+ * @param [in] example_location The pointer to the rectangular location of
+ * the face image at the source image. If NULL,
+ * then full image will be analyzed as the face
+ * image
+ * @param [in] face_label The label that identifies face for which
+ * example is adding. Specify the same labels
+ * for the face images of a single person when
+ * calling this method. Can't be NULL
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ *
+ * @pre Create a face recognition handle by calling
+ * @ref mv_face_recognition_model_create_open() function
+ * @post When appropriate amount of face image examples is added to the
+ * @a recognition_model, this model has to be learned by
+ * @ref mv_face_recognition_model_learn_open() function call. Only after
+ * learning of the model it can be used for face recognition with
+ * @a mv_face_recognize_open() function
+ *
+ * @see mv_face_recognition_model_reset_open()
+ * @see mv_face_recognition_model_learn_open()
+ */
+int mv_face_recognition_model_add_open(
+ const mv_source_h source,
+ mv_face_recognition_model_h recognition_model,
+ const mv_rectangle_s *example_location,
+ int face_label);
+
+/**
+ * @brief Remove from @a recognition_model all collected with
+ * @ref mv_face_recognition_model_add_open() function
+ * face examples labeled with @a face_label.
+ *
+ * @since_tizen 3.0
+ * @remarks Be aware that if this function is called before
+ * @ref mv_face_recognition_model_learn_open() function call, all or
+ * part of the required for learning data will be lost. It means that
+ * face image examples determined by the @a face_label label will be
+ * removed from the model and not taken into account when
+ * @ref mv_face_recognition_model_learn_open() will be called next
+ * time.
+ * @remarks Call of this function will free all the memory has been allocated
+ * during previous
+ * @ref mv_face_recognition_model_add_open() calls for
+ * the corresponding @a face_label label.
+ * @param [in] recognition_model The handle to the recognition model for
+ * which face image examples will be reset.
+ * @param [in] face_label The label that identifies face for which
+ * examples will be removed from the
+ * @a recognition_model. If NULL, then all
+ * known by @a recognition_model face image
+ * examples will be removed
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE Key not available
+ *
+ * @see mv_face_recognition_model_add_open()
+ * @see mv_face_recognition_model_learn_open()
+ */
+int mv_face_recognition_model_reset_open(
+ mv_face_recognition_model_h recognition_model,
+ const int *face_label);
+
+/**
+ * @brief Learns face recognition model.
+ * @details Before you start learning process, face recognition models have to be
+ * filled with training data - face image examples. These examples have
+ * to be provided by
+ * mv_face_recognition_model_add_open() function. Usually,
+ * recognition accuracy is increased when number of not identical
+ * examples is large. But it depends of the used learning algorithm.
+ *
+ * @since_tizen 3.0
+ * @remarks Common flow is to collect face examples as much as possible, add
+ * them to the recognition model with
+ * @ref mv_face_recognition_model_add_open(), then call
+ * @ref mv_face_recognition_model_learn_open() for this recognition
+ * model to learn it (or reinforce the model if reinforcement learning
+ * is supported by the used algorithm).
+ * @remarks Selection of the learning algorithm can be performed by setting
+ * corresponding attributes for the @a engine_cfg. You can check
+ * supported by @a engine_cfg attributes using
+ * @ref mv_engine_config_foreach_supported_attribute() function call.
+ * @param [in] engine_cfg The handle to the configuration of
+ * engine will be used for learning of the
+ * recognition models. If NULL, then
+ * default settings will be used
+ * @param [in,out] recognition_model The model which will be learned. After
+ * learning process these model may be
+ * changed, so
+ * @ref mv_face_recognize_open() results
+ * may differ before and after method call
+ * respectively to the face examples
+ * collected for the @a recognition_model
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ * @retval #MEDIA_VISION_ERROR_NO_DATA No data
+ *
+ * @pre Create a face engine configuration handle by calling
+ * @ref mv_create_engine_config() and set supported parameters if
+ * needed. Or just set @a engine_cfg as NULL to learn with default settings
+ * @pre Create a face recognition model handles by calling
+ * @ref mv_face_recognition_model_create_open() function
+ * @pre Add face image examples to the @a recognition_model by calling
+ * @ref mv_face_recognition_model_add_open() function
+ * @post If it is not planned to learn the model again, clear memory by
+ * @ref mv_face_recognition_model_reset_open() function
+ * @post When model has been learned, it can be used for face recognition with
+ * @ref mv_face_recognize_open() function
+ *
+ * @see mv_face_recognition_model_add_open()
+ * @see mv_face_recognition_model_reset_open()
+ * @see mv_face_recognize_open()
+ */
+int mv_face_recognition_model_learn_open(
+ mv_engine_config_h engine_cfg,
+ mv_face_recognition_model_h recognition_model);
+
+/**
+ * @brief Queries labels list and number of labels had been learned by the model.
+ *
+ * @since_tizen 3.0
+ * @remarks @a labels array has to be released using free().
+ * @param [in] recognition_model The handle to the recognition model for
+ * which set of the learned labels will be
+ * queried
+ * @param [out] labels The array which will be filled with labels
+ * had been learned by the model
+ * @param [out] number_of_labels The number of labels in @a labels array
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ *
+ * @pre Add face image examples with labels to the @a recognition_model by
+ * calling the @ref mv_face_recognition_model_add_open()
+ * function
+ * @pre Learn the @a recognition_model by labeled examples using
+ * @ref mv_face_recognition_model_learn_open() function
+ * @post @a labels array has to be freed in the function invoking code
+ *
+ * @see mv_face_recognition_model_add_open()
+ * @see mv_face_recognition_model_reset_open()
+ * @see mv_face_recognition_model_learn_open()
+ */
+int mv_face_recognition_model_query_labels_open(
+ mv_face_recognition_model_h recognition_model,
+ int **labels,
+ unsigned int *number_of_labels);
+
+/***************************/
+/* Tracking model behavior */
+/***************************/
+
+/**
+ * @brief Call this function to create a face tracking model handle.
+ * @details Use this function to create default face tracking model handle.
+ * After creation this handle has to be initialized with
+ * @ref mv_face_tracking_model_prepare_open() function to provide
+ * appropriate results of face tracking functionality. When handle is
+ * prepared, it is possible to use it for tracking on continuous
+ * sequence of the sources. Call
+ * @ref mv_face_tracking_model_prepare_open() function each time before
+ * starting tracking on the new sequence. The exception is situation
+ * when the new sequence is continuation of the previous sequence for
+ * which model has been tracked.
+ *
+ * @since_tizen 3.0
+ * @param [out] tracking_model The pointer to the handle to the tracking
+ * model that will be created
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ *
+ * @post Model can be loaded from the file after creation. Use
+ * @ref mv_face_tracking_model_load_open() function to load it from file
+ * @post Use @ref mv_face_tracking_model_prepare_open() function before tracking
+ * on the new video or continuous images sequence
+ * @post You must release @a tracking_model by using
+ * mv_face_tracking_model_destroy_open() function when it is not needed
+ * anymore
+ *
+ * @see mv_face_tracking_model_destroy_open()
+ * @see mv_face_tracking_model_prepare_open()
+ * @see mv_face_tracking_model_load_open()
+ */
+int mv_face_tracking_model_create_open(
+ mv_face_tracking_model_h *tracking_model);
+
+/**
+ * @brief Call this function to destroy the face tracking model handle and
+ * release all its resources.
+ *
+ * @since_tizen 3.0
+ * @param [in] tracking_model The handle to the face tracking model that
+ * will be destroyed
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ *
+ * @see mv_face_tracking_model_create_open()
+ */
+int mv_face_tracking_model_destroy_open(
+ mv_face_tracking_model_h tracking_model);
+
+/**
+ * @brief Call this function to initialize tracking model by the location of the
+ * face to be tracked.
+ * @details This function is usually called once after tracking model is created
+ * and each time before tracking is started for the new sequence of
+ * sources which is not the direct continuation of the sequence for
+ * which tracking has been performed before. But it is allowed to call
+ * it between tracking sessions to allow Media Vision start to track
+ * more accurately.
+ *
+ * @since_tizen 3.0
+ * @param [in] tracking_model The handle to the tracking model that will be
+ * prepared for tracking on new video or image
+ * sequence
+ * @param [in] engine_cfg The handle to the configuration of engine
+ * will be used for model preparing. If NULL, then
+ * default settings will be used.
+ * @param [in] source The handle to the source where face @a location
+ * is specified. Usually it is the first frame of
+ * the video or the first image in the continuous
+ * image sequence planned to be used for tracking
+ * @param [in] location The quadrangle-shaped location (actually,
+ * rectangle can be used) determining position
+ * of the face to be tracked on the @a source. If
+ * @c NULL, then last location determined by the
+ * tracking model for the tracked face will be
+ * used for preparation
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ *
+ * @pre Create a face tracking model handle by calling
+ * @ref mv_face_tracking_model_create_open() function
+ * @pre Create a source handle by calling @ref mv_create_source() function
+ * @post When model is prepared, @ref mv_face_track_open() function can be used
+ * to track on the video or continuous image sequence
+ *
+ * @see mv_face_tracking_model_create_open()
+ * @see mv_face_track_open()
+ */
+int mv_face_tracking_model_prepare_open(
+		mv_face_tracking_model_h tracking_model,
+		mv_engine_config_h engine_cfg,
+		mv_source_h source,
+		mv_quadrangle_s *location);
+
+/**
+ * @brief Call this function to make a copy of existed tracking model handle and
+ * clone all its resources to the copy.
+ *
+ * @since_tizen 3.0
+ * @remarks Cloning performs not only handle copy, but also copies all internal
+ * resources of the model. @a dst must be released using
+ * mv_face_tracking_model_destroy_open().
+ * @param [in] src The handle to the tracking model to be copied
+ * @param [out] dst The handle to the copy of existed tracking model
+ * specified as @a src
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ *
+ * @pre Create face tracking @a src handle by calling
+ * @ref mv_face_tracking_model_create_open()
+ *
+ * @see mv_face_tracking_model_create_open()
+ */
+int mv_face_tracking_model_clone_open(
+ mv_face_tracking_model_h src,
+ mv_face_tracking_model_h *dst);
+
+/**
+ * @brief Call this method to save tracking model to the file.
+ *
+ * @since_tizen 3.0
+ * @remarks After model is saved to the file, it can be loaded from this file
+ * with @ref mv_face_tracking_model_load_open() function.
+ * @param [in] file_name The name of the file where model will be saved
+ * @param [in] tracking_model The handle to the tracking model to be
+ * saved to the file
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_INVALID_OPERATION Invalid operation
+ * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path
+ * @retval #MEDIA_VISION_ERROR_PERMISSION_DENIED Not permitted
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT Not supported format
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ *
+ * @pre Create a face tracking handle by calling
+ * @ref mv_face_tracking_model_create_open()
+ * @post Saved model can be loaded from file using
+ * @ref mv_face_tracking_model_load_open() function
+ *
+ * @see mv_face_tracking_model_load_open()
+ * @see mv_face_tracking_model_create_open()
+ */
+int mv_face_tracking_model_save_open(
+ const char *file_name,
+ mv_face_tracking_model_h tracking_model);
+
+/**
+ * @brief Call this method to load a tracking model from file.
+ *
+ * @since_tizen 3.0
+ * @remarks Tracking models can be loaded from files saved with
+ * mv_face_tracking_model_save_open() function.
+ * @param [in] file_name Path to the file from which model will be
+ * loaded
+ * @param [in] tracking_model The handle to the tracking model to be
+ * loaded from file
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_INVALID_OPERATION Invalid operation
+ * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path
+ * @retval #MEDIA_VISION_ERROR_PERMISSION_DENIED Not permitted
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT Not supported format
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ *
+ * @pre Models has been saved by @ref mv_face_tracking_model_save_open()
+ * function can be loaded with this function
+ * @post After model has been loaded and if further tracking will be performed
+ * on the video which is not continuation of the last tracking performed
+ * for the model, it is recommended to call
+ * @ref mv_face_tracking_model_prepare_open() function
+ *
+ * @see mv_face_tracking_model_save_open()
+ * @see mv_face_tracking_model_destroy_open()
+ */
+int mv_face_tracking_model_load_open(
+ const char *file_name,
+ mv_face_tracking_model_h *tracking_model);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __TIZEN_MEDIAVISION_FACE_OPEN_H__ */
diff --git a/mv_face/face/src/FaceDetector.cpp b/mv_face/face/src/FaceDetector.cpp
new file mode 100644
index 00000000..21d81958
--- /dev/null
+++ b/mv_face/face/src/FaceDetector.cpp
@@ -0,0 +1,105 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "FaceDetector.h"
+
+namespace MediaVision
+{
+namespace Face
+{
+
+/**
+ * @brief Constructs a detector with no cascade loaded.
+ *        detectFaces() returns false until loadHaarcascade() succeeds.
+ */
+FaceDetector::FaceDetector() :
+ m_faceCascade(),
+ m_haarcascadeFilepath(),
+ m_faceCascadeIsLoaded(false)
+{
+ ; /* NULL */
+}
+
+/**
+ * @brief Trivial destructor; all members release themselves (RAII).
+ */
+FaceDetector::~FaceDetector()
+{
+ ; /* NULL */
+}
+
+/**
+ * @brief Runs Haar-cascade face detection on @a image.
+ *
+ * @param [in]  image          Grayscale frame to scan
+ * @param [in]  roi            Region of interest; applied only when it lies
+ *                             fully inside the image, otherwise the whole
+ *                             image is scanned
+ * @param [in]  minSize        Minimum face size; forwarded to OpenCV only
+ *                             when positive and not larger than the image
+ * @param [out] faceLocations  Detected face rectangles in full-image
+ *                             coordinates (cleared on entry)
+ * @return false if no cascade has been loaded via loadHaarcascade(),
+ *         true otherwise (including the zero-detections case)
+ */
+bool FaceDetector::detectFaces(
+ const cv::Mat& image,
+ const cv::Rect& roi,
+ const cv::Size& minSize,
+ std::vector<cv::Rect>& faceLocations)
+{
+ if (!m_faceCascadeIsLoaded)
+ {
+ return false;
+ }
+
+ faceLocations.clear();
+
+ /* Shallow view of the input; narrowed to roi below when roi is valid. */
+ cv::Mat intrestingRegion = image;
+
+ bool roiIsUsed = false;
+ if (roi.x >= 0 && roi.y >= 0 && roi.width > 0 && roi.height > 0 &&
+ (roi.x + roi.width) <= image.cols && (roi.y + roi.height) <= image.rows)
+ {
+ intrestingRegion = intrestingRegion(roi);
+ roiIsUsed = true;
+ }
+
+ /* Only pass minSize when it is meaningful; otherwise rely on the
+ detectMultiScale defaults. */
+ if (minSize.width > 0 && minSize.height > 0 &&
+ minSize.width <= image.cols && minSize.height <= image.rows)
+ {
+ m_faceCascade.detectMultiScale(
+ intrestingRegion,
+ faceLocations,
+ 1.1,
+ 3,
+ 0,
+ minSize);
+ }
+ else
+ {
+ m_faceCascade.detectMultiScale(intrestingRegion, faceLocations);
+ }
+
+ /* Detection ran in roi-local coordinates; shift results back to
+ full-image coordinates. */
+ if (roiIsUsed)
+ {
+ const size_t numberOfLocations = faceLocations.size();
+ for (size_t i = 0u; i < numberOfLocations; ++i)
+ {
+ faceLocations[i].x += roi.x;
+ faceLocations[i].y += roi.y;
+ }
+ }
+
+ return true;
+}
+
+/**
+ * @brief Loads the Haar cascade from @a haarcascadeFilepath.
+ *
+ * Reloads only when nothing is loaded yet or the path differs from the
+ * currently loaded one; a repeated call with the same path is a no-op.
+ * @return true on success (or already loaded), false if load() failed
+ */
+bool FaceDetector::loadHaarcascade(const std::string& haarcascadeFilepath)
+{
+
+ if (!m_faceCascadeIsLoaded || m_haarcascadeFilepath != haarcascadeFilepath)
+ {
+ /* Assignment inside the condition: remember the load result so a
+ later detectFaces() call can check it. */
+ if (!(m_faceCascadeIsLoaded = m_faceCascade.load(haarcascadeFilepath)))
+ {
+ return false;
+ }
+ m_haarcascadeFilepath = haarcascadeFilepath;
+ }
+
+ return true;
+}
+
+} /* Face */
+} /* MediaVision */
diff --git a/mv_face/face/src/FaceExpressionRecognizer.cpp b/mv_face/face/src/FaceExpressionRecognizer.cpp
new file mode 100644
index 00000000..51d9d05e
--- /dev/null
+++ b/mv_face/face/src/FaceExpressionRecognizer.cpp
@@ -0,0 +1,105 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "FaceExpressionRecognizer.h"
+
+#include "mv_private.h"
+
+#include <vector>
+
+#include <opencv/cv.h>
+
+namespace MediaVision
+{
+namespace Face
+{
+
+static const int MinDetectionWidth = 30;
+static const int MinDetectionHeight = 30;
+
+/**
+ * @brief Default configuration: use the stock OpenCV smile cascade
+ *        shipped with the platform.
+ */
+FaceRecognizerConfig::FaceRecognizerConfig() :
+ mHaarcascadeFilepath(
+ "/usr/share/OpenCV/haarcascades/haarcascade_smile.xml")
+{
+ ; /* NULL */
+}
+
+/**
+ * @brief Recognizes smile/neutral expression on the bottom half of the
+ *        face located at @a faceLocation inside @a grayImage.
+ *
+ * @param [in]  grayImage       Grayscale frame containing the face
+ * @param [in]  faceLocation    Face bounding box inside @a grayImage
+ * @param [out] faceExpression  MV_FACE_SMILE if exactly one smile was
+ *                              found, MV_FACE_NEUTRAL if none,
+ *                              MV_FACE_UNKNOWN otherwise
+ * @param [in]  config          Provides the smile cascade file path
+ * @return #MEDIA_VISION_ERROR_NONE on success, otherwise an error code
+ */
+int FaceExpressionRecognizer::recognizeFaceExpression(
+ const cv::Mat& grayImage,
+ const mv_rectangle_s& faceLocation,
+ mv_face_facial_expression_e *faceExpression,
+ const FaceRecognizerConfig& config)
+{
+ if (NULL == faceExpression)
+ {
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ /* Smiles are searched only in the lower half of the face box. */
+ const int smileRectHeight = cvRound((float)faceLocation.height / 2);
+
+ const cv::Rect roi(
+ faceLocation.point.x,
+ faceLocation.point.y + faceLocation.height - smileRectHeight,
+ faceLocation.width,
+ smileRectHeight);
+
+ /* Too small a mouth region can not be classified reliably. */
+ if (roi.width < MinDetectionWidth ||
+ roi.height < MinDetectionHeight)
+ {
+ (*faceExpression) = MV_FACE_UNKNOWN;
+ return MEDIA_VISION_ERROR_NONE;
+ }
+
+ if (0 > roi.x ||
+ 0 > roi.y ||
+ roi.x + roi.width > grayImage.cols ||
+ roi.y + roi.height > grayImage.rows)
+ {
+ return MEDIA_VISION_ERROR_INVALID_OPERATION;
+ }
+
+ const cv::Mat mouthImg(grayImage, roi);
+
+ std::vector<cv::Rect> areas;
+
+ cv::CascadeClassifier smileClassifier;
+ /* Bug fix: the return value of load() was previously ignored;
+ calling detectMultiScale() on an unloaded cascade fails. */
+ if (!smileClassifier.load(config.mHaarcascadeFilepath))
+ {
+ LOGE("Failed to load haarcascade for smile detection.");
+ return MEDIA_VISION_ERROR_INVALID_OPERATION;
+ }
+ smileClassifier.detectMultiScale(
+ mouthImg,
+ areas,
+ 1.1,
+ 80,
+ cv::CASCADE_FIND_BIGGEST_OBJECT |
+ cv::CASCADE_DO_CANNY_PRUNING |
+ cv::CASCADE_SCALE_IMAGE,
+ cv::Size(MinDetectionWidth, MinDetectionHeight));
+
+ /* Zero hits => neutral; exactly one => smile; anything else is
+ ambiguous and reported as unknown. */
+ (*faceExpression) = MV_FACE_UNKNOWN;
+ const size_t smilesFoundSize = areas.size();
+ if (smilesFoundSize == 0)
+ {
+ (*faceExpression) = MV_FACE_NEUTRAL;
+ }
+ else if (smilesFoundSize == 1)
+ {
+ (*faceExpression) = MV_FACE_SMILE;
+ }
+
+ return MEDIA_VISION_ERROR_NONE;
+}
+
+} /* Face */
+} /* MediaVision */
diff --git a/mv_face/face/src/FaceEyeCondition.cpp b/mv_face/face/src/FaceEyeCondition.cpp
new file mode 100644
index 00000000..9432d1e1
--- /dev/null
+++ b/mv_face/face/src/FaceEyeCondition.cpp
@@ -0,0 +1,229 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "FaceEyeCondition.h"
+
+#include <mv_private.h>
+
+#include <vector>
+
+namespace MediaVision
+{
+namespace Face
+{
+
+/**
+ * @brief Extracts normalized left- and right-eye patches from
+ *        @a grayImage for the face at @a faceLocation.
+ *
+ * First allocates leftEye/rightEye from fixed sub-ranges of the whole
+ * image (this only fixes their sizes for the final resize), then
+ * computes per-eye rectangles from the face box and resizes those
+ * crops into the outputs.
+ * NOTE(review): both initial crops use the same colRange expression
+ * (right half of the image) while rowRange differs — presumably only
+ * the resulting Mat sizes matter here, but confirm this is intended.
+ */
+void FaceEyeCondition::splitEyes(
+ const cv::Mat& grayImage,
+ mv_rectangle_s faceLocation,
+ cv::Mat& leftEye,
+ cv::Mat& rightEye)
+{
+ leftEye = grayImage.rowRange(0, grayImage.rows / 2 - grayImage.rows / 10)
+ .colRange(grayImage.cols / 2 + grayImage.cols / 10,
+ grayImage.cols)
+ .clone();
+
+ rightEye = grayImage.rowRange(grayImage.rows / 2 + grayImage.rows / 10,
+ grayImage.rows)
+ .colRange(grayImage.cols / 2 + grayImage.cols / 10,
+ grayImage.cols)
+ .clone();
+
+ const cv::Rect faceRect(
+ faceLocation.point.x,
+ faceLocation.point.y,
+ faceLocation.width,
+ faceLocation.height);
+
+ /* Eye band: starts at ~1/4.5 of the face height, spans 1/3 of it;
+ horizontally the face (minus 1/16 margins) is split in halves. */
+ const cv::Rect eyeAreaRight(
+ faceRect.x + faceRect.width / 16,
+ (int) (faceRect.y + (faceRect.height / 4.5)),
+ (faceRect.width - 2 * faceRect.width / 16) / 2,
+ (int) (faceRect.height / 3.0));
+
+ const cv::Rect eyeAreaLeft(
+ faceRect.x + faceRect.width / 16
+ + (faceRect.width - 2 * faceRect.width / 16) / 2,
+ (int) (faceRect.y + (faceRect.height / 4.5)),
+ (faceRect.width - 2 * faceRect.width / 16) / 2,
+ (int) (faceRect.height / 3.0));
+
+ const double xLeftEyeCenter = (2 * eyeAreaLeft.x + eyeAreaLeft.width) / 2.;
+ const double yLeftEyeCenter = (2 * eyeAreaLeft.y + eyeAreaLeft.height) / 2.;
+
+ const double xRightEyeCenter = (2 * eyeAreaRight.x + eyeAreaRight.width) / 2.;
+ const double yRightEyeCenter = (2 * eyeAreaRight.y + eyeAreaRight.height) / 2.;
+
+ /* Final eye rects: centered on each eye area, half its size. */
+ const cv::Rect leftEyeRect(xLeftEyeCenter - eyeAreaLeft.width / 4,
+ yLeftEyeCenter - eyeAreaLeft.height / 4,
+ eyeAreaLeft.width / 2,
+ eyeAreaLeft.height / 2);
+
+ const cv::Rect rightEyeRect(xRightEyeCenter - eyeAreaRight.width / 4,
+ yRightEyeCenter - eyeAreaRight.height / 4,
+ eyeAreaRight.width / 2,
+ eyeAreaRight.height / 2);
+
+ cv::resize(
+ grayImage(leftEyeRect),
+ leftEye,
+ leftEye.size());
+ cv::resize(
+ grayImage(rightEyeRect),
+ rightEye,
+ rightEye.size());
+}
+
+/**
+ * @brief Classifies a single eye patch as open/closed/not-found.
+ *
+ * Works by thresholding the equalized patch to isolate dark regions
+ * (pupil/iris), then analyzing the contours: a sufficiently large,
+ * compact contour near the center means an open eye, while many small
+ * fragments mean a closed eye (lashes/lid edges).
+ *
+ * @param [in] eye  Grayscale eye patch
+ * @return MV_FACE_EYES_OPEN, MV_FACE_EYES_CLOSED or
+ *         MV_FACE_EYES_NOT_FOUND (no contours at all)
+ */
+int FaceEyeCondition::isEyeOpen(const cv::Mat& eye)
+{
+ int isOpen = MV_FACE_EYES_CLOSED;
+
+ cv::Mat eyeEqualized;
+ cv::equalizeHist(eye, eyeEqualized);
+
+ /* Keep only near-black pixels after equalization. */
+ const int thresold = 8;
+ eyeEqualized = eyeEqualized < thresold;
+
+ std::vector<std::vector<cv::Point> > contours;
+ std::vector<cv::Vec4i> hierarchy;
+
+ cv::findContours(
+ eyeEqualized,
+ contours,
+ hierarchy,
+ CV_RETR_CCOMP,
+ CV_CHAIN_APPROX_SIMPLE);
+
+ const size_t contoursSize = contours.size();
+
+ if (!contoursSize)
+ {
+ return MV_FACE_EYES_NOT_FOUND;
+ }
+
+ /* Central region of the patch where a pupil is expected. */
+ const int xCenter = eyeEqualized.cols / 2;
+ const int yCenter = eyeEqualized.rows / 2;
+ const int width = eyeEqualized.cols / 2.5;
+ const int height = eyeEqualized.rows / 2.5;
+
+ const cv::Rect boundThresold(xCenter - width, yCenter - height, 2 * width, 2 * height);
+
+ /* Heuristic constants tuned for this classifier. */
+ const int widthHeightRatio = 3;
+ const double areaRatio = 0.005;
+ const double areaSmallRatio = 0.0005;
+ size_t rectanglesInsideCount = 0u;
+
+ for (size_t i = 0; i < contoursSize; ++i)
+ {
+ const cv::Rect currentRect = cv::boundingRect(contours[i]);
+ const double currentArea = cv::contourArea(contours[i]);
+
+ /* Large, not-too-elongated central contour => open eye. */
+ if (boundThresold.contains(currentRect.br()) &&
+ boundThresold.contains(currentRect.tl()) &&
+ currentArea > areaRatio * boundThresold.area() &&
+ currentRect.width < widthHeightRatio * currentRect.height)
+ {
+ isOpen = MV_FACE_EYES_OPEN;
+ }
+ else if (boundThresold.contains(currentRect.br()) &&
+ boundThresold.contains(currentRect.tl()) &&
+ currentArea > areaSmallRatio * boundThresold.area())
+ {
+ ++rectanglesInsideCount;
+ }
+ }
+
+ /* Too many small fragments override the open verdict. */
+ if (rectanglesInsideCount > 8u)
+ {
+ isOpen = MV_FACE_EYES_CLOSED;
+ }
+
+ return isOpen;
+}
+
+/**
+ * @brief Recognizes the eye condition (open/closed/not found) for the
+ *        face at @a faceLocation inside @a grayImage.
+ *
+ * @param [in]  grayImage     Grayscale frame containing the face
+ * @param [in]  faceLocation  Face bounding box inside @a grayImage
+ * @param [out] eyeCondition  Resulting condition; the overall state is
+ *                            OPEN only when both eyes are open
+ * @return #MEDIA_VISION_ERROR_NONE on success, otherwise an error code
+ */
+int FaceEyeCondition::recognizeEyeCondition(
+ const cv::Mat& grayImage,
+ mv_rectangle_s faceLocation,
+ mv_face_eye_condition_e *eyeCondition)
+{
+ /* Bug fix: the NULL check must run first and must not dereference
+ the pointer. Previously *eyeCondition was written before the NULL
+ check, and the NULL branch itself dereferenced the NULL pointer. */
+ if (NULL == eyeCondition)
+ {
+ LOGE("Output eye condition is NULL. Eye condition recognition failed.");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ if (grayImage.empty())
+ {
+ *eyeCondition = MV_FACE_EYES_NOT_FOUND;
+
+ LOGE("Input image is empty. Eye condition recognition failed.");
+ return MEDIA_VISION_ERROR_NO_DATA;
+ }
+
+ if (faceLocation.height <= 0 || faceLocation.width <= 0 ||
+ faceLocation.point.x < 0 || faceLocation.point.y < 0 ||
+ (faceLocation.point.x + faceLocation.width) > grayImage.cols ||
+ (faceLocation.point.y + faceLocation.height) > grayImage.rows)
+ {
+ *eyeCondition = MV_FACE_EYES_NOT_FOUND;
+
+ LOGE("Input face location is wrong. Eye condition recognition failed.");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ // split left and right eyes
+ cv::Mat leftEye;
+ cv::Mat rightEye;
+ splitEyes(grayImage, faceLocation, leftEye, rightEye);
+
+ // recognize eyes conditions
+ const int isOpenLeft = isEyeOpen(leftEye);
+
+ /* A closed or undetected left eye already decides the result. */
+ if (isOpenLeft == MV_FACE_EYES_CLOSED)
+ {
+ *eyeCondition = MV_FACE_EYES_CLOSED;
+
+ return MEDIA_VISION_ERROR_NONE;
+ }
+ else if (isOpenLeft == MV_FACE_EYES_NOT_FOUND)
+ {
+ *eyeCondition = MV_FACE_EYES_NOT_FOUND;
+
+ return MEDIA_VISION_ERROR_NONE;
+ }
+
+ /* Left eye is open: the combined state follows the right eye. */
+ const int isOpenRight = isEyeOpen(rightEye);
+
+ if (isOpenRight == MV_FACE_EYES_OPEN)
+ {
+ *eyeCondition = MV_FACE_EYES_OPEN;
+ }
+ else if (isOpenRight == MV_FACE_EYES_CLOSED)
+ {
+ *eyeCondition = MV_FACE_EYES_CLOSED;
+ }
+ else
+ {
+ *eyeCondition = MV_FACE_EYES_NOT_FOUND;
+ }
+
+ return MEDIA_VISION_ERROR_NONE;
+}
+
+} /* Face */
+} /* MediaVision */
diff --git a/mv_face/face/src/FaceRecognitionModel.cpp b/mv_face/face/src/FaceRecognitionModel.cpp
new file mode 100644
index 00000000..1887cea8
--- /dev/null
+++ b/mv_face/face/src/FaceRecognitionModel.cpp
@@ -0,0 +1,546 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "FaceRecognitionModel.h"
+
+#include <app_common.h>
+
+#include "mv_private.h"
+#include "mv_common.h"
+
+#include <map>
+
+#include <stdio.h>
+#include <unistd.h>
+
+namespace MediaVision
+{
+namespace Face
+{
+
+namespace
+{
+
+/**
+ * @brief Copies recognizer state between two FaceRecognizer instances
+ *        by serializing the source to a temporary file and loading it
+ *        into the destination (OpenCV 2.4 offers no deep copy for
+ *        Algorithm objects).
+ *
+ * @param [in]     srcAlg  Recognizer to copy from
+ * @param [in,out] dstAlg  Recognizer to copy into
+ * @return #MEDIA_VISION_ERROR_NONE (serialization errors are not
+ *         propagated; a failed remove() is only logged)
+ */
+int CopyOpenCVAlgorithmParameters(const cv::Ptr<cv::FaceRecognizer>& srcAlg,
+ cv::Ptr<cv::FaceRecognizer>& dstAlg)
+{
+ char tempPath[1024];
+
+ /* Bug fix: bound the formatted path with snprintf() instead of the
+ unbounded sprintf(). The pointer pair keeps the name unique per
+ (src, dst) instance pair. */
+ snprintf(tempPath, sizeof(tempPath), "/tmp/alg_copy_%p_%p", srcAlg.obj, dstAlg.obj);
+
+ srcAlg->save(tempPath);
+ dstAlg->load(tempPath);
+
+ if (0 != remove(tempPath))
+ {
+ LOGW("Error removing serialized FaceRecognizer in %s", tempPath);
+ }
+
+ // todo: consider to uncomment this lines if OpenCV will support deep
+ // copy of AlgorithmInfo objects:
+
+ /*std::vector<std::string> paramNames;
+ srcAlg->getParams(paramNames);
+ size_t paramSize = paramNames.size();
+ for (size_t i = 0; i < paramSize; ++i)
+ {
+ int pType = srcAlg->paramType(paramNames[i]);
+
+ switch(pType)
+ {
+ case cv::Param::INT:
+ case cv::Param::UNSIGNED_INT:
+ case cv::Param::UINT64:
+ case cv::Param::SHORT:
+ case cv::Param::UCHAR:
+ dstAlg->set(paramNames[i], srcAlg->getInt(paramNames[i]));
+ break;
+ case cv::Param::BOOLEAN:
+ dstAlg->set(paramNames[i], srcAlg->getBool(paramNames[i]));
+ break;
+ case cv::Param::REAL:
+ case cv::Param::FLOAT:
+ dstAlg->set(paramNames[i], srcAlg->getDouble(paramNames[i]));
+ break;
+ case cv::Param::STRING:
+ dstAlg->set(paramNames[i], srcAlg->getString(paramNames[i]));
+ break;
+ case cv::Param::MAT:
+ dstAlg->set(paramNames[i], srcAlg->getMat(paramNames[i]));
+ break;
+ case cv::Param::MAT_VECTOR:
+ {
+ //std::vector<cv::Mat> value = srcAlg->getMatVector(paramNames[i]);
+ //dstAlg->info()->addParam(*(dstAlg.obj), paramNames[i].c_str(), value);
+ dstAlg->set(paramNames[i], srcAlg->getMatVector(paramNames[i]));
+ break;
+ }
+ case cv::Param::ALGORITHM:
+ dstAlg->set(paramNames[i], srcAlg->getAlgorithm(paramNames[i]));
+ break;
+ default:
+ LOGE("While copying algorothm parameters unsupported parameter "
+ "%s was found.", paramNames[i].c_str());
+
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED;
+ break;
+ }
+ }*/
+
+ return MEDIA_VISION_ERROR_NONE;
+}
+
+/**
+ * @brief Collects the set of labels a trained FaceRecognizer knows
+ *        about, reading its "labels" matrix (one label per row).
+ *        No-op when @a recognizer is empty.
+ */
+void ParseOpenCVLabels(
+ const cv::Ptr<cv::FaceRecognizer>& recognizer,
+ std::set<int>& outLabels)
+{
+ if (!recognizer.empty())
+ {
+ cv::Mat labels = recognizer->getMat("labels");
+ for(int i = 0; i < labels.rows; ++i)
+ {
+ outLabels.insert(labels.at<int>(i, 0));
+ }
+ }
+}
+
+} /* anonymous namespace */
+
+/**
+ * @brief Default configuration: unknown model type, no threshold
+ *        (DBL_MAX disables it), standard LBPH grid/radius/neighbors
+ *        and 150x150 sample size for Eigen/Fisher faces.
+ */
+FaceRecognitionModelConfig::FaceRecognitionModelConfig() :
+ mModelType(MEDIA_VISION_FACE_MODEL_TYPE_UNKNOWN),
+ mNumComponents(0),
+ mThreshold(DBL_MAX),
+ mRadius(1),
+ mNeighbors(8),
+ mGridX(8),
+ mGridY(8),
+ mImgWidth(150),
+ mImgHeight(150)
+{
+ ; /* NULL */
+}
+
+/**
+ * @brief Default results: nothing recognized yet.
+ */
+FaceRecognitionResults::FaceRecognitionResults() :
+ mIsRecognized(false),
+ mFaceLabel(-1),
+ mConfidence(0.0)
+{
+ ; /* NULL */
+}
+
+/**
+ * @brief Field-wise inequality; used by learn() to decide whether the
+ *        underlying recognizer must be recreated.
+ */
+bool FaceRecognitionModelConfig::operator!=(
+ const FaceRecognitionModelConfig& other) const
+{
+ return mModelType != other.mModelType ||
+ mNumComponents != other.mNumComponents ||
+ mThreshold != other.mThreshold ||
+ mRadius != other.mRadius ||
+ mNeighbors != other.mNeighbors ||
+ mGridX != other.mGridX ||
+ mGridY != other.mGridY ||
+ mImgWidth != other.mImgWidth ||
+ mImgHeight != other.mImgHeight;
+}
+
+/**
+ * @brief Constructs an empty model; recognize() fails until learn()
+ *        or load() succeeds.
+ */
+FaceRecognitionModel::FaceRecognitionModel() :
+ m_canRecognize(false),
+ m_recognizer(NULL)
+{
+ ; /* NULL */
+}
+
+/**
+ * @brief Deep copy: recreates the recognizer from the origin's config
+ *        and transfers its trained state via file serialization.
+ */
+FaceRecognitionModel::FaceRecognitionModel(const FaceRecognitionModel& origin) :
+ m_canRecognize(origin.m_canRecognize),
+ m_faceSamples(origin.m_faceSamples),
+ m_learnAlgorithmConfig(origin.m_learnAlgorithmConfig),
+ m_recognizer(CreateRecognitionAlgorithm(origin.m_learnAlgorithmConfig)),
+ m_learnedLabels(origin.m_learnedLabels)
+{
+ if (!m_recognizer.empty())
+ {
+ CopyOpenCVAlgorithmParameters(origin.m_recognizer, m_recognizer);
+ }
+}
+
+/**
+ * @brief Copy assignment; self-assignment safe, mirrors the copy ctor.
+ */
+FaceRecognitionModel& FaceRecognitionModel::operator=(
+ const FaceRecognitionModel& copy)
+{
+ if (this != &copy)
+ {
+ m_canRecognize = copy.m_canRecognize;
+ m_faceSamples = copy.m_faceSamples;
+ m_learnAlgorithmConfig = copy.m_learnAlgorithmConfig;
+ m_recognizer = CreateRecognitionAlgorithm(m_learnAlgorithmConfig);
+ m_learnedLabels = copy.m_learnedLabels;
+
+ if (!m_recognizer.empty())
+ {
+ CopyOpenCVAlgorithmParameters(copy.m_recognizer, m_recognizer);
+ }
+ }
+
+ return *this;
+}
+
+/**
+ * @brief Trivial destructor; cv::Ptr releases the recognizer.
+ */
+FaceRecognitionModel::~FaceRecognitionModel()
+{
+ ; /* NULL */
+}
+
+/**
+ * @brief Serializes the trained model to @a fileName under the
+ *        application data path (app_get_data_path() is prepended).
+ *
+ * Writes an "algorithm" tag so load() can pick the matching factory.
+ * @return #MEDIA_VISION_ERROR_NONE on success;
+ *         INVALID_OPERATION if nothing was learned yet,
+ *         INVALID_PATH / PERMISSION_DENIED on filesystem errors,
+ *         NOT_SUPPORTED_FORMAT for an unknown model type
+ */
+int FaceRecognitionModel::save(const std::string& fileName)
+{
+ if (!m_recognizer.empty())
+ {
+ /* find directory */
+ std::string prefix_path = std::string(app_get_data_path());
+ LOGD("prefix_path: %s", prefix_path.c_str());
+
+ std::string filePath;
+ filePath += prefix_path;
+ filePath += fileName;
+
+ /* check the directory is available */
+ std::string prefix_path_check = filePath.substr(0, filePath.find_last_of('/'));
+ if (access(prefix_path_check.c_str(),F_OK))
+ {
+ LOGE("Can't save recognition model. Path[%s] doesn't existed.", prefix_path_check.c_str());
+
+ return MEDIA_VISION_ERROR_INVALID_PATH;
+ }
+
+ cv::FileStorage storage(filePath, cv::FileStorage::WRITE);
+ if (!storage.isOpened())
+ {
+ LOGE("Can't save recognition model. Write to file permission denied.");
+ return MEDIA_VISION_ERROR_PERMISSION_DENIED;
+ }
+
+ /* Tag the storage with the algorithm name read back by load(). */
+ switch (m_learnAlgorithmConfig.mModelType)
+ {
+ case MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES:
+ storage << "algorithm" << "Eigenfaces";
+ break;
+ case MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES:
+ storage << "algorithm" << "Fisherfaces";
+ break;
+ case MEDIA_VISION_FACE_MODEL_TYPE_LBPH:
+ storage << "algorithm" << "LBPH";
+ break;
+ default:
+ storage.release();
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT;
+ }
+
+ storage << "can_recognize" << m_canRecognize;
+ m_recognizer->save(storage);
+
+ storage.release();
+ }
+ else
+ {
+ LOGE("Attempt to save recognition model before learn");
+ return MEDIA_VISION_ERROR_INVALID_OPERATION;
+ }
+
+ return MEDIA_VISION_ERROR_NONE;
+}
+
+/**
+ * @brief Loads a model previously stored by save(); @a fileName is
+ *        resolved against app_get_data_path().
+ *
+ * Reads the "algorithm" tag, creates the matching OpenCV recognizer,
+ * restores its state and the per-algorithm config fields, and only
+ * commits to the members once everything succeeded (temporaries keep
+ * the model intact on failure).
+ * @return #MEDIA_VISION_ERROR_NONE on success;
+ *         INVALID_PATH / PERMISSION_DENIED on filesystem errors,
+ *         NOT_SUPPORTED_FORMAT for an unrecognized algorithm tag
+ */
+int FaceRecognitionModel::load(const std::string& fileName)
+{
+ /* find directory */
+ std::string prefix_path = std::string(app_get_data_path());
+ LOGD("prefix_path: %s", prefix_path.c_str());
+
+ std::string filePath;
+ filePath += prefix_path;
+ filePath += fileName;
+
+ if (access(filePath.c_str(),F_OK))
+ {
+ LOGE("Can't load face recognition model. File[%s] doesn't exist.", filePath.c_str());
+
+ return MEDIA_VISION_ERROR_INVALID_PATH;
+ }
+
+ cv::FileStorage storage(filePath, cv::FileStorage::READ);
+ if (!storage.isOpened())
+ {
+ LOGE("Can't load recognition model. Read from file permission denied.");
+
+ return MEDIA_VISION_ERROR_PERMISSION_DENIED;
+ }
+
+ LOGD("Loading recognition model from file.");
+
+ std::string algName;
+ int canRecognize = 0;
+ storage["algorithm"] >> algName;
+ storage["can_recognize"] >> canRecognize;
+
+ cv::Ptr<cv::FaceRecognizer> tempRecognizer;
+ FaceRecognitionModelConfig tempConfig;
+ std::set<int> tempLearnedLabels;
+
+ if (algName == "Eigenfaces")
+ {
+ tempRecognizer = cv::createEigenFaceRecognizer();
+ tempRecognizer->load(storage);
+ tempConfig.mModelType =
+ MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES;
+ tempConfig.mNumComponents =
+ tempRecognizer->getInt("ncomponents");
+ ParseOpenCVLabels(tempRecognizer, tempLearnedLabels);
+ }
+ else if (algName == "Fisherfaces")
+ {
+ tempRecognizer = cv::createFisherFaceRecognizer();
+ tempRecognizer->load(storage);
+ tempConfig.mModelType =
+ MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES;
+ tempConfig.mNumComponents =
+ tempRecognizer->getInt("ncomponents");
+ ParseOpenCVLabels(tempRecognizer, tempLearnedLabels);
+ }
+ else if (algName == "LBPH")
+ {
+ tempRecognizer = cv::createLBPHFaceRecognizer();
+ tempRecognizer->load(storage);
+ tempConfig.mModelType =
+ MEDIA_VISION_FACE_MODEL_TYPE_LBPH;
+ tempConfig.mGridX = tempRecognizer->getInt("grid_x");
+ tempConfig.mGridY = tempRecognizer->getInt("grid_y");
+ tempConfig.mNeighbors = tempRecognizer->getInt("neighbors");
+ tempConfig.mRadius = tempRecognizer->getInt("radius");
+ ParseOpenCVLabels(tempRecognizer, tempLearnedLabels);
+ }
+ else
+ {
+ tempConfig = FaceRecognitionModelConfig();
+ LOGE("Failed to load face recognition model from file. File is in "
+ "unsupported format");
+
+ storage.release();
+
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT;
+ }
+
+ tempConfig.mThreshold = tempRecognizer->getDouble("threshold");
+
+ LOGD("Recognition model of [%s] type has been loaded from file",
+ algName.c_str());
+
+ storage.release();
+
+ /* Commit the fully-loaded state. */
+ m_recognizer = tempRecognizer;
+ m_learnAlgorithmConfig = tempConfig;
+ m_canRecognize = (canRecognize == 1);
+ m_learnedLabels.clear();
+ m_learnedLabels = tempLearnedLabels;
+
+ return MEDIA_VISION_ERROR_NONE;
+}
+
+/**
+ * @brief Adds one training image for @a faceLabel; takes effect at the
+ *        next learn() call.
+ */
+int FaceRecognitionModel::addFaceExample(
+ const cv::Mat& faceImage,
+ int faceLabel)
+{
+ m_faceSamples[faceLabel].push_back(faceImage);
+
+ LOGD("Added face image example for label %i for recognition model",
+ faceLabel);
+
+ return MEDIA_VISION_ERROR_NONE;
+}
+
+/**
+ * @brief Drops all collected training examples (all labels).
+ */
+int FaceRecognitionModel::resetFaceExamples(void)
+{
+ m_faceSamples.clear();
+
+ LOGD("All face image examples have been removed from recognition model");
+
+ return MEDIA_VISION_ERROR_NONE;
+}
+
+/**
+ * @brief Drops the training examples of a single label.
+ * @return KEY_NOT_AVAILABLE if no examples exist for @a faceLabel
+ */
+int FaceRecognitionModel::resetFaceExamples(int faceLabel)
+{
+ if (1 > m_faceSamples.erase(faceLabel))
+ {
+ LOGD("Failed to remove face image examples for label %i. "
+ "No such examples", faceLabel);
+
+ return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE;
+ }
+
+ LOGD("Face image examples for label %i have been removed from "
+ "recognition model", faceLabel);
+
+ return MEDIA_VISION_ERROR_NONE;
+}
+
+/**
+ * @brief Returns the labels the model has been trained on (not the
+ *        labels of pending, unlearned examples).
+ */
+const std::set<int>& FaceRecognitionModel::getFaceLabels(void) const
+{
+ return m_learnedLabels;
+}
+
+/**
+ * @brief Trains the model on the collected examples using @a config.
+ *
+ * LBPH supports incremental training (update() keeps previously learned
+ * labels); Eigen/Fisher faces require uniformly sized samples, so
+ * every example is resized to config.mImgWidth x mImgHeight and the
+ * model is retrained from scratch. The recognizer is recreated when the
+ * config differs from the one used previously.
+ * @return #MEDIA_VISION_ERROR_NONE on success; NO_DATA without
+ *         examples; NOT_SUPPORTED for an unknown model type
+ */
+int FaceRecognitionModel::learn(const FaceRecognitionModelConfig& config)
+{
+ bool isIncremental = false;
+ bool isUnisize = false;
+
+ if (MEDIA_VISION_FACE_MODEL_TYPE_LBPH == config.mModelType)
+ {
+ isIncremental = true;
+ }
+
+ if (MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES == config.mModelType ||
+ MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES == config.mModelType)
+ {
+ isUnisize = true;
+ }
+
+ std::vector<cv::Mat> samples;
+ std::vector<int> labels;
+ std::set<int> learnedLabels;
+
+ /* Incremental mode keeps the already-learned label set. */
+ if (isIncremental)
+ {
+ learnedLabels.insert(m_learnedLabels.begin(), m_learnedLabels.end());
+ }
+
+ std::map<int, std::vector<cv::Mat> >::const_iterator it =
+ m_faceSamples.begin();
+ for (; it != m_faceSamples.end(); ++it)
+ {
+ const size_t faceClassSamplesSize = it->second.size();
+ labels.insert(labels.end(), faceClassSamplesSize, it->first);
+ learnedLabels.insert(it->first);
+
+ if (!isUnisize)
+ {
+ /* NOTE(review): "%u" with a size_t argument is a format
+ mismatch on LP64 targets — verify LOGD expectations. */
+ LOGD("%u examples has been added with label %i",
+ it->second.size(), it->first);
+ samples.insert(samples.end(), it->second.begin(), it->second.end());
+ }
+ else
+ {
+ /* Eigen/Fisher faces need equally sized samples. */
+ for (size_t sampleInd = 0; sampleInd < faceClassSamplesSize; ++sampleInd)
+ {
+ cv::Mat resizedSample;
+ cv::resize(it->second[sampleInd],
+ resizedSample,
+ cv::Size(config.mImgWidth, config.mImgHeight),
+ 1.0, 1.0, cv::INTER_CUBIC);
+ samples.push_back(resizedSample);
+ }
+ }
+ }
+
+ const size_t samplesSize = samples.size();
+ const size_t labelsSize = labels.size();
+
+ if (0 != samplesSize && samplesSize == labelsSize)
+ {
+ LOGD("Start to learn the model for %u samples and %u labels",
+ samplesSize, labelsSize);
+
+ if (m_learnAlgorithmConfig != config || m_recognizer.empty())
+ {
+ m_recognizer = CreateRecognitionAlgorithm(config);
+ }
+
+ if (m_recognizer.empty())
+ {
+ LOGE("Can't create recognition algorithm for recognition model. "
+ "Configuration is not supported by any of known algorithms.");
+
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED;
+ }
+
+ isIncremental ? m_recognizer->update(samples, labels) :
+ m_recognizer->train(samples, labels);
+ m_canRecognize = true;
+ m_learnedLabels.clear();
+ m_learnedLabels = learnedLabels;
+ }
+ else
+ {
+ LOGE("Can't create recognition algorithm for no examples. Try to add "
+ "some face examples before learning");
+
+ return MEDIA_VISION_ERROR_NO_DATA;
+ }
+
+ m_learnAlgorithmConfig = config;
+
+ LOGD("Recognition model has been learned");
+
+ return MEDIA_VISION_ERROR_NONE;
+}
+
+/**
+ * @brief Predicts the label of @a image with the trained recognizer.
+ *
+ * The raw OpenCV distance is mapped to a (0,1) confidence via a
+ * logistic-style transform of exp(7.5 - 0.05*distance).
+ * @return #MEDIA_VISION_ERROR_NONE on success; INVALID_OPERATION when
+ *         the model has not been trained
+ */
+int FaceRecognitionModel::recognize(const cv::Mat& image, FaceRecognitionResults& results)
+{
+ if (!m_recognizer.empty() && m_canRecognize)
+ {
+ double absConf = 0.0;
+ m_recognizer->predict(image, results.mFaceLabel, absConf);
+ // Normalize the absolute value of the confidence
+ absConf = exp(7.5 - (0.05 * absConf));
+ results.mConfidence = absConf / (1 + absConf);
+ results.mIsRecognized = true;
+ results.mFaceLocation = cv::Rect(0, 0, image.cols, image.rows);
+ }
+ else
+ {
+ LOGE("Attempt to recognize faces with untrained model");
+ return MEDIA_VISION_ERROR_INVALID_OPERATION;
+ }
+
+ return MEDIA_VISION_ERROR_NONE;
+}
+
+/**
+ * @brief Factory: creates the OpenCV recognizer matching
+ *        config.mModelType, forwarding the relevant config fields.
+ * @return Empty (NULL) pointer for an unknown model type.
+ */
+cv::Ptr<cv::FaceRecognizer> FaceRecognitionModel::CreateRecognitionAlgorithm(
+ const FaceRecognitionModelConfig& config)
+{
+ cv::Ptr<cv::FaceRecognizer> tempRecognizer;
+ switch (config.mModelType)
+ {
+ case MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES:
+ tempRecognizer = cv::createEigenFaceRecognizer(
+ config.mNumComponents,
+ config.mThreshold);
+ break;
+ case MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES:
+ tempRecognizer = cv::createFisherFaceRecognizer(
+ config.mNumComponents,
+ config.mThreshold);
+ break;
+ case MEDIA_VISION_FACE_MODEL_TYPE_LBPH:
+ tempRecognizer = cv::createLBPHFaceRecognizer(
+ config.mRadius,
+ config.mNeighbors,
+ config.mGridX,
+ config.mGridY,
+ config.mThreshold);
+ break;
+ default:
+ return NULL;
+ }
+
+ return tempRecognizer;
+}
+
+} /* Face */
+} /* MediaVision */
diff --git a/mv_face/face/src/FaceTrackingModel.cpp b/mv_face/face/src/FaceTrackingModel.cpp
new file mode 100644
index 00000000..2c4fdd6b
--- /dev/null
+++ b/mv_face/face/src/FaceTrackingModel.cpp
@@ -0,0 +1,217 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "FaceTrackingModel.h"
+
+#include <app_common.h>
+
+#include "mv_private.h"
+#include "mv_common.h"
+
+#include <unistd.h>
+
+namespace MediaVision
+{
+namespace Face
+{
+
+/**
+ * @brief Default results: not tracked, zero confidence.
+ */
+FaceTrackingResults::FaceTrackingResults() :
+ mIsTracked(false),
+ mConfidence(0.f)
+{
+ ; /* NULL */
+}
+
+/**
+ * @brief Constructs a model with a fresh MedianFlow tracker; track()
+ *        fails until prepare() succeeds.
+ */
+FaceTrackingModel::FaceTrackingModel() :
+ m_canTrack(false),
+ m_tracker(new cv::TrackerMedianFlow())
+{
+ ; /* NULL */
+}
+
+/**
+ * @brief Deep copy via the tracker's copyTo(); an empty origin tracker
+ *        leaves the fresh tracker untouched.
+ */
+FaceTrackingModel::FaceTrackingModel(const FaceTrackingModel& origin) :
+ m_canTrack(origin.m_canTrack),
+ m_tracker(new cv::TrackerMedianFlow())
+{
+ if (!origin.m_tracker.empty())
+ {
+ origin.m_tracker->copyTo(*(m_tracker.obj));
+ }
+}
+
+/**
+ * @brief Copy assignment; self-assignment safe, mirrors the copy ctor.
+ */
+FaceTrackingModel& FaceTrackingModel::operator=(const FaceTrackingModel& copy)
+{
+ if (this != &copy)
+ {
+ m_canTrack = copy.m_canTrack;
+ m_tracker = cv::Ptr<cv::TrackerMedianFlow>(new cv::TrackerMedianFlow());
+ if (!copy.m_tracker.empty())
+ {
+ copy.m_tracker->copyTo(*(m_tracker.obj));
+ }
+ }
+
+ return *this;
+}
+
+/**
+ * @brief Trivial destructor; cv::Ptr releases the tracker.
+ */
+FaceTrackingModel::~FaceTrackingModel()
+{
+ ; /* NULL */
+}
+
+/**
+ * @brief Serializes the tracker state to @a fileName under the
+ *        application data path (app_get_data_path() is prepended).
+ * @return #MEDIA_VISION_ERROR_NONE on success; INVALID_OPERATION when
+ *         no tracker exists, INVALID_PATH / PERMISSION_DENIED on
+ *         filesystem errors
+ */
+int FaceTrackingModel::save(const std::string& fileName)
+{
+ if (m_tracker.empty())
+ {
+ LOGE("Can't save tracking model. No tracking algorithm is used");
+ return MEDIA_VISION_ERROR_INVALID_OPERATION;
+ }
+
+ std::string prefix_path = std::string(app_get_data_path());
+ LOGD("prefix_path: %s", prefix_path.c_str());
+
+ std::string filePath;
+ filePath += prefix_path;
+ filePath += fileName;
+
+ /* check the directory is available */
+ std::string prefix_path_check = filePath.substr(0, filePath.find_last_of('/'));
+ if (access(prefix_path_check.c_str(),F_OK))
+ {
+ LOGE("Can't save tracking model. Path[%s] doesn't existed.", prefix_path_check.c_str());
+
+ return MEDIA_VISION_ERROR_INVALID_PATH;
+ }
+
+ cv::FileStorage storage(filePath, cv::FileStorage::WRITE);
+ if (!storage.isOpened())
+ {
+ LOGE("Can't save tracking model. Write to file permission denied.");
+ return MEDIA_VISION_ERROR_PERMISSION_DENIED;
+ }
+
+ LOGD("Storing tracking model to the file started.");
+
+ storage << "canTrack" << (m_canTrack ? 1 : 0);
+ m_tracker->write(storage);
+
+ LOGD("Storing tracking model to the file finished.");
+
+ storage.release();
+
+ return MEDIA_VISION_ERROR_NONE;
+}
+
+/**
+ * @brief Restores tracker state saved by save(); @a fileName is
+ *        resolved against app_get_data_path().
+ *
+ * NOTE(review): unlike save()/prepare(), there is no m_tracker.empty()
+ * guard before read() — presumably safe because every constructor
+ * allocates a tracker, but confirm no path leaves it empty.
+ * @return #MEDIA_VISION_ERROR_NONE on success; INVALID_PATH /
+ *         PERMISSION_DENIED on filesystem errors
+ */
+int FaceTrackingModel::load(const std::string& fileName)
+{
+ /* find directory */
+ std::string prefix_path = std::string(app_get_data_path());
+ LOGD("prefix_path: %s", prefix_path.c_str());
+
+ std::string filePath;
+ filePath += prefix_path;
+ filePath += fileName;
+
+ if (access(filePath.c_str(), F_OK))
+ {
+ LOGE("Can't load face tracking model. File[%s] doesn't exist.", filePath.c_str());
+
+ return MEDIA_VISION_ERROR_INVALID_PATH;
+ }
+
+ cv::FileStorage storage(filePath, cv::FileStorage::READ);
+ if (!storage.isOpened())
+ {
+ LOGE("Can't load tracking model. Read from file permission denied.");
+ return MEDIA_VISION_ERROR_PERMISSION_DENIED;
+ }
+
+ int canTrack = 0;
+ storage["canTrack"] >> canTrack;
+ m_canTrack = (0 != canTrack);
+ m_tracker->read(storage);
+
+ LOGD("Loading tracking model from file.");
+
+ storage.release();
+
+ return MEDIA_VISION_ERROR_NONE;
+}
+
+/**
+ * @brief Prepares tracking on @a image without an explicit start box:
+ *        reuses the tracker's last bounding box when it was already
+ *        initialized, otherwise starts from the whole frame.
+ */
+int FaceTrackingModel::prepare(const cv::Mat& image)
+{
+ if (m_tracker.empty())
+ {
+ LOGE("Failed to prepare tracking model. No tracking algorithm "
+ "is available.");
+ return MEDIA_VISION_ERROR_INVALID_OPERATION;
+ }
+
+ cv::Rect_<float> lastBoundingBox;
+ if (!m_tracker->isInited())
+ {
+ lastBoundingBox.x = 0;
+ lastBoundingBox.y = 0;
+ lastBoundingBox.width = image.cols;
+ lastBoundingBox.height = image.rows;
+ }
+ else
+ {
+ lastBoundingBox = m_tracker->getLastBoundingBox();
+ }
+
+ return prepare(image, lastBoundingBox);
+}
+
+/**
+ * @brief Initializes the tracker on @a image with @a boundingBox as
+ *        the target region; enables track() on success.
+ * @return #MEDIA_VISION_ERROR_NONE on success; INVALID_OPERATION when
+ *         no tracker exists or init() fails
+ */
+int FaceTrackingModel::prepare(
+ const cv::Mat& image,
+ const cv::Rect_<float>& boundingBox)
+{
+ if (m_tracker.empty())
+ {
+ LOGE("Failed to prepare tracking model. No tracking algorithm "
+ "is available.");
+ return MEDIA_VISION_ERROR_INVALID_OPERATION;
+ }
+
+ if (!m_tracker->init(image, boundingBox))
+ {
+ LOGE("Failed to prepare tracking model.");
+ return MEDIA_VISION_ERROR_INVALID_OPERATION;
+ }
+
+ m_canTrack = true;
+ return MEDIA_VISION_ERROR_NONE;
+}
+
+/**
+ * @brief Tracks the prepared target on the next frame @a image,
+ *        filling location, tracked-flag and confidence in @a results.
+ * @return #MEDIA_VISION_ERROR_NONE on success; INVALID_OPERATION when
+ *         the model was not prepared
+ */
+int FaceTrackingModel::track(const cv::Mat& image, FaceTrackingResults& results)
+{
+ if (!m_tracker.empty() && m_canTrack)
+ {
+ results.mIsTracked = m_tracker->update(image, results.mFaceLocation);
+ results.mConfidence = m_tracker->getLastConfidence();
+ }
+ else
+ {
+ LOGE("Attempt to track face with not prepared model");
+ return MEDIA_VISION_ERROR_INVALID_OPERATION;
+ }
+
+ return MEDIA_VISION_ERROR_NONE;
+}
+
+} /* Face */
+} /* MediaVision */
diff --git a/mv_face/face/src/FaceUtil.cpp b/mv_face/face/src/FaceUtil.cpp
new file mode 100644
index 00000000..7d49dd3e
--- /dev/null
+++ b/mv_face/face/src/FaceUtil.cpp
@@ -0,0 +1,138 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "FaceUtil.h"
+
+#include "mv_private.h"
+
+#include <opencv2/imgproc/types_c.h>
+#include <opencv2/highgui/highgui.hpp>
+
+namespace MediaVision
+{
+namespace Face
+{
+
+RecognitionParams::RecognitionParams(FaceRecognitionModelType algType) :
+ mRecognitionAlgType(algType)
+{
+ ; /* NULL */
+}
+
+RecognitionParams::RecognitionParams() :
+ mRecognitionAlgType(MEDIA_VISION_FACE_MODEL_TYPE_LBPH)
+{
+ ; /* NULL */
+}
+
+int convertSourceMV2GrayCV(mv_source_h mvSource, cv::Mat& cvSource)
+{
+ MEDIA_VISION_INSTANCE_CHECK(mvSource);
+
+ int depth = CV_8U; // Default depth. 1 byte for channel.
+ unsigned int channelsNumber = 0;
+ unsigned int width = 0, height = 0;
+ unsigned int bufferSize = 0;
+ unsigned char *buffer = NULL;
+
+ mv_colorspace_e colorspace = MEDIA_VISION_COLORSPACE_INVALID;
+
+ MEDIA_VISION_ASSERT(mv_source_get_width(mvSource, &width),
+ "Failed to get the width.");
+ MEDIA_VISION_ASSERT(mv_source_get_height(mvSource, &height),
+ "Failed to get the height.");
+ MEDIA_VISION_ASSERT(mv_source_get_colorspace(mvSource, &colorspace),
+ "Failed to get the colorspace.");
+ MEDIA_VISION_ASSERT(mv_source_get_buffer(mvSource, &buffer, &bufferSize),
+ "Failed to get the buffer size.");
+
+ int conversionType = -1; // Type of conversion from given colorspace to gray
+ switch(colorspace)
+ {
+ case MEDIA_VISION_COLORSPACE_INVALID:
+ LOGE("Error: mv_source has invalid colorspace.");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ case MEDIA_VISION_COLORSPACE_Y800:
+ channelsNumber = 1;
+ // Without convertion
+ break;
+ case MEDIA_VISION_COLORSPACE_I420:
+ channelsNumber = 1;
+ height *= 1.5;
+ conversionType = CV_YUV2GRAY_I420;
+ break;
+ case MEDIA_VISION_COLORSPACE_NV12:
+ channelsNumber = 1;
+ height *= 1.5;
+ conversionType = CV_YUV2GRAY_NV12;
+ break;
+ case MEDIA_VISION_COLORSPACE_YV12:
+ channelsNumber = 1;
+ height *= 1.5;
+ conversionType = CV_YUV2GRAY_YV12;
+ break;
+ case MEDIA_VISION_COLORSPACE_NV21:
+ channelsNumber = 1;
+ height *= 1.5;
+ conversionType = CV_YUV2GRAY_NV21;
+ break;
+ case MEDIA_VISION_COLORSPACE_YUYV:
+ channelsNumber = 2;
+ conversionType = CV_YUV2GRAY_YUYV;
+ break;
+ case MEDIA_VISION_COLORSPACE_UYVY:
+ channelsNumber = 2;
+ conversionType = CV_YUV2GRAY_UYVY;
+ break;
+ case MEDIA_VISION_COLORSPACE_422P:
+ channelsNumber = 2;
+ conversionType = CV_YUV2GRAY_Y422;
+ break;
+ case MEDIA_VISION_COLORSPACE_RGB565:
+ channelsNumber = 2;
+ conversionType = CV_BGR5652GRAY;
+ break;
+ case MEDIA_VISION_COLORSPACE_RGB888:
+ channelsNumber = 3;
+ conversionType = CV_RGB2GRAY;
+ break;
+ case MEDIA_VISION_COLORSPACE_RGBA:
+ channelsNumber = 4;
+ conversionType = CV_RGBA2GRAY;
+ break;
+ default:
+ LOGE("Error: mv_source has unsupported colorspace.");
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT;
+ }
+
+ if (conversionType == -1) // Without conversion
+ {
+ cvSource = cv::Mat(cv::Size(width, height),
+ CV_MAKETYPE(depth, channelsNumber), buffer).clone();
+ }
+ else // Conversion
+ {
+ // Class for representation the given image as cv::Mat before conversion
+ cv::Mat origin(cv::Size(width, height),
+ CV_MAKETYPE(depth, channelsNumber), buffer);
+ cv::cvtColor(origin, cvSource, conversionType);
+ }
+
+ return MEDIA_VISION_ERROR_NONE;
+}
+
+} /* Face */
+} /* MediaVision */
diff --git a/mv_face/face/src/TrackerMedianFlow.cpp b/mv_face/face/src/TrackerMedianFlow.cpp
new file mode 100644
index 00000000..a7a3b4f0
--- /dev/null
+++ b/mv_face/face/src/TrackerMedianFlow.cpp
@@ -0,0 +1,460 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+ //
+ // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+ //
+ // By downloading, copying, installing or using the software you agree to this license.
+ // If you do not agree to this license, do not download, install,
+ // copy or use the software.
+ //
+ //
+ // License Agreement
+ // For Open Source Computer Vision Library
+ //
+ // Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+ // Third party copyrights are property of their respective owners.
+ //
+ // Redistribution and use in source and binary forms, with or without modification,
+ // are permitted provided that the following conditions are met:
+ //
+ // * Redistribution's of source code must retain the above copyright notice,
+ // this list of conditions and the following disclaimer.
+ //
+ // * Redistribution's in binary form must reproduce the above copyright notice,
+ // this list of conditions and the following disclaimer in the documentation
+ // and/or other materials provided with the distribution.
+ //
+ // * The name of the copyright holders may not be used to endorse or promote products
+ // derived from this software without specific prior written permission.
+ //
+ // This software is provided by the copyright holders and contributors "as is" and
+ // any express or implied warranties, including, but not limited to, the implied
+ // warranties of merchantability and fitness for a particular purpose are disclaimed.
+ // In no event shall the Intel Corporation or contributors be liable for any direct,
+ // indirect, incidental, special, exemplary, or consequential damages
+ // (including, but not limited to, procurement of substitute goods or services;
+ // loss of use, data, or profits; or business interruption) however caused
+ // and on any theory of liability, whether in contract, strict liability,
+ // or tort (including negligence or otherwise) arising in any way out of
+ // the use of this software, even if advised of the possibility of such damage.
+ //
+ //M*/
+
+#include "TrackerMedianFlow.h"
+
+#include "opencv2/video/tracking.hpp"
+#include "opencv2/imgproc/imgproc.hpp"
+
+#include <algorithm>
+#include <cmath>
+
+namespace
+{
+ float FloatEps = 10e-6f;
+} /* anonymous namespace */
+
+namespace cv
+{
+
+TrackerMedianFlow::Params::Params()
+{
+ mPointsInGrid = 10;
+ mWindowSize = Size(3, 3);
+ mPyrMaxLevel = 5;
+}
+
+void TrackerMedianFlow::Params::read( const cv::FileNode& fn )
+{
+ mPointsInGrid = fn["pointsInGrid"];
+ int winSizeHeight = fn["windowSizeHeight"];
+ int winSizeWidth = fn["windowSizeWidth"];
+ mWindowSize = Size(winSizeHeight, winSizeWidth);
+ mPyrMaxLevel = fn["pyrMaxLevel"];
+}
+
+void TrackerMedianFlow::Params::write( cv::FileStorage& fs ) const
+{
+ fs << "pointsInGrid" << mPointsInGrid;
+ fs << "windowSizeHeight" << mWindowSize.height;
+ fs << "windowSizeWidth" << mWindowSize.width;
+ fs << "pyrMaxLevel" << mPyrMaxLevel;
+}
+
+TrackerMedianFlow::TrackerMedianFlow(Params paramsIn) :
+ termcrit(TermCriteria::COUNT | TermCriteria::EPS,20,0.3),
+ m_confidence(0.0)
+{
+ params = paramsIn;
+ isInit = false;
+}
+
+bool TrackerMedianFlow::copyTo(TrackerMedianFlow& copy) const
+{
+ copy.isInit = isInit;
+ copy.params = params;
+ copy.termcrit = termcrit;
+ copy.m_boundingBox = m_boundingBox;
+ copy.m_confidence = m_confidence;
+ m_image.copyTo(copy.m_image);
+ return true;
+}
+
+bool TrackerMedianFlow::init(const Mat& image, const Rect_<float>& boundingBox)
+{
+ if (image.empty())
+ {
+ return false;
+ }
+
+ image.copyTo(m_image);
+ buildOpticalFlowPyramid(
+ m_image, m_pyramid, params.mWindowSize, params.mPyrMaxLevel);
+ m_boundingBox = boundingBox;
+
+ isInit = true;
+ return isInit;
+}
+
+bool TrackerMedianFlow::update(const Mat& image, Rect_<float>& boundingBox)
+{
+ if (!isInit || image.empty()) return false;
+
+ // Handles such behaviour when preparation frame has the size
+ // different to the tracking frame size. In such case, we resize preparation
+ // frame and bounding box. Then, track as usually:
+ if (m_image.rows != image.rows || m_image.cols != image.cols)
+ {
+ const float xFactor = (float) image.cols / m_image.cols;
+ const float yFactor = (float) image.rows / m_image.rows;
+
+ resize(m_image, m_image, Size(), xFactor, yFactor);
+
+ m_boundingBox.x *= xFactor;
+ m_boundingBox.y *= yFactor;
+ m_boundingBox.width *= xFactor;
+ m_boundingBox.height *= yFactor;
+ }
+
+ Mat oldImage = m_image;
+
+ Rect_<float> oldBox = m_boundingBox;
+ if(!medianFlowImpl(oldImage, image, oldBox))
+ {
+ return false;
+ }
+
+ boundingBox = oldBox;
+ image.copyTo(m_image);
+ m_boundingBox = boundingBox;
+ return true;
+}
+
+bool TrackerMedianFlow::isInited() const
+{
+ return isInit;
+}
+
+float TrackerMedianFlow::getLastConfidence() const
+{
+ return m_confidence;
+}
+
+Rect_<float> TrackerMedianFlow::getLastBoundingBox() const
+{
+ return m_boundingBox;
+}
+
+bool TrackerMedianFlow::medianFlowImpl(
+ Mat oldImage_gray, Mat newImage_gray, Rect_<float>& oldBox)
+{
+ std::vector<Point2f> pointsToTrackOld, pointsToTrackNew;
+
+ const float gridXStep = oldBox.width / params.mPointsInGrid;
+ const float gridYStep = oldBox.height / params.mPointsInGrid;
+ for (int i = 0; i < params.mPointsInGrid; i++)
+ {
+ for (int j = 0; j < params.mPointsInGrid; j++)
+ {
+ pointsToTrackOld.push_back(
+ Point2f(oldBox.x + .5f*gridXStep + 1.f*gridXStep*j,
+ oldBox.y + .5f*gridYStep + 1.f*gridYStep*i));
+ }
+ }
+
+ std::vector<uchar> status(pointsToTrackOld.size());
+ std::vector<float> errors(pointsToTrackOld.size());
+
+ std::vector<Mat> tempPyramid;
+ buildOpticalFlowPyramid(
+ newImage_gray,
+ tempPyramid,
+ params.mWindowSize,
+ params.mPyrMaxLevel);
+
+ calcOpticalFlowPyrLK(m_pyramid,
+ tempPyramid,
+ pointsToTrackOld,
+ pointsToTrackNew,
+ status,
+ errors,
+ params.mWindowSize,
+ params.mPyrMaxLevel,
+ termcrit);
+
+ std::vector<Point2f> di;
+ for (size_t idx = 0u; idx < pointsToTrackOld.size(); idx++)
+ {
+ if (status[idx] == 1)
+ {
+ di.push_back(pointsToTrackNew[idx] - pointsToTrackOld[idx]);
+ }
+ }
+
+ std::vector<bool> filter_status;
+ check_FB(tempPyramid,
+ pointsToTrackOld,
+ pointsToTrackNew,
+ filter_status);
+ check_NCC(oldImage_gray,
+ newImage_gray,
+ pointsToTrackOld,
+ pointsToTrackNew,
+ filter_status);
+
+ for (size_t idx = 0u; idx < pointsToTrackOld.size(); idx++)
+ {
+ if (!filter_status[idx])
+ {
+ pointsToTrackOld.erase(pointsToTrackOld.begin() + idx);
+ pointsToTrackNew.erase(pointsToTrackNew.begin() + idx);
+ filter_status.erase(filter_status.begin() + idx);
+ idx--;
+ }
+ }
+
+ if (pointsToTrackOld.size() == 0 || di.size() == 0)
+ {
+ return false;
+ }
+
+ Point2f mDisplacement;
+ Rect_<float> boxCandidate =
+ vote(pointsToTrackOld, pointsToTrackNew, oldBox, mDisplacement);
+
+ std::vector<float> displacements;
+ for (size_t idx = 0u; idx < di.size(); idx++)
+ {
+ di[idx] -= mDisplacement;
+ displacements.push_back(sqrt(di[idx].ddot(di[idx])));
+ }
+
+ m_confidence =
+ (10.f - getMedian(displacements,(int)displacements.size())) / 10.f;
+ if (m_confidence <= 0.f)
+ {
+ m_confidence = 0.f;
+ return false;
+ }
+
+ m_pyramid.swap(tempPyramid);
+ oldBox = boxCandidate;
+ return true;
+}
+
+Rect_<float> TrackerMedianFlow::vote(
+ const std::vector<Point2f>& oldPoints,
+ const std::vector<Point2f>& newPoints,
+ const Rect_<float>& oldRect,
+ Point2f& mD)
+{
+ Rect_<float> newRect;
+ Point2d newCenter(oldRect.x + oldRect.width/2.0,
+ oldRect.y + oldRect.height/2.0);
+
+ int n = (int)oldPoints.size();
+ std::vector<float> buf(std::max( n*(n-1) / 2, 3), 0.f);
+
+ if(oldPoints.size() == 1)
+ {
+ newRect.x = oldRect.x+newPoints[0].x-oldPoints[0].x;
+ newRect.y = oldRect.y+newPoints[0].y-oldPoints[0].y;
+ newRect.width=oldRect.width;
+ newRect.height=oldRect.height;
+ return newRect;
+ }
+
+ float xshift = 0.f;
+ float yshift = 0.f;
+ for(int i = 0; i < n; i++)
+ {
+ buf[i] = newPoints[i].x - oldPoints[i].x;
+ }
+
+ xshift = getMedian(buf, n);
+ newCenter.x += xshift;
+ for(int idx = 0; idx < n; idx++)
+ {
+ buf[idx] = newPoints[idx].y - oldPoints[idx].y;
+ }
+
+ yshift = getMedian(buf, n);
+ newCenter.y += yshift;
+ mD = Point2f(xshift, yshift);
+
+ if(oldPoints.size() == 1)
+ {
+ newRect.x = newCenter.x - oldRect.width / 2.0;
+ newRect.y = newCenter.y - oldRect.height / 2.0;
+ newRect.width = oldRect.width;
+ newRect.height = oldRect.height;
+ return newRect;
+ }
+
+ float nd = 0.f;
+ float od = 0.f;
+ for (int i = 0, ctr = 0; i < n; i++)
+ {
+ for(int j = 0; j < i; j++)
+ {
+ nd = l2distance(newPoints[i], newPoints[j]);
+ od = l2distance(oldPoints[i], oldPoints[j]);
+ buf[ctr] = (od == 0.f ? 0.f : nd / od);
+ ctr++;
+ }
+ }
+
+ float scale = getMedian(buf, n*(n-1) / 2);
+ newRect.x = newCenter.x - scale * oldRect.width / 2.f;
+ newRect.y = newCenter.y-scale * oldRect.height / 2.f;
+ newRect.width = scale * oldRect.width;
+ newRect.height = scale * oldRect.height;
+
+ return newRect;
+}
+
+template<typename T>
+T TrackerMedianFlow::getMedian(std::vector<T>& values, int size)
+{
+ if (size == -1)
+ {
+ size = (int)values.size();
+ }
+
+ std::vector<T> copy(values.begin(), values.begin() + size);
+ std::sort(copy.begin(),copy.end());
+ if(size%2==0)
+ {
+ return (copy[size/2-1]+copy[size/2])/((T)2.0);
+ }
+ else
+ {
+ return copy[(size - 1) / 2];
+ }
+}
+
+float TrackerMedianFlow::l2distance(Point2f p1, Point2f p2)
+{
+ float dx = p1.x - p2.x;
+ float dy = p1.y - p2.y;
+ return sqrt(dx * dx + dy * dy);
+}
+
+void TrackerMedianFlow::check_FB(
+ std::vector<Mat> newPyramid,
+ const std::vector<Point2f>& oldPoints,
+ const std::vector<Point2f>& newPoints,
+ std::vector<bool>& status)
+{
+ if(status.size() == 0)
+ {
+ status = std::vector<bool>(oldPoints.size(), true);
+ }
+
+ std::vector<uchar> LKstatus(oldPoints.size());
+ std::vector<float> errors(oldPoints.size());
+ std::vector<float> FBerror(oldPoints.size());
+ std::vector<Point2f> pointsToTrackReprojection;
+
+ calcOpticalFlowPyrLK(newPyramid,
+ m_pyramid,
+ newPoints,
+ pointsToTrackReprojection,
+ LKstatus,
+ errors,
+ params.mWindowSize,
+ params.mPyrMaxLevel,
+ termcrit);
+
+ for (size_t idx = 0u; idx < oldPoints.size(); idx++)
+ {
+ FBerror[idx] = l2distance(oldPoints[idx], pointsToTrackReprojection[idx]);
+ }
+
+ float FBerrorMedian = getMedian(FBerror) + FloatEps;
+ for (size_t idx = 0u; idx < oldPoints.size(); idx++)
+ {
+ status[idx] = (FBerror[idx] < FBerrorMedian);
+ }
+}
+
+void TrackerMedianFlow::check_NCC(
+ const Mat& oldImage,
+ const Mat& newImage,
+ const std::vector<Point2f>& oldPoints,
+ const std::vector<Point2f>& newPoints,
+ std::vector<bool>& status)
+{
+ std::vector<float> NCC(oldPoints.size(), 0.f);
+ Size patch(30, 30);
+ Mat p1;
+ Mat p2;
+
+ for (size_t idx = 0u; idx < oldPoints.size(); idx++)
+ {
+ getRectSubPix(oldImage, patch, oldPoints[idx], p1);
+ getRectSubPix(newImage, patch, newPoints[idx], p2);
+
+ const int N = 900;
+ const float s1 = sum(p1)(0);
+ const float s2 = sum(p2)(0);
+ const float n1 = norm(p1);
+ const float n2 = norm(p2);
+ const float prod = p1.dot(p2);
+ const float sq1 = sqrt(n1 * n1 - s1 * s1 / N);
+ const float sq2 = sqrt(n2 * n2 - s2 * s2 / N);
+ NCC[idx] = (sq2==0 ? sq1 / std::abs(sq1)
+ : (prod - s1 * s2 / N) / sq1 / sq2);
+ }
+
+ float median = getMedian(NCC) - FloatEps;
+ for(size_t idx = 0u; idx < oldPoints.size(); idx++)
+ {
+ status[idx] = status[idx] && (NCC[idx] > median);
+ }
+}
+
+void TrackerMedianFlow::read( cv::FileStorage& fs )
+{
+ params.read(fs.root());
+ float bbX = 0.f;
+ float bbY = 0.f;
+ float bbW = 0.f;
+ float bbH = 0.f;
+ fs["lastLocationX"] >> bbX;
+ fs["lastLocationY"] >> bbY;
+ fs["lastLocationW"] >> bbW;
+ fs["lastLocationH"] >> bbH;
+ m_boundingBox = Rect_<float>(bbX, bbY, bbW, bbH);
+ fs["lastImage"] >> m_image;
+}
+
+void TrackerMedianFlow::write( cv::FileStorage& fs ) const
+{
+ params.write(fs);
+ fs << "lastLocationX" << m_boundingBox.x;
+ fs << "lastLocationY" << m_boundingBox.y;
+ fs << "lastLocationW" << m_boundingBox.width;
+ fs << "lastLocationH" << m_boundingBox.height;
+ fs << "lastImage" << m_image;
+}
+
+} /* namespace cv */
diff --git a/mv_face/face/src/mv_face_open.cpp b/mv_face/face/src/mv_face_open.cpp
new file mode 100644
index 00000000..41f2398e
--- /dev/null
+++ b/mv_face/face/src/mv_face_open.cpp
@@ -0,0 +1,1048 @@
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "mv_face_open.h"
+
+#include "FaceDetector.h"
+#include "FaceUtil.h"
+#include "FaceRecognitionModel.h"
+#include "FaceTrackingModel.h"
+#include "FaceEyeCondition.h"
+#include "FaceExpressionRecognizer.h"
+
+#include "mv_private.h"
+
+#include <vector>
+#include <set>
+#include <cstring>
+
+using namespace ::MediaVision::Face;
+
+static const RecognitionParams defaultRecognitionParams = RecognitionParams();
+
+static void extractRecognitionParams(
+ mv_engine_config_h engine_cfg,
+ RecognitionParams& recognitionParams)
+{
+ mv_engine_config_h working_cfg = NULL;
+
+ if (NULL == engine_cfg)
+ {
+ mv_create_engine_config(&working_cfg);
+ }
+ else
+ {
+ working_cfg = engine_cfg;
+ }
+
+ int algType = 0;
+ mv_engine_config_get_int_attribute_c(
+ working_cfg,
+ "MV_FACE_RECOGNITION_MODEL_TYPE",
+ &algType);
+
+ if (0 < algType && 4 > algType)
+ {
+ recognitionParams.mRecognitionAlgType =
+ (FaceRecognitionModelType)algType;
+ }
+ else
+ {
+ recognitionParams.mRecognitionAlgType =
+ defaultRecognitionParams.mRecognitionAlgType;
+ }
+
+ if (NULL == engine_cfg)
+ {
+ mv_destroy_engine_config(working_cfg);
+ }
+}
+
+inline void convertRectCV2MV(const cv::Rect& src, mv_rectangle_s& dst)
+{
+ dst.point.x = src.x;
+ dst.point.y = src.y;
+ dst.width = src.width;
+ dst.height = src.height;
+}
+
+int mv_face_detect_open(
+ mv_source_h source,
+ mv_engine_config_h engine_cfg,
+ mv_face_detected_cb detected_cb,
+ void *user_data)
+{
+ cv::Mat image;
+
+ int error = convertSourceMV2GrayCV(source, image);
+ if (error != MEDIA_VISION_ERROR_NONE)
+ {
+ LOGE("Convertion mv_source_h to gray failed");
+ return error;
+ }
+
+ char *haarcascadeFilepath;
+ error = mv_engine_config_get_string_attribute_c(
+ engine_cfg,
+ "MV_FACE_DETECTION_MODEL_FILE_PATH",
+ &haarcascadeFilepath);
+
+ //default path
+ std::string haarcascadeFilePathStr =
+ "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt2.xml";
+
+ if (error == MEDIA_VISION_ERROR_NONE)
+ {
+ LOGI("Haarcascade file was set as default");
+ haarcascadeFilePathStr = std::string(haarcascadeFilepath);
+
+ delete[] haarcascadeFilepath;
+ }
+ else
+ {
+ LOGE("Error occurred during face detection haarcascade file receiving."
+ " (%i)", error);
+ }
+
+ static FaceDetector faceDetector;
+
+ if (!faceDetector.loadHaarcascade(haarcascadeFilePathStr))
+ {
+ LOGE("Loading Haarcascade failed");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ cv::Rect roi(-1, -1, -1, -1);
+ error = mv_engine_config_get_int_attribute_c(
+ engine_cfg,
+ MV_FACE_DETECTION_ROI_X,
+ &roi.x);
+ if (error != MEDIA_VISION_ERROR_NONE)
+ {
+ LOGE("Error occurred during face detection roi (x) receiving."
+ " (%i)", error);
+ }
+
+ error = mv_engine_config_get_int_attribute_c(
+ engine_cfg,
+ MV_FACE_DETECTION_ROI_Y,
+ &roi.y);
+ if (error != MEDIA_VISION_ERROR_NONE)
+ {
+ LOGE("Error occurred during face detection roi (y) receiving."
+ " (%i)", error);
+ }
+
+ error = mv_engine_config_get_int_attribute_c(
+ engine_cfg,
+ MV_FACE_DETECTION_ROI_WIDTH,
+ &roi.width);
+ if (error != MEDIA_VISION_ERROR_NONE)
+ {
+ LOGE("Error occurred during face detection roi (width) receiving."
+ " (%i)", error);
+ }
+
+ error = mv_engine_config_get_int_attribute_c(
+ engine_cfg,
+ MV_FACE_DETECTION_ROI_HEIGHT,
+ &roi.height);
+ if (error != MEDIA_VISION_ERROR_NONE)
+ {
+ LOGE("Error occurred during face detection roi (height) receiving."
+ " (%i)", error);
+ }
+
+ cv::Size minSize(-1, -1);
+ error = mv_engine_config_get_int_attribute_c(
+ engine_cfg,
+ MV_FACE_DETECTION_MIN_SIZE_WIDTH,
+ &minSize.width);
+ if (error != MEDIA_VISION_ERROR_NONE)
+ {
+ LOGE("Error occurred during face detection minimum width receiving."
+ " (%i)", error);
+ }
+
+ error = mv_engine_config_get_int_attribute_c(
+ engine_cfg,
+ MV_FACE_DETECTION_MIN_SIZE_HEIGHT,
+ &minSize.height);
+ if (error != MEDIA_VISION_ERROR_NONE)
+ {
+ LOGE("Error occurred during face detection minimum height receiving."
+ " (%i)", error);
+ }
+
+ std::vector<cv::Rect> faceLocations;
+ if (!faceDetector.detectFaces(image, roi, minSize, faceLocations))
+ {
+ LOGE("Face detection in OpenCV failed");
+ return MEDIA_VISION_ERROR_INVALID_OPERATION;
+ }
+
+ static const int StartMaxResultsNumber = 50;
+ static std::vector<mv_rectangle_s> results(StartMaxResultsNumber);
+
+ const int numberOfResults = faceLocations.size();
+ if (numberOfResults > StartMaxResultsNumber)
+ {
+ results.resize(numberOfResults);
+ }
+
+ for(int rectNum = 0; rectNum < numberOfResults; ++rectNum)
+ {
+ convertRectCV2MV(faceLocations[rectNum], results[rectNum]);
+ }
+
+ LOGI("Call the detect callback for %i detected faces", numberOfResults);
+ detected_cb(source, engine_cfg, results.data(), numberOfResults, user_data);
+
+ return MEDIA_VISION_ERROR_NONE;
+}
+
+int mv_face_recognize_open(
+ mv_source_h source,
+ mv_face_recognition_model_h recognition_model,
+ mv_engine_config_h engine_cfg,
+ mv_rectangle_s *face_location,
+ mv_face_recognized_cb recognized_cb,
+ void *user_data)
+{
+ if (!source)
+ {
+ LOGE("Can't recognize for the NULL Media Vision source handle");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+ if (!recognized_cb)
+ {
+ LOGE("Recognition failed. Can't output recognition results without "
+ "callback function");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+ if (!recognition_model)
+ {
+ LOGE("Can't recognize for the NULL Media Vision Face recognition model");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ FaceRecognitionModel *pRecModel = static_cast<FaceRecognitionModel*>(recognition_model);
+
+ if (!pRecModel)
+ {
+ LOGE("Face recognition failed. Incorrect Media Vision Face recognition model handle is used");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ cv::Mat grayImage;
+ int ret = convertSourceMV2GrayCV(source, grayImage);
+
+ if (MEDIA_VISION_ERROR_NONE != ret)
+ {
+ LOGE("Convertion mv_source_h to gray failed");
+ return ret;
+ }
+
+ cv::Mat image;
+ if (NULL == face_location)
+ {
+ image = grayImage;
+ }
+ else
+ {
+ cv::Rect_<int> roi;
+ roi.x = face_location->point.x;
+ roi.y = face_location->point.y;
+ roi.width = face_location->width;
+ roi.height = face_location->height;
+ image = grayImage(roi);
+ }
+
+ FaceRecognitionResults results;
+
+ LOGD("Face recognition is started");
+
+ ret = pRecModel->recognize(image, results);
+
+ if (MEDIA_VISION_ERROR_NONE != ret)
+ {
+ LOGE("Error occurred during the recognition. Failed");
+ return ret;
+ }
+
+ if (!results.mIsRecognized)
+ {
+ recognized_cb(
+ source,
+ recognition_model,
+ engine_cfg,
+ NULL,
+ NULL,
+ 0.0,
+ user_data);
+ }
+ else
+ {
+ mv_rectangle_s location;
+ location.point.x = results.mFaceLocation.x;
+ location.point.y = results.mFaceLocation.y;
+ location.width = results.mFaceLocation.width;
+ location.height = results.mFaceLocation.height;
+
+ if (face_location != NULL)
+ {
+ location.point.x += face_location->point.x;
+ location.point.y += face_location->point.y;
+ }
+
+ recognized_cb(
+ source,
+ recognition_model,
+ engine_cfg,
+ &location,
+ &(results.mFaceLabel),
+ results.mConfidence,
+ user_data);
+ }
+
+ LOGD("Face recognition is finished");
+
+ return ret;
+}
+
+int mv_face_track_open(
+ mv_source_h source,
+ mv_face_tracking_model_h tracking_model,
+ mv_engine_config_h engine_cfg,
+ mv_face_tracked_cb tracked_cb,
+ bool /*do_learn*/,
+ void *user_data)
+{
+ if (!source)
+ {
+ LOGE("Can't track for the NULL Media Vision source handle");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+ if (!tracked_cb)
+ {
+ LOGE("Tracking failed. Can't output tracking results without "
+ "callback function");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+ if (!tracking_model)
+ {
+ LOGE("Can't track for the NULL Media Vision Face tracking model");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ FaceTrackingModel *pTrackModel =
+ static_cast<FaceTrackingModel*>(tracking_model);
+
+ if (!pTrackModel)
+ {
+ LOGE("Face tracking failed. "
+ "Incorrect Media Vision Face tracking model handle is used");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ cv::Mat grayImage;
+ int ret = convertSourceMV2GrayCV(source, grayImage);
+
+ if (MEDIA_VISION_ERROR_NONE != ret)
+ {
+ LOGE("Convertion mv_source_h to gray failed");
+ return ret;
+ }
+
+ FaceTrackingResults results;
+ ret = pTrackModel->track(grayImage, results);
+
+ if (MEDIA_VISION_ERROR_NONE != ret)
+ {
+ LOGE("Tracking can't be performed. "
+ "Check that tracking model is prepared when tracking starts");
+ return ret;
+ }
+
+ if (results.mIsTracked)
+ {
+ mv_quadrangle_s predictedLocation;
+ predictedLocation.points[0].x = results.mFaceLocation.x;
+ predictedLocation.points[0].y = results.mFaceLocation.y;
+ predictedLocation.points[1].x =
+ results.mFaceLocation.x + results.mFaceLocation.width;
+ predictedLocation.points[1].y = results.mFaceLocation.y;
+ predictedLocation.points[2].x =
+ results.mFaceLocation.x + results.mFaceLocation.width;
+ predictedLocation.points[2].y =
+ results.mFaceLocation.y + results.mFaceLocation.height;
+ predictedLocation.points[3].x = results.mFaceLocation.x;
+ predictedLocation.points[3].y =
+ results.mFaceLocation.y + results.mFaceLocation.height;
+ tracked_cb(
+ source,
+ tracking_model,
+ engine_cfg,
+ &predictedLocation,
+ results.mConfidence,
+ user_data);
+ }
+ else
+ {
+ tracked_cb(
+ source,
+ tracking_model,
+ engine_cfg,
+ NULL,
+ results.mConfidence,
+ user_data);
+ }
+
+ return ret;
+}
+
+int mv_face_eye_condition_recognize_open(
+ mv_source_h source,
+ mv_engine_config_h engine_cfg,
+ mv_rectangle_s face_location,
+ mv_face_eye_condition_recognized_cb eye_condition_recognized_cb,
+ void *user_data)
+{
+ cv::Mat image;
+
+ int error = convertSourceMV2GrayCV(source, image);
+ if (error != MEDIA_VISION_ERROR_NONE)
+ {
+ LOGE("Convertion mv_source_h to gray failed");
+ return error;
+ }
+
+ mv_face_eye_condition_e eye_condition;
+ error = FaceEyeCondition::recognizeEyeCondition(
+ image,
+ face_location,
+ &eye_condition);
+
+ if (error != MEDIA_VISION_ERROR_NONE)
+ {
+ LOGE("eye contition recognition failed");
+ return error;
+ }
+
+ eye_condition_recognized_cb(
+ source,
+ engine_cfg,
+ face_location,
+ eye_condition,
+ user_data);
+
+ return MEDIA_VISION_ERROR_NONE;
+}
+
+int mv_face_facial_expression_recognize_open(
+ mv_source_h source,
+ mv_engine_config_h engine_cfg,
+ mv_rectangle_s face_location,
+ mv_face_facial_expression_recognized_cb expression_recognized_cb,
+ void *user_data)
+{
+ cv::Mat image;
+
+ int error = convertSourceMV2GrayCV(source, image);
+ if (error != MEDIA_VISION_ERROR_NONE)
+ {
+ LOGE("Convertion mv_source_h to gray failed");
+ return error;
+ }
+
+ mv_face_facial_expression_e expression;
+ error = FaceExpressionRecognizer::recognizeFaceExpression(
+ image, face_location, &expression);
+
+ if (error != MEDIA_VISION_ERROR_NONE)
+ {
+ LOGE("eye contition recognition failed");
+ return error;
+ }
+
+ expression_recognized_cb(
+ source,
+ engine_cfg,
+ face_location,
+ expression,
+ user_data);
+
+ return MEDIA_VISION_ERROR_NONE;
+}
+
+int mv_face_recognition_model_create_open(
+ mv_face_recognition_model_h *recognition_model)
+{
+ if (recognition_model == NULL)
+ {
+ LOGE("Recognition model can't be created because handle pointer is NULL");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ (*recognition_model) =
+ static_cast<mv_face_recognition_model_h>(new (std::nothrow)FaceRecognitionModel());
+
+ if (*recognition_model == NULL)
+ {
+ LOGE("Failed to create media vision recognition model");
+ return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ }
+
+ LOGD("Recognition model [%p] has been created", *recognition_model);
+
+ return MEDIA_VISION_ERROR_NONE;
+}
+
+int mv_face_recognition_model_destroy_open(
+ mv_face_recognition_model_h recognition_model)
+{
+ if (!recognition_model)
+ {
+ LOGE("Recognition model can't be destroyed because handle is NULL");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ LOGD("Destroying media vision recognition model [%p]", recognition_model);
+ delete static_cast<FaceRecognitionModel*>(recognition_model);
+ LOGD("Media vision recognition model has been destroyed");
+
+ return MEDIA_VISION_ERROR_NONE;
+}
+
+int mv_face_recognition_model_clone_open(
+ mv_face_recognition_model_h src,
+ mv_face_recognition_model_h *dst)
+{
+ if (!src || !dst)
+ {
+ LOGE("Can't clone recognition model. Both source and destination"
+ "recognition model handles has to be not NULL");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ (*dst) = static_cast<mv_face_recognition_model_h>(new (std::nothrow)FaceRecognitionModel());
+
+ if (*dst == NULL)
+ {
+ LOGE("Failed to create media vision recognition model");
+ return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ }
+
+ LOGD("Recognition model [%p] has been created", *dst);
+
+ const FaceRecognitionModel *pSrcModel = static_cast<FaceRecognitionModel*>(src);
+ FaceRecognitionModel *pDstModel = static_cast<FaceRecognitionModel*>(*dst);
+
+ *pDstModel = *pSrcModel;
+
+ LOGD("Media vision recognition model has been cloned");
+ return MEDIA_VISION_ERROR_NONE;
+}
+
+int mv_face_recognition_model_save_open(
+ const char *file_name,
+ mv_face_recognition_model_h recognition_model)
+{
+ if (!recognition_model)
+ {
+ LOGE("Can't save recognition model to the file. Handle has to be not NULL");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ if (NULL == file_name)
+ {
+ LOGE("Can't save recognition model to the file. File name has to be specified");
+ return MEDIA_VISION_ERROR_INVALID_PATH;
+ }
+
+ FaceRecognitionModel *pRecModel = static_cast<FaceRecognitionModel*>(recognition_model);
+ const int ret = pRecModel->save(std::string(file_name));
+
+ if (MEDIA_VISION_ERROR_NONE != ret)
+ {
+ LOGE("Error occurred when save recognition model to the file");
+ return ret;
+ }
+
+ LOGD("Media vision recognition model has been saved to the file [%s]", file_name);
+ return ret;
+}
+
+int mv_face_recognition_model_load_open(
+ const char *file_name,
+ mv_face_recognition_model_h *recognition_model)
+{
+ if (!recognition_model)
+ {
+ LOGE("Can't load recognition model from the file. "
+ "Handle has to be not NULL");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ if (NULL == file_name)
+ {
+ LOGE("Can't load recognition model from the file. "
+ "File name has to be specified");
+ return MEDIA_VISION_ERROR_INVALID_PATH;
+ }
+
+ (*recognition_model) =
+ static_cast<mv_face_recognition_model_h>(new (std::nothrow)FaceRecognitionModel());
+
+ if (*recognition_model == NULL)
+ {
+ LOGE("Failed to create media vision recognition model");
+ return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ }
+
+ FaceRecognitionModel *pRecModel =
+ static_cast<FaceRecognitionModel*>(*recognition_model);
+
+ if (!pRecModel)
+ {
+ LOGE("Loading of the face recognition model from file failed. "
+ "Incorrect Media Vision Face recognition model handle is used");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ const int ret = pRecModel->load(std::string(file_name));
+
+ if (MEDIA_VISION_ERROR_NONE != ret)
+ {
+ LOGE("Error occurred when loading recognition model to the file");
+ return ret;
+ }
+
+ LOGD("Media vision recognition model has been loaded from the file [%s]", file_name);
+ return ret;
+}
+
+int mv_face_recognition_model_add_open(
+ const mv_source_h source,
+ mv_face_recognition_model_h recognition_model,
+ const mv_rectangle_s *example_location,
+ int face_label)
+{
+ if (!source)
+ {
+ LOGE("Can't add face image example for recognition model. "
+ "Media Vision source handle has to be not NULL");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ if (!recognition_model)
+ {
+ LOGE("Can't add face image example for recognition model. "
+ "Model handle has to be not NULL");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ FaceRecognitionModel *pRecModel =
+ static_cast<FaceRecognitionModel*>(recognition_model);
+
+ if (!pRecModel)
+ {
+ LOGE("Add face image example to the model failed. "
+ "Incorrect Media Vision Face recognition model handle is used");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ cv::Mat image;
+ int ret = convertSourceMV2GrayCV(source, image);
+ if (MEDIA_VISION_ERROR_NONE != ret)
+ {
+ LOGE("Convertion mv_source_h to gray failed");
+ return ret;
+ }
+
+ if (!example_location)
+ {
+ ret = pRecModel->addFaceExample(image, face_label);
+ }
+ else
+ {
+ cv::Rect_<int> roi;
+ roi.x = example_location->point.x;
+ roi.y = example_location->point.y;
+ roi.width = example_location->width;
+ roi.height = example_location->height;
+ ret = pRecModel->addFaceExample(image(roi).clone(), face_label);
+ }
+
+ if (MEDIA_VISION_ERROR_NONE != ret)
+ {
+ LOGE("Error occurred when adding face image example to the recognition model");
+ return ret;
+ }
+
+ LOGD("The face image example labeled %i has been added "
+ "to the Media Vision recognition model", face_label);
+ return ret;
+}
+
+int mv_face_recognition_model_reset_open(
+ mv_face_recognition_model_h recognition_model,
+ const int *face_label)
+{
+ if (!recognition_model)
+ {
+ LOGE("Can't reset positive examples for NULL recognition model");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ FaceRecognitionModel *pRecModel =
+ static_cast<FaceRecognitionModel*>(recognition_model);
+
+ if (!pRecModel)
+ {
+ LOGE("Loading of the face recognition model from file failed. "
+ "Incorrect Media Vision Face recognition model handle is used");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ int ret = (NULL != face_label ?
+ pRecModel->resetFaceExamples(*face_label) :
+ pRecModel->resetFaceExamples());
+
+ if (MEDIA_VISION_ERROR_NONE != ret)
+ {
+ LOGE("Error occurred when reset positive examples of the recognition model");
+ return ret;
+ }
+
+ LOGD("The positive examples has been removed from recognition model");
+ return ret;
+}
+
+int mv_face_recognition_model_learn_open(
+ mv_engine_config_h engine_cfg,
+ mv_face_recognition_model_h recognition_model)
+{
+ if (!recognition_model)
+ {
+ LOGE("Can't learn recognition model. Model handle has to be not NULL");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ FaceRecognitionModel *pRecModel =
+ static_cast<FaceRecognitionModel*>(recognition_model);
+
+ if (!pRecModel)
+ {
+ LOGE("Learning of the face recognition model failed. "
+ "Incorrect Media Vision Face recognition model handle is used");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ RecognitionParams recognitionParams;
+ extractRecognitionParams(engine_cfg, recognitionParams);
+ FaceRecognitionModelConfig learnConfig;
+ learnConfig.mModelType = recognitionParams.mRecognitionAlgType;
+
+ const int ret = pRecModel->learn(learnConfig);
+
+ if (MEDIA_VISION_ERROR_NONE != ret)
+ {
+ LOGE("Error occurred when learn face recognition model");
+ return ret;
+ }
+
+ LOGD("Face recognition model has been learned");
+ return ret;
+}
+
+int mv_face_recognition_model_query_labels_open(
+ mv_face_recognition_model_h recognition_model,
+ int **labels,
+ unsigned int *number_of_labels)
+{
+ if (!recognition_model)
+ {
+ LOGE("Can't get list of labels for NULL recognition model");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ if (NULL == labels || NULL == number_of_labels)
+ {
+ LOGE("Can't get list of labels. labels and number_of_labels out "
+ "parameters both has to be not NULL.");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ FaceRecognitionModel *pRecModel =
+ static_cast<FaceRecognitionModel*>(recognition_model);
+
+ if (!pRecModel)
+ {
+ LOGE("Learning of the face recognition model failed. "
+ "Incorrect Media Vision Face recognition model handle is used");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ const std::set<int>& learnedLabels = pRecModel->getFaceLabels();
+ *number_of_labels = learnedLabels.size();
+ (*labels) = new int[*number_of_labels];
+
+ std::set<int>::const_iterator it = learnedLabels.begin();
+ int i = 0;
+ for (; it != learnedLabels.end(); ++it)
+ {
+ (*labels)[i] = *it;
+ ++i;
+ }
+
+ LOGD("List of the labels learned by the recognition model has been retrieved");
+ return MEDIA_VISION_ERROR_NONE;
+}
+
+int mv_face_tracking_model_create_open(
+ mv_face_tracking_model_h *tracking_model)
+{
+ if (tracking_model == NULL)
+ {
+ LOGE("Tracking model can't be created because handle pointer is NULL");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ (*tracking_model) =
+ static_cast<mv_face_tracking_model_h>(new (std::nothrow)FaceTrackingModel());
+
+ if (*tracking_model == NULL)
+ {
+ LOGE("Failed to create media vision tracking model");
+ return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ }
+
+ LOGD("Tracking model [%p] has been created", *tracking_model);
+
+ return MEDIA_VISION_ERROR_NONE;
+}
+
+int mv_face_tracking_model_destroy_open(
+ mv_face_tracking_model_h tracking_model)
+{
+ if (!tracking_model)
+ {
+ LOGE("Tracking model can't be destroyed because handle is NULL");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ LOGD("Destroying media vision tracking model [%p]", tracking_model);
+ delete static_cast<FaceTrackingModel*>(tracking_model);
+ LOGD("Media vision tracking model has been destroyed");
+
+ return MEDIA_VISION_ERROR_NONE;
+}
+
+int mv_face_tracking_model_prepare_open(
+ mv_face_tracking_model_h tracking_model,
+ mv_engine_config_h /*engine_cfg*/,
+ mv_source_h source,
+ mv_quadrangle_s *location)
+{
+ if (!tracking_model)
+ {
+ LOGE("Can't prepare tracking model. Handle has to be not NULL");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ if (!source)
+ {
+ LOGE("Can't prepare tracking model. "
+ "Media Vision source handle has to be not NULL");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ FaceTrackingModel *pTrackModel =
+ static_cast<FaceTrackingModel*>(tracking_model);
+
+ if (!pTrackModel)
+ {
+ LOGE("Preparation of the face tracking model failed. "
+ "Incorrect Media Vision Face tracking model handle is used");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ cv::Mat image;
+ int ret = convertSourceMV2GrayCV(source, image);
+ if (MEDIA_VISION_ERROR_NONE != ret)
+ {
+ LOGE("Convertion mv_source_h to gray failed");
+ return ret;
+ }
+
+ cv::Rect_<double> roi;
+ if (!location)
+ {
+ ret = pTrackModel->prepare(image);
+ }
+ else
+ {
+ int minX = image.cols;
+ int minY = image.rows;
+ int maxX = 0.0;
+ int maxY = 0.0;
+ for (unsigned i = 0; i < 4; ++i)
+ {
+ minX = minX > location->points[i].x ? location->points[i].x : minX;
+ minY = minY > location->points[i].y ? location->points[i].y : minY;
+ maxX = maxX < location->points[i].x ? location->points[i].x : maxX;
+ maxY = maxY < location->points[i].y ? location->points[i].y : maxY;
+ }
+
+ roi.x = minX;
+ roi.y = minY;
+ roi.width = maxX - minX;
+ roi.height = maxY - minY;
+ ret = pTrackModel->prepare(image, roi);
+ }
+
+ if (MEDIA_VISION_ERROR_NONE != ret)
+ {
+ LOGE("Error occurred when prepare face tracking model");
+ return ret;
+ }
+
+ LOGD("Face tracking model has been prepared");
+
+ return ret;
+}
+
+int mv_face_tracking_model_clone_open(
+ mv_face_tracking_model_h src,
+ mv_face_tracking_model_h *dst)
+{
+ if (!src || !dst)
+ {
+ LOGE("Can't clone tracking model. Both source and destination"
+ "tracking model handles has to be not NULL");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ (*dst) = static_cast<mv_face_tracking_model_h>(new (std::nothrow)FaceTrackingModel());
+
+ if (*dst == NULL)
+ {
+ LOGE("Failed to create media vision tracking model");
+ return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ }
+
+ LOGD("Tracking model [%p] has been created", *dst);
+
+ const FaceTrackingModel *pSrcModel = static_cast<FaceTrackingModel*>(src);
+ FaceTrackingModel *pDstModel = static_cast<FaceTrackingModel*>(*dst);
+
+ *pDstModel = *pSrcModel;
+
+ LOGD("Media vision tracking model has been cloned");
+
+ return MEDIA_VISION_ERROR_NONE;
+}
+
+int mv_face_tracking_model_save_open(
+ const char *file_name,
+ mv_face_tracking_model_h tracking_model)
+{
+ if (!tracking_model)
+ {
+ LOGE("Can't save tracking model to the file. "
+ "Handle has to be not NULL");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ if (NULL == file_name)
+ {
+ LOGE("Can't save tracking model to the file. "
+ "File name has to be specified");
+ return MEDIA_VISION_ERROR_INVALID_PATH;
+ }
+
+ FaceTrackingModel *pTrackModel = static_cast<FaceTrackingModel*>(tracking_model);
+
+ if (!pTrackModel)
+ {
+ LOGE("Saving of the face tracking model to file failed. "
+ "Incorrect Media Vision Face tracking model handle is used");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ const int ret = pTrackModel->save(std::string(file_name));
+
+ if (MEDIA_VISION_ERROR_NONE != ret)
+ {
+ LOGE("Error occurred when save tracking model to the file");
+ return ret;
+ }
+
+ LOGD("Media vision tracking model has been saved to the file [%s]", file_name);
+
+ return ret;
+}
+
+int mv_face_tracking_model_load_open(
+ const char *file_name,
+ mv_face_tracking_model_h *tracking_model)
+{
+ if (!tracking_model)
+ {
+ LOGE("Can't load tracking model from the file. "
+ "Handle has to be not NULL");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ if (NULL == file_name)
+ {
+ LOGE("Can't load tracking model from the file. "
+ "File name has to be specified");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ (*tracking_model) =
+ static_cast<mv_face_tracking_model_h>(new (std::nothrow)FaceTrackingModel());
+
+ if (*tracking_model == NULL)
+ {
+ LOGE("Failed to create media vision tracking model");
+ return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ }
+
+ FaceTrackingModel *pTrackModel =
+ static_cast<FaceTrackingModel*>(*tracking_model);
+
+ if (!pTrackModel)
+ {
+ LOGE("Loading of the face tracking model from file failed. "
+ "Incorrect Media Vision Face tracking model handle is used");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ const int ret = pTrackModel->load(std::string(file_name));
+
+ if (MEDIA_VISION_ERROR_NONE != ret)
+ {
+ LOGE("Error occurred when save recognition model to the file");
+ return ret;
+ }
+
+ LOGD("Media vision recognition model has been loaded from the file [%s]", file_name);
+
+ return ret;
+}