diff options
author | Tae-Young Chung <ty83.chung@samsung.com> | 2018-02-08 15:18:52 +0900 |
---|---|---|
committer | Tae-Young Chung <ty83.chung@samsung.com> | 2018-06-19 13:10:56 +0900 |
commit | 308eab6b69a5c4394a7dda1a51c9621c185270d1 (patch) | |
tree | 4d1922d64973bb95c8c0e091ae4db009bcba0dbf | |
parent | d6ef47d6802abb3febb4d0e81197c39ea3444551 (diff) | |
download | mediavision-308eab6b69a5c4394a7dda1a51c9621c185270d1.tar.gz mediavision-308eab6b69a5c4394a7dda1a51c9621c185270d1.tar.bz2 mediavision-308eab6b69a5c4394a7dda1a51c9621c185270d1.zip |
Migration to OpenCV 3.4.0
Change-Id: I38918bc7555837f234ce0f4c52da4cbd5f2e9232
Signed-off-by: Tae-Young Chung <ty83.chung@samsung.com>
48 files changed, 187 insertions, 1374 deletions
diff --git a/mv_barcode/barcode_generator/src/BarcodeGenerator.cpp b/mv_barcode/barcode_generator/src/BarcodeGenerator.cpp index d3299460..1bfe3270 100644 --- a/mv_barcode/barcode_generator/src/BarcodeGenerator.cpp +++ b/mv_barcode/barcode_generator/src/BarcodeGenerator.cpp @@ -20,9 +20,9 @@ #include <zint.h> -#include <opencv2/core/core.hpp> -#include <opencv2/imgproc/imgproc.hpp> -#include <opencv2/highgui/highgui.hpp> +#include <opencv2/core.hpp> +#include <opencv2/imgproc.hpp> +#include <opencv2/highgui.hpp> #include <cstring> #include <vector> diff --git a/mv_face/face/CMakeLists.txt b/mv_face/face/CMakeLists.txt index caff5302..8aec2e3f 100644 --- a/mv_face/face/CMakeLists.txt +++ b/mv_face/face/CMakeLists.txt @@ -17,7 +17,7 @@ include_directories("${PROJECT_SOURCE_DIR}/src") file(GLOB MV_FACE_INCLUDE_LIST "${PROJECT_SOURCE_DIR}/include/*.h") file(GLOB MV_FACE_SRC_LIST "${PROJECT_SOURCE_DIR}/src/*.cpp" "${PROJECT_SOURCE_DIR}/src/*.c") -find_package(OpenCV REQUIRED core objdetect contrib) +find_package(OpenCV REQUIRED core objdetect imgproc tracking face) if(NOT OpenCV_FOUND) message(SEND_ERROR "Failed to find OpenCV") return() diff --git a/mv_face/face/include/FaceDetector.h b/mv_face/face/include/FaceDetector.h index 48d17305..55c0132e 100644 --- a/mv_face/face/include/FaceDetector.h +++ b/mv_face/face/include/FaceDetector.h @@ -17,7 +17,7 @@ #ifndef __MEDIA_VISION_FACE_DETECTOR_H__ #define __MEDIA_VISION_FACE_DETECTOR_H__ -#include <opencv/cv.h> +#include <opencv2/objdetect.hpp> #include <vector> #include <string> diff --git a/mv_face/face/include/FaceExpressionRecognizer.h b/mv_face/face/include/FaceExpressionRecognizer.h index b8948338..496e4072 100644 --- a/mv_face/face/include/FaceExpressionRecognizer.h +++ b/mv_face/face/include/FaceExpressionRecognizer.h @@ -20,12 +20,9 @@ #include "mv_common_c.h" #include "mv_face_open.h" +#include <opencv2/objdetect.hpp> #include <string> -namespace cv { - class Mat; -} - /** * @file FaceExpressionRecognizer.h * 
@brief This file contains the FaceExpressionRecognizer class which implements diff --git a/mv_face/face/include/FaceEyeCondition.h b/mv_face/face/include/FaceEyeCondition.h index 7c1ec363..cc7a9bc6 100644 --- a/mv_face/face/include/FaceEyeCondition.h +++ b/mv_face/face/include/FaceEyeCondition.h @@ -20,7 +20,7 @@ #include <mv_common_c.h> #include <mv_face.h> -#include <opencv/cv.h> +#include <opencv2/imgproc.hpp> /** * @file FaceEyeCondition.h diff --git a/mv_face/face/include/FaceRecognitionModel.h b/mv_face/face/include/FaceRecognitionModel.h index f89c8466..8a8f3ae2 100644 --- a/mv_face/face/include/FaceRecognitionModel.h +++ b/mv_face/face/include/FaceRecognitionModel.h @@ -19,11 +19,13 @@ #include "FaceUtil.h" -#include <opencv2/core/core.hpp> -#include <opencv2/contrib/contrib.hpp> +#include <opencv2/core.hpp> +#include <opencv2/face/facerec.hpp> +#include <opencv2/imgproc.hpp> #include <cstring> #include <vector> +#include <set> /** * @file FaceRecognitionModel.h @@ -252,7 +254,7 @@ private: * Factory method for creating of the recognition algorithm based on input * configuration: */ - static cv::Ptr<cv::FaceRecognizer> CreateRecognitionAlgorithm( + static cv::Ptr<cv::face::FaceRecognizer> CreateRecognitionAlgorithm( const FaceRecognitionModelConfig& config = FaceRecognitionModelConfig()); @@ -268,7 +270,7 @@ private: FaceRecognitionModelConfig m_learnAlgorithmConfig; /**< Configuration of the learning method */ - cv::Ptr<cv::FaceRecognizer> m_recognizer; /**< Recognizer associated with + cv::Ptr<cv::face::FaceRecognizer> m_recognizer; /**< Recognizer associated with the current model */ std::set<int> m_learnedLabels; /**< Vector of the labels had been learned diff --git a/mv_face/face/include/TrackerMedianFlow.h b/mv_face/face/include/FaceTracker.h index e8bed92d..5266feae 100644 --- a/mv_face/face/include/TrackerMedianFlow.h +++ b/mv_face/face/include/FaceTracker.h @@ -42,11 +42,12 @@ #ifndef __MEDIA_VISION_TRACKER_MEDIAN_FLOW_H__ #define 
__MEDIA_VISION_TRACKER_MEDIAN_FLOW_H__ -#include "opencv2/core/core.hpp" +#include <opencv2/core.hpp> +#include <opencv2/tracking.hpp> namespace cv { -class TrackerMedianFlowModel; +//class TrackerMedianFlowModel; /** @brief Median Flow tracker implementation. @@ -58,7 +59,7 @@ by authors to outperform MIL). During the implementation period the code at <http://www.aonsquared.co.uk/node/5>, the courtesy of the author Arthur Amarra, was used for the reference purpose. */ -class TrackerMedianFlow : public virtual Algorithm { +class FaceTracker : public TrackerMedianFlow { public: struct Params { /** @@ -80,12 +81,12 @@ public: flow search used for tracking */ }; - TrackerMedianFlow(Params paramsIn = Params()); + FaceTracker(Params paramsIn = Params()); - bool copyTo(TrackerMedianFlow& copy) const; + bool copyTo(FaceTracker& copy) const; - bool init(const Mat& image, const Rect_<float>& boundingBox); - bool update(const Mat& image, Rect_<float>& boundingBox); + bool initImpl(const Mat& image, const Rect2d& boundingBox); + bool updateImpl(const Mat& image, Rect2d& boundingBox); bool isInited() const; @@ -94,11 +95,12 @@ public: void read(FileStorage& fn); void write(FileStorage& fs) const; + void read( const FileNode& fn ); private: bool m_isInit; - bool medianFlowImpl(Mat oldImage, Mat newImage, Rect_<float>& oldBox); + bool medianFlowImpl(Mat oldImage, Mat newImage, Rect2f& oldBox); Rect_<float> vote( const std::vector<Point2f>& oldPoints, @@ -132,7 +134,7 @@ private: Lucas–Kanade optical flow algorithm used during tracking */ - Rect_<float> m_boundingBox; /**< Tracking object bounding box */ + Rect2d m_boundingBox; /**< Tracking object bounding box */ float m_confidence; /**< Confidence that face was tracked correctly at the last tracking iteration */ diff --git a/mv_face/face/include/FaceTrackingModel.h b/mv_face/face/include/FaceTrackingModel.h index 8c73705b..95f8d6e0 100644 --- a/mv_face/face/include/FaceTrackingModel.h +++ 
b/mv_face/face/include/FaceTrackingModel.h @@ -17,7 +17,9 @@ #ifndef __MEDIA_VISION_FACE_TRACKING_MODEL_H__ #define __MEDIA_VISION_FACE_TRACKING_MODEL_H__ -#include "TrackerMedianFlow.h" +#include <opencv2/core.hpp> +#include "FaceTracker.h" + /** * @file FaceTrackingModel.h @@ -158,7 +160,7 @@ private: of the tracking model to perform track */ - cv::Ptr<cv::TrackerMedianFlow> m_tracker; /**< Underlying OpenCV tracking + cv::Ptr<cv::FaceTracker> m_tracker; /**< Underlying OpenCV tracking model */ }; diff --git a/mv_face/face/include/FaceUtil.h b/mv_face/face/include/FaceUtil.h index 65c58969..bd2cd637 100644 --- a/mv_face/face/include/FaceUtil.h +++ b/mv_face/face/include/FaceUtil.h @@ -17,7 +17,7 @@ #ifndef __MEDIA_VISION_FACE_UTIL_H__ #define __MEDIA_VISION_FACE_UTIL_H__ -#include <opencv/cv.h> +#include <opencv2/core.hpp> #include "mv_common_c.h" diff --git a/mv_face/face/src/FaceExpressionRecognizer.cpp b/mv_face/face/src/FaceExpressionRecognizer.cpp index a1f7b0b7..54f5309c 100644 --- a/mv_face/face/src/FaceExpressionRecognizer.cpp +++ b/mv_face/face/src/FaceExpressionRecognizer.cpp @@ -20,7 +20,6 @@ #include <vector> -#include <opencv/cv.h> namespace MediaVision { namespace Face { diff --git a/mv_face/face/src/FaceRecognitionModel.cpp b/mv_face/face/src/FaceRecognitionModel.cpp index e247f584..32a6d8c2 100644 --- a/mv_face/face/src/FaceRecognitionModel.cpp +++ b/mv_face/face/src/FaceRecognitionModel.cpp @@ -56,17 +56,17 @@ bool isEmptyAlgorithmParam(const std::string& path) return false; } -int CopyOpenCVAlgorithmParameters(const cv::Ptr<cv::FaceRecognizer>& srcAlg, - cv::Ptr<cv::FaceRecognizer>& dstAlg) +int CopyOpenCVAlgorithmParameters(const cv::Ptr<cv::face::FaceRecognizer>& srcAlg, + cv::Ptr<cv::face::FaceRecognizer>& dstAlg) { char tempPath[1024] = ""; - snprintf(tempPath, 1024, "/tmp/alg_copy_%p_%p", srcAlg.obj, dstAlg.obj); + snprintf(tempPath, 1024, "/tmp/alg_copy_%p_%p", srcAlg.get(), dstAlg.get()); - srcAlg->save(tempPath); + 
srcAlg->write(tempPath); if (!isEmptyAlgorithmParam(tempPath)) - dstAlg->load(tempPath); + dstAlg->read(tempPath); if (0 != remove(tempPath)) LOGW("Error removing serialized FaceRecognizer in %s", tempPath); @@ -123,11 +123,11 @@ int CopyOpenCVAlgorithmParameters(const cv::Ptr<cv::FaceRecognizer>& srcAlg, } void ParseOpenCVLabels( - const cv::Ptr<cv::FaceRecognizer>& recognizer, + const cv::Ptr<cv::face::FaceRecognizer>& recognizer, std::set<int>& outLabels) { if (!recognizer.empty()) { - cv::Mat labels = recognizer->getMat("labels"); + cv::Mat labels = (dynamic_cast<cv::face::EigenFaceRecognizer*>(recognizer.get()))->getLabels(); for (int i = 0; i < labels.rows; ++i) outLabels.insert(labels.at<int>(i, 0)); @@ -174,7 +174,7 @@ bool FaceRecognitionModelConfig::operator!=( FaceRecognitionModel::FaceRecognitionModel() : m_canRecognize(false), - m_recognizer(NULL) + m_recognizer() // The default constructor creates a null Ptr { ; /* NULL */ } @@ -255,7 +255,7 @@ int FaceRecognitionModel::save(const std::string& fileName) } storage << "can_recognize" << m_canRecognize; - m_recognizer->save(storage); + m_recognizer->write(storage); storage.release(); } else { @@ -292,39 +292,39 @@ int FaceRecognitionModel::load(const std::string& fileName) storage["algorithm"] >> algName; storage["can_recognize"] >> canRecognize; - cv::Ptr<cv::FaceRecognizer> tempRecognizer; + cv::Ptr<cv::face::FaceRecognizer> tempRecognizer; FaceRecognitionModelConfig tempConfig; std::set<int> tempLearnedLabels; if (algName == "Eigenfaces") { - tempRecognizer = cv::createEigenFaceRecognizer(); + tempRecognizer = cv::face::EigenFaceRecognizer::create(); storage["resizeW"] >> tempConfig.mImgWidth; storage["resizeH"] >> tempConfig.mImgHeight; - tempRecognizer->load(storage); + tempRecognizer->read(storage.root()); tempConfig.mModelType = MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES; tempConfig.mNumComponents = - tempRecognizer->getInt("ncomponents"); + 
(dynamic_cast<cv::face::EigenFaceRecognizer*>(tempRecognizer.get()))->getNumComponents(); ParseOpenCVLabels(tempRecognizer, tempLearnedLabels); } else if (algName == "Fisherfaces") { - tempRecognizer = cv::createFisherFaceRecognizer(); + tempRecognizer = cv::face::FisherFaceRecognizer::create(); storage["resizeW"] >> tempConfig.mImgWidth; storage["resizeH"] >> tempConfig.mImgHeight; - tempRecognizer->load(storage); + tempRecognizer->read(storage.root()); tempConfig.mModelType = MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES; tempConfig.mNumComponents = - tempRecognizer->getInt("ncomponents"); + (dynamic_cast<cv::face::FisherFaceRecognizer*>(tempRecognizer.get()))->getNumComponents(); ParseOpenCVLabels(tempRecognizer, tempLearnedLabels); } else if (algName == "LBPH") { - tempRecognizer = cv::createLBPHFaceRecognizer(); - tempRecognizer->load(storage); + tempRecognizer = cv::face::LBPHFaceRecognizer::create(); + tempRecognizer->read(storage.root()); tempConfig.mModelType = MEDIA_VISION_FACE_MODEL_TYPE_LBPH; - tempConfig.mGridX = tempRecognizer->getInt("grid_x"); - tempConfig.mGridY = tempRecognizer->getInt("grid_y"); - tempConfig.mNeighbors = tempRecognizer->getInt("neighbors"); - tempConfig.mRadius = tempRecognizer->getInt("radius"); + tempConfig.mGridX = (dynamic_cast<cv::face::LBPHFaceRecognizer*>(tempRecognizer.get()))->getGridX(); + tempConfig.mGridY = (dynamic_cast<cv::face::LBPHFaceRecognizer*>(tempRecognizer.get()))->getGridY(); + tempConfig.mNeighbors = (dynamic_cast<cv::face::LBPHFaceRecognizer*>(tempRecognizer.get()))->getNeighbors(); + tempConfig.mRadius = (dynamic_cast<cv::face::LBPHFaceRecognizer*>(tempRecognizer.get()))->getRadius(); ParseOpenCVLabels(tempRecognizer, tempLearnedLabels); } else { tempConfig = FaceRecognitionModelConfig(); @@ -336,7 +336,7 @@ int FaceRecognitionModel::load(const std::string& fileName) return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT; } - tempConfig.mThreshold = tempRecognizer->getDouble("threshold"); + tempConfig.mThreshold = 
tempRecognizer->getThreshold(); LOGD("Recognition model of [%s] type has been loaded from file", algName.c_str()); @@ -529,23 +529,23 @@ int FaceRecognitionModel::recognize(const cv::Mat& image, FaceRecognitionResults return MEDIA_VISION_ERROR_NONE; } -cv::Ptr<cv::FaceRecognizer> FaceRecognitionModel::CreateRecognitionAlgorithm( +cv::Ptr<cv::face::FaceRecognizer> FaceRecognitionModel::CreateRecognitionAlgorithm( const FaceRecognitionModelConfig& config) { - cv::Ptr<cv::FaceRecognizer> tempRecognizer; + cv::Ptr<cv::face::FaceRecognizer> tempRecognizer; switch (config.mModelType) { case MEDIA_VISION_FACE_MODEL_TYPE_EIGENFACES: - tempRecognizer = cv::createEigenFaceRecognizer( + tempRecognizer = cv::face::EigenFaceRecognizer::create( config.mNumComponents, config.mThreshold); break; case MEDIA_VISION_FACE_MODEL_TYPE_FISHERFACES: - tempRecognizer = cv::createFisherFaceRecognizer( + tempRecognizer = cv::face::FisherFaceRecognizer::create( config.mNumComponents, config.mThreshold); break; case MEDIA_VISION_FACE_MODEL_TYPE_LBPH: - tempRecognizer = cv::createLBPHFaceRecognizer( + tempRecognizer = cv::face::LBPHFaceRecognizer::create( config.mRadius, config.mNeighbors, config.mGridX, @@ -553,7 +553,7 @@ cv::Ptr<cv::FaceRecognizer> FaceRecognitionModel::CreateRecognitionAlgorithm( config.mThreshold); break; default: - return NULL; + LOGE("Unknown FaceRecognition model"); } return tempRecognizer; diff --git a/mv_face/face/src/TrackerMedianFlow.cpp b/mv_face/face/src/FaceTracker.cpp index 759b6061..f5427cef 100644 --- a/mv_face/face/src/TrackerMedianFlow.cpp +++ b/mv_face/face/src/FaceTracker.cpp @@ -39,10 +39,10 @@ // //M*/ -#include "TrackerMedianFlow.h" +#include "FaceTracker.h" #include "opencv2/video/tracking.hpp" -#include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/imgproc.hpp" #include <algorithm> #include <cmath> @@ -52,14 +52,14 @@ namespace { } /* anonymous namespace */ namespace cv { -TrackerMedianFlow::Params::Params() +FaceTracker::Params::Params() { 
mPointsInGrid = 10; mWindowSize = Size(3, 3); mPyrMaxLevel = 5; } -void TrackerMedianFlow::Params::read(const cv::FileNode& fn) +void FaceTracker::Params::read(const cv::FileNode& fn) { mPointsInGrid = fn["pointsInGrid"]; int winSizeHeight = fn["windowSizeHeight"]; @@ -68,7 +68,7 @@ void TrackerMedianFlow::Params::read(const cv::FileNode& fn) mPyrMaxLevel = fn["pyrMaxLevel"]; } -void TrackerMedianFlow::Params::write(cv::FileStorage& fs) const +void FaceTracker::Params::write(cv::FileStorage& fs) const { fs << "pointsInGrid" << mPointsInGrid; fs << "windowSizeHeight" << mWindowSize.height; @@ -76,7 +76,7 @@ void TrackerMedianFlow::Params::write(cv::FileStorage& fs) const fs << "pyrMaxLevel" << mPyrMaxLevel; } -TrackerMedianFlow::TrackerMedianFlow(Params paramsIn) : +FaceTracker::FaceTracker(Params paramsIn) : m_termcrit(TermCriteria::COUNT | TermCriteria::EPS, 20, 0.3), m_confidence(0.0) { @@ -84,7 +84,7 @@ TrackerMedianFlow::TrackerMedianFlow(Params paramsIn) : m_isInit = false; } -bool TrackerMedianFlow::copyTo(TrackerMedianFlow& copy) const +bool FaceTracker::copyTo(FaceTracker& copy) const { copy.m_isInit = m_isInit; copy.m_params = m_params; @@ -95,7 +95,7 @@ bool TrackerMedianFlow::copyTo(TrackerMedianFlow& copy) const return true; } -bool TrackerMedianFlow::init(const Mat& image, const Rect_<float>& boundingBox) +bool FaceTracker::initImpl(const Mat& image, const Rect2d& boundingBox) { if (image.empty()) return false; @@ -109,7 +109,7 @@ bool TrackerMedianFlow::init(const Mat& image, const Rect_<float>& boundingBox) return m_isInit; } -bool TrackerMedianFlow::update(const Mat& image, Rect_<float>& boundingBox) +bool FaceTracker::updateImpl(const Mat& image, Rect2d& boundingBox) { if (!m_isInit || image.empty()) return false; @@ -119,8 +119,8 @@ bool TrackerMedianFlow::update(const Mat& image, Rect_<float>& boundingBox) * frame and bounding box. 
Then, track as usually: */ if (m_image.rows != image.rows || m_image.cols != image.cols) { - const float xFactor = (float) image.cols / m_image.cols; - const float yFactor = (float) image.rows / m_image.rows; + const double xFactor = (double) image.cols / m_image.cols; + const double yFactor = (double) image.rows / m_image.rows; resize(m_image, m_image, Size(), xFactor, yFactor); @@ -132,33 +132,33 @@ bool TrackerMedianFlow::update(const Mat& image, Rect_<float>& boundingBox) Mat oldImage = m_image; - Rect_<float> oldBox = m_boundingBox; + Rect2f oldBox = (Rect2f)m_boundingBox; if(!medianFlowImpl(oldImage, image, oldBox)) return false; - boundingBox = oldBox; + boundingBox = (Rect2d)oldBox; image.copyTo(m_image); m_boundingBox = boundingBox; return true; } -bool TrackerMedianFlow::isInited() const +bool FaceTracker::isInited() const { return m_isInit; } -float TrackerMedianFlow::getLastConfidence() const +float FaceTracker::getLastConfidence() const { return m_confidence; } -Rect_<float> TrackerMedianFlow::getLastBoundingBox() const +Rect_<float> FaceTracker::getLastBoundingBox() const { return m_boundingBox; } -bool TrackerMedianFlow::medianFlowImpl( - Mat oldGrayImage, Mat newGrayImage, Rect_<float>& oldBox) +bool FaceTracker::medianFlowImpl( + Mat oldGrayImage, Mat newGrayImage, Rect2f& oldBox) { std::vector<Point2f> pointsToTrackOld, pointsToTrackNew; @@ -241,7 +241,7 @@ bool TrackerMedianFlow::medianFlowImpl( return true; } -Rect_<float> TrackerMedianFlow::vote( +Rect_<float> FaceTracker::vote( const std::vector<Point2f>& oldPoints, const std::vector<Point2f>& newPoints, const Rect_<float>& oldRect, @@ -305,7 +305,7 @@ Rect_<float> TrackerMedianFlow::vote( } template<typename T> -T TrackerMedianFlow::getMedian(std::vector<T>& values, int size) +T FaceTracker::getMedian(std::vector<T>& values, int size) { if (size == -1) size = (int)values.size(); @@ -319,14 +319,14 @@ T TrackerMedianFlow::getMedian(std::vector<T>& values, int size) } } -float 
TrackerMedianFlow::l2distance(Point2f p1, Point2f p2) +float FaceTracker::l2distance(Point2f p1, Point2f p2) { float dx = p1.x - p2.x; float dy = p1.y - p2.y; return sqrt(dx * dx + dy * dy); } -void TrackerMedianFlow::check_FB( +void FaceTracker::check_FB( std::vector<Mat> newPyramid, const std::vector<Point2f>& oldPoints, const std::vector<Point2f>& newPoints, @@ -358,7 +358,7 @@ void TrackerMedianFlow::check_FB( status[idx] = (FBerror[idx] < FBerrorMedian); } -void TrackerMedianFlow::check_NCC( +void FaceTracker::check_NCC( const Mat& oldImage, const Mat& newImage, const std::vector<Point2f>& oldPoints, @@ -391,9 +391,14 @@ void TrackerMedianFlow::check_NCC( status[idx] = status[idx] && (NCC[idx] > median); } -void TrackerMedianFlow::read(cv::FileStorage& fs) +void FaceTracker::read(const cv::FileNode& fn) { - m_params.read(fs.root()); + m_params.read(fn); +} + +void FaceTracker::read(cv::FileStorage& fs) +{ + read(fs.root()); float bbX = 0.f; float bbY = 0.f; float bbW = 0.f; @@ -406,7 +411,8 @@ void TrackerMedianFlow::read(cv::FileStorage& fs) fs["lastImage"] >> m_image; } -void TrackerMedianFlow::write(cv::FileStorage& fs) const + +void FaceTracker::write(cv::FileStorage& fs) const { m_params.write(fs); fs << "lastLocationX" << m_boundingBox.x; diff --git a/mv_face/face/src/FaceTrackingModel.cpp b/mv_face/face/src/FaceTrackingModel.cpp index 98df8a42..46faa0bc 100644 --- a/mv_face/face/src/FaceTrackingModel.cpp +++ b/mv_face/face/src/FaceTrackingModel.cpp @@ -32,26 +32,26 @@ FaceTrackingResults::FaceTrackingResults() : FaceTrackingModel::FaceTrackingModel() : m_canTrack(false), - m_tracker(new cv::TrackerMedianFlow()) + m_tracker(new cv::FaceTracker()) { ; /* NULL */ } FaceTrackingModel::FaceTrackingModel(const FaceTrackingModel& origin) : m_canTrack(origin.m_canTrack), - m_tracker(new cv::TrackerMedianFlow()) + m_tracker(new cv::FaceTracker()) { if (!origin.m_tracker.empty()) - origin.m_tracker->copyTo(*(m_tracker.obj)); + 
origin.m_tracker->copyTo(*(m_tracker.get())); } FaceTrackingModel& FaceTrackingModel::operator=(const FaceTrackingModel& copy) { if (this != &copy) { m_canTrack = copy.m_canTrack; - m_tracker = cv::Ptr<cv::TrackerMedianFlow>(new cv::TrackerMedianFlow()); + m_tracker = cv::Ptr<cv::FaceTracker>(new cv::FaceTracker()); if (!copy.m_tracker.empty()) - copy.m_tracker->copyTo(*(m_tracker.obj)); + copy.m_tracker->copyTo(*(m_tracker.get())); } return *this; @@ -162,7 +162,7 @@ int FaceTrackingModel::prepare( return MEDIA_VISION_ERROR_INVALID_OPERATION; } - if (!m_tracker->init(image, boundingBox)) { + if (!m_tracker->initImpl(image, boundingBox)) { LOGE("Failed to prepare tracking model."); return MEDIA_VISION_ERROR_INVALID_OPERATION; } @@ -174,8 +174,10 @@ int FaceTrackingModel::track(const cv::Mat& image, FaceTrackingResults& results) { if (!m_tracker.empty() && m_canTrack) { - results.mIsTracked = m_tracker->update(image, results.mFaceLocation); + cv::Rect2d faceLocation = (cv::Rect2d)results.mFaceLocation; + results.mIsTracked = m_tracker->updateImpl(image, faceLocation); results.mConfidence = m_tracker->getLastConfidence(); + results.mFaceLocation = (cv::Rect2f)faceLocation; } else { LOGE("Attempt to track face with not prepared model"); return MEDIA_VISION_ERROR_INVALID_OPERATION; } diff --git a/mv_face/face/src/FaceUtil.cpp b/mv_face/face/src/FaceUtil.cpp index 954f82b0..c2e5ec01 100644 --- a/mv_face/face/src/FaceUtil.cpp +++ b/mv_face/face/src/FaceUtil.cpp @@ -18,7 +18,7 @@ #include "mv_private.h" -#include <opencv2/imgproc/types_c.h> +#include <opencv2/imgproc.hpp> namespace MediaVision { namespace Face { diff --git a/mv_image/image/CMakeLists.txt b/mv_image/image/CMakeLists.txt index 801c4180..acdb2405 100644 --- a/mv_image/image/CMakeLists.txt +++ b/mv_image/image/CMakeLists.txt @@ -17,7 +17,7 @@ include_directories("${PROJECT_SOURCE_DIR}/src") file(GLOB_RECURSE MV_IMAGE_INC_LIST "${PROJECT_SOURCE_DIR}/include/*.h") file(GLOB_RECURSE
MV_IMAGE_SRC_LIST "${PROJECT_SOURCE_DIR}/src/*.cpp" "${PROJECT_SOURCE_DIR}/src/*.c") -find_package(OpenCV REQUIRED core imgproc objdetect features2d contrib) +find_package(OpenCV REQUIRED core imgproc objdetect tracking features2d xfeatures2d) if(NOT OpenCV_FOUND) message(SEND_ERROR "Failed to find OpenCV") return() diff --git a/mv_image/image/include/Features/FeatureExtractor.h b/mv_image/image/include/Features/FeatureExtractor.h index 4a34faea..f2f6e5e6 100644 --- a/mv_image/image/include/Features/FeatureExtractor.h +++ b/mv_image/image/include/Features/FeatureExtractor.h @@ -21,11 +21,6 @@ #include "Features/FeaturePack.h" -namespace cv { -class FeatureDetector; -class DescriptorExtractor; -} - namespace MediaVision { namespace Image { /** diff --git a/mv_image/image/include/Features/FeatureExtractorFactory.h b/mv_image/image/include/Features/FeatureExtractorFactory.h index 837725a3..14772928 100644 --- a/mv_image/image/include/Features/FeatureExtractorFactory.h +++ b/mv_image/image/include/Features/FeatureExtractorFactory.h @@ -19,7 +19,7 @@ #include "Features/FeatureExtractor.h" -#include <opencv2/core/core.hpp> +#include <opencv2/core.hpp> namespace MediaVision { namespace Image { diff --git a/mv_image/image/include/Features/FeatureMatcher.h b/mv_image/image/include/Features/FeatureMatcher.h index 37f4508b..971e7f9a 100644 --- a/mv_image/image/include/Features/FeatureMatcher.h +++ b/mv_image/image/include/Features/FeatureMatcher.h @@ -19,8 +19,6 @@ #include "Features/FeaturePack.h" -#include <opencv2/features2d/features2d.hpp> - namespace MediaVision { namespace Image { diff --git a/mv_image/image/include/Features/FeaturePack.h b/mv_image/image/include/Features/FeaturePack.h index c492bf3a..422de0bd 100644 --- a/mv_image/image/include/Features/FeaturePack.h +++ b/mv_image/image/include/Features/FeaturePack.h @@ -17,14 +17,15 @@ #ifndef __MEDIA_VISION_FEATUREPACK_H__ #define __MEDIA_VISION_FEATUREPACK_H__ +#include "mv_private.h" #include "ImageConfig.h" 
#include <vector> -#include <opencv2/core/core.hpp> - -namespace cv { -class KeyPoint; -} +#include <opencv2/core.hpp> +#include <opencv2/imgproc.hpp> +#include <opencv2/features2d.hpp> +#include <opencv2/xfeatures2d.hpp> +#include <opencv2/calib3d.hpp> namespace MediaVision { namespace Image { diff --git a/mv_image/image/include/ImageMathUtil.h b/mv_image/image/include/ImageMathUtil.h index f8a8ce11..bd496745 100644 --- a/mv_image/image/include/ImageMathUtil.h +++ b/mv_image/image/include/ImageMathUtil.h @@ -17,7 +17,7 @@ #ifndef __MEDIA_VISION_IMAGEMATHUTIL_H__ #define __MEDIA_VISION_IMAGEMATHUTIL_H__ -#include <opencv/cv.h> +#include <opencv2/core.hpp> /** * @file ImageMathUtil.h diff --git a/mv_image/image/include/Recognition/ImageRecognizer.h b/mv_image/image/include/Recognition/ImageRecognizer.h index 2a925508..5ac2f2e6 100644 --- a/mv_image/image/include/Recognition/ImageRecognizer.h +++ b/mv_image/image/include/Recognition/ImageRecognizer.h @@ -22,7 +22,7 @@ #include "Recognition/ImageObject.h" -#include <opencv/cv.h> +#include <opencv2/core.hpp> /** * @file ImageRecognizer.h diff --git a/mv_image/image/include/Tracking/CascadeTracker.h b/mv_image/image/include/Tracking/CascadeTracker.h index e28e2944..09a9d081 100644 --- a/mv_image/image/include/Tracking/CascadeTracker.h +++ b/mv_image/image/include/Tracking/CascadeTracker.h @@ -19,7 +19,7 @@ #include "Tracking/ObjectTracker.h" -#include <opencv2/core/core.hpp> +#include <opencv2/core.hpp> #include <set> diff --git a/mv_image/image/include/Tracking/ObjectTracker.h b/mv_image/image/include/Tracking/ObjectTracker.h index ffc02c1f..0fe88b49 100644 --- a/mv_image/image/include/Tracking/ObjectTracker.h +++ b/mv_image/image/include/Tracking/ObjectTracker.h @@ -17,7 +17,7 @@ #ifndef __MEDIA_VISION_OBJECTTRACKER_H__ #define __MEDIA_VISION_OBJECTTRACKER_H__ -#include <opencv2/core/core.hpp> +#include <opencv2/core.hpp> namespace MediaVision { namespace Image { diff --git 
a/mv_image/image/src/Features/BasicExtractorFactory.cpp b/mv_image/image/src/Features/BasicExtractorFactory.cpp index 9c2d6e61..0982be16 100644 --- a/mv_image/image/src/Features/BasicExtractorFactory.cpp +++ b/mv_image/image/src/Features/BasicExtractorFactory.cpp @@ -16,7 +16,7 @@ #include "Features/BasicExtractorFactory.h" -#include <opencv/cv.h> +#include <opencv2/core.hpp> namespace MediaVision { namespace Image { @@ -32,17 +32,35 @@ BasicExtractorFactory::BasicExtractorFactory( cv::Ptr<FeatureExtractor> BasicExtractorFactory::buildFeatureExtractor() { cv::Ptr<FeatureExtractor> featureExtractor(new (std::nothrow)FeatureExtractor()); - if (featureExtractor == NULL) - return NULL; - cv::Ptr<cv::FeatureDetector> detector = - cv::FeatureDetector::create(KeypointNames[__kpType]); + if (featureExtractor != NULL) { + cv::Ptr<cv::FeatureDetector> detector; + switch (__kpType) { + case KT_ORB: + detector = cv::ORB::create(); + break; + case KT_GFTT: + detector = cv::GFTTDetector::create(); + break; + default: + LOGE("Unknown feature detector", __FUNCTION__); + } - cv::Ptr<cv::DescriptorExtractor> extractor = - cv::DescriptorExtractor::create(DescriptorNames[__descType]); + cv::Ptr<cv::DescriptorExtractor> extractor; + switch (__descType) { + case DT_ORB: + extractor = cv::ORB::create(); + break; + case DT_BRIEF: + extractor = cv::xfeatures2d::BriefDescriptorExtractor::create(); + break; + default: + LOGE("Unkown feature extractor", __FUNCTION__); + } - featureExtractor->setFeatureDetector(detector, __kpType); - featureExtractor->setDescriptorExtractor(extractor, __descType); + featureExtractor->setFeatureDetector(detector, __kpType); + featureExtractor->setDescriptorExtractor(extractor, __descType); + } return featureExtractor; } diff --git a/mv_image/image/src/Features/FeatureExtractor.cpp b/mv_image/image/src/Features/FeatureExtractor.cpp index 15c36bec..5b04f007 100644 --- a/mv_image/image/src/Features/FeatureExtractor.cpp +++ 
b/mv_image/image/src/Features/FeatureExtractor.cpp @@ -18,7 +18,8 @@ #include "ImageMathUtil.h" -#include <opencv/cv.h> +#include <opencv2/core.hpp> + namespace MediaVision { namespace Image { diff --git a/mv_image/image/src/Features/FeatureMatcher.cpp b/mv_image/image/src/Features/FeatureMatcher.cpp index 891e85f4..f7c887c0 100644 --- a/mv_image/image/src/Features/FeatureMatcher.cpp +++ b/mv_image/image/src/Features/FeatureMatcher.cpp @@ -18,7 +18,7 @@ #include "ImageMathUtil.h" -#include <opencv/cv.h> +#include <opencv2/core.hpp> namespace MediaVision { namespace Image { diff --git a/mv_image/image/src/Features/FeaturePack.cpp b/mv_image/image/src/Features/FeaturePack.cpp index 57338ff2..d2ce6161 100644 --- a/mv_image/image/src/Features/FeaturePack.cpp +++ b/mv_image/image/src/Features/FeaturePack.cpp @@ -16,7 +16,7 @@ #include "Features/FeaturePack.h" -#include <opencv/cv.h> +#include <opencv2/core.hpp> namespace MediaVision { namespace Image { diff --git a/mv_image/image/src/Features/ORBExtractorFactory.cpp b/mv_image/image/src/Features/ORBExtractorFactory.cpp index cc482cee..1dcded8f 100644 --- a/mv_image/image/src/Features/ORBExtractorFactory.cpp +++ b/mv_image/image/src/Features/ORBExtractorFactory.cpp @@ -18,7 +18,7 @@ #include "ImageMathUtil.h" -#include <opencv/cv.h> +#include <opencv2/core.hpp> namespace MediaVision { namespace Image { @@ -33,17 +33,9 @@ ORBExtractorFactory::ORBExtractorFactory( cv::Ptr<FeatureExtractor> ORBExtractorFactory::buildFeatureExtractor() { cv::Ptr<FeatureExtractor> featureExtractor(new (std::nothrow)FeatureExtractor()); - if (featureExtractor == NULL) - return NULL; - cv::Ptr<cv::OrbFeatureDetector> detector( - new (std::nothrow)cv::ORB( - __maximumFeaturesNumber, - __scaleFactor)); - if (detector == NULL) - return NULL; - - cv::Ptr<cv::OrbDescriptorExtractor> extractor = detector; + cv::Ptr<cv::ORB> detector = cv::ORB::create(__maximumFeaturesNumber, __scaleFactor); + cv::Ptr<cv::ORB> extractor = detector; 
featureExtractor->setFeatureDetector(detector, KT_ORB); featureExtractor->setDescriptorExtractor(extractor, DT_ORB); diff --git a/mv_image/image/src/Recognition/ImageObject.cpp b/mv_image/image/src/Recognition/ImageObject.cpp index 057038eb..60d30b2f 100644 --- a/mv_image/image/src/Recognition/ImageObject.cpp +++ b/mv_image/image/src/Recognition/ImageObject.cpp @@ -26,10 +26,11 @@ #include "mv_common.h" #include <opencv/cv.h> -#include <opencv2/features2d/features2d.hpp> +#include <opencv2/features2d.hpp> #include <fstream> #include <unistd.h> +#include <iomanip> namespace MediaVision { namespace Image { diff --git a/mv_image/image/src/Tracking/AsyncTracker.cpp b/mv_image/image/src/Tracking/AsyncTracker.cpp index ea24f8a4..89167845 100644 --- a/mv_image/image/src/Tracking/AsyncTracker.cpp +++ b/mv_image/image/src/Tracking/AsyncTracker.cpp @@ -22,7 +22,7 @@ namespace MediaVision { namespace Image { AsyncTracker::AsyncTracker(const AsyncTracker& copy) : - __baseTracker(copy.__baseTracker.obj->clone()), + __baseTracker(copy.__baseTracker.get()->clone()), __result(copy.__result), __isRun(false), __isUpdated(copy.__isUpdated), diff --git a/mv_image/image/src/Tracking/CascadeTracker.cpp b/mv_image/image/src/Tracking/CascadeTracker.cpp index 71109394..a60bbb2d 100644 --- a/mv_image/image/src/Tracking/CascadeTracker.cpp +++ b/mv_image/image/src/Tracking/CascadeTracker.cpp @@ -49,7 +49,7 @@ bool CascadeTracker::track(const cv::Mat& frame, std::vector<cv::Point>& result) std::set<TrackerInfo>::iterator it = __trackers.begin(); for (; it != __trackers.end(); ++it) - if (!it->mTracker.obj->track(frame, it->mResult)) + if (!it->mTracker.get()->track(frame, it->mResult)) it->mResult.clear(); return mergeResults(result); @@ -60,7 +60,7 @@ void CascadeTracker::reinforcement(const std::vector<cv::Point>& location) std::set<TrackerInfo>::iterator it = __trackers.begin(); for (; it != __trackers.end(); ++it) - it->mTracker.obj->reinforcement(location); + 
it->mTracker.get()->reinforcement(location); } cv::Ptr<ObjectTracker> CascadeTracker::clone() const @@ -76,7 +76,7 @@ CascadeTracker& CascadeTracker::operator=(const CascadeTracker& copy) std::set<TrackerInfo>::iterator it = copy.__trackers.begin(); for (; it != copy.__trackers.end(); ++it) { - TrackerInfo temp(it->mTracker.obj->clone(), it->mPriority); + TrackerInfo temp(it->mTracker.get()->clone(), it->mPriority); temp.mResult = it->mResult; __trackers.insert(temp); @@ -121,7 +121,7 @@ void CascadeTracker::internalReinforcement() bool isUpdated = true; /* TODO: Redesign without dynamic_cast */ - AsyncTracker *asyncView = dynamic_cast<AsyncTracker*>(it1->mTracker.obj); + AsyncTracker *asyncView = dynamic_cast<AsyncTracker*>(it1->mTracker.get()); if (NULL != asyncView) isUpdated = asyncView->isUpdated(it1->mResult); @@ -135,7 +135,7 @@ void CascadeTracker::internalReinforcement() if (getQuadrangleArea(checkedArea.data()) < __minimumArea) { it1->mResult = std::vector<cv::Point>(0); - it1->mTracker.obj->reinforcement(it1->mResult); + it1->mTracker.get()->reinforcement(it1->mResult); } float priority = it1->mPriority; @@ -143,7 +143,7 @@ void CascadeTracker::internalReinforcement() for (; it2 != __trackers.end(); ++it2) if (it1 != it2 && priority > it2->mPriority) - it2->mTracker.obj->reinforcement(it1->mResult); + it2->mTracker.get()->reinforcement(it1->mResult); } } } diff --git a/mv_image/image/src/Tracking/FeatureSubstitutionTracker.cpp b/mv_image/image/src/Tracking/FeatureSubstitutionTracker.cpp index 43805a81..b6e10fcf 100644 --- a/mv_image/image/src/Tracking/FeatureSubstitutionTracker.cpp +++ b/mv_image/image/src/Tracking/FeatureSubstitutionTracker.cpp @@ -78,11 +78,11 @@ bool FeatureSubstitutionTracker::track( sceneImageObject->fill(frame, __featureExtractingParams, computeExpectedArea()); - ImageRecognizer recognizer(*sceneImageObject.obj); + ImageRecognizer recognizer(*sceneImageObject.get()); const bool isTracked = recognizer.recognize( - *__target.obj, + 
*(__target.get()), __recogParams, contour, __objectScalingFactor); diff --git a/mv_image/image/src/Tracking/ImageTrackingModel.cpp b/mv_image/image/src/Tracking/ImageTrackingModel.cpp index 3d78550e..e0337ad7 100644 --- a/mv_image/image/src/Tracking/ImageTrackingModel.cpp +++ b/mv_image/image/src/Tracking/ImageTrackingModel.cpp @@ -28,6 +28,7 @@ #include <fstream> #include <unistd.h> #include <new> +#include <iomanip> namespace MediaVision { namespace Image { @@ -44,12 +45,13 @@ ImageTrackingModel::ImageTrackingModel() : ImageTrackingModel::ImageTrackingModel(const ImageTrackingModel& copy) : __target(copy.__target), - __tracker(copy.__tracker.empty()? NULL: copy.__tracker->clone()), __stabilizator(copy.__stabilizator), __location(copy.__location), __stabilizationParams(copy.__stabilizationParams) { - ; /* NULL */ + if (!copy.__tracker.empty()) + __tracker = copy.__tracker->clone(); + /* NULL */ } void ImageTrackingModel::setTarget(const ImageObject& target) diff --git a/mv_image/image/src/Tracking/MFTracker.cpp b/mv_image/image/src/Tracking/MFTracker.cpp index fd99019f..72695b2a 100644 --- a/mv_image/image/src/Tracking/MFTracker.cpp +++ b/mv_image/image/src/Tracking/MFTracker.cpp @@ -16,7 +16,8 @@ #include "Tracking/MFTracker.h" -#include <opencv/cv.h> +#include "opencv2/video/tracking.hpp" +#include "opencv2/imgproc.hpp" namespace MediaVision { namespace Image { @@ -205,13 +206,13 @@ bool MFTracker::medianFlowImpl( std::vector<float> errors(numberOfPointsToTrackOld); std::vector<cv::Mat> tempPyramid; - cv::buildOpticalFlowPyramid( + buildOpticalFlowPyramid( newImage_gray, tempPyramid, __params.mWindowSize, __params.mPyrMaxLevel); - cv::calcOpticalFlowPyrLK(__pyramid, + calcOpticalFlowPyrLK(__pyramid, tempPyramid, pointsToTrackOld, pointsToTrackNew, diff --git a/mv_image/image/src/mv_image_open.cpp b/mv_image/image/src/mv_image_open.cpp index 38df14f3..04d4a9af 100644 --- a/mv_image/image/src/mv_image_open.cpp +++ b/mv_image/image/src/mv_image_open.cpp @@ -26,7 
+26,7 @@ #include "Recognition/ImageRecognizer.h" #include "Tracking/ImageTrackingModel.h" -#include <opencv/cv.h> +#include <opencv2/core.hpp> namespace { diff --git a/mv_surveillance/surveillance/include/EventTriggerPersonAppearance.h b/mv_surveillance/surveillance/include/EventTriggerPersonAppearance.h index a4ce5a0f..43551077 100644 --- a/mv_surveillance/surveillance/include/EventTriggerPersonAppearance.h +++ b/mv_surveillance/surveillance/include/EventTriggerPersonAppearance.h @@ -27,9 +27,9 @@ #include "EventResult.h" #include "EventDefs.h" -#include "HoGDetector.h" -#include <opencv/cv.h> +#include <opencv2/opencv.hpp> +#include <opencv2/objdetect.hpp> #include <sys/time.h> @@ -187,7 +187,7 @@ private: CVRectangles __disappearedRects; - modifiedcv::HOGDescriptor __hogClassifier; /**< Classifier to be used for full body + cv::HOGDescriptor __hogClassifier; /**< Classifier to be used for full body person detection */ MVRectangles __detectedLocations; diff --git a/mv_surveillance/surveillance/include/HoGDetector.h b/mv_surveillance/surveillance/include/HoGDetector.h deleted file mode 100644 index b4fd68f7..00000000 --- a/mv_surveillance/surveillance/include/HoGDetector.h +++ /dev/null @@ -1,201 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. -// Copyright (C) 2009, Willow Garage Inc., all rights reserved. -// Third party copyrights are property of their respective owners. 
-// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. -// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ - -#ifndef __MEDIA_VISION_HOGDETECTOR_H__ -#define __MEDIA_VISION_HOGDETECTOR_H__ - -/** - * @file HOGDetector.h - * @brief This file contains structure of HOG detector. 
- */ - -#include "opencv2/core/core.hpp" -#include "opencv2/objdetect/objdetect.hpp" - -#include <vector> - -namespace modifiedcv { - -using namespace cv; - -struct HOGDescriptor { - enum { L2Hys = 0 }; - enum { DEFAULT_NLEVELS = 64 }; - - /* default constructor */ - HOGDescriptor() : - winSize(64, 128), - blockSize(16, 16), - blockStride(8, 8), - cellSize(8, 8), - nbins(9), - derivAperture(1), - winSigma(-1), - histogramNormType(HOGDescriptor::L2Hys), - L2HysThreshold(0.2), - gammaCorrection(true), - nlevels(HOGDescriptor::DEFAULT_NLEVELS) - {} - - /* constructor */ - HOGDescriptor( - Size _winSize, - Size _blockSize, - Size _blockStride, - Size _cellSize, - int _nbins, - int _derivAperture = 1, - double _winSigma = -1., - int _histogramNormType = L2Hys, - double _L2HysThreshold = 0.2, - bool _gammaCorrection = false, - int _nlevels = DEFAULT_NLEVELS) : - winSize(_winSize), - blockSize(_blockSize), - blockStride(_blockStride), - cellSize(_cellSize), - nbins(_nbins), - derivAperture(_derivAperture), - winSigma(_winSigma), - histogramNormType(_histogramNormType), - L2HysThreshold(_L2HysThreshold), - gammaCorrection(_gammaCorrection), - nlevels(_nlevels) - {} - - /* default destructor */ - virtual ~HOGDescriptor() {} - - size_t getDescriptorSize() const; - - bool checkDetectorSize() const; - - double getWinSigma() const; - - virtual void setSVMDetector(InputArray _svmdetector); - - virtual void compute( - const Mat& img, - CV_OUT vector<float>& descriptors, - Size winStride = Size(), - Size padding = Size(), - const vector<Point>& locations = vector<Point>()) const; - - /* with found weights output */ - virtual void detect( - const Mat& img, - CV_OUT vector<Point>& foundLocations, - CV_OUT vector<double>& weights, - double hitThreshold = 0., - Size winStride = Size(), - Size padding = Size(), - const vector<Point>& searchLocations = vector<Point>()) const; - - /* without found weights output */ - virtual void detect( - const Mat& img, - CV_OUT vector<Point>& 
foundLocations, - double hitThreshold = 0., - Size winStride = Size(), - Size padding = Size(), - const vector<Point>& searchLocations = vector<Point>()) const; - - /* with result weights output */ - virtual void detectMultiScale( - const Mat& img, - CV_OUT vector<Rect>& foundLocations, - CV_OUT vector<double>& foundWeights, - double hitThreshold = 0, - Size winStride = Size(), - Size padding = Size(), - double scale = 1.05, - double finalThreshold = 2.0, - bool useMeanshiftGrouping = false) const; - - /* without found weights output */ - virtual void detectMultiScale( - const Mat& img, - CV_OUT vector<Rect>& foundLocations, - double hitThreshold = 0., - Size winStride = Size(), - Size padding = Size(), - double scale = 1.05, - double finalThreshold = 2.0, - bool useMeanshiftGrouping = false) const; - - virtual void computeGradient( - const Mat& img, - CV_OUT Mat& grad, - CV_OUT Mat& angleOfs, - Size paddingTL = Size(), - Size paddingBR = Size()) const; - - static vector<float> getDefaultPeopleDetector(); - - static vector<float> getDaimlerPeopleDetector(); - - /* read/parse Dalal's alt model file */ - void readALTModel(std::string modelfile); - - void groupRectangles( - vector<cv::Rect>& rectList, - vector<double>& weights, - int groupThreshold, - double eps) const; - - Size winSize; - Size blockSize; - Size blockStride; - Size cellSize; - int nbins; - int derivAperture; - double winSigma; - int histogramNormType; - double L2HysThreshold; - bool gammaCorrection; - vector<float> svmDetector; - int nlevels; -}; - -} /* modifiedcv */ - -#endif /* __MEDIA_VISION_HOGDETECTOR_H__ */ diff --git a/mv_surveillance/surveillance/include/MFTracker.h b/mv_surveillance/surveillance/include/MFTracker.h index 947f6d4c..4a587140 100644 --- a/mv_surveillance/surveillance/include/MFTracker.h +++ b/mv_surveillance/surveillance/include/MFTracker.h @@ -17,7 +17,7 @@ #ifndef __MEDIA_VISION_MFTRACKER_H__ #define __MEDIA_VISION_MFTRACKER_H__ -#include <opencv2/core/core.hpp> +#include 
<opencv2/core.hpp> namespace mediavision { namespace surveillance { diff --git a/mv_surveillance/surveillance/include/SurveillanceHelper.h b/mv_surveillance/surveillance/include/SurveillanceHelper.h index 1ad303a1..9e0de187 100644 --- a/mv_surveillance/surveillance/include/SurveillanceHelper.h +++ b/mv_surveillance/surveillance/include/SurveillanceHelper.h @@ -24,7 +24,7 @@ #include <mv_common.h> -#include <opencv/cv.h> +#include <opencv2/core.hpp> namespace mediavision { namespace surveillance { diff --git a/mv_surveillance/surveillance/src/EventManager.cpp b/mv_surveillance/surveillance/src/EventManager.cpp index 77787f39..7dc2c092 100644 --- a/mv_surveillance/surveillance/src/EventManager.cpp +++ b/mv_surveillance/surveillance/src/EventManager.cpp @@ -22,7 +22,7 @@ #include "EventTriggerPersonRecognition.h" #include "EventTriggerMovementDetection.h" -#include <mv_private.h> +#include "mv_private.h" namespace mediavision { namespace surveillance { diff --git a/mv_surveillance/surveillance/src/EventTriggerMovementDetection.cpp b/mv_surveillance/surveillance/src/EventTriggerMovementDetection.cpp index af375378..6385f3a8 100644 --- a/mv_surveillance/surveillance/src/EventTriggerMovementDetection.cpp +++ b/mv_surveillance/surveillance/src/EventTriggerMovementDetection.cpp @@ -22,7 +22,7 @@ #include <mv_private.h> -#include "opencv2/highgui/highgui.hpp" +#include "opencv2/highgui.hpp" namespace mediavision { namespace surveillance { diff --git a/mv_surveillance/surveillance/src/EventTriggerPersonAppearance.cpp b/mv_surveillance/surveillance/src/EventTriggerPersonAppearance.cpp index 59bc61a8..f09f39db 100644 --- a/mv_surveillance/surveillance/src/EventTriggerPersonAppearance.cpp +++ b/mv_surveillance/surveillance/src/EventTriggerPersonAppearance.cpp @@ -21,8 +21,7 @@ #include "SurveillanceHelper.h" #include "EventTriggerMovementDetection.h" -#include "opencv2/opencv.hpp" -#include "opencv2/highgui/highgui.hpp" +#include "opencv2/highgui.hpp" #include <mv_private.h> 
diff --git a/mv_surveillance/surveillance/src/HoGDetector.cpp b/mv_surveillance/surveillance/src/HoGDetector.cpp deleted file mode 100644 index 4d1ea0c7..00000000 --- a/mv_surveillance/surveillance/src/HoGDetector.cpp +++ /dev/null @@ -1,1006 +0,0 @@ -/*M/////////////////////////////////////////////////////////////////////////////////////// -// -// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. -// -// By downloading, copying, installing or using the software you agree to this license. -// If you do not agree to this license, do not download, install, -// copy or use the software. -// -// -// License Agreement -// For Open Source Computer Vision Library -// -// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. -// Copyright (C) 2009, Willow Garage Inc., all rights reserved. -// Third party copyrights are property of their respective owners. -// -// Redistribution and use in source and binary forms, with or without modification, -// are permitted provided that the following conditions are met: -// -// * Redistribution's of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistribution's in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// * The name of the copyright holders may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// This software is provided by the copyright holders and contributors "as is" and -// any express or implied warranties, including, but not limited to, the implied -// warranties of merchantability and fitness for a particular purpose are disclaimed. 
-// In no event shall the Intel Corporation or contributors be liable for any direct, -// indirect, incidental, special, exemplary, or consequential damages -// (including, but not limited to, procurement of substitute goods or services; -// loss of use, data, or profits; or business interruption) however caused -// and on any theory of liability, whether in contract, strict liability, -// or tort (including negligence or otherwise) arising in any way out of -// the use of this software, even if advised of the possibility of such damage. -// -//M*/ -#include <stdio.h> -#include "HoGDetector.h" -#include "opencv2/imgproc/imgproc.hpp" -#include <iterator> - -#ifdef ENABLE_NEON -#include <arm_neon.h> -#endif - -#ifdef ENABLE_OMP -#include <sched.h> -#define NCORES 4 -static int coreids[NCORES] = {1, 2, 3, 4}; -#endif - -/****************************************************************************************\ - The code below is implementation of HOG (Histogram-of-Oriented Gradients) - descriptor and object detection, introduced by Navneet Dalal and Bill Triggs. - - The computed feature vectors are compatible with the - INRIA Object Detection and Localization Toolkit - (http://pascal.inrialpes.fr/soft/olt/) -\****************************************************************************************/ - -namespace modifiedcv { - -class ParallelLoopBodyWrapper { -public: - ParallelLoopBodyWrapper(const cv::ParallelLoopBody& _body, const cv::Range& _r) { - body = &_body; - wholeRange = _r; - nstripes = cvRound(wholeRange.end - wholeRange.start); - } - void operator()(const cv::Range& sr) const { - cv::Range r; - r.start = (int)(wholeRange.start + - ((uint64)sr.start*(wholeRange.end - wholeRange.start) + nstripes/2)/nstripes); - r.end = sr.end >= nstripes ? 
wholeRange.end : (int)(wholeRange.start + - ((uint64)sr.end*(wholeRange.end - wholeRange.start) + nstripes/2)/nstripes); - (*body)(r); - } - cv::Range stripeRange() const { - return cv::Range(0, nstripes); - } - -protected: - const cv::ParallelLoopBody* body; - cv::Range wholeRange; - int nstripes; -}; - -void parallel_for_(const cv::Range& range, const cv::ParallelLoopBody& body) -{ -#if defined ENABLE_OMP - ParallelLoopBodyWrapper pbody(body, range); - cv::Range stripeRange = pbody.stripeRange(); - int i = 0; - #pragma omp parallel for private(i) num_threads(NCORES) - for (i = stripeRange.start; i < stripeRange.end; ++i) { - cpu_set_t mask; - CPU_ZERO(&mask); - CPU_SET(coreids[i % 4], &mask); - - if (sched_setaffinity (0, sizeof(mask), &mask) == -1) { - printf("Could not set CPU Affinity, continuing..."); - } - - pbody(Range(i, i + 1)); - } -#else - cv::parallel_for_(range, body); -#endif -} - -size_t HOGDescriptor::getDescriptorSize() const -{ - return (size_t)nbins* - (blockSize.width/cellSize.width)* - (blockSize.height/cellSize.height)* - ((winSize.width - blockSize.width)/blockStride.width + 1)* - ((winSize.height - blockSize.height)/blockStride.height + 1); -} - -double HOGDescriptor::getWinSigma() const -{ - return winSigma >= 0 ? 
winSigma : (blockSize.width + blockSize.height)/8.; -} - -bool HOGDescriptor::checkDetectorSize() const -{ - size_t detectorSize = svmDetector.size(), descriptorSize = getDescriptorSize(); - return detectorSize == 0 || - detectorSize == descriptorSize || - detectorSize == descriptorSize + 1; -} - -void HOGDescriptor::setSVMDetector(InputArray _svmDetector) -{ - _svmDetector.getMat().convertTo(svmDetector, CV_32F); - CV_Assert(checkDetectorSize()); -} - -void HOGDescriptor::computeGradient(const Mat& img, Mat& grad, Mat& qangle, - Size paddingTL, Size paddingBR) const -{ - CV_Assert(img.type() == CV_8U); - - Size gradsize(img.cols + paddingTL.width + paddingBR.width, - img.rows + paddingTL.height + paddingBR.height); - grad.create(gradsize, CV_32FC2); /* <magnitude*(1-alpha), magnitude*alpha> */ - qangle.create(gradsize, CV_8UC2); /* [0..nbins-1] - quantized gradient orientation */ - Size wholeSize; - Point roiofs; - img.locateROI(wholeSize, roiofs); - - int i, x, y; - /* int cn = img.channels(); */ - - Mat_<float> _lut(1, 256); - const float* lut = &_lut(0, 0); - - if ( gammaCorrection ) - for ( i = 0; i < 256; i++ ) - _lut(0, i) = std::sqrt((float)i); - else - for ( i = 0; i < 256; i++ ) - _lut(0, i) = (float)i; - - AutoBuffer<int> mapbuf(gradsize.width + gradsize.height + 4); - int* xmap = (int*)mapbuf + 1; - int* ymap = xmap + gradsize.width + 2; - - const int borderType = (int)cv::BORDER_REFLECT_101; - - for ( x = -1; x < gradsize.width + 1; x++ ) - xmap[x] = cv::borderInterpolate(x - paddingTL.width + roiofs.x, - wholeSize.width, borderType) - roiofs.x; - for ( y = -1; y < gradsize.height + 1; y++ ) - ymap[y] = cv::borderInterpolate(y - paddingTL.height + roiofs.y, - wholeSize.height, borderType) - roiofs.y; - - /* x- & y- derivatives for the whole row */ - int width = gradsize.width; - AutoBuffer<float> _dbuf(width*4); - float* dbuf = _dbuf; - Mat Dx(1, width, CV_32F, dbuf); - Mat Dy(1, width, CV_32F, dbuf + width); - Mat Mag(1, width, CV_32F, dbuf + 
width*2); - Mat Angle(1, width, CV_32F, dbuf + width*3); - - int _nbins = nbins; - float angleScale = (float)(_nbins/CV_PI); - - for ( y = 0; y < gradsize.height; y++ ) { - const uchar* imgPtr = img.data + img.step*ymap[y]; - const uchar* prevPtr = img.data + img.step*ymap[y-1]; - const uchar* nextPtr = img.data + img.step*ymap[y+1]; - float* gradPtr = (float*)grad.ptr(y); - uchar* qanglePtr = (uchar*)qangle.ptr(y); - - for (x = 0; x < width; x++) { - int x1 = xmap[x]; - dbuf[x] = (float)(lut[imgPtr[xmap[x+1]]] - lut[imgPtr[xmap[x-1]]]); - dbuf[width + x] = (float)(lut[nextPtr[x1]] - lut[prevPtr[x1]]); - } - - cartToPolar(Dx, Dy, Mag, Angle, false); - - for (x = 0; x < width; x++) { - float mag = dbuf[x+width*2], angle = dbuf[x+width*3]*angleScale - 0.5f; - int hidx = cvFloor(angle); - angle -= hidx; - gradPtr[x*2] = mag*(1.f - angle); - gradPtr[x*2+1] = mag*angle; - - if ( hidx < 0 ) - hidx += _nbins; - else if ( hidx >= _nbins ) - hidx -= _nbins; - assert((unsigned)hidx < (unsigned)_nbins); - - qanglePtr[x*2] = (uchar)hidx; - hidx++; - hidx &= hidx < _nbins ? 
-1 : 0; - qanglePtr[x*2+1] = (uchar)hidx; - } - } -} - - -struct HOGCache { - struct BlockData { - BlockData() : histOfs(0), imgOffset() {} - int histOfs; - Point imgOffset; - }; - - struct PixData { - size_t gradOfs, qangleOfs; - int histOfs[4]; - float histWeights[4]; - float gradWeight; - }; - - HOGCache(); - HOGCache(const HOGDescriptor* descriptor, - const Mat& img, Size paddingTL, Size paddingBR, - bool useCache, Size cacheStride); - virtual ~HOGCache() {}; - virtual void init(const HOGDescriptor* descriptor, - const Mat& img, Size paddingTL, Size paddingBR, - bool useCache, Size cacheStride); - - Size windowsInImage(Size imageSize, Size winStride) const; - Rect getWindow(Size imageSize, Size winStride, int idx) const; - - const float* getBlock(Point pt, float* buf); - virtual void normalizeBlockHistogram(float* histogram) const; - - vector<PixData> pixData; - vector<BlockData> blockData; - - bool useCache; - vector<int> ymaxCached; - Size winSize, cacheStride; - Size nblocks, ncells; - int blockHistogramSize; - int count1, count2, count4; - Point imgoffset; - Mat_<float> blockCache; - Mat_<uchar> blockCacheFlags; - - Mat grad, qangle; - const HOGDescriptor* descriptor; -}; - - -HOGCache::HOGCache() -{ - useCache = false; - blockHistogramSize = count1 = count2 = count4 = 0; - descriptor = 0; -} - -HOGCache::HOGCache(const HOGDescriptor* _descriptor, - const Mat& _img, Size _paddingTL, Size _paddingBR, - bool _useCache, Size _cacheStride) -{ - init(_descriptor, _img, _paddingTL, _paddingBR, _useCache, _cacheStride); -} - -void HOGCache::init(const HOGDescriptor* _descriptor, - const Mat& _img, Size _paddingTL, Size _paddingBR, - bool _useCache, Size _cacheStride) -{ - descriptor = _descriptor; - cacheStride = _cacheStride; - useCache = _useCache; - - descriptor->computeGradient(_img, grad, qangle, _paddingTL, _paddingBR); - imgoffset = _paddingTL; - - winSize = descriptor->winSize; - Size blockSize = descriptor->blockSize; - Size blockStride = 
descriptor->blockStride; - Size cellSize = descriptor->cellSize; - int i, j, nbins = descriptor->nbins; - int rawBlockSize = blockSize.width*blockSize.height; - - nblocks = Size((winSize.width - blockSize.width)/blockStride.width + 1, - (winSize.height - blockSize.height)/blockStride.height + 1); - ncells = Size(blockSize.width/cellSize.width, blockSize.height/cellSize.height); - blockHistogramSize = ncells.width*ncells.height*nbins; - - if ( useCache ) { - Size cacheSize((grad.cols - blockSize.width)/cacheStride.width+1, - (winSize.height/cacheStride.height)+1); - blockCache.create(cacheSize.height, cacheSize.width*blockHistogramSize); - blockCacheFlags.create(cacheSize); - size_t cacheRows = blockCache.rows; - ymaxCached.resize(cacheRows); - for (size_t ii = 0; ii < cacheRows; ii++ ) - ymaxCached[ii] = -1; - } - - Mat_<float> weights(blockSize); - float sigma = (float)descriptor->getWinSigma(); - float scale = 1.f/(sigma*sigma*2); - - float blockHalfHeight = blockSize.height*0.5f; - float blockHalfWidth = blockSize.width*0.5f; - for (i = 0; i < blockSize.height; i++) - for (j = 0; j < blockSize.width; j++) { - float di = i - blockHalfHeight; - float dj = j - blockHalfWidth; - weights(i, j) = std::exp(-(di*di + dj*dj)*scale); - } - - blockData.resize(nblocks.width*nblocks.height); - pixData.resize(rawBlockSize*3); - - /* - * Initialize 2 lookup tables, pixData & blockData. - * Here is why: - * - * The detection algorithm runs in 4 nested loops (at each pyramid layer): - * loop over the windows within the input image - * loop over the blocks within each window - * loop over the cells within each block - * loop over the pixels in each cell - * - * As each of the loops runs over a 2-dimensional array, - * we could get 8(!) nested loops in total, which is very-very slow. - * - * To speed the things up, we do the following: - * 1. 
loop over windows is unrolled in the HOGDescriptor::{compute|detect} methods; - * inside we compute the current search window using getWindow() method. - * Yes, it involves some overhead (function call + couple of divisions), - * but it's tiny in fact. - * 2. loop over the blocks is also unrolled. Inside we use pre-computed blockData[j] - * to set up gradient and histogram pointers. - * 3. loops over cells and pixels in each cell are merged - * (since there is no overlap between cells, each pixel in the block is processed once) - * and also unrolled. Inside we use PixData[k] to access the gradient values and - * update the histogram - */ - - count1 = count2 = count4 = 0; - for ( j = 0; j < blockSize.width; j++ ) - for ( i = 0; i < blockSize.height; i++ ) { - PixData* data = 0; - float cellX = (j+0.5f)/cellSize.width - 0.5f; - float cellY = (i+0.5f)/cellSize.height - 0.5f; - int icellX0 = cvFloor(cellX); - int icellY0 = cvFloor(cellY); - int icellX1 = icellX0 + 1, icellY1 = icellY0 + 1; - cellX -= icellX0; - cellY -= icellY0; - - if ( (unsigned)icellX0 < (unsigned)ncells.width && - (unsigned)icellX1 < (unsigned)ncells.width ) { - if ( (unsigned)icellY0 < (unsigned)ncells.height && - (unsigned)icellY1 < (unsigned)ncells.height ) { - data = &pixData[rawBlockSize*2 + (count4++)]; - data->histOfs[0] = (icellX0*ncells.height + icellY0)*nbins; - data->histWeights[0] = (1.f - cellX)*(1.f - cellY); - data->histOfs[1] = (icellX1*ncells.height + icellY0)*nbins; - data->histWeights[1] = cellX*(1.f - cellY); - data->histOfs[2] = (icellX0*ncells.height + icellY1)*nbins; - data->histWeights[2] = (1.f - cellX)*cellY; - data->histOfs[3] = (icellX1*ncells.height + icellY1)*nbins; - data->histWeights[3] = cellX*cellY; - } else { - data = &pixData[rawBlockSize + (count2++)]; - if ( (unsigned)icellY0 < (unsigned)ncells.height ) { - icellY1 = icellY0; - cellY = 1.f - cellY; - } - data->histOfs[0] = (icellX0*ncells.height + icellY1)*nbins; - data->histWeights[0] = (1.f - cellX)*cellY; - 
data->histOfs[1] = (icellX1*ncells.height + icellY1)*nbins; - data->histWeights[1] = cellX*cellY; - data->histOfs[2] = data->histOfs[3] = 0; - data->histWeights[2] = data->histWeights[3] = 0; - } - } else { - if ( (unsigned)icellX0 < (unsigned)ncells.width ) { - icellX1 = icellX0; - cellX = 1.f - cellX; - } - - if ( (unsigned)icellY0 < (unsigned)ncells.height && - (unsigned)icellY1 < (unsigned)ncells.height ) { - data = &pixData[rawBlockSize + (count2++)]; - data->histOfs[0] = (icellX1*ncells.height + icellY0)*nbins; - data->histWeights[0] = cellX*(1.f - cellY); - data->histOfs[1] = (icellX1*ncells.height + icellY1)*nbins; - data->histWeights[1] = cellX*cellY; - data->histOfs[2] = data->histOfs[3] = 0; - data->histWeights[2] = data->histWeights[3] = 0; - } else { - data = &pixData[count1++]; - if ( (unsigned)icellY0 < (unsigned)ncells.height ) { - icellY1 = icellY0; - cellY = 1.f - cellY; - } - data->histOfs[0] = (icellX1*ncells.height + icellY1)*nbins; - data->histWeights[0] = cellX*cellY; - data->histOfs[1] = data->histOfs[2] = data->histOfs[3] = 0; - data->histWeights[1] = data->histWeights[2] = data->histWeights[3] = 0; - } - } - data->gradOfs = (grad.cols*i + j)*2; - data->qangleOfs = (qangle.cols*i + j)*2; - data->gradWeight = weights(i, j); - } - - assert(count1 + count2 + count4 == rawBlockSize); - /* defragment pixData */ - for ( j = 0; j < count2; j++ ) - pixData[j + count1] = pixData[j + rawBlockSize]; - for ( j = 0; j < count4; j++ ) - pixData[j + count1 + count2] = pixData[j + rawBlockSize*2]; - count2 += count1; - count4 += count2; - - /* initialize blockData */ - for ( j = 0; j < nblocks.width; j++ ) - for ( i = 0; i < nblocks.height; i++ ) { - BlockData& data = blockData[j*nblocks.height + i]; - data.histOfs = (j*nblocks.height + i)*blockHistogramSize; - data.imgOffset = Point(j*blockStride.width, i*blockStride.height); - } -} - - -const float* HOGCache::getBlock(Point pt, float* buf) -{ - float* blockHist = buf; - assert(descriptor != 0); - - Size 
blockSize = descriptor->blockSize; - pt += imgoffset; - - CV_Assert( (unsigned)pt.x <= (unsigned)(grad.cols - blockSize.width) && - (unsigned)pt.y <= (unsigned)(grad.rows - blockSize.height) ); - - if ( useCache ) { - CV_Assert(pt.x % cacheStride.width == 0 && - pt.y % cacheStride.height == 0); - Point cacheIdx(pt.x/cacheStride.width, - (pt.y/cacheStride.height) % blockCache.rows); - if ( pt.y != ymaxCached[cacheIdx.y] ) { - Mat_<uchar> cacheRow = blockCacheFlags.row(cacheIdx.y); - cacheRow = (uchar)0; - ymaxCached[cacheIdx.y] = pt.y; - } - - blockHist = &blockCache[cacheIdx.y][cacheIdx.x*blockHistogramSize]; - uchar& computedFlag = blockCacheFlags(cacheIdx.y, cacheIdx.x); - if ( computedFlag != 0 ) - return blockHist; - computedFlag = (uchar)1; /* set it at once, before actual computing */ - } - - int k, C1 = count1, C2 = count2, C4 = count4; - const float* gradPtr = (const float*)(grad.data + grad.step*pt.y) + pt.x*2; - const uchar* qanglePtr = qangle.data + qangle.step*pt.y + pt.x*2; - - CV_Assert(blockHist != 0); - for ( k = 0; k < blockHistogramSize; k++ ) - blockHist[k] = 0.f; - - const PixData* _pixData = &pixData[0]; - - for ( k = 0; k < C1; k++ ) { - const PixData& pk = _pixData[k]; - const float* a = gradPtr + pk.gradOfs; - float w = pk.gradWeight*pk.histWeights[0]; - const uchar* h = qanglePtr + pk.qangleOfs; - int h0 = h[0], h1 = h[1]; - float* hist = blockHist + pk.histOfs[0]; - float t0 = hist[h0] + a[0]*w; - float t1 = hist[h1] + a[1]*w; - hist[h0] = t0; - hist[h1] = t1; - } - - for ( ; k < C2; k++ ) { - const PixData& pk = _pixData[k]; - const float* a = gradPtr + pk.gradOfs; - float w, t0, t1, a0 = a[0], a1 = a[1]; - const uchar* h = qanglePtr + pk.qangleOfs; - int h0 = h[0], h1 = h[1]; - - float* hist = blockHist + pk.histOfs[0]; - w = pk.gradWeight*pk.histWeights[0]; - t0 = hist[h0] + a0*w; - t1 = hist[h1] + a1*w; - hist[h0] = t0; - hist[h1] = t1; - - hist = blockHist + pk.histOfs[1]; - w = pk.gradWeight*pk.histWeights[1]; - t0 = hist[h0] + a0*w; 
- t1 = hist[h1] + a1*w; - hist[h0] = t0; - hist[h1] = t1; - } - - for ( ; k < C4; k++ ) { - const PixData& pk = _pixData[k]; - const float* a = gradPtr + pk.gradOfs; - float w, t0, t1, a0 = a[0], a1 = a[1]; - const uchar* h = qanglePtr + pk.qangleOfs; - int h0 = h[0], h1 = h[1]; - - float* hist = blockHist + pk.histOfs[0]; - w = pk.gradWeight*pk.histWeights[0]; - t0 = hist[h0] + a0*w; - t1 = hist[h1] + a1*w; - hist[h0] = t0; - hist[h1] = t1; - - hist = blockHist + pk.histOfs[1]; - w = pk.gradWeight*pk.histWeights[1]; - t0 = hist[h0] + a0*w; - t1 = hist[h1] + a1*w; - hist[h0] = t0; - hist[h1] = t1; - - hist = blockHist + pk.histOfs[2]; - w = pk.gradWeight*pk.histWeights[2]; - t0 = hist[h0] + a0*w; - t1 = hist[h1] + a1*w; - hist[h0] = t0; - hist[h1] = t1; - - hist = blockHist + pk.histOfs[3]; - w = pk.gradWeight*pk.histWeights[3]; - t0 = hist[h0] + a0*w; - t1 = hist[h1] + a1*w; - hist[h0] = t0; - hist[h1] = t1; - } - - normalizeBlockHistogram(blockHist); - - return blockHist; -} - -void HOGCache::normalizeBlockHistogram(float* _hist) const -{ -#ifdef ENABLE_NEON - /* NEON vector for loading the histogram to the memory */ - float32x4_t hist_v; - /* Initialize the accumulator for summation storing */ - float32x4_t acc = vdupq_n_f32(0.f); -#endif - - /* Histogram pointer in the memory */ - float *hist_ptr = &_hist[0]; - /* Variable to store values of summations */ - float sum = 0.f; - size_t sz = blockHistogramSize; - -#ifdef ENABLE_NEON - for (; sz != 0u; sz -= 4u) { - hist_v = vld1q_f32(hist_ptr); - acc = vmlaq_f32(acc, hist_v, hist_v); - hist_ptr += 4; - } - - sum += vgetq_lane_f32(acc, 0) + vgetq_lane_f32(acc, 1) + - vgetq_lane_f32(acc, 2) + vgetq_lane_f32(acc, 3); - - /* Reset accumulator */ - acc = vdupq_n_f32(0.f); - - sz = blockHistogramSize; - hist_ptr = &_hist[0]; -#else - for (size_t i = 0; i < sz; ++i) - sum += hist_ptr[i] * hist_ptr[i]; -#endif - - float scale = 1.f / (std::sqrt(sum) + sz * 0.1f); - sum = 0.f; - -#ifdef ENABLE_NEON - float32x4_t thres_v = 
vdupq_n_f32((float)descriptor->L2HysThreshold); - - for (; sz != 0; sz -= 4) { - /* Find minimal value among threshold and histogram value, accumulate - * this value squared */ - hist_v = vminq_f32(vmulq_n_f32(vld1q_f32(hist_ptr), scale), thres_v); - acc = vmlaq_f32(acc, hist_v, hist_v); - /* Update histograms in memory according with found min values */ - vst1q_f32(hist_ptr, hist_v); - hist_ptr += 4; - } - - sum += vgetq_lane_f32(acc, 0) + vgetq_lane_f32(acc, 1) + - vgetq_lane_f32(acc, 2) + vgetq_lane_f32(acc, 3); - -#else - float thresh = (float)descriptor->L2HysThreshold; - for (size_t i = 0; i < sz; ++i) { - hist_ptr[i] = std::min(hist_ptr[i] * scale, thresh); - sum += hist_ptr[i] * hist_ptr[i]; - } -#endif - - scale = 1.f / (std::sqrt(sum) + 1e-3f); - -#ifdef ENABLE_NEON - sz = blockHistogramSize; - hist_ptr = &_hist[0]; - - /* Scale histogram (normalize): */ - for (; sz != 0; sz -= 4) { - vst1q_f32(hist_ptr, vmulq_n_f32(vld1q_f32(hist_ptr), scale)); - hist_ptr += 4; - } -#else - for (size_t i = 0; i < sz; i++ ) - hist_ptr[i] *= scale; -#endif -} - - -Size HOGCache::windowsInImage(Size imageSize, Size winStride) const -{ - return Size((imageSize.width - winSize.width)/winStride.width + 1, - (imageSize.height - winSize.height)/winStride.height + 1); -} - -Rect HOGCache::getWindow(Size imageSize, Size winStride, int idx) const -{ - int nwindowsX = (imageSize.width - winSize.width)/winStride.width + 1; - int y = idx / nwindowsX; - int x = idx - nwindowsX*y; - return Rect( x*winStride.width, y*winStride.height, winSize.width, winSize.height ); -} - - -void HOGDescriptor::compute(const Mat& img, vector<float>& descriptors, - Size winStride, Size padding, - const vector<Point>& locations) const -{ - if ( winStride == Size() ) - winStride = cellSize; - Size cacheStride(gcd(winStride.width, blockStride.width), - gcd(winStride.height, blockStride.height)); - size_t nwindows = locations.size(); - padding.width = (int)alignSize(std::max(padding.width, 0), 
cacheStride.width); - padding.height = (int)alignSize(std::max(padding.height, 0), cacheStride.height); - Size paddedImgSize(img.cols + padding.width*2, img.rows + padding.height*2); - - HOGCache cache(this, img, padding, padding, nwindows == 0, cacheStride); - - if ( !nwindows ) - nwindows = cache.windowsInImage(paddedImgSize, winStride).area(); - - const HOGCache::BlockData* blockData = &cache.blockData[0]; - - int nblocks = cache.nblocks.area(); - int blockHistogramSize = cache.blockHistogramSize; - size_t dsize = getDescriptorSize(); - descriptors.resize(dsize*nwindows); - - for ( size_t i = 0; i < nwindows; i++ ) { - float* descriptor = &descriptors[i*dsize]; - - Point pt0; - if ( !locations.empty() ) { - pt0 = locations[i]; - if ( pt0.x < -padding.width || pt0.x > img.cols + padding.width - winSize.width || - pt0.y < -padding.height || pt0.y > img.rows + padding.height - winSize.height ) - continue; - } else { - pt0 = cache.getWindow(paddedImgSize, winStride, (int)i).tl() - Point(padding); - CV_Assert(pt0.x % cacheStride.width == 0 && pt0.y % cacheStride.height == 0); - } - - for ( int j = 0; j < nblocks; j++ ) { - const HOGCache::BlockData& bj = blockData[j]; - Point pt = pt0 + bj.imgOffset; - - float* dst = descriptor + bj.histOfs; - const float* src = cache.getBlock(pt, dst); - if ( src != dst ) - for ( int k = 0; k < blockHistogramSize; k++ ) - dst[k] = src[k]; - } - } -} - - -void HOGDescriptor::detect(const Mat& img, - vector<Point>& hits, vector<double>& weights, double hitThreshold, - Size winStride, Size padding, const vector<Point>& locations) const -{ - hits.clear(); - if ( svmDetector.empty() ) - return; - - if ( winStride == Size() ) - winStride = cellSize; - Size cacheStride(gcd(winStride.width, blockStride.width), - gcd(winStride.height, blockStride.height)); - size_t nwindows = locations.size(); - padding.width = (int)alignSize(std::max(padding.width, 0), cacheStride.width); - padding.height = (int)alignSize(std::max(padding.height, 0), 
cacheStride.height); - Size paddedImgSize(img.cols + padding.width*2, img.rows + padding.height*2); - - HOGCache cache(this, img, padding, padding, nwindows == 0, cacheStride); - - if ( !nwindows ) - nwindows = cache.windowsInImage(paddedImgSize, winStride).area(); - - const HOGCache::BlockData* blockData = &cache.blockData[0]; - - int nblocks = cache.nblocks.area(); - int blockHistogramSize = cache.blockHistogramSize; - size_t dsize = getDescriptorSize(); - - double rho = svmDetector.size() > dsize ? svmDetector[dsize] : 0; - vector<float> blockHist(blockHistogramSize); - - for ( size_t i = 0; i < nwindows; i++ ) { - Point pt0; - if ( !locations.empty() ) { - pt0 = locations[i]; - if ( pt0.x < -padding.width || pt0.x > img.cols + padding.width - winSize.width || - pt0.y < -padding.height || pt0.y > img.rows + padding.height - winSize.height ) - continue; - } else { - pt0 = cache.getWindow(paddedImgSize, winStride, (int)i).tl() - Point(padding); - CV_Assert(pt0.x % cacheStride.width == 0 && pt0.y % cacheStride.height == 0); - } - double s = rho; - const float* svmVec = &svmDetector[0]; - int j, k; - - for ( j = 0; j < nblocks; j++, svmVec += blockHistogramSize ) { - const HOGCache::BlockData& bj = blockData[j]; - Point pt = pt0 + bj.imgOffset; - - const float* vec = cache.getBlock(pt, &blockHist[0]); -#ifdef ENABLE_NEON - float32x4_t vec_v; /* NEON feature vector */ - float32x4_t svm_v; /* NEON SVM feature weights */ - float32x4_t acc = vdupq_n_f32(0.f); /* NEON partial sum */ - for ( k = 0; k <= blockHistogramSize - 4; k += 4 ) { - vec_v = vld1q_f32(vec + k); - svm_v = vld1q_f32(svmVec + k); - acc = vmlaq_f32(acc, vec_v, svm_v); - } - - s += vgetq_lane_f32(acc, 0) + vgetq_lane_f32(acc, 1) + - vgetq_lane_f32(acc, 2) + vgetq_lane_f32(acc, 3); - -#else - for ( k = 0; k <= blockHistogramSize - 4; k += 4 ) - s += vec[k]*svmVec[k] + vec[k+1]*svmVec[k+1] + - vec[k+2]*svmVec[k+2] + vec[k+3]*svmVec[k+3]; -#endif - for ( ; k < blockHistogramSize; k++ ) - s += 
vec[k]*svmVec[k]; - } - - if ( s >= hitThreshold ) { - hits.push_back(pt0); - weights.push_back(s); - } - } -} - -void HOGDescriptor::detect(const Mat& img, vector<Point>& hits, double hitThreshold, - Size winStride, Size padding, const vector<Point>& locations) const -{ - vector<double> weightsV; - detect(img, hits, weightsV, hitThreshold, winStride, padding, locations); -} - -class HOGInvoker : public ParallelLoopBody { - public: - HOGInvoker(const HOGDescriptor* _hog, const Mat& _img, - double _hitThreshold, Size _winStride, Size _padding, - const double* _levelScale, std::vector<Rect> * _vec, Mutex* _mtx, - std::vector<double>* _weights = 0, std::vector<double>* _scales = 0) { - hog = _hog; - img = _img; - hitThreshold = _hitThreshold; - winStride = _winStride; - padding = _padding; - levelScale = _levelScale; - vec = _vec; - weights = _weights; - scales = _scales; - mtx = _mtx; - } - - void operator()(const Range& range) const { - int i, i1 = range.start, i2 = range.end; - double minScale = i1 > 0 ? levelScale[i1] : i2 > 1 ? 
levelScale[i1+1] : std::max(img.cols, img.rows); - Size maxSz(cvCeil(img.cols/minScale), cvCeil(img.rows/minScale)); - Mat smallerImgBuf(maxSz, img.type()); - vector<Point> locations; - vector<double> hitsWeights; - - Size wholeSize; - Point offset; - img.locateROI(wholeSize, offset); - - for ( i = i1; i < i2; i++ ) { - double scale = levelScale[i]; - Size sz(cvRound(img.cols/scale), cvRound(img.rows/scale)); - Mat smallerImg(sz, img.type(), smallerImgBuf.data); - if (sz == img.size()) - smallerImg = Mat(sz, img.type(), img.data, img.step); - else - resize(img, smallerImg, sz); - hog->detect(smallerImg, locations, hitsWeights, hitThreshold, winStride, padding); - - Size scaledWinSize = Size(cvRound(hog->winSize.width*scale), cvRound(hog->winSize.height*scale)); - - mtx->lock(); - for ( size_t j = 0; j < locations.size(); j++ ) { - vec->push_back(Rect(cvRound(locations[j].x*scale), - cvRound(locations[j].y*scale), - scaledWinSize.width, scaledWinSize.height)); - if (scales) { - scales->push_back(scale); - } - } - mtx->unlock(); - - if (weights && (!hitsWeights.empty())) { - mtx->lock(); - for (size_t j = 0; j < locations.size(); j++) { - weights->push_back(hitsWeights[j]); - } - mtx->unlock(); - } - } - } - - const HOGDescriptor* hog; - Mat img; - double hitThreshold; - Size winStride; - Size padding; - const double* levelScale; - std::vector<Rect>* vec; - std::vector<double>* weights; - std::vector<double>* scales; - Mutex* mtx; -}; - - -void HOGDescriptor::detectMultiScale( - const Mat& img, vector<Rect>& foundLocations, vector<double>& foundWeights, - double hitThreshold, Size winStride, Size padding, - double scale0, double finalThreshold, bool useMeanshiftGrouping) const -{ - double scale = 1.; - int levels = 0; - - vector<double> levelScale; - for ( levels = 0; levels < nlevels; levels++ ) { - levelScale.push_back(scale); - if ( cvRound(img.cols/scale) < winSize.width || - cvRound(img.rows/scale) < winSize.height || - scale0 <= 1 ) - break; - scale *= scale0; 
- } - levels = std::max(levels, 1); - levelScale.resize(levels); - - std::vector<Rect> allCandidates; - std::vector<double> tempScales; - std::vector<double> tempWeights; - std::vector<double> foundScales; - Mutex mtx; - - modifiedcv::parallel_for_(Range(0, (int)levelScale.size()), - HOGInvoker(this, img, hitThreshold, winStride, padding, &levelScale[0], &allCandidates, &mtx, &tempWeights, &tempScales)); - - std::copy(tempScales.begin(), tempScales.end(), back_inserter(foundScales)); - foundLocations.clear(); - std::copy(allCandidates.begin(), allCandidates.end(), back_inserter(foundLocations)); - foundWeights.clear(); - std::copy(tempWeights.begin(), tempWeights.end(), back_inserter(foundWeights)); - - if ( useMeanshiftGrouping ) { - groupRectangles_meanshift(foundLocations, foundWeights, foundScales, finalThreshold, winSize); - } else { - groupRectangles(foundLocations, foundWeights, (int)finalThreshold, 0.2); - } -} - -void HOGDescriptor::detectMultiScale(const Mat& img, vector<Rect>& foundLocations, - double hitThreshold, Size winStride, Size padding, - double scale0, double finalThreshold, bool useMeanshiftGrouping) const -{ - vector<double> foundWeights; - detectMultiScale(img, foundLocations, foundWeights, hitThreshold, winStride, - padding, scale0, finalThreshold, useMeanshiftGrouping); -} - -void HOGDescriptor::groupRectangles(vector<cv::Rect>& rectList, vector<double>& weights, int groupThreshold, double eps) const -{ - if ( groupThreshold <= 0 || rectList.empty() ) { - return; - } - - CV_Assert(rectList.size() == weights.size()); - - vector<int> labels; - int nclasses = partition(rectList, labels, SimilarRects(eps)); - - vector<cv::Rect_<double> > rrects(nclasses); - vector<int> numInClass(nclasses, 0); - vector<double> foundWeights(nclasses, DBL_MIN); - int i, j, nlabels = (int)labels.size(); - - for ( i = 0; i < nlabels; i++ ) { - int cls = labels[i]; - rrects[cls].x += rectList[i].x; - rrects[cls].y += rectList[i].y; - rrects[cls].width += 
rectList[i].width; - rrects[cls].height += rectList[i].height; - foundWeights[cls] = max(foundWeights[cls], weights[i]); - numInClass[cls]++; - } - - for ( i = 0; i < nclasses; i++ ) { - /* find the average of all ROI in the cluster */ - cv::Rect_<double> r = rrects[i]; - double s = 1.0/numInClass[i]; - rrects[i] = cv::Rect_<double>(cv::saturate_cast<double>(r.x*s), - cv::saturate_cast<double>(r.y*s), - cv::saturate_cast<double>(r.width*s), - cv::saturate_cast<double>(r.height*s)); - } - - rectList.clear(); - weights.clear(); - - for ( i = 0; i < nclasses; i++ ) { - cv::Rect r1 = rrects[i]; - int n1 = numInClass[i]; - double w1 = foundWeights[i]; - if ( n1 <= groupThreshold ) - continue; - /* filter out small rectangles inside large rectangles */ - for ( j = 0; j < nclasses; j++ ) { - int n2 = numInClass[j]; - - if ( j == i || n2 <= groupThreshold ) - continue; - - cv::Rect r2 = rrects[j]; - - int dx = cv::saturate_cast<int>(r2.width * eps); - int dy = cv::saturate_cast<int>(r2.height * eps); - - if ( r1.x >= r2.x - dx && - r1.y >= r2.y - dy && - r1.x + r1.width <= r2.x + r2.width + dx && - r1.y + r1.height <= r2.y + r2.height + dy && - (n2 > std::max(3, n1) || n1 < 3) ) - break; - } - - if ( j == nclasses ) { - rectList.push_back(r1); - weights.push_back(w1); - } - } -} -} diff --git a/mv_surveillance/surveillance/src/MFTracker.cpp b/mv_surveillance/surveillance/src/MFTracker.cpp index b5e6da59..85b98033 100644 --- a/mv_surveillance/surveillance/src/MFTracker.cpp +++ b/mv_surveillance/surveillance/src/MFTracker.cpp @@ -16,7 +16,8 @@ #include "MFTracker.h" -#include <opencv/cv.h> +#include "opencv2/video/tracking.hpp" +#include "opencv2/imgproc.hpp" namespace mediavision { namespace surveillance { @@ -169,13 +170,13 @@ bool MFTracker::medianFlowImpl( std::vector<float> errors(numberOfPointsToTrackOld); std::vector<cv::Mat> tempPyramid; - cv::buildOpticalFlowPyramid( + buildOpticalFlowPyramid( newImage_gray, tempPyramid, __params.mWindowSize, __params.mPyrMaxLevel); 
- cv::calcOpticalFlowPyrLK(__pyramid, + calcOpticalFlowPyrLK(__pyramid, tempPyramid, pointsToTrackOld, pointsToTrackNew, diff --git a/mv_surveillance/surveillance/src/SurveillanceHelper.cpp b/mv_surveillance/surveillance/src/SurveillanceHelper.cpp index bbd92e90..d2dddcc6 100644 --- a/mv_surveillance/surveillance/src/SurveillanceHelper.cpp +++ b/mv_surveillance/surveillance/src/SurveillanceHelper.cpp @@ -16,9 +16,10 @@ #include "SurveillanceHelper.h" -#include <mv_private.h> +#include "mv_private.h" -#include "opencv2/highgui/highgui.hpp" +#include "opencv2/imgproc.hpp" +#include "opencv2/highgui.hpp" namespace mediavision { namespace surveillance { diff --git a/packaging/capi-media-vision.spec b/packaging/capi-media-vision.spec index a66512be..56ee7bb2 100644 --- a/packaging/capi-media-vision.spec +++ b/packaging/capi-media-vision.spec @@ -1,6 +1,6 @@ Name: capi-media-vision Summary: Media Vision library for Tizen Native API -Version: 0.3.30 +Version: 0.4.0 Release: 1 Group: Multimedia/Framework License: Apache-2.0 and BSD-3-Clause @@ -10,7 +10,7 @@ BuildRequires: pkgconfig(capi-media-tool) BuildRequires: pkgconfig(libtbm) BuildRequires: pkgconfig(dlog) BuildRequires: pkgconfig(capi-system-info) -BuildRequires: pkgconfig(opencv) +BuildRequires: pkgconfig(opencv) >= 3.4.0 BuildRequires: pkgconfig(zbar) BuildRequires: pkgconfig(glib-2.0) # Change to the pkgconfig(zint) after zint package refactor diff --git a/test/testsuites/common/image_helper/src/ImageHelper.cpp b/test/testsuites/common/image_helper/src/ImageHelper.cpp index b72c2e36..a02486c8 100644 --- a/test/testsuites/common/image_helper/src/ImageHelper.cpp +++ b/test/testsuites/common/image_helper/src/ImageHelper.cpp @@ -23,9 +23,9 @@ #include <setjmp.h> -#include <opencv2/core/core.hpp> -#include <opencv2/highgui/highgui.hpp> -#include <opencv2/imgproc/imgproc.hpp> +#include <opencv2/core.hpp> +#include <opencv2/highgui.hpp> +#include <opencv2/imgproc.hpp> /** * @file ImageHelper.cpp diff --git 
a/test/testsuites/common/image_helper/src/image_helper.cpp b/test/testsuites/common/image_helper/src/image_helper.cpp index 616e4117..c4d96980 100644 --- a/test/testsuites/common/image_helper/src/image_helper.cpp +++ b/test/testsuites/common/image_helper/src/image_helper.cpp @@ -19,7 +19,7 @@ #include "mv_private.h" #include "ImageHelper.h" -#include <opencv2/core/core.hpp> +#include <opencv2/core.hpp> /** * @file image_helper.cpp |