author     SeokHoon Lee <andy.shlee@samsung.com>      2016-04-12 17:40:15 +0900
committer  Tae-Young Chung <ty83.chung@samsung.com>   2016-04-12 19:22:32 +0900
commit     ec7d1dd82e4ec3c46a5e9df291bfc90e906ba5d1 (patch)
tree       6fee404ff02a7914880d34d6a6e477594b14413c
parent     901c93be027012ea1c52fc1ce09d33639a99ada4 (diff)
Signed-off-by: SeokHoon Lee <andy.shlee@samsung.com>
Change-Id: Ib87112bce437a1cbc8821b39144b7ed2687073f3
-rw-r--r--  mv_image/image/src/Recognition/ImageObject.cpp                       | 80
-rw-r--r--  mv_image/image/src/Tracking/ImageTrackingModel.cpp                   | 48
-rw-r--r--  mv_image/image/src/Tracking/MFTracker.cpp                            | 32
-rw-r--r--  mv_surveillance/surveillance/src/EventTriggerPersonAppearance.cpp    | 28
-rw-r--r--  mv_surveillance/surveillance/src/EventTriggerPersonRecognition.cpp   |  5
-rw-r--r--  packaging/capi-media-vision.spec                                     |  2
-rw-r--r--  test/testsuites/common/image_helper/src/ImageHelper.cpp              |  2

7 files changed, 93 insertions(+), 104 deletions(-)
diff --git a/mv_image/image/src/Recognition/ImageObject.cpp b/mv_image/image/src/Recognition/ImageObject.cpp
index d9f99af9..41d2f8e4 100644
--- a/mv_image/image/src/Recognition/ImageObject.cpp
+++ b/mv_image/image/src/Recognition/ImageObject.cpp
@@ -200,7 +200,7 @@ int ImageObject::save(const char *fileName) const
return MEDIA_VISION_ERROR_PERMISSION_DENIED;
}
- out<<(*this);
+ out << (*this);
out.close();
LOGI("[%s] Image object is saved.", __FUNCTION__);
@@ -244,44 +244,44 @@ int ImageObject::load(const char *fileName)
std::ostream& operator << (std::ostream& os, const ImageObject& obj)
{
- os<<std::setprecision(7);
+ os << std::setprecision(7);
- os<<obj.m_isEmpty<<'\n';
- os<<obj.m_isLabeled<<'\n';
- os<<obj.m_label<<'\n';
+ os << obj.m_isEmpty << '\n';
+ os << obj.m_isLabeled << '\n';
+ os << obj.m_label << '\n';
- os<<obj.m_boundingContour.size()<<'\n';
+ os << obj.m_boundingContour.size() << '\n';
for (size_t pointNum = 0u; pointNum < obj.m_boundingContour.size(); ++pointNum) {
- os<<obj.m_boundingContour[pointNum].x<<' ';
- os<<obj.m_boundingContour[pointNum].y<<'\n';
+ os << obj.m_boundingContour[pointNum].x << ' ';
+ os << obj.m_boundingContour[pointNum].y << '\n';
}
const size_t numberOfKeypoints = obj.m_features.m_objectKeypoints.size();
- os<<numberOfKeypoints<<'\n';
+ os << numberOfKeypoints << '\n';
for (size_t keypointNum = 0u; keypointNum < numberOfKeypoints; ++keypointNum) {
- os<<obj.m_features.m_objectKeypoints[keypointNum].pt.x<<' ';
- os<<obj.m_features.m_objectKeypoints[keypointNum].pt.y<<' ';
- os<<obj.m_features.m_objectKeypoints[keypointNum].size<<' ';
- os<<obj.m_features.m_objectKeypoints[keypointNum].response<<' ';
- os<<obj.m_features.m_objectKeypoints[keypointNum].angle<<' ';
- os<<obj.m_features.m_objectKeypoints[keypointNum].octave<<' ';
- os<<obj.m_features.m_objectKeypoints[keypointNum].class_id<<'\n';
+ os << obj.m_features.m_objectKeypoints[keypointNum].pt.x << ' ';
+ os << obj.m_features.m_objectKeypoints[keypointNum].pt.y << ' ';
+ os << obj.m_features.m_objectKeypoints[keypointNum].size << ' ';
+ os << obj.m_features.m_objectKeypoints[keypointNum].response << ' ';
+ os << obj.m_features.m_objectKeypoints[keypointNum].angle << ' ';
+ os << obj.m_features.m_objectKeypoints[keypointNum].octave << ' ';
+ os << obj.m_features.m_objectKeypoints[keypointNum].class_id << '\n';
}
const int numberOfDescriptors = obj.m_features.m_objectDescriptors.rows;
const int sizeOfDescriptor = obj.m_features.m_objectDescriptors.cols;
- os<<numberOfDescriptors<<' ';
- os<<sizeOfDescriptor<<' ';
- os<<obj.m_features.m_objectDescriptors.type()<<'\n';
+ os << numberOfDescriptors << ' ';
+ os << sizeOfDescriptor << ' ';
+ os << obj.m_features.m_objectDescriptors.type() << '\n';
for (int descriptorNum = 0; descriptorNum < numberOfDescriptors;
- ++descriptorNum, os<<'\n') {
+ ++descriptorNum, os << '\n') {
for (int featureNum = 0; featureNum < sizeOfDescriptor;
++featureNum) {
- os<<(int)obj.m_features.m_objectDescriptors.at<uchar>(
+ os << (int)obj.m_features.m_objectDescriptors.at<uchar>(
descriptorNum,
- featureNum)<<' ';
+ featureNum) << ' ';
}
}
@@ -302,54 +302,54 @@ std::istream& operator >> (std::istream& is, ImageObject& obj)
return is; \
}
- is>>temporal.m_isEmpty;
+ is >> temporal.m_isEmpty;
MEDIA_VISION_CHECK_IFSTREAM
- is>>temporal.m_isLabeled;
+ is >> temporal.m_isLabeled;
MEDIA_VISION_CHECK_IFSTREAM
- is>>temporal.m_label;
+ is >> temporal.m_label;
MEDIA_VISION_CHECK_IFSTREAM
- is>>numberOfContourPoints;
+ is >> numberOfContourPoints;
MEDIA_VISION_CHECK_IFSTREAM
temporal.m_boundingContour.resize(numberOfContourPoints);
for (size_t pointNum = 0; pointNum < numberOfContourPoints; ++pointNum) {
- is>>temporal.m_boundingContour[pointNum].x;
+ is >> temporal.m_boundingContour[pointNum].x;
MEDIA_VISION_CHECK_IFSTREAM
- is>>temporal.m_boundingContour[pointNum].y;
+ is >> temporal.m_boundingContour[pointNum].y;
MEDIA_VISION_CHECK_IFSTREAM
}
- is>>numberOfKeypoints;
+ is >> numberOfKeypoints;
temporal.m_features.m_objectKeypoints.resize(numberOfKeypoints);
for (size_t keypointNum = 0; keypointNum < numberOfKeypoints; ++keypointNum) {
- is>>temporal.m_features.m_objectKeypoints[keypointNum].pt.x;
+ is >> temporal.m_features.m_objectKeypoints[keypointNum].pt.x;
MEDIA_VISION_CHECK_IFSTREAM
- is>>temporal.m_features.m_objectKeypoints[keypointNum].pt.y;
+ is >> temporal.m_features.m_objectKeypoints[keypointNum].pt.y;
MEDIA_VISION_CHECK_IFSTREAM
- is>>temporal.m_features.m_objectKeypoints[keypointNum].size;
+ is >> temporal.m_features.m_objectKeypoints[keypointNum].size;
MEDIA_VISION_CHECK_IFSTREAM
- is>>temporal.m_features.m_objectKeypoints[keypointNum].response;
+ is >> temporal.m_features.m_objectKeypoints[keypointNum].response;
MEDIA_VISION_CHECK_IFSTREAM
- is>>temporal.m_features.m_objectKeypoints[keypointNum].angle;
+ is >> temporal.m_features.m_objectKeypoints[keypointNum].angle;
MEDIA_VISION_CHECK_IFSTREAM
- is>>temporal.m_features.m_objectKeypoints[keypointNum].octave;
+ is >> temporal.m_features.m_objectKeypoints[keypointNum].octave;
MEDIA_VISION_CHECK_IFSTREAM
- is>>temporal.m_features.m_objectKeypoints[keypointNum].class_id;
+ is >> temporal.m_features.m_objectKeypoints[keypointNum].class_id;
MEDIA_VISION_CHECK_IFSTREAM
}
- is>>rows;
+ is >> rows;
MEDIA_VISION_CHECK_IFSTREAM
- is>>cols;
+ is >> cols;
MEDIA_VISION_CHECK_IFSTREAM
- is>>descriptorType;
+ is >> descriptorType;
MEDIA_VISION_CHECK_IFSTREAM
temporal.m_features.m_objectDescriptors = cv::Mat(rows, cols, descriptorType);
int value = 0;
for (int descriptorNum = 0; descriptorNum < rows; ++descriptorNum) {
for (int featureNum = 0; featureNum < cols; ++featureNum) {
- is>>value;
+ is >> value;
MEDIA_VISION_CHECK_IFSTREAM
temporal.m_features.m_objectDescriptors.at<uchar>(descriptorNum, featureNum) =
diff --git a/mv_image/image/src/Tracking/ImageTrackingModel.cpp b/mv_image/image/src/Tracking/ImageTrackingModel.cpp
index b9a89cec..8f4c0f90 100644
--- a/mv_image/image/src/Tracking/ImageTrackingModel.cpp
+++ b/mv_image/image/src/Tracking/ImageTrackingModel.cpp
@@ -228,7 +228,7 @@ int ImageTrackingModel::save(const char *filepath) const
LOGD("prefixPath: %s", prefixPath.c_str());
/* check the directory is available */
- if (access(prefixPath.c_str(),F_OK)) {
+ if (access(prefixPath.c_str(), F_OK)) {
LOGE("Can't save tracking model. Path[%s] doesn't existed.", filePath.c_str());
return MEDIA_VISION_ERROR_INVALID_PATH;
@@ -242,7 +242,7 @@ int ImageTrackingModel::save(const char *filepath) const
return MEDIA_VISION_ERROR_PERMISSION_DENIED;
}
- out<<(*this);
+ out << (*this);
out.close();
LOGI("[%s] Image tracking model is saved.", __FUNCTION__);
@@ -256,7 +256,7 @@ int ImageTrackingModel::load(const char *filepath)
filePath = std::string(filepath);
- if (access(filePath.c_str(),F_OK)) {
+ if (access(filePath.c_str(), F_OK)) {
LOGE("Can't load tracking model. Path[%s] doesn't existed.", filepath);
return MEDIA_VISION_ERROR_INVALID_PATH;
@@ -285,22 +285,22 @@ int ImageTrackingModel::load(const char *filepath)
std::ostream& operator << (std::ostream& os, const ImageTrackingModel& obj)
{
- os<<std::setprecision(7);
+ os << std::setprecision(7);
- os<<obj.m_target;
- os<<obj.m_stabilizationParams.mIsEnabled<<'\n';
- os<<obj.m_stabilizationParams.mHistoryAmount<<'\n';
- os<<obj.m_stabilizationParams.mStabilizationSpeed<<'\n';
- os<<obj.m_stabilizationParams.mStabilizationAcceleration<<'\n';
- os<<obj.m_stabilizationParams.mTolerantShift<<'\n';
- os<<obj.m_stabilizationParams.mTolerantShiftExtra<<'\n';
+ os << obj.m_target;
+ os << obj.m_stabilizationParams.mIsEnabled << '\n';
+ os << obj.m_stabilizationParams.mHistoryAmount << '\n';
+ os << obj.m_stabilizationParams.mStabilizationSpeed << '\n';
+ os << obj.m_stabilizationParams.mStabilizationAcceleration << '\n';
+ os << obj.m_stabilizationParams.mTolerantShift << '\n';
+ os << obj.m_stabilizationParams.mTolerantShiftExtra << '\n';
const size_t numberOfContourPoints = obj.m_location.size();
- os<<numberOfContourPoints<<'\n';
+ os << numberOfContourPoints << '\n';
for (size_t pointNum = 0u; pointNum < numberOfContourPoints; ++pointNum)
- os<<' '<<obj.m_location[pointNum].x<<' '<<obj.m_location[pointNum].y;
+ os << ' ' << obj.m_location[pointNum].x << ' ' << obj.m_location[pointNum].y;
- os<<'\n';
+ os << '\n';
return os;
}
@@ -315,26 +315,26 @@ std::istream& operator >> (std::istream& is, ImageTrackingModel& obj)
ImageObject target;
std::vector<cv::Point> location;
- is>>target;
+ is >> target;
MEDIA_VISION_CHECK_IFSTREAM
StabilizationParams params;
- is>>params.mIsEnabled;
- is>>params.mHistoryAmount;
- is>>params.mStabilizationSpeed;
- is>>params.mStabilizationAcceleration;
- is>>params.mTolerantShift;
- is>>params.mTolerantShiftExtra;
+ is >> params.mIsEnabled;
+ is >> params.mHistoryAmount;
+ is >> params.mStabilizationSpeed;
+ is >> params.mStabilizationAcceleration;
+ is >> params.mTolerantShift;
+ is >> params.mTolerantShiftExtra;
size_t numberOfContourPoints = 0u;
- is>>numberOfContourPoints;
+ is >> numberOfContourPoints;
MEDIA_VISION_CHECK_IFSTREAM
location.resize(numberOfContourPoints);
for (size_t pointNum = 0u; pointNum < numberOfContourPoints; ++pointNum) {
- is>>location[pointNum].x;
+ is >> location[pointNum].x;
MEDIA_VISION_CHECK_IFSTREAM
- is>>location[pointNum].y;
+ is >> location[pointNum].y;
MEDIA_VISION_CHECK_IFSTREAM
}
diff --git a/mv_image/image/src/Tracking/MFTracker.cpp b/mv_image/image/src/Tracking/MFTracker.cpp
index 9dc20cc1..d2674b0c 100644
--- a/mv_image/image/src/Tracking/MFTracker.cpp
+++ b/mv_image/image/src/Tracking/MFTracker.cpp
@@ -29,8 +29,8 @@ namespace {
size = (int)values.size();
std::vector<T> copy(values.begin(), values.begin() + size);
- std::sort(copy.begin(),copy.end());
- if(size%2==0)
+ std::sort(copy.begin(), copy.end());
+ if (size%2 == 0)
return (copy[size/2-1]+copy[size/2])/((T)2.0);
else
return copy[(size - 1) / 2];
@@ -160,7 +160,7 @@ bool MFTracker::update(const cv::Mat& image)
cv::Mat oldImage = m_image;
cv::Rect_<float> oldBox = m_boundingBox;
- if(!medianFlowImpl(oldImage, image, oldBox))
+ if (!medianFlowImpl(oldImage, image, oldBox))
return false;
image.copyTo(m_image);
@@ -260,8 +260,7 @@ bool MFTracker::medianFlowImpl(
displacements.push_back(sqrt(di[idx].ddot(di[idx])));
}
- m_confidence =
- (10.f - getMedian(displacements,(int)displacements.size())) / 10.f;
+ m_confidence = (10.f - getMedian(displacements, (int)displacements.size())) / 10.f;
if (m_confidence < 0.f) {
m_confidence = 0.f;
@@ -285,32 +284,32 @@ cv::Rect_<float> MFTracker::vote(
oldRect.y + oldRect.height / 2.f);
const int n = (int)oldPoints.size();
- std::vector<float> buf(std::max( n*(n-1) / 2, 3), 0.f);
+ std::vector<float> buf(std::max(n*(n-1) / 2, 3), 0.f);
- if(oldPoints.size() == 1) {
+ if (oldPoints.size() == 1) {
newRect.x = oldRect.x+newPoints[0].x-oldPoints[0].x;
newRect.y = oldRect.y+newPoints[0].y-oldPoints[0].y;
- newRect.width=oldRect.width;
- newRect.height=oldRect.height;
+ newRect.width = oldRect.width;
+ newRect.height = oldRect.height;
return newRect;
}
float xshift = 0.f;
float yshift = 0.f;
- for(int i = 0; i < n; i++)
+ for (int i = 0; i < n; i++)
buf[i] = newPoints[i].x - oldPoints[i].x;
xshift = getMedian(buf, n);
newCenter.x += xshift;
- for(int idx = 0; idx < n; idx++)
+ for (int idx = 0; idx < n; idx++)
buf[idx] = newPoints[idx].y - oldPoints[idx].y;
yshift = getMedian(buf, n);
newCenter.y += yshift;
mD = cv::Point2f(xshift, yshift);
- if(oldPoints.size() == 1) {
+ if (oldPoints.size() == 1) {
newRect.x = newCenter.x - oldRect.width / 2.f;
newRect.y = newCenter.y - oldRect.height / 2.f;
newRect.width = oldRect.width;
@@ -322,7 +321,7 @@ cv::Rect_<float> MFTracker::vote(
float nd = 0.f;
float od = 0.f;
for (int i = 0, ctr = 0; i < n; i++) {
- for(int j = 0; j < i; j++) {
+ for (int j = 0; j < i; j++) {
nd = l2distance(newPoints[i], newPoints[j]);
od = l2distance(oldPoints[i], oldPoints[j]);
buf[ctr] = (od == 0.f ? 0.f : nd / od);
@@ -347,7 +346,7 @@ void MFTracker::check_FB(
{
const size_t numberOfOldPoints = oldPoints.size();
- if(status.empty())
+ if (status.empty())
status = std::vector<bool>(numberOfOldPoints, true);
std::vector<uchar> LKstatus(numberOfOldPoints);
@@ -397,12 +396,11 @@ void MFTracker::check_NCC(
const float prod = p1.dot(p2);
const float sq1 = sqrt(n1 * n1 - s1 * s1 / N);
const float sq2 = sqrt(n2 * n2 - s2 * s2 / N);
- NCC[idx] = (sq2==0 ? sq1 / std::abs(sq1)
- : (prod - s1 * s2 / N) / sq1 / sq2);
+ NCC[idx] = (sq2 == 0 ? sq1 / std::abs(sq1) : (prod - s1 * s2 / N) / sq1 / sq2);
}
float median = getMedian(NCC) - FloatEps;
- for(size_t idx = 0u; idx < oldPoints.size(); idx++)
+ for (size_t idx = 0u; idx < oldPoints.size(); idx++)
status[idx] = status[idx] && (NCC[idx] > median);
}
diff --git a/mv_surveillance/surveillance/src/EventTriggerPersonAppearance.cpp b/mv_surveillance/surveillance/src/EventTriggerPersonAppearance.cpp
index ca14d450..298776be 100644
--- a/mv_surveillance/surveillance/src/EventTriggerPersonAppearance.cpp
+++ b/mv_surveillance/surveillance/src/EventTriggerPersonAppearance.cpp
@@ -88,8 +88,7 @@ int EventResultPersonAppearance::getResultValue(const char *valueName,
MAX_VALUE_NAME_LENGHT) == 0) {
size_t * const numberOfAppearedPersons = (size_t*) value;
*numberOfAppearedPersons = __appearedLocations.size();
- }
- else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_APPEARED_LOCATIONS,
+ } else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_APPEARED_LOCATIONS,
MAX_VALUE_NAME_LENGHT) == 0) {
mv_rectangle_s * const appearedLocations = (mv_rectangle_s*) value;
@@ -97,13 +96,11 @@ int EventResultPersonAppearance::getResultValue(const char *valueName,
for (size_t i = 0u; i < numberOfAppearedPersons; ++i)
appearedLocations[i] = __appearedLocations[i];
- }
- else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_TRACKED_NUMBER,
+ } else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_TRACKED_NUMBER,
MAX_VALUE_NAME_LENGHT) == 0) {
size_t * const numberOfTrackedPersons = (size_t*) value;
*numberOfTrackedPersons = __trackedLocations.size();
- }
- else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_TRACKED_LOCATIONS,
+ } else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_TRACKED_LOCATIONS,
MAX_VALUE_NAME_LENGHT) == 0) {
mv_rectangle_s * const trackedLocations = (mv_rectangle_s*) value;
@@ -111,13 +108,11 @@ int EventResultPersonAppearance::getResultValue(const char *valueName,
for (size_t i = 0u; i < numberOfTrackedPersons; ++i)
trackedLocations[i] = __trackedLocations[i];
- }
- else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_DISAPPEARED_NUMBER,
+ } else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_DISAPPEARED_NUMBER,
MAX_VALUE_NAME_LENGHT) == 0) {
size_t * const numberOfDisappearedPersons = (size_t*) value;
*numberOfDisappearedPersons = __disappearedLocations.size();
- }
- else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_DISAPPEARED_LOCATIONS,
+ } else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_DISAPPEARED_LOCATIONS,
MAX_VALUE_NAME_LENGHT) == 0) {
mv_rectangle_s * const disappearedLocations = (mv_rectangle_s*) value;
@@ -125,8 +120,7 @@ int EventResultPersonAppearance::getResultValue(const char *valueName,
for (size_t i = 0u; i < numberOfDisappearedPersons; ++i)
disappearedLocations[i] = __disappearedLocations[i];
- }
- else {
+ } else {
LOGE("This value name doesn't exist. Getting result value failed.");
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
@@ -335,8 +329,7 @@ void EventTriggerPersonAppearance::movementDetectedCB(
for (size_t idx = 0u; idx < hogRectsSize; ++idx)
if (!trackChecks[idx])
trigger->__appearedRects.push_back(hogRects[idx]);
- }
- else {
+ } else {
/* 4.2 Try to track */
CVRectanglesConstIter appearedIter = trigger->__appearedRects.begin();
for (; appearedIter != trigger->__appearedRects.end(); ++appearedIter)
@@ -376,14 +369,13 @@ void EventTriggerPersonAppearance::movementDetectedCB(
}
}
- if (tracked)
+ if (tracked) {
++iter;
- else {
+ } else {
if (iter->framesCount == 0) {
trigger->__disappearedRects.push_back(iter->rect);
iter = trigger->__trackedRects.erase(iter);
- }
- else {
+ } else {
--(iter->framesCount);
++iter;
}
diff --git a/mv_surveillance/surveillance/src/EventTriggerPersonRecognition.cpp b/mv_surveillance/surveillance/src/EventTriggerPersonRecognition.cpp
index 7b28709a..c5dd1261 100644
--- a/mv_surveillance/surveillance/src/EventTriggerPersonRecognition.cpp
+++ b/mv_surveillance/surveillance/src/EventTriggerPersonRecognition.cpp
@@ -236,9 +236,8 @@ int EventTriggerPersonRecognition::pushSource(
return error;
}
- if (buffer_size != width * height)
- {
- // Unexcepted behaviour
+ if (buffer_size != width * height) {
+ /* Unexcepted behaviour */
LOGE("Grayscale source interpretation failed.");
return MEDIA_VISION_ERROR_INTERNAL;
}
diff --git a/packaging/capi-media-vision.spec b/packaging/capi-media-vision.spec
index dab5a125..b659303c 100644
--- a/packaging/capi-media-vision.spec
+++ b/packaging/capi-media-vision.spec
@@ -1,6 +1,6 @@
Name: capi-media-vision
Summary: Media Vision library for Tizen Native API
-Version: 0.3.12
+Version: 0.3.13
Release: 0
Group: Multimedia/Framework
License: Apache-2.0 and BSD-2.0
diff --git a/test/testsuites/common/image_helper/src/ImageHelper.cpp b/test/testsuites/common/image_helper/src/ImageHelper.cpp
index d3623163..2207a891 100644
--- a/test/testsuites/common/image_helper/src/ImageHelper.cpp
+++ b/test/testsuites/common/image_helper/src/ImageHelper.cpp
@@ -290,7 +290,7 @@ int ImageHelper::convertBufferToRGB888(
unsigned char **pOutBuffer)
{
// todo: support more colorspaces:
- switch(imageData.imageColorspace)
+ switch (imageData.imageColorspace)
{
case MEDIA_VISION_COLORSPACE_Y800:
return convertY800ToRGB(pInBuffer, imageData, pOutBuffer);