Learning OpenCV 2: Object Tracking with MeanShift

Date: 2023-01-31 00:37:13

        In an earlier post we looked at MeanShift for object detection; now let's see how MeanShift is used for object tracking.

        OpenCV's MeanShift tracking builds on image moments and back projection. If these are unfamiliar, you may want to read my other post, "图像的几何矩" (geometric moments of an image), first.
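For reference, the key quantity MeanShift needs from the moments is the center of mass of the probability image inside the current window:

    xc = m10 / m00,    yc = m01 / m00

where m00 is the sum of all pixel values in the window and m10, m01 are the x- and y-weighted sums. Each iteration simply shifts the window so that it becomes centered on (xc, yc).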


1. The MeanShift( ) tracking workflow

        MeanShift is an iterative algorithm: each iteration moves the window toward the region of higher probability density, so MeanShift has to be run on a probability density map, and a back projection image is exactly a color probability density map. The figure below is a flowchart of the meanshift algorithm.

[Figure: flowchart of the MeanShift tracking algorithm]

The diagram makes it look involved, but the code is actually simple.

Initialization: compute the histogram hist0 of the target region rect0.

Tracking: based on the target histogram hist0, compute the back projection prohist1 of the whole search region;
              then, given the starting target region rect and a termination criterion, call the meanshift function on the back projection prohist1 to obtain the final region rectN (see the sketch below).
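Turned into code, the two steps look roughly like this. It is only a minimal sketch using the OpenCV 2 C++ API; frame0, frame and rect0 are placeholder names for the first frame, a later frame and the initial target rectangle, and the includes are the same as in the full example of section 3.

// --- Initialization: hue histogram of the target region rect0 in the first frame ---
Mat hsv, hue;
cvtColor(frame0, hsv, CV_BGR2HSV);
int ch[] = {0, 0};
hue.create(hsv.size(), hsv.depth());
mixChannels(&hsv, 1, &hue, 1, ch, 1);          // copy the hue channel out of hsv
int histSize = 16;
float hranges[] = {0, 180};
const float* phranges = hranges;
Mat roi(hue, rect0), hist0;
calcHist(&roi, 1, 0, Mat(), hist0, 1, &histSize, &phranges);
normalize(hist0, hist0, 0, 255, CV_MINMAX);

// --- Tracking: back projection of each new frame, then meanShift ---
Rect rect = rect0;                              // current search window
cvtColor(frame, hsv, CV_BGR2HSV);
mixChannels(&hsv, 1, &hue, 1, ch, 1);
Mat prohist1;
calcBackProject(&hue, 1, 0, hist0, prohist1, &phranges);
meanShift(prohist1, rect, TermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1));
// rect is updated in place and is the tracked region for this frame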


2. cvMeanShift( ) in detail

The function OpenCV provides for MeanShift tracking (C API) is cvMeanShift( ).

The source can be found in  ..\OpenCV249\sources\modules\video\src\camshift.cpp

int cvMeanShift( const void* imgProb,        // input probability density image
                 CvRect windowIn,            // initial target window
                 CvTermCriteria criteria,    // iteration termination criterion
                 CvConnectedComp* comp )     // optional output: connected component struct that receives the result

The return value is the number of iterations performed. Note that windowIn is passed by value, so the final target region is returned through comp->rect (and comp->area holds the zeroth moment of that final window).
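Before going through the source, here is a rough sketch of how cvMeanShift( ) is typically called. probImg is a placeholder for a single-channel back projection built with the C API, and the window coordinates are example values only.

// probImg: single-channel back projection (e.g. from cvCalcBackProject)
CvConnectedComp comp;
CvRect win = cvRect(100, 100, 80, 60);   // initial target window (example values)
CvTermCriteria crit = cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1);
int iters = cvMeanShift(probImg, win, crit, &comp);
// iters     : number of iterations actually performed
// comp.rect : final target window; comp.area : zeroth moment (sum of probabilities) inside it

The full implementation, with comments, follows.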

CV_IMPL int
cvMeanShift( const void* imgProb, CvRect windowIn,
             CvTermCriteria criteria, CvConnectedComp* comp )
{
    CvMoments moments;                    // CvMoments is a struct holding the image moments
    int i = 0, eps;
    CvMat stub, *mat = (CvMat*)imgProb;
    CvMat cur_win;
    CvRect cur_rect = windowIn;

    CV_FUNCNAME( "cvMeanShift" );

    // initialize the output window
    if( comp )
        comp->rect = windowIn;

    // zero the 0th- and 1st-order moments
    moments.m00 = moments.m10 = moments.m01 = 0;

    __BEGIN__;

    CV_CALL( mat = cvGetMat( mat, &stub ));   // get a matrix header; stub is temporary storage

    // report errors when the inputs are invalid
    if( CV_MAT_CN( mat->type ) > 1 )
        CV_ERROR( CV_BadNumChannels, cvUnsupportedFormat );
    if( windowIn.height <= 0 || windowIn.width <= 0 )
        CV_ERROR( CV_StsBadArg, "Input window has non-positive sizes" );
    if( windowIn.x < 0 || windowIn.x + windowIn.width > mat->cols ||
        windowIn.y < 0 || windowIn.y + windowIn.height > mat->rows )
        CV_ERROR( CV_StsBadArg, "Initial window is not inside the image ROI" );

    // check the termination criterion; missing fields get the defaults: accuracy = 1.0, iterations = 100
    CV_CALL( criteria = cvCheckTermCriteria( criteria, 1., 100 ));

    // squared accuracy threshold, eps = epsilon^2
    eps = cvRound( criteria.epsilon * criteria.epsilon );

    // iterate at most criteria.max_iter times
    for( i = 0; i < criteria.max_iter; i++ )
    {
        int dx, dy, nx, ny;
        double inv_m00;

        CV_CALL( cvGetSubRect( mat, &cur_win, cur_rect ));   // view of mat restricted to cur_rect
        CV_CALL( cvMoments( &cur_win, &moments ));           // image moments of cur_win

        /* Calculating center of mass */
        if( fabs(moments.m00) < DBL_EPSILON )
            break;

        inv_m00 = moments.inv_sqrt_m00*moments.inv_sqrt_m00;           // inv_m00 = 1/m00
        // horizontal shift dx toward the center of mass; m10/m00 is the centroid x of cur_win
        dx = cvRound( moments.m10 * inv_m00 - windowIn.width*0.5 );
        // vertical shift dy toward the center of mass; m01/m00 is the centroid y of cur_win
        dy = cvRound( moments.m01 * inv_m00 - windowIn.height*0.5 );

        // new top-left corner (nx, ny) of the search window
        nx = cur_rect.x + dx;
        ny = cur_rect.y + dy;

        // clamp the window when the target is near the image border
        if( nx < 0 )
            nx = 0;
        else if( nx + cur_rect.width > mat->cols )
            nx = mat->cols - cur_rect.width;

        if( ny < 0 )
            ny = 0;
        else if( ny + cur_rect.height > mat->rows )
            ny = mat->rows - cur_rect.height;

        // nx and ny may have been clamped above, so recompute dx, dy before updating cur_rect
        dx = nx - cur_rect.x;
        dy = ny - cur_rect.y;
        cur_rect.x = nx;
        cur_rect.y = ny;

        // stop as soon as the window shift is smaller than the required accuracy
        if( dx*dx + dy*dy < eps )
            break;
    }

    __END__;

    // store the final window and its zeroth moment
    if( comp )
    {
        comp->rect = cur_rect;
        comp->area = (float)moments.m00;
    }

    return i;   // return the number of iterations
}
One trick worth taking away here is how default parameter values are filled in, via CV_CALL( criteria = cvCheckTermCriteria( ... ) ).
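The same check-and-fill pattern is easy to reuse in your own code. Below is a hypothetical helper that mimics what cvCheckTermCriteria appears to do (supply defaults for whatever the caller left unset); the real library implementation may differ in its details.

// Hypothetical helper illustrating the pattern: fill in defaults for fields the caller did not set.
static CvTermCriteria myCheckTermCriteria( CvTermCriteria c, double default_eps, int default_max_iter )
{
    if( !(c.type & CV_TERMCRIT_ITER) )
        c.max_iter = default_max_iter;   // caller did not ask for an iteration limit
    if( !(c.type & CV_TERMCRIT_EPS) )
        c.epsilon = default_eps;         // caller did not ask for an accuracy threshold
    c.type = CV_TERMCRIT_ITER | CV_TERMCRIT_EPS;
    return c;
}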

3. Experiment code and results

#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <ctype.h>
using namespace cv;
using namespace std;

//定义一些全局变量
bool LBdown = false;
int trackCount = 0; //跟踪计数
Mat image;
Rect selection;
Point origin;

//函数声明
void onMouse( int event, int x, int y, int, void* );
void help();


int main( )
{
help();

int vmin = 10, vmax = 256, smin = 30;
int hbinNum = 16;
float hranges[] = {0,180};
const float* phranges = hranges;
bool backprojMode = false;

VideoCapture capture(0); //读取摄像头

if( !capture.isOpened() )
{
cout << "***Could not initialize capturing...***\n";
return -1;
}

namedWindow( "Histogram", 0 );
namedWindow( "MeanShift Tracking", 0 );
setMouseCallback( "MeanShift Tracking", onMouse, 0 );//消息响应机制
createTrackbar( "Vmin", "MeanShift Tracking", &vmin, 256, 0 );//createTrackbar函数的功能是在对应的窗口创建滑动条,滑动条Vmin,vmin表示滑动条的值,最大为256
createTrackbar( "Vmax", "MeanShift Tracking", &vmax, 256, 0 );//最后一个参数为0代表没有调用滑动拖动的响应函数
createTrackbar( "Smin", "MeanShift Tracking", &smin, 256, 0 );//vmin,vmax,smin初始值分别为10,256,30

bool paused = false;
Mat frame,hsv,hue,mask,hist,histimg = Mat::zeros(200, 320, CV_8UC3), backproj;;
Rect trackWindow;

while (1)
{
if( !paused )//没有暂停,不停地显示当前图片
{
capture>>frame;
if(frame.empty())
break;
}

frame.copyTo(image);

if(!paused)
{
if(trackCount > 0)
{
cvtColor(image, hsv, CV_BGR2HSV);
//通过inRange限制s和v分量范围,滤除一些干扰
inRange(hsv, Scalar(0, smin, min(vmin,vmax)),Scalar(180, 256, max(vmin, vmax)), mask);
int ch[] = {0, 0};
hue.create(hsv.size(), hsv.depth());//hue初始化为与hsv大小深度一样的矩阵
mixChannels(&hsv, 1, &hue, 1, ch, 1);//将hsv第一个通道(也就是色调)的数复制到hue中

//=========起始帧进行初始化,即画出目标直方图==============================
if(trackCount == 1)
{
histimg = Scalar::all(0); //重新选择目标就将histimg清零
//计算直方图及归一化
Mat roi(hue, selection), maskroi(mask, selection);//mask保存的hsv的最小值
calcHist(&roi, 1, 0, maskroi, hist, 1, &hbinNum, &phranges);
normalize(hist, hist, 0, 255, CV_MINMAX); //将hist归一化到0~255

//----------------------------------------------------------------
//下面程序段是画直方图,并不是必须的,不影响跟踪效果
//定义bin的颜色
int binWidth = histimg.cols / hbinNum; //直方图每个块的宽度

Mat binColor(1, hbinNum, CV_8UC3);//定义一个颜色矩阵
RNG rng;
rng.fill(binColor,RNG::NORMAL,1,255);

//绘制直方图
double binValueMax = 0;
minMaxLoc(hist,0,&binValueMax,0,0);

cout<<binValueMax<<endl;

for(int i= 0;i<hbinNum;i++)
{
int binHeight = saturate_cast<int>(hist.at<float>(i)/binValueMax*histimg.rows); //每个bin的高度
rectangle( histimg, Point(i*binWidth,histimg.rows),Point((i+1)*binWidth,histimg.rows - binHeight),
Scalar(binColor.at<Vec3b>(i)), -1, 8 );
}
//----------------------------------------------------------------------
trackCount ++;
trackWindow = selection; //trackWindow也就是目标窗口,用于meanShift()迭代
}

//===============后续帧跟踪=================
//计算反向投影,调用meanshift()方法进行跟踪
calcBackProject(&hue, 1, 0, hist, backproj, &phranges);//计算直方图的反向投影,计算hue图像0通道直方图hist的反向投影,并让入backproj中
backproj &= mask;
meanShift(backproj, trackWindow, TermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ));

if( backprojMode )
cvtColor( backproj, image, CV_GRAY2BGR );
rectangle(image,Point(trackWindow.x,trackWindow.y),Point(trackWindow.x+trackWindow.width,trackWindow.y+trackWindow.height),Scalar(0,0,255),1,CV_AA);

trackCount++;
cout<<"trackCount="<<trackCount<<endl;
}
}

imshow( "Histogram", histimg );
imshow( "MeanShift Tracking",image);

int c = waitKey(30);
switch (c)
{
//注意按键区分大小写
case 27: //esc 退出程序
return 0;
case 98: //'b' 反向投影模型交替
backprojMode = !backprojMode;
break;
case 112: //"p" 暂停跟踪交替
paused = !paused;
break;
}
}
}

void onMouse( int event, int x, int y, int, void* )
{
if( LBdown )//只有当鼠标左键按下去时才有效,然后通过if里面代码就可以确定所选择的矩形区域selection了
{
selection.x = MIN(x, origin.x);//矩形左上角顶点坐标
selection.y = MIN(y, origin.y);
selection.width = abs(x - origin.x);//矩形宽
selection.height = abs(y - origin.y);//矩形高
selection &= Rect(0, 0, image.cols, image.rows);//用于确保所选的矩形区域在图片范围内
rectangle( image, selection,Scalar(0,0,255), 1, 8 );
imshow( "MeanShift Tracking",image);
}

switch( event )
{
case CV_EVENT_LBUTTONDOWN:
origin = Point(x,y);
selection = Rect(x,y,0,0);//鼠标刚按下去时初始化了一个矩形区域
LBdown = true;
//trackCount = 0;
break;
case CV_EVENT_LBUTTONUP:
LBdown = false;
if( selection.area()>0 )
trackCount = 1;
//cout<<"selection="<<selection<<endl;
break;
}
}

void help()
{
cout << "\n\nHot keys: \n"
"\tESC - quit the program\n"
"\tb - switch to/from backprojection view\n"
"\tp - pause video\n"
"To initialize tracking, select the object with mouse\n";
}

[Figures: tracking result screenshots]

The images are blurry because they were captured as screenshots.