LK金字塔光流算法是一种经典的稀疏光流算法,该算法有三个假设条件:亮度恒定、小运动、邻域空间一致。图像越满足这三个条件,算法的跟踪效果就越好。本文在原算法的基础上尝试作一点改进,使得跟踪效果更准确更稳定。下面详细讲解该算法的原理,接着尝试进行改进,并使用C++与Opencv来实现算法。

假设待跟踪点坐标为 $(u_x, u_y)$,邻域窗口大小为 $w_x \times w_y$,光流为 $(d_x, d_y)$,则待跟踪点与跟踪点的邻域窗口平方差可如下式表示:

$$\varepsilon(d)=\varepsilon(d_x,d_y)=\sum_{x=u_x-w_x}^{u_x+w_x}\ \sum_{y=u_y-w_y}^{u_y+w_y}\bigl(I(x,y)-J(x+d_x,\,y+d_y)\bigr)^2$$
因为是小运动,$d_x$ 和 $d_y$ 都很小,故对 $J(x+d_x,\,y+d_y)$ 进行泰勒展开并略去高阶项得到:

$$J(x+d_x,\,y+d_y)\approx J(x,y)+\frac{\partial J}{\partial x}d_x+\frac{\partial J}{\partial y}d_y$$

代入上式,平方差可近似为:

$$\varepsilon(d)\approx\sum_{x,y}\Bigl(I(x,y)-J(x,y)-\frac{\partial J}{\partial x}d_x-\frac{\partial J}{\partial y}d_y\Bigr)^2$$
上式中,由于dx和dy都很小,把浮动图像J对x和y的梯度近似替换为参考图像I对x和y的梯度。因为后者在迭代优化过程中是固定不变的,这样就可以避免在每一轮迭代中都计算梯度,可减少很多计算时间。

记 $\delta I(x,y)=I(x,y)-J(x,y)$,$\nabla I=\begin{bmatrix}I_x & I_y\end{bmatrix}^T$,对 $\varepsilon(d)$ 关于 $d$ 求导并令导数为零,可得标准方程 $G\,d=b$,其中:

$$G=\sum_{x,y}\begin{bmatrix}I_x^2 & I_xI_y\\ I_xI_y & I_y^2\end{bmatrix},\qquad b=\sum_{x,y}\delta I\begin{bmatrix}I_x\\ I_y\end{bmatrix}$$

于是光流解为:

$$d=G^{-1}b$$
计算出 $d$ 之后,根据 $d$ 移动浮动图像上的跟踪点,在此基础上重复上述计算过程进行迭代优化,直到满足给定精度或达到最大迭代次数为止。迭代过程中,$[I_x\ I_y]$ 是不变的,故 $G$ 保持不变,但因为 $\delta I$ 与上一轮得到的光流有关,$b$ 在每轮迭代中是变化的。假设第 $k$ 轮迭代得到的光流为 $d^k=(d_x^k,\,d_y^k)$,第 $k-1$ 轮迭代得到的光流为 $d^{k-1}=(d_x^{k-1},\,d_y^{k-1})$,那么有:

$$\delta I_k(x,y)=I(x,y)-J\bigl(x+d_x^{k-1},\,y+d_y^{k-1}\bigr),\qquad b_k=\sum_{x,y}\delta I_k\begin{bmatrix}I_x\\ I_y\end{bmatrix},\qquad d^k=d^{k-1}+G^{-1}b_k$$
以上计算过程中,δI实际上是参考图像I上待跟踪点与浮动图像J上相同坐标点的像素差,且▽I实际上是浮动图像J上点的梯度。但原算法为了减小计算时间,在迭代计算过程中,使用参考图像的梯度来代替浮动图像的梯度,并且δI改为取参考图像上待跟踪点与浮动图像上跟踪点的像素差。当参考图像与浮动图像之间的运动偏移很大时,就不再适合使用参考图像的梯度来近似替代浮动图像的梯度了,所以这种情况下跟踪效果会受很大影响。本文为解决此问题,将δI与▽I的计算还原回去,如下图所示,此时δI取邻域块A与邻域块B的像素差值,▽I取邻域块C的梯度。


下面上代码,基于C++与Opencv实现。
typedef float DTYPE_FD; typedef unsigned char DTYPE; //获取图像高斯金字塔 void build_gauss_Pyramid_cuda(Mat src, DTYPE *gauss_Pyramid, int num) { memcpy(gauss_Pyramid, (DTYPE *)src.data, src.rows*src.cols*sizeof(DTYPE)); //先把第0层的原图拷贝进去 Mat currentMat = src.clone(); int len_x = src.cols; int len_y = src.rows; int img_size = src.rows*src.cols; int offset = 0; for (int i = 0; i < num; i++) { Mat nextLayer; offset += img_size; img_size >>= 2; len_x >>= 1; len_y >>= 1; pyrDown(currentMat, nextLayer, Size(len_x, len_y)); memcpy(gauss_Pyramid+offset, (DTYPE *)nextLayer.data, img_size*sizeof(DTYPE)); currentMat = nextLayer.clone(); } }
讯享网//分别计算单帧图像的x,y方向的梯度 void cal_gradient(DTYPE *src, int row, int col, DTYPE_FD *Fx, DTYPE_FD *Fy) { int index; //计算x方向的首列和尾列梯度, i = 0~row-1, j = 0, col-1 for(int i = 0; i < row; i++) { index = i*col; Fx[index] = (DTYPE_FD)(src[index+1] - src[index]); //首列 Fx[index+col-1] = (DTYPE_FD)(src[index+col-1] - src[index+col-2]); //尾列 } //计算y方向的首行和尾行梯度, i = 0, row-1, j = 0~col-1 for(int j = 0; j < col; j++) { Fy[j] = (DTYPE_FD)(src[col+j] - src[j]); //首行 index = (row-1)*col; Fy[index+j] = (DTYPE_FD)(src[index+j] - src[index-col+j]); //尾行 } for(int i = 1; i < row-1; i++) { for(int j = 1; j < col-1; j++) { index = i*col+j; Fx[index] = (src[index+1] - src[index-1])/2.0; Fy[index] = (src[index+col] - src[index-col])/2.0; } } } //获取图像高斯金字塔的梯度,row和col为原图尺寸 void cal_gauss_Pyramid_gradient_cuda(DTYPE *gauss_Pyramid, int row, int col, DTYPE_FD *Fx, DTYPE_FD *Fy, int num) { cal_gradient(gauss_Pyramid, row, col, Fx, Fy); //计算第0层的梯度,Fx和Fy也是金字塔,是多层图像的梯度 int len_x = col; int len_y = row; int img_size = len_x*len_y; int offset = 0; for (int i = 0; i < num; i++) { offset += img_size; img_size >>= 2; len_x >>= 1; len_y >>= 1; cal_gradient(gauss_Pyramid+offset, len_y, len_x, Fx+offset, Fy+offset); //Fx和Fy也是金字塔,是多层图像的梯度 } }
//计算前后帧的高斯金字塔的差值 void cal_diff(DTYPE *pre_gauss_Pyramid, DTYPE *cur_gauss_Pyramid, int length, DTYPE_FD *diff) { for(int i = 0; i < length; i++) { diff[i] = (DTYPE_FD)(pre_gauss_Pyramid[i] - cur_gauss_Pyramid[i]); } }
讯享网vector<Point2f> lk_track_ori1(Mat preFrame, Mat curFrame, vector<Point2f> keyPoints, vector<uchar> &status, int maxPyramidLayer, int maxIteration, int winsize, DTYPE_FD opticalflowResidual) { status.clear(); int gaussian_len = get_gauss_Pyramid_memory_len(preFrame.rows, preFrame.cols, maxPyramidLayer); //总共maxPyramidLayer+1层 DTYPE *pre_gauss_Pyramid_memory = (DTYPE *)malloc(gaussian_len); DTYPE *cur_gauss_Pyramid_memory = (DTYPE *)malloc(gaussian_len); DTYPE_FD *cur_Fx = (DTYPE_FD *)malloc(gaussian_len / sizeof(DTYPE) * sizeof(DTYPE_FD)); DTYPE_FD *cur_Fy = (DTYPE_FD *)malloc(gaussian_len / sizeof(DTYPE) * sizeof(DTYPE_FD)); DTYPE_FD *diff = (DTYPE_FD *)malloc(gaussian_len / sizeof(DTYPE) * sizeof(DTYPE_FD)); build_gauss_Pyramid_cuda(preFrame, pre_gauss_Pyramid_memory, maxPyramidLayer); //计算图像金字塔 build_gauss_Pyramid_cuda(curFrame, cur_gauss_Pyramid_memory, maxPyramidLayer); cal_gauss_Pyramid_gradient_cuda(cur_gauss_Pyramid_memory, curFrame.rows, curFrame.cols, cur_Fx, cur_Fy, maxPyramidLayer); cal_diff(pre_gauss_Pyramid_memory, cur_gauss_Pyramid_memory, gaussian_len / sizeof(DTYPE), diff); vector<Point2f> tPoints; int width, height; DTYPE_FD delta[2]; const int winsize_2_1 = 2 * winsize + 1; const int img_size = preFrame.rows*preFrame.cols; DTYPE_FD *dd = (DTYPE_FD *)malloc(winsize_2_1*winsize_2_1 * sizeof(DTYPE_FD)); float coeff[16]; const DTYPE_FD maxPyramidLayer_coeff = (float)(1. / (1 << maxPyramidLayer)); for (unsigned int i = 0; i < keyPoints.size(); i++) { DTYPE_FD g[2] = { 0 }; bool isValid = true; float prePoint_x; //最顶层的初始跟踪点 float prePoint_y; prePoint_x = keyPoints[i].x*maxPyramidLayer_coeff; prePoint_y = keyPoints[i].y*maxPyramidLayer_coeff; for (int j = maxPyramidLayer; j >= 0; j--) { //int offset = get_gauss_Pyramid_memory_offset_size(preFrame.rows, preFrame.cols, j); //, height, width); int offset = (j > 0) ? 
((int)(img_size*(4 - 1.0 / (1 << (2 * j - 2))) / 3.0)) : 0; height = preFrame.rows >> j; //row/pow(2.0, index); width = preFrame.cols >> j; //col/pow(2.0, index); DTYPE_FD *fx = &cur_Fx[offset]; DTYPE_FD *fy = &cur_Fy[offset]; DTYPE_FD *d = &diff[offset]; DTYPE_FD x, y, curX, curY, ix, iy; for (int t1 = -winsize; t1 <= winsize; t1++) //row { for (int t2 = -winsize; t2 <= winsize; t2++) //col { x = prePoint_x + t2; y = prePoint_y + t1; int floorRow = floor(y); //δI=A(x,y)-B(x,y),而不是δI=A(x,y)-B(x+Δx,y+Δy),所以这样做会更准确一点 int floorCol = floor(x); //双线性插值 int ceilRow = floorRow + 1; int ceilCol = floorCol + 1; if (floorCol >= 0 && floorCol < width - 1 && floorRow >= 0 && floorRow < height - 1) { DTYPE_FD fracRow = y - floorRow; DTYPE_FD fracCol = x - floorCol; float k0 = 1.0 - fracRow; float k1 = 1.0 - fracCol; dd[(t1 + winsize)*(2 * winsize + 1) + t2 + winsize] = (k0*k1*d[floorRow*width + floorCol] + fracRow*k1*d[ceilRow*width + floorCol] + k0*fracCol*d[floorRow*width + ceilCol] + fracRow*fracCol*d[ceilRow*width + ceilCol]); } else { dd[(t1 + winsize)*(2 * winsize + 1) + t2 + winsize] = 0; } } } DTYPE_FD v[2] = { 0 }; //[x y] int iterCount = 0; DTYPE_FD residual; while (iterCount < maxIteration) //迭代计算剩余光流 { iterCount++; DTYPE_FD G[4] = { 0 }; //[Ix*Ix, Ix*Iy, Ix*Iy, Iy*Iy] DTYPE_FD b[2] = { 0 }; //[x y] for (int t1 = -winsize; t1 <= winsize; t1++) //row { for (int t2 = -winsize; t2 <= winsize; t2++) //col { x = prePoint_x + t2; y = prePoint_y + t1; curX = x + g[0] + v[0]; //这里需注意:后一帧图像的点坐标是变化的 curY = y + g[1] + v[1]; /插值// int floorRow = floor(curY); int floorCol = floor(curX); if (floorCol >= 0 && floorCol < width - 1 && floorRow >= 0 && floorRow < height - 1) { int ceilRow = floorRow + 1; int ceilCol = floorCol + 1; DTYPE_FD fracRow = curY - floorRow; DTYPE_FD fracCol = curX - floorCol; DTYPE_FD k0 = 1.0 - fracRow; DTYPE_FD k1 = 1.0 - fracCol; DTYPE_FD a0 = k0*k1; DTYPE_FD a1 = fracRow*k1; DTYPE_FD a2 = k0*fracCol; DTYPE_FD a3 = fracRow*fracCol; int idx0 = 
floorRow*width + floorCol; int idx1 = ceilRow*width + floorCol; int idx2 = floorRow*width + ceilCol; int idx3 = ceilRow*width + ceilCol; ix = a0 * fx[idx0] + a1 * fx[idx1] + a2 * fx[idx2] + a3 * fx[idx3]; iy = a0 * fy[idx0] + a1 * fy[idx1] + a2 * fy[idx2] + a3 * fy[idx3]; } else { ix = 0; iy = 0; } G[0] += ix * ix; //计算G G[1] += ix * iy; G[3] += iy * iy; int idx = (t1 + winsize)*winsize_2_1 + t2 + winsize; b[0] += dd[idx] * ix; //计算b b[1] += dd[idx] * iy; } } G[2] = G[1]; DTYPE_FD abs_G = 1.f / (G[0] * G[3] - G[1] * G[2]); //求矩阵的行列式的倒数 delta[0] = (G[3] * b[0] - G[2] * b[1])*abs_G; delta[1] = (-G[1] * b[0] + G[0] * b[1])*abs_G; v[0] += delta[0]; v[1] += delta[1]; residual = delta[0] * delta[0] + delta[1] * delta[1]; if (residual <= opticalflowResidual*opticalflowResidual) break; } if (iterCount >= maxIteration) { isValid = false; break; } if (j == 0) { g[0] += v[0]; //最终光流 g[1] += v[1]; } else { g[0] = 2 * (g[0] + v[0]); //金字塔中间层光流 g[1] = 2 * (g[1] + v[1]); } prePoint_x = prePoint_x*2.f; //下一层的初始跟踪点,迭代的时候已经加上了猜测光流,所以此处不需要再加了 prePoint_y = prePoint_y*2.f; } Point2f dstPoint(keyPoints[i].x + g[0], keyPoints[i].y + g[1]); //得到跟踪点 tPoints.push_back(dstPoint); if (isValid && dstPoint.x >= 0 && dstPoint.x < curFrame.cols && dstPoint.y >= 0 && dstPoint.y < curFrame.rows) { status.push_back(1); } else { status.push_back(0); } } free(pre_gauss_Pyramid_memory); free(cur_gauss_Pyramid_memory); free(cur_Fx); free(cur_Fy); free(diff); free(dd); return tPoints; }
设置相同的参数,分别运行以上代码和Opencv实现的LK光流金字塔算法接口calcOpticalFlowPyrLK,对运动图像进行追踪,对比结果如下。可以看到,当参考图像与浮动图像的运动偏移较小时,Opencv实现的原算法与本文C++实现的算法,跟踪效果都很好,但是当运动偏移较大时,Opencv实现原算法的跟踪结果中,误跟踪点明显增多,然而本文C++实现算法的跟踪效果还是比较好的。因此改进之后算法对大运动具有更好的适应性了。


小偏移浮动图像

大偏移浮动图像

Opencv calcOpticalFlowPyrLK函数追踪小偏移图像

上述C++实现代码追踪小偏移图像

Opencv calcOpticalFlowPyrLK函数追踪大偏移图像

上述C++实现代码追踪大偏移图像

版权声明:本文内容由互联网用户自发贡献,该文观点仅代表作者本人。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如发现本站有涉嫌侵权/违法违规的内容,请联系我们,一经查实,本站将立刻删除。
如需转载请保留出处:https://51itzy.com/kjqy/67633.html