
Hey, I'm working on a project to stabilize a video sequence using optical flow. So far I have the optical flow working, but there are two possible approaches in front of me:

1 - After computing the optical flow, I find the average of the image displacement and then subtract that average from the features of the second frame. My question is: what do I do next to stabilize the image?

2 - Alternatively, I can use OpenCV functions to stabilize the image: I compute the transformation matrix and then use cvPerspectiveTransform followed by cvWarpPerspective, but I get an error, namely "bad flags".

You can see in the code below what I am trying to do to stabilize the image. Can anyone suggest a solution?
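(For reference: cvGetPerspectiveTransform maps exactly four point pairs to a 3x3 matrix, cvPerspectiveTransform operates on a CvMat of points, and cvWarpPerspective warps images. Below is a minimal sketch of that call pattern, reusing names from the code that follows; `stabilized` is a hypothetical output image that is not in the original code.)

CvMat pts1 = cvMat(1, 4, CV_32FC2, frame1_features);  /* wrap the point arrays */
CvMat pts2 = cvMat(1, 4, CV_32FC2, frame2_features);  /* in CvMat headers      */

cvGetPerspectiveTransform(frame2_features, frame1_features, M); /* 4 point pairs -> 3x3 M */
cvPerspectiveTransform(&pts2, &pts1, M);              /* transforms points, not images */

IplImage *stabilized = cvCloneImage(frame2);          /* hypothetical output image */
cvWarpPerspective(frame2, stabilized, M,              /* warps an image, not points */
                  CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll(0));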

#include <stdio.h> 
#include <stdlib.h>  
//#include "/usr/include/opencv/cv.h"  
#include <cv.h>  
#include <cvaux.h>  
#include <highgui.h>  
#include <math.h>  
#include <iostream> 

#define PI 3.1415926535898 

double rads(double degs) 
{ 
    return (PI/180 * degs); 
} 

CvCapture *cap; 

IplImage *img;  
IplImage *frame;  
IplImage *frame1;  
IplImage *frame3;  
IplImage *frame2;  
IplImage *temp_image1;  
IplImage *temp_image2;  
IplImage *frame1_1C;  
IplImage *frame2_1C;  
IplImage *eig_image;  
IplImage *temp_image;  
IplImage *pyramid1 = NULL;  
IplImage *pyramid2 = NULL; 

char * mapx; 
char * mapy; 

int h; 
int corner_count; 
CvMat* M = cvCreateMat(3,3,CV_32FC1); 
CvPoint p,q,l,s; 
double hypotenuse; 
double angle; 

int line_thickness = 1, line_valid = 1, pos = 0; 
CvScalar line_color; 
CvScalar target_color[4] = { // in BGR order 
     {{ 0, 0, 255, 0 }}, // red  
     {{ 0, 255, 0, 0 }}, // green  
     {{ 255, 0, 0, 0 }}, // blue  
     {{ 0, 255, 255, 0 }} // yellow  
}; 

inline static double square(int a)  
{ 
return a * a; 
} 

char* IntToChar(int num){return NULL;} 

/*{ 
    char* retstr = static_cast<char*>(calloc(12, sizeof(char))); 

    if (sprintf(retstr, "%i", num) > 0) 
    { 
     return retstr; 
    } 
    else 
    { 
     return NULL; 
    } 
}*/ 

inline static void allocateOnDemand(IplImage **img, CvSize size, int depth, int channels) 
{ 
    if (*img != NULL) 
     return; 

    *img = cvCreateImage(size, depth, channels); 

    if (*img == NULL) 
    { 
     fprintf(stderr, "Error: Couldn't allocate image. Out of memory?\n"); 
     exit(-1); 
    } 
} 

void clearImage (IplImage *img) 
{ 
    for (int i=0; i<img->imageSize; i++)  
     img->imageData[i] = (char) 0;  
} 

int main() 
{ 
    cap = cvCaptureFromCAM(0);  
    //cap = cvCaptureFromAVI("/home/saif/Desktop/NAO.. the project/jj/Test3.avi"); 

    CvSize frame_size; 

    // Reading the video's frame size 
    frame_size.height = (int) cvGetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT); 
    frame_size.width = (int) cvGetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH);  
    cvNamedWindow("Optical Flow", CV_WINDOW_AUTOSIZE); 

    while(true)  
    { 
     frame = cvQueryFrame(cap);

     if (frame == NULL) 
     {  
      fprintf(stderr, "Error: Hmm. The end came sooner than we thought.\n"); 
      return -1;  
     } 

     // Allocating another image if it is not allocated already.  
     allocateOnDemand(&frame1_1C, frame_size, IPL_DEPTH_8U, 1);  
     cvConvertImage(frame, frame1_1C, 0);  
     allocateOnDemand(&frame1, frame_size, IPL_DEPTH_8U, 3);  
     cvConvertImage(frame, frame1, 0); 

     //Get the second frame of video.  
     frame = cvQueryFrame(cap); 

     if (frame == NULL)
     {
      fprintf(stderr, "Error: Hmm. The end came sooner than we thought.\n");
      return -1;
     }

     allocateOnDemand(&frame2_1C, frame_size, IPL_DEPTH_8U, 1); 
     cvConvertImage(frame, frame2_1C, 0);  
     allocateOnDemand(&frame2, frame_size, IPL_DEPTH_8U, 3);  
     cvConvertImage(frame, frame2, 0); 

     CvSize optical_flow_window = cvSize(5,5);
     // Allocate the scratch images once; calling cvCreateImage on every
     // iteration would leak memory.
     allocateOnDemand(&eig_image, frame_size, IPL_DEPTH_32F, 1);
     allocateOnDemand(&temp_image, frame_size, IPL_DEPTH_32F, 1);

     CvTermCriteria optical_flow_termination_criteria = cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3); 

     // Feature tracking 
     CvPoint2D32f frame1_features[4]; 
     CvPoint2D32f frame2_features[4]; 

     //cvCornerEigenValsAndVecs(eig_image, temp_image, 1);  
     corner_count = 4; 

     cvGoodFeaturesToTrack(frame1_1C, eig_image, temp_image, frame1_features, &corner_count, 0.1, .01, NULL, 5, 1);
     // zero_zone = cvSize(-1,-1) disables the dead region in the middle of the search window.
     cvFindCornerSubPix(frame1_1C, frame1_features, corner_count, optical_flow_window, cvSize(-1,-1), optical_flow_termination_criteria);

     if (corner_count <= 0)  
      printf("\nNo features detected.\n");  
     else  
      printf("\nNumber of features found = %d\n", corner_count); 

     // Lucas-Kanade method.
     char optical_flow_found_feature[20];  
     float optical_flow_feature_error[20]; 

     allocateOnDemand(&pyramid1, frame_size, IPL_DEPTH_8U, 1);  
     allocateOnDemand(&pyramid2, frame_size, IPL_DEPTH_8U, 1); 

     cvCalcOpticalFlowPyrLK(frame1_1C, frame2_1C, pyramid1, pyramid2, frame1_features, frame2_features, corner_count, optical_flow_window, 5, optical_flow_found_feature, NULL, optical_flow_termination_criteria, NULL); 

    /* 
    double sumOfDistancesX = 0;  
    double sumOfDistancesY = 0; 

    int debug = 0; 

    CvFont font1, font2;  
    CvScalar red, green, blue;  
    IplImage* seg_in = NULL;  
    IplImage *seg_out = NULL; 

    allocateOnDemand(&seg_in, frame_size, IPL_DEPTH_8U, 3);  
    allocateOnDemand(&seg_out, frame_size, IPL_DEPTH_8U, 3); 

    clearImage(seg_in);  
    clearImage(seg_in);  

    for(int i=0; i <corner_count; i++) 
    { 

     if (optical_flow_found_feature[i] == 0) 
      continue;  
     p.x = (int) frame1_features[i].x;  
     p.y = (int) frame1_features[i].y;  
     q.x = (int) frame2_features[i].x;  
     q.y = (int) frame2_features[i].y; 
     angle = atan2((double) p.y - q.y, (double) p.x - q.x); 

      sumOfDistancesX += q.x - p.x;  
      sumOfDistancesY += q.y - p.y; 

      //cvRemap(frame2,frame1,averageDistanceX , averageDistanceY,CV_INTER_LINEAR | CV_WARP_FILL_OUTLIERS, cvScalarAll(0));  
     } 
     */ 

     /*  
     int averageDistanceX = sumOfDistancesX/corner_count;  
     int averageDistanceY = sumOfDistancesY/corner_count;  
     l.x = averageDistanceX - q.x;  
     s.y = averageDistanceY - q.y; 
     */ 

#define cvWarpPerspectiveQMatrix cvGetPerspectiveTransform 

     //CvMat* N = cvCreateMat(3,3,CV_32FC1); 

     cvGetPerspectiveTransform(frame2_features, frame1_features, M);
     // NOTE: the two calls below are where the "bad flags" error is raised:
     // cvPerspectiveTransform expects a CvMat of points, and cvWarpPerspective
     // expects images, not raw CvPoint2D32f arrays.
     cvPerspectiveTransform(frame1_features, frame2_features, M);
     cvWarpPerspective(frame2_features, frame1_features, M, CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, cvScalarAll(0));

     cvShowImage("Optical Flow", frame1);  
     cvWaitKey(50); 
    } 

    cvReleaseCapture(&cap);  
    cvReleaseMat(&M);  

    return 0;  
} 

Can you tell us which line is giving you the "bad flags" error? Edit: Oh, and welcome to Stack Overflow! – btown 2010-11-23 05:30:57


The bad flags error starts in cvPerspectiveTransform and cvWarpPerspective. I mean, I get the bad flags error whether I use either one of them or both. – Mario 2010-11-24 13:08:28

Answer


You don't want to subtract the average displacement from the second image; you want to transform (shift) the second image by the average displacement so that it "matches" the first. Which "displacement" you use depends on your situation.

  • If your camera is shaking but otherwise stationary, you want to use the average displacement between the two consecutive frames as the transformation vector for the second frame. For each new frame, compute the displacement between the transformed previous frame and the new frame, and transform the new frame (a sketch of this case follows the list).
  • If your camera is moving as well as shaking (e.g. a helmet-mounted camera on a mountain bike), you first want to find the average displacement between frames over the last several frames, and then shift each new frame by the difference between that average and its displacement from the previous frame.
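A minimal sketch of the first case, in the same OpenCV C API as the question (the feature arrays, corner_count, and optical_flow_found_feature come from the question's code; dx, dy, T, and stabilized are illustrative names, not from the original):

/* Average the displacement of the successfully tracked features. */
double dx = 0, dy = 0;
int tracked = 0;
for (int i = 0; i < corner_count; i++)
{
    if (!optical_flow_found_feature[i])
        continue;
    dx += frame2_features[i].x - frame1_features[i].x;
    dy += frame2_features[i].y - frame1_features[i].y;
    tracked++;
}
if (tracked > 0) { dx /= tracked; dy /= tracked; }

/* 2x3 affine matrix for a pure translation by (-dx, -dy), i.e. shift
   frame2 back so that it lines up with frame1. */
CvMat *T = cvCreateMat(2, 3, CV_32FC1);
cvmSet(T, 0, 0, 1); cvmSet(T, 0, 1, 0); cvmSet(T, 0, 2, -dx);
cvmSet(T, 1, 0, 0); cvmSet(T, 1, 1, 1); cvmSet(T, 1, 2, -dy);

IplImage *stabilized = cvCloneImage(frame2);
cvWarpAffine(frame2, stabilized, T,
             CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll(0));
cvReleaseMat(&T);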

编辑 什么,你基本上需要的选项2,做的是计算帧之间平均移动平均在过去的几帧。这可以通过多种方式来完成,但我建议使用类似卡尔曼滤波器的方法。然后,对于一个新的框架,您可以计算该框架和(修正的)前一个框架之间的移动。从运动中,您可以减去到该点的平均运动量,然后通过该差值移动新帧。