Hi guys. Changing the displacement drawn over a sequence of frames

I have written some code that traces the motion of points of interest across an .avi video file. Here is the code:

#include "opencv2/video/tracking.hpp" 
#include<opencv2/core/core.hpp> 
#include<opencv2/highgui/highgui.hpp> 
#include<opencv2/imgproc/imgproc.hpp> 
#include<iostream> 

using namespace cv; 
using namespace std; 

int main() { 
VideoCapture capture("video.avi"); 


if (!capture.isOpened()) { 
    cout << "ERROR OPENING VIDEO\n\n"; 
    return(0); 
} 

double rate = capture.get(CV_CAP_PROP_FPS); 
unsigned int numberFrames = (unsigned int) capture.get(CV_CAP_PROP_FRAME_COUNT); 
int width = (int) capture.get(CV_CAP_PROP_FRAME_WIDTH); 
int height = (int) capture.get(CV_CAP_PROP_FRAME_HEIGHT); 
unsigned int codec = (unsigned int) capture.get(CV_CAP_PROP_FOURCC); 

Mat currentGray; 
Mat previousGray; 
vector<Point2f> points[2]; 
vector<Point2f> initial; 
vector<Point2f> features; 

vector<uchar> status; 
vector<float> error; 

int maxCorners = 500; // maximum number of features to detect 
double qualityLevel = 0.01; // quality level for feature detection 
double minDistance = 10; // min distance between two points 

Mat frame, output; 

VideoWriter createdVideo("output.avi", codec, rate, Size(width,height), 1); 

for (unsigned frameCounter = 0; frameCounter < numberFrames; frameCounter++) { 

    capture >> frame; 

    if (frame.empty()) 
     break; 

    imshow("Video", frame); 
    cvtColor(frame, currentGray, CV_BGR2GRAY); 
    frame.copyTo(output); 


    if (points[0].size() <= 10){ 
     goodFeaturesToTrack(currentGray, // the image 
      features, // the output detected features 
      maxCorners, // the maximum number of features 
      qualityLevel, // quality level 
      minDistance); // min distance between two features 

     // add the detected features to 
     // the currently tracked features 
     points[0].insert(points[0].end(), 
      features.begin(), features.end()); 
     initial.insert(initial.end(), 
      features.begin(), features.end()); 
    } 

    if (previousGray.empty()) 
     currentGray.copyTo(previousGray); 

    calcOpticalFlowPyrLK(previousGray, currentGray, // 2 consecutive images 
     points[0], // input point positions in first image 
     points[1], // output point positions in the 2nd image 
     status, // tracking success 
     error); // tracking error 

    int k = 0; 
    for (int i = 0; i < points[1].size(); i++) { 
     // do we keep this point? 
     // only if it was tracked successfully and has moved 
     if (status[i] && 
      (abs(points[0][i].x - points[1][i].x) + 
       abs(points[0][i].y - points[1][i].y)) > 2) { 

      initial[k] = initial[i]; 
      points[1][k++] = points[1][i]; 
     } 
    } 

    points[1].resize(k); 
    initial.resize(k); 


    for (int i = 0; i < points[1].size(); i++) { 
     // draw line and circle 
     line(output, 
      initial[i], // initial position 
      points[1][i],// new position 
      Scalar(0, 255, 0), 2); 
     circle(output, 
      points[1][i], 
      2, 
      Scalar(0, 0, 255), -1); 

    } 

    std::swap(points[1], points[0]); 
    cv::swap(previousGray, currentGray); 

    createdVideo.write(output); 

} 

waitKey(0); 
return(0); 
} 

My code tracks the displacement of the points from frame to frame, but it keeps drawing from their positions in the first frame until the video ends. Instead of keeping the first-frame positions, I would like them to change over time, i.e. replace the first positions with the positions from the second frame, and so on, so that those huge lines no longer appear and only the displacement between two consecutive frames is drawn.

Is it possible to do this?

Answer


Since you only want the point positions from two frames, just use two vectors: one holding the keypoints from the current frame and one holding the keypoints from the previous frame. At the end of each iteration, simply set the previous points to the current points. In pseudocode:

// first frame 
// detect keypoints 
prev_frame_points = keypoints 

// rest of the frames 
for frame in frames: 
    // detect keypoints 
    curr_frame_points = keypoints 
    line(..., prev_frame_points, curr_frame_points, ...) 
    prev_frame_points = curr_frame_points
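
Applied to the code in the question, this means dropping the initial vector and drawing each line from points[0][i] (the point's position in the previous frame) to points[1][i] (its position in the current frame). The following is a minimal C++ sketch of that change, not a drop-in replacement: it reuses the question's parameters (500 corners, quality level 0.01, minimum distance 10) and the old OpenCV 2.x constants, and "video.avi" is just a placeholder file name.

#include "opencv2/video/tracking.hpp" 
#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <opencv2/imgproc/imgproc.hpp> 
#include <cmath> 
#include <vector> 

using namespace cv; 
using namespace std; 

int main() { 
    VideoCapture capture("video.avi"); // placeholder file name 
    if (!capture.isOpened()) 
        return 0; 

    Mat frame, output, currentGray, previousGray; 
    vector<Point2f> points[2]; // [0] = previous frame, [1] = current frame 
    vector<uchar> status; 
    vector<float> error; 

    for (;;) { 
        capture >> frame; 
        if (frame.empty()) 
            break; 

        cvtColor(frame, currentGray, CV_BGR2GRAY); 
        frame.copyTo(output); 

        // re-detect features whenever too few points survive 
        if (points[0].size() <= 10) { 
            vector<Point2f> features; 
            goodFeaturesToTrack(currentGray, features, 500, 0.01, 10); 
            points[0].insert(points[0].end(), features.begin(), features.end()); 
        } 

        if (previousGray.empty()) 
            currentGray.copyTo(previousGray); 

        calcOpticalFlowPyrLK(previousGray, currentGray, 
            points[0], points[1], status, error); 

        // keep only points that were tracked and moved more than 2 pixels 
        int k = 0; 
        for (int i = 0; i < (int)points[1].size(); i++) { 
            if (status[i] && 
                (abs(points[0][i].x - points[1][i].x) + 
                 abs(points[0][i].y - points[1][i].y)) > 2) { 
                points[0][k] = points[0][i];   // position in the previous frame 
                points[1][k++] = points[1][i]; // position in the current frame 
            } 
        } 
        points[0].resize(k); 
        points[1].resize(k); 

        // draw only the displacement between the two consecutive frames 
        for (int i = 0; i < (int)points[1].size(); i++) { 
            line(output, points[0][i], points[1][i], Scalar(0, 255, 0), 2); 
            circle(output, points[1][i], 2, Scalar(0, 0, 255), -1); 
        } 

        imshow("Video", output); 
        if (waitKey(30) >= 0) 
            break; 

        // the current frame becomes the previous one for the next iteration 
        std::swap(points[1], points[0]); 
        cv::swap(previousGray, currentGray); 
    } 
    return 0; 
} 

The only substantive difference from the original loop is that the surviving previous-frame positions are written back into points[0] before drawing, so each green segment spans exactly one frame interval; the swap at the end then turns the current points into the previous points for the next iteration, just like the last line of the pseudocode above.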