-1

也许问题是,我不擅长英语。 我是openCV的新手。我想知道拼接器合并的区域。这样↓全景图拼接算法

image

merged image

+1

请提供更多详情。 – Piglet

+0

查看[Wiki:autostitch](https://en.wikipedia.org/wiki/AutoStitch)它是如何工作的。当您运行拼接算法时,您将获得每个图像的变换矩阵,以便将其转换为多边形,并将它们的相交/重叠计算为几何问题。 – Spektre

回答

0

如果你知道拍摄图像的先后顺序,您可以按照此代码将您的图像依次拼接在一起。如果顺序未知,那么解决方案会变得更加复杂。此外,此代码是为相同大小的图像而设计的,如果您的相机被移动了,它可能会导致一些错误的结果。请自行实施一些检查以确保结果正确。您可以参考这篇文章“http://ramsrigoutham.com/2012/11/22/panorama-image-stitching-in-opencv/”,以更好地理解其中被调用两次的拼接函数。

#include <stdio.h> 
#include <iostream> 
#include "opencv2/core/core.hpp" 
#include "opencv2/features2d/features2d.hpp" 
#include "opencv2/highgui/highgui.hpp" 
#include "opencv2/nonfree/nonfree.hpp" 
#include "opencv2/calib3d/calib3d.hpp" 
#include "opencv2/imgproc/imgproc.hpp" 
using namespace cv; 
void stitching(cv::Mat&,cv::Mat& ,cv::Mat&); 
int main() 
{   
Mat image1= imread("image1.jpg"); 
Mat image2= imread("image2.jpg"); 
Mat image3= imread("image3.jpg"); 
Mat gray_image1; 
Mat gray_image2; 
Mat gray_image3; 
Mat result1,result2; 
// Convert to Grayscale 
cvtColor(image1, gray_image1, CV_RGB2GRAY); 
cvtColor(image2, gray_image2, CV_RGB2GRAY); 
cvtColor(image3, gray_image3, CV_RGB2GRAY); 

stitching(gray_image1,gray_image2,result1); 
stitching(result1,gray_image3,result2); 
cv::imshow("stitched image"result2); 
cv::WaitKey(0); 


} 


    // Stitch im1 onto im2 by estimating a homography from SURF matches.
    //
    // im1       : "object" image, warped into im2's coordinate frame.
    // im2       : "scene" image, copied unchanged into the left of the result.
    // stitch_im : output canvas of size (im1.cols + im2.cols) x im1.rows.
    //
    // Assumes both images are roughly the same size (see the answer text).
    void stitching(cv::Mat& im1, cv::Mat& im2, cv::Mat& stitch_im)
    {
        // SURF feature detection (nonfree module).
        int minHessian = 400;
        SurfFeatureDetector detector(minHessian);

        std::vector<KeyPoint> keypoints_object, keypoints_scene;
        detector.detect(im1, keypoints_object);
        detector.detect(im2, keypoints_scene);

        // Descriptor extraction.
        SurfDescriptorExtractor extractor;
        Mat descriptors_object, descriptors_scene;
        extractor.compute(im1, keypoints_object, descriptors_object);
        extractor.compute(im2, keypoints_scene, descriptors_scene);

        // Match descriptors (FLANN, nearest neighbour).
        FlannBasedMatcher matcher;
        std::vector<DMatch> matches;
        matcher.match(descriptors_object, descriptors_scene, matches);

        // Find the smallest match distance to use as a quality threshold.
        double max_dist = 0;
        double min_dist = 100;
        for (int i = 0; i < descriptors_object.rows; i++)
        {
            double dist = matches[i].distance;
            if (dist < min_dist) min_dist = dist;
            if (dist > max_dist) max_dist = dist;
        }

        // Keep only "good" matches: distance within 3x the best distance.
        std::vector<DMatch> good_matches;
        for (int i = 0; i < descriptors_object.rows; i++)
        {
            if (matches[i].distance < 3 * min_dist)
            {
                good_matches.push_back(matches[i]);
            }
        }

        // findHomography needs at least 4 point correspondences; bail out
        // early instead of letting it throw on degenerate input.
        if (good_matches.size() < 4)
        {
            std::cerr << "stitching: not enough good matches ("
                      << good_matches.size() << ") to estimate homography."
                      << std::endl;
            return;
        }

        // Collect matched point coordinates (size_t index avoids the
        // signed/unsigned comparison warning of the original).
        std::vector<Point2f> obj;
        std::vector<Point2f> scene;
        for (size_t i = 0; i < good_matches.size(); i++)
        {
            obj.push_back(keypoints_object[good_matches[i].queryIdx].pt);
            scene.push_back(keypoints_scene[good_matches[i].trainIdx].pt);
        }

        // H maps im1 coordinates into im2's frame (RANSAC rejects outliers).
        Mat H = findHomography(obj, scene, CV_RANSAC);

        // Warp im1 onto a canvas wide enough for both images.
        warpPerspective(im1, stitch_im, H, cv::Size(im1.cols + im2.cols, im1.rows));

        // BUG FIX: the original never placed im2 into the result, so the
        // output contained only the warped im1. Copy the scene image into
        // the left region, as in the referenced article's implementation.
        cv::Mat half(stitch_im, cv::Rect(0, 0, im2.cols, im2.rows));
        im2.copyTo(half);
    }