
Stitching and blending two images in OpenCV 2 using SURF feature extraction


This post shares the concrete code for stitching and blending two images with OpenCV 2, for your reference. The details are as follows.

Two files are needed: estimate.cpp and matcher.h (matcher.h is given in the earlier post on robust matching).
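
For reference, here is a minimal sketch of the RobustMatcher interface that estimate.cpp relies on. Only the members actually called further down are declared; the implementations (ratio test, symmetry test and the RANSAC-based cleaning), along with any remaining members, are the ones from the robust matching post and are not reproduced here, so treat everything beyond these call signatures as an assumption.

matcher.h (interface sketch only):

#ifndef MATCHER_H
#define MATCHER_H

#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>

class RobustMatcher {
public:
    // set the feature detector used to find the keypoints
    void setFeatureDetector(cv::Ptr<cv::FeatureDetector>& detect);
    // set the confidence level passed to the RANSAC step
    void setConfidenceLevel(double conf);
    // set the maximum allowed distance to the epipolar line
    void setMinDistanceToEpipolar(double dist);
    // set the nearest-neighbour ratio used in the ratio test
    void setRatio(float rat);
    // match the feature points of two images; fills matches and the two
    // keypoint vectors, and returns the estimated fundamental matrix
    cv::Mat match(cv::Mat& image1, cv::Mat& image2,
        std::vector<cv::DMatch>& matches,
        std::vector<cv::KeyPoint>& keypoints1,
        std::vector<cv::KeyPoint>& keypoints2);
private:
    cv::Ptr<cv::FeatureDetector> detector; // e.g. the SurfFeatureDetector set in main()
    // ...descriptor extractor, ratio, distance and confidence members as in
    // the robust matching post
};

#endif // MATCHER_H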

The header includes in estimate.cpp also need a few additions to compile; the list below is correct, and the program has been run successfully with it.

With using namespace std; and using namespace cv; added, the std:: and cv:: prefixes could be dropped; they are kept in the code below for clarity.

estimate.cpp:

#include <iostream>
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/nonfree/nonfree.hpp>
#include <opencv2/legacy/legacy.hpp>
#include "matcher.h"
using namespace std;
using namespace cv;
int main()
{
// Read the input images (as grayscale)
cv::Mat image1= cv::imread("parliament1.bmp",0);
cv::Mat image2= cv::imread("parliament2.bmp",0);
if (!image1.data || !image2.data)
return 0; 


// Display the input images
cv::namedWindow("Image 1");
cv::imshow("Image 1",image1);
cv::namedWindow("Image 2");
cv::imshow("Image 2",image2);


// Prepare the matcher
RobustMatcher rmatcher;
rmatcher.setConfidenceLevel(0.98);
rmatcher.setMinDistanceToEpipolar(1.0);
rmatcher.setRatio(0.65f);
cv::Ptr<cv::FeatureDetector> pfd= new cv::SurfFeatureDetector(10); // SURF detector with a low Hessian threshold (10)
rmatcher.setFeatureDetector(pfd);


// Match the two images
std::vector<cv::DMatch> matches;
std::vector<cv::KeyPoint> keypoints1, keypoints2;
cv::Mat fundamental= rmatcher.match(image1,image2,matches, keypoints1, keypoints2);


// Draw the matches
cv::Mat imageMatches;
cv::drawMatches(image1,keypoints1, // 1st image and its keypoints
    image2,keypoints2,             // 2nd image and its keypoints
    matches,                       // the matches
    imageMatches,                  // the image produced
    cv::Scalar(255,255,255));      // color of the match lines
cv::namedWindow("Matches");
cv::imshow("Matches",imageMatches);

// Convert the matched keypoints into Point2f
std::vector<cv::Point2f> points1, points2;
for (std::vector<cv::DMatch>::const_iterator it= matches.begin();
    it!= matches.end(); ++it) {

    // Get the position of the left keypoint
    float x= keypoints1[it->queryIdx].pt.x;
    float y= keypoints1[it->queryIdx].pt.y;
    points1.push_back(cv::Point2f(x,y));

    // Get the position of the right keypoint
    x= keypoints2[it->trainIdx].pt.x;
    y= keypoints2[it->trainIdx].pt.y;
    points2.push_back(cv::Point2f(x,y));
}


// Print the number of matched points
std::cout << points1.size() << " " << points2.size() << std::endl;


// Find the homography between image 1 and image 2
std::vector<uchar> inliers(points1.size(),0);
cv::Mat homography= cv::findHomography(
    cv::Mat(points1),cv::Mat(points2), // corresponding points
    inliers,    // output inlier mask
    CV_RANSAC,  // RANSAC method
    1.);        // max reprojection error, in pixels
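
// Note: findHomography above estimates H such that points2 ~ H * points1,
// i.e. H maps image-1 coordinates into the coordinate frame of image 2,
// which is why image 1 is the image warped with it further below.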


// Draw the inlier points
std::vector<cv::Point2f>::const_iterator itPts= points1.begin();
std::vector<uchar>::const_iterator itIn= inliers.begin();
while (itPts!=points1.end()) {

    // draw a circle at each inlier location
    if (*itIn)
        cv::circle(image1,*itPts,3,cv::Scalar(255,255,255),2);

    ++itPts;
    ++itIn;
}


itPts= points2.begin();
itIn= inliers.begin();
while (itPts!=points2.end()) {

    // draw a circle at each inlier location
    if (*itIn)
        cv::circle(image2,*itPts,3,cv::Scalar(255,255,255),2);

    ++itPts;
    ++itIn;
}


// Display the images with the inlier points
cv::namedWindow("Image 1 Homography Points");
cv::imshow("Image 1 Homography Points",image1);
cv::namedWindow("Image 2 Homography Points");
cv::imshow("Image 2 Homography Points",image2);


// Warp image 1 into the coordinate frame of image 2
cv::Mat result;
cv::warpPerspective(image1, // input image
    result,                 // output image
    homography,             // homography mapping image 1 to image 2
    cv::Size(2*image1.cols,image1.rows)); // size of the output image


// Copy image 2 onto the left half of the mosaic
cv::Mat half(result,cv::Rect(0,0,image2.cols,image2.rows));
image2.copyTo(half);


// Display the warped result
cv::namedWindow("After warping");
cv::imshow("After warping",result);


cv::waitKey();
return 0;
}
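
As an optional sanity check that is not part of the original listing, the estimated homography can be applied to points1 with cv::perspectiveTransform and compared against points2; inlier matches should agree to roughly the 1-pixel reprojection threshold passed to findHomography above. A minimal sketch, assuming it is placed in main() right after the findHomography call (std::sqrt additionally requires <cmath>):

// Project the image-1 points with the homography and measure how far
// they land from their matched image-2 points
std::vector<cv::Point2f> projected;
cv::perspectiveTransform(points1, projected, homography);
for (size_t i= 0; i<points1.size(); ++i) {
    if (inliers[i]) { // only inlier matches are expected to agree closely
        float dx= projected[i].x - points2[i].x;
        float dy= projected[i].y - points2[i].y;
        std::cout << "inlier " << i << " reprojection error: "
            << std::sqrt(dx*dx + dy*dy) << std::endl;
    }
}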
