
A method for filtering OpenCV feature-matching points

#include <opencv2/opencv.hpp>
#include <opencv2/xfeatures2d.hpp>   // SURF lives in the opencv_contrib modules
#include <cmath>
#include <iostream>
#include <vector>

using namespace cv;
using namespace cv::xfeatures2d;
using namespace std;

void alignImages(Mat &im1, Mat &im2, Mat &im1Reg, Mat &h)
{
    // Detect SURF keypoints and compute descriptors for both images;
    // the Hessian threshold of 1000 keeps only fairly strong features.
    vector<KeyPoint> kp1, kp2;
    Ptr<SURF> m_surf = SURF::create(1000);
    Mat descriptors_1, descriptors_2;
    m_surf->detectAndCompute(im1, Mat(), kp1, descriptors_1);
    m_surf->detectAndCompute(im2, Mat(), kp2, descriptors_2);
    // Brute-force matching with the L2 norm (suitable for SURF's float
    // descriptors), keeping the two nearest neighbors for the ratio test below.
    BFMatcher matcher(NORM_L2);
    Mat img_matches;
    vector<vector<DMatch>> matches;
    vector<DMatch> bestMatches;
    matcher.knnMatch(descriptors_1, descriptors_2, matches, 2);
    cout << "matches size: " << (int)matches.size() << endl;
    // Lowe's ratio test: keep a match only if its best candidate is clearly
    // better than the second best.
    for (int i = 0; i < (int)matches.size(); i++)
    {
        if (matches[i][0].distance < 0.8 * matches[i][1].distance)
        {
            bestMatches.push_back(matches[i][0]);
        }
    }

    float angle1 = 0;
    float angle2 = 0;
    float oneToMore = 0;
    int k = 0;
    // Pair each candidate match with a state flag:
    // 0 = unmarked, 1 = provisional supporter, 2 = already accepted.
    struct keypointFlag{
        DMatch match;
        int flag;
    };
    vector<keypointFlag> initFlag(bestMatches.size());
    for (size_t i = 0; i < bestMatches.size(); i++)
    {
        initFlag[i].match = bestMatches[i];
        initFlag[i].flag = 0;
    }
    vector<DMatch> goodMatches;
    for (size_t i = 0; i < bestMatches.size(); i++)
    {
        k = 0;
        for (size_t j = 0; j < bestMatches.size(); j++)
        {
            if (i != j && initFlag[i].flag != 2)
            {
                // Pixel distance between the two query keypoints.
                oneToMore = sqrt(pow(kp1[initFlag[i].match.queryIdx].pt.x - kp1[initFlag[j].match.queryIdx].pt.x, 2)
                        + pow(kp1[initFlag[i].match.queryIdx].pt.y - kp1[initFlag[j].match.queryIdx].pt.y, 2));
                // Slope angle of each match's displacement vector
                // (atan saturates at pi/2 when the x difference is zero).
                angle1 = atan((kp1[initFlag[i].match.queryIdx].pt.y - kp2[initFlag[i].match.trainIdx].pt.y)
                        / (kp1[initFlag[i].match.queryIdx].pt.x - kp2[initFlag[i].match.trainIdx].pt.x));
                angle2 = atan((kp1[initFlag[j].match.queryIdx].pt.y - kp2[initFlag[j].match.trainIdx].pt.y)
                        / (kp1[initFlag[j].match.queryIdx].pt.x - kp2[initFlag[j].match.trainIdx].pt.x));
                // A neighbor within 70 px whose slope is nearly equal supports match i.
                if (oneToMore < 70 && abs(angle1 - angle2) < 0.1)
                {
                    initFlag[j].flag = 1;
                }
            }
        }
        // Count the supporters of match i.
        for (size_t j = 0; j < bestMatches.size(); j++)
        {
            if (i != j && initFlag[j].flag == 1)
            {
                k++;
            }
        }
        // A match with more than 4 consistent neighbors is accepted together
        // with its supporters, skipping supporters already in goodMatches.
        if (k > 4)
        {
            goodMatches.push_back(initFlag[i].match);
            initFlag[i].flag = 2;
            for (size_t j = 0; j < bestMatches.size(); j++)
            {
                if (initFlag[j].flag == 1)
                {
                    int flagTmp = 1;
                    for (size_t m = 0; m < goodMatches.size(); m++)
                    {
                        if (kp1[initFlag[j].match.queryIdx].pt.x == kp1[goodMatches[m].queryIdx].pt.x
                            && kp1[initFlag[j].match.queryIdx].pt.y == kp1[goodMatches[m].queryIdx].pt.y)
                        {
                            flagTmp = 0;
                            break;
                        }
                    }
                    if (flagTmp == 1)
                    {
                        goodMatches.push_back(initFlag[j].match);
                        initFlag[j].flag = 0;
                    }
                }
            }
        }
        // Reset provisional flags before examining the next match.
        for (size_t j = 0; j < bestMatches.size(); j++)
        {
            if (initFlag[j].flag == 1)
                initFlag[j].flag = 0;
        }
    }
    cout << "goodMatches size: " << goodMatches.size() << endl;

    // Visualize the filtered matches.
    drawMatches(im1, kp1, im2, kp2, goodMatches, img_matches, Scalar::all(-1), Scalar::all(-1),
                vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
    imshow("Matches", img_matches);
    imwrite("../match.jpg", img_matches);

    // Collect the matched point coordinates and build identity index matches
    // so the inliers can be redrawn later.
    int ptCount = (int)goodMatches.size();
    vector<Point2f> m_LeftInlier(ptCount);
    vector<Point2f> m_RightInlier(ptCount);
    vector<DMatch> m_InlierMatches(ptCount);
    for (int i = 0; i < ptCount; i++)
    {
        m_LeftInlier[i] = kp1[goodMatches[i].queryIdx].pt;
        m_RightInlier[i] = kp2[goodMatches[i].trainIdx].pt;
        m_InlierMatches[i].queryIdx = i;
        m_InlierMatches[i].trainIdx = i;
    }
    cout << "inlier count: " << ptCount << endl;

    // Convert the inlier points back to KeyPoints so they can be drawn.
    vector<KeyPoint> key1;
    vector<KeyPoint> key2;
    KeyPoint::convert(m_LeftInlier, key1);
    KeyPoint::convert(m_RightInlier, key2);
    Mat OutImage;
    drawMatches(im1, key1, im2, key2, m_InlierMatches, OutImage, CV_RGB(255, 0, 0), CV_RGB(0, 0, 255));
    imwrite("final.jpg", OutImage);

    // Estimate a full affine transform between the inlier sets.
    // (estimateRigidTransform is deprecated in newer OpenCV;
    // estimateAffine2D / estimateAffinePartial2D are its replacements.)
    Mat R = estimateRigidTransform(m_LeftInlier, m_RightInlier, true);
    //Mat R = estimateAffine2D(m_LeftInlier, m_RightInlier);
    if (R.empty())
    {
        cout << "transform estimation failed" << endl;
        return;
    }
    cout << R << endl;

    // Recover the scale from the first column, divide it out, and embed the
    // 2x3 result into a 3x3 matrix H.
    double s = sqrt(R.at<double>(0, 0) * R.at<double>(0, 0) + R.at<double>(1, 0) * R.at<double>(1, 0));
    cout << "s: " << s << endl;
    Mat H = Mat(3, 3, R.type());
    H.at<double>(0, 0) = R.at<double>(0, 0) / s;
    H.at<double>(0, 1) = R.at<double>(0, 1) / s;
    H.at<double>(0, 2) = R.at<double>(0, 2);

    H.at<double>(1, 0) = R.at<double>(1, 0) / s;
    H.at<double>(1, 1) = R.at<double>(1, 1) / s;
    H.at<double>(1, 2) = R.at<double>(1, 2);

    H.at<double>(2, 0) = 0.0;
    H.at<double>(2, 1) = 0.0;
    H.at<double>(2, 2) = 1.0;
    cout << H << endl;
    // L2-normalizing H rescales the matrix but represents the same
    // projective transform.
    normalize(H, H, 1, 0, NORM_L2, -1);
    cout << H << endl;

    // Map the corners of im1 through H to find the warped image extent.
    vector<Point2f> obj_corners(4);
    obj_corners[0] = Point(0, 0);
    obj_corners[1] = Point(im1.cols, 0);
    obj_corners[2] = Point(im1.cols, im1.rows);
    obj_corners[3] = Point(0, im1.rows);
    vector<Point2f> scene_corners(4);
    perspectiveTransform(obj_corners, scene_corners, H);

    cout << "scene_corners: " << scene_corners << endl;

    // Bounding-box size of the warped image.
    int width1 = max(abs(scene_corners[0].x - scene_corners[2].x),
                     abs(scene_corners[1].x - scene_corners[3].x));
    int height1 = max(abs(scene_corners[0].y - scene_corners[2].y),
                      abs(scene_corners[1].y - scene_corners[3].y));

    // Shift the warped corners so the bounding box starts at the origin.
    float origin_x = min(min(scene_corners[0].x, scene_corners[2].x),
                         min(scene_corners[1].x, scene_corners[3].x));
    float origin_y = min(min(scene_corners[0].y, scene_corners[2].y),
                         min(scene_corners[1].y, scene_corners[3].y));
    for (int i = 0; i < 4; i++)
    {
        scene_corners[i].x -= origin_x;
        scene_corners[i].y -= origin_y;
    }
    // Recompute the transform from the original corners to the shifted ones.
    Mat H1 = getPerspectiveTransform(obj_corners, scene_corners);
    normalize(H1, H1, 1, 0, NORM_L2, -1);

    // Warp the inlier points and the image itself with the shifted transform.
    vector<Point2f> warp_m_leftInlier;
    vector<KeyPoint> L_key;
    perspectiveTransform(m_LeftInlier, warp_m_leftInlier, H1);
    KeyPoint::convert(warp_m_leftInlier, L_key);
    warpPerspective(im1, im1Reg, H1, Size(width1, height1),
                    INTER_NEAREST | WARP_FILL_OUTLIERS, BORDER_CONSTANT, Scalar::all(255));
    imwrite("imageturn.jpg", im1Reg);

    Mat OutImage1;
    drawMatches(im1Reg, L_key, im2, key2, m_InlierMatches, OutImage1, CV_RGB(255, 0, 0), CV_RGB(0, 0, 255));
    imwrite("OutImage1.jpg", OutImage1);

    // Alternative: estimate a full homography with RANSAC and use it to warp,
    // writing the result into the output parameter h.
    // h = findHomography(points1, points2, RANSAC);
    // warpPerspective(im1, im1Reg, h, im2.size());
}

The basic idea: use SURF for feature detection and brute-force matching for a coarse match, then filter the coarse matches using density distribution together with approximately equal slopes. That is, pick any match as an anchor point, and for every other match whose keypoint lies within 70 pixels of the anchor's, check whether its displacement slope is approximately equal to the anchor's; if so, mark it as a candidate best point. When more than 4 such points can be found around the anchor, the whole group is accepted as a set of best matches and pushed into a newly created vector. Each point is examined in turn until all best points have been selected. In repeated experiments on different images, the accuracy can reach 100%.
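
For completeness, here is a minimal sketch of a driver for the function above. The main function, the file names 1.jpg and 2.jpg, and the grayscale loading are illustrative assumptions, not part of the original post:

#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;

// Defined in the listing above.
void alignImages(Mat &im1, Mat &im2, Mat &im1Reg, Mat &h);

int main()
{
    // Hypothetical input paths; substitute your own image pair.
    Mat im1 = imread("1.jpg", IMREAD_GRAYSCALE);
    Mat im2 = imread("2.jpg", IMREAD_GRAYSCALE);
    if (im1.empty() || im2.empty())
    {
        std::cerr << "failed to load input images" << std::endl;
        return 1;
    }
    Mat im1Reg, h;
    alignImages(im1, im2, im1Reg, h);
    waitKey(0); // keep the "Matches" window open until a key is pressed
    return 0;
}

Note that the function displays and writes several intermediate images (match.jpg, final.jpg, imageturn.jpg, OutImage1.jpg), so it expects a writable working directory and a GUI-capable OpenCV build with the xfeatures2d contrib module.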
