I have been working from the SURF feature detection example in the EMGU CV library: EMGU CV SURF image matching.

So far it has worked surprisingly well; I can detect the matched object between two given images. The problem comes when the images do not match.

I have looked for support on the forums, but they are down from my location. Does anyone know which parameters determine whether two images match? When I test with two non-matching images, the code still carries on as if there were a match, and draws blurry thick red lines at random locations on the image even though there is no match.

If there is no match, I want to break out of the code and not continue.

Appendix:

    // using directives added for completeness (standard EMGU CV 2.x namespaces)
    using System;
    using System.Diagnostics;
    using System.Drawing;
    using Emgu.CV;
    using Emgu.CV.Features2D;
    using Emgu.CV.GPU;
    using Emgu.CV.Structure;
    using Emgu.CV.UI;
    using Emgu.CV.Util;

    static void Run()
    {
        Image<Gray, Byte> modelImage = new Image<Gray, byte>("HatersGonnaHate.png");
        Image<Gray, Byte> observedImage = new Image<Gray, byte>("box_in_scene.png");
        Stopwatch watch;
        HomographyMatrix homography = null;

        SURFDetector surfCPU = new SURFDetector(500, false);

        VectorOfKeyPoint modelKeyPoints;
        VectorOfKeyPoint observedKeyPoints;
        Matrix<int> indices;
        Matrix<float> dist;
        Matrix<byte> mask;

        if (GpuInvoke.HasCuda)
        {
            GpuSURFDetector surfGPU = new GpuSURFDetector(surfCPU.SURFParams, 0.01f);
            // extract features from the object (model) image
            using (GpuImage<Gray, Byte> gpuModelImage = new GpuImage<Gray, byte>(modelImage))
            using (GpuMat<float> gpuModelKeyPoints = surfGPU.DetectKeyPointsRaw(gpuModelImage, null))
            using (GpuMat<float> gpuModelDescriptors = surfGPU.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
            using (GpuBruteForceMatcher matcher = new GpuBruteForceMatcher(GpuBruteForceMatcher.DistanceType.L2))
            {
                modelKeyPoints = new VectorOfKeyPoint();
                surfGPU.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);
                watch = Stopwatch.StartNew();

                // extract features from the observed image
                using (GpuImage<Gray, Byte> gpuObservedImage = new GpuImage<Gray, byte>(observedImage))
                using (GpuMat<float> gpuObservedKeyPoints = surfGPU.DetectKeyPointsRaw(gpuObservedImage, null))
                using (GpuMat<float> gpuObservedDescriptors = surfGPU.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))
                using (GpuMat<int> gpuMatchIndices = new GpuMat<int>(gpuObservedDescriptors.Size.Height, 2, 1))
                using (GpuMat<float> gpuMatchDist = new GpuMat<float>(gpuMatchIndices.Size, 1))
                {
                    observedKeyPoints = new VectorOfKeyPoint();
                    surfGPU.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);

                    // k-nearest-neighbour match (k = 2) between observed and model descriptors
                    matcher.KnnMatch(gpuObservedDescriptors, gpuModelDescriptors, gpuMatchIndices, gpuMatchDist, 2, null);

                    indices = new Matrix<int>(gpuMatchIndices.Size);
                    dist = new Matrix<float>(indices.Size);
                    gpuMatchIndices.Download(indices);
                    gpuMatchDist.Download(dist);

                    mask = new Matrix<byte>(dist.Rows, 1);
                    mask.SetValue(255);

                    // drop matches whose best distance is not clearly better than the second best
                    Features2DTracker.VoteForUniqueness(dist, 0.8, mask);

                    int nonZeroCount = CvInvoke.cvCountNonZero(mask);
                    if (nonZeroCount >= 4)
                    {
                        nonZeroCount = Features2DTracker.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                        if (nonZeroCount >= 4)
                            homography = Features2DTracker.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 3);
                    }

                    watch.Stop();
                }
            }
        }
        else
        {
            // extract features from the object (model) image
            modelKeyPoints = surfCPU.DetectKeyPointsRaw(modelImage, null);
            //MKeyPoint[] kpts = modelKeyPoints.ToArray();
            Matrix<float> modelDescriptors = surfCPU.ComputeDescriptorsRaw(modelImage, null, modelKeyPoints);

            watch = Stopwatch.StartNew();

            // extract features from the observed image
            observedKeyPoints = surfCPU.DetectKeyPointsRaw(observedImage, null);
            Matrix<float> observedDescriptors = surfCPU.ComputeDescriptorsRaw(observedImage, null, observedKeyPoints);

            BruteForceMatcher matcher = new BruteForceMatcher(BruteForceMatcher.DistanceType.L2F32);
            matcher.Add(modelDescriptors);
            int k = 2;
            indices = new Matrix<int>(observedDescriptors.Rows, k);
            dist = new Matrix<float>(observedDescriptors.Rows, k);
            matcher.KnnMatch(observedDescriptors, indices, dist, k, null);

            mask = new Matrix<byte>(dist.Rows, 1);
            mask.SetValue(255);

            // drop matches whose best distance is not clearly better than the second best
            Features2DTracker.VoteForUniqueness(dist, 0.8, mask);

            int nonZeroCount = CvInvoke.cvCountNonZero(mask);
            if (nonZeroCount >= 4)
            {
                nonZeroCount = Features2DTracker.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                if (nonZeroCount >= 4)
                    homography = Features2DTracker.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 3);
            }

            watch.Stop();
        }

        // draw the matched keypoints
        Image<Bgr, Byte> result = Features2DTracker.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
            indices, new Bgr(255, 255, 255), new Bgr(255, 255, 255), mask, Features2DTracker.KeypointDrawType.NOT_DRAW_SINGLE_POINTS);

        #region draw the projected region on the image
        if (homography != null)
        {
            // draw a rectangle along the projected model
            Rectangle rect = modelImage.ROI;
            PointF[] pts = new PointF[] {
                new PointF(rect.Left, rect.Bottom),
                new PointF(rect.Right, rect.Bottom),
                new PointF(rect.Right, rect.Top),
                new PointF(rect.Left, rect.Top)};
            homography.ProjectPoints(pts);

            result.DrawPolyline(Array.ConvertAll<PointF, Point>(pts, Point.Round), true, new Bgr(Color.Red), 5);
        }
        #endregion

        ImageViewer.Show(result, String.Format("Matched using {0} in {1} milliseconds", GpuInvoke.HasCuda ? "GPU" : "CPU", watch.ElapsedMilliseconds));
    }
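One way to get the early exit asked for above (my own sketch, not part of the original sample): homography is only assigned when at least 4 matches survive both voting stages, so a null check after matching, placed before any drawing, can serve as the no-match signal.

    // Sketch: bail out instead of drawing when no reliable match was found.
    // homography remains null unless >= 4 filtered matches survived above.
    if (homography == null)
    {
        Console.WriteLine("No match - skipping this image pair.");
        return; // leave Run() before anything is drawn
    }

As the comments below show, a spurious homography can still be found for non-matching images, so this check may need to be combined with a stricter uniqueness threshold.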

Additional note: to be clearer, when the two images do not match I want execution to stop and move on to check another image. – user1246856 2012-03-16 22:01:45

Update: I think I solved it. I just reduced the uniqueness threshold in Features2DTracker.VoteForUniqueness(dist, 0.8, mask) from 0.8 to 0.5, and it works fine. – user1246856 2012-03-17 09:42:24
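In code, the asker's fix is a stricter ratio test; a lower threshold means the best match must beat the second-best candidate by a wider margin before it is kept (a sketch of just the changed line):

    // stricter uniqueness: keep a match only if its best distance is
    // less than 0.5x the distance of the second-best candidate
    Features2DTracker.VoteForUniqueness(dist, 0.5, mask);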

Could you write up how you solved it as an answer? Thanks. – 2012-03-28 08:20:29

Answer

I do not know of a single method that works for every image sequence or every kind of geometric deformation.

I suggest you compute the PSNR between the two images and study a tolerance threshold over your image sequence.
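A minimal sketch of that suggestion using the same EMGU CV 2.x Image API as the question; the helper name Psnr, the grayscale inputs, and the 20 dB cut-off are my assumptions to illustrate the idea, not part of the answer:

    // Sketch: PSNR between two equally-sized 8-bit grayscale images (assumed helper).
    static double Psnr(Image<Gray, Byte> a, Image<Gray, Byte> b)
    {
        // convert to float so squaring the differences cannot overflow
        Image<Gray, float> diff = a.Convert<Gray, float>().AbsDiff(b.Convert<Gray, float>());
        double mse = diff.Mul(diff).GetAverage().Intensity; // mean squared error
        if (mse <= 1e-10)
            return double.PositiveInfinity;                 // (near-)identical images
        return 10.0 * Math.Log10(255.0 * 255.0 / mse);      // peak value 255 for 8-bit
    }

    // usage: tune the tolerance threshold against your own image sequence
    // bool looksSimilar = Psnr(modelImage, observedImage) > 20.0;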