I am using Emgu CV's SURF features to recognize similar objects in images.
The resulting image is drawn on the right, showing all the key points found in both images, the similar points (which are what I want), and a rectangle (usually a rectangle, sometimes just a line) that covers the similar points.
The problem is that the similar points are visible in the image, but they are not saved in the format I want. In fact, they are stored in a VectorOfKeyPoint object, which (as far as I can tell) just holds a pointer and other memory information about where the points live. This means I cannot get the similar points as pairs, for example:
((img1X, img1Y), (img2X, img2Y))
That is what I am looking for, so that I can use the points later. Right now I can only see the points in the image above, but I cannot get them as pairs.
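To make this concrete, the kind of container I would like to end up with looks roughly like this (hypothetical, just to illustrate the shape; nothing in the example code below produces it):
// One entry per good match: the point in the first image and the matching point in the second image.
List<Tuple<PointF, PointF>> matchedPairs = new List<Tuple<PointF, PointF>>();
// e.g. matchedPairs[0] == ((img1X, img1Y), (img2X, img2Y))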
The code I am using is the example from Emgu CV.
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Drawing;
using System.Runtime.InteropServices;
using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.Features2D;
using Emgu.CV.Structure;
using Emgu.CV.Util;
#if !__IOS__
using Emgu.CV.Cuda;
#endif
using Emgu.CV.XFeatures2D;
namespace FirstEmgu
{
public static class DrawMatches
{
private static void FindMatch(Mat modelImage, Mat observedImage, out long matchTime, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, VectorOfVectorOfDMatch matches, out Mat mask, out Mat homography)
{
int k = 2;
double uniquenessThreshold = 0.8;
double hessianThresh = 300;
Stopwatch watch;
homography = null;
modelKeyPoints = new VectorOfKeyPoint();
observedKeyPoints = new VectorOfKeyPoint();
#if !__IOS__
if (CudaInvoke.HasCuda)
{
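// GPU path: detect SURF key points and compute descriptors on the GPU, then match them with a brute-force matcher.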
CudaSURF surfCuda = new CudaSURF((float)hessianThresh);
using (GpuMat gpuModelImage = new GpuMat(modelImage))
using (GpuMat gpuModelKeyPoints = surfCuda.DetectKeyPointsRaw(gpuModelImage, null))
using (GpuMat gpuModelDescriptors = surfCuda.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
using (CudaBFMatcher matcher = new CudaBFMatcher(DistanceType.L2))
{
surfCuda.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);
watch = Stopwatch.StartNew();
using (GpuMat gpuObservedImage = new GpuMat(observedImage))
using (GpuMat gpuObservedKeyPoints = surfCuda.DetectKeyPointsRaw(gpuObservedImage, null))
using (GpuMat gpuObservedDescriptors = surfCuda.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))
{
matcher.KnnMatch(gpuObservedDescriptors, gpuModelDescriptors, matches, k);
surfCuda.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);
mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
mask.SetTo(new MCvScalar(255));
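// Reject ambiguous matches: keep a match only if its best distance is clearly better than the second-best candidate (ratio test).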
Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);
int nonZeroCount = CvInvoke.CountNonZero(mask);
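// At least 4 surviving correspondences are required to estimate a homography.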
if (nonZeroCount >= 4)
{
nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
matches, mask, 1.5, 20);
if (nonZeroCount >= 4)
homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
observedKeyPoints, matches, mask, 2);
}
}
watch.Stop();
}
}
else
#endif
{
using (UMat uModelImage = modelImage.ToUMat(AccessType.Read))
using (UMat uObservedImage = observedImage.ToUMat(AccessType.Read))
{
SURF surfCPU = new SURF(hessianThresh);
UMat modelDescriptors = new UMat();
surfCPU.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false);
watch = Stopwatch.StartNew();
UMat observedDescriptors = new UMat();
surfCPU.DetectAndCompute(uObservedImage, null, observedKeyPoints, observedDescriptors, false);
BFMatcher matcher = new BFMatcher(DistanceType.L2);
matcher.Add(modelDescriptors);
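// For each observed descriptor, find the k (= 2) nearest model descriptors.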
matcher.KnnMatch(observedDescriptors, matches, k, null);
mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
mask.SetTo(new MCvScalar(255));
Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);
int nonZeroCount = CvInvoke.CountNonZero(mask);
if (nonZeroCount >= 4)
{
nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
matches, mask, 1.5, 20);
if (nonZeroCount >= 4)
homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
observedKeyPoints, matches, mask, 2);
}
watch.Stop();
}
}
matchTime = watch.ElapsedMilliseconds;
}
public static Mat Draw(Mat modelImage, Mat observedImage, out long matchTime)
{
Mat homography;
VectorOfKeyPoint modelKeyPoints;
VectorOfKeyPoint observedKeyPoints;
using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
{
Mat mask;
FindMatch(modelImage, observedImage, out matchTime, out modelKeyPoints, out observedKeyPoints, matches,
out mask, out homography);
Mat result = new Mat();
Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
matches, result, new MCvScalar(255, 255, 255), new MCvScalar(255, 255, 255), mask);
#region draw the projected region on the image
if (homography != null)
{
Rectangle rect = new Rectangle(Point.Empty, modelImage.Size);
PointF[] pts = new PointF[]
{
new PointF(rect.Left, rect.Bottom),
new PointF(rect.Right, rect.Bottom),
new PointF(rect.Right, rect.Top),
new PointF(rect.Left, rect.Top)
};
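// Project the corners of the model image into the observed image using the homography.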
pts = CvInvoke.PerspectiveTransform(pts, homography);
Point[] points = Array.ConvertAll<PointF, Point>(pts, Point.Round);
using (VectorOfPoint vp = new VectorOfPoint(points))
{
CvInvoke.Polylines(result, vp, true, new MCvScalar(255, 0, 0, 255), 5);
}
}
#endregion
return result;
}
}
public static Point[] FindPoints(Mat modelImage, Mat observedImage, out long matchTime)
{
Mat homography;
VectorOfKeyPoint modelKeyPoints;
VectorOfKeyPoint observedKeyPoints;
using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
{
Mat mask;
FindMatch(modelImage, observedImage, out matchTime, out modelKeyPoints, out observedKeyPoints, matches,
out mask, out homography);
Mat result = new Mat();
Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
matches, result, new MCvScalar(255, 255, 255), new MCvScalar(255, 255, 255), mask);
Point[] points = null;
if (homography != null)
{
Rectangle rect = new Rectangle(Point.Empty, modelImage.Size);
PointF[] pts = new PointF[]
{
new PointF(rect.Left, rect.Bottom),
new PointF(rect.Right, rect.Bottom),
new PointF(rect.Right, rect.Top),
new PointF(rect.Left, rect.Top)
};
pts = CvInvoke.PerspectiveTransform(pts, homography);
points = Array.ConvertAll<PointF, Point>(pts, Point.Round);
}
return points;
}
}
}
}
EDIT
I managed to get some points from match objects like this:
Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
matches, result, new MCvScalar(255, 255, 255), new MCvScalar(255, 255, 255), mask);
for (int i = 0; i < matches.Size; i++)
{
var a = matches[i].ToArray();
foreach (var e in a)
{
Point p = new Point(e.TrainIdx, e.QueryIdx);
Console.WriteLine(string.Format("Point: {0}", p));
}
Console.WriteLine("-----------------------");
}
I think this should get me the points. I managed to get this working in Python, and the code is not much different. The problem is that it returns too many points; in fact, it returns multiple points for every Y value.
Example
(45, 1), (67, 1)
(656, 2), (77, 2)
...
It does not give me the points I want, although I could be close. Any suggestions are welcome.
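In case it helps to show what I am after: I suspect TrainIdx and QueryIdx are indices into the two keypoint vectors rather than coordinates, so the lookup I think I need is something like the sketch below (untested; it reuses modelKeyPoints, observedKeyPoints, matches and mask from FindMatch, takes only the best of the k = 2 candidates for each match, and assumes the mask can be copied into a Matrix<byte> for reading).
var pairs = new List<Tuple<PointF, PointF>>();
// Copy the mask so individual entries can be read (assumption: Mat.CopyTo accepts a Matrix<byte>).
var maskMatrix = new Matrix<byte>(mask.Rows, mask.Cols);
mask.CopyTo(maskMatrix);
for (int i = 0; i < matches.Size; i++)
{
// Skip matches rejected by VoteForUniqueness / the homography step.
if (maskMatrix[i, 0] == 0)
continue;
// With k = 2 each entry holds a best and a second-best candidate; only the best one is the real match.
MDMatch best = matches[i].ToArray()[0];
// TrainIdx indexes modelKeyPoints, QueryIdx indexes observedKeyPoints.
PointF modelPoint = modelKeyPoints[best.TrainIdx].Point;
PointF observedPoint = observedKeyPoints[best.QueryIdx].Point;
pairs.Add(Tuple.Create(modelPoint, observedPoint));
}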