
DrawMatching - image recognition

I am trying to display the matched keypoints between two images (one captured with my camera and one from my database). Can anyone help me write the DrawMatches call in my code so that it draws lines between the matched keypoints of the two images?

Here is my code:

public final class ImageDetectionFilter{ 

// Flag indicating whether the target image's corners 
// have been found and should be drawn. 
private boolean flagDraw; 

// The reference image (this detector's target). 
private final Mat mReferenceImage; 

// Features of the reference image. 
private final MatOfKeyPoint mReferenceKeypoints = new MatOfKeyPoint(); 

// Descriptors of the reference image's features. 
private final Mat mReferenceDescriptors = new Mat(); 

// The corner coordinates of the reference image, in pixels. 
// CvType defines the color depth, number of channels, and 
// channel layout in the image. Here, each point is represented 
// by two 32-bit floats. 
private final Mat mReferenceCorners = new Mat(4, 1, CvType.CV_32FC2); 

// Features of the scene (the current frame). 
private final MatOfKeyPoint mSceneKeypoints = new MatOfKeyPoint(); 
// Descriptors of the scene's features. 
private final Mat mSceneDescriptors = new Mat(); 
// Tentative corner coordinates detected in the scene, in 
// pixels. 
private final Mat mCandidateSceneCorners = 
    new Mat(4, 1, CvType.CV_32FC2); 
// Good corner coordinates detected in the scene, in pixels. 
private final Mat mSceneCorners = new Mat(4, 1, CvType.CV_32FC2); 
// The good detected corner coordinates, in pixels, as integers. 
private final MatOfPoint mIntSceneCorners = new MatOfPoint(); 

// A grayscale version of the scene. 
private final Mat mGraySrc = new Mat(); 
// Tentative matches of scene features and reference features. 
private final MatOfDMatch mMatches = new MatOfDMatch(); 

// A feature detector, which finds features in images. 
private final FeatureDetector mFeatureDetector = 
    FeatureDetector.create(FeatureDetector.ORB); 
// A descriptor extractor, which creates descriptors of 
// features. 
private final DescriptorExtractor mDescriptorExtractor = 
    DescriptorExtractor.create(DescriptorExtractor.ORB); 
// A descriptor matcher, which matches features based on their 
// descriptors. 
private final DescriptorMatcher mDescriptorMatcher = DescriptorMatcher 
    .create(DescriptorMatcher.BRUTEFORCE_HAMMINGLUT); 

// The color of the outline drawn around the detected image. 
private final Scalar mLineColor = new Scalar(0, 255, 0); 

public ImageDetectionFilter(final Context context, 
    final int referenceImageResourceID) throws IOException { 

// Load the reference image from the app's resources. 
// It is loaded in BGR (blue, green, red) format. 
mReferenceImage = Utils.loadResource(context, referenceImageResourceID, 
     Imgcodecs.CV_LOAD_IMAGE_COLOR); 

// Create grayscale and RGBA versions of the reference image. 
final Mat referenceImageGray = new Mat(); 
Imgproc.cvtColor(mReferenceImage, referenceImageGray, 
     Imgproc.COLOR_BGR2GRAY); 

Imgproc.cvtColor(mReferenceImage, mReferenceImage, 
     Imgproc.COLOR_BGR2RGBA); 

// Store the reference image's corner coordinates, in pixels. 
mReferenceCorners.put(0, 0, new double[] { 0.0, 0.0 }); 
mReferenceCorners.put(1, 0, 
     new double[] { referenceImageGray.cols(),0.0 }); 
mReferenceCorners.put(2, 0, 
     new double[] { referenceImageGray.cols(), 
     referenceImageGray.rows() }); 
mReferenceCorners.put(3, 0, 
     new double[] { 0.0, referenceImageGray.rows() }); 

// Detect the reference features and compute their 
// descriptors. 
mFeatureDetector.detect(referenceImageGray, 
     mReferenceKeypoints); 
mDescriptorExtractor.compute(referenceImageGray, 
     mReferenceKeypoints,mReferenceDescriptors); 
} 

public void apply(Mat src, Mat dst) { 

// Convert the scene to grayscale. 
Imgproc.cvtColor(src, mGraySrc, Imgproc.COLOR_RGBA2GRAY); 

// Detect the same features, compute their descriptors, 
// and match the scene descriptors to reference descriptors. 
mFeatureDetector.detect(mGraySrc, mSceneKeypoints); 
mDescriptorExtractor.compute(mGraySrc, mSceneKeypoints, 
     mSceneDescriptors); 
mDescriptorMatcher.match(mSceneDescriptors, 
     mReferenceDescriptors,mMatches); 

findSceneCorners(); 

// If the corners have been found, draw an outline around the 
// target image. 
draw(src, dst); 

} 

private void findSceneCorners() { 
flagDraw = false; 

final List<DMatch> matchesList = mMatches.toList(); 

if (matchesList.size() < 4) { 
    // There are too few matches to find the homography. 
    return; 
} 

final List<KeyPoint> referenceKeypointsList = 
     mReferenceKeypoints.toList(); 
final List<KeyPoint> sceneKeypointsList = 
     mSceneKeypoints.toList(); 

// Calculate the max and min distances between keypoints. 
double maxDist = 0.0; 
double minDist = Double.MAX_VALUE; 

for (final DMatch match : matchesList) { 
    final double dist = match.distance; 
    if (dist < minDist) { 
     minDist = dist; 
    } 
    if (dist > maxDist) { 
     maxDist = dist; 
    } 
} 

// The thresholds for minDist are chosen subjectively 
// based on testing. The unit is not related to pixel 
// distances; it is related to the number of failed tests 
// for similarity between the matched descriptors. 
if (minDist > 50.0) { 
    // The target is completely lost. 
    // Discard any previously found corners. 
    mSceneCorners.create(0, 0, mSceneCorners.type()); 
    return; 
} else if (minDist > 25.0) { 
    // The target is lost but maybe it is still close. 
    // Keep any previously found corners. 
    return; 
} 

// Identify "good" keypoints and on match distance. 
final ArrayList<Point> goodReferencePointsList = 
     new ArrayList<Point>(); 
final ArrayList<Point> goodScenePointsList = 
     new ArrayList<Point>(); 
final double maxGoodMatchDist = 1.75 * minDist; 
for (final DMatch match : matchesList) { 
    if (match.distance < maxGoodMatchDist) { 
     goodReferencePointsList.add(
       referenceKeypointsList.get(match.trainIdx).pt); 
     goodScenePointsList 
       .add(sceneKeypointsList.get(match.queryIdx).pt); 
    } 
} 
if (goodReferencePointsList.size() < 4 
     || goodScenePointsList.size() < 4) { 
    // There are too few good points to find the homography. 
    return; 
} 
// There are enough good points to find the homography. 
// (Otherwise, the method would have already returned.) 

// Convert the matched points to MatOfPoint2f format, as 
// required by the Calib3d.findHomography function. 
final MatOfPoint2f goodReferencePoints = new MatOfPoint2f(); 
goodReferencePoints.fromList(goodReferencePointsList); 

final MatOfPoint2f goodScenePoints = new MatOfPoint2f(); 
goodScenePoints.fromList(goodScenePointsList); 

// Find the homography. 
final Mat homography = Calib3d.findHomography(
     goodReferencePoints,goodScenePoints); 

// Use the homography to project the reference corner 
// coordinates into scene coordinates. 
Core.perspectiveTransform(mReferenceCorners, 
     mCandidateSceneCorners,homography); 

// Convert the scene corners to integer format, as required 
// by the Imgproc.isContourConvex function. 
mCandidateSceneCorners.convertTo(mIntSceneCorners, 
     CvType.CV_32S); 

// Check whether the corners form a convex polygon. If not, 
// (that is, if the corners form a concave polygon), the 
// detection result is invalid because no real perspective can 
// make the corners of a rectangular image look like a concave 
// polygon! 
if (Imgproc.isContourConvex(mIntSceneCorners)) { 
    // The corners form a convex polygon, so record them as 
    // valid scene corners. 
    mCandidateSceneCorners.copyTo(mSceneCorners); 
    flagDraw = true; 
} 

} 

protected void draw(final Mat src, final Mat dst) { 

if (dst != src) { 
    src.copyTo(dst); 
} 

if (mSceneCorners.height() < 4) { 
    // The corners were not found (or were discarded), so 
    // there is nothing to outline. 
    return; 
} 

// Outline the found target in green. 
Imgproc.line(dst, new Point(mSceneCorners.get(0, 0)), new Point(
     mSceneCorners.get(1, 0)), mLineColor, 4); 
Imgproc.line(dst, new Point(mSceneCorners.get(1, 0)), new Point(
     mSceneCorners.get(2, 0)), mLineColor, 4); 
Imgproc.line(dst, new Point(mSceneCorners.get(2, 0)), new Point(
     mSceneCorners.get(3, 0)), mLineColor, 4); 
Imgproc.line(dst, new Point(mSceneCorners.get(3, 0)), new Point(
     mSceneCorners.get(0, 0)), mLineColor, 4); 
} 

public boolean getFlagDraw(){ 

return flagDraw; 
} 
} 
This should be helpful as a reference. – ZdaR

@ZdaR, I am getting an error while executing the statement below. Could you take a look and let me know what is wrong with it? Mat outImg = new Mat(); Features2d.drawMatches(mReferenceImage, mReferenceKeypoints, mCandidateSceneCorners, mSceneKeypoints, mMatches, outImg); – WhoAmI
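A likely cause of that error (an assumption, since the stack trace is not shown): the third argument to Features2d.drawMatches must be the second image itself, not the corner matrix mCandidateSceneCorners, and drawMatches expects 8-bit images with 1 or 3 channels, while both mReferenceImage and the camera frames here are RGBA. A minimal, untested sketch of a corrected call, assuming it runs inside apply() after mDescriptorMatcher.match(); rgbScene and rgbReference are temporaries introduced purely for illustration:

// drawMatches needs 8-bit, 1- or 3-channel images, so drop 
// the alpha channel from both RGBA images first. 
final Mat rgbScene = new Mat(); 
Imgproc.cvtColor(src, rgbScene, Imgproc.COLOR_RGBA2RGB); 
final Mat rgbReference = new Mat(); 
Imgproc.cvtColor(mReferenceImage, rgbReference, Imgproc.COLOR_RGBA2RGB); 

// The scene descriptors were the query in match(), so the 
// scene image and keypoints must come first. 
final Mat outImg = new Mat(); 
Features2d.drawMatches(rgbScene, mSceneKeypoints, 
     rgbReference, mReferenceKeypoints, mMatches, outImg); 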

Answer

I'm not sure this will help, since it is not in Java, but I have posted an example of how I managed to achieve this in Python with OpenCV. Maybe it will serve as a guideline.

(The example is adapted from this site, which has further explanation for those interested.)

In this example, I find a rotated version of one cartoon animal within a set of six cartoon animals.

Basically, you take the keypoints from your training and query images, call cv2.drawMatches(), and mask out the poor matches. The relevant part of my code is at the bottom.

Your example is not a minimal code example, so I didn't work through all of it, but it looks like you already have your keypoints detected and ready? It would also help if you shared some sample images for matching.

[Images omitted: the query image, the training image, and the resulting drawn matches]

import numpy as np 
import cv2 
from matplotlib import pyplot as plt 

MIN_MATCH_COUNT = 4 


img1 = cv2.imread('d:/one_animal_rotated.jpg',0)   # queryImage 
img2 = cv2.imread('d:/many_animals.jpg',0) # trainImage 

# Initiate SIFT detector 
sift = cv2.xfeatures2d.SIFT_create(0,3,0) 
# find the keypoints and descriptors with SIFT 
kp1, des1 = sift.detectAndCompute(img1,None) 
kp2, des2 = sift.detectAndCompute(img2,None) 

#find matches using FLANN 
FLANN_INDEX_KDTREE = 0 
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5) 
search_params = dict(checks = 50) 
flann = cv2.FlannBasedMatcher(index_params, search_params) 
matches = flann.knnMatch(des1,des2,k=2) 

#apply ratio test to find best matches (values from 0.7-1 made sense here) 
good = [] 
for m,n in matches: 
    if m.distance < 1*n.distance: 
     good.append(m) 

#find homography to transform the edges of the query image and draw them on the train image 
#This is also used to mask all keypoints that aren't inside this box further below. 
if len(good) < MIN_MATCH_COUNT: 
    # Too few good matches; findHomography would fail or be meaningless. 
    raise RuntimeError('Not enough good matches: %d/%d' 
                       % (len(good), MIN_MATCH_COUNT)) 

src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2) 
dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2) 

M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0) 
matchesMask = mask.ravel().tolist() 

h,w = img1.shape 
pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2) 
dst = cv2.perspectiveTransform(pts,M) 
img2 = cv2.polylines(img2,[np.int32(dst)],True,255,3, cv2.LINE_AA) 

#draw the good matched key points 
draw_params = dict(matchColor = (0,255,0), # draw matches in green color 
        singlePointColor = None, 
        matchesMask = matchesMask, # draw only inliers 
        flags = 2) 

img3 = cv2.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params) 
plt.figure() 
plt.imshow(img3, 'gray') 
plt.show() 
This is what I needed to implement in my set of code. However, I need it in Java, for one image from the database and the other captured by the camera. – WhoAmI
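For the Java side, a rough, untested sketch of the same "filter the weak matches, then draw" step is below. It assumes the fields from the question's ImageDetectionFilter, an org.opencv.features2d.Features2d import, and that it runs inside apply() after mDescriptorMatcher.match(); goodMatches and the 50.0 distance threshold are illustrative choices that mirror the thresholds already used in findSceneCorners():

// Keep only the stronger matches; 50.0 is an illustrative 
// ORB/Hamming distance threshold, not a tuned value. 
final List<DMatch> allMatches = mMatches.toList(); 
final List<DMatch> goodMatchesList = new ArrayList<DMatch>(); 
for (final DMatch match : allMatches) { 
    if (match.distance < 50.0) { 
        goodMatchesList.add(match); 
    } 
} 
final MatOfDMatch goodMatches = new MatOfDMatch(); 
goodMatches.fromList(goodMatchesList); 

// drawMatches cannot handle RGBA, so drop the alpha channel. 
final Mat rgbScene = new Mat(); 
Imgproc.cvtColor(src, rgbScene, Imgproc.COLOR_RGBA2RGB); 
final Mat rgbReference = new Mat(); 
Imgproc.cvtColor(mReferenceImage, rgbReference, Imgproc.COLOR_RGBA2RGB); 

// The scene was the query in match(), so its image and 
// keypoints come first. 
final Mat outImg = new Mat(); 
Features2d.drawMatches(rgbScene, mSceneKeypoints, 
     rgbReference, mReferenceKeypoints, goodMatches, outImg); 

The resulting outImg can then be displayed or saved like any other Mat.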