Today's post is about using the SIFT/SURF algorithms for object recognition with OpenCV Java. I previously shared a post on the SURF feature detector; this one is fully based on it, and here I'm simply showing how to implement the same logic in OpenCV Java. Note that you need to download an older OpenCV version so that the SURF feature detector is available in your library, because the newer versions have removed these non-free modules from the Java wrapper. You can check this out.
I assume you have some basic knowledge of working with OpenCV Java. If you need some beginner help, you can refer to the following links.
Now let's move on to the development. I'm using Eclipse for this, along with OpenCV 2.4.11, which you can download here.
First, create the user library for OpenCV as described in the previous link and add it to the build path. Then we can start writing the code for object recognition. The following is my Eclipse project, with the OpenCV 2.4.11 library added as a user library on the build path.
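If you want to quickly check that the user library is wired up correctly, a minimal sketch like the following should print the OpenCV version. This assumes the user library's native library location is configured so that System.loadLibrary can find opencv_java2411; the class name OpenCVSetupCheck is just a placeholder.

import org.opencv.core.Core;

public class OpenCVSetupCheck {
    public static void main(String[] args) {
        // Loads opencv_java2411 from java.library.path (set via the user library's native location).
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        System.out.println("Loaded OpenCV " + Core.VERSION);
    }
}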
I'm using the following images for object recognition.
1. The object we are going to recognize.
2. The scene from which we are going to recognize the object.
The following is the Java implementation of our SURF feature detector.
package com.dummys.learning;

import org.opencv.calib3d.Calib3d;
import org.opencv.core.*;
import org.opencv.features2d.*;
import org.opencv.highgui.Highgui;

import java.io.File;
import java.util.LinkedList;
import java.util.List;

/**
 * Created by Kinath on 8/6/2016.
 */
public class SURFDetector {

    public static void main(String[] args) {
        // Load the native OpenCV library (Windows only here; adjust the path for other platforms).
        File lib = null;
        String os = System.getProperty("os.name");
        String bitness = System.getProperty("sun.arch.data.model");

        if (os.toUpperCase().contains("WINDOWS")) {
            if (bitness.endsWith("64")) {
                lib = new File("libs//x64//" + System.mapLibraryName("opencv_java2411"));
            } else {
                lib = new File("libs//x86//" + System.mapLibraryName("opencv_java2411"));
            }
        }

        System.out.println(lib.getAbsolutePath());
        System.load(lib.getAbsolutePath());

        String bookObject = "images//bookobject.jpg";
        String bookScene = "images//bookscene.jpg";

        System.out.println("Started....");
        System.out.println("Loading images...");
        Mat objectImage = Highgui.imread(bookObject, Highgui.CV_LOAD_IMAGE_COLOR);
        Mat sceneImage = Highgui.imread(bookScene, Highgui.CV_LOAD_IMAGE_COLOR);

        // Detect SURF key points in the object image.
        MatOfKeyPoint objectKeyPoints = new MatOfKeyPoint();
        FeatureDetector featureDetector = FeatureDetector.create(FeatureDetector.SURF);
        System.out.println("Detecting key points...");
        featureDetector.detect(objectImage, objectKeyPoints);
        KeyPoint[] keypoints = objectKeyPoints.toArray();
        System.out.println(keypoints.length + " key points detected in the object image.");

        // Compute SURF descriptors for the object key points.
        MatOfKeyPoint objectDescriptors = new MatOfKeyPoint();
        DescriptorExtractor descriptorExtractor = DescriptorExtractor.create(DescriptorExtractor.SURF);
        System.out.println("Computing descriptors...");
        descriptorExtractor.compute(objectImage, objectKeyPoints, objectDescriptors);

        // Create the matrix for the output image.
        Mat outputImage = new Mat(objectImage.rows(), objectImage.cols(), CvType.CV_8UC3);
        Scalar newKeypointColor = new Scalar(255, 0, 0);

        System.out.println("Drawing key points on object image...");
        Features2d.drawKeypoints(objectImage, objectKeyPoints, outputImage, newKeypointColor, 0);

        // Detect key points and compute descriptors in the scene image.
        MatOfKeyPoint sceneKeyPoints = new MatOfKeyPoint();
        MatOfKeyPoint sceneDescriptors = new MatOfKeyPoint();
        System.out.println("Detecting key points in background image...");
        featureDetector.detect(sceneImage, sceneKeyPoints);
        System.out.println("Computing descriptors in background image...");
        descriptorExtractor.compute(sceneImage, sceneKeyPoints, sceneDescriptors);

        Mat matchoutput = new Mat(sceneImage.rows() * 2, sceneImage.cols() * 2, CvType.CV_8UC3);
        Scalar matchesColor = new Scalar(0, 255, 0);

        // Match the object descriptors against the scene descriptors (2 nearest neighbours each).
        List<MatOfDMatch> matches = new LinkedList<MatOfDMatch>();
        DescriptorMatcher descriptorMatcher = DescriptorMatcher.create(DescriptorMatcher.FLANNBASED);
        System.out.println("Matching object and scene images...");
        descriptorMatcher.knnMatch(objectDescriptors, sceneDescriptors, matches, 2);

        // Keep only good matches using the nearest-neighbour distance ratio (Lowe's ratio test).
        System.out.println("Calculating good match list...");
        LinkedList<DMatch> goodMatchesList = new LinkedList<DMatch>();
        float nndrRatio = 0.7f;

        for (int i = 0; i < matches.size(); i++) {
            MatOfDMatch matofDMatch = matches.get(i);
            DMatch[] dmatcharray = matofDMatch.toArray();
            DMatch m1 = dmatcharray[0];
            DMatch m2 = dmatcharray[1];

            if (m1.distance <= m2.distance * nndrRatio) {
                goodMatchesList.addLast(m1);
            }
        }

        if (goodMatchesList.size() >= 7) {
            System.out.println("Object Found!!!");

            List<KeyPoint> objKeypointlist = objectKeyPoints.toList();
            List<KeyPoint> scnKeypointlist = sceneKeyPoints.toList();

            LinkedList<Point> objectPoints = new LinkedList<>();
            LinkedList<Point> scenePoints = new LinkedList<>();

            for (int i = 0; i < goodMatchesList.size(); i++) {
                objectPoints.addLast(objKeypointlist.get(goodMatchesList.get(i).queryIdx).pt);
                scenePoints.addLast(scnKeypointlist.get(goodMatchesList.get(i).trainIdx).pt);
            }

            // Estimate the homography that maps the object image onto the scene.
            MatOfPoint2f objMatOfPoint2f = new MatOfPoint2f();
            objMatOfPoint2f.fromList(objectPoints);
            MatOfPoint2f scnMatOfPoint2f = new MatOfPoint2f();
            scnMatOfPoint2f.fromList(scenePoints);

            Mat homography = Calib3d.findHomography(objMatOfPoint2f, scnMatOfPoint2f, Calib3d.RANSAC, 3);

            // Project the object's corners into the scene and draw the bounding quadrilateral.
            Mat obj_corners = new Mat(4, 1, CvType.CV_32FC2);
            Mat scene_corners = new Mat(4, 1, CvType.CV_32FC2);

            obj_corners.put(0, 0, new double[]{0, 0});
            obj_corners.put(1, 0, new double[]{objectImage.cols(), 0});
            obj_corners.put(2, 0, new double[]{objectImage.cols(), objectImage.rows()});
            obj_corners.put(3, 0, new double[]{0, objectImage.rows()});

            System.out.println("Transforming object corners to scene corners...");
            Core.perspectiveTransform(obj_corners, scene_corners, homography);

            Mat img = Highgui.imread(bookScene, Highgui.CV_LOAD_IMAGE_COLOR);

            Core.line(img, new Point(scene_corners.get(0, 0)), new Point(scene_corners.get(1, 0)), new Scalar(0, 255, 0), 4);
            Core.line(img, new Point(scene_corners.get(1, 0)), new Point(scene_corners.get(2, 0)), new Scalar(0, 255, 0), 4);
            Core.line(img, new Point(scene_corners.get(2, 0)), new Point(scene_corners.get(3, 0)), new Scalar(0, 255, 0), 4);
            Core.line(img, new Point(scene_corners.get(3, 0)), new Point(scene_corners.get(0, 0)), new Scalar(0, 255, 0), 4);

            System.out.println("Drawing matches image...");
            MatOfDMatch goodMatches = new MatOfDMatch();
            goodMatches.fromList(goodMatchesList);

            // Flag 2 = NOT_DRAW_SINGLE_POINTS, so only matched key points are drawn.
            Features2d.drawMatches(objectImage, objectKeyPoints, sceneImage, sceneKeyPoints, goodMatches, matchoutput, matchesColor, newKeypointColor, new MatOfByte(), 2);

            Highgui.imwrite("output//outputImage.jpg", outputImage);
            Highgui.imwrite("output//matchoutput.jpg", matchoutput);
            Highgui.imwrite("output//img.jpg", img);
        } else {
            System.out.println("Object Not Found");
        }

        System.out.println("Ended....");
    }
}
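A quick note on the library loading at the top of the class: it only handles Windows, so lib stays null on other platforms. If you happen to be on Linux or macOS, a rough sketch along the following lines should work; the libs//linux//x64 and libs//mac//x64 folders and the NativeLoader class name are just assumptions about where you unpack the matching native library.

import java.io.File;

public class NativeLoader {
    // Hypothetical helper; the folder layout under libs// is an assumption.
    static void loadOpenCv() {
        String os = System.getProperty("os.name").toUpperCase();
        // mapLibraryName adds the platform prefix/suffix: .dll on Windows, lib...so on Linux, lib...dylib on macOS (recent JDKs).
        String name = System.mapLibraryName("opencv_java2411");
        File lib;
        if (os.contains("WINDOWS")) {
            lib = new File("libs//x64//" + name);
        } else if (os.contains("LINUX")) {
            lib = new File("libs//linux//x64//" + name);
        } else {
            lib = new File("libs//mac//x64//" + name);
        }
        System.load(lib.getAbsolutePath());
    }
}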
The following are our outputs.
1. The key points identified on the object.
2. The object key points matched with the scene.
3. The image with the recognized object outlined.
You can do the same with the SIFT feature detector by simply changing the feature detector and descriptor extractor types to SIFT, as shown below.
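Only these two lines of the code above need to change; the rest of the pipeline, including the FLANN-based matcher, works as-is because SIFT descriptors are also float-valued.

FeatureDetector featureDetector = FeatureDetector.create(FeatureDetector.SIFT);
DescriptorExtractor descriptorExtractor = DescriptorExtractor.create(DescriptorExtractor.SIFT);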
So that's it.
Hope that helps.
Thank You. :-)