Visual Servoing Platform version 3.5.0
testKeyPoint-4.cpp
/****************************************************************************
 *
 * ViSP, open source Visual Servoing Platform software.
 * Copyright (C) 2005 - 2019 by Inria. All rights reserved.
 *
 * This software is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * See the file LICENSE.txt at the root directory of this source
 * distribution for additional information about the GNU GPL.
 *
 * For using ViSP with software that can not be combined with the GNU
 * GPL, please contact Inria about acquiring a ViSP Professional
 * Edition License.
 *
 * See http://visp.inria.fr for more information.
 *
 * This software was developed at:
 * Inria Rennes - Bretagne Atlantique
 * Campus Universitaire de Beaulieu
 * 35042 Rennes Cedex
 * France
 *
 * If you have questions regarding the use of this file, please contact
 * Inria at visp@inria.fr
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Description:
 * Test keypoint matching and pose estimation, using mostly direct OpenCV
 * function calls, to detect potential memory leaks in testKeyPoint-2.cpp.
 *
 * Authors:
 * Souriya Trinh
 *
 *****************************************************************************/

#include <iostream>

#include <visp3/core/vpConfig.h>

#if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020301)

#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <visp3/core/vpHomogeneousMatrix.h>
#include <visp3/core/vpImage.h>
#include <visp3/core/vpIoTools.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayGTK.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/io/vpImageIo.h>
#include <visp3/io/vpParseArgv.h>
#include <visp3/io/vpVideoReader.h>
#include <visp3/mbt/vpMbEdgeTracker.h>
#include <visp3/vision/vpKeyPoint.h>

// List of allowed command line options
#define GETOPTARGS "cdh"

void usage(const char *name, const char *badparam);
bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display);

void usage(const char *name, const char *badparam)
{
  fprintf(stdout, "\n\
Test keypoints matching.\n\
\n\
SYNOPSIS\n\
  %s [-c] [-d] [-h]\n", name);

  fprintf(stdout, "\n\
OPTIONS: \n\
\n\
  -c\n\
     Disable the mouse click. Useful to automate the \n\
     execution of this program without human intervention.\n\
\n\
  -d \n\
     Turn off the display.\n\
\n\
  -h\n\
     Print the help.\n");

  if (badparam)
    fprintf(stdout, "\nERROR: Bad parameter [%s]\n", badparam);
}

bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display)
{
  const char *optarg_;
  int c;
  while ((c = vpParseArgv::parse(argc, argv, GETOPTARGS, &optarg_)) > 1) {

    switch (c) {
    case 'c':
      click_allowed = false;
      break;
    case 'd':
      display = false;
      break;
    case 'h':
      usage(argv[0], NULL);
      return false;
      break;

    default:
      usage(argv[0], optarg_);
      return false;
      break;
    }
  }

  if ((c == 1) || (c == -1)) {
    // standalone param or error
    usage(argv[0], NULL);
    std::cerr << "ERROR: " << std::endl;
    std::cerr << "  Bad argument " << optarg_ << std::endl << std::endl;
    return false;
  }

  return true;
}

template<typename Type>
void run_test(const std::string &env_ipath, bool opt_click_allowed, bool opt_display,
              vpImage<Type> &I, vpImage<Type> &Imatch, vpImage<Type> &Iref)
{
  // Set the path location of the image sequence
  std::string dirname = vpIoTools::createFilePath(env_ipath, "mbt/cube");

  // Build the name of the image files
  std::string filenameRef = vpIoTools::createFilePath(dirname, "image0000.pgm");
  vpImageIo::read(I, filenameRef);
  Iref = I;
  std::string filenameCur = vpIoTools::createFilePath(dirname, "image%04d.pgm");
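  // Use the first available display backend among X11, GTK, GDI and OpenCV to
  // open the two visualization windows.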
#if defined VISP_HAVE_X11
  vpDisplayX display, display2;
#elif defined VISP_HAVE_GTK
  vpDisplayGTK display, display2;
#elif defined VISP_HAVE_GDI
  vpDisplayGDI display, display2;
#else
  vpDisplayOpenCV display, display2;
#endif

  if (opt_display) {
    display.setDownScalingFactor(vpDisplay::SCALE_AUTO);
    display.init(I, 0, 0, "ORB keypoints matching");
    Imatch.resize(I.getHeight(), 2 * I.getWidth());
    Imatch.insert(I, vpImagePoint(0, 0));
    display2.init(Imatch, 0, (int)I.getHeight() / vpDisplay::getDownScalingFactor(I) + 70, "ORB keypoints matching");
  }

  vpCameraParameters cam;
  vpMbEdgeTracker tracker;
  // Load config for tracker
  std::string tracker_config_file = vpIoTools::createFilePath(env_ipath, "mbt/cube.xml");

  tracker.loadConfigFile(tracker_config_file);
  tracker.getCameraParameters(cam);
#if 0
  // Corresponding parameters manually set to have an example code
  vpMe me;
  me.setMaskSize(5);
  me.setMaskNumber(180);
  me.setRange(8);
  me.setThreshold(10000);
  me.setMu1(0.5);
  me.setMu2(0.5);
  me.setSampleStep(4);
  me.setNbTotalSample(250);
  tracker.setMovingEdge(me);
  cam.initPersProjWithoutDistortion(547.7367575, 542.0744058, 338.7036994, 234.5083345);
  tracker.setCameraParameters(cam);
  tracker.setNearClippingDistance(0.01);
  tracker.setFarClippingDistance(100.0);
#endif

  tracker.setAngleAppear(vpMath::rad(89));
  tracker.setAngleDisappear(vpMath::rad(89));

  // Load CAO model
  std::string cao_model_file = vpIoTools::createFilePath(env_ipath, "mbt/cube.cao");
  tracker.loadModel(cao_model_file);

  // Initialize the pose
  std::string init_file = vpIoTools::createFilePath(env_ipath, "mbt/cube.init");
  if (opt_display && opt_click_allowed) {
    tracker.initClick(I, init_file);
  } else {
    vpHomogeneousMatrix cMoi(0.02044769891, 0.1101505452, 0.5078963719, 2.063603907, 1.110231561, -0.4392789872);
    tracker.initFromPose(I, cMoi);
  }

  // Get the init pose
  vpHomogeneousMatrix cMo;
  tracker.getPose(cMo);

  // Init keypoints
  cv::Ptr<cv::FeatureDetector> detector;
  cv::Ptr<cv::DescriptorExtractor> extractor;
  cv::Ptr<cv::DescriptorMatcher> matcher;

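  // Build the ORB detector/extractor. With OpenCV >= 3.0 it is created directly
  // with up to 500 keypoints, a pyramid scale factor of 1.2 and a single pyramid
  // level; with OpenCV 2.x the generic factory is used instead and the number of
  // pyramid levels is forced to 1 just below.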
#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
  detector = cv::ORB::create(500, 1.2f, 1);
  extractor = cv::ORB::create(500, 1.2f, 1);
#elif (VISP_HAVE_OPENCV_VERSION >= 0x020301)
  detector = cv::FeatureDetector::create("ORB");
  extractor = cv::DescriptorExtractor::create("ORB");
#endif
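  // ORB produces binary descriptors, so candidate matches are scored with the
  // Hamming distance (number of differing bits) rather than an L2 norm.
#if 0
  // Sketch of an equivalent construction (assumes OpenCV >= 3.0): instantiate
  // the brute-force Hamming matcher directly instead of using the factory
  // string below.
  matcher = cv::BFMatcher::create(cv::NORM_HAMMING);
#endif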
  matcher = cv::DescriptorMatcher::create("BruteForce-Hamming");

#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
  detector->set("nLevels", 1);
#endif

  // Detect keypoints on the current image
  std::vector<cv::KeyPoint> trainKeyPoints;
  cv::Mat matImg;
  vpImageConvert::convert(I, matImg);
  detector->detect(matImg, trainKeyPoints);

  // Keep only keypoints on the cube
  std::vector<vpPolygon> polygons;
  std::vector<std::vector<vpPoint> > roisPt;
  std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > pair = tracker.getPolygonFaces(false);
  polygons = pair.first;
  roisPt = pair.second;

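  // Each training keypoint that falls inside a visible face of the CAD model is
  // back-projected with the known initial pose cMo onto that face, which gives
  // its 3D coordinates in the object frame. These 3D points are later paired
  // with 2D detections in the following frames to estimate the pose.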
  // Compute the 3D coordinates
  std::vector<cv::Point3f> points3f;
  vpKeyPoint::compute3DForPointsInPolygons(cMo, cam, trainKeyPoints, polygons, roisPt, points3f);

  // Extract descriptors
  cv::Mat trainDescriptors;
  extractor->compute(matImg, trainKeyPoints, trainDescriptors);

  if (trainKeyPoints.size() != (size_t)trainDescriptors.rows || trainKeyPoints.size() != points3f.size()) {
    throw(vpException(vpException::fatalError, "Problem with training data size !"));
  }

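  // The "%04d" pattern lets vpVideoReader step through the numbered images of
  // the mbt/cube sequence one by one.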
  // Init reader for getting the input image sequence
  vpVideoReader g;
  g.setFileName(filenameCur);
  g.open(I);
  g.acquire(I);

  bool opt_click = false;
  vpMouseButton::vpMouseButtonType button;
  while ((opt_display && !g.end()) || (!opt_display && g.getFrameIndex() < 30)) {
    g.acquire(I);

    vpImageConvert::convert(I, matImg);
    std::vector<cv::KeyPoint> queryKeyPoints;
    detector->detect(matImg, queryKeyPoints);

    cv::Mat queryDescriptors;
    extractor->compute(matImg, queryKeyPoints, queryDescriptors);

    std::vector<std::vector<cv::DMatch> > knn_matches;
    std::vector<cv::DMatch> matches;
    matcher->knnMatch(queryDescriptors, trainDescriptors, knn_matches, 2);
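    // Lowe's ratio test: keep a match only if its best distance is clearly
    // smaller (here, less than 0.85 times) than the distance to the second-best
    // candidate, which filters out ambiguous correspondences.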
    for (std::vector<std::vector<cv::DMatch> >::const_iterator it = knn_matches.begin(); it != knn_matches.end();
         ++it) {
      if (it->size() > 1) {
        double ratio = (*it)[0].distance / (*it)[1].distance;
        if (ratio < 0.85) {
          matches.push_back((*it)[0]);
        }
      }
    }

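    // Each retained match pairs a 3D point of the cube (from the training stage)
    // with a 2D keypoint of the current image; the pixel coordinates are
    // converted to normalized image coordinates with the camera intrinsics
    // before being added to the pose estimation problem.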
    vpPose estimated_pose;
    for (std::vector<cv::DMatch>::const_iterator it = matches.begin(); it != matches.end(); ++it) {
      vpPoint pt(points3f[(size_t)(it->trainIdx)].x, points3f[(size_t)(it->trainIdx)].y,
                 points3f[(size_t)(it->trainIdx)].z);

      double x = 0.0, y = 0.0;
      vpPixelMeterConversion::convertPoint(cam, queryKeyPoints[(size_t)(it->queryIdx)].pt.x,
                                           queryKeyPoints[(size_t)(it->queryIdx)].pt.y, x, y);
      pt.set_x(x);
      pt.set_y(y);

      estimated_pose.addPoint(pt);
    }

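    // Pose computation requires at least 4 2D-3D correspondences. RANSAC is
    // configured to look for a consensus of 60% of the points, with an inlier
    // threshold of 0.01 and at most 500 iterations; if it fails, the frame is
    // simply shown without a pose overlay.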
    bool is_pose_estimated = false;
    if (estimated_pose.npt >= 4) {
      try {
        unsigned int nb_inliers = (unsigned int)(0.6 * estimated_pose.npt);
        estimated_pose.setRansacNbInliersToReachConsensus(nb_inliers);
        estimated_pose.setRansacThreshold(0.01);
        estimated_pose.setRansacMaxTrials(500);
        estimated_pose.computePose(vpPose::RANSAC, cMo);
        is_pose_estimated = true;
      } catch (...) {
        is_pose_estimated = false;
      }
    }

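    // Left half of Imatch shows the reference image, right half the current
    // image; matched keypoints are joined by green lines. The model edges and
    // the frame of the estimated pose are drawn in the first window (I).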
    if (opt_display) {
      vpDisplay::display(I);

      Imatch.insert(I, vpImagePoint(0, Iref.getWidth()));
      vpDisplay::display(Imatch);
      for (std::vector<cv::DMatch>::const_iterator it = matches.begin(); it != matches.end(); ++it) {
        vpImagePoint leftPt(trainKeyPoints[(size_t)it->trainIdx].pt.y, trainKeyPoints[(size_t)it->trainIdx].pt.x);
        vpImagePoint rightPt(queryKeyPoints[(size_t)it->queryIdx].pt.y,
                             queryKeyPoints[(size_t)it->queryIdx].pt.x + Iref.getWidth());
        vpDisplay::displayLine(Imatch, leftPt, rightPt, vpColor::green);
      }

      if (is_pose_estimated) {
        tracker.setPose(I, cMo);
        tracker.display(I, cMo, cam, vpColor::red);
        vpDisplay::displayFrame(I, cMo, cam, 0.05, vpColor::none);
      }

      vpDisplay::flush(Imatch);
      vpDisplay::flush(I);
    }

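    // Interaction: a right click toggles step-by-step mode (one click per
    // frame); outside that mode, a left click ends the test on this sequence.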
    // Click requested to process next image
    if (opt_click_allowed && opt_display) {
      if (opt_click) {
        vpDisplay::getClick(I, button, true);
        if (button == vpMouseButton::button3) {
          opt_click = false;
        }
      } else {
        // Use right click to enable/disable step by step tracking
        if (vpDisplay::getClick(I, button, false)) {
          if (button == vpMouseButton::button3) {
            opt_click = true;
          } else if (button == vpMouseButton::button1) {
            break;
          }
        }
      }
    }
  }
}

int main(int argc, const char **argv)
{
  try {
    std::string env_ipath;
    bool opt_click_allowed = true;
    bool opt_display = true;

    // Read the command line options
    if (getOptions(argc, argv, opt_click_allowed, opt_display) == false) {
      exit(-1);
    }

    // Get the visp-images-data package path or VISP_INPUT_IMAGE_PATH
    // environment variable value
    env_ipath = vpIoTools::getViSPImagesDataPath();

    if (env_ipath.empty()) {
      std::cerr << "Please set the VISP_INPUT_IMAGE_PATH environment "
                   "variable value."
                << std::endl;
      return -1;
    }

    {
      vpImage<unsigned char> I, Imatch, Iref;

      std::cout << "-- Test on gray level images" << std::endl;
      run_test(env_ipath, opt_click_allowed, opt_display, I, Imatch, Iref);
    }

    {
      vpImage<vpRGBa> I, Imatch, Iref;

      std::cout << "-- Test on color images" << std::endl;
      run_test(env_ipath, opt_click_allowed, opt_display, I, Imatch, Iref);
    }

  } catch (const vpException &e) {
    std::cerr << e.what() << std::endl;
    return -1;
  }

  std::cout << "testKeyPoint-4 is ok !" << std::endl;
  return 0;
}
#else
int main()
{
  std::cerr << "You need the OpenCV library." << std::endl;

  return 0;
}

#endif