Visual Servoing Platform version 3.5.0
tutorial-mb-generic-tracker-live.cpp
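Model-based tracking example on images acquired live from a camera: the generic tracker follows a CAD model (by default a tea box), with optional keypoint learning (--learn) and automatic re-initialization from the learned data (--auto_init).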
#include <visp3/core/vpConfig.h>
#ifdef VISP_HAVE_MODULE_SENSOR
#include <visp3/sensor/vpV4l2Grabber.h>
#include <visp3/sensor/vp1394CMUGrabber.h>
#include <visp3/sensor/vp1394TwoGrabber.h>
#include <visp3/sensor/vpFlyCaptureGrabber.h>
#include <visp3/sensor/vpRealSense2.h>
#endif
#include <visp3/core/vpIoTools.h>
#include <visp3/core/vpXmlParserCamera.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/io/vpImageIo.h>
#include <visp3/vision/vpKeyPoint.h>
#include <visp3/mbt/vpMbGenericTracker.h>

#include <iomanip> // for std::setprecision used when displaying the pose

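// When several grabber third-parties are available, the first one found in
// the #if/#elif chain of main() is used. Uncomment one of the lines below to
// disable a third-party and fall back to the next one in that chain.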
//#undef VISP_HAVE_V4L2
//#undef VISP_HAVE_DC1394
//#undef VISP_HAVE_CMU1394
//#undef VISP_HAVE_FLYCAPTURE
//#undef VISP_HAVE_REALSENSE2
//#undef VISP_HAVE_OPENCV

int main(int argc, char **argv)
{
#if defined(VISP_HAVE_OPENCV) && \
    (defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || (VISP_HAVE_OPENCV_VERSION >= 0x020100) || \
     defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2))

  try {
    std::string opt_modelname = "model/teabox/teabox.cao";
    int opt_tracker = 2;
    int opt_device = 0; // For OpenCV and V4l2 grabber to set the camera device
    double opt_proj_error_threshold = 30.;
    bool opt_use_ogre = false;
    bool opt_use_scanline = false;
    bool opt_display_projection_error = false;
    bool opt_learn = false;
    bool opt_auto_init = false;
    std::string opt_learning_data = "learning/data-learned.bin";
    std::string opt_intrinsic_file = "";
    std::string opt_camera_name = "";

    for (int i = 1; i < argc; i++) {
      if (std::string(argv[i]) == "--model" && i + 1 < argc) {
        opt_modelname = std::string(argv[i + 1]);
      } else if (std::string(argv[i]) == "--tracker" && i + 1 < argc) {
        opt_tracker = atoi(argv[i + 1]);
      } else if (std::string(argv[i]) == "--camera_device" && i + 1 < argc) {
        opt_device = atoi(argv[i + 1]);
      } else if (std::string(argv[i]) == "--max_proj_error" && i + 1 < argc) {
        opt_proj_error_threshold = atof(argv[i + 1]);
      } else if (std::string(argv[i]) == "--use_ogre") {
        opt_use_ogre = true;
      } else if (std::string(argv[i]) == "--use_scanline") {
        opt_use_scanline = true;
      } else if (std::string(argv[i]) == "--learn") {
        opt_learn = true;
      } else if (std::string(argv[i]) == "--learning_data" && i + 1 < argc) {
        opt_learning_data = argv[i + 1];
      } else if (std::string(argv[i]) == "--auto_init") {
        opt_auto_init = true;
      } else if (std::string(argv[i]) == "--display_proj_error") {
        opt_display_projection_error = true;
      } else if (std::string(argv[i]) == "--intrinsic" && i + 1 < argc) {
        opt_intrinsic_file = std::string(argv[i + 1]);
      } else if (std::string(argv[i]) == "--camera_name" && i + 1 < argc) {
        opt_camera_name = std::string(argv[i + 1]);
      } else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") {
        std::cout << "\nUsage: " << argv[0]
                  << " [--camera_device <camera device> (default: 0)]"
                  << " [--intrinsic <intrinsic file> (default: empty)]"
                  << " [--camera_name <camera name> (default: empty)]"
                  << " [--model <model name> (default: teabox)]"
                  << " [--tracker <0=edge|1=keypoint|2=hybrid> (default: 2)]"
                  << " [--use_ogre] [--use_scanline]"
                  << " [--max_proj_error <allowed projection error> (default: 30)]"
                  << " [--learn] [--auto_init] [--learning_data <data-learned.bin> (default: learning/data-learned.bin)]"
                  << " [--display_proj_error]"
                  << " [--help] [-h]\n"
                  << std::endl;
        return 0;
      }
    }
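    // Example invocations (the binary name is illustrative; it depends on
    // your build tree):
    //   ./tutorial-mb-generic-tracker-live --model model/teabox/teabox.cao --tracker 2
    //   ./tutorial-mb-generic-tracker-live --learn
    //   ./tutorial-mb-generic-tracker-live --auto_init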
    std::string parentname = vpIoTools::getParent(opt_modelname);
    std::string objectname = vpIoTools::getNameWE(opt_modelname);

    if (!parentname.empty())
      objectname = parentname + "/" + objectname;

    std::cout << "Tracker requested config files: " << objectname << ".[init, cao]" << std::endl;
    std::cout << "Tracker optional config files: " << objectname << ".[ppm]" << std::endl;

    std::cout << "Tracked features: " << std::endl;
    std::cout << "  Use edges   : " << (opt_tracker == 0 || opt_tracker == 2) << std::endl;
    std::cout << "  Use klt     : " << (opt_tracker == 1 || opt_tracker == 2) << std::endl;
    std::cout << "Tracker options: " << std::endl;
    std::cout << "  Use ogre    : " << opt_use_ogre << std::endl;
    std::cout << "  Use scanline: " << opt_use_scanline << std::endl;
    std::cout << "  Proj. error : " << opt_proj_error_threshold << std::endl;
    std::cout << "  Display proj. error: " << opt_display_projection_error << std::endl;
    std::cout << "Config files: " << std::endl;
    std::cout << "  Config file : " << "\"" << objectname + ".xml" << "\"" << std::endl;
    std::cout << "  Model file  : " << "\"" << objectname + ".cao" << "\"" << std::endl;
    std::cout << "  Init file   : " << "\"" << objectname + ".init" << "\"" << std::endl;
    std::cout << "Learning options   : " << std::endl;
    std::cout << "  Learn       : " << opt_learn << std::endl;
    std::cout << "  Auto init   : " << opt_auto_init << std::endl;
    std::cout << "  Learning data: " << opt_learning_data << std::endl;

#if VISP_VERSION_INT > VP_VERSION_INT(3, 2, 0)
    vpImage<vpRGBa> I; // Since ViSP 3.2.0 we support model-based tracking on color images
#else
    vpImage<unsigned char> I; // Tracking on gray level images
#endif

    // Default intrinsics, overridden below when an XML calibration file is given
    vpCameraParameters cam;
    cam.initPersProjWithoutDistortion(839, 839, 325, 243);
    vpXmlParserCamera parser;
    if (!opt_intrinsic_file.empty() && !opt_camera_name.empty())
      parser.parse(cam, opt_intrinsic_file, opt_camera_name, vpCameraParameters::perspectiveProjWithoutDistortion);

    vpHomogeneousMatrix cMo; // Pose of the object in the camera frame

#if defined(VISP_HAVE_V4L2)
    vpV4l2Grabber g;
    std::ostringstream device;
    device << "/dev/video" << opt_device;
    std::cout << "Use Video 4 Linux grabber on device " << device.str() << std::endl;
    g.setDevice(device.str());
    g.setScale(1);
    g.open(I);
#elif defined(VISP_HAVE_DC1394)
    (void)opt_device; // To avoid unused warning
    std::cout << "Use DC1394 grabber" << std::endl;
    vp1394TwoGrabber g;
    g.open(I);
#elif defined(VISP_HAVE_CMU1394)
    (void)opt_device; // To avoid unused warning
    std::cout << "Use CMU1394 grabber" << std::endl;
    vp1394CMUGrabber g;
    g.open(I);
#elif defined(VISP_HAVE_FLYCAPTURE)
    (void)opt_device; // To avoid unused warning
    std::cout << "Use FlyCapture grabber" << std::endl;
    vpFlyCaptureGrabber g;
    g.open(I);
#elif defined(VISP_HAVE_REALSENSE2)
    (void)opt_device; // To avoid unused warning
    std::cout << "Use Realsense 2 grabber" << std::endl;
    vpRealSense2 g;
    rs2::config config;
    config.disable_stream(RS2_STREAM_DEPTH);
    config.disable_stream(RS2_STREAM_INFRARED);
    config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_RGBA8, 30);
    g.open(config);
    g.acquire(I);

    std::cout << "Read camera parameters from Realsense device" << std::endl;
    cam = g.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithoutDistortion);
#elif defined(VISP_HAVE_OPENCV)
    std::cout << "Use OpenCV grabber on device " << opt_device << std::endl;
    cv::VideoCapture g(opt_device); // Open the default camera
    if (!g.isOpened()) {            // Check if we succeeded
      std::cout << "Failed to open the camera" << std::endl;
      return -1;
    }
    cv::Mat frame;
    g >> frame; // get a new frame from camera
    vpImageConvert::convert(frame, I);
#endif
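    // Whatever branch was compiled, the grabber g is now open and I holds a
    // first frame; its dimensions are used to initialize the display below.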

    vpDisplay *display = NULL;
#if defined(VISP_HAVE_X11)
    display = new vpDisplayX;
#elif defined(VISP_HAVE_GDI)
    display = new vpDisplayGDI;
#else
    display = new vpDisplayOpenCV;
#endif
    display->init(I, 100, 100, "Model-based tracker");

    while (true) {
#if defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2)
      g.acquire(I);
#elif defined(VISP_HAVE_OPENCV)
      g >> frame;
      vpImageConvert::convert(frame, I);
#endif

      vpDisplay::display(I);
      vpDisplay::displayText(I, 20, 20, "Click when ready.", vpColor::red);
      vpDisplay::flush(I);

      if (vpDisplay::getClick(I, false)) {
        break;
      }
    }

    vpMbGenericTracker tracker;
    if (opt_tracker == 0)
      tracker.setTrackerType(vpMbGenericTracker::EDGE_TRACKER);
#if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV)
    else if (opt_tracker == 1)
      tracker.setTrackerType(vpMbGenericTracker::KLT_TRACKER);
    else
      tracker.setTrackerType(vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER);
#else
    else {
# if !defined(VISP_HAVE_MODULE_KLT)
      std::cout << "klt and hybrid model-based tracker are not available since visp_klt module is not available. "
                   "In CMakeGUI turn visp_klt module ON, configure and build ViSP again."
                << std::endl;
# else
      std::cout << "Hybrid tracking is impossible since OpenCV is not enabled. "
                << "Install OpenCV, configure and build ViSP again to run this tutorial."
                << std::endl;
# endif
      return EXIT_SUCCESS;
    }
#endif

    bool usexml = false;
    if (vpIoTools::checkFilename(objectname + ".xml")) {
      tracker.loadConfigFile(objectname + ".xml");
      usexml = true;
    }

    if (!usexml) {
      if (opt_tracker == 0 || opt_tracker == 2) {
        vpMe me;
        me.setMaskSize(5);
        me.setMaskNumber(180);
        me.setRange(8);
        me.setThreshold(10000);
        me.setMu1(0.5);
        me.setMu2(0.5);
        me.setSampleStep(4);
        tracker.setMovingEdge(me);
      }

#ifdef VISP_HAVE_MODULE_KLT
      if (opt_tracker == 1 || opt_tracker == 2) {
        vpKltOpencv klt_settings;
        klt_settings.setMaxFeatures(300);
        klt_settings.setWindowSize(5);
        klt_settings.setQuality(0.015);
        klt_settings.setMinDistance(8);
        klt_settings.setHarrisFreeParameter(0.01);
        klt_settings.setBlockSize(3);
        klt_settings.setPyramidLevels(3);
        tracker.setKltOpencv(klt_settings);
        tracker.setKltMaskBorder(5);
      }
#endif
    }
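    // The moving-edge and KLT settings above are only used when no
    // <object>.xml file was found; when it exists, the XML configuration
    // takes precedence.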

    tracker.setCameraParameters(cam);

    tracker.loadModel(objectname + ".cao");
    tracker.setDisplayFeatures(true);
    tracker.setOgreVisibilityTest(opt_use_ogre);
    tracker.setScanLineVisibilityTest(opt_use_scanline);
    tracker.setProjectionErrorComputation(true);
    tracker.setProjectionErrorDisplay(opt_display_projection_error);
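    // The projection error computed by the tracker is used in the main loop
    // as a quality measure: above --max_proj_error the tracker is considered
    // lost and gets re-initialized.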

#if (defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)) || \
    (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
    std::string detectorName = "SIFT";
    std::string extractorName = "SIFT";
    std::string matcherName = "BruteForce";
#else
    std::string detectorName = "FAST";
    std::string extractorName = "ORB";
    std::string matcherName = "BruteForce-Hamming";
#endif
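    // SIFT detection/description is preferred when available (non-free or
    // xfeatures2d modules, or a recent enough OpenCV); otherwise fall back to
    // FAST detection with ORB descriptors matched by Hamming distance.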
    vpKeyPoint keypoint;
    if (opt_learn || opt_auto_init) {
      keypoint.setDetector(detectorName);
      keypoint.setExtractor(extractorName);
      keypoint.setMatcher(matcherName);
#if !(defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D))
# if (VISP_HAVE_OPENCV_VERSION < 0x030000)
      keypoint.setDetectorParameter("ORB", "nLevels", 1);
# else
      cv::Ptr<cv::ORB> orb_detector = keypoint.getDetector("ORB").dynamicCast<cv::ORB>();
      if (orb_detector) {
        orb_detector->setNLevels(1);
      }
# endif
#endif
    }

    if (opt_auto_init) {
      if (!vpIoTools::checkFilename(opt_learning_data)) {
        std::cout << "Cannot enable auto detection. Learning file \"" << opt_learning_data << "\" doesn't exist" << std::endl;
        return EXIT_FAILURE;
      }
      keypoint.loadLearningData(opt_learning_data, true);
    }
    else {
      tracker.initClick(I, objectname + ".init", true);
    }
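    // Without --auto_init, the user initializes the pose by clicking in the
    // image the 3D points listed in the <object>.init file.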

    bool learn_position = false;
    bool run_auto_init = false;
    if (opt_auto_init) {
      run_auto_init = true;
    }

    // To be able to display keypoints matching with test-detection-rs2
    int learn_id = 1;
    unsigned int learn_cpt = 0;
    bool quit = false;
    bool tracking_failed = false;

    while (!quit) {
      double t_begin = vpTime::measureTimeMs();
#if defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2)
      g.acquire(I);
#elif defined(VISP_HAVE_OPENCV)
      g >> frame;
      vpImageConvert::convert(frame, I);
#endif
      vpDisplay::display(I);

      // Run auto initialization from learned data
      if (run_auto_init) {
        tracking_failed = false;
        if (keypoint.matchPoint(I, cam, cMo)) {
          std::cout << "Auto init succeeded" << std::endl;
          tracker.initFromPose(I, cMo);
        } else {
          vpDisplay::flush(I);
          continue;
        }
      }
      else if (tracking_failed) {
        // Manual init
        tracking_failed = false;
        tracker.initClick(I, objectname + ".init", true);
      }

      // Run the tracker
      try {
        if (run_auto_init) {
          // Turn display features off just after auto init to not display wrong moving-edge if the tracker fails
          tracker.setDisplayFeatures(false);

          run_auto_init = false;
        }
        tracker.track(I);
      } catch (const vpException &e) {
        std::cout << "Tracker exception: " << e.getStringMessage() << std::endl;
        tracking_failed = true;
        if (opt_auto_init) {
          std::cout << "Tracker needs to restart (tracking exception)" << std::endl;
          run_auto_init = true;
        }
      }

      if (!tracking_failed) {
        double proj_error = 0;
        if (tracker.getTrackerType() & vpMbGenericTracker::EDGE_TRACKER) {
          // Check tracking errors
          proj_error = tracker.getProjectionError();
        }
        else {
          tracker.getPose(cMo);
          tracker.getCameraParameters(cam);
          proj_error = tracker.computeCurrentProjectionError(I, cMo, cam);
        }
        if (proj_error > opt_proj_error_threshold) {
          std::cout << "Tracker needs to restart (projection error detected: " << proj_error << ")" << std::endl;
          if (opt_auto_init) {
            run_auto_init = true;
          }
          tracking_failed = true;
        }
      }

      if (!tracking_failed) {
        tracker.setDisplayFeatures(true);
        tracker.getPose(cMo);
        tracker.getCameraParameters(cam);
        tracker.display(I, cMo, cam, vpColor::green, 2, false);
        vpDisplay::displayFrame(I, cMo, cam, 0.025, vpColor::none, 3);

        { // Display estimated pose in [m] and [deg]
          vpPoseVector pose(cMo);
          std::stringstream ss;
          ss << "Translation: " << std::setprecision(5) << pose[0] << " " << pose[1] << " " << pose[2] << " [m]";
          vpDisplay::displayText(I, 80, 20, ss.str(), vpColor::green);
          ss.str(""); // erase ss
          ss << "Rotation tu: " << std::setprecision(4) << vpMath::deg(pose[3]) << " " << vpMath::deg(pose[4]) << " " << vpMath::deg(pose[5]) << " [deg]";
          vpDisplay::displayText(I, 100, 20, ss.str(), vpColor::green);
        }
        {
          std::stringstream ss;
          ss << "Features: edges " << tracker.getNbFeaturesEdge() << ", klt " << tracker.getNbFeaturesKlt();
          vpDisplay::displayText(I, 120, 20, ss.str(), vpColor::red);
        }
      }

      if (learn_position) {
        learn_cpt++;
        // Detect keypoints on the current image
        std::vector<cv::KeyPoint> trainKeyPoints;
        keypoint.detect(I, trainKeyPoints);

        // Keep only keypoints that lie on the object faces
        std::vector<vpPolygon> polygons;
        std::vector<std::vector<vpPoint> > roisPt;
        std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > pair = tracker.getPolygonFaces();
        polygons = pair.first;
        roisPt = pair.second;

        // Compute the 3D coordinates
        std::vector<cv::Point3f> points3f;
        vpKeyPoint::compute3DForPointsInPolygons(cMo, cam, trainKeyPoints, polygons, roisPt, points3f);

        // Build the reference keypoints
        keypoint.buildReference(I, trainKeyPoints, points3f, true, learn_id++);

        // Display learned data
        for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
          vpDisplay::displayCross(I, (int)it->pt.y, (int)it->pt.x, 10, vpColor::yellow, 3);
        }
        learn_position = false;
        std::cout << "Data learned" << std::endl;
      }
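      // Keyframes accumulate in the vpKeyPoint container; they are written to
      // disk only once, after the main loop (see saveLearningData below).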

      std::stringstream ss;
      ss << "Loop time: " << vpTime::measureTimeMs() - t_begin << " ms";
      vpDisplay::displayText(I, 20, 20, ss.str(), vpColor::red);
      if (opt_learn)
        vpDisplay::displayText(I, 35, 20, "Left click: learn  Right click: quit", vpColor::red);
      else if (opt_auto_init)
        vpDisplay::displayText(I, 35, 20, "Left click: auto_init  Right click: quit", vpColor::red);
      else
        vpDisplay::displayText(I, 35, 20, "Right click: quit", vpColor::red);

      vpMouseButton::vpMouseButtonType button;
      if (vpDisplay::getClick(I, button, false)) {
        if (button == vpMouseButton::button3) {
          quit = true;
        } else if (button == vpMouseButton::button1 && opt_learn) {
          learn_position = true;
        } else if (button == vpMouseButton::button1 && opt_auto_init && !opt_learn) {
          run_auto_init = true;
        }
      }

      vpDisplay::flush(I);
    }
    if (opt_learn && learn_cpt) {
      std::cout << "Save learning from " << learn_cpt << " images in file: " << opt_learning_data << std::endl;
      keypoint.saveLearningData(opt_learning_data, true, true); // binary mode, keep training images
    }

    delete display;
  } catch (const vpException &e) {
    std::cout << "Catch a ViSP exception: " << e << std::endl;
  }
#elif defined(VISP_HAVE_OPENCV)
  (void)argc;
  (void)argv;
  std::cout << "Install a 3rd party dedicated to frame grabbing (dc1394, cmu1394, v4l2, OpenCV, FlyCapture, Realsense2), configure and build ViSP again to use this example" << std::endl;
#else
  (void)argc;
  (void)argv;
  std::cout << "Install OpenCV 3rd party, configure and build ViSP again to use this example" << std::endl;
#endif
}