Visual Servoing Platform version 3.5.0
mbtGenericTrackingDepth.cpp
/****************************************************************************
 *
 * ViSP, open source Visual Servoing Platform software.
 * Copyright (C) 2005 - 2019 by Inria. All rights reserved.
 *
 * This software is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * See the file LICENSE.txt at the root directory of this source
 * distribution for additional information about the GNU GPL.
 *
 * For using ViSP with software that can not be combined with the GNU
 * GPL, please contact Inria about acquiring a ViSP Professional
 * Edition License.
 *
 * See http://visp.inria.fr for more information.
 *
 * This software was developed at:
 * Inria Rennes - Bretagne Atlantique
 * Campus Universitaire de Beaulieu
 * 35042 Rennes Cedex
 * France
 *
 * If you have questions regarding the use of this file, please contact
 * Inria at visp@inria.fr
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Description:
 * Example of tracking with vpGenericTracker on Castel.
 *
 *****************************************************************************/

#include <cstdlib>
#include <iostream>
#include <visp3/core/vpConfig.h>

#if (defined(VISP_HAVE_MODULE_MBT) && defined(VISP_HAVE_DISPLAY)) \
  && (defined(VISP_HAVE_LAPACK) || defined(VISP_HAVE_EIGEN3) || defined(VISP_HAVE_OPENCV))

#include <visp3/core/vpDebug.h>
#include <visp3/core/vpHomogeneousMatrix.h>
#include <visp3/core/vpIoTools.h>
#include <visp3/core/vpMath.h>
#include <visp3/gui/vpDisplayD3D.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayGTK.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/io/vpImageIo.h>
#include <visp3/io/vpParseArgv.h>
#include <visp3/io/vpVideoReader.h>
#include <visp3/mbt/vpMbGenericTracker.h>

#define GETOPTARGS "x:X:m:M:i:n:dchfolwvpt:T:e:"

#define USE_XML 1
#define USE_SMALL_DATASET 1 // small depth dataset in ViSP-images

namespace
{
void usage(const char *name, const char *badparam)
{
  fprintf(stdout, "\n\
Example of tracking with vpGenericTracker.\n\
\n\
SYNOPSIS\n\
  %s [-i <test image path>] [-x <config file>] [-X <config file depth>]\n\
  [-m <model name>] [-M <model name depth>] [-n <initialisation file base name>]\n\
  [-f] [-c] [-d] [-h] [-o] [-w] [-l] [-v] [-p]\n\
  [-t <tracker type>] [-T <tracker type>] [-e <last frame index>]\n", name);

  fprintf(stdout, "\n\
OPTIONS:\n\
  -i <input image path>\n\
     Set image input path.\n\
     These images come from ViSP-images-x.y.z.tar.gz available\n\
     on the ViSP website.\n\
     Setting the VISP_INPUT_IMAGE_PATH environment\n\
     variable produces the same behavior as using\n\
     this option.\n\
\n\
  -x <config file>\n\
     Set the config file (the xml file) to use.\n\
     The config file is used to specify the parameters of the tracker.\n\
\n\
  -X <config file>\n\
     Set the config file (the xml file) to use for the depth sensor.\n\
     The config file is used to specify the parameters of the tracker.\n\
\n\
  -m <model name>\n\
     Specify the name of the file of the model.\n\
     The model can either be a vrml model (.wrl) or a .cao file.\n\
\n\
  -M <model name>\n\
     Specify the name of the file of the model for the depth sensor.\n\
     The model can either be a vrml model (.wrl) or a .cao file.\n\
\n\
  -n <initialisation file base name>\n\
     Base name of the initialisation file. The file will be 'base_name'.init.\n\
     This base name is also used for the optional picture specifying where to\n\
     click (a .ppm picture).\n\
\n\
  -f\n\
     Turn off the display of the moving edges and KLT points.\n\
\n\
  -d\n\
     Turn off the display.\n\
\n\
  -c\n\
     Disable the mouse click. Useful to automate the\n\
     execution of this program without human intervention.\n\
\n\
  -o\n\
     Use Ogre3D for visibility tests.\n\
\n\
  -w\n\
     When Ogre3D is enabled [-o], show the Ogre3D configuration dialog that allows selecting the renderer.\n\
\n\
  -l\n\
     Use the scanline for visibility tests.\n\
\n\
  -v\n\
     Compute the covariance matrix.\n\
\n\
  -p\n\
     Compute the gradient projection error.\n\
\n\
  -t <tracker type>\n\
     Set tracker type (<1 (Edge)>, <2 (KLT)>, <3 (both)>) for the color sensor.\n\
\n\
  -T <tracker type>\n\
     Set tracker type (<4 (Depth normal)>, <8 (Depth dense)>, <12 (both)>) for the depth sensor.\n\
\n\
  -e <last frame index>\n\
     Specify the index of the last frame. Once reached, the tracking is stopped.\n\
\n\
  -h\n\
     Print the help.\n\n");

  if (badparam)
    fprintf(stdout, "\nERROR: Bad parameter [%s]\n", badparam);
}
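
// Usage note (added for illustration; the executable name and paths below are
// assumptions, not taken from the ViSP test suite):
//   export VISP_INPUT_IMAGE_PATH=/path/to/ViSP-images
//   ./mbtGenericTrackingDepth -c -d -t 1 -T 8 -e 20
// would run without display nor mouse interaction, using edge features for the
// color camera and dense depth features for the depth sensor, and would stop
// after frame 20, according to the options documented above.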

bool getOptions(int argc, const char **argv, std::string &ipath, std::string &configFile, std::string &configFile_depth,
                std::string &modelFile, std::string &modelFile_depth, std::string &initFile, bool &displayFeatures,
                bool &click_allowed, bool &display, bool &useOgre, bool &showOgreConfigDialog, bool &useScanline,
                bool &computeCovariance, bool &projectionError, int &trackerType, int &tracker_type_depth,
                int &lastFrame)
{
  const char *optarg_;
  int c;
  while ((c = vpParseArgv::parse(argc, argv, GETOPTARGS, &optarg_)) > 1) {

    switch (c) {
    case 'i':
      ipath = optarg_;
      break;
    case 'x':
      configFile = optarg_;
      break;
    case 'X':
      configFile_depth = optarg_;
      break;
    case 'm':
      modelFile = optarg_;
      break;
    case 'M':
      modelFile_depth = optarg_;
      break;
    case 'n':
      initFile = optarg_;
      break;
    case 'f':
      displayFeatures = false;
      break;
    case 'c':
      click_allowed = false;
      break;
    case 'd':
      display = false;
      break;
    case 'o':
      useOgre = true;
      break;
    case 'l':
      useScanline = true;
      break;
    case 'w':
      showOgreConfigDialog = true;
      break;
    case 'v':
      computeCovariance = true;
      break;
    case 'p':
      projectionError = true;
      break;
    case 't':
      trackerType = atoi(optarg_);
      break;
    case 'T':
      tracker_type_depth = atoi(optarg_);
      break;
    case 'e':
      lastFrame = atoi(optarg_);
      break;
    case 'h':
      usage(argv[0], NULL);
      return false;
      break;

    default:
      usage(argv[0], optarg_);
      return false;
      break;
    }
  }

  if ((c == 1) || (c == -1)) {
    // standalone param or error
    usage(argv[0], NULL);
    std::cerr << "ERROR: " << std::endl;
    std::cerr << "  Bad argument " << optarg_ << std::endl << std::endl;
    return false;
  }

  return true;
}

struct rs_intrinsics {
  float ppx;       //!< Horizontal coordinate of the principal point, as a pixel offset from the left edge
  float ppy;       //!< Vertical coordinate of the principal point, as a pixel offset from the top edge
  float fx;        //!< Focal length of the image plane, as a multiple of pixel width
  float fy;        //!< Focal length of the image plane, as a multiple of pixel height
  float coeffs[5]; //!< Distortion coefficients
};

void rs_deproject_pixel_to_point(float point[3], const rs_intrinsics &intrin, const float pixel[2], float depth)
{
  float x = (pixel[0] - intrin.ppx) / intrin.fx;
  float y = (pixel[1] - intrin.ppy) / intrin.fy;

  float r2 = x * x + y * y;
  float f = 1 + intrin.coeffs[0] * r2 + intrin.coeffs[1] * r2 * r2 + intrin.coeffs[4] * r2 * r2 * r2;
  float ux = x * f + 2 * intrin.coeffs[2] * x * y + intrin.coeffs[3] * (r2 + 2 * x * x);
  float uy = y * f + 2 * intrin.coeffs[3] * x * y + intrin.coeffs[2] * (r2 + 2 * y * y);

  x = ux;
  y = uy;

  point[0] = depth * x;
  point[1] = depth * y;
  point[2] = depth;
}
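
// Note on the helper above: the intrinsics are first used to normalize the pixel
// coordinates, a Brown-Conrady style radial/tangential distortion term is then
// applied, and the result is scaled by the metric depth to obtain the 3D point
// in the depth camera frame. The name and formula suggest it mirrors
// librealsense's rs_deproject_pixel_to_point(), but that attribution is an
// assumption, not stated in this example.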

bool read_data(unsigned int cpt, const std::string &input_directory, vpImage<unsigned char> &I,
               vpImage<uint16_t> &I_depth_raw, std::vector<vpColVector> &pointcloud, unsigned int &pointcloud_width,
               unsigned int &pointcloud_height)
{
  char buffer[256];

  // Read image
  std::stringstream ss;
  ss << input_directory << "/image_%04d.pgm";
  sprintf(buffer, ss.str().c_str(), cpt);
  std::string filename_image = buffer;

  if (!vpIoTools::checkFilename(filename_image)) {
    std::cerr << "Cannot read: " << filename_image << std::endl;
    return false;
  }
  vpImageIo::read(I, filename_image);

  // Read raw depth
  ss.str("");
  ss << input_directory << "/depth_image_%04d.bin";
  sprintf(buffer, ss.str().c_str(), cpt);
  std::string filename_depth = buffer;

  std::ifstream file_depth(filename_depth.c_str(), std::ios::in | std::ios::binary);
  if (!file_depth.is_open()) {
    return false;
  }

  unsigned int height = 0, width = 0;
  vpIoTools::readBinaryValueLE(file_depth, height);
  vpIoTools::readBinaryValueLE(file_depth, width);

  I_depth_raw.resize(height, width);

  uint16_t depth_value = 0;
  for (unsigned int i = 0; i < height; i++) {
    for (unsigned int j = 0; j < width; j++) {
      vpIoTools::readBinaryValueLE(file_depth, depth_value);
      I_depth_raw[i][j] = depth_value;
    }
  }

  // Transform pointcloud
  pointcloud_width = width;
  pointcloud_height = height;
  pointcloud.resize((size_t)width * height);

  // Only for Creative SR300
  const float depth_scale = 0.000124986647f;
  rs_intrinsics depth_intrinsic;
  depth_intrinsic.ppx = 311.484558f;
  depth_intrinsic.ppy = 246.283234f;
  depth_intrinsic.fx = 476.053619f;
  depth_intrinsic.fy = 476.053497f;
  depth_intrinsic.coeffs[0] = 0.165056542f;
  depth_intrinsic.coeffs[1] = -0.0508309528f;
  depth_intrinsic.coeffs[2] = 0.00435937941f;
  depth_intrinsic.coeffs[3] = 0.00541406544f;
  depth_intrinsic.coeffs[4] = 0.250085592f;

  for (unsigned int i = 0; i < height; i++) {
    for (unsigned int j = 0; j < width; j++) {
      float scaled_depth = I_depth_raw[i][j] * depth_scale;
      float point[3];
      float pixel[2] = {(float)j, (float)i};
      rs_deproject_pixel_to_point(point, depth_intrinsic, pixel, scaled_depth);

      vpColVector data_3D(3);
      data_3D[0] = point[0];
      data_3D[1] = point[1];
      data_3D[2] = point[2];

      pointcloud[(size_t)(i * width + j)] = data_3D;
    }
  }

  return true;
}
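
// The depth_image_%04d.bin files read above are expected to start with two
// little-endian 32-bit unsigned integers (height, then width), followed by
// height * width little-endian 16-bit raw depth values stored row by row.
// This is simply a restatement of what read_data() consumes, not a separate
// file-format specification.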

void loadConfiguration(vpMbTracker *const tracker,
                       const std::string &
#if USE_XML
                       configFile
#endif
                       ,
                       const std::string &
#if USE_XML
                       configFile_depth
#endif
)
{
#if USE_XML
  // From the xml file
  dynamic_cast<vpMbGenericTracker *>(tracker)->loadConfigFile(configFile, configFile_depth);
#else
  // Edge
  vpMe me;
  me.setMaskSize(5);
  me.setMaskNumber(180);
  me.setRange(8);
  me.setThreshold(10000);
  me.setMu1(0.5);
  me.setMu2(0.5);
  me.setSampleStep(4);
  dynamic_cast<vpMbGenericTracker *>(tracker)->setMovingEdge(me);

// Klt
#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
  vpKltOpencv klt;
  klt.setMaxFeatures(10000);
  klt.setWindowSize(5);
  klt.setQuality(0.01);
  klt.setMinDistance(5);
  klt.setHarrisFreeParameter(0.02);
  klt.setBlockSize(3);
  klt.setPyramidLevels(3);

  dynamic_cast<vpMbGenericTracker *>(tracker)->setKltOpencv(klt);
  dynamic_cast<vpMbGenericTracker *>(tracker)->setKltMaskBorder(5);
#endif

  // Depth
  dynamic_cast<vpMbGenericTracker *>(tracker)->setDepthNormalFeatureEstimationMethod(
      vpMbtFaceDepthNormal::ROBUST_FEATURE_ESTIMATION);
  dynamic_cast<vpMbGenericTracker *>(tracker)->setDepthNormalPclPlaneEstimationMethod(2);
  dynamic_cast<vpMbGenericTracker *>(tracker)->setDepthNormalPclPlaneEstimationRansacMaxIter(200);
  dynamic_cast<vpMbGenericTracker *>(tracker)->setDepthNormalPclPlaneEstimationRansacThreshold(0.001);
  dynamic_cast<vpMbGenericTracker *>(tracker)->setDepthNormalSamplingStep(2, 2);

  dynamic_cast<vpMbGenericTracker *>(tracker)->setDepthDenseSamplingStep(4, 4);

  vpCameraParameters cam1, cam2;
  cam1.initPersProjWithoutDistortion(615.1674804688, 615.1675415039, 312.1889953613, 243.4373779297);
  cam2.initPersProjWithoutDistortion(476.0536193848, 476.0534973145, 311.4845581055, 246.2832336426);

  dynamic_cast<vpMbGenericTracker *>(tracker)->setCameraParameters(cam1, cam2);

  tracker->setAngleAppear(vpMath::rad(70));
  tracker->setAngleDisappear(vpMath::rad(80));

  // Specify the clipping to use
  tracker->setNearClippingDistance(0.01);
  tracker->setFarClippingDistance(2.0);
  //   tracker->setClipping(tracker->getClipping() | vpMbtPolygon::LEFT_CLIPPING
  //   | vpMbtPolygon::RIGHT_CLIPPING | vpMbtPolygon::UP_CLIPPING |
  //   vpMbtPolygon::DOWN_CLIPPING); // Equivalent to FOV_CLIPPING
#endif
}
} // namespace

int main(int argc, const char **argv)
{
  try {
    std::string env_ipath;
    std::string opt_ipath;
    std::string ipath;
    std::string opt_configFile;
    std::string opt_configFile_depth;
    std::string opt_modelFile;
    std::string opt_modelFile_depth;
    std::string opt_initFile;
    std::string initFile;
    bool displayFeatures = true;
    bool opt_click_allowed = true;
    bool opt_display = true;
    bool useOgre = false;
    bool showOgreConfigDialog = false;
    bool useScanline = false;
    bool computeCovariance = false;
    bool projectionError = false;
    int trackerType_image = vpMbGenericTracker::EDGE_TRACKER;
    int trackerType_depth = vpMbGenericTracker::DEPTH_DENSE_TRACKER;
#if defined(__mips__) || defined(__mips) || defined(mips) || defined(__MIPS__)
    // To avoid Debian test timeout
    int opt_lastFrame = 5;
#else
    int opt_lastFrame = -1;
#endif

    // Get the visp-images-data package path or VISP_INPUT_IMAGE_PATH
    // environment variable value
    env_ipath = vpIoTools::getViSPImagesDataPath();

    // Set the default input path
    if (!env_ipath.empty())
      ipath = env_ipath;

    // Read the command line options
    if (!getOptions(argc, argv, opt_ipath, opt_configFile, opt_configFile_depth, opt_modelFile, opt_modelFile_depth,
                    opt_initFile, displayFeatures, opt_click_allowed, opt_display, useOgre, showOgreConfigDialog,
                    useScanline, computeCovariance, projectionError, trackerType_image, trackerType_depth,
                    opt_lastFrame)) {
      return EXIT_FAILURE;
    }

#if !defined(VISP_HAVE_MODULE_KLT) || (!defined(VISP_HAVE_OPENCV) || (VISP_HAVE_OPENCV_VERSION < 0x020100))
    if (trackerType_image == /*vpMbGenericTracker::KLT_TRACKER*/ 2) {
      std::cout << "KLT only features cannot be used: ViSP is not built with "
                   "KLT module or OpenCV is not available."
                << std::endl;
      return EXIT_SUCCESS;
    }
#endif

    // Test if an input path is set
    if (opt_ipath.empty() && env_ipath.empty()) {
      usage(argv[0], NULL);
      std::cerr << std::endl << "ERROR:" << std::endl;
      std::cerr << "  Use -i <visp image path> option or set VISP_INPUT_IMAGE_PATH " << std::endl
                << "  environment variable to specify the location of the " << std::endl
                << "  image path where test images are located." << std::endl
                << std::endl;

      return EXIT_FAILURE;
    }

    // Get the option values
    ipath = vpIoTools::createFilePath(!opt_ipath.empty() ? opt_ipath : env_ipath, "mbt-depth/castel/castel");

    std::string dir_path = vpIoTools::createFilePath(!opt_ipath.empty() ? opt_ipath : env_ipath, "mbt-depth");
    if (!vpIoTools::checkDirectory(dir_path)) {
      std::cerr << "ViSP-images does not contain the folder: " << dir_path << "!" << std::endl;
      return EXIT_SUCCESS;
    }

    std::string configFile, configFile_depth;
    if (!opt_configFile.empty())
      configFile = opt_configFile;
    else
      configFile =
          vpIoTools::createFilePath(!opt_ipath.empty() ? opt_ipath : env_ipath, "mbt-depth/castel/chateau.xml");

    if (!opt_configFile_depth.empty())
      configFile_depth = opt_configFile_depth;
    else
      configFile_depth =
          vpIoTools::createFilePath(!opt_ipath.empty() ? opt_ipath : env_ipath, "mbt-depth/castel/chateau_depth.xml");

    std::string modelFile, modelFile_depth;
    if (!opt_modelFile.empty())
      modelFile = opt_modelFile;
    else {
#if defined(VISP_HAVE_COIN3D) && (COIN_MAJOR_VERSION == 2 || COIN_MAJOR_VERSION == 3 || COIN_MAJOR_VERSION == 4)
      modelFile =
          vpIoTools::createFilePath(!opt_ipath.empty() ? opt_ipath : env_ipath, "mbt-depth/castel/chateau_gantry.wrl");
#else
      modelFile = vpIoTools::createFilePath(!opt_ipath.empty() ? opt_ipath : env_ipath, "mbt-depth/castel/chateau.cao");
#endif
    }

    if (!opt_modelFile_depth.empty())
      modelFile_depth = opt_modelFile_depth;
    else
      modelFile_depth =
          vpIoTools::createFilePath(!opt_ipath.empty() ? opt_ipath : env_ipath, "mbt-depth/castel/chateau.cao");

    std::string vrml_ext = ".wrl";
    bool use_vrml =
        (modelFile.compare(modelFile.length() - vrml_ext.length(), vrml_ext.length(), vrml_ext) == 0) ||
        (modelFile_depth.compare(modelFile_depth.length() - vrml_ext.length(), vrml_ext.length(), vrml_ext) == 0);

    if (use_vrml) {
#if defined(VISP_HAVE_COIN3D) && (COIN_MAJOR_VERSION == 2 || COIN_MAJOR_VERSION == 3 || COIN_MAJOR_VERSION == 4)
      std::cout << "use_vrml: " << use_vrml << std::endl;
#else
      std::cerr << "Error: vrml model file is only supported if ViSP is "
                   "built with the Coin3D 3rd party library"
                << std::endl;
      return EXIT_FAILURE;
#endif
    }

    if (!opt_initFile.empty())
      initFile = opt_initFile;
    else
      initFile = vpIoTools::createFilePath(!opt_ipath.empty() ? opt_ipath : env_ipath, "mbt-depth/castel/chateau.init");

    vpImage<unsigned char> I, I_depth;
    vpImage<uint16_t> I_depth_raw;
    std::vector<vpColVector> pointcloud;
    unsigned int pointcloud_width, pointcloud_height;
    if (!read_data(0, ipath, I, I_depth_raw, pointcloud, pointcloud_width, pointcloud_height)) {
      std::cerr << "Cannot open sequence: " << ipath << std::endl;
      return EXIT_FAILURE;
    }

    vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);

    // initialise a display
#if defined VISP_HAVE_X11
    vpDisplayX display1, display2;
#elif defined VISP_HAVE_GDI
    vpDisplayGDI display1, display2;
#elif defined VISP_HAVE_OPENCV
    vpDisplayOpenCV display1, display2;
#elif defined VISP_HAVE_D3D9
    vpDisplayD3D display1, display2;
#elif defined VISP_HAVE_GTK
    vpDisplayGTK display1, display2;
#else
    opt_display = false;
#endif
    if (opt_display) {
#if defined(VISP_HAVE_DISPLAY)
      display1.setDownScalingFactor(vpDisplay::SCALE_AUTO);
      display2.setDownScalingFactor(vpDisplay::SCALE_AUTO);
      display1.init(I, 100, 100, "Test tracking (Left)");
      display2.init(I_depth, (int)(I.getWidth() / vpDisplay::getDownScalingFactor(I)) + 110, 100,
                    "Test tracking (Right)");
#endif
      vpDisplay::display(I);
      vpDisplay::display(I_depth);
      vpDisplay::flush(I);
      vpDisplay::flush(I_depth);
    }

    std::vector<int> trackerTypes(2);
    trackerTypes[0] = trackerType_image;
    trackerTypes[1] = trackerType_depth;
    // Object pointer to check that inheritance is ok
    vpMbTracker *tracker = new vpMbGenericTracker(trackerTypes);
    vpHomogeneousMatrix c1Mo, c2Mo;
    vpCameraParameters cam1, cam2;

    loadConfiguration(tracker, configFile, configFile_depth);

    vpHomogeneousMatrix depth_M_color;
    std::string depth_M_color_filename =
        vpIoTools::createFilePath(!opt_ipath.empty() ? opt_ipath : env_ipath, "mbt-depth/castel/depth_M_color.txt");
    {
      std::ifstream depth_M_color_file(depth_M_color_filename.c_str());
      depth_M_color.load(depth_M_color_file);
      std::map<std::string, vpHomogeneousMatrix> mapOfCameraTransformationMatrices;
      mapOfCameraTransformationMatrices["Camera2"] = depth_M_color;
      dynamic_cast<vpMbGenericTracker *>(tracker)->setCameraTransformationMatrix(mapOfCameraTransformationMatrices);
    }

    // Display the moving edges and the KLT points
    tracker->setDisplayFeatures(displayFeatures);

    // Tell the tracker whether to use Ogre3D for visibility tests
    tracker->setOgreVisibilityTest(useOgre);
    if (useOgre)
      tracker->setOgreShowConfigDialog(showOgreConfigDialog);

    // Tell the tracker whether to use the scanline visibility tests
    tracker->setScanLineVisibilityTest(useScanline);

    // Tell the tracker whether to compute the covariance matrix
    tracker->setCovarianceComputation(computeCovariance);

    // Tell the tracker whether to compute the projection error
    tracker->setProjectionErrorComputation(projectionError);

    // Retrieve the camera parameters from the tracker
    dynamic_cast<vpMbGenericTracker *>(tracker)->getCameraParameters(cam1, cam2);

    // Loop to position the object
    if (opt_display && opt_click_allowed) {
      while (!vpDisplay::getClick(I, false)) {
        vpDisplay::display(I);
        vpDisplay::displayText(I, 15, 10, "click after positioning the object", vpColor::red);
        vpDisplay::flush(I);
      }
    }

    // Load the 3D model (either a vrml file or a .cao file)
    dynamic_cast<vpMbGenericTracker *>(tracker)->loadModel(modelFile, modelFile_depth);

    if (opt_display && opt_click_allowed) {
      std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
      mapOfImages["Camera1"] = &I;
      mapOfImages["Camera2"] = &I_depth;
      std::map<std::string, std::string> mapOfInitFiles;
      mapOfInitFiles["Camera1"] = initFile;

      // Initialise the tracker by clicking on the image
      dynamic_cast<vpMbGenericTracker *>(tracker)->initClick(mapOfImages, mapOfInitFiles, true);
      dynamic_cast<vpMbGenericTracker *>(tracker)->getPose(c1Mo, c2Mo);
      // display the 3D model at the given pose
      dynamic_cast<vpMbGenericTracker *>(tracker)->display(I, I_depth, c1Mo, c2Mo, cam1, cam2, vpColor::red);
    } else {
      vpHomogeneousMatrix c1Moi(0.06846423368, 0.09062570884, 0.3401096693, -2.671882598, 0.1174275908, -0.6011935263);
      vpHomogeneousMatrix c2Moi(0.04431452054, 0.09294637757, 0.3357760654, -2.677922443, 0.121297639, -0.6028463357);
      dynamic_cast<vpMbGenericTracker *>(tracker)->initFromPose(I, I_depth, c1Moi, c2Moi);
    }

    // track the model
    {
      std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
      mapOfImages["Camera1"] = &I;
      std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds;
      mapOfPointclouds["Camera2"] = &pointcloud;
      std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
      mapOfWidths["Camera2"] = pointcloud_width;
      mapOfHeights["Camera2"] = pointcloud_height;

      dynamic_cast<vpMbGenericTracker *>(tracker)->track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
    }
    dynamic_cast<vpMbGenericTracker *>(tracker)->getPose(c1Mo, c2Mo);

    if (opt_display) {
      vpDisplay::flush(I);
      vpDisplay::flush(I_depth);
    }

    bool quit = false, click = false;
    unsigned int frame_index = 0;
    std::vector<double> time_vec;
    while (read_data(frame_index, ipath, I, I_depth_raw, pointcloud, pointcloud_width, pointcloud_height) && !quit &&
           (opt_lastFrame > 0 ? (int)frame_index <= opt_lastFrame : true)) {
      vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);

      if (opt_display) {
        vpDisplay::display(I);
        vpDisplay::display(I_depth);

        std::stringstream ss;
        ss << "Num frame: " << frame_index;
        vpDisplay::displayText(I, 40, 20, ss.str(), vpColor::red);
      }

      // Test resetting the tracker
      if (frame_index == 10) {
        std::cout << "----------Test reset tracker----------" << std::endl;
        if (opt_display) {
          vpDisplay::display(I);
          vpDisplay::display(I_depth);
        }

        tracker->resetTracker();

        loadConfiguration(tracker, configFile, configFile_depth);
        dynamic_cast<vpMbGenericTracker *>(tracker)->loadModel(modelFile, modelFile_depth);
        dynamic_cast<vpMbGenericTracker *>(tracker)->setCameraParameters(cam1, cam2);
        tracker->setOgreVisibilityTest(useOgre);
        tracker->setScanLineVisibilityTest(useScanline);
        tracker->setCovarianceComputation(computeCovariance);
        tracker->setProjectionErrorComputation(projectionError);
        dynamic_cast<vpMbGenericTracker *>(tracker)->initFromPose(I, I_depth, c1Mo, c2Mo);
      }

// Test to set an initial pose
#if USE_SMALL_DATASET
      if (frame_index == 20) {
        c1Mo.buildFrom(0.07734634051, 0.08993639906, 0.342344402, -2.708409543, 0.0669276477, -0.3798958303);
        c2Mo.buildFrom(0.05319520317, 0.09223511976, 0.3380095812, -2.71438192, 0.07141055397, -0.3810081638);
#else
      if (frame_index == 50) {
        c1Mo.buildFrom(0.09280663035, 0.09277655672, 0.330415149, -2.724431817, 0.0293932671, 0.02027966377);
        c2Mo.buildFrom(0.06865933578, 0.09494713501, 0.3260555142, -2.730027451, 0.03498390135, 0.01989831338);
#endif
        std::cout << "Test set pose" << std::endl;
        dynamic_cast<vpMbGenericTracker *>(tracker)->setPose(I, I_depth, c1Mo, c2Mo);
      }

#if USE_SMALL_DATASET
      // track the object: stop tracking from frame 15 to 20
      if (frame_index < 15 || frame_index >= 20) {
#else
      // track the object: stop tracking from frame 30 to 50
      if (frame_index < 30 || frame_index >= 50) {
#endif
        std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
        mapOfImages["Camera1"] = &I;
        std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds;
        mapOfPointclouds["Camera2"] = &pointcloud;
        std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
        mapOfWidths["Camera2"] = pointcloud_width;
        mapOfHeights["Camera2"] = pointcloud_height;

        double t = vpTime::measureTimeMs();
        dynamic_cast<vpMbGenericTracker *>(tracker)->track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
        t = vpTime::measureTimeMs() - t;
        time_vec.push_back(t);

        dynamic_cast<vpMbGenericTracker *>(tracker)->getPose(c1Mo, c2Mo);

        if (opt_display) {
          // display the 3D model
          dynamic_cast<vpMbGenericTracker *>(tracker)->display(I, I_depth, c1Mo, c2Mo, cam1, cam2, vpColor::darkRed);
          // display the frame
          vpDisplay::displayFrame(I, c1Mo, cam1, 0.05);
          vpDisplay::displayFrame(I_depth, c2Mo, cam2, 0.05);
          // computation time
          std::stringstream ss;
          ss << "Computation time: " << t << " ms";
          vpDisplay::displayText(I, 60, 20, ss.str(), vpColor::red);
          // nb features
          ss.str("");
          ss << "nb features: " << tracker->getError().getRows();
          vpDisplay::displayText(I_depth, 80, 20, ss.str(), vpColor::red);
          {
            std::stringstream ss;
            ss << "Features: edges " << dynamic_cast<vpMbGenericTracker *>(tracker)->getNbFeaturesEdge()
               << ", klt " << dynamic_cast<vpMbGenericTracker *>(tracker)->getNbFeaturesKlt()
               << ", depth " << dynamic_cast<vpMbGenericTracker *>(tracker)->getNbFeaturesDepthDense();
            vpDisplay::displayText(I, I.getHeight() - 30, 20, ss.str(), vpColor::red);
          }
        }
      }

      if (opt_click_allowed && opt_display) {
        vpDisplay::displayText(I, 10, 10, "Click to quit", vpColor::red);
        vpMouseButton::vpMouseButtonType button;
        if (vpDisplay::getClick(I, button, click)) {
          switch (button) {
          case vpMouseButton::button1:
            quit = !click;
            break;

          case vpMouseButton::button3:
            click = !click;
            break;

          default:
            break;
          }
        }
      }

      if (computeCovariance) {
        std::cout << "Covariance matrix: \n" << tracker->getCovarianceMatrix() << std::endl << std::endl;
      }

      if (projectionError) {
        std::cout << "Projection error: " << tracker->getProjectionError() << std::endl << std::endl;
      }

      if (opt_display) {
        vpDisplay::flush(I);
        vpDisplay::flush(I_depth);
      }

      frame_index++;
    }

    std::cout << "\nFinal poses, c1Mo:\n" << c1Mo << "\nc2Mo:\n" << c2Mo << std::endl;
    std::cout << "\nComputation time, Mean: " << vpMath::getMean(time_vec)
              << " ms ; Median: " << vpMath::getMedian(time_vec) << " ms ; Std: " << vpMath::getStdev(time_vec)
              << " ms" << std::endl;

    if (opt_click_allowed && !quit) {
      vpDisplay::getClick(I);
    }

    delete tracker;
    tracker = NULL;

    return EXIT_SUCCESS;
  } catch (const vpException &e) {
    std::cout << "Catch an exception: " << e << std::endl;
    return EXIT_FAILURE;
  }
}

#elif !(defined(VISP_HAVE_MODULE_MBT) && defined(VISP_HAVE_DISPLAY))
int main()
{
  std::cout << "Cannot run this example: visp_mbt, visp_gui modules are required." << std::endl;
  return EXIT_SUCCESS;
}
#else
int main()
{
  std::cout << "Cannot run this example: install Lapack, Eigen3 or OpenCV" << std::endl;
  return EXIT_SUCCESS;
}
#endif