Visual Servoing Platform version 3.5.0
vpDetectorAprilTag.cpp
1/****************************************************************************
2 *
3 * ViSP, open source Visual Servoing Platform software.
4 * Copyright (C) 2005 - 2019 by Inria. All rights reserved.
5 *
6 * This software is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 * See the file LICENSE.txt at the root directory of this source
11 * distribution for additional information about the GNU GPL.
12 *
13 * For using ViSP with software that can not be combined with the GNU
14 * GPL, please contact Inria about acquiring a ViSP Professional
15 * Edition License.
16 *
17 * See http://visp.inria.fr for more information.
18 *
19 * This software was developed at:
20 * Inria Rennes - Bretagne Atlantique
21 * Campus Universitaire de Beaulieu
22 * 35042 Rennes Cedex
23 * France
24 *
25 * If you have questions regarding the use of this file, please contact
26 * Inria at visp@inria.fr
27 *
28 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
29 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
30 *
31 * Description:
32 * Base class for April Tag detection.
33 *
34 *****************************************************************************/
35#include <visp3/core/vpConfig.h>
36
37#ifdef VISP_HAVE_APRILTAG
38#include <map>
39
40#include <apriltag.h>
41#include <common/homography.h>
42#include <tag16h5.h>
43#include <tag25h7.h>
44#include <tag25h9.h>
45#include <tag36h10.h>
46#include <tag36h11.h>
47#include <tagCircle21h7.h>
48#include <tagStandard41h12.h>
49#include <apriltag_pose.h>
50#include <visp3/detection/vpDetectorAprilTag.h>
51#if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
52#include <tagCircle49h12.h>
53#include <tagCustom48h12.h>
54#include <tagStandard41h12.h>
55#include <tagStandard52h13.h>
56#endif
57
58#include <visp3/core/vpDisplay.h>
59#include <visp3/core/vpPixelMeterConversion.h>
60#include <visp3/core/vpPoint.h>
61#include <visp3/vision/vpPose.h>
62
63#ifndef DOXYGEN_SHOULD_SKIP_THIS
64class vpDetectorAprilTag::Impl
65{
66public:
67 Impl(const vpAprilTagFamily &tagFamily, const vpPoseEstimationMethod &method)
68 : m_poseEstimationMethod(method), m_tagsId(), m_tagFamily(tagFamily),
69 m_td(NULL), m_tf(NULL), m_detections(NULL), m_zAlignedWithCameraFrame(false)
70 {
71 switch (m_tagFamily) {
72 case TAG_36h11:
73 m_tf = tag36h11_create();
74 break;
75
76 case TAG_36h10:
77 m_tf = tag36h10_create();
78 break;
79
80 case TAG_36ARTOOLKIT:
81 break;
82
83 case TAG_25h9:
84 m_tf = tag25h9_create();
85 break;
86
87 case TAG_25h7:
88 m_tf = tag25h7_create();
89 break;
90
91 case TAG_16h5:
92 m_tf = tag16h5_create();
93 break;
94
95 case TAG_CIRCLE21h7:
96 m_tf = tagCircle21h7_create();
97 break;
98
99 case TAG_CIRCLE49h12:
100#if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
101 m_tf = tagCircle49h12_create();
102#endif
103 break;
104
105 case TAG_CUSTOM48h12:
106#if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
107 m_tf = tagCustom48h12_create();
108#endif
109 break;
110
 111 case TAG_STANDARD52h13:
 112#if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
 113 m_tf = tagStandard52h13_create();
 114#endif
 115 break;
 116
 117 case TAG_STANDARD41h12:
 118#if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
119 m_tf = tagStandard41h12_create();
120#endif
121 break;
122
123 default:
124 throw vpException(vpException::fatalError, "Unknown Tag family!");
125 }
126
127 if (m_tagFamily != TAG_36ARTOOLKIT && m_tf) {
128 m_td = apriltag_detector_create();
129 apriltag_detector_add_family(m_td, m_tf);
130 }
131
132 m_mapOfCorrespondingPoseMethods[DEMENTHON_VIRTUAL_VS] = vpPose::DEMENTHON;
133 m_mapOfCorrespondingPoseMethods[LAGRANGE_VIRTUAL_VS] = vpPose::LAGRANGE;
134 }
135
136 Impl(const Impl &o)
 137 : m_poseEstimationMethod(o.m_poseEstimationMethod), m_tagsId(o.m_tagsId), m_tagFamily(o.m_tagFamily),
 138 m_td(NULL), m_tf(NULL), m_detections(NULL), m_zAlignedWithCameraFrame(o.m_zAlignedWithCameraFrame)
139 {
140 switch (m_tagFamily) {
141 case TAG_36h11:
142 m_tf = tag36h11_create();
143 break;
144
145 case TAG_36h10:
146 m_tf = tag36h10_create();
147 break;
148
149 case TAG_36ARTOOLKIT:
150 break;
151
152 case TAG_25h9:
153 m_tf = tag25h9_create();
154 break;
155
156 case TAG_25h7:
157 m_tf = tag25h7_create();
158 break;
159
160 case TAG_16h5:
161 m_tf = tag16h5_create();
162 break;
163
164 case TAG_CIRCLE21h7:
165 m_tf = tagCircle21h7_create();
166 break;
167
168 case TAG_CIRCLE49h12:
169#if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
170 m_tf = tagCircle49h12_create();
171#endif
172 break;
173
174 case TAG_CUSTOM48h12:
175#if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
176 m_tf = tagCustom48h12_create();
177#endif
178 break;
179
 180 case TAG_STANDARD52h13:
 181#if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
 182 m_tf = tagStandard52h13_create();
 183#endif
 184 break;
 185
 186 case TAG_STANDARD41h12:
 187#if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
188 m_tf = tagStandard41h12_create();
189#endif
190 break;
191
192 default:
193 throw vpException(vpException::fatalError, "Unknown Tag family!");
194 }
195
196 if (m_tagFamily != TAG_36ARTOOLKIT && m_tf) {
197 m_td = apriltag_detector_create();
198 apriltag_detector_add_family(m_td, m_tf);
199 }
200
201 m_mapOfCorrespondingPoseMethods[DEMENTHON_VIRTUAL_VS] = vpPose::DEMENTHON;
202 m_mapOfCorrespondingPoseMethods[LAGRANGE_VIRTUAL_VS] = vpPose::LAGRANGE;
203
204 if (o.m_detections != NULL) {
205 m_detections = apriltag_detections_copy(o.m_detections);
206 }
207 }
208
209 ~Impl()
210 {
211 if (m_td) {
212 apriltag_detector_destroy(m_td);
213 }
214
215 if (m_tf) {
216 switch (m_tagFamily) {
217 case TAG_36h11:
218 tag36h11_destroy(m_tf);
219 break;
220
221 case TAG_36h10:
222 tag36h10_destroy(m_tf);
223 break;
224
225 case TAG_36ARTOOLKIT:
226 break;
227
228 case TAG_25h9:
229 tag25h9_destroy(m_tf);
230 break;
231
232 case TAG_25h7:
233 tag25h7_destroy(m_tf);
234 break;
235
236 case TAG_16h5:
237 tag16h5_destroy(m_tf);
238 break;
239
240 case TAG_CIRCLE21h7:
241 tagCircle21h7_destroy(m_tf);
242 break;
243
244 case TAG_CIRCLE49h12:
245 #if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
 246 tagCircle49h12_destroy(m_tf);
247 #endif
248 break;
249
250 case TAG_CUSTOM48h12:
251 #if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
252 tagCustom48h12_destroy(m_tf);
253 #endif
254 break;
255
 256 case TAG_STANDARD52h13:
 257 #if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
 258 tagStandard52h13_destroy(m_tf);
 259 #endif
 260 break;
 261
 262 case TAG_STANDARD41h12:
 263 #if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
264 tagStandard41h12_destroy(m_tf);
265 #endif
266 break;
267
268 default:
269 break;
270 }
271 }
272
273 if (m_detections) {
274 apriltag_detections_destroy(m_detections);
275 m_detections = NULL;
276 }
277 }
278
279 void convertHomogeneousMatrix(const apriltag_pose_t &pose, vpHomogeneousMatrix &cMo) {
280 for (unsigned int i = 0; i < 3; i++) {
281 for (unsigned int j = 0; j < 3; j++) {
282 cMo[i][j] = MATD_EL(pose.R, i, j);
283 }
284 cMo[i][3] = MATD_EL(pose.t, i, 0);
285 }
286 }
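
  // Editor's addition: illustrative sketch only, not part of the original ViSP sources.
  // It shows the reverse mapping (vpHomogeneousMatrix -> apriltag_pose_t), equivalent to what
  // getPose() does further below with matd_create_data() when seeding get_second_solution().
  // The helper name is hypothetical.
  void convertToAprilTagPose(const vpHomogeneousMatrix &cMo, apriltag_pose_t &pose)
  {
    pose.R = matd_create(3, 3); // 3x3 row-major rotation, accessed through MATD_EL()
    pose.t = matd_create(3, 1); // translation in meters
    for (unsigned int i = 0; i < 3; i++) {
      for (unsigned int j = 0; j < 3; j++) {
        MATD_EL(pose.R, i, j) = cMo[i][j];
      }
      MATD_EL(pose.t, i, 0) = cMo[i][3];
    }
  }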
287
288 bool detect(const vpImage<unsigned char> &I, double tagSize, const vpCameraParameters &cam,
289 std::vector<std::vector<vpImagePoint> > &polygons,
290 std::vector<std::string> &messages, bool displayTag, const vpColor color,
291 unsigned int thickness, std::vector<vpHomogeneousMatrix> *cMo_vec,
292 std::vector<vpHomogeneousMatrix> *cMo_vec2, std::vector<double> *projErrors,
293 std::vector<double> *projErrors2)
294 {
 295 if (m_tagFamily == TAG_36ARTOOLKIT) {
 296 //TAG_36ARTOOLKIT is not available anymore
297 std::cerr << "TAG_36ARTOOLKIT detector is not available anymore." << std::endl;
298 return false;
299 }
300#if !defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
 301 if (m_tagFamily == TAG_CIRCLE49h12 || m_tagFamily == TAG_CUSTOM48h12 || m_tagFamily == TAG_STANDARD41h12 || m_tagFamily == TAG_STANDARD52h13) {
 302 std::cerr << "TAG_CIRCLE49h12, TAG_CUSTOM48h12, TAG_STANDARD41h12 and TAG_STANDARD52h13 are disabled." << std::endl;
303 return false;
304 }
305#endif
306
307 const bool computePose = (cMo_vec != NULL);
308
309 image_u8_t im = {/*.width =*/(int32_t)I.getWidth(),
310 /*.height =*/(int32_t)I.getHeight(),
311 /*.stride =*/(int32_t)I.getWidth(),
312 /*.buf =*/I.bitmap};
313
314 if (m_detections) {
315 apriltag_detections_destroy(m_detections);
316 m_detections = NULL;
317 }
318
319 m_detections = apriltag_detector_detect(m_td, &im);
320 int nb_detections = zarray_size(m_detections);
321 bool detected = nb_detections > 0;
322
323 polygons.resize(static_cast<size_t>(nb_detections));
324 messages.resize(static_cast<size_t>(nb_detections));
325 m_tagsId.resize(static_cast<size_t>(nb_detections));
326
327 for (int i = 0; i < zarray_size(m_detections); i++) {
328 apriltag_detection_t *det;
329 zarray_get(m_detections, i, &det);
330
331 std::vector<vpImagePoint> polygon;
332 for (int j = 0; j < 4; j++) {
333 polygon.push_back(vpImagePoint(det->p[j][1], det->p[j][0]));
334 }
335 polygons[static_cast<size_t>(i)] = polygon;
336 std::stringstream ss;
337 ss << m_tagFamily << " id: " << det->id;
338 messages[static_cast<size_t>(i)] = ss.str();
339 m_tagsId[static_cast<size_t>(i)] = det->id;
340
341 if (displayTag) {
342 vpColor Ox = (color == vpColor::none) ? vpColor::red : color;
343 vpColor Oy = (color == vpColor::none) ? vpColor::green : color;
344 vpColor Ox2 = (color == vpColor::none) ? vpColor::yellow : color;
345 vpColor Oy2 = (color == vpColor::none) ? vpColor::blue : color;
346
347 vpDisplay::displayLine(I, (int)det->p[0][1], (int)det->p[0][0], (int)det->p[1][1], (int)det->p[1][0],
348 Ox, thickness);
349 vpDisplay::displayLine(I, (int)det->p[0][1], (int)det->p[0][0], (int)det->p[3][1], (int)det->p[3][0],
350 Oy, thickness);
351 vpDisplay::displayLine(I, (int)det->p[1][1], (int)det->p[1][0], (int)det->p[2][1], (int)det->p[2][0],
352 Ox2, thickness);
353 vpDisplay::displayLine(I, (int)det->p[2][1], (int)det->p[2][0], (int)det->p[3][1], (int)det->p[3][0],
354 Oy2, thickness);
355 }
356
357 if (computePose) {
358 vpHomogeneousMatrix cMo, cMo2;
359 double err1, err2;
360 if (getPose(static_cast<size_t>(i), tagSize, cam, cMo, cMo_vec2 ? &cMo2 : NULL,
361 projErrors ? &err1 : NULL, projErrors2 ? &err2 : NULL)) {
362 cMo_vec->push_back(cMo);
363 if (cMo_vec2) {
364 cMo_vec2->push_back(cMo2);
365 }
366 if (projErrors) {
367 projErrors->push_back(err1);
368 }
369 if (projErrors2) {
370 projErrors2->push_back(err2);
371 }
372 }
373 // else case should never happen
374 }
375 }
376
377 return detected;
378 }
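
  // Editor's addition: illustrative sketch only, not part of the original ViSP sources.
  // detect() above wraps I.bitmap into an image_u8_t without copying, which relies on vpImage
  // storing its rows contiguously (stride == width). If that assumption did not hold, a deep
  // copy such as this hypothetical helper would be needed instead.
  image_u8_t *copyToImageU8(const vpImage<unsigned char> &I)
  {
    image_u8_t *im = image_u8_create(I.getWidth(), I.getHeight());
    for (unsigned int r = 0; r < I.getHeight(); r++) {
      // image_u8_create() may pad each row, so copy row by row using the returned stride
      for (unsigned int c = 0; c < I.getWidth(); c++) {
        im->buf[r * static_cast<unsigned int>(im->stride) + c] = I.bitmap[r * I.getWidth() + c];
      }
    }
    return im; // to be released by the caller with image_u8_destroy()
  }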
379
380 bool getPose(size_t tagIndex, double tagSize, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, vpHomogeneousMatrix *cMo2,
381 double *projErrors, double *projErrors2) {
382 if (m_detections == NULL) {
 383 throw(vpException(vpException::fatalError, "Cannot get tag index=%d pose: detection empty", static_cast<int>(tagIndex)));
384 }
 385 if (m_tagFamily == TAG_36ARTOOLKIT) {
 386 //TAG_36ARTOOLKIT is not available anymore
387 std::cerr << "TAG_36ARTOOLKIT detector is not available anymore." << std::endl;
388 return false;
389 }
390#if !defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
 391 if (m_tagFamily == TAG_CIRCLE49h12 || m_tagFamily == TAG_CUSTOM48h12 || m_tagFamily == TAG_STANDARD41h12 || m_tagFamily == TAG_STANDARD52h13) {
 392 std::cerr << "TAG_CIRCLE49h12, TAG_CUSTOM48h12, TAG_STANDARD41h12 and TAG_STANDARD52h13 are disabled." << std::endl;
393 return false;
394 }
395#endif
396
 397 int nb_detections = zarray_size(m_detections);
 398 if (tagIndex >= (size_t)nb_detections) {
 399 return false;
 400 }
 401
 402 apriltag_detection_t *det;
 403 zarray_get(m_detections, static_cast<int>(tagIndex), &det);
404
405 //In AprilTag3, estimate_pose_for_tag_homography() and estimate_tag_pose() have been added.
406 //They use a tag frame aligned with the camera frame
 407 //Before the release of AprilTag3, the convention was to define the z-axis of the tag going upward.
 408 //To keep compatibility, we keep the same convention as before and provide setZAlignedWithCameraAxis() to switch.
409 //Under the hood, we use aligned frames everywhere and transform the pose according to the option.
410
 411 vpHomogeneousMatrix cMo_homography_ortho_iter;
 412 if (m_poseEstimationMethod == HOMOGRAPHY_ORTHOGONAL_ITERATION ||
 413 m_poseEstimationMethod == BEST_RESIDUAL_VIRTUAL_VS) {
 414 double fx = cam.get_px(), fy = cam.get_py();
415 double cx = cam.get_u0(), cy = cam.get_v0();
416
417 apriltag_detection_info_t info;
418 info.det = det;
419 info.tagsize = tagSize;
420 info.fx = fx;
421 info.fy = fy;
422 info.cx = cx;
423 info.cy = cy;
424
 425 //projErrors and projErrors2 will be overridden later
426 getPoseWithOrthogonalMethod(info, cMo, cMo2, projErrors, projErrors2);
427 cMo_homography_ortho_iter = cMo;
428 }
429
 430 vpHomogeneousMatrix cMo_homography;
 431 if (m_poseEstimationMethod == HOMOGRAPHY || m_poseEstimationMethod == HOMOGRAPHY_VIRTUAL_VS ||
 432 m_poseEstimationMethod == BEST_RESIDUAL_VIRTUAL_VS) {
 433 double fx = cam.get_px(), fy = cam.get_py();
434 double cx = cam.get_u0(), cy = cam.get_v0();
435
436 apriltag_detection_info_t info;
437 info.det = det;
438 info.tagsize = tagSize;
439 info.fx = fx;
440 info.fy = fy;
441 info.cx = cx;
442 info.cy = cy;
443
444 apriltag_pose_t pose;
445 estimate_pose_for_tag_homography(&info, &pose);
446 convertHomogeneousMatrix(pose, cMo);
447
448 matd_destroy(pose.R);
449 matd_destroy(pose.t);
450
451 cMo_homography = cMo;
452 }
453
454 // Add marker object points
455 vpPose pose;
456 vpPoint pt;
457
458 vpImagePoint imPt;
459 double x = 0.0, y = 0.0;
460 std::vector<vpPoint> pts(4);
 461 pt.setWorldCoordinates(-tagSize / 2.0, tagSize / 2.0, 0.0);
 462 imPt.set_uv(det->p[0][0], det->p[0][1]);
 463 vpPixelMeterConversion::convertPoint(cam, imPt.get_u(), imPt.get_v(), x, y);
 464 pt.set_x(x);
 465 pt.set_y(y);
 466 pts[0] = pt;
 467
 468 pt.setWorldCoordinates(tagSize / 2.0, tagSize / 2.0, 0.0);
 469 imPt.set_uv(det->p[1][0], det->p[1][1]);
 470 vpPixelMeterConversion::convertPoint(cam, imPt.get_u(), imPt.get_v(), x, y);
 471 pt.set_x(x);
 472 pt.set_y(y);
 473 pts[1] = pt;
 474
 475 pt.setWorldCoordinates(tagSize / 2.0, -tagSize / 2.0, 0.0);
 476 imPt.set_uv(det->p[2][0], det->p[2][1]);
 477 vpPixelMeterConversion::convertPoint(cam, imPt.get_u(), imPt.get_v(), x, y);
 478 pt.set_x(x);
 479 pt.set_y(y);
 480 pts[2] = pt;
 481
 482 pt.setWorldCoordinates(-tagSize / 2.0, -tagSize / 2.0, 0.0);
 483 imPt.set_uv(det->p[3][0], det->p[3][1]);
 484 vpPixelMeterConversion::convertPoint(cam, imPt.get_u(), imPt.get_v(), x, y);
 485 pt.set_x(x);
 486 pt.set_y(y);
 487 pts[3] = pt;
488
489 pose.addPoints(pts);
 490
 491 if (m_poseEstimationMethod != HOMOGRAPHY && m_poseEstimationMethod != HOMOGRAPHY_VIRTUAL_VS &&
 492 m_poseEstimationMethod != HOMOGRAPHY_ORTHOGONAL_ITERATION) {
 493 if (m_poseEstimationMethod == BEST_RESIDUAL_VIRTUAL_VS) {
 494 vpHomogeneousMatrix cMo_dementhon, cMo_lagrange;
495
496 double residual_dementhon = std::numeric_limits<double>::max(),
497 residual_lagrange = std::numeric_limits<double>::max();
498 double residual_homography = pose.computeResidual(cMo_homography);
499 double residual_homography_ortho_iter = pose.computeResidual(cMo_homography_ortho_iter);
500
501 if (pose.computePose(vpPose::DEMENTHON, cMo_dementhon)) {
502 residual_dementhon = pose.computeResidual(cMo_dementhon);
503 }
504
505 if (pose.computePose(vpPose::LAGRANGE, cMo_lagrange)) {
506 residual_lagrange = pose.computeResidual(cMo_lagrange);
507 }
508
509 std::vector<double> residuals;
510 residuals.push_back(residual_dementhon);
511 residuals.push_back(residual_lagrange);
512 residuals.push_back(residual_homography);
513 residuals.push_back(residual_homography_ortho_iter);
514 std::vector<vpHomogeneousMatrix> poses;
515 poses.push_back(cMo_dementhon);
516 poses.push_back(cMo_lagrange);
517 poses.push_back(cMo_homography);
518 poses.push_back(cMo_homography_ortho_iter);
519
520 std::ptrdiff_t minIndex = std::min_element(residuals.begin(), residuals.end()) - residuals.begin();
521 cMo = *(poses.begin() + minIndex);
522 } else {
523 pose.computePose(m_mapOfCorrespondingPoseMethods[m_poseEstimationMethod], cMo);
524 }
525 }
526
 527 if (m_poseEstimationMethod != HOMOGRAPHY &&
 528 m_poseEstimationMethod != HOMOGRAPHY_ORTHOGONAL_ITERATION) {
 529 // Compute final pose using VVS
 530 pose.computePose(vpPose::VIRTUAL_VS, cMo);
 531 }
532
 533 //Only with HOMOGRAPHY_ORTHOGONAL_ITERATION can we directly get the two pose solutions
 534 if (m_poseEstimationMethod != HOMOGRAPHY_ORTHOGONAL_ITERATION) {
 535 if (cMo2) {
536 double scale = tagSize/2.0;
537 double data_p0[] = {-scale, scale, 0};
538 double data_p1[] = {scale, scale, 0};
539 double data_p2[] = {scale, -scale, 0};
540 double data_p3[] = {-scale, -scale, 0};
541 matd_t* p[4] = {matd_create_data(3, 1, data_p0),
542 matd_create_data(3, 1, data_p1),
543 matd_create_data(3, 1, data_p2),
544 matd_create_data(3, 1, data_p3)};
545 matd_t* v[4];
546 for (int i = 0; i < 4; i++) {
547 double data_v[] = {(det->p[i][0] - cam.get_u0())/cam.get_px(), (det->p[i][1] - cam.get_v0())/cam.get_py(), 1};
548 v[i] = matd_create_data(3, 1, data_v);
549 }
550
551 apriltag_pose_t solution1, solution2;
552 const int nIters = 50;
553 solution1.R = matd_create_data(3, 3, cMo.getRotationMatrix().data);
554 solution1.t = matd_create_data(3, 1, cMo.getTranslationVector().data);
555
556 double err2;
557 get_second_solution(v, p, &solution1, &solution2, nIters, &err2);
558
559 for (int i = 0; i < 4; i++) {
560 matd_destroy(p[i]);
561 matd_destroy(v[i]);
562 }
563
564 if (solution2.R) {
565 convertHomogeneousMatrix(solution2, *cMo2);
566
567 matd_destroy(solution2.R);
568 matd_destroy(solution2.t);
569 }
570
571 matd_destroy(solution1.R);
572 matd_destroy(solution1.t);
573 }
574 }
575
576 //Compute projection error with vpPose::computeResidual() for consistency
577 if (projErrors) {
578 *projErrors = pose.computeResidual(cMo);
579 }
580 if (projErrors2 && cMo2) {
581 *projErrors2 = pose.computeResidual(*cMo2);
582 }
583
 584 if (!m_zAlignedWithCameraFrame) {
 585 vpHomogeneousMatrix oMo;
 586 // Apply a rotation of 180deg around x axis
587 oMo[0][0] = 1; oMo[0][1] = 0; oMo[0][2] = 0;
588 oMo[1][0] = 0; oMo[1][1] = -1; oMo[1][2] = 0;
589 oMo[2][0] = 0; oMo[2][1] = 0; oMo[2][2] = -1;
590 cMo = cMo*oMo;
591 if (cMo2) {
592 *cMo2 = *cMo2*oMo;
593 }
594 }
595
596 return true;
597 }
598
599 void getPoseWithOrthogonalMethod(apriltag_detection_info_t &info, vpHomogeneousMatrix &cMo1, vpHomogeneousMatrix *cMo2,
600 double *err1, double *err2) {
601 apriltag_pose_t pose1, pose2;
602 double err_1, err_2;
603 estimate_tag_pose_orthogonal_iteration(&info, &err_1, &pose1, &err_2, &pose2, 50);
604 if (err_1 <= err_2) {
605 convertHomogeneousMatrix(pose1, cMo1);
606 if (cMo2) {
607 if (pose2.R) {
608 convertHomogeneousMatrix(pose2, *cMo2);
609 } else {
610 *cMo2 = cMo1;
611 }
612 }
613 } else {
614 convertHomogeneousMatrix(pose2, cMo1);
615 if (cMo2) {
616 convertHomogeneousMatrix(pose1, *cMo2);
617 }
618 }
619
620 matd_destroy(pose1.R);
621 matd_destroy(pose1.t);
 622 if (pose2.R) {
 623 matd_destroy(pose2.t);
 624 matd_destroy(pose2.R);
 625 }
626
627 if (err1)
628 *err1 = err_1;
629 if (err2)
630 *err2 = err_2;
631 }
632
633 bool getZAlignedWithCameraAxis() { return m_zAlignedWithCameraFrame; }
634
635 bool getAprilTagDecodeSharpening(double &decodeSharpening) const {
636 if (m_td) {
637 decodeSharpening = m_td->decode_sharpening;
638 return true;
639 }
640 return false;
641 }
642
643 bool getNbThreads(int &nThreads) const {
644 if (m_td) {
645 nThreads = m_td->nthreads;
646 return true;
647 }
648 return false;
649 }
650
651 bool getQuadDecimate(float &quadDecimate) const {
652 if (m_td) {
653 quadDecimate = m_td->quad_decimate;
654 return true;
655 }
656 return false;
657 }
658
659 bool getQuadSigma(float &quadSigma) const {
660 if (m_td) {
661 quadSigma = m_td->quad_sigma;
662 return true;
663 }
664 return false;
665 }
666
667 bool getRefineEdges(bool &refineEdges) const {
668 if (m_td) {
669 refineEdges = (m_td->refine_edges ? true : false);
670 return true;
671 }
672 return false;
673 }
674
675 bool getZAlignedWithCameraAxis() const {
676 return m_zAlignedWithCameraFrame;
677 }
678
679 std::vector<int> getTagsId() const { return m_tagsId; }
680
681 void setAprilTagDecodeSharpening(double decodeSharpening) {
682 if (m_td) {
683 m_td->decode_sharpening = decodeSharpening;
684 }
685 }
686
687 void setNbThreads(int nThreads) {
688 if (m_td) {
689 m_td->nthreads = nThreads;
690 }
691 }
692
693 void setQuadDecimate(float quadDecimate) {
694 if (m_td) {
695 m_td->quad_decimate = quadDecimate;
696 }
697 }
698
699 void setQuadSigma(float quadSigma) {
700 if (m_td) {
701 m_td->quad_sigma = quadSigma;
702 }
703 }
704
705 void setRefineDecode(bool) { }
706
707 void setRefineEdges(bool refineEdges) {
708 if (m_td) {
709 m_td->refine_edges = refineEdges ? 1 : 0;
710 }
711 }
712
713 void setRefinePose(bool) { }
714
715 void setPoseEstimationMethod(const vpPoseEstimationMethod &method) { m_poseEstimationMethod = method; }
716
717 void setZAlignedWithCameraAxis(bool zAlignedWithCameraFrame) { m_zAlignedWithCameraFrame = zAlignedWithCameraFrame; }
718
719protected:
 720 std::map<vpPoseEstimationMethod, vpPose::vpPoseMethodType> m_mapOfCorrespondingPoseMethods;
 721 vpPoseEstimationMethod m_poseEstimationMethod;
 722 std::vector<int> m_tagsId;
 723 vpAprilTagFamily m_tagFamily;
 724 apriltag_detector_t *m_td;
725 apriltag_family_t *m_tf;
726 zarray_t *m_detections;
727 bool m_zAlignedWithCameraFrame;
728};
729#endif // DOXYGEN_SHOULD_SKIP_THIS
730
 731vpDetectorAprilTag::vpDetectorAprilTag(const vpAprilTagFamily &tagFamily,
 732 const vpPoseEstimationMethod &poseEstimationMethod)
733 : m_displayTag(false), m_displayTagColor(vpColor::none), m_displayTagThickness(2),
734 m_poseEstimationMethod(poseEstimationMethod), m_tagFamily(tagFamily), m_defaultCam(),
735 m_impl(new Impl(tagFamily, poseEstimationMethod))
736{
737}
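
// Editor's addition: illustrative usage sketch only, not part of the original ViSP sources.
// It shows the typical detection + pose-estimation workflow built on the public API defined in
// this file; the image, the camera parameters and the 53 mm tag size are assumed inputs.
static void exampleDetectAndEstimatePose(const vpImage<unsigned char> &I, const vpCameraParameters &cam)
{
  vpDetectorAprilTag detector(vpDetectorAprilTag::TAG_36h11, vpDetectorAprilTag::HOMOGRAPHY_VIRTUAL_VS);
  detector.setAprilTagQuadDecimate(2.0f); // trade maximum detection distance for speed
  detector.setAprilTagNbThreads(2);

  const double tagSize = 0.053; // tag side length in meters (assumed)
  std::vector<vpHomogeneousMatrix> cMo_vec;
  if (detector.detect(I, tagSize, cam, cMo_vec)) {
    std::vector<int> tagsId = detector.getTagsId();
    for (size_t i = 0; i < cMo_vec.size(); i++) {
      std::cout << "Tag " << tagsId[i] << " pose:\n" << cMo_vec[i] << std::endl;
    }
  }
}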
738
 739vpDetectorAprilTag::vpDetectorAprilTag(const vpDetectorAprilTag &o)
 740 : vpDetectorBase (o),
741 m_displayTag(false), m_displayTagColor(vpColor::none), m_displayTagThickness(2),
742 m_poseEstimationMethod(o.m_poseEstimationMethod), m_tagFamily(o.m_tagFamily), m_defaultCam(),
743 m_impl(new Impl(*o.m_impl))
744{
745}
746
 747vpDetectorAprilTag &vpDetectorAprilTag::operator=(vpDetectorAprilTag o)
 748{
749 swap(*this, o);
750 return *this;
751}
752
 754vpDetectorAprilTag::~vpDetectorAprilTag() { delete m_impl; }
 762bool vpDetectorAprilTag::detect(const vpImage<unsigned char> &I)
 763{
764 m_message.clear();
765 m_polygon.clear();
766 m_nb_objects = 0;
767
768 std::vector<vpHomogeneousMatrix> cMo_vec;
769 const double tagSize = 1.0;
770 bool detected = m_impl->detect(I, tagSize, m_defaultCam, m_polygon, m_message, m_displayTag,
 771 m_displayTagColor, m_displayTagThickness,
 772 NULL, NULL, NULL, NULL);
773 m_nb_objects = m_message.size();
774
775 return detected;
776}
777
 795bool vpDetectorAprilTag::detect(const vpImage<unsigned char> &I, double tagSize, const vpCameraParameters &cam,
 796 std::vector<vpHomogeneousMatrix> &cMo_vec, std::vector<vpHomogeneousMatrix> *cMo_vec2,
797 std::vector<double> *projErrors, std::vector<double> *projErrors2)
798{
799 m_message.clear();
800 m_polygon.clear();
801 m_nb_objects = 0;
802
803 cMo_vec.clear();
804 if (cMo_vec2) {
805 cMo_vec2->clear();
806 }
807 bool detected = m_impl->detect(I, tagSize, cam, m_polygon, m_message, m_displayTag,
 808 m_displayTagColor, m_displayTagThickness,
 809 &cMo_vec, cMo_vec2, projErrors, projErrors2);
810 m_nb_objects = m_message.size();
811
812 return detected;
813}
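
// Editor's addition: illustrative sketch only, not part of the original ViSP sources.
// When tags with different physical sizes are visible, detection and pose estimation can be
// decoupled: detect once, then call getPose() per tag with the size matching its id. The
// per-id sizes used here are assumptions for the example.
static void exampleDetectThenPosePerTag(const vpImage<unsigned char> &I, const vpCameraParameters &cam)
{
  vpDetectorAprilTag detector(vpDetectorAprilTag::TAG_36h11);
  if (!detector.detect(I)) {
    return;
  }
  std::vector<int> tagsId = detector.getTagsId();
  for (size_t i = 0; i < tagsId.size(); i++) {
    double tagSize = (tagsId[i] == 0) ? 0.040 : 0.0265; // meters, per tag id (assumed)
    vpHomogeneousMatrix cMo;
    if (detector.getPose(i, tagSize, cam, cMo)) {
      std::cout << "Tag " << tagsId[i] << " pose:\n" << cMo << std::endl;
    }
  }
}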
814
846bool vpDetectorAprilTag::getPose(size_t tagIndex, double tagSize, const vpCameraParameters &cam,
 847 vpHomogeneousMatrix &cMo, vpHomogeneousMatrix *cMo2,
 848 double *projError, double *projError2)
849{
850 return m_impl->getPose(tagIndex, tagSize, cam, cMo, cMo2, projError, projError2);
851}
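
// Editor's addition: illustrative sketch only, not part of the original ViSP sources.
// For planar targets the pose can be ambiguous, especially at long range or for small tags;
// getPose() optionally returns the second candidate and the projection error of each, so the
// caller can keep the solution with the lower residual or track both hypotheses over time.
static void exampleResolvePoseAmbiguity(vpDetectorAprilTag &detector, size_t tagIndex, double tagSize,
                                        const vpCameraParameters &cam, vpHomogeneousMatrix &cMo)
{
  vpHomogeneousMatrix cMo1, cMo2;
  double err1 = 0, err2 = 0;
  if (detector.getPose(tagIndex, tagSize, cam, cMo1, &cMo2, &err1, &err2)) {
    cMo = (err1 <= err2) ? cMo1 : cMo2; // keep the candidate with the lower projection error
  }
}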
852
870std::vector<std::vector<vpPoint> > vpDetectorAprilTag::getTagsPoints3D(const std::vector<int>& tagsId,
871 const std::map<int, double>& tagsSize) const
872{
873 std::vector<std::vector<vpPoint> > tagsPoints3D;
874
875 double default_size = -1;
876 {
877 std::map<int, double>::const_iterator it = tagsSize.find(-1);
878 if (it != tagsSize.end()) {
879 default_size = it->second; // Default size
880 }
881 }
882 for (size_t i = 0; i < tagsId.size(); i++) {
883 std::map<int, double>::const_iterator it = tagsSize.find(tagsId[i]);
884 double tagSize = default_size; // Default size
885 if (it == tagsSize.end()) {
886 if (default_size < 0) { // no default size found
887 throw(vpException(vpException::fatalError, "Tag with id %d has no 3D size or there is no default 3D size defined", tagsId[i]));
888 }
889 } else {
890 tagSize = it->second;
891 }
892 std::vector<vpPoint> points3D(4);
893 if (m_impl->getZAlignedWithCameraAxis()) {
894 points3D[0] = vpPoint(-tagSize/2, tagSize/2, 0);
895 points3D[1] = vpPoint( tagSize/2, tagSize/2, 0);
896 points3D[2] = vpPoint( tagSize/2, -tagSize/2, 0);
897 points3D[3] = vpPoint(-tagSize/2, -tagSize/2, 0);
898 } else {
899 points3D[0] = vpPoint(-tagSize/2, -tagSize/2, 0);
900 points3D[1] = vpPoint( tagSize/2, -tagSize/2, 0);
901 points3D[2] = vpPoint( tagSize/2, tagSize/2, 0);
902 points3D[3] = vpPoint(-tagSize/2, tagSize/2, 0);
903 }
904 tagsPoints3D.push_back(points3D);
905 }
906
907 return tagsPoints3D;
908}
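
// Editor's addition: illustrative sketch only, not part of the original ViSP sources.
// getTagsPoints3D() above uses the entry with id -1 as the default tag size; any other entry
// overrides the size for that specific id. Pairing the returned 3D corners with
// getTagsCorners() allows computing a single pose from several tags at once. The sizes and
// the pose-method choice below are assumptions for the example.
static void exampleMultiTagPose(const vpDetectorAprilTag &detector, const vpCameraParameters &cam,
                                vpHomogeneousMatrix &cMo)
{
  std::map<int, double> tagsSize;
  tagsSize[-1] = 0.030; // default size in meters
  tagsSize[10] = 0.060; // tag id 10 is larger

  std::vector<std::vector<vpPoint> > points3D = detector.getTagsPoints3D(detector.getTagsId(), tagsSize);
  std::vector<std::vector<vpImagePoint> > corners = detector.getTagsCorners();

  vpPose pose;
  for (size_t i = 0; i < points3D.size(); i++) {
    for (size_t j = 0; j < 4; j++) {
      double x = 0.0, y = 0.0;
      vpPixelMeterConversion::convertPoint(cam, corners[i][j].get_u(), corners[i][j].get_v(), x, y);
      points3D[i][j].set_x(x);
      points3D[i][j].set_y(y);
      pose.addPoint(points3D[i][j]);
    }
  }
  pose.computePose(vpPose::DEMENTHON, cMo);  // linear initialization
  pose.computePose(vpPose::VIRTUAL_VS, cMo); // non-linear refinement
}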
909
915std::vector<std::vector<vpImagePoint> > vpDetectorAprilTag::getTagsCorners() const
916{
917 return m_polygon;
918}
919
925std::vector<int> vpDetectorAprilTag::getTagsId() const
926{
927 return m_impl->getTagsId();
928}
929
 930void vpDetectorAprilTag::setAprilTagDecodeSharpening(double decodeSharpening)
 931{
932 return m_impl->setAprilTagDecodeSharpening(decodeSharpening);
933}
934
 935void vpDetectorAprilTag::setAprilTagFamily(const vpAprilTagFamily &tagFamily)
 936{
937 //back-up settings
938 double decodeSharpening = 0.25;
939 m_impl->getAprilTagDecodeSharpening(decodeSharpening);
940 int nThreads = 1;
941 m_impl->getNbThreads(nThreads);
942 float quadDecimate = 1;
943 m_impl->getQuadDecimate(quadDecimate);
944 float quadSigma = 0;
945 m_impl->getQuadSigma(quadSigma);
946 bool refineEdges = true;
947 m_impl->getRefineEdges(refineEdges);
948 bool zAxis = m_impl->getZAlignedWithCameraAxis();
949
950 delete m_impl;
951 m_impl = new Impl(tagFamily, m_poseEstimationMethod);
952 m_impl->setAprilTagDecodeSharpening(decodeSharpening);
953 m_impl->setNbThreads(nThreads);
954 m_impl->setQuadDecimate(quadDecimate);
955 m_impl->setQuadSigma(quadSigma);
956 m_impl->setRefineEdges(refineEdges);
957 m_impl->setZAlignedWithCameraAxis(zAxis);
958}
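
// Editor's addition: illustrative sketch only, not part of the original ViSP sources.
// setAprilTagFamily() above re-creates the underlying detector but backs up and restores the
// tuning parameters, so an already configured detector can switch tag family on the fly.
static void exampleSwitchFamily(vpDetectorAprilTag &detector)
{
  detector.setAprilTagQuadDecimate(3.0f);                   // tune once...
  detector.setAprilTagFamily(vpDetectorAprilTag::TAG_16h5); // ...switch family...
  // ...the quad decimation factor (and the other settings) are still in effect here.
}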
959
 965void vpDetectorAprilTag::setAprilTagNbThreads(int nThreads)
 966{
967 if (nThreads > 0) {
968 m_impl->setNbThreads(nThreads);
969 }
970}
971
 977void vpDetectorAprilTag::setAprilTagPoseEstimationMethod(const vpPoseEstimationMethod &poseEstimationMethod)
 978{
979 m_poseEstimationMethod = poseEstimationMethod;
980 m_impl->setPoseEstimationMethod(poseEstimationMethod);
981}
982
 995void vpDetectorAprilTag::setAprilTagQuadDecimate(float quadDecimate)
 996{
997 m_impl->setQuadDecimate(quadDecimate);
998}
999
 1012void vpDetectorAprilTag::setAprilTagQuadSigma(float quadSigma)
 1013{
1014 m_impl->setQuadSigma(quadSigma);
1015}
1016
1017#if defined(VISP_BUILD_DEPRECATED_FUNCTIONS)
1021vp_deprecated void vpDetectorAprilTag::setAprilTagRefineDecode(bool refineDecode) {
1022 m_impl->setRefineDecode(refineDecode);
1023}
1024#endif
1025
 1040void vpDetectorAprilTag::setAprilTagRefineEdges(bool refineEdges)
 1041{
1042 m_impl->setRefineEdges(refineEdges);
1043}
1044
1045#if defined(VISP_BUILD_DEPRECATED_FUNCTIONS)
1049vp_deprecated void vpDetectorAprilTag::setAprilTagRefinePose(bool refinePose)
1050{
1051 m_impl->setRefinePose(refinePose);
1052}
1053#endif
1054
 1055void swap(vpDetectorAprilTag &o1, vpDetectorAprilTag &o2)
 1056{
1057 using std::swap;
1058
1059 swap(o1.m_impl, o2.m_impl);
1060}
1061
1067void vpDetectorAprilTag::setZAlignedWithCameraAxis(bool zAlignedWithCameraFrame)
1068{
1069 m_impl->setZAlignedWithCameraAxis(zAlignedWithCameraFrame);
1070}
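
// Editor's addition: illustrative sketch only, not part of the original ViSP sources.
// The legacy (default) tag frame and the camera-aligned frame enabled by
// setZAlignedWithCameraAxis(true) differ by a 180 deg rotation around the tag x-axis, as
// applied at the end of Impl::getPose(). Since that rotation is its own inverse, the same
// product converts a pose between the two conventions in either direction.
static vpHomogeneousMatrix exampleToOtherTagFrameConvention(const vpHomogeneousMatrix &cMo)
{
  vpHomogeneousMatrix oMo; // identity by default
  oMo[1][1] = -1.0;        // rotation part becomes diag(1, -1, -1)
  oMo[2][2] = -1.0;
  return cMo * oMo;
}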
1071
1072#elif !defined(VISP_BUILD_SHARED_LIBS)
1073// Work around to avoid warning: libvisp_core.a(vpDetectorAprilTag.cpp.o) has
1074// no symbols
1075void dummy_vpDetectorAprilTag() {}
1076#endif