// Visual Servoing Platform version 3.5.0
// Example: servoSimuPoint2DhalfCamVelocity2.cpp
/****************************************************************************
 *
 * ViSP, open source Visual Servoing Platform software.
 * Copyright (C) 2005 - 2019 by Inria. All rights reserved.
 *
 * This software is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * See the file LICENSE.txt at the root directory of this source
 * distribution for additional information about the GNU GPL.
 *
 * For using ViSP with software that can not be combined with the GNU
 * GPL, please contact Inria about acquiring a ViSP Professional
 * Edition License.
 *
 * See http://visp.inria.fr for more information.
 *
 * This software was developed at:
 * Inria Rennes - Bretagne Atlantique
 * Campus Universitaire de Beaulieu
 * 35042 Rennes Cedex
 * France
 *
 * If you have questions regarding the use of this file, please contact
 * Inria at visp@inria.fr
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Description:
 * Simulation of a 2 1/2 D visual servoing using theta U visual features.
 *
 * Authors:
 * Eric Marchand
 * Fabien Spindler
 *
 *****************************************************************************/

#include <stdio.h>
#include <stdlib.h>

#include <visp3/core/vpHomogeneousMatrix.h>
#include <visp3/core/vpMath.h>
#include <visp3/core/vpPoint.h>
#include <visp3/io/vpParseArgv.h>
#include <visp3/robot/vpSimulatorCamera.h>
#include <visp3/visual_features/vpFeatureBuilder.h>
#include <visp3/visual_features/vpFeaturePoint.h>
#include <visp3/visual_features/vpFeatureThetaU.h>
#include <visp3/visual_features/vpGenericFeature.h>
#include <visp3/vs/vpServo.h>

// List of allowed command line options
#define GETOPTARGS "h"

// Forward declarations
void usage(const char *name, const char *badparam);
bool getOptions(int argc, const char **argv);

/*!
  Print the program options.

  \param name : Program name.
  \param badparam : Bad parameter name (NULL when there is none).
*/
void usage(const char *name, const char *badparam)
{
  fprintf(stdout, "\n\
Simulation of a 2 1/2 D visual servoing (x,y,log Z, theta U):\n\
- eye-in-hand control law,\n\
- velocity computed in the camera frame,\n\
- without display.\n\
          \n\
SYNOPSIS\n\
  %s [-h]\n", name);

  fprintf(stdout, "\n\
OPTIONS:                                               Default\n\
          \n\
  -h\n\
     Print the help.\n");

  if (badparam)
    fprintf(stdout, "\nERROR: Bad parameter [%s]\n", badparam);
}

107bool getOptions(int argc, const char **argv)
108{
109 const char *optarg_;
110 int c;
111 while ((c = vpParseArgv::parse(argc, argv, GETOPTARGS, &optarg_)) > 1) {
112
113 switch (c) {
114 case 'h':
115 usage(argv[0], NULL);
116 return false;
117
118 default:
119 usage(argv[0], optarg_);
120 return false;
121 }
122 }
123
124 if ((c == 1) || (c == -1)) {
125 // standalone param or error
126 usage(argv[0], NULL);
127 std::cerr << "ERROR: " << std::endl;
128 std::cerr << " Bad argument " << optarg_ << std::endl << std::endl;
129 return false;
130 }
131
132 return true;
133}
134
/*!
  Simulation of a 2 1/2 D visual servoing on the features
  (x, y, log(Z/Zd), theta U):
  - eye-in-hand control law,
  - velocity computed in the camera frame,
  - without display.

  \return EXIT_SUCCESS on success, EXIT_FAILURE if a ViSP exception is caught.
*/
int main(int argc, const char **argv)
{
#if (defined(VISP_HAVE_LAPACK) || defined(VISP_HAVE_EIGEN3) || defined(VISP_HAVE_OPENCV))
  try {
    // Read the command line options
    if (getOptions(argc, argv) == false) {
      exit(-1);
    }

    std::cout << std::endl;
    std::cout << "-------------------------------------------------------" << std::endl;
    std::cout << " simulation of a 2 1/2 D visual servoing " << std::endl;
    std::cout << "-------------------------------------------------------" << std::endl;
    std::cout << std::endl;

    // In this example we simulate a visual servoing task.
    // In simulation, we have to define the scene frame Ro and the camera
    // frame Rc. The camera location is given by an homogeneous matrix cMo
    // that describes the position of the scene (object) frame in the camera
    // frame.

    vpServo task;

    // Set the initial camera location, given as a size-6 pose vector
    // (3 translations in meter and 3 rotations in theta U representation)
    vpPoseVector c_r_o(0.1, 0.2, 2, vpMath::rad(20), vpMath::rad(10), vpMath::rad(50));

    // This pose vector is then transformed into a 4x4 homogeneous matrix
    vpHomogeneousMatrix cMo(c_r_o);

    // We define a robot: vpSimulatorCamera implements a simple free-flying
    // camera that is just defined by its location cMo
    vpSimulatorCamera robot;

    // Compute the position of the object in the world frame
    vpHomogeneousMatrix wMc, wMo;
    robot.getPosition(wMc);
    wMo = wMc * cMo;

    // Now that the current camera position has been defined, let us define
    // the desired camera location. It is given by cdMo.
    vpPoseVector cd_r_o(0, 0, 1, vpMath::rad(0), vpMath::rad(0), vpMath::rad(0));
    vpHomogeneousMatrix cdMo(cd_r_o);

    //----------------------------------------------------------------------
    // A 2 1/2 D visual servoing is defined by:
    // - the position of a point x, y
    // - the difference between this point depth and a desired depth,
    //   modeled by log(Z/Zd) to be regulated to 0
    // - the rotation cdRc that the camera has to realize

    // Let us now define the current value of these features.
    // Since we simulate, a 3D point is forward-projected to define the
    // current position x, y of the reference point.

    //------------------------------------------------------------------
    // First feature (x, y)
    //------------------------------------------------------------------
    // A vpPoint has three main members:
    //   .oP : 3D coordinates in the scene frame
    //   .cP : 3D coordinates in the camera frame
    //   .p  : 2D coordinates

    // Set the point coordinates in the world frame
    vpPoint point(0, 0, 0);
    // Compute the point coordinates in the camera frame (cP) and then its
    // 2D coordinates (p)
    point.track(cMo);

    // We also define (again by forward projection) the desired position of
    // this point according to the desired camera position
    vpPoint pointd(0, 0, 0);
    pointd.track(cdMo);

    // A vpPoint is not a feature, just a "tracker" from which features are
    // built. A feature is defined by a vector s, a way to compute the
    // interaction matrix and the error, and, if required, some 3D
    // information.

    // For a point (x, y) ViSP implements the vpFeaturePoint class.
    // We now define a feature for (x, y) and one for (x*, y*)
    vpFeaturePoint p, pd;

    // Initialize the vector s = (x, y) of p from the tracker point.
    // The Z coordinate in p is also initialized; it will be used to compute
    // the interaction matrix.
    vpFeatureBuilder::create(p, point);
    vpFeatureBuilder::create(pd, pointd);

    //------------------------------------------------------------------
    // Second feature log(Z/Zd)
    // Not necessary to project twice (point and pointd are reused)

    // This case is interesting since this visual feature is not predefined
    // in ViSP. For such a case the generic feature class vpGenericFeature is
    // available; we have to define:
    //   - the vector s              : .set_s(...)
    //   - the interaction matrix Ls : .setInteractionMatrix(...)

    // log(Z/Zd) is a size-1 vector
    vpGenericFeature logZ(1);
    // Initialized to s = log(Z/Zd); point and pointd have already been
    // forward-projected above.
    logZ.set_s(log(point.get_Z() / pointd.get_Z()));

    // This visual feature has to be regulated to zero

    //------------------------------------------------------------------
    // Third feature theta U
    // tu represents the rotation cdRc that the camera has to realize
    //------------------------------------------------------------------
    // NOTE(review): the two declarations below were dropped by the HTML
    // extraction of this listing; restored from the original ViSP example.
    vpFeatureThetaU tu(vpFeatureThetaU::cdRc);
    vpHomogeneousMatrix cdMc;
    // Compute the displacement that the camera has to achieve
    cdMc = cdMo * cMo.inverse();

    // From this displacement, extract the rotation cdRc represented by the
    // angle theta and the rotation axis u
    tu.buildFrom(cdMc);
    // This visual feature has to be regulated to zero: the desired rotation
    // is always zero since s is the rotation that the camera has to realize

    //------------------------------------------------------------------
    // Let us now define the task itself
    //------------------------------------------------------------------

    // - we want an eye-in-hand control law
    // - the robot is controlled in the camera frame
    // NOTE(review): these two calls were dropped by the HTML extraction;
    // restored from the original ViSP example.
    task.setServo(vpServo::EYEINHAND_CAMERA);
    // Interaction matrix is computed with the current value of s
    task.setInteractionMatrixType(vpServo::CURRENT);

    // Build the task by "stacking" the visual features previously defined
    task.addFeature(p, pd);
    task.addFeature(logZ);
    task.addFeature(tu);
    // addFeature(X, Xd) means X should be regulated to Xd
    // addFeature(X) means that X should be regulated to 0
    // Some features such as vpFeatureThetaU MUST be regulated to zero
    // (otherwise it results in an error at execution level)

    // Set the gain
    task.setLambda(1);

    // Display task information
    task.print();

    //------------------------------------------------------------------
    // And now the closed loop

    unsigned int iter = 0;
    while (iter++ < 200) {
      std::cout << "---------------------------------------------" << iter << std::endl;
      vpColVector v;

      // Get the robot position
      robot.getPosition(wMc);
      // Compute the position of the object frame in the camera frame
      cMo = wMc.inverse() * wMo;

      // Update the (x, y) feature
      point.track(cMo);
      vpFeatureBuilder::create(p, point);

      // Update the theta U feature
      cdMc = cdMo * cMo.inverse();
      tu.buildFrom(cdMc);

      // There is no predefined interaction matrix for logZ: we explicitly
      // build L = [0 0 -1/Z -y x 0]
      logZ.set_s(log(point.get_Z() / pointd.get_Z()));
      vpMatrix LlogZ(1, 6);
      LlogZ[0][0] = LlogZ[0][1] = LlogZ[0][5] = 0;
      LlogZ[0][2] = -1 / p.get_Z();
      LlogZ[0][3] = -p.get_y();
      LlogZ[0][4] = p.get_x();

      logZ.setInteractionMatrix(LlogZ);

      // Compute the control law
      v = task.computeControlLaw();

      // Send the camera velocity to the controller.
      // NOTE(review): this call was dropped by the HTML extraction; without
      // it the simulated camera never moves. Restored from the original
      // ViSP example.
      robot.setVelocity(vpRobot::CAMERA_FRAME, v);

      std::cout << "|| s - s* || = " << (task.getError()).sumSquare() << std::endl;
    }

    // Display task information
    task.print();
    // Final camera location
    std::cout << cMo << std::endl;
    return EXIT_SUCCESS;
  } catch (const vpException &e) {
    std::cout << "Catch a ViSP exception: " << e << std::endl;
    return EXIT_FAILURE;
  }
#else
  (void)argc;
  (void)argv;
  std::cout << "Cannot run this example: install Lapack, Eigen3 or OpenCV" << std::endl;
  return EXIT_SUCCESS;
#endif
}
/*
 * API cross-reference notes (carried over from the ViSP documentation page
 * this listing was extracted from):
 *
 * vpColVector — Implementation of column vector and the associated operations.
 *   Definition: vpColVector.h:131
 * vpException — error that can be emitted by ViSP classes.
 *   Definition: vpException.h:72
 * static void create(vpFeaturePoint &s, const vpCameraParameters &cam, const vpDot &d)
 * vpFeaturePoint — Class that defines a 2D point visual feature which is composed by
 *   two parameters that are the cartesian coordinates.
 *   double get_y() const
 *   double get_x() const
 *   double get_Z() const
 * vpFeatureThetaU — Class that defines a 3D visual feature from an axis/angle
 *   parametrization that represents the rotation.
 * vpGenericFeature — Class that enables to define a feature or a set of features
 *   which are not implemented in ViSP as a specific class.
 * vpHomogeneousMatrix — Implementation of an homogeneous matrix and operations on
 *   such kind of matrices.
 *   vpHomogeneousMatrix inverse() const
 *   void buildFrom(const vpTranslationVector &t, const vpRotationMatrix &R)
 * vpMath — static double rad(double deg)
 *   Definition: vpMath.h:110
 * vpMatrix — Implementation of a matrix and operations on matrices.
 *   Definition: vpMatrix.h:154
 * vpParseArgv — static bool parse(int *argcPtr, const char **argv, vpArgvInfo *argTable, int flags)
 *   Definition: vpParseArgv.cpp:69
 * vpPoint — Class that defines a 3D point in the object frame and allows forward
 *   projection of a 3D point in the camera frame.
 *   Definition: vpPoint.h:82
 * vpPoseVector — Implementation of a pose vector and operations on poses.
 *   Definition: vpPoseVector.h:152
 * vpRobot — void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel)
 *   CAMERA_FRAME — Definition: vpRobot.h:82
 * vpServo — void setInteractionMatrixType(const vpServoIteractionMatrixType &interactionMatrixType,
 *     const vpServoInversionType &interactionMatrixInversion=PSEUDO_INVERSE)
 *   EYEINHAND_CAMERA — Definition: vpServo.h:155
 *   void print(const vpServo::vpServoPrintType display_level=ALL, std::ostream &os=std::cout)
 *   void setLambda(double c)
 *   void setServo(const vpServoType &servo_type)
 *   vpColVector getError() const
 *   vpColVector computeControlLaw()
 *   CURRENT — Definition: vpServo.h:182
 *   void addFeature(vpBasicFeature &s, vpBasicFeature &s_star, unsigned int select=vpBasicFeature::FEATURE_ALL)
 * vpSimulatorCamera — Class that defines the simplest robot: a free flying camera.
 */