/****************************************************************************
 *
 * ViSP, open source Visual Servoing Platform software.
 * Copyright (C) 2005 - 2019 by Inria. All rights reserved.
 *
 * This software is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * See the file LICENSE.txt at the root directory of this source
 * distribution for additional information about the GNU GPL.
 *
 * For using ViSP with software that can not be combined with the GNU
 * GPL, please contact Inria about acquiring a ViSP Professional
 * Edition License.
 *
 * See http://visp.inria.fr for more information.
 *
 * This software was developed at:
 * Inria Rennes - Bretagne Atlantique
 * Campus Universitaire de Beaulieu
 * 35042 Rennes Cedex
 * France
 *
 * If you have questions regarding the use of this file, please contact
 * Inria at visp@inria.fr
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Description:
 * Tests the control law
 * eye-to-hand control
 * velocity computed in the camera frame
 *
 * Authors:
 * Eric Marchand
 *
 *****************************************************************************/
/*!
  \file servoAfma6Points2DCamVelocityEyeToHand.cpp

  \brief Example of an eye-to-hand control law. We control here a real
  robot, the Afma6 robot (cartesian robot, with 6 degrees of freedom). The
  robot is controlled in the camera frame.

*/

/*!
  \example servoAfma6Points2DCamVelocityEyeToHand.cpp

  Example of an eye-to-hand control law. We control here a real robot, the
  Afma6 robot (cartesian robot, with 6 degrees of freedom). The robot is
  controlled in the camera frame.

*/
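
/*
  In this eye-to-hand configuration the camera is fixed in the workspace and
  observes a target (a set of blobs) that moves with the robot. At each
  iteration the program grabs an image, tracks the blobs, estimates the pose
  of the target, updates the current visual features and the cVe / eJe
  matrices, computes the control law and sends the resulting joint
  velocities to the Afma6 robot. This example requires a real Afma6 robot
  and a firewire (DC1394) camera.
*/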

#include <cmath>   // std::fabs
#include <cstdio>  // snprintf(), FILENAME_MAX
#include <fstream> // std::ifstream, std::ofstream
#include <iostream>
#include <limits> // numeric_limits
#include <list>
#include <stdlib.h>
#include <visp3/core/vpConfig.h>
#include <visp3/core/vpDebug.h> // Debug trace
#if (defined(VISP_HAVE_AFMA6) && defined(VISP_HAVE_DC1394))

#define SAVE 0

#include <visp3/blob/vpDot.h>
#include <visp3/core/vpDisplay.h>
#include <visp3/core/vpException.h>
#include <visp3/core/vpHomogeneousMatrix.h>
#include <visp3/core/vpImage.h>
#include <visp3/core/vpImagePoint.h>
#include <visp3/core/vpMath.h>
#include <visp3/core/vpPixelMeterConversion.h>
#include <visp3/core/vpPoint.h>
#include <visp3/gui/vpDisplayGTK.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/io/vpImageIo.h>
#include <visp3/robot/vpRobotAfma6.h>
#include <visp3/sensor/vp1394TwoGrabber.h>
#include <visp3/vision/vpPose.h>
#include <visp3/visual_features/vpFeatureBuilder.h>
#include <visp3/visual_features/vpFeaturePoint.h>
#include <visp3/vs/vpServo.h>
#include <visp3/vs/vpServoDisplay.h>

#define L 0.006 // unit length (in meters) of the 3D target model defined below
#define D 0     // offset of the target plane along the y axis

int main()
{
  try {
    vpServo task;

    vpCameraParameters cam;
    vpImage<unsigned char> I;
    int i;

    vp1394TwoGrabber g;
    g.setVideoMode(vp1394TwoGrabber::vpVIDEO_MODE_640x480_MONO8);
    g.setFramerate(vp1394TwoGrabber::vpFRAMERATE_60);
    g.open(I);

    g.acquire(I);

#ifdef VISP_HAVE_X11
    vpDisplayX display(I, 100, 100, "Current image");
#elif defined(VISP_HAVE_OPENCV)
    vpDisplayOpenCV display(I, 100, 100, "Current image");
#elif defined(VISP_HAVE_GTK)
    vpDisplayGTK display(I, 100, 100, "Current image");
#endif

    vpDisplay::display(I);
    vpDisplay::flush(I);

    std::cout << std::endl;
    std::cout << "-------------------------------------------------------" << std::endl;
    std::cout << " Test program for vpServo " << std::endl;
    std::cout << " Eye-to-hand task control" << std::endl;
    std::cout << " Use of the Afma6 robot " << std::endl;
    std::cout << " Task : servo 2D points " << std::endl;
    std::cout << "-------------------------------------------------------" << std::endl;
    std::cout << std::endl;

    const int nbPoint = 7;

    vpDot dot[nbPoint];
    vpImagePoint cog;

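    // Initialize the tracker of each blob: the user is asked to click on
    // each of the nbPoint dots in the image.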
    for (i = 0; i < nbPoint; i++) {
      dot[i].initTracking(I);
      dot[i].setGraphics(true);
      dot[i].track(I);
      vpDisplay::flush(I);
      dot[i].setGraphics(false);
    }

    // 3D model of the target, used to compute its pose
    vpPoint point[nbPoint];
    point[0].setWorldCoordinates(-2 * L, D, -3 * L);
    point[1].setWorldCoordinates(0, D, -3 * L);
    point[2].setWorldCoordinates(2 * L, D, -3 * L);

    point[3].setWorldCoordinates(-L, D, -L);
    point[4].setWorldCoordinates(L, D, -L);
    point[5].setWorldCoordinates(L, D, L);
    point[6].setWorldCoordinates(-L, D, L);
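    // The seven points define a planar target lying in the y = D plane:
    // three collinear points at z = -3L and four points forming a square of
    // side 2L.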

    vpRobotAfma6 robot;
    // Get the intrinsic camera parameters associated to the image
    robot.getCameraParameters(cam, I);

    vpHomogeneousMatrix cMo, cdMo;
    vpPose pose;
    pose.clearPoint();
    for (i = 0; i < nbPoint; i++) {
      cog = dot[i].getCog();
      double x = 0, y = 0;
      vpPixelMeterConversion::convertPoint(cam, cog, x, y);
      point[i].set_x(x);
      point[i].set_y(y);
      pose.addPoint(point[i]);
    }

    // Compute the initial pose using the Dementhon method followed by a
    // non-linear minimisation method
    pose.computePose(vpPose::DEMENTHON_LOWE, cMo);

    std::cout << cMo << std::endl;
    cMo.print();

    /*------------------------------------------------------------------
      -- Learning the desired position
      -- or reading the desired position
      ------------------------------------------------------------------
    */
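    // Typical use: first move the robot so that the target is at the
    // desired location and answer 1 below to record this pose in cdMo.dat;
    // then run the program again and answer 0 to load cdMo.dat and servo
    // toward that desired position.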
    std::cout << "Learn the desired position (1) or use the one previously saved in cdMo.dat (0)? " << std::endl;
    char name[FILENAME_MAX];
    snprintf(name, FILENAME_MAX, "cdMo.dat");
    int learning;
    std::cin >> learning;
    if (learning == 1) {
      // Save the current object pose as the desired one
      vpTRACE("Save the location of the object in a file cdMo.dat");
      std::ofstream f(name);
      cMo.save(f);
      f.close();
      return EXIT_SUCCESS; // nothing more to do once the desired pose is recorded
    }

    {
      vpTRACE("Loading desired location from cdMo.dat");
      std::ifstream f("cdMo.dat");
      cdMo.load(f);
      f.close();
    }

    vpFeaturePoint p[nbPoint], pd[nbPoint];

    // Set the desired position of the points by forward projection using
    // the pose cdMo
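    // vpPoint::projection() computes the perspective projection of a 3D
    // point expressed in the camera frame: x = X/Z, y = Y/Z.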
    for (i = 0; i < nbPoint; i++) {
      vpColVector cP, proj;
      point[i].changeFrame(cdMo, cP);
      point[i].projection(cP, proj);

      pd[i].set_x(proj[0]);
      pd[i].set_y(proj[1]);
    }

    //------------------------------------------------------------------

    vpTRACE("define the task");
    vpTRACE("\t we want an eye-to-hand control law");
    vpTRACE("\t joint velocities are sent to the robot");
    task.setServo(vpServo::EYETOHAND_L_cVe_eJe);
    task.setInteractionMatrixType(vpServo::CURRENT);
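    // With vpServo::EYETOHAND_L_cVe_eJe the user provides, besides the
    // visual features, the twist transformation matrix cVe and the robot
    // Jacobian eJe; computeControlLaw() then returns joint velocities.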

    for (i = 0; i < nbPoint; i++) {
      task.addFeature(p[i], pd[i]);
    }

    vpTRACE("Display task information ");
    task.print();

    //------------------------------------------------------------------

    double convergence_threshold = 0.00; // e.g. 0.00025; with 0 the servo loop never stops by itself
    vpDisplay::getClick(I);

    //-------------------------------------------------------------
    double error = 1;
    unsigned int iter = 0;
    vpTRACE("\t loop");
    robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL);
    vpColVector v; // computed robot velocity

    // Pose of the robot-mounted camera expressed in the object (target) frame
    vpHomogeneousMatrix oMcamrobot;
    oMcamrobot[0][3] = -0.05;
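    // The target observed by the external camera is assumed to be rigidly
    // attached to the robot-mounted camera, with a -5 cm offset along its x
    // axis; oMcamrobot encodes this fixed transformation.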

    vpImage<vpRGBa> Ic;
    int it = 0;

    double lambda_av = 0.1;
    double alpha = 1;
    double beta = 3;

    std::cout << "Enter the adaptive gain parameter alpha (e.g. 0.7): " << std::endl;
    std::cin >> alpha;
    std::cout << "Enter the adaptive gain parameter beta (e.g. 5): " << std::endl;
    std::cin >> beta;
    std::list<vpImagePoint> Lcog;
    vpImagePoint ip;
    while (error > convergence_threshold) {
      std::cout << "---------------------------------------------" << iter++ << std::endl;

      g.acquire(I);
      vpDisplay::display(I);
      ip.set_i(265);
      ip.set_j(150);
      vpDisplay::displayText(I, ip, "Eye-To-Hand Visual Servoing", vpColor::green);
      ip.set_i(280);
      ip.set_j(150);
      vpDisplay::displayText(I, ip, "IRISA-INRIA Rennes, Lagadic project", vpColor::green);
      try {
        for (i = 0; i < nbPoint; i++) {
          dot[i].track(I);
          Lcog.push_back(dot[i].getCog());
        }
      } catch (...) {
        vpTRACE("Error detected while tracking visual features");
        robot.stopMotion();
        return EXIT_FAILURE;
      }

      // Compute the current pose of the object with respect to the camera,
      // using a non-linear minimisation method initialised with the
      // previous estimate
      pose.clearPoint();

      for (i = 0; i < nbPoint; i++) {
        double x = 0, y = 0;
        cog = dot[i].getCog();
        vpPixelMeterConversion::convertPoint(cam, cog, x, y);
        point[i].set_x(x);
        point[i].set_y(y);

        vpColVector cP;
        point[i].changeFrame(cdMo, cP);

        p[i].set_x(x);
        p[i].set_y(y);
        p[i].set_Z(cP[2]); // depth taken from the desired pose cdMo

        pose.addPoint(point[i]);

        point[i].display(I, cMo, cam, vpColor::green);
        point[i].display(I, cdMo, cam, vpColor::blue);
      }
      pose.computePose(vpPose::LOWE, cMo);
      vpDisplay::flush(I);

      // Set up the twist transformation matrix cVe and the robot Jacobian eJe
      vpHomogeneousMatrix cMe, camrobotMe;
      robot.get_cMe(camrobotMe);
      cMe = cMo * oMcamrobot * camrobotMe;
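      // Frame chain: cMo locates the target in the fixed camera frame,
      // oMcamrobot locates the robot-mounted camera in the target frame and
      // camrobotMe (given by robot.get_cMe()) locates the end-effector in
      // the robot-mounted camera frame, so cMe locates the end-effector in
      // the fixed camera frame.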

      task.set_cVe(cMe);

      vpMatrix eJe;
      robot.get_eJe(eJe);
      task.set_eJe(eJe);

      // Compute the adaptive gain (speed up the convergence)
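      // gain = alpha * exp(-beta * ||e||^2) + lambda_av: the gain increases
      // from lambda_av toward alpha + lambda_av as the task error decreases,
      // which speeds up the convergence near the goal.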
      double gain;
      if (iter > 2) {
        if (std::fabs(alpha) <= std::numeric_limits<double>::epsilon())
          gain = lambda_av;
        else {
          gain = alpha * exp(-beta * (task.getError()).sumSquare()) + lambda_av;
        }
      } else
        gain = lambda_av;
      if (SAVE == 1)
        gain = gain / 5;

      vpTRACE("%f %f %f %f %f", alpha, beta, lambda_av, (task.getError()).sumSquare(), gain);
      task.setLambda(gain);

      v = task.computeControlLaw();

      // Display the trajectory of the points in the image
      for (std::list<vpImagePoint>::const_iterator it_cog = Lcog.begin(); it_cog != Lcog.end(); ++it_cog) {
        vpDisplay::displayPoint(I, *it_cog, vpColor::red);
      }
      vpServoDisplay::display(task, cam, I);
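      // For an eye-to-hand task built with cVe and eJe, the computed
      // velocity v is a joint velocity, hence the ARTICULAR_FRAME below.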
      robot.setVelocity(vpRobot::ARTICULAR_FRAME, v);

      error = (task.getError()).sumSquare();
      std::cout << "|| s - s* || = " << error << std::endl;

      if (error > 7) {
        vpTRACE("Task error too large: the servo is probably diverging");
        robot.stopMotion();
        return EXIT_FAILURE;
      }

      // Display the current and the desired poses
      // pose.display(I, cMo, cam, 0.04, vpColor::red);
      // pose.display(I, cdMo, cam, 0.04, vpColor::blue);
      if ((SAVE == 1) && (iter % 3 == 0)) {
        vpDisplay::getImage(I, Ic);
        snprintf(name, FILENAME_MAX, "/tmp/marchand/image.%04d.ppm", it++);
        vpImageIo::write(Ic, name);
      }
    }
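    // The servo loop has ended: stop the robot by sending a zero velocity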
    v = 0;
    robot.setVelocity(vpRobot::CAMERA_FRAME, v);
    vpDisplay::getClick(I);
    return EXIT_SUCCESS;
  }
  catch (const vpException &e) {
    std::cout << "Test failed with exception: " << e << std::endl;
    return EXIT_FAILURE;
  }
}

#else
int main()
{
  std::cout << "You do not have an Afma6 robot connected to your computer, or ViSP is not built with firewire (dc1394) support..." << std::endl;
  return EXIT_SUCCESS;
}

#endif
