/****************************************************************************
 *
 * ViSP, open source Visual Servoing Platform software.
 * Copyright (C) 2005 - 2019 by Inria. All rights reserved.
 *
 * This software is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * See the file LICENSE.txt at the root directory of this source
 * distribution for additional information about the GNU GPL.
 *
 * For using ViSP with software that can not be combined with the GNU
 * GPL, please contact Inria about acquiring a ViSP Professional
 * Edition License.
 *
 * See http://visp.inria.fr for more information.
 *
 * This software was developed at:
 * Inria Rennes - Bretagne Atlantique
 * Campus Universitaire de Beaulieu
 * 35042 Rennes Cedex
 * France
 *
 * If you have questions regarding the use of this file, please contact
 * Inria at visp@inria.fr
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Description:
 * Defines the simplest robot: a free flying camera.
 *
 * Authors:
 * Eric Marchand
 *
 *****************************************************************************/

/*!
  \file vpRobotCamera.cpp
  \brief class that defines the simplest robot: a free flying camera
*/

#include <visp3/robot/vpRobotCamera.h>

#if defined(VISP_BUILD_DEPRECATED_FUNCTIONS)

#include <visp3/core/vpDebug.h>
#include <visp3/core/vpExponentialMap.h>
#include <visp3/core/vpHomogeneousMatrix.h>
#include <visp3/robot/vpRobotException.h>

/*!
  Constructor.

  Initialise the robot by a call to init().

  Sampling time is set to 40 ms. To change it you should call
  setSamplingTime().

  Robot jacobian expressed in the end-effector frame \f$ {^e}{\bf J}_e \f$
  is set to identity (see get_eJe()).

  \code
  #include <visp3/robot/vpRobotCamera.h>

  int main()
  {
    vpRobotCamera robot;

    robot.setSamplingTime(0.020); // Set the sampling time to 20 ms.
  }
  \endcode
*/
vpRobotCamera::vpRobotCamera() : cMw_() { init(); }

/*!
  Robot initialisation.

  Robot jacobian expressed in the end-effector frame \f$ {^e}{\bf J}_e \f$
  is set to identity (see get_eJe()).

*/
void vpRobotCamera::init()
{
  nDof = 6;
  eJe.eye(6, 6);
  eJeAvailable = true;
  fJeAvailable = false;
  areJointLimitsAvailable = false;
  qmin = NULL;
  qmax = NULL;

  setMaxTranslationVelocity(1.);           // vx, vy and vz max set to 1 m/s
  setMaxRotationVelocity(vpMath::rad(90)); // wx, wy and wz max set to 90 deg/s
}

/*!
  Destructor.

*/
vpRobotCamera::~vpRobotCamera() {}

/*!

  Get the twist transformation from the camera frame to the end-effector
  frame. This transformation allows converting a velocity expressed in the
  end-effector frame into the camera frame.

  \param cVe : Twist transformation. Here this transformation is equal to
  identity since camera frame and end-effector frame are at the same location.
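
  A minimal usage sketch:
  \code
  vpRobotCamera robot;
  vpVelocityTwistMatrix cVe;
  robot.get_cVe(cVe); // cVe is the identity for this free-flying camera
  \endcode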
*/
void vpRobotCamera::get_cVe(vpVelocityTwistMatrix &cVe) const
{
  vpVelocityTwistMatrix cVe_; // a default-constructed twist matrix is the identity
  cVe = cVe_;
}

/*!
  Get the robot jacobian expressed in the end-effector frame.
  For this simple robot the Jacobian is the identity.

  \param eJe_ : A 6 by 6 matrix representing the robot jacobian \f$ {^e}{\bf
  J}_e\f$ expressed in the end-effector frame.
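
  A minimal usage sketch:
  \code
  vpRobotCamera robot;
  vpMatrix eJe;
  robot.get_eJe(eJe); // for this free-flying camera, eJe is the 6-by-6 identity
  \endcode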
*/
void vpRobotCamera::get_eJe(vpMatrix &eJe_) { eJe_ = this->eJe; }

/*!
  Send a velocity to the controller.

  \param frame : Control frame type. Only the articular frame
  (vpRobot::ARTICULAR_FRAME) and the camera frame (vpRobot::CAMERA_FRAME) are
  implemented.

  \param v : Velocity twist to apply to the robot.

  - In the camera frame, this velocity is represented by a twist vector of
  dimension 6, \f$ {\bf v} = [v_x\; v_y\; v_z\; w_x\; w_y\; w_z]^t \f$, where
  \f$ v_x, v_y, v_z \f$ are the translation velocities in m/s and
  \f$ w_x, w_y, w_z \f$ the rotation velocities in rad/s, applied in the
  camera frame.

  - In the articular frame, the behavior is the same as in the camera frame.

  Internally, the exponential map (vpExponentialMap) is used to update the
  camera position from its velocity after applying the velocity during a
  sampling time. This sampling time can be set using setSamplingTime().
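
  A minimal usage sketch; the velocity values below are illustrative:
  \code
  vpRobotCamera robot;
  vpColVector v(6, 0.);
  v[2] = 0.1;            // translate along the camera z-axis at 0.1 m/s
  v[5] = vpMath::rad(5); // rotate around the camera z-axis at 5 deg/s
  robot.setVelocity(vpRobot::CAMERA_FRAME, v);
  \endcode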
  \sa setSamplingTime()

*/
void vpRobotCamera::setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &v)
{
  switch (frame) {
  case vpRobot::ARTICULAR_FRAME:
  case vpRobot::CAMERA_FRAME: {
    if (vpRobot::STATE_VELOCITY_CONTROL != getRobotState()) {
      setRobotState(vpRobot::STATE_VELOCITY_CONTROL);
    }

    vpColVector v_max(6);

    for (unsigned int i = 0; i < 3; i++)
      v_max[i] = getMaxTranslationVelocity();
    for (unsigned int i = 3; i < 6; i++)
      v_max[i] = getMaxRotationVelocity();

    vpColVector v_sat = vpRobot::saturateVelocities(v, v_max, true);

    // Integrate the saturated velocity over one sampling period using the
    // exponential map and update the camera pose w.r.t. the world frame
    this->cMw_ = vpExponentialMap::direct(v_sat, delta_t_).inverse() * this->cMw_;
    break;
  }
  case vpRobot::REFERENCE_FRAME:
    throw vpRobotException(vpRobotException::wrongStateError, "Cannot set a velocity in the reference frame: "
                                                              "functionality not implemented");
  case vpRobot::MIXT_FRAME:
    throw vpRobotException(vpRobotException::wrongStateError, "Cannot set a velocity in the mixt frame: "
                                                              "functionality not implemented");
  case vpRobot::END_EFFECTOR_FRAME:
    throw vpRobotException(vpRobotException::wrongStateError, "Cannot set a velocity in the end-effector frame: "
                                                              "functionality not implemented");
  }
}

/*!
  Get the robot position as the transformation from the camera frame to the
  world frame.
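
  A minimal usage sketch:
  \code
  vpRobotCamera robot;
  vpHomogeneousMatrix cMw;
  robot.getPosition(cMw); // current camera pose with respect to the world frame
  \endcode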
*/
void vpRobotCamera::getPosition(vpHomogeneousMatrix &cMw) const { cMw = this->cMw_; }

/*!
  Get the current position of the robot.

  \param frame : Control frame type in which to get the position, either:
  - in the camera cartesian frame,
  - joint (articular) coordinates of each axis,
  - in a reference or fixed cartesian frame attached to the robot base,
  - in a mixt cartesian frame (translation in the reference frame, rotation in
  the camera frame).

  \param q : Measured position of the robot:
  - in the camera cartesian frame, a 6-dimension vector, set to 0.

  - in the articular frame, a 6-dimension vector corresponding to the articular
  position of each dof, first the 3 translations, then the 3
  articular rotation positions represented by a vpRxyzVector.

  - in the reference frame, a 6-dimension vector, the first 3 values correspond
  to the translation tx, ty, tz in meters (like a vpTranslationVector), and the
  last 3 values to the rx, ry, rz rotation (like a vpRxyzVector).
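
  A minimal usage sketch:
  \code
  vpRobotCamera robot;
  vpColVector q;
  robot.getPosition(vpRobot::ARTICULAR_FRAME, q); // q = [tx ty tz rx ry rz]
  \endcode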
*/
void vpRobotCamera::getPosition(const vpRobot::vpControlFrameType frame, vpColVector &q)
{
  q.resize(6);

  switch (frame) {
  case vpRobot::CAMERA_FRAME:
    q = 0;
    break;

  case vpRobot::ARTICULAR_FRAME:
  case vpRobot::REFERENCE_FRAME: {
    // Convert the pose cMw_ into a 6-dim vector: translation then Rxyz rotation
    vpRotationMatrix cRw;
    this->cMw_.extract(cRw);
    vpRxyzVector rxyz;
    rxyz.buildFrom(cRw);

    for (unsigned int i = 0; i < 3; i++) {
      q[i] = this->cMw_[i][3]; // translation x,y,z
      q[i + 3] = rxyz[i];      // Euler rotation x,y,z
    }

    break;
  }
  case vpRobot::MIXT_FRAME:
    std::cout << "MIXT_FRAME is not implemented in vpRobotCamera::getPosition()" << std::endl;
    break;
  case vpRobot::END_EFFECTOR_FRAME:
    std::cout << "END_EFFECTOR_FRAME is not implemented in vpRobotCamera::getPosition()" << std::endl;
    break;
  }
}

/*!
  Set the robot position as the transformation from camera frame to world
  frame.
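
  A minimal usage sketch; the pose below (translation in meters, thetaU
  rotation in radians) is illustrative:
  \code
  vpRobotCamera robot;
  vpHomogeneousMatrix cMw(0.1, -0.1, 0.5, 0, 0, 0); // tx, ty, tz, tux, tuy, tuz
  robot.setPosition(cMw);
  \endcode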
*/
void vpRobotCamera::setPosition(const vpHomogeneousMatrix &cMw)
{
  if (vpRobot::STATE_POSITION_CONTROL != getRobotState()) {
    setRobotState(vpRobot::STATE_POSITION_CONTROL);
  }

  this->cMw_ = cMw;
}

#elif !defined(VISP_BUILD_SHARED_LIBS)
// Work around to avoid warning: libvisp_robot.a(vpRobotCamera.cpp.o) has no
// symbols
void dummy_vpRobotCamera() {}
#endif