1 /****************************************************************************
2  *
3  * ViSP, open source Visual Servoing Platform software.
4  * Copyright (C) 2005 - 2019 by Inria. All rights reserved.
5  *
6  * This software is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  * See the file LICENSE.txt at the root directory of this source
11  * distribution for additional information about the GNU GPL.
12  *
13  * For using ViSP with software that can not be combined with the GNU
14  * GPL, please contact Inria about acquiring a ViSP Professional
15  * Edition License.
16  *
17  * See http://visp.inria.fr for more information.
18  *
19  * This software was developed at:
20  * Inria Rennes - Bretagne Atlantique
21  * Campus Universitaire de Beaulieu
22  * 35042 Rennes Cedex
23  * France
24  *
25  * If you have questions regarding the use of this file, please contact
26  * Inria at visp@inria.fr
27  *
28  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
29  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
30  *
31  * Description:
32  * Generic model-based tracker.
33  *
34  *****************************************************************************/
35 
36 #include <visp3/mbt/vpMbGenericTracker.h>
37 
38 #include <visp3/core/vpDisplay.h>
39 #include <visp3/core/vpExponentialMap.h>
40 #include <visp3/core/vpTrackingException.h>
41 #include <visp3/mbt/vpMbtXmlGenericParser.h>
42 
vpMbGenericTracker()43 vpMbGenericTracker::vpMbGenericTracker()
44   : m_error(), m_L(), m_mapOfCameraTransformationMatrix(), m_mapOfFeatureFactors(), m_mapOfTrackers(),
45     m_percentageGdPt(0.4), m_referenceCameraName("Camera"), m_thresholdOutlier(0.5), m_w(), m_weightedError(),
46     m_nb_feat_edge(0), m_nb_feat_klt(0), m_nb_feat_depthNormal(0), m_nb_feat_depthDense(0)
47 {
48   m_mapOfTrackers["Camera"] = new TrackerWrapper(EDGE_TRACKER);
49 
50   // Add default camera transformation matrix
51   m_mapOfCameraTransformationMatrix["Camera"] = vpHomogeneousMatrix();
52 
53   // Add default ponderation between each feature type
54   m_mapOfFeatureFactors[EDGE_TRACKER] = 1.0;
55 
56 #if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
57   m_mapOfFeatureFactors[KLT_TRACKER] = 1.0;
58 #endif
59 
60   m_mapOfFeatureFactors[DEPTH_NORMAL_TRACKER] = 1.0;
61   m_mapOfFeatureFactors[DEPTH_DENSE_TRACKER] = 1.0;
62 }
63 
vpMbGenericTracker(unsigned int nbCameras,int trackerType)64 vpMbGenericTracker::vpMbGenericTracker(unsigned int nbCameras, int trackerType)
65   : m_error(), m_L(), m_mapOfCameraTransformationMatrix(), m_mapOfFeatureFactors(), m_mapOfTrackers(),
66     m_percentageGdPt(0.4), m_referenceCameraName("Camera"), m_thresholdOutlier(0.5), m_w(), m_weightedError(),
67     m_nb_feat_edge(0), m_nb_feat_klt(0), m_nb_feat_depthNormal(0), m_nb_feat_depthDense(0)
68 {
69   if (nbCameras == 0) {
70     throw vpException(vpTrackingException::fatalError, "Cannot use no camera!");
71   } else if (nbCameras == 1) {
72     m_mapOfTrackers["Camera"] = new TrackerWrapper(trackerType);
73 
74     // Add default camera transformation matrix
75     m_mapOfCameraTransformationMatrix["Camera"] = vpHomogeneousMatrix();
76   } else {
77     for (unsigned int i = 1; i <= nbCameras; i++) {
78       std::stringstream ss;
79       ss << "Camera" << i;
80       m_mapOfTrackers[ss.str()] = new TrackerWrapper(trackerType);
81 
82       // Add default camera transformation matrix
83       m_mapOfCameraTransformationMatrix[ss.str()] = vpHomogeneousMatrix();
84     }
85 
86     // Set by default the reference camera to the first one
87     m_referenceCameraName = m_mapOfTrackers.begin()->first;
88   }
89 
90   // Add default ponderation between each feature type
91   m_mapOfFeatureFactors[EDGE_TRACKER] = 1.0;
92 
93 #if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
94   m_mapOfFeatureFactors[KLT_TRACKER] = 1.0;
95 #endif
96 
97   m_mapOfFeatureFactors[DEPTH_NORMAL_TRACKER] = 1.0;
98   m_mapOfFeatureFactors[DEPTH_DENSE_TRACKER] = 1.0;
99 }
100 
vpMbGenericTracker(const std::vector<int> & trackerTypes)101 vpMbGenericTracker::vpMbGenericTracker(const std::vector<int> &trackerTypes)
102   : m_error(), m_L(), m_mapOfCameraTransformationMatrix(), m_mapOfFeatureFactors(), m_mapOfTrackers(),
103     m_percentageGdPt(0.4), m_referenceCameraName("Camera"), m_thresholdOutlier(0.5), m_w(), m_weightedError(),
104     m_nb_feat_edge(0), m_nb_feat_klt(0), m_nb_feat_depthNormal(0), m_nb_feat_depthDense(0)
105 {
106   if (trackerTypes.empty()) {
107     throw vpException(vpException::badValue, "There is no camera!");
108   }
109 
110   if (trackerTypes.size() == 1) {
111     m_mapOfTrackers["Camera"] = new TrackerWrapper(trackerTypes[0]);
112 
113     // Add default camera transformation matrix
114     m_mapOfCameraTransformationMatrix["Camera"] = vpHomogeneousMatrix();
115   } else {
116     for (size_t i = 1; i <= trackerTypes.size(); i++) {
117       std::stringstream ss;
118       ss << "Camera" << i;
119       m_mapOfTrackers[ss.str()] = new TrackerWrapper(trackerTypes[i - 1]);
120 
121       // Add default camera transformation matrix
122       m_mapOfCameraTransformationMatrix[ss.str()] = vpHomogeneousMatrix();
123     }
124 
125     // Set by default the reference camera to the first one
126     m_referenceCameraName = m_mapOfTrackers.begin()->first;
127   }
128 
129   // Add default ponderation between each feature type
130   m_mapOfFeatureFactors[EDGE_TRACKER] = 1.0;
131 
132 #if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
133   m_mapOfFeatureFactors[KLT_TRACKER] = 1.0;
134 #endif
135 
136   m_mapOfFeatureFactors[DEPTH_NORMAL_TRACKER] = 1.0;
137   m_mapOfFeatureFactors[DEPTH_DENSE_TRACKER] = 1.0;
138 }
139 
vpMbGenericTracker(const std::vector<std::string> & cameraNames,const std::vector<int> & trackerTypes)140 vpMbGenericTracker::vpMbGenericTracker(const std::vector<std::string> &cameraNames,
141                                        const std::vector<int> &trackerTypes)
142   : m_error(), m_L(), m_mapOfCameraTransformationMatrix(), m_mapOfFeatureFactors(), m_mapOfTrackers(),
143     m_percentageGdPt(0.4), m_referenceCameraName("Camera"), m_thresholdOutlier(0.5), m_w(), m_weightedError(),
144     m_nb_feat_edge(0), m_nb_feat_klt(0), m_nb_feat_depthNormal(0), m_nb_feat_depthDense(0)
145 {
146   if (cameraNames.size() != trackerTypes.size() || cameraNames.empty()) {
147     throw vpException(vpTrackingException::badValue,
148                       "cameraNames.size() != trackerTypes.size() || cameraNames.empty()");
149   }
150 
151   for (size_t i = 0; i < cameraNames.size(); i++) {
152     m_mapOfTrackers[cameraNames[i]] = new TrackerWrapper(trackerTypes[i]);
153 
154     // Add default camera transformation matrix
155     m_mapOfCameraTransformationMatrix[cameraNames[i]] = vpHomogeneousMatrix();
156   }
157 
158   // Set by default the reference camera to the first one
159   m_referenceCameraName = m_mapOfTrackers.begin()->first;
160 
161   // Add default ponderation between each feature type
162   m_mapOfFeatureFactors[EDGE_TRACKER] = 1.0;
163 
164 #if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
165   m_mapOfFeatureFactors[KLT_TRACKER] = 1.0;
166 #endif
167 
168   m_mapOfFeatureFactors[DEPTH_NORMAL_TRACKER] = 1.0;
169   m_mapOfFeatureFactors[DEPTH_DENSE_TRACKER] = 1.0;
170 }
171 
~vpMbGenericTracker()172 vpMbGenericTracker::~vpMbGenericTracker()
173 {
174   for (std::map<std::string, TrackerWrapper *>::iterator it = m_mapOfTrackers.begin(); it != m_mapOfTrackers.end();
175        ++it) {
176     delete it->second;
177     it->second = NULL;
178   }
179 }
180 
181 /*!
182   Compute projection error given an input image and camera pose, parameters.
183   This projection error uses locations sampled exactly where the model is projected using the camera pose
184   and intrinsic parameters.
185   You may want to use \sa setProjectionErrorComputation \sa getProjectionError
186 
187   to get a projection error computed at the ME locations after a call to track().
188   It works similarly to vpMbTracker::getProjectionError function:
189   <blockquote>
190   Get the error angle between the gradient direction of the model features projected at the resulting pose and their normal.
191   The error is expressed in degree between 0 and 90.
192   </blockquote>
193 
194   \param I : Input grayscale image.
195   \param cMo : Camera pose.
196   \param cam : Camera parameters.
197 */
computeCurrentProjectionError(const vpImage<unsigned char> & I,const vpHomogeneousMatrix & cMo,const vpCameraParameters & cam)198 double vpMbGenericTracker::computeCurrentProjectionError(const vpImage<unsigned char> &I, const vpHomogeneousMatrix &cMo,
199                                                          const vpCameraParameters &cam)
200 {
201   double rawTotalProjectionError = 0.0;
202   unsigned int nbTotalFeaturesUsed = 0;
203 
204   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
205        it != m_mapOfTrackers.end(); ++it) {
206     TrackerWrapper *tracker = it->second;
207 
208     unsigned int nbFeaturesUsed = 0;
209     double curProjError = tracker->computeProjectionErrorImpl(I, cMo, cam, nbFeaturesUsed);
210 
211     if (nbFeaturesUsed > 0) {
212       nbTotalFeaturesUsed += nbFeaturesUsed;
213       rawTotalProjectionError += curProjError;
214     }
215   }
216 
217   if (nbTotalFeaturesUsed > 0) {
218     return vpMath::deg(rawTotalProjectionError / (double)nbTotalFeaturesUsed);
219   }
220 
221   return 90.0;
222 }
223 
224 /*!
225   Compute projection error given an input image and camera pose, parameters.
226   This projection error uses locations sampled exactly where the model is projected using the camera pose
227   and intrinsic parameters.
228   You may want to use \sa setProjectionErrorComputation \sa getProjectionError
229 
230   to get a projection error computed at the ME locations after a call to track().
231   It works similarly to vpMbTracker::getProjectionError function:
232   <blockquote>
233   Get the error angle between the gradient direction of the model features projected at the resulting pose and their normal.
234   The error is expressed in degree between 0 and 90.
235   </blockquote>
236 
237   \param I_color : Input color image.
238   \param _cMo : Camera pose.
239   \param _cam : Camera parameters.
240 */
computeCurrentProjectionError(const vpImage<vpRGBa> & I_color,const vpHomogeneousMatrix & _cMo,const vpCameraParameters & _cam)241 double vpMbGenericTracker::computeCurrentProjectionError(const vpImage<vpRGBa> &I_color, const vpHomogeneousMatrix &_cMo,
242                                                          const vpCameraParameters &_cam)
243 {
244   vpImage<unsigned char> I;
245   vpImageConvert::convert(I_color, I); // FS: Shoudn't we use here m_I that was converted in track() ?
246 
247   return computeCurrentProjectionError(I, _cMo, _cam);
248 }
249 
computeProjectionError()250 void vpMbGenericTracker::computeProjectionError()
251 {
252   if (computeProjError) {
253     double rawTotalProjectionError = 0.0;
254     unsigned int nbTotalFeaturesUsed = 0;
255 
256     for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
257          it != m_mapOfTrackers.end(); ++it) {
258       TrackerWrapper *tracker = it->second;
259 
260       double curProjError = tracker->getProjectionError();
261       unsigned int nbFeaturesUsed = tracker->nbFeaturesForProjErrorComputation;
262 
263       if (nbFeaturesUsed > 0) {
264         nbTotalFeaturesUsed += nbFeaturesUsed;
265         rawTotalProjectionError += (vpMath::rad(curProjError) * nbFeaturesUsed);
266       }
267     }
268 
269     if (nbTotalFeaturesUsed > 0) {
270       projectionError = vpMath::deg(rawTotalProjectionError / (double)nbTotalFeaturesUsed);
271     } else {
272       projectionError = 90.0;
273     }
274   } else {
275     projectionError = 90.0;
276   }
277 }
278 
/*!
  Run the virtual visual servoing (VVS) non-linear pose estimation loop over
  all the cameras: iteratively re-weighted least squares with a
  Levenberg-Marquardt-style damping term (mu), until the residual norm
  stabilizes (m_stopCriteriaEpsilon) or m_maxIter iterations are reached.

  \param mapOfImages : Map of grayscale images, indexed by camera name.

  \throws vpTrackingException::notEnoughPointError if fewer than 4 scalar
  residuals are available (6-dof pose cannot be estimated).
*/
void vpMbGenericTracker::computeVVS(std::map<std::string, const vpImage<unsigned char> *> &mapOfImages)
{
  computeVVSInit(mapOfImages);

  // A minimum number of scalar residuals is required to estimate the pose
  if (m_error.getRows() < 4) {
    throw vpTrackingException(vpTrackingException::notEnoughPointError, "Error: not enough features");
  }

  double normRes = 0;
  double normRes_1 = -1;
  unsigned int iter = 0;

  vpMatrix LTL;
  vpColVector LTR, v;
  vpColVector error_prev;

  double mu = m_initialMu;
  vpHomogeneousMatrix cMo_prev;

  bool isoJoIdentity_ = true;

  // Covariance
  vpColVector W_true(m_error.getRows());
  vpMatrix L_true, LVJ_true;

  // Create the map of VelocityTwistMatrices
  // (one per camera, built from the camera-to-reference transformation)
  std::map<std::string, vpVelocityTwistMatrix> mapOfVelocityTwist;
  for (std::map<std::string, vpHomogeneousMatrix>::const_iterator it = m_mapOfCameraTransformationMatrix.begin();
       it != m_mapOfCameraTransformationMatrix.end(); ++it) {
    vpVelocityTwistMatrix cVo;
    cVo.buildFrom(it->second);
    mapOfVelocityTwist[it->first] = cVo;
  }

  // Per-feature-type ponderation factors applied on top of the robust weights
  double factorEdge = m_mapOfFeatureFactors[EDGE_TRACKER];
#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
  double factorKlt = m_mapOfFeatureFactors[KLT_TRACKER];
#endif
  double factorDepth = m_mapOfFeatureFactors[DEPTH_NORMAL_TRACKER];
  double factorDepthDense = m_mapOfFeatureFactors[DEPTH_DENSE_TRACKER];

  m_nb_feat_edge = 0;
  m_nb_feat_klt = 0;
  m_nb_feat_depthNormal = 0;
  m_nb_feat_depthDense = 0;

  while (std::fabs(normRes_1 - normRes) > m_stopCriteriaEpsilon && (iter < m_maxIter)) {
    computeVVSInteractionMatrixAndResidu(mapOfImages, mapOfVelocityTwist);

    // Levenberg-Marquardt check: if the error increased, restore the previous
    // pose in every camera frame and retry with a larger damping term
    bool reStartFromLastIncrement = false;
    computeVVSCheckLevenbergMarquardt(iter, m_error, error_prev, cMo_prev, mu, reStartFromLastIncrement);
    if (reStartFromLastIncrement) {
      for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
           it != m_mapOfTrackers.end(); ++it) {
        TrackerWrapper *tracker = it->second;

        tracker->m_cMo = m_mapOfCameraTransformationMatrix[it->first] * cMo_prev;

#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
        // KLT also tracks the transformation between initial and current frame
        vpHomogeneousMatrix c_curr_tTc_curr0 =
            m_mapOfCameraTransformationMatrix[it->first] * cMo_prev * tracker->c0Mo.inverse();
        tracker->ctTc0 = c_curr_tTc_curr0;
#endif
      }
    }

    if (!reStartFromLastIncrement) {
      computeVVSWeights();

      if (computeCovariance) {
        // Keep an unweighted copy of L (and L*cVo*oJo) for the covariance
        L_true = m_L;
        if (!isoJoIdentity_) {
          vpVelocityTwistMatrix cVo;
          cVo.buildFrom(m_cMo);
          LVJ_true = (m_L * (cVo * oJo));
        }
      }

      vpVelocityTwistMatrix cVo;
      if (iter == 0) {
        isoJoIdentity_ = true;
        oJo.eye();

        // If all the 6 dof should be estimated, we check if the interaction
        // matrix is full rank. If not we remove automatically the dof that
        // cannot be estimated This is particularly useful when consering
        // circles (rank 5) and cylinders (rank 4)
        if (isoJoIdentity_) {
          cVo.buildFrom(m_cMo);

          vpMatrix K; // kernel
          unsigned int rank = (m_L * cVo).kernel(K);
          if (rank == 0) {
            throw vpException(vpException::fatalError, "Rank=0, cannot estimate the pose !");
          }

          if (rank != 6) {
            // Project out the non-estimable dof: oJo = I - K^T K
            vpMatrix I; // Identity
            I.eye(6);
            oJo = I - K.AtA();

            isoJoIdentity_ = false;
          }
        }
      }

      // Weighting: apply the robust weight and the per-feature-type factor to
      // each residual row and the corresponding row of the interaction matrix
      double num = 0;
      double den = 0;

      unsigned int start_index = 0;
      for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
           it != m_mapOfTrackers.end(); ++it) {
        TrackerWrapper *tracker = it->second;

        if (tracker->m_trackerType & EDGE_TRACKER) {
          for (unsigned int i = 0; i < tracker->m_error_edge.getRows(); i++) {
            // Edge rows also carry a per-sample factor (m_factor)
            double wi = tracker->m_w_edge[i] * tracker->m_factor[i] * factorEdge;
            W_true[start_index + i] = wi;
            m_weightedError[start_index + i] = wi * m_error[start_index + i];

            num += wi * vpMath::sqr(m_error[start_index + i]);
            den += wi;

            for (unsigned int j = 0; j < m_L.getCols(); j++) {
              m_L[start_index + i][j] *= wi;
            }
          }

          start_index += tracker->m_error_edge.getRows();
        }

#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
        if (tracker->m_trackerType & KLT_TRACKER) {
          for (unsigned int i = 0; i < tracker->m_error_klt.getRows(); i++) {
            double wi = tracker->m_w_klt[i] * factorKlt;
            W_true[start_index + i] = wi;
            m_weightedError[start_index + i] = wi * m_error[start_index + i];

            num += wi * vpMath::sqr(m_error[start_index + i]);
            den += wi;

            for (unsigned int j = 0; j < m_L.getCols(); j++) {
              m_L[start_index + i][j] *= wi;
            }
          }

          start_index += tracker->m_error_klt.getRows();
        }
#endif

        if (tracker->m_trackerType & DEPTH_NORMAL_TRACKER) {
          for (unsigned int i = 0; i < tracker->m_error_depthNormal.getRows(); i++) {
            double wi = tracker->m_w_depthNormal[i] * factorDepth;
            W_true[start_index + i] = wi;
            m_weightedError[start_index + i] = wi * m_error[start_index + i];

            num += wi * vpMath::sqr(m_error[start_index + i]);
            den += wi;

            for (unsigned int j = 0; j < m_L.getCols(); j++) {
              m_L[start_index + i][j] *= wi;
            }
          }

          start_index += tracker->m_error_depthNormal.getRows();
        }

        if (tracker->m_trackerType & DEPTH_DENSE_TRACKER) {
          for (unsigned int i = 0; i < tracker->m_error_depthDense.getRows(); i++) {
            double wi = tracker->m_w_depthDense[i] * factorDepthDense;
            W_true[start_index + i] = wi;
            m_weightedError[start_index + i] = wi * m_error[start_index + i];

            num += wi * vpMath::sqr(m_error[start_index + i]);
            den += wi;

            for (unsigned int j = 0; j < m_L.getCols(); j++) {
              m_L[start_index + i][j] *= wi;
            }
          }

          start_index += tracker->m_error_depthDense.getRows();
        }
      }

      // Weighted RMS of the residual, used by the stop criterion
      normRes_1 = normRes;
      normRes = sqrt(num / den);

      computeVVSPoseEstimation(isoJoIdentity_, iter, m_L, LTL, m_weightedError, m_error, error_prev, LTR, mu, v);

      cMo_prev = m_cMo;

      // Apply the computed velocity v to update the reference-camera pose
      m_cMo = vpExponentialMap::direct(v).inverse() * m_cMo;

#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
      for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
           it != m_mapOfTrackers.end(); ++it) {
        TrackerWrapper *tracker = it->second;

        vpHomogeneousMatrix c_curr_tTc_curr0 =
            m_mapOfCameraTransformationMatrix[it->first] * m_cMo * tracker->c0Mo.inverse();
        tracker->ctTc0 = c_curr_tTc_curr0;
      }
#endif

      // Update cMo (propagate the new reference pose to every camera frame)
      for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
           it != m_mapOfTrackers.end(); ++it) {
        TrackerWrapper *tracker = it->second;
        tracker->m_cMo = m_mapOfCameraTransformationMatrix[it->first] * m_cMo;
      }
    }

    iter++;
  }

  // Update features number (totals per feature type, over all the cameras)
  for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
       it != m_mapOfTrackers.end(); ++it) {
    TrackerWrapper *tracker = it->second;
    if (tracker->m_trackerType & EDGE_TRACKER) {
      m_nb_feat_edge += tracker->m_error_edge.size();
    }
#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
    if (tracker->m_trackerType & KLT_TRACKER) {
      m_nb_feat_klt += tracker->m_error_klt.size();
    }
#endif
    if (tracker->m_trackerType & DEPTH_NORMAL_TRACKER) {
      m_nb_feat_depthNormal += tracker->m_error_depthNormal.size();
    }
    if (tracker->m_trackerType & DEPTH_DENSE_TRACKER) {
      m_nb_feat_depthDense += tracker->m_error_depthDense.size();
    }
  }

  computeCovarianceMatrixVVS(isoJoIdentity_, W_true, cMo_prev, L_true, LVJ_true, m_error);

  for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
       it != m_mapOfTrackers.end(); ++it) {
    TrackerWrapper *tracker = it->second;

    if (tracker->m_trackerType & EDGE_TRACKER) {
      tracker->updateMovingEdgeWeights();
    }
  }
}
527 
/*!
  Base-class overload kept only to satisfy the interface: the generic tracker
  needs the per-camera images, so callers must use
  computeVVSInit(mapOfImages) instead. Always throws.
*/
void vpMbGenericTracker::computeVVSInit()
{
  throw vpException(vpException::fatalError, "vpMbGenericTracker::computeVVSInit() should not be called!");
}
532 
computeVVSInit(std::map<std::string,const vpImage<unsigned char> * > & mapOfImages)533 void vpMbGenericTracker::computeVVSInit(std::map<std::string, const vpImage<unsigned char> *> &mapOfImages)
534 {
535   unsigned int nbFeatures = 0;
536 
537   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
538        it != m_mapOfTrackers.end(); ++it) {
539     TrackerWrapper *tracker = it->second;
540     tracker->computeVVSInit(mapOfImages[it->first]);
541 
542     nbFeatures += tracker->m_error.getRows();
543   }
544 
545   m_L.resize(nbFeatures, 6, false, false);
546   m_error.resize(nbFeatures, false);
547 
548   m_weightedError.resize(nbFeatures, false);
549   m_w.resize(nbFeatures, false);
550   m_w = 1;
551 }
552 
computeVVSInteractionMatrixAndResidu()553 void vpMbGenericTracker::computeVVSInteractionMatrixAndResidu()
554 {
555   throw vpException(vpException::fatalError, "vpMbGenericTracker::"
556                                              "computeVVSInteractionMatrixAndR"
557                                              "esidu() should not be called");
558 }
559 
/*!
  Build the stacked interaction matrix (m_L) and residual vector (m_error)
  from every camera tracker, after propagating the current reference pose
  (m_cMo) to each camera frame.

  \param mapOfImages : Map of grayscale images, indexed by camera name.
  \param mapOfVelocityTwist : Per-camera velocity twist matrices used to map
  each tracker's interaction matrix into the reference camera frame.
*/
void vpMbGenericTracker::computeVVSInteractionMatrixAndResidu(
    std::map<std::string, const vpImage<unsigned char> *> &mapOfImages,
    std::map<std::string, vpVelocityTwistMatrix> &mapOfVelocityTwist)
{
  unsigned int start_index = 0;

  for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
       it != m_mapOfTrackers.end(); ++it) {
    TrackerWrapper *tracker = it->second;

    // Express the current reference pose in this camera's frame
    tracker->m_cMo = m_mapOfCameraTransformationMatrix[it->first] * m_cMo;
#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
    // KLT also tracks the transformation between the initial and current frame
    vpHomogeneousMatrix c_curr_tTc_curr0 = m_mapOfCameraTransformationMatrix[it->first] * m_cMo * tracker->c0Mo.inverse();
    tracker->ctTc0 = c_curr_tTc_curr0;
#endif

    tracker->computeVVSInteractionMatrixAndResidu(mapOfImages[it->first]);

    // Stack this camera's contribution, mapped through its velocity twist
    m_L.insert(tracker->m_L * mapOfVelocityTwist[it->first], start_index, 0);
    m_error.insert(start_index, tracker->m_error);

    start_index += tracker->m_error.getRows();
  }
}
584 
computeVVSWeights()585 void vpMbGenericTracker::computeVVSWeights()
586 {
587   unsigned int start_index = 0;
588 
589   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
590        it != m_mapOfTrackers.end(); ++it) {
591     TrackerWrapper *tracker = it->second;
592     tracker->computeVVSWeights();
593 
594     m_w.insert(start_index, tracker->m_w);
595     start_index += tracker->m_w.getRows();
596   }
597 }
598 
599 /*!
600   Display the 3D model from a given position of the camera.
601 
602   \param I : The grayscale image.
603   \param cMo : Pose used to project the 3D model into the image.
604   \param cam : The camera parameters.
605   \param col : The desired color.
606   \param thickness : The thickness of the lines.
607   \param displayFullModel : If true, the full model is displayed (even the non
608   visible faces).
609 
610   \note This function will display the model only for the reference camera.
611 */
display(const vpImage<unsigned char> & I,const vpHomogeneousMatrix & cMo,const vpCameraParameters & cam,const vpColor & col,unsigned int thickness,bool displayFullModel)612 void vpMbGenericTracker::display(const vpImage<unsigned char> &I, const vpHomogeneousMatrix &cMo,
613                                  const vpCameraParameters &cam, const vpColor &col, unsigned int thickness,
614                                  bool displayFullModel)
615 {
616   std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.find(m_referenceCameraName);
617   if (it != m_mapOfTrackers.end()) {
618     TrackerWrapper *tracker = it->second;
619     tracker->display(I, cMo, cam, col, thickness, displayFullModel);
620   } else {
621     std::cerr << "Cannot find the reference camera: " << m_referenceCameraName << "!" << std::endl;
622   }
623 }
624 
625 /*!
626   Display the 3D model from a given position of the camera.
627 
628   \param I : The color image.
629   \param cMo : Pose used to project the 3D model into the image.
630   \param cam : The camera parameters.
631   \param col : The desired color.
632   \param thickness : The thickness of the lines.
633   \param displayFullModel : If true, the full model is displayed (even the non
634   visible faces).
635 
636   \note This function will display the model only for the reference camera.
637 */
display(const vpImage<vpRGBa> & I,const vpHomogeneousMatrix & cMo,const vpCameraParameters & cam,const vpColor & col,unsigned int thickness,bool displayFullModel)638 void vpMbGenericTracker::display(const vpImage<vpRGBa> &I, const vpHomogeneousMatrix &cMo,
639                                  const vpCameraParameters &cam, const vpColor &col, unsigned int thickness,
640                                  bool displayFullModel)
641 {
642   std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.find(m_referenceCameraName);
643   if (it != m_mapOfTrackers.end()) {
644     TrackerWrapper *tracker = it->second;
645     tracker->display(I, cMo, cam, col, thickness, displayFullModel);
646   } else {
647     std::cerr << "Cannot find the reference camera: " << m_referenceCameraName << "!" << std::endl;
648   }
649 }
650 
651 /*!
652   Display the 3D model from a given position of the camera.
653 
654   \param I1 : The first grayscale image.
655   \param I2 : The second grayscale image.
656   \param c1Mo : Pose used to project the 3D model into the first image.
657   \param c2Mo : Pose used to project the 3D model into the second image.
658   \param cam1 : The first camera parameters.
659   \param cam2 : The second camera parameters.
660   \param color : The desired color.
661   \param thickness : The thickness of the lines.
662   \param displayFullModel : If true, the full model is displayed (even the non
663   visible faces).
664 
665   \note This function assumes a stereo configuration of the generic tracker.
666 */
display(const vpImage<unsigned char> & I1,const vpImage<unsigned char> & I2,const vpHomogeneousMatrix & c1Mo,const vpHomogeneousMatrix & c2Mo,const vpCameraParameters & cam1,const vpCameraParameters & cam2,const vpColor & color,unsigned int thickness,bool displayFullModel)667 void vpMbGenericTracker::display(const vpImage<unsigned char> &I1, const vpImage<unsigned char> &I2,
668                                  const vpHomogeneousMatrix &c1Mo, const vpHomogeneousMatrix &c2Mo,
669                                  const vpCameraParameters &cam1, const vpCameraParameters &cam2, const vpColor &color,
670                                  unsigned int thickness, bool displayFullModel)
671 {
672   if (m_mapOfTrackers.size() == 2) {
673     std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
674     it->second->display(I1, c1Mo, cam1, color, thickness, displayFullModel);
675     ++it;
676 
677     it->second->display(I2, c2Mo, cam2, color, thickness, displayFullModel);
678   } else {
679     std::cerr << "The tracker is not set as a stereo configuration! There are " << m_mapOfTrackers.size() << " cameras!"
680               << std::endl;
681   }
682 }
683 
684 /*!
685   Display the 3D model from a given position of the camera.
686 
687   \param I1 : The first color image.
688   \param I2 : The second color image.
689   \param c1Mo : Pose used to project the 3D model into the first image.
690   \param c2Mo : Pose used to project the 3D model into the second image.
691   \param cam1 : The first camera parameters.
692   \param cam2 : The second camera parameters.
693   \param color : The desired color.
694   \param thickness : The thickness of the lines.
695   \param displayFullModel : If true, the full model is displayed (even the non
696   visible faces).
697 
698   \note This function assumes a stereo configuration of the generic tracker.
699 */
display(const vpImage<vpRGBa> & I1,const vpImage<vpRGBa> & I2,const vpHomogeneousMatrix & c1Mo,const vpHomogeneousMatrix & c2Mo,const vpCameraParameters & cam1,const vpCameraParameters & cam2,const vpColor & color,unsigned int thickness,bool displayFullModel)700 void vpMbGenericTracker::display(const vpImage<vpRGBa> &I1, const vpImage<vpRGBa> &I2, const vpHomogeneousMatrix &c1Mo,
701                                  const vpHomogeneousMatrix &c2Mo, const vpCameraParameters &cam1,
702                                  const vpCameraParameters &cam2, const vpColor &color, unsigned int thickness,
703                                  bool displayFullModel)
704 {
705   if (m_mapOfTrackers.size() == 2) {
706     std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
707     it->second->display(I1, c1Mo, cam1, color, thickness, displayFullModel);
708     ++it;
709 
710     it->second->display(I2, c2Mo, cam2, color, thickness, displayFullModel);
711   } else {
712     std::cerr << "The tracker is not set as a stereo configuration! There are " << m_mapOfTrackers.size() << " cameras!"
713               << std::endl;
714   }
715 }
716 
717 /*!
718   Display the 3D model from a given position of the camera.
719 
720   \param mapOfImages : Map of grayscale images.
721   \param mapOfCameraPoses : Map of camera poses.
722   \param mapOfCameraParameters : Map of camera parameters.
723   \param col : The desired color.
724   \param thickness : The thickness of the lines.
725   \param displayFullModel : If true, the full model is displayed (even the non
726   visible faces).
727 */
display(const std::map<std::string,const vpImage<unsigned char> * > & mapOfImages,const std::map<std::string,vpHomogeneousMatrix> & mapOfCameraPoses,const std::map<std::string,vpCameraParameters> & mapOfCameraParameters,const vpColor & col,unsigned int thickness,bool displayFullModel)728 void vpMbGenericTracker::display(const std::map<std::string, const vpImage<unsigned char> *> &mapOfImages,
729                                  const std::map<std::string, vpHomogeneousMatrix> &mapOfCameraPoses,
730                                  const std::map<std::string, vpCameraParameters> &mapOfCameraParameters,
731                                  const vpColor &col, unsigned int thickness, bool displayFullModel)
732 {
733   // Display only for the given images
734   for (std::map<std::string, const vpImage<unsigned char> *>::const_iterator it_img = mapOfImages.begin();
735        it_img != mapOfImages.end(); ++it_img) {
736     std::map<std::string, TrackerWrapper *>::const_iterator it_tracker = m_mapOfTrackers.find(it_img->first);
737     std::map<std::string, vpHomogeneousMatrix>::const_iterator it_camPose = mapOfCameraPoses.find(it_img->first);
738     std::map<std::string, vpCameraParameters>::const_iterator it_cam = mapOfCameraParameters.find(it_img->first);
739 
740     if (it_tracker != m_mapOfTrackers.end() && it_camPose != mapOfCameraPoses.end() &&
741         it_cam != mapOfCameraParameters.end()) {
742       TrackerWrapper *tracker = it_tracker->second;
743       tracker->display(*it_img->second, it_camPose->second, it_cam->second, col, thickness, displayFullModel);
744     } else {
745       std::cerr << "Missing elements for image:" << it_img->first << "!" << std::endl;
746     }
747   }
748 }
749 
750 /*!
751   Display the 3D model from a given position of the camera.
752 
753   \param mapOfImages : Map of color images.
754   \param mapOfCameraPoses : Map of camera poses.
755   \param mapOfCameraParameters : Map of camera parameters.
756   \param col : The desired color.
757   \param thickness : The thickness of the lines.
758   \param displayFullModel : If true, the full model is displayed (even the non
759   visible faces).
760 */
display(const std::map<std::string,const vpImage<vpRGBa> * > & mapOfImages,const std::map<std::string,vpHomogeneousMatrix> & mapOfCameraPoses,const std::map<std::string,vpCameraParameters> & mapOfCameraParameters,const vpColor & col,unsigned int thickness,bool displayFullModel)761 void vpMbGenericTracker::display(const std::map<std::string, const vpImage<vpRGBa> *> &mapOfImages,
762                                  const std::map<std::string, vpHomogeneousMatrix> &mapOfCameraPoses,
763                                  const std::map<std::string, vpCameraParameters> &mapOfCameraParameters,
764                                  const vpColor &col, unsigned int thickness, bool displayFullModel)
765 {
766   // Display only for the given images
767   for (std::map<std::string, const vpImage<vpRGBa> *>::const_iterator it_img = mapOfImages.begin();
768        it_img != mapOfImages.end(); ++it_img) {
769     std::map<std::string, TrackerWrapper *>::const_iterator it_tracker = m_mapOfTrackers.find(it_img->first);
770     std::map<std::string, vpHomogeneousMatrix>::const_iterator it_camPose = mapOfCameraPoses.find(it_img->first);
771     std::map<std::string, vpCameraParameters>::const_iterator it_cam = mapOfCameraParameters.find(it_img->first);
772 
773     if (it_tracker != m_mapOfTrackers.end() && it_camPose != mapOfCameraPoses.end() &&
774         it_cam != mapOfCameraParameters.end()) {
775       TrackerWrapper *tracker = it_tracker->second;
776       tracker->display(*it_img->second, it_camPose->second, it_cam->second, col, thickness, displayFullModel);
777     } else {
778       std::cerr << "Missing elements for image:" << it_img->first << "!" << std::endl;
779     }
780   }
781 }
782 
783 /*!
784   Get the camera names.
785 
786   \return The vector of camera names.
787 */
getCameraNames() const788 std::vector<std::string> vpMbGenericTracker::getCameraNames() const
789 {
790   std::vector<std::string> cameraNames;
791 
792   for (std::map<std::string, TrackerWrapper *>::const_iterator it_tracker = m_mapOfTrackers.begin();
793        it_tracker != m_mapOfTrackers.end(); ++it_tracker) {
794     cameraNames.push_back(it_tracker->first);
795   }
796 
797   return cameraNames;
798 }
799 
getCameraParameters(vpCameraParameters & camera) const800 void vpMbGenericTracker::getCameraParameters(vpCameraParameters &camera) const
801 {
802   vpMbTracker::getCameraParameters(camera);
803 }
804 
805 /*!
806   Get all the camera parameters.
807 
808   \param cam1 : Copy of the camera parameters for the first camera.
809   \param cam2 : Copy of the camera parameters for the second camera.
810 
811   \note This function assumes a stereo configuration of the generic tracker.
812 */
getCameraParameters(vpCameraParameters & cam1,vpCameraParameters & cam2) const813 void vpMbGenericTracker::getCameraParameters(vpCameraParameters &cam1, vpCameraParameters &cam2) const
814 {
815   if (m_mapOfTrackers.size() == 2) {
816     std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
817     it->second->getCameraParameters(cam1);
818     ++it;
819 
820     it->second->getCameraParameters(cam2);
821   } else {
822     std::cerr << "The tracker is not set as a stereo configuration! There are " << m_mapOfTrackers.size() << " cameras!"
823               << std::endl;
824   }
825 }
826 
827 /*!
828   Get all the camera parameters.
829 
830   \param mapOfCameraParameters : Map of camera parameters.
831 */
getCameraParameters(std::map<std::string,vpCameraParameters> & mapOfCameraParameters) const832 void vpMbGenericTracker::getCameraParameters(std::map<std::string, vpCameraParameters> &mapOfCameraParameters) const
833 {
834   // Clear the input map
835   mapOfCameraParameters.clear();
836 
837   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
838        it != m_mapOfTrackers.end(); ++it) {
839     vpCameraParameters cam_;
840     it->second->getCameraParameters(cam_);
841     mapOfCameraParameters[it->first] = cam_;
842   }
843 }
844 
845 /*!
846   Get the camera tracker types.
847 
848   \return The map of camera tracker types.
849   \sa vpTrackerType
850 */
getCameraTrackerTypes() const851 std::map<std::string, int> vpMbGenericTracker::getCameraTrackerTypes() const
852 {
853   std::map<std::string, int> trackingTypes;
854 
855   TrackerWrapper *traker;
856   for (std::map<std::string, TrackerWrapper *>::const_iterator it_tracker = m_mapOfTrackers.begin();
857        it_tracker != m_mapOfTrackers.end(); ++it_tracker) {
858     traker = it_tracker->second;
859     trackingTypes[it_tracker->first] = traker->getTrackerType();
860   }
861 
862   return trackingTypes;
863 }
864 
865 /*!
866   Get the clipping used and defined in vpPolygon3D::vpMbtPolygonClippingType.
867 
868   \param clippingFlag1 : Clipping flags for the first camera.
869   \param clippingFlag2 : Clipping flags for the second camera.
870 
871   \note This function assumes a stereo configuration of the generic tracker.
872 */
getClipping(unsigned int & clippingFlag1,unsigned int & clippingFlag2) const873 void vpMbGenericTracker::getClipping(unsigned int &clippingFlag1, unsigned int &clippingFlag2) const
874 {
875   if (m_mapOfTrackers.size() == 2) {
876     std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
877     clippingFlag1 = it->second->getClipping();
878     ++it;
879 
880     clippingFlag2 = it->second->getClipping();
881   } else {
882     std::cerr << "The tracker is not set as a stereo configuration! There are " << m_mapOfTrackers.size() << " cameras!"
883               << std::endl;
884   }
885 }
886 
887 /*!
888   Get the clipping used and defined in vpPolygon3D::vpMbtPolygonClippingType.
889 
890   \param mapOfClippingFlags : Map of clipping flags.
891 */
getClipping(std::map<std::string,unsigned int> & mapOfClippingFlags) const892 void vpMbGenericTracker::getClipping(std::map<std::string, unsigned int> &mapOfClippingFlags) const
893 {
894   mapOfClippingFlags.clear();
895 
896   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
897        it != m_mapOfTrackers.end(); ++it) {
898     TrackerWrapper *tracker = it->second;
899     mapOfClippingFlags[it->first] = tracker->getClipping();
900   }
901 }
902 
903 /*!
904   Return a reference to the faces structure.
905 
906   \return Reference to the face structure.
907 */
getFaces()908 vpMbHiddenFaces<vpMbtPolygon> &vpMbGenericTracker::getFaces()
909 {
910   std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.find(m_referenceCameraName);
911   if (it != m_mapOfTrackers.end()) {
912     return it->second->getFaces();
913   }
914 
915   std::cerr << "The reference camera: " << m_referenceCameraName << " cannot be found!" << std::endl;
916   return faces;
917 }
918 
919 /*!
920   Return a reference to the faces structure for the given camera name.
921 
922   \return Reference to the face structure.
923 */
getFaces(const std::string & cameraName)924 vpMbHiddenFaces<vpMbtPolygon> &vpMbGenericTracker::getFaces(const std::string &cameraName)
925 {
926   std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.find(cameraName);
927   if (it != m_mapOfTrackers.end()) {
928     return it->second->getFaces();
929   }
930 
931   std::cerr << "The camera: " << cameraName << " cannot be found!" << std::endl;
932   return faces;
933 }
934 
935 #if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
936 /*!
937   Return the address of the circle feature list for the reference camera.
938 */
getFeaturesCircle()939 std::list<vpMbtDistanceCircle *> &vpMbGenericTracker::getFeaturesCircle()
940 {
941   std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.find(m_referenceCameraName);
942   if (it != m_mapOfTrackers.end()) {
943     TrackerWrapper *tracker = it->second;
944     return tracker->getFeaturesCircle();
945   } else {
946     throw vpException(vpTrackingException::badValue, "Cannot find the reference camera:  %s!",
947                       m_referenceCameraName.c_str());
948   }
949 }
950 
951 /*!
952   Return the address of the cylinder feature list for the reference camera.
953 */
getFeaturesKltCylinder()954 std::list<vpMbtDistanceKltCylinder *> &vpMbGenericTracker::getFeaturesKltCylinder()
955 {
956   std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.find(m_referenceCameraName);
957   if (it != m_mapOfTrackers.end()) {
958     TrackerWrapper *tracker = it->second;
959     return tracker->getFeaturesKltCylinder();
960   } else {
961     throw vpException(vpTrackingException::badValue, "Cannot find the reference camera:  %s!",
962                       m_referenceCameraName.c_str());
963   }
964 }
965 
966 /*!
967   Return the address of the Klt feature list for the reference camera.
968 */
getFeaturesKlt()969 std::list<vpMbtDistanceKltPoints *> &vpMbGenericTracker::getFeaturesKlt()
970 {
971   std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.find(m_referenceCameraName);
972   if (it != m_mapOfTrackers.end()) {
973     TrackerWrapper *tracker = it->second;
974     return tracker->getFeaturesKlt();
975   } else {
976     throw vpException(vpTrackingException::badValue, "Cannot find the reference camera:  %s!",
977                       m_referenceCameraName.c_str());
978   }
979 }
980 #endif
981 
982 /*!
983   Returns a list of visual features parameters for the reference camera.
984   The first element of the vector indicates the feature type that is either a moving-edge (ME) when
985   value is 0, or a keypoint (KLT) when value is 1.
986   Then behind, the second element of the vector gives the corresponding feature
987   parameters.
988   - Moving-edges parameters are: `<feature id (here 0 for ME)>`, `<pt.i()>`, `<pt.j()>`, `<state>` where `pt.i(), pt.j()`
989     are the coordinates of the moving-edge point feature, and `state` with values in range [0,4] indicates the state of the ME
990     - 0 for vpMeSite::NO_SUPPRESSION
991     - 1 for vpMeSite::CONSTRAST
992     - 2 for vpMeSite::THRESHOLD
993     - 3 for vpMeSite::M_ESTIMATOR
994     - 4 for vpMeSite::TOO_NEAR
995   - KLT parameters are: `<feature id (here 1 for KLT)>`, `<pt.i()>`, `<pt.j()>`,
996   `<klt_id.i()>`, `<klt_id.j()>`, `<klt_id.id>`
997 
998   When the tracking is achieved with features from multiple cameras you can use rather
999   getFeaturesForDisplay(std::map<std::string, std::vector<std::vector<double> > > &).
1000 
1001   It can be used to display the 3D model with a render engine of your choice.
1002 
1003   \note It returns the visual features for the reference camera.
1004 
1005   \sa getModelForDisplay(unsigned int, unsigned int, const vpHomogeneousMatrix &, const vpCameraParameters &, bool)
1006 */
getFeaturesForDisplay()1007 std::vector<std::vector<double> > vpMbGenericTracker::getFeaturesForDisplay()
1008 {
1009   std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.find(m_referenceCameraName);
1010 
1011   if (it != m_mapOfTrackers.end()) {
1012     return it->second->getFeaturesForDisplay();
1013   } else {
1014     std::cerr << "The reference camera: " << m_referenceCameraName << " does not exist!" << std::endl;
1015   }
1016 
1017   return std::vector<std::vector<double> >();
1018 }
1019 
1020 /*!
1021   Get a list of visual features parameters for multiple cameras. The considered camera name is the first element of the map.
1022   The second element of the map contains the visual features parameters where
1023   the first element of the vector indicates the feature type that is either a moving-edge (ME) when
1024   value is 0, or a keypoint (KLT) when value is 1.
1025   Then behind, the second element of the vector gives the corresponding feature
1026   parameters.
1027   - Moving-edges parameters are: `<feature id (here 0 for ME)>`, `<pt.i()>`, `<pt.j()>`, `<state>` where `pt.i(), pt.j()`
1028     are the coordinates of the moving-edge point feature, and `state` with values in range [0,4] indicates the state of the ME
1029     - 0 for vpMeSite::NO_SUPPRESSION
1030     - 1 for vpMeSite::CONSTRAST
1031     - 2 for vpMeSite::THRESHOLD
1032     - 3 for vpMeSite::M_ESTIMATOR
1033     - 4 for vpMeSite::TOO_NEAR
1034   - KLT parameters are: `<feature id (here 1 for KLT)>`, `<pt.i()>`, `<pt.j()>`,
1035   `<klt_id.i()>`, `<klt_id.j()>`, `<klt_id.id>`
1036   It can be used to display the 3D model with a render engine of your choice.
1037 
1038   When the tracking is achieved with features from a single camera you can use rather
1039   getFeaturesForDisplay().
1040 
1041   \sa getModelForDisplay(std::map<std::string, std::vector<std::vector<double> > > &, const std::map<std::string, unsigned int> &, const std::map<std::string, unsigned int> &, const std::map<std::string, vpHomogeneousMatrix> &, const std::map<std::string, vpCameraParameters> &, bool)
1042 */
getFeaturesForDisplay(std::map<std::string,std::vector<std::vector<double>>> & mapOfFeatures)1043 void vpMbGenericTracker::getFeaturesForDisplay(std::map<std::string, std::vector<std::vector<double> > > &mapOfFeatures)
1044 {
1045   // Clear the input map
1046   mapOfFeatures.clear();
1047 
1048   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
1049        it != m_mapOfTrackers.end(); ++it) {
1050     mapOfFeatures[it->first] = it->second->getFeaturesForDisplay();
1051   }
1052 }
1053 
1054 /*!
1055    \return The threshold value between 0 and 1 over good moving edges ratio.
1056    It allows to decide if the tracker has enough valid moving edges to compute
1057    a pose. 1 means that all moving edges should be considered as good to have
1058    a valid pose, while 0.1 means that 10% of the moving edge are enough to
1059    declare a pose valid.
1060 
1061    \sa setGoodMovingEdgesRatioThreshold()
1062 */
getGoodMovingEdgesRatioThreshold() const1063 double vpMbGenericTracker::getGoodMovingEdgesRatioThreshold() const { return m_percentageGdPt; }
1064 
1065 #if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
1066 /*!
1067   Get the current list of KLT points for the reference camera.
1068 
1069   \warning This function converts and copies the OpenCV KLT points into
1070   vpImagePoints.
1071 
1072   \return the list of KLT points through vpKltOpencv.
1073 */
getKltImagePoints() const1074 std::vector<vpImagePoint> vpMbGenericTracker::getKltImagePoints() const
1075 {
1076   std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.find(m_referenceCameraName);
1077   if (it != m_mapOfTrackers.end()) {
1078     TrackerWrapper *tracker = it->second;
1079     return tracker->getKltImagePoints();
1080   } else {
1081     std::cerr << "Cannot find the reference camera: " << m_referenceCameraName << "!" << std::endl;
1082   }
1083 
1084   return std::vector<vpImagePoint>();
1085 }
1086 
1087 /*!
1088   Get the current list of KLT points and their id for the reference camera.
1089 
1090   \warning This function converts and copies the OpenCV KLT points into
1091   vpImagePoints.
1092 
1093   \return the list of KLT points and their id through vpKltOpencv.
1094 */
getKltImagePointsWithId() const1095 std::map<int, vpImagePoint> vpMbGenericTracker::getKltImagePointsWithId() const
1096 {
1097   std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.find(m_referenceCameraName);
1098   if (it != m_mapOfTrackers.end()) {
1099     TrackerWrapper *tracker = it->second;
1100     return tracker->getKltImagePointsWithId();
1101   } else {
1102     std::cerr << "Cannot find the reference camera: " << m_referenceCameraName << "!" << std::endl;
1103   }
1104 
1105   return std::map<int, vpImagePoint>();
1106 }
1107 
1108 /*!
1109   Get the erosion of the mask used on the Model faces.
1110 
1111   \return The erosion for the reference camera.
1112 */
getKltMaskBorder() const1113 unsigned int vpMbGenericTracker::getKltMaskBorder() const
1114 {
1115   std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.find(m_referenceCameraName);
1116   if (it != m_mapOfTrackers.end()) {
1117     TrackerWrapper *tracker = it->second;
1118     return tracker->getKltMaskBorder();
1119   } else {
1120     std::cerr << "Cannot find the reference camera: " << m_referenceCameraName << "!" << std::endl;
1121   }
1122 
1123   return 0;
1124 }
1125 
1126 /*!
1127   Get number of KLT points for the reference camera.
1128 
1129   \return Number of KLT points for the reference camera.
1130 */
getKltNbPoints() const1131 int vpMbGenericTracker::getKltNbPoints() const
1132 {
1133   std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.find(m_referenceCameraName);
1134   if (it != m_mapOfTrackers.end()) {
1135     TrackerWrapper *tracker = it->second;
1136     return tracker->getKltNbPoints();
1137   } else {
1138     std::cerr << "Cannot find the reference camera: " << m_referenceCameraName << "!" << std::endl;
1139   }
1140 
1141   return 0;
1142 }
1143 
1144 /*!
1145   Get the klt tracker at the current state for the reference camera.
1146 
1147   \return klt tracker.
1148 */
getKltOpencv() const1149 vpKltOpencv vpMbGenericTracker::getKltOpencv() const
1150 {
1151   std::map<std::string, TrackerWrapper *>::const_iterator it_tracker = m_mapOfTrackers.find(m_referenceCameraName);
1152 
1153   if (it_tracker != m_mapOfTrackers.end()) {
1154     TrackerWrapper *tracker;
1155     tracker = it_tracker->second;
1156     return tracker->getKltOpencv();
1157   } else {
1158     std::cerr << "Cannot find the reference camera: " << m_referenceCameraName << "!" << std::endl;
1159   }
1160 
1161   return vpKltOpencv();
1162 }
1163 
1164 /*!
1165   Get the klt tracker at the current state.
1166 
1167   \param klt1 : Klt tracker for the first camera.
1168   \param klt2 : Klt tracker for the second camera.
1169 
1170   \note This function assumes a stereo configuration of the generic tracker.
1171 */
getKltOpencv(vpKltOpencv & klt1,vpKltOpencv & klt2) const1172 void vpMbGenericTracker::getKltOpencv(vpKltOpencv &klt1, vpKltOpencv &klt2) const
1173 {
1174   if (m_mapOfTrackers.size() == 2) {
1175     std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
1176     klt1 = it->second->getKltOpencv();
1177     ++it;
1178 
1179     klt2 = it->second->getKltOpencv();
1180   } else {
1181     std::cerr << "The tracker is not set as a stereo configuration! There are " << m_mapOfTrackers.size() << " cameras!"
1182               << std::endl;
1183   }
1184 }
1185 
1186 /*!
1187   Get the klt tracker at the current state.
1188 
1189   \param mapOfKlts : Map of klt trackers.
1190 */
getKltOpencv(std::map<std::string,vpKltOpencv> & mapOfKlts) const1191 void vpMbGenericTracker::getKltOpencv(std::map<std::string, vpKltOpencv> &mapOfKlts) const
1192 {
1193   mapOfKlts.clear();
1194 
1195   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
1196        it != m_mapOfTrackers.end(); ++it) {
1197     TrackerWrapper *tracker = it->second;
1198     mapOfKlts[it->first] = tracker->getKltOpencv();
1199   }
1200 }
1201 
1202 #if (VISP_HAVE_OPENCV_VERSION >= 0x020408)
1203 /*!
1204   Get the current list of KLT points for the reference camera.
1205 
1206    \return the list of KLT points through vpKltOpencv.
1207 */
getKltPoints() const1208 std::vector<cv::Point2f> vpMbGenericTracker::getKltPoints() const
1209 {
1210   std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.find(m_referenceCameraName);
1211   if (it != m_mapOfTrackers.end()) {
1212     TrackerWrapper *tracker = it->second;
1213     return tracker->getKltPoints();
1214   } else {
1215     std::cerr << "Cannot find the reference camera: " << m_referenceCameraName << "!" << std::endl;
1216   }
1217 
1218   return std::vector<cv::Point2f>();
1219 }
1220 #endif
1221 
1222 /*!
1223   Get the threshold for the acceptation of a point.
1224 
1225   \return threshold_outlier : Threshold for the weight below which a point is
1226   rejected.
1227 */
getKltThresholdAcceptation() const1228 double vpMbGenericTracker::getKltThresholdAcceptation() const { return m_thresholdOutlier; }
1229 #endif
1230 
1231 /*!
1232   Get the list of the circles tracked for the specified level. Each circle
1233   contains the list of the vpMeSite.
1234 
1235   \throw vpException::dimensionError if the second parameter does not
1236   correspond to an used level.
1237 
1238   \param circlesList : The list of the circles of the model.
1239   \param level : Level corresponding to the list to return.
1240 
1241   \note Multi-scale moving edge tracking is not possible, scale level=0 must be used.
1242 */
getLcircle(std::list<vpMbtDistanceCircle * > & circlesList,unsigned int level) const1243 void vpMbGenericTracker::getLcircle(std::list<vpMbtDistanceCircle *> &circlesList,
1244                                     unsigned int level) const
1245 {
1246   std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.find(m_referenceCameraName);
1247   if (it != m_mapOfTrackers.end()) {
1248     it->second->getLcircle(circlesList, level);
1249   } else {
1250     std::cerr << "The reference camera: " << m_referenceCameraName << " does not exist!" << std::endl;
1251   }
1252 }
1253 
1254 /*!
1255   Get the list of the circles tracked for the specified level. Each circle
1256   contains the list of the vpMeSite.
1257 
1258   \throw vpException::dimensionError if the second parameter does not
1259   correspond to an used level.
1260 
1261   \param cameraName : Camera name for which we want to get the list of vpMbtDistanceCircle.
1262   \param circlesList : The list of the circles of the model.
1263   \param level : Level corresponding to the list to return.
1264 
1265   \note Multi-scale moving edge tracking is not possible, scale level=0 must be used.
1266 */
getLcircle(const std::string & cameraName,std::list<vpMbtDistanceCircle * > & circlesList,unsigned int level) const1267 void vpMbGenericTracker::getLcircle(const std::string &cameraName, std::list<vpMbtDistanceCircle *> &circlesList,
1268                                     unsigned int level) const
1269 {
1270   std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.find(cameraName);
1271   if (it != m_mapOfTrackers.end()) {
1272     it->second->getLcircle(circlesList, level);
1273   } else {
1274     std::cerr << "The camera: " << cameraName << " does not exist!" << std::endl;
1275   }
1276 }
1277 
1278 /*!
1279   Get the list of the cylinders tracked for the specified level. Each cylinder
1280   contains the list of the vpMeSite.
1281 
1282   \throw vpException::dimensionError if the second parameter does not
1283   correspond to an used level.
1284 
1285   \param cylindersList : The list of the cylinders of the model.
1286   \param level : Level corresponding to the list to return.
1287 
1288   \note Multi-scale moving edge tracking is not possible, scale level=0 must be used.
1289 */
getLcylinder(std::list<vpMbtDistanceCylinder * > & cylindersList,unsigned int level) const1290 void vpMbGenericTracker::getLcylinder(std::list<vpMbtDistanceCylinder *> &cylindersList,
1291                                       unsigned int level) const
1292 {
1293   std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.find(m_referenceCameraName);
1294   if (it != m_mapOfTrackers.end()) {
1295     it->second->getLcylinder(cylindersList, level);
1296   } else {
1297     std::cerr << "The reference camera: " << m_referenceCameraName << " does not exist!" << std::endl;
1298   }
1299 }
1300 
1301 /*!
1302   Get the list of the cylinders tracked for the specified level. Each cylinder
1303   contains the list of the vpMeSite.
1304 
1305   \throw vpException::dimensionError if the second parameter does not
1306   correspond to an used level.
1307 
1308   \param cameraName : Camera name for which we want to get the list of vpMbtDistanceCylinder.
1309   \param cylindersList : The list of the cylinders of the model.
1310   \param level : Level corresponding to the list to return.
1311 
1312   \note Multi-scale moving edge tracking is not possible, scale level=0 must be used.
1313 */
getLcylinder(const std::string & cameraName,std::list<vpMbtDistanceCylinder * > & cylindersList,unsigned int level) const1314 void vpMbGenericTracker::getLcylinder(const std::string &cameraName, std::list<vpMbtDistanceCylinder *> &cylindersList,
1315                                       unsigned int level) const
1316 {
1317   std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.find(cameraName);
1318   if (it != m_mapOfTrackers.end()) {
1319     it->second->getLcylinder(cylindersList, level);
1320   } else {
1321     std::cerr << "The camera: " << cameraName << " does not exist!" << std::endl;
1322   }
1323 }
1324 
1325 /*!
1326   Get the list of the lines tracked for the specified level. Each line
1327   contains the list of the vpMeSite.
1328 
1329   \throw vpException::dimensionError if the second parameter does not
1330   correspond to an used level.
1331 
1332   \param linesList : The list of the lines of the model.
1333   \param level : Level corresponding to the list to return.
1334 
1335   \note Multi-scale moving edge tracking is not possible, scale level=0 must be used.
1336 */
getLline(std::list<vpMbtDistanceLine * > & linesList,unsigned int level) const1337 void vpMbGenericTracker::getLline(std::list<vpMbtDistanceLine *> &linesList,
1338                                   unsigned int level) const
1339 {
1340   std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.find(m_referenceCameraName);
1341 
1342   if (it != m_mapOfTrackers.end()) {
1343     it->second->getLline(linesList, level);
1344   } else {
1345     std::cerr << "The reference camera: " << m_referenceCameraName << " does not exist!" << std::endl;
1346   }
1347 }
1348 
1349 /*!
1350   Get the list of the lines tracked for the specified level. Each line
1351   contains the list of the vpMeSite.
1352 
1353   \throw vpException::dimensionError if the second parameter does not
1354   correspond to an used level.
1355 
1356   \param cameraName : Camera name for which we want to get the list of vpMbtDistanceLine.
1357   \param linesList : The list of the lines of the model.
1358   \param level : Level corresponding to the list to return.
1359 
1360   \note Multi-scale moving edge tracking is not possible, scale level=0 must be used.
1361 */
getLline(const std::string & cameraName,std::list<vpMbtDistanceLine * > & linesList,unsigned int level) const1362 void vpMbGenericTracker::getLline(const std::string &cameraName, std::list<vpMbtDistanceLine *> &linesList,
1363                                   unsigned int level) const
1364 {
1365   std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.find(cameraName);
1366   if (it != m_mapOfTrackers.end()) {
1367     it->second->getLline(linesList, level);
1368   } else {
1369     std::cerr << "The camera: " << cameraName << " does not exist!" << std::endl;
1370   }
1371 }
1372 
1373 /*!
1374   Get primitive parameters to display the object CAD model for the reference camera.
1375 
1376   It can be used to display the 3D model with a render engine of your choice.
1377 
1378   \param width : Image width.
1379   \param height : Image height.
1380   \param cMo : Pose used to project the 3D model into the image.
1381   \param cam : The camera parameters.
1382   \param displayFullModel : If true, the line is displayed even if it is not visible.
1383 
1384   \return List of primitives parameters corresponding to the reference camera in order
1385   to display the model to a given pose with camera parameters.
1386   The first element of the vector indicates the type of parameters: 0 for a line and 1 for an ellipse.
1387   Then the second element gives the corresponding parameters.
1388   - Line parameters are: `<primitive id (here 0 for line)>`, `<pt_start.i()>`, `<pt_start.j()>`,
1389   `<pt_end.i()>`, `<pt_end.j()>`.
1390   - Ellipse parameters are: `<primitive id (here 1 for ellipse)>`, `<pt_center.i()>`, `<pt_center.j()>`,
1391   `<n_20>`, `<n_11>`, `<n_02>` where `<n_ij>` are the second order centered moments of the ellipse
1392   normalized by its area (i.e., such that \f$n_{ij} = \mu_{ij}/a\f$ where \f$\mu_{ij}\f$ are the centered moments and a the area).
1393 
1394   When tracking is performed using multiple cameras, you should rather use
1395   getModelForDisplay(std::map<std::string, std::vector<std::vector<double> > > &, const std::map<std::string, unsigned int> &, const std::map<std::string, unsigned int> &, const std::map<std::string, vpHomogeneousMatrix> &, const std::map<std::string, vpCameraParameters> &, bool)
1396 
1397   \sa getFeaturesForDisplay()
1398 */
getModelForDisplay(unsigned int width,unsigned int height,const vpHomogeneousMatrix & cMo,const vpCameraParameters & cam,bool displayFullModel)1399 std::vector<std::vector<double> > vpMbGenericTracker::getModelForDisplay(unsigned int width, unsigned int height,
1400                                                                          const vpHomogeneousMatrix &cMo,
1401                                                                          const vpCameraParameters &cam,
1402                                                                          bool displayFullModel)
1403 {
1404   std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.find(m_referenceCameraName);
1405 
1406   if (it != m_mapOfTrackers.end()) {
1407     return it->second->getModelForDisplay(width, height, cMo, cam, displayFullModel);
1408   } else {
1409     std::cerr << "The reference camera: " << m_referenceCameraName << " does not exist!" << std::endl;
1410   }
1411 
1412   return std::vector<std::vector<double> >();
1413 }
1414 
1415 /*!
1416   Get primitive parameters to display the object CAD model for the multiple cameras.
1417 
1418   It can be used to display the 3D model with a render engine of your choice.
1419 
1420   Each first element of the map correspond to the camera name.
1421 
1422   \param mapOfModels : Map of models. The second element of the map contains a list of primitives parameters
1423   to display the model at a given pose with corresponding camera parameters.
1424   The first element of the vector indicates the type of parameters: 0 for a line and 1 for an ellipse.
1425   Then the second element gives the corresponding parameters.
1426   - Line parameters are: `<primitive id (here 0 for line)>`, `<pt_start.i()>`, `<pt_start.j()>`,
1427   `<pt_end.i()>`, `<pt_end.j()>`.
1428   - Ellipse parameters are: `<primitive id (here 1 for ellipse)>`, `<pt_center.i()>`, `<pt_center.j()>`,
1429   `<n_20>`, `<n_11>`, `<n_02>` where `<n_ij>` are the second order centered moments of the ellipse
1430   normalized by its area (i.e., such that \f$n_{ij} = \mu_{ij}/a\f$ where \f$\mu_{ij}\f$ are the centered moments and a the area).
1431   \param mapOfwidths : Map of images width.
1432   \param mapOfheights : Map of images height.
1433   \param mapOfcMos : Map of poses used to project the 3D model into the images.
1434   \param mapOfCams : The camera parameters.
1435   \param displayFullModel : If true, the line is displayed even if it is not visible.
1436 
1437   If you are using a single camera you should rather use
1438   getModelForDisplay(unsigned int, unsigned int, const vpHomogeneousMatrix &, const vpCameraParameters &, bool)
1439 
1440   \sa getFeaturesForDisplay(std::map<std::string, std::vector<std::vector<double> > > &)
1441 */
getModelForDisplay(std::map<std::string,std::vector<std::vector<double>>> & mapOfModels,const std::map<std::string,unsigned int> & mapOfwidths,const std::map<std::string,unsigned int> & mapOfheights,const std::map<std::string,vpHomogeneousMatrix> & mapOfcMos,const std::map<std::string,vpCameraParameters> & mapOfCams,bool displayFullModel)1442 void vpMbGenericTracker::getModelForDisplay(std::map<std::string, std::vector<std::vector<double> > > &mapOfModels,
1443                                             const std::map<std::string, unsigned int> &mapOfwidths,
1444                                             const std::map<std::string, unsigned int> &mapOfheights,
1445                                             const std::map<std::string, vpHomogeneousMatrix> &mapOfcMos,
1446                                             const std::map<std::string, vpCameraParameters> &mapOfCams,
1447                                             bool displayFullModel)
1448 {
1449   // Clear the input map
1450   mapOfModels.clear();
1451 
1452   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
1453        it != m_mapOfTrackers.end(); ++it) {
1454     std::map<std::string, unsigned int>::const_iterator findWidth = mapOfwidths.find(it->first);
1455     std::map<std::string, unsigned int>::const_iterator findHeight = mapOfheights.find(it->first);
1456     std::map<std::string, vpHomogeneousMatrix>::const_iterator findcMo = mapOfcMos.find(it->first);
1457     std::map<std::string, vpCameraParameters>::const_iterator findCam = mapOfCams.find(it->first);
1458 
1459     if (findWidth != mapOfwidths.end() &&
1460         findHeight != mapOfheights.end() &&
1461         findcMo != mapOfcMos.end() &&
1462         findCam != mapOfCams.end()) {
1463       mapOfModels[it->first] = it->second->getModelForDisplay(findWidth->second, findHeight->second,
1464                                                               findcMo->second, findCam->second, displayFullModel);
1465     }
1466   }
1467 }
1468 
1469 /*!
1470   Get the moving edge parameters for the reference camera.
1471 
1472   \return an instance of the moving edge parameters used by the tracker.
1473 */
getMovingEdge() const1474 vpMe vpMbGenericTracker::getMovingEdge() const
1475 {
1476   std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.find(m_referenceCameraName);
1477 
1478   if (it != m_mapOfTrackers.end()) {
1479     return it->second->getMovingEdge();
1480   } else {
1481     std::cerr << "The reference camera: " << m_referenceCameraName << " does not exist!" << std::endl;
1482   }
1483 
1484   return vpMe();
1485 }
1486 
1487 /*!
1488   Get the moving edge parameters.
1489 
1490   \param me1 : Moving edge parameters for the first camera.
1491   \param me2 : Moving edge parameters for the second camera.
1492 
1493   \note This function assumes a stereo configuration of the generic tracker.
1494 */
getMovingEdge(vpMe & me1,vpMe & me2) const1495 void vpMbGenericTracker::getMovingEdge(vpMe &me1, vpMe &me2) const
1496 {
1497   if (m_mapOfTrackers.size() == 2) {
1498     std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
1499     it->second->getMovingEdge(me1);
1500     ++it;
1501 
1502     it->second->getMovingEdge(me2);
1503   } else {
1504     std::cerr << "The tracker is not set as a stereo configuration! There are " << m_mapOfTrackers.size() << " cameras!"
1505               << std::endl;
1506   }
1507 }
1508 
1509 /*!
1510   Get the moving edge parameters for all the cameras
1511 
1512   \param mapOfMovingEdges : Map of moving edge parameters for all the cameras.
1513 */
getMovingEdge(std::map<std::string,vpMe> & mapOfMovingEdges) const1514 void vpMbGenericTracker::getMovingEdge(std::map<std::string, vpMe> &mapOfMovingEdges) const
1515 {
1516   mapOfMovingEdges.clear();
1517 
1518   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
1519        it != m_mapOfTrackers.end(); ++it) {
1520     TrackerWrapper *tracker = it->second;
1521     mapOfMovingEdges[it->first] = tracker->getMovingEdge();
1522   }
1523 }
1524 
1525 /*!
1526   Return the number of good points (vpMeSite) tracked. A good point is a
1527   vpMeSite with its flag "state" equal to 0. Only these points are used
1528   during the virtual visual servoing stage.
1529 
1530   \param level : Pyramid level to consider.
1531 
1532   \exception vpException::dimensionError if level does not represent a used
1533   level.
1534 
1535   \return the number of good points for the reference camera.
1536 
1537   \note Multi-scale moving edge tracking is not possible, scale level=0 must
1538   be used.
1539 
1540   \sa getNbFeaturesEdge()
1541 */
getNbPoints(unsigned int level) const1542 unsigned int vpMbGenericTracker::getNbPoints(unsigned int level) const
1543 {
1544   std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.find(m_referenceCameraName);
1545 
1546   unsigned int nbGoodPoints = 0;
1547   if (it != m_mapOfTrackers.end()) {
1548 
1549     nbGoodPoints = it->second->getNbPoints(level);
1550   } else {
1551     std::cerr << "The reference camera: " << m_referenceCameraName << " does not exist!" << std::endl;
1552   }
1553 
1554   return nbGoodPoints;
1555 }
1556 
1557 /*!
1558   Return the number of good points (vpMeSite) tracked. A good point is a
1559   vpMeSite with its flag "state" equal to 0. Only these points are used
1560   during the virtual visual servoing stage.
1561 
1562   \param mapOfNbPoints : Map of number of good points (vpMeSite) tracked for
1563   all the cameras.
1563   \param level : Pyramid level to consider.
1564 
1565   \exception vpException::dimensionError if level does not represent a used
1566   level.
1567 
1568   \note Multi-scale moving edge tracking is not possible, scale level=0 must
1569   be used.
1570 */
getNbPoints(std::map<std::string,unsigned int> & mapOfNbPoints,unsigned int level) const1571 void vpMbGenericTracker::getNbPoints(std::map<std::string, unsigned int> &mapOfNbPoints, unsigned int level) const
1572 {
1573   mapOfNbPoints.clear();
1574 
1575   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
1576        it != m_mapOfTrackers.end(); ++it) {
1577     TrackerWrapper *tracker = it->second;
1578     mapOfNbPoints[it->first] = tracker->getNbPoints(level);
1579   }
1580 }
1581 
1582 /*!
1583   Get the number of polygons (faces) representing the object to track.
1584 
1585   \return Number of polygons for the reference camera.
1586 */
getNbPolygon() const1587 unsigned int vpMbGenericTracker::getNbPolygon() const
1588 {
1589   std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.find(m_referenceCameraName);
1590   if (it != m_mapOfTrackers.end()) {
1591     return it->second->getNbPolygon();
1592   }
1593 
1594   std::cerr << "The reference camera: " << m_referenceCameraName << " does not exist!" << std::endl;
1595   return 0;
1596 }
1597 
1598 /*!
1599   Get the number of polygons (faces) representing the object to track.
1600 
1601   \param mapOfNbPolygons : Map of the number of polygons for all the cameras.
1602 */
getNbPolygon(std::map<std::string,unsigned int> & mapOfNbPolygons) const1603 void vpMbGenericTracker::getNbPolygon(std::map<std::string, unsigned int> &mapOfNbPolygons) const
1604 {
1605   mapOfNbPolygons.clear();
1606 
1607   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
1608        it != m_mapOfTrackers.end(); ++it) {
1609     TrackerWrapper *tracker = it->second;
1610     mapOfNbPolygons[it->first] = tracker->getNbPolygon();
1611   }
1612 }
1613 
1614 /*!
1615   Return the polygon (face) "index" for the reference camera.
1616 
1617   \exception vpException::dimensionError if index does not represent a good
1618   polygon.
1619 
1620   \param index : Index of the polygon to return.
1621   \return Pointer to the polygon index for the reference camera or NULL in
1622   case of problem.
1623 */
getPolygon(unsigned int index)1624 vpMbtPolygon *vpMbGenericTracker::getPolygon(unsigned int index)
1625 {
1626   std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.find(m_referenceCameraName);
1627   if (it != m_mapOfTrackers.end()) {
1628     return it->second->getPolygon(index);
1629   }
1630 
1631   std::cerr << "The reference camera: " << m_referenceCameraName << " does not exist!" << std::endl;
1632   return NULL;
1633 }
1634 
1635 /*!
1636   Return the polygon (face) "index" for the specified camera.
1637 
1638   \exception vpException::dimensionError if index does not represent a good
1639   polygon.
1640 
1641   \param cameraName : Name of the camera to return the polygon.
1642   \param index : Index of the polygon to return.
1643   \return Pointer to the polygon index for the specified camera or NULL in
1644   case of problem.
1645 */
getPolygon(const std::string & cameraName,unsigned int index)1646 vpMbtPolygon *vpMbGenericTracker::getPolygon(const std::string &cameraName, unsigned int index)
1647 {
1648   std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.find(cameraName);
1649   if (it != m_mapOfTrackers.end()) {
1650     return it->second->getPolygon(index);
1651   }
1652 
1653   std::cerr << "The camera: " << cameraName << " does not exist!" << std::endl;
1654   return NULL;
1655 }
1656 
1657 /*!
1658   Get the list of polygons faces (a vpPolygon representing the projection of
1659   the face in the image and a list of face corners in 3D), with the
1660   possibility to order by distance to the camera or to use the visibility
1661   check to consider if the polygon face must be retrieved or not.
1662 
1663   \param orderPolygons : If true, the resulting list is ordered from the
1664   nearest polygon faces to the farther. \param useVisibility : If true, only
1665   visible faces will be retrieved. \param clipPolygon : If true, the polygons
1666   will be clipped according to the clipping flags set in vpMbTracker. \return
1667   A pair object containing the list of vpPolygon and the list of face corners.
1668 
1669   \note This function will return the 2D polygons faces and 3D face points
1670   only for the reference camera.
1671 */
1672 std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > >
getPolygonFaces(bool orderPolygons,bool useVisibility,bool clipPolygon)1673 vpMbGenericTracker::getPolygonFaces(bool orderPolygons, bool useVisibility, bool clipPolygon)
1674 {
1675   std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > polygonFaces;
1676 
1677   std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.find(m_referenceCameraName);
1678   if (it != m_mapOfTrackers.end()) {
1679     TrackerWrapper *tracker = it->second;
1680     polygonFaces = tracker->getPolygonFaces(orderPolygons, useVisibility, clipPolygon);
1681   } else {
1682     std::cerr << "Cannot find the reference camera: " << m_referenceCameraName << "!" << std::endl;
1683   }
1684 
1685   return polygonFaces;
1686 }
1687 
1688 /*!
1689   Get the list of polygons faces (a vpPolygon representing the projection of
1690   the face in the image and a list of face corners in 3D), with the
1691   possibility to order by distance to the camera or to use the visibility
1692   check to consider if the polygon face must be retrieved or not.
1693 
1694   \param mapOfPolygons : Map of 2D polygon faces.
1695   \param mapOfPoints : Map of face 3D points.
1696   \param orderPolygons : If true, the resulting list is ordered from the
1697   nearest polygon faces to the farther. \param useVisibility : If true, only
1698   visible faces will be retrieved. \param clipPolygon : If true, the polygons
1699   will be clipped according to the clipping flags set in vpMbTracker. \return
1700   A pair object containing the list of vpPolygon and the list of face corners.
1701 
1702   \note This function will return the 2D polygons faces and 3D face points
1703   only for all the cameras.
1704 */
getPolygonFaces(std::map<std::string,std::vector<vpPolygon>> & mapOfPolygons,std::map<std::string,std::vector<std::vector<vpPoint>>> & mapOfPoints,bool orderPolygons,bool useVisibility,bool clipPolygon)1705 void vpMbGenericTracker::getPolygonFaces(std::map<std::string, std::vector<vpPolygon> > &mapOfPolygons,
1706                                          std::map<std::string, std::vector<std::vector<vpPoint> > > &mapOfPoints,
1707                                          bool orderPolygons, bool useVisibility, bool clipPolygon)
1708 {
1709   mapOfPolygons.clear();
1710   mapOfPoints.clear();
1711 
1712   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
1713        it != m_mapOfTrackers.end(); ++it) {
1714     TrackerWrapper *tracker = it->second;
1715     std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > polygonFaces =
1716         tracker->getPolygonFaces(orderPolygons, useVisibility, clipPolygon);
1717 
1718     mapOfPolygons[it->first] = polygonFaces.first;
1719     mapOfPoints[it->first] = polygonFaces.second;
1720   }
1721 }
1722 
/*!
  Get the current pose between the object and the reference camera.

  \param cMo : Pose of the reference camera, filled by the base-class accessor.
*/
void vpMbGenericTracker::getPose(vpHomogeneousMatrix &cMo) const
{
  // Thin delegate to the base class accessor (returns the reference pose m_cMo).
  vpMbTracker::getPose(cMo);
}
1727 
1728 /*!
1729   Get the current pose between the object and the cameras.
1730 
1731   \param c1Mo : The camera pose for the first camera.
1732   \param c2Mo : The camera pose for the second camera.
1733 
1734   \note This function assumes a stereo configuration of the generic tracker.
1735 */
getPose(vpHomogeneousMatrix & c1Mo,vpHomogeneousMatrix & c2Mo) const1736 void vpMbGenericTracker::getPose(vpHomogeneousMatrix &c1Mo, vpHomogeneousMatrix &c2Mo) const
1737 {
1738   if (m_mapOfTrackers.size() == 2) {
1739     std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
1740     it->second->getPose(c1Mo);
1741     ++it;
1742 
1743     it->second->getPose(c2Mo);
1744   } else {
1745     std::cerr << "The tracker is not set as a stereo configuration! There are " << m_mapOfTrackers.size() << " cameras!"
1746               << std::endl;
1747   }
1748 }
1749 
1750 /*!
1751   Get the current pose between the object and the cameras.
1752 
1753   \param mapOfCameraPoses : The map of camera poses for all the cameras.
1754 */
getPose(std::map<std::string,vpHomogeneousMatrix> & mapOfCameraPoses) const1755 void vpMbGenericTracker::getPose(std::map<std::string, vpHomogeneousMatrix> &mapOfCameraPoses) const
1756 {
1757   // Clear the map
1758   mapOfCameraPoses.clear();
1759 
1760   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
1761        it != m_mapOfTrackers.end(); ++it) {
1762     TrackerWrapper *tracker = it->second;
1763     tracker->getPose(mapOfCameraPoses[it->first]);
1764   }
1765 }
1766 
1767 /*!
1768   Get name of the reference camera.
1769 */
/*!
  Get name of the reference camera.

  \return The reference camera name used to index the internal tracker map.
*/
std::string vpMbGenericTracker::getReferenceCameraName() const
{
  return  m_referenceCameraName;
}
1774 
1775 /*!
1776   The tracker type for the reference camera.
1777 */
getTrackerType() const1778 int vpMbGenericTracker::getTrackerType() const
1779 {
1780   std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.find(m_referenceCameraName);
1781   if (it != m_mapOfTrackers.end()) {
1782     TrackerWrapper *tracker = it->second;
1783     return tracker->getTrackerType();
1784   } else {
1785     throw vpException(vpTrackingException::badValue, "Cannot find the reference camera: %s!",
1786                       m_referenceCameraName.c_str());
1787   }
1788 }
1789 
/*!
  Initialize every sub-tracker from the current reference pose.

  \param I : Grayscale image passed to each sub-tracker's init().
*/
void vpMbGenericTracker::init(const vpImage<unsigned char> &I)
{
  for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
       it != m_mapOfTrackers.end(); ++it) {
    TrackerWrapper *tracker = it->second;
    // Express the reference pose m_cMo in this camera's frame using the
    // registered camera transformation matrix.
    // NOTE(review): operator[] default-inserts an identity matrix when no
    // transformation was registered for this camera — confirm this is intended.
    tracker->m_cMo = m_mapOfCameraTransformationMatrix[it->first] * m_cMo;
    tracker->init(I);
  }
}
1799 
/*!
  Not usable on the generic tracker: always throws.

  Circle primitives are presumably handled by the per-camera sub-trackers
  rather than by this aggregating class — TODO confirm against TrackerWrapper.

  \exception vpException::fatalError : Always raised.
*/
void vpMbGenericTracker::initCircle(const vpPoint & /*p1*/, const vpPoint & /*p2*/, const vpPoint & /*p3*/,
                                    double /*radius*/, int /*idFace*/, const std::string & /*name*/)
{
  throw vpException(vpException::fatalError, "vpMbGenericTracker::initCircle() should not be called!");
}
1805 
1806 #ifdef VISP_HAVE_MODULE_GUI
1807 
1808 /*!
1809   Initialise the tracker by clicking in the reference image on the pixels that
1810   correspond to the 3D points whose coordinates are extracted from a file. In
1811   this file, comments starting with # character are allowed. Notice that 3D
1812   point coordinates are expressed in meter in the object frame with their X, Y
1813   and Z values.
1814 
1815   The structure of this file is the following:
1816 
1817   \code
1818   # 3D point coordinates
1819   4                 # Number of points in the file (minimum is four)
1820   0.01 0.01 0.01    # \
1821   ...               #  | 3D coordinates in the object frame (X, Y, Z)
1822   0.01 -0.01 -0.01  # /
1823   \endcode
1824 
1825   \param I1 : Input grayscale image for the first camera.
1826   \param I2 : Input grayscale image for the second camera.
1827   \param initFile1 : File containing the coordinates of at least 4 3D points
1828   the user has to click in the image acquired by the first camera. This file
1829   should have .init extension (ie teabox.init).
1830   \param initFile2 : File
1831   containing the coordinates of at least 4 3D points the user has to click in
1832   the image acquired by the second camera. This file should have .init
1833   extension.
1834   \param displayHelp : Optional display of an image that should
1835   have the same generic name as the init file (ie teabox.ppm, or teabox.png). This image may
1836   be used to show where to click. This functionality is only available if
1837   visp_io module is used. Supported image formats are .pgm, .ppm, .png, .jpeg.
1838   \param T1 : optional transformation matrix to transform 3D points in \a initFile1
1839   expressed in the original object frame to the desired object frame.
1840   \param T2 : optional transformation matrix to transform 3D points in \a initFile2
1841   expressed in the original object frame to the desired object frame
1842   (T2==T1 if the init points are expressed in the same object frame which should be the case
1843   most of the time).
1844 
1845   \exception vpException::ioError : The file specified in \e initFile doesn't
1846   exist.
1847 
1848   \note This function assumes a stereo configuration of the generic tracker.
1849 */
initClick(const vpImage<unsigned char> & I1,const vpImage<unsigned char> & I2,const std::string & initFile1,const std::string & initFile2,bool displayHelp,const vpHomogeneousMatrix & T1,const vpHomogeneousMatrix & T2)1850 void vpMbGenericTracker::initClick(const vpImage<unsigned char> &I1, const vpImage<unsigned char> &I2,
1851                                    const std::string &initFile1, const std::string &initFile2, bool displayHelp,
1852                                    const vpHomogeneousMatrix &T1, const vpHomogeneousMatrix &T2)
1853 {
1854   if (m_mapOfTrackers.size() == 2) {
1855     std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
1856     TrackerWrapper *tracker = it->second;
1857     tracker->initClick(I1, initFile1, displayHelp, T1);
1858 
1859     ++it;
1860 
1861     tracker = it->second;
1862     tracker->initClick(I2, initFile2, displayHelp, T2);
1863 
1864     it = m_mapOfTrackers.find(m_referenceCameraName);
1865     if (it != m_mapOfTrackers.end()) {
1866       tracker = it->second;
1867 
1868       // Set the reference cMo
1869       tracker->getPose(m_cMo);
1870     }
1871   } else {
1872     throw vpException(vpTrackingException::initializationError,
1873                       "Cannot initClick()! Require two cameras but there are %d cameras!", m_mapOfTrackers.size());
1874   }
1875 }
1876 
1877 /*!
1878   Initialise the tracker by clicking in the reference image on the pixels that
1879   correspond to the 3D points whose coordinates are extracted from a file. In
1880   this file, comments starting with # character are allowed. Notice that 3D
1881   point coordinates are expressed in meter in the object frame with their X, Y
1882   and Z values.
1883 
1884   The structure of this file is the following:
1885 
1886   \code
1887   # 3D point coordinates
1888   4                 # Number of points in the file (minimum is four)
1889   0.01 0.01 0.01    # \
1890   ...               #  | 3D coordinates in the object frame (X, Y, Z)
1891   0.01 -0.01 -0.01  # /
1892   \endcode
1893 
1894   \param I_color1 : Input color image for the first camera.
1895   \param I_color2 : Input color image for the second camera.
1896   \param initFile1 : File containing the coordinates of at least 4 3D points
1897   the user has to click in the image acquired by the first camera. This file
1898   should have .init extension (ie teabox.init).
1899   \param initFile2 : File
1900   containing the coordinates of at least 4 3D points the user has to click in
1901   the image acquired by the second camera. This file should have .init
1902   extension.
1903   \param displayHelp : Optional display of an image that should
1904   have the same generic name as the init file (ie teabox.ppm, or teabox.png). This image may
1905   be used to show where to click. This functionality is only available if
1906   visp_io module is used. Supported image formats are .pgm, .ppm, .png, .jpeg.
1907   \param T1 : optional transformation matrix to transform 3D points in \a initFile1
1908   expressed in the original object frame to the desired object frame.
1909   \param T2 : optional transformation matrix to transform 3D points in \a initFile2
1910   expressed in the original object frame to the desired object frame
1911   (T2==T1 if the init points are expressed in the same object frame which should be the case
1912   most of the time).
1913 
1914   \exception vpException::ioError : The file specified in \e initFile doesn't
1915   exist.
1916 
1917   \note This function assumes a stereo configuration of the generic tracker.
1918 */
initClick(const vpImage<vpRGBa> & I_color1,const vpImage<vpRGBa> & I_color2,const std::string & initFile1,const std::string & initFile2,bool displayHelp,const vpHomogeneousMatrix & T1,const vpHomogeneousMatrix & T2)1919 void vpMbGenericTracker::initClick(const vpImage<vpRGBa> &I_color1, const vpImage<vpRGBa> &I_color2,
1920                                    const std::string &initFile1, const std::string &initFile2, bool displayHelp,
1921                                    const vpHomogeneousMatrix &T1, const vpHomogeneousMatrix &T2)
1922 {
1923   if (m_mapOfTrackers.size() == 2) {
1924     std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
1925     TrackerWrapper *tracker = it->second;
1926     tracker->initClick(I_color1, initFile1, displayHelp, T1);
1927 
1928     ++it;
1929 
1930     tracker = it->second;
1931     tracker->initClick(I_color2, initFile2, displayHelp, T2);
1932 
1933     it = m_mapOfTrackers.find(m_referenceCameraName);
1934     if (it != m_mapOfTrackers.end()) {
1935       tracker = it->second;
1936 
1937       // Set the reference cMo
1938       tracker->getPose(m_cMo);
1939     }
1940   } else {
1941     throw vpException(vpTrackingException::initializationError,
1942                       "Cannot initClick()! Require two cameras but there are %d cameras!", m_mapOfTrackers.size());
1943   }
1944 }
1945 
1946 /*!
1947   Initialise the tracker by clicking in the reference image on the pixels that
1948   correspond to the 3D points whose coordinates are extracted from a file. In
1949   this file, comments starting with # character are allowed. Notice that 3D
1950   point coordinates are expressed in meter in the object frame with their X, Y
1951   and Z values.
1952 
1953   The structure of this file is the following:
1954 
1955   \code
1956   # 3D point coordinates
1957   4                 # Number of points in the file (minimum is four)
1958   0.01 0.01 0.01    # \
1959   ...               #  | 3D coordinates in the object frame (X, Y, Z)
1960   0.01 -0.01 -0.01  # /
1961   \endcode
1962 
1963   The cameras that have not an init file will be automatically initialized but
1964   the camera transformation matrices have to be set before.
1965 
1966   \param mapOfImages : Map of grayscale images.
1967   \param mapOfInitFiles : Map of files containing the points where to click
1968   for each camera.
1969   \param displayHelp : Optional display of an image that should
1970   have the same generic name as the init file (ie teabox.ppm, or teabox.png). This image may
1971   be used to show where to click. This functionality is only available if
1972   visp_io module is used. Supported image formats are .pgm, .ppm, .png, .jpeg.
1973   \param mapOfT : optional map of transformation matrices to transform
1974   3D points in \a mapOfInitFiles expressed in the original object frame to the
1975   desired object frame (if the init points are expressed in the same object frame
1976   which should be the case most of the time, all the transformation matrices are identical).
1977 
1978   \exception vpException::ioError : The file specified in \e initFile doesn't
1979   exist.
1980 
1981   \note Image and init file must be supplied for the reference camera. The
1982   images for all the cameras must be supplied to correctly initialize the
1983   trackers but some init files can be omitted. In this case, they will be
1984   initialized using the pose computed from the reference camera pose and using
1985   the known geometric transformation between each camera (see
1986   setCameraTransformationMatrix()).
1987 */
void vpMbGenericTracker::initClick(const std::map<std::string, const vpImage<unsigned char> *> &mapOfImages,
                                   const std::map<std::string, std::string> &mapOfInitFiles, bool displayHelp,
                                   const std::map<std::string, vpHomogeneousMatrix> &mapOfT)
{
  // The reference camera MUST have both an image and an init file: its
  // click-based pose seeds all the cameras that have no init file of their own.
  std::map<std::string, TrackerWrapper *>::const_iterator it_tracker = m_mapOfTrackers.find(m_referenceCameraName);
  std::map<std::string, const vpImage<unsigned char> *>::const_iterator it_img =
      mapOfImages.find(m_referenceCameraName);
  std::map<std::string, std::string>::const_iterator it_initFile = mapOfInitFiles.find(m_referenceCameraName);

  if (it_tracker != m_mapOfTrackers.end() && it_img != mapOfImages.end() && it_initFile != mapOfInitFiles.end()) {
    TrackerWrapper *tracker = it_tracker->second;
    // Optional extra transformation of the init 3D points for this camera.
    std::map<std::string, vpHomogeneousMatrix>::const_iterator it_T = mapOfT.find(it_tracker->first);
    if (it_T != mapOfT.end())
      tracker->initClick(*it_img->second, it_initFile->second, displayHelp, it_T->second);
    else
      tracker->initClick(*it_img->second, it_initFile->second, displayHelp);
    // Store the clicked pose as the reference pose m_cMo.
    tracker->getPose(m_cMo);
  } else {
    throw vpException(vpTrackingException::initializationError, "Cannot initClick for the reference camera!");
  }

  // Vector of missing initFile for cameras
  std::vector<std::string> vectorOfMissingCameraPoses;

  // Set pose for the specified cameras
  for (it_tracker = m_mapOfTrackers.begin(); it_tracker != m_mapOfTrackers.end(); ++it_tracker) {
    if (it_tracker->first != m_referenceCameraName) {
      it_img = mapOfImages.find(it_tracker->first);
      it_initFile = mapOfInitFiles.find(it_tracker->first);

      if (it_img != mapOfImages.end() && it_initFile != mapOfInitFiles.end()) {
        // InitClick for the current camera
        // NOTE(review): non-reference cameras ignore mapOfT here — only the
        // reference camera's init points get transformed. Confirm intended.
        TrackerWrapper *tracker = it_tracker->second;
        tracker->initClick(*it_img->second, it_initFile->second, displayHelp);
      } else {
        // No init file for this camera: derive its pose later from the
        // reference pose and the camera transformation matrix.
        vectorOfMissingCameraPoses.push_back(it_tracker->first);
      }
    }
  }

  // Init for cameras that do not have an initFile
  for (std::vector<std::string>::const_iterator it = vectorOfMissingCameraPoses.begin();
       it != vectorOfMissingCameraPoses.end(); ++it) {
    it_img = mapOfImages.find(*it);
    std::map<std::string, vpHomogeneousMatrix>::const_iterator it_camTrans =
        m_mapOfCameraTransformationMatrix.find(*it);

    if (it_img != mapOfImages.end() && it_camTrans != m_mapOfCameraTransformationMatrix.end()) {
      // Pose of this camera = (camera transformation) * (reference pose).
      vpHomogeneousMatrix cCurrentMo = it_camTrans->second * m_cMo;
      m_mapOfTrackers[*it]->m_cMo = cCurrentMo;
      m_mapOfTrackers[*it]->init(*it_img->second);
    } else {
      throw vpException(vpTrackingException::initializationError,
                        "Missing image or missing camera transformation "
                        "matrix! Cannot set the pose for camera: %s!",
                        it->c_str());
    }
  }
}
2047 
2048 /*!
2049   Initialise the tracker by clicking in the reference image on the pixels that
2050   correspond to the 3D points whose coordinates are extracted from a file. In
2051   this file, comments starting with # character are allowed. Notice that 3D
2052   point coordinates are expressed in meter in the object frame with their X, Y
2053   and Z values.
2054 
2055   The structure of this file is the following:
2056 
2057   \code
2058   # 3D point coordinates
2059   4                 # Number of points in the file (minimum is four)
2060   0.01 0.01 0.01    # \
2061   ...               #  | 3D coordinates in the object frame (X, Y, Z)
2062   0.01 -0.01 -0.01  # /
2063   \endcode
2064 
  The cameras that do not have an init file will be initialized automatically,
  but the camera transformation matrices have to be set beforehand.
2067 
2068   \param mapOfColorImages : Map of color images.
2069   \param mapOfInitFiles : Map of files containing the points where to click
2070   for each camera.
  \param displayHelp : Optional display of an image that should
2072   have the same generic name as the init file (ie teabox.ppm, or teabox.png). This image may
2073   be used to show where to click. This functionality is only available if
2074   visp_io module is used. Supported image formats are .pgm, .ppm, .png, .jpeg.
2075   \param mapOfT : optional map of transformation matrices to transform
2076   3D points in \a mapOfInitFiles expressed in the original object frame to the
2077   desired object frame (if the init points are expressed in the same object frame
2078   which should be the case most of the time, all the transformation matrices are identical).
2079 
2080   \exception vpException::ioError : The file specified in \e initFile doesn't
2081   exist.
2082 
2083   \note Image and init file must be supplied for the reference camera. The
2084   images for all the cameras must be supplied to correctly initialize the
2085   trackers but some init files can be omitted. In this case, they will be
2086   initialized using the pose computed from the reference camera pose and using
2087   the known geometric transformation between each camera (see
2088   setCameraTransformationMatrix()).
2089 */
initClick(const std::map<std::string,const vpImage<vpRGBa> * > & mapOfColorImages,const std::map<std::string,std::string> & mapOfInitFiles,bool displayHelp,const std::map<std::string,vpHomogeneousMatrix> & mapOfT)2090 void vpMbGenericTracker::initClick(const std::map<std::string, const vpImage<vpRGBa> *> &mapOfColorImages,
2091                                    const std::map<std::string, std::string> &mapOfInitFiles, bool displayHelp,
2092                                    const std::map<std::string, vpHomogeneousMatrix> &mapOfT)
2093 {
2094   std::map<std::string, TrackerWrapper *>::const_iterator it_tracker = m_mapOfTrackers.find(m_referenceCameraName);
2095   std::map<std::string, const vpImage<vpRGBa> *>::const_iterator it_img =
2096       mapOfColorImages.find(m_referenceCameraName);
2097   std::map<std::string, std::string>::const_iterator it_initFile = mapOfInitFiles.find(m_referenceCameraName);
2098 
2099   if (it_tracker != m_mapOfTrackers.end() && it_img != mapOfColorImages.end() && it_initFile != mapOfInitFiles.end()) {
2100     TrackerWrapper *tracker = it_tracker->second;
2101     std::map<std::string, vpHomogeneousMatrix>::const_iterator it_T = mapOfT.find(it_tracker->first);
2102     if (it_T != mapOfT.end())
2103       tracker->initClick(*it_img->second, it_initFile->second, displayHelp, it_T->second);
2104     else
2105       tracker->initClick(*it_img->second, it_initFile->second, displayHelp);
2106     tracker->getPose(m_cMo);
2107   } else {
2108     throw vpException(vpTrackingException::initializationError, "Cannot initClick for the reference camera!");
2109   }
2110 
2111   // Vector of missing initFile for cameras
2112   std::vector<std::string> vectorOfMissingCameraPoses;
2113 
2114   // Set pose for the specified cameras
2115   for (it_tracker = m_mapOfTrackers.begin(); it_tracker != m_mapOfTrackers.end(); ++it_tracker) {
2116     if (it_tracker->first != m_referenceCameraName) {
2117       it_img = mapOfColorImages.find(it_tracker->first);
2118       it_initFile = mapOfInitFiles.find(it_tracker->first);
2119 
2120       if (it_img != mapOfColorImages.end() && it_initFile != mapOfInitFiles.end()) {
2121         // InitClick for the current camera
2122         TrackerWrapper *tracker = it_tracker->second;
2123         tracker->initClick(*it_img->second, it_initFile->second, displayHelp);
2124       } else {
2125         vectorOfMissingCameraPoses.push_back(it_tracker->first);
2126       }
2127     }
2128   }
2129 
2130   // Init for cameras that do not have an initFile
2131   for (std::vector<std::string>::const_iterator it = vectorOfMissingCameraPoses.begin();
2132        it != vectorOfMissingCameraPoses.end(); ++it) {
2133     it_img = mapOfColorImages.find(*it);
2134     std::map<std::string, vpHomogeneousMatrix>::const_iterator it_camTrans =
2135         m_mapOfCameraTransformationMatrix.find(*it);
2136 
2137     if (it_img != mapOfColorImages.end() && it_camTrans != m_mapOfCameraTransformationMatrix.end()) {
2138       vpHomogeneousMatrix cCurrentMo = it_camTrans->second * m_cMo;
2139       m_mapOfTrackers[*it]->m_cMo = cCurrentMo;
2140       vpImageConvert::convert(*it_img->second, m_mapOfTrackers[*it]->m_I);
2141       m_mapOfTrackers[*it]->init(m_mapOfTrackers[*it]->m_I);
2142     } else {
2143       throw vpException(vpTrackingException::initializationError,
2144                         "Missing image or missing camera transformation "
2145                         "matrix! Cannot set the pose for camera: %s!",
2146                         it->c_str());
2147     }
2148   }
2149 }
2150 #endif
2151 
void vpMbGenericTracker::initCylinder(const vpPoint & /*p1*/, const vpPoint & /*p2*/, const double /*radius*/,
                                      const int /*idFace*/, const std::string & /*name*/)
{
  // Deliberate stub: CAO/model loading is delegated to each TrackerWrapper,
  // so this base-class hook must never be reached on the generic tracker.
  throw vpException(vpException::fatalError, "vpMbGenericTracker::initCylinder() should not be called!");
}
2157 
void vpMbGenericTracker::initFaceFromCorners(vpMbtPolygon & /*polygon*/)
{
  // Deliberate stub: face construction is delegated to each TrackerWrapper,
  // so this base-class hook must never be reached on the generic tracker.
  throw vpException(vpException::fatalError, "vpMbGenericTracker::initFaceFromCorners() should not be called!");
}
2162 
void vpMbGenericTracker::initFaceFromLines(vpMbtPolygon & /*polygon*/)
{
  // Deliberate stub: face construction is delegated to each TrackerWrapper,
  // so this base-class hook must never be reached on the generic tracker.
  throw vpException(vpException::fatalError, "vpMbGenericTracker::initFaceFromLines() should not be called!");
}
2167 
2168 /*!
2169   Initialise the tracker by reading 3D point coordinates and the corresponding
2170   2D image point coordinates from a file. Comments starting with # character
2171   are allowed. 3D point coordinates are expressed in meter in the object frame
  with X, Y and Z values. 2D point coordinates are expressed in pixel
2173   coordinates, with first the line and then the column of the pixel in the
2174   image. The structure of this file is the following.
2175   \code
2176  # 3D point coordinates
2177  4                 # Number of 3D points in the file (minimum is four)
2178  0.01 0.01 0.01    #  \
2179  ...               #  | 3D coordinates in meters in the object frame
2180  0.01 -0.01 -0.01  # /
2181  # corresponding 2D point coordinates
2182  4                 # Number of image points in the file (has to be the same as the number of 3D points)
2183  100 200           #  \
2184  ...               #  | 2D coordinates in pixel in the image
2185  50 10  		        #  /
2186   \endcode
2187 
2188   \param I1 : Input grayscale image for the first camera.
2189   \param I2 : Input grayscale image for the second camera.
2190   \param initFile1 : Path to the file containing all the points for the first
2191   camera.
2192   \param initFile2 : Path to the file containing all the points for
2193   the second camera.
2194 
2195   \note This function assumes a stereo configuration of the generic tracker.
2196 */
initFromPoints(const vpImage<unsigned char> & I1,const vpImage<unsigned char> & I2,const std::string & initFile1,const std::string & initFile2)2197 void vpMbGenericTracker::initFromPoints(const vpImage<unsigned char> &I1, const vpImage<unsigned char> &I2,
2198                                         const std::string &initFile1, const std::string &initFile2)
2199 {
2200   if (m_mapOfTrackers.size() == 2) {
2201     std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
2202     TrackerWrapper *tracker = it->second;
2203     tracker->initFromPoints(I1, initFile1);
2204 
2205     ++it;
2206 
2207     tracker = it->second;
2208     tracker->initFromPoints(I2, initFile2);
2209 
2210     it = m_mapOfTrackers.find(m_referenceCameraName);
2211     if (it != m_mapOfTrackers.end()) {
2212       tracker = it->second;
2213 
2214       // Set the reference cMo
2215       tracker->getPose(m_cMo);
2216 
2217       // Set the reference camera parameters
2218       tracker->getCameraParameters(m_cam);
2219     }
2220   } else {
2221     throw vpException(vpTrackingException::initializationError,
2222                       "Cannot initFromPoints()! Require two cameras but "
2223                       "there are %d cameras!",
2224                       m_mapOfTrackers.size());
2225   }
2226 }
2227 
2228 /*!
2229   Initialise the tracker by reading 3D point coordinates and the corresponding
2230   2D image point coordinates from a file. Comments starting with # character
2231   are allowed. 3D point coordinates are expressed in meter in the object frame
  with X, Y and Z values. 2D point coordinates are expressed in pixel
2233   coordinates, with first the line and then the column of the pixel in the
2234   image. The structure of this file is the following.
2235   \code
2236  # 3D point coordinates
2237  4                 # Number of 3D points in the file (minimum is four)
2238  0.01 0.01 0.01    #  \
2239  ...               #  | 3D coordinates in meters in the object frame
2240  0.01 -0.01 -0.01  # /
2241  # corresponding 2D point coordinates
2242  4                 # Number of image points in the file (has to be the same as the number of 3D points)
2243  100 200           #  \
2244  ...               #  | 2D coordinates in pixel in the image
2245  50 10  		        #  /
2246   \endcode
2247 
2248   \param I_color1 : Input color image for the first camera.
2249   \param I_color2 : Input color image for the second camera.
2250   \param initFile1 : Path to the file containing all the points for the first
2251   camera.
2252   \param initFile2 : Path to the file containing all the points for
2253   the second camera.
2254 
2255   \note This function assumes a stereo configuration of the generic tracker.
2256 */
initFromPoints(const vpImage<vpRGBa> & I_color1,const vpImage<vpRGBa> & I_color2,const std::string & initFile1,const std::string & initFile2)2257 void vpMbGenericTracker::initFromPoints(const vpImage<vpRGBa> &I_color1, const vpImage<vpRGBa> &I_color2,
2258                                         const std::string &initFile1, const std::string &initFile2)
2259 {
2260   if (m_mapOfTrackers.size() == 2) {
2261     std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
2262     TrackerWrapper *tracker = it->second;
2263     tracker->initFromPoints(I_color1, initFile1);
2264 
2265     ++it;
2266 
2267     tracker = it->second;
2268     tracker->initFromPoints(I_color2, initFile2);
2269 
2270     it = m_mapOfTrackers.find(m_referenceCameraName);
2271     if (it != m_mapOfTrackers.end()) {
2272       tracker = it->second;
2273 
2274       // Set the reference cMo
2275       tracker->getPose(m_cMo);
2276 
2277       // Set the reference camera parameters
2278       tracker->getCameraParameters(m_cam);
2279     }
2280   } else {
2281     throw vpException(vpTrackingException::initializationError,
2282                       "Cannot initFromPoints()! Require two cameras but "
2283                       "there are %d cameras!",
2284                       m_mapOfTrackers.size());
2285   }
2286 }
2287 
initFromPoints(const std::map<std::string,const vpImage<unsigned char> * > & mapOfImages,const std::map<std::string,std::string> & mapOfInitPoints)2288 void vpMbGenericTracker::initFromPoints(const std::map<std::string, const vpImage<unsigned char> *> &mapOfImages,
2289                                         const std::map<std::string, std::string> &mapOfInitPoints)
2290 {
2291   // Set the reference cMo
2292   std::map<std::string, TrackerWrapper *>::const_iterator it_tracker = m_mapOfTrackers.find(m_referenceCameraName);
2293   std::map<std::string, const vpImage<unsigned char> *>::const_iterator it_img =
2294       mapOfImages.find(m_referenceCameraName);
2295   std::map<std::string, std::string>::const_iterator it_initPoints = mapOfInitPoints.find(m_referenceCameraName);
2296 
2297   if (it_tracker != m_mapOfTrackers.end() && it_img != mapOfImages.end() && it_initPoints != mapOfInitPoints.end()) {
2298     TrackerWrapper *tracker = it_tracker->second;
2299     tracker->initFromPoints(*it_img->second, it_initPoints->second);
2300     tracker->getPose(m_cMo);
2301   } else {
2302     throw vpException(vpTrackingException::initializationError, "Cannot initFromPoints() for the reference camera!");
2303   }
2304 
2305   // Vector of missing initPoints for cameras
2306   std::vector<std::string> vectorOfMissingCameraPoints;
2307 
2308   // Set pose for the specified cameras
2309   for (it_tracker = m_mapOfTrackers.begin(); it_tracker != m_mapOfTrackers.end(); ++it_tracker) {
2310     it_img = mapOfImages.find(it_tracker->first);
2311     it_initPoints = mapOfInitPoints.find(it_tracker->first);
2312 
2313     if (it_img != mapOfImages.end() && it_initPoints != mapOfInitPoints.end()) {
2314       // Set pose
2315       it_tracker->second->initFromPoints(*it_img->second, it_initPoints->second);
2316     } else {
2317       vectorOfMissingCameraPoints.push_back(it_tracker->first);
2318     }
2319   }
2320 
2321   for (std::vector<std::string>::const_iterator it = vectorOfMissingCameraPoints.begin();
2322        it != vectorOfMissingCameraPoints.end(); ++it) {
2323     it_img = mapOfImages.find(*it);
2324     std::map<std::string, vpHomogeneousMatrix>::const_iterator it_camTrans =
2325         m_mapOfCameraTransformationMatrix.find(*it);
2326 
2327     if (it_img != mapOfImages.end() && it_camTrans != m_mapOfCameraTransformationMatrix.end()) {
2328       vpHomogeneousMatrix cCurrentMo = it_camTrans->second * m_cMo;
2329       m_mapOfTrackers[*it]->initFromPose(*it_img->second, cCurrentMo);
2330     } else {
2331       throw vpException(vpTrackingException::initializationError,
2332                         "Missing image or missing camera transformation "
2333                         "matrix! Cannot init the pose for camera: %s!",
2334                         it->c_str());
2335     }
2336   }
2337 }
2338 
initFromPoints(const std::map<std::string,const vpImage<vpRGBa> * > & mapOfColorImages,const std::map<std::string,std::string> & mapOfInitPoints)2339 void vpMbGenericTracker::initFromPoints(const std::map<std::string, const vpImage<vpRGBa> *> &mapOfColorImages,
2340                                         const std::map<std::string, std::string> &mapOfInitPoints)
2341 {
2342   // Set the reference cMo
2343   std::map<std::string, TrackerWrapper *>::const_iterator it_tracker = m_mapOfTrackers.find(m_referenceCameraName);
2344   std::map<std::string, const vpImage<vpRGBa> *>::const_iterator it_img =
2345       mapOfColorImages.find(m_referenceCameraName);
2346   std::map<std::string, std::string>::const_iterator it_initPoints = mapOfInitPoints.find(m_referenceCameraName);
2347 
2348   if (it_tracker != m_mapOfTrackers.end() && it_img != mapOfColorImages.end() && it_initPoints != mapOfInitPoints.end()) {
2349     TrackerWrapper *tracker = it_tracker->second;
2350     tracker->initFromPoints(*it_img->second, it_initPoints->second);
2351     tracker->getPose(m_cMo);
2352   } else {
2353     throw vpException(vpTrackingException::initializationError, "Cannot initFromPoints() for the reference camera!");
2354   }
2355 
2356   // Vector of missing initPoints for cameras
2357   std::vector<std::string> vectorOfMissingCameraPoints;
2358 
2359   // Set pose for the specified cameras
2360   for (it_tracker = m_mapOfTrackers.begin(); it_tracker != m_mapOfTrackers.end(); ++it_tracker) {
2361     it_img = mapOfColorImages.find(it_tracker->first);
2362     it_initPoints = mapOfInitPoints.find(it_tracker->first);
2363 
2364     if (it_img != mapOfColorImages.end() && it_initPoints != mapOfInitPoints.end()) {
2365       // Set pose
2366       it_tracker->second->initFromPoints(*it_img->second, it_initPoints->second);
2367     } else {
2368       vectorOfMissingCameraPoints.push_back(it_tracker->first);
2369     }
2370   }
2371 
2372   for (std::vector<std::string>::const_iterator it = vectorOfMissingCameraPoints.begin();
2373        it != vectorOfMissingCameraPoints.end(); ++it) {
2374     it_img = mapOfColorImages.find(*it);
2375     std::map<std::string, vpHomogeneousMatrix>::const_iterator it_camTrans =
2376         m_mapOfCameraTransformationMatrix.find(*it);
2377 
2378     if (it_img != mapOfColorImages.end() && it_camTrans != m_mapOfCameraTransformationMatrix.end()) {
2379       vpHomogeneousMatrix cCurrentMo = it_camTrans->second * m_cMo;
2380       m_mapOfTrackers[*it]->initFromPose(*it_img->second, cCurrentMo);
2381     } else {
2382       throw vpException(vpTrackingException::initializationError,
2383                         "Missing image or missing camera transformation "
2384                         "matrix! Cannot init the pose for camera: %s!",
2385                         it->c_str());
2386     }
2387   }
2388 }
2389 
/*!
  Initialize the tracking thanks to the pose.

  Delegates to the base-class vpMbTracker::initFromPose() implementation.

  \param I : Input grayscale image.
  \param cMo : Pose matrix.
*/
void vpMbGenericTracker::initFromPose(const vpImage<unsigned char> &I, const vpHomogeneousMatrix &cMo)
{
  vpMbTracker::initFromPose(I, cMo);
}
2394 
2395 /*!
2396   Initialise the tracking thanks to the pose in vpPoseVector format, and read
2397   in the file initFile.
2398 
2399   \param I1 : Input grayscale image for the first camera.
2400   \param I2 : Input grayscale image for the second camera.
2401   \param initFile1 : Init pose file for the first camera.
2402   \param initFile2 : Init pose file for the second camera.
2403 
2404   \note This function assumes a stereo configuration of the generic tracker.
2405 */
initFromPose(const vpImage<unsigned char> & I1,const vpImage<unsigned char> & I2,const std::string & initFile1,const std::string & initFile2)2406 void vpMbGenericTracker::initFromPose(const vpImage<unsigned char> &I1, const vpImage<unsigned char> &I2,
2407                                       const std::string &initFile1, const std::string &initFile2)
2408 {
2409   if (m_mapOfTrackers.size() == 2) {
2410     std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
2411     TrackerWrapper *tracker = it->second;
2412     tracker->initFromPose(I1, initFile1);
2413 
2414     ++it;
2415 
2416     tracker = it->second;
2417     tracker->initFromPose(I2, initFile2);
2418 
2419     it = m_mapOfTrackers.find(m_referenceCameraName);
2420     if (it != m_mapOfTrackers.end()) {
2421       tracker = it->second;
2422 
2423       // Set the reference cMo
2424       tracker->getPose(m_cMo);
2425 
2426       // Set the reference camera parameters
2427       tracker->getCameraParameters(m_cam);
2428     }
2429   } else {
2430     throw vpException(vpTrackingException::initializationError,
2431                       "Cannot initFromPose()! Require two cameras but there "
2432                       "are %d cameras!",
2433                       m_mapOfTrackers.size());
2434   }
2435 }
2436 
2437 /*!
2438   Initialise the tracking thanks to the pose in vpPoseVector format, and read
2439   in the file initFile.
2440 
2441   \param I_color1 : Input color image for the first camera.
2442   \param I_color2 : Input color image for the second camera.
2443   \param initFile1 : Init pose file for the first camera.
2444   \param initFile2 : Init pose file for the second camera.
2445 
2446   \note This function assumes a stereo configuration of the generic tracker.
2447 */
initFromPose(const vpImage<vpRGBa> & I_color1,const vpImage<vpRGBa> & I_color2,const std::string & initFile1,const std::string & initFile2)2448 void vpMbGenericTracker::initFromPose(const vpImage<vpRGBa> &I_color1, const vpImage<vpRGBa> &I_color2,
2449                                       const std::string &initFile1, const std::string &initFile2)
2450 {
2451   if (m_mapOfTrackers.size() == 2) {
2452     std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
2453     TrackerWrapper *tracker = it->second;
2454     tracker->initFromPose(I_color1, initFile1);
2455 
2456     ++it;
2457 
2458     tracker = it->second;
2459     tracker->initFromPose(I_color2, initFile2);
2460 
2461     it = m_mapOfTrackers.find(m_referenceCameraName);
2462     if (it != m_mapOfTrackers.end()) {
2463       tracker = it->second;
2464 
2465       // Set the reference cMo
2466       tracker->getPose(m_cMo);
2467 
2468       // Set the reference camera parameters
2469       tracker->getCameraParameters(m_cam);
2470     }
2471   } else {
2472     throw vpException(vpTrackingException::initializationError,
2473                       "Cannot initFromPose()! Require two cameras but there "
2474                       "are %d cameras!",
2475                       m_mapOfTrackers.size());
2476   }
2477 }
2478 
2479 /*!
2480   Initialise the tracking thanks to the pose in vpPoseVector format, and read
2481   in the file initFile.
2482 
2483   \param mapOfImages : Map of grayscale images.
2484   \param mapOfInitPoses : Map of init pose files.
2485 
2486   \note Image and init pose file must be supplied for the reference camera.
2487   The images for all the cameras must be supplied to correctly initialize the
2488   trackers but some init pose files can be omitted. In this case, they will be
2489   initialized using the pose computed from the reference camera pose and using
2490   the known geometric transformation between each camera (see
2491   setCameraTransformationMatrix()).
2492 */
initFromPose(const std::map<std::string,const vpImage<unsigned char> * > & mapOfImages,const std::map<std::string,std::string> & mapOfInitPoses)2493 void vpMbGenericTracker::initFromPose(const std::map<std::string, const vpImage<unsigned char> *> &mapOfImages,
2494                                       const std::map<std::string, std::string> &mapOfInitPoses)
2495 {
2496   // Set the reference cMo
2497   std::map<std::string, TrackerWrapper *>::const_iterator it_tracker = m_mapOfTrackers.find(m_referenceCameraName);
2498   std::map<std::string, const vpImage<unsigned char> *>::const_iterator it_img =
2499       mapOfImages.find(m_referenceCameraName);
2500   std::map<std::string, std::string>::const_iterator it_initPose = mapOfInitPoses.find(m_referenceCameraName);
2501 
2502   if (it_tracker != m_mapOfTrackers.end() && it_img != mapOfImages.end() && it_initPose != mapOfInitPoses.end()) {
2503     TrackerWrapper *tracker = it_tracker->second;
2504     tracker->initFromPose(*it_img->second, it_initPose->second);
2505     tracker->getPose(m_cMo);
2506   } else {
2507     throw vpException(vpTrackingException::initializationError, "Cannot initFromPose() for the reference camera!");
2508   }
2509 
2510   // Vector of missing pose matrices for cameras
2511   std::vector<std::string> vectorOfMissingCameraPoses;
2512 
2513   // Set pose for the specified cameras
2514   for (it_tracker = m_mapOfTrackers.begin(); it_tracker != m_mapOfTrackers.end(); ++it_tracker) {
2515     it_img = mapOfImages.find(it_tracker->first);
2516     it_initPose = mapOfInitPoses.find(it_tracker->first);
2517 
2518     if (it_img != mapOfImages.end() && it_initPose != mapOfInitPoses.end()) {
2519       // Set pose
2520       it_tracker->second->initFromPose(*it_img->second, it_initPose->second);
2521     } else {
2522       vectorOfMissingCameraPoses.push_back(it_tracker->first);
2523     }
2524   }
2525 
2526   for (std::vector<std::string>::const_iterator it = vectorOfMissingCameraPoses.begin();
2527        it != vectorOfMissingCameraPoses.end(); ++it) {
2528     it_img = mapOfImages.find(*it);
2529     std::map<std::string, vpHomogeneousMatrix>::const_iterator it_camTrans =
2530         m_mapOfCameraTransformationMatrix.find(*it);
2531 
2532     if (it_img != mapOfImages.end() && it_camTrans != m_mapOfCameraTransformationMatrix.end()) {
2533       vpHomogeneousMatrix cCurrentMo = it_camTrans->second * m_cMo;
2534       m_mapOfTrackers[*it]->initFromPose(*it_img->second, cCurrentMo);
2535     } else {
2536       throw vpException(vpTrackingException::initializationError,
2537                         "Missing image or missing camera transformation "
2538                         "matrix! Cannot init the pose for camera: %s!",
2539                         it->c_str());
2540     }
2541   }
2542 }
2543 
2544 /*!
2545   Initialise the tracking thanks to the pose in vpPoseVector format, and read
2546   in the file initFile.
2547 
2548   \param mapOfColorImages : Map of color images.
2549   \param mapOfInitPoses : Map of init pose files.
2550 
2551   \note Image and init pose file must be supplied for the reference camera.
2552   The images for all the cameras must be supplied to correctly initialize the
2553   trackers but some init pose files can be omitted. In this case, they will be
2554   initialized using the pose computed from the reference camera pose and using
2555   the known geometric transformation between each camera (see
2556   setCameraTransformationMatrix()).
2557 */
initFromPose(const std::map<std::string,const vpImage<vpRGBa> * > & mapOfColorImages,const std::map<std::string,std::string> & mapOfInitPoses)2558 void vpMbGenericTracker::initFromPose(const std::map<std::string, const vpImage<vpRGBa> *> &mapOfColorImages,
2559                                       const std::map<std::string, std::string> &mapOfInitPoses)
2560 {
2561   // Set the reference cMo
2562   std::map<std::string, TrackerWrapper *>::const_iterator it_tracker = m_mapOfTrackers.find(m_referenceCameraName);
2563   std::map<std::string, const vpImage<vpRGBa> *>::const_iterator it_img =
2564       mapOfColorImages.find(m_referenceCameraName);
2565   std::map<std::string, std::string>::const_iterator it_initPose = mapOfInitPoses.find(m_referenceCameraName);
2566 
2567   if (it_tracker != m_mapOfTrackers.end() && it_img != mapOfColorImages.end() && it_initPose != mapOfInitPoses.end()) {
2568     TrackerWrapper *tracker = it_tracker->second;
2569     tracker->initFromPose(*it_img->second, it_initPose->second);
2570     tracker->getPose(m_cMo);
2571   } else {
2572     throw vpException(vpTrackingException::initializationError, "Cannot initFromPose() for the reference camera!");
2573   }
2574 
2575   // Vector of missing pose matrices for cameras
2576   std::vector<std::string> vectorOfMissingCameraPoses;
2577 
2578   // Set pose for the specified cameras
2579   for (it_tracker = m_mapOfTrackers.begin(); it_tracker != m_mapOfTrackers.end(); ++it_tracker) {
2580     it_img = mapOfColorImages.find(it_tracker->first);
2581     it_initPose = mapOfInitPoses.find(it_tracker->first);
2582 
2583     if (it_img != mapOfColorImages.end() && it_initPose != mapOfInitPoses.end()) {
2584       // Set pose
2585       it_tracker->second->initFromPose(*it_img->second, it_initPose->second);
2586     } else {
2587       vectorOfMissingCameraPoses.push_back(it_tracker->first);
2588     }
2589   }
2590 
2591   for (std::vector<std::string>::const_iterator it = vectorOfMissingCameraPoses.begin();
2592        it != vectorOfMissingCameraPoses.end(); ++it) {
2593     it_img = mapOfColorImages.find(*it);
2594     std::map<std::string, vpHomogeneousMatrix>::const_iterator it_camTrans =
2595         m_mapOfCameraTransformationMatrix.find(*it);
2596 
2597     if (it_img != mapOfColorImages.end() && it_camTrans != m_mapOfCameraTransformationMatrix.end()) {
2598       vpHomogeneousMatrix cCurrentMo = it_camTrans->second * m_cMo;
2599       m_mapOfTrackers[*it]->initFromPose(*it_img->second, cCurrentMo);
2600     } else {
2601       throw vpException(vpTrackingException::initializationError,
2602                         "Missing image or missing camera transformation "
2603                         "matrix! Cannot init the pose for camera: %s!",
2604                         it->c_str());
2605     }
2606   }
2607 }
2608 
2609 /*!
2610   Initialize the tracking thanks to the pose.
2611 
2612   \param I1 : Input grayscale image for the first camera.
2613   \param I2 : Input grayscale image for the second camera.
2614   \param c1Mo : Pose matrix for the first camera.
2615   \param c2Mo : Pose matrix for the second camera.
2616 
2617   \note This function assumes a stereo configuration of the generic tracker.
2618 */
initFromPose(const vpImage<unsigned char> & I1,const vpImage<unsigned char> & I2,const vpHomogeneousMatrix & c1Mo,const vpHomogeneousMatrix & c2Mo)2619 void vpMbGenericTracker::initFromPose(const vpImage<unsigned char> &I1, const vpImage<unsigned char> &I2,
2620                                       const vpHomogeneousMatrix &c1Mo, const vpHomogeneousMatrix &c2Mo)
2621 {
2622   if (m_mapOfTrackers.size() == 2) {
2623     std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
2624     it->second->initFromPose(I1, c1Mo);
2625 
2626     ++it;
2627 
2628     it->second->initFromPose(I2, c2Mo);
2629 
2630     m_cMo = c1Mo;
2631   } else {
2632     throw vpException(vpTrackingException::initializationError,
2633                       "This method requires 2 cameras but there are %d cameras!", m_mapOfTrackers.size());
2634   }
2635 }
2636 
2637 /*!
2638   Initialize the tracking thanks to the pose.
2639 
2640   \param I_color1 : Input color image for the first camera.
2641   \param I_color2 : Input color image for the second camera.
2642   \param c1Mo : Pose matrix for the first camera.
2643   \param c2Mo : Pose matrix for the second camera.
2644 
2645   \note This function assumes a stereo configuration of the generic tracker.
2646 */
initFromPose(const vpImage<vpRGBa> & I_color1,const vpImage<vpRGBa> & I_color2,const vpHomogeneousMatrix & c1Mo,const vpHomogeneousMatrix & c2Mo)2647 void vpMbGenericTracker::initFromPose(const vpImage<vpRGBa> &I_color1, const vpImage<vpRGBa> &I_color2,
2648                                       const vpHomogeneousMatrix &c1Mo, const vpHomogeneousMatrix &c2Mo)
2649 {
2650   if (m_mapOfTrackers.size() == 2) {
2651     std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
2652     it->second->initFromPose(I_color1, c1Mo);
2653 
2654     ++it;
2655 
2656     it->second->initFromPose(I_color2, c2Mo);
2657 
2658     m_cMo = c1Mo;
2659   } else {
2660     throw vpException(vpTrackingException::initializationError,
2661                       "This method requires 2 cameras but there are %d cameras!", m_mapOfTrackers.size());
2662   }
2663 }
2664 
2665 /*!
2666   Initialize the tracking thanks to the pose.
2667 
2668   \param mapOfImages : Map of grayscale images.
2669   \param mapOfCameraPoses : Map of pose matrix.
2670 
2671   \note Image and camera pose must be supplied for the reference camera. The
2672   images for all the cameras must be supplied to correctly initialize the
2673   trackers but some camera poses can be omitted. In this case, they will be
2674   initialized using the pose computed from the reference camera pose and using
2675   the known geometric transformation between each camera (see
2676   setCameraTransformationMatrix()).
2677 */
void vpMbGenericTracker::initFromPose(const std::map<std::string, const vpImage<unsigned char> *> &mapOfImages,
                                      const std::map<std::string, vpHomogeneousMatrix> &mapOfCameraPoses)
{
  // Set the reference cMo: the reference camera must have both an image and a pose
  std::map<std::string, TrackerWrapper *>::const_iterator it_tracker = m_mapOfTrackers.find(m_referenceCameraName);
  std::map<std::string, const vpImage<unsigned char> *>::const_iterator it_img =
      mapOfImages.find(m_referenceCameraName);
  std::map<std::string, vpHomogeneousMatrix>::const_iterator it_camPose = mapOfCameraPoses.find(m_referenceCameraName);

  if (it_tracker != m_mapOfTrackers.end() && it_img != mapOfImages.end() && it_camPose != mapOfCameraPoses.end()) {
    TrackerWrapper *tracker = it_tracker->second;
    tracker->initFromPose(*it_img->second, it_camPose->second);
    tracker->getPose(m_cMo); // m_cMo now holds the reference-camera pose
  } else {
    throw vpException(vpTrackingException::initializationError, "Cannot set pose for the reference camera!");
  }

  // Vector of missing pose matrices for cameras
  std::vector<std::string> vectorOfMissingCameraPoses;

  // First pass: set the pose for every camera for which both an image and a
  // pose were supplied; remember the cameras whose pose is missing.
  for (it_tracker = m_mapOfTrackers.begin(); it_tracker != m_mapOfTrackers.end(); ++it_tracker) {
    it_img = mapOfImages.find(it_tracker->first);
    it_camPose = mapOfCameraPoses.find(it_tracker->first);

    if (it_img != mapOfImages.end() && it_camPose != mapOfCameraPoses.end()) {
      // Set pose
      it_tracker->second->initFromPose(*it_img->second, it_camPose->second);
    } else {
      vectorOfMissingCameraPoses.push_back(it_tracker->first);
    }
  }

  // Second pass: derive the pose of the remaining cameras from the reference
  // pose and the known camera-to-camera transformation (see
  // setCameraTransformationMatrix()). An image is still mandatory.
  for (std::vector<std::string>::const_iterator it = vectorOfMissingCameraPoses.begin();
       it != vectorOfMissingCameraPoses.end(); ++it) {
    it_img = mapOfImages.find(*it);
    std::map<std::string, vpHomogeneousMatrix>::const_iterator it_camTrans =
        m_mapOfCameraTransformationMatrix.find(*it);

    if (it_img != mapOfImages.end() && it_camTrans != m_mapOfCameraTransformationMatrix.end()) {
      vpHomogeneousMatrix cCurrentMo = it_camTrans->second * m_cMo;
      m_mapOfTrackers[*it]->initFromPose(*it_img->second, cCurrentMo);
    } else {
      throw vpException(vpTrackingException::initializationError,
                        "Missing image or missing camera transformation "
                        "matrix! Cannot set the pose for camera: %s!",
                        it->c_str());
    }
  }
}
2728 
2729 /*!
2730   Initialize the tracking thanks to the pose.
2731 
2732   \param mapOfColorImages : Map of color images.
2733   \param mapOfCameraPoses : Map of pose matrix.
2734 
2735   \note Image and camera pose must be supplied for the reference camera. The
2736   images for all the cameras must be supplied to correctly initialize the
2737   trackers but some camera poses can be omitted. In this case, they will be
2738   initialized using the pose computed from the reference camera pose and using
2739   the known geometric transformation between each camera (see
2740   setCameraTransformationMatrix()).
2741 */
void vpMbGenericTracker::initFromPose(const std::map<std::string, const vpImage<vpRGBa> *> &mapOfColorImages,
                                      const std::map<std::string, vpHomogeneousMatrix> &mapOfCameraPoses)
{
  // Set the reference cMo: the reference camera must have both an image and a pose
  std::map<std::string, TrackerWrapper *>::const_iterator it_tracker = m_mapOfTrackers.find(m_referenceCameraName);
  std::map<std::string, const vpImage<vpRGBa> *>::const_iterator it_img =
      mapOfColorImages.find(m_referenceCameraName);
  std::map<std::string, vpHomogeneousMatrix>::const_iterator it_camPose = mapOfCameraPoses.find(m_referenceCameraName);

  if (it_tracker != m_mapOfTrackers.end() && it_img != mapOfColorImages.end() && it_camPose != mapOfCameraPoses.end()) {
    TrackerWrapper *tracker = it_tracker->second;
    tracker->initFromPose(*it_img->second, it_camPose->second);
    tracker->getPose(m_cMo); // m_cMo now holds the reference-camera pose
  } else {
    throw vpException(vpTrackingException::initializationError, "Cannot set pose for the reference camera!");
  }

  // Vector of missing pose matrices for cameras
  std::vector<std::string> vectorOfMissingCameraPoses;

  // First pass: set the pose for every camera for which both an image and a
  // pose were supplied; remember the cameras whose pose is missing.
  for (it_tracker = m_mapOfTrackers.begin(); it_tracker != m_mapOfTrackers.end(); ++it_tracker) {
    it_img = mapOfColorImages.find(it_tracker->first);
    it_camPose = mapOfCameraPoses.find(it_tracker->first);

    if (it_img != mapOfColorImages.end() && it_camPose != mapOfCameraPoses.end()) {
      // Set pose
      it_tracker->second->initFromPose(*it_img->second, it_camPose->second);
    } else {
      vectorOfMissingCameraPoses.push_back(it_tracker->first);
    }
  }

  // Second pass: derive the pose of the remaining cameras from the reference
  // pose and the known camera-to-camera transformation (see
  // setCameraTransformationMatrix()). An image is still mandatory.
  for (std::vector<std::string>::const_iterator it = vectorOfMissingCameraPoses.begin();
       it != vectorOfMissingCameraPoses.end(); ++it) {
    it_img = mapOfColorImages.find(*it);
    std::map<std::string, vpHomogeneousMatrix>::const_iterator it_camTrans =
        m_mapOfCameraTransformationMatrix.find(*it);

    if (it_img != mapOfColorImages.end() && it_camTrans != m_mapOfCameraTransformationMatrix.end()) {
      vpHomogeneousMatrix cCurrentMo = it_camTrans->second * m_cMo;
      m_mapOfTrackers[*it]->initFromPose(*it_img->second, cCurrentMo);
    } else {
      throw vpException(vpTrackingException::initializationError,
                        "Missing image or missing camera transformation "
                        "matrix! Cannot set the pose for camera: %s!",
                        it->c_str());
    }
  }
}
2792 
2793 /*!
2794   Load the xml configuration file.
2795   From the configuration file initialize the parameters corresponding to the
2796   objects: tracking parameters, camera intrinsic parameters.
2797 
2798   \throw vpException::ioError if the file has not been properly parsed (file
2799   not found).
2800 
2801   \param configFile : full name of the xml file.
2802   \param verbose : verbose flag.
2803 */
loadConfigFile(const std::string & configFile,bool verbose)2804 void vpMbGenericTracker::loadConfigFile(const std::string &configFile, bool verbose)
2805 {
2806   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
2807        it != m_mapOfTrackers.end(); ++it) {
2808     TrackerWrapper *tracker = it->second;
2809     tracker->loadConfigFile(configFile, verbose);
2810   }
2811 
2812   if (m_mapOfTrackers.find(m_referenceCameraName) == m_mapOfTrackers.end()) {
2813     throw vpException(vpException::fatalError, "Cannot find the reference camera:  %s!", m_referenceCameraName.c_str());
2814   }
2815 
2816   m_mapOfTrackers[m_referenceCameraName]->getCameraParameters(m_cam);
2817   this->angleAppears = m_mapOfTrackers[m_referenceCameraName]->getAngleAppear();
2818   this->angleDisappears = m_mapOfTrackers[m_referenceCameraName]->getAngleDisappear();
2819   this->clippingFlag = m_mapOfTrackers[m_referenceCameraName]->getClipping();
2820 }
2821 
2822 /*!
2823   Load the xml configuration files.
2824   From the configuration file initialize the parameters corresponding to the
2825   objects: tracking parameters, camera intrinsic parameters.
2826 
2827   \throw vpException::ioError if the file has not been properly parsed (file
2828   not found).
2829 
2830   \param configFile1 : Full name of the xml file for the first camera.
2831   \param configFile2 : Full name of the xml file for the second camera.
2832   \param verbose : verbose flag.
2833 
2834   \note This function assumes a stereo configuration of the generic tracker.
2835 */
loadConfigFile(const std::string & configFile1,const std::string & configFile2,bool verbose)2836 void vpMbGenericTracker::loadConfigFile(const std::string &configFile1, const std::string &configFile2, bool verbose)
2837 {
2838   if (m_mapOfTrackers.size() != 2) {
2839     throw vpException(vpException::fatalError, "The tracker is not set in a stereo configuration!");
2840   }
2841 
2842   std::map<std::string, TrackerWrapper *>::const_iterator it_tracker = m_mapOfTrackers.begin();
2843   TrackerWrapper *tracker = it_tracker->second;
2844   tracker->loadConfigFile(configFile1, verbose);
2845 
2846   ++it_tracker;
2847   tracker = it_tracker->second;
2848   tracker->loadConfigFile(configFile2, verbose);
2849 
2850   if (m_mapOfTrackers.find(m_referenceCameraName) == m_mapOfTrackers.end()) {
2851     throw vpException(vpException::fatalError, "Cannot find the reference camera:  %s!", m_referenceCameraName.c_str());
2852   }
2853 
2854   m_mapOfTrackers[m_referenceCameraName]->getCameraParameters(m_cam);
2855   this->angleAppears = m_mapOfTrackers[m_referenceCameraName]->getAngleAppear();
2856   this->angleDisappears = m_mapOfTrackers[m_referenceCameraName]->getAngleDisappear();
2857   this->clippingFlag = m_mapOfTrackers[m_referenceCameraName]->getClipping();
2858 }
2859 
2860 /*!
2861   Load the xml configuration files.
2862   From the configuration file initialize the parameters corresponding to the
2863   objects: tracking parameters, camera intrinsic parameters.
2864 
2865   \throw vpException::ioError if the file has not been properly parsed (file
2866   not found).
2867 
2868   \param mapOfConfigFiles : Map of xml files.
2869   \param verbose : verbose flag.
2870 
2871   \note Configuration files must be supplied for all the cameras.
2872 */
loadConfigFile(const std::map<std::string,std::string> & mapOfConfigFiles,bool verbose)2873 void vpMbGenericTracker::loadConfigFile(const std::map<std::string, std::string> &mapOfConfigFiles, bool verbose)
2874 {
2875   for (std::map<std::string, TrackerWrapper *>::const_iterator it_tracker = m_mapOfTrackers.begin();
2876        it_tracker != m_mapOfTrackers.end(); ++it_tracker) {
2877     TrackerWrapper *tracker = it_tracker->second;
2878 
2879     std::map<std::string, std::string>::const_iterator it_config = mapOfConfigFiles.find(it_tracker->first);
2880     if (it_config != mapOfConfigFiles.end()) {
2881       tracker->loadConfigFile(it_config->second, verbose);
2882     } else {
2883       throw vpException(vpTrackingException::initializationError, "Missing configuration file for camera: %s!",
2884                         it_tracker->first.c_str());
2885     }
2886   }
2887 
2888   // Set the reference camera parameters
2889   std::map<std::string, TrackerWrapper *>::iterator it = m_mapOfTrackers.find(m_referenceCameraName);
2890   if (it != m_mapOfTrackers.end()) {
2891     TrackerWrapper *tracker = it->second;
2892     tracker->getCameraParameters(m_cam);
2893 
2894     // Set clipping
2895     this->clippingFlag = tracker->getClipping();
2896     this->angleAppears = tracker->getAngleAppear();
2897     this->angleDisappears = tracker->getAngleDisappear();
2898   } else {
2899     throw vpException(vpTrackingException::initializationError, "The reference camera: %s does not exist!",
2900                       m_referenceCameraName.c_str());
2901   }
2902 }
2903 
2904 /*!
2905   Load a 3D model from the file in parameter. This file must either be a vrml
2906   file (.wrl) or a CAO file (.cao). CAO format is described in the
2907   loadCAOModel() method.
2908 
2909   \throw vpException::ioError if the file cannot be open, or if its extension
2910 is not wrl or cao.
2911 
2912   \param modelFile : the file containing the 3D model description.
2913   The extension of this file is either .wrl or .cao.
2914   \param verbose : verbose option to print additional information when loading
2915 CAO model files which include other CAO model files.
2916   \param T : optional transformation matrix (currently only for .cao) to transform
2917   3D points expressed in the original object frame to the desired object frame.
2918 
2919   \note All the trackers will use the same model in case of stereo / multiple
2920 cameras configuration.
2921 */
loadModel(const std::string & modelFile,bool verbose,const vpHomogeneousMatrix & T)2922 void vpMbGenericTracker::loadModel(const std::string &modelFile, bool verbose, const vpHomogeneousMatrix &T)
2923 {
2924   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
2925        it != m_mapOfTrackers.end(); ++it) {
2926     TrackerWrapper *tracker = it->second;
2927     tracker->loadModel(modelFile, verbose, T);
2928   }
2929 }
2930 
2931 /*!
2932   Load a 3D model from the file in parameter. This file must either be a vrml
2933   file (.wrl) or a CAO file (.cao). CAO format is described in the
2934   loadCAOModel() method.
2935 
2936   \throw vpException::ioError if the file cannot be open, or if its extension
2937   is not wrl or cao.
2938 
2939   \param modelFile1 : the file containing the 3D model description for the
2940   first camera. The extension of this file is either .wrl or .cao.
2941   \param modelFile2 : the file containing the the 3D model description for the second
2942   camera. The extension of this file is either .wrl or .cao.
2943   \param verbose : verbose option to print additional information when loading CAO model files
2944   which include other CAO model files.
2945   \param T1 : optional transformation matrix (currently only for .cao) to transform
2946   3D points in \a modelFile1 expressed in the original object frame to the desired object frame.
2947   \param T2 : optional transformation matrix (currently only for .cao) to transform
2948   3D points in \a modelFile2 expressed in the original object frame to the desired object frame (
2949   T2==T1 if the two models have the same object frame which should be the case most of the time).
2950 
2951   \note This function assumes a stereo configuration of the generic tracker.
2952 */
loadModel(const std::string & modelFile1,const std::string & modelFile2,bool verbose,const vpHomogeneousMatrix & T1,const vpHomogeneousMatrix & T2)2953 void vpMbGenericTracker::loadModel(const std::string &modelFile1, const std::string &modelFile2, bool verbose,
2954                                    const vpHomogeneousMatrix &T1, const vpHomogeneousMatrix &T2)
2955 {
2956   if (m_mapOfTrackers.size() != 2) {
2957     throw vpException(vpException::fatalError, "The tracker is not set in a stereo configuration!");
2958   }
2959 
2960   std::map<std::string, TrackerWrapper *>::const_iterator it_tracker = m_mapOfTrackers.begin();
2961   TrackerWrapper *tracker = it_tracker->second;
2962   tracker->loadModel(modelFile1, verbose, T1);
2963 
2964   ++it_tracker;
2965   tracker = it_tracker->second;
2966   tracker->loadModel(modelFile2, verbose, T2);
2967 }
2968 
2969 /*!
2970   Load a 3D model from the file in parameter. This file must either be a vrml
2971   file (.wrl) or a CAO file (.cao). CAO format is described in the
2972   loadCAOModel() method.
2973 
2974   \throw vpException::ioError if the file cannot be open, or if its extension
2975   is not wrl or cao.
2976 
2977   \param mapOfModelFiles : map of files containing the 3D model description.
2978   The extension of this file is either .wrl or .cao.
2979   \param verbose : verbose option to print additional information when loading
2980   CAO model files which include other CAO model files.
2981   \param mapOfT : optional map of transformation matrices (currently only for .cao)
2982   to transform 3D points in \a mapOfModelFiles expressed in the original object frame to
2983   the desired object frame (if the models have the same object frame which should be the
2984   case most of the time, all the transformation matrices are identical).
2985 
2986   \note Each camera must have a model file.
2987 */
loadModel(const std::map<std::string,std::string> & mapOfModelFiles,bool verbose,const std::map<std::string,vpHomogeneousMatrix> & mapOfT)2988 void vpMbGenericTracker::loadModel(const std::map<std::string, std::string> &mapOfModelFiles, bool verbose,
2989                                    const std::map<std::string, vpHomogeneousMatrix> &mapOfT)
2990 {
2991   for (std::map<std::string, TrackerWrapper *>::const_iterator it_tracker = m_mapOfTrackers.begin();
2992        it_tracker != m_mapOfTrackers.end(); ++it_tracker) {
2993     std::map<std::string, std::string>::const_iterator it_model = mapOfModelFiles.find(it_tracker->first);
2994 
2995     if (it_model != mapOfModelFiles.end()) {
2996       TrackerWrapper *tracker = it_tracker->second;
2997       std::map<std::string, vpHomogeneousMatrix>::const_iterator it_T = mapOfT.find(it_tracker->first);
2998 
2999       if (it_T != mapOfT.end())
3000         tracker->loadModel(it_model->second, verbose, it_T->second);
3001       else
3002         tracker->loadModel(it_model->second, verbose);
3003     } else {
3004       throw vpException(vpTrackingException::initializationError, "Cannot load model for camera: %s",
3005                         it_tracker->first.c_str());
3006     }
3007   }
3008 }
3009 
#ifdef VISP_HAVE_PCL
/*!
  Dispatch the input grayscale images and PCL point clouds to each camera
  tracker (per-camera pre-tracking step).

  \param mapOfImages : Map of grayscale image pointers, indexed by camera name.
  \param mapOfPointClouds : Map of PCL point clouds, indexed by camera name.

  \note operator[] is used on both maps: a camera name missing from a map
  default-inserts a null entry that is then forwarded to the wrapped tracker
  — presumably the caller supplies an entry per camera; verify against callers.
*/
void vpMbGenericTracker::preTracking(std::map<std::string, const vpImage<unsigned char> *> &mapOfImages,
                                     std::map<std::string, pcl::PointCloud<pcl::PointXYZ>::ConstPtr> &mapOfPointClouds)
{
  for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
       it != m_mapOfTrackers.end(); ++it) {
    TrackerWrapper *tracker = it->second;
    tracker->preTracking(mapOfImages[it->first], mapOfPointClouds[it->first]);
  }
}
#endif
3021 
/*!
  Dispatch the input grayscale images and raw point clouds (vector of 3D
  points plus their 2D organization) to each camera tracker (per-camera
  pre-tracking step).

  \param mapOfImages : Map of grayscale image pointers, indexed by camera name.
  \param mapOfPointClouds : Map of point clouds (one vpColVector per 3D point),
  indexed by camera name.
  \param mapOfPointCloudWidths : Map of point-cloud widths, indexed by camera name.
  \param mapOfPointCloudHeights : Map of point-cloud heights, indexed by camera name.

  \note operator[] is used on all four maps: a camera name missing from a map
  default-inserts a null/zero entry that is then forwarded to the wrapped
  tracker — presumably the caller supplies an entry per camera; verify against
  callers.
*/
void vpMbGenericTracker::preTracking(std::map<std::string, const vpImage<unsigned char> *> &mapOfImages,
                                     std::map<std::string, const std::vector<vpColVector> *> &mapOfPointClouds,
                                     std::map<std::string, unsigned int> &mapOfPointCloudWidths,
                                     std::map<std::string, unsigned int> &mapOfPointCloudHeights)
{
  for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
       it != m_mapOfTrackers.end(); ++it) {
    TrackerWrapper *tracker = it->second;
    tracker->preTracking(mapOfImages[it->first], mapOfPointClouds[it->first], mapOfPointCloudWidths[it->first],
                         mapOfPointCloudHeights[it->first]);
  }
}
3034 
3035 /*!
3036   Re-initialize the model used by the tracker.
3037 
3038   \param I : The grayscale image containing the object to initialize.
3039   \param cad_name : Path to the file containing the 3D model description.
3040   \param cMo : The new vpHomogeneousMatrix between the camera and the new
3041   model.
3042   \param verbose : verbose option to print additional information when
3043   loading CAO model files which include other CAO model files.
3044   \param T : optional transformation matrix (currently only for .cao).
3045 */
reInitModel(const vpImage<unsigned char> & I,const std::string & cad_name,const vpHomogeneousMatrix & cMo,bool verbose,const vpHomogeneousMatrix & T)3046 void vpMbGenericTracker::reInitModel(const vpImage<unsigned char> &I, const std::string &cad_name,
3047                                      const vpHomogeneousMatrix &cMo, bool verbose,
3048                                      const vpHomogeneousMatrix &T)
3049 {
3050   if (m_mapOfTrackers.size() != 1) {
3051     throw vpException(vpTrackingException::fatalError, "This method requires exactly one camera, there are %d cameras!",
3052                       m_mapOfTrackers.size());
3053   }
3054 
3055   std::map<std::string, TrackerWrapper *>::const_iterator it_tracker = m_mapOfTrackers.find(m_referenceCameraName);
3056   if (it_tracker != m_mapOfTrackers.end()) {
3057     TrackerWrapper *tracker = it_tracker->second;
3058     tracker->reInitModel(I, cad_name, cMo, verbose, T);
3059 
3060     // Set reference pose
3061     tracker->getPose(m_cMo);
3062   } else {
3063     throw vpException(vpTrackingException::fatalError, "Cannot reInitModel() the reference camera!");
3064   }
3065 
3066   modelInitialised = true;
3067 }
3068 
3069 /*!
3070   Re-initialize the model used by the tracker.
3071 
3072   \param I_color : The color image containing the object to initialize.
3073   \param cad_name : Path to the file containing the 3D model description.
3074   \param cMo : The new vpHomogeneousMatrix between the camera and the new
3075   model.
3076   \param verbose : verbose option to print additional information when
3077   loading CAO model files which include other CAO model files.
3078   \param T : optional transformation matrix (currently only for .cao).
3079 */
reInitModel(const vpImage<vpRGBa> & I_color,const std::string & cad_name,const vpHomogeneousMatrix & cMo,bool verbose,const vpHomogeneousMatrix & T)3080 void vpMbGenericTracker::reInitModel(const vpImage<vpRGBa> &I_color, const std::string &cad_name,
3081                                      const vpHomogeneousMatrix &cMo, bool verbose,
3082                                      const vpHomogeneousMatrix &T)
3083 {
3084   if (m_mapOfTrackers.size() != 1) {
3085     throw vpException(vpTrackingException::fatalError, "This method requires exactly one camera, there are %d cameras!",
3086                       m_mapOfTrackers.size());
3087   }
3088 
3089   std::map<std::string, TrackerWrapper *>::const_iterator it_tracker = m_mapOfTrackers.find(m_referenceCameraName);
3090   if (it_tracker != m_mapOfTrackers.end()) {
3091     TrackerWrapper *tracker = it_tracker->second;
3092     tracker->reInitModel(I_color, cad_name, cMo, verbose, T);
3093 
3094     // Set reference pose
3095     tracker->getPose(m_cMo);
3096   } else {
3097     throw vpException(vpTrackingException::fatalError, "Cannot reInitModel() the reference camera!");
3098   }
3099 
3100   modelInitialised = true;
3101 }
3102 
3103 /*!
3104   Re-initialize the model used by the tracker.
3105 
3106   \param I1 : The grayscale image containing the object to initialize for the first
3107   camera.
3108   \param I2 : The grayscale image containing the object to initialize for the second camera.
3109   \param cad_name1 : Path to the file containing the 3D model description for the first camera.
3110   \param cad_name2 : Path to the file containing the 3D model description for the second camera.
3111   \param c1Mo : The new vpHomogeneousMatrix between the first camera and the new model.
3112   \param c2Mo : The new vpHomogeneousMatrix between the second camera and the new model.
3113   \param verbose : verbose option to print additional information when
3114   loading CAO model files which include other CAO model files.
3115   \param T1 : optional transformation matrix (currently only for .cao) to transform
3116   3D points in \a cad_name1 expressed in the original object frame to the desired object frame.
3117   \param T2 : optional transformation matrix (currently only for .cao) to transform
3118   3D points in \a cad_name2 expressed in the original object frame to the desired object frame (
3119   T2==T1 if the two models have the same object frame which should be the case most of the time).
3120 
3121   \note This function assumes a stereo configuration of the generic tracker.
3122 */
void vpMbGenericTracker::reInitModel(const vpImage<unsigned char> &I1, const vpImage<unsigned char> &I2,
                                     const std::string &cad_name1, const std::string &cad_name2,
                                     const vpHomogeneousMatrix &c1Mo, const vpHomogeneousMatrix &c2Mo,
                                     bool verbose,
                                     const vpHomogeneousMatrix &T1, const vpHomogeneousMatrix &T2)
{
  if (m_mapOfTrackers.size() == 2) {
    // std::map iterates in key (camera-name) order: the first entry is the
    // "first" camera, the second entry the "second" camera.
    std::map<std::string, TrackerWrapper *>::const_iterator it_tracker = m_mapOfTrackers.begin();

    it_tracker->second->reInitModel(I1, cad_name1, c1Mo, verbose, T1);

    ++it_tracker;

    it_tracker->second->reInitModel(I2, cad_name2, c2Mo, verbose, T2);

    it_tracker = m_mapOfTrackers.find(m_referenceCameraName);
    if (it_tracker != m_mapOfTrackers.end()) {
      // Set reference pose (silently skipped if the reference camera name
      // does not match any of the two trackers)
      it_tracker->second->getPose(m_cMo);
    }
  } else {
    throw vpException(vpTrackingException::fatalError, "This method requires exactly two cameras!");
  }

  modelInitialised = true;
}
3149 
3150 /*!
3151   Re-initialize the model used by the tracker.
3152 
3153   \param I_color1 : The color image containing the object to initialize for the first
3154   camera.
3155   \param I_color2 : The color image containing the object to initialize for the second camera.
3156   \param cad_name1 : Path to the file containing the 3D model description for the first camera.
3157   \param cad_name2 : Path to the file containing the 3D model description for the second camera.
3158   \param c1Mo : The new vpHomogeneousMatrix between the first camera and the new model.
3159   \param c2Mo : The new vpHomogeneousMatrix between the second camera and the new model.
3160   \param verbose : verbose option to print additional information when
3161   loading CAO model files which include other CAO model files.
3162   \param T1 : optional transformation matrix (currently only for .cao) to transform
3163   3D points in \a cad_name1 expressed in the original object frame to the desired object frame.
3164   \param T2 : optional transformation matrix (currently only for .cao) to transform
3165   3D points in \a cad_name2 expressed in the original object frame to the desired object frame (
3166   T2==T1 if the two models have the same object frame which should be the case most of the time).
3167 
3168   \note This function assumes a stereo configuration of the generic tracker.
3169 */
void vpMbGenericTracker::reInitModel(const vpImage<vpRGBa> &I_color1, const vpImage<vpRGBa> &I_color2,
                                     const std::string &cad_name1, const std::string &cad_name2,
                                     const vpHomogeneousMatrix &c1Mo, const vpHomogeneousMatrix &c2Mo,
                                     bool verbose,
                                     const vpHomogeneousMatrix &T1, const vpHomogeneousMatrix &T2)
{
  if (m_mapOfTrackers.size() == 2) {
    // std::map iterates in key (camera-name) order: the first entry is the
    // "first" camera, the second entry the "second" camera.
    std::map<std::string, TrackerWrapper *>::const_iterator it_tracker = m_mapOfTrackers.begin();

    it_tracker->second->reInitModel(I_color1, cad_name1, c1Mo, verbose, T1);

    ++it_tracker;

    it_tracker->second->reInitModel(I_color2, cad_name2, c2Mo, verbose, T2);

    it_tracker = m_mapOfTrackers.find(m_referenceCameraName);
    if (it_tracker != m_mapOfTrackers.end()) {
      // Set reference pose (silently skipped if the reference camera name
      // does not match any of the two trackers)
      it_tracker->second->getPose(m_cMo);
    }
  } else {
    throw vpException(vpTrackingException::fatalError, "This method requires exactly two cameras!");
  }

  modelInitialised = true;
}
3196 
3197 /*!
3198   Re-initialize the model used by the tracker.
3199 
3200   \param mapOfImages : Map of grayscale images.
3201   \param mapOfModelFiles : Map of model files.
3202   \param mapOfCameraPoses : The new vpHomogeneousMatrix between the cameras
3203   and the current object position.
3204   \param verbose : Verbose option to print additional information when loading CAO model
3205   files which include other CAO model files.
3206   \param mapOfT : optional map of transformation matrices (currently only for .cao) to transform
3207   3D points in \a mapOfModelFiles expressed in the original object frame to the desired object frame
3208   (if the models have the same object frame which should be the case most of the time,
3209   all the transformation matrices are identical).
3210 */
void vpMbGenericTracker::reInitModel(const std::map<std::string, const vpImage<unsigned char> *> &mapOfImages,
                                     const std::map<std::string, std::string> &mapOfModelFiles,
                                     const std::map<std::string, vpHomogeneousMatrix> &mapOfCameraPoses,
                                     bool verbose,
                                     const std::map<std::string, vpHomogeneousMatrix> &mapOfT)
{
  // The reference camera must have an image, a model file and a pose
  std::map<std::string, TrackerWrapper *>::const_iterator it_tracker = m_mapOfTrackers.find(m_referenceCameraName);
  std::map<std::string, const vpImage<unsigned char> *>::const_iterator it_img =
      mapOfImages.find(m_referenceCameraName);
  std::map<std::string, std::string>::const_iterator it_model = mapOfModelFiles.find(m_referenceCameraName);
  std::map<std::string, vpHomogeneousMatrix>::const_iterator it_camPose = mapOfCameraPoses.find(m_referenceCameraName);

  if (it_tracker != m_mapOfTrackers.end() && it_img != mapOfImages.end() && it_model != mapOfModelFiles.end() &&
      it_camPose != mapOfCameraPoses.end()) {
    TrackerWrapper *tracker = it_tracker->second;
    // The object-frame transformation matrix is optional
    std::map<std::string, vpHomogeneousMatrix>::const_iterator it_T = mapOfT.find(it_tracker->first);
    if (it_T != mapOfT.end())
      tracker->reInitModel(*it_img->second, it_model->second, it_camPose->second, verbose, it_T->second);
    else
      tracker->reInitModel(*it_img->second, it_model->second, it_camPose->second, verbose);

    // Set reference pose
    tracker->getPose(m_cMo);
  } else {
    throw vpException(vpTrackingException::fatalError, "Cannot reInitModel() for reference camera!");
  }

  // First pass: re-init every non-reference camera that has an image, a model
  // file and a pose; remember the cameras for which the pose is missing.
  std::vector<std::string> vectorOfMissingCameras;
  for (it_tracker = m_mapOfTrackers.begin(); it_tracker != m_mapOfTrackers.end(); ++it_tracker) {
    if (it_tracker->first != m_referenceCameraName) {
      it_img = mapOfImages.find(it_tracker->first);
      it_model = mapOfModelFiles.find(it_tracker->first);
      it_camPose = mapOfCameraPoses.find(it_tracker->first);

      if (it_img != mapOfImages.end() && it_model != mapOfModelFiles.end() && it_camPose != mapOfCameraPoses.end()) {
        TrackerWrapper *tracker = it_tracker->second;
        tracker->reInitModel(*it_img->second, it_model->second, it_camPose->second, verbose);
      } else {
        vectorOfMissingCameras.push_back(it_tracker->first);
      }
    }
  }

  // Second pass: derive the pose of the remaining cameras from the reference
  // pose and the known camera-to-camera transformation.
  // NOTE(review): a camera with no image, no model file or no known
  // transformation is silently skipped here, whereas initFromPose() throws in
  // the same situation — confirm this asymmetry is intentional.
  for (std::vector<std::string>::const_iterator it = vectorOfMissingCameras.begin(); it != vectorOfMissingCameras.end();
       ++it) {
    it_img = mapOfImages.find(*it);
    it_model = mapOfModelFiles.find(*it);
    std::map<std::string, vpHomogeneousMatrix>::const_iterator it_camTrans =
        m_mapOfCameraTransformationMatrix.find(*it);

    if (it_img != mapOfImages.end() && it_model != mapOfModelFiles.end() &&
        it_camTrans != m_mapOfCameraTransformationMatrix.end()) {
      vpHomogeneousMatrix cCurrentMo = it_camTrans->second * m_cMo;
      m_mapOfTrackers[*it]->reInitModel(*it_img->second, it_model->second, cCurrentMo, verbose);
    }
  }

  modelInitialised = true;
}
3270 
3271 /*!
3272   Re-initialize the model used by the tracker.
3273 
3274   \param mapOfColorImages : Map of color images.
3275   \param mapOfModelFiles : Map of model files.
3276   \param mapOfCameraPoses : The new vpHomogeneousMatrix between the cameras
3277   and the current object position.
3278   \param verbose : Verbose option to print additional information when loading CAO model
3279   files which include other CAO model files.
3280   \param mapOfT : optional map of transformation matrices (currently only for .cao) to transform
3281   3D points in \a mapOfModelFiles expressed in the original object frame to the desired object frame
3282   (if the models have the same object frame which should be the case most of the time,
3283   all the transformation matrices are identical).
3284 */
void vpMbGenericTracker::reInitModel(const std::map<std::string, const vpImage<vpRGBa> *> &mapOfColorImages,
                                     const std::map<std::string, std::string> &mapOfModelFiles,
                                     const std::map<std::string, vpHomogeneousMatrix> &mapOfCameraPoses,
                                     bool verbose,
                                     const std::map<std::string, vpHomogeneousMatrix> &mapOfT)
{
  // Step 1: re-initialize the reference camera first. Its tracker, image,
  // model file and pose must all be supplied; the resulting pose becomes the
  // new reference pose m_cMo used below for cameras with missing data.
  std::map<std::string, TrackerWrapper *>::const_iterator it_tracker = m_mapOfTrackers.find(m_referenceCameraName);
  std::map<std::string, const vpImage<vpRGBa> *>::const_iterator it_img =
      mapOfColorImages.find(m_referenceCameraName);
  std::map<std::string, std::string>::const_iterator it_model = mapOfModelFiles.find(m_referenceCameraName);
  std::map<std::string, vpHomogeneousMatrix>::const_iterator it_camPose = mapOfCameraPoses.find(m_referenceCameraName);

  if (it_tracker != m_mapOfTrackers.end() && it_img != mapOfColorImages.end() && it_model != mapOfModelFiles.end() &&
      it_camPose != mapOfCameraPoses.end()) {
    TrackerWrapper *tracker = it_tracker->second;
    // Optional extra transformation (object frame change) for the reference camera's model.
    std::map<std::string, vpHomogeneousMatrix>::const_iterator it_T = mapOfT.find(it_tracker->first);
    if (it_T != mapOfT.end())
      tracker->reInitModel(*it_img->second, it_model->second, it_camPose->second, verbose, it_T->second);
    else
      tracker->reInitModel(*it_img->second, it_model->second, it_camPose->second, verbose);

    // Set reference pose
    tracker->getPose(m_cMo);
  } else {
    throw vpException(vpTrackingException::fatalError, "Cannot reInitModel() for reference camera!");
  }

  // Step 2: re-initialize every non-reference camera for which an image,
  // model file and explicit pose were all provided. Cameras lacking a pose
  // (or any other input) are collected for the fallback pass below.
  std::vector<std::string> vectorOfMissingCameras;
  for (it_tracker = m_mapOfTrackers.begin(); it_tracker != m_mapOfTrackers.end(); ++it_tracker) {
    if (it_tracker->first != m_referenceCameraName) {
      it_img = mapOfColorImages.find(it_tracker->first);
      it_model = mapOfModelFiles.find(it_tracker->first);
      it_camPose = mapOfCameraPoses.find(it_tracker->first);

      if (it_img != mapOfColorImages.end() && it_model != mapOfModelFiles.end() && it_camPose != mapOfCameraPoses.end()) {
        TrackerWrapper *tracker = it_tracker->second;
        tracker->reInitModel(*it_img->second, it_model->second, it_camPose->second, verbose);
      } else {
        vectorOfMissingCameras.push_back(it_tracker->first);
      }
    }
  }

  // Step 3: fallback for cameras without an explicit pose — derive it from
  // the reference pose through the known camera-to-reference transformation
  // (cCurrentMo = cCurrentMcRef * cRefMo). Cameras still lacking an image,
  // model or transformation are silently skipped.
  for (std::vector<std::string>::const_iterator it = vectorOfMissingCameras.begin(); it != vectorOfMissingCameras.end();
       ++it) {
    it_img = mapOfColorImages.find(*it);
    it_model = mapOfModelFiles.find(*it);
    std::map<std::string, vpHomogeneousMatrix>::const_iterator it_camTrans =
        m_mapOfCameraTransformationMatrix.find(*it);

    if (it_img != mapOfColorImages.end() && it_model != mapOfModelFiles.end() &&
        it_camTrans != m_mapOfCameraTransformationMatrix.end()) {
      vpHomogeneousMatrix cCurrentMo = it_camTrans->second * m_cMo;
      m_mapOfTrackers[*it]->reInitModel(*it_img->second, it_model->second, cCurrentMo, verbose);
    }
  }

  modelInitialised = true;
}
3344 
3345 /*!
3346   Reset the tracker. The model is removed and the pose is set to identity.
3347   The tracker needs to be initialized with a new model and a new pose.
3348 */
void vpMbGenericTracker::resetTracker()
{
  // Reset the estimated pose to identity.
  m_cMo.eye();

  useScanLine = false;

#ifdef VISP_HAVE_OGRE
  useOgre = false;
#endif

  // Restore VVS optimization defaults.
  m_computeInteraction = true;
  m_lambda = 1.0;

  // Restore visibility and clipping defaults.
  angleAppears = vpMath::rad(89);
  angleDisappears = vpMath::rad(89);
  clippingFlag = vpPolygon3D::NO_CLIPPING;
  distNearClip = 0.001;
  distFarClip = 100;

  m_optimizationMethod = vpMbTracker::GAUSS_NEWTON_OPT;
  m_maxIter = 30;
  m_stopCriteriaEpsilon = 1e-8;
  m_initialMu = 0.01;

  // Only for Edge
  m_percentageGdPt = 0.4;

  // Only for KLT
  m_thresholdOutlier = 0.5;

  // Reset default ponderation between each feature type
  m_mapOfFeatureFactors[EDGE_TRACKER] = 1.0;

#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
  m_mapOfFeatureFactors[KLT_TRACKER] = 1.0;
#endif

  m_mapOfFeatureFactors[DEPTH_NORMAL_TRACKER] = 1.0;
  m_mapOfFeatureFactors[DEPTH_DENSE_TRACKER] = 1.0;

  // Propagate the reset to every per-camera tracker.
  for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
       it != m_mapOfTrackers.end(); ++it) {
    TrackerWrapper *tracker = it->second;
    tracker->resetTracker();
  }
}
3395 
3396 /*!
3397   Set the angle used to test polygons appearance.
3398   If the angle between the normal of the polygon and the line going
3399   from the camera to the polygon center has a value lower than
3400   this parameter, the polygon is considered as appearing.
3401   The polygon will then be tracked.
3402 
3403   \param a : new angle in radian.
3404 */
setAngleAppear(const double & a)3405 void vpMbGenericTracker::setAngleAppear(const double &a)
3406 {
3407   vpMbTracker::setAngleAppear(a);
3408 
3409   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
3410        it != m_mapOfTrackers.end(); ++it) {
3411     TrackerWrapper *tracker = it->second;
3412     tracker->setAngleAppear(a);
3413   }
3414 }
3415 
3416 /*!
3417   Set the angle used to test polygons appearance.
3418   If the angle between the normal of the polygon and the line going
3419   from the camera to the polygon center has a value lower than
3420   this parameter, the polygon is considered as appearing.
3421   The polygon will then be tracked.
3422 
3423   \param a1 : new angle in radian for the first camera.
3424   \param a2 : new angle in radian for the second camera.
3425 
3426   \note This function assumes a stereo configuration of the generic tracker.
3427 */
setAngleAppear(const double & a1,const double & a2)3428 void vpMbGenericTracker::setAngleAppear(const double &a1, const double &a2)
3429 {
3430   if (m_mapOfTrackers.size() == 2) {
3431     std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
3432     it->second->setAngleAppear(a1);
3433 
3434     ++it;
3435     it->second->setAngleAppear(a2);
3436 
3437     it = m_mapOfTrackers.find(m_referenceCameraName);
3438     if (it != m_mapOfTrackers.end()) {
3439       angleAppears = it->second->getAngleAppear();
3440     } else {
3441       std::cerr << "Cannot find the reference camera: " << m_referenceCameraName << "!" << std::endl;
3442     }
3443   } else {
3444     throw vpException(vpTrackingException::fatalError, "Require two cameras! There are %d cameras!",
3445                       m_mapOfTrackers.size());
3446   }
3447 }
3448 
3449 /*!
3450   Set the angle used to test polygons appearance.
3451   If the angle between the normal of the polygon and the line going
3452   from the camera to the polygon center has a value lower than
3453   this parameter, the polygon is considered as appearing.
3454   The polygon will then be tracked.
3455 
3456   \param mapOfAngles : Map of new angles in radian.
3457 */
setAngleAppear(const std::map<std::string,double> & mapOfAngles)3458 void vpMbGenericTracker::setAngleAppear(const std::map<std::string, double> &mapOfAngles)
3459 {
3460   for (std::map<std::string, double>::const_iterator it = mapOfAngles.begin(); it != mapOfAngles.end(); ++it) {
3461     std::map<std::string, TrackerWrapper *>::const_iterator it_tracker = m_mapOfTrackers.find(it->first);
3462 
3463     if (it_tracker != m_mapOfTrackers.end()) {
3464       TrackerWrapper *tracker = it_tracker->second;
3465       tracker->setAngleAppear(it->second);
3466 
3467       if (it->first == m_referenceCameraName) {
3468         angleAppears = it->second;
3469       }
3470     }
3471   }
3472 }
3473 
3474 /*!
3475   Set the angle used to test polygons disappearance.
3476   If the angle between the normal of the polygon and the line going
3477   from the camera to the polygon center has a value greater than
3478   this parameter, the polygon is considered as disappearing.
3479   The tracking of the polygon will then be stopped.
3480 
3481   \param a : new angle in radian.
3482 */
setAngleDisappear(const double & a)3483 void vpMbGenericTracker::setAngleDisappear(const double &a)
3484 {
3485   vpMbTracker::setAngleDisappear(a);
3486 
3487   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
3488        it != m_mapOfTrackers.end(); ++it) {
3489     TrackerWrapper *tracker = it->second;
3490     tracker->setAngleDisappear(a);
3491   }
3492 }
3493 
3494 /*!
3495   Set the angle used to test polygons disappearance.
3496   If the angle between the normal of the polygon and the line going
3497   from the camera to the polygon center has a value greater than
3498   this parameter, the polygon is considered as disappearing.
3499   The tracking of the polygon will then be stopped.
3500 
3501   \param a1 : new angle in radian for the first camera.
3502   \param a2 : new angle in radian for the second camera.
3503 
3504   \note This function assumes a stereo configuration of the generic tracker.
3505 */
setAngleDisappear(const double & a1,const double & a2)3506 void vpMbGenericTracker::setAngleDisappear(const double &a1, const double &a2)
3507 {
3508   if (m_mapOfTrackers.size() == 2) {
3509     std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
3510     it->second->setAngleDisappear(a1);
3511 
3512     ++it;
3513     it->second->setAngleDisappear(a2);
3514 
3515     it = m_mapOfTrackers.find(m_referenceCameraName);
3516     if (it != m_mapOfTrackers.end()) {
3517       angleDisappears = it->second->getAngleDisappear();
3518     } else {
3519       std::cerr << "Cannot find the reference camera: " << m_referenceCameraName << "!" << std::endl;
3520     }
3521   } else {
3522     throw vpException(vpTrackingException::fatalError, "Require two cameras! There are %d cameras!",
3523                       m_mapOfTrackers.size());
3524   }
3525 }
3526 
3527 /*!
3528   Set the angle used to test polygons disappearance.
3529   If the angle between the normal of the polygon and the line going
3530   from the camera to the polygon center has a value greater than
3531   this parameter, the polygon is considered as disappearing.
3532   The tracking of the polygon will then be stopped.
3533 
3534   \param mapOfAngles : Map of new angles in radian.
3535 */
setAngleDisappear(const std::map<std::string,double> & mapOfAngles)3536 void vpMbGenericTracker::setAngleDisappear(const std::map<std::string, double> &mapOfAngles)
3537 {
3538   for (std::map<std::string, double>::const_iterator it = mapOfAngles.begin(); it != mapOfAngles.end(); ++it) {
3539     std::map<std::string, TrackerWrapper *>::const_iterator it_tracker = m_mapOfTrackers.find(it->first);
3540 
3541     if (it_tracker != m_mapOfTrackers.end()) {
3542       TrackerWrapper *tracker = it_tracker->second;
3543       tracker->setAngleDisappear(it->second);
3544 
3545       if (it->first == m_referenceCameraName) {
3546         angleDisappears = it->second;
3547       }
3548     }
3549   }
3550 }
3551 
3552 /*!
3553   Set the camera parameters.
3554 
3555   \param camera : the new camera parameters.
3556 */
setCameraParameters(const vpCameraParameters & camera)3557 void vpMbGenericTracker::setCameraParameters(const vpCameraParameters &camera)
3558 {
3559   vpMbTracker::setCameraParameters(camera);
3560 
3561   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
3562        it != m_mapOfTrackers.end(); ++it) {
3563     TrackerWrapper *tracker = it->second;
3564     tracker->setCameraParameters(camera);
3565   }
3566 }
3567 
3568 /*!
3569   Set the camera parameters.
3570 
3571   \param camera1 : the new camera parameters for the first camera.
3572   \param camera2 : the new camera parameters for the second camera.
3573 
3574   \note This function assumes a stereo configuration of the generic tracker.
3575 */
setCameraParameters(const vpCameraParameters & camera1,const vpCameraParameters & camera2)3576 void vpMbGenericTracker::setCameraParameters(const vpCameraParameters &camera1, const vpCameraParameters &camera2)
3577 {
3578   if (m_mapOfTrackers.size() == 2) {
3579     std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
3580     it->second->setCameraParameters(camera1);
3581 
3582     ++it;
3583     it->second->setCameraParameters(camera2);
3584 
3585     it = m_mapOfTrackers.find(m_referenceCameraName);
3586     if (it != m_mapOfTrackers.end()) {
3587       it->second->getCameraParameters(m_cam);
3588     } else {
3589       std::cerr << "Cannot find the reference camera: " << m_referenceCameraName << "!" << std::endl;
3590     }
3591   } else {
3592     throw vpException(vpTrackingException::fatalError, "Require two cameras! There are %d cameras!",
3593                       m_mapOfTrackers.size());
3594   }
3595 }
3596 
3597 /*!
3598   Set the camera parameters.
3599 
3600   \param mapOfCameraParameters : map of new camera parameters.
3601 
3602   \note This function will set the camera parameters only for the supplied
3603   camera names.
3604 */
setCameraParameters(const std::map<std::string,vpCameraParameters> & mapOfCameraParameters)3605 void vpMbGenericTracker::setCameraParameters(const std::map<std::string, vpCameraParameters> &mapOfCameraParameters)
3606 {
3607   for (std::map<std::string, vpCameraParameters>::const_iterator it = mapOfCameraParameters.begin();
3608        it != mapOfCameraParameters.end(); ++it) {
3609     std::map<std::string, TrackerWrapper *>::const_iterator it_tracker = m_mapOfTrackers.find(it->first);
3610 
3611     if (it_tracker != m_mapOfTrackers.end()) {
3612       TrackerWrapper *tracker = it_tracker->second;
3613       tracker->setCameraParameters(it->second);
3614 
3615       if (it->first == m_referenceCameraName) {
3616         m_cam = it->second;
3617       }
3618     }
3619   }
3620 }
3621 
3622 /*!
3623   Set the camera transformation matrix for the specified camera (\f$
3624   _{}^{c_{current}}\textrm{M}_{c_{reference}} \f$).
3625 
3626   \param cameraName : Camera name.
3627   \param cameraTransformationMatrix : Camera transformation matrix between the
3628   current and the reference camera.
3629 */
setCameraTransformationMatrix(const std::string & cameraName,const vpHomogeneousMatrix & cameraTransformationMatrix)3630 void vpMbGenericTracker::setCameraTransformationMatrix(const std::string &cameraName,
3631                                                        const vpHomogeneousMatrix &cameraTransformationMatrix)
3632 {
3633   std::map<std::string, vpHomogeneousMatrix>::iterator it = m_mapOfCameraTransformationMatrix.find(cameraName);
3634 
3635   if (it != m_mapOfCameraTransformationMatrix.end()) {
3636     it->second = cameraTransformationMatrix;
3637   } else {
3638     throw vpException(vpTrackingException::fatalError, "Cannot find camera: %s!", cameraName.c_str());
3639   }
3640 }
3641 
3642 /*!
3643   Set the map of camera transformation matrices
3644   (\f$ _{}^{c_1}\textrm{M}_{c_1}, _{}^{c_2}\textrm{M}_{c_1},
3645   _{}^{c_3}\textrm{M}_{c_1}, \cdots, _{}^{c_n}\textrm{M}_{c_1} \f$).
3646 
3647   \param mapOfTransformationMatrix : map of camera transformation matrices.
3648 */
setCameraTransformationMatrix(const std::map<std::string,vpHomogeneousMatrix> & mapOfTransformationMatrix)3649 void vpMbGenericTracker::setCameraTransformationMatrix(
3650     const std::map<std::string, vpHomogeneousMatrix> &mapOfTransformationMatrix)
3651 {
3652   for (std::map<std::string, vpHomogeneousMatrix>::const_iterator it = mapOfTransformationMatrix.begin();
3653        it != mapOfTransformationMatrix.end(); ++it) {
3654     std::map<std::string, vpHomogeneousMatrix>::iterator it_camTrans =
3655         m_mapOfCameraTransformationMatrix.find(it->first);
3656 
3657     if (it_camTrans != m_mapOfCameraTransformationMatrix.end()) {
3658       it_camTrans->second = it->second;
3659     }
3660   }
3661 }
3662 
3663 /*!
3664   Specify which clipping to use.
3665 
3666   \sa vpMbtPolygonClipping
3667 
3668   \param flags : New clipping flags.
3669 
3670   \note This function will set the new parameter for all the cameras.
3671 */
setClipping(const unsigned int & flags)3672 void vpMbGenericTracker::setClipping(const unsigned int &flags)
3673 {
3674   vpMbTracker::setClipping(flags);
3675 
3676   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
3677        it != m_mapOfTrackers.end(); ++it) {
3678     TrackerWrapper *tracker = it->second;
3679     tracker->setClipping(flags);
3680   }
3681 }
3682 
3683 /*!
3684   Specify which clipping to use.
3685 
3686   \sa vpMbtPolygonClipping
3687 
3688   \param flags1 : New clipping flags for the first camera.
3689   \param flags2 : New clipping flags for the second camera.
3690 
3691   \note This function assumes a stereo configuration of the generic tracker.
3692 */
setClipping(const unsigned int & flags1,const unsigned int & flags2)3693 void vpMbGenericTracker::setClipping(const unsigned int &flags1, const unsigned int &flags2)
3694 {
3695   if (m_mapOfTrackers.size() == 2) {
3696     std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
3697     it->second->setClipping(flags1);
3698 
3699     ++it;
3700     it->second->setClipping(flags2);
3701 
3702     it = m_mapOfTrackers.find(m_referenceCameraName);
3703     if (it != m_mapOfTrackers.end()) {
3704       clippingFlag = it->second->getClipping();
3705     } else {
3706       std::cerr << "Cannot find the reference camera: " << m_referenceCameraName << "!" << std::endl;
3707     }
3708   } else {
3709     std::stringstream ss;
3710     ss << "Require two cameras! There are " << m_mapOfTrackers.size() << " cameras!";
3711     throw vpException(vpTrackingException::fatalError, ss.str());
3712   }
3713 }
3714 
3715 /*!
3716   Specify which clipping to use.
3717 
3718   \sa vpMbtPolygonClipping
3719 
3720   \param mapOfClippingFlags : Map of new clipping flags.
3721 */
setClipping(const std::map<std::string,unsigned int> & mapOfClippingFlags)3722 void vpMbGenericTracker::setClipping(const std::map<std::string, unsigned int> &mapOfClippingFlags)
3723 {
3724   for (std::map<std::string, unsigned int>::const_iterator it = mapOfClippingFlags.begin();
3725        it != mapOfClippingFlags.end(); ++it) {
3726     std::map<std::string, TrackerWrapper *>::const_iterator it_tracker = m_mapOfTrackers.find(it->first);
3727 
3728     if (it_tracker != m_mapOfTrackers.end()) {
3729       TrackerWrapper *tracker = it_tracker->second;
3730       tracker->setClipping(it->second);
3731 
3732       if (it->first == m_referenceCameraName) {
3733         clippingFlag = it->second;
3734       }
3735     }
3736   }
3737 }
3738 
3739 /*!
3740   Set maximum distance to consider a face.
3741   You should use the maximum depth range of the sensor used.
3742 
3743   \param maxDistance : Maximum distance to the face.
3744 
3745   \sa setDepthDenseFilteringMethod
3746   \note This function will set the new parameter for all the cameras.
3747 */
setDepthDenseFilteringMaxDistance(double maxDistance)3748 void vpMbGenericTracker::setDepthDenseFilteringMaxDistance(double maxDistance)
3749 {
3750   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
3751        it != m_mapOfTrackers.end(); ++it) {
3752     TrackerWrapper *tracker = it->second;
3753     tracker->setDepthDenseFilteringMaxDistance(maxDistance);
3754   }
3755 }
3756 
3757 /*!
3758   Set method to discard a face, e.g. if outside of the depth range.
3759 
3760   \param method : Depth dense filtering method.
3761 
3762   \sa vpMbtFaceDepthDense::vpDepthDenseFilteringType
3763   \note This function will set the new parameter for all the cameras.
3764 */
setDepthDenseFilteringMethod(int method)3765 void vpMbGenericTracker::setDepthDenseFilteringMethod(int method)
3766 {
3767   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
3768        it != m_mapOfTrackers.end(); ++it) {
3769     TrackerWrapper *tracker = it->second;
3770     tracker->setDepthDenseFilteringMethod(method);
3771   }
3772 }
3773 
3774 /*!
3775   Set minimum distance to consider a face.
3776   You should use the minimum depth range of the sensor used.
3777 
3778   \param minDistance : Minimum distance to the face.
3779 
3780   \sa setDepthDenseFilteringMethod
3781   \note This function will set the new parameter for all the cameras.
3782 */
setDepthDenseFilteringMinDistance(double minDistance)3783 void vpMbGenericTracker::setDepthDenseFilteringMinDistance(double minDistance)
3784 {
3785   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
3786        it != m_mapOfTrackers.end(); ++it) {
3787     TrackerWrapper *tracker = it->second;
3788     tracker->setDepthDenseFilteringMinDistance(minDistance);
3789   }
3790 }
3791 
3792 /*!
3793   Set depth occupancy ratio to consider a face, used to discard faces where
3794   the depth map is not well reconstructed.
3795 
3796   \param occupancyRatio : Occupancy ratio, between [0 ; 1].
3797 
3798   \sa setDepthDenseFilteringMethod
3799   \note This function will set the new parameter for all the cameras.
3800 */
setDepthDenseFilteringOccupancyRatio(double occupancyRatio)3801 void vpMbGenericTracker::setDepthDenseFilteringOccupancyRatio(double occupancyRatio)
3802 {
3803   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
3804        it != m_mapOfTrackers.end(); ++it) {
3805     TrackerWrapper *tracker = it->second;
3806     tracker->setDepthDenseFilteringOccupancyRatio(occupancyRatio);
3807   }
3808 }
3809 
3810 /*!
3811   Set depth dense sampling step.
3812 
3813   \param stepX : Sampling step in x-direction.
3814   \param stepY : Sampling step in y-direction.
3815 
3816   \note This function will set the new parameter for all the cameras.
3817 */
setDepthDenseSamplingStep(unsigned int stepX,unsigned int stepY)3818 void vpMbGenericTracker::setDepthDenseSamplingStep(unsigned int stepX, unsigned int stepY)
3819 {
3820   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
3821        it != m_mapOfTrackers.end(); ++it) {
3822     TrackerWrapper *tracker = it->second;
3823     tracker->setDepthDenseSamplingStep(stepX, stepY);
3824   }
3825 }
3826 
3827 /*!
3828   Set method to compute the centroid for display for depth tracker.
3829 
3830   \param method : Centroid computation method.
3831 
3832   \note This function will set the new parameter for all the cameras.
3833 */
setDepthNormalFaceCentroidMethod(const vpMbtFaceDepthNormal::vpFaceCentroidType & method)3834 void vpMbGenericTracker::setDepthNormalFaceCentroidMethod(const vpMbtFaceDepthNormal::vpFaceCentroidType &method)
3835 {
3836   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
3837        it != m_mapOfTrackers.end(); ++it) {
3838     TrackerWrapper *tracker = it->second;
3839     tracker->setDepthNormalFaceCentroidMethod(method);
3840   }
3841 }
3842 
3843 /*!
3844   Set depth feature estimation method.
3845 
3846   \param method : Depth feature estimation method.
3847 
3848   \note This function will set the new parameter for all the cameras.
3849 */
setDepthNormalFeatureEstimationMethod(const vpMbtFaceDepthNormal::vpFeatureEstimationType & method)3850 void vpMbGenericTracker::setDepthNormalFeatureEstimationMethod(
3851     const vpMbtFaceDepthNormal::vpFeatureEstimationType &method)
3852 {
3853   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
3854        it != m_mapOfTrackers.end(); ++it) {
3855     TrackerWrapper *tracker = it->second;
3856     tracker->setDepthNormalFeatureEstimationMethod(method);
3857   }
3858 }
3859 
3860 /*!
3861   Set depth PCL plane estimation method.
3862 
3863   \param method : Depth PCL plane estimation method.
3864 
3865   \note This function will set the new parameter for all the cameras.
3866 */
setDepthNormalPclPlaneEstimationMethod(int method)3867 void vpMbGenericTracker::setDepthNormalPclPlaneEstimationMethod(int method)
3868 {
3869   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
3870        it != m_mapOfTrackers.end(); ++it) {
3871     TrackerWrapper *tracker = it->second;
3872     tracker->setDepthNormalPclPlaneEstimationMethod(method);
3873   }
3874 }
3875 
3876 /*!
3877   Set depth PCL RANSAC maximum number of iterations.
3878 
3879   \param maxIter : Depth PCL RANSAC maximum number of iterations.
3880 
3881   \note This function will set the new parameter for all the cameras.
3882 */
setDepthNormalPclPlaneEstimationRansacMaxIter(const int maxIter)3883 void vpMbGenericTracker::setDepthNormalPclPlaneEstimationRansacMaxIter(const int maxIter)
3884 {
3885   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
3886        it != m_mapOfTrackers.end(); ++it) {
3887     TrackerWrapper *tracker = it->second;
3888     tracker->setDepthNormalPclPlaneEstimationRansacMaxIter(maxIter);
3889   }
3890 }
3891 
3892 /*!
3893   Set depth PCL RANSAC threshold.
3894 
3895   \param thresold : Depth PCL RANSAC threshold.
3896 
3897   \note This function will set the new parameter for all the cameras.
3898 */
setDepthNormalPclPlaneEstimationRansacThreshold(double thresold)3899 void vpMbGenericTracker::setDepthNormalPclPlaneEstimationRansacThreshold(double thresold)
3900 {
3901   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
3902        it != m_mapOfTrackers.end(); ++it) {
3903     TrackerWrapper *tracker = it->second;
3904     tracker->setDepthNormalPclPlaneEstimationRansacThreshold(thresold);
3905   }
3906 }
3907 
3908 /*!
3909   Set depth sampling step.
3910 
3911   \param stepX : Sampling step in x-direction.
3912   \param stepY : Sampling step in y-direction.
3913 
3914   \note This function will set the new parameter for all the cameras.
3915 */
setDepthNormalSamplingStep(unsigned int stepX,unsigned int stepY)3916 void vpMbGenericTracker::setDepthNormalSamplingStep(unsigned int stepX, unsigned int stepY)
3917 {
3918   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
3919        it != m_mapOfTrackers.end(); ++it) {
3920     TrackerWrapper *tracker = it->second;
3921     tracker->setDepthNormalSamplingStep(stepX, stepY);
3922   }
3923 }
3924 
3925 /*!
3926   Enable to display the features. By features, we meant the moving edges (ME)
3927   and the klt points if used.
3928 
3929   Note that if present, the moving edges can be displayed with different
3930   colors:
3931   - If green : The ME is a good point.
3932   - If blue : The ME is removed because of a contrast problem during the
3933   tracking phase.
3934   - If purple : The ME is removed because of a threshold problem during the
3935   tracking phase.
3936   - If red : The ME is removed because it is rejected by the robust approach
3937   in the virtual visual servoing scheme.
3938 
3939   \param displayF : set it to true to display the features.
3940 
3941   \note This function will set the new parameter for all the cameras.
3942 */
setDisplayFeatures(bool displayF)3943 void vpMbGenericTracker::setDisplayFeatures(bool displayF)
3944 {
3945   vpMbTracker::setDisplayFeatures(displayF);
3946 
3947   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
3948        it != m_mapOfTrackers.end(); ++it) {
3949     TrackerWrapper *tracker = it->second;
3950     tracker->setDisplayFeatures(displayF);
3951   }
3952 }
3953 
3954 /*!
3955   Set the far distance for clipping.
3956 
3957   \param dist : Far clipping value.
3958 
3959   \note This function will set the new parameter for all the cameras.
3960 */
setFarClippingDistance(const double & dist)3961 void vpMbGenericTracker::setFarClippingDistance(const double &dist)
3962 {
3963   vpMbTracker::setFarClippingDistance(dist);
3964 
3965   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
3966        it != m_mapOfTrackers.end(); ++it) {
3967     TrackerWrapper *tracker = it->second;
3968     tracker->setFarClippingDistance(dist);
3969   }
3970 }
3971 
3972 /*!
3973   Set the far distance for clipping.
3974 
3975   \param dist1 : Far clipping value for the first camera.
3976   \param dist2 : Far clipping value for the second camera.
3977 
3978   \note This function assumes a stereo configuration of the generic tracker.
3979 */
setFarClippingDistance(const double & dist1,const double & dist2)3980 void vpMbGenericTracker::setFarClippingDistance(const double &dist1, const double &dist2)
3981 {
3982   if (m_mapOfTrackers.size() == 2) {
3983     std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
3984     it->second->setFarClippingDistance(dist1);
3985 
3986     ++it;
3987     it->second->setFarClippingDistance(dist2);
3988 
3989     it = m_mapOfTrackers.find(m_referenceCameraName);
3990     if (it != m_mapOfTrackers.end()) {
3991       distFarClip = it->second->getFarClippingDistance();
3992     } else {
3993       std::cerr << "Cannot find the reference camera: " << m_referenceCameraName << "!" << std::endl;
3994     }
3995   } else {
3996     throw vpException(vpTrackingException::fatalError, "Require two cameras! There are %d cameras!",
3997                       m_mapOfTrackers.size());
3998   }
3999 }
4000 
4001 /*!
4002   Set the far distance for clipping.
4003 
4004   \param mapOfClippingDists : Map of far clipping values.
4005 */
setFarClippingDistance(const std::map<std::string,double> & mapOfClippingDists)4006 void vpMbGenericTracker::setFarClippingDistance(const std::map<std::string, double> &mapOfClippingDists)
4007 {
4008   for (std::map<std::string, double>::const_iterator it = mapOfClippingDists.begin(); it != mapOfClippingDists.end();
4009        ++it) {
4010     std::map<std::string, TrackerWrapper *>::const_iterator it_tracker = m_mapOfTrackers.find(it->first);
4011 
4012     if (it_tracker != m_mapOfTrackers.end()) {
4013       TrackerWrapper *tracker = it_tracker->second;
4014       tracker->setFarClippingDistance(it->second);
4015 
4016       if (it->first == m_referenceCameraName) {
4017         distFarClip = it->second;
4018       }
4019     }
4020   }
4021 }
4022 
4023 /*!
4024   Set the feature factors used in the VVS stage (ponderation between the
4025   feature types).
4026 
4027   \param mapOfFeatureFactors : Map of feature factors.
4028 */
setFeatureFactors(const std::map<vpTrackerType,double> & mapOfFeatureFactors)4029 void vpMbGenericTracker::setFeatureFactors(const std::map<vpTrackerType, double> &mapOfFeatureFactors)
4030 {
4031   for (std::map<vpTrackerType, double>::iterator it = m_mapOfFeatureFactors.begin(); it != m_mapOfFeatureFactors.end();
4032        ++it) {
4033     std::map<vpTrackerType, double>::const_iterator it_factor = mapOfFeatureFactors.find(it->first);
4034     if (it_factor != mapOfFeatureFactors.end()) {
4035       it->second = it_factor->second;
4036     }
4037   }
4038 }
4039 
4040 /*!
4041    Set the threshold value between 0 and 1 over good moving edges ratio. It
4042   allows to decide if the tracker has enough valid moving edges to compute a
4043   pose. 1 means that all moving edges should be considered as good to have a
4044   valid pose, while 0.1 means that 10% of the moving edge are enough to
4045   declare a pose valid.
4046 
4047    \param threshold : Value between 0 and 1 that corresponds to the ratio of
4048   good moving edges that is necessary to consider that the estimated pose is
4049   valid. Default value is 0.4.
4050 
4051    \sa getGoodMovingEdgesRatioThreshold()
4052 
4053   \note This function will set the new parameter for all the cameras.
4054 */
setGoodMovingEdgesRatioThreshold(double threshold)4055 void vpMbGenericTracker::setGoodMovingEdgesRatioThreshold(double threshold)
4056 {
4057   m_percentageGdPt = threshold;
4058 
4059   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
4060        it != m_mapOfTrackers.end(); ++it) {
4061     TrackerWrapper *tracker = it->second;
4062     tracker->setGoodMovingEdgesRatioThreshold(threshold);
4063   }
4064 }
4065 
4066 #ifdef VISP_HAVE_OGRE
4067 /*!
4068   Set the ratio of visibility attempts that has to be successful to consider a
4069   polygon as visible.
4070 
4071   \sa setNbRayCastingAttemptsForVisibility(const unsigned int &)
4072 
4073   \param ratio : Ratio of successful attempts that has to be considered. Value
4074   has to be between 0.0 (0%) and 1.0 (100%).
4075 
4076   \note This function will set the new parameter for all the cameras.
4077 */
setGoodNbRayCastingAttemptsRatio(const double & ratio)4078 void vpMbGenericTracker::setGoodNbRayCastingAttemptsRatio(const double &ratio)
4079 {
4080   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
4081        it != m_mapOfTrackers.end(); ++it) {
4082     TrackerWrapper *tracker = it->second;
4083     tracker->setGoodNbRayCastingAttemptsRatio(ratio);
4084   }
4085 }
4086 
4087 /*!
4088   Set the number of rays that will be sent toward each polygon for visibility
4089   test. Each ray will go from the optic center of the camera to a random point
4090   inside the considered polygon.
4091 
4092   \sa setGoodNbRayCastingAttemptsRatio(const double &)
4093 
4094   \param attempts Number of rays to be sent.
4095 
4096   \note This function will set the new parameter for all the cameras.
4097 */
setNbRayCastingAttemptsForVisibility(const unsigned int & attempts)4098 void vpMbGenericTracker::setNbRayCastingAttemptsForVisibility(const unsigned int &attempts)
4099 {
4100   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
4101        it != m_mapOfTrackers.end(); ++it) {
4102     TrackerWrapper *tracker = it->second;
4103     tracker->setNbRayCastingAttemptsForVisibility(attempts);
4104   }
4105 }
4106 #endif
4107 
4108 #if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
4109 /*!
4110   Set the new value of the klt tracker.
4111 
4112   \param t : Klt tracker containing the new values.
4113 
4114   \note This function will set the new parameter for all the cameras.
4115 */
setKltOpencv(const vpKltOpencv & t)4116 void vpMbGenericTracker::setKltOpencv(const vpKltOpencv &t)
4117 {
4118   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
4119        it != m_mapOfTrackers.end(); ++it) {
4120     TrackerWrapper *tracker = it->second;
4121     tracker->setKltOpencv(t);
4122   }
4123 }
4124 
4125 /*!
4126   Set the new value of the klt tracker.
4127 
4128   \param t1 : Klt tracker containing the new values for the first camera.
4129   \param t2 : Klt tracker containing the new values for the second camera.
4130 
4131   \note This function assumes a stereo configuration of the generic tracker.
4132 */
setKltOpencv(const vpKltOpencv & t1,const vpKltOpencv & t2)4133 void vpMbGenericTracker::setKltOpencv(const vpKltOpencv &t1, const vpKltOpencv &t2)
4134 {
4135   if (m_mapOfTrackers.size() == 2) {
4136     std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
4137     it->second->setKltOpencv(t1);
4138 
4139     ++it;
4140     it->second->setKltOpencv(t2);
4141   } else {
4142     throw vpException(vpTrackingException::fatalError, "Require two cameras! There are %d cameras!",
4143                       m_mapOfTrackers.size());
4144   }
4145 }
4146 
4147 /*!
4148   Set the new value of the klt tracker.
4149 
4150   \param mapOfKlts : Map of klt tracker containing the new values.
4151 */
setKltOpencv(const std::map<std::string,vpKltOpencv> & mapOfKlts)4152 void vpMbGenericTracker::setKltOpencv(const std::map<std::string, vpKltOpencv> &mapOfKlts)
4153 {
4154   for (std::map<std::string, vpKltOpencv>::const_iterator it = mapOfKlts.begin(); it != mapOfKlts.end(); ++it) {
4155     std::map<std::string, TrackerWrapper *>::const_iterator it_tracker = m_mapOfTrackers.find(it->first);
4156 
4157     if (it_tracker != m_mapOfTrackers.end()) {
4158       TrackerWrapper *tracker = it_tracker->second;
4159       tracker->setKltOpencv(it->second);
4160     }
4161   }
4162 }
4163 
4164 /*!
4165   Set the threshold for the acceptation of a point.
4166 
4167   \param th : Threshold for the weight below which a point is rejected.
4168 
4169   \note This function will set the new parameter for all the cameras.
4170 */
setKltThresholdAcceptation(double th)4171 void vpMbGenericTracker::setKltThresholdAcceptation(double th)
4172 {
4173   m_thresholdOutlier = th;
4174 
4175   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
4176        it != m_mapOfTrackers.end(); ++it) {
4177     TrackerWrapper *tracker = it->second;
4178     tracker->setKltThresholdAcceptation(th);
4179   }
4180 }
4181 #endif
4182 
4183 /*!
4184   Set the flag to consider if the level of detail (LOD) is used.
4185 
4186   \param useLod : true if the level of detail must be used, false otherwise.
4187   When true, two parameters can be set, see setMinLineLengthThresh() and
4188   setMinPolygonAreaThresh().
4189   \param name : name of the face we want to modify the LOD parameter.
4190 
4191   \sa setMinLineLengthThresh(), setMinPolygonAreaThresh()
4192 
4193   \note This function will set the new parameter for all the cameras.
4194 */
setLod(bool useLod,const std::string & name)4195 void vpMbGenericTracker::setLod(bool useLod, const std::string &name)
4196 {
4197   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
4198        it != m_mapOfTrackers.end(); ++it) {
4199     TrackerWrapper *tracker = it->second;
4200     tracker->setLod(useLod, name);
4201   }
4202 }
4203 
4204 #if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
4205 /*!
4206   Set the erosion of the mask used on the Model faces.
4207 
4208   \param e : The desired erosion.
4209 
4210   \note This function will set the new parameter for all the cameras.
4211 */
setKltMaskBorder(const unsigned int & e)4212 void vpMbGenericTracker::setKltMaskBorder(const unsigned int &e)
4213 {
4214   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
4215        it != m_mapOfTrackers.end(); ++it) {
4216     TrackerWrapper *tracker = it->second;
4217     tracker->setKltMaskBorder(e);
4218   }
4219 }
4220 
4221 /*!
4222   Set the erosion of the mask used on the Model faces.
4223 
4224   \param e1 : The desired erosion for the first camera.
4225   \param e2 : The desired erosion for the second camera.
4226 
4227   \note This function assumes a stereo configuration of the generic tracker.
4228 */
setKltMaskBorder(const unsigned int & e1,const unsigned int & e2)4229 void vpMbGenericTracker::setKltMaskBorder(const unsigned int &e1, const unsigned int &e2)
4230 {
4231   if (m_mapOfTrackers.size() == 2) {
4232     std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
4233     it->second->setKltMaskBorder(e1);
4234 
4235     ++it;
4236 
4237     it->second->setKltMaskBorder(e2);
4238   } else {
4239     throw vpException(vpTrackingException::fatalError, "Require two cameras! There are %d cameras!",
4240                       m_mapOfTrackers.size());
4241   }
4242 }
4243 
4244 /*!
4245   Set the erosion of the mask used on the Model faces.
4246 
4247   \param mapOfErosions : Map of desired erosions.
4248 */
setKltMaskBorder(const std::map<std::string,unsigned int> & mapOfErosions)4249 void vpMbGenericTracker::setKltMaskBorder(const std::map<std::string, unsigned int> &mapOfErosions)
4250 {
4251   for (std::map<std::string, unsigned int>::const_iterator it = mapOfErosions.begin(); it != mapOfErosions.end();
4252        ++it) {
4253     std::map<std::string, TrackerWrapper *>::const_iterator it_tracker = m_mapOfTrackers.find(it->first);
4254 
4255     if (it_tracker != m_mapOfTrackers.end()) {
4256       TrackerWrapper *tracker = it_tracker->second;
4257       tracker->setKltMaskBorder(it->second);
4258     }
4259   }
4260 }
4261 #endif
4262 
4263 /*!
4264   Set the visibility mask.
4265 
4266   \param mask : visibility mask.
4267 */
setMask(const vpImage<bool> & mask)4268 void vpMbGenericTracker::setMask(const vpImage<bool> &mask)
4269 {
4270   vpMbTracker::setMask(mask);
4271 
4272   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
4273     it != m_mapOfTrackers.end(); ++it) {
4274     TrackerWrapper *tracker = it->second;
4275     tracker->setMask(mask);
4276   }
4277 }
4278 
4279 
4280 /*!
4281   Set the threshold for the minimum line length to be considered as visible in
4282   the LOD case.
4283 
4284   \param minLineLengthThresh : threshold for the minimum line length in pixel.
4285   \param name : name of the face we want to modify the LOD threshold.
4286 
4287   \sa setLod(), setMinPolygonAreaThresh()
4288 
4289   \note This function will set the new parameter for all the cameras.
4290 */
setMinLineLengthThresh(double minLineLengthThresh,const std::string & name)4291 void vpMbGenericTracker::setMinLineLengthThresh(double minLineLengthThresh, const std::string &name)
4292 {
4293   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
4294        it != m_mapOfTrackers.end(); ++it) {
4295     TrackerWrapper *tracker = it->second;
4296     tracker->setMinLineLengthThresh(minLineLengthThresh, name);
4297   }
4298 }
4299 
4300 /*!
4301   Set the minimum polygon area to be considered as visible in the LOD case.
4302 
4303   \param minPolygonAreaThresh : threshold for the minimum polygon area in pixel.
4304   \param name : name of the face we want to modify the LOD threshold.
4305 
4306   \sa setLod(), setMinLineLengthThresh()
4307 
4308   \note This function will set the new parameter for all the cameras.
4309 */
setMinPolygonAreaThresh(double minPolygonAreaThresh,const std::string & name)4310 void vpMbGenericTracker::setMinPolygonAreaThresh(double minPolygonAreaThresh, const std::string &name)
4311 {
4312   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
4313        it != m_mapOfTrackers.end(); ++it) {
4314     TrackerWrapper *tracker = it->second;
4315     tracker->setMinPolygonAreaThresh(minPolygonAreaThresh, name);
4316   }
4317 }
4318 
4319 /*!
4320   Set the moving edge parameters.
4321 
4322   \param me : an instance of vpMe containing all the desired parameters.
4323 
4324   \note This function will set the new parameter for all the cameras.
4325 */
setMovingEdge(const vpMe & me)4326 void vpMbGenericTracker::setMovingEdge(const vpMe &me)
4327 {
4328   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
4329        it != m_mapOfTrackers.end(); ++it) {
4330     TrackerWrapper *tracker = it->second;
4331     tracker->setMovingEdge(me);
4332   }
4333 }
4334 
4335 /*!
4336   Set the moving edge parameters.
4337 
4338   \param me1 : an instance of vpMe containing all the desired parameters for the first camera.
4339   \param me2 : an instance of vpMe containing all the desired parameters for the second camera.
4341 
4342   \note This function assumes a stereo configuration of the generic tracker.
4343 */
setMovingEdge(const vpMe & me1,const vpMe & me2)4344 void vpMbGenericTracker::setMovingEdge(const vpMe &me1, const vpMe &me2)
4345 {
4346   if (m_mapOfTrackers.size() == 2) {
4347     std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
4348     it->second->setMovingEdge(me1);
4349 
4350     ++it;
4351 
4352     it->second->setMovingEdge(me2);
4353   } else {
4354     throw vpException(vpTrackingException::fatalError, "Require two cameras! There are %d cameras!",
4355                       m_mapOfTrackers.size());
4356   }
4357 }
4358 
4359 /*!
4360   Set the moving edge parameters.
4361 
4362   \param mapOfMe : Map of vpMe containing all the desired parameters.
4363 */
setMovingEdge(const std::map<std::string,vpMe> & mapOfMe)4364 void vpMbGenericTracker::setMovingEdge(const std::map<std::string, vpMe> &mapOfMe)
4365 {
4366   for (std::map<std::string, vpMe>::const_iterator it = mapOfMe.begin(); it != mapOfMe.end(); ++it) {
4367     std::map<std::string, TrackerWrapper *>::const_iterator it_tracker = m_mapOfTrackers.find(it->first);
4368 
4369     if (it_tracker != m_mapOfTrackers.end()) {
4370       TrackerWrapper *tracker = it_tracker->second;
4371       tracker->setMovingEdge(it->second);
4372     }
4373   }
4374 }
4375 
4376 /*!
4377   Set the near distance for clipping.
4378 
4379   \param dist : Near clipping value.
4380 
4381   \note This function will set the new parameter for all the cameras.
4382 */
setNearClippingDistance(const double & dist)4383 void vpMbGenericTracker::setNearClippingDistance(const double &dist)
4384 {
4385   vpMbTracker::setNearClippingDistance(dist);
4386 
4387   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
4388        it != m_mapOfTrackers.end(); ++it) {
4389     TrackerWrapper *tracker = it->second;
4390     tracker->setNearClippingDistance(dist);
4391   }
4392 }
4393 
4394 /*!
4395   Set the near distance for clipping.
4396 
4397   \param dist1 : Near clipping value for the first camera.
4398   \param dist2 : Near clipping value for the second camera.
4399 
4400   \note This function assumes a stereo configuration of the generic tracker.
4401 */
setNearClippingDistance(const double & dist1,const double & dist2)4402 void vpMbGenericTracker::setNearClippingDistance(const double &dist1, const double &dist2)
4403 {
4404   if (m_mapOfTrackers.size() == 2) {
4405     std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
4406     it->second->setNearClippingDistance(dist1);
4407 
4408     ++it;
4409 
4410     it->second->setNearClippingDistance(dist2);
4411 
4412     it = m_mapOfTrackers.find(m_referenceCameraName);
4413     if (it != m_mapOfTrackers.end()) {
4414       distNearClip = it->second->getNearClippingDistance();
4415     } else {
4416       std::cerr << "Cannot find the reference camera: " << m_referenceCameraName << "!" << std::endl;
4417     }
4418   } else {
4419     throw vpException(vpTrackingException::fatalError, "Require two cameras! There are %d cameras!",
4420                       m_mapOfTrackers.size());
4421   }
4422 }
4423 
4424 /*!
4425   Set the near distance for clipping.
4426 
4427   \param mapOfDists : Map of near clipping values.
4428 */
setNearClippingDistance(const std::map<std::string,double> & mapOfDists)4429 void vpMbGenericTracker::setNearClippingDistance(const std::map<std::string, double> &mapOfDists)
4430 {
4431   for (std::map<std::string, double>::const_iterator it = mapOfDists.begin(); it != mapOfDists.end(); ++it) {
4432     std::map<std::string, TrackerWrapper *>::const_iterator it_tracker = m_mapOfTrackers.find(it->first);
4433 
4434     if (it_tracker != m_mapOfTrackers.end()) {
4435       TrackerWrapper *tracker = it_tracker->second;
4436       tracker->setNearClippingDistance(it->second);
4437 
4438       if (it->first == m_referenceCameraName) {
4439         distNearClip = it->second;
4440       }
4441     }
4442   }
4443 }
4444 
4445 /*!
4446   Enable/Disable the appearance of Ogre config dialog on startup.
4447 
4448   \warning This method has only effect when Ogre is used and Ogre visibility
4449   test is enabled using setOgreVisibilityTest() with true parameter.
4450 
4451   \param showConfigDialog : if true, shows Ogre dialog window (used to set
4452   Ogre rendering options) when Ogre visibility is enabled. By default, this
4453   functionality is turned off.
4454 
4455   \note This function will set the new parameter for all the cameras.
4456 */
setOgreShowConfigDialog(bool showConfigDialog)4457 void vpMbGenericTracker::setOgreShowConfigDialog(bool showConfigDialog)
4458 {
4459   vpMbTracker::setOgreShowConfigDialog(showConfigDialog);
4460 
4461   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
4462        it != m_mapOfTrackers.end(); ++it) {
4463     TrackerWrapper *tracker = it->second;
4464     tracker->setOgreShowConfigDialog(showConfigDialog);
4465   }
4466 }
4467 
4468 /*!
4469   Use Ogre3D for visibility tests
4470 
4471   \warning This function has to be called before the initialization of the
4472   tracker.
4473 
4474   \param v : True to use it, False otherwise
4475 
4476   \note This function will set the new parameter for all the cameras.
4477 */
setOgreVisibilityTest(const bool & v)4478 void vpMbGenericTracker::setOgreVisibilityTest(const bool &v)
4479 {
4480   vpMbTracker::setOgreVisibilityTest(v);
4481 
4482   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
4483        it != m_mapOfTrackers.end(); ++it) {
4484     TrackerWrapper *tracker = it->second;
4485     tracker->setOgreVisibilityTest(v);
4486   }
4487 
4488 #ifdef VISP_HAVE_OGRE
4489   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
4490        it != m_mapOfTrackers.end(); ++it) {
4491     TrackerWrapper *tracker = it->second;
4492     tracker->faces.getOgreContext()->setWindowName("Multi Generic MBT (" + it->first + ")");
4493   }
4494 #endif
4495 }
4496 
4497 /*!
4498   Set the optimization method used during the tracking.
4499 
4500   \param opt : Optimization method to use (see vpMbtOptimizationMethod).
4501 
4502   \note This function will set the new parameter for all the cameras.
4503 */
setOptimizationMethod(const vpMbtOptimizationMethod & opt)4504 void vpMbGenericTracker::setOptimizationMethod(const vpMbtOptimizationMethod &opt)
4505 {
4506   vpMbTracker::setOptimizationMethod(opt);
4507 
4508   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
4509        it != m_mapOfTrackers.end(); ++it) {
4510     TrackerWrapper *tracker = it->second;
4511     tracker->setOptimizationMethod(opt);
4512   }
4513 }
4514 
4515 /*!
4516   Set the pose to be used in entry (as guess) of the next call to the track()
4517   function. This pose will be just used once.
4518 
4519   \warning This functionality is not available when tracking cylinders with
4520   the KLT tracking.
4521 
4522   \param I : grayscale image corresponding to the desired pose.
4523   \param cdMo : Pose to affect.
4524 
4525   \note This function will set the new parameter for all the cameras.
4526 */
setPose(const vpImage<unsigned char> & I,const vpHomogeneousMatrix & cdMo)4527 void vpMbGenericTracker::setPose(const vpImage<unsigned char> &I, const vpHomogeneousMatrix &cdMo)
4528 {
4529   if (m_mapOfTrackers.size() > 1) {
4530     throw vpException(vpTrackingException::initializationError, "The function setPose() requires the generic tracker "
4531                                                                 "to be configured with only one camera!");
4532   }
4533 
4534   m_cMo = cdMo;
4535 
4536   std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.find(m_referenceCameraName);
4537   if (it != m_mapOfTrackers.end()) {
4538     TrackerWrapper *tracker = it->second;
4539     tracker->setPose(I, cdMo);
4540   } else {
4541     throw vpException(vpTrackingException::initializationError, "The reference camera: %s does not exist!",
4542                       m_referenceCameraName.c_str());
4543   }
4544 }
4545 
4546 /*!
4547   Set the pose to be used in entry (as guess) of the next call to the track()
4548   function. This pose will be just used once.
4549 
4550   \warning This functionality is not available when tracking cylinders with
4551   the KLT tracking.
4552 
4553   \param I_color : color image corresponding to the desired pose.
4554   \param cdMo : Pose to affect.
4555 
4556   \note This function will set the new parameter for all the cameras.
4557 */
setPose(const vpImage<vpRGBa> & I_color,const vpHomogeneousMatrix & cdMo)4558 void vpMbGenericTracker::setPose(const vpImage<vpRGBa> &I_color, const vpHomogeneousMatrix &cdMo)
4559 {
4560   if (m_mapOfTrackers.size() > 1) {
4561     throw vpException(vpTrackingException::initializationError, "The function setPose() requires the generic tracker "
4562                                                                 "to be configured with only one camera!");
4563   }
4564 
4565   m_cMo = cdMo;
4566 
4567   std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.find(m_referenceCameraName);
4568   if (it != m_mapOfTrackers.end()) {
4569     TrackerWrapper *tracker = it->second;
4570     vpImageConvert::convert(I_color, m_I);
4571     tracker->setPose(m_I, cdMo);
4572   } else {
4573     throw vpException(vpTrackingException::initializationError, "The reference camera: %s does not exist!",
4574                       m_referenceCameraName.c_str());
4575   }
4576 }
4577 
4578 /*!
4579   Set the pose to be used in entry of the next call to the track() function.
4580   This pose will be just used once.
4581 
4582   \param I1 : First grayscale image corresponding to the desired pose.
4583   \param I2 : Second grayscale image corresponding to the desired pose.
4584   \param c1Mo : First pose to affect.
4585   \param c2Mo : Second pose to affect.
4586 
4587   \note This function assumes a stereo configuration of the generic tracker.
4588 */
setPose(const vpImage<unsigned char> & I1,const vpImage<unsigned char> & I2,const vpHomogeneousMatrix & c1Mo,const vpHomogeneousMatrix & c2Mo)4589 void vpMbGenericTracker::setPose(const vpImage<unsigned char> &I1, const vpImage<unsigned char> &I2,
4590                                  const vpHomogeneousMatrix &c1Mo, const vpHomogeneousMatrix &c2Mo)
4591 {
4592   if (m_mapOfTrackers.size() == 2) {
4593     std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
4594     it->second->setPose(I1, c1Mo);
4595 
4596     ++it;
4597 
4598     it->second->setPose(I2, c2Mo);
4599 
4600     it = m_mapOfTrackers.find(m_referenceCameraName);
4601     if (it != m_mapOfTrackers.end()) {
4602       // Set reference pose
4603       it->second->getPose(m_cMo);
4604     } else {
4605       throw vpException(vpTrackingException::fatalError, "The reference camera: %s does not exist!",
4606                         m_referenceCameraName.c_str());
4607     }
4608   } else {
4609     throw vpException(vpTrackingException::fatalError, "Require two cameras! There are %d cameras!",
4610                       m_mapOfTrackers.size());
4611   }
4612 }
4613 
4614 /*!
4615   Set the pose to be used in entry of the next call to the track() function.
4616   This pose will be just used once.
4617 
4618   \param I_color1 : First color image corresponding to the desired pose.
4619   \param I_color2 : Second color image corresponding to the desired pose.
4620   \param c1Mo : First pose to affect.
4621   \param c2Mo : Second pose to affect.
4622 
4623   \note This function assumes a stereo configuration of the generic tracker.
4624 */
setPose(const vpImage<vpRGBa> & I_color1,const vpImage<vpRGBa> & I_color2,const vpHomogeneousMatrix & c1Mo,const vpHomogeneousMatrix & c2Mo)4625 void vpMbGenericTracker::setPose(const vpImage<vpRGBa> &I_color1, const vpImage<vpRGBa> &I_color2,
4626                                  const vpHomogeneousMatrix &c1Mo, const vpHomogeneousMatrix &c2Mo)
4627 {
4628   if (m_mapOfTrackers.size() == 2) {
4629     std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
4630     it->second->setPose(I_color1, c1Mo);
4631 
4632     ++it;
4633 
4634     it->second->setPose(I_color2, c2Mo);
4635 
4636     it = m_mapOfTrackers.find(m_referenceCameraName);
4637     if (it != m_mapOfTrackers.end()) {
4638       // Set reference pose
4639       it->second->getPose(m_cMo);
4640     } else {
4641       throw vpException(vpTrackingException::fatalError, "The reference camera: %s does not exist!",
4642                         m_referenceCameraName.c_str());
4643     }
4644   } else {
4645     throw vpException(vpTrackingException::fatalError, "Require two cameras! There are %d cameras!",
4646                       m_mapOfTrackers.size());
4647   }
4648 }
4649 
4650 /*!
4651   Set the pose to be used in entry of the next call to the track() function.
4652   This pose will be just used once.
4653   The camera transformation matrices have to be set before.
4654 
4655   \param mapOfImages : Map of grayscale images.
4656   \param mapOfCameraPoses : Map of pose to affect to the cameras.
4657 
4658   \note Image and camera pose must be supplied for the reference camera. The
4659   images for all the cameras must be supplied to correctly initialize the
4660   trackers but some camera poses can be omitted. In this case, they will be
4661   initialized using the pose computed from the reference camera pose and using
4662   the known geometric transformation between each camera (see
4663   setCameraTransformationMatrix()).
4664 */
void vpMbGenericTracker::setPose(const std::map<std::string, const vpImage<unsigned char> *> &mapOfImages,
                                 const std::map<std::string, vpHomogeneousMatrix> &mapOfCameraPoses)
{
  // Set the reference cMo
  // Phase 1: the reference camera MUST have both an image and a pose; its pose
  // becomes m_cMo and is the base for deriving any omitted camera poses below.
  std::map<std::string, TrackerWrapper *>::const_iterator it_tracker = m_mapOfTrackers.find(m_referenceCameraName);
  std::map<std::string, const vpImage<unsigned char> *>::const_iterator it_img =
      mapOfImages.find(m_referenceCameraName);
  std::map<std::string, vpHomogeneousMatrix>::const_iterator it_camPose = mapOfCameraPoses.find(m_referenceCameraName);

  if (it_tracker != m_mapOfTrackers.end() && it_img != mapOfImages.end() && it_camPose != mapOfCameraPoses.end()) {
    TrackerWrapper *tracker = it_tracker->second;
    tracker->setPose(*it_img->second, it_camPose->second);
    tracker->getPose(m_cMo);
  } else {
    throw vpException(vpTrackingException::fatalError, "Cannot set pose for the reference camera!");
  }

  // Vector of missing pose matrices for cameras
  std::vector<std::string> vectorOfMissingCameraPoses;

  // Set pose for the specified cameras
  // Phase 2: apply the poses that were explicitly supplied; remember the
  // cameras whose pose (or image) was omitted for phase 3.
  for (it_tracker = m_mapOfTrackers.begin(); it_tracker != m_mapOfTrackers.end(); ++it_tracker) {
    if (it_tracker->first != m_referenceCameraName) {
      it_img = mapOfImages.find(it_tracker->first);
      it_camPose = mapOfCameraPoses.find(it_tracker->first);

      if (it_img != mapOfImages.end() && it_camPose != mapOfCameraPoses.end()) {
        // Set pose
        TrackerWrapper *tracker = it_tracker->second;
        tracker->setPose(*it_img->second, it_camPose->second);
      } else {
        vectorOfMissingCameraPoses.push_back(it_tracker->first);
      }
    }
  }

  // Phase 3: derive each missing pose from the reference pose m_cMo and the
  // known camera-to-camera transformation (see setCameraTransformationMatrix()).
  // An image is still required for every camera.
  for (std::vector<std::string>::const_iterator it = vectorOfMissingCameraPoses.begin();
       it != vectorOfMissingCameraPoses.end(); ++it) {
    it_img = mapOfImages.find(*it);
    std::map<std::string, vpHomogeneousMatrix>::const_iterator it_camTrans =
        m_mapOfCameraTransformationMatrix.find(*it);

    if (it_img != mapOfImages.end() && it_camTrans != m_mapOfCameraTransformationMatrix.end()) {
      vpHomogeneousMatrix cCurrentMo = it_camTrans->second * m_cMo;
      m_mapOfTrackers[*it]->setPose(*it_img->second, cCurrentMo);
    } else {
      throw vpException(vpTrackingException::fatalError,
                        "Missing image or missing camera transformation "
                        "matrix! Cannot set pose for camera: %s!",
                        it->c_str());
    }
  }
}
4718 
4719 /*!
4720   Set the pose to be used in entry of the next call to the track() function.
4721   This pose will be just used once.
4722   The camera transformation matrices have to be set before.
4723 
4724   \param mapOfColorImages : Map of color images.
4725   \param mapOfCameraPoses : Map of pose to affect to the cameras.
4726 
4727   \note Image and camera pose must be supplied for the reference camera. The
4728   images for all the cameras must be supplied to correctly initialize the
4729   trackers but some camera poses can be omitted. In this case, they will be
4730   initialized using the pose computed from the reference camera pose and using
4731   the known geometric transformation between each camera (see
4732   setCameraTransformationMatrix()).
4733 */
setPose(const std::map<std::string,const vpImage<vpRGBa> * > & mapOfColorImages,const std::map<std::string,vpHomogeneousMatrix> & mapOfCameraPoses)4734 void vpMbGenericTracker::setPose(const std::map<std::string, const vpImage<vpRGBa> *> &mapOfColorImages,
4735                                  const std::map<std::string, vpHomogeneousMatrix> &mapOfCameraPoses)
4736 {
4737   // Set the reference cMo
4738   std::map<std::string, TrackerWrapper *>::const_iterator it_tracker = m_mapOfTrackers.find(m_referenceCameraName);
4739   std::map<std::string, const vpImage<vpRGBa> *>::const_iterator it_img =
4740       mapOfColorImages.find(m_referenceCameraName);
4741   std::map<std::string, vpHomogeneousMatrix>::const_iterator it_camPose = mapOfCameraPoses.find(m_referenceCameraName);
4742 
4743   if (it_tracker != m_mapOfTrackers.end() && it_img != mapOfColorImages.end() && it_camPose != mapOfCameraPoses.end()) {
4744     TrackerWrapper *tracker = it_tracker->second;
4745     tracker->setPose(*it_img->second, it_camPose->second);
4746     tracker->getPose(m_cMo);
4747   } else {
4748     throw vpException(vpTrackingException::fatalError, "Cannot set pose for the reference camera!");
4749   }
4750 
4751   // Vector of missing pose matrices for cameras
4752   std::vector<std::string> vectorOfMissingCameraPoses;
4753 
4754   // Set pose for the specified cameras
4755   for (it_tracker = m_mapOfTrackers.begin(); it_tracker != m_mapOfTrackers.end(); ++it_tracker) {
4756     if (it_tracker->first != m_referenceCameraName) {
4757       it_img = mapOfColorImages.find(it_tracker->first);
4758       it_camPose = mapOfCameraPoses.find(it_tracker->first);
4759 
4760       if (it_img != mapOfColorImages.end() && it_camPose != mapOfCameraPoses.end()) {
4761         // Set pose
4762         TrackerWrapper *tracker = it_tracker->second;
4763         tracker->setPose(*it_img->second, it_camPose->second);
4764       } else {
4765         vectorOfMissingCameraPoses.push_back(it_tracker->first);
4766       }
4767     }
4768   }
4769 
4770   for (std::vector<std::string>::const_iterator it = vectorOfMissingCameraPoses.begin();
4771        it != vectorOfMissingCameraPoses.end(); ++it) {
4772     it_img = mapOfColorImages.find(*it);
4773     std::map<std::string, vpHomogeneousMatrix>::const_iterator it_camTrans =
4774         m_mapOfCameraTransformationMatrix.find(*it);
4775 
4776     if (it_img != mapOfColorImages.end() && it_camTrans != m_mapOfCameraTransformationMatrix.end()) {
4777       vpHomogeneousMatrix cCurrentMo = it_camTrans->second * m_cMo;
4778       m_mapOfTrackers[*it]->setPose(*it_img->second, cCurrentMo);
4779     } else {
4780       throw vpException(vpTrackingException::fatalError,
4781                         "Missing image or missing camera transformation "
4782                         "matrix! Cannot set pose for camera: %s!",
4783                         it->c_str());
4784     }
4785   }
4786 }
4787 
4788 /*!
4789   Set if the projection error criteria has to be computed. This criteria could
4790   be used to detect the quality of the tracking. It computes an angle between
4791   0 and 90 degrees that is available with getProjectionError(). Closer to 0 is
4792   the value, better is the tracking.
4793 
4794   \param flag : True if the projection error criteria has to be computed,
4795   false otherwise.
4796 
4797   \sa getProjectionError()
4798 
4799   \note Available only if the edge features are used (e.g. Edge tracking or
4800   Edge + KLT tracking). Otherwise, the value of 90 degrees will be returned.
4801 */
setProjectionErrorComputation(const bool & flag)4802 void vpMbGenericTracker::setProjectionErrorComputation(const bool &flag)
4803 {
4804   vpMbTracker::setProjectionErrorComputation(flag);
4805 
4806   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
4807        it != m_mapOfTrackers.end(); ++it) {
4808     TrackerWrapper *tracker = it->second;
4809     tracker->setProjectionErrorComputation(flag);
4810   }
4811 }
4812 
4813 /*!
4814   Display or not gradient and model orientation when computing the projection error.
4815 */
setProjectionErrorDisplay(bool display)4816 void vpMbGenericTracker::setProjectionErrorDisplay(bool display)
4817 {
4818   vpMbTracker::setProjectionErrorDisplay(display);
4819 
4820   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
4821        it != m_mapOfTrackers.end(); ++it) {
4822     TrackerWrapper *tracker = it->second;
4823     tracker->setProjectionErrorDisplay(display);
4824   }
4825 }
4826 
4827 /*!
4828   Arrow length used to display gradient and model orientation for projection error computation.
4829 */
setProjectionErrorDisplayArrowLength(unsigned int length)4830 void vpMbGenericTracker::setProjectionErrorDisplayArrowLength(unsigned int length)
4831 {
4832   vpMbTracker::setProjectionErrorDisplayArrowLength(length);
4833 
4834   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
4835        it != m_mapOfTrackers.end(); ++it) {
4836     TrackerWrapper *tracker = it->second;
4837     tracker->setProjectionErrorDisplayArrowLength(length);
4838   }
4839 }
4840 
setProjectionErrorDisplayArrowThickness(unsigned int thickness)4841 void vpMbGenericTracker::setProjectionErrorDisplayArrowThickness(unsigned int thickness)
4842 {
4843   vpMbTracker::setProjectionErrorDisplayArrowThickness(thickness);
4844 
4845   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
4846        it != m_mapOfTrackers.end(); ++it) {
4847     TrackerWrapper *tracker = it->second;
4848     tracker->setProjectionErrorDisplayArrowThickness(thickness);
4849   }
4850 }
4851 
4852 /*!
4853   Set the reference camera name.
4854 
4855   \param referenceCameraName : Name of the reference camera.
4856 */
setReferenceCameraName(const std::string & referenceCameraName)4857 void vpMbGenericTracker::setReferenceCameraName(const std::string &referenceCameraName)
4858 {
4859   std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.find(referenceCameraName);
4860   if (it != m_mapOfTrackers.end()) {
4861     m_referenceCameraName = referenceCameraName;
4862   } else {
4863     std::cerr << "The reference camera: " << referenceCameraName << " does not exist!";
4864   }
4865 }
4866 
setScanLineVisibilityTest(const bool & v)4867 void vpMbGenericTracker::setScanLineVisibilityTest(const bool &v)
4868 {
4869   vpMbTracker::setScanLineVisibilityTest(v);
4870 
4871   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
4872        it != m_mapOfTrackers.end(); ++it) {
4873     TrackerWrapper *tracker = it->second;
4874     tracker->setScanLineVisibilityTest(v);
4875   }
4876 }
4877 
4878 /*!
4879   Set the tracker type.
4880 
4881   \param type : Type of features to used, see vpTrackerType (e.g.
4882   vpMbGenericTracker::EDGE_TRACKER or vpMbGenericTracker::EDGE_TRACKER |
4883   vpMbGenericTracker::KLT_TRACKER).
4884 
4885   \note This function will set the new parameter for all the cameras.
4886 
4887   \warning This function has to be called before the loading of the CAD model.
4888 */
setTrackerType(int type)4889 void vpMbGenericTracker::setTrackerType(int type)
4890 {
4891   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
4892        it != m_mapOfTrackers.end(); ++it) {
4893     TrackerWrapper *tracker = it->second;
4894     tracker->setTrackerType(type);
4895   }
4896 }
4897 
4898 /*!
4899   Set the tracker types.
4900 
4901   \param mapOfTrackerTypes : Map of feature types to used, see vpTrackerType
4902   (e.g. vpMbGenericTracker::EDGE_TRACKER or vpMbGenericTracker::EDGE_TRACKER |
4903   vpMbGenericTracker::KLT_TRACKER).
4904 
4905   \warning This function has to be called before the loading of the CAD model.
4906 */
setTrackerType(const std::map<std::string,int> & mapOfTrackerTypes)4907 void vpMbGenericTracker::setTrackerType(const std::map<std::string, int> &mapOfTrackerTypes)
4908 {
4909   for (std::map<std::string, int>::const_iterator it = mapOfTrackerTypes.begin(); it != mapOfTrackerTypes.end(); ++it) {
4910     std::map<std::string, TrackerWrapper *>::const_iterator it_tracker = m_mapOfTrackers.find(it->first);
4911     if (it_tracker != m_mapOfTrackers.end()) {
4912       TrackerWrapper *tracker = it_tracker->second;
4913       tracker->setTrackerType(it->second);
4914     }
4915   }
4916 }
4917 
4918 /*!
4919   Set if the polygon that has the given name has to be considered during
4920   the tracking phase.
4921 
4922   \param name : name of the polygon.
4923   \param useDepthDenseTracking : True if it has to be considered, False otherwise.
4924 
4925   \note This function will set the new parameter for all the cameras.
4926 */
setUseDepthDenseTracking(const std::string & name,const bool & useDepthDenseTracking)4927 void vpMbGenericTracker::setUseDepthDenseTracking(const std::string &name, const bool &useDepthDenseTracking)
4928 {
4929   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
4930        it != m_mapOfTrackers.end(); ++it) {
4931     TrackerWrapper *tracker = it->second;
4932     tracker->setUseDepthDenseTracking(name, useDepthDenseTracking);
4933   }
4934 }
4935 
4936 /*!
4937   Set if the polygon that has the given name has to be considered during
4938   the tracking phase.
4939 
4940   \param name : name of the polygon.
4941   \param useDepthNormalTracking : True if it has to be considered, False otherwise.
4942 
4943   \note This function will set the new parameter for all the cameras.
4944 */
setUseDepthNormalTracking(const std::string & name,const bool & useDepthNormalTracking)4945 void vpMbGenericTracker::setUseDepthNormalTracking(const std::string &name, const bool &useDepthNormalTracking)
4946 {
4947   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
4948        it != m_mapOfTrackers.end(); ++it) {
4949     TrackerWrapper *tracker = it->second;
4950     tracker->setUseDepthNormalTracking(name, useDepthNormalTracking);
4951   }
4952 }
4953 
4954 /*!
4955   Set if the polygon that has the given name has to be considered during
4956   the tracking phase.
4957 
4958   \param name : name of the polygon.
4959   \param useEdgeTracking : True if it has to be considered, False otherwise.
4960 
4961   \note This function will set the new parameter for all the cameras.
4962 */
setUseEdgeTracking(const std::string & name,const bool & useEdgeTracking)4963 void vpMbGenericTracker::setUseEdgeTracking(const std::string &name, const bool &useEdgeTracking)
4964 {
4965   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
4966        it != m_mapOfTrackers.end(); ++it) {
4967     TrackerWrapper *tracker = it->second;
4968     tracker->setUseEdgeTracking(name, useEdgeTracking);
4969   }
4970 }
4971 
4972 #if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
4973 /*!
4974   Set if the polygon that has the given name has to be considered during
4975   the tracking phase.
4976 
4977   \param name : name of the polygon.
4978   \param useKltTracking : True if it has to be considered, False otherwise.
4979 
4980   \note This function will set the new parameter for all the cameras.
4981 */
setUseKltTracking(const std::string & name,const bool & useKltTracking)4982 void vpMbGenericTracker::setUseKltTracking(const std::string &name, const bool &useKltTracking)
4983 {
4984   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
4985        it != m_mapOfTrackers.end(); ++it) {
4986     TrackerWrapper *tracker = it->second;
4987     tracker->setUseKltTracking(name, useKltTracking);
4988   }
4989 }
4990 #endif
4991 
testTracking()4992 void vpMbGenericTracker::testTracking()
4993 {
4994   // Test tracking fails only if all testTracking have failed
4995   bool isOneTestTrackingOk = false;
4996   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
4997        it != m_mapOfTrackers.end(); ++it) {
4998     TrackerWrapper *tracker = it->second;
4999     try {
5000       tracker->testTracking();
5001       isOneTestTrackingOk = true;
5002     } catch (...) {
5003     }
5004   }
5005 
5006   if (!isOneTestTrackingOk) {
5007     std::ostringstream oss;
5008     oss << "Not enough moving edges to track the object. Try to reduce the "
5009            "threshold="
5010         << m_percentageGdPt << " using setGoodMovingEdgesRatioThreshold()";
5011     throw vpTrackingException(vpTrackingException::fatalError, oss.str());
5012   }
5013 }
5014 
5015 /*!
5016   Realize the tracking of the object in the image.
5017 
5018   \throw vpException : if the tracking is supposed to have failed
5019 
5020   \param I : The current grayscale image.
5021 
5022   \note This function will track only for the reference camera.
5023 */
track(const vpImage<unsigned char> & I)5024 void vpMbGenericTracker::track(const vpImage<unsigned char> &I)
5025 {
5026   std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
5027   mapOfImages[m_referenceCameraName] = &I;
5028 
5029   std::map<std::string, const std::vector<vpColVector> *> mapOfPointClouds;
5030   std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
5031 
5032   track(mapOfImages, mapOfPointClouds, mapOfWidths, mapOfHeights);
5033 }
5034 
5035 /*!
5036   Realize the tracking of the object in the image.
5037 
5038   \throw vpException : if the tracking is supposed to have failed
5039 
5040   \param I_color : The current color image.
5041 
5042   \note This function will track only for the reference camera.
5043 */
track(const vpImage<vpRGBa> & I_color)5044 void vpMbGenericTracker::track(const vpImage<vpRGBa> &I_color)
5045 {
5046   std::map<std::string, const vpImage<vpRGBa> *> mapOfColorImages;
5047   mapOfColorImages[m_referenceCameraName] = &I_color;
5048 
5049   std::map<std::string, const std::vector<vpColVector> *> mapOfPointClouds;
5050   std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
5051 
5052   track(mapOfColorImages, mapOfPointClouds, mapOfWidths, mapOfHeights);
5053 }
5054 
5055 /*!
5056   Realize the tracking of the object in the image.
5057 
5058   \throw vpException : if the tracking is supposed to have failed
5059 
5060   \param I1 : The first grayscale image.
5061   \param I2 : The second grayscale image.
5062 
5063   \note This function assumes a stereo configuration of the generic tracker.
5064 */
track(const vpImage<unsigned char> & I1,const vpImage<unsigned char> & I2)5065 void vpMbGenericTracker::track(const vpImage<unsigned char> &I1, const vpImage<unsigned char> &I2)
5066 {
5067   if (m_mapOfTrackers.size() == 2) {
5068     std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
5069     std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
5070     mapOfImages[it->first] = &I1;
5071     ++it;
5072 
5073     mapOfImages[it->first] = &I2;
5074 
5075     std::map<std::string, const std::vector<vpColVector> *> mapOfPointClouds;
5076     std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
5077 
5078     track(mapOfImages, mapOfPointClouds, mapOfWidths, mapOfHeights);
5079   } else {
5080     std::stringstream ss;
5081     ss << "Require two cameras! There are " << m_mapOfTrackers.size() << " cameras!";
5082     throw vpException(vpTrackingException::fatalError, ss.str().c_str());
5083   }
5084 }
5085 
5086 /*!
5087   Realize the tracking of the object in the image.
5088 
5089   \throw vpException : if the tracking is supposed to have failed
5090 
5091   \param I_color1 : The first color image.
5092   \param _colorI2 : The second color image.
5093 
5094   \note This function assumes a stereo configuration of the generic tracker.
5095 */
track(const vpImage<vpRGBa> & I_color1,const vpImage<vpRGBa> & _colorI2)5096 void vpMbGenericTracker::track(const vpImage<vpRGBa> &I_color1, const vpImage<vpRGBa> &_colorI2)
5097 {
5098   if (m_mapOfTrackers.size() == 2) {
5099     std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
5100     std::map<std::string, const vpImage<vpRGBa> *> mapOfImages;
5101     mapOfImages[it->first] = &I_color1;
5102     ++it;
5103 
5104     mapOfImages[it->first] = &_colorI2;
5105 
5106     std::map<std::string, const std::vector<vpColVector> *> mapOfPointClouds;
5107     std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
5108 
5109     track(mapOfImages, mapOfPointClouds, mapOfWidths, mapOfHeights);
5110   } else {
5111     std::stringstream ss;
5112     ss << "Require two cameras! There are " << m_mapOfTrackers.size() << " cameras!";
5113     throw vpException(vpTrackingException::fatalError, ss.str().c_str());
5114   }
5115 }
5116 
5117 /*!
5118   Realize the tracking of the object in the image.
5119 
5120   \throw vpException : if the tracking is supposed to have failed
5121 
5122   \param mapOfImages : Map of images.
5123 */
track(std::map<std::string,const vpImage<unsigned char> * > & mapOfImages)5124 void vpMbGenericTracker::track(std::map<std::string, const vpImage<unsigned char> *> &mapOfImages)
5125 {
5126   std::map<std::string, const std::vector<vpColVector> *> mapOfPointClouds;
5127   std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
5128 
5129   track(mapOfImages, mapOfPointClouds, mapOfWidths, mapOfHeights);
5130 }
5131 
5132 /*!
5133   Realize the tracking of the object in the image.
5134 
5135   \throw vpException : if the tracking is supposed to have failed
5136 
5137   \param mapOfColorImages : Map of color images.
5138 */
track(std::map<std::string,const vpImage<vpRGBa> * > & mapOfColorImages)5139 void vpMbGenericTracker::track(std::map<std::string, const vpImage<vpRGBa> *> &mapOfColorImages)
5140 {
5141   std::map<std::string, const std::vector<vpColVector> *> mapOfPointClouds;
5142   std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
5143 
5144   track(mapOfColorImages, mapOfPointClouds, mapOfWidths, mapOfHeights);
5145 }
5146 
5147 #ifdef VISP_HAVE_PCL
5148 /*!
5149   Realize the tracking of the object in the image.
5150 
5151   \throw vpException : if the tracking is supposed to have failed
5152 
5153   \param mapOfImages : Map of images.
5154   \param mapOfPointClouds : Map of PCL pointclouds.
5155 */
void vpMbGenericTracker::track(std::map<std::string, const vpImage<unsigned char> *> &mapOfImages,
                               std::map<std::string, pcl::PointCloud<pcl::PointXYZ>::ConstPtr> &mapOfPointClouds)
{
  // Sanity checks: every camera must have a valid feature combination and the
  // inputs (grayscale image and/or PCL pointcloud) its enabled features need.
  for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
       it != m_mapOfTrackers.end(); ++it) {
    TrackerWrapper *tracker = it->second;

    if ((tracker->m_trackerType & (EDGE_TRACKER |
#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
                                   KLT_TRACKER |
#endif
                                   DEPTH_NORMAL_TRACKER | DEPTH_DENSE_TRACKER)) == 0) {
      throw vpException(vpException::fatalError, "Bad tracker type: %d", tracker->m_trackerType);
    }

    // Edge/KLT features require a valid grayscale image for this camera.
    if (tracker->m_trackerType & (EDGE_TRACKER
#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
                                  | KLT_TRACKER
#endif
                                  ) &&
        mapOfImages[it->first] == NULL) {
      throw vpException(vpException::fatalError, "Image pointer is NULL!");
    }

    // Depth features require a valid pointcloud for this camera.
    if (tracker->m_trackerType & (DEPTH_NORMAL_TRACKER | DEPTH_DENSE_TRACKER) &&
        ! mapOfPointClouds[it->first]) { // mapOfPointClouds[it->first] == nullptr
      throw vpException(vpException::fatalError, "Pointcloud smart pointer is NULL!");
    }
  }

  // Extract the features (moving edges, KLT points, depth features) per camera.
  preTracking(mapOfImages, mapOfPointClouds);

  try {
    // Joint virtual visual servoing optimization over all the cameras.
    computeVVS(mapOfImages);
  } catch (...) {
    // Invalidate the covariance on failure before propagating the error.
    covarianceMatrix = -1;
    throw; // throw the original exception
  }

  testTracking();

  // Per-camera post-processing; optionally cache the features to display.
  for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
       it != m_mapOfTrackers.end(); ++it) {
    TrackerWrapper *tracker = it->second;

    if (tracker->m_trackerType & EDGE_TRACKER && displayFeatures) {
      tracker->m_featuresToBeDisplayedEdge = tracker->getFeaturesForDisplayEdge();
    }

    tracker->postTracking(mapOfImages[it->first], mapOfPointClouds[it->first]);

    if (displayFeatures) {
#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
      if (tracker->m_trackerType & KLT_TRACKER) {
        tracker->m_featuresToBeDisplayedKlt = tracker->getFeaturesForDisplayKlt();
      }
#endif

      if (tracker->m_trackerType & DEPTH_NORMAL_TRACKER) {
        tracker->m_featuresToBeDisplayedDepthNormal = tracker->getFeaturesForDisplayDepthNormal();
      }
    }
  }

  computeProjectionError();
}
5222 
5223 /*!
5224   Realize the tracking of the object in the image.
5225 
5226   \throw vpException : if the tracking is supposed to have failed
5227 
5228   \param mapOfColorImages : Map of color images.
5229   \param mapOfPointClouds : Map of PCL pointclouds.
5230 */
track(std::map<std::string,const vpImage<vpRGBa> * > & mapOfColorImages,std::map<std::string,pcl::PointCloud<pcl::PointXYZ>::ConstPtr> & mapOfPointClouds)5231 void vpMbGenericTracker::track(std::map<std::string, const vpImage<vpRGBa> *> &mapOfColorImages,
5232                                std::map<std::string, pcl::PointCloud<pcl::PointXYZ>::ConstPtr> &mapOfPointClouds)
5233 {
5234   std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
5235   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
5236        it != m_mapOfTrackers.end(); ++it) {
5237     TrackerWrapper *tracker = it->second;
5238 
5239     if ((tracker->m_trackerType & (EDGE_TRACKER |
5240 #if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
5241                                    KLT_TRACKER |
5242 #endif
5243                                    DEPTH_NORMAL_TRACKER | DEPTH_DENSE_TRACKER)) == 0) {
5244       throw vpException(vpException::fatalError, "Bad tracker type: %d", tracker->m_trackerType);
5245     }
5246 
5247     if (tracker->m_trackerType & (EDGE_TRACKER
5248 #if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
5249                                   | KLT_TRACKER
5250 #endif
5251                                   ) && mapOfImages[it->first] == NULL) {
5252       throw vpException(vpException::fatalError, "Image pointer is NULL!");
5253     } else if (tracker->m_trackerType & (EDGE_TRACKER
5254 #if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
5255                                   | KLT_TRACKER
5256 #endif
5257                                   ) && mapOfImages[it->first] != NULL) {
5258       vpImageConvert::convert(*mapOfColorImages[it->first], tracker->m_I);
5259       mapOfImages[it->first] = &tracker->m_I; //update grayscale image buffer
5260     }
5261 
5262     if (tracker->m_trackerType & (DEPTH_NORMAL_TRACKER | DEPTH_DENSE_TRACKER) &&
5263         ! mapOfPointClouds[it->first]) { // mapOfPointClouds[it->first] == nullptr
5264       throw vpException(vpException::fatalError, "Pointcloud smart pointer is NULL!");
5265     }
5266   }
5267 
5268   preTracking(mapOfImages, mapOfPointClouds);
5269 
5270   try {
5271     computeVVS(mapOfImages);
5272   } catch (...) {
5273     covarianceMatrix = -1;
5274     throw; // throw the original exception
5275   }
5276 
5277   testTracking();
5278 
5279   for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
5280        it != m_mapOfTrackers.end(); ++it) {
5281     TrackerWrapper *tracker = it->second;
5282 
5283     if (tracker->m_trackerType & EDGE_TRACKER && displayFeatures) {
5284       tracker->m_featuresToBeDisplayedEdge = tracker->getFeaturesForDisplayEdge();
5285     }
5286 
5287     tracker->postTracking(mapOfImages[it->first], mapOfPointClouds[it->first]);
5288 
5289     if (displayFeatures) {
5290 #if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
5291       if (tracker->m_trackerType & KLT_TRACKER) {
5292         tracker->m_featuresToBeDisplayedKlt = tracker->getFeaturesForDisplayKlt();
5293       }
5294 #endif
5295 
5296       if (tracker->m_trackerType & DEPTH_NORMAL_TRACKER) {
5297         tracker->m_featuresToBeDisplayedDepthNormal = tracker->getFeaturesForDisplayDepthNormal();
5298       }
5299     }
5300   }
5301 
5302   computeProjectionError();
5303 }
5304 #endif
5305 
5306 /*!
5307   Realize the tracking of the object in the image.
5308 
5309   \throw vpException : if the tracking is supposed to have failed
5310 
5311   \param mapOfImages : Map of images.
5312   \param mapOfPointClouds : Map of pointclouds.
5313   \param mapOfPointCloudWidths : Map of pointcloud widths.
5314   \param mapOfPointCloudHeights : Map of pointcloud heights.
5315 */
void vpMbGenericTracker::track(std::map<std::string, const vpImage<unsigned char> *> &mapOfImages,
                               std::map<std::string, const std::vector<vpColVector> *> &mapOfPointClouds,
                               std::map<std::string, unsigned int> &mapOfPointCloudWidths,
                               std::map<std::string, unsigned int> &mapOfPointCloudHeights)
{
  // Sanity checks: every camera must have a valid feature combination and the
  // inputs (grayscale image and/or pointcloud) its enabled features need.
  for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
       it != m_mapOfTrackers.end(); ++it) {
    TrackerWrapper *tracker = it->second;

    if ((tracker->m_trackerType & (EDGE_TRACKER |
#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
                                   KLT_TRACKER |
#endif
                                   DEPTH_NORMAL_TRACKER | DEPTH_DENSE_TRACKER)) == 0) {
      throw vpException(vpException::fatalError, "Bad tracker type: %d", tracker->m_trackerType);
    }

    // Edge/KLT features require a valid grayscale image for this camera.
    if (tracker->m_trackerType & (EDGE_TRACKER
#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
                                  | KLT_TRACKER
#endif
                                  ) &&
        mapOfImages[it->first] == NULL) {
      throw vpException(vpException::fatalError, "Image pointer is NULL!");
    }

    // Depth features require a valid pointcloud for this camera.
    if (tracker->m_trackerType & (DEPTH_NORMAL_TRACKER | DEPTH_DENSE_TRACKER) &&
        (mapOfPointClouds[it->first] == NULL)) {
      throw vpException(vpException::fatalError, "Pointcloud is NULL!");
    }
  }

  // Extract the features (moving edges, KLT points, depth features) per camera.
  preTracking(mapOfImages, mapOfPointClouds, mapOfPointCloudWidths, mapOfPointCloudHeights);

  try {
    // Joint virtual visual servoing optimization over all the cameras.
    computeVVS(mapOfImages);
  } catch (...) {
    // Invalidate the covariance on failure before propagating the error.
    covarianceMatrix = -1;
    throw; // throw the original exception
  }

  testTracking();

  // Per-camera post-processing; optionally cache the features to display.
  for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
       it != m_mapOfTrackers.end(); ++it) {
    TrackerWrapper *tracker = it->second;

    if (tracker->m_trackerType & EDGE_TRACKER && displayFeatures) {
      tracker->m_featuresToBeDisplayedEdge = tracker->getFeaturesForDisplayEdge();
    }

    tracker->postTracking(mapOfImages[it->first], mapOfPointCloudWidths[it->first], mapOfPointCloudHeights[it->first]);

    if (displayFeatures) {
#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
      if (tracker->m_trackerType & KLT_TRACKER) {
        tracker->m_featuresToBeDisplayedKlt = tracker->getFeaturesForDisplayKlt();
      }
#endif

      if (tracker->m_trackerType & DEPTH_NORMAL_TRACKER) {
        tracker->m_featuresToBeDisplayedDepthNormal = tracker->getFeaturesForDisplayDepthNormal();
      }
    }
  }

  computeProjectionError();
}
5384 
5385 /*!
5386   Realize the tracking of the object in the image.
5387 
5388   \throw vpException : if the tracking is supposed to have failed
5389 
5390   \param mapOfColorImages : Map of images.
5391   \param mapOfPointClouds : Map of pointclouds.
5392   \param mapOfPointCloudWidths : Map of pointcloud widths.
5393   \param mapOfPointCloudHeights : Map of pointcloud heights.
5394 */
void vpMbGenericTracker::track(std::map<std::string, const vpImage<vpRGBa> *> &mapOfColorImages,
                               std::map<std::string, const std::vector<vpColVector> *> &mapOfPointClouds,
                               std::map<std::string, unsigned int> &mapOfPointCloudWidths,
                               std::map<std::string, unsigned int> &mapOfPointCloudHeights)
{
  // Grayscale conversions of the input color images, built per camera below.
  std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
  for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
       it != m_mapOfTrackers.end(); ++it) {
    TrackerWrapper *tracker = it->second;

    if ((tracker->m_trackerType & (EDGE_TRACKER |
#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
                                   KLT_TRACKER |
#endif
                                   DEPTH_NORMAL_TRACKER | DEPTH_DENSE_TRACKER)) == 0) {
      throw vpException(vpException::fatalError, "Bad tracker type: %d", tracker->m_trackerType);
    }

    // Edge/KLT features require a valid color image for this camera; when one
    // is present, convert it to grayscale into the tracker-owned buffer.
    if (tracker->m_trackerType & (EDGE_TRACKER
#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
                                  | KLT_TRACKER
#endif
                                  ) && mapOfColorImages[it->first] == NULL) {
      throw vpException(vpException::fatalError, "Image pointer is NULL!");
    } else if (tracker->m_trackerType & (EDGE_TRACKER
#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
                                  | KLT_TRACKER
#endif
                                  ) && mapOfColorImages[it->first] != NULL) {
      vpImageConvert::convert(*mapOfColorImages[it->first], tracker->m_I);
      mapOfImages[it->first] = &tracker->m_I; //update grayscale image buffer
    }

    // Depth features require a valid pointcloud for this camera.
    if (tracker->m_trackerType & (DEPTH_NORMAL_TRACKER | DEPTH_DENSE_TRACKER) &&
        (mapOfPointClouds[it->first] == NULL)) {
      throw vpException(vpException::fatalError, "Pointcloud is NULL!");
    }
  }

  // Extract the features (moving edges, KLT points, depth features) per camera.
  preTracking(mapOfImages, mapOfPointClouds, mapOfPointCloudWidths, mapOfPointCloudHeights);

  try {
    // Joint virtual visual servoing optimization over all the cameras.
    computeVVS(mapOfImages);
  } catch (...) {
    // Invalidate the covariance on failure before propagating the error.
    covarianceMatrix = -1;
    throw; // throw the original exception
  }

  testTracking();

  // Per-camera post-processing; optionally cache the features to display.
  for (std::map<std::string, TrackerWrapper *>::const_iterator it = m_mapOfTrackers.begin();
       it != m_mapOfTrackers.end(); ++it) {
    TrackerWrapper *tracker = it->second;

    if (tracker->m_trackerType & EDGE_TRACKER && displayFeatures) {
      tracker->m_featuresToBeDisplayedEdge = tracker->getFeaturesForDisplayEdge();
    }

    tracker->postTracking(mapOfImages[it->first], mapOfPointCloudWidths[it->first], mapOfPointCloudHeights[it->first]);

    if (displayFeatures) {
#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
      if (tracker->m_trackerType & KLT_TRACKER) {
        tracker->m_featuresToBeDisplayedKlt = tracker->getFeaturesForDisplayKlt();
      }
#endif

      if (tracker->m_trackerType & DEPTH_NORMAL_TRACKER) {
        tracker->m_featuresToBeDisplayedDepthNormal = tracker->getFeaturesForDisplayDepthNormal();
      }
    }
  }

  computeProjectionError();
}
5470 
5471 /** TrackerWrapper **/
TrackerWrapper()5472 vpMbGenericTracker::TrackerWrapper::TrackerWrapper()
5473   : m_error(), m_L(), m_trackerType(EDGE_TRACKER), m_w(), m_weightedError()
5474 {
5475   m_lambda = 1.0;
5476   m_maxIter = 30;
5477 
5478 #ifdef VISP_HAVE_OGRE
5479   faces.getOgreContext()->setWindowName("MBT TrackerWrapper");
5480 
5481   m_projectionErrorFaces.getOgreContext()->setWindowName("MBT TrackerWrapper (projection error)");
5482 #endif
5483 }
5484 
TrackerWrapper(int trackerType)5485 vpMbGenericTracker::TrackerWrapper::TrackerWrapper(int trackerType)
5486   : m_error(), m_L(), m_trackerType(trackerType), m_w(), m_weightedError()
5487 {
5488   if ((m_trackerType & (EDGE_TRACKER |
5489 #if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
5490                         KLT_TRACKER |
5491 #endif
5492                         DEPTH_NORMAL_TRACKER | DEPTH_DENSE_TRACKER)) == 0) {
5493     throw vpException(vpTrackingException::badValue, "Bad value for tracker type: %d!", m_trackerType);
5494   }
5495 
5496   m_lambda = 1.0;
5497   m_maxIter = 30;
5498 
5499 #ifdef VISP_HAVE_OGRE
5500   faces.getOgreContext()->setWindowName("MBT TrackerWrapper");
5501 
5502   m_projectionErrorFaces.getOgreContext()->setWindowName("MBT TrackerWrapper (projection error)");
5503 #endif
5504 }
5505 
~TrackerWrapper()5506 vpMbGenericTracker::TrackerWrapper::~TrackerWrapper() { }
5507 
5508 // Implemented only for debugging purposes: use TrackerWrapper as a standalone tracker
// Run the virtual visual servoing (VVS) pose optimization for this wrapper,
// fusing the stacked residuals of every enabled feature type (edge, KLT,
// depth normal, depth dense) into a single minimization over m_cMo.
void vpMbGenericTracker::TrackerWrapper::computeVVS(const vpImage<unsigned char> *const ptr_I)
{
  // Size the stacked interaction matrix / error / weight vectors according to
  // the currently enabled feature types.
  computeVVSInit(ptr_I);

  // Fewer than 4 scalar measurements cannot constrain the pose update.
  if (m_error.getRows() < 4) {
    throw vpTrackingException(vpTrackingException::notEnoughPointError, "Error: not enough features");
  }

  double normRes = 0;
  double normRes_1 = -1;
  unsigned int iter = 0;

  // Per-feature-type scale factors applied on top of the robust weights
  // (all left at 1.0 for the standalone wrapper).
  double factorEdge = 1.0;
#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
  double factorKlt = 1.0;
#endif
  double factorDepth = 1.0;
  double factorDepthDense = 1.0;

  vpMatrix LTL;
  vpColVector LTR, v;
  vpColVector error_prev;

  // Levenberg-Marquardt damping and state needed to roll back an iteration.
  double mu = m_initialMu;
  vpHomogeneousMatrix cMo_prev;
#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
  vpHomogeneousMatrix ctTc0_Prev; // Only for KLT
#endif
  bool isoJoIdentity_ = true;

  // Covariance
  vpColVector W_true(m_error.getRows());
  vpMatrix L_true, LVJ_true;

  // Row counts of each feature block inside the stacked vectors; the stacking
  // order (edge, KLT, depth normal, depth dense) matches
  // computeVVSInteractionMatrixAndResidu().
  unsigned int nb_edge_features = m_error_edge.getRows();
#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
  unsigned int nb_klt_features = m_error_klt.getRows();
#endif
  unsigned int nb_depth_features = m_error_depthNormal.getRows();
  unsigned int nb_depth_dense_features = m_error_depthDense.getRows();

  // Iterate until the residual norm stabilizes or the iteration budget is spent.
  while (std::fabs(normRes_1 - normRes) > m_stopCriteriaEpsilon && (iter < m_maxIter)) {
    computeVVSInteractionMatrixAndResidu(ptr_I);

    bool reStartFromLastIncrement = false;
    computeVVSCheckLevenbergMarquardt(iter, m_error, error_prev, cMo_prev, mu, reStartFromLastIncrement);

#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
    // When LM rolls the pose back, also restore the KLT-specific transform.
    if (reStartFromLastIncrement) {
      if (m_trackerType & KLT_TRACKER) {
        ctTc0 = ctTc0_Prev;
      }
    }
#endif

    if (!reStartFromLastIncrement) {
      computeVVSWeights();

      // Snapshot the unweighted interaction matrix for the covariance.
      if (computeCovariance) {
        L_true = m_L;
        if (!isoJoIdentity_) {
          vpVelocityTwistMatrix cVo;
          cVo.buildFrom(m_cMo);
          LVJ_true = (m_L * cVo * oJo);
        }
      }

      vpVelocityTwistMatrix cVo;
      if (iter == 0) {
        isoJoIdentity_ = true;
        oJo.eye();

        // If all the 6 dof should be estimated, we check if the interaction
        // matrix is full rank. If not we remove automatically the dof that
        // cannot be estimated. This is particularly useful when considering
        // circles (rank 5) and cylinders (rank 4)
        if (isoJoIdentity_) {
          cVo.buildFrom(m_cMo);

          vpMatrix K; // kernel
          unsigned int rank = (m_L * cVo).kernel(K);
          if (rank == 0) {
            throw vpException(vpException::fatalError, "Rank=0, cannot estimate the pose !");
          }

          if (rank != 6) {
            vpMatrix I; // Identity
            I.eye(6);
            oJo = I - K.AtA();

            isoJoIdentity_ = false;
          }
        }
      }

      // Weighting
      double num = 0;
      double den = 0;

      // Apply the robust weights block by block; start_index tracks where each
      // feature block begins in the stacked vectors.
      unsigned int start_index = 0;
      if (m_trackerType & EDGE_TRACKER) {
        for (unsigned int i = 0; i < nb_edge_features; i++) {
          double wi = m_w_edge[i] * m_factor[i] * factorEdge;
          W_true[i] = wi;
          m_weightedError[i] = wi * m_error[i];

          num += wi * vpMath::sqr(m_error[i]);
          den += wi;

          for (unsigned int j = 0; j < m_L.getCols(); j++) {
            m_L[i][j] *= wi;
          }
        }

        start_index += nb_edge_features;
      }

#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
      if (m_trackerType & KLT_TRACKER) {
        for (unsigned int i = 0; i < nb_klt_features; i++) {
          double wi = m_w_klt[i] * factorKlt;
          W_true[start_index + i] = wi;
          m_weightedError[start_index + i] = wi * m_error_klt[i];

          num += wi * vpMath::sqr(m_error[start_index + i]);
          den += wi;

          for (unsigned int j = 0; j < m_L.getCols(); j++) {
            m_L[start_index + i][j] *= wi;
          }
        }

        start_index += nb_klt_features;
      }
#endif

      if (m_trackerType & DEPTH_NORMAL_TRACKER) {
        for (unsigned int i = 0; i < nb_depth_features; i++) {
          double wi = m_w_depthNormal[i] * factorDepth;
          // NOTE(review): unlike the edge/KLT blocks, the depth rows update
          // m_w instead of W_true; confirm computeCovarianceMatrixVVS() is
          // meant to see W_true's untouched values for these rows.
          m_w[start_index + i] = m_w_depthNormal[i];
          m_weightedError[start_index + i] = wi * m_error[start_index + i];

          num += wi * vpMath::sqr(m_error[start_index + i]);
          den += wi;

          for (unsigned int j = 0; j < m_L.getCols(); j++) {
            m_L[start_index + i][j] *= wi;
          }
        }

        start_index += nb_depth_features;
      }

      if (m_trackerType & DEPTH_DENSE_TRACKER) {
        for (unsigned int i = 0; i < nb_depth_dense_features; i++) {
          double wi = m_w_depthDense[i] * factorDepthDense;
          // NOTE(review): same W_true/m_w asymmetry as the depth-normal block.
          m_w[start_index + i] = m_w_depthDense[i];
          m_weightedError[start_index + i] = wi * m_error[start_index + i];

          num += wi * vpMath::sqr(m_error[start_index + i]);
          den += wi;

          for (unsigned int j = 0; j < m_L.getCols(); j++) {
            m_L[start_index + i][j] *= wi;
          }
        }

        //        start_index += nb_depth_dense_features;
      }

      // Solve for the velocity twist v and apply it to the pose.
      computeVVSPoseEstimation(isoJoIdentity_, iter, m_L, LTL, m_weightedError, m_error, error_prev, LTR, mu, v);

      cMo_prev = m_cMo;
#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
      if (m_trackerType & KLT_TRACKER) {
        ctTc0_Prev = ctTc0;
      }
#endif

      m_cMo = vpExponentialMap::direct(v).inverse() * m_cMo;

#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
      // Keep the KLT reference transform consistent with the pose update.
      if (m_trackerType & KLT_TRACKER) {
        ctTc0 = vpExponentialMap::direct(v).inverse() * ctTc0;
      }
#endif
      normRes_1 = normRes;

      // Weighted RMS of the residual, used by the stop criterion.
      normRes = sqrt(num / den);
    }

    iter++;
  }

  computeCovarianceMatrixVVS(isoJoIdentity_, W_true, cMo_prev, L_true, LVJ_true, m_error);

  if (m_trackerType & EDGE_TRACKER) {
    vpMbEdgeTracker::updateMovingEdgeWeights();
  }
}
5709 
computeVVSInit()5710 void vpMbGenericTracker::TrackerWrapper::computeVVSInit()
5711 {
5712   throw vpException(vpException::fatalError, "vpMbGenericTracker::"
5713                                              "TrackerWrapper::computeVVSInit("
5714                                              ") should not be called!");
5715 }
5716 
// Prepare the stacked VVS containers: initialize each enabled sub-tracker's
// per-feature data, clear the data of disabled ones, then size the fused
// interaction matrix / error / weight vectors to the total feature count.
void vpMbGenericTracker::TrackerWrapper::computeVVSInit(const vpImage<unsigned char> *const ptr_I)
{
  // Edge part: initMbtTracking() calls vpMbEdgeTracker::computeVVSInit() and
  // the first-phase factor computation (see its definition below).
  initMbtTracking(ptr_I);

  unsigned int nbFeatures = 0;

  if (m_trackerType & EDGE_TRACKER) {
    nbFeatures += m_error_edge.getRows();
  } else {
    // Disabled: release the edge buffers so stale rows cannot be stacked.
    m_error_edge.clear();
    m_weightedError_edge.clear();
    m_L_edge.clear();
    m_w_edge.clear();
  }

#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
  if (m_trackerType & KLT_TRACKER) {
    vpMbKltTracker::computeVVSInit();
    nbFeatures += m_error_klt.getRows();
  } else {
    m_error_klt.clear();
    m_weightedError_klt.clear();
    m_L_klt.clear();
    m_w_klt.clear();
  }
#endif

  if (m_trackerType & DEPTH_NORMAL_TRACKER) {
    vpMbDepthNormalTracker::computeVVSInit();
    nbFeatures += m_error_depthNormal.getRows();
  } else {
    m_error_depthNormal.clear();
    m_weightedError_depthNormal.clear();
    m_L_depthNormal.clear();
    m_w_depthNormal.clear();
  }

  if (m_trackerType & DEPTH_DENSE_TRACKER) {
    vpMbDepthDenseTracker::computeVVSInit();
    nbFeatures += m_error_depthDense.getRows();
  } else {
    m_error_depthDense.clear();
    m_weightedError_depthDense.clear();
    m_L_depthDense.clear();
    m_w_depthDense.clear();
  }

  // Fused containers: one row per scalar feature, 6 columns (pose dof).
  m_L.resize(nbFeatures, 6, false, false);
  m_error.resize(nbFeatures, false);

  m_weightedError.resize(nbFeatures, false);
  m_w.resize(nbFeatures, false);
  m_w = 1;
}
5771 
computeVVSInteractionMatrixAndResidu()5772 void vpMbGenericTracker::TrackerWrapper::computeVVSInteractionMatrixAndResidu()
5773 {
5774   throw vpException(vpException::fatalError, "vpMbGenericTracker::"
5775                                              "TrackerWrapper::"
5776                                              "computeVVSInteractionMatrixAndR"
5777                                              "esidu() should not be called!");
5778 }
5779 
// Compute each enabled sub-tracker's interaction matrix and residual, then
// stack them into the fused m_L / m_error in the fixed order:
// edge, KLT, depth normal, depth dense. This order must match the row
// bookkeeping done in computeVVS().
void vpMbGenericTracker::TrackerWrapper::computeVVSInteractionMatrixAndResidu(const vpImage<unsigned char> *const ptr_I)
{
  if (m_trackerType & EDGE_TRACKER) {
    vpMbEdgeTracker::computeVVSInteractionMatrixAndResidu(*ptr_I);
  }

#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
  if (m_trackerType & KLT_TRACKER) {
    vpMbKltTracker::computeVVSInteractionMatrixAndResidu();
  }
#endif

  if (m_trackerType & DEPTH_NORMAL_TRACKER) {
    vpMbDepthNormalTracker::computeVVSInteractionMatrixAndResidu();
  }

  if (m_trackerType & DEPTH_DENSE_TRACKER) {
    vpMbDepthDenseTracker::computeVVSInteractionMatrixAndResidu();
  }

  // Stack the per-feature blocks; start_index is the first row of each block.
  unsigned int start_index = 0;
  if (m_trackerType & EDGE_TRACKER) {
    m_L.insert(m_L_edge, start_index, 0);
    m_error.insert(start_index, m_error_edge);

    start_index += m_error_edge.getRows();
  }

#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
  if (m_trackerType & KLT_TRACKER) {
    m_L.insert(m_L_klt, start_index, 0);
    m_error.insert(start_index, m_error_klt);

    start_index += m_error_klt.getRows();
  }
#endif

  if (m_trackerType & DEPTH_NORMAL_TRACKER) {
    m_L.insert(m_L_depthNormal, start_index, 0);
    m_error.insert(start_index, m_error_depthNormal);

    start_index += m_error_depthNormal.getRows();
  }

  if (m_trackerType & DEPTH_DENSE_TRACKER) {
    m_L.insert(m_L_depthDense, start_index, 0);
    m_error.insert(start_index, m_error_depthDense);

    //    start_index += m_error_depthDense.getRows();
  }
}
5831 
// Refresh the robust weights of each enabled sub-tracker and insert them into
// the fused weight vector m_w, in the same block order as the stacked error.
void vpMbGenericTracker::TrackerWrapper::computeVVSWeights()
{
  unsigned int start_index = 0;

  if (m_trackerType & EDGE_TRACKER) {
    vpMbEdgeTracker::computeVVSWeights();
    m_w.insert(start_index, m_w_edge);

    start_index += m_w_edge.getRows();
  }

#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
  if (m_trackerType & KLT_TRACKER) {
    vpMbTracker::computeVVSWeights(m_robust_klt, m_error_klt, m_w_klt);
    m_w.insert(start_index, m_w_klt);

    start_index += m_w_klt.getRows();
  }
#endif

  if (m_trackerType & DEPTH_NORMAL_TRACKER) {
    // Weights are recomputed/inserted only when robust estimation is enabled;
    // the offset still advances so the following blocks stay aligned (the
    // depth-normal rows then keep the values already present in m_w).
    if (m_depthNormalUseRobust) {
      vpMbTracker::computeVVSWeights(m_robust_depthNormal, m_error_depthNormal, m_w_depthNormal);
      m_w.insert(start_index, m_w_depthNormal);
    }

    start_index += m_w_depthNormal.getRows();
  }

  if (m_trackerType & DEPTH_DENSE_TRACKER) {
    vpMbDepthDenseTracker::computeVVSWeights();
    m_w.insert(start_index, m_w_depthDense);

    //    start_index += m_w_depthDense.getRows();
  }
}
5868 
display(const vpImage<unsigned char> & I,const vpHomogeneousMatrix & cMo,const vpCameraParameters & cam,const vpColor & col,unsigned int thickness,bool displayFullModel)5869 void vpMbGenericTracker::TrackerWrapper::display(const vpImage<unsigned char> &I, const vpHomogeneousMatrix &cMo,
5870                                                  const vpCameraParameters &cam, const vpColor &col,
5871                                                  unsigned int thickness, bool displayFullModel)
5872 {
5873   if (displayFeatures) {
5874     std::vector<std::vector<double> > features = getFeaturesForDisplay();
5875     for (size_t i = 0; i < features.size(); i++) {
5876       if (vpMath::equal(features[i][0], 0)) {
5877         vpImagePoint ip(features[i][1], features[i][2]);
5878         int state = static_cast<int>(features[i][3]);
5879 
5880         switch (state) {
5881         case vpMeSite::NO_SUPPRESSION:
5882           vpDisplay::displayCross(I, ip, 3, vpColor::green, 1);
5883           break;
5884 
5885         case vpMeSite::CONSTRAST:
5886           vpDisplay::displayCross(I, ip, 3, vpColor::blue, 1);
5887           break;
5888 
5889         case vpMeSite::THRESHOLD:
5890           vpDisplay::displayCross(I, ip, 3, vpColor::purple, 1);
5891           break;
5892 
5893         case vpMeSite::M_ESTIMATOR:
5894           vpDisplay::displayCross(I, ip, 3, vpColor::red, 1);
5895           break;
5896 
5897         case vpMeSite::TOO_NEAR:
5898           vpDisplay::displayCross(I, ip, 3, vpColor::cyan, 1);
5899           break;
5900 
5901         default:
5902           vpDisplay::displayCross(I, ip, 3, vpColor::yellow, 1);
5903         }
5904       } else if (vpMath::equal(features[i][0], 1)) {
5905         vpImagePoint ip1(features[i][1], features[i][2]);
5906         vpDisplay::displayCross(I, ip1, 10, vpColor::red);
5907 
5908         vpImagePoint ip2(features[i][3], features[i][4]);
5909         double id = features[i][5];
5910         std::stringstream ss;
5911         ss << id;
5912         vpDisplay::displayText(I, ip2, ss.str(), vpColor::red);
5913       } else if (vpMath::equal(features[i][0], 2)) {
5914         vpImagePoint im_centroid(features[i][1], features[i][2]);
5915         vpImagePoint im_extremity(features[i][3], features[i][4]);
5916         bool desired = vpMath::equal(features[i][0], 2);
5917         vpDisplay::displayArrow(I, im_centroid, im_extremity, desired ? vpColor::blue : vpColor::red, 4, 2, thickness);
5918       }
5919     }
5920   }
5921 
5922   std::vector<std::vector<double> > models = getModelForDisplay(I.getWidth(), I.getHeight(), cMo, cam, displayFullModel);
5923   for (size_t i = 0; i < models.size(); i++) {
5924     if (vpMath::equal(models[i][0], 0)) {
5925       vpImagePoint ip1(models[i][1], models[i][2]);
5926       vpImagePoint ip2(models[i][3], models[i][4]);
5927       vpDisplay::displayLine(I, ip1, ip2, col, thickness);
5928     } else if (vpMath::equal(models[i][0], 1)) {
5929       vpImagePoint center(models[i][1], models[i][2]);
5930       double n20 = models[i][3];
5931       double n11 = models[i][4];
5932       double n02 = models[i][5];
5933       vpDisplay::displayEllipse(I, center, n20, n11, n02, true, col, thickness);
5934     }
5935   }
5936 
5937 #ifdef VISP_HAVE_OGRE
5938   if ((m_trackerType & EDGE_TRACKER)
5939     #if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
5940       || (m_trackerType & KLT_TRACKER)
5941     #endif
5942       ) {
5943     if (useOgre)
5944       faces.displayOgre(cMo);
5945   }
5946 #endif
5947 }
5948 
display(const vpImage<vpRGBa> & I,const vpHomogeneousMatrix & cMo,const vpCameraParameters & cam,const vpColor & col,unsigned int thickness,bool displayFullModel)5949 void vpMbGenericTracker::TrackerWrapper::display(const vpImage<vpRGBa> &I, const vpHomogeneousMatrix &cMo,
5950                                                  const vpCameraParameters &cam, const vpColor &col,
5951                                                  unsigned int thickness, bool displayFullModel)
5952 {
5953   if (displayFeatures) {
5954     std::vector<std::vector<double> > features = getFeaturesForDisplay();
5955     for (size_t i = 0; i < features.size(); i++) {
5956       if (vpMath::equal(features[i][0], 0)) {
5957         vpImagePoint ip(features[i][1], features[i][2]);
5958         int state = static_cast<int>(features[i][3]);
5959 
5960         switch (state) {
5961         case vpMeSite::NO_SUPPRESSION:
5962           vpDisplay::displayCross(I, ip, 3, vpColor::green, 1);
5963           break;
5964 
5965         case vpMeSite::CONSTRAST:
5966           vpDisplay::displayCross(I, ip, 3, vpColor::blue, 1);
5967           break;
5968 
5969         case vpMeSite::THRESHOLD:
5970           vpDisplay::displayCross(I, ip, 3, vpColor::purple, 1);
5971           break;
5972 
5973         case vpMeSite::M_ESTIMATOR:
5974           vpDisplay::displayCross(I, ip, 3, vpColor::red, 1);
5975           break;
5976 
5977         case vpMeSite::TOO_NEAR:
5978           vpDisplay::displayCross(I, ip, 3, vpColor::cyan, 1);
5979           break;
5980 
5981         default:
5982           vpDisplay::displayCross(I, ip, 3, vpColor::yellow, 1);
5983         }
5984       } else if (vpMath::equal(features[i][0], 1)) {
5985         vpImagePoint ip1(features[i][1], features[i][2]);
5986         vpDisplay::displayCross(I, ip1, 10, vpColor::red);
5987 
5988         vpImagePoint ip2(features[i][3], features[i][4]);
5989         double id = features[i][5];
5990         std::stringstream ss;
5991         ss << id;
5992         vpDisplay::displayText(I, ip2, ss.str(), vpColor::red);
5993       } else if (vpMath::equal(features[i][0], 2)) {
5994         vpImagePoint im_centroid(features[i][1], features[i][2]);
5995         vpImagePoint im_extremity(features[i][3], features[i][4]);
5996         bool desired = vpMath::equal(features[i][0], 2);
5997         vpDisplay::displayArrow(I, im_centroid, im_extremity, desired ? vpColor::blue : vpColor::red, 4, 2, thickness);
5998       }
5999     }
6000   }
6001 
6002   std::vector<std::vector<double> > models = getModelForDisplay(I.getWidth(), I.getHeight(), cMo, cam, displayFullModel);
6003   for (size_t i = 0; i < models.size(); i++) {
6004     if (vpMath::equal(models[i][0], 0)) {
6005       vpImagePoint ip1(models[i][1], models[i][2]);
6006       vpImagePoint ip2(models[i][3], models[i][4]);
6007       vpDisplay::displayLine(I, ip1, ip2, col, thickness);
6008     } else if (vpMath::equal(models[i][0], 1)) {
6009       vpImagePoint center(models[i][1], models[i][2]);
6010       double n20 = models[i][3];
6011       double n11 = models[i][4];
6012       double n02 = models[i][5];
6013       vpDisplay::displayEllipse(I, center, n20, n11, n02, true, col, thickness);
6014     }
6015   }
6016 
6017 #ifdef VISP_HAVE_OGRE
6018   if ((m_trackerType & EDGE_TRACKER)
6019     #if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
6020       || (m_trackerType & KLT_TRACKER)
6021     #endif
6022       ) {
6023     if (useOgre)
6024       faces.displayOgre(cMo);
6025   }
6026 #endif
6027 }
6028 
getFeaturesForDisplay()6029 std::vector<std::vector<double> > vpMbGenericTracker::TrackerWrapper::getFeaturesForDisplay()
6030 {
6031   std::vector<std::vector<double> > features;
6032 
6033   if (m_trackerType & EDGE_TRACKER) {
6034     //m_featuresToBeDisplayedEdge updated after computeVVS()
6035     features.insert(features.end(), m_featuresToBeDisplayedEdge.begin(), m_featuresToBeDisplayedEdge.end());
6036   }
6037 
6038 #if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
6039   if (m_trackerType & KLT_TRACKER) {
6040     //m_featuresToBeDisplayedKlt updated after postTracking()
6041     features.insert(features.end(), m_featuresToBeDisplayedKlt.begin(), m_featuresToBeDisplayedKlt.end());
6042   }
6043 #endif
6044 
6045   if (m_trackerType & DEPTH_NORMAL_TRACKER) {
6046     //m_featuresToBeDisplayedDepthNormal updated after postTracking()
6047     features.insert(features.end(), m_featuresToBeDisplayedDepthNormal.begin(), m_featuresToBeDisplayedDepthNormal.end());
6048   }
6049 
6050   return features;
6051 }
6052 
// Return the model primitives to display for the current pose. Single-type
// trackers are matched with == so each model is emitted exactly once; for
// combined types, only one representative of each primitive family is queried
// (edge/KLT share primitives, as do the two depth trackers).
std::vector<std::vector<double> > vpMbGenericTracker::TrackerWrapper::getModelForDisplay(unsigned int width, unsigned int height,
                                                                                         const vpHomogeneousMatrix &cMo,
                                                                                         const vpCameraParameters &cam,
                                                                                         bool displayFullModel)
{
  std::vector<std::vector<double> > models;

  //Do not add multiple times the same models
  if (m_trackerType == EDGE_TRACKER) {
    models = vpMbEdgeTracker::getModelForDisplay(width, height, cMo, cam, displayFullModel);
  }
#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
  else if (m_trackerType == KLT_TRACKER) {
    models = vpMbKltTracker::getModelForDisplay(width, height, cMo, cam, displayFullModel);
  }
#endif
  else if (m_trackerType == DEPTH_NORMAL_TRACKER) {
    models = vpMbDepthNormalTracker::getModelForDisplay(width, height, cMo, cam, displayFullModel);
  } else if (m_trackerType == DEPTH_DENSE_TRACKER) {
    models = vpMbDepthDenseTracker::getModelForDisplay(width, height, cMo, cam, displayFullModel);
  } else {
    // Combined tracker types: query one representative per primitive family.
    //Edge and KLT trackers use the same primitives
    if (m_trackerType & EDGE_TRACKER) {
      std::vector<std::vector<double> > edgeModels = vpMbEdgeTracker::getModelForDisplay(width, height, cMo, cam, displayFullModel);
      models.insert(models.end(), edgeModels.begin(), edgeModels.end());
    }

    //Depth dense and depth normal trackers use the same primitives
    if (m_trackerType & DEPTH_DENSE_TRACKER) {
      std::vector<std::vector<double> > depthDenseModels = vpMbDepthDenseTracker::getModelForDisplay(width, height, cMo, cam, displayFullModel);
      models.insert(models.end(), depthDenseModels.begin(), depthDenseModels.end());
    }
  }

  return models;
}
6089 
// (Re)initialize the wrapper from the current pose m_cMo: recompute face
// visibility (CPU or Ogre), scanline rendering, and each enabled sub-tracker's
// feature set. Requires the CAD model to have been loaded beforehand.
void vpMbGenericTracker::TrackerWrapper::init(const vpImage<unsigned char> &I)
{
  if (!modelInitialised) {
    throw vpException(vpException::fatalError, "model not initialized");
  }

  // FOV clipping needs the camera field of view at the current image size.
  if (useScanLine || clippingFlag > 3)
    m_cam.computeFov(I.getWidth(), I.getHeight());

  bool reInitialisation = false;
  if (!useOgre) {
    // CPU visibility test based on the appear/disappear angle thresholds.
    faces.setVisible(I.getWidth(), I.getHeight(), m_cam, m_cMo, angleAppears, angleDisappears, reInitialisation);
  } else {
#ifdef VISP_HAVE_OGRE
    if (!faces.isOgreInitialised()) {
      faces.setBackgroundSizeOgre(I.getHeight(), I.getWidth());

      faces.setOgreShowConfigDialog(ogreShowConfigDialog);
      faces.initOgre(m_cam);
      // Turn off Ogre config dialog display for the next call to this
      // function since settings are saved in the ogre.cfg file and used
      // during the next call
      ogreShowConfigDialog = false;
    }

    faces.setVisibleOgre(I.getWidth(), I.getHeight(), m_cam, m_cMo, angleAppears, angleDisappears, reInitialisation);
#else
    // Ogre requested but not compiled in: fall back to the CPU visibility test.
    faces.setVisible(I.getWidth(), I.getHeight(), m_cam, m_cMo, angleAppears, angleDisappears, reInitialisation);
#endif
  }

  if (useScanLine) {
    faces.computeClippedPolygons(m_cMo, m_cam);
    faces.computeScanLineRender(m_cam, I.getWidth(), I.getHeight());
  }

#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
  if (m_trackerType & KLT_TRACKER)
    vpMbKltTracker::reinit(I);
#endif

  if (m_trackerType & EDGE_TRACKER) {
    vpMbEdgeTracker::resetMovingEdge();

    bool a = false;
    vpMbEdgeTracker::visibleFace(I, m_cMo, a); // should be useless, but keep it for nbvisiblepolygone

    vpMbEdgeTracker::initMovingEdge(I, m_cMo);
  }

  if (m_trackerType & DEPTH_NORMAL_TRACKER)
    vpMbDepthNormalTracker::computeVisibility(I.getWidth(), I.getHeight()); // vpMbDepthNormalTracker::init(I);

  if (m_trackerType & DEPTH_DENSE_TRACKER)
    vpMbDepthDenseTracker::computeVisibility(I.getWidth(), I.getHeight()); // vpMbDepthDenseTracker::init(I);
}
6146 
initCircle(const vpPoint & p1,const vpPoint & p2,const vpPoint & p3,double radius,int idFace,const std::string & name)6147 void vpMbGenericTracker::TrackerWrapper::initCircle(const vpPoint &p1, const vpPoint &p2, const vpPoint &p3,
6148                                                     double radius, int idFace, const std::string &name)
6149 {
6150   if (m_trackerType & EDGE_TRACKER)
6151     vpMbEdgeTracker::initCircle(p1, p2, p3, radius, idFace, name);
6152 
6153 #if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
6154   if (m_trackerType & KLT_TRACKER)
6155     vpMbKltTracker::initCircle(p1, p2, p3, radius, idFace, name);
6156 #endif
6157 }
6158 
initCylinder(const vpPoint & p1,const vpPoint & p2,double radius,int idFace,const std::string & name)6159 void vpMbGenericTracker::TrackerWrapper::initCylinder(const vpPoint &p1, const vpPoint &p2, double radius,
6160                                                       int idFace, const std::string &name)
6161 {
6162   if (m_trackerType & EDGE_TRACKER)
6163     vpMbEdgeTracker::initCylinder(p1, p2, radius, idFace, name);
6164 
6165 #if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
6166   if (m_trackerType & KLT_TRACKER)
6167     vpMbKltTracker::initCylinder(p1, p2, radius, idFace, name);
6168 #endif
6169 }
6170 
initFaceFromCorners(vpMbtPolygon & polygon)6171 void vpMbGenericTracker::TrackerWrapper::initFaceFromCorners(vpMbtPolygon &polygon)
6172 {
6173   if (m_trackerType & EDGE_TRACKER)
6174     vpMbEdgeTracker::initFaceFromCorners(polygon);
6175 
6176 #if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
6177   if (m_trackerType & KLT_TRACKER)
6178     vpMbKltTracker::initFaceFromCorners(polygon);
6179 #endif
6180 
6181   if (m_trackerType & DEPTH_NORMAL_TRACKER)
6182     vpMbDepthNormalTracker::initFaceFromCorners(polygon);
6183 
6184   if (m_trackerType & DEPTH_DENSE_TRACKER)
6185     vpMbDepthDenseTracker::initFaceFromCorners(polygon);
6186 }
6187 
initFaceFromLines(vpMbtPolygon & polygon)6188 void vpMbGenericTracker::TrackerWrapper::initFaceFromLines(vpMbtPolygon &polygon)
6189 {
6190   if (m_trackerType & EDGE_TRACKER)
6191     vpMbEdgeTracker::initFaceFromLines(polygon);
6192 
6193 #if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
6194   if (m_trackerType & KLT_TRACKER)
6195     vpMbKltTracker::initFaceFromLines(polygon);
6196 #endif
6197 
6198   if (m_trackerType & DEPTH_NORMAL_TRACKER)
6199     vpMbDepthNormalTracker::initFaceFromLines(polygon);
6200 
6201   if (m_trackerType & DEPTH_DENSE_TRACKER)
6202     vpMbDepthDenseTracker::initFaceFromLines(polygon);
6203 }
6204 
initMbtTracking(const vpImage<unsigned char> * const ptr_I)6205 void vpMbGenericTracker::TrackerWrapper::initMbtTracking(const vpImage<unsigned char> *const ptr_I)
6206 {
6207   if (m_trackerType & EDGE_TRACKER) {
6208     vpMbEdgeTracker::computeVVSInit();
6209     vpMbEdgeTracker::computeVVSFirstPhaseFactor(*ptr_I, 0);
6210   }
6211 }
6212 
/*!
  Configure this tracker wrapper from an XML settings file.

  The parser type is derived from m_trackerType, so only the sections that
  match the enabled sub-trackers (edge / KLT / depth normal / depth dense)
  are considered. Current settings are handed to the parser first, so values
  absent from the file presumably keep their present value — TODO confirm
  against vpMbtXmlGenericParser.

  \param configFile : Path to the XML configuration file.
  \param verbose : If true, print which tracker sections are parsed.

  \exception vpException::ioError : On any parsing failure (the original
  parser exception is swallowed and replaced by this one).
*/
void vpMbGenericTracker::TrackerWrapper::loadConfigFile(const std::string &configFile, bool verbose)
{
  // Load projection error config
  vpMbTracker::loadConfigFile(configFile, verbose);

  // Seed the parser with the current state so it starts from known values.
  vpMbtXmlGenericParser xmlp((vpMbtXmlGenericParser::vpParserType)m_trackerType);
  xmlp.setVerbose(verbose);
  xmlp.setCameraParameters(m_cam);
  // Angles are stored internally in radians; the parser works in degrees.
  xmlp.setAngleAppear(vpMath::deg(angleAppears));
  xmlp.setAngleDisappear(vpMath::deg(angleDisappears));

  // Edge
  xmlp.setEdgeMe(me);

  // KLT: hard-coded values handed to the parser before parsing — presumably
  // defaults used when the XML file does not define them.
  xmlp.setKltMaxFeatures(10000);
  xmlp.setKltWindowSize(5);
  xmlp.setKltQuality(0.01);
  xmlp.setKltMinDistance(5);
  xmlp.setKltHarrisParam(0.01);
  xmlp.setKltBlockSize(3);
  xmlp.setKltPyramidLevels(3);
#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
  xmlp.setKltMaskBorder(maskBorder);
#endif

  // Depth normal
  xmlp.setDepthNormalFeatureEstimationMethod(m_depthNormalFeatureEstimationMethod);
  xmlp.setDepthNormalPclPlaneEstimationMethod(m_depthNormalPclPlaneEstimationMethod);
  xmlp.setDepthNormalPclPlaneEstimationRansacMaxIter(m_depthNormalPclPlaneEstimationRansacMaxIter);
  xmlp.setDepthNormalPclPlaneEstimationRansacThreshold(m_depthNormalPclPlaneEstimationRansacThreshold);
  xmlp.setDepthNormalSamplingStepX(m_depthNormalSamplingStepX);
  xmlp.setDepthNormalSamplingStepY(m_depthNormalSamplingStepY);

  // Depth dense
  xmlp.setDepthDenseSamplingStepX(m_depthDenseSamplingStepX);
  xmlp.setDepthDenseSamplingStepY(m_depthDenseSamplingStepY);

  try {
    if (verbose) {
      std::cout << " *********** Parsing XML for";
    }

    // Build the list of enabled tracker names for the verbose banner.
    std::vector<std::string> tracker_names;
    if (m_trackerType & EDGE_TRACKER)
      tracker_names.push_back("Edge");
#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
    if (m_trackerType & KLT_TRACKER)
      tracker_names.push_back("Klt");
#endif
    if (m_trackerType & DEPTH_NORMAL_TRACKER)
      tracker_names.push_back("Depth Normal");
    if (m_trackerType & DEPTH_DENSE_TRACKER)
      tracker_names.push_back("Depth Dense");

    if (verbose) {
      for (size_t i = 0; i < tracker_names.size(); i++) {
        std::cout << " " << tracker_names[i];
        // Extra space after the last name, before "Model-Based Tracker".
        if (i == tracker_names.size() - 1) {
          std::cout << " ";
        }
      }

      std::cout << "Model-Based Tracker ************ " << std::endl;
    }

    xmlp.parse(configFile);
  } catch (...) {
    // NOTE(review): any parse failure (not only a missing file) is reported
    // as "Can't open XML file"; the original exception detail is lost.
    throw vpException(vpException::ioError, "Can't open XML file \"%s\"\n ", configFile.c_str());
  }

  // Apply the parsed values back onto this wrapper.
  vpCameraParameters camera;
  xmlp.getCameraParameters(camera);
  setCameraParameters(camera);

  angleAppears = vpMath::rad(xmlp.getAngleAppear());
  angleDisappears = vpMath::rad(xmlp.getAngleDisappear());

  if (xmlp.hasNearClippingDistance())
    setNearClippingDistance(xmlp.getNearClippingDistance());

  if (xmlp.hasFarClippingDistance())
    setFarClippingDistance(xmlp.getFarClippingDistance());

  if (xmlp.getFovClipping()) {
    // Add FOV clipping on top of the flags already set on the edge tracker.
    setClipping(vpMbEdgeTracker::clippingFlag | vpPolygon3D::FOV_CLIPPING);
  }

  // Level-of-detail settings.
  useLodGeneral = xmlp.getLodState();
  minLineLengthThresholdGeneral = xmlp.getLodMinLineLengthThreshold();
  minPolygonAreaThresholdGeneral = xmlp.getLodMinPolygonAreaThreshold();

  // Only propagate LOD settings if a model has already been loaded.
  applyLodSettingInConfig = false;
  if (this->getNbPolygon() > 0) {
    applyLodSettingInConfig = true;
    setLod(useLodGeneral);
    setMinLineLengthThresh(minLineLengthThresholdGeneral);
    setMinPolygonAreaThresh(minPolygonAreaThresholdGeneral);
  }

  // Edge
  vpMe meParser;
  xmlp.getEdgeMe(meParser);
  vpMbEdgeTracker::setMovingEdge(meParser);

// KLT
#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
  tracker.setMaxFeatures((int)xmlp.getKltMaxFeatures());
  tracker.setWindowSize((int)xmlp.getKltWindowSize());
  tracker.setQuality(xmlp.getKltQuality());
  tracker.setMinDistance(xmlp.getKltMinDistance());
  tracker.setHarrisFreeParameter(xmlp.getKltHarrisParam());
  tracker.setBlockSize((int)xmlp.getKltBlockSize());
  tracker.setPyramidLevels((int)xmlp.getKltPyramidLevels());
  maskBorder = xmlp.getKltMaskBorder();

  // if(useScanLine)
  faces.getMbScanLineRenderer().setMaskBorder(maskBorder);
#endif

  // Depth normal
  setDepthNormalFeatureEstimationMethod(xmlp.getDepthNormalFeatureEstimationMethod());
  setDepthNormalPclPlaneEstimationMethod(xmlp.getDepthNormalPclPlaneEstimationMethod());
  setDepthNormalPclPlaneEstimationRansacMaxIter(xmlp.getDepthNormalPclPlaneEstimationRansacMaxIter());
  setDepthNormalPclPlaneEstimationRansacThreshold(xmlp.getDepthNormalPclPlaneEstimationRansacThreshold());
  setDepthNormalSamplingStep(xmlp.getDepthNormalSamplingStepX(), xmlp.getDepthNormalSamplingStepY());

  // Depth dense
  setDepthDenseSamplingStep(xmlp.getDepthDenseSamplingStepX(), xmlp.getDepthDenseSamplingStepY());
}
6343 
6344 #ifdef VISP_HAVE_PCL
/*!
  Post-tracking bookkeeping (PCL variant): refresh KLT features, recompute
  face visibility for every enabled sub-tracker, and update the moving edges.

  \param ptr_I : Pointer to the grayscale image (dereferenced when the edge
  or KLT tracker is enabled — must be non-NULL in that case).
  \param point_cloud : Organized point cloud; its width/height define the
  depth-sensor resolution used for the depth visibility tests.
*/
void vpMbGenericTracker::TrackerWrapper::postTracking(const vpImage<unsigned char> *const ptr_I,
                                                      const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &point_cloud)
{
#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
  // KLT: postTracking() returns true when the feature set must be rebuilt.
  if (m_trackerType & KLT_TRACKER) {
    if (vpMbKltTracker::postTracking(*ptr_I, m_w_klt)) {
      vpMbKltTracker::reinit(*ptr_I);
    }
  }
#endif

  // Looking for new visible face
  if (m_trackerType & EDGE_TRACKER) {
    bool newvisibleface = false;
    vpMbEdgeTracker::visibleFace(*ptr_I, m_cMo, newvisibleface);

    if (useScanLine) {
      faces.computeClippedPolygons(m_cMo, m_cam);
      faces.computeScanLineRender(m_cam, ptr_I->getWidth(), ptr_I->getHeight());
    }
  }

  // Depth normal
  if (m_trackerType & DEPTH_NORMAL_TRACKER)
    vpMbDepthNormalTracker::computeVisibility(point_cloud->width, point_cloud->height);

  // Depth dense
  if (m_trackerType & DEPTH_DENSE_TRACKER)
    vpMbDepthDenseTracker::computeVisibility(point_cloud->width, point_cloud->height);

  // Edge: refresh the moving-edge sites for the next frame.
  if (m_trackerType & EDGE_TRACKER) {
    vpMbEdgeTracker::updateMovingEdge(*ptr_I);

    vpMbEdgeTracker::initMovingEdge(*ptr_I, m_cMo);
    // Reinit the moving edge for the lines which need it.
    vpMbEdgeTracker::reinitMovingEdge(*ptr_I, m_cMo);

    if (computeProjError) {
      vpMbEdgeTracker::computeProjectionError(*ptr_I);
    }
  }
}
6389 
/*!
  Pre-tracking step (PCL variant): run the low-level feature extraction of
  every enabled sub-tracker before the VVS optimization.

  \param ptr_I : Pointer to the grayscale image (dereferenced when the edge
  or KLT tracker is enabled — must be non-NULL in that case).
  \param point_cloud : Organized point cloud segmented by the depth trackers.

  Each failure is logged to std::cerr and the original exception is rethrown.
*/
void vpMbGenericTracker::TrackerWrapper::preTracking(const vpImage<unsigned char> *const ptr_I,
                                                     const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &point_cloud)
{
  if (m_trackerType & EDGE_TRACKER) {
    try {
      vpMbEdgeTracker::trackMovingEdge(*ptr_I);
    } catch (...) {
      std::cerr << "Error in moving edge tracking" << std::endl;
      throw;
    }
  }

#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
  if (m_trackerType & KLT_TRACKER) {
    try {
      vpMbKltTracker::preTracking(*ptr_I);
    } catch (const vpException &e) {
      std::cerr << "Error in KLT tracking: " << e.what() << std::endl;
      throw;
    }
  }
#endif

  if (m_trackerType & DEPTH_NORMAL_TRACKER) {
    try {
      vpMbDepthNormalTracker::segmentPointCloud(point_cloud);
    } catch (...) {
      std::cerr << "Error in Depth normal tracking" << std::endl;
      throw;
    }
  }

  if (m_trackerType & DEPTH_DENSE_TRACKER) {
    try {
      vpMbDepthDenseTracker::segmentPointCloud(point_cloud);
    } catch (...) {
      std::cerr << "Error in Depth dense tracking" << std::endl;
      throw;
    }
  }
}
6431 #endif
6432 
/*!
  Post-tracking bookkeeping (no-PCL variant): refresh KLT features, recompute
  face visibility for every enabled sub-tracker, and update the moving edges.
  Mirrors the PCL overload, with the depth-sensor resolution passed directly.

  \param ptr_I : Pointer to the grayscale image (dereferenced when the edge
  or KLT tracker is enabled — must be non-NULL in that case).
  \param pointcloud_width : Width of the depth map used for visibility tests.
  \param pointcloud_height : Height of the depth map used for visibility tests.
*/
void vpMbGenericTracker::TrackerWrapper::postTracking(const vpImage<unsigned char> *const ptr_I,
                                                      const unsigned int pointcloud_width,
                                                      const unsigned int pointcloud_height)
{
#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
  // KLT: postTracking() returns true when the feature set must be rebuilt.
  if (m_trackerType & KLT_TRACKER) {
    if (vpMbKltTracker::postTracking(*ptr_I, m_w_klt)) {
      vpMbKltTracker::reinit(*ptr_I);
    }
  }
#endif

  // Looking for new visible face
  if (m_trackerType & EDGE_TRACKER) {
    bool newvisibleface = false;
    vpMbEdgeTracker::visibleFace(*ptr_I, m_cMo, newvisibleface);

    if (useScanLine) {
      faces.computeClippedPolygons(m_cMo, m_cam);
      faces.computeScanLineRender(m_cam, ptr_I->getWidth(), ptr_I->getHeight());
    }
  }

  // Depth normal
  if (m_trackerType & DEPTH_NORMAL_TRACKER)
    vpMbDepthNormalTracker::computeVisibility(pointcloud_width, pointcloud_height);

  // Depth dense
  if (m_trackerType & DEPTH_DENSE_TRACKER)
    vpMbDepthDenseTracker::computeVisibility(pointcloud_width, pointcloud_height);

  // Edge: refresh the moving-edge sites for the next frame.
  if (m_trackerType & EDGE_TRACKER) {
    vpMbEdgeTracker::updateMovingEdge(*ptr_I);

    vpMbEdgeTracker::initMovingEdge(*ptr_I, m_cMo);
    // Reinit the moving edge for the lines which need it.
    vpMbEdgeTracker::reinitMovingEdge(*ptr_I, m_cMo);

    if (computeProjError) {
      vpMbEdgeTracker::computeProjectionError(*ptr_I);
    }
  }
}
6478 
preTracking(const vpImage<unsigned char> * const ptr_I,const std::vector<vpColVector> * const point_cloud,const unsigned int pointcloud_width,const unsigned int pointcloud_height)6479 void vpMbGenericTracker::TrackerWrapper::preTracking(const vpImage<unsigned char> * const ptr_I,
6480                                                      const std::vector<vpColVector> * const point_cloud,
6481                                                      const unsigned int pointcloud_width,
6482                                                      const unsigned int pointcloud_height)
6483 {
6484   if (m_trackerType & EDGE_TRACKER) {
6485     try {
6486       vpMbEdgeTracker::trackMovingEdge(*ptr_I);
6487     } catch (...) {
6488       std::cerr << "Error in moving edge tracking" << std::endl;
6489       throw;
6490     }
6491   }
6492 
6493 #if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
6494   if (m_trackerType & KLT_TRACKER) {
6495     try {
6496       vpMbKltTracker::preTracking(*ptr_I);
6497     } catch (const vpException &e) {
6498       std::cerr << "Error in KLT tracking: " << e.what() << std::endl;
6499       throw;
6500     }
6501   }
6502 #endif
6503 
6504   if (m_trackerType & DEPTH_NORMAL_TRACKER) {
6505     try {
6506       vpMbDepthNormalTracker::segmentPointCloud(*point_cloud, pointcloud_width, pointcloud_height);
6507     } catch (...) {
6508       std::cerr << "Error in Depth tracking" << std::endl;
6509       throw;
6510     }
6511   }
6512 
6513   if (m_trackerType & DEPTH_DENSE_TRACKER) {
6514     try {
6515       vpMbDepthDenseTracker::segmentPointCloud(*point_cloud, pointcloud_width, pointcloud_height);
6516     } catch (...) {
6517       std::cerr << "Error in Depth dense tracking" << std::endl;
6518       throw;
6519     }
6520   }
6521 }
6522 
/*!
  Re-initialize the tracker with a new CAD model and pose: tear down every
  per-feature data structure (edge lines/cylinders/circles, KLT polygons and
  cylinders, depth faces), reload the model and re-initialize from \e cMo.

  \param I : Pointer to the grayscale image, or NULL.
  \param I_color : Pointer to the color image, used (and dereferenced) only
  when \e I is NULL — exactly one of the two is expected to be non-NULL.
  \param cad_name : Path to the CAD model file (.cao or .wrl).
  \param cMo : New pose of the camera w.r.t. the object frame.
  \param verbose : If true, print model-loading information.
  \param T : Optional transformation applied to the model on loading.
*/
void vpMbGenericTracker::TrackerWrapper::reInitModel(const vpImage<unsigned char> * const I, const vpImage<vpRGBa> * const I_color,
                                                     const std::string &cad_name, const vpHomogeneousMatrix &cMo, bool verbose,
                                                     const vpHomogeneousMatrix &T)
{
  // Reset the pose before reloading; it is set again by initFromPose() below.
  m_cMo.eye();

  // Edge: free every feature of every enabled scale.
  vpMbtDistanceLine *l;
  vpMbtDistanceCylinder *cy;
  vpMbtDistanceCircle *ci;

  for (unsigned int i = 0; i < scales.size(); i++) {
    if (scales[i]) {
      for (std::list<vpMbtDistanceLine *>::const_iterator it = lines[i].begin(); it != lines[i].end(); ++it) {
        l = *it;
        if (l != NULL)
          delete l;
        l = NULL;
      }

      for (std::list<vpMbtDistanceCylinder *>::const_iterator it = cylinders[i].begin(); it != cylinders[i].end();
           ++it) {
        cy = *it;
        if (cy != NULL)
          delete cy;
        cy = NULL;
      }

      for (std::list<vpMbtDistanceCircle *>::const_iterator it = circles[i].begin(); it != circles[i].end(); ++it) {
        ci = *it;
        if (ci != NULL)
          delete ci;
        ci = NULL;
      }

      lines[i].clear();
      cylinders[i].clear();
      circles[i].clear();
    }
  }

  // Reset edge feature counters.
  nline = 0;
  ncylinder = 0;
  ncircle = 0;
  nbvisiblepolygone = 0;

// KLT
#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
#if (VISP_HAVE_OPENCV_VERSION < 0x020408)
  // Old OpenCV C API: the current image must be released manually.
  if (cur != NULL) {
    cvReleaseImage(&cur);
    cur = NULL;
  }
#endif

  // delete the Klt Polygon features
  for (std::list<vpMbtDistanceKltPoints *>::const_iterator it = kltPolygons.begin(); it != kltPolygons.end(); ++it) {
    vpMbtDistanceKltPoints *kltpoly = *it;
    if (kltpoly != NULL) {
      delete kltpoly;
    }
    kltpoly = NULL;
  }
  kltPolygons.clear();

  for (std::list<vpMbtDistanceKltCylinder *>::const_iterator it = kltCylinders.begin(); it != kltCylinders.end();
       ++it) {
    vpMbtDistanceKltCylinder *kltPolyCylinder = *it;
    if (kltPolyCylinder != NULL) {
      delete kltPolyCylinder;
    }
    kltPolyCylinder = NULL;
  }
  kltCylinders.clear();

  // delete the structures used to display circles
  for (std::list<vpMbtDistanceCircle *>::const_iterator it = circles_disp.begin(); it != circles_disp.end(); ++it) {
    ci = *it;
    if (ci != NULL) {
      delete ci;
    }
    ci = NULL;
  }
  circles_disp.clear();

  firstInitialisation = true;

#endif

  // Depth normal
  for (size_t i = 0; i < m_depthNormalFaces.size(); i++) {
    delete m_depthNormalFaces[i];
    m_depthNormalFaces[i] = NULL;
  }
  m_depthNormalFaces.clear();

  // Depth dense
  for (size_t i = 0; i < m_depthDenseFaces.size(); i++) {
    delete m_depthDenseFaces[i];
    m_depthDenseFaces[i] = NULL;
  }
  m_depthDenseFaces.clear();

  faces.reset();

  // Reload the model and re-initialize from the given pose; dereferences
  // *I_color when I is NULL, so both being NULL would crash here.
  loadModel(cad_name, verbose, T);
  if (I) {
    initFromPose(*I, cMo);
  } else {
    initFromPose(*I_color, cMo);
  }
}
6635 
reInitModel(const vpImage<unsigned char> & I,const std::string & cad_name,const vpHomogeneousMatrix & cMo,bool verbose,const vpHomogeneousMatrix & T)6636 void vpMbGenericTracker::TrackerWrapper::reInitModel(const vpImage<unsigned char> &I, const std::string &cad_name,
6637                                                      const vpHomogeneousMatrix &cMo, bool verbose,
6638                                                      const vpHomogeneousMatrix &T)
6639 {
6640   reInitModel(&I, NULL, cad_name, cMo, verbose, T);
6641 }
6642 
reInitModel(const vpImage<vpRGBa> & I_color,const std::string & cad_name,const vpHomogeneousMatrix & cMo,bool verbose,const vpHomogeneousMatrix & T)6643 void vpMbGenericTracker::TrackerWrapper::reInitModel(const vpImage<vpRGBa> &I_color, const std::string &cad_name,
6644                                                      const vpHomogeneousMatrix &cMo, bool verbose,
6645                                                      const vpHomogeneousMatrix &T)
6646 {
6647   reInitModel(NULL, &I_color, cad_name, cMo, verbose, T);
6648 }
6649 
/*!
  Reset every sub-tracker (edge, KLT when built in, depth normal, depth dense)
  to its default state.
*/
void vpMbGenericTracker::TrackerWrapper::resetTracker()
{
  vpMbEdgeTracker::resetTracker();
#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
  vpMbKltTracker::resetTracker();
#endif
  vpMbDepthNormalTracker::resetTracker();
  vpMbDepthDenseTracker::resetTracker();
}
6659 
/*!
  Set the camera parameters, propagating them to every sub-tracker so they
  all share the same intrinsics.

  \param cam : The new camera parameters.
*/
void vpMbGenericTracker::TrackerWrapper::setCameraParameters(const vpCameraParameters &cam)
{
  m_cam = cam;

  vpMbEdgeTracker::setCameraParameters(m_cam);
#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
  vpMbKltTracker::setCameraParameters(m_cam);
#endif
  vpMbDepthNormalTracker::setCameraParameters(m_cam);
  vpMbDepthDenseTracker::setCameraParameters(m_cam);
}
6671 
/*!
  Set the clipping flags; delegated to the edge tracker only.

  \param flags : Bitmask of vpPolygon3D clipping flags.
*/
void vpMbGenericTracker::TrackerWrapper::setClipping(const unsigned int &flags)
{
  vpMbEdgeTracker::setClipping(flags);
}
6676 
/*!
  Set the far clipping distance; delegated to the edge tracker only.

  \param dist : Far clipping distance.
*/
void vpMbGenericTracker::TrackerWrapper::setFarClippingDistance(const double &dist)
{
  vpMbEdgeTracker::setFarClippingDistance(dist);
}
6681 
/*!
  Set the near clipping distance; delegated to the edge tracker only.

  \param dist : Near clipping distance.
*/
void vpMbGenericTracker::TrackerWrapper::setNearClippingDistance(const double &dist)
{
  vpMbEdgeTracker::setNearClippingDistance(dist);
}
6686 
/*!
  Enable or disable the Ogre-based visibility test; when Ogre is available,
  also name the render window for this wrapper.

  \param v : True to use the Ogre visibility test.
*/
void vpMbGenericTracker::TrackerWrapper::setOgreVisibilityTest(const bool &v)
{
  vpMbTracker::setOgreVisibilityTest(v);
#ifdef VISP_HAVE_OGRE
  faces.getOgreContext()->setWindowName("TrackerWrapper");
#endif
}
6694 
/*!
  Set the current pose to \e cdMo and re-synchronize the enabled sub-trackers
  accordingly (KLT re-projection, moving-edge reset, visibility updates).

  \param I : Pointer to the grayscale image, or NULL.
  \param I_color : Pointer to the color image; when given it is converted
  into the internal grayscale buffer m_I, which is used whenever I is NULL.
  \param cdMo : New pose of the camera w.r.t. the object frame.
*/
void vpMbGenericTracker::TrackerWrapper::setPose(const vpImage<unsigned char> * const I, const vpImage<vpRGBa> * const I_color,
                                                 const vpHomogeneousMatrix &cdMo)
{
  bool performKltSetPose = false;
  if (I_color) {
    vpImageConvert::convert(*I_color, m_I);
  }

#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
  if (m_trackerType & KLT_TRACKER) {
    performKltSetPose = true;

    // FOV is needed when scanline rendering or FOV clipping is active
    // (clippingFlag > 3 presumably means FOV-related flags are set — TODO confirm).
    if (useScanLine || clippingFlag > 3) {
      m_cam.computeFov(I ? I->getWidth() : m_I.getWidth(), I ? I->getHeight() : m_I.getHeight());
    }

    // The KLT setPose() handles pose update and feature re-projection itself.
    vpMbKltTracker::setPose(I ? *I : m_I, cdMo);
  }
#endif

  // Without the KLT path, fall back to a plain re-initialization at the pose.
  if (!performKltSetPose) {
    m_cMo = cdMo;
    init(I ? *I : m_I);
    return;
  }

  if (m_trackerType & EDGE_TRACKER) {
    vpMbEdgeTracker::resetMovingEdge();
  }

  if (useScanLine) {
    faces.computeClippedPolygons(m_cMo, m_cam);
    faces.computeScanLineRender(m_cam, I ? I->getWidth() : m_I.getWidth(), I ? I->getHeight() : m_I.getHeight());
  }

// Disabled pyramidal variant kept for reference; the #else branch is active.
#if 0
  if (m_trackerType & EDGE_TRACKER) {
    initPyramid(I, Ipyramid);

    unsigned int i = (unsigned int) scales.size();
    do {
      i--;
      if(scales[i]){
        downScale(i);
        vpMbEdgeTracker::initMovingEdge(*Ipyramid[i], cMo);
        upScale(i);
      }
    } while(i != 0);

    cleanPyramid(Ipyramid);
  }
#else
  if (m_trackerType & EDGE_TRACKER) {
    vpMbEdgeTracker::initMovingEdge(I ? *I : m_I, m_cMo);
  }
#endif

  // Depth normal
  vpMbDepthNormalTracker::computeVisibility(I ? I->getWidth() : m_I.getWidth(), I ? I->getHeight() : m_I.getHeight());

  // Depth dense
  vpMbDepthDenseTracker::computeVisibility(I ? I->getWidth() : m_I.getWidth(), I ? I->getHeight() : m_I.getHeight());
}
6758 
setPose(const vpImage<unsigned char> & I,const vpHomogeneousMatrix & cdMo)6759 void vpMbGenericTracker::TrackerWrapper::setPose(const vpImage<unsigned char> &I, const vpHomogeneousMatrix &cdMo)
6760 {
6761   setPose(&I, NULL, cdMo);
6762 }
6763 
setPose(const vpImage<vpRGBa> & I_color,const vpHomogeneousMatrix & cdMo)6764 void vpMbGenericTracker::TrackerWrapper::setPose(const vpImage<vpRGBa> &I_color, const vpHomogeneousMatrix &cdMo)
6765 {
6766   setPose(NULL, &I_color, cdMo);
6767 }
6768 
/*!
  Enable or disable projection-error computation; delegated to the edge
  tracker only.

  \param flag : True to compute the projection error.
*/
void vpMbGenericTracker::TrackerWrapper::setProjectionErrorComputation(const bool &flag)
{
  vpMbEdgeTracker::setProjectionErrorComputation(flag);
}
6773 
/*!
  Enable or disable the scanline visibility test on every sub-tracker.

  \param v : True to use the scanline visibility test.
*/
void vpMbGenericTracker::TrackerWrapper::setScanLineVisibilityTest(const bool &v)
{
  vpMbEdgeTracker::setScanLineVisibilityTest(v);
#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
  vpMbKltTracker::setScanLineVisibilityTest(v);
#endif
  vpMbDepthNormalTracker::setScanLineVisibilityTest(v);
  vpMbDepthDenseTracker::setScanLineVisibilityTest(v);
}
6783 
setTrackerType(int type)6784 void vpMbGenericTracker::TrackerWrapper::setTrackerType(int type)
6785 {
6786   if ((type & (EDGE_TRACKER |
6787 #if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
6788                KLT_TRACKER |
6789 #endif
6790                DEPTH_NORMAL_TRACKER | DEPTH_DENSE_TRACKER)) == 0) {
6791     throw vpException(vpTrackingException::badValue, "bad value for tracker type: !", type);
6792   }
6793 
6794   m_trackerType = type;
6795 }
6796 
testTracking()6797 void vpMbGenericTracker::TrackerWrapper::testTracking()
6798 {
6799   if (m_trackerType & EDGE_TRACKER) {
6800     vpMbEdgeTracker::testTracking();
6801   }
6802 }
6803 
/*!
  Track on a single grayscale image. Requires the edge and/or KLT tracker to
  be enabled; otherwise an error is printed and the call is a no-op.

  Note: the image parameter is only named (and the tracking actually run)
  when PCL and C++11 are available, since the implementation forwards to the
  PCL overload with a null point cloud; without them this method does nothing
  after the type check.
*/
void vpMbGenericTracker::TrackerWrapper::track(const vpImage<unsigned char> &
#if defined(VISP_HAVE_PCL) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
                                                   I
#endif
)
{
  if ((m_trackerType & (EDGE_TRACKER
#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
                        | KLT_TRACKER
#endif
                        )) == 0) {
    std::cerr << "Bad tracker type: " << m_trackerType << std::endl;
    return;
  }

#if defined(VISP_HAVE_PCL) && (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
  track(&I, nullptr);
#endif
}
6823 
/*!
  Color-image overload: intentionally empty.
*/
void vpMbGenericTracker::TrackerWrapper::track(const vpImage<vpRGBa> &)
{
  //not exposed to the public API, only for debug
}
6828 
6829 #ifdef VISP_HAVE_PCL
/*!
  Full tracking step (PCL variant): pre-tracking feature extraction, VVS
  pose optimization, optional quality test, then post-tracking bookkeeping.
  On failure the previous pose is restored and the exception rethrown.

  \param ptr_I : Pointer to the grayscale image; required (non-NULL) when the
  edge or KLT tracker is enabled.
  \param point_cloud : Organized point cloud; required (non-null) when a
  depth tracker is enabled.

  \exception vpException::fatalError : If a required input is missing.
*/
void vpMbGenericTracker::TrackerWrapper::track(const vpImage<unsigned char> *const ptr_I,
                                               const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &point_cloud)
{
  // At least one supported tracker must be enabled.
  if ((m_trackerType & (EDGE_TRACKER |
#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
                        KLT_TRACKER |
#endif
                        DEPTH_NORMAL_TRACKER | DEPTH_DENSE_TRACKER)) == 0) {
    std::cerr << "Bad tracker type: " << m_trackerType << std::endl;
    return;
  }

  // Image-based trackers need a valid image pointer.
  if (m_trackerType & (EDGE_TRACKER
#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
                       | KLT_TRACKER
#endif
                       ) &&
      ptr_I == NULL) {
    throw vpException(vpException::fatalError, "Image pointer is NULL!");
  }

  // Depth-based trackers need a valid point cloud.
  if (m_trackerType & (DEPTH_NORMAL_TRACKER | DEPTH_DENSE_TRACKER) && ! point_cloud) { // point_cloud == nullptr
    throw vpException(vpException::fatalError, "Pointcloud smart pointer is NULL!");
  }

  // Back-up cMo in case of exception
  vpHomogeneousMatrix cMo_1 = m_cMo;
  try {
    preTracking(ptr_I, point_cloud);

    try {
      computeVVS(ptr_I);
    } catch (...) {
      // Mark the covariance as invalid before propagating the failure.
      covarianceMatrix = -1;
      throw; // throw the original exception
    }

    // Quality test is only meaningful when the edge tracker runs alone
    // (note: intentional == comparison, not a bitmask test).
    if (m_trackerType == EDGE_TRACKER)
      testTracking();

    postTracking(ptr_I, point_cloud);

  } catch (const vpException &e) {
    std::cerr << "Exception: " << e.what() << std::endl;
    // Restore the pose from before this tracking step.
    m_cMo = cMo_1;
    throw; // rethrowing the original exception
  }
}
6878 #endif
6879