/****************************************************************************
 *
 * ViSP, open source Visual Servoing Platform software.
 * Copyright (C) 2005 - 2019 by Inria. All rights reserved.
 *
 * This software is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * See the file LICENSE.txt at the root directory of this source
 * distribution for additional information about the GNU GPL.
 *
 * For using ViSP with software that can not be combined with the GNU
 * GPL, please contact Inria about acquiring a ViSP Professional
 * Edition License.
 *
 * See http://visp.inria.fr for more information.
 *
 * This software was developed at:
 * Inria Rennes - Bretagne Atlantique
 * Campus Universitaire de Beaulieu
 * 35042 Rennes Cedex
 * France
 *
 * If you have questions regarding the use of this file, please contact
 * Inria at visp@inria.fr
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Description:
 * Key point functionalities.
 *
 * Authors:
 * Souriya Trinh
 *
 *****************************************************************************/
#ifndef _vpKeyPoint_h_
#define _vpKeyPoint_h_

#include <algorithm> // std::transform
#include <float.h>   // DBL_MAX
#include <fstream>   // std::ofstream
#include <limits>
#include <map>      // std::map
#include <numeric>  // std::accumulate
#include <stdlib.h> // srand, rand
#include <time.h>   // time
#include <vector>   // std::vector

#include <visp3/core/vpConfig.h>
#include <visp3/core/vpDisplay.h>
#include <visp3/core/vpImageConvert.h>
#include <visp3/core/vpPixelMeterConversion.h>
#include <visp3/core/vpPlane.h>
#include <visp3/core/vpPoint.h>
#include <visp3/vision/vpBasicKeyPoint.h>
#include <visp3/vision/vpPose.h>
#ifdef VISP_HAVE_MODULE_IO
#  include <visp3/io/vpImageIo.h>
#endif
#include <visp3/core/vpConvert.h>
#include <visp3/core/vpCylinder.h>
#include <visp3/core/vpMeterPixelConversion.h>
#include <visp3/core/vpPolygon.h>
#include <visp3/vision/vpXmlConfigParserKeyPoint.h>

// Require at least OpenCV >= 2.1.1
#if (VISP_HAVE_OPENCV_VERSION >= 0x020101)

#  include <opencv2/calib3d/calib3d.hpp>
#  include <opencv2/features2d/features2d.hpp>
#  include <opencv2/imgproc/imgproc.hpp>

#  if (VISP_HAVE_OPENCV_VERSION >= 0x040000) // Require opencv >= 4.0.0
#    include <opencv2/imgproc/imgproc_c.h>
#    include <opencv2/imgproc.hpp>
#  endif

#  if defined(VISP_HAVE_OPENCV_XFEATURES2D) // OpenCV >= 3.0.0
#    include <opencv2/xfeatures2d.hpp>
#  elif defined(VISP_HAVE_OPENCV_NONFREE) && (VISP_HAVE_OPENCV_VERSION >= 0x020400) && \
     (VISP_HAVE_OPENCV_VERSION < 0x030000)
#    include <opencv2/nonfree/nonfree.hpp>
#  endif
/*!
  \class vpKeyPoint
  \ingroup group_vision_keypoints group_detection_keypoint group_detection_mbt_object

  \brief Class that allows keypoint detection (and descriptor extraction) and
  matching thanks to the OpenCV library. To enable this class, OpenCV must be
  installed. Installation instructions are provided here
  https://visp.inria.fr/3rd_opencv.

  This class makes it easy to use different types of detectors, extractors
  and matchers. The classical SIFT and SURF keypoints can be used, as well as
  ORB, FAST, etc. keypoints, depending on the version of OpenCV you use.

  \note Due to some patents, SIFT and SURF are packaged in an external
  module, called the nonfree module in OpenCV versions before 3.0.0 and
  xfeatures2d from 3.0.0 on. You have to check that you have the
  corresponding module to use SIFT and SURF.

  The goal of this class is to provide a tool to match reference keypoints
  from a reference image (or train keypoints in OpenCV terminology) and detected
  keypoints from a current image (or query keypoints in OpenCV terminology).

  If you supply the 3D coordinates corresponding to the 2D coordinates of the
  reference keypoints, you can also estimate the pose of the object by
  matching a set of detected keypoints in the current image with the
  reference keypoints, as illustrated in the third example below.

  If you use this class, the first thing you have to do is to build
  the reference keypoints by detecting keypoints in a reference image which
  contains the object to detect. Then you match keypoints detected in a current
  image with those detected in the reference image by calling the matchPoint()
  methods. You can access the lists of matched points thanks to the methods
  getMatchedPointsInReferenceImage() and getMatchedPointsInCurrentImage(). These
  two methods return a list of matched points. The nth element of the first list
  is matched with the nth element of the second list. To provide easy
  compatibility with OpenCV terminology, getTrainKeyPoints() gives you access to
  the list of keypoints detected in the train images (or reference images) and
  getQueryKeyPoints() gives you access to the list of keypoints detected in a
  query image (or current image). The method getMatches() gives you access to a
  list of cv::DMatch with the correspondence between the index of the train
  keypoints and the index of the query keypoints.

  The following small example shows how to use the class to do the matching
  between current and reference keypoints.

  \code
#include <visp3/core/vpImage.h>
#include <visp3/vision/vpKeyPoint.h>

int main()
{
#if (VISP_HAVE_OPENCV_VERSION >= 0x020300)
  vpImage<unsigned char> Ireference;
  vpImage<unsigned char> Icurrent;

  vpKeyPoint::vpFilterMatchingType filterType = vpKeyPoint::ratioDistanceThreshold;
  vpKeyPoint keypoint("ORB", "ORB", "BruteForce-Hamming", filterType);

  // First grab the reference image Ireference
  // Add your code to load the reference image in Ireference

  // Build the reference ORB points.
  keypoint.buildReference(Ireference);

  // Then grab another image which represents the current image Icurrent

  // Match points between the reference points and the ORB points computed in the current image.
  keypoint.matchPoint(Icurrent);

  // Display the matched points
  keypoint.display(Ireference, Icurrent);
#endif

  return (0);
}
  \endcode

  It is also possible to build the reference keypoints in a region of interest
  (ROI) of an image and find keypoints to match in only a part of the current
  image. The following small example shows how to do this:

  \code
#include <visp3/core/vpDisplay.h>
#include <visp3/core/vpImage.h>
#include <visp3/vision/vpKeyPoint.h>

int main()
{
#if (VISP_HAVE_OPENCV_VERSION >= 0x020300)
  vpImage<unsigned char> Ireference;
  vpImage<unsigned char> Icurrent;

  vpKeyPoint::vpFilterMatchingType filterType = vpKeyPoint::ratioDistanceThreshold;
  vpKeyPoint keypoint("ORB", "ORB", "BruteForce-Hamming", filterType);

  // First grab the reference image Ireference
  // Add your code to load the reference image in Ireference

  // Select a part of the image by clicking on two points which define a rectangle
  vpImagePoint corners[2];
  for (int i = 0; i < 2; i++) {
    vpDisplay::getClick(Ireference, corners[i]);
  }

  // Build the reference ORB points.
  unsigned int nbrRef;
  unsigned int height, width;
  height = (unsigned int)(corners[1].get_i() - corners[0].get_i());
  width = (unsigned int)(corners[1].get_j() - corners[0].get_j());
  nbrRef = keypoint.buildReference(Ireference, corners[0], height, width);

  // Then grab another image which represents the current image Icurrent

  // Select a part of the image by clicking on two points which define a rectangle
  for (int i = 0; i < 2; i++) {
    vpDisplay::getClick(Icurrent, corners[i]);
  }

  // Match points between the reference points and the ORB points computed in the current image.
  unsigned int nbrMatched;
  height = (unsigned int)(corners[1].get_i() - corners[0].get_i());
  width = (unsigned int)(corners[1].get_j() - corners[0].get_j());
  nbrMatched = keypoint.matchPoint(Icurrent, corners[0], height, width);

  // Display the matched points
  keypoint.display(Ireference, Icurrent);
#endif

  return (0);
}
  \endcode
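
  If the 3D coordinates of the reference keypoints are known, the pose of the
  object can also be estimated. The following sketch illustrates this
  workflow; it assumes that you fill \e points3f yourself (for example with
  compute3DForPointsInPolygons()) and that \e cam contains your calibrated
  camera parameters:

  \code
#include <iostream>

#include <visp3/core/vpCameraParameters.h>
#include <visp3/core/vpHomogeneousMatrix.h>
#include <visp3/core/vpImage.h>
#include <visp3/vision/vpKeyPoint.h>

int main()
{
#if (VISP_HAVE_OPENCV_VERSION >= 0x020300)
  vpImage<unsigned char> Ireference;
  vpImage<unsigned char> Icurrent;
  vpCameraParameters cam; // Assumption: your calibrated camera parameters

  vpKeyPoint keypoint("ORB", "ORB", "BruteForce-Hamming");

  // Add your code to load the reference image in Ireference

  // Detect keypoints in the reference image
  std::vector<cv::KeyPoint> trainKeyPoints;
  keypoint.detect(Ireference, trainKeyPoints);

  // Add your code to fill points3f with the 3D object coordinates
  // corresponding to each element of trainKeyPoints
  std::vector<cv::Point3f> points3f;

  // Build the reference keypoints with their 3D coordinates
  keypoint.buildReference(Ireference, trainKeyPoints, points3f);

  // Then grab another image which represents the current image Icurrent

  // Match the current keypoints against the reference and estimate the pose
  vpHomogeneousMatrix cMo;
  if (keypoint.matchPoint(Icurrent, cam, cMo)) {
    std::cout << "Estimated pose: " << std::endl << cMo << std::endl;
  }
#endif

  return (0);
}
  \endcode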

  This class is also described in \ref tutorial-matching.
*/
class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint
{

public:
  /*! Predefined filtering method identifier. */
  enum vpFilterMatchingType {
    constantFactorDistanceThreshold, /*!< Keep all the points below a constant
                                        factor threshold. */
    stdDistanceThreshold,            /*!< Keep all the points below a minimal distance +
                                        the standard deviation. */
    ratioDistanceThreshold,          /*!< Keep all the points sufficiently discriminated
                                        (the ratio of the distances between the two best
                                        matches is below the threshold). */
    stdAndRatioDistanceThreshold,    /*!< Keep all the points that agree with at least
                                        one of the two conditions above. */
    noFilterMatching                 /*!< No filtering. */
  };

  /*! Predefined detection method identifier. */
  enum vpDetectionMethodType {
    detectionThreshold, /*!< The object is present if the average of the
                           descriptor distances is below the threshold. */
    detectionScore      /*!< Same condition as the previous one but with a formula
                           taking into account the number of matches; the object is
                           present if the score is above the threshold. */
  };

  /*! Predefined constant for training image format. */
  typedef enum {
    jpgImageFormat, /*!< Save training images in JPG format. */
    pngImageFormat, /*!< Save training images in PNG format. */
    ppmImageFormat, /*!< Save training images in PPM format. */
    pgmImageFormat  /*!< Save training images in PGM format. */
  } vpImageFormatType;

  /*! Predefined constant for feature detection type. */
  enum vpFeatureDetectorType {
#if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
    DETECTOR_FAST,
    DETECTOR_MSER,
    DETECTOR_ORB,
    DETECTOR_BRISK,
    DETECTOR_GFTT,
    DETECTOR_SimpleBlob,
#if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
    DETECTOR_STAR,
#endif
#if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
    (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
    DETECTOR_SIFT,
#endif
#if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
    DETECTOR_SURF,
#endif
#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
    DETECTOR_KAZE,
    DETECTOR_AKAZE,
    DETECTOR_AGAST,
#endif
#if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
    DETECTOR_MSD,
#endif
#endif
    DETECTOR_TYPE_SIZE
  };

  /*! Predefined constant for descriptor extraction type. */
  enum vpFeatureDescriptorType {
#if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
    DESCRIPTOR_ORB,
    DESCRIPTOR_BRISK,
#if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
    DESCRIPTOR_FREAK,
    DESCRIPTOR_BRIEF,
#endif
#if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
    (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
    DESCRIPTOR_SIFT,
#endif
#if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
    DESCRIPTOR_SURF,
#endif
#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
    DESCRIPTOR_KAZE,
    DESCRIPTOR_AKAZE,
#if defined(VISP_HAVE_OPENCV_XFEATURES2D)
    DESCRIPTOR_DAISY,
    DESCRIPTOR_LATCH,
#endif
#endif
#if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
    DESCRIPTOR_VGG,
    DESCRIPTOR_BoostDesc,
#endif
#endif
    DESCRIPTOR_TYPE_SIZE
  };

  vpKeyPoint(const vpFeatureDetectorType &detectorType, const vpFeatureDescriptorType &descriptorType,
             const std::string &matcherName, const vpFilterMatchingType &filterType = ratioDistanceThreshold);
  vpKeyPoint(const std::string &detectorName = "ORB", const std::string &extractorName = "ORB",
             const std::string &matcherName = "BruteForce-Hamming",
             const vpFilterMatchingType &filterType = ratioDistanceThreshold);
  vpKeyPoint(const std::vector<std::string> &detectorNames, const std::vector<std::string> &extractorNames,
             const std::string &matcherName = "BruteForce",
             const vpFilterMatchingType &filterType = ratioDistanceThreshold);

  unsigned int buildReference(const vpImage<unsigned char> &I);
  unsigned int buildReference(const vpImage<unsigned char> &I, const vpImagePoint &iP, unsigned int height,
                              unsigned int width);
  unsigned int buildReference(const vpImage<unsigned char> &I, const vpRect &rectangle);

  unsigned int buildReference(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &trainKeyPoints,
                              std::vector<cv::Point3f> &points3f, bool append = false, int class_id = -1);
  unsigned int buildReference(const vpImage<unsigned char> &I, const std::vector<cv::KeyPoint> &trainKeyPoints,
                              const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f,
                              bool append = false, int class_id = -1);

  unsigned int buildReference(const vpImage<vpRGBa> &I_color);
  unsigned int buildReference(const vpImage<vpRGBa> &I_color, const vpImagePoint &iP, unsigned int height,
                              unsigned int width);
  unsigned int buildReference(const vpImage<vpRGBa> &I_color, const vpRect &rectangle);

  unsigned int buildReference(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &trainKeyPoints,
                              std::vector<cv::Point3f> &points3f, bool append = false, int class_id = -1);
  unsigned int buildReference(const vpImage<vpRGBa> &I, const std::vector<cv::KeyPoint> &trainKeyPoints,
                              const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f,
                              bool append = false, int class_id = -1);

  static void compute3D(const cv::KeyPoint &candidate, const std::vector<vpPoint> &roi, const vpCameraParameters &cam,
                        const vpHomogeneousMatrix &cMo, cv::Point3f &point);

  static void compute3D(const vpImagePoint &candidate, const std::vector<vpPoint> &roi, const vpCameraParameters &cam,
                        const vpHomogeneousMatrix &cMo, vpPoint &point);

  static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
                                           std::vector<cv::KeyPoint> &candidates,
                                           const std::vector<vpPolygon> &polygons,
                                           const std::vector<std::vector<vpPoint> > &roisPt,
                                           std::vector<cv::Point3f> &points, cv::Mat *descriptors = NULL);

  static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
                                           std::vector<vpImagePoint> &candidates,
                                           const std::vector<vpPolygon> &polygons,
                                           const std::vector<std::vector<vpPoint> > &roisPt,
                                           std::vector<vpPoint> &points, cv::Mat *descriptors = NULL);

  static void
  compute3DForPointsOnCylinders(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
                                std::vector<cv::KeyPoint> &candidates, const std::vector<vpCylinder> &cylinders,
                                const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois,
                                std::vector<cv::Point3f> &points, cv::Mat *descriptors = NULL);

  static void
  compute3DForPointsOnCylinders(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
                                std::vector<vpImagePoint> &candidates, const std::vector<vpCylinder> &cylinders,
                                const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois,
                                std::vector<vpPoint> &points, cv::Mat *descriptors = NULL);

  bool computePose(const std::vector<cv::Point2f> &imagePoints, const std::vector<cv::Point3f> &objectPoints,
                   const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector<int> &inlierIndex,
                   double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = NULL);

  bool computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo, std::vector<vpPoint> &inliers,
                   double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = NULL);

  bool computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo, std::vector<vpPoint> &inliers,
                   std::vector<unsigned int> &inlierIndex, double &elapsedTime,
                   bool (*func)(const vpHomogeneousMatrix &) = NULL);

  void createImageMatching(vpImage<unsigned char> &IRef, vpImage<unsigned char> &ICurrent,
                           vpImage<unsigned char> &IMatching);
  void createImageMatching(vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching);

  void createImageMatching(vpImage<unsigned char> &IRef, vpImage<vpRGBa> &ICurrent,
                           vpImage<vpRGBa> &IMatching);
  void createImageMatching(vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching);

  void detect(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints,
              const vpRect &rectangle = vpRect());
  void detect(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints,
              const vpRect &rectangle = vpRect());
  void detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, const cv::Mat &mask = cv::Mat());
  void detect(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
              const vpRect &rectangle = vpRect());
  void detect(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
              const vpRect &rectangle = vpRect());
  void detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
              const cv::Mat &mask = cv::Mat());

  void detectExtractAffine(const vpImage<unsigned char> &I, std::vector<std::vector<cv::KeyPoint> > &listOfKeypoints,
                           std::vector<cv::Mat> &listOfDescriptors,
                           std::vector<vpImage<unsigned char> > *listOfAffineI = NULL);

  void display(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent, unsigned int size = 3);
  void display(const vpImage<unsigned char> &ICurrent, unsigned int size = 3, const vpColor &color = vpColor::green);
  void display(const vpImage<vpRGBa> &IRef, const vpImage<vpRGBa> &ICurrent, unsigned int size = 3);
  void display(const vpImage<vpRGBa> &ICurrent, unsigned int size = 3, const vpColor &color = vpColor::green);

  void displayMatching(const vpImage<unsigned char> &IRef, vpImage<unsigned char> &IMatching, unsigned int crossSize,
                       unsigned int lineThickness = 1, const vpColor &color = vpColor::green);
  void displayMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching,
                       const std::vector<vpImagePoint> &ransacInliers = std::vector<vpImagePoint>(),
                       unsigned int crossSize = 3, unsigned int lineThickness = 1);
  void displayMatching(const vpImage<unsigned char> &IRef, vpImage<vpRGBa> &IMatching, unsigned int crossSize,
                       unsigned int lineThickness = 1, const vpColor &color = vpColor::green);
  void displayMatching(const vpImage<vpRGBa> &IRef, vpImage<vpRGBa> &IMatching, unsigned int crossSize,
                       unsigned int lineThickness = 1, const vpColor &color = vpColor::green);
  void displayMatching(const vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching,
                       const std::vector<vpImagePoint> &ransacInliers = std::vector<vpImagePoint>(),
                       unsigned int crossSize = 3, unsigned int lineThickness = 1);

  void extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
               std::vector<cv::Point3f> *trainPoints = NULL);
  void extract(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
               std::vector<cv::Point3f> *trainPoints = NULL);
  void extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
               std::vector<cv::Point3f> *trainPoints = NULL);
  void extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
               double &elapsedTime, std::vector<cv::Point3f> *trainPoints = NULL);
  void extract(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
               double &elapsedTime, std::vector<cv::Point3f> *trainPoints = NULL);
  void extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors, double &elapsedTime,
               std::vector<cv::Point3f> *trainPoints = NULL);

  /*!
    Get the covariance matrix when estimating the pose using the Virtual
    Visual Servoing approach.

    \warning The compute covariance flag has to be true if you want to compute
    the covariance matrix.

    \sa setCovarianceComputation
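
    A minimal usage sketch, assuming the pose is estimated beforehand with the
    ViSP RANSAC Virtual Visual Servoing method:

    \code
vpKeyPoint keypoint;
keypoint.setUseRansacVVS(true);           // covariance is only available with the VVS pose estimation
keypoint.setCovarianceComputation(true);
// ... build the reference, then call matchPoint() with pose estimation ...
vpMatrix covariance = keypoint.getCovarianceMatrix();
    \endcode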
  */
  inline vpMatrix getCovarianceMatrix() const
  {
    if (!m_computeCovariance) {
      std::cout << "Warning : The covariance matrix has not been computed. "
                   "See setCovarianceComputation() to do it."
                << std::endl;
      return vpMatrix();
    }

    if (m_computeCovariance && !m_useRansacVVS) {
      std::cout << "Warning : The covariance matrix can only be computed "
                   "with a Virtual Visual Servoing approach."
                << std::endl
                << "Use setUseRansacVVS(true) to choose to use a pose "
                   "estimation method based on a Virtual Visual Servoing "
                   "approach."
                << std::endl;
      return vpMatrix();
    }

    return m_covarianceMatrix;
  }

  /*!
    Get the elapsed time to compute the keypoint detection.

    \return The elapsed time.
  */
  inline double getDetectionTime() const { return m_detectionTime; }

  /*!
    Get the detector pointer.
    \param type : Type of the detector.

    \return The detector or NULL if the type passed in parameter does not
    exist.
  */
  inline cv::Ptr<cv::FeatureDetector> getDetector(const vpFeatureDetectorType &type) const
  {
    std::map<vpFeatureDetectorType, std::string>::const_iterator it_name = m_mapOfDetectorNames.find(type);
    if (it_name == m_mapOfDetectorNames.end()) {
      std::cerr << "Internal problem with the feature type and the "
                   "corresponding name!"
                << std::endl;
      // Return an empty pointer instead of dereferencing an invalid iterator
      return cv::Ptr<cv::FeatureDetector>();
    }

    std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator findDetector =
        m_detectors.find(it_name->second);
    if (findDetector != m_detectors.end()) {
      return findDetector->second;
    }

    std::cerr << "Cannot find: " << it_name->second << std::endl;
    return cv::Ptr<cv::FeatureDetector>();
  }

  /*!
    Get the detector pointer.
    \param name : Name of the detector.

    \return The detector or NULL if the name passed in parameter does not
    exist.
  */
  inline cv::Ptr<cv::FeatureDetector> getDetector(const std::string &name) const
  {
    std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator findDetector = m_detectors.find(name);
    if (findDetector != m_detectors.end()) {
      return findDetector->second;
    }

    std::cerr << "Cannot find: " << name << std::endl;
    return cv::Ptr<cv::FeatureDetector>();
  }

  /*!
    Get the feature detector name associated to the type.
  */
  inline std::map<vpFeatureDetectorType, std::string> getDetectorNames() const { return m_mapOfDetectorNames; }

  /*!
    Get the elapsed time to compute the keypoint extraction.

    \return The elapsed time.
  */
  inline double getExtractionTime() const { return m_extractionTime; }

  /*!
    Get the extractor pointer.
    \param type : Type of the descriptor extractor.

    \return The descriptor extractor or NULL if the type passed in parameter
    does not exist.
  */
  inline cv::Ptr<cv::DescriptorExtractor> getExtractor(const vpFeatureDescriptorType &type) const
  {
    std::map<vpFeatureDescriptorType, std::string>::const_iterator it_name = m_mapOfDescriptorNames.find(type);
    if (it_name == m_mapOfDescriptorNames.end()) {
      std::cerr << "Internal problem with the feature type and the "
                   "corresponding name!"
                << std::endl;
      // Return an empty pointer instead of dereferencing an invalid iterator
      return cv::Ptr<cv::DescriptorExtractor>();
    }

    std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator findExtractor =
        m_extractors.find(it_name->second);
    if (findExtractor != m_extractors.end()) {
      return findExtractor->second;
    }

    std::cerr << "Cannot find: " << it_name->second << std::endl;
    return cv::Ptr<cv::DescriptorExtractor>();
  }

  /*!
    Get the extractor pointer.
    \param name : Name of the descriptor extractor.

    \return The descriptor extractor or NULL if the name passed in parameter
    does not exist.
  */
  inline cv::Ptr<cv::DescriptorExtractor> getExtractor(const std::string &name) const
  {
    std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator findExtractor = m_extractors.find(name);
    if (findExtractor != m_extractors.end()) {
      return findExtractor->second;
    }

    std::cerr << "Cannot find: " << name << std::endl;
    return cv::Ptr<cv::DescriptorExtractor>();
  }

  /*!
    Get the feature descriptor extractor name associated to the type.
  */
  inline std::map<vpFeatureDescriptorType, std::string> getExtractorNames() const { return m_mapOfDescriptorNames; }

  /*!
    Get the image format to use when saving training images.

    \return The image format.
  */
  inline vpImageFormatType getImageFormat() const { return m_imageFormat; }

  /*!
    Get the elapsed time to compute the matching.

    \return The elapsed time.
  */
  inline double getMatchingTime() const { return m_matchingTime; }

  /*!
    Get the matcher pointer.

    \return The matcher pointer.
  */
  inline cv::Ptr<cv::DescriptorMatcher> getMatcher() const { return m_matcher; }

  /*!
    Get the list of matches (correspondences between the indexes of the
    detected keypoints and the train keypoints).

    \return The list of matches.
  */
  inline std::vector<cv::DMatch> getMatches() const { return m_filteredMatches; }

  /*!
    Get the list of pairs with the correspondence between the matched query
    and train keypoints.

    \return The list of pairs with the correspondence between the matched
    query and train keypoints.
  */
  inline std::vector<std::pair<cv::KeyPoint, cv::KeyPoint> > getMatchQueryToTrainKeyPoints() const
  {
    std::vector<std::pair<cv::KeyPoint, cv::KeyPoint> > matchQueryToTrainKeyPoints;
    // Reserve (instead of constructing with a size) to avoid prepending
    // default-constructed pairs before the push_back calls
    matchQueryToTrainKeyPoints.reserve(m_filteredMatches.size());
    for (size_t i = 0; i < m_filteredMatches.size(); i++) {
      matchQueryToTrainKeyPoints.push_back(
          std::pair<cv::KeyPoint, cv::KeyPoint>(m_queryFilteredKeyPoints[(size_t)m_filteredMatches[i].queryIdx],
                                                m_trainKeyPoints[(size_t)m_filteredMatches[i].trainIdx]));
    }
    return matchQueryToTrainKeyPoints;
  }

  /*!
    Get the number of train images.

    \return The number of train images.
  */
  inline unsigned int getNbImages() const {
    return static_cast<unsigned int>(m_mapOfImages.size());
  }

  void getObjectPoints(std::vector<cv::Point3f> &objectPoints) const;
  void getObjectPoints(std::vector<vpPoint> &objectPoints) const;

  /*!
    Get the elapsed time to compute the pose.

    \return The elapsed time.
  */
  inline double getPoseTime() const { return m_poseTime; }

  /*!
     Get the descriptors matrix for the query keypoints.

     \return Matrix with descriptor values, one row for each query keypoint.
   */
  inline cv::Mat getQueryDescriptors() const { return m_queryDescriptors; }

  void getQueryKeyPoints(std::vector<cv::KeyPoint> &keyPoints, bool matches = true) const;
  void getQueryKeyPoints(std::vector<vpImagePoint> &keyPoints, bool matches = true) const;

  /*!
    Get the list of Ransac inliers.

    \return The list of Ransac inliers.
  */
  inline std::vector<vpImagePoint> getRansacInliers() const { return m_ransacInliers; }

  /*!
    Get the list of Ransac outliers.

    \return The list of Ransac outliers.
  */
  inline std::vector<vpImagePoint> getRansacOutliers() const { return m_ransacOutliers; }

  /*!
     Get the train descriptors matrix.

     \return Matrix with descriptor values, one row for each train keypoint
     (or reference keypoint).
   */
  inline cv::Mat getTrainDescriptors() const { return m_trainDescriptors; }

  void getTrainKeyPoints(std::vector<cv::KeyPoint> &keyPoints) const;
  void getTrainKeyPoints(std::vector<vpImagePoint> &keyPoints) const;

  void getTrainPoints(std::vector<cv::Point3f> &points) const;
  void getTrainPoints(std::vector<vpPoint> &points) const;

  void initMatcher(const std::string &matcherName);

  void insertImageMatching(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent,
                           vpImage<unsigned char> &IMatching);
  void insertImageMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching);

  void insertImageMatching(const vpImage<vpRGBa> &IRef, const vpImage<vpRGBa> &ICurrent,
                           vpImage<vpRGBa> &IMatching);
  void insertImageMatching(const vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching);

  void loadConfigFile(const std::string &configFile);

  void loadLearningData(const std::string &filename, bool binaryMode = false, bool append = false);

  void match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors, std::vector<cv::DMatch> &matches,
             double &elapsedTime);

  unsigned int matchPoint(const vpImage<unsigned char> &I);
  unsigned int matchPoint(const vpImage<unsigned char> &I, const vpImagePoint &iP, unsigned int height,
                          unsigned int width);
  unsigned int matchPoint(const vpImage<unsigned char> &I, const vpRect &rectangle);

  unsigned int matchPoint(const std::vector<cv::KeyPoint> &queryKeyPoints, const cv::Mat &queryDescriptors);
  bool matchPoint(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
                  bool (*func)(const vpHomogeneousMatrix &) = NULL, const vpRect &rectangle = vpRect());
  bool matchPoint(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
                  double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = NULL,
                  const vpRect &rectangle = vpRect());

  bool matchPointAndDetect(const vpImage<unsigned char> &I, vpRect &boundingBox, vpImagePoint &centerOfGravity,
                           const bool isPlanarObject = true, std::vector<vpImagePoint> *imPts1 = NULL,
                           std::vector<vpImagePoint> *imPts2 = NULL, double *meanDescriptorDistance = NULL,
                           double *detectionScore = NULL, const vpRect &rectangle = vpRect());

  bool matchPointAndDetect(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
                           double &error, double &elapsedTime, vpRect &boundingBox, vpImagePoint &centerOfGravity,
                           bool (*func)(const vpHomogeneousMatrix &) = NULL, const vpRect &rectangle = vpRect());

  unsigned int matchPoint(const vpImage<vpRGBa> &I_color);
  unsigned int matchPoint(const vpImage<vpRGBa> &I_color, const vpImagePoint &iP, unsigned int height,
                          unsigned int width);
  unsigned int matchPoint(const vpImage<vpRGBa> &I_color, const vpRect &rectangle);

  bool matchPoint(const vpImage<vpRGBa> &I_color, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
                  bool (*func)(const vpHomogeneousMatrix &) = NULL, const vpRect &rectangle = vpRect());
  bool matchPoint(const vpImage<vpRGBa> &I_color, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
                  double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = NULL,
                  const vpRect &rectangle = vpRect());

  void reset();

  void saveLearningData(const std::string &filename, bool binaryMode = false,
                        bool saveTrainingImages = true);

  /*!
    Set if the covariance matrix has to be computed in the Virtual Visual
    Servoing approach.

    \param flag : True if the covariance has to be computed, false otherwise.
  */
  inline void setCovarianceComputation(const bool &flag)
  {
    m_computeCovariance = flag;
    if (!m_useRansacVVS) {
      std::cout << "Warning : The covariance matrix can only be computed "
                   "with a Virtual Visual Servoing approach."
                << std::endl
                << "Use setUseRansacVVS(true) to choose to use a pose "
                   "estimation method based on a Virtual "
                   "Visual Servoing approach."
                << std::endl;
    }
  }

  /*!
     Set the method to decide if the object is present or not.

     \param method : Detection method (detectionThreshold or detectionScore).
   */
  inline void setDetectionMethod(const vpDetectionMethodType &method) { m_detectionMethod = method; }

  /*!
     Set and initialize a detector.

     \param detectorType : Type of the detector.
   */
  inline void setDetector(const vpFeatureDetectorType &detectorType)
  {
    m_detectorNames.clear();
    m_detectorNames.push_back(m_mapOfDetectorNames[detectorType]);
    m_detectors.clear();
    initDetector(m_mapOfDetectorNames[detectorType]);
  }

  /*!
     Set and initialize a detector designated by its name \p detectorName.

     \param detectorName : Name of the detector.
   */
  inline void setDetector(const std::string &detectorName)
  {
    m_detectorNames.clear();
    m_detectorNames.push_back(detectorName);
    m_detectors.clear();
    initDetector(detectorName);
  }

#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
  /*!
    Template function to set the parameter \p parameterName to \p value for a
    specific detector designated by its name \p detectorName.

    \param detectorName : Name of the detector
    \param parameterName : Name of the parameter
    \param value : Value to set
  */
  template <typename T1, typename T2, typename T3>
  inline void setDetectorParameter(const T1 detectorName, const T2 parameterName, const T3 value)
  {
    if (m_detectors.find(detectorName) != m_detectors.end()) {
      m_detectors[detectorName]->set(parameterName, value);
    }
  }
#endif

  /*!
     Set and initialize a list of detectors designated by their names \p
     detectorNames.

     \param detectorNames : List of detector names.
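
     For example, to combine several detectors (a minimal sketch; the names
     must correspond to available OpenCV detectors such as "FAST" and "ORB"):

     \code
vpKeyPoint keypoint;
std::vector<std::string> detectorNames;
detectorNames.push_back("FAST");
detectorNames.push_back("ORB");
keypoint.setDetectors(detectorNames);
     \endcode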
   */
  inline void setDetectors(const std::vector<std::string> &detectorNames)
  {
    m_detectorNames.clear();
    m_detectors.clear();
    m_detectorNames = detectorNames;
    initDetectors(m_detectorNames);
  }

  /*!
     Set and initialize a descriptor extractor.

     \param extractorType : Type of the descriptor extractor.
   */
  inline void setExtractor(const vpFeatureDescriptorType &extractorType)
  {
    m_extractorNames.clear();
    m_extractorNames.push_back(m_mapOfDescriptorNames[extractorType]);
    m_extractors.clear();
    initExtractor(m_mapOfDescriptorNames[extractorType]);
  }

  /*!
     Set and initialize a descriptor extractor designated by its name \p
     extractorName.

     \param extractorName : Name of the extractor.
   */
  inline void setExtractor(const std::string &extractorName)
  {
    m_extractorNames.clear();
    m_extractorNames.push_back(extractorName);
    m_extractors.clear();
    initExtractor(extractorName);
  }

#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
  /*!
    Template function to set the parameter \p parameterName to \p value for a
    specific extractor designated by its name \p extractorName.

    \param extractorName : Name of the extractor
    \param parameterName : Name of the parameter
    \param value : Value to set
  */
  template <typename T1, typename T2, typename T3>
  inline void setExtractorParameter(const T1 extractorName, const T2 parameterName, const T3 value)
  {
    if (m_extractors.find(extractorName) != m_extractors.end()) {
      m_extractors[extractorName]->set(parameterName, value);
    }
  }
#endif

  /*!
     Set and initialize a list of extractors designated by their names \p
     extractorNames.

     \param extractorNames : List of extractor names.
   */
  inline void setExtractors(const std::vector<std::string> &extractorNames)
  {
    m_extractorNames.clear();
    m_extractorNames = extractorNames;
    m_extractors.clear();
    initExtractors(m_extractorNames);
  }

  /*!
    Set the image format to use when saving training images.

    \param imageFormat : The image format.
  */
  inline void setImageFormat(const vpImageFormatType &imageFormat) { m_imageFormat = imageFormat; }

  /*!
     Set and initialize a matcher designated by its name \p matcherName.
     The different matchers are:
       - BruteForce (it uses L2 distance)
       - BruteForce-L1
       - BruteForce-Hamming
       - BruteForce-Hamming(2)
       - FlannBased

     L1 and L2 norms are preferable choices for SIFT and SURF descriptors,
     NORM_HAMMING should be used with ORB, BRISK and BRIEF, NORM_HAMMING2
     should be used with ORB when WTA_K==3 or 4.

     \param matcherName : Name of the matcher.
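
     For example, to pair a binary descriptor such as ORB with a Hamming-norm
     matcher (a minimal sketch):

     \code
vpKeyPoint keypoint;
keypoint.setExtractor("ORB");
keypoint.setMatcher("BruteForce-Hamming");
     \endcode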
   */
  inline void setMatcher(const std::string &matcherName)
  {
    m_matcherName = matcherName;
    initMatcher(m_matcherName);
  }

  /*!
   * Set the maximum number of keypoints to extract.
   * \warning This functionality is only available for ORB and SIFT extractors.
   * \param maxFeatures : Maximum number of keypoints to extract. Set -1 to use default values.
   */
  void setMaxFeatures(int maxFeatures)
  {
    m_maxFeatures = maxFeatures;
  }

  /*!
    Set the filtering method to eliminate false matching.
    The different methods are:
      - vpKeyPoint::constantFactorDistanceThreshold : Keep matches whose descriptor
    distance is below dist_min * factor.
      - vpKeyPoint::stdDistanceThreshold : Keep matches whose descriptor distance is
    below dist_min + standard_deviation.
      - vpKeyPoint::ratioDistanceThreshold : Keep matches sufficiently discriminated
    when the ratio of the distances between the 2 best matches is below the threshold.
      - vpKeyPoint::stdAndRatioDistanceThreshold : Keep matches that agree with at least
    one of the two conditions above.
      - vpKeyPoint::noFilterMatching : No filter is applied.

    \param filterType : Type of the filtering method
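
    For example, to keep only sufficiently discriminated matches (a minimal
    sketch; the 0.8 ratio is an illustrative value, not a recommended default):

    \code
vpKeyPoint keypoint;
keypoint.setFilterMatchingType(vpKeyPoint::ratioDistanceThreshold);
keypoint.setMatchingRatioThreshold(0.8);
    \endcode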
  */
  inline void setFilterMatchingType(const vpFilterMatchingType &filterType)
  {
    m_filterType = filterType;

    // Use k-nearest neighbors (knn) to retrieve the two best matches for a
    // keypoint, so this is useful only for the ratioDistanceThreshold method
    if (filterType == ratioDistanceThreshold || filterType == stdAndRatioDistanceThreshold) {
      m_useKnn = true;

#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
      if (m_matcher != NULL && m_matcherName == "BruteForce") {
        // if a matcher is already initialized, disable the crossCheck
        // because it will not work with knnMatch
        m_matcher->set("crossCheck", false);
      }
#endif
    } else {
      m_useKnn = false;

#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
      if (m_matcher != NULL && m_matcherName == "BruteForce") {
        // if a matcher is already initialized, set the crossCheck mode if
        // necessary
        m_matcher->set("crossCheck", m_useBruteForceCrossCheck);
      }
#endif
    }
  }

  /*!
    Set the factor value for the filtering method:
    constantFactorDistanceThreshold.

    \param factor : Factor value
  */
  inline void setMatchingFactorThreshold(const double factor)
  {
    if (factor > 0.0) {
      m_matchingFactorThreshold = factor;
    } else {
      throw vpException(vpException::badValue, "The factor must be positive.");
    }
  }

  /*!
    Set the ratio value for the filtering method: ratioDistanceThreshold.

    \param ratio : Ratio value (]0 ; 1])
  */
  inline void setMatchingRatioThreshold(double ratio)
  {
    if (ratio > 0.0 && (ratio < 1.0 || std::fabs(ratio - 1.0) < std::numeric_limits<double>::epsilon())) {
      m_matchingRatioThreshold = ratio;
    } else {
      throw vpException(vpException::badValue, "The ratio must be in the interval ]0 ; 1].");
    }
  }

  /*!
    Set the percentage value for defining the cardinality of the consensus
    group.

    \param percentage : Percentage value (]0 ; 100])
  */
  inline void setRansacConsensusPercentage(double percentage)
  {
    if (percentage > 0.0 &&
        (percentage < 100.0 || std::fabs(percentage - 100.0) < std::numeric_limits<double>::epsilon())) {
      m_ransacConsensusPercentage = percentage;
    } else {
      throw vpException(vpException::badValue, "The percentage must be in the interval ]0 ; 100].");
    }
  }

  /*!
    Set the filter flag used for the RANSAC pose estimation.
  */
  inline void setRansacFilterFlag(const vpPose::RANSAC_FILTER_FLAGS &flag)
  {
    m_ransacFilterFlag = flag;
  }

  /*!
    Set the maximum number of iterations for the Ransac pose estimation
    method.

    \param nbIter : Maximum number of iterations for the Ransac
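
    A typical RANSAC configuration could look like the following sketch (all
    values are illustrative only, not recommended defaults):

    \code
vpKeyPoint keypoint;
keypoint.setRansacIteration(200);
keypoint.setRansacReprojectionError(6.0); // pixels, used by the OpenCV pose estimation
keypoint.setRansacThreshold(0.005);       // meters, used by the ViSP pose estimation
keypoint.setRansacMinInlierCount(10);
    \endcode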
  */
  inline void setRansacIteration(int nbIter)
  {
    if (nbIter > 0) {
      m_nbRansacIterations = nbIter;
    } else {
      throw vpException(vpException::badValue, "The number of iterations must be greater than zero.");
    }
  }

  /*!
    Use or not the multithreaded version.

    \note Needs C++11 or higher.
  */
  inline void setRansacParallel(bool parallel)
  {
    m_ransacParallel = parallel;
  }

  /*!
    Set the number of threads to use if multithreaded RANSAC pose estimation
    is enabled.

    \param nthreads : Number of threads; if 0, the number of CPU threads will
    be determined.
    \sa setRansacParallel
  */
  inline void setRansacParallelNbThreads(unsigned int nthreads)
  {
    m_ransacParallelNbThreads = nthreads;
  }

  /*!
    Set the maximum reprojection error (in pixel) to determine if a point is
    an inlier or not.

    \param reprojectionError : Maximum reprojection error in pixel (used by
    the OpenCV function)
  */
  inline void setRansacReprojectionError(double reprojectionError)
  {
    if (reprojectionError > 0.0) {
      m_ransacReprojectionError = reprojectionError;
    } else {
      throw vpException(vpException::badValue, "The Ransac reprojection "
                                               "threshold must be positive "
                                               "as we deal with distance.");
    }
  }

  /*!
    Set the minimum number of inliers for the Ransac pose estimation method.

    \param minCount : Minimum number of inliers for the consensus
  */
  inline void setRansacMinInlierCount(int minCount)
  {
    if (minCount > 0) {
      m_nbRansacMinInlierCount = minCount;
    } else {
      throw vpException(vpException::badValue, "The minimum number of inliers must be greater than zero.");
    }
  }

  /*!
    Set the maximum error (in meter) to determine if a point is an inlier or
    not.

    \param threshold : Maximum error in meter for the ViSP function
  */
  inline void setRansacThreshold(double threshold)
  {
    if (threshold > 0.0) {
      m_ransacThreshold = threshold;
    } else {
      throw vpException(vpException::badValue, "The Ransac threshold must be positive as we deal with distance.");
    }
  }

  /*!
    Set if multiple affine transformations must be used to detect and extract
    keypoints.

    \param useAffine : True to use multiple affine transformations, false
    otherwise
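
    A minimal sketch, assuming Ireference and Icurrent are already loaded and
    that the affine simulation is then applied when building the reference and
    matching:

    \code
vpKeyPoint keypoint;
keypoint.setUseAffineDetection(true); // simulate multiple affine views (ASIFT-like)
keypoint.buildReference(Ireference);
keypoint.matchPoint(Icurrent);
    \endcode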
  */
  inline void setUseAffineDetection(bool useAffine) { m_useAffineDetection = useAffine; }

#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
  /*!
    Set if the cross check method must be used to eliminate some false matches
    with a brute-force matching method.

    \param useCrossCheck : True to use cross check, false otherwise
  */
  inline void setUseBruteForceCrossCheck(bool useCrossCheck)
  {
    // Only available with BruteForce and with k=1 (i.e. not used with a
    // ratioDistanceThreshold method)
    if (m_matcher != NULL && !m_useKnn && m_matcherName == "BruteForce") {
      m_matcher->set("crossCheck", useCrossCheck);
    } else if (m_matcher != NULL && m_useKnn && m_matcherName == "BruteForce") {
      std::cout << "Warning, you try to set the crossCheck parameter with a "
                   "BruteForce matcher but knn is enabled";
      std::cout << " (the filtering method uses a ratio constraint)" << std::endl;
    }
  }
#endif

  /*!
    Set if we want to match the train keypoints to the query keypoints.

    \param useMatchTrainToQuery : True to match the train keypoints to the
    query keypoints
   */
  inline void setUseMatchTrainToQuery(bool useMatchTrainToQuery)
  {
    m_useMatchTrainToQuery = useMatchTrainToQuery;
  }

  /*!
    Set the flag to choose between a percentage value of inliers for the
    cardinality of the consensus group or a minimum number.

    \param usePercentage : True to use a percentage of inliers, otherwise
    use a specified number of inliers
  */
  inline void setUseRansacConsensusPercentage(bool usePercentage) { m_useConsensusPercentage = usePercentage; }

  /*!
    Set the flag to choose between the OpenCV or ViSP Ransac pose estimation
    function.

    \param ransacVVS : True to use the ViSP function, otherwise use the OpenCV
    function
  */
  inline void setUseRansacVVS(bool ransacVVS) { m_useRansacVVS = ransacVVS; }

  /*!
    Set the flag to filter matches where multiple query keypoints are matched
    to the same train keypoints.

    \param singleMatchFilter : True to use the single match filter.
   */
  inline void setUseSingleMatchFilter(bool singleMatchFilter) { m_useSingleMatchFilter = singleMatchFilter; }

1179 private:
1180   //! If true, compute covariance matrix if the user select the pose
1181   //! estimation method using ViSP
1182   bool m_computeCovariance;
1183   //! Covariance matrix
1184   vpMatrix m_covarianceMatrix;
1185   //! Current id associated to the training image used for the learning.
1186   int m_currentImageId;
1187   //! Method (based on descriptor distances) to decide if the object is
1188   //! present or not.
1189   vpDetectionMethodType m_detectionMethod;
1190   //! Detection score to decide if the object is present or not.
1191   double m_detectionScore;
1192   //! Detection threshold based on average of descriptor distances to decide
1193   //! if the object is present or not.
1194   double m_detectionThreshold;
1195   //! Elapsed time to detect keypoints.
1196   double m_detectionTime;
1197   //! List of detector names.
1198   std::vector<std::string> m_detectorNames;
1199   //! Map of smart reference-counting pointers (similar to shared_ptr in
1200   //! Boost) detectors,
1201   // with a key based upon the detector name.
1202   std::map<std::string, cv::Ptr<cv::FeatureDetector> > m_detectors;
1203   //! Elapsed time to extract descriptors for the detected keypoints.
1204   double m_extractionTime;
1205   //! List of extractor name.
1206   std::vector<std::string> m_extractorNames;
1207   //! Map of smart reference-counting pointers (similar to shared_ptr in
1208   //! Boost) extractors,
1209   // with a key based upon the extractor name.
1210   std::map<std::string, cv::Ptr<cv::DescriptorExtractor> > m_extractors;
1211   //! List of filtered matches between the detected and the trained keypoints.
1212   std::vector<cv::DMatch> m_filteredMatches;
1213   //! Chosen method of filtering to eliminate false matching.
1214   vpFilterMatchingType m_filterType;
1215   //! Image format to use when saving the training images
1216   vpImageFormatType m_imageFormat;
1217   //! List of k-nearest neighbors for each detected keypoints (if the method
1218   //! chosen is based upon on knn).
1219   std::vector<std::vector<cv::DMatch> > m_knnMatches;
1220   //! Map descriptor enum type to string.
1221   std::map<vpFeatureDescriptorType, std::string> m_mapOfDescriptorNames;
1222   //! Map detector enum type to string.
1223   std::map<vpFeatureDetectorType, std::string> m_mapOfDetectorNames;
1224   //! Map of image id to know to which training image is related a training
1225   //! keypoints.
1226   std::map<int, int> m_mapOfImageId;
1227   //! Map of images to have access to the image buffer according to his image
1228   //! id.
1229   std::map<int, vpImage<unsigned char> > m_mapOfImages;
1230   //! Smart reference-counting pointer (similar to shared_ptr in Boost) of
1231   //! descriptor matcher (e.g. BruteForce or FlannBased).
1232   cv::Ptr<cv::DescriptorMatcher> m_matcher;
1233   //! Name of the matcher.
1234   std::string m_matcherName;
1235   //! List of matches between the detected and the trained keypoints.
1236   std::vector<cv::DMatch> m_matches;
1237   //! Factor value for the filtering method: constantFactorDistanceThreshold.
1238   double m_matchingFactorThreshold;
1239   //! Ratio value for the filtering method: ratioDistanceThreshold.
1240   double m_matchingRatioThreshold;
1241   //! Elapsed time to do the matching.
1242   double m_matchingTime;
1243   //! List of pairs between the keypoint and the 3D point after the Ransac.
1244   std::vector<std::pair<cv::KeyPoint, cv::Point3f> > m_matchRansacKeyPointsToPoints;
  //! Maximum number of iterations for the Ransac method.
  int m_nbRansacIterations;
  //! Minimum number of inliers for the Ransac method.
  int m_nbRansacMinInlierCount;
  //! List of 3D points (in the object frame) filtered after the matching to
  //! compute the pose.
  std::vector<cv::Point3f> m_objectFilteredPoints;
  //! Elapsed time to compute the pose.
  double m_poseTime;
  /*! Matrix of descriptors (each row contains the descriptor values of one
     keypoint detected in the current image). */
  cv::Mat m_queryDescriptors;
  //! List of detected keypoints filtered after the matching.
  std::vector<cv::KeyPoint> m_queryFilteredKeyPoints;
  //! List of keypoints detected in the current image.
  std::vector<cv::KeyPoint> m_queryKeyPoints;
  //! Percentage value to determine the number of inliers for the Ransac
  //! method.
  double m_ransacConsensusPercentage;
  //! Filtering flag for RANSAC and degenerate configuration check
  vpPose::RANSAC_FILTER_FLAGS m_ransacFilterFlag;
  //! List of inliers.
  std::vector<vpImagePoint> m_ransacInliers;
  //! List of outliers.
  std::vector<vpImagePoint> m_ransacOutliers;
  //! If true, use parallel RANSAC
  bool m_ransacParallel;
  //! Number of threads (if 0, try to determine the number of CPU threads)
  unsigned int m_ransacParallelNbThreads;
  //! Maximum reprojection error (in pixels for the OpenCV method) to decide
  //! if a point is an inlier or not.
  double m_ransacReprojectionError;
  //! Maximum error (in meters for the ViSP method) to decide if a point is
  //! an inlier or not.
  double m_ransacThreshold;
  //! Matrix of descriptors (each row contains the descriptor values of one
  //! keypoint detected in the train images).
  cv::Mat m_trainDescriptors;
  //! List of keypoints detected in the train images.
  std::vector<cv::KeyPoint> m_trainKeyPoints;
  //! List of 3D points (in the object frame) corresponding to the train
  //! keypoints.
  std::vector<cv::Point3f> m_trainPoints;
  //! List of 3D points in vpPoint format (in the object frame) corresponding
  //! to the train keypoints.
  std::vector<vpPoint> m_trainVpPoints;
  //! If true, use multiple affine transformations to cover the 6 affine
  //! parameters
  bool m_useAffineDetection;
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
  //! If true, some false matches will be eliminated by keeping only pairs
  //! (i,j) such that for the i-th query descriptor the j-th descriptor in
  //! the matcher's collection is the nearest and vice versa (this
  //! corresponds to the crossCheck option of cv::BFMatcher).
  bool m_useBruteForceCrossCheck;
#endif
  //! Flag set if a percentage value is used to determine the number of
  //! inliers for the Ransac method.
  bool m_useConsensusPercentage;
  //! Flag set if a knn matching method must be used.
  bool m_useKnn;
  //! Flag set if we want to match the train keypoints to the query
  //! keypoints. Useful when there is only one train image because it
  //! reduces the number of possible false matches (by default the opposite
  //! direction is used, since there are normally multiple train images
  //! showing different views of the object)
  bool m_useMatchTrainToQuery;
  //! Flag set if a Ransac VVS pose estimation must be used.
  bool m_useRansacVVS;
  //! If true, keep only pairs of keypoints where each train keypoint is
  //! matched to a single query keypoint
  bool m_useSingleMatchFilter;
  //! Grayscale image buffer, used when passing color images
  vpImage<unsigned char> m_I;
  //! Max number of features to extract, -1 to use default values
  int m_maxFeatures;

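  // Helper used by the affine-simulation detection (m_useAffineDetection):
  // warps img according to the given tilt and in-plane rotation phi and
  // outputs the warped image, its valid-pixel mask and the inverse affine
  // transform Ai (description inferred from the ASIFT-style usage; see the
  // implementation for the exact convention).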
  void affineSkew(double tilt, double phi, cv::Mat &img, cv::Mat &mask, cv::Mat &Ai);

  double computePoseEstimationError(const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
                                    const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo_est);

  void filterMatches();

  void init();
  void initDetector(const std::string &detectorNames);
  void initDetectors(const std::vector<std::string> &detectorNames);

  void initExtractor(const std::string &extractorName);
  void initExtractors(const std::vector<std::string> &extractorNames);

  void initFeatureNames();

  inline size_t myKeypointHash(const cv::KeyPoint &kp)
  {
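    // FNV-style hash: start from the 32-bit FNV offset basis (2166136261)
    // and fold in each keypoint field as a whole 32-bit word, multiplying by
    // the FNV prime (16777619) before xor-ing at each step.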
    size_t _Val = 2166136261U, scale = 16777619U;
    Cv32suf u;
    u.f = kp.pt.x;
    _Val = (scale * _Val) ^ u.u;
    u.f = kp.pt.y;
    _Val = (scale * _Val) ^ u.u;
    u.f = kp.size;
    _Val = (scale * _Val) ^ u.u;
    // As the keypoint angle is, for certain keypoint types, only computed
    // when extracting the corresponding descriptor, the angle field is not
    // taken into account for the hash
    //    u.f = kp.angle; _Val = (scale * _Val) ^ u.u;
    u.f = kp.response;
    _Val = (scale * _Val) ^ u.u;
    _Val = (scale * _Val) ^ ((size_t)kp.octave);
    _Val = (scale * _Val) ^ ((size_t)kp.class_id);
    return _Val;
  }

#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
  /*
   * Adapts a detector to detect points over multiple levels of a Gaussian
   * pyramid. Useful for detectors that are not inherently scaled.
   * From OpenCV 2.4.11 source code.
   */
  class PyramidAdaptedFeatureDetector : public cv::FeatureDetector
  {
  public:
    // maxLevel - The 0-based index of the last pyramid layer
    PyramidAdaptedFeatureDetector(const cv::Ptr<cv::FeatureDetector> &detector, int maxLevel = 2);

    // TODO implement read/write
    virtual bool empty() const;

  protected:
    virtual void detect(cv::InputArray image, CV_OUT std::vector<cv::KeyPoint> &keypoints,
                        cv::InputArray mask = cv::noArray());
    virtual void detectImpl(const cv::Mat &image, std::vector<cv::KeyPoint> &keypoints,
                            const cv::Mat &mask = cv::Mat()) const;

    cv::Ptr<cv::FeatureDetector> detector;
    int maxLevel;
  };
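
  // Minimal usage sketch (assuming OpenCV >= 3.0 with the FAST detector
  // available): wrap a plain detector so that keypoints are detected over a
  // 3-level Gaussian pyramid.
  //
  //   cv::Ptr<cv::FeatureDetector> fast = cv::FastFeatureDetector::create();
  //   cv::Ptr<cv::FeatureDetector> pyramid(new PyramidAdaptedFeatureDetector(fast, 2));
  //   pyramid->detect(image, keypoints);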

  /*
   * A class to filter a vector of keypoints.
   * Since it is difficult to provide a convenient interface covering all
   * usage scenarios of a keypoint filter, this class only provides the few
   * static methods needed for now.
   */
  class KeyPointsFilter
  {
  public:
    KeyPointsFilter() {}

    /*
     * Remove keypoints within borderSize pixels of the image border.
     */
    static void runByImageBorder(std::vector<cv::KeyPoint> &keypoints, cv::Size imageSize, int borderSize);
    /*
     * Remove keypoints of sizes out of range.
     */
    static void runByKeypointSize(std::vector<cv::KeyPoint> &keypoints, float minSize, float maxSize = FLT_MAX);
    /*
     * Remove keypoints that lie on zero pixels of the given image mask.
     */
    static void runByPixelsMask(std::vector<cv::KeyPoint> &keypoints, const cv::Mat &mask);
    /*
     * Remove duplicated keypoints.
     */
    static void removeDuplicated(std::vector<cv::KeyPoint> &keypoints);

    /*
     * Retain the specified number of the best keypoints (according to the
     * response)
     */
    static void retainBest(std::vector<cv::KeyPoint> &keypoints, int npoints);
  };
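
  // Hypothetical filtering chain sketch (the border margin, size range and
  // keypoint budget below are made-up values):
  //
  //   KeyPointsFilter::runByImageBorder(keypoints, image.size(), 16);
  //   KeyPointsFilter::runByKeypointSize(keypoints, 4.f, 64.f);
  //   KeyPointsFilter::retainBest(keypoints, 500);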

#endif
};

#endif
#endif