1 /*M///////////////////////////////////////////////////////////////////////////////////////
2 //
3 //  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
4 //
5 //  By downloading, copying, installing or using the software you agree to this license.
6 //  If you do not agree to this license, do not download, install,
7 //  copy or use the software.
8 //
9 //
10 //                          License Agreement
11 //                For Open Source Computer Vision Library
12 //
13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14 // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
15 // Third party copyrights are property of their respective owners.
16 //
17 // Redistribution and use in source and binary forms, with or without modification,
18 // are permitted provided that the following conditions are met:
19 //
20 //   * Redistribution's of source code must retain the above copyright notice,
21 //     this list of conditions and the following disclaimer.
22 //
23 //   * Redistribution's in binary form must reproduce the above copyright notice,
24 //     this list of conditions and the following disclaimer in the documentation
25 //     and/or other materials provided with the distribution.
26 //
27 //   * The name of the copyright holders may not be used to endorse or promote products
28 //     derived from this software without specific prior written permission.
29 //
30 // This software is provided by the copyright holders and contributors "as is" and
31 // any express or implied warranties, including, but not limited to, the implied
32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
33 // In no event shall the Intel Corporation or contributors be liable for any direct,
34 // indirect, incidental, special, exemplary, or consequential damages
35 // (including, but not limited to, procurement of substitute goods or services;
36 // loss of use, data, or profits; or business interruption) however caused
37 // and on any theory of liability, whether in contract, strict liability,
38 // or tort (including negligence or otherwise) arising in any way out of
39 // the use of this software, even if advised of the possibility of such damage.
40 //
41 //M*/
42 
43 #include "precomp.hpp"
44 #include "opencv2/core/core_c.h"
45 #include "opencv2/calib3d/calib3d_c.h"
46 #include "opencv2/core/cvdef.h"
47 
48 using namespace cv;
49 using namespace cv::detail;
50 
51 namespace {
52 
53 struct IncDistance
54 {
IncDistance__anon410860cf0111::IncDistance55     IncDistance(std::vector<int> &vdists) : dists(&vdists[0]) {}
operator ()__anon410860cf0111::IncDistance56     void operator ()(const GraphEdge &edge) { dists[edge.to] = dists[edge.from] + 1; }
57     int* dists;
58 };
59 
60 
61 struct CalcRotation
62 {
CalcRotation__anon410860cf0111::CalcRotation63     CalcRotation(int _num_images, const std::vector<MatchesInfo> &_pairwise_matches, std::vector<CameraParams> &_cameras)
64         : num_images(_num_images), pairwise_matches(&_pairwise_matches[0]), cameras(&_cameras[0]) {}
65 
operator ()__anon410860cf0111::CalcRotation66     void operator ()(const GraphEdge &edge)
67     {
68         int pair_idx = edge.from * num_images + edge.to;
69 
70         Mat_<double> K_from = Mat::eye(3, 3, CV_64F);
71         K_from(0,0) = cameras[edge.from].focal;
72         K_from(1,1) = cameras[edge.from].focal * cameras[edge.from].aspect;
73         K_from(0,2) = cameras[edge.from].ppx;
74         K_from(1,2) = cameras[edge.from].ppy;
75 
76         Mat_<double> K_to = Mat::eye(3, 3, CV_64F);
77         K_to(0,0) = cameras[edge.to].focal;
78         K_to(1,1) = cameras[edge.to].focal * cameras[edge.to].aspect;
79         K_to(0,2) = cameras[edge.to].ppx;
80         K_to(1,2) = cameras[edge.to].ppy;
81 
82         Mat R = K_from.inv() * pairwise_matches[pair_idx].H.inv() * K_to;
83         cameras[edge.to].R = cameras[edge.from].R * R;
84     }
85 
86     int num_images;
87     const MatchesInfo* pairwise_matches;
88     CameraParams* cameras;
89 };
90 
91 
92 /**
93  * @brief Functor calculating final transformation by chaining linear transformations
94  */
95 struct CalcAffineTransform
96 {
CalcAffineTransform__anon410860cf0111::CalcAffineTransform97     CalcAffineTransform(int _num_images,
98                       const std::vector<MatchesInfo> &_pairwise_matches,
99                       std::vector<CameraParams> &_cameras)
100     : num_images(_num_images), pairwise_matches(&_pairwise_matches[0]), cameras(&_cameras[0]) {}
101 
operator ()__anon410860cf0111::CalcAffineTransform102     void operator()(const GraphEdge &edge)
103     {
104         int pair_idx = edge.from * num_images + edge.to;
105         cameras[edge.to].R = cameras[edge.from].R * pairwise_matches[pair_idx].H;
106     }
107 
108     int num_images;
109     const MatchesInfo *pairwise_matches;
110     CameraParams *cameras;
111 };
112 
113 
114 //////////////////////////////////////////////////////////////////////////////
115 
calcDeriv(const Mat & err1,const Mat & err2,double h,Mat res)116 void calcDeriv(const Mat &err1, const Mat &err2, double h, Mat res)
117 {
118     for (int i = 0; i < err1.rows; ++i)
119         res.at<double>(i, 0) = (err2.at<double>(i, 0) - err1.at<double>(i, 0)) / h;
120 }
121 
122 } // namespace
123 
124 
125 namespace cv {
126 namespace detail {
127 
// Estimates per-camera rotations (and, unless already provided, focal
// lengths) from the pairwise homographies by chaining relative rotations
// along a maximum spanning tree of the match graph. Always returns true.
bool HomographyBasedEstimator::estimate(
        const std::vector<ImageFeatures> &features,
        const std::vector<MatchesInfo> &pairwise_matches,
        std::vector<CameraParams> &cameras)
{
    LOGLN("Estimating rotations...");
#if ENABLE_LOG
    int64 t = getTickCount();
#endif

    const int num_images = static_cast<int>(features.size());

#if 0
    // Robustly estimate focal length from rotating cameras
    // (disabled experimental code left in place for reference)
    std::vector<Mat> Hs;
    for (int iter = 0; iter < 100; ++iter)
    {
        int len = 2 + rand()%(pairwise_matches.size() - 1);
        std::vector<int> subset;
        selectRandomSubset(len, pairwise_matches.size(), subset);
        Hs.clear();
        for (size_t i = 0; i < subset.size(); ++i)
            if (!pairwise_matches[subset[i]].H.empty())
                Hs.push_back(pairwise_matches[subset[i]].H);
        Mat_<double> K;
        if (Hs.size() >= 2)
        {
            if (calibrateRotatingCamera(Hs, K))
                cin.get();
        }
    }
#endif

    if (!is_focals_estimated_)
    {
        // Estimate focal length and set it for all cameras
        std::vector<double> focals;
        estimateFocal(features, pairwise_matches, focals);
        cameras.assign(num_images, CameraParams());
        for (int i = 0; i < num_images; ++i)
            cameras[i].focal = focals[i];
    }
    else
    {
        // Focals were supplied by the caller; shift the principal points so
        // that the rotation chaining below can assume p.p. at the image
        // center (the shift is undone after the tree walk).
        for (int i = 0; i < num_images; ++i)
        {
            cameras[i].ppx -= 0.5 * features[i].img_size.width;
            cameras[i].ppy -= 0.5 * features[i].img_size.height;
        }
    }

    // Restore global motion: walk the maximum spanning tree breadth-first
    // from its center, computing each camera's R from its parent's.
    Graph span_tree;
    std::vector<int> span_tree_centers;
    findMaxSpanningTree(num_images, pairwise_matches, span_tree, span_tree_centers);
    span_tree.walkBreadthFirst(span_tree_centers[0], CalcRotation(num_images, pairwise_matches, cameras));

    // As calculations were performed under assumption that p.p. is in image center
    for (int i = 0; i < num_images; ++i)
    {
        cameras[i].ppx += 0.5 * features[i].img_size.width;
        cameras[i].ppy += 0.5 * features[i].img_size.height;
    }

    LOGLN("Estimating rotations, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
    return true;
}
195 
196 
197 //////////////////////////////////////////////////////////////////////////////
198 
estimate(const std::vector<ImageFeatures> & features,const std::vector<MatchesInfo> & pairwise_matches,std::vector<CameraParams> & cameras)199 bool AffineBasedEstimator::estimate(const std::vector<ImageFeatures> &features,
200                                     const std::vector<MatchesInfo> &pairwise_matches,
201                                     std::vector<CameraParams> &cameras)
202 {
203     cameras.assign(features.size(), CameraParams());
204     const int num_images = static_cast<int>(features.size());
205 
206     // find maximum spaning tree on pairwise matches
207     cv::detail::Graph span_tree;
208     std::vector<int> span_tree_centers;
209     // uses number of inliers as weights
210     findMaxSpanningTree(num_images, pairwise_matches, span_tree,
211                       span_tree_centers);
212 
213     // compute final transform by chaining H together
214     span_tree.walkBreadthFirst(
215             span_tree_centers[0],
216             CalcAffineTransform(num_images, pairwise_matches, cameras));
217     // this estimator never fails
218     return true;
219 }
220 
221 
222 //////////////////////////////////////////////////////////////////////////////
223 
// Generic bundle-adjustment driver: collects confident image pairs, packs the
// initial camera parameters (via the derived class), runs Levenberg-Marquardt
// with numeric error/jacobian callbacks, validates the result, and unpacks the
// refined parameters. Returns false if any refined parameter came out NaN.
bool BundleAdjusterBase::estimate(const std::vector<ImageFeatures> &features,
                                  const std::vector<MatchesInfo> &pairwise_matches,
                                  std::vector<CameraParams> &cameras)
{
    LOG_CHAT("Bundle adjustment");
#if ENABLE_LOG
    int64 t = getTickCount();
#endif

    num_images_ = static_cast<int>(features.size());
    features_ = &features[0];
    pairwise_matches_ = &pairwise_matches[0];

    // Derived class packs `cameras` into the flat cam_params_ vector.
    setUpInitialCameraParams(cameras);

    // Leave only consistent image pairs
    edges_.clear();
    for (int i = 0; i < num_images_ - 1; ++i)
    {
        for (int j = i + 1; j < num_images_; ++j)
        {
            const MatchesInfo& matches_info = pairwise_matches_[i * num_images_ + j];
            if (matches_info.confidence > conf_thresh_)
                edges_.push_back(std::make_pair(i, j));
        }
    }

    // Compute number of correspondences
    total_num_matches_ = 0;
    for (size_t i = 0; i < edges_.size(); ++i)
        total_num_matches_ += static_cast<int>(pairwise_matches[edges_[i].first * num_images_ +
                                                                edges_[i].second].num_inliers);

    CvLevMarq solver(num_images_ * num_params_per_cam_,
                     total_num_matches_ * num_errs_per_measurement_,
                     cvTermCriteria(term_criteria_));

    Mat err, jac;
    // matParams is a C header sharing storage with cam_params_, so copies
    // into it update cam_params_ in place.
    CvMat matParams = cvMat(cam_params_);
    cvCopy(&matParams, solver.param);

    int iter = 0;
    for(;;)
    {
        const CvMat* _param = 0;
        CvMat* _jac = 0;
        CvMat* _err = 0;

        // The solver reports which quantities it needs for this step by
        // setting the corresponding pointers.
        bool proceed = solver.update(_param, _jac, _err);

        // Sync the solver's current parameter estimate into cam_params_ so
        // calcError/calcJacobian see the up-to-date values.
        cvCopy(_param, &matParams);

        if (!proceed || !_err)
            break;

        if (_jac)
        {
            calcJacobian(jac);
            CvMat tmp = cvMat(jac);
            cvCopy(&tmp, _jac);
        }

        if (_err)
        {
            calcError(err);
            LOG_CHAT(".");
            iter++;
            CvMat tmp = cvMat(err);
            cvCopy(&tmp, _err);
        }
    }

    LOGLN_CHAT("");
    LOGLN_CHAT("Bundle adjustment, final RMS error: " << std::sqrt(err.dot(err) / total_num_matches_));
    LOGLN_CHAT("Bundle adjustment, iterations done: " << iter);

    // Check if all camera parameters are valid
    bool ok = true;
    for (int i = 0; i < cam_params_.rows; ++i)
    {
        if (cvIsNaN(cam_params_.at<double>(i,0)))
        {
            ok = false;
            break;
        }
    }
    if (!ok)
        return false;

    // Derived class unpacks cam_params_ back into `cameras`.
    obtainRefinedCameraParams(cameras);

    // Normalize motion to center image: rotate everything so the spanning
    // tree's center camera has identity orientation.
    Graph span_tree;
    std::vector<int> span_tree_centers;
    findMaxSpanningTree(num_images_, pairwise_matches, span_tree, span_tree_centers);
    Mat R_inv = cameras[span_tree_centers[0]].R.inv();
    for (int i = 0; i < num_images_; ++i)
        cameras[i].R = R_inv * cameras[i].R;

    LOGLN_CHAT("Bundle adjustment, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
    return true;
}
326 
327 
328 //////////////////////////////////////////////////////////////////////////////
329 
setUpInitialCameraParams(const std::vector<CameraParams> & cameras)330 void BundleAdjusterReproj::setUpInitialCameraParams(const std::vector<CameraParams> &cameras)
331 {
332     cam_params_.create(num_images_ * 7, 1, CV_64F);
333     SVD svd;
334     for (int i = 0; i < num_images_; ++i)
335     {
336         cam_params_.at<double>(i * 7, 0) = cameras[i].focal;
337         cam_params_.at<double>(i * 7 + 1, 0) = cameras[i].ppx;
338         cam_params_.at<double>(i * 7 + 2, 0) = cameras[i].ppy;
339         cam_params_.at<double>(i * 7 + 3, 0) = cameras[i].aspect;
340 
341         svd(cameras[i].R, SVD::FULL_UV);
342         Mat R = svd.u * svd.vt;
343         if (determinant(R) < 0)
344             R *= -1;
345 
346         Mat rvec;
347         Rodrigues(R, rvec);
348         CV_Assert(rvec.type() == CV_32F);
349         cam_params_.at<double>(i * 7 + 4, 0) = rvec.at<float>(0, 0);
350         cam_params_.at<double>(i * 7 + 5, 0) = rvec.at<float>(1, 0);
351         cam_params_.at<double>(i * 7 + 6, 0) = rvec.at<float>(2, 0);
352     }
353 }
354 
355 
obtainRefinedCameraParams(std::vector<CameraParams> & cameras) const356 void BundleAdjusterReproj::obtainRefinedCameraParams(std::vector<CameraParams> &cameras) const
357 {
358     for (int i = 0; i < num_images_; ++i)
359     {
360         cameras[i].focal = cam_params_.at<double>(i * 7, 0);
361         cameras[i].ppx = cam_params_.at<double>(i * 7 + 1, 0);
362         cameras[i].ppy = cam_params_.at<double>(i * 7 + 2, 0);
363         cameras[i].aspect = cam_params_.at<double>(i * 7 + 3, 0);
364 
365         Mat rvec(3, 1, CV_64F);
366         rvec.at<double>(0, 0) = cam_params_.at<double>(i * 7 + 4, 0);
367         rvec.at<double>(1, 0) = cam_params_.at<double>(i * 7 + 5, 0);
368         rvec.at<double>(2, 0) = cam_params_.at<double>(i * 7 + 6, 0);
369         Rodrigues(rvec, cameras[i].R);
370 
371         Mat tmp;
372         cameras[i].R.convertTo(tmp, CV_32F);
373         cameras[i].R = tmp;
374     }
375 }
376 
377 
// Computes the reprojection error vector (2 residuals per inlier match): each
// keypoint of image i is mapped into image j through the homography induced
// by the current intrinsics and rotations, and compared to its match.
void BundleAdjusterReproj::calcError(Mat &err)
{
    err.create(total_num_matches_ * 2, 1, CV_64F);

    int match_idx = 0;
    for (size_t edge_idx = 0; edge_idx < edges_.size(); ++edge_idx)
    {
        int i = edges_[edge_idx].first;
        int j = edges_[edge_idx].second;
        // Unpack intrinsics of both cameras from the flat parameter vector
        // (7 params per camera: f, ppx, ppy, aspect, rvec[3]).
        double f1 = cam_params_.at<double>(i * 7, 0);
        double f2 = cam_params_.at<double>(j * 7, 0);
        double ppx1 = cam_params_.at<double>(i * 7 + 1, 0);
        double ppx2 = cam_params_.at<double>(j * 7 + 1, 0);
        double ppy1 = cam_params_.at<double>(i * 7 + 2, 0);
        double ppy2 = cam_params_.at<double>(j * 7 + 2, 0);
        double a1 = cam_params_.at<double>(i * 7 + 3, 0);
        double a2 = cam_params_.at<double>(j * 7 + 3, 0);

        // Rotation matrices rebuilt from the Rodrigues vectors; R1_/R2_ are
        // Mat headers over stack buffers (no heap allocation per edge).
        double R1[9];
        Mat R1_(3, 3, CV_64F, R1);
        Mat rvec(3, 1, CV_64F);
        rvec.at<double>(0, 0) = cam_params_.at<double>(i * 7 + 4, 0);
        rvec.at<double>(1, 0) = cam_params_.at<double>(i * 7 + 5, 0);
        rvec.at<double>(2, 0) = cam_params_.at<double>(i * 7 + 6, 0);
        Rodrigues(rvec, R1_);

        double R2[9];
        Mat R2_(3, 3, CV_64F, R2);
        rvec.at<double>(0, 0) = cam_params_.at<double>(j * 7 + 4, 0);
        rvec.at<double>(1, 0) = cam_params_.at<double>(j * 7 + 5, 0);
        rvec.at<double>(2, 0) = cam_params_.at<double>(j * 7 + 6, 0);
        Rodrigues(rvec, R2_);

        const ImageFeatures& features1 = features_[i];
        const ImageFeatures& features2 = features_[j];
        const MatchesInfo& matches_info = pairwise_matches_[i * num_images_ + j];

        // Intrinsic matrices; the y-focal is f * aspect.
        Mat_<double> K1 = Mat::eye(3, 3, CV_64F);
        K1(0,0) = f1; K1(0,2) = ppx1;
        K1(1,1) = f1*a1; K1(1,2) = ppy1;

        Mat_<double> K2 = Mat::eye(3, 3, CV_64F);
        K2(0,0) = f2; K2(0,2) = ppx2;
        K2(1,1) = f2*a2; K2(1,2) = ppy2;

        // Homography mapping image-1 pixels into image-2 pixels under the
        // rotating-camera model.
        Mat_<double> H = K2 * R2_.inv() * R1_ * K1.inv();

        for (size_t k = 0; k < matches_info.matches.size(); ++k)
        {
            if (!matches_info.inliers_mask[k])
                continue;

            const DMatch& m = matches_info.matches[k];
            Point2f p1 = features1.keypoints[m.queryIdx].pt;
            Point2f p2 = features2.keypoints[m.trainIdx].pt;
            // Apply H in homogeneous coordinates and dehomogenize.
            double x = H(0,0)*p1.x + H(0,1)*p1.y + H(0,2);
            double y = H(1,0)*p1.x + H(1,1)*p1.y + H(1,2);
            double z = H(2,0)*p1.x + H(2,1)*p1.y + H(2,2);

            err.at<double>(2 * match_idx, 0) = p2.x - x/z;
            err.at<double>(2 * match_idx + 1, 0) = p2.y - y/z;
            match_idx++;
        }
    }
}
443 
444 
calcJacobian(Mat & jac)445 void BundleAdjusterReproj::calcJacobian(Mat &jac)
446 {
447     jac.create(total_num_matches_ * 2, num_images_ * 7, CV_64F);
448     jac.setTo(0);
449 
450     double val;
451     const double step = 1e-4;
452 
453     for (int i = 0; i < num_images_; ++i)
454     {
455         if (refinement_mask_.at<uchar>(0, 0))
456         {
457             val = cam_params_.at<double>(i * 7, 0);
458             cam_params_.at<double>(i * 7, 0) = val - step;
459             calcError(err1_);
460             cam_params_.at<double>(i * 7, 0) = val + step;
461             calcError(err2_);
462             calcDeriv(err1_, err2_, 2 * step, jac.col(i * 7));
463             cam_params_.at<double>(i * 7, 0) = val;
464         }
465         if (refinement_mask_.at<uchar>(0, 2))
466         {
467             val = cam_params_.at<double>(i * 7 + 1, 0);
468             cam_params_.at<double>(i * 7 + 1, 0) = val - step;
469             calcError(err1_);
470             cam_params_.at<double>(i * 7 + 1, 0) = val + step;
471             calcError(err2_);
472             calcDeriv(err1_, err2_, 2 * step, jac.col(i * 7 + 1));
473             cam_params_.at<double>(i * 7 + 1, 0) = val;
474         }
475         if (refinement_mask_.at<uchar>(1, 2))
476         {
477             val = cam_params_.at<double>(i * 7 + 2, 0);
478             cam_params_.at<double>(i * 7 + 2, 0) = val - step;
479             calcError(err1_);
480             cam_params_.at<double>(i * 7 + 2, 0) = val + step;
481             calcError(err2_);
482             calcDeriv(err1_, err2_, 2 * step, jac.col(i * 7 + 2));
483             cam_params_.at<double>(i * 7 + 2, 0) = val;
484         }
485         if (refinement_mask_.at<uchar>(1, 1))
486         {
487             val = cam_params_.at<double>(i * 7 + 3, 0);
488             cam_params_.at<double>(i * 7 + 3, 0) = val - step;
489             calcError(err1_);
490             cam_params_.at<double>(i * 7 + 3, 0) = val + step;
491             calcError(err2_);
492             calcDeriv(err1_, err2_, 2 * step, jac.col(i * 7 + 3));
493             cam_params_.at<double>(i * 7 + 3, 0) = val;
494         }
495         for (int j = 4; j < 7; ++j)
496         {
497             val = cam_params_.at<double>(i * 7 + j, 0);
498             cam_params_.at<double>(i * 7 + j, 0) = val - step;
499             calcError(err1_);
500             cam_params_.at<double>(i * 7 + j, 0) = val + step;
501             calcError(err2_);
502             calcDeriv(err1_, err2_, 2 * step, jac.col(i * 7 + j));
503             cam_params_.at<double>(i * 7 + j, 0) = val;
504         }
505     }
506 }
507 
508 
509 //////////////////////////////////////////////////////////////////////////////
510 
setUpInitialCameraParams(const std::vector<CameraParams> & cameras)511 void BundleAdjusterRay::setUpInitialCameraParams(const std::vector<CameraParams> &cameras)
512 {
513     cam_params_.create(num_images_ * 4, 1, CV_64F);
514     SVD svd;
515     for (int i = 0; i < num_images_; ++i)
516     {
517         cam_params_.at<double>(i * 4, 0) = cameras[i].focal;
518 
519         svd(cameras[i].R, SVD::FULL_UV);
520         Mat R = svd.u * svd.vt;
521         if (determinant(R) < 0)
522             R *= -1;
523 
524         Mat rvec;
525         Rodrigues(R, rvec);
526         CV_Assert(rvec.type() == CV_32F);
527         cam_params_.at<double>(i * 4 + 1, 0) = rvec.at<float>(0, 0);
528         cam_params_.at<double>(i * 4 + 2, 0) = rvec.at<float>(1, 0);
529         cam_params_.at<double>(i * 4 + 3, 0) = rvec.at<float>(2, 0);
530     }
531 }
532 
533 
obtainRefinedCameraParams(std::vector<CameraParams> & cameras) const534 void BundleAdjusterRay::obtainRefinedCameraParams(std::vector<CameraParams> &cameras) const
535 {
536     for (int i = 0; i < num_images_; ++i)
537     {
538         cameras[i].focal = cam_params_.at<double>(i * 4, 0);
539 
540         Mat rvec(3, 1, CV_64F);
541         rvec.at<double>(0, 0) = cam_params_.at<double>(i * 4 + 1, 0);
542         rvec.at<double>(1, 0) = cam_params_.at<double>(i * 4 + 2, 0);
543         rvec.at<double>(2, 0) = cam_params_.at<double>(i * 4 + 3, 0);
544         Rodrigues(rvec, cameras[i].R);
545 
546         Mat tmp;
547         cameras[i].R.convertTo(tmp, CV_32F);
548         cameras[i].R = tmp;
549     }
550 }
551 
552 
// Computes the ray error vector (3 residuals per inlier match): each matched
// keypoint pair is back-projected onto unit rays through the current
// rotations/focals; the residual is the component-wise ray difference scaled
// by sqrt(f1*f2) to make it comparable to a pixel-space error.
void BundleAdjusterRay::calcError(Mat &err)
{
    err.create(total_num_matches_ * 3, 1, CV_64F);

    int match_idx = 0;
    for (size_t edge_idx = 0; edge_idx < edges_.size(); ++edge_idx)
    {
        int i = edges_[edge_idx].first;
        int j = edges_[edge_idx].second;
        // 4 params per camera: focal + Rodrigues vector.
        double f1 = cam_params_.at<double>(i * 4, 0);
        double f2 = cam_params_.at<double>(j * 4, 0);

        // Rotation matrices rebuilt from the Rodrigues vectors; R1_/R2_ are
        // Mat headers over stack buffers (no heap allocation per edge).
        double R1[9];
        Mat R1_(3, 3, CV_64F, R1);
        Mat rvec(3, 1, CV_64F);
        rvec.at<double>(0, 0) = cam_params_.at<double>(i * 4 + 1, 0);
        rvec.at<double>(1, 0) = cam_params_.at<double>(i * 4 + 2, 0);
        rvec.at<double>(2, 0) = cam_params_.at<double>(i * 4 + 3, 0);
        Rodrigues(rvec, R1_);

        double R2[9];
        Mat R2_(3, 3, CV_64F, R2);
        rvec.at<double>(0, 0) = cam_params_.at<double>(j * 4 + 1, 0);
        rvec.at<double>(1, 0) = cam_params_.at<double>(j * 4 + 2, 0);
        rvec.at<double>(2, 0) = cam_params_.at<double>(j * 4 + 3, 0);
        Rodrigues(rvec, R2_);

        const ImageFeatures& features1 = features_[i];
        const ImageFeatures& features2 = features_[j];
        const MatchesInfo& matches_info = pairwise_matches_[i * num_images_ + j];

        // Intrinsics with the principal point fixed at the image center.
        Mat_<double> K1 = Mat::eye(3, 3, CV_64F);
        K1(0,0) = f1; K1(0,2) = features1.img_size.width * 0.5;
        K1(1,1) = f1; K1(1,2) = features1.img_size.height * 0.5;

        Mat_<double> K2 = Mat::eye(3, 3, CV_64F);
        K2(0,0) = f2; K2(0,2) = features2.img_size.width * 0.5;
        K2(1,1) = f2; K2(1,2) = features2.img_size.height * 0.5;

        // Pixel -> 3D ray mappings for both cameras.
        Mat_<double> H1 = R1_ * K1.inv();
        Mat_<double> H2 = R2_ * K2.inv();

        for (size_t k = 0; k < matches_info.matches.size(); ++k)
        {
            if (!matches_info.inliers_mask[k])
                continue;

            const DMatch& m = matches_info.matches[k];

            // Back-project keypoint 1 and normalize to a unit ray.
            Point2f p1 = features1.keypoints[m.queryIdx].pt;
            double x1 = H1(0,0)*p1.x + H1(0,1)*p1.y + H1(0,2);
            double y1 = H1(1,0)*p1.x + H1(1,1)*p1.y + H1(1,2);
            double z1 = H1(2,0)*p1.x + H1(2,1)*p1.y + H1(2,2);
            double len = std::sqrt(x1*x1 + y1*y1 + z1*z1);
            x1 /= len; y1 /= len; z1 /= len;

            // Same for keypoint 2.
            Point2f p2 = features2.keypoints[m.trainIdx].pt;
            double x2 = H2(0,0)*p2.x + H2(0,1)*p2.y + H2(0,2);
            double y2 = H2(1,0)*p2.x + H2(1,1)*p2.y + H2(1,2);
            double z2 = H2(2,0)*p2.x + H2(2,1)*p2.y + H2(2,2);
            len = std::sqrt(x2*x2 + y2*y2 + z2*z2);
            x2 /= len; y2 /= len; z2 /= len;

            // Scale the unit-ray difference back to a pixel-like magnitude.
            double mult = std::sqrt(f1 * f2);
            err.at<double>(3 * match_idx, 0) = mult * (x1 - x2);
            err.at<double>(3 * match_idx + 1, 0) = mult * (y1 - y2);
            err.at<double>(3 * match_idx + 2, 0) = mult * (z1 - z2);

            match_idx++;
        }
    }
}
625 
626 
calcJacobian(Mat & jac)627 void BundleAdjusterRay::calcJacobian(Mat &jac)
628 {
629     jac.create(total_num_matches_ * 3, num_images_ * 4, CV_64F);
630 
631     double val;
632     const double step = 1e-3;
633 
634     for (int i = 0; i < num_images_; ++i)
635     {
636         for (int j = 0; j < 4; ++j)
637         {
638             val = cam_params_.at<double>(i * 4 + j, 0);
639             cam_params_.at<double>(i * 4 + j, 0) = val - step;
640             calcError(err1_);
641             cam_params_.at<double>(i * 4 + j, 0) = val + step;
642             calcError(err2_);
643             calcDeriv(err1_, err2_, 2 * step, jac.col(i * 4 + j));
644             cam_params_.at<double>(i * 4 + j, 0) = val;
645         }
646     }
647 }
648 
649 //////////////////////////////////////////////////////////////////////////////
650 
setUpInitialCameraParams(const std::vector<CameraParams> & cameras)651 void BundleAdjusterAffine::setUpInitialCameraParams(const std::vector<CameraParams> &cameras)
652 {
653     cam_params_.create(num_images_ * 6, 1, CV_64F);
654     for (size_t i = 0; i < static_cast<size_t>(num_images_); ++i)
655     {
656         CV_Assert(cameras[i].R.type() == CV_32F);
657         // cameras[i].R is
658         //     a b tx
659         //     c d ty
660         //     0 0 1. (optional)
661         // cam_params_ model for LevMarq is
662         //     (a, b, tx, c, d, ty)
663         Mat params (2, 3, CV_64F, cam_params_.ptr<double>() + i * 6);
664         cameras[i].R.rowRange(0, 2).convertTo(params, CV_64F);
665     }
666 }
667 
668 
obtainRefinedCameraParams(std::vector<CameraParams> & cameras) const669 void BundleAdjusterAffine::obtainRefinedCameraParams(std::vector<CameraParams> &cameras) const
670 {
671     for (int i = 0; i < num_images_; ++i)
672     {
673         // cameras[i].R will be
674         //     a b tx
675         //     c d ty
676         //     0 0 1
677         cameras[i].R = Mat::eye(3, 3, CV_32F);
678         Mat params = cam_params_.rowRange(i * 6, i * 6 + 6).reshape(1, 2);
679         params.convertTo(cameras[i].R.rowRange(0, 2), CV_32F);
680     }
681 }
682 
683 
// Computes the affine registration error (2 residuals per inlier match):
// keypoints of image i are mapped through inv(H1)*H2 and compared to their
// matches in image j.
void BundleAdjusterAffine::calcError(Mat &err)
{
    err.create(total_num_matches_ * 2, 1, CV_64F);

    int match_idx = 0;
    for (size_t edge_idx = 0; edge_idx < edges_.size(); ++edge_idx)
    {
        size_t i = edges_[edge_idx].first;
        size_t j = edges_[edge_idx].second;

        const ImageFeatures& features1 = features_[i];
        const ImageFeatures& features2 = features_[j];
        const MatchesInfo& matches_info = pairwise_matches_[i * num_images_ + j];

        // 2x3 views directly over the parameter buffer (6 doubles per image).
        Mat H1 (2, 3, CV_64F, cam_params_.ptr<double>() + i * 6);
        Mat H2 (2, 3, CV_64F, cam_params_.ptr<double>() + j * 6);

        // invert H1
        Mat H1_inv;
        invertAffineTransform(H1, H1_inv);

        // convert to representation in homogeneous coordinates
        // (push_back reallocates into fresh storage, so the parameter buffer
        // wrapped by H2 is not modified)
        Mat last_row = Mat::zeros(1, 3, CV_64F);
        last_row.at<double>(2) = 1.;
        H1_inv.push_back(last_row);
        H2.push_back(last_row);

        Mat_<double> H = H1_inv * H2;

        for (size_t k = 0; k < matches_info.matches.size(); ++k)
        {
            if (!matches_info.inliers_mask[k])
                continue;

            const DMatch& m = matches_info.matches[k];
            const Point2f& p1 = features1.keypoints[m.queryIdx].pt;
            const Point2f& p2 = features2.keypoints[m.trainIdx].pt;

            // Affine map: no dehomogenization needed (last row is 0 0 1).
            double x = H(0,0)*p1.x + H(0,1)*p1.y + H(0,2);
            double y = H(1,0)*p1.x + H(1,1)*p1.y + H(1,2);

            err.at<double>(2 * match_idx + 0, 0) = p2.x - x;
            err.at<double>(2 * match_idx + 1, 0) = p2.y - y;

            ++match_idx;
        }
    }
}
732 
733 
calcJacobian(Mat & jac)734 void BundleAdjusterAffine::calcJacobian(Mat &jac)
735 {
736     jac.create(total_num_matches_ * 2, num_images_ * 6, CV_64F);
737 
738     double val;
739     const double step = 1e-4;
740 
741     for (int i = 0; i < num_images_; ++i)
742     {
743         for (int j = 0; j < 6; ++j)
744         {
745             val = cam_params_.at<double>(i * 6 + j, 0);
746             cam_params_.at<double>(i * 6 + j, 0) = val - step;
747             calcError(err1_);
748             cam_params_.at<double>(i * 6 + j, 0) = val + step;
749             calcError(err2_);
750             calcDeriv(err1_, err2_, 2 * step, jac.col(i * 6 + j));
751             cam_params_.at<double>(i * 6 + j, 0) = val;
752         }
753     }
754 }
755 
756 
757 //////////////////////////////////////////////////////////////////////////////
758 
setUpInitialCameraParams(const std::vector<CameraParams> & cameras)759 void BundleAdjusterAffinePartial::setUpInitialCameraParams(const std::vector<CameraParams> &cameras)
760 {
761     cam_params_.create(num_images_ * 4, 1, CV_64F);
762     for (size_t i = 0; i < static_cast<size_t>(num_images_); ++i)
763     {
764         CV_Assert(cameras[i].R.type() == CV_32F);
765         // cameras[i].R is
766         //     a -b tx
767         //     b  a ty
768         //     0  0 1. (optional)
769         // cam_params_ model for LevMarq is
770         //     (a, b, tx, ty)
771         double *params = cam_params_.ptr<double>() + i * 4;
772         params[0] = cameras[i].R.at<float>(0, 0);
773         params[1] = cameras[i].R.at<float>(1, 0);
774         params[2] = cameras[i].R.at<float>(0, 2);
775         params[3] = cameras[i].R.at<float>(1, 2);
776     }
777 }
778 
779 
obtainRefinedCameraParams(std::vector<CameraParams> & cameras) const780 void BundleAdjusterAffinePartial::obtainRefinedCameraParams(std::vector<CameraParams> &cameras) const
781 {
782     for (size_t i = 0; i < static_cast<size_t>(num_images_); ++i)
783     {
784         // cameras[i].R will be
785         //     a -b tx
786         //     b  a ty
787         //     0  0 1
788         // cam_params_ model for LevMarq is
789         //     (a, b, tx, ty)
790         const double *params = cam_params_.ptr<double>() + i * 4;
791         double transform_buf[9] =
792         {
793             params[0], -params[1], params[2],
794             params[1],  params[0], params[3],
795             0., 0., 1.
796         };
797         Mat transform(3, 3, CV_64F, transform_buf);
798         transform.convertTo(cameras[i].R, CV_32F);
799     }
800 }
801 
802 
// Computes the similarity-transform registration error (2 residuals per
// inlier match): keypoints of image i are mapped through inv(H1)*H2 and
// compared to their matches in image j. H1/H2 are rebuilt in homogeneous
// form from the 4-parameter-per-camera representation.
void BundleAdjusterAffinePartial::calcError(Mat &err)
{
    err.create(total_num_matches_ * 2, 1, CV_64F);

    int match_idx = 0;
    for (size_t edge_idx = 0; edge_idx < edges_.size(); ++edge_idx)
    {
        size_t i = edges_[edge_idx].first;
        size_t j = edges_[edge_idx].second;

        const ImageFeatures& features1 = features_[i];
        const ImageFeatures& features2 = features_[j];
        const MatchesInfo& matches_info = pairwise_matches_[i * num_images_ + j];

        // Expand (a, b, tx, ty) into full 3x3 homogeneous matrices backed by
        // stack buffers.
        const double *H1_ptr = cam_params_.ptr<double>() + i * 4;
        double H1_buf[9] =
        {
            H1_ptr[0], -H1_ptr[1], H1_ptr[2],
            H1_ptr[1],  H1_ptr[0], H1_ptr[3],
            0., 0., 1.
        };
        Mat H1 (3, 3, CV_64F, H1_buf);
        const double *H2_ptr = cam_params_.ptr<double>() + j * 4;
        double H2_buf[9] =
        {
            H2_ptr[0], -H2_ptr[1], H2_ptr[2],
            H2_ptr[1],  H2_ptr[0], H2_ptr[3],
            0., 0., 1.
        };
        Mat H2 (3, 3, CV_64F, H2_buf);

        // invert H1 in place: compute the affine inverse of its top two rows
        // into a scratch buffer, then copy it back over them (H1_aff aliases
        // H1's rows 0-1, so after this H1 holds inv(H1)).
        Mat H1_aff (H1, Range(0, 2));
        double H1_inv_buf[6];
        Mat H1_inv (2, 3, CV_64F, H1_inv_buf);
        invertAffineTransform(H1_aff, H1_inv);
        H1_inv.copyTo(H1_aff);

        // H1 now contains inv(H1), so this is inv(H1) * H2.
        Mat_<double> H = H1 * H2;

        for (size_t k = 0; k < matches_info.matches.size(); ++k)
        {
            if (!matches_info.inliers_mask[k])
                continue;

            const DMatch& m = matches_info.matches[k];
            const Point2f& p1 = features1.keypoints[m.queryIdx].pt;
            const Point2f& p2 = features2.keypoints[m.trainIdx].pt;

            // Affine map: no dehomogenization needed (last row is 0 0 1).
            double x = H(0,0)*p1.x + H(0,1)*p1.y + H(0,2);
            double y = H(1,0)*p1.x + H(1,1)*p1.y + H(1,2);

            err.at<double>(2 * match_idx + 0, 0) = p2.x - x;
            err.at<double>(2 * match_idx + 1, 0) = p2.y - y;

            ++match_idx;
        }
    }
}
862 
863 
calcJacobian(Mat & jac)864 void BundleAdjusterAffinePartial::calcJacobian(Mat &jac)
865 {
866     jac.create(total_num_matches_ * 2, num_images_ * 4, CV_64F);
867 
868     double val;
869     const double step = 1e-4;
870 
871     for (int i = 0; i < num_images_; ++i)
872     {
873         for (int j = 0; j < 4; ++j)
874         {
875             val = cam_params_.at<double>(i * 4 + j, 0);
876             cam_params_.at<double>(i * 4 + j, 0) = val - step;
877             calcError(err1_);
878             cam_params_.at<double>(i * 4 + j, 0) = val + step;
879             calcError(err2_);
880             calcDeriv(err1_, err2_, 2 * step, jac.col(i * 4 + j));
881             cam_params_.at<double>(i * 4 + j, 0) = val;
882         }
883     }
884 }
885 
886 
887 //////////////////////////////////////////////////////////////////////////////
888 
autoDetectWaveCorrectKind(const std::vector<Mat> & rmats)889 WaveCorrectKind autoDetectWaveCorrectKind(const std::vector<Mat> &rmats)
890 {
891     std::vector<float> xs, ys;
892     xs.reserve(rmats.size());
893     ys.reserve(rmats.size());
894 
895     // Project a [0, 0, 1, 1] point to the camera image frame
896     // Ignore intrinsic parameters and camera translation as they
897     // have little influence
898     // This also means we can simply use "rmat.col(2)" as the
899     // projected point homogeneous coordinate
900     for (const Mat& rmat: rmats)
901     {
902         CV_Assert(rmat.type() == CV_32F);
903         xs.push_back(rmat.at<float>(0, 2) / rmat.at<float>(2, 2));
904         ys.push_back(rmat.at<float>(1, 2) / rmat.at<float>(2, 2));
905     }
906 
907     // Calculate the delta between the max and min values for
908     // both the X and Y axis
909     auto min_max_x = std::minmax_element(xs.begin(), xs.end());
910     auto min_max_y = std::minmax_element(ys.begin(), ys.end());
911     double delta_x = *min_max_x.second - *min_max_x.first;
912     double delta_y = *min_max_y.second - *min_max_y.first;
913 
914     // If the Y delta is the biggest, it means the images
915     // mostly span along the vertical axis: correct this axis
916     if (delta_y > delta_x)
917     {
918         LOGLN("  using vertical wave correction");
919         return WAVE_CORRECT_VERT;
920     }
921     else
922     {
923         LOGLN("  using horizontal wave correction");
924         return WAVE_CORRECT_HORIZ;
925     }
926 }
927 
// Straightens a stitched panorama ("wave correction"): estimates a global
// rotation R from the camera rotations and left-multiplies every rmat by it,
// aligning the cameras' common axis with the horizontal or vertical image
// axis depending on `kind`. Modifies `rmats` in place.
void waveCorrect(std::vector<Mat> &rmats, WaveCorrectKind kind)
{
    LOGLN("Wave correcting...");
#if ENABLE_LOG
    // 't' only exists when logging is enabled; LOGLN compiles away otherwise,
    // so the uses of 't' below are also compiled out.
    int64 t = getTickCount();
#endif
    // Nothing to straighten with zero or one camera.
    if (rmats.size() <= 1)
    {
        LOGLN("Wave correcting, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
        return;
    }

    if (kind == WAVE_CORRECT_AUTO)
    {
        kind = autoDetectWaveCorrectKind(rmats);
    }

    // Accumulate the scatter matrix of the cameras' first columns
    // (their X axes): moment = sum_i col_i * col_i^T.
    Mat moment = Mat::zeros(3, 3, CV_32F);
    for (size_t i = 0; i < rmats.size(); ++i)
    {
        Mat col = rmats[i].col(0);
        moment += col * col.t();
    }

    // cv::eigen stores eigenvectors as rows, ordered by descending
    // eigenvalue: row 0 is the dominant direction of the X axes, row 2 the
    // least significant.
    Mat eigen_vals, eigen_vecs;
    eigen(moment, eigen_vals, eigen_vecs);

    // Pick one axis of the corrected global frame from the eigenvectors.
    Mat rg1;
    if (kind == WAVE_CORRECT_HORIZ)
        rg1 = eigen_vecs.row(2).t();
    else if (kind == WAVE_CORRECT_VERT)
        rg1 = eigen_vecs.row(0).t();
    else
        CV_Error(CV_StsBadArg, "unsupported kind of wave correction");

    // Mean viewing direction: sum of the cameras' Z axes (third columns).
    Mat img_k = Mat::zeros(3, 1, CV_32F);
    for (size_t i = 0; i < rmats.size(); ++i)
        img_k += rmats[i].col(2);
    Mat rg0 = rg1.cross(img_k);
    double rg0_norm = norm(rg0);

    // Degenerate configuration: rg1 (nearly) parallel to the mean view
    // direction. Bail out without touching the rotations.
    if( rg0_norm <= DBL_MIN )
    {
        return;
    }

    rg0 /= rg0_norm;

    // Complete the orthonormal basis (rg0, rg1, rg2).
    Mat rg2 = rg0.cross(rg1);

    // Fix the sign of the basis so it agrees with the majority of the
    // cameras' X axes; otherwise the corrected panorama could come out
    // mirrored/upside-down.
    double conf = 0;
    if (kind == WAVE_CORRECT_HORIZ)
    {
        for (size_t i = 0; i < rmats.size(); ++i)
            conf += rg0.dot(rmats[i].col(0));
        if (conf < 0)
        {
            rg0 *= -1;
            rg1 *= -1;
        }
    }
    else if (kind == WAVE_CORRECT_VERT)
    {
        for (size_t i = 0; i < rmats.size(); ++i)
            conf -= rg1.dot(rmats[i].col(0));
        if (conf < 0)
        {
            rg0 *= -1;
            rg1 *= -1;
        }
    }

    // Assemble R with rg0/rg1/rg2 as its rows. `tmp` is a row VIEW into R,
    // so copyTo writes directly into R's storage.
    Mat R = Mat::zeros(3, 3, CV_32F);
    Mat tmp = R.row(0);
    Mat(rg0.t()).copyTo(tmp);
    tmp = R.row(1);
    Mat(rg1.t()).copyTo(tmp);
    tmp = R.row(2);
    Mat(rg2.t()).copyTo(tmp);

    // Apply the global correction to every camera rotation.
    for (size_t i = 0; i < rmats.size(); ++i)
        rmats[i] = R * rmats[i];

    LOGLN("Wave correcting, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
}
1013 
1014 
1015 //////////////////////////////////////////////////////////////////////////////
1016 
matchesGraphAsString(std::vector<String> & pathes,std::vector<MatchesInfo> & pairwise_matches,float conf_threshold)1017 String matchesGraphAsString(std::vector<String> &pathes, std::vector<MatchesInfo> &pairwise_matches,
1018                                 float conf_threshold)
1019 {
1020     std::stringstream str;
1021     str << "graph matches_graph{\n";
1022 
1023     const int num_images = static_cast<int>(pathes.size());
1024     std::set<std::pair<int,int> > span_tree_edges;
1025     DisjointSets comps(num_images);
1026 
1027     for (int i = 0; i < num_images; ++i)
1028     {
1029         for (int j = 0; j < num_images; ++j)
1030         {
1031             if (pairwise_matches[i*num_images + j].confidence < conf_threshold)
1032                 continue;
1033             int comp1 = comps.findSetByElem(i);
1034             int comp2 = comps.findSetByElem(j);
1035             if (comp1 != comp2)
1036             {
1037                 comps.mergeSets(comp1, comp2);
1038                 span_tree_edges.insert(std::make_pair(i, j));
1039             }
1040         }
1041     }
1042 
1043     for (std::set<std::pair<int,int> >::const_iterator itr = span_tree_edges.begin();
1044          itr != span_tree_edges.end(); ++itr)
1045     {
1046         std::pair<int,int> edge = *itr;
1047         if (span_tree_edges.find(edge) != span_tree_edges.end())
1048         {
1049             String name_src = pathes[edge.first];
1050             size_t prefix_len = name_src.find_last_of("/\\");
1051             if (prefix_len != String::npos) prefix_len++; else prefix_len = 0;
1052             name_src = name_src.substr(prefix_len, name_src.size() - prefix_len);
1053 
1054             String name_dst = pathes[edge.second];
1055             prefix_len = name_dst.find_last_of("/\\");
1056             if (prefix_len != String::npos) prefix_len++; else prefix_len = 0;
1057             name_dst = name_dst.substr(prefix_len, name_dst.size() - prefix_len);
1058 
1059             int pos = edge.first*num_images + edge.second;
1060             str << "\"" << name_src.c_str() << "\" -- \"" << name_dst.c_str() << "\""
1061                 << "[label=\"Nm=" << pairwise_matches[pos].matches.size()
1062                 << ", Ni=" << pairwise_matches[pos].num_inliers
1063                 << ", C=" << pairwise_matches[pos].confidence << "\"];\n";
1064         }
1065     }
1066 
1067     for (size_t i = 0; i < comps.size.size(); ++i)
1068     {
1069         if (comps.size[comps.findSetByElem((int)i)] == 1)
1070         {
1071             String name = pathes[i];
1072             size_t prefix_len = name.find_last_of("/\\");
1073             if (prefix_len != String::npos) prefix_len++; else prefix_len = 0;
1074             name = name.substr(prefix_len, name.size() - prefix_len);
1075             str << "\"" << name.c_str() << "\";\n";
1076         }
1077     }
1078 
1079     str << "}";
1080     return str.str().c_str();
1081 }
1082 
leaveBiggestComponent(std::vector<ImageFeatures> & features,std::vector<MatchesInfo> & pairwise_matches,float conf_threshold)1083 std::vector<int> leaveBiggestComponent(std::vector<ImageFeatures> &features,  std::vector<MatchesInfo> &pairwise_matches,
1084                                       float conf_threshold)
1085 {
1086     const int num_images = static_cast<int>(features.size());
1087 
1088     DisjointSets comps(num_images);
1089     for (int i = 0; i < num_images; ++i)
1090     {
1091         for (int j = 0; j < num_images; ++j)
1092         {
1093             if (pairwise_matches[i*num_images + j].confidence < conf_threshold)
1094                 continue;
1095             int comp1 = comps.findSetByElem(i);
1096             int comp2 = comps.findSetByElem(j);
1097             if (comp1 != comp2)
1098                 comps.mergeSets(comp1, comp2);
1099         }
1100     }
1101 
1102     int max_comp = static_cast<int>(std::max_element(comps.size.begin(), comps.size.end()) - comps.size.begin());
1103 
1104     std::vector<int> indices;
1105     std::vector<int> indices_removed;
1106     for (int i = 0; i < num_images; ++i)
1107         if (comps.findSetByElem(i) == max_comp)
1108             indices.push_back(i);
1109         else
1110             indices_removed.push_back(i);
1111 
1112     std::vector<ImageFeatures> features_subset;
1113     std::vector<MatchesInfo> pairwise_matches_subset;
1114     for (size_t i = 0; i < indices.size(); ++i)
1115     {
1116         features_subset.push_back(features[indices[i]]);
1117         for (size_t j = 0; j < indices.size(); ++j)
1118         {
1119             pairwise_matches_subset.push_back(pairwise_matches[indices[i]*num_images + indices[j]]);
1120             pairwise_matches_subset.back().src_img_idx = static_cast<int>(i);
1121             pairwise_matches_subset.back().dst_img_idx = static_cast<int>(j);
1122         }
1123     }
1124 
1125     if (static_cast<int>(features_subset.size()) == num_images)
1126         return indices;
1127 
1128     LOG("Removed some images, because can't match them or there are too similar images: (");
1129     LOG(indices_removed[0] + 1);
1130     for (size_t i = 1; i < indices_removed.size(); ++i)
1131         LOG(", " << indices_removed[i]+1);
1132     LOGLN(").");
1133     LOGLN("Try to decrease the match confidence threshold and/or check if you're stitching duplicates.");
1134 
1135     features = features_subset;
1136     pairwise_matches = pairwise_matches_subset;
1137 
1138     return indices;
1139 }
1140 
1141 
findMaxSpanningTree(int num_images,const std::vector<MatchesInfo> & pairwise_matches,Graph & span_tree,std::vector<int> & centers)1142 void findMaxSpanningTree(int num_images, const std::vector<MatchesInfo> &pairwise_matches,
1143                              Graph &span_tree, std::vector<int> &centers)
1144 {
1145     Graph graph(num_images);
1146     std::vector<GraphEdge> edges;
1147 
1148     // Construct images graph and remember its edges
1149     for (int i = 0; i < num_images; ++i)
1150     {
1151         for (int j = 0; j < num_images; ++j)
1152         {
1153             if (pairwise_matches[i * num_images + j].H.empty())
1154                 continue;
1155             float conf = static_cast<float>(pairwise_matches[i * num_images + j].num_inliers);
1156             graph.addEdge(i, j, conf);
1157             edges.push_back(GraphEdge(i, j, conf));
1158         }
1159     }
1160 
1161     DisjointSets comps(num_images);
1162     span_tree.create(num_images);
1163     std::vector<int> span_tree_powers(num_images, 0);
1164 
1165     // Find maximum spanning tree
1166     sort(edges.begin(), edges.end(), std::greater<GraphEdge>());
1167     for (size_t i = 0; i < edges.size(); ++i)
1168     {
1169         int comp1 = comps.findSetByElem(edges[i].from);
1170         int comp2 = comps.findSetByElem(edges[i].to);
1171         if (comp1 != comp2)
1172         {
1173             comps.mergeSets(comp1, comp2);
1174             span_tree.addEdge(edges[i].from, edges[i].to, edges[i].weight);
1175             span_tree.addEdge(edges[i].to, edges[i].from, edges[i].weight);
1176             span_tree_powers[edges[i].from]++;
1177             span_tree_powers[edges[i].to]++;
1178         }
1179     }
1180 
1181     // Find spanning tree leafs
1182     std::vector<int> span_tree_leafs;
1183     for (int i = 0; i < num_images; ++i)
1184         if (span_tree_powers[i] == 1)
1185             span_tree_leafs.push_back(i);
1186 
1187     // Find maximum distance from each spanning tree vertex
1188     std::vector<int> max_dists(num_images, 0);
1189     std::vector<int> cur_dists;
1190     for (size_t i = 0; i < span_tree_leafs.size(); ++i)
1191     {
1192         cur_dists.assign(num_images, 0);
1193         span_tree.walkBreadthFirst(span_tree_leafs[i], IncDistance(cur_dists));
1194         for (int j = 0; j < num_images; ++j)
1195             max_dists[j] = std::max(max_dists[j], cur_dists[j]);
1196     }
1197 
1198     // Find min-max distance
1199     int min_max_dist = max_dists[0];
1200     for (int i = 1; i < num_images; ++i)
1201         if (min_max_dist > max_dists[i])
1202             min_max_dist = max_dists[i];
1203 
1204     // Find spanning tree centers
1205     centers.clear();
1206     for (int i = 0; i < num_images; ++i)
1207         if (max_dists[i] == min_max_dist)
1208             centers.push_back(i);
1209     CV_Assert(centers.size() > 0 && centers.size() <= 2);
1210 }
1211 
1212 } // namespace detail
1213 } // namespace cv
1214