
#include <iostream>
#include <fstream>
#include <string>
#include "opencv2/opencv_modules.hpp"
#include <opencv2/core/utility.hpp>
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/stitching/detail/autocalib.hpp"
#include "opencv2/stitching/detail/blenders.hpp"
#include "opencv2/stitching/detail/timelapsers.hpp"
#include "opencv2/stitching/detail/camera.hpp"
#include "opencv2/stitching/detail/exposure_compensate.hpp"
#include "opencv2/stitching/detail/matchers.hpp"
#include "opencv2/stitching/detail/motion_estimators.hpp"
#include "opencv2/stitching/detail/seam_finders.hpp"
#include "opencv2/stitching/detail/warpers.hpp"
#include "opencv2/stitching/warpers.hpp"

#ifdef HAVE_OPENCV_XFEATURES2D
#include "opencv2/xfeatures2d.hpp"
#include "opencv2/xfeatures2d/nonfree.hpp"
#endif

#define ENABLE_LOG 1
#define LOG(msg) std::cout << msg
#define LOGLN(msg) std::cout << msg << std::endl

using namespace std;
using namespace cv;
using namespace cv::detail;
32
/**
 * Print the command-line usage/help text for the stitching application.
 *
 * @param argv  Program argument vector; argv[0] is used as the program name
 *              in the usage line. Only argv[0] is read.
 */
static void printUsage(char** argv)
{
    std::cout <<
        "Rotation model images stitcher.\n\n"
        << argv[0] << " img1 img2 [...imgN] [flags]\n\n"
        "Flags:\n"
        "  --preview\n"
        "      Run stitching in the preview mode. Works faster than usual mode,\n"
        "      but output image will have lower resolution.\n"
        "  --try_cuda (yes|no)\n"
        "      Try to use CUDA. The default value is 'no'. All default values\n"
        "      are for CPU mode.\n"
        "\nMotion Estimation Flags:\n"
        "  --work_megapix <float>\n"
        "      Resolution for image registration step. The default is 0.6 Mpx.\n"
        "  --features (surf|orb|sift|akaze)\n"
        "      Type of features used for images matching.\n"
        "      The default is surf if available, orb otherwise.\n"
        "  --matcher (homography|affine)\n"
        "      Matcher used for pairwise image matching.\n"
        "  --estimator (homography|affine)\n"
        "      Type of estimator used for transformation estimation.\n"
        "  --match_conf <float>\n"
        "      Confidence for feature matching step. The default is 0.65 for surf and 0.3 for orb.\n"
        "  --conf_thresh <float>\n"
        "      Threshold for two images are from the same panorama confidence.\n"
        "      The default is 1.0.\n"
        "  --ba (no|reproj|ray|affine)\n"
        "      Bundle adjustment cost function. The default is ray.\n"
        "  --ba_refine_mask (mask)\n"
        "      Set refinement mask for bundle adjustment. It looks like 'x_xxx',\n"
        "      where 'x' means refine respective parameter and '_' means don't\n"
        "      refine one, and has the following format:\n"
        "      <fx><skew><ppx><aspect><ppy>. The default mask is 'xxxxx'. If bundle\n"
        "      adjustment doesn't support estimation of selected parameter then\n"
        "      the respective flag is ignored.\n"
        "  --wave_correct (no|horiz|vert)\n"
        "      Perform wave effect correction. The default is 'horiz'.\n"
        "  --save_graph <file_name>\n"
        "      Save matches graph represented in DOT language to <file_name> file.\n"
        "      Labels description: Nm is number of matches, Ni is number of inliers,\n"
        "      C is confidence.\n"
        "\nCompositing Flags:\n"
        "  --warp (affine|plane|cylindrical|spherical|fisheye|stereographic|compressedPlaneA2B1|compressedPlaneA1.5B1|compressedPlanePortraitA2B1|compressedPlanePortraitA1.5B1|paniniA2B1|paniniA1.5B1|paniniPortraitA2B1|paniniPortraitA1.5B1|mercator|transverseMercator)\n"
        "      Warp surface type. The default is 'spherical'.\n"
        "  --seam_megapix <float>\n"
        "      Resolution for seam estimation step. The default is 0.1 Mpx.\n"
        "  --seam (no|voronoi|gc_color|gc_colorgrad)\n"
        "      Seam estimation method. The default is 'gc_color'.\n"
        "  --compose_megapix <float>\n"
        "      Resolution for compositing step. Use -1 for original resolution.\n"
        "      The default is -1.\n"
        "  --expos_comp (no|gain|gain_blocks|channels|channels_blocks)\n"
        "      Exposure compensation method. The default is 'gain_blocks'.\n"
        "  --expos_comp_nr_feeds <int>\n"
        "      Number of exposure compensation feed. The default is 1.\n"
        "  --expos_comp_nr_filtering <int>\n"
        "      Number of filtering iterations of the exposure compensation gains.\n"
        "      Only used when using a block exposure compensation method.\n"
        "      The default is 2.\n"
        "  --expos_comp_block_size <int>\n"
        "      BLock size in pixels used by the exposure compensator.\n"
        "      Only used when using a block exposure compensation method.\n"
        "      The default is 32.\n"
        "  --blend (no|feather|multiband)\n"
        "      Blending method. The default is 'multiband'.\n"
        "  --blend_strength <float>\n"
        "      Blending strength from [0,100] range. The default is 5.\n"
        "  --output <result_img>\n"
        "      The default is 'result.jpg'.\n"
        "  --timelapse (as_is|crop) \n"
        "      Output warped images separately as frames of a time lapse movie, with 'fixed_' prepended to input file names.\n"
        "  --rangewidth <int>\n"
        "      uses range_width to limit number of images to match with.\n";
}
108
109
110 // Default command line args
111 vector<String> img_names;
112 bool preview = false;
113 bool try_cuda = false;
114 double work_megapix = 0.6;
115 double seam_megapix = 0.1;
116 double compose_megapix = -1;
117 float conf_thresh = 1.f;
118 #ifdef HAVE_OPENCV_XFEATURES2D
119 string features_type = "surf";
120 float match_conf = 0.65f;
121 #else
122 string features_type = "orb";
123 float match_conf = 0.3f;
124 #endif
125 string matcher_type = "homography";
126 string estimator_type = "homography";
127 string ba_cost_func = "ray";
128 string ba_refine_mask = "xxxxx";
129 bool do_wave_correct = true;
130 WaveCorrectKind wave_correct = detail::WAVE_CORRECT_HORIZ;
131 bool save_graph = false;
132 std::string save_graph_to;
133 string warp_type = "spherical";
134 int expos_comp_type = ExposureCompensator::GAIN_BLOCKS;
135 int expos_comp_nr_feeds = 1;
136 int expos_comp_nr_filtering = 2;
137 int expos_comp_block_size = 32;
138 string seam_find_type = "gc_color";
139 int blend_type = Blender::MULTI_BAND;
140 int timelapse_type = Timelapser::AS_IS;
141 float blend_strength = 5;
142 string result_name = "result.jpg";
143 bool timelapse = false;
144 int range_width = -1;
145
146
parseCmdArgs(int argc,char ** argv)147 static int parseCmdArgs(int argc, char** argv)
148 {
149 if (argc == 1)
150 {
151 printUsage(argv);
152 return -1;
153 }
154 for (int i = 1; i < argc; ++i)
155 {
156 if (string(argv[i]) == "--help" || string(argv[i]) == "/?")
157 {
158 printUsage(argv);
159 return -1;
160 }
161 else if (string(argv[i]) == "--preview")
162 {
163 preview = true;
164 }
165 else if (string(argv[i]) == "--try_cuda")
166 {
167 if (string(argv[i + 1]) == "no")
168 try_cuda = false;
169 else if (string(argv[i + 1]) == "yes")
170 try_cuda = true;
171 else
172 {
173 cout << "Bad --try_cuda flag value\n";
174 return -1;
175 }
176 i++;
177 }
178 else if (string(argv[i]) == "--work_megapix")
179 {
180 work_megapix = atof(argv[i + 1]);
181 i++;
182 }
183 else if (string(argv[i]) == "--seam_megapix")
184 {
185 seam_megapix = atof(argv[i + 1]);
186 i++;
187 }
188 else if (string(argv[i]) == "--compose_megapix")
189 {
190 compose_megapix = atof(argv[i + 1]);
191 i++;
192 }
193 else if (string(argv[i]) == "--result")
194 {
195 result_name = argv[i + 1];
196 i++;
197 }
198 else if (string(argv[i]) == "--features")
199 {
200 features_type = argv[i + 1];
201 if (string(features_type) == "orb")
202 match_conf = 0.3f;
203 i++;
204 }
205 else if (string(argv[i]) == "--matcher")
206 {
207 if (string(argv[i + 1]) == "homography" || string(argv[i + 1]) == "affine")
208 matcher_type = argv[i + 1];
209 else
210 {
211 cout << "Bad --matcher flag value\n";
212 return -1;
213 }
214 i++;
215 }
216 else if (string(argv[i]) == "--estimator")
217 {
218 if (string(argv[i + 1]) == "homography" || string(argv[i + 1]) == "affine")
219 estimator_type = argv[i + 1];
220 else
221 {
222 cout << "Bad --estimator flag value\n";
223 return -1;
224 }
225 i++;
226 }
227 else if (string(argv[i]) == "--match_conf")
228 {
229 match_conf = static_cast<float>(atof(argv[i + 1]));
230 i++;
231 }
232 else if (string(argv[i]) == "--conf_thresh")
233 {
234 conf_thresh = static_cast<float>(atof(argv[i + 1]));
235 i++;
236 }
237 else if (string(argv[i]) == "--ba")
238 {
239 ba_cost_func = argv[i + 1];
240 i++;
241 }
242 else if (string(argv[i]) == "--ba_refine_mask")
243 {
244 ba_refine_mask = argv[i + 1];
245 if (ba_refine_mask.size() != 5)
246 {
247 cout << "Incorrect refinement mask length.\n";
248 return -1;
249 }
250 i++;
251 }
252 else if (string(argv[i]) == "--wave_correct")
253 {
254 if (string(argv[i + 1]) == "no")
255 do_wave_correct = false;
256 else if (string(argv[i + 1]) == "horiz")
257 {
258 do_wave_correct = true;
259 wave_correct = detail::WAVE_CORRECT_HORIZ;
260 }
261 else if (string(argv[i + 1]) == "vert")
262 {
263 do_wave_correct = true;
264 wave_correct = detail::WAVE_CORRECT_VERT;
265 }
266 else
267 {
268 cout << "Bad --wave_correct flag value\n";
269 return -1;
270 }
271 i++;
272 }
273 else if (string(argv[i]) == "--save_graph")
274 {
275 save_graph = true;
276 save_graph_to = argv[i + 1];
277 i++;
278 }
279 else if (string(argv[i]) == "--warp")
280 {
281 warp_type = string(argv[i + 1]);
282 i++;
283 }
284 else if (string(argv[i]) == "--expos_comp")
285 {
286 if (string(argv[i + 1]) == "no")
287 expos_comp_type = ExposureCompensator::NO;
288 else if (string(argv[i + 1]) == "gain")
289 expos_comp_type = ExposureCompensator::GAIN;
290 else if (string(argv[i + 1]) == "gain_blocks")
291 expos_comp_type = ExposureCompensator::GAIN_BLOCKS;
292 else if (string(argv[i + 1]) == "channels")
293 expos_comp_type = ExposureCompensator::CHANNELS;
294 else if (string(argv[i + 1]) == "channels_blocks")
295 expos_comp_type = ExposureCompensator::CHANNELS_BLOCKS;
296 else
297 {
298 cout << "Bad exposure compensation method\n";
299 return -1;
300 }
301 i++;
302 }
303 else if (string(argv[i]) == "--expos_comp_nr_feeds")
304 {
305 expos_comp_nr_feeds = atoi(argv[i + 1]);
306 i++;
307 }
308 else if (string(argv[i]) == "--expos_comp_nr_filtering")
309 {
310 expos_comp_nr_filtering = atoi(argv[i + 1]);
311 i++;
312 }
313 else if (string(argv[i]) == "--expos_comp_block_size")
314 {
315 expos_comp_block_size = atoi(argv[i + 1]);
316 i++;
317 }
318 else if (string(argv[i]) == "--seam")
319 {
320 if (string(argv[i + 1]) == "no" ||
321 string(argv[i + 1]) == "voronoi" ||
322 string(argv[i + 1]) == "gc_color" ||
323 string(argv[i + 1]) == "gc_colorgrad" ||
324 string(argv[i + 1]) == "dp_color" ||
325 string(argv[i + 1]) == "dp_colorgrad")
326 seam_find_type = argv[i + 1];
327 else
328 {
329 cout << "Bad seam finding method\n";
330 return -1;
331 }
332 i++;
333 }
334 else if (string(argv[i]) == "--blend")
335 {
336 if (string(argv[i + 1]) == "no")
337 blend_type = Blender::NO;
338 else if (string(argv[i + 1]) == "feather")
339 blend_type = Blender::FEATHER;
340 else if (string(argv[i + 1]) == "multiband")
341 blend_type = Blender::MULTI_BAND;
342 else
343 {
344 cout << "Bad blending method\n";
345 return -1;
346 }
347 i++;
348 }
349 else if (string(argv[i]) == "--timelapse")
350 {
351 timelapse = true;
352
353 if (string(argv[i + 1]) == "as_is")
354 timelapse_type = Timelapser::AS_IS;
355 else if (string(argv[i + 1]) == "crop")
356 timelapse_type = Timelapser::CROP;
357 else
358 {
359 cout << "Bad timelapse method\n";
360 return -1;
361 }
362 i++;
363 }
364 else if (string(argv[i]) == "--rangewidth")
365 {
366 range_width = atoi(argv[i + 1]);
367 i++;
368 }
369 else if (string(argv[i]) == "--blend_strength")
370 {
371 blend_strength = static_cast<float>(atof(argv[i + 1]));
372 i++;
373 }
374 else if (string(argv[i]) == "--output")
375 {
376 result_name = argv[i + 1];
377 i++;
378 }
379 else
380 img_names.push_back(argv[i]);
381 }
382 if (preview)
383 {
384 compose_megapix = 0.6;
385 }
386 return 0;
387 }
388
389
main(int argc,char * argv[])390 int main(int argc, char* argv[])
391 {
392 #if ENABLE_LOG
393 int64 app_start_time = getTickCount();
394 #endif
395
396 #if 0
397 cv::setBreakOnError(true);
398 #endif
399
400 int retval = parseCmdArgs(argc, argv);
401 if (retval)
402 return retval;
403
404 // Check if have enough images
405 int num_images = static_cast<int>(img_names.size());
406 if (num_images < 2)
407 {
408 LOGLN("Need more images");
409 return -1;
410 }
411
412 double work_scale = 1, seam_scale = 1, compose_scale = 1;
413 bool is_work_scale_set = false, is_seam_scale_set = false, is_compose_scale_set = false;
414
415 LOGLN("Finding features...");
416 #if ENABLE_LOG
417 int64 t = getTickCount();
418 #endif
419
420 Ptr<Feature2D> finder;
421 if (features_type == "orb")
422 {
423 finder = ORB::create();
424 }
425 else if (features_type == "akaze")
426 {
427 finder = AKAZE::create();
428 }
429 #ifdef HAVE_OPENCV_XFEATURES2D
430 else if (features_type == "surf")
431 {
432 finder = xfeatures2d::SURF::create();
433 }
434 #endif
435 else if (features_type == "sift")
436 {
437 finder = SIFT::create();
438 }
439 else
440 {
441 cout << "Unknown 2D features type: '" << features_type << "'.\n";
442 return -1;
443 }
444
445 Mat full_img, img;
446 vector<ImageFeatures> features(num_images);
447 vector<Mat> images(num_images);
448 vector<Size> full_img_sizes(num_images);
449 double seam_work_aspect = 1;
450
451 for (int i = 0; i < num_images; ++i)
452 {
453 full_img = imread(samples::findFile(img_names[i]));
454 full_img_sizes[i] = full_img.size();
455
456 if (full_img.empty())
457 {
458 LOGLN("Can't open image " << img_names[i]);
459 return -1;
460 }
461 if (work_megapix < 0)
462 {
463 img = full_img;
464 work_scale = 1;
465 is_work_scale_set = true;
466 }
467 else
468 {
469 if (!is_work_scale_set)
470 {
471 work_scale = min(1.0, sqrt(work_megapix * 1e6 / full_img.size().area()));
472 is_work_scale_set = true;
473 }
474 resize(full_img, img, Size(), work_scale, work_scale, INTER_LINEAR_EXACT);
475 }
476 if (!is_seam_scale_set)
477 {
478 seam_scale = min(1.0, sqrt(seam_megapix * 1e6 / full_img.size().area()));
479 seam_work_aspect = seam_scale / work_scale;
480 is_seam_scale_set = true;
481 }
482
483 computeImageFeatures(finder, img, features[i]);
484 features[i].img_idx = i;
485 LOGLN("Features in image #" << i+1 << ": " << features[i].keypoints.size());
486
487 resize(full_img, img, Size(), seam_scale, seam_scale, INTER_LINEAR_EXACT);
488 images[i] = img.clone();
489 }
490
491 full_img.release();
492 img.release();
493
494 LOGLN("Finding features, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
495
496 LOG("Pairwise matching");
497 #if ENABLE_LOG
498 t = getTickCount();
499 #endif
500 vector<MatchesInfo> pairwise_matches;
501 Ptr<FeaturesMatcher> matcher;
502 if (matcher_type == "affine")
503 matcher = makePtr<AffineBestOf2NearestMatcher>(false, try_cuda, match_conf);
504 else if (range_width==-1)
505 matcher = makePtr<BestOf2NearestMatcher>(try_cuda, match_conf);
506 else
507 matcher = makePtr<BestOf2NearestRangeMatcher>(range_width, try_cuda, match_conf);
508
509 (*matcher)(features, pairwise_matches);
510 matcher->collectGarbage();
511
512 LOGLN("Pairwise matching, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
513
514 // Check if we should save matches graph
515 if (save_graph)
516 {
517 LOGLN("Saving matches graph...");
518 ofstream f(save_graph_to.c_str());
519 f << matchesGraphAsString(img_names, pairwise_matches, conf_thresh);
520 }
521
522 // Leave only images we are sure are from the same panorama
523 vector<int> indices = leaveBiggestComponent(features, pairwise_matches, conf_thresh);
524 vector<Mat> img_subset;
525 vector<String> img_names_subset;
526 vector<Size> full_img_sizes_subset;
527 for (size_t i = 0; i < indices.size(); ++i)
528 {
529 img_names_subset.push_back(img_names[indices[i]]);
530 img_subset.push_back(images[indices[i]]);
531 full_img_sizes_subset.push_back(full_img_sizes[indices[i]]);
532 }
533
534 images = img_subset;
535 img_names = img_names_subset;
536 full_img_sizes = full_img_sizes_subset;
537
538 // Check if we still have enough images
539 num_images = static_cast<int>(img_names.size());
540 if (num_images < 2)
541 {
542 LOGLN("Need more images");
543 return -1;
544 }
545
546 Ptr<Estimator> estimator;
547 if (estimator_type == "affine")
548 estimator = makePtr<AffineBasedEstimator>();
549 else
550 estimator = makePtr<HomographyBasedEstimator>();
551
552 vector<CameraParams> cameras;
553 if (!(*estimator)(features, pairwise_matches, cameras))
554 {
555 cout << "Homography estimation failed.\n";
556 return -1;
557 }
558
559 for (size_t i = 0; i < cameras.size(); ++i)
560 {
561 Mat R;
562 cameras[i].R.convertTo(R, CV_32F);
563 cameras[i].R = R;
564 LOGLN("Initial camera intrinsics #" << indices[i]+1 << ":\nK:\n" << cameras[i].K() << "\nR:\n" << cameras[i].R);
565 }
566
567 Ptr<detail::BundleAdjusterBase> adjuster;
568 if (ba_cost_func == "reproj") adjuster = makePtr<detail::BundleAdjusterReproj>();
569 else if (ba_cost_func == "ray") adjuster = makePtr<detail::BundleAdjusterRay>();
570 else if (ba_cost_func == "affine") adjuster = makePtr<detail::BundleAdjusterAffinePartial>();
571 else if (ba_cost_func == "no") adjuster = makePtr<NoBundleAdjuster>();
572 else
573 {
574 cout << "Unknown bundle adjustment cost function: '" << ba_cost_func << "'.\n";
575 return -1;
576 }
577 adjuster->setConfThresh(conf_thresh);
578 Mat_<uchar> refine_mask = Mat::zeros(3, 3, CV_8U);
579 if (ba_refine_mask[0] == 'x') refine_mask(0,0) = 1;
580 if (ba_refine_mask[1] == 'x') refine_mask(0,1) = 1;
581 if (ba_refine_mask[2] == 'x') refine_mask(0,2) = 1;
582 if (ba_refine_mask[3] == 'x') refine_mask(1,1) = 1;
583 if (ba_refine_mask[4] == 'x') refine_mask(1,2) = 1;
584 adjuster->setRefinementMask(refine_mask);
585 if (!(*adjuster)(features, pairwise_matches, cameras))
586 {
587 cout << "Camera parameters adjusting failed.\n";
588 return -1;
589 }
590
591 // Find median focal length
592
593 vector<double> focals;
594 for (size_t i = 0; i < cameras.size(); ++i)
595 {
596 LOGLN("Camera #" << indices[i]+1 << ":\nK:\n" << cameras[i].K() << "\nR:\n" << cameras[i].R);
597 focals.push_back(cameras[i].focal);
598 }
599
600 sort(focals.begin(), focals.end());
601 float warped_image_scale;
602 if (focals.size() % 2 == 1)
603 warped_image_scale = static_cast<float>(focals[focals.size() / 2]);
604 else
605 warped_image_scale = static_cast<float>(focals[focals.size() / 2 - 1] + focals[focals.size() / 2]) * 0.5f;
606
607 if (do_wave_correct)
608 {
609 vector<Mat> rmats;
610 for (size_t i = 0; i < cameras.size(); ++i)
611 rmats.push_back(cameras[i].R.clone());
612 waveCorrect(rmats, wave_correct);
613 for (size_t i = 0; i < cameras.size(); ++i)
614 cameras[i].R = rmats[i];
615 }
616
617 LOGLN("Warping images (auxiliary)... ");
618 #if ENABLE_LOG
619 t = getTickCount();
620 #endif
621
622 vector<Point> corners(num_images);
623 vector<UMat> masks_warped(num_images);
624 vector<UMat> images_warped(num_images);
625 vector<Size> sizes(num_images);
626 vector<UMat> masks(num_images);
627
628 // Prepare images masks
629 for (int i = 0; i < num_images; ++i)
630 {
631 masks[i].create(images[i].size(), CV_8U);
632 masks[i].setTo(Scalar::all(255));
633 }
634
635 // Warp images and their masks
636
637 Ptr<WarperCreator> warper_creator;
638 #ifdef HAVE_OPENCV_CUDAWARPING
639 if (try_cuda && cuda::getCudaEnabledDeviceCount() > 0)
640 {
641 if (warp_type == "plane")
642 warper_creator = makePtr<cv::PlaneWarperGpu>();
643 else if (warp_type == "cylindrical")
644 warper_creator = makePtr<cv::CylindricalWarperGpu>();
645 else if (warp_type == "spherical")
646 warper_creator = makePtr<cv::SphericalWarperGpu>();
647 }
648 else
649 #endif
650 {
651 if (warp_type == "plane")
652 warper_creator = makePtr<cv::PlaneWarper>();
653 else if (warp_type == "affine")
654 warper_creator = makePtr<cv::AffineWarper>();
655 else if (warp_type == "cylindrical")
656 warper_creator = makePtr<cv::CylindricalWarper>();
657 else if (warp_type == "spherical")
658 warper_creator = makePtr<cv::SphericalWarper>();
659 else if (warp_type == "fisheye")
660 warper_creator = makePtr<cv::FisheyeWarper>();
661 else if (warp_type == "stereographic")
662 warper_creator = makePtr<cv::StereographicWarper>();
663 else if (warp_type == "compressedPlaneA2B1")
664 warper_creator = makePtr<cv::CompressedRectilinearWarper>(2.0f, 1.0f);
665 else if (warp_type == "compressedPlaneA1.5B1")
666 warper_creator = makePtr<cv::CompressedRectilinearWarper>(1.5f, 1.0f);
667 else if (warp_type == "compressedPlanePortraitA2B1")
668 warper_creator = makePtr<cv::CompressedRectilinearPortraitWarper>(2.0f, 1.0f);
669 else if (warp_type == "compressedPlanePortraitA1.5B1")
670 warper_creator = makePtr<cv::CompressedRectilinearPortraitWarper>(1.5f, 1.0f);
671 else if (warp_type == "paniniA2B1")
672 warper_creator = makePtr<cv::PaniniWarper>(2.0f, 1.0f);
673 else if (warp_type == "paniniA1.5B1")
674 warper_creator = makePtr<cv::PaniniWarper>(1.5f, 1.0f);
675 else if (warp_type == "paniniPortraitA2B1")
676 warper_creator = makePtr<cv::PaniniPortraitWarper>(2.0f, 1.0f);
677 else if (warp_type == "paniniPortraitA1.5B1")
678 warper_creator = makePtr<cv::PaniniPortraitWarper>(1.5f, 1.0f);
679 else if (warp_type == "mercator")
680 warper_creator = makePtr<cv::MercatorWarper>();
681 else if (warp_type == "transverseMercator")
682 warper_creator = makePtr<cv::TransverseMercatorWarper>();
683 }
684
685 if (!warper_creator)
686 {
687 cout << "Can't create the following warper '" << warp_type << "'\n";
688 return 1;
689 }
690
691 Ptr<RotationWarper> warper = warper_creator->create(static_cast<float>(warped_image_scale * seam_work_aspect));
692
693 for (int i = 0; i < num_images; ++i)
694 {
695 Mat_<float> K;
696 cameras[i].K().convertTo(K, CV_32F);
697 float swa = (float)seam_work_aspect;
698 K(0,0) *= swa; K(0,2) *= swa;
699 K(1,1) *= swa; K(1,2) *= swa;
700
701 corners[i] = warper->warp(images[i], K, cameras[i].R, INTER_LINEAR, BORDER_REFLECT, images_warped[i]);
702 sizes[i] = images_warped[i].size();
703
704 warper->warp(masks[i], K, cameras[i].R, INTER_NEAREST, BORDER_CONSTANT, masks_warped[i]);
705 }
706
707 vector<UMat> images_warped_f(num_images);
708 for (int i = 0; i < num_images; ++i)
709 images_warped[i].convertTo(images_warped_f[i], CV_32F);
710
711 LOGLN("Warping images, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
712
713 LOGLN("Compensating exposure...");
714 #if ENABLE_LOG
715 t = getTickCount();
716 #endif
717
718 Ptr<ExposureCompensator> compensator = ExposureCompensator::createDefault(expos_comp_type);
719 if (dynamic_cast<GainCompensator*>(compensator.get()))
720 {
721 GainCompensator* gcompensator = dynamic_cast<GainCompensator*>(compensator.get());
722 gcompensator->setNrFeeds(expos_comp_nr_feeds);
723 }
724
725 if (dynamic_cast<ChannelsCompensator*>(compensator.get()))
726 {
727 ChannelsCompensator* ccompensator = dynamic_cast<ChannelsCompensator*>(compensator.get());
728 ccompensator->setNrFeeds(expos_comp_nr_feeds);
729 }
730
731 if (dynamic_cast<BlocksCompensator*>(compensator.get()))
732 {
733 BlocksCompensator* bcompensator = dynamic_cast<BlocksCompensator*>(compensator.get());
734 bcompensator->setNrFeeds(expos_comp_nr_feeds);
735 bcompensator->setNrGainsFilteringIterations(expos_comp_nr_filtering);
736 bcompensator->setBlockSize(expos_comp_block_size, expos_comp_block_size);
737 }
738
739 compensator->feed(corners, images_warped, masks_warped);
740
741 LOGLN("Compensating exposure, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
742
743 LOGLN("Finding seams...");
744 #if ENABLE_LOG
745 t = getTickCount();
746 #endif
747
748 Ptr<SeamFinder> seam_finder;
749 if (seam_find_type == "no")
750 seam_finder = makePtr<detail::NoSeamFinder>();
751 else if (seam_find_type == "voronoi")
752 seam_finder = makePtr<detail::VoronoiSeamFinder>();
753 else if (seam_find_type == "gc_color")
754 {
755 #ifdef HAVE_OPENCV_CUDALEGACY
756 if (try_cuda && cuda::getCudaEnabledDeviceCount() > 0)
757 seam_finder = makePtr<detail::GraphCutSeamFinderGpu>(GraphCutSeamFinderBase::COST_COLOR);
758 else
759 #endif
760 seam_finder = makePtr<detail::GraphCutSeamFinder>(GraphCutSeamFinderBase::COST_COLOR);
761 }
762 else if (seam_find_type == "gc_colorgrad")
763 {
764 #ifdef HAVE_OPENCV_CUDALEGACY
765 if (try_cuda && cuda::getCudaEnabledDeviceCount() > 0)
766 seam_finder = makePtr<detail::GraphCutSeamFinderGpu>(GraphCutSeamFinderBase::COST_COLOR_GRAD);
767 else
768 #endif
769 seam_finder = makePtr<detail::GraphCutSeamFinder>(GraphCutSeamFinderBase::COST_COLOR_GRAD);
770 }
771 else if (seam_find_type == "dp_color")
772 seam_finder = makePtr<detail::DpSeamFinder>(DpSeamFinder::COLOR);
773 else if (seam_find_type == "dp_colorgrad")
774 seam_finder = makePtr<detail::DpSeamFinder>(DpSeamFinder::COLOR_GRAD);
775 if (!seam_finder)
776 {
777 cout << "Can't create the following seam finder '" << seam_find_type << "'\n";
778 return 1;
779 }
780
781 seam_finder->find(images_warped_f, corners, masks_warped);
782
783 LOGLN("Finding seams, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
784
785 // Release unused memory
786 images.clear();
787 images_warped.clear();
788 images_warped_f.clear();
789 masks.clear();
790
791 LOGLN("Compositing...");
792 #if ENABLE_LOG
793 t = getTickCount();
794 #endif
795
796 Mat img_warped, img_warped_s;
797 Mat dilated_mask, seam_mask, mask, mask_warped;
798 Ptr<Blender> blender;
799 Ptr<Timelapser> timelapser;
800 //double compose_seam_aspect = 1;
801 double compose_work_aspect = 1;
802
803 for (int img_idx = 0; img_idx < num_images; ++img_idx)
804 {
805 LOGLN("Compositing image #" << indices[img_idx]+1);
806
807 // Read image and resize it if necessary
808 full_img = imread(samples::findFile(img_names[img_idx]));
809 if (!is_compose_scale_set)
810 {
811 if (compose_megapix > 0)
812 compose_scale = min(1.0, sqrt(compose_megapix * 1e6 / full_img.size().area()));
813 is_compose_scale_set = true;
814
815 // Compute relative scales
816 //compose_seam_aspect = compose_scale / seam_scale;
817 compose_work_aspect = compose_scale / work_scale;
818
819 // Update warped image scale
820 warped_image_scale *= static_cast<float>(compose_work_aspect);
821 warper = warper_creator->create(warped_image_scale);
822
823 // Update corners and sizes
824 for (int i = 0; i < num_images; ++i)
825 {
826 // Update intrinsics
827 cameras[i].focal *= compose_work_aspect;
828 cameras[i].ppx *= compose_work_aspect;
829 cameras[i].ppy *= compose_work_aspect;
830
831 // Update corner and size
832 Size sz = full_img_sizes[i];
833 if (std::abs(compose_scale - 1) > 1e-1)
834 {
835 sz.width = cvRound(full_img_sizes[i].width * compose_scale);
836 sz.height = cvRound(full_img_sizes[i].height * compose_scale);
837 }
838
839 Mat K;
840 cameras[i].K().convertTo(K, CV_32F);
841 Rect roi = warper->warpRoi(sz, K, cameras[i].R);
842 corners[i] = roi.tl();
843 sizes[i] = roi.size();
844 }
845 }
846 if (abs(compose_scale - 1) > 1e-1)
847 resize(full_img, img, Size(), compose_scale, compose_scale, INTER_LINEAR_EXACT);
848 else
849 img = full_img;
850 full_img.release();
851 Size img_size = img.size();
852
853 Mat K;
854 cameras[img_idx].K().convertTo(K, CV_32F);
855
856 // Warp the current image
857 warper->warp(img, K, cameras[img_idx].R, INTER_LINEAR, BORDER_REFLECT, img_warped);
858
859 // Warp the current image mask
860 mask.create(img_size, CV_8U);
861 mask.setTo(Scalar::all(255));
862 warper->warp(mask, K, cameras[img_idx].R, INTER_NEAREST, BORDER_CONSTANT, mask_warped);
863
864 // Compensate exposure
865 compensator->apply(img_idx, corners[img_idx], img_warped, mask_warped);
866
867 img_warped.convertTo(img_warped_s, CV_16S);
868 img_warped.release();
869 img.release();
870 mask.release();
871
872 dilate(masks_warped[img_idx], dilated_mask, Mat());
873 resize(dilated_mask, seam_mask, mask_warped.size(), 0, 0, INTER_LINEAR_EXACT);
874 mask_warped = seam_mask & mask_warped;
875
876 if (!blender && !timelapse)
877 {
878 blender = Blender::createDefault(blend_type, try_cuda);
879 Size dst_sz = resultRoi(corners, sizes).size();
880 float blend_width = sqrt(static_cast<float>(dst_sz.area())) * blend_strength / 100.f;
881 if (blend_width < 1.f)
882 blender = Blender::createDefault(Blender::NO, try_cuda);
883 else if (blend_type == Blender::MULTI_BAND)
884 {
885 MultiBandBlender* mb = dynamic_cast<MultiBandBlender*>(blender.get());
886 mb->setNumBands(static_cast<int>(ceil(log(blend_width)/log(2.)) - 1.));
887 LOGLN("Multi-band blender, number of bands: " << mb->numBands());
888 }
889 else if (blend_type == Blender::FEATHER)
890 {
891 FeatherBlender* fb = dynamic_cast<FeatherBlender*>(blender.get());
892 fb->setSharpness(1.f/blend_width);
893 LOGLN("Feather blender, sharpness: " << fb->sharpness());
894 }
895 blender->prepare(corners, sizes);
896 }
897 else if (!timelapser && timelapse)
898 {
899 timelapser = Timelapser::createDefault(timelapse_type);
900 timelapser->initialize(corners, sizes);
901 }
902
903 // Blend the current image
904 if (timelapse)
905 {
906 timelapser->process(img_warped_s, Mat::ones(img_warped_s.size(), CV_8UC1), corners[img_idx]);
907 String fixedFileName;
908 size_t pos_s = String(img_names[img_idx]).find_last_of("/\\");
909 if (pos_s == String::npos)
910 {
911 fixedFileName = "fixed_" + img_names[img_idx];
912 }
913 else
914 {
915 fixedFileName = "fixed_" + String(img_names[img_idx]).substr(pos_s + 1, String(img_names[img_idx]).length() - pos_s);
916 }
917 imwrite(fixedFileName, timelapser->getDst());
918 }
919 else
920 {
921 blender->feed(img_warped_s, mask_warped, corners[img_idx]);
922 }
923 }
924
925 if (!timelapse)
926 {
927 Mat result, result_mask;
928 blender->blend(result, result_mask);
929
930 LOGLN("Compositing, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
931
932 imwrite(result_name, result);
933 }
934
935 LOGLN("Finished, total time: " << ((getTickCount() - app_start_time) / getTickFrequency()) << " sec");
936 return 0;
937 }
938