/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2008-2012, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef OPENCV_PHOTO_HPP
#define OPENCV_PHOTO_HPP

#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"

/**
@defgroup photo Computational Photography

This module includes photo processing algorithms.
@{
    @defgroup photo_inpaint Inpainting
    @defgroup photo_denoise Denoising
    @defgroup photo_hdr HDR imaging

This section describes high dynamic range imaging algorithms, namely tonemapping, exposure alignment,
camera calibration with multiple exposures, and exposure fusion.

    @defgroup photo_decolor Contrast Preserving Decolorization

Useful links:

http://www.cse.cuhk.edu.hk/leojia/projects/color2gray/index.html

    @defgroup photo_clone Seamless Cloning

Useful links:

https://www.learnopencv.com/seamless-cloning-using-opencv-python-cpp

    @defgroup photo_render Non-Photorealistic Rendering

Useful links:

http://www.inf.ufrgs.br/~eslgastal/DomainTransform

https://www.learnopencv.com/non-photorealistic-rendering-using-opencv-python-c/

    @defgroup photo_c C API
@}
  */

namespace cv
{

//! @addtogroup photo
//! @{

//! @addtogroup photo_inpaint
//! @{
//! the inpainting algorithm
enum
{
    INPAINT_NS    = 0, //!< Use Navier-Stokes based method
    INPAINT_TELEA = 1 //!< Use the algorithm proposed by Alexandru Telea @cite Telea04
};

/** @brief Restores the selected region in an image using the region neighborhood.

@param src Input 8-bit, 16-bit unsigned or 32-bit float 1-channel or 8-bit 3-channel image.
@param inpaintMask Inpainting mask, 8-bit 1-channel image. Non-zero pixels indicate the area that
needs to be inpainted.
@param dst Output image with the same size and type as src.
@param inpaintRadius Radius of a circular neighborhood of each point inpainted that is considered
by the algorithm.
@param flags Inpainting method that could be cv::INPAINT_NS or cv::INPAINT_TELEA

The function reconstructs the selected image area from the pixels near the area boundary. The
function may be used to remove dust and scratches from a scanned photo, or to remove undesirable
objects from still images or video. See <http://en.wikipedia.org/wiki/Inpainting> for more details.

@note
   -   An example using the inpainting technique can be found at
        opencv_source_code/samples/cpp/inpaint.cpp
   -   (Python) An example using the inpainting technique can be found at
        opencv_source_code/samples/python/inpaint.py
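
A minimal usage sketch (the file names are illustrative placeholders):
@code{.cpp}
    cv::Mat img  = cv::imread("damaged.jpg");                           // 8-bit 3-channel photo
    cv::Mat mask = cv::imread("defect_mask.png", cv::IMREAD_GRAYSCALE); // non-zero pixels mark the defects
    cv::Mat restored;
    cv::inpaint(img, mask, restored, 3, cv::INPAINT_TELEA);             // 3-pixel inpainting radius
@endcode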
 */
CV_EXPORTS_W void inpaint( InputArray src, InputArray inpaintMask,
        OutputArray dst, double inpaintRadius, int flags );

//! @} photo_inpaint

//! @addtogroup photo_denoise
//! @{

/** @brief Perform image denoising using Non-local Means Denoising algorithm
<http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/> with several computational
optimizations. The noise is expected to be Gaussian white noise.

@param src Input 8-bit 1-channel, 2-channel, 3-channel or 4-channel image.
@param dst Output image with the same size and type as src.
@param templateWindowSize Size in pixels of the template patch that is used to compute weights.
Should be odd. Recommended value 7 pixels
@param searchWindowSize Size in pixels of the window that is used to compute a weighted average for a
given pixel. Should be odd. Affects performance linearly: greater searchWindowSize - greater
denoising time. Recommended value 21 pixels
@param h Parameter regulating filter strength. A big h value perfectly removes noise but also
removes image details; a smaller h value preserves details but also preserves some noise

This function is expected to be applied to grayscale images. For colored images look at
fastNlMeansDenoisingColored. Advanced usage of this function can be manual denoising of a colored
image in different colorspaces. Such an approach is used in fastNlMeansDenoisingColored by converting
the image to the CIELAB colorspace and then separately denoising the L and AB components with different
h parameters.
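
A minimal usage sketch for a grayscale image (the file name is an illustrative placeholder):
@code{.cpp}
    cv::Mat noisy = cv::imread("noisy_gray.png", cv::IMREAD_GRAYSCALE);
    cv::Mat denoised;
    cv::fastNlMeansDenoising(noisy, denoised, 10.0f, 7, 21);  // h = 10 is stronger than the default 3
@endcode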
 */
CV_EXPORTS_W void fastNlMeansDenoising( InputArray src, OutputArray dst, float h = 3,
        int templateWindowSize = 7, int searchWindowSize = 21);

/** @brief Perform image denoising using Non-local Means Denoising algorithm
<http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/> with several computational
optimizations. The noise is expected to be Gaussian white noise.

@param src Input 8-bit or 16-bit (only with NORM_L1) 1-channel,
2-channel, 3-channel or 4-channel image.
@param dst Output image with the same size and type as src.
@param templateWindowSize Size in pixels of the template patch that is used to compute weights.
Should be odd. Recommended value 7 pixels
@param searchWindowSize Size in pixels of the window that is used to compute a weighted average for a
given pixel. Should be odd. Affects performance linearly: greater searchWindowSize - greater
denoising time. Recommended value 21 pixels
@param h Array of parameters regulating filter strength, either one
parameter applied to all channels or one per channel in dst. A big h value
perfectly removes noise but also removes image details; a smaller h
value preserves details but also preserves some noise
@param normType Type of norm used for weight calculation. Can be either NORM_L2 or NORM_L1

This function is expected to be applied to grayscale images. For colored images look at
fastNlMeansDenoisingColored. Advanced usage of this function can be manual denoising of a colored
image in different colorspaces. Such an approach is used in fastNlMeansDenoisingColored by converting
the image to the CIELAB colorspace and then separately denoising the L and AB components with different
h parameters.
 */
CV_EXPORTS_W void fastNlMeansDenoising( InputArray src, OutputArray dst,
                                        const std::vector<float>& h,
                                        int templateWindowSize = 7, int searchWindowSize = 21,
                                        int normType = NORM_L2);

/** @brief Modification of fastNlMeansDenoising function for colored images

@param src Input 8-bit 3-channel image.
@param dst Output image with the same size and type as src.
@param templateWindowSize Size in pixels of the template patch that is used to compute weights.
Should be odd. Recommended value 7 pixels
@param searchWindowSize Size in pixels of the window that is used to compute a weighted average for a
given pixel. Should be odd. Affects performance linearly: greater searchWindowSize - greater
denoising time. Recommended value 21 pixels
@param h Parameter regulating filter strength for the luminance component. A bigger h value perfectly
removes noise but also removes image details; a smaller h value preserves details but also preserves
some noise
@param hColor The same as h but for color components. For most images a value of 10
is enough to remove colored noise without distorting colors

The function converts the image to the CIELAB colorspace and then separately denoises the L and AB
components with the given h parameters using the fastNlMeansDenoising function.
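
A minimal usage sketch (the file name is an illustrative placeholder):
@code{.cpp}
    cv::Mat noisy = cv::imread("noisy_color.jpg");  // 8-bit 3-channel
    cv::Mat denoised;
    cv::fastNlMeansDenoisingColored(noisy, denoised, 10.0f /*h*/, 10.0f /*hColor*/, 7, 21);
@endcode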
 */
CV_EXPORTS_W void fastNlMeansDenoisingColored( InputArray src, OutputArray dst,
        float h = 3, float hColor = 3,
        int templateWindowSize = 7, int searchWindowSize = 21);

/** @brief Modification of fastNlMeansDenoising function for image sequences where consecutive images have been
captured over a short period of time, for example video. This version of the function is for grayscale
images or for manual manipulation with colorspaces. For more details see
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.131.6394>

@param srcImgs Input 8-bit 1-channel, 2-channel, 3-channel or
4-channel image sequence. All images should have the same type and
size.
@param imgToDenoiseIndex Index of the target image to denoise in the srcImgs sequence
@param temporalWindowSize Number of surrounding images to use for target image denoising. Should
be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to
imgToDenoiseIndex + temporalWindowSize / 2 from srcImgs will be used to denoise
srcImgs[imgToDenoiseIndex] image.
@param dst Output image with the same size and type as srcImgs images.
@param templateWindowSize Size in pixels of the template patch that is used to compute weights.
Should be odd. Recommended value 7 pixels
@param searchWindowSize Size in pixels of the window that is used to compute a weighted average for a
given pixel. Should be odd. Affects performance linearly: greater searchWindowSize - greater
denoising time. Recommended value 21 pixels
@param h Parameter regulating filter strength. A bigger h value
perfectly removes noise but also removes image details; a smaller h
value preserves details but also preserves some noise
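
A minimal usage sketch that denoises the middle frame of a five-frame grayscale sequence (the frame
file names are illustrative placeholders):
@code{.cpp}
    std::vector<cv::Mat> frames;
    for (int i = 0; i < 5; i++)
        frames.push_back(cv::imread(cv::format("frame_%02d.png", i), cv::IMREAD_GRAYSCALE));
    cv::Mat denoised;
    // denoise frames[2] using frames 1..3 (temporalWindowSize = 3)
    cv::fastNlMeansDenoisingMulti(frames, denoised, 2, 3, 4.0f, 7, 21);
@endcode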
 */
CV_EXPORTS_W void fastNlMeansDenoisingMulti( InputArrayOfArrays srcImgs, OutputArray dst,
        int imgToDenoiseIndex, int temporalWindowSize,
        float h = 3, int templateWindowSize = 7, int searchWindowSize = 21);

/** @brief Modification of fastNlMeansDenoising function for image sequences where consecutive images have been
captured over a short period of time, for example video. This version of the function is for grayscale
images or for manual manipulation with colorspaces. For more details see
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.131.6394>

@param srcImgs Input 8-bit or 16-bit (only with NORM_L1) 1-channel,
2-channel, 3-channel or 4-channel image sequence. All images should
have the same type and size.
@param imgToDenoiseIndex Index of the target image to denoise in the srcImgs sequence
@param temporalWindowSize Number of surrounding images to use for target image denoising. Should
be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to
imgToDenoiseIndex + temporalWindowSize / 2 from srcImgs will be used to denoise
srcImgs[imgToDenoiseIndex] image.
@param dst Output image with the same size and type as srcImgs images.
@param templateWindowSize Size in pixels of the template patch that is used to compute weights.
Should be odd. Recommended value 7 pixels
@param searchWindowSize Size in pixels of the window that is used to compute a weighted average for a
given pixel. Should be odd. Affects performance linearly: greater searchWindowSize - greater
denoising time. Recommended value 21 pixels
@param h Array of parameters regulating filter strength, either one
parameter applied to all channels or one per channel in dst. A big h value
perfectly removes noise but also removes image details; a smaller h
value preserves details but also preserves some noise
@param normType Type of norm used for weight calculation. Can be either NORM_L2 or NORM_L1
 */
CV_EXPORTS_W void fastNlMeansDenoisingMulti( InputArrayOfArrays srcImgs, OutputArray dst,
                                             int imgToDenoiseIndex, int temporalWindowSize,
                                             const std::vector<float>& h,
                                             int templateWindowSize = 7, int searchWindowSize = 21,
                                             int normType = NORM_L2);

/** @brief Modification of fastNlMeansDenoisingMulti function for colored image sequences

@param srcImgs Input 8-bit 3-channel image sequence. All images should have the same type and
size.
@param imgToDenoiseIndex Index of the target image to denoise in the srcImgs sequence
@param temporalWindowSize Number of surrounding images to use for target image denoising. Should
be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to
imgToDenoiseIndex + temporalWindowSize / 2 from srcImgs will be used to denoise
srcImgs[imgToDenoiseIndex] image.
@param dst Output image with the same size and type as srcImgs images.
@param templateWindowSize Size in pixels of the template patch that is used to compute weights.
Should be odd. Recommended value 7 pixels
@param searchWindowSize Size in pixels of the window that is used to compute a weighted average for a
given pixel. Should be odd. Affects performance linearly: greater searchWindowSize - greater
denoising time. Recommended value 21 pixels
@param h Parameter regulating filter strength for the luminance component. A bigger h value perfectly
removes noise but also removes image details; a smaller h value preserves details but also preserves
some noise.
@param hColor The same as h but for color components.

The function converts images to the CIELAB colorspace and then separately denoises the L and AB
components with the given h parameters using the fastNlMeansDenoisingMulti function.
 */
CV_EXPORTS_W void fastNlMeansDenoisingColoredMulti( InputArrayOfArrays srcImgs, OutputArray dst,
        int imgToDenoiseIndex, int temporalWindowSize,
        float h = 3, float hColor = 3,
        int templateWindowSize = 7, int searchWindowSize = 21);

/** @brief The primal-dual algorithm is an algorithm for solving special types of variational problems (that is,
finding a function to minimize some functional). Since image denoising, in particular, may be seen
as a variational problem, the primal-dual algorithm can be used to perform denoising, and this is
exactly what is implemented here.

It should be noted that this implementation was taken from the July 2013 blog entry
@cite MA13 , which also contained (slightly more general) ready-to-use source code in Python.
Subsequently, that code was rewritten in C++ using OpenCV by Vadim Pisarevsky at the end
of July 2013 and finally slightly adapted by later authors.

Although the thorough discussion and justification of the algorithm involved may be found in
@cite ChambolleEtAl, it might make sense to skim over it here, following @cite MA13 . To begin
with, we consider 1-byte gray-level images as functions from the rectangular domain of
pixels (it may be seen as the set
\f$\left\{(x,y)\in\mathbb{N}\times\mathbb{N}\mid 1\leq x\leq n,\;1\leq y\leq m\right\}\f$ for some
\f$m,\;n\in\mathbb{N}\f$) into \f$\{0,1,\dots,255\}\f$. We shall denote the noised images as \f$f_i\f$ and with
this view, given some image \f$x\f$ of the same size, we may measure how bad it is by the formula

\f[\left\|\left\|\nabla x\right\|\right\| + \lambda\sum_i\left\|\left\|x-f_i\right\|\right\|\f]

\f$\|\|\cdot\|\|\f$ here denotes the \f$L_2\f$-norm and, as you see, the first addend states that we want our
image to be smooth (ideally, having zero gradient, thus being constant) and the second states that
we want our result to be close to the observations we have. If we treat \f$x\f$ as a function, this is
exactly the functional that we seek to minimize, and here the primal-dual algorithm comes into play.

@param observations This array should contain one or more noised versions of the image that is to
be restored.
@param result Here the denoised image will be stored. There is no need to do pre-allocation of
storage space, as it will be automatically allocated, if necessary.
@param lambda Corresponds to \f$\lambda\f$ in the formulas above. As it is enlarged, the smooth
(blurred) images are treated more favorably than detailed (but maybe more noised) ones. Roughly
speaking, as it becomes smaller, the result will be more blurred but more severe outliers will be
removed.
@param niters Number of iterations that the algorithm will run. Of course, the more iterations the
better, but it is hard to quantitatively refine this statement, so just use the default and
increase it if the results are poor.
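
A minimal usage sketch with three noisy 8-bit grayscale observations of the same scene (the file
names are illustrative placeholders):
@code{.cpp}
    std::vector<cv::Mat> observations;
    observations.push_back(cv::imread("shot1.png", cv::IMREAD_GRAYSCALE));
    observations.push_back(cv::imread("shot2.png", cv::IMREAD_GRAYSCALE));
    observations.push_back(cv::imread("shot3.png", cv::IMREAD_GRAYSCALE));
    cv::Mat result;
    cv::denoise_TVL1(observations, result, 1.0, 30);
@endcode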
 */
CV_EXPORTS_W void denoise_TVL1(const std::vector<Mat>& observations,Mat& result, double lambda=1.0, int niters=30);

//! @} photo_denoise

//! @addtogroup photo_hdr
//! @{

enum { LDR_SIZE = 256 };

/** @brief Base class for tonemapping algorithms - tools that are used to map an HDR image to the 8-bit range.
 */
class CV_EXPORTS_W Tonemap : public Algorithm
{
public:
    /** @brief Tonemaps image

    @param src source image - CV_32FC3 Mat (32-bit float, 3 channels)
    @param dst destination image - CV_32FC3 Mat with values in [0, 1] range
     */
    CV_WRAP virtual void process(InputArray src, OutputArray dst) = 0;

    CV_WRAP virtual float getGamma() const = 0;
    CV_WRAP virtual void setGamma(float gamma) = 0;
};

/** @brief Creates simple linear mapper with gamma correction

@param gamma positive value for gamma correction. A gamma value of 1.0 implies no correction; a gamma
equal to 2.2f is suitable for most displays.
Generally gamma \> 1 brightens the image and gamma \< 1 darkens it.
 */
CV_EXPORTS_W Ptr<Tonemap> createTonemap(float gamma = 1.0f);

/** @brief Adaptive logarithmic mapping is a fast global tonemapping algorithm that scales the image in
the logarithmic domain.

Since it is a global operator, the same function is applied to all pixels; it is controlled by the
bias parameter.

Optional saturation enhancement is possible as described in @cite FL02 .

For more information see @cite DM03 .
 */
class CV_EXPORTS_W TonemapDrago : public Tonemap
{
public:

    CV_WRAP virtual float getSaturation() const = 0;
    CV_WRAP virtual void setSaturation(float saturation) = 0;

    CV_WRAP virtual float getBias() const = 0;
    CV_WRAP virtual void setBias(float bias) = 0;
};

/** @brief Creates TonemapDrago object

@param gamma gamma value for gamma correction. See createTonemap
@param saturation positive saturation enhancement value. 1.0 preserves saturation, values greater
than 1 increase saturation and values less than 1 decrease it.
@param bias value for bias function in [0, 1] range. Values from 0.7 to 0.9 usually give best
results, default value is 0.85.
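
A minimal usage sketch, assuming hdr is a CV_32FC3 image (for example the output of MergeDebevec):
@code{.cpp}
    cv::Ptr<cv::TonemapDrago> tonemap = cv::createTonemapDrago(2.2f, 1.0f, 0.85f);
    cv::Mat ldr;                       // tonemapped result with values in [0, 1]
    tonemap->process(hdr, ldr);
    ldr.convertTo(ldr, CV_8UC3, 255);  // scale to 8-bit for display or saving
@endcode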
 */
CV_EXPORTS_W Ptr<TonemapDrago> createTonemapDrago(float gamma = 1.0f, float saturation = 1.0f, float bias = 0.85f);


/** @brief This is a global tonemapping operator that models the human visual system.

The mapping function is controlled by an adaptation parameter that is computed using light adaptation
and color adaptation.

For more information see @cite RD05 .
 */
class CV_EXPORTS_W TonemapReinhard : public Tonemap
{
public:
    CV_WRAP virtual float getIntensity() const = 0;
    CV_WRAP virtual void setIntensity(float intensity) = 0;

    CV_WRAP virtual float getLightAdaptation() const = 0;
    CV_WRAP virtual void setLightAdaptation(float light_adapt) = 0;

    CV_WRAP virtual float getColorAdaptation() const = 0;
    CV_WRAP virtual void setColorAdaptation(float color_adapt) = 0;
};

/** @brief Creates TonemapReinhard object

@param gamma gamma value for gamma correction. See createTonemap
@param intensity result intensity in [-8, 8] range. Greater intensity produces brighter results.
@param light_adapt light adaptation in [0, 1] range. If 1, adaptation is based only on the pixel
value; if 0, it is global; otherwise it is a weighted mean of these two cases.
@param color_adapt chromatic adaptation in [0, 1] range. If 1, channels are treated independently;
if 0, the adaptation level is the same for each channel.
 */
CV_EXPORTS_W Ptr<TonemapReinhard>
createTonemapReinhard(float gamma = 1.0f, float intensity = 0.0f, float light_adapt = 1.0f, float color_adapt = 0.0f);

/** @brief This algorithm transforms the image to contrast using gradients on all levels of a Gaussian pyramid,
transforms the contrast values to HVS response and scales the response. After this the image is
reconstructed from the new contrast values.

For more information see @cite MM06 .
 */
class CV_EXPORTS_W TonemapMantiuk : public Tonemap
{
public:
    CV_WRAP virtual float getScale() const = 0;
    CV_WRAP virtual void setScale(float scale) = 0;

    CV_WRAP virtual float getSaturation() const = 0;
    CV_WRAP virtual void setSaturation(float saturation) = 0;
};

/** @brief Creates TonemapMantiuk object

@param gamma gamma value for gamma correction. See createTonemap
@param scale contrast scale factor. HVS response is multiplied by this parameter, thus compressing
dynamic range. Values from 0.6 to 0.9 produce best results.
@param saturation saturation enhancement value. See createTonemapDrago
 */
CV_EXPORTS_W Ptr<TonemapMantiuk>
createTonemapMantiuk(float gamma = 1.0f, float scale = 0.7f, float saturation = 1.0f);

/** @brief The base class for algorithms that align images of the same scene with different exposures
 */
class CV_EXPORTS_W AlignExposures : public Algorithm
{
public:
    /** @brief Aligns images

    @param src vector of input images
    @param dst vector of aligned images
    @param times vector of exposure time values for each image
    @param response 256x1 matrix with inverse camera response function for each pixel value; it should
    have the same number of channels as images.
     */
    CV_WRAP virtual void process(InputArrayOfArrays src, std::vector<Mat>& dst,
                                 InputArray times, InputArray response) = 0;
};

/** @brief This algorithm converts images to median threshold bitmaps (1 for pixels brighter than median
luminance and 0 otherwise) and then aligns the resulting bitmaps using bit operations.

It is invariant to exposure, so exposure values and camera response are not necessary.

In this implementation new image regions are filled with zeros.

For more information see @cite GW03 .
 */
class CV_EXPORTS_W AlignMTB : public AlignExposures
{
public:
    CV_WRAP virtual void process(InputArrayOfArrays src, std::vector<Mat>& dst,
                                 InputArray times, InputArray response) CV_OVERRIDE = 0;

    /** @brief Short version of process that doesn't take extra arguments.

    @param src vector of input images
    @param dst vector of aligned images
     */
    CV_WRAP virtual void process(InputArrayOfArrays src, std::vector<Mat>& dst) = 0;

    /** @brief Calculates shift between two images, i.e. how to shift the second image to make it correspond
    with the first.

    @param img0 first image
    @param img1 second image
     */
    CV_WRAP virtual Point calculateShift(InputArray img0, InputArray img1) = 0;
    /** @brief Helper function that shifts the Mat, filling new regions with zeros.

    @param src input image
    @param dst result image
    @param shift shift value
     */
    CV_WRAP virtual void shiftMat(InputArray src, OutputArray dst, const Point shift) = 0;
    /** @brief Computes the median threshold and exclude bitmaps of the given image.

    @param img input image
    @param tb median threshold bitmap
    @param eb exclude bitmap
     */
    CV_WRAP virtual void computeBitmaps(InputArray img, OutputArray tb, OutputArray eb) = 0;

    CV_WRAP virtual int getMaxBits() const = 0;
    CV_WRAP virtual void setMaxBits(int max_bits) = 0;

    CV_WRAP virtual int getExcludeRange() const = 0;
    CV_WRAP virtual void setExcludeRange(int exclude_range) = 0;

    CV_WRAP virtual bool getCut() const = 0;
    CV_WRAP virtual void setCut(bool value) = 0;
};

/** @brief Creates AlignMTB object

@param max_bits logarithm to the base 2 of maximal shift in each dimension. Values of 5 and 6 are
usually good enough (31 and 63 pixels shift respectively).
@param exclude_range range for exclusion bitmap that is constructed to suppress noise around the
median value.
@param cut if true, cuts images; otherwise fills the new regions with zeros.
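
A minimal usage sketch, assuming images is a std::vector<cv::Mat> holding 8-bit exposures of the
same scene:
@code{.cpp}
    cv::Ptr<cv::AlignMTB> align = cv::createAlignMTB();
    align->process(images, images);  // aligns the exposures in place
@endcode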
 */
CV_EXPORTS_W Ptr<AlignMTB> createAlignMTB(int max_bits = 6, int exclude_range = 4, bool cut = true);

/** @brief The base class for camera response calibration algorithms.
 */
class CV_EXPORTS_W CalibrateCRF : public Algorithm
{
public:
    /** @brief Recovers inverse camera response.

    @param src vector of input images
    @param dst 256x1 matrix with inverse camera response function
    @param times vector of exposure time values for each image
     */
    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst, InputArray times) = 0;
};

/** @brief The inverse camera response function is extracted for each brightness value by minimizing an objective
function as a linear system. The objective function is constructed using pixel values at the same position
in all images; an extra term is added to make the result smoother.

For more information see @cite DM97 .
 */
class CV_EXPORTS_W CalibrateDebevec : public CalibrateCRF
{
public:
    CV_WRAP virtual float getLambda() const = 0;
    CV_WRAP virtual void setLambda(float lambda) = 0;

    CV_WRAP virtual int getSamples() const = 0;
    CV_WRAP virtual void setSamples(int samples) = 0;

    CV_WRAP virtual bool getRandom() const = 0;
    CV_WRAP virtual void setRandom(bool random) = 0;
};

/** @brief Creates CalibrateDebevec object

@param samples number of pixel locations to use
@param lambda smoothness term weight. Greater values produce smoother results, but can alter the
response.
@param random if true, sample pixel locations are chosen at random; otherwise they form a
rectangular grid.
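
A minimal calibration sketch, assuming images is a std::vector<cv::Mat> of 8-bit exposures and
times is a std::vector<float> with the corresponding exposure times in seconds:
@code{.cpp}
    cv::Ptr<cv::CalibrateDebevec> calibrate = cv::createCalibrateDebevec();
    cv::Mat response;  // 256x1 inverse camera response function
    calibrate->process(images, response, times);
@endcode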
 */
CV_EXPORTS_W Ptr<CalibrateDebevec> createCalibrateDebevec(int samples = 70, float lambda = 10.0f, bool random = false);

/** @brief The inverse camera response function is extracted for each brightness value by minimizing an objective
function as a linear system. This algorithm uses all image pixels.

For more information see @cite RB99 .
 */
class CV_EXPORTS_W CalibrateRobertson : public CalibrateCRF
{
public:
    CV_WRAP virtual int getMaxIter() const = 0;
    CV_WRAP virtual void setMaxIter(int max_iter) = 0;

    CV_WRAP virtual float getThreshold() const = 0;
    CV_WRAP virtual void setThreshold(float threshold) = 0;

    CV_WRAP virtual Mat getRadiance() const = 0;
};

/** @brief Creates CalibrateRobertson object

@param max_iter maximal number of Gauss-Seidel solver iterations.
@param threshold target difference between results of two successive steps of the minimization.
 */
CV_EXPORTS_W Ptr<CalibrateRobertson> createCalibrateRobertson(int max_iter = 30, float threshold = 0.01f);

/** @brief The base class for algorithms that can merge an exposure sequence into a single image.
 */
class CV_EXPORTS_W MergeExposures : public Algorithm
{
public:
    /** @brief Merges images.

    @param src vector of input images
    @param dst result image
    @param times vector of exposure time values for each image
    @param response 256x1 matrix with inverse camera response function for each pixel value; it should
    have the same number of channels as images.
     */
    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst,
                                 InputArray times, InputArray response) = 0;
};

/** @brief The resulting HDR image is calculated as a weighted average of the exposures, considering exposure
values and camera response.

For more information see @cite DM97 .
 */
class CV_EXPORTS_W MergeDebevec : public MergeExposures
{
public:
    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst,
                                 InputArray times, InputArray response) CV_OVERRIDE = 0;
    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst, InputArray times) = 0;
};

/** @brief Creates MergeDebevec object
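
A minimal HDR merging sketch, assuming images, times and response are the same variables as in the
CalibrateDebevec sketch above:
@code{.cpp}
    cv::Ptr<cv::MergeDebevec> merge = cv::createMergeDebevec();
    cv::Mat hdr;  // CV_32FC3 radiance map, suitable input for a Tonemap implementation
    merge->process(images, hdr, times, response);
@endcode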
 */
CV_EXPORTS_W Ptr<MergeDebevec> createMergeDebevec();

/** @brief Pixels are weighted using contrast, saturation and well-exposedness measures, then images are
combined using Laplacian pyramids.

The resulting image weight is constructed as weighted average of contrast, saturation and
well-exposedness measures.

The resulting image doesn't require tonemapping and can be converted to 8-bit image by multiplying
by 255, but it's recommended to apply gamma correction and/or linear tonemapping.

For more information see @cite MK07 .
 */
class CV_EXPORTS_W MergeMertens : public MergeExposures
{
public:
    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst,
                                 InputArray times, InputArray response) CV_OVERRIDE = 0;
    /** @brief Short version of process that doesn't take extra arguments.

    @param src vector of input images
    @param dst result image
     */
    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst) = 0;

    CV_WRAP virtual float getContrastWeight() const = 0;
    CV_WRAP virtual void setContrastWeight(float contrast_weight) = 0;

    CV_WRAP virtual float getSaturationWeight() const = 0;
    CV_WRAP virtual void setSaturationWeight(float saturation_weight) = 0;

    CV_WRAP virtual float getExposureWeight() const = 0;
    CV_WRAP virtual void setExposureWeight(float exposure_weight) = 0;
};

/** @brief Creates MergeMertens object

@param contrast_weight contrast measure weight. See MergeMertens.
@param saturation_weight saturation measure weight
@param exposure_weight well-exposedness measure weight
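
A minimal exposure fusion sketch, assuming images is a std::vector<cv::Mat> of 8-bit exposures
(no exposure times or camera response are needed):
@code{.cpp}
    cv::Ptr<cv::MergeMertens> merge = cv::createMergeMertens();
    cv::Mat fusion;                          // CV_32FC3 result with values roughly in [0, 1]
    merge->process(images, fusion);
    fusion.convertTo(fusion, CV_8UC3, 255);  // convert for display or saving
@endcode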
 */
CV_EXPORTS_W Ptr<MergeMertens>
createMergeMertens(float contrast_weight = 1.0f, float saturation_weight = 1.0f, float exposure_weight = 0.0f);

/** @brief The resulting HDR image is calculated as a weighted average of the exposures, considering exposure
values and camera response.

For more information see @cite RB99 .
 */
class CV_EXPORTS_W MergeRobertson : public MergeExposures
{
public:
    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst,
                                 InputArray times, InputArray response) CV_OVERRIDE = 0;
    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst, InputArray times) = 0;
};

/** @brief Creates MergeRobertson object
 */
CV_EXPORTS_W Ptr<MergeRobertson> createMergeRobertson();

//! @} photo_hdr

//! @addtogroup photo_decolor
//! @{

/** @brief Transforms a color image to a grayscale image. It is a basic tool in digital printing, stylized
black-and-white photograph rendering, and in many single-channel image processing applications
@cite CL12 .

@param src Input 8-bit 3-channel image.
@param grayscale Output 8-bit 1-channel image.
@param color_boost Output 8-bit 3-channel image.

This function is to be applied on color images.
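
A minimal usage sketch (the file name is an illustrative placeholder):
@code{.cpp}
    cv::Mat color = cv::imread("photo.jpg");
    cv::Mat gray, boost;
    cv::decolor(color, gray, boost);  // gray: contrast-preserving grayscale, boost: color-boosted image
@endcode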
 */
CV_EXPORTS_W void decolor( InputArray src, OutputArray grayscale, OutputArray color_boost);

//! @} photo_decolor

//! @addtogroup photo_clone
//! @{


//! seamlessClone algorithm flags
enum
{
    /** The power of the method is fully expressed when inserting objects with complex outlines into a new background*/
    NORMAL_CLONE = 1,
    /** The classic method, color-based selection and alpha masking, might be time-consuming and often leaves an undesirable
    halo. Seamless cloning, even averaged with the original image, is not effective. Mixed seamless cloning based on a loose selection proves effective.*/
    MIXED_CLONE  = 2,
    /** Monochrome transfer allows the user to easily replace certain features of one object by alternative features.*/
    MONOCHROME_TRANSFER = 3};


/** @example samples/cpp/tutorial_code/photo/seamless_cloning/cloning_demo.cpp
An example using seamlessClone function
*/
/** @brief Image editing tasks concern either global changes (color/intensity corrections, filters,
deformations) or local changes restricted to a selection. Here we are interested in achieving local
changes, ones that are restricted to a manually selected region (ROI), in a seamless and effortless
manner. The extent of the changes ranges from slight distortions to complete replacement by novel
content @cite PM03 .

@param src Input 8-bit 3-channel image.
@param dst Input 8-bit 3-channel image.
@param mask Input 8-bit 1 or 3-channel image.
@param p Point in dst image where object is placed.
@param blend Output image with the same size and type as dst.
@param flags Cloning method that could be cv::NORMAL_CLONE, cv::MIXED_CLONE or cv::MONOCHROME_TRANSFER
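
A minimal usage sketch placing the object at the center of the destination image (the file names
are illustrative placeholders):
@code{.cpp}
    cv::Mat src  = cv::imread("object.jpg");       // image containing the object to insert
    cv::Mat dst  = cv::imread("background.jpg");   // destination image
    cv::Mat mask = cv::imread("object_mask.png");  // white over the object, black elsewhere
    cv::Point center(dst.cols / 2, dst.rows / 2);
    cv::Mat blend;
    cv::seamlessClone(src, dst, mask, center, blend, cv::NORMAL_CLONE);
@endcode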
 */
CV_EXPORTS_W void seamlessClone( InputArray src, InputArray dst, InputArray mask, Point p,
        OutputArray blend, int flags);

/** @brief Given an original color image, two differently colored versions of this image can be mixed
seamlessly.

@param src Input 8-bit 3-channel image.
@param mask Input 8-bit 1 or 3-channel image.
@param dst Output image with the same size and type as src.
@param red_mul R-channel multiply factor.
@param green_mul G-channel multiply factor.
@param blue_mul B-channel multiply factor.

Multiplication factors are between 0.5 and 2.5.
 */
CV_EXPORTS_W void colorChange(InputArray src, InputArray mask, OutputArray dst, float red_mul = 1.0f,
        float green_mul = 1.0f, float blue_mul = 1.0f);

/** @brief Applying an appropriate non-linear transformation to the gradient field inside the selection and
then integrating back with a Poisson solver locally modifies the apparent illumination of an image.

@param src Input 8-bit 3-channel image.
@param mask Input 8-bit 1 or 3-channel image.
@param dst Output image with the same size and type as src.
@param alpha Value ranges between 0-2.
@param beta Value ranges between 0-2.

This is useful to highlight under-exposed foreground objects or to reduce specular reflections.
 */
CV_EXPORTS_W void illuminationChange(InputArray src, InputArray mask, OutputArray dst,
        float alpha = 0.2f, float beta = 0.4f);

/** @brief By retaining only the gradients at edge locations, before integrating with the Poisson solver, one
washes out the texture of the selected region, giving its contents a flat aspect. Here the Canny edge
detector is used.

@param src Input 8-bit 3-channel image.
@param mask Input 8-bit 1 or 3-channel image.
@param dst Output image with the same size and type as src.
@param low_threshold %Range from 0 to 100.
@param high_threshold Value \> 100.
@param kernel_size The size of the Sobel kernel to be used.

@note
The algorithm assumes that the color of the source image is close to that of the destination. This
assumption means that when the colors don't match, the source image color gets tinted toward the
color of the destination image.
 */
CV_EXPORTS_W void textureFlattening(InputArray src, InputArray mask, OutputArray dst,
        float low_threshold = 30, float high_threshold = 45,
        int kernel_size = 3);

//! @} photo_clone

//! @addtogroup photo_render
//! @{

//! Edge preserving filters
enum
{
    RECURS_FILTER = 1, //!< Recursive Filtering
    NORMCONV_FILTER = 2 //!< Normalized Convolution Filtering
};

/** @brief Filtering is the fundamental operation in image and video processing. Edge-preserving smoothing
filters are used in many different applications @cite EM11 .

@param src Input 8-bit 3-channel image.
@param dst Output 8-bit 3-channel image.
@param flags Edge preserving filters: cv::RECURS_FILTER or cv::NORMCONV_FILTER
@param sigma_s %Range between 0 and 200.
@param sigma_r %Range between 0 and 1.
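
A minimal usage sketch (the file name is an illustrative placeholder):
@code{.cpp}
    cv::Mat img = cv::imread("photo.jpg");
    cv::Mat smoothed;
    cv::edgePreservingFilter(img, smoothed, cv::RECURS_FILTER, 60.0f, 0.4f);
@endcode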
 */
CV_EXPORTS_W void edgePreservingFilter(InputArray src, OutputArray dst, int flags = 1,
        float sigma_s = 60, float sigma_r = 0.4f);

/** @brief This filter enhances the details of a particular image.

@param src Input 8-bit 3-channel image.
@param dst Output image with the same size and type as src.
@param sigma_s %Range between 0 and 200.
@param sigma_r %Range between 0 and 1.
 */
CV_EXPORTS_W void detailEnhance(InputArray src, OutputArray dst, float sigma_s = 10,
        float sigma_r = 0.15f);

/** @example samples/cpp/tutorial_code/photo/non_photorealistic_rendering/npr_demo.cpp
An example using non-photorealistic line drawing functions
*/
/** @brief Pencil-like non-photorealistic line drawing

@param src Input 8-bit 3-channel image.
@param dst1 Output 8-bit 1-channel image.
@param dst2 Output image with the same size and type as src.
@param sigma_s %Range between 0 and 200.
@param sigma_r %Range between 0 and 1.
@param shade_factor %Range between 0 and 0.1.
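
A minimal usage sketch (the file name is an illustrative placeholder):
@code{.cpp}
    cv::Mat img = cv::imread("photo.jpg");
    cv::Mat sketchGray, sketchColor;
    cv::pencilSketch(img, sketchGray, sketchColor, 60.0f, 0.07f, 0.02f);
@endcode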
 */
CV_EXPORTS_W void pencilSketch(InputArray src, OutputArray dst1, OutputArray dst2,
        float sigma_s = 60, float sigma_r = 0.07f, float shade_factor = 0.02f);

/** @brief Stylization aims to produce digital imagery with a wide variety of effects not focused on
photorealism. Edge-aware filters are ideal for stylization, as they can abstract regions of low
contrast while preserving, or enhancing, high-contrast features.

@param src Input 8-bit 3-channel image.
@param dst Output image with the same size and type as src.
@param sigma_s %Range between 0 and 200.
@param sigma_r %Range between 0 and 1.
 */
CV_EXPORTS_W void stylization(InputArray src, OutputArray dst, float sigma_s = 60,
        float sigma_r = 0.45f);

//! @} photo_render

//! @} photo

} // cv

#endif