1 /*
2     Scan Tailor - Interactive post-processing tool for scanned pages.
3     Copyright (C)  Joseph Artsimovich <joseph.artsimovich@gmail.com>
4 
5     This program is free software: you can redistribute it and/or modify
6     it under the terms of the GNU General Public License as published by
7     the Free Software Foundation, either version 3 of the License, or
8     (at your option) any later version.
9 
10     This program is distributed in the hope that it will be useful,
11     but WITHOUT ANY WARRANTY; without even the implied warranty of
12     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13     GNU General Public License for more details.
14 
15     You should have received a copy of the GNU General Public License
16     along with this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18 
19 #include "OutputGenerator.h"
20 #include <BlackOnWhiteEstimator.h>
21 #include <Despeckle.h>
22 #include <imageproc/BackgroundColorCalculator.h>
23 #include <imageproc/ColorSegmenter.h>
24 #include <imageproc/ColorTable.h>
25 #include <imageproc/ImageCombination.h>
26 #include <QDebug>
27 #include <QPainter>
28 #include <QPainterPath>
29 #include <QtCore/QSettings>
30 #include <boost/bind.hpp>
31 #include "DebugImages.h"
32 #include "Dpm.h"
33 #include "EstimateBackground.h"
34 #include "FillColorProperty.h"
35 #include "FilterData.h"
36 #include "RenderParams.h"
37 #include "TaskStatus.h"
38 #include "Utils.h"
39 #include "dewarping/CylindricalSurfaceDewarper.h"
40 #include "dewarping/DewarpingPointMapper.h"
41 #include "dewarping/DistortionModelBuilder.h"
42 #include "dewarping/RasterDewarper.h"
43 #include "dewarping/TextLineTracer.h"
44 #include "dewarping/TopBottomEdgeTracer.h"
45 #include "imageproc/AdjustBrightness.h"
46 #include "imageproc/Binarize.h"
47 #include "imageproc/ConnCompEraser.h"
48 #include "imageproc/ConnectivityMap.h"
49 #include "imageproc/Constants.h"
50 #include "imageproc/DrawOver.h"
51 #include "imageproc/GrayRasterOp.h"
52 #include "imageproc/Grayscale.h"
53 #include "imageproc/InfluenceMap.h"
54 #include "imageproc/Morphology.h"
55 #include "imageproc/OrthogonalRotation.h"
56 #include "imageproc/PolygonRasterizer.h"
57 #include "imageproc/PolynomialSurface.h"
58 #include "imageproc/RasterOp.h"
59 #include "imageproc/SavGolFilter.h"
60 #include "imageproc/Scale.h"
61 #include "imageproc/SeedFill.h"
62 #include "imageproc/Transform.h"
63 
64 using namespace imageproc;
65 using namespace dewarping;
66 
67 namespace output {
68 namespace {
struct RaiseAboveBackground {
  // Rescales a source gray level against its estimated background level,
  // so that the background itself maps to pure white (0xff).
  // src: original pixel; dst: background estimate (expected dst >= src).
  static uint8_t transform(uint8_t src, uint8_t dst) {
    if (dst <= src) {
      // At or above the background level -> white.
      return 0xff;
    }
    // Scale src into the 0..255 range relative to the background,
    // rounding to nearest by adding half the divisor.
    const unsigned scaled = (unsigned(src) * 255 + unsigned(dst) / 2) / unsigned(dst);

    return static_cast<uint8_t>(scaled);
  }
};
82 
struct CombineInverted {
  // Combines two gray levels as if both were inverted, multiplied
  // together, and the product inverted back.
  // src: eroded value; dst: dilated value.
  static uint8_t transform(uint8_t src, uint8_t dst) {
    const unsigned inverted_product = (255u - dst) * src / 255u;

    return static_cast<uint8_t>(255u - inverted_product);
  }
};
92 
93 
template <typename PixelType>
PixelType reserveBlackAndWhite(PixelType color);

// RGB32/ARGB32 variant: nudges pure black and pure white one step off
// the extremes, keeping those two values reserved. The alpha byte is
// forced to opaque for the remapped values.
template <>
uint32_t reserveBlackAndWhite(uint32_t color) {
  const uint32_t rgb = color & 0x00FFFFFF;
  if (rgb == 0x00000000) {
    return 0xFF010101;  // almost-black, opaque
  }
  if (rgb == 0x00FFFFFF) {
    return 0xFFFEFEFE;  // almost-white, opaque
  }
  return color;
}

// Grayscale (Indexed8) variant of the same remapping.
template <>
uint8_t reserveBlackAndWhite(uint8_t color) {
  if (color == 0x00) {
    return 0x01;
  }
  if (color == 0xFF) {
    return 0xFE;
  }
  return color;
}
121 
122 template <typename PixelType>
reserveBlackAndWhite(QImage & img)123 void reserveBlackAndWhite(QImage& img) {
124   const int width = img.width();
125   const int height = img.height();
126 
127   auto* image_line = reinterpret_cast<PixelType*>(img.bits());
128   const int image_stride = img.bytesPerLine() / sizeof(PixelType);
129 
130   for (int y = 0; y < height; ++y) {
131     for (int x = 0; x < width; ++x) {
132       image_line[x] = reserveBlackAndWhite<PixelType>(image_line[x]);
133     }
134     image_line += image_stride;
135   }
136 }
137 
reserveBlackAndWhite(QImage & img)138 void reserveBlackAndWhite(QImage& img) {
139   switch (img.format()) {
140     case QImage::Format_Indexed8:
141       reserveBlackAndWhite<uint8_t>(img);
142       break;
143     case QImage::Format_RGB32:
144     case QImage::Format_ARGB32:
145       reserveBlackAndWhite<uint32_t>(img);
146       break;
147     default:
148       throw std::invalid_argument("reserveBlackAndWhite: wrong image format.");
149       ;
150   }
151 }
152 
153 template <typename PixelType>
reserveBlackAndWhite(QImage & img,const BinaryImage & mask)154 void reserveBlackAndWhite(QImage& img, const BinaryImage& mask) {
155   const int width = img.width();
156   const int height = img.height();
157 
158   auto* image_line = reinterpret_cast<PixelType*>(img.bits());
159   const int image_stride = img.bytesPerLine() / sizeof(PixelType);
160   const uint32_t* mask_line = mask.data();
161   const int mask_stride = mask.wordsPerLine();
162   const uint32_t msb = uint32_t(1) << 31;
163 
164   for (int y = 0; y < height; ++y) {
165     for (int x = 0; x < width; ++x) {
166       if (mask_line[x >> 5] & (msb >> (x & 31))) {
167         image_line[x] = reserveBlackAndWhite<PixelType>(image_line[x]);
168       }
169     }
170     image_line += image_stride;
171     mask_line += mask_stride;
172   }
173 }
174 
reserveBlackAndWhite(QImage & img,const BinaryImage & mask)175 void reserveBlackAndWhite(QImage& img, const BinaryImage& mask) {
176   switch (img.format()) {
177     case QImage::Format_Indexed8:
178       reserveBlackAndWhite<uint8_t>(img, mask);
179       break;
180     case QImage::Format_RGB32:
181     case QImage::Format_ARGB32:
182       reserveBlackAndWhite<uint32_t>(img, mask);
183       break;
184     default:
185       throw std::invalid_argument("reserveBlackAndWhite: wrong image format.");
186       ;
187   }
188 }
189 
190 template <typename MixedPixel>
fillExcept(QImage & image,const BinaryImage & bw_mask,const QColor & color)191 void fillExcept(QImage& image, const BinaryImage& bw_mask, const QColor& color) {
192   auto* image_line = reinterpret_cast<MixedPixel*>(image.bits());
193   const int image_stride = image.bytesPerLine() / sizeof(MixedPixel);
194   const uint32_t* bw_mask_line = bw_mask.data();
195   const int bw_mask_stride = bw_mask.wordsPerLine();
196   const int width = image.width();
197   const int height = image.height();
198   const uint32_t msb = uint32_t(1) << 31;
199   const auto fillingPixel = static_cast<MixedPixel>(color.rgba());
200 
201   for (int y = 0; y < height; ++y) {
202     for (int x = 0; x < width; ++x) {
203       if (!(bw_mask_line[x >> 5] & (msb >> (x & 31)))) {
204         image_line[x] = fillingPixel;
205       }
206     }
207     image_line += image_stride;
208     bw_mask_line += bw_mask_stride;
209   }
210 }
211 
// Fills every pixel of a binary image that is NOT covered by bw_mask
// with the given binary color; pixels under the mask are left untouched.
void fillExcept(BinaryImage& image, const BinaryImage& bw_mask, const BWColor color) {
  uint32_t* image_line = image.data();
  const int image_stride = image.wordsPerLine();
  const uint32_t* bw_mask_line = bw_mask.data();
  const int bw_mask_stride = bw_mask.wordsPerLine();
  const int width = image.width();
  const int height = image.height();
  // Pixels are packed 32 per word, most significant bit first.
  const uint32_t msb = uint32_t(1) << 31;

  for (int y = 0; y < height; ++y) {
    for (int x = 0; x < width; ++x) {
      if (!(bw_mask_line[x >> 5] & (msb >> (x & 31)))) {
        if (color == BLACK) {
          image_line[x >> 5] |= (msb >> (x & 31));  // set bit -> black
        } else {
          image_line[x >> 5] &= ~(msb >> (x & 31));  // clear bit -> white
        }
      }
    }
    image_line += image_stride;
    bw_mask_line += bw_mask_stride;
  }
}
235 
removeAutoPictureZones(ZoneSet & picture_zones)236 void removeAutoPictureZones(ZoneSet& picture_zones) {
237   for (auto it = picture_zones.begin(); it != picture_zones.end();) {
238     const Zone& zone = *it;
239     if (zone.properties().locateOrDefault<ZoneCategoryProperty>()->zone_category()
240         == ZoneCategoryProperty::RECTANGULAR_OUTLINE) {
241       it = picture_zones.erase(it);
242     } else {
243       ++it;
244     }
245   }
246 }
247 
updateBlackOnWhite(const FilterData & input,const PageId & pageId,const intrusive_ptr<Settings> & settings)248 bool updateBlackOnWhite(const FilterData& input, const PageId& pageId, const intrusive_ptr<Settings>& settings) {
249   QSettings appSettings;
250   Params params = settings->getParams(pageId);
251   if ((appSettings.value("settings/blackOnWhiteDetection", true).toBool()
252        && appSettings.value("settings/blackOnWhiteDetectionAtOutput", true).toBool())
253       && !settings->getOutputProcessingParams(pageId).isBlackOnWhiteSetManually()) {
254     if (params.isBlackOnWhite() != input.isBlackOnWhite()) {
255       params.setBlackOnWhite(input.isBlackOnWhite());
256       settings->setParams(pageId, params);
257     }
258     return input.isBlackOnWhite();
259   } else {
260     return params.isBlackOnWhite();
261   }
262 }
263 
getBackgroundColorCalculator(const PageId & pageId,const intrusive_ptr<Settings> & settings)264 BackgroundColorCalculator getBackgroundColorCalculator(const PageId& pageId, const intrusive_ptr<Settings>& settings) {
265   QSettings appSettings;
266   if (!(appSettings.value("settings/blackOnWhiteDetection", true).toBool()
267         && appSettings.value("settings/blackOnWhiteDetectionAtOutput", true).toBool())
268       && !settings->getOutputProcessingParams(pageId).isBlackOnWhiteSetManually()) {
269     return BackgroundColorCalculator();
270   } else {
271     return BackgroundColorCalculator(false);
272   }
273 }
274 }  // namespace
275 
/**
 * Constructs the generator, capturing all rendering parameters and
 * deriving the output geometry from the page transformation.
 *
 * m_outRect is the full output image rectangle (always anchored at 0,0);
 * m_contentRect is the content area mapped into output coordinates.
 */
OutputGenerator::OutputGenerator(const Dpi& dpi,
                                 const ColorParams& color_params,
                                 const SplittingOptions& splitting_options,
                                 const PictureShapeOptions& picture_shape_options,
                                 const DewarpingOptions& dewarping_options,
                                 const OutputProcessingParams& output_processing_params,
                                 const double despeckle_level,
                                 const ImageTransformation& xform,
                                 const QPolygonF& content_rect_phys)
    : m_dpi(dpi),
      m_colorParams(color_params),
      m_splittingOptions(splitting_options),
      m_pictureShapeOptions(picture_shape_options),
      m_dewarpingOptions(dewarping_options),
      m_outputProcessingParams(output_processing_params),
      m_xform(xform),
      m_outRect(xform.resultingRect().toRect()),
      m_contentRect(xform.transform().map(content_rect_phys).boundingRect().toRect()),
      m_despeckleLevel(despeckle_level) {
  assert(m_outRect.topLeft() == QPoint(0, 0));

  if (!m_contentRect.isEmpty()) {
    // prevents a crash due to round error on transforming virtual coordinates to output image coordinates
    // when m_contentRect coordinates could exceed m_outRect ones by 1 px
    m_contentRect = m_contentRect.intersected(m_outRect);
    // Note that QRect::contains(<empty rect>) always returns false, so we don't use it here.
    assert(m_outRect.contains(m_contentRect.topLeft()) && m_outRect.contains(m_contentRect.bottomRight()));
  }
}
305 
process(const TaskStatus & status,const FilterData & input,ZoneSet & picture_zones,const ZoneSet & fill_zones,dewarping::DistortionModel & distortion_model,const DepthPerception & depth_perception,imageproc::BinaryImage * auto_picture_mask,imageproc::BinaryImage * speckles_image,DebugImages * dbg,const PageId & pageId,const intrusive_ptr<Settings> & settings,SplitImage * splitImage)306 QImage OutputGenerator::process(const TaskStatus& status,
307                                 const FilterData& input,
308                                 ZoneSet& picture_zones,
309                                 const ZoneSet& fill_zones,
310                                 dewarping::DistortionModel& distortion_model,
311                                 const DepthPerception& depth_perception,
312                                 imageproc::BinaryImage* auto_picture_mask,
313                                 imageproc::BinaryImage* speckles_image,
314                                 DebugImages* dbg,
315                                 const PageId& pageId,
316                                 const intrusive_ptr<Settings>& settings,
317                                 SplitImage* splitImage) {
318   QImage image(processImpl(status, input, picture_zones, fill_zones, distortion_model, depth_perception,
319                            auto_picture_mask, speckles_image, dbg, pageId, settings, splitImage));
320   // Set the correct DPI.
321   const RenderParams renderParams(m_colorParams, m_splittingOptions);
322   const Dpm output_dpm(m_dpi);
323 
324   if (!renderParams.splitOutput()) {
325     assert(!image.isNull());
326 
327     image.setDotsPerMeterX(output_dpm.horizontal());
328     image.setDotsPerMeterY(output_dpm.vertical());
329   } else {
330     splitImage->applyToLayerImages([&output_dpm](QImage& img) {
331       img.setDotsPerMeterX(output_dpm.horizontal());
332       img.setDotsPerMeterY(output_dpm.vertical());
333     });
334   }
335 
336   return image;
337 }
338 
// Size of the generated output image, in pixels.
QSize OutputGenerator::outputImageSize() const {
  return m_outRect.size();
}

// Content rectangle in output image coordinates.
QRect OutputGenerator::outputContentRect() const {
  return m_contentRect;
}
346 
/**
 * Transforms \p input into \p target_rect coordinates as grayscale and
 * flattens uneven illumination by estimating the background with a
 * polynomial surface and raising each pixel relative to it, so that the
 * background maps to white.
 *
 * \param status Allows cancellation between the expensive steps.
 * \param input Original image (sampled as grayscale).
 * \param area_to_consider Area in original image coordinates used for
 *        background estimation.
 * \param xform Transformation from original to output coordinates.
 * \param target_rect Target rectangle in transformed coordinates.
 * \param background If non-null, receives the rendered background estimate.
 * \param dbg Optional sink for intermediate debug images.
 * \return The illumination-normalized grayscale image.
 */
GrayImage OutputGenerator::normalizeIlluminationGray(const TaskStatus& status,
                                                     const QImage& input,
                                                     const QPolygonF& area_to_consider,
                                                     const QTransform& xform,
                                                     const QRect& target_rect,
                                                     GrayImage* background,
                                                     DebugImages* const dbg) {
  GrayImage to_be_normalized(transformToGray(input, xform, target_rect, OutsidePixels::assumeWeakNearest()));
  if (dbg) {
    dbg->add(to_be_normalized, "to_be_normalized");
  }

  status.throwIfCancelled();

  // Bring the consideration area into the coordinate system of target_rect.
  QPolygonF transformed_consideration_area(xform.map(area_to_consider));
  transformed_consideration_area.translate(-target_rect.topLeft());

  const PolynomialSurface bg_ps(estimateBackground(to_be_normalized, transformed_consideration_area, status, dbg));

  status.throwIfCancelled();

  GrayImage bg_img(bg_ps.render(to_be_normalized.size()));
  if (dbg) {
    dbg->add(bg_img, "background");
  }
  if (background) {
    *background = bg_img;
  }

  status.throwIfCancelled();

  // Divide each source pixel by its background estimate (in-place into bg_img).
  grayRasterOp<RaiseAboveBackground>(bg_img, to_be_normalized);
  if (dbg) {
    dbg->add(bg_img, "normalized_illumination");
  }

  return bg_img;
}  // OutputGenerator::normalizeIlluminationGray
385 
/**
 * Estimates which parts of \p source_sub_rect are pictures (as opposed to
 * text) by downscaling to 300 dpi, running picture detection, scaling the
 * result back, and thresholding it into a binary mask.
 *
 * \param status Allows cancellation between steps.
 * \param gray_source The grayscale source image; covers source_rect.
 * \param source_rect The rectangle gray_source corresponds to.
 * \param source_sub_rect The sub-area to analyze; must lie within source_rect.
 * \param dbg Optional sink for intermediate debug images.
 * \return A binary mask the size of source_sub_rect.
 */
imageproc::BinaryImage OutputGenerator::estimateBinarizationMask(const TaskStatus& status,
                                                                 const GrayImage& gray_source,
                                                                 const QRect& source_rect,
                                                                 const QRect& source_sub_rect,
                                                                 DebugImages* const dbg) const {
  assert(source_rect.contains(source_sub_rect));

  // If we need to strip some of the margins from a grayscale
  // image, we may actually do it without copying anything.
  // We are going to construct a QImage from existing data.
  // That image won't own that data, but gray_source is not
  // going anywhere, so it's fine.

  GrayImage trimmed_image;

  if (source_rect == source_sub_rect) {
    trimmed_image = gray_source;  // Shallow copy.
  } else {
    // Sub-rectangle in input image coordinates.
    QRect relative_subrect(source_sub_rect);
    relative_subrect.moveTopLeft(source_sub_rect.topLeft() - source_rect.topLeft());

    // Alias the sub-rectangle's pixels without copying: point at the
    // first pixel of the sub-rect and keep the full-row stride.
    const int stride = gray_source.stride();
    const int offset = relative_subrect.top() * stride + relative_subrect.left();

    trimmed_image = GrayImage(QImage(gray_source.data() + offset, relative_subrect.width(), relative_subrect.height(),
                                     stride, QImage::Format_Indexed8));
  }

  status.throwIfCancelled();

  const QSize downscaled_size(to300dpi(trimmed_image.size(), m_dpi));

  // A 300dpi version of trimmed_image.
  GrayImage downscaled_input(scaleToGray(trimmed_image, downscaled_size));
  trimmed_image = GrayImage();  // Save memory.
  status.throwIfCancelled();

  // Light areas indicate pictures.
  GrayImage picture_areas(detectPictures(downscaled_input, status, dbg));
  downscaled_input = GrayImage();  // Save memory.
  status.throwIfCancelled();

  const BinaryThreshold threshold(48);
  // Scale back to original size.
  picture_areas = scaleToGray(picture_areas, source_sub_rect.size());

  return BinaryImage(picture_areas, threshold);
}  // OutputGenerator::estimateBinarizationMask
435 
/**
 * Applies picture-zone layers to a binarization mask in three fixed passes,
 * so that later layers take precedence over earlier ones: ERASER1 zones are
 * filled BLACK, then PAINTER2 zones WHITE, then ERASER3 zones BLACK.
 *
 * \param bw_mask The mask to modify, covering mask_rect.
 * \param mask_rect The rectangle (in output coordinates) bw_mask covers.
 * \param zones The picture zones, in original image coordinates.
 */
void OutputGenerator::modifyBinarizationMask(imageproc::BinaryImage& bw_mask,
                                             const QRect& mask_rect,
                                             const ZoneSet& zones) const {
  // Map zone coordinates into bw_mask's local coordinate system.
  QTransform xform(m_xform.transform());
  xform *= QTransform().translate(-mask_rect.x(), -mask_rect.y());

  typedef PictureLayerProperty PLP;

  // Pass 1: ERASER1
  for (const Zone& zone : zones) {
    if (zone.properties().locateOrDefault<PLP>()->layer() == PLP::ERASER1) {
      const QPolygonF poly(zone.spline().toPolygon());
      PolygonRasterizer::fill(bw_mask, BLACK, xform.map(poly), Qt::WindingFill);
    }
  }

  // Pass 2: PAINTER2
  for (const Zone& zone : zones) {
    if (zone.properties().locateOrDefault<PLP>()->layer() == PLP::PAINTER2) {
      const QPolygonF poly(zone.spline().toPolygon());
      PolygonRasterizer::fill(bw_mask, WHITE, xform.map(poly), Qt::WindingFill);
    }
  }

  // Pass 3: ERASER3
  for (const Zone& zone : zones) {
    if (zone.properties().locateOrDefault<PLP>()->layer() == PLP::ERASER3) {
      const QPolygonF poly(zone.spline().toPolygon());
      PolygonRasterizer::fill(bw_mask, BLACK, xform.map(poly), Qt::WindingFill);
    }
  }
}
468 
processImpl(const TaskStatus & status,const FilterData & input,ZoneSet & picture_zones,const ZoneSet & fill_zones,dewarping::DistortionModel & distortion_model,const DepthPerception & depth_perception,imageproc::BinaryImage * auto_picture_mask,imageproc::BinaryImage * speckles_image,DebugImages * dbg,const PageId & pageId,const intrusive_ptr<Settings> & settings,SplitImage * splitImage)469 QImage OutputGenerator::processImpl(const TaskStatus& status,
470                                     const FilterData& input,
471                                     ZoneSet& picture_zones,
472                                     const ZoneSet& fill_zones,
473                                     dewarping::DistortionModel& distortion_model,
474                                     const DepthPerception& depth_perception,
475                                     imageproc::BinaryImage* auto_picture_mask,
476                                     imageproc::BinaryImage* speckles_image,
477                                     DebugImages* dbg,
478                                     const PageId& pageId,
479                                     const intrusive_ptr<Settings>& settings,
480                                     SplitImage* splitImage) {
481   if ((m_dewarpingOptions.dewarpingMode() == AUTO) || (m_dewarpingOptions.dewarpingMode() == MARGINAL)
482       || ((m_dewarpingOptions.dewarpingMode() == MANUAL) && distortion_model.isValid())) {
483     return processWithDewarping(status, input, picture_zones, fill_zones, distortion_model, depth_perception,
484                                 auto_picture_mask, speckles_image, dbg, pageId, settings, splitImage);
485   } else {
486     return processWithoutDewarping(status, input, picture_zones, fill_zones, auto_picture_mask, speckles_image, dbg,
487                                    pageId, settings, splitImage);
488   }
489 }
490 
processWithoutDewarping(const TaskStatus & status,const FilterData & input,ZoneSet & picture_zones,const ZoneSet & fill_zones,imageproc::BinaryImage * auto_picture_mask,imageproc::BinaryImage * speckles_image,DebugImages * dbg,const PageId & pageId,const intrusive_ptr<Settings> & settings,SplitImage * splitImage)491 QImage OutputGenerator::processWithoutDewarping(const TaskStatus& status,
492                                                 const FilterData& input,
493                                                 ZoneSet& picture_zones,
494                                                 const ZoneSet& fill_zones,
495                                                 imageproc::BinaryImage* auto_picture_mask,
496                                                 imageproc::BinaryImage* speckles_image,
497                                                 DebugImages* dbg,
498                                                 const PageId& pageId,
499                                                 const intrusive_ptr<Settings>& settings,
500                                                 SplitImage* splitImage) {
501   const RenderParams render_params(m_colorParams, m_splittingOptions);
502 
503   const QPolygonF preCropArea = [this, &render_params]() {
504     if (render_params.fillOffcut()) {
505       return m_xform.resultingPreCropArea();
506     } else {
507       const QPolygonF imageRectInOutputCs = m_xform.transform().map(m_xform.origRect());
508       return imageRectInOutputCs.intersected(QRectF(m_outRect));
509     }
510   }();
511   const QPolygonF contentArea
512       = preCropArea.intersected(QRectF(render_params.fillMargins() ? m_contentRect : m_outRect));
513   const QRect contentRect = contentArea.boundingRect().toRect();
514   const QPolygonF outCropArea = preCropArea.intersected(QRectF(m_outRect));
515 
516   const QSize target_size(m_outRect.size().expandedTo(QSize(1, 1)));
517   // If the content area is empty or outside the cropping area, return a blank page.
518   if (contentRect.isEmpty()) {
519     QImage emptyImage(BinaryImage(target_size, WHITE).toQImage());
520     if (!render_params.splitOutput()) {
521       return emptyImage;
522     } else {
523       splitImage->setForegroundImage(emptyImage);
524       splitImage->setBackgroundImage(emptyImage.convertToFormat(QImage::Format_Indexed8));
525       return QImage();
526     }
527   }
528 
529   // For various reasons, we need some whitespace around the content
530   // area.  This is the number of pixels of such whitespace.
531   const int content_margin = m_dpi.vertical() * 20 / 300;
532   // The content area (in output image coordinates) extended
533   // with content_margin.  Note that we prevent that extension
534   // from reaching the neighboring page.
535   // This is the area we are going to pass to estimateBackground().
536   // estimateBackground() needs some margins around content, and
537   // generally smaller margins are better, except when there is
538   // some garbage that connects the content to the edge of the
539   // image area.
540   const QRect workingBoundingRect(
541       preCropArea
542           .intersected(QRectF(contentRect.adjusted(-content_margin, -content_margin, content_margin, content_margin)))
543           .boundingRect()
544           .toRect());
545   const QRect contentRectInWorkingCs(contentRect.translated(-workingBoundingRect.topLeft()));
546   const QPolygonF contentAreaInWorkingCs(contentArea.translated(-workingBoundingRect.topLeft()));
547   const QPolygonF outCropAreaInWorkingCs(outCropArea.translated(-workingBoundingRect.topLeft()));
548   const QPolygonF preCropAreaInOriginalCs(m_xform.transformBack().map(preCropArea));
549   const QPolygonF contentAreaInOriginalCs(m_xform.transformBack().map(contentArea));
550   const QPolygonF outCropAreaInOriginalCs(m_xform.transformBack().map(outCropArea));
551 
552   const bool isBlackOnWhite = updateBlackOnWhite(input, pageId, settings);
553   const GrayImage inputGrayImage = isBlackOnWhite ? input.grayImage() : input.grayImage().inverted();
554   const QImage inputOrigImage = [&input, &isBlackOnWhite]() {
555     QImage result = input.origImage();
556     if (!result.allGray() && (result.format() != QImage::Format_ARGB32) && (result.format() != QImage::Format_RGB32)) {
557       result = result.convertToFormat(QImage::Format_RGB32);
558     }
559     if (!isBlackOnWhite) {
560       result.invertPixels();
561     }
562 
563     return result;
564   }();
565 
566   const BackgroundColorCalculator backgroundColorCalculator = getBackgroundColorCalculator(pageId, settings);
567   QColor outsideBackgroundColor = backgroundColorCalculator.calcDominantBackgroundColor(
568       inputOrigImage.allGray() ? inputGrayImage : inputOrigImage, outCropAreaInOriginalCs, dbg);
569 
570   const bool needNormalizeIllumination
571       = (render_params.normalizeIllumination() && render_params.needBinarization())
572         || (render_params.normalizeIlluminationColor() && !render_params.needBinarization());
573 
574   QImage maybe_normalized;
575   if (needNormalizeIllumination) {
576     maybe_normalized = normalizeIlluminationGray(status, inputGrayImage, preCropAreaInOriginalCs, m_xform.transform(),
577                                                  workingBoundingRect, nullptr, dbg);
578   } else {
579     if (inputOrigImage.allGray()) {
580       maybe_normalized = transformToGray(inputGrayImage, m_xform.transform(), workingBoundingRect,
581                                          OutsidePixels::assumeColor(outsideBackgroundColor));
582     } else {
583       maybe_normalized = transform(inputOrigImage, m_xform.transform(), workingBoundingRect,
584                                    OutsidePixels::assumeColor(outsideBackgroundColor));
585     }
586   }
587 
588   if (needNormalizeIllumination && !inputOrigImage.allGray()) {
589     assert(maybe_normalized.format() == QImage::Format_Indexed8);
590     QImage tmp(transform(inputOrigImage, m_xform.transform(), workingBoundingRect,
591                          OutsidePixels::assumeColor(outsideBackgroundColor)));
592 
593     status.throwIfCancelled();
594 
595     adjustBrightnessGrayscale(tmp, maybe_normalized);
596     maybe_normalized = tmp;
597   }
598 
599   if (dbg) {
600     dbg->add(maybe_normalized, "maybe_normalized");
601   }
602 
603   if (needNormalizeIllumination) {
604     outsideBackgroundColor
605         = backgroundColorCalculator.calcDominantBackgroundColor(maybe_normalized, outCropAreaInWorkingCs, dbg);
606   }
607 
608   status.throwIfCancelled();
609 
610   if (render_params.binaryOutput()) {
611     BinaryImage dst(target_size, WHITE);
612 
613     QImage maybe_smoothed;
614     // We only do smoothing if we are going to do binarization later.
615     if (!render_params.needSavitzkyGolaySmoothing()) {
616       maybe_smoothed = maybe_normalized;
617     } else {
618       maybe_smoothed = smoothToGrayscale(maybe_normalized, m_dpi);
619       if (dbg) {
620         dbg->add(maybe_smoothed, "smoothed");
621       }
622     }
623 
624     // don't destroy as it's needed for color segmentation
625     if (!render_params.needColorSegmentation()) {
626       maybe_normalized = QImage();
627     }
628 
629     status.throwIfCancelled();
630 
631     BinaryImage bw_content(binarize(maybe_smoothed, contentAreaInWorkingCs));
632 
633     maybe_smoothed = QImage();
634     if (dbg) {
635       dbg->add(bw_content, "binarized_and_cropped");
636     }
637 
638     if (render_params.needMorphologicalSmoothing()) {
639       morphologicalSmoothInPlace(bw_content, status);
640       if (dbg) {
641         dbg->add(bw_content, "edges_smoothed");
642       }
643     }
644 
645     status.throwIfCancelled();
646 
647     rasterOp<RopSrc>(dst, contentRect, bw_content, contentRectInWorkingCs.topLeft());
648     bw_content.release();  // Save memory.
649 
650     // It's important to keep despeckling the very last operation
651     // affecting the binary part of the output. That's because
652     // we will be reconstructing the input to this despeckling
653     // operation from the final output file.
654     maybeDespeckleInPlace(dst, m_outRect, m_outRect, m_despeckleLevel, speckles_image, m_dpi, status, dbg);
655 
656     if (!render_params.needColorSegmentation()) {
657       if (!isBlackOnWhite) {
658         dst.invert();
659       }
660 
661       applyFillZonesInPlace(dst, fill_zones);
662 
663       return dst.toQImage();
664     } else {
665       QImage segmented_image;
666       {
667         QImage color_image(target_size, maybe_normalized.format());
668         color_image.fill(Qt::white);
669         if (maybe_normalized.format() == QImage::Format_Indexed8) {
670           color_image.setColorTable(createGrayscalePalette());
671         }
672         drawOver(color_image, contentRect, maybe_normalized, contentRectInWorkingCs);
673         maybe_normalized = QImage();
674 
675         segmented_image = segmentImage(dst, color_image);
676         dst.release();
677       }
678 
679       if (dbg) {
680         dbg->add(segmented_image, "segmented");
681       }
682 
683       status.throwIfCancelled();
684 
685       if (!isBlackOnWhite) {
686         segmented_image.invertPixels();
687       }
688 
689       applyFillZonesInPlace(segmented_image, fill_zones, false);
690 
691       if (dbg) {
692         dbg->add(segmented_image, "segmented_with_fill_zones");
693       }
694 
695       status.throwIfCancelled();
696 
697       if (render_params.posterize()) {
698         segmented_image = posterizeImage(segmented_image, outsideBackgroundColor);
699 
700         if (dbg) {
701           dbg->add(segmented_image, "posterized");
702         }
703 
704         status.throwIfCancelled();
705       }
706 
707       return segmented_image;
708     }
709   }
710 
711   BinaryImage bw_content_mask_output;
712   QImage original_background;
713   if (render_params.mixedOutput()) {
714     BinaryImage bw_mask(workingBoundingRect.size(), BLACK);
715 
716     if ((m_pictureShapeOptions.getPictureShape() != RECTANGULAR_SHAPE)
717         || !m_outputProcessingParams.isAutoZonesFound()) {
718       if (m_pictureShapeOptions.getPictureShape() != OFF_SHAPE) {
719         bw_mask = estimateBinarizationMask(status, GrayImage(maybe_normalized), workingBoundingRect,
720                                            workingBoundingRect, dbg);
721       }
722 
723       removeAutoPictureZones(picture_zones);
724       settings->setPictureZones(pageId, picture_zones);
725       m_outputProcessingParams.setAutoZonesFound(false);
726       settings->setOutputProcessingParams(pageId, m_outputProcessingParams);
727     }
728     if ((m_pictureShapeOptions.getPictureShape() == RECTANGULAR_SHAPE)
729         && !m_outputProcessingParams.isAutoZonesFound()) {
730       std::vector<QRect> areas;
731       bw_mask.rectangularizeAreas(areas, WHITE, m_pictureShapeOptions.getSensitivity());
732 
733       QTransform xform1(m_xform.transform());
734       xform1 *= QTransform().translate(-workingBoundingRect.x(), -workingBoundingRect.y());
735       QTransform inv_xform(xform1.inverted());
736 
737       for (auto i : areas) {
738         QRectF area0(i);
739         QPolygonF area1(area0);
740         QPolygonF area(inv_xform.map(area1));
741 
742         Zone zone1(area);
743 
744         picture_zones.add(zone1);
745       }
746       settings->setPictureZones(pageId, picture_zones);
747       m_outputProcessingParams.setAutoZonesFound(true);
748       settings->setOutputProcessingParams(pageId, m_outputProcessingParams);
749 
750       bw_mask.fill(BLACK);
751     }
752 
753     if (dbg) {
754       dbg->add(bw_mask, "bw_mask");
755     }
756 
757     if (auto_picture_mask) {
758       if (auto_picture_mask->size() != target_size) {
759         BinaryImage(target_size).swap(*auto_picture_mask);
760       }
761       auto_picture_mask->fill(BLACK);
762 
763       rasterOp<RopSrc>(*auto_picture_mask, contentRect, bw_mask, contentRectInWorkingCs.topLeft());
764     }
765 
766     status.throwIfCancelled();
767 
768     modifyBinarizationMask(bw_mask, workingBoundingRect, picture_zones);
769     fillMarginsInPlace(bw_mask, contentAreaInWorkingCs, BLACK);
770     if (dbg) {
771       dbg->add(bw_mask, "bw_mask with zones");
772     }
773 
774     {
775       bw_content_mask_output = BinaryImage(target_size, BLACK);
776       rasterOp<RopSrc>(bw_content_mask_output, contentRect, bw_mask, contentRectInWorkingCs.topLeft());
777     }
778 
779     status.throwIfCancelled();
780 
781     if (render_params.needBinarization()) {
782       QImage maybe_smoothed;
783       if (!render_params.needSavitzkyGolaySmoothing()) {
784         maybe_smoothed = maybe_normalized;
785       } else {
786         maybe_smoothed = smoothToGrayscale(maybe_normalized, m_dpi);
787         if (dbg) {
788           dbg->add(maybe_smoothed, "smoothed");
789         }
790       }
791 
792       BinaryImage bw_mask_filled(bw_mask);
793       fillMarginsInPlace(bw_mask_filled, contentAreaInWorkingCs, WHITE);
794 
795       BinaryImage bw_content(binarize(maybe_smoothed, bw_mask_filled));
796 
797       bw_mask_filled.release();
798       maybe_smoothed = QImage();  // Save memory.
799 
800       if (dbg) {
801         dbg->add(bw_content, "binarized_and_cropped");
802       }
803 
804       status.throwIfCancelled();
805 
806       if (render_params.needMorphologicalSmoothing()) {
807         morphologicalSmoothInPlace(bw_content, status);
808         if (dbg) {
809           dbg->add(bw_content, "edges_smoothed");
810         }
811       }
812 
813       // We don't want speckles in non-B/W areas, as they would
814       // then get visualized on the Despeckling tab.
815       status.throwIfCancelled();
816 
817       // It's important to keep despeckling the very last operation
818       // affecting the binary part of the output. That's because
819       // we will be reconstructing the input to this despeckling
820       // operation from the final output file.
821       maybeDespeckleInPlace(bw_content, workingBoundingRect, contentRect, m_despeckleLevel, speckles_image, m_dpi,
822                             status, dbg);
823 
824       status.throwIfCancelled();
825 
826       if (needNormalizeIllumination && !render_params.normalizeIlluminationColor()) {
827         outsideBackgroundColor = backgroundColorCalculator.calcDominantBackgroundColor(
828             inputOrigImage.allGray() ? inputGrayImage : inputOrigImage, outCropAreaInOriginalCs, dbg);
829 
830         if (inputOrigImage.allGray()) {
831           maybe_normalized = transformToGray(inputGrayImage, m_xform.transform(), workingBoundingRect,
832                                              OutsidePixels::assumeColor(outsideBackgroundColor));
833         } else {
834           maybe_normalized = transform(inputOrigImage, m_xform.transform(), workingBoundingRect,
835                                        OutsidePixels::assumeColor(outsideBackgroundColor));
836         }
837 
838         status.throwIfCancelled();
839       }
840 
841       if (render_params.originalBackground()) {
842         original_background = maybe_normalized;
843 
844         QImage original_background_dst(target_size, original_background.format());
845         if (original_background.format() == QImage::Format_Indexed8) {
846           original_background_dst.setColorTable(createGrayscalePalette());
847         }
848         if (original_background_dst.isNull()) {
849           // Both the constructor and setColorTable() above can leave the image null.
850           throw std::bad_alloc();
851         }
852 
853         QColor outsideOriginalBackgroundColor = outsideBackgroundColor;
854         if (m_colorParams.colorCommonOptions().getFillingColor() == FILL_WHITE) {
855           outsideOriginalBackgroundColor = isBlackOnWhite ? Qt::white : Qt::black;
856         }
857         fillMarginsInPlace(original_background, contentAreaInWorkingCs, outsideOriginalBackgroundColor);
858         original_background_dst.fill(outsideOriginalBackgroundColor);
859 
860         drawOver(original_background_dst, contentRect, original_background, contentRectInWorkingCs);
861 
862         reserveBlackAndWhite(original_background_dst);
863 
864         original_background = original_background_dst;
865 
866         status.throwIfCancelled();
867       }
868 
869       if (!render_params.needColorSegmentation()) {
870         combineImages(maybe_normalized, bw_content, bw_mask);
871       } else {
872         QImage segmented_image;
873         {
874           QImage maybe_normalized_content(maybe_normalized);
875           applyMask(maybe_normalized_content, bw_mask);
876           segmented_image = segmentImage(bw_content, maybe_normalized_content);
877           maybe_normalized_content = QImage();
878 
879           if (dbg) {
880             dbg->add(segmented_image, "segmented");
881           }
882 
883           status.throwIfCancelled();
884 
885           if (render_params.posterize()) {
886             segmented_image = posterizeImage(segmented_image, outsideBackgroundColor);
887 
888             if (dbg) {
889               dbg->add(segmented_image, "posterized");
890             }
891 
892             status.throwIfCancelled();
893           }
894         }
895 
896         combineImages(maybe_normalized, segmented_image, bw_mask);
897       }
898 
899       reserveBlackAndWhite(maybe_normalized, bw_mask.inverted());
900 
901       if (dbg) {
902         dbg->add(maybe_normalized, "combined");
903       }
904 
905       status.throwIfCancelled();
906     }
907   }
908 
909   status.throwIfCancelled();
910 
911   assert(!target_size.isEmpty());
912   QImage dst(target_size, maybe_normalized.format());
913 
914   if (maybe_normalized.format() == QImage::Format_Indexed8) {
915     dst.setColorTable(createGrayscalePalette());
916   }
917 
918   if (dst.isNull()) {
919     // Both the constructor and setColorTable() above can leave the image null.
920     throw std::bad_alloc();
921   }
922 
923   if (render_params.needBinarization()) {
924     outsideBackgroundColor = Qt::white;
925   } else if (m_colorParams.colorCommonOptions().getFillingColor() == FILL_WHITE) {
926     outsideBackgroundColor = isBlackOnWhite ? Qt::white : Qt::black;
927     if (!render_params.needBinarization()) {
928       reserveBlackAndWhite(maybe_normalized);
929     }
930   }
931   fillMarginsInPlace(maybe_normalized, contentAreaInWorkingCs, outsideBackgroundColor);
932   dst.fill(outsideBackgroundColor);
933 
934   drawOver(dst, contentRect, maybe_normalized, contentRectInWorkingCs);
935   maybe_normalized = QImage();
936 
937   if (!isBlackOnWhite) {
938     dst.invertPixels();
939   }
940 
941   if (render_params.mixedOutput() && render_params.needBinarization()) {
942     applyFillZonesToMixedInPlace(dst, fill_zones, bw_content_mask_output, !render_params.needColorSegmentation());
943   } else {
944     applyFillZonesInPlace(dst, fill_zones);
945   }
946 
947   if (dbg) {
948     dbg->add(dst, "fill_zones");
949   }
950 
951   status.throwIfCancelled();
952 
953   if (render_params.splitOutput()) {
954     const SplitImage::ForegroundType foreground_type
955         = render_params.needBinarization()
956               ? render_params.needColorSegmentation() ? SplitImage::INDEXED_FOREGROUND : SplitImage::BINARY_FOREGROUND
957               : SplitImage::COLOR_FOREGROUND;
958 
959     splitImage->setMask(bw_content_mask_output, foreground_type);
960     splitImage->setBackgroundImage(dst);
961 
962     if (render_params.needBinarization() && render_params.originalBackground()) {
963       if (!isBlackOnWhite) {
964         dst.invertPixels();
965       }
966 
967       BinaryImage background_mask = BinaryImage(dst, BinaryThreshold(255)).inverted();
968       fillMarginsInPlace(background_mask, preCropArea, BLACK);
969       applyMask(original_background, background_mask, isBlackOnWhite ? WHITE : BLACK);
970       applyMask(original_background, bw_content_mask_output, isBlackOnWhite ? BLACK : WHITE);
971 
972       if (!isBlackOnWhite) {
973         original_background.invertPixels();
974       }
975       splitImage->setOriginalBackgroundImage(original_background);
976     }
977 
978     return QImage();
979   }
980 
981   if (!render_params.mixedOutput() && render_params.posterize()) {
982     dst = posterizeImage(dst);
983 
984     if (dbg) {
985       dbg->add(dst, "posterized");
986     }
987 
988     status.throwIfCancelled();
989   }
990 
991   return dst;
992 }  // OutputGenerator::processWithoutDewarping
993 
QImage OutputGenerator::processWithDewarping(const TaskStatus& status,
995                                              const FilterData& input,
996                                              ZoneSet& picture_zones,
997                                              const ZoneSet& fill_zones,
998                                              dewarping::DistortionModel& distortion_model,
999                                              const DepthPerception& depth_perception,
1000                                              imageproc::BinaryImage* auto_picture_mask,
1001                                              imageproc::BinaryImage* speckles_image,
1002                                              DebugImages* dbg,
1003                                              const PageId& pageId,
1004                                              const intrusive_ptr<Settings>& settings,
1005                                              SplitImage* splitImage) {
1006   const RenderParams render_params(m_colorParams, m_splittingOptions);
1007 
1008   const QPolygonF preCropArea = [this, &render_params]() {
1009     if (render_params.fillOffcut()) {
1010       return m_xform.resultingPreCropArea();
1011     } else {
1012       const QPolygonF imageRectInOutputCs = m_xform.transform().map(m_xform.origRect());
1013       return imageRectInOutputCs.intersected(QRectF(m_outRect));
1014     }
1015   }();
1016   const QPolygonF contentArea
1017       = preCropArea.intersected(QRectF(render_params.fillMargins() ? m_contentRect : m_outRect));
1018   const QRect contentRect = contentArea.boundingRect().toRect();
1019   const QPolygonF outCropArea = preCropArea.intersected(QRectF(m_outRect));
1020 
1021   const QSize target_size(m_outRect.size().expandedTo(QSize(1, 1)));
1022   // If the content area is empty or outside the cropping area, return a blank page.
1023   if (contentRect.isEmpty()) {
1024     QImage emptyImage(BinaryImage(target_size, WHITE).toQImage());
1025     if (!render_params.splitOutput()) {
1026       return emptyImage;
1027     } else {
1028       splitImage->setForegroundImage(emptyImage);
1029       splitImage->setBackgroundImage(emptyImage.convertToFormat(QImage::Format_Indexed8));
1030       return QImage();
1031     }
1032   }
1033 
1034   // For various reasons, we need some whitespace around the content
1035   // area.  This is the number of pixels of such whitespace.
1036   const int content_margin = m_dpi.vertical() * 20 / 300;
1037   // The content area (in output image coordinates) extended
1038   // with content_margin.  Note that we prevent that extension
1039   // from reaching the neighboring page.
1040   // This is the area we are going to pass to estimateBackground().
1041   // estimateBackground() needs some margins around content, and
1042   // generally smaller margins are better, except when there is
1043   // some garbage that connects the content to the edge of the
1044   // image area.
1045   const QRect workingBoundingRect(
1046       preCropArea
1047           .intersected(QRectF(contentRect.adjusted(-content_margin, -content_margin, content_margin, content_margin)))
1048           .boundingRect()
1049           .toRect());
1050   const QRect contentRectInWorkingCs(contentRect.translated(-workingBoundingRect.topLeft()));
1051   const QPolygonF contentAreaInWorkingCs(contentArea.translated(-workingBoundingRect.topLeft()));
1052   const QPolygonF outCropAreaInWorkingCs(outCropArea.translated(-workingBoundingRect.topLeft()));
1053   const QPolygonF preCropAreaInOriginalCs(m_xform.transformBack().map(preCropArea));
1054   const QPolygonF contentAreaInOriginalCs(m_xform.transformBack().map(contentArea));
1055   const QPolygonF outCropAreaInOriginalCs(m_xform.transformBack().map(outCropArea));
1056 
1057   const bool isBlackOnWhite = updateBlackOnWhite(input, pageId, settings);
1058   const GrayImage inputGrayImage = isBlackOnWhite ? input.grayImage() : input.grayImage().inverted();
1059   const QImage inputOrigImage = [&input, &isBlackOnWhite]() {
1060     QImage result = input.origImage();
1061     if (!result.allGray() && (result.format() != QImage::Format_ARGB32) && (result.format() != QImage::Format_RGB32)) {
1062       result = result.convertToFormat(QImage::Format_RGB32);
1063     }
1064     if (!isBlackOnWhite) {
1065       result.invertPixels();
1066     }
1067 
1068     return result;
1069   }();
1070 
1071   const BackgroundColorCalculator backgroundColorCalculator = getBackgroundColorCalculator(pageId, settings);
1072   QColor outsideBackgroundColor = backgroundColorCalculator.calcDominantBackgroundColor(
1073       inputOrigImage.allGray() ? inputGrayImage : inputOrigImage, outCropAreaInOriginalCs, dbg);
1074 
1075   const bool color_original = !inputOrigImage.allGray();
1076 
1077   const bool needNormalizeIllumination
1078       = (render_params.normalizeIllumination() && render_params.needBinarization())
1079         || (render_params.normalizeIlluminationColor() && !render_params.needBinarization());
1080 
1081   // Original image, but:
1082   // 1. In a format we can handle, that is grayscale, RGB32, ARGB32
1083   // 2. With illumination normalized over the content area, if required.
1084   // 3. With margins filled with white, if required.
1085   QImage normalized_original;
1086 
1087   // The output we would get if dewarping was turned off, except always grayscale.
1088   // Used for automatic picture detection and binarization threshold calculation.
1089   // This image corresponds to the area of normalize_illumination_rect above.
1090   GrayImage warped_gray_output;
1091   // Picture mask (white indicate a picture) in the same coordinates as
1092   // warped_gray_output.  Only built for Mixed mode.
1093   BinaryImage warped_bw_mask;
1094 
1095   BinaryThreshold bw_threshold(128);
1096 
1097   const QTransform norm_illum_to_original(QTransform().translate(workingBoundingRect.left(), workingBoundingRect.top())
1098                                           * m_xform.transformBack());
1099 
1100   if (!needNormalizeIllumination) {
1101     if (color_original) {
1102       normalized_original = convertToRGBorRGBA(inputOrigImage);
1103     } else {
1104       normalized_original = inputGrayImage;
1105     }
1106     warped_gray_output = transformToGray(inputGrayImage, m_xform.transform(), workingBoundingRect,
1107                                          OutsidePixels::assumeWeakColor(outsideBackgroundColor));
1108   } else {
1109     GrayImage warped_gray_background;
1110     warped_gray_output = normalizeIlluminationGray(status, inputGrayImage, preCropAreaInOriginalCs, m_xform.transform(),
1111                                                    workingBoundingRect, &warped_gray_background, dbg);
1112 
1113     status.throwIfCancelled();
1114 
1115     // Transform warped_gray_background to original image coordinates.
1116     warped_gray_background = transformToGray(warped_gray_background.toQImage(), norm_illum_to_original,
1117                                              inputOrigImage.rect(), OutsidePixels::assumeWeakColor(Qt::black));
1118     if (dbg) {
1119       dbg->add(warped_gray_background, "orig_background");
1120     }
1121 
1122     status.throwIfCancelled();
1123     // Turn background into a grayscale, illumination-normalized image.
1124     grayRasterOp<RaiseAboveBackground>(warped_gray_background, inputGrayImage);
1125     if (dbg) {
1126       dbg->add(warped_gray_background, "norm_illum_gray");
1127     }
1128 
1129     status.throwIfCancelled();
1130 
1131     if (!color_original) {
1132       normalized_original = warped_gray_background;
1133     } else {
1134       normalized_original = convertToRGBorRGBA(inputOrigImage);
1135       adjustBrightnessGrayscale(normalized_original, warped_gray_background);
1136       if (dbg) {
1137         dbg->add(normalized_original, "norm_illum_color");
1138       }
1139     }
1140 
1141     outsideBackgroundColor
1142         = backgroundColorCalculator.calcDominantBackgroundColor(normalized_original, outCropAreaInOriginalCs, dbg);
1143   }
1144 
1145   status.throwIfCancelled();
1146 
1147   if (render_params.binaryOutput()) {
1148     bw_threshold = calcBinarizationThreshold(warped_gray_output, contentAreaInWorkingCs);
1149 
1150     status.throwIfCancelled();
1151   } else if (render_params.mixedOutput()) {
1152     warped_bw_mask = BinaryImage(workingBoundingRect.size(), BLACK);
1153 
1154     if ((m_pictureShapeOptions.getPictureShape() != RECTANGULAR_SHAPE)
1155         || !m_outputProcessingParams.isAutoZonesFound()) {
1156       if (m_pictureShapeOptions.getPictureShape() != OFF_SHAPE) {
1157         warped_bw_mask = estimateBinarizationMask(status, GrayImage(warped_gray_output), workingBoundingRect,
1158                                                   workingBoundingRect, dbg);
1159         if (dbg) {
1160           dbg->add(warped_bw_mask, "warped_bw_mask");
1161         }
1162       }
1163 
1164       removeAutoPictureZones(picture_zones);
1165       settings->setPictureZones(pageId, picture_zones);
1166       m_outputProcessingParams.setAutoZonesFound(false);
1167       settings->setOutputProcessingParams(pageId, m_outputProcessingParams);
1168     }
1169     if ((m_pictureShapeOptions.getPictureShape() == RECTANGULAR_SHAPE)
1170         && !m_outputProcessingParams.isAutoZonesFound()) {
1171       std::vector<QRect> areas;
1172       warped_bw_mask.rectangularizeAreas(areas, WHITE, m_pictureShapeOptions.getSensitivity());
1173 
1174       QTransform xform1(m_xform.transform());
1175       xform1 *= QTransform().translate(-workingBoundingRect.x(), -workingBoundingRect.y());
1176       QTransform inv_xform(xform1.inverted());
1177 
1178       for (auto i : areas) {
1179         QRectF area0(i);
1180         QPolygonF area1(area0);
1181         QPolygonF area(inv_xform.map(area1));
1182 
1183         Zone zone1(area);
1184 
1185         picture_zones.add(zone1);
1186       }
1187       settings->setPictureZones(pageId, picture_zones);
1188       m_outputProcessingParams.setAutoZonesFound(true);
1189       settings->setOutputProcessingParams(pageId, m_outputProcessingParams);
1190 
1191       warped_bw_mask.fill(BLACK);
1192     }
1193 
1194     status.throwIfCancelled();
1195 
1196     if (auto_picture_mask) {
1197       if (auto_picture_mask->size() != target_size) {
1198         BinaryImage(target_size).swap(*auto_picture_mask);
1199       }
1200       auto_picture_mask->fill(BLACK);
1201 
1202       rasterOp<RopSrc>(*auto_picture_mask, contentRect, warped_bw_mask, contentRectInWorkingCs.topLeft());
1203     }
1204 
1205     status.throwIfCancelled();
1206 
1207     modifyBinarizationMask(warped_bw_mask, workingBoundingRect, picture_zones);
1208     if (dbg) {
1209       dbg->add(warped_bw_mask, "warped_bw_mask with zones");
1210     }
1211 
1212     status.throwIfCancelled();
1213 
1214     // For Mixed output, we mask out pictures when calculating binarization threshold.
1215     bw_threshold = calcBinarizationThreshold(warped_gray_output, contentAreaInWorkingCs, &warped_bw_mask);
1216 
1217     status.throwIfCancelled();
1218   }
1219 
1220   if (m_dewarpingOptions.dewarpingMode() == AUTO) {
1221     DistortionModelBuilder model_builder(Vec2d(0, 1));
1222 
1223     TextLineTracer::trace(warped_gray_output, m_dpi, contentRectInWorkingCs, model_builder, status, dbg);
1224     model_builder.transform(norm_illum_to_original);
1225 
1226     TopBottomEdgeTracer::trace(inputGrayImage, model_builder.verticalBounds(), model_builder, status, dbg);
1227 
1228     distortion_model = model_builder.tryBuildModel(dbg, &inputGrayImage.toQImage());
1229     if (!distortion_model.isValid()) {
1230       setupTrivialDistortionModel(distortion_model);
1231     }
1232 
1233     BinaryImage bw_image(inputGrayImage, BinaryThreshold(64));
1234 
1235     QTransform transform = m_xform.preRotation().transform(bw_image.size());
1236     QTransform inv_transform = transform.inverted();
1237 
1238     int degrees = m_xform.preRotation().toDegrees();
1239     bw_image = orthogonalRotation(bw_image, degrees);
1240 
1241     const std::vector<QPointF>& top_polyline0 = distortion_model.topCurve().polyline();
1242     const std::vector<QPointF>& bottom_polyline0 = distortion_model.bottomCurve().polyline();
1243 
1244     std::vector<QPointF> top_polyline;
1245     std::vector<QPointF> bottom_polyline;
1246 
1247     for (auto i : top_polyline0) {
1248       top_polyline.push_back(transform.map(i));
1249     }
1250 
1251     for (auto i : bottom_polyline0) {
1252       bottom_polyline.push_back(transform.map(i));
1253     }
1254 
1255     QString stAngle;
1256 
1257     float max_angle = 2.75;
1258 
1259     if ((pageId.subPage() == PageId::SINGLE_PAGE) || (pageId.subPage() == PageId::LEFT_PAGE)) {
1260       float vert_skew_angle_left = vert_border_skew_angle(top_polyline.front(), bottom_polyline.front());
1261 
1262       stAngle.setNum(vert_skew_angle_left);
1263 
1264 
1265       if (vert_skew_angle_left > max_angle) {
1266         auto top_x = static_cast<float>(top_polyline.front().x());
1267         auto bottom_x = static_cast<float>(bottom_polyline.front().x());
1268 
1269         if (top_x < bottom_x) {
1270           std::vector<QPointF> new_bottom_polyline;
1271 
1272           QPointF pt(top_x, bottom_polyline.front().y());
1273 
1274           new_bottom_polyline.push_back(pt);
1275 
1276           for (auto i : bottom_polyline) {
1277             new_bottom_polyline.push_back(inv_transform.map(i));
1278           }
1279 
1280           distortion_model.setBottomCurve(dewarping::Curve(new_bottom_polyline));
1281         } else {
1282           std::vector<QPointF> new_top_polyline;
1283 
1284           QPointF pt(bottom_x, top_polyline.front().y());
1285 
1286           new_top_polyline.push_back(pt);
1287 
1288           for (auto i : top_polyline) {
1289             new_top_polyline.push_back(inv_transform.map(i));
1290           }
1291 
1292           distortion_model.setBottomCurve(dewarping::Curve(new_top_polyline));
1293         }
1294       }
1295     } else {
1296       float vert_skew_angle_right = vert_border_skew_angle(top_polyline.back(), bottom_polyline.back());
1297 
1298       stAngle.setNum(vert_skew_angle_right);
1299 
1300 
1301       if (vert_skew_angle_right > max_angle) {
1302         auto top_x = static_cast<float>(top_polyline.back().x());
1303         auto bottom_x = static_cast<float>(bottom_polyline.back().x());
1304 
1305         if (top_x > bottom_x) {
1306           std::vector<QPointF> new_bottom_polyline;
1307 
1308           QPointF pt(top_x, bottom_polyline.back().y());
1309 
1310           for (auto i : bottom_polyline) {
1311             new_bottom_polyline.push_back(inv_transform.map(i));
1312           }
1313 
1314           new_bottom_polyline.push_back(pt);
1315 
1316           distortion_model.setBottomCurve(dewarping::Curve(new_bottom_polyline));
1317         } else {
1318           std::vector<QPointF> new_top_polyline;
1319 
1320           QPointF pt(bottom_x, top_polyline.back().y());
1321 
1322           for (auto i : top_polyline) {
1323             new_top_polyline.push_back(inv_transform.map(i));
1324           }
1325 
1326           new_top_polyline.push_back(pt);
1327 
1328           distortion_model.setBottomCurve(dewarping::Curve(new_top_polyline));
1329         }
1330       }
1331     }
1332   } else if (m_dewarpingOptions.dewarpingMode() == MARGINAL) {
1333     BinaryImage bw_image(inputGrayImage, BinaryThreshold(64));
1334 
1335     QTransform transform = m_xform.preRotation().transform(bw_image.size());
1336     QTransform inv_transform = transform.inverted();
1337 
1338     int degrees = m_xform.preRotation().toDegrees();
1339     bw_image = orthogonalRotation(bw_image, degrees);
1340 
1341     setupTrivialDistortionModel(distortion_model);
1342 
1343     int max_red_points = 5;
1344     XSpline top_spline;
1345 
1346     const std::vector<QPointF>& top_polyline = distortion_model.topCurve().polyline();
1347 
1348     const QLineF top_line(transform.map(top_polyline.front()), transform.map(top_polyline.back()));
1349 
1350     top_spline.appendControlPoint(top_line.p1(), 0);
1351 
1352     if ((pageId.subPage() == PageId::SINGLE_PAGE) || (pageId.subPage() == PageId::LEFT_PAGE)) {
1353       for (int i = 29 - max_red_points; i < 29; i++) {
1354         top_spline.appendControlPoint(top_line.pointAt((float) i / 29.0), 1);
1355       }
1356     } else {
1357       for (int i = 1; i <= max_red_points; i++) {
1358         top_spline.appendControlPoint(top_line.pointAt((float) i / 29.0), 1);
1359       }
1360     }
1361 
1362     top_spline.appendControlPoint(top_line.p2(), 0);
1363 
1364     for (int i = 0; i <= top_spline.numSegments(); i++) {
1365       movePointToTopMargin(bw_image, top_spline, i);
1366     }
1367 
1368     for (int i = 0; i <= top_spline.numSegments(); i++) {
1369       top_spline.moveControlPoint(i, inv_transform.map(top_spline.controlPointPosition(i)));
1370     }
1371 
1372     distortion_model.setTopCurve(dewarping::Curve(top_spline));
1373 
1374 
1375     XSpline bottom_spline;
1376 
1377     const std::vector<QPointF>& bottom_polyline = distortion_model.bottomCurve().polyline();
1378 
1379     const QLineF bottom_line(transform.map(bottom_polyline.front()), transform.map(bottom_polyline.back()));
1380 
1381     bottom_spline.appendControlPoint(bottom_line.p1(), 0);
1382 
1383     if ((pageId.subPage() == PageId::SINGLE_PAGE) || (pageId.subPage() == PageId::LEFT_PAGE)) {
1384       for (int i = 29 - max_red_points; i < 29; i++) {
1385         bottom_spline.appendControlPoint(top_line.pointAt((float) i / 29.0), 1);
1386       }
1387     } else {
1388       for (int i = 1; i <= max_red_points; i++) {
1389         bottom_spline.appendControlPoint(top_line.pointAt((float) i / 29.0), 1);
1390       }
1391     }
1392 
1393     bottom_spline.appendControlPoint(bottom_line.p2(), 0);
1394 
1395     for (int i = 0; i <= bottom_spline.numSegments(); i++) {
1396       movePointToBottomMargin(bw_image, bottom_spline, i);
1397     }
1398 
1399     for (int i = 0; i <= bottom_spline.numSegments(); i++) {
1400       bottom_spline.moveControlPoint(i, inv_transform.map(bottom_spline.controlPointPosition(i)));
1401     }
1402 
1403     distortion_model.setBottomCurve(dewarping::Curve(bottom_spline));
1404 
1405     if (!distortion_model.isValid()) {
1406       setupTrivialDistortionModel(distortion_model);
1407     }
1408 
1409     if (dbg) {
1410       QImage out_image(bw_image.toQImage().convertToFormat(QImage::Format_RGB32));
1411       for (int i = 0; i <= top_spline.numSegments(); i++) {
1412         drawPoint(out_image, top_spline.controlPointPosition(i));
1413       }
1414       for (int i = 0; i <= bottom_spline.numSegments(); i++) {
1415         drawPoint(out_image, bottom_spline.controlPointPosition(i));
1416       }
1417       dbg->add(out_image, "marginal dewarping");
1418     }
1419   }
1420   warped_gray_output = GrayImage();  // Save memory.
1421 
1422   status.throwIfCancelled();
1423 
1424   QImage dewarped;
1425   try {
1426     dewarped = dewarp(QTransform(), normalized_original, m_xform.transform(), distortion_model, depth_perception,
1427                       outsideBackgroundColor);
1428   } catch (const std::runtime_error&) {
1429     // Probably an impossible distortion model.  Let's fall back to a trivial one.
1430     setupTrivialDistortionModel(distortion_model);
1431     dewarped = dewarp(QTransform(), normalized_original, m_xform.transform(), distortion_model, depth_perception,
1432                       outsideBackgroundColor);
1433   }
1434 
1435   normalized_original = QImage();  // Save memory.
1436   if (dbg) {
1437     dbg->add(dewarped, "dewarped");
1438   }
1439 
1440   status.throwIfCancelled();
1441 
1442   std::shared_ptr<DewarpingPointMapper> mapper(
1443       new DewarpingPointMapper(distortion_model, depth_perception.value(), m_xform.transform(), contentRect));
1444   const boost::function<QPointF(const QPointF&)> orig_to_output(
1445       boost::bind(&DewarpingPointMapper::mapToDewarpedSpace, mapper, _1));
1446 
1447   const double deskew_angle = maybe_deskew(&dewarped, m_dewarpingOptions, outsideBackgroundColor);
1448 
1449   {
1450     QTransform post_rotate;
1451 
1452     QPointF center(m_outRect.width() / 2.0, m_outRect.height() / 2.0);
1453 
1454     post_rotate.translate(center.x(), center.y());
1455     post_rotate.rotate(-deskew_angle);
1456     post_rotate.translate(-center.x(), -center.y());
1457 
1458     m_postTransform = post_rotate;
1459   }
1460 
1461   BinaryImage dewarping_content_area_mask(inputGrayImage.size(), BLACK);
1462   fillMarginsInPlace(dewarping_content_area_mask, contentAreaInOriginalCs, WHITE);
1463   QImage dewarping_content_area_mask_dewarped(dewarp(QTransform(), dewarping_content_area_mask.toQImage(),
1464                                                      m_xform.transform(), distortion_model, depth_perception,
1465                                                      Qt::white));
1466   deskew(&dewarping_content_area_mask_dewarped, deskew_angle, Qt::white);
1467   dewarping_content_area_mask = BinaryImage(dewarping_content_area_mask_dewarped);
1468   dewarping_content_area_mask_dewarped = QImage();
1469 
1470   if (render_params.binaryOutput()) {
1471     QImage dewarped_and_maybe_smoothed;
1472     // We only do smoothing if we are going to do binarization later.
1473     if (!render_params.needSavitzkyGolaySmoothing()) {
1474       dewarped_and_maybe_smoothed = dewarped;
1475     } else {
1476       dewarped_and_maybe_smoothed = smoothToGrayscale(dewarped, m_dpi);
1477       if (dbg) {
1478         dbg->add(dewarped_and_maybe_smoothed, "smoothed");
1479       }
1480     }
1481 
1482     // don't destroy as it's needed for color segmentation
1483     if (!render_params.needColorSegmentation()) {
1484       dewarped = QImage();
1485     }
1486 
1487     status.throwIfCancelled();
1488 
1489     BinaryImage dewarped_bw_content(binarize(dewarped_and_maybe_smoothed, dewarping_content_area_mask));
1490 
1491     status.throwIfCancelled();
1492 
1493     dewarped_and_maybe_smoothed = QImage();  // Save memory.
1494     if (dbg) {
1495       dbg->add(dewarped_bw_content, "dewarped_bw_content");
1496     }
1497 
1498     if (render_params.needMorphologicalSmoothing()) {
1499       morphologicalSmoothInPlace(dewarped_bw_content, status);
1500       if (dbg) {
1501         dbg->add(dewarped_bw_content, "edges_smoothed");
1502       }
1503     }
1504 
1505     status.throwIfCancelled();
1506 
1507     // It's important to keep despeckling the very last operation
1508     // affecting the binary part of the output. That's because
1509     // we will be reconstructing the input to this despeckling
1510     // operation from the final output file.
1511     maybeDespeckleInPlace(dewarped_bw_content, m_outRect, m_outRect, m_despeckleLevel, speckles_image, m_dpi, status,
1512                           dbg);
1513 
1514     if (!render_params.needColorSegmentation()) {
1515       if (!isBlackOnWhite) {
1516         dewarped_bw_content.invert();
1517       }
1518 
1519       applyFillZonesInPlace(dewarped_bw_content, fill_zones, orig_to_output, m_postTransform);
1520 
1521       return dewarped_bw_content.toQImage();
1522     } else {
1523       QImage segmented_image = segmentImage(dewarped_bw_content, dewarped);
1524       dewarped = QImage();
1525       dewarped_bw_content.release();
1526 
1527       if (dbg) {
1528         dbg->add(segmented_image, "segmented");
1529       }
1530 
1531       status.throwIfCancelled();
1532 
1533       if (!isBlackOnWhite) {
1534         segmented_image.invertPixels();
1535       }
1536 
1537       applyFillZonesInPlace(segmented_image, fill_zones, orig_to_output, m_postTransform, false);
1538 
1539       if (dbg) {
1540         dbg->add(segmented_image, "segmented_with_fill_zones");
1541       }
1542 
1543       status.throwIfCancelled();
1544 
1545       if (render_params.posterize()) {
1546         segmented_image = posterizeImage(segmented_image, outsideBackgroundColor);
1547 
1548         if (dbg) {
1549           dbg->add(segmented_image, "posterized");
1550         }
1551 
1552         status.throwIfCancelled();
1553       }
1554 
1555       return segmented_image;
1556     }
1557   }
1558 
1559   BinaryImage dewarped_bw_content_mask;
1560   QImage original_background;
1561   if (render_params.mixedOutput()) {
1562     const QTransform orig_to_working_cs(
1563         m_xform.transform() * QTransform().translate(-workingBoundingRect.left(), -workingBoundingRect.top()));
1564     QTransform working_to_output_cs;
1565     working_to_output_cs.translate(workingBoundingRect.left(), workingBoundingRect.top());
1566     BinaryImage dewarped_bw_mask(dewarp(orig_to_working_cs, warped_bw_mask.toQImage(), working_to_output_cs,
1567                                         distortion_model, depth_perception, Qt::black));
1568     warped_bw_mask.release();
1569 
1570     {
1571       QImage dewarped_bw_mask_deskewed(dewarped_bw_mask.toQImage());
1572       deskew(&dewarped_bw_mask_deskewed, deskew_angle, Qt::black);
1573       dewarped_bw_mask = BinaryImage(dewarped_bw_mask_deskewed);
1574     }
1575 
1576     fillMarginsInPlace(dewarped_bw_mask, dewarping_content_area_mask, BLACK);
1577 
1578     if (dbg) {
1579       dbg->add(dewarped_bw_mask, "dewarped_bw_mask");
1580     }
1581 
1582     status.throwIfCancelled();
1583 
1584     dewarped_bw_content_mask = dewarped_bw_mask;
1585 
1586     if (render_params.needBinarization()) {
1587       QImage dewarped_and_maybe_smoothed;
1588       if (!render_params.needSavitzkyGolaySmoothing()) {
1589         dewarped_and_maybe_smoothed = dewarped;
1590       } else {
1591         dewarped_and_maybe_smoothed = smoothToGrayscale(dewarped, m_dpi);
1592         if (dbg) {
1593           dbg->add(dewarped_and_maybe_smoothed, "smoothed");
1594         }
1595       }
1596 
1597       status.throwIfCancelled();
1598 
1599       BinaryImage dewarped_bw_mask_filled(dewarped_bw_mask);
1600       fillMarginsInPlace(dewarped_bw_mask_filled, dewarping_content_area_mask, WHITE);
1601 
1602       BinaryImage dewarped_bw_content(binarize(dewarped_and_maybe_smoothed, dewarped_bw_mask_filled));
1603 
1604       dewarped_bw_mask_filled.release();
1605       dewarped_and_maybe_smoothed = QImage();  // Save memory.
1606 
1607       if (dbg) {
1608         dbg->add(dewarped_bw_content, "dewarped_bw_content");
1609       }
1610 
1611       status.throwIfCancelled();
1612 
1613       if (render_params.needMorphologicalSmoothing()) {
1614         morphologicalSmoothInPlace(dewarped_bw_content, status);
1615         if (dbg) {
1616           dbg->add(dewarped_bw_content, "edges_smoothed");
1617         }
1618       }
1619 
1620       status.throwIfCancelled();
1621 
1622       if (render_params.needMorphologicalSmoothing()) {
1623         morphologicalSmoothInPlace(dewarped_bw_content, status);
1624         if (dbg) {
1625           dbg->add(dewarped_bw_content, "edges_smoothed");
1626         }
1627       }
1628 
1629       status.throwIfCancelled();
1630 
1631       // It's important to keep despeckling the very last operation
1632       // affecting the binary part of the output. That's because
1633       // we will be reconstructing the input to this despeckling
1634       // operation from the final output file.
1635       maybeDespeckleInPlace(dewarped_bw_content, m_outRect, contentRect, m_despeckleLevel, speckles_image, m_dpi,
1636                             status, dbg);
1637 
1638       status.throwIfCancelled();
1639 
1640       if (needNormalizeIllumination && !render_params.normalizeIlluminationColor()) {
1641         outsideBackgroundColor = backgroundColorCalculator.calcDominantBackgroundColor(
1642             inputOrigImage.allGray() ? inputGrayImage : inputOrigImage, outCropAreaInOriginalCs, dbg);
1643 
1644         QImage orig_without_illumination;
1645         if (color_original) {
1646           orig_without_illumination = convertToRGBorRGBA(inputOrigImage);
1647         } else {
1648           orig_without_illumination = inputGrayImage;
1649         }
1650 
1651         status.throwIfCancelled();
1652 
1653         try {
1654           dewarped = dewarp(QTransform(), orig_without_illumination, m_xform.transform(), distortion_model,
1655                             depth_perception, outsideBackgroundColor);
1656         } catch (const std::runtime_error&) {
1657           setupTrivialDistortionModel(distortion_model);
1658           dewarped = dewarp(QTransform(), orig_without_illumination, m_xform.transform(), distortion_model,
1659                             depth_perception, outsideBackgroundColor);
1660         }
1661         orig_without_illumination = QImage();
1662 
1663         deskew(&dewarped, deskew_angle, outsideBackgroundColor);
1664 
1665         status.throwIfCancelled();
1666       }
1667 
1668       if (render_params.originalBackground()) {
1669         original_background = dewarped;
1670 
1671         QColor outsideOriginalBackgroundColor = outsideBackgroundColor;
1672         if (m_colorParams.colorCommonOptions().getFillingColor() == FILL_WHITE) {
1673           outsideOriginalBackgroundColor = isBlackOnWhite ? Qt::white : Qt::black;
1674         }
1675         fillMarginsInPlace(original_background, dewarping_content_area_mask, outsideOriginalBackgroundColor);
1676 
1677         reserveBlackAndWhite(original_background);
1678 
1679         status.throwIfCancelled();
1680       }
1681 
1682       if (!render_params.needColorSegmentation()) {
1683         combineImages(dewarped, dewarped_bw_content, dewarped_bw_mask);
1684       } else {
1685         QImage segmented_image;
1686         {
1687           QImage dewarped_content(dewarped);
1688           applyMask(dewarped_content, dewarped_bw_mask);
1689           segmented_image = segmentImage(dewarped_bw_content, dewarped_content);
1690           dewarped_content = QImage();
1691 
1692           if (dbg) {
1693             dbg->add(segmented_image, "segmented");
1694           }
1695 
1696           status.throwIfCancelled();
1697 
1698           if (render_params.posterize()) {
1699             segmented_image = posterizeImage(segmented_image, outsideBackgroundColor);
1700 
1701             if (dbg) {
1702               dbg->add(segmented_image, "posterized");
1703             }
1704 
1705             status.throwIfCancelled();
1706           }
1707         }
1708 
1709         combineImages(dewarped, segmented_image, dewarped_bw_mask);
1710       }
1711 
1712       reserveBlackAndWhite(dewarped, dewarped_bw_mask.inverted());
1713 
1714       if (dbg) {
1715         dbg->add(dewarped, "combined_image");
1716       }
1717 
1718       status.throwIfCancelled();
1719     }
1720   }
1721 
1722   if (render_params.needBinarization()) {
1723     outsideBackgroundColor = Qt::white;
1724   } else if (m_colorParams.colorCommonOptions().getFillingColor() == FILL_WHITE) {
1725     outsideBackgroundColor = isBlackOnWhite ? Qt::white : Qt::black;
1726     if (!render_params.needBinarization()) {
1727       reserveBlackAndWhite(dewarped);
1728     }
1729   }
1730   fillMarginsInPlace(dewarped, dewarping_content_area_mask, outsideBackgroundColor);
1731 
1732   if (!isBlackOnWhite) {
1733     dewarped.invertPixels();
1734   }
1735 
1736   if (render_params.mixedOutput() && render_params.needBinarization()) {
1737     applyFillZonesToMixedInPlace(dewarped, fill_zones, orig_to_output, m_postTransform, dewarped_bw_content_mask,
1738                                  !render_params.needColorSegmentation());
1739   } else {
1740     applyFillZonesInPlace(dewarped, fill_zones, orig_to_output, m_postTransform);
1741   }
1742 
1743   if (dbg) {
1744     dbg->add(dewarped, "fill_zones");
1745   }
1746 
1747   status.throwIfCancelled();
1748 
1749   if (render_params.splitOutput()) {
1750     const SplitImage::ForegroundType foreground_type
1751         = render_params.needBinarization()
1752               ? render_params.needColorSegmentation() ? SplitImage::INDEXED_FOREGROUND : SplitImage::BINARY_FOREGROUND
1753               : SplitImage::COLOR_FOREGROUND;
1754 
1755     splitImage->setMask(dewarped_bw_content_mask, foreground_type);
1756     splitImage->setBackgroundImage(dewarped);
1757 
1758     if (render_params.needBinarization() && render_params.originalBackground()) {
1759       if (!isBlackOnWhite) {
1760         dewarped.invertPixels();
1761       }
1762 
1763       BinaryImage background_mask = BinaryImage(dewarped, BinaryThreshold(255)).inverted();
1764       fillMarginsInPlace(background_mask, dewarping_content_area_mask, BLACK);
1765       applyMask(original_background, background_mask, isBlackOnWhite ? WHITE : BLACK);
1766 
1767       applyMask(original_background, dewarped_bw_content_mask, isBlackOnWhite ? BLACK : WHITE);
1768 
1769       if (!isBlackOnWhite) {
1770         original_background.invertPixels();
1771       }
1772       splitImage->setOriginalBackgroundImage(original_background);
1773     }
1774 
1775     return QImage();
1776   }
1777 
1778   if (!render_params.mixedOutput() && render_params.posterize()) {
1779     dewarped = posterizeImage(dewarped);
1780 
1781     if (dbg) {
1782       dbg->add(dewarped, "posterized");
1783     }
1784 
1785     status.throwIfCancelled();
1786   }
1787 
1788   return dewarped;
1789 }  // OutputGenerator::processWithDewarping
1790 
1791 /**
1792  * Set up a distortion model corresponding to the content rect,
1793  * which will result in no distortion correction.
1794  */
setupTrivialDistortionModel(DistortionModel & distortion_model) const1795 void OutputGenerator::setupTrivialDistortionModel(DistortionModel& distortion_model) const {
1796   QPolygonF poly;
1797   if (!m_contentRect.isEmpty()) {
1798     poly = QRectF(m_contentRect);
1799   } else {
1800     poly << m_contentRect.topLeft() + QPointF(-0.5, -0.5);
1801     poly << m_contentRect.topLeft() + QPointF(0.5, -0.5);
1802     poly << m_contentRect.topLeft() + QPointF(0.5, 0.5);
1803     poly << m_contentRect.topLeft() + QPointF(-0.5, 0.5);
1804   }
1805   poly = m_xform.transformBack().map(poly);
1806 
1807   std::vector<QPointF> top_polyline, bottom_polyline;
1808   top_polyline.push_back(poly[0]);     // top-left
1809   top_polyline.push_back(poly[1]);     // top-right
1810   bottom_polyline.push_back(poly[3]);  // bottom-left
1811   bottom_polyline.push_back(poly[2]);  // bottom-right
1812   distortion_model.setTopCurve(Curve(top_polyline));
1813   distortion_model.setBottomCurve(Curve(bottom_polyline));
1814 }
1815 
createDewarper(const DistortionModel & distortion_model,const QTransform & distortion_model_to_target,double depth_perception)1816 CylindricalSurfaceDewarper OutputGenerator::createDewarper(const DistortionModel& distortion_model,
1817                                                            const QTransform& distortion_model_to_target,
1818                                                            double depth_perception) {
1819   if (distortion_model_to_target.isIdentity()) {
1820     return CylindricalSurfaceDewarper(distortion_model.topCurve().polyline(), distortion_model.bottomCurve().polyline(),
1821                                       depth_perception);
1822   }
1823 
1824   std::vector<QPointF> top_polyline(distortion_model.topCurve().polyline());
1825   std::vector<QPointF> bottom_polyline(distortion_model.bottomCurve().polyline());
1826   for (QPointF& pt : top_polyline) {
1827     pt = distortion_model_to_target.map(pt);
1828   }
1829   for (QPointF& pt : bottom_polyline) {
1830     pt = distortion_model_to_target.map(pt);
1831   }
1832 
1833   return CylindricalSurfaceDewarper(top_polyline, bottom_polyline, depth_perception);
1834 }
1835 
1836 /**
1837  * \param orig_to_src Transformation from the original image coordinates
1838  *                    to the coordinate system of \p src image.
1839  * \param src_to_output Transformation from the \p src image coordinates
1840  *                      to output image coordinates.
1841  * \param distortion_model Distortion model.
1842  * \param depth_perception Depth perception.
1843  * \param bg_color The color to use for areas outsize of \p src.
1844  * \param modified_content_rect A vertically shrunk version of outputContentRect().
1845  *                              See function definition for more details.
1846  */
dewarp(const QTransform & orig_to_src,const QImage & src,const QTransform & src_to_output,const DistortionModel & distortion_model,const DepthPerception & depth_perception,const QColor & bg_color) const1847 QImage OutputGenerator::dewarp(const QTransform& orig_to_src,
1848                                const QImage& src,
1849                                const QTransform& src_to_output,
1850                                const DistortionModel& distortion_model,
1851                                const DepthPerception& depth_perception,
1852                                const QColor& bg_color) const {
1853   const CylindricalSurfaceDewarper dewarper(createDewarper(distortion_model, orig_to_src, depth_perception.value()));
1854 
1855   // Model domain is a rectangle in output image coordinates that
1856   // will be mapped to our curved quadrilateral.
1857   const QRect model_domain(
1858       distortion_model.modelDomain(dewarper, orig_to_src * src_to_output, outputContentRect()).toRect());
1859   if (model_domain.isEmpty()) {
1860     GrayImage out(src.size());
1861     out.fill(0xff);  // white
1862 
1863     return out;
1864   }
1865 
1866   return RasterDewarper::dewarp(src, m_outRect.size(), dewarper, model_domain, bg_color);
1867 }
1868 
// Scales a size expressed at the reference 300 DPI to the target resolution,
// never letting either dimension collapse below one pixel.
QSize OutputGenerator::from300dpi(const QSize& size, const Dpi& target_dpi) {
  const int width = qRound(size.width() * (target_dpi.horizontal() / 300.0));
  const int height = qRound(size.height() * (target_dpi.vertical() / 300.0));

  return QSize(std::max(1, width), std::max(1, height));
}
1877 
// Scales a size expressed at the source resolution to the reference 300 DPI,
// never letting either dimension collapse below one pixel.
QSize OutputGenerator::to300dpi(const QSize& size, const Dpi& source_dpi) {
  const int width = qRound(size.width() * (300.0 / source_dpi.horizontal()));
  const int height = qRound(size.height() * (300.0 / source_dpi.vertical()));

  return QSize(std::max(1, width), std::max(1, height));
}
1886 
// Converts the image to 32-bit RGB, keeping an alpha channel only
// when the source already has one.
QImage OutputGenerator::convertToRGBorRGBA(const QImage& src) {
  if (src.hasAlphaChannel()) {
    return src.convertToFormat(QImage::Format_ARGB32);
  }
  return src.convertToFormat(QImage::Format_RGB32);
}
1892 
fillMarginsInPlace(QImage & image,const QPolygonF & content_poly,const QColor & color,const bool antialiasing)1893 void OutputGenerator::fillMarginsInPlace(QImage& image,
1894                                          const QPolygonF& content_poly,
1895                                          const QColor& color,
1896                                          const bool antialiasing) {
1897   if ((image.format() == QImage::Format_Mono) || (image.format() == QImage::Format_MonoLSB)) {
1898     BinaryImage binaryImage(image);
1899     PolygonRasterizer::fillExcept(binaryImage, (color == Qt::black) ? BLACK : WHITE, content_poly, Qt::WindingFill);
1900     image = binaryImage.toQImage();
1901 
1902     return;
1903   }
1904 
1905   if ((image.format() == QImage::Format_Indexed8) && image.isGrayscale()) {
1906     PolygonRasterizer::grayFillExcept(image, static_cast<unsigned char>(qGray(color.rgb())), content_poly,
1907                                       Qt::WindingFill);
1908 
1909     return;
1910   }
1911 
1912   assert(image.format() == QImage::Format_RGB32 || image.format() == QImage::Format_ARGB32);
1913 
1914   const QImage::Format imageFormat = image.format();
1915   image = image.convertToFormat(QImage::Format_ARGB32_Premultiplied);
1916 
1917   {
1918     QPainter painter(&image);
1919     painter.setRenderHint(QPainter::Antialiasing, antialiasing);
1920     painter.setBrush(color);
1921     painter.setPen(Qt::NoPen);
1922 
1923     QPainterPath outer_path;
1924     outer_path.addRect(image.rect());
1925     QPainterPath inner_path;
1926     inner_path.addPolygon(content_poly);
1927 
1928     painter.drawPath(outer_path.subtracted(inner_path));
1929   }
1930 
1931   image = image.convertToFormat(imageFormat);
1932 }  // OutputGenerator::fillMarginsInPlace
1933 
// Fills everything outside of content_poly in the binary image with the given color.
void OutputGenerator::fillMarginsInPlace(BinaryImage& image, const QPolygonF& content_poly, const BWColor& color) {
  PolygonRasterizer::fillExcept(image, color, content_poly, Qt::WindingFill);
}
1937 
fillMarginsInPlace(QImage & image,const BinaryImage & content_mask,const QColor & color)1938 void OutputGenerator::fillMarginsInPlace(QImage& image, const BinaryImage& content_mask, const QColor& color) {
1939   if ((image.format() == QImage::Format_Mono) || (image.format() == QImage::Format_MonoLSB)) {
1940     BinaryImage binaryImage(image);
1941     fillExcept(binaryImage, content_mask, (color == Qt::black) ? BLACK : WHITE);
1942     image = binaryImage.toQImage();
1943 
1944     return;
1945   }
1946 
1947   if (image.format() == QImage::Format_Indexed8) {
1948     fillExcept<uint8_t>(image, content_mask, color);
1949   } else {
1950     assert(image.format() == QImage::Format_RGB32 || image.format() == QImage::Format_ARGB32);
1951 
1952     fillExcept<uint32_t>(image, content_mask, color);
1953   }
1954 }
1955 
// Fills everything outside of the content_mask area in the binary image with the given color.
void OutputGenerator::fillMarginsInPlace(BinaryImage& image, const BinaryImage& content_mask, const BWColor& color) {
  fillExcept(image, content_mask, color);
}
1959 
/**
 * Detects picture (halftone/photo) areas in a 300-DPI grayscale image.
 *
 * The pipeline below is strictly ordered: several intermediates are reset
 * to empty images as soon as they are no longer needed, to save memory.
 *
 * \param input_300dpi The grayscale input, assumed to be at 300 DPI.
 * \param status Used to abort early if the task was cancelled.
 * \param dbg Optional sink for intermediate debugging images.
 * \return A grayscale map where picture areas stand out; optionally
 *         range-stretched again when higher search sensitivity is enabled.
 */
GrayImage OutputGenerator::detectPictures(const GrayImage& input_300dpi,
                                          const TaskStatus& status,
                                          DebugImages* const dbg) const {
  // We stretch the range of gray levels to cover the whole
  // range of [0, 255].  We do it because we want text
  // and background to be equally far from the center
  // of the whole range.  Otherwise text printed with a big
  // font will be considered a picture.
  GrayImage stretched(stretchGrayRange(input_300dpi, 0.01, 0.01));
  if (dbg) {
    dbg->add(stretched, "stretched");
  }

  status.throwIfCancelled();

  // Local 3x3 minimum of the stretched image.
  GrayImage eroded(erodeGray(stretched, QSize(3, 3), 0x00));
  if (dbg) {
    dbg->add(eroded, "eroded");
  }

  status.throwIfCancelled();

  // Local 3x3 maximum of the stretched image.
  GrayImage dilated(dilateGray(stretched, QSize(3, 3), 0xff));
  if (dbg) {
    dbg->add(dilated, "dilated");
  }

  stretched = GrayImage();  // Save memory.
  status.throwIfCancelled();

  // Combine the dilated and eroded images into a local-contrast map
  // (dilated is overwritten in place).
  grayRasterOp<CombineInverted>(dilated, eroded);
  GrayImage gray_gradient(dilated);
  dilated = GrayImage();
  eroded = GrayImage();
  if (dbg) {
    dbg->add(gray_gradient, "gray_gradient");
  }

  status.throwIfCancelled();

  // A heavily eroded version serves as the seed for reconstruction:
  // only large high-contrast areas (pictures) survive the 35x35 erosion.
  GrayImage marker(erodeGray(gray_gradient, QSize(35, 35), 0x00));
  if (dbg) {
    dbg->add(marker, "marker");
  }

  status.throwIfCancelled();

  // Grow the marker back under the gradient image (morphological
  // reconstruction), recovering the full extent of the surviving areas.
  seedFillGrayInPlace(marker, gray_gradient, CONN8);
  GrayImage reconstructed(marker);
  marker = GrayImage();
  if (dbg) {
    dbg->add(reconstructed, "reconstructed");
  }

  status.throwIfCancelled();

  grayRasterOp<GRopInvert<GRopSrc>>(reconstructed, reconstructed);
  if (dbg) {
    dbg->add(reconstructed, "reconstructed_inverted");
  }

  status.throwIfCancelled();

  // Seed-fill from a frame around the border to fill interior holes
  // in the detected picture areas.
  GrayImage holes_filled(createFramedImage(reconstructed.size()));
  seedFillGrayInPlace(holes_filled, reconstructed, CONN8);
  reconstructed = GrayImage();
  if (dbg) {
    dbg->add(holes_filled, "holes_filled");
  }

  if (m_pictureShapeOptions.isHigherSearchSensitivity()) {
    // Amplify faint detections with an aggressive range stretch.
    GrayImage stretched2(stretchGrayRange(holes_filled, 5.0, 0.01));
    if (dbg) {
      dbg->add(stretched2, "stretched2");
    }

    return stretched2;
  }

  return holes_filled;
}  // OutputGenerator::detectPictures
2041 
// Applies a Savitzky-Golay smoothing filter, with the window size and
// polynomial degree chosen from the smaller of the two DPI components.
QImage OutputGenerator::smoothToGrayscale(const QImage& src, const Dpi& dpi) {
  const int min_dpi = std::min(dpi.horizontal(), dpi.vertical());

  // Defaults cover the high-resolution (> 800 DPI) case.
  int window = 11;
  int degree = 2;
  if (min_dpi <= 200) {
    window = 5;
    degree = 3;
  } else if (min_dpi <= 400) {
    window = 7;
    degree = 4;
  } else if (min_dpi <= 800) {
    window = 11;
    degree = 4;
  }

  return savGolFilter(src, QSize(window, window), degree, degree);
}
2062 
adjustThreshold(BinaryThreshold threshold) const2063 BinaryThreshold OutputGenerator::adjustThreshold(BinaryThreshold threshold) const {
2064   const int adjusted = threshold + m_colorParams.blackWhiteOptions().thresholdAdjustment();
2065 
2066   // Hard-bounding threshold values is necessary for example
2067   // if all the content went into the picture mask.
2068   return BinaryThreshold(qBound(30, adjusted, 225));
2069 }
2070 
// Computes an Otsu threshold over the masked area of the image,
// then applies the user-configured threshold adjustment.
BinaryThreshold OutputGenerator::calcBinarizationThreshold(const QImage& image, const BinaryImage& mask) const {
  GrayscaleHistogram hist(image, mask);

  return adjustThreshold(BinaryThreshold::otsuThreshold(hist));
}
2076 
calcBinarizationThreshold(const QImage & image,const QPolygonF & crop_area,const BinaryImage * mask) const2077 BinaryThreshold OutputGenerator::calcBinarizationThreshold(const QImage& image,
2078                                                            const QPolygonF& crop_area,
2079                                                            const BinaryImage* mask) const {
2080   QPainterPath path;
2081   path.addPolygon(crop_area);
2082 
2083   if (path.contains(image.rect())) {
2084     return adjustThreshold(BinaryThreshold::otsuThreshold(image));
2085   } else {
2086     BinaryImage modified_mask(image.size(), BLACK);
2087     PolygonRasterizer::fillExcept(modified_mask, WHITE, crop_area, Qt::WindingFill);
2088     modified_mask = erodeBrick(modified_mask, QSize(3, 3), WHITE);
2089 
2090     if (mask) {
2091       rasterOp<RopAnd<RopSrc, RopDst>>(modified_mask, *mask);
2092     }
2093 
2094     return calcBinarizationThreshold(image, modified_mask);
2095   }
2096 }
2097 
binarize(const QImage & image) const2098 BinaryImage OutputGenerator::binarize(const QImage& image) const {
2099   if ((image.format() == QImage::Format_Mono) || (image.format() == QImage::Format_MonoLSB)) {
2100     return BinaryImage(image);
2101   }
2102 
2103   const BlackWhiteOptions& blackWhiteOptions = m_colorParams.blackWhiteOptions();
2104   const BinarizationMethod binarizationMethod = blackWhiteOptions.getBinarizationMethod();
2105 
2106   QImage imageToBinarize = image;
2107 
2108   BinaryImage binarized;
2109   switch (binarizationMethod) {
2110     case OTSU: {
2111       GrayscaleHistogram hist(imageToBinarize);
2112       const BinaryThreshold bw_thresh(BinaryThreshold::otsuThreshold(hist));
2113 
2114       binarized = BinaryImage(imageToBinarize, adjustThreshold(bw_thresh));
2115       break;
2116     }
2117     case SAUVOLA: {
2118       QSize windowsSize = QSize(blackWhiteOptions.getWindowSize(), blackWhiteOptions.getWindowSize());
2119       double sauvolaCoef = blackWhiteOptions.getSauvolaCoef();
2120 
2121       binarized = binarizeSauvola(imageToBinarize, windowsSize, sauvolaCoef);
2122       break;
2123     }
2124     case WOLF: {
2125       QSize windowsSize = QSize(blackWhiteOptions.getWindowSize(), blackWhiteOptions.getWindowSize());
2126       auto lowerBound = (unsigned char) blackWhiteOptions.getWolfLowerBound();
2127       auto upperBound = (unsigned char) blackWhiteOptions.getWolfUpperBound();
2128       double wolfCoef = blackWhiteOptions.getWolfCoef();
2129 
2130       binarized = binarizeWolf(imageToBinarize, windowsSize, lowerBound, upperBound, wolfCoef);
2131       break;
2132     }
2133   }
2134 
2135   return binarized;
2136 }
2137 
// Binarizes the image, then ANDs the result with the mask,
// restricting the black content to the mask area.
BinaryImage OutputGenerator::binarize(const QImage& image, const BinaryImage& mask) const {
  BinaryImage binarized = binarize(image);

  rasterOp<RopAnd<RopSrc, RopDst>>(binarized, mask);

  return binarized;
}
2145 
// Binarizes the part of the image inside crop_area, optionally
// intersected with an additional mask.
BinaryImage OutputGenerator::binarize(const QImage& image, const QPolygonF& crop_area, const BinaryImage* mask) const {
  QPainterPath path;
  path.addPolygon(crop_area);

  if (path.contains(image.rect()) && !mask) {
    // The whole image lies inside the crop area and there is
    // no extra mask - binarize everything.
    return binarize(image);
  }

  // Build a mask that is black inside the crop area, slightly eroded
  // to keep border pixels out, and intersect it with the extra mask.
  BinaryImage effective_mask(image.size(), BLACK);
  PolygonRasterizer::fillExcept(effective_mask, WHITE, crop_area, Qt::WindingFill);
  effective_mask = erodeBrick(effective_mask, QSize(3, 3), WHITE);

  if (mask) {
    rasterOp<RopAnd<RopSrc, RopDst>>(effective_mask, *mask);
  }

  return binarize(image, effective_mask);
}
2164 
2165 /**
2166  * \brief Remove small connected components that are considered to be garbage.
2167  *
2168  * Both the size and the distance to other components are taken into account.
2169  *
2170  * \param[in,out] image The image to despeckle.
2171  * \param image_rect The rectangle corresponding to \p image in the same
2172  *        coordinate system where m_contentRect and m_cropRect are defined.
2173  * \param mask_rect The area within the image to consider.  Defined not
2174  *        relative to \p image, but in the same coordinate system where
2175  *        m_contentRect and m_cropRect are defined.  This only affects
2176  *        \p speckles_img, if provided.
2177  * \param level Despeckling aggressiveness.
2178  * \param speckles_img If provided, the removed black speckles will be written
2179  *        there.  The speckles image is always considered to correspond
2180  *        to m_cropRect, so it will have the size of m_cropRect.size().
2181  *        Only the area within \p mask_rect will be copied to \p speckles_img.
2182  *        The rest will be filled with white.
2183  * \param dpi The DPI of the input image.  See the note below.
2184  * \param status Task status.
2185  * \param dbg An optional sink for debugging images.
2186  *
2187  * \note This function only works effectively when the DPI is symmetric,
2188  * that is, its horizontal and vertical components are equal.
2189  */
maybeDespeckleInPlace(imageproc::BinaryImage & image,const QRect & image_rect,const QRect & mask_rect,const double level,BinaryImage * speckles_img,const Dpi & dpi,const TaskStatus & status,DebugImages * dbg) const2190 void OutputGenerator::maybeDespeckleInPlace(imageproc::BinaryImage& image,
2191                                             const QRect& image_rect,
2192                                             const QRect& mask_rect,
2193                                             const double level,
2194                                             BinaryImage* speckles_img,
2195                                             const Dpi& dpi,
2196                                             const TaskStatus& status,
2197                                             DebugImages* dbg) const {
2198   const QRect src_rect(mask_rect.translated(-image_rect.topLeft()));
2199   const QRect dst_rect(mask_rect);
2200 
2201   if (speckles_img) {
2202     BinaryImage(m_outRect.size(), WHITE).swap(*speckles_img);
2203     if (!mask_rect.isEmpty()) {
2204       rasterOp<RopSrc>(*speckles_img, dst_rect, image, src_rect.topLeft());
2205     }
2206   }
2207 
2208   if (level != 0) {
2209     Despeckle::despeckleInPlace(image, dpi, level, status, dbg);
2210 
2211     if (dbg) {
2212       dbg->add(image, "despeckled");
2213     }
2214   }
2215 
2216   if (speckles_img) {
2217     if (!mask_rect.isEmpty()) {
2218       rasterOp<RopSubtract<RopDst, RopSrc>>(*speckles_img, dst_rect, image, src_rect.topLeft());
2219     }
2220   }
2221 }  // OutputGenerator::maybeDespeckleInPlace
2222 
morphologicalSmoothInPlace(BinaryImage & bin_img,const TaskStatus & status)2223 void OutputGenerator::morphologicalSmoothInPlace(BinaryImage& bin_img, const TaskStatus& status) {
2224   // When removing black noise, remove small ones first.
2225 
2226   {
2227     const char pattern[]
2228         = "XXX"
2229           " - "
2230           "   ";
2231     hitMissReplaceAllDirections(bin_img, pattern, 3, 3);
2232   }
2233 
2234   status.throwIfCancelled();
2235 
2236   {
2237     const char pattern[]
2238         = "X ?"
2239           "X  "
2240           "X- "
2241           "X- "
2242           "X  "
2243           "X ?";
2244     hitMissReplaceAllDirections(bin_img, pattern, 3, 6);
2245   }
2246 
2247   status.throwIfCancelled();
2248 
2249   {
2250     const char pattern[]
2251         = "X ?"
2252           "X ?"
2253           "X  "
2254           "X- "
2255           "X- "
2256           "X- "
2257           "X  "
2258           "X ?"
2259           "X ?";
2260     hitMissReplaceAllDirections(bin_img, pattern, 3, 9);
2261   }
2262 
2263   status.throwIfCancelled();
2264 
2265   {
2266     const char pattern[]
2267         = "XX?"
2268           "XX?"
2269           "XX "
2270           "X+ "
2271           "X+ "
2272           "X+ "
2273           "XX "
2274           "XX?"
2275           "XX?";
2276     hitMissReplaceAllDirections(bin_img, pattern, 3, 9);
2277   }
2278 
2279   status.throwIfCancelled();
2280 
2281   {
2282     const char pattern[]
2283         = "XX?"
2284           "XX "
2285           "X+ "
2286           "X+ "
2287           "XX "
2288           "XX?";
2289     hitMissReplaceAllDirections(bin_img, pattern, 3, 6);
2290   }
2291 
2292   status.throwIfCancelled();
2293 
2294   {
2295     const char pattern[]
2296         = "   "
2297           "X+X"
2298           "XXX";
2299     hitMissReplaceAllDirections(bin_img, pattern, 3, 3);
2300   }
2301 }  // OutputGenerator::morphologicalSmoothInPlace
2302 
hitMissReplaceAllDirections(imageproc::BinaryImage & img,const char * const pattern,const int pattern_width,const int pattern_height)2303 void OutputGenerator::hitMissReplaceAllDirections(imageproc::BinaryImage& img,
2304                                                   const char* const pattern,
2305                                                   const int pattern_width,
2306                                                   const int pattern_height) {
2307   hitMissReplaceInPlace(img, WHITE, pattern, pattern_width, pattern_height);
2308 
2309   std::vector<char> pattern_data(static_cast<unsigned long long int>(pattern_width * pattern_height), ' ');
2310   char* const new_pattern = &pattern_data[0];
2311 
2312   // Rotate 90 degrees clockwise.
2313   const char* p = pattern;
2314   int new_width = pattern_height;
2315   int new_height = pattern_width;
2316   for (int y = 0; y < pattern_height; ++y) {
2317     for (int x = 0; x < pattern_width; ++x, ++p) {
2318       const int new_x = pattern_height - 1 - y;
2319       const int new_y = x;
2320       new_pattern[new_y * new_width + new_x] = *p;
2321     }
2322   }
2323   hitMissReplaceInPlace(img, WHITE, new_pattern, new_width, new_height);
2324 
2325   // Rotate upside down.
2326   p = pattern;
2327   new_width = pattern_width;
2328   new_height = pattern_height;
2329   for (int y = 0; y < pattern_height; ++y) {
2330     for (int x = 0; x < pattern_width; ++x, ++p) {
2331       const int new_x = pattern_width - 1 - x;
2332       const int new_y = pattern_height - 1 - y;
2333       new_pattern[new_y * new_width + new_x] = *p;
2334     }
2335   }
2336   hitMissReplaceInPlace(img, WHITE, new_pattern, new_width, new_height);
2337   // Rotate 90 degrees counter-clockwise.
2338   p = pattern;
2339   new_width = pattern_height;
2340   new_height = pattern_width;
2341   for (int y = 0; y < pattern_height; ++y) {
2342     for (int x = 0; x < pattern_width; ++x, ++p) {
2343       const int new_x = y;
2344       const int new_y = pattern_width - 1 - x;
2345       new_pattern[new_y * new_width + new_x] = *p;
2346     }
2347   }
2348   hitMissReplaceInPlace(img, WHITE, new_pattern, new_width, new_height);
2349 }  // OutputGenerator::hitMissReplaceAllDirections
2350 
calcLocalWindowSize(const Dpi & dpi)2351 QSize OutputGenerator::calcLocalWindowSize(const Dpi& dpi) {
2352   const QSizeF size_mm(3, 30);
2353   const QSizeF size_inch(size_mm * constants::MM2INCH);
2354   const QSizeF size_pixels_f(dpi.horizontal() * size_inch.width(), dpi.vertical() * size_inch.height());
2355   QSize size_pixels(size_pixels_f.toSize());
2356 
2357   if (size_pixels.width() < 3) {
2358     size_pixels.setWidth(3);
2359   }
2360   if (size_pixels.height() < 3) {
2361     size_pixels.setHeight(3);
2362   }
2363 
2364   return size_pixels;
2365 }
2366 
applyFillZonesInPlace(QImage & img,const ZoneSet & zones,const boost::function<QPointF (const QPointF &)> & orig_to_output,const QTransform & postTransform,const bool antialiasing) const2367 void OutputGenerator::applyFillZonesInPlace(QImage& img,
2368                                             const ZoneSet& zones,
2369                                             const boost::function<QPointF(const QPointF&)>& orig_to_output,
2370                                             const QTransform& postTransform,
2371                                             const bool antialiasing) const {
2372   if (zones.empty()) {
2373     return;
2374   }
2375 
2376   QImage canvas(img.convertToFormat(QImage::Format_ARGB32_Premultiplied));
2377 
2378   {
2379     QPainter painter(&canvas);
2380     painter.setRenderHint(QPainter::Antialiasing, antialiasing);
2381     painter.setPen(Qt::NoPen);
2382 
2383     for (const Zone& zone : zones) {
2384       const QColor color(zone.properties().locateOrDefault<FillColorProperty>()->color());
2385       const QPolygonF poly(postTransform.map(zone.spline().transformed(orig_to_output).toPolygon()));
2386       painter.setBrush(color);
2387       painter.drawPolygon(poly, Qt::WindingFill);
2388     }
2389   }
2390 
2391   if ((img.format() == QImage::Format_Indexed8) && img.isGrayscale()) {
2392     img = toGrayscale(canvas);
2393   } else {
2394     img = canvas.convertToFormat(img.format());
2395   }
2396 }
2397 
void OutputGenerator::applyFillZonesInPlace(QImage& img,
                                            const ZoneSet& zones,
                                            const boost::function<QPointF(const QPointF&)>& orig_to_output,
                                            const bool antialiasing) const {
  // Convenience overload: same as the full version, with an identity post-transform.
  applyFillZonesInPlace(img, zones, orig_to_output, QTransform(), antialiasing);
}
2404 
void OutputGenerator::applyFillZonesInPlace(QImage& img,
                                            const ZoneSet& zones,
                                            const QTransform& postTransform,
                                            const bool antialiasing) const {
  // Uses m_xform.transform() to map original-image coordinates to output
  // coordinates.  The cast selects the QPointF overload of QTransform::map.
  typedef QPointF (QTransform::*MapPointFunc)(const QPointF&) const;
  applyFillZonesInPlace(img, zones, boost::bind((MapPointFunc) &QTransform::map, m_xform.transform(), _1),
                        postTransform, antialiasing);
}
2413 
void OutputGenerator::applyFillZonesInPlace(QImage& img, const ZoneSet& zones, const bool antialiasing) const {
  // Uses m_xform.transform() to map original-image coordinates to output
  // coordinates.  The cast selects the QPointF overload of QTransform::map.
  typedef QPointF (QTransform::*MapPointFunc)(const QPointF&) const;
  applyFillZonesInPlace(img, zones, boost::bind((MapPointFunc) &QTransform::map, m_xform.transform(), _1),
                        antialiasing);
}
2419 
applyFillZonesInPlace(imageproc::BinaryImage & img,const ZoneSet & zones,const boost::function<QPointF (const QPointF &)> & orig_to_output,const QTransform & postTransform) const2420 void OutputGenerator::applyFillZonesInPlace(imageproc::BinaryImage& img,
2421                                             const ZoneSet& zones,
2422                                             const boost::function<QPointF(const QPointF&)>& orig_to_output,
2423                                             const QTransform& postTransform) const {
2424   if (zones.empty()) {
2425     return;
2426   }
2427 
2428   for (const Zone& zone : zones) {
2429     const QColor color(zone.properties().locateOrDefault<FillColorProperty>()->color());
2430     const BWColor bw_color = qGray(color.rgb()) < 128 ? BLACK : WHITE;
2431     const QPolygonF poly(postTransform.map(zone.spline().transformed(orig_to_output).toPolygon()));
2432     PolygonRasterizer::fill(img, bw_color, poly, Qt::WindingFill);
2433   }
2434 }
2435 
void OutputGenerator::applyFillZonesInPlace(imageproc::BinaryImage& img,
                                            const ZoneSet& zones,
                                            const boost::function<QPointF(const QPointF&)>& orig_to_output) const {
  // Convenience overload: same as the full version, with an identity post-transform.
  applyFillZonesInPlace(img, zones, orig_to_output, QTransform());
}
2441 
void OutputGenerator::applyFillZonesInPlace(imageproc::BinaryImage& img,
                                            const ZoneSet& zones,
                                            const QTransform& postTransform) const {
  // Uses m_xform.transform() to map original-image coordinates to output
  // coordinates.  The cast selects the QPointF overload of QTransform::map.
  typedef QPointF (QTransform::*MapPointFunc)(const QPointF&) const;
  applyFillZonesInPlace(img, zones, boost::bind((MapPointFunc) &QTransform::map, m_xform.transform(), _1),
                        postTransform);
}
2449 
2450 /**
2451  * A simplified version of the above, using toOutput() for translation
2452  * from original image to output image coordinates.
2453  */
applyFillZonesInPlace(imageproc::BinaryImage & img,const ZoneSet & zones) const2454 void OutputGenerator::applyFillZonesInPlace(imageproc::BinaryImage& img, const ZoneSet& zones) const {
2455   typedef QPointF (QTransform::*MapPointFunc)(const QPointF&) const;
2456   applyFillZonesInPlace(img, zones, boost::bind((MapPointFunc) &QTransform::map, m_xform.transform(), _1));
2457 }
2458 
movePointToTopMargin(BinaryImage & bw_image,XSpline & spline,int idx) const2459 void OutputGenerator::movePointToTopMargin(BinaryImage& bw_image, XSpline& spline, int idx) const {
2460   QPointF pos = spline.controlPointPosition(idx);
2461 
2462   for (int j = 0; j < pos.y(); j++) {
2463     if (bw_image.getPixel(static_cast<int>(pos.x()), j) == WHITE) {
2464       int count = 0;
2465       int check_num = 16;
2466 
2467       for (int jj = j; jj < (j + check_num); jj++) {
2468         if (bw_image.getPixel(static_cast<int>(pos.x()), jj) == WHITE) {
2469           count++;
2470         }
2471       }
2472 
2473       if (count == check_num) {
2474         pos.setY(j);
2475         spline.moveControlPoint(idx, pos);
2476         break;
2477       }
2478     }
2479   }
2480 }
2481 
movePointToBottomMargin(BinaryImage & bw_image,XSpline & spline,int idx) const2482 void OutputGenerator::movePointToBottomMargin(BinaryImage& bw_image, XSpline& spline, int idx) const {
2483   QPointF pos = spline.controlPointPosition(idx);
2484 
2485   for (int j = bw_image.height() - 1; j > pos.y(); j--) {
2486     if (bw_image.getPixel(static_cast<int>(pos.x()), j) == WHITE) {
2487       int count = 0;
2488       int check_num = 16;
2489 
2490       for (int jj = j; jj > (j - check_num); jj--) {
2491         if (bw_image.getPixel(static_cast<int>(pos.x()), jj) == WHITE) {
2492           count++;
2493         }
2494       }
2495 
2496       if (count == check_num) {
2497         pos.setY(j);
2498 
2499         spline.moveControlPoint(idx, pos);
2500 
2501         break;
2502       }
2503     }
2504   }
2505 }
2506 
drawPoint(QImage & image,const QPointF & pt) const2507 void OutputGenerator::drawPoint(QImage& image, const QPointF& pt) const {
2508   QPoint pts = pt.toPoint();
2509 
2510   for (int i = pts.x() - 10; i < pts.x() + 10; i++) {
2511     for (int j = pts.y() - 10; j < pts.y() + 10; j++) {
2512       QPoint p1(i, j);
2513 
2514       image.setPixel(p1, qRgb(255, 0, 0));
2515     }
2516   }
2517 }
2518 
movePointToTopMargin(BinaryImage & bw_image,std::vector<QPointF> & polyline,int idx) const2519 void OutputGenerator::movePointToTopMargin(BinaryImage& bw_image, std::vector<QPointF>& polyline, int idx) const {
2520   QPointF& pos = polyline[idx];
2521 
2522   for (int j = 0; j < pos.y(); j++) {
2523     if (bw_image.getPixel(static_cast<int>(pos.x()), j) == WHITE) {
2524       int count = 0;
2525       int check_num = 16;
2526 
2527       for (int jj = j; jj < (j + check_num); jj++) {
2528         if (bw_image.getPixel(static_cast<int>(pos.x()), jj) == WHITE) {
2529           count++;
2530         }
2531       }
2532 
2533       if (count == check_num) {
2534         pos.setY(j);
2535 
2536         break;
2537       }
2538     }
2539   }
2540 }
2541 
movePointToBottomMargin(BinaryImage & bw_image,std::vector<QPointF> & polyline,int idx) const2542 void OutputGenerator::movePointToBottomMargin(BinaryImage& bw_image, std::vector<QPointF>& polyline, int idx) const {
2543   QPointF& pos = polyline[idx];
2544 
2545   for (int j = bw_image.height() - 1; j > pos.y(); j--) {
2546     if (bw_image.getPixel(static_cast<int>(pos.x()), j) == WHITE) {
2547       int count = 0;
2548       int check_num = 16;
2549 
2550       for (int jj = j; jj > (j - check_num); jj--) {
2551         if (bw_image.getPixel(static_cast<int>(pos.x()), jj) == WHITE) {
2552           count++;
2553         }
2554       }
2555 
2556       if (count == check_num) {
2557         pos.setY(j);
2558 
2559         break;
2560       }
2561     }
2562   }
2563 }
2564 
vert_border_skew_angle(const QPointF & top,const QPointF & bottom) const2565 float OutputGenerator::vert_border_skew_angle(const QPointF& top, const QPointF& bottom) const {
2566   return static_cast<float>(qFabs(qAtan((bottom.x() - top.x()) / (bottom.y() - top.y())) * 180 / M_PI));
2567 }
2568 
deskew(QImage * image,const double angle,const QColor & outside_color) const2569 void OutputGenerator::deskew(QImage* image, const double angle, const QColor& outside_color) const {
2570   if (angle == .0) {
2571     return;
2572   }
2573 
2574   QPointF center(image->width() / 2.0, image->height() / 2.0);
2575 
2576   QTransform rot;
2577   rot.translate(center.x(), center.y());
2578   rot.rotate(-angle);
2579   rot.translate(-center.x(), -center.y());
2580 
2581   *image = imageproc::transform(*image, rot, image->rect(), OutsidePixels::assumeWeakColor(outside_color));
2582 }
2583 
maybe_deskew(QImage * dewarped,DewarpingOptions m_dewarpingOptions,const QColor & outside_color) const2584 double OutputGenerator::maybe_deskew(QImage* dewarped,
2585                                      DewarpingOptions m_dewarpingOptions,
2586                                      const QColor& outside_color) const {
2587   if (m_dewarpingOptions.needPostDeskew()
2588       && ((m_dewarpingOptions.dewarpingMode() == MARGINAL) || (m_dewarpingOptions.dewarpingMode() == MANUAL))) {
2589     BinaryThreshold bw_threshold(128);
2590     BinaryImage bw_image(*dewarped, bw_threshold);
2591 
2592     SkewFinder skew_finder;
2593     const Skew skew(skew_finder.findSkew(bw_image));
2594     if ((skew.angle() != 0.0) && (skew.confidence() >= Skew::GOOD_CONFIDENCE)) {
2595       const double angle_deg = skew.angle();
2596 
2597       deskew(dewarped, angle_deg, outside_color);
2598 
2599       return angle_deg;
2600     }
2601   }
2602 
2603   return .0;
2604 }
2605 
// Read-only accessor for the post-processing transform stored on this generator.
const QTransform& OutputGenerator::getPostTransform() const {
  return m_postTransform;
}
2609 
void OutputGenerator::applyFillZonesToMixedInPlace(QImage& img,
                                                   const ZoneSet& zones,
                                                   const BinaryImage& picture_mask,
                                                   const bool binary_mode) const {
  // Applies fill zones to a mixed (text + pictures) image, keeping the
  // text layer and the picture layer consistent with each other.
  if (binary_mode) {
    // Binarize the current content, fill zones in both the B/W layer and
    // the full image, then recombine them through the picture mask.
    BinaryImage bw_content(img, BinaryThreshold(1));
    applyFillZonesInPlace(bw_content, zones);
    applyFillZonesInPlace(img, zones);
    combineImages(img, bw_content, picture_mask);
  } else {
    // Snapshot the image, mask it down to the picture areas, fill zones in
    // that copy without antialiasing, fill zones in the full image, then
    // recombine through the picture mask.  The copy must be taken before
    // img is modified.
    QImage content(img);
    applyMask(content, picture_mask);
    applyFillZonesInPlace(content, zones, false);
    applyFillZonesInPlace(img, zones);
    combineImages(img, content, picture_mask);
  }
}
2627 
void OutputGenerator::applyFillZonesToMixedInPlace(QImage& img,
                                                   const ZoneSet& zones,
                                                   const boost::function<QPointF(const QPointF&)>& orig_to_output,
                                                   const QTransform& postTransform,
                                                   const BinaryImage& picture_mask,
                                                   const bool binary_mode) const {
  // Same as the simpler overload, but with an explicit coordinate mapping
  // (orig_to_output) and post-transform instead of the defaults.
  if (binary_mode) {
    // Fill zones in both the binarized layer and the full image, then
    // recombine them through the picture mask.
    BinaryImage bw_content(img, BinaryThreshold(1));
    applyFillZonesInPlace(bw_content, zones, orig_to_output, postTransform);
    applyFillZonesInPlace(img, zones, orig_to_output, postTransform);
    combineImages(img, bw_content, picture_mask);
  } else {
    // Snapshot-and-mask the picture areas first (before img is modified),
    // fill zones in both layers, then recombine through the mask.
    QImage content(img);
    applyMask(content, picture_mask);
    applyFillZonesInPlace(content, zones, orig_to_output, postTransform, false);
    applyFillZonesInPlace(img, zones, orig_to_output, postTransform);
    combineImages(img, content, picture_mask);
  }
}
2647 
QImage OutputGenerator::segmentImage(const BinaryImage& image, const QImage& color_image) const {
  const BlackWhiteOptions::ColorSegmenterOptions& opts = m_colorParams.blackWhiteOptions().getColorSegmenterOptions();

  if (color_image.allGray()) {
    // Grayscale source: segment using noise reduction only.
    return ColorSegmenter(image, GrayImage(color_image), m_dpi, opts.getNoiseReduction()).getImage();
  }

  // Color source: apply the per-channel threshold adjustments as well.
  return ColorSegmenter(image, color_image, m_dpi, opts.getNoiseReduction(), opts.getRedThresholdAdjustment(),
                        opts.getGreenThresholdAdjustment(), opts.getBlueThresholdAdjustment())
      .getImage();
}
2660 
QImage OutputGenerator::posterizeImage(const QImage& image, const QColor& background_color) const {
  const ColorCommonOptions::PosterizationOptions& options = m_colorParams.colorCommonOptions().getPosterizationOptions();

  // The background lightness (0..255) is passed as the white level.
  const int white_level = qRound(background_color.lightnessF() * 255);

  ColorTable color_table(image);
  return color_table
      .posterize(options.getLevel(), options.isNormalizationEnabled(), options.isForceBlackAndWhite(), 0, white_level)
      .getImage();
}
2670 }  // namespace output
2671