/* -*- C++ -*-
 *
 *  This file is part of RawTherapee.
 *
 *  Copyright (c) 2018 Alberto Griggio <alberto.griggio@gmail.com>
 *
 *  RawTherapee is free software: you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation, either version 3 of the License, or
 *  (at your option) any later version.
 *
 *  RawTherapee is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with RawTherapee.  If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Haze removal using the algorithm described in the paper:
 *
 * Single Image Haze Removal Using Dark Channel Prior
 * by He, Sun and Tang
 *
 * using a guided filter for the "soft matting" of the transmission map
 *
 */
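
/*
 * The assumed image formation model is
 *
 *   I(x) = J(x) * t(x) + A * (1 - t(x))
 *
 * where I is the observed (hazy) image, J the haze-free radiance, A the
 * global atmospheric ("ambient") light and t the transmission map. A and t
 * are estimated with the dark channel prior, and the scene is recovered as
 *
 *   J(x) = (I(x) - A) / max(t(x), t0) + A
 *
 * with t0 a lower bound on the transmission.
 */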

#include "improcfun.h"
#include "guidedfilter.h"
#include "rt_math.h"
#include "rt_algo.h"
#include "rescale.h"
//#include "gauss.h"
#include "boxblur.h"
#include <iostream>
#include <queue>

extern Options options;

namespace rtengine {

namespace {

#if 0
#  define DEBUG_DUMP(arr)                                               \
    do {                                                                \
        Imagefloat im(arr.width(), arr.height());                       \
        const char *out = "/tmp/" #arr ".tif";                          \
        for (int y = 0; y < im.getHeight(); ++y) {                      \
            for (int x = 0; x < im.getWidth(); ++x) {                   \
                im.r(y, x) = im.g(y, x) = im.b(y, x) = arr[y][x] * 65535.f; \
            }                                                           \
        }                                                               \
        im.saveTIFF(out, 16);                                           \
    } while (false)
#else
#  define DEBUG_DUMP(arr)
#endif


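// Scale the image so that no channel value exceeds 0.5 and return the factor
// needed to undo the scaling in restore() below.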
float normalize(Imagefloat *rgb, bool multithread)
{
    float maxval = 0.f;
    const int W = rgb->getWidth();
    const int H = rgb->getHeight();
#ifdef _OPENMP
#   pragma omp parallel for reduction(max:maxval) if (multithread)
#endif
    for (int y = 0; y < H; ++y) {
        for (int x = 0; x < W; ++x) {
            maxval = max(maxval, rgb->r(y, x), rgb->g(y, x), rgb->b(y, x));
        }
    }
    maxval = max(maxval * 2.f, 65535.f);
    rgb->multiply(1.f/maxval, multithread);
    return maxval;
}


void restore(Imagefloat *rgb, float maxval, bool multithread)
{
    rgb->multiply(maxval, multithread);
}


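// Compute the dark channel of the image on a grid of patchsize x patchsize
// patches: the minimum of R, G and B (optionally divided by the ambient
// light) over all pixels of each patch, written as one constant value per
// patch into dst. Returns the number of patches.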
int get_dark_channel(const array2D<float> &R, const array2D<float> &G, const array2D<float> &B, array2D<float> &dst, int patchsize, const float ambient[3], bool clip, bool multithread)
{
    const int W = R.width();
    const int H = R.height();

#ifdef _OPENMP
    #pragma omp parallel for if (multithread)
#endif
    for (int y = 0; y < H; y += patchsize) {
        const int pH = min(y + patchsize, H);
        for (int x = 0; x < W; x += patchsize) {
            float val = RT_INFINITY_F;
            const int pW = min(x + patchsize, W);
            for (int yy = y; yy < pH; ++yy) {
                for (int xx = x; xx < pW; ++xx) {
                    float r = R[yy][xx];
                    float g = G[yy][xx];
                    float b = B[yy][xx];
                    if (ambient) {
                        r /= ambient[0];
                        g /= ambient[1];
                        b /= ambient[2];
                    }
                    val = min(val, r, g, b);
                }
            }
            if (clip) {
                val = LIM01(val);
            }
            for (int yy = y; yy < pH; ++yy) {
                std::fill(dst[yy] + x, dst[yy] + pW, val);
            }
        }
    }

    return (W / patchsize + ((W % patchsize) > 0)) * (H / patchsize + ((H % patchsize) > 0));
}


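// Estimate the atmospheric (ambient) light: select the patches whose dark
// channel lies in the brightest ~5%, then average the RGB values of the
// brightest pixels (top ~5% of R+G+B) inside those patches. The return value
// is an estimate of the maximum scene distance derived from the dark channel
// percentile (formula borrowed from darktable, see below).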
float estimate_ambient_light(const array2D<float> &R, const array2D<float> &G, const array2D<float> &B, const array2D<float> &dark, int patchsize, int npatches, float ambient[3])
{
    const int W = R.width();
    const int H = R.height();

    const auto get_percentile =
        [](std::priority_queue<float> &q, float prcnt) -> float
        {
            size_t n = LIM<size_t>(q.size() * prcnt, 1, q.size());
            while (q.size() > n) {
                q.pop();
            }
            return q.top();
        };

    const auto OOG =
        [](float val, float high) -> bool
        {
            return (val < 0.f) || (val > high);
        };

    float darklim = RT_INFINITY_F;
    {
        std::priority_queue<float> p;
        for (int y = 0; y < H; y += patchsize) {
            for (int x = 0; x < W; x += patchsize) {
                if (!OOG(dark[y][x], 1.f - 1e-5f)) {
                    p.push(dark[y][x]);
                }
            }
        }
        darklim = get_percentile(p, 0.95);
    }

    std::vector<std::pair<int, int>> patches;
    patches.reserve(npatches);

    for (int y = 0; y < H; y += patchsize) {
        for (int x = 0; x < W; x += patchsize) {
            if (dark[y][x] >= darklim && !OOG(dark[y][x], 1.f)) {
                patches.push_back(std::make_pair(x, y));
            }
        }
    }

    if (options.rtSettings.verbose) {
        std::cout << "dehaze: computing ambient light from " << patches.size()
                  << " patches" << std::endl;
    }

    float bright_lim = RT_INFINITY_F;
    {
        std::priority_queue<float> l;

        for (auto &p : patches) {
            const int pW = min(p.first+patchsize, W);
            const int pH = min(p.second+patchsize, H);

            for (int y = p.second; y < pH; ++y) {
                for (int x = p.first; x < pW; ++x) {
                    l.push(R[y][x] + G[y][x] + B[y][x]);
                }
            }
        }

        bright_lim = get_percentile(l, 0.95);
    }

    double rr = 0, gg = 0, bb = 0;
    int n = 0;
    for (auto &p : patches) {
        const int pW = min(p.first+patchsize, W);
        const int pH = min(p.second+patchsize, H);

        for (int y = p.second; y < pH; ++y) {
            for (int x = p.first; x < pW; ++x) {
                float r = R[y][x];
                float g = G[y][x];
                float b = B[y][x];
                if (r + g + b >= bright_lim) {
                    rr += r;
                    gg += g;
                    bb += b;
                    ++n;
                }
            }
        }
    }
    n = std::max(n, 1);
    ambient[0] = rr / n;
    ambient[1] = gg / n;
    ambient[2] = bb / n;

    // taken from darktable
    return darklim > 0 ? -1.125f * std::log(darklim) : std::log(std::numeric_limits<float>::max()) / 2;
}


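// Extract edge-preserving smoothed copies of the R, G and B channels by
// running a guided filter on each channel with itself as the guide.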
void extract_channels(Imagefloat *img, array2D<float> &r, array2D<float> &g, array2D<float> &b, int radius, float epsilon, bool multithread)
{
    const int W = img->getWidth();
    const int H = img->getHeight();

    array2D<float> imgR(W, H, img->r.ptrs, ARRAY2D_BYREFERENCE);
    rtengine::guidedFilter(imgR, imgR, r, radius, epsilon, multithread);

    array2D<float> imgG(W, H, img->g.ptrs, ARRAY2D_BYREFERENCE);
    rtengine::guidedFilter(imgG, imgG, g, radius, epsilon, multithread);

    array2D<float> imgB(W, H, img->b.ptrs, ARRAY2D_BYREFERENCE);
    rtengine::guidedFilter(imgB, imgB, b, radius, epsilon, multithread);
}


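// Estimate a per-channel black point from a downscaled, box-blurred copy of
// the image and subtract the given percentage of it from every pixel,
// clamping the result at zero.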
void subtract_black(Imagefloat *img, int percent, bool multithread)
{
    const int W = img->getWidth();
    const int H = img->getHeight();

    constexpr int sizecap = 200;
    float r = float(W)/float(H);
    int ww = r >= 1.f ? sizecap : float(sizecap) / r;
    int hh = r >= 1.f ? float(sizecap) / r : sizecap;

    float black[3] = { RT_INFINITY_F, RT_INFINITY_F, RT_INFINITY_F };
    {
        array2D<float> rr(ww, hh, ARRAY2D_ALIGNED);
        array2D<float> gg(ww, hh, ARRAY2D_ALIGNED);
        array2D<float> bb(ww, hh, ARRAY2D_ALIGNED);
        rescaleNearest(img->r.ptrs, W, H, static_cast<float **>(rr), ww, hh, multithread);
        rescaleNearest(img->g.ptrs, W, H, static_cast<float **>(gg), ww, hh, multithread);
        rescaleNearest(img->b.ptrs, W, H, static_cast<float **>(bb), ww, hh, multithread);

        int radius = std::max(std::max(ww, hh) / 20, 1);
        boxblur(rr, rr, radius, ww, hh, multithread);
        boxblur(gg, gg, radius, ww, hh, multithread);
        boxblur(bb, bb, radius, ww, hh, multithread);

        for (int y = 0; y < hh; ++y) {
            for (int x = 0; x < ww; ++x) {
                black[0] = std::min(black[0], rr[y][x]);
                black[1] = std::min(black[1], gg[y][x]);
                black[2] = std::min(black[2], bb[y][x]);
            }
        }
    }

    const float scaling = float(percent) / 100.f;
    for (int c = 0; c < 3; ++c) {
        black[c] = std::max(0.f, black[c] * scaling);
    }

    if (options.rtSettings.verbose) {
        std::cout << "BLACK POINTS: " << black[0] << " " << black[1] << " " << black[2] << std::endl;
    }

#ifdef _OPENMP
#   pragma omp parallel for if (multithread)
#endif
    for (int y = 0; y < H; ++y) {
        for (int x = 0; x < W; ++x) {
            img->r(y, x) = std::max(img->r(y, x) - black[0], 0.f);
            img->g(y, x) = std::max(img->g(y, x) - black[1], 0.f);
            img->b(y, x) = std::max(img->b(y, x) - black[2], 0.f);
        }
    }
}

} // namespace


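// Main entry point. Outline:
//   1. normalize the image and optionally subtract the black point;
//   2. estimate the ambient light on a small downscaled copy;
//   3. compute the dark channel and the raw transmission map
//      t_tilde = 1 - strength * dark;
//   4. refine t_tilde with a guided filter (the "soft matting" step);
//   5. recover the scene radiance, or display the depth map, or add haze back
//      where the strength curve is negative.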
void ImProcFunctions::dehaze(Imagefloat *img)
{
    PlanarWhateverData<float> *editWhatever = nullptr;
    EditUniqueID editID = pipetteBuffer ? pipetteBuffer->getEditID() : EUID_None;

    if (editID == EUID_DehazeStrength && pipetteBuffer->getDataProvider()->getCurrSubscriber()->getPipetteBufferType() == BT_SINGLEPLANE_FLOAT) {
        editWhatever = pipetteBuffer->getSinglePlaneBuffer();
    }

    if (!params->dehaze.enabled) {
        if (editWhatever) {
            editWhatever->fill(0.f);
        }
        return;
    }

    TMatrix ws = ICCStore::getInstance()->workingSpaceMatrix(params->icm.workingProfile);

    img->setMode(Imagefloat::Mode::RGB, multiThread);
    const float maxchan = normalize(img, multiThread);

    const int W = img->getWidth();
    const int H = img->getHeight();

    if (editWhatever) {
#ifdef _OPENMP
#       pragma omp parallel for if (multiThread)
#endif
        for (int y = 0; y < editWhatever->getHeight(); ++y) {
            int yy = y + offset_y;
            for (int x = 0; x < editWhatever->getWidth(); ++x) {
                int xx = x + offset_x;
                float r = img->r(yy, xx), g = img->g(yy, xx), b = img->b(yy, xx);
                float Y = Color::rgbLuminance(r, g, b, ws) * maxchan;
                float s = Color::gamma2curve[Y];
                editWhatever->v(y, x) = LIM01(s / 65535.f);
            }
        }
    }

    if (params->dehaze.blackpoint) {
        subtract_black(img, params->dehaze.blackpoint, multiThread);
    }

    // const float strength = LIM01(float(std::abs(params->dehaze.strength)) / 100.f * 0.9f);
    // const bool add_haze = params->dehaze.strength < 0;

    // if (options.rtSettings.verbose) {
    //     std::cout << "dehaze: strength = " << strength << std::endl;
    // }

    array2D<float> dark(W, H, ARRAY2D_ALIGNED);

    int patchsize = max(int(5 / scale), 2);
    float ambient[3];
    array2D<float> &t_tilde = dark;
    float max_t = 0.f;

    {
        //array2D<float> R(W, H);
        array2D<float> &R = dark; // R and dark can safely use the same buffer, which is faster and reduces memory allocations/deallocations
        array2D<float> G(W, H, ARRAY2D_ALIGNED);
        array2D<float> B(W, H, ARRAY2D_ALIGNED);
        extract_channels(img, R, G, B, patchsize, 1e-1, multiThread);

        {
            constexpr int sizecap = 200;
            float r = float(W)/float(H);
            int ww = r >= 1.f ? sizecap : float(sizecap) / r;
            int hh = r >= 1.f ? float(sizecap) / r : sizecap;
            array2D<float> RR(ww, hh, ARRAY2D_ALIGNED);
            array2D<float> GG(ww, hh, ARRAY2D_ALIGNED);
            array2D<float> BB(ww, hh, ARRAY2D_ALIGNED);
            rescaleNearest(R, RR, multiThread);
            rescaleNearest(G, GG, multiThread);
            rescaleNearest(B, BB, multiThread);
            array2D<float> D(ww, hh);

            patchsize = 2;
            int npatches = get_dark_channel(RR, GG, BB, D, patchsize, nullptr, false, multiThread);
            max_t = estimate_ambient_light(RR, GG, BB, D, patchsize, npatches, ambient);
        }

        patchsize = max(max(W, H) / 600, 2);

        if (options.rtSettings.verbose) {
            std::cout << "dehaze: ambient light is "
                      << ambient[0] << ", " << ambient[1] << ", " << ambient[2]
                      << std::endl;
        }

        get_dark_channel(R, G, B, dark, patchsize, ambient, true, multiThread);
    }

    // if (min(ambient[0], ambient[1], ambient[2]) < 0.01f) {
    //     if (options.rtSettings.verbose) {
    //         std::cout << "dehaze: no haze detected" << std::endl;
    //     }
    //     restore(img, maxchan, multiThread);
    //     return; // probably no haze at all
    // }

    DEBUG_DUMP(t_tilde);

    array2D<bool> add_haze(W, H);

    FlatCurve strength_curve(params->dehaze.strength, false);
    strength_curve.setIdentityValue(0.5);
    LUTf strength(65536);
    for (int i = 0; i < 65536; ++i) {
        strength[i] = (strength_curve.getVal(Color::gamma2curve[i] / 65535.f) - 0.5f) * 1.3f;
    }

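    // Build the raw transmission map in place: t_tilde(x) = 1 - s(Y) * dark(x),
    // where s is the luminance-dependent strength taken from the curve above.
    // Negative strengths are recorded in add_haze so that the final pass adds
    // haze instead of removing it.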
#ifdef _OPENMP
    #pragma omp parallel for if (multiThread)
#endif
    for (int y = 0; y < H; ++y) {
        for (int x = 0; x < W; ++x) {
            float Y = Color::rgbLuminance(img->r(y, x), img->g(y, x), img->b(y, x), ws) * maxchan;
            float s = strength[Y];
            add_haze[y][x] = s < 0;
            dark[y][x] = 1.f - std::abs(s) * dark[y][x];
        }
    }

    const int radius = patchsize * 4;
    const float epsilon = 1e-5;
    array2D<float> &t = t_tilde;

    {
        array2D<float> guideB(W, H, img->b.ptrs, ARRAY2D_BYREFERENCE);
        rtengine::guidedFilter(guideB, t_tilde, t, radius, epsilon, multiThread);
    }

    DEBUG_DUMP(t);

    if (options.rtSettings.verbose) {
        std::cout << "dehaze: max distance is " << max_t << std::endl;
    }

    float depth = -float(params->dehaze.depth) / 100.f;
    const float teps = 1e-6f;
    const float t0 = max(teps, std::exp(depth * max_t));
    const bool luminance = params->dehaze.luminance;
    const float ambientY = Color::rgbLuminance(ambient[0], ambient[1], ambient[2], ws);
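    // Recover the scene radiance: J = (I - A) / max(t, t0, tl) + A, where t0
    // bounds the transmission from below according to the depth parameter and
    // tl guarantees non-negative output. In luminance mode only the luminance
    // is rescaled, which preserves the original channel ratios.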
#ifdef _OPENMP
    #pragma omp parallel for if (multiThread)
#endif
    for (int y = 0; y < H; ++y) {
        for (int x = 0; x < W; ++x) {
            // clamp the transmission so that the recovered values avoid clipping...
            float rgb[3] = { img->r(y, x), img->g(y, x), img->b(y, x) };
            // ... t >= tl to avoid negative values
            float tl = 1.f - min(rgb[0]/ambient[0], rgb[1]/ambient[1], rgb[2]/ambient[2]);
            // // ... t >= tu to avoid values > 1
            // float tu = t0 - teps;
            // for (int c = 0; c < 3; ++c) {
            //     if (ambient[c] < 1) {
            //         tu = max(tu, (rgb[c] - ambient[c])/(1.f - ambient[c]));
            //     }
            // }
            float &ir = img->r(y, x);
            float &ig = img->g(y, x);
            float &ib = img->b(y, x);

            float mt = max(t[y][x], t0, tl + teps);//, tu + teps);
            if (params->dehaze.showDepthMap) {
                img->r(y, x) = img->g(y, x) = img->b(y, x) = LIM01(1.f - mt);
            } else if (luminance) {
                float Y = Color::rgbLuminance(rgb[0], rgb[1], rgb[2], ws);
                float YY = (Y - ambientY) / mt + ambientY;
                if (Y > 1e-5f) {
                    if (add_haze[y][x]) {
                        YY = Y + Y - YY;
                    }
                    float f = YY / Y;
                    ir = rgb[0] * f;
                    ig = rgb[1] * f;
                    ib = rgb[2] * f;
                }
            } else {
                float r = (rgb[0] - ambient[0]) / mt + ambient[0];
                float g = (rgb[1] - ambient[1]) / mt + ambient[1];
                float b = (rgb[2] - ambient[2]) / mt + ambient[2];

                if (add_haze[y][x]) {
                    ir += (ir - r);
                    ig += (ig - g);
                    ib += (ib - b);
                } else {
                    ir = r;
                    ig = g;
                    ib = b;
                }
            }
        }
    }

    restore(img, maxchan, multiThread);
}


} // namespace rtengine