1 /*
2 * This file is part of RawTherapee.
3 *
4 * Copyright (c) 2004-2010 Gabor Horvath <hgabor@rawtherapee.com>
5 *
6 * RawTherapee is free software: you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation, either version 3 of the License, or
9 * (at your option) any later version.
10 *
11 * RawTherapee is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with RawTherapee. If not, see <https://www.gnu.org/licenses/>.
18 */
19 #include <cmath>
20 #include <cstdlib>
21 #include <cstring>
22
23 #include "gauss.h"
24
25 #include "boxblur.h"
26 #include "opthelper.h"
27 #include "rt_math.h"
28
29 namespace
30 {
31
compute7x7kernel(float sigma,float kernel[7][7])32 void compute7x7kernel(float sigma, float kernel[7][7]) {
33 const double temp = -2.f * rtengine::SQR(sigma);
34 float sum = 0.f;
35 for (int i = -3; i <= 3; ++i) {
36 for (int j = -3; j <= 3; ++j) {
37 if((rtengine::SQR(i) + rtengine::SQR(j)) <= rtengine::SQR(3.0 * 1.15)) {
38 kernel[i + 3][j + 3] = std::exp((rtengine::SQR(i) + rtengine::SQR(j)) / temp);
39 sum += kernel[i + 3][j + 3];
40 } else {
41 kernel[i + 3][j + 3] = 0.f;
42 }
43 }
44 }
45
46 for (int i = 0; i < 7; ++i) {
47 for (int j = 0; j < 7; ++j) {
48 kernel[i][j] /= sum;
49 }
50 }
51 }
52
compute5x5kernel(float sigma,float kernel[5][5])53 void compute5x5kernel(float sigma, float kernel[5][5]) {
54 const double temp = -2.f * rtengine::SQR(sigma);
55 float sum = 0.f;
56 for (int i = -2; i <= 2; ++i) {
57 for (int j = -2; j <= 2; ++j) {
58 if((rtengine::SQR(i) + rtengine::SQR(j)) <= rtengine::SQR(3.0 * 0.84)) {
59 kernel[i + 2][j + 2] = std::exp((rtengine::SQR(i) + rtengine::SQR(j)) / temp);
60 sum += kernel[i + 2][j + 2];
61 } else {
62 kernel[i + 2][j + 2] = 0.f;
63 }
64 }
65 }
66
67 for (int i = 0; i < 5; ++i) {
68 for (int j = 0; j < 5; ++j) {
69 kernel[i][j] /= sum;
70 }
71 }
72 }
73
// Computes the Young–van Vliet recursive Gaussian coefficients for `sigma`:
// the normalised feedback terms b1..b3, the input gain B, and the
// Triggs/Sdika boundary-correction matrix M (see reference below).
template<class T> void calculateYvVFactors( const T sigma, T &b1, T &b2, T &b3, T &B, T M[3][3])
{
    // q is the YvV smoothing parameter; the two branches are the published
    // polynomial fits for small and large sigma respectively.
    const T q = sigma < 2.5
                ? 3.97156 - 4.14554 * sqrt (1.0 - 0.26891 * sigma)
                : 0.98711 * sigma - 0.96330;

    const T b0 = 1.57825 + 2.44413 * q + 1.4281 * q * q + 0.422205 * q * q * q;
    b1 = 2.44413 * q + 2.85619 * q * q + 1.26661 * q * q * q;
    b2 = -1.4281 * q * q - 1.26661 * q * q * q;
    b3 = 0.422205 * q * q * q;
    B = 1.0 - (b1 + b2 + b3) / b0;

    // Normalise the feedback coefficients by b0 so the recursion is
    // out[t] = B*in[t] + b1*out[t-1] + b2*out[t-2] + b3*out[t-3].
    b1 /= b0;
    b2 /= b0;
    b3 /= b0;

    // From: Bill Triggs, Michael Sdika: Boundary Conditions for Young-van Vliet Recursive Filtering
    M[0][0] = -b3 * b1 + 1.0 - b3 * b3 - b2;
    M[0][1] = (b3 + b1) * (b2 + b3 * b1);
    M[0][2] = b3 * (b1 + b3 * b2);
    M[1][0] = b1 + b3 * b2;
    M[1][1] = -(b2 - 1.0) * (b2 + b3 * b1);
    M[1][2] = -(b3 * b1 + b3 * b3 + b2 - 1.0) * b3;
    M[2][0] = b3 * b1 + b2 + b1 * b1 - b2 * b2;
    M[2][1] = b1 * b2 + b3 * b2 * b2 - b1 * b3 * b3 - b3 * b3 * b3 - b3 * b2 + b3;
    M[2][2] = b3 * (b1 + b3 * b2);
}
107
108 // classical filtering if the support window is small and src != dst
gauss3x3(T ** RESTRICT src,T ** RESTRICT dst,const int W,const int H,const T c0,const T c1,const T c2,const T b0,const T b1)109 template<class T> void gauss3x3 (T** RESTRICT src, T** RESTRICT dst, const int W, const int H, const T c0, const T c1, const T c2, const T b0, const T b1)
110 {
111
112 // first row
113 #ifdef _OPENMP
114 #pragma omp single nowait
115 #endif
116 {
117 dst[0][0] = src[0][0];
118
119 for (int j = 1; j < W - 1; j++)
120 {
121 dst[0][j] = b1 * (src[0][j - 1] + src[0][j + 1]) + b0 * src[0][j];
122 }
123
124 dst[0][W - 1] = src[0][W - 1];
125 }
126
127 #ifdef _OPENMP
128 #pragma omp for nowait
129 #endif
130
131 for (int i = 1; i < H - 1; i++) {
132 dst[i][0] = b1 * (src[i - 1][0] + src[i + 1][0]) + b0 * src[i][0];
133
134 for (int j = 1; j < W - 1; j++) {
135 dst[i][j] = c2 * (src[i - 1][j - 1] + src[i - 1][j + 1] + src[i + 1][j - 1] + src[i + 1][j + 1]) + c1 * (src[i - 1][j] + src[i][j - 1] + src[i][j + 1] + src[i + 1][j]) + c0 * src[i][j];
136 }
137
138 dst[i][W - 1] = b1 * (src[i - 1][W - 1] + src[i + 1][W - 1]) + b0 * src[i][W - 1];
139 }
140
141 // last row
142 #ifdef _OPENMP
143 #pragma omp single
144 #endif
145 {
146 dst[H - 1][0] = src[H - 1][0];
147
148 for (int j = 1; j < W - 1; j++) {
149 dst[H - 1][j] = b1 * (src[H - 1][j - 1] + src[H - 1][j + 1]) + b0 * src[H - 1][j];
150 }
151
152 dst[H - 1][W - 1] = src[H - 1][W - 1];
153 }
154 }
155
156 // classical filtering if the support window is small and src != dst
gauss3x3mult(T ** RESTRICT src,T ** RESTRICT dst,const int W,const int H,const T c0,const T c1,const T c2,const T b0,const T b1)157 template<class T> void gauss3x3mult (T** RESTRICT src, T** RESTRICT dst, const int W, const int H, const T c0, const T c1, const T c2, const T b0, const T b1)
158 {
159
160 // first row
161 #ifdef _OPENMP
162 #pragma omp single nowait
163 #endif
164 {
165 dst[0][0] *= src[0][0];
166
167 for (int j = 1; j < W - 1; j++)
168 {
169 dst[0][j] *= b1 * (src[0][j - 1] + src[0][j + 1]) + b0 * src[0][j];
170 }
171
172 dst[0][W - 1] *= src[0][W - 1];
173 }
174
175 #ifdef _OPENMP
176 #pragma omp for nowait
177 #endif
178
179 for (int i = 1; i < H - 1; i++) {
180 dst[i][0] *= b1 * (src[i - 1][0] + src[i + 1][0]) + b0 * src[i][0];
181
182 for (int j = 1; j < W - 1; j++) {
183 dst[i][j] *= c2 * (src[i - 1][j - 1] + src[i - 1][j + 1] + src[i + 1][j - 1] + src[i + 1][j + 1]) + c1 * (src[i - 1][j] + src[i][j - 1] + src[i][j + 1] + src[i + 1][j]) + c0 * src[i][j];
184 }
185
186 dst[i][W - 1] *= b1 * (src[i - 1][W - 1] + src[i + 1][W - 1]) + b0 * src[i][W - 1];
187 }
188
189 // last row
190 #ifdef _OPENMP
191 #pragma omp single
192 #endif
193 {
194 dst[H - 1][0] *= src[H - 1][0];
195
196 for (int j = 1; j < W - 1; j++) {
197 dst[H - 1][j] *= b1 * (src[H - 1][j - 1] + src[H - 1][j + 1]) + b0 * src[H - 1][j];
198 }
199
200 dst[H - 1][W - 1] *= src[H - 1][W - 1];
201 }
202 }
203
// Division variant of the small-support 3x3 filter (requires src != dst):
// dst = divBuffer / blur3x3(src). A non-positive blurred denominator is
// replaced by 1 before dividing, and the quotient is clamped to >= 0.
// Border rows/columns use the reduced 3-tap (b0/b1) kernel as in gauss3x3.
// Intended to be called from inside an OpenMP parallel region.
template<class T> void gauss3x3div (T** RESTRICT src, T** RESTRICT dst, T** RESTRICT divBuffer, const int W, const int H, const T c0, const T c1, const T c2, const T b0, const T b1)
{

    // first row — one thread only; others may proceed to the row loop (nowait)
#ifdef _OPENMP
    #pragma omp single nowait
#endif
    {
        dst[0][0] = rtengine::max(divBuffer[0][0] / (src[0][0] > 0.f ? src[0][0] : 1.f), 0.f);

        for (int j = 1; j < W - 1; j++)
        {
            float tmp = (b1 * (src[0][j - 1] + src[0][j + 1]) + b0 * src[0][j]);
            dst[0][j] = rtengine::max(divBuffer[0][j] / (tmp > 0.f ? tmp : 1.f), 0.f);
        }

        dst[0][W - 1] = rtengine::max(divBuffer[0][W - 1] / (src[0][W - 1] > 0.f ? src[0][W - 1] : 1.f), 0.f);
    }

    // interior rows: full 3x3 kernel (c0 centre, c1 edges, c2 corners)
#ifdef _OPENMP
    #pragma omp for nowait
#endif

    for (int i = 1; i < H - 1; i++) {
        // first column: vertical 3-tap only
        float tmp = (b1 * (src[i - 1][0] + src[i + 1][0]) + b0 * src[i][0]);
        dst[i][0] = rtengine::max(divBuffer[i][0] / (tmp > 0.f ? tmp : 1.f), 0.f);

        for (int j = 1; j < W - 1; j++) {
            tmp = (c2 * (src[i - 1][j - 1] + src[i - 1][j + 1] + src[i + 1][j - 1] + src[i + 1][j + 1]) + c1 * (src[i - 1][j] + src[i][j - 1] + src[i][j + 1] + src[i + 1][j]) + c0 * src[i][j]);
            dst[i][j] = rtengine::max(divBuffer[i][j] / (tmp > 0.f ? tmp : 1.f), 0.f);
        }

        // last column: vertical 3-tap only
        tmp = (b1 * (src[i - 1][W - 1] + src[i + 1][W - 1]) + b0 * src[i][W - 1]);
        dst[i][W - 1] = rtengine::max(divBuffer[i][W - 1] / (tmp > 0.f ? tmp : 1.f), 0.f);
    }

    // last row — the implicit barrier of this single synchronises all threads
#ifdef _OPENMP
    #pragma omp single
#endif
    {
        dst[H - 1][0] = rtengine::max(divBuffer[H - 1][0] / (src[H - 1][0] > 0.f ? src[H - 1][0] : 1.f), 0.f);

        for (int j = 1; j < W - 1; j++) {
            float tmp = (b1 * (src[H - 1][j - 1] + src[H - 1][j + 1]) + b0 * src[H - 1][j]);
            dst[H - 1][j] = rtengine::max(divBuffer[H - 1][j] / (tmp > 0.f ? tmp : 1.f), 0.f);
        }

        dst[H - 1][W - 1] = rtengine::max(divBuffer[H - 1][W - 1] / (src[H - 1][W - 1] > 0.f ? src[H - 1][W - 1] : 1.f), 0.f);
    }
}
255
gauss7x7div(T ** RESTRICT src,T ** RESTRICT dst,T ** RESTRICT divBuffer,const int W,const int H,float sigma)256 template<class T> void gauss7x7div (T** RESTRICT src, T** RESTRICT dst, T** RESTRICT divBuffer, const int W, const int H, float sigma)
257 {
258
259 float kernel[7][7];
260 compute7x7kernel(sigma, kernel);
261
262 const float c31 = kernel[0][2];
263 const float c30 = kernel[0][3];
264 const float c22 = kernel[1][1];
265 const float c21 = kernel[1][2];
266 const float c20 = kernel[1][3];
267 const float c11 = kernel[2][2];
268 const float c10 = kernel[2][3];
269 const float c00 = kernel[3][3];
270
271 #ifdef _OPENMP
272 #pragma omp for schedule(dynamic, 16) nowait
273 #endif
274
275 for (int i = 3; i < H - 3; ++i) {
276 dst[i][0] = dst[i][1] = dst[i][2] = 1.f;
277 // I tried hand written SSE code but gcc vectorizes better
278 for (int j = 3; j < W - 3; ++j) {
279 const float val = c31 * (src[i - 3][j - 1] + src[i - 3][j + 1] + src[i - 1][j - 3] + src[i - 1][j + 3] + src[i + 1][j - 3] + src[i + 1][j + 3] + src[i + 3][j - 1] + src[i + 3][j + 1]) +
280 c30 * (src[i - 3][j] + src[i][j - 3] + src[i][j + 3] + src[i + 3][j]) +
281 c22 * (src[i - 2][j - 2] + src[i - 2][j + 2] + src[i + 2][j - 2] + src[i + 2][j + 2]) +
282 c21 * (src[i - 2][j - 1] + src[i - 2][j + 1] * c21 + src[i - 1][j - 2] + src[i - 1][j + 2] + src[i + 1][j - 2] + src[i + 1][j + 2] + src[i + 2][j - 1] + src[i + 2][j + 1]) +
283 c20 * (src[i - 2][j] + src[i][j - 2] + src[i][j + 2] + src[i + 2][j]) +
284 c11 * (src[i - 1][j - 1] + src[i - 1][j + 1] + src[i + 1][j - 1] + src[i + 1][j + 1]) +
285 c10 * (src[i - 1][j] + src[i][j - 1] + src[i][j + 1] + src[i + 1][j]) +
286 c00 * src[i][j];
287
288 dst[i][j] = divBuffer[i][j] / std::max(val, 0.00001f);
289 }
290 dst[i][W - 3] = dst[i][W - 2] = dst[i][W - 1] = 1.f;
291 }
292
293 // first and last rows
294 #ifdef _OPENMP
295 #pragma omp single
296 #endif
297 {
298 for (int i = 0; i < 3; ++i) {
299 for (int j = 0; j < W; ++j) {
300 dst[i][j] = 1.f;
301 }
302 }
303 for (int i = H - 3 ; i < H; ++i) {
304 for (int j = 0; j < W; ++j) {
305 dst[i][j] = 1.f;
306 }
307 }
308 }
309 }
310
gauss5x5div(T ** RESTRICT src,T ** RESTRICT dst,T ** RESTRICT divBuffer,const int W,const int H,float sigma)311 template<class T> void gauss5x5div (T** RESTRICT src, T** RESTRICT dst, T** RESTRICT divBuffer, const int W, const int H, float sigma)
312 {
313
314 float kernel[5][5];
315 compute5x5kernel(sigma, kernel);
316
317 const float c21 = kernel[0][1];
318 const float c20 = kernel[0][2];
319 const float c11 = kernel[1][1];
320 const float c10 = kernel[1][2];
321 const float c00 = kernel[2][2];
322
323 #ifdef _OPENMP
324 #pragma omp for schedule(dynamic, 16) nowait
325 #endif
326
327 for (int i = 2; i < H - 2; ++i) {
328 dst[i][0] = dst[i][1] = 1.f;
329 // I tried hand written SSE code but gcc vectorizes better
330 for (int j = 2; j < W - 2; ++j) {
331 const float val = c21 * (src[i - 2][j - 1] + src[i - 2][j + 1] + src[i - 1][j - 2] + src[i - 1][j + 2] + src[i + 1][j - 2] + src[i + 1][j + 2] + src[i + 2][j - 1] + src[i + 2][j + 1]) +
332 c20 * (src[i - 2][j] + src[i][j - 2] + src[i][j + 2] + src[i + 2][j]) +
333 c11 * (src[i - 1][j - 1] + src[i - 1][j + 1] + src[i + 1][j - 1] + src[i + 1][j + 1]) +
334 c10 * (src[i - 1][j] + src[i][j - 1] + src[i][j + 1] + src[i + 1][j]) +
335 c00 * src[i][j];
336
337 dst[i][j] = divBuffer[i][j] / std::max(val, 0.00001f);
338 }
339 dst[i][W - 2] = dst[i][W - 1] = 1.f;
340 }
341
342 // first and last rows
343 #ifdef _OPENMP
344 #pragma omp single
345 #endif
346 {
347 for (int i = 0; i < 2; ++i) {
348 for (int j = 0; j < W; ++j) {
349 dst[i][j] = 1.f;
350 }
351 }
352 for (int i = H - 2 ; i < H; ++i) {
353 for (int j = 0; j < W; ++j) {
354 dst[i][j] = 1.f;
355 }
356 }
357 }
358 }
359
gauss7x7mult(T ** RESTRICT src,T ** RESTRICT dst,const int W,const int H,float sigma)360 template<class T> void gauss7x7mult (T** RESTRICT src, T** RESTRICT dst, const int W, const int H, float sigma)
361 {
362
363 float kernel[7][7];
364 compute7x7kernel(sigma, kernel);
365 const float c31 = kernel[0][2];
366 const float c30 = kernel[0][3];
367 const float c22 = kernel[1][1];
368 const float c21 = kernel[1][2];
369 const float c20 = kernel[1][3];
370 const float c11 = kernel[2][2];
371 const float c10 = kernel[2][3];
372 const float c00 = kernel[3][3];
373
374 #ifdef _OPENMP
375 #pragma omp for schedule(dynamic, 16)
376 #endif
377
378 for (int i = 3; i < H - 3; ++i) {
379 // I tried hand written SSE code but gcc vectorizes better
380 for (int j = 3; j < W - 3; ++j) {
381 const float val = c31 * (src[i - 3][j - 1] + src[i - 3][j + 1] + src[i - 1][j - 3] + src[i - 1][j + 3] + src[i + 1][j - 3] + src[i + 1][j + 3] + src[i + 3][j - 1] + src[i + 3][j + 1]) +
382 c30 * (src[i - 3][j] + src[i][j - 3] + src[i][j + 3] + src[i + 3][j]) +
383 c22 * (src[i - 2][j - 2] + src[i - 2][j + 2] + src[i + 2][j - 2] + src[i + 2][j + 2]) +
384 c21 * (src[i - 2][j - 1] + src[i - 2][j + 1] * c21 + src[i - 1][j - 2] + src[i - 1][j + 2] + src[i + 1][j - 2] + src[i + 1][j + 2] + src[i + 2][j - 1] + src[i + 2][j + 1]) +
385 c20 * (src[i - 2][j] + src[i][j - 2] + src[i][j + 2] + src[i + 2][j]) +
386 c11 * (src[i - 1][j - 1] + src[i - 1][j + 1] + src[i + 1][j - 1] + src[i + 1][j + 1]) +
387 c10 * (src[i - 1][j] + src[i][j - 1] + src[i][j + 1] + src[i + 1][j]) +
388 c00 * src[i][j];
389
390 dst[i][j] *= val;
391 }
392 }
393 }
394
gauss5x5mult(T ** RESTRICT src,T ** RESTRICT dst,const int W,const int H,float sigma)395 template<class T> void gauss5x5mult (T** RESTRICT src, T** RESTRICT dst, const int W, const int H, float sigma)
396 {
397
398 float kernel[5][5];
399 compute5x5kernel(sigma, kernel);
400
401 const float c21 = kernel[0][1];
402 const float c20 = kernel[0][2];
403 const float c11 = kernel[1][1];
404 const float c10 = kernel[1][2];
405 const float c00 = kernel[2][2];
406
407 #ifdef _OPENMP
408 #pragma omp for schedule(dynamic, 16)
409 #endif
410
411 for (int i = 2; i < H - 2; ++i) {
412 // I tried hand written SSE code but gcc vectorizes better
413 for (int j = 2; j < W - 2; ++j) {
414 const float val = c21 * (src[i - 2][j - 1] + src[i - 2][j + 1] + src[i - 1][j - 2] + src[i - 1][j + 2] + src[i + 1][j - 2] + src[i + 1][j + 2] + src[i + 2][j - 1] + src[i + 2][j + 1]) +
415 c20 * (src[i - 2][j] + src[i][j - 2] + src[i][j + 2] + src[i + 2][j]) +
416 c11 * (src[i - 1][j - 1] + src[i - 1][j + 1] + src[i + 1][j - 1] + src[i + 1][j + 1]) +
417 c10 * (src[i - 1][j] + src[i][j - 1] + src[i][j + 1] + src[i + 1][j]) +
418 c00 * src[i][j];
419
420 dst[i][j] *= val;
421 }
422 }
423 }
424
425 // use separated filter if the support window is small and src == dst
gaussHorizontal3(T ** src,T ** dst,int W,int H,const float c0,const float c1)426 template<class T> void gaussHorizontal3 (T** src, T** dst, int W, int H, const float c0, const float c1)
427 {
428 T temp[W] ALIGNED16;
429 #ifdef _OPENMP
430 #pragma omp for
431 #endif
432
433 for (int i = 0; i < H; i++) {
434 for (int j = 1; j < W - 1; j++) {
435 temp[j] = (T)(c1 * (src[i][j - 1] + src[i][j + 1]) + c0 * src[i][j]);
436 }
437
438 dst[i][0] = src[i][0];
439 memcpy (dst[i] + 1, temp + 1, (W - 2)*sizeof(T));
440
441 dst[i][W - 1] = src[i][W - 1];
442 }
443 }
444
445 #ifdef __SSE2__
// Vertical 3-tap Gaussian pass (c0 centre, c1 neighbours), SSE variant.
// Processes 8 columns per outer iteration as two 4-wide vectors, keeping a
// rolling window of three rows (Tm1 = row above, T = current, Tp1 = row below)
// in registers. First and last image rows are copied unfiltered. Leftover
// columns (W % 8) are handled scalar. Runs inside an OpenMP parallel region.
template<class T> void gaussVertical3 (T** src, T** dst, int W, int H, const float c0, const float c1)
{
    vfloat Tv = F2V(0.f), Tm1v, Tp1v;     // current / above / below rows, lanes 0-3
    vfloat Tv1 = F2V(0.f), Tm1v1, Tp1v1;  // same, lanes 4-7
    vfloat c0v, c1v;
    c0v = F2V(c0);
    c1v = F2V(c1);

#ifdef _OPENMP
    #pragma omp for nowait
#endif

    // process 8 columns per iteration for better usage of cpu cache
    for (int i = 0; i < W - 7; i += 8) {
        // Top row is copied through unfiltered.
        Tm1v = LVFU( src[0][i] );
        Tm1v1 = LVFU( src[0][i + 4] );
        STVFU( dst[0][i], Tm1v);
        STVFU( dst[0][i + 4], Tm1v1);

        if (H > 1) {
            Tv = LVFU( src[1][i]);
            Tv1 = LVFU( src[1][i + 4]);
        }

        // Slide the 3-row window down the image.
        for (int j = 1; j < H - 1; j++) {
            Tp1v = LVFU( src[j + 1][i]);
            Tp1v1 = LVFU( src[j + 1][i + 4]);
            STVFU( dst[j][i], c1v * (Tp1v + Tm1v) + Tv * c0v);
            STVFU( dst[j][i + 4], c1v * (Tp1v1 + Tm1v1) + Tv1 * c0v);
            Tm1v = Tv;
            Tm1v1 = Tv1;
            Tv = Tp1v;
            Tv1 = Tp1v1;
        }

        // Bottom row is copied through unfiltered.
        STVFU( dst[H - 1][i], LVFU( src[H - 1][i]));
        STVFU( dst[H - 1][i + 4], LVFU( src[H - 1][i + 4]));
    }

    // Borders are done without SSE
    float temp[H] ALIGNED16;
#ifdef _OPENMP
    #pragma omp single
#endif

    // Remaining W % 8 columns, processed by a single thread. The column is
    // staged in `temp` first, so this is safe even if src == dst.
    for (int i = W - (W % 8); i < W; i++) {
        for (int j = 1; j < H - 1; j++) {
            temp[j] = c1 * (src[j - 1][i] + src[j + 1][i]) + c0 * src[j][i];
        }

        dst[0][i] = src[0][i];

        for (int j = 1; j < H - 1; j++) {
            dst[j][i] = temp[j];
        }

        dst[H - 1][i] = src[H - 1][i];
    }
}
505 #else
gaussVertical3(T ** src,T ** dst,int W,int H,const float c0,const float c1)506 template<class T> void gaussVertical3 (T** src, T** dst, int W, int H, const float c0, const float c1)
507 {
508 T temp[H] ALIGNED16;
509 #ifdef _OPENMP
510 #pragma omp for
511 #endif
512
513 for (int i = 0; i < W; i++) {
514 for (int j = 1; j < H - 1; j++) {
515 temp[j] = (T)(c1 * (src[j - 1][i] + src[j + 1][i]) + c0 * src[j][i]);
516 }
517
518 dst[0][i] = src[0][i];
519
520 for (int j = 1; j < H - 1; j++) {
521 dst[j][i] = temp[j];
522 }
523
524 dst[H - 1][i] = src[H - 1][i];
525 }
526 }
527 #endif
528
529 #ifdef __SSE2__
530 // fast gaussian approximation if the support window is large
// fast gaussian approximation if the support window is large
// Young–van Vliet recursive Gaussian along image rows, SSE variant.
// Four rows are processed at once by packing one pixel from each row into an
// SSE vector (a software transpose via _mm_set_ps): a forward recursion runs
// left→right into the scratch buffer, the Triggs/Sdika boundary matrix M
// corrects the right edge, then a backward recursion runs right→left.
// Leftover rows (H % 4) are handled by a single-threaded scalar tail.
template<class T> void gaussHorizontalSse (T** src, T** dst, const int W, const int H, const float sigma)
{
    double b1, b2, b3, B, M[3][3];
    calculateYvVFactors<double>(sigma, b1, b2, b3, B, M);

    // Rescale the boundary matrix for the normalised recursion used here.
    for (int i = 0; i < 3; i++)
        for (int j = 0; j < 3; j++) {
            M[i][j] *= (1.0 + b2 + (b1 - b3) * b3);
            M[i][j] /= (1.0 + b1 - b2 + b3) * (1.0 - b1 - b2 - b3);
        }

    vfloat Rv;                 // most recent recursion output
    vfloat Tv, Tm2v, Tm3v;     // outputs at t-1, t-2, t-3
    vfloat Bv, b1v, b2v, b3v;  // vectorised filter coefficients
    vfloat temp2W, temp2Wp1;   // Triggs/Sdika extrapolations past the right edge
    float tmp[W][4] ALIGNED16; // scratch: one 4-row vector per column
    Bv = F2V(B);
    b1v = F2V(b1);
    b2v = F2V(b2);
    b3v = F2V(b3);

#ifdef _OPENMP
    #pragma omp for nowait
#endif

    for (int i = 0; i < H - 3; i += 4) {
        // Warm-up: first three columns assume constant extension of the left
        // border sample (hence the summed coefficient weights).
        Tv = _mm_set_ps(src[i][0], src[i + 1][0], src[i + 2][0], src[i + 3][0]);
        Tm3v = Tv * (Bv + b1v + b2v + b3v);
        STVF( tmp[0][0], Tm3v );

        Tm2v = _mm_set_ps(src[i][1], src[i + 1][1], src[i + 2][1], src[i + 3][1]) * Bv + Tm3v * b1v + Tv * (b2v + b3v);
        STVF( tmp[1][0], Tm2v );

        Rv = _mm_set_ps(src[i][2], src[i + 1][2], src[i + 2][2], src[i + 3][2]) * Bv + Tm2v * b1v + Tm3v * b2v + Tv * b3v;
        STVF( tmp[2][0], Rv );

        // Forward recursion to the right edge.
        for (int j = 3; j < W; j++) {
            Tv = Rv;
            Rv = _mm_set_ps(src[i][j], src[i + 1][j], src[i + 2][j], src[i + 3][j]) * Bv + Tv * b1v + Tm2v * b2v + Tm3v * b3v;
            STVF( tmp[j][0], Rv );
            Tm3v = Tm2v;
            Tm2v = Tv;
        }

        // Right-border correction (Triggs/Sdika matrix applied to the last
        // three forward outputs relative to the border sample).
        Tv = _mm_set_ps(src[i][W - 1], src[i + 1][W - 1], src[i + 2][W - 1], src[i + 3][W - 1]);

        temp2Wp1 = Tv + F2V(M[2][0]) * (Rv - Tv) + F2V(M[2][1]) * ( Tm2v - Tv ) + F2V(M[2][2]) * (Tm3v - Tv);
        temp2W = Tv + F2V(M[1][0]) * (Rv - Tv) + F2V(M[1][1]) * (Tm2v - Tv) + F2V(M[1][2]) * (Tm3v - Tv);

        Rv = Tv + F2V(M[0][0]) * (Rv - Tv) + F2V(M[0][1]) * (Tm2v - Tv) + F2V(M[0][2]) * (Tm3v - Tv);
        STVF(tmp[W - 1][0], Rv);

        Tm2v = Bv * Tm2v + b1v * Rv + b2v * temp2W + b3v * temp2Wp1;
        STVF(tmp[W - 2][0], Tm2v);

        Tm3v = Bv * Tm3v + b1v * Tm2v + b2v * Rv + b3v * temp2W;
        STVF(tmp[W - 3][0], Tm3v);

        Tv = Rv;
        Rv = Tm3v;
        Tm3v = Tv;

        // Backward recursion over the scratch buffer.
        for (int j = W - 4; j >= 0; j--) {
            Tv = Rv;
            Rv = LVF(tmp[j][0]) * Bv + Tv * b1v + Tm2v * b2v + Tm3v * b3v;
            STVF(tmp[j][0], Rv);
            Tm3v = Tm2v;
            Tm2v = Tv;
        }

        // Untranspose the scratch vectors back into the four output rows.
        for (int j = 0; j < W; j++) {
            dst[i + 3][j] = tmp[j][0];
            dst[i + 2][j] = tmp[j][1];
            dst[i + 1][j] = tmp[j][2];
            dst[i + 0][j] = tmp[j][3];
        }


    }

    // Borders are done without SSE
#ifdef _OPENMP
    #pragma omp single
#endif

    // Remaining H % 4 rows: same forward / boundary / backward scheme, scalar,
    // reusing lane 0 of the scratch buffer.
    for (int i = H - (H % 4); i < H; i++) {
        tmp[0][0] = src[i][0] * (B + b1 + b2 + b3);
        tmp[1][0] = B * src[i][1] + b1 * tmp[0][0] + src[i][0] * (b2 + b3);
        tmp[2][0] = B * src[i][2] + b1 * tmp[1][0] + b2 * tmp[0][0] + b3 * src[i][0];

        for (int j = 3; j < W; j++) {
            tmp[j][0] = B * src[i][j] + b1 * tmp[j - 1][0] + b2 * tmp[j - 2][0] + b3 * tmp[j - 3][0];
        }

        float temp2Wm1 = src[i][W - 1] + M[0][0] * (tmp[W - 1][0] - src[i][W - 1]) + M[0][1] * (tmp[W - 2][0] - src[i][W - 1]) + M[0][2] * (tmp[W - 3][0] - src[i][W - 1]);
        float temp2W = src[i][W - 1] + M[1][0] * (tmp[W - 1][0] - src[i][W - 1]) + M[1][1] * (tmp[W - 2][0] - src[i][W - 1]) + M[1][2] * (tmp[W - 3][0] - src[i][W - 1]);
        float temp2Wp1 = src[i][W - 1] + M[2][0] * (tmp[W - 1][0] - src[i][W - 1]) + M[2][1] * (tmp[W - 2][0] - src[i][W - 1]) + M[2][2] * (tmp[W - 3][0] - src[i][W - 1]);

        tmp[W - 1][0] = temp2Wm1;
        tmp[W - 2][0] = B * tmp[W - 2][0] + b1 * tmp[W - 1][0] + b2 * temp2W + b3 * temp2Wp1;
        tmp[W - 3][0] = B * tmp[W - 3][0] + b1 * tmp[W - 2][0] + b2 * tmp[W - 1][0] + b3 * temp2W;

        for (int j = W - 4; j >= 0; j--) {
            tmp[j][0] = B * tmp[j][0] + b1 * tmp[j + 1][0] + b2 * tmp[j + 2][0] + b3 * tmp[j + 3][0];
        }

        for (int j = 0; j < W; j++) {
            dst[i][j] = tmp[j][0];
        }
    }
}
642 #endif
643
644 // fast gaussian approximation if the support window is large
// fast gaussian approximation if the support window is large
// Scalar double-precision Young–van Vliet recursive Gaussian along rows:
// forward recursion left→right, Triggs/Sdika boundary correction at the right
// edge, then backward recursion right→left. Intended to run inside an OpenMP
// parallel region (the row loop is a worksharing `for`).
template<class T> void gaussHorizontal (T** src, T** dst, const int W, const int H, const double sigma)
{
    double b1, b2, b3, B, M[3][3];
    calculateYvVFactors<double>(sigma, b1, b2, b3, B, M);

    // NOTE(review): the scaling of M here differs from the SSE variants (which
    // multiply by (1 + b2 + (b1 - b3)*b3) and divide by (1 + b1 - b2 + b3) *
    // (1 - b1 - b2 - b3)) — presumably intentional for this double-precision
    // path; confirm against the Triggs/Sdika paper.
    for (int i = 0; i < 3; i++)
        for (int j = 0; j < 3; j++) {
            M[i][j] /= (1.0 + b1 - b2 + b3) * (1.0 + b2 + (b1 - b3) * b3);
        }

    double temp2[W] ALIGNED16;

#ifdef _OPENMP
    #pragma omp for
#endif

    for (int i = 0; i < H; i++) {

        // Warm-up: first three samples assume constant extension of the left
        // border pixel.
        temp2[0] = B * src[i][0] + b1 * src[i][0] + b2 * src[i][0] + b3 * src[i][0];
        temp2[1] = B * src[i][1] + b1 * temp2[0] + b2 * src[i][0] + b3 * src[i][0];
        temp2[2] = B * src[i][2] + b1 * temp2[1] + b2 * temp2[0] + b3 * src[i][0];

        // Forward recursion.
        for (int j = 3; j < W; j++) {
            temp2[j] = B * src[i][j] + b1 * temp2[j - 1] + b2 * temp2[j - 2] + b3 * temp2[j - 3];
        }

        // Right-border correction via the boundary matrix M.
        double temp2Wm1 = src[i][W - 1] + M[0][0] * (temp2[W - 1] - src[i][W - 1]) + M[0][1] * (temp2[W - 2] - src[i][W - 1]) + M[0][2] * (temp2[W - 3] - src[i][W - 1]);
        double temp2W = src[i][W - 1] + M[1][0] * (temp2[W - 1] - src[i][W - 1]) + M[1][1] * (temp2[W - 2] - src[i][W - 1]) + M[1][2] * (temp2[W - 3] - src[i][W - 1]);
        double temp2Wp1 = src[i][W - 1] + M[2][0] * (temp2[W - 1] - src[i][W - 1]) + M[2][1] * (temp2[W - 2] - src[i][W - 1]) + M[2][2] * (temp2[W - 3] - src[i][W - 1]);

        temp2[W - 1] = temp2Wm1;
        temp2[W - 2] = B * temp2[W - 2] + b1 * temp2[W - 1] + b2 * temp2W + b3 * temp2Wp1;
        temp2[W - 3] = B * temp2[W - 3] + b1 * temp2[W - 2] + b2 * temp2[W - 1] + b3 * temp2W;

        // Backward recursion.
        for (int j = W - 4; j >= 0; j--) {
            temp2[j] = B * temp2[j] + b1 * temp2[j + 1] + b2 * temp2[j + 2] + b3 * temp2[j + 3];
        }

        for (int j = 0; j < W; j++) {
            dst[i][j] = (T)temp2[j];
        }

    }
}
689
690 #ifdef __SSE2__
// Young–van Vliet recursive Gaussian along image columns, SSE variant.
// Processes 8 columns per outer iteration as two 4-wide vector lanes
// (suffix "1" = lanes 4-7). The forward recursion writes into a scratch
// buffer; the Triggs/Sdika boundary matrix M corrects the bottom edge; the
// backward recursion writes directly into dst. Leftover columns (W % 8) are
// handled by a single-threaded scalar tail. Runs inside an OpenMP region.
template<class T> void gaussVerticalSse (T** src, T** dst, const int W, const int H, const float sigma)
{
    double b1, b2, b3, B, M[3][3];
    calculateYvVFactors<double>(sigma, b1, b2, b3, B, M);

    // Rescale the boundary matrix for the normalised recursion used here.
    for (int i = 0; i < 3; i++)
        for (int j = 0; j < 3; j++) {
            M[i][j] *= (1.0 + b2 + (b1 - b3) * b3);
            M[i][j] /= (1.0 + b1 - b2 + b3) * (1.0 - b1 - b2 - b3);
        }

    float tmp[H][8] ALIGNED16;  // scratch: forward-pass outputs, 8 floats per row
    vfloat Rv;                  // most recent recursion output, lanes 0-3
    vfloat Tv, Tm2v, Tm3v;      // outputs at t-1, t-2, t-3, lanes 0-3
    vfloat Rv1;                 // same as above for lanes 4-7
    vfloat Tv1, Tm2v1, Tm3v1;
    vfloat Bv, b1v, b2v, b3v;   // vectorised filter coefficients
    vfloat temp2W, temp2Wp1;    // Triggs/Sdika extrapolations past the bottom edge
    vfloat temp2W1, temp2Wp11;
    Bv = F2V(B);
    b1v = F2V(b1);
    b2v = F2V(b2);
    b3v = F2V(b3);

#ifdef _OPENMP
    #pragma omp for nowait
#endif

    // process 8 columns per iteration for better usage of cpu cache
    for (int i = 0; i < W - 7; i += 8) {
        // Warm-up: first three rows assume constant extension of the top
        // border sample (hence the summed coefficient weights).
        Tv = LVFU( src[0][i]);
        Tv1 = LVFU( src[0][i + 4]);
        Rv = Tv * (Bv + b1v + b2v + b3v);
        Rv1 = Tv1 * (Bv + b1v + b2v + b3v);
        Tm3v = Rv;
        Tm3v1 = Rv1;
        STVF( tmp[0][0], Rv );
        STVF( tmp[0][4], Rv1 );

        Rv = LVFU(src[1][i]) * Bv + Rv * b1v + Tv * (b2v + b3v);
        Rv1 = LVFU(src[1][i + 4]) * Bv + Rv1 * b1v + Tv1 * (b2v + b3v);
        Tm2v = Rv;
        Tm2v1 = Rv1;
        STVF( tmp[1][0], Rv );
        STVF( tmp[1][4], Rv1 );

        Rv = LVFU(src[2][i]) * Bv + Rv * b1v + Tm3v * b2v + Tv * b3v;
        Rv1 = LVFU(src[2][i + 4]) * Bv + Rv1 * b1v + Tm3v1 * b2v + Tv1 * b3v;
        STVF( tmp[2][0], Rv );
        STVF( tmp[2][4], Rv1 );

        // Forward recursion down the columns.
        for (int j = 3; j < H; j++) {
            Tv = Rv;
            Tv1 = Rv1;
            Rv = LVFU(src[j][i]) * Bv + Tv * b1v + Tm2v * b2v + Tm3v * b3v;
            Rv1 = LVFU(src[j][i + 4]) * Bv + Tv1 * b1v + Tm2v1 * b2v + Tm3v1 * b3v;
            STVF( tmp[j][0], Rv );
            STVF( tmp[j][4], Rv1 );
            Tm3v = Tm2v;
            Tm3v1 = Tm2v1;
            Tm2v = Tv;
            Tm2v1 = Tv1;
        }

        // Bottom-border correction (Triggs/Sdika matrix applied to the last
        // three forward outputs relative to the border sample).
        Tv = LVFU(src[H - 1][i]);
        Tv1 = LVFU(src[H - 1][i + 4]);

        temp2Wp1 = Tv + F2V(M[2][0]) * (Rv - Tv) + F2V(M[2][1]) * (Tm2v - Tv) + F2V(M[2][2]) * (Tm3v - Tv);
        temp2Wp11 = Tv1 + F2V(M[2][0]) * (Rv1 - Tv1) + F2V(M[2][1]) * (Tm2v1 - Tv1) + F2V(M[2][2]) * (Tm3v1 - Tv1);
        temp2W = Tv + F2V(M[1][0]) * (Rv - Tv) + F2V(M[1][1]) * (Tm2v - Tv) + F2V(M[1][2]) * (Tm3v - Tv);
        temp2W1 = Tv1 + F2V(M[1][0]) * (Rv1 - Tv1) + F2V(M[1][1]) * (Tm2v1 - Tv1) + F2V(M[1][2]) * (Tm3v1 - Tv1);

        Rv = Tv + F2V(M[0][0]) * (Rv - Tv) + F2V(M[0][1]) * (Tm2v - Tv) + F2V(M[0][2]) * (Tm3v - Tv);
        Rv1 = Tv1 + F2V(M[0][0]) * (Rv1 - Tv1) + F2V(M[0][1]) * (Tm2v1 - Tv1) + F2V(M[0][2]) * (Tm3v1 - Tv1);
        STVFU( dst[H - 1][i], Rv );
        STVFU( dst[H - 1][i + 4], Rv1 );

        Tm2v = Bv * Tm2v + b1v * Rv + b2v * temp2W + b3v * temp2Wp1;
        Tm2v1 = Bv * Tm2v1 + b1v * Rv1 + b2v * temp2W1 + b3v * temp2Wp11;
        STVFU( dst[H - 2][i], Tm2v );
        STVFU( dst[H - 2][i + 4], Tm2v1 );

        Tm3v = Bv * Tm3v + b1v * Tm2v + b2v * Rv + b3v * temp2W;
        Tm3v1 = Bv * Tm3v1 + b1v * Tm2v1 + b2v * Rv1 + b3v * temp2W1;
        STVFU( dst[H - 3][i], Tm3v );
        STVFU( dst[H - 3][i + 4], Tm3v1 );

        Tv = Rv;
        Tv1 = Rv1;
        Rv = Tm3v;
        Rv1 = Tm3v1;
        Tm3v = Tv;
        Tm3v1 = Tv1;

        // Backward recursion over the scratch buffer, writing directly to dst.
        for (int j = H - 4; j >= 0; j--) {
            Tv = Rv;
            Tv1 = Rv1;
            Rv = LVF(tmp[j][0]) * Bv + Tv * b1v + Tm2v * b2v + Tm3v * b3v;
            Rv1 = LVF(tmp[j][4]) * Bv + Tv1 * b1v + Tm2v1 * b2v + Tm3v1 * b3v;
            STVFU( dst[j][i], Rv );
            STVFU( dst[j][i + 4], Rv1 );
            Tm3v = Tm2v;
            Tm3v1 = Tm2v1;
            Tm2v = Tv;
            Tm2v1 = Tv1;
        }
    }

    // Borders are done without SSE
#ifdef _OPENMP
    #pragma omp single
#endif

    // Remaining W % 8 columns: same forward / boundary / backward scheme,
    // scalar, reusing element 0 of the scratch rows.
    for (int i = W - (W % 8); i < W; i++) {
        tmp[0][0] = src[0][i] * (B + b1 + b2 + b3);
        tmp[1][0] = B * src[1][i] + b1 * tmp[0][0] + src[0][i] * (b2 + b3);
        tmp[2][0] = B * src[2][i] + b1 * tmp[1][0] + b2 * tmp[0][0] + b3 * src[0][i];

        for (int j = 3; j < H; j++) {
            tmp[j][0] = B * src[j][i] + b1 * tmp[j - 1][0] + b2 * tmp[j - 2][0] + b3 * tmp[j - 3][0];
        }

        float temp2Hm1 = src[H - 1][i] + M[0][0] * (tmp[H - 1][0] - src[H - 1][i]) + M[0][1] * (tmp[H - 2][0] - src[H - 1][i]) + M[0][2] * (tmp[H - 3][0] - src[H - 1][i]);
        float temp2H = src[H - 1][i] + M[1][0] * (tmp[H - 1][0] - src[H - 1][i]) + M[1][1] * (tmp[H - 2][0] - src[H - 1][i]) + M[1][2] * (tmp[H - 3][0] - src[H - 1][i]);
        float temp2Hp1 = src[H - 1][i] + M[2][0] * (tmp[H - 1][0] - src[H - 1][i]) + M[2][1] * (tmp[H - 2][0] - src[H - 1][i]) + M[2][2] * (tmp[H - 3][0] - src[H - 1][i]);

        tmp[H - 1][0] = temp2Hm1;
        tmp[H - 2][0] = B * tmp[H - 2][0] + b1 * tmp[H - 1][0] + b2 * temp2H + b3 * temp2Hp1;
        tmp[H - 3][0] = B * tmp[H - 3][0] + b1 * tmp[H - 2][0] + b2 * tmp[H - 1][0] + b3 * temp2H;

        for (int j = H - 4; j >= 0; j--) {
            tmp[j][0] = B * tmp[j][0] + b1 * tmp[j + 1][0] + b2 * tmp[j + 2][0] + b3 * tmp[j + 3][0];
        }

        for (int j = 0; j < H; j++) {
            dst[j][i] = tmp[j][0];
        }

    }
}
831 #endif
832
833 #ifdef __SSE2__
gaussVerticalSsemult(T ** RESTRICT src,T ** RESTRICT dst,const int W,const int H,const float sigma)834 template<class T> void gaussVerticalSsemult (T** RESTRICT src, T** RESTRICT dst, const int W, const int H, const float sigma)
835 {
836 double b1, b2, b3, B, M[3][3];
837 calculateYvVFactors<double>(sigma, b1, b2, b3, B, M);
838
839 for (int i = 0; i < 3; i++)
840 for (int j = 0; j < 3; j++) {
841 M[i][j] *= (1.0 + b2 + (b1 - b3) * b3);
842 M[i][j] /= (1.0 + b1 - b2 + b3) * (1.0 - b1 - b2 - b3);
843 }
844
845 float tmp[H][8] ALIGNED16;
846 vfloat Rv;
847 vfloat Tv, Tm2v, Tm3v;
848 vfloat Rv1;
849 vfloat Tv1, Tm2v1, Tm3v1;
850 vfloat Bv, b1v, b2v, b3v;
851 vfloat temp2W, temp2Wp1;
852 vfloat temp2W1, temp2Wp11;
853 Bv = F2V(B);
854 b1v = F2V(b1);
855 b2v = F2V(b2);
856 b3v = F2V(b3);
857
858 #ifdef _OPENMP
859 #pragma omp for nowait
860 #endif
861
862 // process 8 columns per iteration for better usage of cpu cache
863 for (int i = 0; i < W - 7; i += 8) {
864 Tv = LVFU( src[0][i]);
865 Tv1 = LVFU( src[0][i + 4]);
866 Rv = Tv * (Bv + b1v + b2v + b3v);
867 Rv1 = Tv1 * (Bv + b1v + b2v + b3v);
868 Tm3v = Rv;
869 Tm3v1 = Rv1;
870 STVF( tmp[0][0], Rv );
871 STVF( tmp[0][4], Rv1 );
872
873 Rv = LVFU(src[1][i]) * Bv + Rv * b1v + Tv * (b2v + b3v);
874 Rv1 = LVFU(src[1][i + 4]) * Bv + Rv1 * b1v + Tv1 * (b2v + b3v);
875 Tm2v = Rv;
876 Tm2v1 = Rv1;
877 STVF( tmp[1][0], Rv );
878 STVF( tmp[1][4], Rv1 );
879
880 Rv = LVFU(src[2][i]) * Bv + Rv * b1v + Tm3v * b2v + Tv * b3v;
881 Rv1 = LVFU(src[2][i + 4]) * Bv + Rv1 * b1v + Tm3v1 * b2v + Tv1 * b3v;
882 STVF( tmp[2][0], Rv );
883 STVF( tmp[2][4], Rv1 );
884
885 for (int j = 3; j < H; j++) {
886 Tv = Rv;
887 Tv1 = Rv1;
888 Rv = LVFU(src[j][i]) * Bv + Tv * b1v + Tm2v * b2v + Tm3v * b3v;
889 Rv1 = LVFU(src[j][i + 4]) * Bv + Tv1 * b1v + Tm2v1 * b2v + Tm3v1 * b3v;
890 STVF( tmp[j][0], Rv );
891 STVF( tmp[j][4], Rv1 );
892 Tm3v = Tm2v;
893 Tm3v1 = Tm2v1;
894 Tm2v = Tv;
895 Tm2v1 = Tv1;
896 }
897
898 Tv = LVFU(src[H - 1][i]);
899 Tv1 = LVFU(src[H - 1][i + 4]);
900
901 temp2Wp1 = Tv + F2V(M[2][0]) * (Rv - Tv) + F2V(M[2][1]) * (Tm2v - Tv) + F2V(M[2][2]) * (Tm3v - Tv);
902 temp2Wp11 = Tv1 + F2V(M[2][0]) * (Rv1 - Tv1) + F2V(M[2][1]) * (Tm2v1 - Tv1) + F2V(M[2][2]) * (Tm3v1 - Tv1);
903 temp2W = Tv + F2V(M[1][0]) * (Rv - Tv) + F2V(M[1][1]) * (Tm2v - Tv) + F2V(M[1][2]) * (Tm3v - Tv);
904 temp2W1 = Tv1 + F2V(M[1][0]) * (Rv1 - Tv1) + F2V(M[1][1]) * (Tm2v1 - Tv1) + F2V(M[1][2]) * (Tm3v1 - Tv1);
905
906 Rv = Tv + F2V(M[0][0]) * (Rv - Tv) + F2V(M[0][1]) * (Tm2v - Tv) + F2V(M[0][2]) * (Tm3v - Tv);
907 Rv1 = Tv1 + F2V(M[0][0]) * (Rv1 - Tv1) + F2V(M[0][1]) * (Tm2v1 - Tv1) + F2V(M[0][2]) * (Tm3v1 - Tv1);
908 STVFU( dst[H - 1][i], LVFU(dst[H - 1][i]) * Rv );
909 STVFU( dst[H - 1][i + 4], LVFU(dst[H - 1][i + 4]) * Rv1 );
910
911 Tm2v = Bv * Tm2v + b1v * Rv + b2v * temp2W + b3v * temp2Wp1;
912 Tm2v1 = Bv * Tm2v1 + b1v * Rv1 + b2v * temp2W1 + b3v * temp2Wp11;
913 STVFU( dst[H - 2][i], LVFU(dst[H - 2][i]) * Tm2v );
914 STVFU( dst[H - 2][i + 4], LVFU(dst[H - 2][i + 4]) * Tm2v1 );
915
916 Tm3v = Bv * Tm3v + b1v * Tm2v + b2v * Rv + b3v * temp2W;
917 Tm3v1 = Bv * Tm3v1 + b1v * Tm2v1 + b2v * Rv1 + b3v * temp2W1;
918 STVFU( dst[H - 3][i], LVFU(dst[H - 3][i]) * Tm3v );
919 STVFU( dst[H - 3][i + 4], LVFU(dst[H - 3][i + 4]) * Tm3v1 );
920
921 Tv = Rv;
922 Tv1 = Rv1;
923 Rv = Tm3v;
924 Rv1 = Tm3v1;
925 Tm3v = Tv;
926 Tm3v1 = Tv1;
927
928 for (int j = H - 4; j >= 0; j--) {
929 Tv = Rv;
930 Tv1 = Rv1;
931 Rv = LVF(tmp[j][0]) * Bv + Tv * b1v + Tm2v * b2v + Tm3v * b3v;
932 Rv1 = LVF(tmp[j][4]) * Bv + Tv1 * b1v + Tm2v1 * b2v + Tm3v1 * b3v;
933 STVFU( dst[j][i], LVFU(dst[j][i]) * Rv );
934 STVFU( dst[j][i + 4], LVFU(dst[j][i + 4]) * Rv1 );
935 Tm3v = Tm2v;
936 Tm3v1 = Tm2v1;
937 Tm2v = Tv;
938 Tm2v1 = Tv1;
939 }
940 }
941
942 // Borders are done without SSE
943 #ifdef _OPENMP
944 #pragma omp single
945 #endif
946
947 for (int i = W - (W % 8); i < W; i++) {
948 tmp[0][0] = src[0][i] * (B + b1 + b2 + b3);
949 tmp[1][0] = B * src[1][i] + b1 * tmp[0][0] + src[0][i] * (b2 + b3);
950 tmp[2][0] = B * src[2][i] + b1 * tmp[1][0] + b2 * tmp[0][0] + b3 * src[0][i];
951
952 for (int j = 3; j < H; j++) {
953 tmp[j][0] = B * src[j][i] + b1 * tmp[j - 1][0] + b2 * tmp[j - 2][0] + b3 * tmp[j - 3][0];
954 }
955
956 float temp2Hm1 = src[H - 1][i] + M[0][0] * (tmp[H - 1][0] - src[H - 1][i]) + M[0][1] * (tmp[H - 2][0] - src[H - 1][i]) + M[0][2] * (tmp[H - 3][0] - src[H - 1][i]);
957 float temp2H = src[H - 1][i] + M[1][0] * (tmp[H - 1][0] - src[H - 1][i]) + M[1][1] * (tmp[H - 2][0] - src[H - 1][i]) + M[1][2] * (tmp[H - 3][0] - src[H - 1][i]);
958 float temp2Hp1 = src[H - 1][i] + M[2][0] * (tmp[H - 1][0] - src[H - 1][i]) + M[2][1] * (tmp[H - 2][0] - src[H - 1][i]) + M[2][2] * (tmp[H - 3][0] - src[H - 1][i]);
959
960 tmp[H - 1][0] = temp2Hm1;
961 tmp[H - 2][0] = B * tmp[H - 2][0] + b1 * tmp[H - 1][0] + b2 * temp2H + b3 * temp2Hp1;
962 tmp[H - 3][0] = B * tmp[H - 3][0] + b1 * tmp[H - 2][0] + b2 * tmp[H - 1][0] + b3 * temp2H;
963
964 for (int j = H - 4; j >= 0; j--) {
965 tmp[j][0] = B * tmp[j][0] + b1 * tmp[j + 1][0] + b2 * tmp[j + 2][0] + b3 * tmp[j + 3][0];
966 }
967
968 for (int j = 0; j < H; j++) {
969 dst[j][i] *= tmp[j][0];
970 }
971
972 }
973 }
974
// Vertical pass of the recursive (IIR) Gaussian approximation, SSE version,
// fused with an element-wise division:
//     dst[j][i] = divBuffer[j][i] / blur(src)[j][i]
// Blurred values <= 0 are replaced by 1 before dividing (see the vself/
// vmaskf_gt selects below) to avoid division by zero or negative overshoot.
// Eight columns are processed per iteration (two 4-float vectors); leftover
// columns (W % 8) are handled by the scalar border loop at the end.
// Meant to run inside an OpenMP parallel region: the column loop uses
// "omp for nowait", the border loop "omp single".
template<class T> void gaussVerticalSsediv (T** src, T** dst, T** divBuffer, const int W, const int H, const float sigma)
{
    double b1, b2, b3, B, M[3][3];
    calculateYvVFactors<double>(sigma, b1, b2, b3, B, M);

    // Normalize the 3x3 boundary correction matrix M (applied at the bottom
    // edge to seed the backward pass). NOTE(review): the scaling here differs
    // from the one used in the plain gaussVertical variant — presumably to
    // match the single-precision forward pass; verify against the
    // boundary-condition derivation for the recursive Gaussian.
    for (int i = 0; i < 3; i++)
        for (int j = 0; j < 3; j++) {
            M[i][j] *= (1.0 + b2 + (b1 - b3) * b3);
            M[i][j] /= (1.0 + b1 - b2 + b3) * (1.0 - b1 - b2 - b3);
        }

    // Scratch block for one 8-column stripe (VLA, gcc extension).
    float tmp[H][8] ALIGNED16;
    vfloat Rv;                 // current recursion value, columns i..i+3
    vfloat Tv, Tm2v, Tm3v;     // recursion history: 1, 2 and 3 steps back
    vfloat Rv1;                // current recursion value, columns i+4..i+7
    vfloat Tv1, Tm2v1, Tm3v1;  // history for the second vector
    vfloat Bv, b1v, b2v, b3v;  // filter coefficients broadcast to vectors
    vfloat temp2W, temp2Wp1;   // boundary-corrected seeds for the backward pass
    vfloat temp2W1, temp2Wp11;
    vfloat onev = F2V(1.f);    // fallback divisor where blur <= 0
    Bv = F2V(B);
    b1v = F2V(b1);
    b2v = F2V(b2);
    b3v = F2V(b3);

#ifdef _OPENMP
    #pragma omp for nowait
#endif

    // process 8 columns per iteration for better usage of cpu cache
    for (int i = 0; i < W - 7; i += 8) {
        // Forward (top-to-bottom) pass. The first three rows replicate the
        // top border sample into the recursion history.
        Tv = LVFU( src[0][i]);
        Tv1 = LVFU( src[0][i + 4]);
        Rv = Tv * (Bv + b1v + b2v + b3v);
        Rv1 = Tv1 * (Bv + b1v + b2v + b3v);
        Tm3v = Rv;
        Tm3v1 = Rv1;
        STVF( tmp[0][0], Rv );
        STVF( tmp[0][4], Rv1 );

        Rv = LVFU(src[1][i]) * Bv + Rv * b1v + Tv * (b2v + b3v);
        Rv1 = LVFU(src[1][i + 4]) * Bv + Rv1 * b1v + Tv1 * (b2v + b3v);
        Tm2v = Rv;
        Tm2v1 = Rv1;
        STVF( tmp[1][0], Rv );
        STVF( tmp[1][4], Rv1 );

        Rv = LVFU(src[2][i]) * Bv + Rv * b1v + Tm3v * b2v + Tv * b3v;
        Rv1 = LVFU(src[2][i + 4]) * Bv + Rv1 * b1v + Tm3v1 * b2v + Tv1 * b3v;
        STVF( tmp[2][0], Rv );
        STVF( tmp[2][4], Rv1 );

        // Steady-state forward recursion; results stored into tmp for the
        // backward pass, history registers rotated at the end of each step.
        for (int j = 3; j < H; j++) {
            Tv = Rv;
            Tv1 = Rv1;
            Rv = LVFU(src[j][i]) * Bv + Tv * b1v + Tm2v * b2v + Tm3v * b3v;
            Rv1 = LVFU(src[j][i + 4]) * Bv + Tv1 * b1v + Tm2v1 * b2v + Tm3v1 * b3v;
            STVF( tmp[j][0], Rv );
            STVF( tmp[j][4], Rv1 );
            Tm3v = Tm2v;
            Tm3v1 = Tm2v1;
            Tm2v = Tv;
            Tm2v1 = Tv1;
        }

        // Bottom border: apply the correction matrix M around the last input
        // sample to obtain the three seeds of the backward recursion.
        Tv = LVFU(src[H - 1][i]);
        Tv1 = LVFU(src[H - 1][i + 4]);

        temp2Wp1 = Tv + F2V(M[2][0]) * (Rv - Tv) + F2V(M[2][1]) * (Tm2v - Tv) + F2V(M[2][2]) * (Tm3v - Tv);
        temp2Wp11 = Tv1 + F2V(M[2][0]) * (Rv1 - Tv1) + F2V(M[2][1]) * (Tm2v1 - Tv1) + F2V(M[2][2]) * (Tm3v1 - Tv1);
        temp2W = Tv + F2V(M[1][0]) * (Rv - Tv) + F2V(M[1][1]) * (Tm2v - Tv) + F2V(M[1][2]) * (Tm3v - Tv);
        temp2W1 = Tv1 + F2V(M[1][0]) * (Rv1 - Tv1) + F2V(M[1][1]) * (Tm2v1 - Tv1) + F2V(M[1][2]) * (Tm3v1 - Tv1);

        Rv = Tv + F2V(M[0][0]) * (Rv - Tv) + F2V(M[0][1]) * (Tm2v - Tv) + F2V(M[0][2]) * (Tm3v - Tv);
        Rv1 = Tv1 + F2V(M[0][0]) * (Rv1 - Tv1) + F2V(M[0][1]) * (Tm2v1 - Tv1) + F2V(M[0][2]) * (Tm3v1 - Tv1);

        // Divide; where the blurred value is <= 0, divide by 1 instead.
        // NOTE(review): the last three rows are not clamped to >= 0 here,
        // unlike the main backward loop below (vmaxf) and the scalar border
        // path (rtengine::max) — confirm whether this asymmetry is intended.
        STVFU( dst[H - 1][i], LVFU(divBuffer[H - 1][i]) / vself(vmaskf_gt(Rv, ZEROV), Rv, onev));
        STVFU( dst[H - 1][i + 4], LVFU(divBuffer[H - 1][i + 4]) / vself(vmaskf_gt(Rv1, ZEROV), Rv1, onev));

        Tm2v = Bv * Tm2v + b1v * Rv + b2v * temp2W + b3v * temp2Wp1;
        Tm2v1 = Bv * Tm2v1 + b1v * Rv1 + b2v * temp2W1 + b3v * temp2Wp11;
        STVFU( dst[H - 2][i], LVFU(divBuffer[H - 2][i]) / vself(vmaskf_gt(Tm2v, ZEROV), Tm2v, onev));
        STVFU( dst[H - 2][i + 4], LVFU(divBuffer[H - 2][i + 4]) / vself(vmaskf_gt(Tm2v1, ZEROV), Tm2v1, onev));

        Tm3v = Bv * Tm3v + b1v * Tm2v + b2v * Rv + b3v * temp2W;
        Tm3v1 = Bv * Tm3v1 + b1v * Tm2v1 + b2v * Rv1 + b3v * temp2W1;
        STVFU( dst[H - 3][i], LVFU(divBuffer[H - 3][i]) / vself(vmaskf_gt(Tm3v, ZEROV), Tm3v, onev));
        STVFU( dst[H - 3][i + 4], LVFU(divBuffer[H - 3][i + 4]) / vself(vmaskf_gt(Tm3v1, ZEROV), Tm3v1, onev));

        // Rotate registers so the backward loop sees the correct history
        // (Rv = value at row H-3, Tm2v = row H-2, Tm3v = row H-1).
        Tv = Rv;
        Tv1 = Rv1;
        Rv = Tm3v;
        Rv1 = Tm3v1;
        Tm3v = Tv;
        Tm3v1 = Tv1;

        // Backward (bottom-to-top) recursion over the stored forward results.
        for (int j = H - 4; j >= 0; j--) {
            Tv = Rv;
            Tv1 = Rv1;
            Rv = LVF(tmp[j][0]) * Bv + Tv * b1v + Tm2v * b2v + Tm3v * b3v;
            Rv1 = LVF(tmp[j][4]) * Bv + Tv1 * b1v + Tm2v1 * b2v + Tm3v1 * b3v;
            STVFU( dst[j][i], vmaxf(LVFU(divBuffer[j][i]) / vself(vmaskf_gt(Rv, ZEROV), Rv, onev), ZEROV));
            STVFU( dst[j][i + 4], vmaxf(LVFU(divBuffer[j][i + 4]) / vself(vmaskf_gt(Rv1, ZEROV), Rv1, onev), ZEROV));
            Tm3v = Tm2v;
            Tm3v1 = Tm2v1;
            Tm2v = Tv;
            Tm2v1 = Tv1;
        }
    }

    // Borders are done without SSE
#ifdef _OPENMP
    #pragma omp single
#endif

    // Scalar processing of the remaining W % 8 columns (same recursion,
    // one column at a time, reusing column 0 of the scratch block).
    for (int i = W - (W % 8); i < W; i++) {
        tmp[0][0] = src[0][i] * (B + b1 + b2 + b3);
        tmp[1][0] = B * src[1][i] + b1 * tmp[0][0] + src[0][i] * (b2 + b3);
        tmp[2][0] = B * src[2][i] + b1 * tmp[1][0] + b2 * tmp[0][0] + b3 * src[0][i];

        for (int j = 3; j < H; j++) {
            tmp[j][0] = B * src[j][i] + b1 * tmp[j - 1][0] + b2 * tmp[j - 2][0] + b3 * tmp[j - 3][0];
        }

        // Bottom-border correction (scalar equivalent of the M application above).
        float temp2Hm1 = src[H - 1][i] + M[0][0] * (tmp[H - 1][0] - src[H - 1][i]) + M[0][1] * (tmp[H - 2][0] - src[H - 1][i]) + M[0][2] * (tmp[H - 3][0] - src[H - 1][i]);
        float temp2H = src[H - 1][i] + M[1][0] * (tmp[H - 1][0] - src[H - 1][i]) + M[1][1] * (tmp[H - 2][0] - src[H - 1][i]) + M[1][2] * (tmp[H - 3][0] - src[H - 1][i]);
        float temp2Hp1 = src[H - 1][i] + M[2][0] * (tmp[H - 1][0] - src[H - 1][i]) + M[2][1] * (tmp[H - 2][0] - src[H - 1][i]) + M[2][2] * (tmp[H - 3][0] - src[H - 1][i]);

        // Order matters: tmp[H-1] is overwritten before tmp[H-2]/[H-3] use it.
        tmp[H - 1][0] = temp2Hm1;
        tmp[H - 2][0] = B * tmp[H - 2][0] + b1 * tmp[H - 1][0] + b2 * temp2H + b3 * temp2Hp1;
        tmp[H - 3][0] = B * tmp[H - 3][0] + b1 * tmp[H - 2][0] + b2 * tmp[H - 1][0] + b3 * temp2H;

        for (int j = H - 4; j >= 0; j--) {
            tmp[j][0] = B * tmp[j][0] + b1 * tmp[j + 1][0] + b2 * tmp[j + 2][0] + b3 * tmp[j + 3][0];
        }

        // Divide (non-positive divisor replaced by 1) and clamp to >= 0.
        for (int j = 0; j < H; j++) {
            dst[j][i] = rtengine::max(divBuffer[j][i] / (tmp[j][0] > 0.f ? tmp[j][0] : 1.f), 0.f);
        }

    }
}
1117
1118 #endif
1119
// Vertical pass of the recursive (IIR) Gaussian approximation, scalar
// double-precision version: dst = blur(src), column-wise.
// Forward (top-to-bottom) recursion fills temp2[], the bottom border is
// corrected through the 3x3 matrix M, then the backward (bottom-to-top)
// recursion writes dst. Processes 'numcols' columns per stripe; leftover
// columns are handled one at a time at the end. Meant to run inside an
// OpenMP parallel region ("omp for nowait" / "omp single").
template<class T> void gaussVertical (T** src, T** dst, const int W, const int H, const double sigma)
{
    double b1, b2, b3, B, M[3][3];
    calculateYvVFactors<double>(sigma, b1, b2, b3, B, M);

    // Normalize the 3x3 bottom-border correction matrix.
    for (int i = 0; i < 3; i++)
        for (int j = 0; j < 3; j++) {
            M[i][j] /= (1.0 + b1 - b2 + b3) * (1.0 + b2 + (b1 - b3) * b3);
        }

    // process 'numcols' columns for better usage of L1 cpu cache (especially faster for large values of H)
    static const int numcols = 8;
    double temp2[H][numcols] ALIGNED16;   // per-stripe scratch (VLA, gcc extension)
    double temp2Hm1[numcols], temp2H[numcols], temp2Hp1[numcols];
#ifdef _OPENMP
    #pragma omp for nowait
#endif

    // std::max(0, ...) guards the W < numcols case, where W - numcols + 1
    // would be negative and the unsigned comparison would otherwise wrap.
    for (unsigned int i = 0; i < static_cast<unsigned>(std::max(0, W - numcols + 1)); i += numcols) {
        // First three rows: the top border sample is replicated into the history.
        for (int k = 0; k < numcols; k++) {
            temp2[0][k] = B * src[0][i + k] + b1 * src[0][i + k] + b2 * src[0][i + k] + b3 * src[0][i + k];
            temp2[1][k] = B * src[1][i + k] + b1 * temp2[0][k] + b2 * src[0][i + k] + b3 * src[0][i + k];
            temp2[2][k] = B * src[2][i + k] + b1 * temp2[1][k] + b2 * temp2[0][k] + b3 * src[0][i + k];
        }

        // Steady-state forward recursion.
        for (int j = 3; j < H; j++) {
            for (int k = 0; k < numcols; k++) {
                temp2[j][k] = B * src[j][i + k] + b1 * temp2[j - 1][k] + b2 * temp2[j - 2][k] + b3 * temp2[j - 3][k];
            }
        }

        // Bottom border: correction via M yields the seeds of the backward pass.
        for (int k = 0; k < numcols; k++) {
            temp2Hm1[k] = src[H - 1][i + k] + M[0][0] * (temp2[H - 1][k] - src[H - 1][i + k]) + M[0][1] * (temp2[H - 2][k] - src[H - 1][i + k]) + M[0][2] * (temp2[H - 3][k] - src[H - 1][i + k]);
            temp2H[k] = src[H - 1][i + k] + M[1][0] * (temp2[H - 1][k] - src[H - 1][i + k]) + M[1][1] * (temp2[H - 2][k] - src[H - 1][i + k]) + M[1][2] * (temp2[H - 3][k] - src[H - 1][i + k]);
            temp2Hp1[k] = src[H - 1][i + k] + M[2][0] * (temp2[H - 1][k] - src[H - 1][i + k]) + M[2][1] * (temp2[H - 2][k] - src[H - 1][i + k]) + M[2][2] * (temp2[H - 3][k] - src[H - 1][i + k]);
        }

        // Order matters: temp2[H-1] is overwritten before the H-2/H-3 rows use it.
        for (int k = 0; k < numcols; k++) {
            dst[H - 1][i + k] = temp2[H - 1][k] = temp2Hm1[k];
            dst[H - 2][i + k] = temp2[H - 2][k] = B * temp2[H - 2][k] + b1 * temp2[H - 1][k] + b2 * temp2H[k] + b3 * temp2Hp1[k];
            dst[H - 3][i + k] = temp2[H - 3][k] = B * temp2[H - 3][k] + b1 * temp2[H - 2][k] + b2 * temp2[H - 1][k] + b3 * temp2H[k];
        }

        // Backward (bottom-to-top) recursion; results go to both dst and temp2.
        for (int j = H - 4; j >= 0; j--) {
            for (int k = 0; k < numcols; k++) {
                dst[j][i + k] = temp2[j][k] = B * temp2[j][k] + b1 * temp2[j + 1][k] + b2 * temp2[j + 2][k] + b3 * temp2[j + 3][k];
            }
        }
    }

#ifdef _OPENMP
    #pragma omp single
#endif

    // process remaining columns
    for (int i = W - (W % numcols); i < W; i++) {
        temp2[0][0] = B * src[0][i] + b1 * src[0][i] + b2 * src[0][i] + b3 * src[0][i];
        temp2[1][0] = B * src[1][i] + b1 * temp2[0][0] + b2 * src[0][i] + b3 * src[0][i];
        temp2[2][0] = B * src[2][i] + b1 * temp2[1][0] + b2 * temp2[0][0] + b3 * src[0][i];

        for (int j = 3; j < H; j++) {
            temp2[j][0] = B * src[j][i] + b1 * temp2[j - 1][0] + b2 * temp2[j - 2][0] + b3 * temp2[j - 3][0];
        }

        double temp2Hm1 = src[H - 1][i] + M[0][0] * (temp2[H - 1][0] - src[H - 1][i]) + M[0][1] * (temp2[H - 2][0] - src[H - 1][i]) + M[0][2] * (temp2[H - 3][0] - src[H - 1][i]);
        double temp2H = src[H - 1][i] + M[1][0] * (temp2[H - 1][0] - src[H - 1][i]) + M[1][1] * (temp2[H - 2][0] - src[H - 1][i]) + M[1][2] * (temp2[H - 3][0] - src[H - 1][i]);
        double temp2Hp1 = src[H - 1][i] + M[2][0] * (temp2[H - 1][0] - src[H - 1][i]) + M[2][1] * (temp2[H - 2][0] - src[H - 1][i]) + M[2][2] * (temp2[H - 3][0] - src[H - 1][i]);

        dst[H - 1][i] = temp2[H - 1][0] = temp2Hm1;
        dst[H - 2][i] = temp2[H - 2][0] = B * temp2[H - 2][0] + b1 * temp2[H - 1][0] + b2 * temp2H + b3 * temp2Hp1;
        dst[H - 3][i] = temp2[H - 3][0] = B * temp2[H - 3][0] + b1 * temp2[H - 2][0] + b2 * temp2[H - 1][0] + b3 * temp2H;

        for (int j = H - 4; j >= 0; j--) {
            dst[j][i] = temp2[j][0] = B * temp2[j][0] + b1 * temp2[j + 1][0] + b2 * temp2[j + 2][0] + b3 * temp2[j + 3][0];
        }
    }
}
1197
1198 #ifndef __SSE2__
// Vertical pass of the recursive (IIR) Gaussian approximation fused with an
// element-wise division (non-SSE fallback, compiled only without __SSE2__):
//     dst[j][i] = max(divBuffer[j][i] / blur(src)[j][i], 0.0)
// Structure mirrors gaussVertical: forward recursion into temp2[], bottom
// border corrected via the 3x3 matrix M, backward recursion writes dst.
// NOTE(review): unlike the SSE variant, a non-positive blurred value is NOT
// replaced by 1 before dividing here — confirm divisors cannot be zero.
template<class T> void gaussVerticaldiv (T** src, T** dst, T** divBuffer, const int W, const int H, const double sigma)
{
    double b1, b2, b3, B, M[3][3];
    calculateYvVFactors<double>(sigma, b1, b2, b3, B, M);

    // Normalize the 3x3 bottom-border correction matrix.
    for (int i = 0; i < 3; i++)
        for (int j = 0; j < 3; j++) {
            M[i][j] /= (1.0 + b1 - b2 + b3) * (1.0 + b2 + (b1 - b3) * b3);
        }

    // process 'numcols' columns for better usage of L1 cpu cache (especially faster for large values of H)
    static const int numcols = 8;
    double temp2[H][numcols] ALIGNED16;   // per-stripe scratch (VLA, gcc extension)
    double temp2Hm1[numcols], temp2H[numcols], temp2Hp1[numcols];
#ifdef _OPENMP
    #pragma omp for nowait
#endif

    for (int i = 0; i < W - numcols + 1; i += numcols) {
        // First three rows: top border sample replicated into the history.
        for (int k = 0; k < numcols; k++) {
            temp2[0][k] = B * src[0][i + k] + b1 * src[0][i + k] + b2 * src[0][i + k] + b3 * src[0][i + k];
            temp2[1][k] = B * src[1][i + k] + b1 * temp2[0][k] + b2 * src[0][i + k] + b3 * src[0][i + k];
            temp2[2][k] = B * src[2][i + k] + b1 * temp2[1][k] + b2 * temp2[0][k] + b3 * src[0][i + k];
        }

        // Steady-state forward recursion.
        for (int j = 3; j < H; j++) {
            for (int k = 0; k < numcols; k++) {
                temp2[j][k] = B * src[j][i + k] + b1 * temp2[j - 1][k] + b2 * temp2[j - 2][k] + b3 * temp2[j - 3][k];
            }
        }

        // Bottom border: correction via M yields the backward-pass seeds.
        for (int k = 0; k < numcols; k++) {
            temp2Hm1[k] = src[H - 1][i + k] + M[0][0] * (temp2[H - 1][k] - src[H - 1][i + k]) + M[0][1] * (temp2[H - 2][k] - src[H - 1][i + k]) + M[0][2] * (temp2[H - 3][k] - src[H - 1][i + k]);
            temp2H[k] = src[H - 1][i + k] + M[1][0] * (temp2[H - 1][k] - src[H - 1][i + k]) + M[1][1] * (temp2[H - 2][k] - src[H - 1][i + k]) + M[1][2] * (temp2[H - 3][k] - src[H - 1][i + k]);
            temp2Hp1[k] = src[H - 1][i + k] + M[2][0] * (temp2[H - 1][k] - src[H - 1][i + k]) + M[2][1] * (temp2[H - 2][k] - src[H - 1][i + k]) + M[2][2] * (temp2[H - 3][k] - src[H - 1][i + k]);
        }

        // Divide and clamp; the inner assignments update temp2 in place, and
        // order matters (temp2[H-1] must be written before H-2/H-3 read it).
        for (int k = 0; k < numcols; k++) {
            dst[H - 1][i + k] = rtengine::max(divBuffer[H - 1][i + k] / (temp2[H - 1][k] = temp2Hm1[k]), 0.0);
            dst[H - 2][i + k] = rtengine::max(divBuffer[H - 2][i + k] / (temp2[H - 2][k] = B * temp2[H - 2][k] + b1 * temp2[H - 1][k] + b2 * temp2H[k] + b3 * temp2Hp1[k]), 0.0);
            dst[H - 3][i + k] = rtengine::max(divBuffer[H - 3][i + k] / (temp2[H - 3][k] = B * temp2[H - 3][k] + b1 * temp2[H - 2][k] + b2 * temp2[H - 1][k] + b3 * temp2H[k]), 0.0);
        }

        // Backward (bottom-to-top) recursion fused with divide-and-clamp.
        for (int j = H - 4; j >= 0; j--) {
            for (int k = 0; k < numcols; k++) {
                dst[j][i + k] = rtengine::max(divBuffer[j][i + k] / (temp2[j][k] = B * temp2[j][k] + b1 * temp2[j + 1][k] + b2 * temp2[j + 2][k] + b3 * temp2[j + 3][k]), 0.0);
            }
        }
    }

#ifdef _OPENMP
    #pragma omp single
#endif

    // process remaining columns
    for (int i = W - (W % numcols); i < W; i++) {
        temp2[0][0] = B * src[0][i] + b1 * src[0][i] + b2 * src[0][i] + b3 * src[0][i];
        temp2[1][0] = B * src[1][i] + b1 * temp2[0][0] + b2 * src[0][i] + b3 * src[0][i];
        temp2[2][0] = B * src[2][i] + b1 * temp2[1][0] + b2 * temp2[0][0] + b3 * src[0][i];

        for (int j = 3; j < H; j++) {
            temp2[j][0] = B * src[j][i] + b1 * temp2[j - 1][0] + b2 * temp2[j - 2][0] + b3 * temp2[j - 3][0];
        }

        double temp2Hm1 = src[H - 1][i] + M[0][0] * (temp2[H - 1][0] - src[H - 1][i]) + M[0][1] * (temp2[H - 2][0] - src[H - 1][i]) + M[0][2] * (temp2[H - 3][0] - src[H - 1][i]);
        double temp2H = src[H - 1][i] + M[1][0] * (temp2[H - 1][0] - src[H - 1][i]) + M[1][1] * (temp2[H - 2][0] - src[H - 1][i]) + M[1][2] * (temp2[H - 3][0] - src[H - 1][i]);
        double temp2Hp1 = src[H - 1][i] + M[2][0] * (temp2[H - 1][0] - src[H - 1][i]) + M[2][1] * (temp2[H - 2][0] - src[H - 1][i]) + M[2][2] * (temp2[H - 3][0] - src[H - 1][i]);

        dst[H - 1][i] = rtengine::max(divBuffer[H - 1][i] / (temp2[H - 1][0] = temp2Hm1), 0.0);
        dst[H - 2][i] = rtengine::max(divBuffer[H - 2][i] / (temp2[H - 2][0] = B * temp2[H - 2][0] + b1 * temp2[H - 1][0] + b2 * temp2H + b3 * temp2Hp1), 0.0);
        dst[H - 3][i] = rtengine::max(divBuffer[H - 3][i] / (temp2[H - 3][0] = B * temp2[H - 3][0] + b1 * temp2[H - 2][0] + b2 * temp2[H - 1][0] + b3 * temp2H), 0.0);

        for (int j = H - 4; j >= 0; j--) {
            dst[j][i] = rtengine::max(divBuffer[j][i] / (temp2[j][0] = B * temp2[j][0] + b1 * temp2[j + 1][0] + b2 * temp2[j + 2][0] + b3 * temp2[j + 3][0]), 0.0);
        }
    }
}
1276
// Vertical pass of the recursive (IIR) Gaussian approximation fused with an
// element-wise multiplication (non-SSE fallback, compiled only without
// __SSE2__):   dst[j][i] *= blur(src)[j][i]
// Structure mirrors gaussVertical: forward recursion into temp2[], bottom
// border corrected via the 3x3 matrix M, backward recursion scales dst.
template<class T> void gaussVerticalmult (T** src, T** dst, const int W, const int H, const double sigma)
{
    double b1, b2, b3, B, M[3][3];
    calculateYvVFactors<double>(sigma, b1, b2, b3, B, M);

    // Normalize the 3x3 bottom-border correction matrix.
    for (int i = 0; i < 3; i++)
        for (int j = 0; j < 3; j++) {
            M[i][j] /= (1.0 + b1 - b2 + b3) * (1.0 + b2 + (b1 - b3) * b3);
        }

    // process 'numcols' columns for better usage of L1 cpu cache (especially faster for large values of H)
    static const int numcols = 8;
    double temp2[H][numcols] ALIGNED16;   // per-stripe scratch (VLA, gcc extension)
    double temp2Hm1[numcols], temp2H[numcols], temp2Hp1[numcols];
#ifdef _OPENMP
    #pragma omp for nowait
#endif

    for (int i = 0; i < W - numcols + 1; i += numcols) {
        // First three rows: top border sample replicated into the history.
        for (int k = 0; k < numcols; k++) {
            temp2[0][k] = B * src[0][i + k] + b1 * src[0][i + k] + b2 * src[0][i + k] + b3 * src[0][i + k];
            temp2[1][k] = B * src[1][i + k] + b1 * temp2[0][k] + b2 * src[0][i + k] + b3 * src[0][i + k];
            temp2[2][k] = B * src[2][i + k] + b1 * temp2[1][k] + b2 * temp2[0][k] + b3 * src[0][i + k];
        }

        // Steady-state forward recursion.
        for (int j = 3; j < H; j++) {
            for (int k = 0; k < numcols; k++) {
                temp2[j][k] = B * src[j][i + k] + b1 * temp2[j - 1][k] + b2 * temp2[j - 2][k] + b3 * temp2[j - 3][k];
            }
        }

        // Bottom border: correction via M yields the backward-pass seeds.
        for (int k = 0; k < numcols; k++) {
            temp2Hm1[k] = src[H - 1][i + k] + M[0][0] * (temp2[H - 1][k] - src[H - 1][i + k]) + M[0][1] * (temp2[H - 2][k] - src[H - 1][i + k]) + M[0][2] * (temp2[H - 3][k] - src[H - 1][i + k]);
            temp2H[k] = src[H - 1][i + k] + M[1][0] * (temp2[H - 1][k] - src[H - 1][i + k]) + M[1][1] * (temp2[H - 2][k] - src[H - 1][i + k]) + M[1][2] * (temp2[H - 3][k] - src[H - 1][i + k]);
            temp2Hp1[k] = src[H - 1][i + k] + M[2][0] * (temp2[H - 1][k] - src[H - 1][i + k]) + M[2][1] * (temp2[H - 2][k] - src[H - 1][i + k]) + M[2][2] * (temp2[H - 3][k] - src[H - 1][i + k]);
        }

        // Multiply into dst; the assignments update temp2 in place, and order
        // matters (temp2[H-1] must be written before H-2/H-3 read it).
        for (int k = 0; k < numcols; k++) {
            dst[H - 1][i + k] *= temp2[H - 1][k] = temp2Hm1[k];
            dst[H - 2][i + k] *= temp2[H - 2][k] = B * temp2[H - 2][k] + b1 * temp2[H - 1][k] + b2 * temp2H[k] + b3 * temp2Hp1[k];
            dst[H - 3][i + k] *= temp2[H - 3][k] = B * temp2[H - 3][k] + b1 * temp2[H - 2][k] + b2 * temp2[H - 1][k] + b3 * temp2H[k];
        }

        // Backward (bottom-to-top) recursion fused with the multiplication.
        for (int j = H - 4; j >= 0; j--) {
            for (int k = 0; k < numcols; k++) {
                dst[j][i + k] *= (temp2[j][k] = B * temp2[j][k] + b1 * temp2[j + 1][k] + b2 * temp2[j + 2][k] + b3 * temp2[j + 3][k]);
            }
        }
    }

#ifdef _OPENMP
    #pragma omp single
#endif

    // process remaining columns
    for (int i = W - (W % numcols); i < W; i++) {
        temp2[0][0] = B * src[0][i] + b1 * src[0][i] + b2 * src[0][i] + b3 * src[0][i];
        temp2[1][0] = B * src[1][i] + b1 * temp2[0][0] + b2 * src[0][i] + b3 * src[0][i];
        temp2[2][0] = B * src[2][i] + b1 * temp2[1][0] + b2 * temp2[0][0] + b3 * src[0][i];

        for (int j = 3; j < H; j++) {
            temp2[j][0] = B * src[j][i] + b1 * temp2[j - 1][0] + b2 * temp2[j - 2][0] + b3 * temp2[j - 3][0];
        }

        double temp2Hm1 = src[H - 1][i] + M[0][0] * (temp2[H - 1][0] - src[H - 1][i]) + M[0][1] * (temp2[H - 2][0] - src[H - 1][i]) + M[0][2] * (temp2[H - 3][0] - src[H - 1][i]);
        double temp2H = src[H - 1][i] + M[1][0] * (temp2[H - 1][0] - src[H - 1][i]) + M[1][1] * (temp2[H - 2][0] - src[H - 1][i]) + M[1][2] * (temp2[H - 3][0] - src[H - 1][i]);
        double temp2Hp1 = src[H - 1][i] + M[2][0] * (temp2[H - 1][0] - src[H - 1][i]) + M[2][1] * (temp2[H - 2][0] - src[H - 1][i]) + M[2][2] * (temp2[H - 3][0] - src[H - 1][i]);

        dst[H - 1][i] *= temp2[H - 1][0] = temp2Hm1;
        dst[H - 2][i] *= temp2[H - 2][0] = B * temp2[H - 2][0] + b1 * temp2[H - 1][0] + b2 * temp2H + b3 * temp2Hp1;
        dst[H - 3][i] *= temp2[H - 3][0] = B * temp2[H - 3][0] + b1 * temp2[H - 2][0] + b2 * temp2[H - 1][0] + b3 * temp2H;

        for (int j = H - 4; j >= 0; j--) {
            dst[j][i] *= (temp2[j][0] = B * temp2[j][0] + b1 * temp2[j + 1][0] + b2 * temp2[j + 2][0] + b3 * temp2[j + 3][0]);
        }
    }
}
1354 #endif
1355
gaussianBlurImpl(T ** src,T ** dst,const int W,const int H,const double sigma,bool useBoxBlur,eGaussType gausstype=GAUSS_STANDARD,T ** buffer2=nullptr)1356 template<class T> void gaussianBlurImpl(T** src, T** dst, const int W, const int H, const double sigma, bool useBoxBlur, eGaussType gausstype = GAUSS_STANDARD, T** buffer2 = nullptr)
1357 {
1358 static constexpr auto GAUSS_SKIP = 0.25;
1359 static constexpr auto GAUSS_3X3_LIMIT = 0.6;
1360 static constexpr auto GAUSS_5X5_LIMIT = 0.84;
1361 static constexpr auto GAUSS_7X7_LIMIT = 1.15;
1362 static constexpr auto GAUSS_DOUBLE = 25.0;
1363
1364 if (useBoxBlur) {
1365 // special variant for very large sigma, currently only used by retinex algorithm
1366 // use iterated boxblur to approximate gaussian blur
1367 // Compute ideal averaging filter width and number of iterations
1368 int n = 1;
1369 double wIdeal = sqrt((12 * sigma * sigma) + 1);
1370
1371 while(wIdeal > W || wIdeal > H) {
1372 n++;
1373 wIdeal = sqrt((12 * sigma * sigma / n) + 1);
1374 }
1375
1376 if(n < 3) {
1377 n = 3;
1378 wIdeal = sqrt((12 * sigma * sigma / n) + 1);
1379 } else if(n > 6) {
1380 n = 6;
1381 }
1382
1383 int wl = wIdeal;
1384
1385 if(wl % 2 == 0) {
1386 wl--;
1387 }
1388
1389 int wu = wl + 2;
1390
1391 double mIdeal = (12 * sigma * sigma - n * wl * wl - 4 * n * wl - 3 * n) / (-4 * wl - 4);
1392 int m = round(mIdeal);
1393
1394 int sizes[n];
1395
1396 for(int i = 0; i < n; i++) {
1397 sizes[i] = ((i < m ? wl : wu) - 1) / 2;
1398 }
1399
1400 rtengine::boxblur(src, dst, sizes[0], W, H, true);
1401
1402 for(int i = 1; i < n; i++) {
1403 rtengine::boxblur(dst, dst, sizes[i], W, H, true);
1404 }
1405 } else {
1406 if (sigma < GAUSS_SKIP) {
1407 // don't perform filtering
1408 if (src != dst) {
1409 for(int i = 0; i < H; ++i) {
1410 memcpy(dst[i], src[i], W * sizeof(T));
1411 }
1412 }
1413 } else if (sigma < GAUSS_3X3_LIMIT) {
1414 if(src != dst) {
1415 // If src != dst we can take the fast way
1416 // compute 3x3 kernel values
1417 double c0 = 1.0;
1418 double c1 = exp( -0.5 * (rtengine::SQR(1.0 / sigma)) );
1419 double c2 = exp( -rtengine::SQR(1.0 / sigma) );
1420
1421 // normalize kernel values
1422 double sum = c0 + 4.0 * (c1 + c2);
1423 c0 /= sum;
1424 c1 /= sum;
1425 c2 /= sum;
1426 // compute kernel values for border pixels
1427 double b1 = exp (-1.0 / (2.0 * sigma * sigma));
1428 double bsum = 2.0 * b1 + 1.0;
1429 b1 /= bsum;
1430 double b0 = 1.0 / bsum;
1431
1432 switch (gausstype) {
1433 case GAUSS_MULT :
1434 gauss3x3mult<T> (src, dst, W, H, c0, c1, c2, b0, b1);
1435 break;
1436
1437 case GAUSS_DIV :
1438 gauss3x3div<T> (src, dst, buffer2, W, H, c0, c1, c2, b0, b1);
1439 break;
1440
1441 case GAUSS_STANDARD :
1442 gauss3x3<T> (src, dst, W, H, c0, c1, c2, b0, b1);
1443 break;
1444 }
1445 } else {
1446 // compute kernel values for separated 3x3 gaussian blur
1447 double c1 = exp (-1.0 / (2.0 * sigma * sigma));
1448 double csum = 2.0 * c1 + 1.0;
1449 c1 /= csum;
1450 double c0 = 1.0 / csum;
1451 gaussHorizontal3<T> (src, dst, W, H, c0, c1);
1452 gaussVertical3<T> (dst, dst, W, H, c0, c1);
1453 }
1454 } else {
1455 #ifdef __SSE2__
1456
1457 if (sigma < GAUSS_DOUBLE) {
1458 switch (gausstype) {
1459 case GAUSS_MULT : {
1460 if (sigma <= GAUSS_5X5_LIMIT && src != dst) {
1461 gauss5x5mult(src, dst, W, H, sigma);
1462 } else if (sigma <= GAUSS_7X7_LIMIT && src != dst) {
1463 gauss7x7mult(src, dst, W, H, sigma);
1464 } else {
1465 gaussHorizontalSse<T> (src, src, W, H, sigma);
1466 gaussVerticalSsemult<T> (src, dst, W, H, sigma);
1467 }
1468 break;
1469 }
1470
1471 case GAUSS_DIV : {
1472 if (sigma <= GAUSS_5X5_LIMIT && src != dst) {
1473 gauss5x5div (src, dst, buffer2, W, H, sigma);
1474 } else if (sigma <= GAUSS_7X7_LIMIT && src != dst) {
1475 gauss7x7div (src, dst, buffer2, W, H, sigma);
1476 } else {
1477 gaussHorizontalSse<T> (src, dst, W, H, sigma);
1478 gaussVerticalSsediv<T> (dst, dst, buffer2, W, H, sigma);
1479 }
1480 break;
1481 }
1482
1483 case GAUSS_STANDARD : {
1484 gaussHorizontalSse<T> (src, dst, W, H, sigma);
1485 gaussVerticalSse<T> (dst, dst, W, H, sigma);
1486 break;
1487 }
1488 }
1489 } else { // large sigma only with double precision
1490 gaussHorizontal<T> (src, dst, W, H, sigma);
1491 gaussVertical<T> (dst, dst, W, H, sigma);
1492 }
1493
1494 #else
1495
1496 if (sigma < GAUSS_DOUBLE) {
1497 switch (gausstype) {
1498 case GAUSS_MULT : {
1499 if (sigma <= GAUSS_5X5_LIMIT && src != dst) {
1500 gauss5x5mult(src, dst, W, H, sigma);
1501 } else if (sigma <= GAUSS_7X7_LIMIT && src != dst) {
1502 gauss7x7mult(src, dst, W, H, sigma);
1503 } else {
1504 gaussHorizontal<T> (src, src, W, H, sigma);
1505 gaussVerticalmult<T> (src, dst, W, H, sigma);
1506 }
1507 break;
1508 }
1509
1510 case GAUSS_DIV : {
1511 if (sigma <= GAUSS_5X5_LIMIT && src != dst) {
1512 gauss5x5div (src, dst, buffer2, W, H, sigma);
1513 } else if (sigma <= GAUSS_7X7_LIMIT && src != dst) {
1514 gauss7x7div (src, dst, buffer2, W, H, sigma);
1515 } else {
1516 gaussHorizontal<T> (src, dst, W, H, sigma);
1517 gaussVerticaldiv<T> (dst, dst, buffer2, W, H, sigma);
1518 }
1519 break;
1520 }
1521
1522 case GAUSS_STANDARD : {
1523 gaussHorizontal<T> (src, dst, W, H, sigma);
1524 gaussVertical<T> (dst, dst, W, H, sigma);
1525 break;
1526 }
1527 }
1528 } else { // large sigma only with double precision
1529 gaussHorizontal<T> (src, dst, W, H, sigma);
1530 gaussVertical<T> (dst, dst, W, H, sigma);
1531 }
1532
1533 #endif
1534 }
1535 }
1536 }
1537 }
1538
// Public entry point for a Gaussian blur on float images: forwards to the
// internal gaussianBlurImpl<float>, which selects the implementation based
// on sigma, useBoxBlur and gausstype. buffer2 is the numerator image used
// only for GAUSS_DIV.
void gaussianBlur(float** src, float** dst, const int W, const int H, const double sigma, bool useBoxBlur, eGaussType gausstype, float** buffer2)
{
    gaussianBlurImpl<float>(src, dst, W, H, sigma, useBoxBlur, gausstype, buffer2);
}
1543
1544