1 #include "img_conv.h"
2
3 #ifdef __cplusplus
4 extern "C" {
5 #endif
6
/* Standard Prima object-access shortcuts: "var" casts the handle to the
   PImage instance record, "my" reaches its virtual method table.
   (Not referenced in this part of the file, but conventional across the
   image codebase.) */
#define var (( PImage) self)
#define my ((( PImage) self)-> self)
9
/*
 * BS_BYTEIMPACT_ROP(type,op,expr) emits bs_<type>_<op>(): horizontal
 * box-shrink of one scanline of "type" pixels.
 *   src_data, dst_data - source and target scanlines
 *   w                  - number of source pixels
 *   x, absx            - target width, signed and absolute; x != absx
 *                        requests a mirrored (right-to-left) write
 *   step               - 16.16 fixed-point ratio dst_w/src_w (see STEP())
 * "count" accumulates "step" once per source pixel; a new target pixel is
 * emitted whenever the integer part of the accumulator grows.  "expr"
 * runs for every source pixel and merges dropped pixels into the last
 * emitted target pixel dst_data[k] (empty for point sampling, &=/|= for
 * the AND/OR scaling variants; re-applying it to the pixel just emitted
 * is harmless since AND/OR with itself is idempotent).
 */
#define BS_BYTEIMPACT_ROP(type,op,expr) \
void bs_##type##_##op( \
	type * src_data, type * dst_data, \
	int w, int x, int absx, long step \
){ \
	Fixed count = {0}; \
	int last = 0; \
	int i, k; \
	int j = ( x == absx) ? 0 : ( absx - 1); \
	int inc = ( x == absx) ? 1 : -1; \
	dst_data[k=j] = src_data[0]; \
	j += inc; \
	for ( i = 0; i < w; i++) { \
		if ( count.i.i > last) { \
			dst_data[k=j] = src_data[i]; \
			j += inc; \
			last = count.i.i; \
		} \
		count.l += step; \
		expr; \
	} \
}
32
/* Point-sampling shrinker only: AND/OR merging is meaningless for
   non-integer sample types */
#define BS_BYTEIMPACT(type) \
	BS_BYTEIMPACT_ROP(type,in,)

/* Shrinkers for integer sample types: point sampling plus AND- and
   OR-merging variants */
#define BS_BYTEIMPACT_INT(type) \
	BS_BYTEIMPACT_ROP(type,in,) \
	BS_BYTEIMPACT_ROP(type,and,dst_data[k]&=src_data[i]) \
	BS_BYTEIMPACT_ROP(type,or,dst_data[k]|=src_data[i])
40
/*
 * BS_BYTEEXPAND(type) emits bs_<type>_out(): horizontal box-expand of one
 * scanline.  Iterates over the absx target pixels and advances the source
 * pointer whenever the integer part of the 16.16 accumulator grows; here
 * "step" is src_w/dst_w (< 1), so each source pixel is replicated into
 * one or more target pixels.  x != absx mirrors the output
 * right-to-left.  "w" (source width) is unused by this direction but kept
 * so all stretchers share the PStretchProc signature.
 */
#define BS_BYTEEXPAND( type) \
void bs_##type##_out( \
	type * src_data, type * dst_data, \
	int w, int x, int absx, long step \
){ \
	Fixed count = {0}; \
	int i; \
	int j = ( x == absx) ? 0 : ( absx - 1); \
	int inc = ( x == absx) ? 1 : -1; \
	int last = 0; \
	for ( i = 0; i < absx; i++) { \
		if ( count.i.i > last) { \
			last = count.i.i; \
			src_data++; \
		} \
		count.l += step; \
		dst_data[j] = *src_data; \
		j += inc; \
	} \
}
61
/* Instantiate the horizontal expanders for every supported sample type */
BS_BYTEEXPAND( uint8_t)
BS_BYTEEXPAND( int16_t)
BS_BYTEEXPAND( RGBColor)
BS_BYTEEXPAND( int32_t)
BS_BYTEEXPAND( float)
BS_BYTEEXPAND( double)
BS_BYTEEXPAND( Complex)
BS_BYTEEXPAND( DComplex)

/* ... and the shrinkers.  Integer types get AND/OR variants via
   BS_BYTEIMPACT_INT; RGBColor needs hand-written per-channel AND/OR
   expressions; floating-point and complex types get point sampling only */
BS_BYTEIMPACT_INT( uint8_t)
BS_BYTEIMPACT_INT( int16_t)
BS_BYTEIMPACT_INT( int32_t)
BS_BYTEIMPACT( RGBColor)
BS_BYTEIMPACT_ROP(RGBColor,and,dst_data[k].r&=src_data[i].r;dst_data[k].g&=src_data[i].g;dst_data[k].b&=src_data[i].b)
BS_BYTEIMPACT_ROP(RGBColor,or, dst_data[k].r|=src_data[i].r;dst_data[k].g|=src_data[i].g;dst_data[k].b|=src_data[i].b)
BS_BYTEIMPACT( float)
BS_BYTEIMPACT( double)
BS_BYTEIMPACT( Complex)
BS_BYTEIMPACT( DComplex)
81
82
/*
 * Horizontal box-shrink of a 1-bit scanline (point sampling).  Bits are
 * packed MSB-first; "xs" is a shift register fed from src_data, "xd"
 * collects output bits and is flushed byte-by-byte.  Parameters follow
 * the BS_BYTEIMPACT convention: w = source width in pixels, x/absx =
 * signed/absolute target width (x != absx mirrors the output), step =
 * 16.16 fixed-point dst/src ratio.
 */
void
bs_mono_in( uint8_t * src_data, uint8_t * dst_data, int w, int x, int absx, long step)
{
	Fixed count = {0};
	int last = 0;
	register int i, j;
	register U16 xd, xs;

	if ( x == absx) {
		/* forward: prime xd with the first (leftmost) source bit */
		xd = (( xs = src_data[0]) >> 7) & 1;
		j = 1;
		for ( i = 0; i < w; i++) {
			/* reload the shift register every 8 source bits */
			if (( i & 7) == 0) xs = src_data[ i >> 3];
			xs <<= 1;
			if ( count.i.i > last) {
				/* flush a completed output byte before shifting in bit 9 */
				if (( j & 7) == 0) dst_data[ ( j - 1) >> 3] = xd;
				xd <<= 1;
				xd |= ( xs >> 8) & 1;
				j++;
				last = count.i.i;
			}
			count.l += step;
		}
		/* flush the trailing, possibly partial, output byte (left-aligned) */
		i = j & 7;
		dst_data[( j - 1) >> 3] = xd << ( i ? ( 8 - i) : 0);
	} else {
		/* mirrored: build the output right-to-left, shifting bits in from
		   the high end of xd */
		j = absx - 1;
		xd = ( xs = src_data[ j >> 3]) & 0x80;
		for ( i = 0; i < w; i++) {
			if (( i & 7) == 0) xs = src_data[ i >> 3];
			xs <<= 1;
			if ( count.i.i > last) {
				if (( j & 7) == 0) dst_data[ ( j + 1) >> 3] = xd;
				xd >>= 1;
				xd |= ( xs >> 1) & 0x80;
				j--;
				last = count.i.i;
			}
			count.l += step;
		}
		dst_data[( j + 1) >> 3] = xd;
	}
}
126
/* Mask selecting pixel x within its byte (bits are packed MSB-first) */
#define BIT(x) (1 << (7 - (x & 7)))
128
/*
 * Horizontal shrink of a 1-bit scanline, AND-merging dropped pixels:
 * a target bit stays set only if every source pixel collapsed into it
 * was set.  Parameters as in bs_mono_in().  "k" tracks the last emitted
 * target bit so later dropped source pixels can clear it.
 */
void
bs_mono_and( uint8_t * src_data, uint8_t * dst_data, int w, int x, int absx, long step)
{
	Fixed count = {0};
	int last = 0;
	int i, k;
	int j = (x == absx) ? 0 : (absx - 1);
	int inc = (x == absx) ? 1 : -1;
	k = j;
	/* NOTE(review): this primes target byte j>>3 with src bit 7 kept at
	   bit position 7, regardless of j&7.  Correct for the forward case
	   (j == 0); for the mirrored start (j = absx-1, possibly mid-byte)
	   the bit lands at position 7 rather than BIT(j) — confirm intended */
	dst_data[j >> 3] = src_data[0] & 0x80;
	j += inc;
	for ( i = 0; i < w; i++) {
		if ( count.i.i > last) {
			/* advance: copy source bit i to a fresh target bit j */
			k = j;
			if ( src_data[i >> 3] & BIT(i))
				dst_data[j >> 3] |= BIT(j);
			else
				dst_data[j >> 3] &= ~BIT(j);
			j += inc;
			last = count.i.i;
		} else if (( src_data[i >> 3] & BIT(i)) == 0)
			/* dropped pixel: a clear source bit clears the last target bit */
			dst_data[k >> 3] &= ~BIT(k);
		count.l += step;
	}
}
154
/*
 * Horizontal shrink of a 1-bit scanline, OR-merging dropped pixels:
 * a target bit is set if any source pixel collapsed into it was set.
 * Structure mirrors bs_mono_and() exactly, with the merge inverted.
 */
void
bs_mono_or( uint8_t * src_data, uint8_t * dst_data, int w, int x, int absx, long step)
{
	Fixed count = {0};
	int last = 0;
	int i, k;
	int j = (x == absx) ? 0 : (absx - 1);
	int inc = (x == absx) ? 1 : -1;
	k = j;
	/* NOTE(review): same first-byte priming caveat as bs_mono_and() —
	   the initial bit always lands at bit position 7 of the byte */
	dst_data[j >> 3] = src_data[0] & 0x80;
	j += inc;
	for ( i = 0; i < w; i++) {
		if ( count.i.i > last) {
			/* advance: copy source bit i to a fresh target bit j */
			k = j;
			if ( src_data[i >> 3] & BIT(i))
				dst_data[j >> 3] |= BIT(j);
			else
				dst_data[j >> 3] &= ~BIT(j);
			j += inc;
			last = count.i.i;
		} else if ( src_data[i >> 3] & BIT(i))
			/* dropped pixel: a set source bit sets the last target bit */
			dst_data[k >> 3] |= BIT(k);
		count.l += step;
	}
}
180
181 #undef BIT
182
/*
 * Horizontal box-expand of a 1-bit scanline.  Iterates over the absx
 * target bits, advancing the source shift register "xs" when the 16.16
 * accumulator's integer part grows ("step" is src_w/dst_w here).  The
 * accumulator starts at step/2 so source advances are centred.
 * x != absx writes the output mirrored right-to-left.
 */
void
bs_mono_out( uint8_t * src_data, uint8_t * dst_data, int w, int x, int absx, long step)
{
	Fixed count = {step/2};
	register int i, j = 0;
	register U16 xd = 0, xs;
	int last = 0;

	if ( x == absx) {
		xs = src_data[0];
		for ( i = 0; i < absx; i++) {
			if ( count.i.i > last) {
				/* move to the next source bit; reload xs per source byte */
				last = count.i.i;
				xs <<= 1;
				j++;
				if (( j & 7) == 0) xs = src_data[ j >> 3];
			}
			count.l += step;
			/* shift the current source bit into the output register,
			   flushing every completed output byte */
			xd <<= 1;
			xd |= ( xs >> 7) & 1;
			if ((( i + 1) & 7) == 0) dst_data[ i >> 3] = xd;
		}
		/* flush the trailing partial byte, left-aligned */
		if ( i & 7) dst_data[ i >> 3] = xd << ( 8 - ( i & 7));
/* dst_data[ i >> 3] = xd << (( i & 7) ? ( 8 - ( i & 7)) : 0); */
	} else {
		/* mirrored: k walks the target from the right edge down */
		register int k = absx;
		xs = src_data[ j >> 3];
		for ( i = 0; i < absx; i++) {
			if ( count.i.i > last) {
				last = count.i.i;
				xs <<= 1;
				j++;
				if (( j & 7) == 0) xs = src_data[ j >> 3];
			}
			count.l += step;
			xd >>= 1;
			xd |= xs & 0x80;
			k--;
			if (( k & 7) == 0) dst_data[( k + 1) >> 3] = xd;
		}
		dst_data[( k + 0) >> 3] = xd;
	}
}
226
/* nibble stretching functions require *dst_data to be pre-filled with zeros */
228
bs_nibble_in(uint8_t * src_data,uint8_t * dst_data,int w,int x,int absx,long step)229 void bs_nibble_in( uint8_t * src_data, uint8_t * dst_data, int w, int x, int absx, long step)
230 {
231 Fixed count = {0};
232 int last = 0;
233 int i;
234 int j = (x == absx) ? 0 : ( absx - 1);
235 int inc = (x == absx) ? 1 : -1;
236 dst_data[ j >> 1] |= ( j & 1) ? ( src_data[ 0] >> 4) : ( src_data[ 0] & 0xF0);
237 j += inc;
238 for ( i = 0; i < w; i++) {
239 if ( count.i.i > last) {
240 if ( i & 1)
241 dst_data[ j >> 1] |= ( j & 1) ? ( src_data[ i >> 1] & 0x0F) : ( src_data[ i >> 1] << 4);
242 else
243 dst_data[ j >> 1] |= ( j & 1) ? ( src_data[ i >> 1] >> 4) : ( src_data[ i >> 1] & 0xF0);
244 j += inc;
245 last = count.i.i;
246 }
247 count.l += step;
248 }
249 }
250
bs_nibble_and(uint8_t * src_data,uint8_t * dst_data,int w,int x,int absx,long step)251 void bs_nibble_and( uint8_t * src_data, uint8_t * dst_data, int w, int x, int absx, long step)
252 {
253 Fixed count = {0};
254 int last = 0;
255 int i, k;
256 int j = (x == absx) ? 0 : ( absx - 1);
257 int inc = (x == absx) ? 1 : -1;
258 k = j;
259 dst_data[ j >> 1] |= ( j & 1) ? ( src_data[ 0] >> 4) : ( src_data[ 0] & 0xF0);
260 j += inc;
261 for ( i = 0; i < w; i++) {
262 if ( count.i.i > last) {
263 k = j;
264 if ( i & 1)
265 dst_data[ j >> 1] |= ( j & 1) ? ( src_data[ i >> 1] & 0x0F) : ( src_data[ i >> 1] << 4);
266 else
267 dst_data[ j >> 1] |= ( j & 1) ? ( src_data[ i >> 1] >> 4) : ( src_data[ i >> 1] & 0xF0);
268 j += inc;
269 last = count.i.i;
270 } else {
271 if ( i & 1)
272 dst_data[ k >> 1] &= ( k & 1) ? ((src_data[ i >> 1] & 0x0F) | 0xF0) : ((src_data[ i >> 1] << 4) | 0x0F);
273 else
274 dst_data[ k >> 1] &= ( k & 1) ? ((src_data[ i >> 1] >> 4) | 0xF0) : (( src_data[ i >> 1] & 0xF0) | 0x0F);
275 }
276 count.l += step;
277 }
278 }
279
bs_nibble_or(uint8_t * src_data,uint8_t * dst_data,int w,int x,int absx,long step)280 void bs_nibble_or( uint8_t * src_data, uint8_t * dst_data, int w, int x, int absx, long step)
281 {
282 Fixed count = {0};
283 int last = 0;
284 int i, k;
285 int j = (x == absx) ? 0 : ( absx - 1);
286 int inc = (x == absx) ? 1 : -1;
287 k = j;
288 dst_data[ j >> 1] |= ( j & 1) ? ( src_data[ 0] >> 4) : ( src_data[ 0] & 0xF0);
289 j += inc;
290 for ( i = 0; i < w; i++) {
291 if ( count.i.i > last) {
292 k = j;
293 if ( i & 1)
294 dst_data[ j >> 1] |= ( j & 1) ? ( src_data[ i >> 1] & 0x0F) : ( src_data[ i >> 1] << 4);
295 else
296 dst_data[ j >> 1] |= ( j & 1) ? ( src_data[ i >> 1] >> 4) : ( src_data[ i >> 1] & 0xF0);
297 j += inc;
298 last = count.i.i;
299 } else {
300 if ( i & 1)
301 dst_data[ k >> 1] |= ( k & 1) ? ( src_data[ i >> 1] & 0x0F) : ( src_data[ i >> 1] << 4);
302 else
303 dst_data[ k >> 1] |= ( k & 1) ? ( src_data[ i >> 1] >> 4) : ( src_data[ i >> 1] & 0xF0);
304 }
305 count.l += step;
306 }
307 }
308
bs_nibble_out(uint8_t * src_data,uint8_t * dst_data,int w,int x,int absx,long step)309 void bs_nibble_out( uint8_t * src_data, uint8_t * dst_data, int w, int x, int absx, long step)
310 {
311 Fixed count = {0};
312 int i, k = 0;
313 int j = (x == absx) ? 0 : ( absx - 1);
314 int inc = (x == absx) ? 1 : -1;
315 int last = 0;
316 for ( i = 0; i < absx; i++) {
317 if ( count.i.i > last) {
318 if (( k & 1) == 1) src_data++;
319 k++;
320 last = count.i.i;
321 }
322 count.l += step;
323 if ( k & 1)
324 dst_data[ j >> 1] |= ( j & 1) ? ( *src_data & 0x0F) : ( *src_data << 4);
325 else
326 dst_data[ j >> 1] |= ( j & 1) ? ( *src_data >> 4) : ( *src_data & 0xF0);
327 j += inc;
328 }
329 }
330
/* Convert a pixel-count ratio to a 16.16 fixed-point step.  The argument
   is deliberately NOT parenthesized: callers write STEP( a / b ) so the
   (double) cast binds to "a" and the division happens in floating point
   rather than as integer division. */
#define STEP(x) ((double)x * (double)(UINT16_PRECISION))

/* One row of the per-image-type dispatch table: horizontal shrinkers
   (plain / AND-merge / OR-merge) and the expander */
typedef struct {
	int type;
	PStretchProc in, and, or, out;
} stretch_t;

static stretch_t stretch_procs[] = {
#define SDECL(a,b) ((PStretchProc)bs_##a##_##b)
/* non-integer samples cannot be AND/OR-merged: reuse the plain shrinker
   in all three shrink slots */
#define STRETCH_STD(type) SDECL(type,in),SDECL(type,in), SDECL(type,in),SDECL(type,out)
#define STRETCH_INT(type) SDECL(type,in),SDECL(type,and),SDECL(type,or),SDECL(type,out)
	{imMono, STRETCH_INT(mono) },
	{imBW, STRETCH_INT(mono) },
	{imNibble, STRETCH_INT(nibble) },
	{imNibble|imGrayScale, STRETCH_INT(nibble) },
	{imByte, STRETCH_INT(uint8_t) },
	{im256, STRETCH_INT(uint8_t) },
	{imRGB, STRETCH_INT(RGBColor) },
	{imRGB|imGrayScale, STRETCH_INT(RGBColor) },
	{imShort, STRETCH_INT(int16_t) },
	{imLong, STRETCH_INT(int32_t) },
	{imFloat, STRETCH_STD(float) },
	{imDouble, STRETCH_STD(double) },
	{imComplex, STRETCH_STD(Complex) },
	{imDComplex, STRETCH_STD(DComplex) },
	{imTrigComplex, STRETCH_STD(Complex) },
	{imTrigDComplex, STRETCH_STD(DComplex) }
#undef STRETCH_STD
#undef STRETCH_INT
#undef SDECL
};
362
/* Row-merge operation applied when several source rows collapse into one
   target row during vertical AND/OR shrinking */
typedef void BitBltProc( Byte * src, Byte * dst, int count);
typedef BitBltProc *PBitBltProc;
365
366 static void
bitblt_or(Byte * src,Byte * dst,int count)367 bitblt_or( Byte * src, Byte * dst, int count)
368 {
369 while ( count--) *(dst++) |= *(src++);
370 }
371
372 static void
bitblt_and(Byte * src,Byte * dst,int count)373 bitblt_and( Byte * src, Byte * dst, int count)
374 {
375 while ( count--) *(dst++) &= *(src++);
376 }
377
/*
 * Box-stretch (nearest-neighbour, optionally with AND/OR pixel merging)
 * of a whole image.
 *   type      - im* pixel type constant
 *   src_data  - source pixels, src_w x src_h
 *   dst_data  - target buffer sized abs(w) x abs(h); negative w and/or h
 *               request horizontal/vertical mirroring
 *   scaling   - below istAND: istBoxX/istBoxY bits restrict stretching to
 *               one axis; istAND/istOR shrink with AND/OR merging
 *   error     - message buffer (at least 256 bytes) filled on failure
 * Returns false if no stretch routine exists for "type" or memory runs
 * out; true otherwise.
 */
Bool
ic_stretch_box( int type, Byte * src_data, int src_w, int src_h, Byte * dst_data, int w, int h, int scaling, char * error)
{
	int abs_h = h < 0 ? -h : h;
	int abs_w = w < 0 ? -w : w;
	int src_line = LINE_SIZE(src_w, type);
	int dst_line = LINE_SIZE(abs_w, type);
	Bool x_stretch, y_stretch;

	Fixed xstep, ystep, count;
	int last = 0;
	int i;
	int y_min = ( src_h > abs_h) ? abs_h : src_h;
	PStretchProc xproc = NULL;
	PBitBltProc yproc = NULL;
	Byte *src_last = NULL, impl_buf[1024];
	semistatic_t pimpl_buf;

	/* decide which axes actually need work */
	if ( scaling < istAND ) {
		x_stretch = scaling & istBoxX;
		y_stretch = scaling & istBoxY;
	} else
		x_stretch = y_stretch = 1;
	if ( w == src_w) x_stretch = false;
	if ( h == src_h) y_stretch = false;

	/* transfer */
	/* no stretching at all: plain row-by-row copy (with optional
	   vertical mirror when h < 0) */
	if ( !x_stretch && !y_stretch && ( w > 0)) {
		int y;
		int x_min = (( type & imBPP) < 8) ?
			( src_line > dst_line) ? dst_line : src_line :
			(((( src_w > abs_w) ? abs_w : src_w) * ( type & imBPP)) / 8);
		if ( src_w < w || src_h < abs_h) memset( dst_data, 0, dst_line * abs_h);
		if ( h < 0)
		{
			dst_data += dst_line * ( y_min - 1);
			dst_line = -dst_line;
		}
		for ( y = 0; y < y_min; y++, src_data += src_line, dst_data += dst_line)
			memcpy( dst_data, src_data, x_min);
		return true;
	}

	/* define processors */
	/* vertical AND/OR merge only applies when shrinking vertically */
	if ( y_stretch && abs_h < src_h && (scaling == istAND || scaling == istOR))
		yproc = (scaling == istAND) ? bitblt_and : bitblt_or;
	for ( i = 0; i < sizeof(stretch_procs)/sizeof(stretch_t); i++) {
		if ( stretch_procs[i].type != type ) continue;
		if ( src_w > abs_w) {
			if ( scaling == istAND )
				xproc = stretch_procs[i].and;
			else if ( scaling == istOR )
				xproc = stretch_procs[i].or;
			else
				xproc = stretch_procs[i].in;
			/* types without AND/OR variants alias them to .in; in that
			   case do not AND/OR-merge rows either */
			if ( xproc == stretch_procs[i].in )
				yproc = NULL;
		} else
			xproc = stretch_procs[i].out;
		break;
	}
	if ( xproc == NULL ) {
		strncpy(error, "Cannot stretch this image type", 255);
		return false;
	}


	/* y-only stretch case */
	if ( !x_stretch && y_stretch && ( w > 0)) {
		int x_min = (( type & imBPP) < 8) ?
			( src_line > dst_line) ? dst_line : src_line :
			(((( src_w > abs_w) ? abs_w : src_w) * ( type & imBPP)) / 8);
		Byte *last_dst_data;
		count.l = 0;
		if ( src_w < w) memset( dst_data, 0, dst_line * abs_h);
		if ( h < 0) {
			dst_data += dst_line * ( abs_h - 1);
			dst_line = -dst_line;
		}
		if ( abs_h < src_h) {
			/* vertical shrink: copy a row when the accumulator advances,
			   otherwise merge the dropped row into the last copied one */
			ystep.l = STEP( abs_h / src_h );
			memcpy( last_dst_data = dst_data, src_data, x_min);
			dst_data += dst_line;
			for ( i = 0; i < src_h; i++) {
				if ( count.i.i > last) {
					memcpy( last_dst_data = dst_data, src_data, x_min);
					dst_data += dst_line;
					last = count.i.i;
				} else if ( yproc && i > 0 )
					yproc( src_data, last_dst_data, x_min );
				count.l += ystep.l;
				src_data += src_line;
			}
		} else {
			/* vertical expand: advance the source row only when the
			   accumulator's integer part grows */
			ystep.l = STEP( src_h / abs_h);
			for ( i = 0; i < abs_h; i++) {
				if ( count.i.i > last) {
					src_data += src_line;
					last = count.i.i;
				}
				count.l += ystep.l;
				memcpy( dst_data, src_data, x_min);
				dst_data += dst_line;
			}
		}
		return true;
	}

	/* general actions for x-scaling */
	count.l = 0;
	/* nibble stretchers OR into the target, so it must be pre-zeroed;
	   expansion can also leave uncovered areas */
	if ( src_w < abs_w || src_h < abs_h || ( type & imBPP) == imNibble)
		memset( dst_data, 0, dst_line * abs_h);
	if ( abs_w < src_w)
		xstep. l = STEP( abs_w / src_w);
	else
		xstep. l = STEP( src_w / abs_w);

	/* no vertical stretch case */
	if ( !y_stretch || ( src_h == -h)) {
		if ( h < 0) {
			dst_data += dst_line * ( y_min - 1);
			dst_line = -dst_line;
		}
		for ( i = 0; i < y_min; i++, src_data += src_line, dst_data += dst_line)
			xproc( src_data, dst_data, src_w, w, abs_w, xstep.l);
		return true;
	}

	/* general case */
	/* AND/OR vertical merge needs a scratch row to x-stretch dropped
	   source rows into before merging them */
	if ( yproc ) {
		semistatic_init(&pimpl_buf, &impl_buf, 1, 1024);
		if ( !semistatic_expand(&pimpl_buf, dst_line)) {
			strncpy(error, "Not enough memory", 255);
			return false;
		}
	}

	if ( h < 0) {
		dst_data += dst_line * ( abs_h - 1);
		dst_line = -dst_line;
	}
	if ( abs_h < src_h) {
		/* shrink vertically: x-stretch the rows that survive, merge the
		   rest into the last emitted row via yproc (if any) */
		int j = 0;
		Byte * last_dst_data;
		ystep.l = STEP( abs_h / src_h);
		xproc( src_data, last_dst_data = dst_data, src_w, w, abs_w, xstep.l);
		dst_data += dst_line;
		j++;
		for ( i = 0; i < src_h; i++) {
			if ( count.i.i > last) {
				xproc( src_data, last_dst_data = dst_data, src_w, w, abs_w, xstep.l);
				dst_data += dst_line;
				last = count.i.i;
				j++;
			} else if ( yproc && i > 0 ) {
				bzero(pimpl_buf.heap, dst_line);
				xproc( src_data , pimpl_buf.heap, src_w, w, abs_w, xstep.l);
				yproc( pimpl_buf.heap, last_dst_data, dst_line);
			}
			count.l += ystep.l;
			src_data += src_line;
		}
	} else {
		/* expand vertically: when the same source row repeats, copy the
		   previously stretched target row instead of re-stretching */
		ystep.l = STEP( src_h / abs_h);
		for ( i = 0; i < abs_h; i++) {
			if ( count.i.i > last) {
				src_data += src_line;
				last = count.i.i;
			}
			count.l += ystep.l;
			if ( src_last == src_data) {
				memcpy( dst_data, dst_data - dst_line, dst_line < 0 ? -dst_line : dst_line);
			} else {
				xproc( src_data, dst_data, src_w, w, abs_w, xstep.l);
				src_last = src_data;
			}
			dst_data += dst_line;
		}
	}

	if ( yproc )
		semistatic_done(&pimpl_buf);

	return true;
}
563
564 /* Resizing with filters - stolen from ImageMagick MagickCore/resize.c */
565
566 #define PI 3.14159265358979323846264338327950288419716939937510
567 #define PI2 1.57079632679489661923132169163975144209858469968755
568
569 static double
filter_sinc_fast(const double x)570 filter_sinc_fast(const double x)
571 {
572 /*
573 Approximations of the sinc function sin(pi x)/(pi x) over the interval
574 [-4,4] constructed by Nicolas Robidoux and Chantal Racette with funding
575 from the Natural Sciences and Engineering Research Council of Canada.
576
577 Although the approximations are polynomials (for low order of
578 approximation) and quotients of polynomials (for higher order of
579 approximation) and consequently are similar in form to Taylor polynomials /
580 Pade approximants, the approximations are computed with a completely
581 different technique.
582
583 Summary: These approximations are "the best" in terms of bang (accuracy)
584 for the buck (flops). More specifically: Among the polynomial quotients
585 that can be computed using a fixed number of flops (with a given "+ - * /
586 budget"), the chosen polynomial quotient is the one closest to the
587 approximated function with respect to maximum absolute relative error over
588 the given interval.
589
590 The Remez algorithm, as implemented in the boost library's minimax package,
591 is the key to the construction: http://www.boost.org/doc/libs/1_36_0/libs/
592 math/doc/sf_and_dist/html/math_toolkit/backgrounders/remez.html
593
594 If outside of the interval of approximation, use the standard trig formula.
595 */
596 if (x > 4.0)
597 {
598 const double alpha=(double) (PI*x);
599 return(sin((double) alpha)/alpha);
600 }
601 {
602 /*
603 The approximations only depend on x^2 (sinc is an even function).
604 */
605 const double xx = x*x;
606 /*
607 Maximum absolute relative error 6.3e-6 < 1/2^17.
608 */
609 const double c0 = 0.173610016489197553621906385078711564924e-2L;
610 const double c1 = -0.384186115075660162081071290162149315834e-3L;
611 const double c2 = 0.393684603287860108352720146121813443561e-4L;
612 const double c3 = -0.248947210682259168029030370205389323899e-5L;
613 const double c4 = 0.107791837839662283066379987646635416692e-6L;
614 const double c5 = -0.324874073895735800961260474028013982211e-8L;
615 const double c6 = 0.628155216606695311524920882748052490116e-10L;
616 const double c7 = -0.586110644039348333520104379959307242711e-12L;
617 const double p =
618 c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
619 return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
620 }
621 }
622
filter_triangle(const double x)623 static double filter_triangle(const double x)
624 {
625 /*
626 1st order (linear) B-Spline, bilinear interpolation, Tent 1D filter, or
627 a Bartlett 2D Cone filter.
628 */
629 if (x < 1.0)
630 return(1.0-x);
631 return(0.0);
632 }
633
634
static double
filter_quadratic(const double x)
{
	/*
	 * 2nd order (quadratic) B-Spline approximation of a Gaussian,
	 * with support [0, 1.5).
	 */
	if (x >= 1.5)
		return 0.0;
	if (x >= 0.5) {
		const double t = x - 1.5;
		return 0.5 * t * t;
	}
	return 0.75 - x * x;
}
647
648 /*
649 Cubic Filters using B,C determined values:
650 Mitchell-Netravali B = 1/3 C = 1/3 "Balanced" cubic spline filter
651 Catmull-Rom B = 0 C = 1/2 Interpolatory and exact on linears
652 Spline B = 1 C = 0 B-Spline Gaussian approximation
653 Hermite B = 0 C = 0 B-Spline interpolator
654
655 See paper by Mitchell and Netravali, Reconstruction Filters in Computer
656 Graphics Computer Graphics, Volume 22, Number 4, August 1988
657 http://www.cs.utexas.edu/users/fussell/courses/cs384g/lectures/mitchell/
658 Mitchell.pdf.
659
Coefficients are determined from B,C values:
661 P0 = ( 6 - 2*B )/6 = coeff[0]
662 P1 = 0
663 P2 = (-18 +12*B + 6*C )/6 = coeff[1]
664 P3 = ( 12 - 9*B - 6*C )/6 = coeff[2]
665 Q0 = ( 8*B +24*C )/6 = coeff[3]
666 Q1 = ( -12*B -48*C )/6 = coeff[4]
667 Q2 = ( 6*B +30*C )/6 = coeff[5]
668 Q3 = ( - 1*B - 6*C )/6 = coeff[6]
669
670 which are used to define the filter:
671
672 P0 + P1*x + P2*x^2 + P3*x^3 0 <= x < 1
673 Q0 + Q1*x + Q2*x^2 + Q3*x^3 1 <= x < 2
674
675 which ensures function is continuous in value and derivative (slope).
676 */
static double
filter_cubic_spline0(const double x)
{
	/* Hermite kernel (B = 0, C = 0 cubic), support [0, 1). */
	if (x >= 1.0)
		return 0.0;
	return 1+x*(x*(-3.0+x*2));
}
683
static double
filter_cubic_spline1(const double x)
{
	/* Cubic B-Spline kernel (B = 1, C = 0), support [0, 2). */
	if (x >= 2.0)
		return 0.0;
	if (x >= 1.0)
		return 4.0/3.0+x*(-2.0+x*(1.0-x/6.0));
	return 2.0/3.0+x*(x*(-1.0+x/2.0));
}
693
694 static double
filter_gaussian(const double x)695 filter_gaussian(const double x)
696 {
697 /* Gaussian with a sigma = 1/2 */
698 return exp((double)(x*x*-2.0));
699 }
700
701
/* Resampling filters available to ic_stretch_filtered(): the ist*
   selector constant, the kernel function, and the kernel's support
   radius in source pixels.  Terminated by a zero id. */
FilterRec ist_filters[] = {
	{ istTriangle, filter_triangle, 1.0 },
	{ istQuadratic, filter_quadratic, 1.5 },
	{ istSinc, filter_sinc_fast, 4.0 },
	{ istHermite, filter_cubic_spline0, 1.0 },
	{ istCubic, filter_cubic_spline1, 2.0 },
	{ istGaussian, filter_gaussian, 2.0 },
	{ 0, NULL, 0.0 }
};
711
/*
 * Compute the filter weights ("contributions") of the source pixels that
 * affect target pixel "offset".
 *   filter        - kernel function and support radius
 *   contributions - output array; filled with doubles, converted in
 *                   place to 16.16 Fixed when as_fixed is true
 *   start         - out: index of the first contributing source pixel
 *   offset        - target pixel (or row) index
 *   factor        - dst/src scale factor
 *   max           - number of source pixels (upper clamp for the window)
 *   support       - filter support radius in source pixels
 * Returns the number of weights written.  Weights are normalized so they
 * sum to 1 (unless they sum to 0, e.g. an empty window).
 */
static int
fill_contributions( FilterRec * filter, double * contributions, int * start, int offset, double factor, int max, double support, Bool as_fixed )
{
	double bisect, density;
	int n, stop;

	/* centre of the target pixel, mapped back into source coordinates */
	bisect = (double) (offset + 0.5) / factor;
	*start = bisect - support + 0.5;
	if ( *start < 0 ) *start = 0;
	stop = bisect + support + 0.5;
	if ( stop > max ) stop = max;

	density = 0.0;
	for (n = 0; n < (stop-*start); n++) {
		contributions[n] = filter->filter(fabs((double) (*start+n)-bisect+0.5));
		density += contributions[n];
	}

	/* normalize to unit weight */
	if ( density != 0.0 && density != 1.0 ) {
		int i;
		for ( i = 0; i < n; i++) contributions[i] /= density;
	}

	/* in-place double -> Fixed conversion; walk backwards when Fixed is
	   wider than double so yet-unconverted doubles are not clobbered */
	if ( as_fixed && n > 0 ) {
		int i = (sizeof(Fixed) > sizeof(double)) ? (n - 1) : 0;
		int to = (sizeof(Fixed) > sizeof(double)) ? -1 : n;
		int incr = (sizeof(Fixed) > sizeof(double)) ? -1 : 1;
		for ( ; i != to; i += incr )
			((Fixed*)(contributions))[i].l = contributions[i] * 65536.0 + .5;
	}

	return n;
}
745
/*
 * The STRETCH_* macros below assemble one resampling pass per sample
 * type: *_OPEN emits the function head plus a fast-path memcpy for the
 * identity case; *_LOOP computes the per-column (or per-row) filter
 * window with fill_contributions() and accumulates the weighted sum of
 * source samples; STRETCH_PUTBACK / *_CLOSE store the finished pixel.
 * The pixel_init / pixel_access / as_fixed / contrib / roundoff
 * parameters select the accumulator: 16.16 Fixed for Byte samples
 * (as_fixed=1), a wider integer with "+.5" rounding for Short/Long, or
 * plain double for float/double.  contribution_storage is sliced per
 * OpenMP thread via OMP_THREAD_NUM / contribution_chunk.
 */

/* function head for a horizontal pass over `channels` interleaved
   channels of `type` samples */
#define STRETCH_HORIZONTAL_OPEN(type) \
static void \
stretch_horizontal_##type( \
	FilterRec * filter, double scale, double * contribution_storage, double support, \
	int channels, Byte * src_data, int src_w, int src_h, \
	Byte * dst_data, int dst_w, int dst_h, double x_factor, int contribution_chunk \
) { \
	int x, src_line_size, dst_line_size; \
	src_line_size = LINE_SIZE(src_w * channels, 8*sizeof(type)); \
	dst_line_size = LINE_SIZE(dst_w * channels, 8*sizeof(type));\
	if ( src_w == dst_w && src_h == dst_h ) { \
		memcpy( dst_data, src_data, dst_line_size * dst_h);\
		return;\
	}

/* loop nest: target column -> channel -> row -> filter tap */
#define STRETCH_HORIZONTAL_LOOP(type,pixel_init,pixel_access,as_fixed,contrib,roundoff) \
	for (x = 0; x < dst_w; x++) { \
		double * contributions = (double*) (((Byte*)contribution_storage) + contribution_chunk * OMP_THREAD_NUM); \
		int y, c, start, n = fill_contributions( filter, contributions, &start, x, x_factor, src_w, support, as_fixed ); \
		Byte * src = src_data + start * channels * sizeof(type); \
		Byte * dst = dst_data + x * channels * sizeof(type); \
		for ( c = 0; c < channels; c++, src += sizeof(type), dst += sizeof(type)) { \
			Byte *src_y = src, *dst_y = dst; \
			for ( y = 0; y < dst_h; y++, src_y += src_line_size, dst_y += dst_line_size ) { \
				register int j; \
				pixel_init;\
				register type *src_j = (type*)src_y; \
				for ( j = 0; j < n; j++, src_j += channels) \
					pixel_access += (contrib * *src_j roundoff);

/* lvalue for the finished target sample */
#define STRETCH_PUTBACK(type) \
	*((type*)(dst_y))
#define STRETCH_HORIZONTAL_CLOSE(type) STRETCH_PUTBACK(type) = pixel; }}}}

/* function head for a vertical pass (single channel: horizontal pixel
   interleave does not matter when whole rows are filtered) */
#define STRETCH_VERTICAL_OPEN(type) \
static void \
stretch_vertical_##type( \
	FilterRec * filter, double scale, double * contribution_storage, double support, \
	Byte * src_data, int src_w, int src_h, \
	Byte * dst_data, int dst_w, int dst_h, double y_factor, int contribution_chunk \
) { \
	int y, src_line_size, dst_line_size; \
	src_line_size = LINE_SIZE(src_w, 8*sizeof(type)); \
	dst_line_size = LINE_SIZE(dst_w, 8*sizeof(type));\
	if ( src_w == dst_w && src_h == dst_h ) { \
		memcpy( dst_data, src_data, dst_line_size * dst_h);\
		return;\
	}

/* loop nest: target row -> column -> filter tap */
#define STRETCH_VERTICAL_LOOP(type,pixel_init,pixel_access,as_fixed,contrib,roundoff) \
	for ( y = 0; y < dst_h; y++) { \
		Byte *src_y, *dst_y; \
		double * contributions = (double*) (((Byte*)contribution_storage) + contribution_chunk * OMP_THREAD_NUM); \
		int x, start, n = fill_contributions( filter, contributions, &start, y, y_factor, src_h, support, as_fixed ); \
		src_y = src_data + start * src_line_size; \
		dst_y = dst_data + y * dst_line_size; \
		for ( x = 0; x < dst_w; x++, src_y += sizeof(type), dst_y += sizeof(type)) { \
			int j; \
			pixel_init; \
			Byte * src = src_y;\
			for ( j = 0; j < n; j++, src += src_line_size) \
				pixel_access += (contrib * *((type*)(src)) roundoff);

#define STRETCH_VERTICAL_CLOSE(type) STRETCH_PUTBACK(type) = pixel; }}}

/* clamp a 16.16 Fixed accumulator to the Byte sample range */
#define STRETCH_FIXED_BYTE_CLAMP \
	if ( pixel.i.i < 0 ) pixel.i.i = 0;\
	if ( pixel.i.i > 255 ) pixel.i.i = 255;\
814
/* Byte passes accumulate in 16.16 fixed point — fill_contributions() was
   asked for as_fixed weights — then clamp the integer part to 0..255 */
STRETCH_HORIZONTAL_OPEN(Byte)
#ifdef HAVE_OPENMP
#pragma omp parallel for
#endif
STRETCH_HORIZONTAL_LOOP(Byte,Fixed pixel = {0},pixel.l,1,((Fixed*)contributions)[j].l,)
STRETCH_FIXED_BYTE_CLAMP
STRETCH_PUTBACK(Byte)=pixel.i.i;}}}
}

STRETCH_VERTICAL_OPEN(Byte)
#ifdef HAVE_OPENMP
#pragma omp parallel for
#endif
STRETCH_VERTICAL_LOOP(Byte,Fixed pixel = {0},pixel.l,1,((Fixed*)contributions)[j].l,)
STRETCH_FIXED_BYTE_CLAMP
STRETCH_PUTBACK(Byte)=pixel.i.i;}}
}
832
/* Clamp an integer accumulator to the sample range of the target type */
#define STRETCH_CLAMP(min,max) \
	if ( pixel < min ) pixel = min;\
	if ( pixel > max ) pixel = max;

/* Short/Long passes accumulate in a wider integer with +.5 rounding and
   clamp; float/double passes accumulate in double, no rounding or clamp */
STRETCH_HORIZONTAL_OPEN(Short)
#ifdef HAVE_OPENMP
#pragma omp parallel for
#endif
STRETCH_HORIZONTAL_LOOP(Short,register long pixel = 0,pixel,0,contributions[j],+.5)
STRETCH_CLAMP(INT16_MIN,INT16_MAX)
STRETCH_HORIZONTAL_CLOSE(Short)

STRETCH_VERTICAL_OPEN(Short)
#ifdef HAVE_OPENMP
#pragma omp parallel for
#endif
STRETCH_VERTICAL_LOOP(Short,register long pixel = 0,pixel,0,contributions[j],+.5)
STRETCH_CLAMP(INT16_MIN,INT16_MAX)
STRETCH_VERTICAL_CLOSE(Short)

STRETCH_HORIZONTAL_OPEN(Long)
#ifdef HAVE_OPENMP
#pragma omp parallel for
#endif
STRETCH_HORIZONTAL_LOOP(Long,register int64_t pixel = 0,pixel,0,contributions[j],+.5)
STRETCH_CLAMP(INT32_MIN,INT32_MAX)
STRETCH_HORIZONTAL_CLOSE(Long)

STRETCH_VERTICAL_OPEN(Long)
#ifdef HAVE_OPENMP
#pragma omp parallel for
#endif
STRETCH_VERTICAL_LOOP(Long,register int64_t pixel = 0,pixel,0,contributions[j],+.5)
STRETCH_CLAMP(INT32_MIN,INT32_MAX)
STRETCH_VERTICAL_CLOSE(Long)

STRETCH_HORIZONTAL_OPEN(float)
#ifdef HAVE_OPENMP
#pragma omp parallel for
#endif
STRETCH_HORIZONTAL_LOOP(float,register double pixel = 0,pixel,0,contributions[j],)
STRETCH_HORIZONTAL_CLOSE(float)

STRETCH_VERTICAL_OPEN(float)
#ifdef HAVE_OPENMP
#pragma omp parallel for
#endif
STRETCH_VERTICAL_LOOP(float,register double pixel = 0,pixel,0,contributions[j],)
STRETCH_VERTICAL_CLOSE(float)

STRETCH_HORIZONTAL_OPEN(double)
#ifdef HAVE_OPENMP
#pragma omp parallel for
#endif
STRETCH_HORIZONTAL_LOOP(double,register double pixel = 0,pixel,0,contributions[j],)
STRETCH_HORIZONTAL_CLOSE(double)

STRETCH_VERTICAL_OPEN(double)
#ifdef HAVE_OPENMP
#pragma omp parallel for
#endif
STRETCH_VERTICAL_LOOP(double,register double pixel = 0,pixel,0,contributions[j],)
STRETCH_VERTICAL_CLOSE(double)
896
/*
 * Stretch an image with one of the ist_filters resampling kernels, in
 * two separable passes: the axis with the larger scale factor is
 * resampled first into an intermediate buffer, then the other axis.
 * w and h must be positive — mirroring is handled by the caller
 * (ic_stretch_filtered).  Returns false with "error" filled on an
 * unknown filter, unsupported type, bad dimensions, or out of memory.
 */
static Bool
stretch_filtered( int type, Byte * old_data, int old_w, int old_h, Byte * new_data, int w, int h, int scaling, char * error )
{
	int channels, fw, fh, flw, i, support_size;
	double factor_x, factor_y, scale_x, scale_y, *contributions, support_x, support_y ;
	Byte * filter_data;
	FilterRec * filter = NULL;

	/* look up the kernel by its ist* id */
	for ( i = 0; ; i++) {
		if ( ist_filters[i]. id == 0 ) break;
		if ( ist_filters[i]. id == scaling ) {
			filter = &ist_filters[i];
			break;
		}
	}
	if ( !filter ) {
		strncpy( error, "no appropriate scaling filter found", 255);
		return false;
	}
	if ( w <= 0 || h <= 0) {
		strncpy(error, "image dimensions must be positive", 255);
		return false;
	}

	/* filtered scaling only works on byte-or-wider samples; map the
	   image type to a channel count per pixel */
	switch (type) {
	case imMono:
	case imBW:
	case imNibble:
	case im256:
	case imNibble | imGrayScale:
		strncpy(error, "type not supported", 255);
		return false;
	case imRGB:
		channels = 3;
		break;
	case imByte:
		channels = 1;
		break;
	case imComplex:
	case imDComplex:
	case imTrigComplex:
	case imTrigDComplex:
		channels = 2;
		break;
	default:
		channels = 1;
	}

	/* convert to multi-channel structures */
	/* RGB is treated as 3 interleaved Byte channels; complex types as 2
	   interleaved real channels of half the bit depth */
	if ( type == imRGB ) {
		type = imByte;
		w *= 3;
		old_w *= 3;
	}
	if ( channels == 2 ) {
		w *= 2;
		old_w *= 2;
		type = (( type & imBPP ) / 2) | imGrayScale | imRealNumber;
	}

	/* allocate space for semi-filtered and target data */
	/* the intermediate buffer takes the size after the first pass only */
	factor_x = (double) w / (double) old_w;
	factor_y = (double) h / (double) old_h;
	if (factor_x > factor_y) {
		fw = w;
		fh = old_h;
	} else {
		fw = old_w;
		fh = h;
	}
	flw = LINE_SIZE( fw, type);

	if ( !( filter_data = malloc( flw * fh ))) {
		snprintf(error, 255, "not enough memory: %d bytes", flw * fh);
		return false;
	}

	/* when shrinking, widen the kernel support by the shrink ratio */
	scale_x = 1.0 / factor_x;
	if ( scale_x < 1.0 ) scale_x = 1.0;
	scale_y = 1.0 / factor_y;
	if ( scale_y < 1.0 ) scale_y = 1.0;
	support_x = scale_x * filter-> support;
	support_y = scale_y * filter-> support;
	scale_x = 1.0 / scale_x;
	scale_y = 1.0 / scale_y;
	/* Support too small even for nearest neighbour: Reduce to point sampling. */
	if (support_x < 0.5) support_x = (double) 0.5;
	if (support_y < 0.5) support_y = (double) 0.5;
	/* per-thread scratch for fill_contributions(); NOTE(review): the
	   "* 3.0" slack factor looks like headroom for window rounding —
	   confirm against fill_contributions() limits */
	support_size = (int)(2.0 * (( support_x < support_y ) ? support_y : support_x) * 3.0);
	support_size *= (sizeof(Fixed) > sizeof(double)) ? sizeof(Fixed) : sizeof(double);
	if (!(contributions = malloc(support_size * OMP_MAX_THREADS))) {
		free( filter_data );
		snprintf(error, 255, "not enough memory: %d bytes", support_size);
		return false;
	}

	/* stretch */
	/* run the two passes in the order picked above, dispatching to the
	   per-type pass generated by the STRETCH_* macros */
	if (factor_x > factor_y) {
#define HORIZONTAL(type) stretch_horizontal_##type( \
	filter, scale_x, contributions, support_x, channels, \
	old_data, old_w / channels, old_h, filter_data, fw / channels, fh, factor_x, support_size)
#define VERTICAL(type) stretch_vertical_##type ( \
	filter, scale_y, contributions, support_y, \
	filter_data, fw, fh, new_data, w, h, factor_y, support_size )
#define HANDLE_TYPE(type,name) \
	case name: \
		HORIZONTAL(type);\
		VERTICAL(type);\
		break
		switch ( type ) {
		HANDLE_TYPE(Byte, imByte);
		HANDLE_TYPE(Short, imShort);
		HANDLE_TYPE(Long, imLong);
		HANDLE_TYPE(float, imFloat);
		HANDLE_TYPE(double, imDouble);
		default:
			croak("panic: bad image type: %x", type);
		}
#undef HORIZONTAL
#undef VERTICAL
#undef HANDLE_TYPE
	} else {
#define VERTICAL(type) stretch_vertical_##type ( \
	filter, scale_y, contributions, support_y, \
	old_data, old_w, old_h, filter_data, fw, fh, factor_y, support_size)
#define HORIZONTAL(type) stretch_horizontal_##type( \
	filter, scale_x, contributions, support_x, channels, \
	filter_data, fw / channels, fh, new_data, w / channels, h, factor_x, support_size)
#define HANDLE_TYPE(type,name) \
	case name: \
		VERTICAL(type);\
		HORIZONTAL(type);\
		break
		switch ( type ) {
		HANDLE_TYPE(Byte, imByte);
		HANDLE_TYPE(Short, imShort);
		HANDLE_TYPE(Long, imLong);
		HANDLE_TYPE(float, imFloat);
		HANDLE_TYPE(double, imDouble);
		default:
			croak("panic: bad image type: %x", type);
		}
#undef HORIZONTAL
#undef VERTICAL
#undef HANDLE_TYPE
	}
	free( contributions );
	free( filter_data );
	return true;
}
1047
1048 Bool
1049 ic_stretch_filtered( int type, Byte * old_data, int old_w, int old_h, Byte * new_data, int w, int h, int scaling, char * error )
1050 {
1051 int abs_w, abs_h;
1052 Bool mirror_x, mirror_y;
1053
1054 abs_w = abs(w);
1055 abs_h = abs(h);
1056 mirror_x = w < 0;
1057 mirror_y = h < 0;
1058
1059 /* if it's cheaper to mirror before the conversion, do it */
1060 if ( mirror_y && old_h < abs_h ) {
1061 img_mirror_raw( type, old_w, old_h, old_data, 1 );
1062 mirror_y = 0;
1063 }
1064 if ( mirror_x && old_w < abs_w) {
1065 img_mirror_raw( type, old_w, old_h, old_data, 0 );
1066 mirror_x = 0;
1067 }
1068
1069 if ( !stretch_filtered( type, old_data, old_w, old_h, new_data, w, h, scaling, error ))
1070 return false;
1071
1072 if ( mirror_x ) img_mirror_raw( type, w, h, new_data, 0 );
1073 if ( mirror_y ) img_mirror_raw( type, w, h, new_data, 1 );
1074
1075 return true;
1076 }
1077
1078 int
1079 ic_stretch_suggest_type( int type, int scaling )
1080 {
1081 if ( scaling <= istOR )
1082 return type;
1083
1084 switch (type) {
1085 case imMono:
1086 case imNibble:
1087 case im256:
1088 case imRGB:
1089 return imRGB;
1090 case imBW:
1091 case imNibble | imGrayScale:
1092 case imByte:
1093 return imByte;
1094 break;
1095 default:
1096 return type;
1097 }
1098 }
1099
1100 Bool
1101 ic_stretch( int type, Byte * src_data, int src_w, int src_h, Byte * dst_data, int w, int h, int scaling, char * error)
1102 {
1103 return ( scaling <= istOR ) ?
1104 ic_stretch_box( type, src_data, src_w, src_h, dst_data, w, h, scaling, error):
1105 ic_stretch_filtered( type, src_data, src_w, src_h, dst_data, w, h, scaling, error );
1106 }
1107
1108 #ifdef __cplusplus
1109 }
1110 #endif
1111