1 //----------------------------------------------------------------------------
2 // Anti-Grain Geometry - Version 2.4 (Public License)
3 // Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
4 //
5 // Anti-Grain Geometry - Version 2.4 Release Milano 3 (AggPas 2.4 RM3)
6 // Pascal Port By: Milan Marusinec alias Milano
7 //                 milan@marusinec.sk
8 //                 http://www.aggpas.org
9 // Copyright (c) 2005-2006
10 //
11 // Permission to copy, use, modify, sell and distribute this software
12 // is granted provided this copyright notice appears in all copies.
13 // This software is provided "as is" without express or implied
14 // warranty, and with no claim as to its suitability for any purpose.
15 //
16 //----------------------------------------------------------------------------
17 // Contact: mcseem@antigrain.com
18 //          mcseemagg@yahoo.com
19 //          http://www.antigrain.com
20 //----------------------------------------------------------------------------
21 //
22 // Adaptation for high precision colors has been sponsored by
23 // Liberty Technology Systems, Inc., visit http://lib-sys.com
24 //
25 // Liberty Technology Systems, Inc. is the provider of
26 // PostScript and PDF technology for software developers.
27 //
28 // [Pascal Port History] -----------------------------------------------------
29 //
30 // 12.10.2007-Milano: comp_op_rgba_invert & comp_op_rgba_invert_rgb
31 // 08.10.2007-Milano: pixfmt_alpha_blend_rgba
32 // 13.09.2007-Milano: comp_op_adaptor_clip_to_dst_rgba_pre
33 // 23.06.2006-Milano: ptrcomp adjustments
34 // 18.03.2006-Milano: pf_xxx.inc completed
35 // 13.01.2006-Milano: rgba ,argb & abgr stuff
36 // 16.11.2005-Milano: Unit port establishment
37 //
38 { agg_pixfmt_rgba.pas }
39 unit
40  agg_pixfmt_rgba ;
41 
42 INTERFACE
43 
44 {$I agg_mode.inc }
45 {$Q- }
46 {$R- }
47 uses
48  agg_basics ,
49  agg_pixfmt ,
50  agg_color ,
51  agg_rendering_buffer ;
52 
53 { GLOBAL PROCEDURES }
54  procedure pixfmt_bgra32(var pixf : pixel_formats; rb : rendering_buffer_ptr );
55  procedure pixfmt_rgba32(var pixf : pixel_formats; rb : rendering_buffer_ptr );
56  procedure pixfmt_argb32(var pixf : pixel_formats; rb : rendering_buffer_ptr );
57  procedure pixfmt_abgr32(var pixf : pixel_formats; rb : rendering_buffer_ptr );
58 
59  procedure pixfmt_bgra32_pre(var pixf : pixel_formats; rb : rendering_buffer_ptr );
60  procedure pixfmt_rgba32_pre(var pixf : pixel_formats; rb : rendering_buffer_ptr );
61  procedure pixfmt_argb32_pre(var pixf : pixel_formats; rb : rendering_buffer_ptr );
62  procedure pixfmt_abgr32_pre(var pixf : pixel_formats; rb : rendering_buffer_ptr );
63 
64  procedure comp_op_adaptor_rgba                (this : pixel_formats_ptr; op : unsigned; p : int8u_ptr; cr ,cg ,cb ,ca ,cover : unsigned );
65  procedure comp_op_adaptor_clip_to_dst_rgba_pre(this : pixel_formats_ptr; op : unsigned; p : int8u_ptr; cr ,cg ,cb ,ca ,cover : unsigned );
66 
67  procedure pixfmt_alpha_blend_rgba (var pixf : pixel_formats; rb : rendering_buffer_ptr; order : order_type );
68  procedure pixfmt_custom_blend_rgba(var pixf : pixel_formats; rb : rendering_buffer_ptr; bl : func_blender; order : order_type );
69 
70 
71 IMPLEMENTATION
72 { LOCAL VARIABLES & CONSTANTS }
73 { UNIT IMPLEMENTATION }
{ fmt32_row }
{ Build a row_data_type descriptor for scanline y, starting at pixel column x.
  Each pixel occupies 4 bytes. }
function fmt32_row(this : pixel_formats_ptr; x ,y : int ) : row_data_type;
var
 row_start : int8u_ptr;

begin
 row_start:=int8u_ptr(ptrcomp(this.m_rbuf.row(y ) ) + x * 4 * sizeof(int8u ) );

 result.Construct(x ,this._width - 1 ,row_start );

end;
82 
{ fmt32_copy_from }
{ Copy len 32-bit pixels from row ysrc of the source buffer (starting at
  column xsrc) into row ydst of this buffer (starting at column xdst). }
procedure fmt32_copy_from(this : pixel_formats_ptr; from : rendering_buffer_ptr; xdst ,ydst ,xsrc ,ysrc : int; len : unsigned );
var
 src ,dst : unsigned_ptr;

begin
 src:=unsigned_ptr(ptrcomp(from.row(ysrc ) ) + xsrc * 4 );
 dst:=unsigned_ptr(ptrcomp(this.m_rbuf.row(ydst ) ) + xdst * 4 );

 move(src^ ,dst^ ,len * 4 );

end;
92 
{ order32_for_each_pixel }
{ Apply the callback f to every pixel of the buffer, row by row.
  Pixels are 4 bytes wide, so p advances in 4-byte steps.
  Fixed: the original inner repeat..until loop invoked f once even for a
  zero-width buffer; the loop is now guarded by len > 0. }
procedure order32_for_each_pixel(this : pixel_formats_ptr; f : func_apply_gamma );
var
 y ,len : unsigned;

 p : int8u_ptr;

begin
 y:=0;

 while y < this._height do
  begin
   len:=this._width;

   p:=this.m_rbuf.row(y );

   { Guarded loop: skips empty rows instead of touching one bogus pixel. }
   while len > 0 do
    begin
     f(this ,p );

     inc(ptrcomp(p ) ,4 );
     dec(len );

    end;

   inc(y );

  end;

end;
122 
{ order32_gamma_dir_apply }
{ Run the direct gamma lookup over the R, G and B channels of one pixel.
  The alpha channel is left untouched. }
procedure order32_gamma_dir_apply(this : pixel_formats_ptr; p : int8u_ptr );
var
 pr ,pg ,pb : int8u_ptr;

begin
 pr:=int8u_ptr(ptrcomp(p ) + this.m_order.R );
 pg:=int8u_ptr(ptrcomp(p ) + this.m_order.G );
 pb:=int8u_ptr(ptrcomp(p ) + this.m_order.B );

 pr^:=int8u(this.m_apply.dir(pr^ ) );
 pg^:=int8u(this.m_apply.dir(pg^ ) );
 pb^:=int8u(this.m_apply.dir(pb^ ) );

end;
131 
{ order32_gamma_inv_apply }
{ Run the inverse gamma lookup over the R, G and B channels of one pixel.
  The alpha channel is left untouched. }
procedure order32_gamma_inv_apply(this : pixel_formats_ptr; p : int8u_ptr );
var
 pr ,pg ,pb : int8u_ptr;

begin
 pr:=int8u_ptr(ptrcomp(p ) + this.m_order.R );
 pg:=int8u_ptr(ptrcomp(p ) + this.m_order.G );
 pb:=int8u_ptr(ptrcomp(p ) + this.m_order.B );

 pr^:=int8u(this.m_apply.inv(pr^ ) );
 pg^:=int8u(this.m_apply.inv(pg^ ) );
 pb^:=int8u(this.m_apply.inv(pb^ ) );

end;
140 
{ order32_pixel_premultiply }
{ Convert one pixel to premultiplied form: every color channel is scaled
  by the pixel's alpha ((c * a + base_mask) shr base_shift). A fully
  transparent pixel collapses to black. }
procedure order32_pixel_premultiply(this : pixel_formats_ptr; p : int8u_ptr );
var
 a : unsigned;

 pr ,pg ,pb : int8u_ptr;

begin
 a :=int8u_ptr(ptrcomp(p ) + this.m_order.A )^;
 pr:=int8u_ptr(ptrcomp(p ) + this.m_order.R );
 pg:=int8u_ptr(ptrcomp(p ) + this.m_order.G );
 pb:=int8u_ptr(ptrcomp(p ) + this.m_order.B );

 if a = 0 then
  begin
   pr^:=0;
   pg^:=0;
   pb^:=0;

  end
 else
  begin
   pr^:=int8u((pr^ * a + base_mask ) shr base_shift );
   pg^:=int8u((pg^ * a + base_mask ) shr base_shift );
   pb^:=int8u((pb^ * a + base_mask ) shr base_shift );

  end;

end;
170 
{ order32_pixel_demultiply }
{ Convert one premultiplied pixel back to plain form: every color channel
  is divided by alpha ((c * base_mask) div a) and clamped to base_mask.
  A fully transparent pixel collapses to black. }
procedure order32_pixel_demultiply(this : pixel_formats_ptr; p : int8u_ptr );
var
 r ,g ,b ,a : unsigned;

 pr ,pg ,pb : int8u_ptr;

begin
 a :=int8u_ptr(ptrcomp(p ) + this.m_order.A )^;
 pr:=int8u_ptr(ptrcomp(p ) + this.m_order.R );
 pg:=int8u_ptr(ptrcomp(p ) + this.m_order.G );
 pb:=int8u_ptr(ptrcomp(p ) + this.m_order.B );

 if a = 0 then
  begin
   pr^:=0;
   pg^:=0;
   pb^:=0;

  end
 else
  begin
   r:=(pr^ * base_mask ) div a;
   g:=(pg^ * base_mask ) div a;
   b:=(pb^ * base_mask ) div a;

   { Clamp before storing: the division can exceed base_mask when a
     channel value is larger than the alpha it was premultiplied with. }
   if r > base_mask then
    r:=base_mask;

   if g > base_mask then
    g:=base_mask;

   if b > base_mask then
    b:=base_mask;

   pr^:=r;
   pg^:=g;
   pb^:=b;

  end;

end;
210 
211 {$I pf_bgra32.inc }
212 
{ PIXFMT_BGRA32 }
{ Wire pixf up as a plain (non-premultiplied) BGRA32 pixel format over rb. }
procedure pixfmt_bgra32;
begin
 pixf.Construct(rb );

 with pixf do
  begin
   m_order:=bgra_order;

   m_pix_width:=4;

   copy_pixel :=@bgra32_copy_pixel;
   blend_pixel:=@bgra32_blend_pixel;

   pixel:=@bgra32_pixel;
   row  :=@fmt32_row;

   copy_hline:=@bgra32_copy_hline;
   copy_vline:=@bgra32_copy_vline;

   blend_hline:=@bgra32_blend_hline;
   blend_vline:=@bgra32_blend_vline;

   blend_solid_hspan:=@bgra32_blend_solid_hspan;
   blend_solid_vspan:=@bgra32_blend_solid_vspan;

   copy_color_hspan:=@bgra32_copy_color_hspan;
   copy_color_vspan:=@bgra32_copy_color_vspan;

   blend_color_hspan:=@bgra32_blend_color_hspan;
   blend_color_vspan:=@bgra32_blend_color_vspan;

   copy_from :=@fmt32_copy_from;
   blend_from:=@bgra32_blend_from;

   blend_from_color:=@bgra32_blend_from_color;
   blend_from_lut  :=@bgra32_blend_from_lut;

   for_each_pixel :=@order32_for_each_pixel;
   gamma_dir_apply:=@order32_gamma_dir_apply;
   gamma_inv_apply:=@order32_gamma_inv_apply;

   pixel_premultiply:=@order32_pixel_premultiply;
   pixel_demultiply :=@order32_pixel_demultiply;

  end;

end;
257 
258 {$I pf_rgba32.inc }
259 
{ PIXFMT_RGBA32 }
{ Wire pixf up as a plain (non-premultiplied) RGBA32 pixel format over rb. }
procedure pixfmt_rgba32;
begin
 pixf.Construct(rb );

 with pixf do
  begin
   m_order:=rgba_order;

   m_pix_width:=4;

   copy_pixel :=@rgba32_copy_pixel;
   blend_pixel:=@rgba32_blend_pixel;

   pixel:=@rgba32_pixel;
   row  :=@fmt32_row;

   copy_hline:=@rgba32_copy_hline;
   copy_vline:=@rgba32_copy_vline;

   blend_hline:=@rgba32_blend_hline;
   blend_vline:=@rgba32_blend_vline;

   blend_solid_hspan:=@rgba32_blend_solid_hspan;
   blend_solid_vspan:=@rgba32_blend_solid_vspan;

   copy_color_hspan:=@rgba32_copy_color_hspan;
   copy_color_vspan:=@rgba32_copy_color_vspan;

   blend_color_hspan:=@rgba32_blend_color_hspan;
   blend_color_vspan:=@rgba32_blend_color_vspan;

   copy_from :=@fmt32_copy_from;
   blend_from:=@rgba32_blend_from;

   blend_from_color:=@rgba32_blend_from_color;
   blend_from_lut  :=@rgba32_blend_from_lut;

   for_each_pixel :=@order32_for_each_pixel;
   gamma_dir_apply:=@order32_gamma_dir_apply;
   gamma_inv_apply:=@order32_gamma_inv_apply;

   pixel_premultiply:=@order32_pixel_premultiply;
   pixel_demultiply :=@order32_pixel_demultiply;

  end;

end;
304 
305 {$I pf_argb32.inc }
306 
{ PIXFMT_ARGB32 }
{ Wire pixf up as a plain (non-premultiplied) ARGB32 pixel format over rb. }
procedure pixfmt_argb32;
begin
 pixf.Construct(rb );

 with pixf do
  begin
   m_order:=argb_order;

   m_pix_width:=4;

   copy_pixel :=@argb32_copy_pixel;
   blend_pixel:=@argb32_blend_pixel;

   pixel:=@argb32_pixel;
   row  :=@fmt32_row;

   copy_hline:=@argb32_copy_hline;
   copy_vline:=@argb32_copy_vline;

   blend_hline:=@argb32_blend_hline;
   blend_vline:=@argb32_blend_vline;

   blend_solid_hspan:=@argb32_blend_solid_hspan;
   blend_solid_vspan:=@argb32_blend_solid_vspan;

   copy_color_hspan:=@argb32_copy_color_hspan;
   copy_color_vspan:=@argb32_copy_color_vspan;

   blend_color_hspan:=@argb32_blend_color_hspan;
   blend_color_vspan:=@argb32_blend_color_vspan;

   copy_from :=@fmt32_copy_from;
   blend_from:=@argb32_blend_from;

   blend_from_color:=@argb32_blend_from_color;
   blend_from_lut  :=@argb32_blend_from_lut;

   for_each_pixel :=@order32_for_each_pixel;
   gamma_dir_apply:=@order32_gamma_dir_apply;
   gamma_inv_apply:=@order32_gamma_inv_apply;

   pixel_premultiply:=@order32_pixel_premultiply;
   pixel_demultiply :=@order32_pixel_demultiply;

  end;

end;
351 
352 {$I pf_abgr32.inc }
353 
{ PIXFMT_ABGR32 }
{ Wire pixf up as a plain (non-premultiplied) ABGR32 pixel format over rb. }
procedure pixfmt_abgr32;
begin
 pixf.Construct(rb );

 with pixf do
  begin
   m_order:=abgr_order;

   m_pix_width:=4;

   copy_pixel :=@abgr32_copy_pixel;
   blend_pixel:=@abgr32_blend_pixel;

   pixel:=@abgr32_pixel;
   row  :=@fmt32_row;

   copy_hline:=@abgr32_copy_hline;
   copy_vline:=@abgr32_copy_vline;

   blend_hline:=@abgr32_blend_hline;
   blend_vline:=@abgr32_blend_vline;

   blend_solid_hspan:=@abgr32_blend_solid_hspan;
   blend_solid_vspan:=@abgr32_blend_solid_vspan;

   copy_color_hspan:=@abgr32_copy_color_hspan;
   copy_color_vspan:=@abgr32_copy_color_vspan;

   blend_color_hspan:=@abgr32_blend_color_hspan;
   blend_color_vspan:=@abgr32_blend_color_vspan;

   copy_from :=@fmt32_copy_from;
   blend_from:=@abgr32_blend_from;

   blend_from_color:=@abgr32_blend_from_color;
   blend_from_lut  :=@abgr32_blend_from_lut;

   for_each_pixel :=@order32_for_each_pixel;
   gamma_dir_apply:=@order32_gamma_dir_apply;
   gamma_inv_apply:=@order32_gamma_inv_apply;

   pixel_premultiply:=@order32_pixel_premultiply;
   pixel_demultiply :=@order32_pixel_demultiply;

  end;

end;
398 
399 {$I pf_bgra32_pre.inc }
400 
{ PIXFMT_BGRA32_PRE }
{ Wire pixf up as a premultiplied BGRA32 pixel format over rb.
  Fixed: copy_color_hspan/copy_color_vspan were assigned without the @
  operator, inconsistent with every other handler assignment in this unit
  (and rejected by FPC in objfpc mode). }
procedure pixfmt_bgra32_pre;
begin
 pixf.Construct(rb );

 pixf.m_order:=bgra_order;

 pixf.m_pix_width:=4;

 pixf.copy_pixel :=@bgra32_copy_pixel;
 pixf.blend_pixel:=@bgra32_pre_blend_pixel;

 pixf.pixel:=@bgra32_pixel;
 pixf.row  :=@fmt32_row;

 pixf.copy_hline:=@bgra32_copy_hline;
 pixf.copy_vline:=@bgra32_copy_vline;

 pixf.blend_hline:=@bgra32_pre_blend_hline;
 pixf.blend_vline:=@bgra32_pre_blend_vline;

 pixf.blend_solid_hspan:=@bgra32_pre_blend_solid_hspan;
 pixf.blend_solid_vspan:=@bgra32_pre_blend_solid_vspan;

 { Plain copies need no premultiplied variant; blend handlers do. }
 pixf.copy_color_hspan:=@bgra32_copy_color_hspan;
 pixf.copy_color_vspan:=@bgra32_copy_color_vspan;

 pixf.blend_color_hspan:=@bgra32_pre_blend_color_hspan;
 pixf.blend_color_vspan:=@bgra32_pre_blend_color_vspan;

 pixf.copy_from :=@fmt32_copy_from;
 pixf.blend_from:=@bgra32_pre_blend_from;

 pixf.blend_from_color:=@bgra32_pre_blend_from_color;
 pixf.blend_from_lut  :=@bgra32_pre_blend_from_lut;

 pixf.for_each_pixel :=@order32_for_each_pixel;
 pixf.gamma_dir_apply:=@order32_gamma_dir_apply;
 pixf.gamma_inv_apply:=@order32_gamma_inv_apply;

 pixf.pixel_premultiply:=@order32_pixel_premultiply;
 pixf.pixel_demultiply :=@order32_pixel_demultiply;

end;
445 
446 {$I pf_rgba32_pre.inc }
447 
{ PIXFMT_RGBA32_PRE }
{ Wire pixf up as a premultiplied RGBA32 pixel format over rb.
  Fixed: copy_color_hspan/copy_color_vspan were assigned without the @
  operator, inconsistent with every other handler assignment in this unit
  (and rejected by FPC in objfpc mode). }
procedure pixfmt_rgba32_pre;
begin
 pixf.Construct(rb );

 pixf.m_order:=rgba_order;

 pixf.m_pix_width:=4;

 pixf.copy_pixel :=@rgba32_copy_pixel;
 pixf.blend_pixel:=@rgba32_pre_blend_pixel;

 pixf.pixel:=@rgba32_pixel;
 pixf.row  :=@fmt32_row;

 pixf.copy_hline:=@rgba32_copy_hline;
 pixf.copy_vline:=@rgba32_copy_vline;

 pixf.blend_hline:=@rgba32_pre_blend_hline;
 pixf.blend_vline:=@rgba32_pre_blend_vline;

 pixf.blend_solid_hspan:=@rgba32_pre_blend_solid_hspan;
 pixf.blend_solid_vspan:=@rgba32_pre_blend_solid_vspan;

 { Plain copies need no premultiplied variant; blend handlers do. }
 pixf.copy_color_hspan:=@rgba32_copy_color_hspan;
 pixf.copy_color_vspan:=@rgba32_copy_color_vspan;

 pixf.blend_color_hspan:=@rgba32_pre_blend_color_hspan;
 pixf.blend_color_vspan:=@rgba32_pre_blend_color_vspan;

 pixf.copy_from :=@fmt32_copy_from;
 pixf.blend_from:=@rgba32_pre_blend_from;

 pixf.blend_from_color:=@rgba32_pre_blend_from_color;
 pixf.blend_from_lut  :=@rgba32_pre_blend_from_lut;

 pixf.for_each_pixel :=@order32_for_each_pixel;
 pixf.gamma_dir_apply:=@order32_gamma_dir_apply;
 pixf.gamma_inv_apply:=@order32_gamma_inv_apply;

 pixf.pixel_premultiply:=@order32_pixel_premultiply;
 pixf.pixel_demultiply :=@order32_pixel_demultiply;

end;
492 
493 {$I pf_argb32_pre.inc }
494 
{ PIXFMT_ARGB32_PRE }
{ Wire pixf up as a premultiplied ARGB32 pixel format over rb.
  Fixed: copy_color_hspan/copy_color_vspan were assigned without the @
  operator, inconsistent with every other handler assignment in this unit
  (and rejected by FPC in objfpc mode). }
procedure pixfmt_argb32_pre;
begin
 pixf.Construct(rb );

 pixf.m_order:=argb_order;

 pixf.m_pix_width:=4;

 pixf.copy_pixel :=@argb32_copy_pixel;
 pixf.blend_pixel:=@argb32_pre_blend_pixel;

 pixf.pixel:=@argb32_pixel;
 pixf.row  :=@fmt32_row;

 pixf.copy_hline:=@argb32_copy_hline;
 pixf.copy_vline:=@argb32_copy_vline;

 pixf.blend_hline:=@argb32_pre_blend_hline;
 pixf.blend_vline:=@argb32_pre_blend_vline;

 pixf.blend_solid_hspan:=@argb32_pre_blend_solid_hspan;
 pixf.blend_solid_vspan:=@argb32_pre_blend_solid_vspan;

 { Plain copies need no premultiplied variant; blend handlers do. }
 pixf.copy_color_hspan:=@argb32_copy_color_hspan;
 pixf.copy_color_vspan:=@argb32_copy_color_vspan;

 pixf.blend_color_hspan:=@argb32_pre_blend_color_hspan;
 pixf.blend_color_vspan:=@argb32_pre_blend_color_vspan;

 pixf.copy_from :=@fmt32_copy_from;
 pixf.blend_from:=@argb32_pre_blend_from;

 pixf.blend_from_color:=@argb32_pre_blend_from_color;
 pixf.blend_from_lut  :=@argb32_pre_blend_from_lut;

 pixf.for_each_pixel :=@order32_for_each_pixel;
 pixf.gamma_dir_apply:=@order32_gamma_dir_apply;
 pixf.gamma_inv_apply:=@order32_gamma_inv_apply;

 pixf.pixel_premultiply:=@order32_pixel_premultiply;
 pixf.pixel_demultiply :=@order32_pixel_demultiply;

end;
539 
540 {$I pf_abgr32_pre.inc }
541 
{ PIXFMT_ABGR32_PRE }
{ Wire pixf up as a premultiplied ABGR32 pixel format over rb.
  Fixed: copy_color_hspan/copy_color_vspan were assigned without the @
  operator, inconsistent with every other handler assignment in this unit
  (and rejected by FPC in objfpc mode). }
procedure pixfmt_abgr32_pre;
begin
 pixf.Construct(rb );

 pixf.m_order:=abgr_order;

 pixf.m_pix_width:=4;

 pixf.copy_pixel :=@abgr32_copy_pixel;
 pixf.blend_pixel:=@abgr32_pre_blend_pixel;

 pixf.pixel:=@abgr32_pixel;
 pixf.row  :=@fmt32_row;

 pixf.copy_hline:=@abgr32_copy_hline;
 pixf.copy_vline:=@abgr32_copy_vline;

 pixf.blend_hline:=@abgr32_pre_blend_hline;
 pixf.blend_vline:=@abgr32_pre_blend_vline;

 pixf.blend_solid_hspan:=@abgr32_pre_blend_solid_hspan;
 pixf.blend_solid_vspan:=@abgr32_pre_blend_solid_vspan;

 { Plain copies need no premultiplied variant; blend handlers do. }
 pixf.copy_color_hspan:=@abgr32_copy_color_hspan;
 pixf.copy_color_vspan:=@abgr32_copy_color_vspan;

 pixf.blend_color_hspan:=@abgr32_pre_blend_color_hspan;
 pixf.blend_color_vspan:=@abgr32_pre_blend_color_vspan;

 pixf.copy_from :=@fmt32_copy_from;
 pixf.blend_from:=@abgr32_pre_blend_from;

 pixf.blend_from_color:=@abgr32_pre_blend_from_color;
 pixf.blend_from_lut  :=@abgr32_pre_blend_from_lut;

 pixf.for_each_pixel :=@order32_for_each_pixel;
 pixf.gamma_dir_apply:=@order32_gamma_dir_apply;
 pixf.gamma_inv_apply:=@order32_gamma_inv_apply;

 pixf.pixel_premultiply:=@order32_pixel_premultiply;
 pixf.pixel_demultiply :=@order32_pixel_demultiply;

end;
586 
587 {$I pf_alpha32.inc }
588 
{ PIXFMT_ALPHA_BLEND_RGBA }
{ Wire pixf up as a generic alpha-blending 32-bit RGBA format over rb,
  with the channel layout given by order. }
procedure pixfmt_alpha_blend_rgba;
begin
 pixf.Construct(rb );

 with pixf do
  begin
   m_order:=order;

   m_pix_width:=4;

   copy_pixel :=@alpha32_copy_pixel;
   blend_pixel:=@alpha32_blend_pixel;

   pixel:=@alpha32_pixel;
   row  :=@fmt32_row;

   copy_hline:=@alpha32_copy_hline;
   copy_vline:=@alpha32_copy_vline;

   blend_hline:=@alpha32_blend_hline;
   blend_vline:=@alpha32_blend_vline;

   blend_solid_hspan:=@alpha32_blend_solid_hspan;
   blend_solid_vspan:=@alpha32_blend_solid_vspan;

   copy_color_hspan:=@alpha32_copy_color_hspan;
   copy_color_vspan:=@alpha32_copy_color_vspan;

   blend_color_hspan:=@alpha32_blend_color_hspan;
   blend_color_vspan:=@alpha32_blend_color_vspan;

   copy_from :=@fmt32_copy_from;
   blend_from:=@alpha32_blend_from;

   blend_from_color:=@alpha32_blend_from_color;
   blend_from_lut  :=@alpha32_blend_from_lut;

   for_each_pixel :=@order32_for_each_pixel;
   gamma_dir_apply:=@order32_gamma_dir_apply;
   gamma_inv_apply:=@order32_gamma_inv_apply;

   pixel_premultiply:=@order32_pixel_premultiply;
   pixel_demultiply :=@order32_pixel_demultiply;

  end;

end;
633 
634 {$I pf_cubl32.inc }
635 
{ PIXFMT_CUSTOM_BLEND_RGBA }
{ Wire pixf up as a 32-bit RGBA format over rb that routes all blending
  through the custom blender bl, with the channel layout given by order. }
procedure pixfmt_custom_blend_rgba;
begin
 pixf.Construct(rb );

 with pixf do
  begin
   m_order:=order;
   blender:=bl;

   m_pix_width:=4;

   copy_pixel :=@cubl_copy_pixel;
   blend_pixel:=@cubl_blend_pixel;

   pixel:=@cubl_pixel;
   row  :=@fmt32_row;

   copy_hline:=@cubl_copy_hline;
   copy_vline:=@cubl_copy_vline;

   blend_hline:=@cubl_blend_hline;
   blend_vline:=@cubl_blend_vline;

   blend_solid_hspan:=@cubl_blend_solid_hspan;
   blend_solid_vspan:=@cubl_blend_solid_vspan;

   copy_color_hspan:=@cubl_copy_color_hspan;
   copy_color_vspan:=@cubl_copy_color_vspan;

   blend_color_hspan:=@cubl_blend_color_hspan;
   blend_color_vspan:=@cubl_blend_color_vspan;

   copy_from :=@fmt32_copy_from;
   blend_from:=@cubl_blend_from;

   blend_from_color:=@cubl_blend_from_color;
   blend_from_lut  :=@cubl_blend_from_lut;

   for_each_pixel :=@order32_for_each_pixel;
   gamma_dir_apply:=@order32_gamma_dir_apply;
   gamma_inv_apply:=@order32_gamma_inv_apply;

   pixel_premultiply:=@order32_pixel_premultiply;
   pixel_demultiply :=@order32_pixel_demultiply;

  end;

end;
681 
{ comp_op_rgba_clear }
// Porter-Duff "clear": Dca' = 0, Da' = 0.
// With partial coverage the destination fades towards zero instead:
// each channel keeps the (255 - cover) fraction of its value.
// cr, cg, cb, alpha are unused by this operator.
procedure comp_op_rgba_clear(this : pixel_formats_ptr; p : int8u_ptr; cr ,cg ,cb ,alpha ,cover : unsigned );
begin
 if cover < 255 then
  begin
   // Reuse cover as the uncovered fraction (255 - cover).
   cover:=255 - cover;

   int8u_ptr(ptrcomp(p ) + this.m_order.R )^:=int8u((int8u_ptr(ptrcomp(p ) + this.m_order.R )^ * cover + 255 ) shr 8 );
   int8u_ptr(ptrcomp(p ) + this.m_order.G )^:=int8u((int8u_ptr(ptrcomp(p ) + this.m_order.G )^ * cover + 255 ) shr 8 );
   int8u_ptr(ptrcomp(p ) + this.m_order.B )^:=int8u((int8u_ptr(ptrcomp(p ) + this.m_order.B )^ * cover + 255 ) shr 8 );
   int8u_ptr(ptrcomp(p ) + this.m_order.A )^:=int8u((int8u_ptr(ptrcomp(p ) + this.m_order.A )^ * cover + 255 ) shr 8 );

  end
 else
  begin
   // Full coverage: zero all four bytes regardless of channel order.
   int8u_ptr(ptrcomp(p ) + 0 )^:=0;
   int8u_ptr(ptrcomp(p ) + 1 )^:=0;
   int8u_ptr(ptrcomp(p ) + 2 )^:=0;
   int8u_ptr(ptrcomp(p ) + 3 )^:=0;

  end;

end;
705 
{ comp_op_rgba_src }
// Porter-Duff "source": Dca' = Sca, Da' = Sa.
// With partial coverage the result is a linear mix of destination and
// source, weighted by (255 - cover) and cover respectively.
procedure comp_op_rgba_src(this : pixel_formats_ptr; p : int8u_ptr; sr ,sg ,sb ,sa ,cover : unsigned );
var
 alpha : unsigned;

begin
 if cover < 255 then
  begin
   // alpha is the destination's weight in the mix.
   alpha:=255 - cover;

   int8u_ptr(ptrcomp(p ) + this.m_order.R )^:=int8u(((int8u_ptr(ptrcomp(p ) + this.m_order.R )^ * alpha + 255 ) shr 8 ) + ((sr * cover + 255 ) shr 8 ) );
   int8u_ptr(ptrcomp(p ) + this.m_order.G )^:=int8u(((int8u_ptr(ptrcomp(p ) + this.m_order.G )^ * alpha + 255 ) shr 8 ) + ((sg * cover + 255 ) shr 8 ) );
   int8u_ptr(ptrcomp(p ) + this.m_order.B )^:=int8u(((int8u_ptr(ptrcomp(p ) + this.m_order.B )^ * alpha + 255 ) shr 8 ) + ((sb * cover + 255 ) shr 8 ) );
   int8u_ptr(ptrcomp(p ) + this.m_order.A )^:=int8u(((int8u_ptr(ptrcomp(p ) + this.m_order.A )^ * alpha + 255 ) shr 8 ) + ((sa * cover + 255 ) shr 8 ) );

  end
 else
  begin
   // Full coverage: overwrite the destination with the source pixel.
   int8u_ptr(ptrcomp(p ) + this.m_order.R )^:=sr;
   int8u_ptr(ptrcomp(p ) + this.m_order.G )^:=sg;
   int8u_ptr(ptrcomp(p ) + this.m_order.B )^:=sb;
   int8u_ptr(ptrcomp(p ) + this.m_order.A )^:=sa;

  end;

end;
732 
{ comp_op_rgba_dst }
// Porter-Duff "destination": Dca' = Dca, Da' = Da.
// The destination is intentionally left untouched, so the body is empty.
procedure comp_op_rgba_dst(this : pixel_formats_ptr; p : int8u_ptr; cr ,cg ,cb ,alpha ,cover : unsigned );
begin
end;
737 
{ comp_op_rgba_src_over }
// Porter-Duff "source-over" (premultiplied):
//   Dca' = Sca + Dca.(1 - Sa)
//   Da'  = Sa + Da - Sa.Da
procedure comp_op_rgba_src_over(this : pixel_formats_ptr; p : int8u_ptr; sr ,sg ,sb ,sa ,cover : unsigned );
var
 s1a : unsigned;

begin
 // Pre-scale the (premultiplied) source by the coverage.
 if cover < 255 then
  begin
   sr:=(sr * cover + 255 ) shr 8;
   sg:=(sg * cover + 255 ) shr 8;
   sb:=(sb * cover + 255 ) shr 8;
   sa:=(sa * cover + 255 ) shr 8;

  end;

 // s1a = 1 - Sa in fixed point.
 s1a:=base_mask - sa;

 int8u_ptr(ptrcomp(p ) + this.m_order.R )^:=
  int8u(sr + ((int8u_ptr(ptrcomp(p ) + this.m_order.R )^ * s1a + base_mask ) shr base_shift ) );

 int8u_ptr(ptrcomp(p ) + this.m_order.G )^:=
  int8u(sg + ((int8u_ptr(ptrcomp(p ) + this.m_order.G )^ * s1a + base_mask ) shr base_shift ) );

 int8u_ptr(ptrcomp(p ) + this.m_order.B )^:=
  int8u(sb + ((int8u_ptr(ptrcomp(p ) + this.m_order.B )^ * s1a + base_mask ) shr base_shift ) );

 int8u_ptr(ptrcomp(p ) + this.m_order.A )^:=
  int8u(sa + int8u_ptr(ptrcomp(p ) + this.m_order.A )^ - ((sa * int8u_ptr(ptrcomp(p ) + this.m_order.A )^ + base_mask ) shr base_shift ) );

end;
770 
{ comp_op_rgba_dst_over }
// Porter-Duff "destination-over" (premultiplied):
// Dca' = Dca + Sca.(1 - Da)
// Da'  = Sa + Da - Sa.Da
procedure comp_op_rgba_dst_over(this : pixel_formats_ptr; p : int8u_ptr; sr ,sg ,sb ,sa ,cover : unsigned );
var
 d1a : unsigned;

begin
 // Pre-scale the (premultiplied) source by the coverage.
 if cover < 255 then
  begin
   sr:=(sr * cover + 255 ) shr 8;
   sg:=(sg * cover + 255 ) shr 8;
   sb:=(sb * cover + 255 ) shr 8;
   sa:=(sa * cover + 255 ) shr 8;

  end;

 // d1a = 1 - Da in fixed point.
 d1a:=base_mask - int8u_ptr(ptrcomp(p ) + this.m_order.A )^;

 int8u_ptr(ptrcomp(p ) + this.m_order.R )^:=
  int8u(int8u_ptr(ptrcomp(p ) + this.m_order.R )^ + ((sr * d1a + base_mask ) shr base_shift ) );

 int8u_ptr(ptrcomp(p ) + this.m_order.G )^:=
  int8u(int8u_ptr(ptrcomp(p ) + this.m_order.G )^ + ((sg * d1a + base_mask ) shr base_shift ) );

 int8u_ptr(ptrcomp(p ) + this.m_order.B )^:=
  int8u(int8u_ptr(ptrcomp(p ) + this.m_order.B )^ + ((sb * d1a + base_mask ) shr base_shift ) );

 int8u_ptr(ptrcomp(p ) + this.m_order.A )^:=
  int8u(sa + int8u_ptr(ptrcomp(p ) + this.m_order.A )^ - ((sa * int8u_ptr(ptrcomp(p ) + this.m_order.A )^ + base_mask ) shr base_shift ) );

end;
803 
{ comp_op_rgba_src_in }
// Porter-Duff "source-in" (premultiplied):
// Dca' = Sca.Da
// Da'  = Sa.Da
procedure comp_op_rgba_src_in(this : pixel_formats_ptr; p : int8u_ptr; sr ,sg ,sb ,sa ,cover : unsigned );
var
 da ,alpha : unsigned;

begin
 // Destination alpha must be read before any channel is overwritten.
 da:=int8u_ptr(ptrcomp(p ) + this.m_order.A )^;

 if cover < 255 then
  begin
   // Partial coverage: interpolate between the untouched destination
   // (weight alpha) and the src-in result (weight cover).
   alpha:=255 - cover;

   int8u_ptr(ptrcomp(p ) + this.m_order.R )^:=
    int8u(((int8u_ptr(ptrcomp(p ) + this.m_order.R )^ * alpha + 255 ) shr 8 ) + ((((sr * da + base_mask ) shr base_shift ) * cover + 255 ) shr 8 ) );

   int8u_ptr(ptrcomp(p ) + this.m_order.G )^:=
    int8u(((int8u_ptr(ptrcomp(p ) + this.m_order.G )^ * alpha + 255 ) shr 8 ) + ((((sg * da + base_mask ) shr base_shift ) * cover + 255 ) shr 8 ) );

   int8u_ptr(ptrcomp(p ) + this.m_order.B )^:=
    int8u(((int8u_ptr(ptrcomp(p ) + this.m_order.B )^ * alpha + 255 ) shr 8 ) + ((((sb * da + base_mask ) shr base_shift ) * cover + 255 ) shr 8 ) );

   int8u_ptr(ptrcomp(p ) + this.m_order.A )^:=
    int8u(((int8u_ptr(ptrcomp(p ) + this.m_order.A )^ * alpha + 255 ) shr 8 ) + ((((sa * da + base_mask ) shr base_shift ) * cover + 255 ) shr 8 ) );

  end
 else
  begin
   // Full coverage: every source channel is scaled by destination alpha.
   int8u_ptr(ptrcomp(p ) + this.m_order.R )^:=int8u((sr * da + base_mask ) shr base_shift );
   int8u_ptr(ptrcomp(p ) + this.m_order.G )^:=int8u((sg * da + base_mask ) shr base_shift );
   int8u_ptr(ptrcomp(p ) + this.m_order.B )^:=int8u((sb * da + base_mask ) shr base_shift );
   int8u_ptr(ptrcomp(p ) + this.m_order.A )^:=int8u((sa * da + base_mask ) shr base_shift );

  end;

end;
841 
{ comp_op_rgba_dst_in }
// Porter-Duff "destination-in" (premultiplied):
// Dca' = Dca.Sa
// Da'  = Sa.Da
// cr, cg, cb are unused: only the source alpha matters here.
procedure comp_op_rgba_dst_in(this : pixel_formats_ptr; p : int8u_ptr; cr ,cg ,cb ,sa ,cover : unsigned );
begin
 // With partial coverage, move sa towards 1 so the destination is only
 // partially attenuated: sa' = 1 - cover.(1 - sa).
 if cover < 255 then
  sa:=base_mask - ((cover * (base_mask - sa ) + 255 ) shr 8 );

 int8u_ptr(ptrcomp(p ) + this.m_order.R )^:=int8u((int8u_ptr(ptrcomp(p ) + this.m_order.R )^ * sa + base_mask ) shr base_shift );
 int8u_ptr(ptrcomp(p ) + this.m_order.G )^:=int8u((int8u_ptr(ptrcomp(p ) + this.m_order.G )^ * sa + base_mask ) shr base_shift );
 int8u_ptr(ptrcomp(p ) + this.m_order.B )^:=int8u((int8u_ptr(ptrcomp(p ) + this.m_order.B )^ * sa + base_mask ) shr base_shift );
 int8u_ptr(ptrcomp(p ) + this.m_order.A )^:=int8u((int8u_ptr(ptrcomp(p ) + this.m_order.A )^ * sa + base_mask ) shr base_shift );

end;
856 
{ comp_op_rgba_src_out }
// Porter-Duff "source-out" (premultiplied):
// Dca' = Sca.(1 - Da)
// Da'  = Sa.(1 - Da)
procedure comp_op_rgba_src_out(this : pixel_formats_ptr; p : int8u_ptr; sr ,sg ,sb ,sa ,cover : unsigned );
var
 da ,alpha : unsigned;

begin
 // da holds (1 - Da); it must be read before any channel is overwritten.
 da:=base_mask - int8u_ptr(ptrcomp(p ) + this.m_order.A )^;

 if cover < 255 then
  begin
   // Partial coverage: interpolate between the untouched destination
   // (weight alpha) and the src-out result (weight cover).
   alpha:=255 - cover;

   int8u_ptr(ptrcomp(p ) + this.m_order.R )^:=
    int8u(((int8u_ptr(ptrcomp(p ) + this.m_order.R )^ * alpha + 255 ) shr 8 ) + ((((sr * da + base_mask ) shr base_shift ) * cover + 255 ) shr 8 ) );

   int8u_ptr(ptrcomp(p ) + this.m_order.G )^:=
    int8u(((int8u_ptr(ptrcomp(p ) + this.m_order.G )^ * alpha + 255 ) shr 8 ) + ((((sg * da + base_mask ) shr base_shift ) * cover + 255 ) shr 8 ) );

   int8u_ptr(ptrcomp(p ) + this.m_order.B )^:=
    int8u(((int8u_ptr(ptrcomp(p ) + this.m_order.B )^ * alpha + 255 ) shr 8 ) + ((((sb * da + base_mask ) shr base_shift ) * cover + 255 ) shr 8 ) );

   int8u_ptr(ptrcomp(p ) + this.m_order.A )^:=
    int8u(((int8u_ptr(ptrcomp(p ) + this.m_order.A )^ * alpha + 255 ) shr 8 ) + ((((sa * da + base_mask ) shr base_shift ) * cover + 255 ) shr 8 ) );

  end
 else
  begin
   // Full coverage: every source channel is scaled by (1 - Da).
   int8u_ptr(ptrcomp(p ) + this.m_order.R )^:=int8u((sr * da + base_mask ) shr base_shift );
   int8u_ptr(ptrcomp(p ) + this.m_order.G )^:=int8u((sg * da + base_mask ) shr base_shift );
   int8u_ptr(ptrcomp(p ) + this.m_order.B )^:=int8u((sb * da + base_mask ) shr base_shift );
   int8u_ptr(ptrcomp(p ) + this.m_order.A )^:=int8u((sa * da + base_mask ) shr base_shift );

  end;

end;
894 
{ comp_op_rgba_dst_out }
// Porter-Duff "destination-out" (premultiplied):
// Dca' = Dca.(1 - Sa)
// Da'  = Da.(1 - Sa)
// cr, cg, cb are unused: only the source alpha matters here.
procedure comp_op_rgba_dst_out(this : pixel_formats_ptr; p : int8u_ptr; cr ,cg ,cb ,sa ,cover : unsigned );
begin
 // Scale source alpha by coverage, then invert it to get (1 - Sa).
 if cover < 255 then
  sa:=(sa * cover + 255 ) shr 8;

 sa:=base_mask - sa;

 // Multiply all four destination channels by (1 - Sa). The rounding bias
 // is base_mask, matching every other operator in this unit; the previous
 // code mistakenly added base_shift here, skewing the rounding.
 int8u_ptr(ptrcomp(p ) + this.m_order.R )^:=int8u((int8u_ptr(ptrcomp(p ) + this.m_order.R )^ * sa + base_mask ) shr base_shift );
 int8u_ptr(ptrcomp(p ) + this.m_order.G )^:=int8u((int8u_ptr(ptrcomp(p ) + this.m_order.G )^ * sa + base_mask ) shr base_shift );
 int8u_ptr(ptrcomp(p ) + this.m_order.B )^:=int8u((int8u_ptr(ptrcomp(p ) + this.m_order.B )^ * sa + base_mask ) shr base_shift );
 int8u_ptr(ptrcomp(p ) + this.m_order.A )^:=int8u((int8u_ptr(ptrcomp(p ) + this.m_order.A )^ * sa + base_mask ) shr base_shift );

end;
911 
{ comp_op_rgba_src_atop }
// Porter-Duff "source-atop" (premultiplied):
// Dca' = Sca.Da + Dca.(1 - Sa)
// Da'  = Da
// Note the alpha channel is deliberately not written: Da' = Da.
procedure comp_op_rgba_src_atop(this : pixel_formats_ptr; p : int8u_ptr; sr ,sg ,sb ,sa ,cover : unsigned );
var
 da : unsigned;

begin
 // Pre-scale the (premultiplied) source by the coverage.
 if cover < 255 then
  begin
   sr:=(sr * cover + 255 ) shr 8;
   sg:=(sg * cover + 255 ) shr 8;
   sb:=(sb * cover + 255 ) shr 8;
   sa:=(sa * cover + 255 ) shr 8;

  end;

 // After this point sa holds (1 - Sa).
 da:=int8u_ptr(ptrcomp(p ) + this.m_order.A )^;
 sa:=base_mask - sa;

 int8u_ptr(ptrcomp(p ) + this.m_order.R )^:=int8u((sr * da + int8u_ptr(ptrcomp(p ) + this.m_order.R )^ * sa + base_mask ) shr base_shift );
 int8u_ptr(ptrcomp(p ) + this.m_order.G )^:=int8u((sg * da + int8u_ptr(ptrcomp(p ) + this.m_order.G )^ * sa + base_mask ) shr base_shift );
 int8u_ptr(ptrcomp(p ) + this.m_order.B )^:=int8u((sb * da + int8u_ptr(ptrcomp(p ) + this.m_order.B )^ * sa + base_mask ) shr base_shift );

end;
937 
{ comp_op_rgba_dst_atop }
// Porter-Duff "destination-atop": destination kept only where the source is.
// Dca' = Dca.Sa + Sca.(1 - Da)
// Da'  = Sa
procedure comp_op_rgba_dst_atop(this : pixel_formats_ptr; p : int8u_ptr; sr ,sg ,sb ,sa ,cover : unsigned );
var
 da ,alpha : unsigned;

begin
 // da holds the complement of the destination alpha, i.e. (1 - Da).
 da:=base_mask - int8u_ptr(ptrcomp(p ) + this.m_order.A )^;

 if cover < 255 then
  begin
   // Partial coverage: first compute the full dst-atop color into sr/sg/sb,
   // then linearly interpolate between the untouched destination
   // (weight alpha = 255 - cover) and that result (weight cover).
   alpha:=255 - cover;

   sr:=(int8u_ptr(ptrcomp(p ) + this.m_order.R )^ * sa + sr * da + base_mask ) shr base_shift;
   sg:=(int8u_ptr(ptrcomp(p ) + this.m_order.G )^ * sa + sg * da + base_mask ) shr base_shift;
   sb:=(int8u_ptr(ptrcomp(p ) + this.m_order.B )^ * sa + sb * da + base_mask ) shr base_shift;

   int8u_ptr(ptrcomp(p ) + this.m_order.R )^:=
    int8u(((int8u_ptr(ptrcomp(p ) + this.m_order.R )^ * alpha + 255 ) shr 8) + ((sr * cover + 255 ) shr 8 ) );

   int8u_ptr(ptrcomp(p ) + this.m_order.G )^:=
    int8u(((int8u_ptr(ptrcomp(p ) + this.m_order.G )^ * alpha + 255 ) shr 8) + ((sg * cover + 255 ) shr 8 ) );

   int8u_ptr(ptrcomp(p ) + this.m_order.B )^:=
    int8u(((int8u_ptr(ptrcomp(p ) + this.m_order.B )^ * alpha + 255 ) shr 8) + ((sb * cover + 255 ) shr 8 ) );

   int8u_ptr(ptrcomp(p ) + this.m_order.A )^:=
    int8u(((int8u_ptr(ptrcomp(p ) + this.m_order.A )^ * alpha + 255 ) shr 8) + ((sa * cover + 255 ) shr 8 ) );

  end
 else
  begin
   // Full coverage: apply the operator directly; the new alpha is simply Sa.
   int8u_ptr(ptrcomp(p ) + this.m_order.R )^:=int8u((int8u_ptr(ptrcomp(p ) + this.m_order.R )^ * sa + sr * da + base_mask ) shr base_shift );
   int8u_ptr(ptrcomp(p ) + this.m_order.G )^:=int8u((int8u_ptr(ptrcomp(p ) + this.m_order.G )^ * sa + sg * da + base_mask ) shr base_shift );
   int8u_ptr(ptrcomp(p ) + this.m_order.B )^:=int8u((int8u_ptr(ptrcomp(p ) + this.m_order.B )^ * sa + sb * da + base_mask ) shr base_shift );
   int8u_ptr(ptrcomp(p ) + this.m_order.A )^:=int8u(sa );

  end;

end;
979 
{ comp_op_rgba_xor }
// Porter-Duff "xor": keep only the non-overlapping parts of both layers.
// Dca' = Sca.(1 - Da) + Dca.(1 - Sa)
// Da'  = Sa + Da - 2.Sa.Da
procedure comp_op_rgba_xor(this : pixel_formats_ptr; p : int8u_ptr; sr ,sg ,sb ,sa ,cover : unsigned );
var
 pr ,pg ,pb ,pa : int8u_ptr;

 s1a ,d1a : unsigned;

begin
// Fold partial coverage into the source.
 if cover < 255 then
  begin
   sr:=(sr * cover + 255 ) shr 8;
   sg:=(sg * cover + 255 ) shr 8;
   sb:=(sb * cover + 255 ) shr 8;
   sa:=(sa * cover + 255 ) shr 8;

  end;

 pr:=int8u_ptr(ptrcomp(p ) + this.m_order.R );
 pg:=int8u_ptr(ptrcomp(p ) + this.m_order.G );
 pb:=int8u_ptr(ptrcomp(p ) + this.m_order.B );
 pa:=int8u_ptr(ptrcomp(p ) + this.m_order.A );

 s1a:=base_mask - sa;
 d1a:=base_mask - pa^;

// Each color channel keeps only the contribution outside the other layer.
 pr^:=int8u((pr^ * s1a + sr * d1a + base_mask ) shr base_shift );
 pg^:=int8u((pg^ * s1a + sg * d1a + base_mask ) shr base_shift );
 pb^:=int8u((pb^ * s1a + sb * d1a + base_mask ) shr base_shift );

// Da' = Sa + Da - 2.Sa.Da: the shift by (base_shift - 1) implements the
// factor of two, with base_mask div 2 as the matching rounding bias.
 pa^:=int8u(sa + pa^ - ((sa * pa^ + base_mask div 2 ) shr (base_shift - 1 ) ) );

end;
1013 
{ comp_op_rgba_plus }
// Additive blending with saturation.
// Dca' = Sca + Dca
// Da'  = Sa + Da
procedure comp_op_rgba_plus(this : pixel_formats_ptr; p : int8u_ptr; sr ,sg ,sb ,sa ,cover : unsigned );
var
 pr ,pg ,pb ,pa : int8u_ptr;

 sum : unsigned;

begin
// Fold partial coverage into the source.
 if cover < 255 then
  begin
   sr:=(sr * cover + 255 ) shr 8;
   sg:=(sg * cover + 255 ) shr 8;
   sb:=(sb * cover + 255 ) shr 8;
   sa:=(sa * cover + 255 ) shr 8;

  end;

 pr:=int8u_ptr(ptrcomp(p ) + this.m_order.R );
 pg:=int8u_ptr(ptrcomp(p ) + this.m_order.G );
 pb:=int8u_ptr(ptrcomp(p ) + this.m_order.B );
 pa:=int8u_ptr(ptrcomp(p ) + this.m_order.A );

// Add each channel and clamp the result at base_mask (saturating add).
 sum:=pr^ + sr;

 if sum > base_mask then
  sum:=base_mask;

 pr^:=int8u(sum );

 sum:=pg^ + sg;

 if sum > base_mask then
  sum:=base_mask;

 pg^:=int8u(sum );

 sum:=pb^ + sb;

 if sum > base_mask then
  sum:=base_mask;

 pb^:=int8u(sum );

 sum:=pa^ + sa;

 if sum > base_mask then
  sum:=base_mask;

 pa^:=int8u(sum );

end;
1057 
{ comp_op_rgba_minus }
// Subtractive blending, clamped at zero.
// Dca' = Dca - Sca
// Da' = 1 - (1 - Sa).(1 - Da)
procedure comp_op_rgba_minus(this : pixel_formats_ptr; p : int8u_ptr; sr ,sg ,sb ,sa ,cover : unsigned );
var
 pr ,pg ,pb ,pa : int8u_ptr;

 dif : unsigned;

begin
// Fold partial coverage into the source.
 if cover < 255 then
  begin
   sr:=(sr * cover + 255 ) shr 8;
   sg:=(sg * cover + 255 ) shr 8;
   sb:=(sb * cover + 255 ) shr 8;
   sa:=(sa * cover + 255 ) shr 8;

  end;

 pr:=int8u_ptr(ptrcomp(p ) + this.m_order.R );
 pg:=int8u_ptr(ptrcomp(p ) + this.m_order.G );
 pb:=int8u_ptr(ptrcomp(p ) + this.m_order.B );
 pa:=int8u_ptr(ptrcomp(p ) + this.m_order.A );

// Subtract each channel; unsigned underflow wraps to a huge value, which
// the "> base_mask" test catches and clamps to zero.
 dif:=pr^ - sr;

 if dif > base_mask then
  dif:=0;

 pr^:=int8u(dif );

 dif:=pg^ - sg;

 if dif > base_mask then
  dif:=0;

 pg^:=int8u(dif );

 dif:=pb^ - sb;

 if dif > base_mask then
  dif:=0;

 pb^:=int8u(dif );

// Alpha follows the union formula: Da' = 1 - (1 - Sa).(1 - Da).
 pa^:=int8u(base_mask - (((base_mask - sa ) * (base_mask - pa^ ) + base_mask ) shr base_shift ) );

end;
1098 
{ comp_op_rgba_multiply }
// "Multiply" blend mode for premultiplied color.
// Dca' = Sca.Dca + Sca.(1 - Da) + Dca.(1 - Sa)
// Da'  = Sa + Da - Sa.Da
procedure comp_op_rgba_multiply(this : pixel_formats_ptr; p : int8u_ptr; sr ,sg ,sb ,sa ,cover : unsigned );
var
 pr ,pg ,pb ,pa : int8u_ptr;

 s1a ,d1a ,dr ,dg ,db : unsigned;

begin
// Fold partial coverage into the source.
 if cover < 255 then
  begin
   sr:=(sr * cover + 255 ) shr 8;
   sg:=(sg * cover + 255 ) shr 8;
   sb:=(sb * cover + 255 ) shr 8;
   sa:=(sa * cover + 255 ) shr 8;

  end;

 pr:=int8u_ptr(ptrcomp(p ) + this.m_order.R );
 pg:=int8u_ptr(ptrcomp(p ) + this.m_order.G );
 pb:=int8u_ptr(ptrcomp(p ) + this.m_order.B );
 pa:=int8u_ptr(ptrcomp(p ) + this.m_order.A );

 s1a:=base_mask - sa;
 d1a:=base_mask - pa^;

 dr:=pr^;
 dg:=pg^;
 db:=pb^;

 pr^:=int8u((sr * dr + sr * d1a + dr * s1a + base_mask ) shr base_shift );
 pg^:=int8u((sg * dg + sg * d1a + dg * s1a + base_mask ) shr base_shift );
 pb^:=int8u((sb * db + sb * d1a + db * s1a + base_mask ) shr base_shift );

// Da' = Sa + Da - Sa.Da (standard alpha union).
 pa^:=int8u(sa + pa^ - ((sa * pa^ + base_mask ) shr base_shift ) );

end;
1129 
{ comp_op_rgba_screen }
// "Screen" blend mode.
// Dca' = Sca + Dca - Sca.Dca
// Da'  = Sa + Da - Sa.Da
procedure comp_op_rgba_screen(this : pixel_formats_ptr; p : int8u_ptr; sr ,sg ,sb ,sa ,cover : unsigned );
var
 pr ,pg ,pb ,pa : int8u_ptr;

 dr ,dg ,db ,da : unsigned;

begin
// Fold partial coverage into the source.
 if cover < 255 then
  begin
   sr:=(sr * cover + 255 ) shr 8;
   sg:=(sg * cover + 255 ) shr 8;
   sb:=(sb * cover + 255 ) shr 8;
   sa:=(sa * cover + 255 ) shr 8;

  end;

 pr:=int8u_ptr(ptrcomp(p ) + this.m_order.R );
 pg:=int8u_ptr(ptrcomp(p ) + this.m_order.G );
 pb:=int8u_ptr(ptrcomp(p ) + this.m_order.B );
 pa:=int8u_ptr(ptrcomp(p ) + this.m_order.A );

 dr:=pr^;
 dg:=pg^;
 db:=pb^;
 da:=pa^;

// x + y - x.y per channel, including alpha.
 pr^:=int8u(sr + dr - ((sr * dr + base_mask ) shr base_shift ) );
 pg^:=int8u(sg + dg - ((sg * dg + base_mask ) shr base_shift ) );
 pb^:=int8u(sb + db - ((sb * db + base_mask ) shr base_shift ) );
 pa^:=int8u(sa + da - ((sa * da + base_mask ) shr base_shift ) );

end;
1158 
{ comp_op_rgba_overlay }
// "Overlay" blend mode: multiplies dark destination areas, screens light ones.
// if 2.Dca < Da
//   Dca' = 2.Sca.Dca + Sca.(1 - Da) + Dca.(1 - Sa)
// otherwise
//   Dca' = Sa.Da - 2.(Da - Dca).(Sa - Sca) + Sca.(1 - Da) + Dca.(1 - Sa)
//
// Da' = Sa + Da - Sa.Da
// NOTE: intermediate terms such as (sa - sr) may wrap in unsigned arithmetic
// for non-premultiplied input; overflow/range checks are disabled ($Q-/$R-)
// and the final int8u cast masks the result, mirroring the C++ original.
procedure comp_op_rgba_overlay(this : pixel_formats_ptr; p : int8u_ptr; sr ,sg ,sb ,sa ,cover : unsigned );
var
 d1a ,s1a ,dr ,dg ,db ,da ,sada : unsigned;

begin
 // Fold partial coverage into the source.
 if cover < 255 then
  begin
   sr:=(sr * cover + 255 ) shr 8;
   sg:=(sg * cover + 255 ) shr 8;
   sb:=(sb * cover + 255 ) shr 8;
   sa:=(sa * cover + 255 ) shr 8;

  end;

 // Snapshot destination channels and precompute the shared complements.
 d1a :=base_mask - int8u_ptr(ptrcomp(p ) + this.m_order.A )^;
 s1a :=base_mask - sa;
 dr  :=int8u_ptr(ptrcomp(p ) + this.m_order.R )^;
 dg  :=int8u_ptr(ptrcomp(p ) + this.m_order.G )^;
 db  :=int8u_ptr(ptrcomp(p ) + this.m_order.B )^;
 da  :=int8u_ptr(ptrcomp(p ) + this.m_order.A )^;
 sada:=sa * int8u_ptr(ptrcomp(p ) + this.m_order.A )^;

 // Per channel: "multiply" path when the destination is dark (2.Dca < Da),
 // "screen"-like path otherwise.
 if 2 * dr < da then
  int8u_ptr(ptrcomp(p ) + this.m_order.R )^:=int8u((2 * sr * dr + sr * d1a + dr * s1a ) shr base_shift )
 else
  int8u_ptr(ptrcomp(p ) + this.m_order.R )^:=int8u((sada - 2 * (da - dr ) * (sa - sr ) + sr * d1a + dr * s1a ) shr base_shift );

 if 2 * dg < da then
  int8u_ptr(ptrcomp(p ) + this.m_order.G )^:=int8u((2 * sg * dg + sg * d1a + dg * s1a ) shr base_shift )
 else
  int8u_ptr(ptrcomp(p ) + this.m_order.G )^:=int8u((sada - 2 * (da - dg ) * (sa - sg ) + sg * d1a + dg * s1a ) shr base_shift );

 if 2 * db < da then
  int8u_ptr(ptrcomp(p ) + this.m_order.B )^:=int8u((2 * sb * db + sb * d1a + db * s1a ) shr base_shift )
 else
  int8u_ptr(ptrcomp(p ) + this.m_order.B )^:=int8u((sada - 2 * (da - db ) * (sa - sb ) + sb * d1a + db * s1a ) shr base_shift );

 // Da' = Sa + Da - Sa.Da.
 int8u_ptr(ptrcomp(p ) + this.m_order.A )^:=int8u(sa + da - ((sa * da + base_mask ) shr base_shift ) );

end;
1206 
{ sd_min }
// Return the smaller of two unsigned values.
function sd_min(a ,b : unsigned ) : unsigned;
begin
 result:=b;

 if a < b then
  result:=a;

end;
1216 
{ sd_max }
// Return the larger of two unsigned values.
function sd_max(a ,b : unsigned ) : unsigned;
begin
 result:=b;

 if a > b then
  result:=a;

end;
1226 
{ comp_op_rgba_darken }
// "Darken" blend mode: per channel, keep the darker contribution.
// Dca' = min(Sca.Da, Dca.Sa) + Sca.(1 - Da) + Dca.(1 - Sa)
// Da'  = Sa + Da - Sa.Da
procedure comp_op_rgba_darken(this : pixel_formats_ptr; p : int8u_ptr; sr ,sg ,sb ,sa ,cover : unsigned );
var
 pr ,pg ,pb ,pa : int8u_ptr;

 d1a ,s1a ,dr ,dg ,db ,da : unsigned;

begin
// Fold partial coverage into the source.
 if cover < 255 then
  begin
   sr:=(sr * cover + 255 ) shr 8;
   sg:=(sg * cover + 255 ) shr 8;
   sb:=(sb * cover + 255 ) shr 8;
   sa:=(sa * cover + 255 ) shr 8;

  end;

 pr:=int8u_ptr(ptrcomp(p ) + this.m_order.R );
 pg:=int8u_ptr(ptrcomp(p ) + this.m_order.G );
 pb:=int8u_ptr(ptrcomp(p ) + this.m_order.B );
 pa:=int8u_ptr(ptrcomp(p ) + this.m_order.A );

 dr:=pr^;
 dg:=pg^;
 db:=pb^;
 da:=pa^;

 s1a:=base_mask - sa;
 d1a:=base_mask - da;

 pr^:=int8u((sd_min(sr * da ,dr * sa ) + sr * d1a + dr * s1a ) shr base_shift );
 pg^:=int8u((sd_min(sg * da ,dg * sa ) + sg * d1a + dg * s1a ) shr base_shift );
 pb^:=int8u((sd_min(sb * da ,db * sa ) + sb * d1a + db * s1a ) shr base_shift );
 pa^:=int8u(sa + da - ((sa * da + base_mask ) shr base_shift ) );

end;
1257 
{ comp_op_rgba_lighten }
// "Lighten" blend mode: per channel, keep the lighter contribution.
// Dca' = max(Sca.Da, Dca.Sa) + Sca.(1 - Da) + Dca.(1 - Sa)
// Da'  = Sa + Da - Sa.Da
procedure comp_op_rgba_lighten(this : pixel_formats_ptr; p : int8u_ptr; sr ,sg ,sb ,sa ,cover : unsigned );
var
 pr ,pg ,pb ,pa : int8u_ptr;

 d1a ,s1a ,dr ,dg ,db ,da : unsigned;

begin
// Fold partial coverage into the source.
 if cover < 255 then
  begin
   sr:=(sr * cover + 255 ) shr 8;
   sg:=(sg * cover + 255 ) shr 8;
   sb:=(sb * cover + 255 ) shr 8;
   sa:=(sa * cover + 255 ) shr 8;

  end;

 pr:=int8u_ptr(ptrcomp(p ) + this.m_order.R );
 pg:=int8u_ptr(ptrcomp(p ) + this.m_order.G );
 pb:=int8u_ptr(ptrcomp(p ) + this.m_order.B );
 pa:=int8u_ptr(ptrcomp(p ) + this.m_order.A );

 dr:=pr^;
 dg:=pg^;
 db:=pb^;
 da:=pa^;

 s1a:=base_mask - sa;
 d1a:=base_mask - da;

 pr^:=int8u((sd_max(sr * da ,dr * sa ) + sr * d1a + dr * s1a ) shr base_shift );
 pg^:=int8u((sd_max(sg * da ,dg * sa ) + sg * d1a + dg * s1a ) shr base_shift );
 pb^:=int8u((sd_max(sb * da ,db * sa ) + sb * d1a + db * s1a ) shr base_shift );
 pa^:=int8u(sa + da - ((sa * da + base_mask ) shr base_shift ) );

end;
1288 
{ comp_op_rgba_color_dodge }
// "Color dodge" blend mode: brightens the destination by the source.
// if Sca.Da + Dca.Sa >= Sa.Da
//   Dca' = Sa.Da + Sca.(1 - Da) + Dca.(1 - Sa)
// otherwise
//   Dca' = Dca.Sa/(1-Sca/Sa) + Sca.(1 - Da) + Dca.(1 - Sa)
//
// Da'  = Sa + Da - Sa.Da
procedure comp_op_rgba_color_dodge(this : pixel_formats_ptr; p : int8u_ptr; sr ,sg ,sb ,sa ,cover : unsigned );
var
 d1a ,s1a ,dr ,dg ,db ,da : unsigned;

 // Products are kept in signed ints so the comparisons and the signed
 // shr_int32 shifts below behave correctly.
 drsa ,dgsa ,dbsa ,srda ,sgda ,sbda ,sada : int;

begin
 // Fold partial coverage into the source.
 if cover < 255 then
  begin
   sr:=(sr * cover + 255 ) shr 8;
   sg:=(sg * cover + 255 ) shr 8;
   sb:=(sb * cover + 255 ) shr 8;
   sa:=(sa * cover + 255 ) shr 8;

  end;

 // Snapshot destination channels and precompute all cross products once.
 d1a :=base_mask - int8u_ptr(ptrcomp(p ) + this.m_order.A )^;
 s1a :=base_mask - sa;
 dr  :=int8u_ptr(ptrcomp(p ) + this.m_order.R )^;
 dg  :=int8u_ptr(ptrcomp(p ) + this.m_order.G )^;
 db  :=int8u_ptr(ptrcomp(p ) + this.m_order.B )^;
 da  :=int8u_ptr(ptrcomp(p ) + this.m_order.A )^;
 drsa:=dr * sa;
 dgsa:=dg * sa;
 dbsa:=db * sa;
 srda:=sr * da;
 sgda:=sg * da;
 sbda:=sb * da;
 sada:=sa * da;

 // The "else" branch divides by sa; it is reachable only when
 // srda + drsa < sada, which requires sa <> 0, so no division by zero.
 if srda + drsa >= sada then
  int8u_ptr(ptrcomp(p ) + this.m_order.R )^:=int8u(shr_int32(sada + sr * d1a + dr * s1a ,base_shift ) )
 else
  int8u_ptr(ptrcomp(p ) + this.m_order.R )^:=int8u(drsa div (base_mask - (sr shl base_shift ) div sa ) + ((sr * d1a + dr * s1a ) shr base_shift ) );

 if  sgda + dgsa >= sada then
  int8u_ptr(ptrcomp(p ) + this.m_order.G )^:=int8u(shr_int32(sada + sg * d1a + dg * s1a ,base_shift ) )
 else
  int8u_ptr(ptrcomp(p ) + this.m_order.G )^:=int8u(dgsa div (base_mask - (sg shl base_shift ) div sa ) + ((sg * d1a + dg * s1a ) shr base_shift ) );

 if sbda + dbsa >= sada then
  int8u_ptr(ptrcomp(p ) + this.m_order.B )^:=int8u(shr_int32(sada + sb * d1a + db * s1a ,base_shift ) )
 else
  int8u_ptr(ptrcomp(p ) + this.m_order.B )^:=int8u(dbsa div (base_mask - (sb shl base_shift ) div sa ) + ((sb * d1a + db * s1a ) shr base_shift ) );

 // Da' = Sa + Da - Sa.Da.
 int8u_ptr(ptrcomp(p ) + this.m_order.A )^:=int8u(sa + da - ((sa * da + base_mask) shr base_shift ) );

end;
1344 
{ comp_op_rgba_color_burn }
// "Color burn" blend mode: darkens the destination by the source.
// if Sca.Da + Dca.Sa <= Sa.Da
//   Dca' = Sca.(1 - Da) + Dca.(1 - Sa)
// otherwise
//   Dca' = Sa.(Sca.Da + Dca.Sa - Sa.Da)/Sca + Sca.(1 - Da) + Dca.(1 - Sa)
//
// Da'  = Sa + Da - Sa.Da
procedure comp_op_rgba_color_burn(this : pixel_formats_ptr; p : int8u_ptr; sr ,sg ,sb ,sa ,cover : unsigned );
var
 d1a ,s1a ,dr ,dg ,db ,da : unsigned;

 // Products are kept in signed ints so the comparisons and the signed
 // shr_int32 shifts below behave correctly.
 drsa ,dgsa ,dbsa ,srda ,sgda ,sbda ,sada : int;

begin
 // Fold partial coverage into the source.
 if cover < 255 then
  begin
   sr:=(sr * cover + 255 ) shr 8;
   sg:=(sg * cover + 255 ) shr 8;
   sb:=(sb * cover + 255 ) shr 8;
   sa:=(sa * cover + 255 ) shr 8;

  end;

 // Snapshot destination channels and precompute all cross products once.
 d1a :=base_mask - int8u_ptr(ptrcomp(p ) + this.m_order.A )^;
 s1a :=base_mask - sa;
 dr  :=int8u_ptr(ptrcomp(p ) + this.m_order.R )^;
 dg  :=int8u_ptr(ptrcomp(p ) + this.m_order.G )^;
 db  :=int8u_ptr(ptrcomp(p ) + this.m_order.B )^;
 da  :=int8u_ptr(ptrcomp(p ) + this.m_order.A )^;
 drsa:=dr * sa;
 dgsa:=dg * sa;
 dbsa:=db * sa;
 srda:=sr * da;
 sgda:=sg * da;
 sbda:=sb * da;
 sada:=sa * da;

 // The "else" branch divides by the source channel (sr/sg/sb); it is only
 // reachable when srda + drsa > sada, which for premultiplied input
 // requires the channel to be non-zero, so no division by zero.
 if srda + drsa <= sada then
  int8u_ptr(ptrcomp(p ) + this.m_order.R )^:=int8u((sr * d1a + dr * s1a ) shr base_shift )
 else
  int8u_ptr(ptrcomp(p ) + this.m_order.R )^:=int8u(shr_int32(sa * (srda + drsa - sada ) div sr + sr * d1a + dr * s1a ,base_shift ) );

 if sgda + dgsa <= sada then
  int8u_ptr(ptrcomp(p ) + this.m_order.G )^:=int8u((sg * d1a + dg * s1a ) shr base_shift)
 else
  int8u_ptr(ptrcomp(p ) + this.m_order.G )^:=int8u(shr_int32(sa * (sgda + dgsa - sada ) div sg + sg * d1a + dg * s1a ,base_shift ) );

 if sbda + dbsa <= sada then
  int8u_ptr(ptrcomp(p ) + this.m_order.B )^:=int8u((sb * d1a + db * s1a ) shr base_shift)
 else
  int8u_ptr(ptrcomp(p ) + this.m_order.B )^:=int8u(shr_int32(sa * (sbda + dbsa - sada ) div sb + sb * d1a + db * s1a ,base_shift ) );

 // Da' = Sa + Da - Sa.Da.
 int8u_ptr(ptrcomp(p ) + this.m_order.A )^:=int8u(sa + da - ((sa * da + base_mask ) shr base_shift ) );

end;
1400 
{ comp_op_rgba_hard_light }
// "Hard light" blend mode: overlay with the roles of source and destination
// swapped (the branch tests the SOURCE channel against Sa).
// if 2.Sca < Sa
//    Dca' = 2.Sca.Dca + Sca.(1 - Da) + Dca.(1 - Sa)
// otherwise
//    Dca' = Sa.Da - 2.(Da - Dca).(Sa - Sca) + Sca.(1 - Da) + Dca.(1 - Sa)
//
// Da'  = Sa + Da - Sa.Da
// NOTE: intermediate terms may wrap in unsigned arithmetic; overflow/range
// checks are disabled ($Q-/$R-) and the final int8u cast masks the result,
// mirroring the C++ original.
procedure comp_op_rgba_hard_light(this : pixel_formats_ptr; p : int8u_ptr; sr ,sg ,sb ,sa ,cover : unsigned );
var
 d1a ,s1a ,dr ,dg ,db ,da ,sada : unsigned;

begin
 // Fold partial coverage into the source.
 if cover < 255 then
  begin
   sr:=(sr * cover + 255 ) shr 8;
   sg:=(sg * cover + 255 ) shr 8;
   sb:=(sb * cover + 255 ) shr 8;
   sa:=(sa * cover + 255 ) shr 8;

  end;

 // Snapshot destination channels and precompute the shared terms.
 d1a :=base_mask - int8u_ptr(ptrcomp(p ) + this.m_order.A )^;
 s1a :=base_mask - sa;
 dr  :=int8u_ptr(ptrcomp(p ) + this.m_order.R )^;
 dg  :=int8u_ptr(ptrcomp(p ) + this.m_order.G )^;
 db  :=int8u_ptr(ptrcomp(p ) + this.m_order.B )^;
 da  :=int8u_ptr(ptrcomp(p ) + this.m_order.A )^;
 sada:=sa * da;

 if 2*sr < sa then
  int8u_ptr(ptrcomp(p ) + this.m_order.R )^:=int8u((2 * sr * dr + sr * d1a + dr * s1a ) shr base_shift )
 else
  int8u_ptr(ptrcomp(p ) + this.m_order.R )^:=int8u((sada - 2 * (da - dr ) * (sa - sr ) + sr * d1a + dr * s1a ) shr base_shift );

 if 2*sg < sa then
  int8u_ptr(ptrcomp(p ) + this.m_order.G )^:=int8u((2 * sg * dg + sg * d1a + dg * s1a ) shr base_shift )
 else
  int8u_ptr(ptrcomp(p ) + this.m_order.G )^:=int8u((sada - 2 * (da - dg ) * (sa - sg ) + sg * d1a + dg * s1a ) shr base_shift );

 if 2*sb < sa then
  int8u_ptr(ptrcomp(p ) + this.m_order.B )^:=int8u((2 * sb * db + sb * d1a + db * s1a ) shr base_shift )
 else
  int8u_ptr(ptrcomp(p ) + this.m_order.B )^:=int8u((sada - 2 * (da - db ) * (sa - sb ) + sb * d1a + db * s1a ) shr base_shift );

 // Da' = Sa + Da - Sa.Da.
 int8u_ptr(ptrcomp(p ) + this.m_order.A )^:=int8u(sa + da - ((sa * da + base_mask ) shr base_shift ) );

end;
1448 
{ comp_op_rgba_soft_light }
// "Soft light" blend mode, computed in normalized floating point (the only
// operator in this set that does), then truncated back to fixed point.
// if 2.Sca < Sa
//   Dca' = Dca.(Sa + (1 - Dca/Da).(2.Sca - Sa)) + Sca.(1 - Da) + Dca.(1 - Sa)
// otherwise if 8.Dca <= Da
//   Dca' = Dca.(Sa + (1 - Dca/Da).(2.Sca - Sa).(3 - 8.Dca/Da)) + Sca.(1 - Da) + Dca.(1 - Sa)
// otherwise
//   Dca' = (Dca.Sa + ((Dca/Da)^(0.5).Da - Dca).(2.Sca - Sa)) + Sca.(1 - Da) + Dca.(1 - Sa)
//
// Da'  = Sa + Da - Sa.Da
procedure comp_op_rgba_soft_light(this : pixel_formats_ptr; p : int8u_ptr; r ,g ,b ,a ,cover : unsigned );
var
 sr ,sg ,sb ,sa ,dr ,dg ,db ,da : double;

begin
 // Normalize source (folding coverage in) and destination to [0, 1].
 sr:=(r * cover ) / (base_mask * 255 );
 sg:=(g * cover ) / (base_mask * 255 );
 sb:=(b * cover ) / (base_mask * 255 );
 sa:=(a * cover ) / (base_mask * 255 );
 dr:=int8u_ptr(ptrcomp(p ) + this.m_order.R )^ / base_mask;
 dg:=int8u_ptr(ptrcomp(p ) + this.m_order.G )^ / base_mask;
 db:=int8u_ptr(ptrcomp(p ) + this.m_order.B )^ / base_mask;

 // Avoid dividing by zero below: substitute the smallest representable
 // alpha when the destination is fully transparent.
 if int8u_ptr(ptrcomp(p ) + this.m_order.A )^ <> 0 then
  da:=int8u_ptr(ptrcomp(p ) + this.m_order.A )^ / base_mask
 else
  da:=1 / base_mask;

 // The final alpha is computed in integer math from the cover-scaled "a".
 if cover < 255 then
  a:=(a * cover + 255 ) shr 8;

 if 2 * sr < sa then
  dr:=dr * (sa + (1 - dr / da ) * (2 * sr - sa ) ) + sr * (1 - da ) + dr * (1 - sa )
 else
  if 8 * dr <= da then
   dr:=dr * (sa + (1 - dr / da ) * (2 * sr - sa ) * (3 - 8 * dr / da ) ) + sr * (1 - da ) + dr * (1 - sa )
  else
   dr:=(dr * sa + (Sqrt(dr / da ) * da - dr ) * (2 * sr - sa ) ) + sr * (1 - da ) + dr * (1 - sa );

 if 2 * sg < sa then
  dg:=dg * (sa + (1 - dg / da ) * (2 * sg - sa ) ) + sg * (1 - da ) + dg * (1 - sa )
 else
  if 8 * dg <= da then
   dg:=dg * (sa + (1 - dg / da ) * (2 * sg - sa ) * (3 - 8 * dg / da ) ) + sg * (1 - da ) + dg * (1 - sa )
  else
   dg:=(dg * sa + (Sqrt(dg / da ) * da - dg ) * (2 * sg - sa ) ) + sg * (1 - da ) + dg * (1 - sa );

 if 2 * sb < sa then
  db:=db * (sa + (1 - db / da ) * (2 * sb - sa ) ) + sb * (1 - da ) + db * (1 - sa )
 else
  if 8 * db <= da then
   db:=db * (sa + (1 - db / da ) * (2 * sb - sa ) * (3 - 8 * db / da ) ) + sb * (1 - da ) + db * (1 - sa )
  else
   db:=(db * sa + (Sqrt(db / da ) * da - db ) * (2 * sb - sa ) ) + sb * (1 - da ) + db * (1 - sa );

 // Denormalize colors (truncating) and update alpha as Sa + Da - Sa.Da.
 int8u_ptr(ptrcomp(p ) + this.m_order.R )^:=int8u(trunc(dr * base_mask ) );
 int8u_ptr(ptrcomp(p ) + this.m_order.G )^:=int8u(trunc(dg * base_mask ) );
 int8u_ptr(ptrcomp(p ) + this.m_order.B )^:=int8u(trunc(db * base_mask ) );
 int8u_ptr(ptrcomp(p ) + this.m_order.A )^:=
  int8u(a + int8u_ptr(ptrcomp(p ) + this.m_order.A )^ - ((a * int8u_ptr(ptrcomp(p ) + this.m_order.A )^ + base_mask ) shr base_shift ) );

end;
1510 
{ comp_op_rgba_difference }
// "Difference" blend mode.
// Dca' = Sca + Dca - 2.min(Sca.Da, Dca.Sa)
// Da'  = Sa + Da - Sa.Da
procedure comp_op_rgba_difference(this : pixel_formats_ptr; p : int8u_ptr; sr ,sg ,sb ,sa ,cover : unsigned );
var
 pr ,pg ,pb ,pa : int8u_ptr;

 dr ,dg ,db ,da : unsigned;

begin
// Fold partial coverage into the source.
 if cover < 255 then
  begin
   sr:=(sr * cover + 255 ) shr 8;
   sg:=(sg * cover + 255 ) shr 8;
   sb:=(sb * cover + 255 ) shr 8;
   sa:=(sa * cover + 255 ) shr 8;

  end;

 pr:=int8u_ptr(ptrcomp(p ) + this.m_order.R );
 pg:=int8u_ptr(ptrcomp(p ) + this.m_order.G );
 pb:=int8u_ptr(ptrcomp(p ) + this.m_order.B );
 pa:=int8u_ptr(ptrcomp(p ) + this.m_order.A );

 dr:=pr^;
 dg:=pg^;
 db:=pb^;
 da:=pa^;

 pr^:=int8u(sr + dr - ((2 * sd_min(sr * da ,dr * sa ) ) shr base_shift ) );
 pg^:=int8u(sg + dg - ((2 * sd_min(sg * da ,dg * sa ) ) shr base_shift ) );
 pb^:=int8u(sb + db - ((2 * sd_min(sb * da ,db * sa ) ) shr base_shift ) );
 pa^:=int8u(sa + da - ((sa * da + base_mask ) shr base_shift ) );

end;
1539 
{ comp_op_rgba_exclusion }
// "Exclusion" blend mode: like difference but with lower contrast.
// Dca' = (Sca.Da + Dca.Sa - 2.Sca.Dca) + Sca.(1 - Da) + Dca.(1 - Sa)
// Da'  = Sa + Da - Sa.Da
procedure comp_op_rgba_exclusion(this : pixel_formats_ptr; p : int8u_ptr; sr ,sg ,sb ,sa ,cover : unsigned );
var
 pr ,pg ,pb ,pa : int8u_ptr;

 d1a ,s1a ,dr ,dg ,db ,da : unsigned;

begin
// Fold partial coverage into the source.
 if cover < 255 then
  begin
   sr:=(sr * cover + 255 ) shr 8;
   sg:=(sg * cover + 255 ) shr 8;
   sb:=(sb * cover + 255 ) shr 8;
   sa:=(sa * cover + 255 ) shr 8;

  end;

 pr:=int8u_ptr(ptrcomp(p ) + this.m_order.R );
 pg:=int8u_ptr(ptrcomp(p ) + this.m_order.G );
 pb:=int8u_ptr(ptrcomp(p ) + this.m_order.B );
 pa:=int8u_ptr(ptrcomp(p ) + this.m_order.A );

 dr:=pr^;
 dg:=pg^;
 db:=pb^;
 da:=pa^;

 s1a:=base_mask - sa;
 d1a:=base_mask - da;

 pr^:=int8u((sr * da + dr * sa - 2 * sr * dr + sr * d1a + dr * s1a ) shr base_shift );
 pg^:=int8u((sg * da + dg * sa - 2 * sg * dg + sg * d1a + dg * s1a ) shr base_shift );
 pb^:=int8u((sb * da + db * sa - 2 * sb * db + sb * d1a + db * s1a ) shr base_shift );
 pa^:=int8u(sa + da - ((sa * da + base_mask ) shr base_shift ) );

end;
1570 
{ comp_op_rgba_contrast }
// Non-standard "contrast" operator (AGG extension): remaps each destination
// channel around the destination half-alpha pivot, scaled by how far the
// source channel is from its own half-alpha. Alpha is left unchanged.
procedure comp_op_rgba_contrast(this : pixel_formats_ptr; p : int8u_ptr; sr ,sg ,sb ,sa ,cover : unsigned );
var
 // Signed ints: the (dr - d2a) terms and the results r/g/b can go negative.
 dr ,dg ,db ,da ,d2a ,r ,g ,b : int;

 s2a : unsigned;

begin
 // Fold partial coverage into the source.
 if cover < 255 then
  begin
   sr:=(sr * cover + 255 ) shr 8;
   sg:=(sg * cover + 255 ) shr 8;
   sb:=(sb * cover + 255 ) shr 8;
   sa:=(sa * cover + 255 ) shr 8;

  end;

 dr :=int8u_ptr(ptrcomp(p ) + this.m_order.R )^;
 dg :=int8u_ptr(ptrcomp(p ) + this.m_order.G )^;
 db :=int8u_ptr(ptrcomp(p ) + this.m_order.B )^;
 da :=int8u_ptr(ptrcomp(p ) + this.m_order.A )^;
 // d2a/s2a are the half-alpha pivots (Da/2 and Sa/2).
 d2a:=shr_int32(da ,1 );
 s2a:=sa shr 1;

 // Scale the offset from the pivot by (2.(Sca - Sa/2) + 1) in fixed point.
 r:=shr_int32((dr - d2a) * ((sr - s2a ) * 2 + base_mask ) ,base_shift ) + d2a;
 g:=shr_int32((dg - d2a) * ((sg - s2a ) * 2 + base_mask ) ,base_shift ) + d2a;
 b:=shr_int32((db - d2a) * ((sb - s2a ) * 2 + base_mask ) ,base_shift ) + d2a;

 // Clamp each channel to [0, da].
 if r < 0 then
  r:=0;

 if g < 0 then
  g:=0;

 if b < 0 then
  b:=0;

 // The trunc calls are no-ops on these integer values (kept from the port).
 if r > da then
  int8u_ptr(ptrcomp(p ) + this.m_order.R )^:=int8u(trunc(da ) )
 else
  int8u_ptr(ptrcomp(p ) + this.m_order.R )^:=int8u(trunc(r ) );

 if g > da then
  int8u_ptr(ptrcomp(p ) + this.m_order.G )^:=int8u(trunc(da ) )
 else
  int8u_ptr(ptrcomp(p ) + this.m_order.G )^:=int8u(trunc(g ) );

 if b > da then
  int8u_ptr(ptrcomp(p ) + this.m_order.B )^:=int8u(trunc(da ) )
 else
  int8u_ptr(ptrcomp(p ) + this.m_order.B )^:=int8u(trunc(b ) );

end;
1624 
{ comp_op_rgba_invert }
// "Invert" operator: inverts the destination channels relative to the
// destination alpha, weighted by the source alpha only (source color unused).
// Dca' = (Da - Dca) * Sa + Dca.(1 - Sa)
// Da'  = Sa + Da - Sa.Da
procedure comp_op_rgba_invert(this : pixel_formats_ptr; p : int8u_ptr; sr ,sg ,sb ,sa ,cover : unsigned );
var
 da ,dr ,dg ,db ,s1a : int;

begin
 // Coverage is folded into sa unconditionally (cover = 255 leaves sa
 // unchanged under this rounding, so no branch is needed).
 sa:=(sa * cover + 255 ) shr 8;

 // A fully transparent source leaves the destination untouched.
 if sa <> 0 then
  begin
   da :=int8u_ptr(ptrcomp(p ) + this.m_order.A )^;
   // Inverted contribution of each channel: (Da - Dca).Sa.
   dr :=shr_int32((da - int8u_ptr(ptrcomp(p ) + this.m_order.R )^ ) * sa + base_mask ,base_shift );
   dg :=shr_int32((da - int8u_ptr(ptrcomp(p ) + this.m_order.G )^ ) * sa + base_mask ,base_shift );
   db :=shr_int32((da - int8u_ptr(ptrcomp(p ) + this.m_order.B )^ ) * sa + base_mask ,base_shift );
   s1a:=base_mask - sa;

   // Add the surviving original contribution Dca.(1 - Sa).
   int8u_ptr(ptrcomp(p ) + this.m_order.R )^:=
    int8u(
     dr + shr_int32(int8u_ptr(ptrcomp(p ) + this.m_order.R )^ * s1a + base_mask ,base_shift ) );

   int8u_ptr(ptrcomp(p ) + this.m_order.G )^:=
    int8u(
     dg + shr_int32(int8u_ptr(ptrcomp(p ) + this.m_order.G )^ * s1a + base_mask ,base_shift ) );

   int8u_ptr(ptrcomp(p ) + this.m_order.B )^:=
    int8u(
     db + shr_int32(int8u_ptr(ptrcomp(p ) + this.m_order.B )^ * s1a + base_mask ,base_shift ) );

   // Da' = Sa + Da - Sa.Da.
   int8u_ptr(ptrcomp(p ) + this.m_order.A )^:=
     int8u(
      sa + da - shr_int32(sa * da + base_mask ,base_shift ) );

  end;

end;
1662 
{ comp_op_rgba_invert_rgb }
// "Invert RGB" operator: like invert, but each channel is weighted by the
// corresponding source channel instead of the source alpha.
// Dca' = (Da - Dca) * Sca + Dca.(1 - Sa)
// Da'  = Sa + Da - Sa.Da
procedure comp_op_rgba_invert_rgb(this : pixel_formats_ptr; p : int8u_ptr; sr ,sg ,sb ,sa ,cover : unsigned );
var
 da ,dr ,dg ,db ,s1a : int;

begin
 // Fold partial coverage into all four source components.
 if cover < 255 then
  begin
   sr:=shr_int32(sr * cover + 255 ,8 );
   sg:=shr_int32(sg * cover + 255 ,8 );
   sb:=shr_int32(sb * cover + 255 ,8 );
   sa:=shr_int32(sa * cover + 255 ,8 );

  end;

 // A fully transparent source leaves the destination untouched.
 if sa <> 0 then
  begin
   da :=int8u_ptr(ptrcomp(p ) + this.m_order.A )^;
   // Inverted contribution of each channel: (Da - Dca).Sca.
   dr :=shr_int32((da - int8u_ptr(ptrcomp(p ) + this.m_order.R )^ ) * sr + base_mask ,base_shift );
   dg :=shr_int32((da - int8u_ptr(ptrcomp(p ) + this.m_order.G )^ ) * sg + base_mask ,base_shift );
   db :=shr_int32((da - int8u_ptr(ptrcomp(p ) + this.m_order.B )^ ) * sb + base_mask ,base_shift );
   s1a:=base_mask - sa;

   // Add the surviving original contribution Dca.(1 - Sa).
   int8u_ptr(ptrcomp(p ) + this.m_order.R )^:=
    int8u(
     dr + shr_int32(int8u_ptr(ptrcomp(p ) + this.m_order.R )^ * s1a + base_mask ,base_shift ) );

   int8u_ptr(ptrcomp(p ) + this.m_order.G )^:=
    int8u(
     dg + shr_int32(int8u_ptr(ptrcomp(p ) + this.m_order.G )^ * s1a + base_mask ,base_shift ) );

   int8u_ptr(ptrcomp(p ) + this.m_order.B )^:=
    int8u(
     db + shr_int32(int8u_ptr(ptrcomp(p ) + this.m_order.B )^ * s1a + base_mask ,base_shift ) );

   // Da' = Sa + Da - Sa.Da.
   int8u_ptr(ptrcomp(p ) + this.m_order.A )^:=
    int8u(
     sa + da - shr_int32(sa * da + base_mask ,base_shift ) );

  end;

end;
1707 
const
// Dispatch table mapping a composite-operation index ("op") to its blend
// procedure. The entry order MUST stay in sync with the comp_op_e
// enumeration (declared in agg_pixfmt) — it is indexed directly by the
// comp_op_adaptor_* procedures below.
 comp_op_table_rgba : array[0..byte(end_of_comp_op_e ) - 1 ] of func_blend_pix = (

  comp_op_rgba_clear ,
  comp_op_rgba_src ,
  comp_op_rgba_dst ,
  comp_op_rgba_src_over ,
  comp_op_rgba_dst_over ,
  comp_op_rgba_src_in ,
  comp_op_rgba_dst_in ,
  comp_op_rgba_src_out ,
  comp_op_rgba_dst_out ,
  comp_op_rgba_src_atop ,
  comp_op_rgba_dst_atop ,
  comp_op_rgba_xor ,
  comp_op_rgba_plus ,
  comp_op_rgba_minus ,
  comp_op_rgba_multiply ,
  comp_op_rgba_screen ,
  comp_op_rgba_overlay ,
  comp_op_rgba_darken ,
  comp_op_rgba_lighten ,
  comp_op_rgba_color_dodge ,
  comp_op_rgba_color_burn ,
  comp_op_rgba_hard_light ,
  comp_op_rgba_soft_light ,
  comp_op_rgba_difference ,
  comp_op_rgba_exclusion ,
  comp_op_rgba_contrast ,
  comp_op_rgba_invert ,
  comp_op_rgba_invert_rgb );
1739 
{ COMP_OP_ADAPTOR_RGBA }
// Adaptor that premultiplies the straight source color (cr/cg/cb) by its
// alpha (ca) and then dispatches to the blend routine selected by "op".
// NOTE(review): the parameter list (this ,op ,p ,cr ,cg ,cb ,ca ,cover) is
// omitted here, so it is presumably repeated from the INTERFACE declaration
// — confirm against the interface section.
procedure comp_op_adaptor_rgba;
begin
 comp_op_table_rgba[op ](
  this ,p ,
  (cr * ca + base_mask ) shr base_shift ,
  (cg * ca + base_mask ) shr base_shift ,
  (cb * ca + base_mask ) shr base_shift ,
  ca ,cover );

end;
1751 
{ COMP_OP_ADAPTOR_CLIP_TO_DST_RGBA }
// Adaptor for premultiplied input that clips the source to the destination:
// every source component is scaled by the destination alpha before the
// selected blend routine runs.
procedure comp_op_adaptor_clip_to_dst_rgba_pre(this : pixel_formats_ptr; op : unsigned; p : int8u_ptr; cr ,cg ,cb ,ca ,cover : unsigned );
var
 da : unsigned;

begin
 da:=int8u_ptr(ptrcomp(p ) + this.m_order.A )^;

// Scale all four source components by Da, then dispatch through the table.
 cr:=(cr * da + base_mask ) shr base_shift;
 cg:=(cg * da + base_mask ) shr base_shift;
 cb:=(cb * da + base_mask ) shr base_shift;
 ca:=(ca * da + base_mask ) shr base_shift;

 comp_op_table_rgba[op ](this ,p ,cr ,cg ,cb ,ca ,cover );

end;
1769 
1770 END.
1771 
1772