1 /* -*- Mode: c; tab-width: 8; c-basic-offset: 4; indent-tabs-mode: t; -*- */
2 /* glitter-paths - polygon scan converter
3  *
4  * Copyright (c) 2008  M Joonas Pihlaja
5  * Copyright (c) 2007  David Turner
6  *
7  * Permission is hereby granted, free of charge, to any person
8  * obtaining a copy of this software and associated documentation
9  * files (the "Software"), to deal in the Software without
10  * restriction, including without limitation the rights to use,
11  * copy, modify, merge, publish, distribute, sublicense, and/or sell
12  * copies of the Software, and to permit persons to whom the
13  * Software is furnished to do so, subject to the following
14  * conditions:
15  *
16  * The above copyright notice and this permission notice shall be
17  * included in all copies or substantial portions of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
20  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
21  * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
22  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
23  * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
24  * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
25  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26  * OTHER DEALINGS IN THE SOFTWARE.
27  */
28 /* This is the Glitter paths scan converter incorporated into cairo.
29  * The source is from commit 734c53237a867a773640bd5b64816249fa1730f8
30  * of
31  *
32  *   https://gitweb.freedesktop.org/?p=users/joonas/glitter-paths
33  */
34 /* Glitter-paths is a stand alone polygon rasteriser derived from
35  * David Turner's reimplementation of Tor Andersson's 15x17
36  * supersampling rasteriser from the Apparition graphics library.  The
37  * main new feature here is cheaply choosing per-scan line between
38  * doing fully analytical coverage computation for an entire row at a
39  * time vs. using a supersampling approach.
40  *
41  * David Turner's code can be found at
42  *
43  *   http://david.freetype.org/rasterizer-shootout/raster-comparison-20070813.tar.bz2
44  *
45  * In particular this file incorporates large parts of ftgrays_tor10.h
46  * from raster-comparison-20070813.tar.bz2
47  */
48 /* Overview
49  *
50  * A scan converter's basic purpose is to take polygon edges and convert
51  * them into an RLE compressed A8 mask.  This one works in two phases:
52  * gathering edges and generating spans.
53  *
54  * 1) As the user feeds the scan converter edges they are vertically
55  * clipped and bucketed into a _polygon_ data structure.  The edges
56  * are also snapped from the user's coordinates to the subpixel grid
57  * coordinates used during scan conversion.
58  *
59  *     user
60  *      |
61  *      | edges
62  *      V
63  *    polygon buckets
64  *
65  * 2) Generating spans works by performing a vertical sweep of pixel
66  * rows from top to bottom and maintaining an _active_list_ of edges
67  * that intersect the row.  From the active list the fill rule
68  * determines which edges are the left and right edges of the start of
69  * each span, and their contribution is then accumulated into a pixel
70  * coverage list (_cell_list_) as coverage deltas.  Once the coverage
71  * deltas of all edges are known we can form spans of constant pixel
72  * coverage by summing the deltas during a traversal of the cell list.
73  * At the end of a pixel row the cell list is sent to a coverage
74  * blitter for rendering to some target surface.
75  *
76  * The pixel coverages are computed by either supersampling the row
77  * and box filtering a mono rasterisation, or by computing the exact
78  * coverages of edges in the active list.  The supersampling method is
79  * used whenever some edge starts or stops within the row or there are
80  * edge intersections in the row.
81  *
82  *   polygon bucket for       \
83  *   current pixel row        |
84  *      |                     |
85  *      | activate new edges  |  Repeat GRID_Y times if we
86  *      V                     \  are supersampling this row,
87  *   active list              /  or just once if we're computing
88  *      |                     |  analytical coverage.
89  *      | coverage deltas     |
90  *      V                     |
91  *   pixel coverage list     /
92  *      |
93  *      V
94  *   coverage blitter
95  */
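/* A rough sketch of how the pieces described above are driven (this mirrors
 * the cairo_tor22 wrapper at the bottom of this file; illustrative only,
 * error handling via setjmp on the jmp_buf is omitted):
 *
 *   glitter_scan_converter_t gsc;
 *   jmp_buf jmp;
 *
 *   _glitter_scan_converter_init (&gsc, &jmp);
 *   glitter_scan_converter_reset (&gsc, xmin, ymin, xmax, ymax);
 *   for (i = 0; i < polygon->num_edges; i++)
 *	glitter_scan_converter_add_edge (&gsc, &polygon->edges[i]);
 *   glitter_scan_converter_render (&gsc, winding_mask, antialias, renderer);
 *   _glitter_scan_converter_fini (&gsc);
 */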
96 #include "cairoint.h"
97 #include "cairo-spans-private.h"
98 #include "cairo-error-private.h"
99 
100 #include <stdlib.h>
101 #include <string.h>
102 #include <limits.h>
103 #include <setjmp.h>
104 
105 /*-------------------------------------------------------------------------
106  * cairo specific config
107  */
108 #define I static
109 
110 /* Prefer cairo's status type. */
111 #define GLITTER_HAVE_STATUS_T 1
112 #define GLITTER_STATUS_SUCCESS CAIRO_STATUS_SUCCESS
113 #define GLITTER_STATUS_NO_MEMORY CAIRO_STATUS_NO_MEMORY
114 typedef cairo_status_t glitter_status_t;
115 
116 /* The input coordinate scale and the rasterisation grid scales. */
117 #define GLITTER_INPUT_BITS CAIRO_FIXED_FRAC_BITS
118 //#define GRID_X_BITS CAIRO_FIXED_FRAC_BITS
119 //#define GRID_Y 15
120 #define GRID_X_BITS 2
121 #define GRID_Y_BITS 2
122 
123 /* Set glitter up to use a cairo span renderer to do the coverage
124  * blitting. */
125 struct pool;
126 struct cell_list;
127 
128 /*-------------------------------------------------------------------------
129  * glitter-paths.h
130  */
131 
132 /* "Input scaled" numbers are fixed precision reals with multiplier
133  * 2**GLITTER_INPUT_BITS.  Input coordinates are given to glitter as
134  * pixel scaled numbers.  These get converted to the internal grid
135  * scaled numbers as soon as possible. Internal overflow is possible
136  * if GRID_X/Y inside glitter-paths.c is larger than
137  * 1<<GLITTER_INPUT_BITS. */
138 #ifndef GLITTER_INPUT_BITS
139 #  define GLITTER_INPUT_BITS 8
140 #endif
141 #define GLITTER_INPUT_SCALE (1<<GLITTER_INPUT_BITS)
142 typedef int glitter_input_scaled_t;
143 
144 #if !GLITTER_HAVE_STATUS_T
145 typedef enum {
146     GLITTER_STATUS_SUCCESS = 0,
147     GLITTER_STATUS_NO_MEMORY
148 } glitter_status_t;
149 #endif
150 
151 #ifndef I
152 # define I /*static*/
153 #endif
154 
155 /* Opaque type for scan converting. */
156 typedef struct glitter_scan_converter glitter_scan_converter_t;
157 
158 /* Reset a scan converter to accept polygon edges and set the clip box
159  * in pixels.  Allocates O(ymax-ymin) bytes of memory.	The clip box
160  * is set to integer pixel coordinates xmin <= x < xmax, ymin <= y <
161  * ymax. */
162 I glitter_status_t
163 glitter_scan_converter_reset(
164     glitter_scan_converter_t *converter,
165     int xmin, int ymin,
166     int xmax, int ymax);
167 
168 /* Render the polygon in the scan converter to the given A8 format
169  * image raster.  Only the pixels accessible as pixels[y*stride+x] for
170  * x,y inside the clip box are written to, where xmin <= x < xmax,
171  * ymin <= y < ymax.  The image is assumed to be clear on input.
172  *
173  * If nonzero_fill is true then the interior of the polygon is
174  * computed with the non-zero fill rule.  Otherwise the even-odd fill
175  * rule is used.
176  *
177  * The scan converter must be reset or destroyed after this call. */
178 
179 /*-------------------------------------------------------------------------
180  * glitter-paths.c: Implementation internal types
181  */
182 #include <stdlib.h>
183 #include <string.h>
184 #include <limits.h>
185 
186 /* All polygon coordinates are snapped onto a subsample grid. "Grid
187  * scaled" numbers are fixed precision reals with multiplier GRID_X or
188  * GRID_Y. */
189 typedef int grid_scaled_t;
190 typedef int grid_scaled_x_t;
191 typedef int grid_scaled_y_t;
192 
193 /* Default x/y scale factors.
194  *  You can either define GRID_X/Y_BITS to get a power-of-two scale
195  *  or define GRID_X/Y separately. */
196 #if !defined(GRID_X) && !defined(GRID_X_BITS)
197 #  define GRID_X_BITS 8
198 #endif
199 #if !defined(GRID_Y) && !defined(GRID_Y_BITS)
200 #  define GRID_Y 15
201 #endif
202 
203 /* Use GRID_X/Y_BITS to define GRID_X/Y if they're available. */
204 #ifdef GRID_X_BITS
205 #  define GRID_X (1 << GRID_X_BITS)
206 #endif
207 #ifdef GRID_Y_BITS
208 #  define GRID_Y (1 << GRID_Y_BITS)
209 #endif
210 
211 /* The GRID_X_TO_INT_FRAC macro splits a grid scaled coordinate into
212  * integer and fractional parts. The integer part is floored. */
213 #if defined(GRID_X_TO_INT_FRAC)
214   /* do nothing */
215 #elif defined(GRID_X_BITS)
216 #  define GRID_X_TO_INT_FRAC(x, i, f) \
217 	_GRID_TO_INT_FRAC_shift(x, i, f, GRID_X_BITS)
218 #else
219 #  define GRID_X_TO_INT_FRAC(x, i, f) \
220 	_GRID_TO_INT_FRAC_general(x, i, f, GRID_X)
221 #endif
222 
223 #define _GRID_TO_INT_FRAC_general(t, i, f, m) do {	\
224     (i) = (t) / (m);					\
225     (f) = (t) % (m);					\
226     if ((f) < 0) {					\
227 	--(i);						\
228 	(f) += (m);					\
229     }							\
230 } while (0)
231 
232 #define _GRID_TO_INT_FRAC_shift(t, i, f, b) do {	\
233     (f) = (t) & ((1 << (b)) - 1);			\
234     (i) = (t) >> (b);					\
235 } while (0)
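/* For example, with GRID_X = 4 (GRID_X_BITS = 2 as configured above),
 * splitting the grid-scaled coordinate x = -5 gives:
 *
 *   shift form:   f = -5 & 3 = 3,   i = -5 >> 2 = -2
 *   general form: i = -5/4 = -1, f = -5%4 = -1;  f < 0, so i = -2, f = 3
 *
 * Both forms floor towards negative infinity: -5 == -2*4 + 3. */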
236 
237 /* A grid area is a real in [0,1] scaled by 2*GRID_X*GRID_Y.  We want
238  * to be able to represent exactly areas of subpixel trapezoids whose
239  * vertices are given in grid scaled coordinates.  The scale factor
240  * comes from needing to accurately represent the area 0.5*dx*dy of a
241  * triangle with base dx and height dy in grid scaled numbers. */
242 #define GRID_XY (2*GRID_X*GRID_Y) /* Unit area on the grid. */
243 
244 /* GRID_AREA_TO_ALPHA(area): map [0,GRID_XY] to [0,255]. */
245 #if GRID_XY == 510
246 #  define GRID_AREA_TO_ALPHA(c)	  (((c)+1) >> 1)
247 #elif GRID_XY == 255
248 #  define  GRID_AREA_TO_ALPHA(c)  (c)
249 #elif GRID_XY == 64
250 #  define  GRID_AREA_TO_ALPHA(c)  (((c) << 2) | -(((c) & 0x40) >> 6))
251 #elif GRID_XY == 32
252 #  define  GRID_AREA_TO_ALPHA(c)  (((c) << 3) | -(((c) & 0x20) >> 5))
253 #elif GRID_XY == 128
254 #  define  GRID_AREA_TO_ALPHA(c)  ((((c) << 1) | -((c) >> 7)) & 255)
255 #elif GRID_XY == 256
256 #  define  GRID_AREA_TO_ALPHA(c)  (((c) | -((c) >> 8)) & 255)
257 #elif GRID_XY == 15
258 #  define  GRID_AREA_TO_ALPHA(c)  (((c) << 4) + (c))
259 #elif GRID_XY == 2*256*15
260 #  define  GRID_AREA_TO_ALPHA(c)  (((c) + ((c)<<4) + 256) >> 9)
261 #else
262 #  define  GRID_AREA_TO_ALPHA(c)  (((c)*255 + GRID_XY/2) / GRID_XY)
263 #endif
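/* With GRID_X_BITS = GRID_Y_BITS = 2 as configured above, GRID_XY is
 * 2*4*4 = 32, so the GRID_XY == 32 branch is taken: an area c in [0,32]
 * maps through (c << 3) | -((c & 0x20) >> 5), e.g. c = 0 -> 0,
 * c = 16 -> 128, and c = 32 -> all bits set, which truncates to 255 when
 * stored as an 8-bit alpha. */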
264 
265 #define UNROLL3(x) x x x
266 
267 struct quorem {
268     int32_t quo;
269     int32_t rem;
270 };
271 
272 /* Header for a chunk of memory in a memory pool. */
273 struct _pool_chunk {
274     /* # bytes used in this chunk. */
275     size_t size;
276 
277     /* # bytes total in this chunk */
278     size_t capacity;
279 
280     /* Pointer to the previous chunk or %NULL if this is the sentinel
281      * chunk in the pool header. */
282     struct _pool_chunk *prev_chunk;
283 
284     /* Actual data starts here.	 Well aligned for pointers. */
285 };
286 
287 /* A memory pool.  This is supposed to be embedded on the stack or
288  * within some other structure.	 It may optionally be followed by an
289  * embedded array from which requests are fulfilled until
290  * malloc needs to be called to allocate a first real chunk. */
291 struct pool {
292     /* Chunk we're allocating from. */
293     struct _pool_chunk *current;
294 
295     jmp_buf *jmp;
296 
297     /* Free list of previously allocated chunks.  All have >= default
298      * capacity. */
299     struct _pool_chunk *first_free;
300 
301     /* The default capacity of a chunk. */
302     size_t default_capacity;
303 
304     /* Header for the sentinel chunk.  Directly following the pool
305      * struct should be some space for embedded elements from which
306      * the sentinel chunk allocates. */
307     struct _pool_chunk sentinel[1];
308 };
309 
310 /* A polygon edge. */
311 struct edge {
312     /* Next in y-bucket or active list. */
313     struct edge *next, *prev;
314 
315     /* Number of subsample rows remaining to scan convert of this
316      * edge. */
317     grid_scaled_y_t height_left;
318 
319     /* Original sign of the edge: +1 for downwards, -1 for upwards
320      * edges.  */
321     int dir;
322     int vertical;
323 
324     /* Current x coordinate while the edge is on the active
325      * list. Initialised to the x coordinate of the top of the
326      * edge. The quotient is in grid_scaled_x_t units and the
327      * remainder is mod dy in grid_scaled_y_t units.*/
328     struct quorem x;
329 
330     /* Advance of the current x when moving down a subsample line. */
331     struct quorem dxdy;
332 
333     /* The clipped y of the top of the edge. */
334     grid_scaled_y_t ytop;
335 
336     /* y2-y1 after orienting the edge downwards.  */
337     grid_scaled_y_t dy;
338 };
339 
340 #define EDGE_Y_BUCKET_INDEX(y, ymin) (((y) - (ymin))/GRID_Y)
341 
342 /* A collection of sorted and vertically clipped edges of the polygon.
343  * Edges are moved from the polygon to an active list while scan
344  * converting. */
345 struct polygon {
346     /* The vertical clip extents. */
347     grid_scaled_y_t ymin, ymax;
348 
349     /* Array of edges all starting in the same bucket.	An edge is put
350      * into bucket EDGE_Y_BUCKET_INDEX(edge->ytop, polygon->ymin) when
351      * it is added to the polygon. */
352     struct edge **y_buckets;
353     struct edge *y_buckets_embedded[64];
354 
355     struct {
356 	struct pool base[1];
357 	struct edge embedded[32];
358     } edge_pool;
359 };
360 
361 /* A cell records the effect on pixel coverage of polygon edges
362  * passing through a pixel.  It contains two accumulators of pixel
363  * coverage.
364  *
365  * Consider the effects of a polygon edge on the coverage of a pixel
366  * it intersects and that of the following one.  The coverage of the
367  * following pixel is the height of the edge multiplied by the width
368  * of the pixel, and the coverage of the pixel itself is the area of
369  * the trapezoid formed by the edge and the right side of the pixel.
370  *
371  * +-----------------------+-----------------------+
372  * |                       |                       |
373  * |                       |                       |
374  * |_______________________|_______________________|
375  * |   \...................|.......................|\
376  * |    \..................|.......................| |
377  * |     \.................|.......................| |
378  * |      \....covered.....|.......................| |
379  * |       \....area.......|.......................| } covered height
380  * |        \..............|.......................| |
381  * |uncovered\.............|.......................| |
382  * |  area    \............|.......................| |
383  * |___________\...........|.......................|/
384  * |                       |                       |
385  * |                       |                       |
386  * |                       |                       |
387  * +-----------------------+-----------------------+
388  *
389  * Since the coverage of the following pixel will always be a multiple
390  * of the width of the pixel, we can store the height of the covered
391  * area instead.  The coverage of the pixel itself is the total
392  * coverage minus the area of the uncovered area to the left of the
393  * edge.  As it's faster to compute the uncovered area we only store
394  * that and subtract it from the total coverage later when forming
395  * spans to blit.
396  *
397  * The heights and areas are signed, with left edges of the polygon
398  * having positive sign and right edges having negative sign.  When
399  * two edges intersect they swap their left/rightness so their
400  * contribution above and below the intersection point must be
401  * computed separately. */
402 struct cell {
403     struct cell		*next;
404     int			 x;
405     int16_t		 uncovered_area;
406     int16_t		 covered_height;
407 };
408 
409 /* A cell list represents the scan line sparsely as cells ordered by
410  * ascending x.  It is geared towards scanning the cells in order
411  * using an internal cursor. */
412 struct cell_list {
413     /* Sentinel nodes */
414     struct cell head, tail;
415 
416     /* Cursor state for iterating through the cell list. */
417     struct cell *cursor, *rewind;
418 
419     /* Cells in the cell list are owned by the cell list and are
420      * allocated from this pool.  */
421     struct {
422 	struct pool base[1];
423 	struct cell embedded[32];
424     } cell_pool;
425 };
426 
427 struct cell_pair {
428     struct cell *cell1;
429     struct cell *cell2;
430 };
431 
432 /* The active list contains edges in the current scan line ordered by
433  * the x-coordinate of the intercept of the edge and the scan line. */
434 struct active_list {
435     /* Leftmost edge on the current scan line. */
436     struct edge head, tail;
437 
438     /* A lower bound on the height of the active edges is used to
439      * estimate how soon some active edge ends.	 We can't advance the
440      * scan conversion by a full pixel row if an edge ends somewhere
441      * within it. */
442     grid_scaled_y_t min_height;
443     int is_vertical;
444 };
445 
446 struct glitter_scan_converter {
447     struct polygon	polygon[1];
448     struct active_list	active[1];
449     struct cell_list	coverages[1];
450 
451     cairo_half_open_span_t *spans;
452     cairo_half_open_span_t spans_embedded[64];
453 
454     /* Clip box. */
455     grid_scaled_x_t xmin, xmax;
456     grid_scaled_y_t ymin, ymax;
457 };
458 
459 /* Compute the floored division a/b. Assumes / and % perform symmetric
460  * division. */
461 inline static struct quorem
462 floored_divrem(int a, int b)
463 {
464     struct quorem qr;
465     qr.quo = a/b;
466     qr.rem = a%b;
467     if ((a^b)<0 && qr.rem) {
468 	qr.quo -= 1;
469 	qr.rem += b;
470     }
471     return qr;
472 }
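/* For example, floored_divrem(-3, 4) starts from C's truncating division,
 * quo = 0, rem = -3; the signs of a and b differ and rem != 0, so it is
 * fixed up to quo = -1, rem = 1, i.e. -3 == -1*4 + 1 with 0 <= rem < 4.
 * The edge-stepping code below relies on this floored form. */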
473 
474 /* Compute the floored division (x*a)/b. Assumes / and % perform symmetric
475  * division. */
476 static struct quorem
477 floored_muldivrem(int x, int a, int b)
478 {
479     struct quorem qr;
480     long long xa = (long long)x*a;
481     qr.quo = xa/b;
482     qr.rem = xa%b;
483     if ((xa>=0) != (b>=0) && qr.rem) {
484 	qr.quo -= 1;
485 	qr.rem += b;
486     }
487     return qr;
488 }
489 
490 static struct _pool_chunk *
491 _pool_chunk_init(
492     struct _pool_chunk *p,
493     struct _pool_chunk *prev_chunk,
494     size_t capacity)
495 {
496     p->prev_chunk = prev_chunk;
497     p->size = 0;
498     p->capacity = capacity;
499     return p;
500 }
501 
502 static struct _pool_chunk *
503 _pool_chunk_create(struct pool *pool, size_t size)
504 {
505     struct _pool_chunk *p;
506 
507     p = _cairo_malloc (size + sizeof(struct _pool_chunk));
508     if (unlikely (NULL == p))
509 	longjmp (*pool->jmp, _cairo_error (CAIRO_STATUS_NO_MEMORY));
510 
511     return _pool_chunk_init(p, pool->current, size);
512 }
513 
514 static void
515 pool_init(struct pool *pool,
516 	  jmp_buf *jmp,
517 	  size_t default_capacity,
518 	  size_t embedded_capacity)
519 {
520     pool->jmp = jmp;
521     pool->current = pool->sentinel;
522     pool->first_free = NULL;
523     pool->default_capacity = default_capacity;
524     _pool_chunk_init(pool->sentinel, NULL, embedded_capacity);
525 }
526 
527 static void
528 pool_fini(struct pool *pool)
529 {
530     struct _pool_chunk *p = pool->current;
531     do {
532 	while (NULL != p) {
533 	    struct _pool_chunk *prev = p->prev_chunk;
534 	    if (p != pool->sentinel)
535 		free(p);
536 	    p = prev;
537 	}
538 	p = pool->first_free;
539 	pool->first_free = NULL;
540     } while (NULL != p);
541 }
542 
543 /* Satisfy an allocation by first allocating a new large enough chunk
544  * and adding it to the head of the pool's chunk list. This function
545  * is called as a fallback if pool_alloc() couldn't do a quick
546  * allocation from the current chunk in the pool. */
547 static void *
548 _pool_alloc_from_new_chunk(
549     struct pool *pool,
550     size_t size)
551 {
552     struct _pool_chunk *chunk;
553     void *obj;
554     size_t capacity;
555 
556     /* If the allocation is smaller than the default chunk size then
557      * try getting a chunk off the free list.  Force alloc of a new
558      * chunk for large requests. */
559     capacity = size;
560     chunk = NULL;
561     if (size < pool->default_capacity) {
562 	capacity = pool->default_capacity;
563 	chunk = pool->first_free;
564 	if (chunk) {
565 	    pool->first_free = chunk->prev_chunk;
566 	    _pool_chunk_init(chunk, pool->current, chunk->capacity);
567 	}
568     }
569 
570     if (NULL == chunk)
571 	chunk = _pool_chunk_create (pool, capacity);
572     pool->current = chunk;
573 
574     obj = ((unsigned char*)chunk + sizeof(*chunk) + chunk->size);
575     chunk->size += size;
576     return obj;
577 }
578 
579 /* Allocate size bytes from the pool.  The first allocated address
580  * returned from a pool is aligned to sizeof(void*).  Subsequent
581  * addresses will maintain alignment as long as multiples of void* are
582  * allocated.  Returns the address of a new memory area; on allocation
583  * failure it does not return but longjmps out via the pool's jmp_buf.
584  * The pool retains ownership of the returned memory. */
585 inline static void *
586 pool_alloc (struct pool *pool, size_t size)
587 {
588     struct _pool_chunk *chunk = pool->current;
589 
590     if (size <= chunk->capacity - chunk->size) {
591 	void *obj = ((unsigned char*)chunk + sizeof(*chunk) + chunk->size);
592 	chunk->size += size;
593 	return obj;
594     } else {
595 	return _pool_alloc_from_new_chunk(pool, size);
596     }
597 }
598 
599 /* Relinquish all pool_alloced memory back to the pool. */
600 static void
601 pool_reset (struct pool *pool)
602 {
603     /* Transfer all used chunks to the chunk free list. */
604     struct _pool_chunk *chunk = pool->current;
605     if (chunk != pool->sentinel) {
606 	while (chunk->prev_chunk != pool->sentinel) {
607 	    chunk = chunk->prev_chunk;
608 	}
609 	chunk->prev_chunk = pool->first_free;
610 	pool->first_free = pool->current;
611     }
612     /* Reset the sentinel as the current chunk. */
613     pool->current = pool->sentinel;
614     pool->sentinel->size = 0;
615 }
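/* A rough usage sketch of the pool (illustrative only, not compiled):
 *
 *   struct { struct pool base[1]; struct cell embedded[32]; } p;
 *
 *   pool_init (p.base, &jmp, 256*sizeof(struct cell), sizeof(p.embedded));
 *   struct cell *c = pool_alloc (p.base, sizeof(struct cell));
 *   ...
 *   pool_reset (p.base);	-- everything handed out is now recycled
 *   pool_fini (p.base);
 *
 * Allocations are served from the embedded space after the pool struct
 * first, then from malloc'd chunks; pool_reset() moves the used chunks onto
 * a free list for reuse rather than freeing them, and allocation failure
 * longjmps out via the pool's jmp_buf. */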
616 
617 /* Rewinds the cell list's cursor to the beginning.  After rewinding
618  * we're good to cell_list_find() the cell at any x coordinate. */
619 inline static void
620 cell_list_rewind (struct cell_list *cells)
621 {
622     cells->cursor = &cells->head;
623 }
624 
625 inline static void
626 cell_list_maybe_rewind (struct cell_list *cells, int x)
627 {
628     if (x < cells->cursor->x) {
629 	cells->cursor = cells->rewind;
630 	if (x < cells->cursor->x)
631 	    cells->cursor = &cells->head;
632     }
633 }
634 
635 inline static void
636 cell_list_set_rewind (struct cell_list *cells)
637 {
638     cells->rewind = cells->cursor;
639 }
640 
641 static void
642 cell_list_init(struct cell_list *cells, jmp_buf *jmp)
643 {
644     pool_init(cells->cell_pool.base, jmp,
645 	      256*sizeof(struct cell),
646 	      sizeof(cells->cell_pool.embedded));
647     cells->tail.next = NULL;
648     cells->tail.x = INT_MAX;
649     cells->head.x = INT_MIN;
650     cells->head.next = &cells->tail;
651     cell_list_rewind (cells);
652 }
653 
654 static void
655 cell_list_fini(struct cell_list *cells)
656 {
657     pool_fini (cells->cell_pool.base);
658 }
659 
660 /* Empty the cell list.  This is called at the start of every pixel
661  * row. */
662 inline static void
663 cell_list_reset (struct cell_list *cells)
664 {
665     cell_list_rewind (cells);
666     cells->head.next = &cells->tail;
667     pool_reset (cells->cell_pool.base);
668 }
669 
670 inline static struct cell *
671 cell_list_alloc (struct cell_list *cells,
672 		 struct cell *tail,
673 		 int x)
674 {
675     struct cell *cell;
676 
677     cell = pool_alloc (cells->cell_pool.base, sizeof (struct cell));
678     cell->next = tail->next;
679     tail->next = cell;
680     cell->x = x;
681     *(uint32_t *)&cell->uncovered_area = 0;
682 
683     return cell;
684 }
685 
686 /* Find a cell at the given x-coordinate.  Allocation failure for a new
687  * cell longjmps out via the pool's jmp_buf.  Cells must be found with
688  * non-decreasing x-coordinate until the cell list is rewound using
689  * cell_list_rewind(). Ownership of the returned cell is retained by
690  * the cell list. */
691 inline static struct cell *
692 cell_list_find (struct cell_list *cells, int x)
693 {
694     struct cell *tail = cells->cursor;
695 
696     if (tail->x == x)
697 	return tail;
698 
699     while (1) {
700 	UNROLL3({
701 		if (tail->next->x > x)
702 			break;
703 		tail = tail->next;
704 	});
705     }
706 
707     if (tail->x != x)
708 	tail = cell_list_alloc (cells, tail, x);
709     return cells->cursor = tail;
710 
711 }
712 
713 /* Find two cells at x1 and x2.	 This is exactly equivalent
714  * to
715  *
716  *   pair.cell1 = cell_list_find(cells, x1);
717  *   pair.cell2 = cell_list_find(cells, x2);
718  *
719  * except with less function call overhead. */
720 inline static struct cell_pair
721 cell_list_find_pair(struct cell_list *cells, int x1, int x2)
722 {
723     struct cell_pair pair;
724 
725     pair.cell1 = cells->cursor;
726     while (1) {
727 	UNROLL3({
728 		if (pair.cell1->next->x > x1)
729 			break;
730 		pair.cell1 = pair.cell1->next;
731 	});
732     }
733     if (pair.cell1->x != x1)
734 	pair.cell1 = cell_list_alloc (cells, pair.cell1, x1);
735 
736     pair.cell2 = pair.cell1;
737     while (1) {
738 	UNROLL3({
739 		if (pair.cell2->next->x > x2)
740 			break;
741 		pair.cell2 = pair.cell2->next;
742 	});
743     }
744     if (pair.cell2->x != x2)
745 	pair.cell2 = cell_list_alloc (cells, pair.cell2, x2);
746 
747     cells->cursor = pair.cell2;
748     return pair;
749 }
750 
751 /* Add a subpixel span covering [x1, x2) to the coverage cells. */
752 inline static void
753 cell_list_add_subspan(struct cell_list *cells,
754 		      grid_scaled_x_t x1,
755 		      grid_scaled_x_t x2)
756 {
757     int ix1, fx1;
758     int ix2, fx2;
759 
760     if (x1 == x2)
761 	return;
762 
763     GRID_X_TO_INT_FRAC(x1, ix1, fx1);
764     GRID_X_TO_INT_FRAC(x2, ix2, fx2);
765 
766     if (ix1 != ix2) {
767 	struct cell_pair p;
768 	p = cell_list_find_pair(cells, ix1, ix2);
769 	p.cell1->uncovered_area += 2*fx1;
770 	++p.cell1->covered_height;
771 	p.cell2->uncovered_area -= 2*fx2;
772 	--p.cell2->covered_height;
773     } else {
774 	struct cell *cell = cell_list_find(cells, ix1);
775 	cell->uncovered_area += 2*(fx1-fx2);
776     }
777 }
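/* For example, with GRID_X = 4 a subspan covering grid x in [5, 11) on one
 * subsample row splits into ix1 = 1, fx1 = 1 and ix2 = 2, fx2 = 3, so the
 * cell for pixel 1 gets uncovered_area += 2*1 and covered_height += 1 while
 * the cell for pixel 2 gets uncovered_area -= 2*3 and covered_height -= 1.
 * Summed from the left when blitting, this corresponds to 3 of 4 subpixels
 * covered in each of the two pixels on this subsample row, i.e. 6/GRID_XY
 * of a full pixel each. */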
778 
779 /* Adds the analytical coverage of an edge crossing the current pixel
780  * row to the coverage cells and advances the edge's x position to the
781  * following row.
782  *
783  * This function is only called when we know that during this pixel row:
784  *
785  * 1) The relative order of all edges on the active list doesn't
786  * change.  In particular, no edges intersect within this row to pixel
787  * precision.
788  *
789  * 2) No new edges start in this row.
790  *
791  * 3) No existing edges end mid-row.
792  *
793  * This function depends on being called with all edges from the
794  * active list in the order they appear on the list (i.e. with
795  * non-decreasing x-coordinate.)  */
796 static void
797 cell_list_render_edge(struct cell_list *cells,
798 		      struct edge *edge,
799 		      int sign)
800 {
801     grid_scaled_x_t fx;
802     struct cell *cell;
803     int ix;
804 
805     GRID_X_TO_INT_FRAC(edge->x.quo, ix, fx);
806 
807     /* We always know that ix is >= the cell list cursor in this
808      * case due to the no-intersections precondition.  */
809     cell = cell_list_find(cells, ix);
810     cell->covered_height += sign*GRID_Y;
811     cell->uncovered_area += sign*2*fx*GRID_Y;
812 }
813 
814 static void
815 polygon_init (struct polygon *polygon, jmp_buf *jmp)
816 {
817     polygon->ymin = polygon->ymax = 0;
818     polygon->y_buckets = polygon->y_buckets_embedded;
819     pool_init (polygon->edge_pool.base, jmp,
820 	       8192 - sizeof (struct _pool_chunk),
821 	       sizeof (polygon->edge_pool.embedded));
822 }
823 
824 static void
825 polygon_fini (struct polygon *polygon)
826 {
827     if (polygon->y_buckets != polygon->y_buckets_embedded)
828 	free (polygon->y_buckets);
829 
830     pool_fini (polygon->edge_pool.base);
831 }
832 
833 /* Empties the polygon of all edges. The polygon is then prepared to
834  * receive new edges and clip them to the vertical range
835  * [ymin,ymax). */
836 static glitter_status_t
837 polygon_reset (struct polygon *polygon,
838 	       grid_scaled_y_t ymin,
839 	       grid_scaled_y_t ymax)
840 {
841     unsigned h = ymax - ymin;
842     unsigned num_buckets = EDGE_Y_BUCKET_INDEX(ymax + GRID_Y-1, ymin);
843 
844     pool_reset(polygon->edge_pool.base);
845 
846     if (unlikely (h > 0x7FFFFFFFU - GRID_Y))
847 	goto bail_no_mem; /* even if you could, you wouldn't want to. */
848 
849     if (polygon->y_buckets != polygon->y_buckets_embedded)
850 	free (polygon->y_buckets);
851 
852     polygon->y_buckets =  polygon->y_buckets_embedded;
853     if (num_buckets > ARRAY_LENGTH (polygon->y_buckets_embedded)) {
854 	polygon->y_buckets = _cairo_malloc_ab (num_buckets,
855 					       sizeof (struct edge *));
856 	if (unlikely (NULL == polygon->y_buckets))
857 	    goto bail_no_mem;
858     }
859     memset (polygon->y_buckets, 0, num_buckets * sizeof (struct edge *));
860 
861     polygon->ymin = ymin;
862     polygon->ymax = ymax;
863     return GLITTER_STATUS_SUCCESS;
864 
865 bail_no_mem:
866     polygon->ymin = 0;
867     polygon->ymax = 0;
868     return GLITTER_STATUS_NO_MEMORY;
869 }
870 
871 static void
872 _polygon_insert_edge_into_its_y_bucket(struct polygon *polygon,
873 				       struct edge *e)
874 {
875     unsigned ix = EDGE_Y_BUCKET_INDEX(e->ytop, polygon->ymin);
876     struct edge **ptail = &polygon->y_buckets[ix];
877     e->next = *ptail;
878     *ptail = e;
879 }
880 
881 inline static void
882 polygon_add_edge (struct polygon *polygon,
883 		  const cairo_edge_t *edge)
884 {
885     struct edge *e;
886     grid_scaled_x_t dx;
887     grid_scaled_y_t dy;
888     grid_scaled_y_t ytop, ybot;
889     grid_scaled_y_t ymin = polygon->ymin;
890     grid_scaled_y_t ymax = polygon->ymax;
891 
892     if (unlikely (edge->top >= ymax || edge->bottom <= ymin))
893 	return;
894 
895     e = pool_alloc (polygon->edge_pool.base, sizeof (struct edge));
896 
897     dx = edge->line.p2.x - edge->line.p1.x;
898     dy = edge->line.p2.y - edge->line.p1.y;
899     e->dy = dy;
900     e->dir = edge->dir;
901 
902     ytop = edge->top >= ymin ? edge->top : ymin;
903     ybot = edge->bottom <= ymax ? edge->bottom : ymax;
904     e->ytop = ytop;
905     e->height_left = ybot - ytop;
906 
907     if (dx == 0) {
908 	e->vertical = TRUE;
909 	e->x.quo = edge->line.p1.x;
910 	e->x.rem = 0;
911 	e->dxdy.quo = 0;
912 	e->dxdy.rem = 0;
913     } else {
914 	e->vertical = FALSE;
915 	e->dxdy = floored_divrem (dx, dy);
916 	if (ytop == edge->line.p1.y) {
917 	    e->x.quo = edge->line.p1.x;
918 	    e->x.rem = 0;
919 	} else {
920 	    e->x = floored_muldivrem (ytop - edge->line.p1.y, dx, dy);
921 	    e->x.quo += edge->line.p1.x;
922 	}
923     }
924 
925     _polygon_insert_edge_into_its_y_bucket (polygon, e);
926 
927     e->x.rem -= dy;		/* Bias the remainder for faster
928 				 * edge advancement. */
929 }
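/* For example, an edge with dx = 3, dy = 8 gets dxdy = {0, 3} from
 * floored_divrem(), and after the bias above its remainder stays in
 * [-dy, 0).  Each subsample row then only needs "x.quo += dxdy.quo;
 * x.rem += dxdy.rem;" plus a single carry when x.rem becomes non-negative:
 * starting from rem = -8 the remainders go -5, -2, +1, at which point
 * x.quo is incremented and rem drops back to -7.  This keeps the x
 * intercept exact without any per-row division (see sub_row() below). */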
930 
931 static void
932 active_list_reset (struct active_list *active)
933 {
934     active->head.vertical = 1;
935     active->head.height_left = INT_MAX;
936     active->head.x.quo = INT_MIN;
937     active->head.prev = NULL;
938     active->head.next = &active->tail;
939     active->tail.prev = &active->head;
940     active->tail.next = NULL;
941     active->tail.x.quo = INT_MAX;
942     active->tail.height_left = INT_MAX;
943     active->tail.vertical = 1;
944     active->min_height = 0;
945     active->is_vertical = 1;
946 }
947 
948 static void
949 active_list_init(struct active_list *active)
950 {
951     active_list_reset(active);
952 }
953 
954 /*
955  * Merge two sorted edge lists.
956  * Input:
957  *  - head_a: The head of the first list.
958  *  - head_b: The head of the second list; head_b cannot be NULL.
959  * Output:
960  * Returns the head of the merged list.
961  *
962  * Implementation notes:
963  * To make it fast (in particular, to reduce to an insertion sort whenever
964  * one of the two input lists only has a single element) we iterate through
965  * a list until its head becomes greater than the head of the other list,
966  * then we switch their roles. As soon as one of the two lists is empty, we
967  * just attach the other one to the current list and exit.
968  * Writes to memory are only needed to "switch" lists (as it also requires
969  * attaching to the output list the list which we will be iterating next) and
970  * to attach the last non-empty list.
971  */
972 static struct edge *
973 merge_sorted_edges (struct edge *head_a, struct edge *head_b)
974 {
975     struct edge *head, **next, *prev;
976     int32_t x;
977 
978     prev = head_a->prev;
979     next = &head;
980     if (head_a->x.quo <= head_b->x.quo) {
981 	head = head_a;
982     } else {
983 	head = head_b;
984 	head_b->prev = prev;
985 	goto start_with_b;
986     }
987 
988     do {
989 	x = head_b->x.quo;
990 	while (head_a != NULL && head_a->x.quo <= x) {
991 	    prev = head_a;
992 	    next = &head_a->next;
993 	    head_a = head_a->next;
994 	}
995 
996 	head_b->prev = prev;
997 	*next = head_b;
998 	if (head_a == NULL)
999 	    return head;
1000 
1001 start_with_b:
1002 	x = head_a->x.quo;
1003 	while (head_b != NULL && head_b->x.quo <= x) {
1004 	    prev = head_b;
1005 	    next = &head_b->next;
1006 	    head_b = head_b->next;
1007 	}
1008 
1009 	head_a->prev = prev;
1010 	*next = head_a;
1011 	if (head_b == NULL)
1012 	    return head;
1013     } while (1);
1014 }
1015 
1016 /*
1017  * Sort (part of) a list.
1018  * Input:
1019  *  - list: The list to be sorted; list cannot be NULL.
1020  *  - limit: Recursion limit.
1021  * Output:
1022  *  - head_out: The head of the sorted list containing the first 2^(level+1) elements of the
1023  *              input list; if the input list has fewer elements, head_out will be a sorted list
1024  *              containing all the elements of the input list.
1025  * Returns the head of the list of unprocessed elements (NULL if the sorted list contains
1026  * all the elements of the input list).
1027  *
1028  * Implementation notes:
1029  * Special case single element list, unroll/inline the sorting of the first two elements.
1030  * Some tail recursion is used since we iterate on the bottom-up solution of the problem
1031  * (we start with a small sorted list and keep merging other lists of the same size to it).
1032  */
1033 static struct edge *
1034 sort_edges (struct edge *list,
1035 	    unsigned int level,
1036 	    struct edge **head_out)
1037 {
1038     struct edge *head_other, *remaining;
1039     unsigned int i;
1040 
1041     head_other = list->next;
1042 
1043     if (head_other == NULL) {
1044 	*head_out = list;
1045 	return NULL;
1046     }
1047 
1048     remaining = head_other->next;
1049     if (list->x.quo <= head_other->x.quo) {
1050 	*head_out = list;
1051 	head_other->next = NULL;
1052     } else {
1053 	*head_out = head_other;
1054 	head_other->prev = list->prev;
1055 	head_other->next = list;
1056 	list->prev = head_other;
1057 	list->next = NULL;
1058     }
1059 
1060     for (i = 0; i < level && remaining; i++) {
1061 	remaining = sort_edges (remaining, i, &head_other);
1062 	*head_out = merge_sorted_edges (*head_out, head_other);
1063     }
1064 
1065     return remaining;
1066 }
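/* Roughly, sort_edges (list, UINT_MAX, &head) as used by
 * merge_unsorted_edges() below first sorts a pair of edges and then
 * repeatedly sorts the next 2, 4, 8, ... elements of the remainder,
 * merging each run into the accumulated result, so a bucket that is
 * already mostly sorted degrades gracefully into a few cheap merges. */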
1067 
1068 static struct edge *
1069 merge_unsorted_edges (struct edge *head, struct edge *unsorted)
1070 {
1071     sort_edges (unsorted, UINT_MAX, &unsorted);
1072     return merge_sorted_edges (head, unsorted);
1073 }
1074 
1075 /* Test if the edges on the active list can be safely advanced by a
1076  * full row without intersections or any edges ending. */
1077 inline static int
1078 can_do_full_row (struct active_list *active)
1079 {
1080     const struct edge *e;
1081 
1082     /* Recomputes the minimum height of all edges on the active
1083      * list if we have been dropping edges. */
1084     if (active->min_height <= 0) {
1085 	int min_height = INT_MAX;
1086 	int is_vertical = 1;
1087 
1088 	e = active->head.next;
1089 	while (NULL != e) {
1090 	    if (e->height_left < min_height)
1091 		min_height = e->height_left;
1092 	    is_vertical &= e->vertical;
1093 	    e = e->next;
1094 	}
1095 
1096 	active->is_vertical = is_vertical;
1097 	active->min_height = min_height;
1098     }
1099 
1100     if (active->min_height < GRID_Y)
1101 	return 0;
1102 
1103     return active->is_vertical;
1104 }
1105 
1106 /* Merges edges on the given subpixel row from the polygon to the
1107  * active_list. */
1108 inline static void
1109 active_list_merge_edges_from_bucket(struct active_list *active,
1110 				    struct edge *edges)
1111 {
1112     active->head.next = merge_unsorted_edges (active->head.next, edges);
1113 }
1114 
1115 inline static void
1116 polygon_fill_buckets (struct active_list *active,
1117 		      struct edge *edge,
1118 		      int y,
1119 		      struct edge **buckets)
1120 {
1121     grid_scaled_y_t min_height = active->min_height;
1122     int is_vertical = active->is_vertical;
1123 
1124     while (edge) {
1125 	struct edge *next = edge->next;
1126 	int suby = edge->ytop - y;
1127 	if (buckets[suby])
1128 	    buckets[suby]->prev = edge;
1129 	edge->next = buckets[suby];
1130 	edge->prev = NULL;
1131 	buckets[suby] = edge;
1132 	if (edge->height_left < min_height)
1133 	    min_height = edge->height_left;
1134 	is_vertical &= edge->vertical;
1135 	edge = next;
1136     }
1137 
1138     active->is_vertical = is_vertical;
1139     active->min_height = min_height;
1140 }
1141 
1142 inline static void
1143 sub_row (struct active_list *active,
1144 	 struct cell_list *coverages,
1145 	 unsigned int mask)
1146 {
1147     struct edge *edge = active->head.next;
1148     int xstart = INT_MIN, prev_x = INT_MIN;
1149     int winding = 0;
1150 
1151     cell_list_rewind (coverages);
1152 
1153     while (&active->tail != edge) {
1154 	struct edge *next = edge->next;
1155 	int xend = edge->x.quo;
1156 
1157 	if (--edge->height_left) {
1158 	    edge->x.quo += edge->dxdy.quo;
1159 	    edge->x.rem += edge->dxdy.rem;
1160 	    if (edge->x.rem >= 0) {
1161 		++edge->x.quo;
1162 		edge->x.rem -= edge->dy;
1163 	    }
1164 
1165 	    if (edge->x.quo < prev_x) {
1166 		struct edge *pos = edge->prev;
1167 		pos->next = next;
1168 		next->prev = pos;
1169 		do {
1170 		    pos = pos->prev;
1171 		} while (edge->x.quo < pos->x.quo);
1172 		pos->next->prev = edge;
1173 		edge->next = pos->next;
1174 		edge->prev = pos;
1175 		pos->next = edge;
1176 	    } else
1177 		prev_x = edge->x.quo;
1178 	} else {
1179 	    edge->prev->next = next;
1180 	    next->prev = edge->prev;
1181 	}
1182 
1183 	winding += edge->dir;
1184 	if ((winding & mask) == 0) {
1185 	    if (next->x.quo != xend) {
1186 		cell_list_add_subspan (coverages, xstart, xend);
1187 		xstart = INT_MIN;
1188 	    }
1189 	} else if (xstart == INT_MIN)
1190 	    xstart = xend;
1191 
1192 	edge = next;
1193     }
1194 }
1195 
1196 inline static void dec (struct edge *e, int h)
1197 {
1198     e->height_left -= h;
1199     if (e->height_left == 0) {
1200 	e->prev->next = e->next;
1201 	e->next->prev = e->prev;
1202     }
1203 }
1204 
1205 static void
1206 full_row (struct active_list *active,
1207 	  struct cell_list *coverages,
1208 	  unsigned int mask)
1209 {
1210     struct edge *left = active->head.next;
1211 
1212     while (&active->tail != left) {
1213 	struct edge *right;
1214 	int winding;
1215 
1216 	dec (left, GRID_Y);
1217 
1218 	winding = left->dir;
1219 	right = left->next;
1220 	do {
1221 	    dec (right, GRID_Y);
1222 
1223 	    winding += right->dir;
1224 	    if ((winding & mask) == 0 && right->next->x.quo != right->x.quo)
1225 		break;
1226 
1227 	    right = right->next;
1228 	} while (1);
1229 
1230 	cell_list_set_rewind (coverages);
1231 	cell_list_render_edge (coverages, left, +1);
1232 	cell_list_render_edge (coverages, right, -1);
1233 
1234 	left = right->next;
1235     }
1236 }
1237 
1238 static void
1239 _glitter_scan_converter_init(glitter_scan_converter_t *converter, jmp_buf *jmp)
1240 {
1241     polygon_init(converter->polygon, jmp);
1242     active_list_init(converter->active);
1243     cell_list_init(converter->coverages, jmp);
1244     converter->xmin=0;
1245     converter->ymin=0;
1246     converter->xmax=0;
1247     converter->ymax=0;
1248 }
1249 
1250 static void
1251 _glitter_scan_converter_fini(glitter_scan_converter_t *self)
1252 {
1253     if (self->spans != self->spans_embedded)
1254 	free (self->spans);
1255 
1256     polygon_fini(self->polygon);
1257     cell_list_fini(self->coverages);
1258 
1259     self->xmin=0;
1260     self->ymin=0;
1261     self->xmax=0;
1262     self->ymax=0;
1263 }
1264 
1265 static grid_scaled_t
1266 int_to_grid_scaled(int i, int scale)
1267 {
1268     /* Clamp to max/min representable scaled number. */
1269     if (i >= 0) {
1270 	if (i >= INT_MAX/scale)
1271 	    i = INT_MAX/scale;
1272     }
1273     else {
1274 	if (i <= INT_MIN/scale)
1275 	    i = INT_MIN/scale;
1276     }
1277     return i*scale;
1278 }
1279 
1280 #define int_to_grid_scaled_x(x) int_to_grid_scaled((x), GRID_X)
1281 #define int_to_grid_scaled_y(x) int_to_grid_scaled((x), GRID_Y)
1282 
1283 I glitter_status_t
1284 glitter_scan_converter_reset(
1285 			     glitter_scan_converter_t *converter,
1286 			     int xmin, int ymin,
1287 			     int xmax, int ymax)
1288 {
1289     glitter_status_t status;
1290     int max_num_spans;
1291 
1292     converter->xmin = 0; converter->xmax = 0;
1293     converter->ymin = 0; converter->ymax = 0;
1294 
1295     max_num_spans = xmax - xmin + 1;
1296 
1297     if (max_num_spans > ARRAY_LENGTH(converter->spans_embedded)) {
1298 	converter->spans = _cairo_malloc_ab (max_num_spans,
1299 					     sizeof (cairo_half_open_span_t));
1300 	if (unlikely (converter->spans == NULL))
1301 	    return _cairo_error (CAIRO_STATUS_NO_MEMORY);
1302     } else
1303 	converter->spans = converter->spans_embedded;
1304 
1305     xmin = int_to_grid_scaled_x(xmin);
1306     ymin = int_to_grid_scaled_y(ymin);
1307     xmax = int_to_grid_scaled_x(xmax);
1308     ymax = int_to_grid_scaled_y(ymax);
1309 
1310     active_list_reset(converter->active);
1311     cell_list_reset(converter->coverages);
1312     status = polygon_reset(converter->polygon, ymin, ymax);
1313     if (status)
1314 	return status;
1315 
1316     converter->xmin = xmin;
1317     converter->xmax = xmax;
1318     converter->ymin = ymin;
1319     converter->ymax = ymax;
1320     return GLITTER_STATUS_SUCCESS;
1321 }
1322 
1323 /* INPUT_TO_GRID_X/Y (in_coord, out_grid_scaled, grid_scale)
1324  *   These macros convert an input coordinate in the client's
1325  *   device space to the rasterisation grid.
1326  */
1327 /* Gah.. this bit of ugly defines INPUT_TO_GRID_X/Y so as to use
1328  * shifts if possible, and something saneish if not.
1329  */
1330 #if !defined(INPUT_TO_GRID_Y) && defined(GRID_Y_BITS) && GRID_Y_BITS <= GLITTER_INPUT_BITS
1331 #  define INPUT_TO_GRID_Y(in, out) (out) = (in) >> (GLITTER_INPUT_BITS - GRID_Y_BITS)
1332 #else
1333 #  define INPUT_TO_GRID_Y(in, out) INPUT_TO_GRID_general(in, out, GRID_Y)
1334 #endif
1335 
1336 #if !defined(INPUT_TO_GRID_X) && defined(GRID_X_BITS) && GRID_X_BITS <= GLITTER_INPUT_BITS
1337 #  define INPUT_TO_GRID_X(in, out) (out) = (in) >> (GLITTER_INPUT_BITS - GRID_X_BITS)
1338 #else
1339 #  define INPUT_TO_GRID_X(in, out) INPUT_TO_GRID_general(in, out, GRID_X)
1340 #endif
1341 
1342 #define INPUT_TO_GRID_general(in, out, grid_scale) do {		\
1343     long long tmp__ = (long long)(grid_scale) * (in);	\
1344     tmp__ >>= GLITTER_INPUT_BITS;				\
1345     (out) = tmp__;						\
1346 } while (0)
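/* Concretely, with GLITTER_INPUT_BITS = CAIRO_FIXED_FRAC_BITS (8 in cairo's
 * 24.8 fixed point) and GRID_X_BITS = GRID_Y_BITS = 2, both conversions
 * reduce to a right shift by 6: an input of 384 (1.5 pixels) becomes 6 on
 * the grid (1.5 * GRID_X), and the general form agrees, (384*4) >> 8 = 6. */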
1347 
1348 /* Add a new polygon edge from pixel (x1,y1) to (x2,y2) to the scan
1349  * converter.  The coordinates represent pixel positions scaled by
1350  * 2**GLITTER_PIXEL_BITS.  If this function fails then the scan
1351  * 2**GLITTER_INPUT_BITS.  If this function fails then the scan
1352  * with the latter reversing the orientation of the edge. */
1353 I void
1354 glitter_scan_converter_add_edge (glitter_scan_converter_t *converter,
1355 				 const cairo_edge_t *edge)
1356 {
1357     cairo_edge_t e;
1358 
1359     INPUT_TO_GRID_Y (edge->top, e.top);
1360     INPUT_TO_GRID_Y (edge->bottom, e.bottom);
1361     if (e.top >= e.bottom)
1362 	return;
1363 
1364     /* XXX: possible overflows if GRID_X/Y > 2**GLITTER_INPUT_BITS */
1365     INPUT_TO_GRID_Y (edge->line.p1.y, e.line.p1.y);
1366     INPUT_TO_GRID_Y (edge->line.p2.y, e.line.p2.y);
1367     if (e.line.p1.y == e.line.p2.y)
1368 	e.line.p2.y++; /* Fudge to prevent div-by-zero */
1369 
1370     INPUT_TO_GRID_X (edge->line.p1.x, e.line.p1.x);
1371     INPUT_TO_GRID_X (edge->line.p2.x, e.line.p2.x);
1372 
1373     e.dir = edge->dir;
1374 
1375     polygon_add_edge (converter->polygon, &e);
1376 }
1377 
1378 static void
1379 step_edges (struct active_list *active, int count)
1380 {
1381     struct edge *edge;
1382 
1383     count *= GRID_Y;
1384     for (edge = active->head.next; edge != &active->tail; edge = edge->next) {
1385 	edge->height_left -= count;
1386 	if (! edge->height_left) {
1387 	    edge->prev->next = edge->next;
1388 	    edge->next->prev = edge->prev;
1389 	}
1390     }
1391 }
1392 
1393 static glitter_status_t
1394 blit_a8 (struct cell_list *cells,
1395 	 cairo_span_renderer_t *renderer,
1396 	 cairo_half_open_span_t *spans,
1397 	 int y, int height,
1398 	 int xmin, int xmax)
1399 {
1400     struct cell *cell = cells->head.next;
1401     int prev_x = xmin, last_x = -1;
1402     int16_t cover = 0, last_cover = 0;
1403     unsigned num_spans;
1404 
1405     if (cell == &cells->tail)
1406 	return CAIRO_STATUS_SUCCESS;
1407 
1408     /* Skip cells to the left of the clip region. */
1409     while (cell->x < xmin) {
1410 	cover += cell->covered_height;
1411 	cell = cell->next;
1412     }
1413     cover *= GRID_X*2;
1414 
1415     /* Form the spans from the coverages and areas. */
1416     num_spans = 0;
1417     for (; cell->x < xmax; cell = cell->next) {
1418 	int x = cell->x;
1419 	int16_t area;
1420 
1421 	if (x > prev_x && cover != last_cover) {
1422 	    spans[num_spans].x = prev_x;
1423 	    spans[num_spans].coverage = GRID_AREA_TO_ALPHA (cover);
1424 	    last_cover = cover;
1425 	    last_x = prev_x;
1426 	    ++num_spans;
1427 	}
1428 
1429 	cover += cell->covered_height*GRID_X*2;
1430 	area = cover - cell->uncovered_area;
1431 
1432 	if (area != last_cover) {
1433 	    spans[num_spans].x = x;
1434 	    spans[num_spans].coverage = GRID_AREA_TO_ALPHA (area);
1435 	    last_cover = area;
1436 	    last_x = x;
1437 	    ++num_spans;
1438 	}
1439 
1440 	prev_x = x+1;
1441     }
1442 
1443     if (prev_x <= xmax && cover != last_cover) {
1444 	spans[num_spans].x = prev_x;
1445 	spans[num_spans].coverage = GRID_AREA_TO_ALPHA (cover);
1446 	last_cover = cover;
1447 	last_x = prev_x;
1448 	++num_spans;
1449     }
1450 
1451     if (last_x < xmax && last_cover) {
1452 	spans[num_spans].x = xmax;
1453 	spans[num_spans].coverage = 0;
1454 	++num_spans;
1455     }
1456 
1457     /* Dump them into the renderer. */
1458     return renderer->render_rows (renderer, y, height, spans, num_spans);
1459 }
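/* Continuing the cell_list_add_subspan() example: after GRID_Y identical
 * subsample rows, the cell for pixel 1 holds covered_height = 4 and
 * uncovered_area = 8, so the loop above accumulates cover = 4*GRID_X*2 = 32
 * and emits area = 32 - 8 = 24, i.e. GRID_AREA_TO_ALPHA(24) = 192 (~3/4
 * coverage).  Any pixels strictly between the left and right cells would be
 * emitted as one span carrying the running cover (32 here, i.e. alpha 255). */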
1460 
1461 #define GRID_AREA_TO_A1(A)  ((GRID_AREA_TO_ALPHA (A) > 127) ? 255 : 0)
1462 static glitter_status_t
1463 blit_a1 (struct cell_list *cells,
1464 	 cairo_span_renderer_t *renderer,
1465 	 cairo_half_open_span_t *spans,
1466 	 int y, int height,
1467 	 int xmin, int xmax)
1468 {
1469     struct cell *cell = cells->head.next;
1470     int prev_x = xmin, last_x = -1;
1471     int16_t cover = 0;
1472     uint8_t coverage, last_cover = 0;
1473     unsigned num_spans;
1474 
1475     if (cell == &cells->tail)
1476 	return CAIRO_STATUS_SUCCESS;
1477 
1478     /* Skip cells to the left of the clip region. */
1479     while (cell->x < xmin) {
1480 	cover += cell->covered_height;
1481 	cell = cell->next;
1482     }
1483     cover *= GRID_X*2;
1484 
1485     /* Form the spans from the coverages and areas. */
1486     num_spans = 0;
1487     for (; cell->x < xmax; cell = cell->next) {
1488 	int x = cell->x;
1489 	int16_t area;
1490 
1491 	coverage = GRID_AREA_TO_A1 (cover);
1492 	if (x > prev_x && coverage != last_cover) {
1493 	    last_x = spans[num_spans].x = prev_x;
1494 	    last_cover = spans[num_spans].coverage = coverage;
1495 	    ++num_spans;
1496 	}
1497 
1498 	cover += cell->covered_height*GRID_X*2;
1499 	area = cover - cell->uncovered_area;
1500 
1501 	coverage = GRID_AREA_TO_A1 (area);
1502 	if (coverage != last_cover) {
1503 	    last_x = spans[num_spans].x = x;
1504 	    last_cover = spans[num_spans].coverage = coverage;
1505 	    ++num_spans;
1506 	}
1507 
1508 	prev_x = x+1;
1509     }
1510 
1511     coverage = GRID_AREA_TO_A1 (cover);
1512     if (prev_x <= xmax && coverage != last_cover) {
1513 	last_x = spans[num_spans].x = prev_x;
1514 	last_cover = spans[num_spans].coverage = coverage;
1515 	++num_spans;
1516     }
1517 
1518     if (last_x < xmax && last_cover) {
1519 	spans[num_spans].x = xmax;
1520 	spans[num_spans].coverage = 0;
1521 	++num_spans;
1522     }
1523     if (num_spans == 1)
1524 	return CAIRO_STATUS_SUCCESS;
1525 
1526     /* Dump them into the renderer. */
1527     return renderer->render_rows (renderer, y, height, spans, num_spans);
1528 }
1529 
1530 
1531 I void
1532 glitter_scan_converter_render(glitter_scan_converter_t *converter,
1533 			      unsigned int winding_mask,
1534 			      int antialias,
1535 			      cairo_span_renderer_t *renderer)
1536 {
1537     int i, j;
1538     int ymax_i = converter->ymax / GRID_Y;
1539     int ymin_i = converter->ymin / GRID_Y;
1540     int xmin_i, xmax_i;
1541     int h = ymax_i - ymin_i;
1542     struct polygon *polygon = converter->polygon;
1543     struct cell_list *coverages = converter->coverages;
1544     struct active_list *active = converter->active;
1545     struct edge *buckets[GRID_Y] = { 0 };
1546 
1547     xmin_i = converter->xmin / GRID_X;
1548     xmax_i = converter->xmax / GRID_X;
1549     if (xmin_i >= xmax_i)
1550 	return;
1551 
1552     /* Render each pixel row. */
1553     for (i = 0; i < h; i = j) {
1554 	int do_full_row = 0;
1555 
1556 	j = i + 1;
1557 
1558 	/* Determine if we can ignore this row or use the full pixel
1559 	 * stepper. */
1560 	if (! polygon->y_buckets[i]) {
1561 	    if (active->head.next == &active->tail) {
1562 		active->min_height = INT_MAX;
1563 		active->is_vertical = 1;
1564 		for (; j < h && ! polygon->y_buckets[j]; j++)
1565 		    ;
1566 		continue;
1567 	    }
1568 
1569 	    do_full_row = can_do_full_row (active);
1570 	}
1571 
1572 	if (do_full_row) {
1573 	    /* Step by a full pixel row's worth. */
1574 	    full_row (active, coverages, winding_mask);
1575 
1576 	    if (active->is_vertical) {
1577 		while (j < h &&
1578 		       polygon->y_buckets[j] == NULL &&
1579 		       active->min_height >= 2*GRID_Y)
1580 		{
1581 		    active->min_height -= GRID_Y;
1582 		    j++;
1583 		}
1584 		if (j != i + 1)
1585 		    step_edges (active, j - (i + 1));
1586 	    }
1587 	} else {
1588 	    int sub;
1589 
1590 	    polygon_fill_buckets (active,
1591 				  polygon->y_buckets[i],
1592 				  (i+ymin_i)*GRID_Y,
1593 				  buckets);
1594 
1595 	    /* Subsample this row. */
1596 	    for (sub = 0; sub < GRID_Y; sub++) {
1597 		if (buckets[sub]) {
1598 		    active_list_merge_edges_from_bucket (active, buckets[sub]);
1599 		    buckets[sub] = NULL;
1600 		}
1601 
1602 		sub_row (active, coverages, winding_mask);
1603 	    }
1604 	}
1605 
1606 	if (antialias)
1607 	    blit_a8 (coverages, renderer, converter->spans,
1608 		     i+ymin_i, j-i, xmin_i, xmax_i);
1609 	else
1610 	    blit_a1 (coverages, renderer, converter->spans,
1611 		     i+ymin_i, j-i, xmin_i, xmax_i);
1612 	cell_list_reset (coverages);
1613 
1614 	active->min_height -= GRID_Y;
1615     }
1616 }
1617 
1618 struct _cairo_tor22_scan_converter {
1619     cairo_scan_converter_t base;
1620 
1621     glitter_scan_converter_t converter[1];
1622     cairo_fill_rule_t fill_rule;
1623     cairo_antialias_t antialias;
1624 
1625     jmp_buf jmp;
1626 };
1627 
1628 typedef struct _cairo_tor22_scan_converter cairo_tor22_scan_converter_t;
1629 
1630 static void
1631 _cairo_tor22_scan_converter_destroy (void *converter)
1632 {
1633     cairo_tor22_scan_converter_t *self = converter;
1634     if (self == NULL) {
1635 	return;
1636     }
1637     _glitter_scan_converter_fini (self->converter);
1638     free(self);
1639 }
1640 
1641 cairo_status_t
1642 _cairo_tor22_scan_converter_add_polygon (void		*converter,
1643 				       const cairo_polygon_t *polygon)
1644 {
1645     cairo_tor22_scan_converter_t *self = converter;
1646     int i;
1647 
1648 #if 0
1649     FILE *file = fopen ("polygon.txt", "w");
1650     _cairo_debug_print_polygon (file, polygon);
1651     fclose (file);
1652 #endif
1653 
1654     for (i = 0; i < polygon->num_edges; i++)
1655 	 glitter_scan_converter_add_edge (self->converter, &polygon->edges[i]);
1656 
1657     return CAIRO_STATUS_SUCCESS;
1658 }
1659 
1660 static cairo_status_t
1661 _cairo_tor22_scan_converter_generate (void			*converter,
1662 				    cairo_span_renderer_t	*renderer)
1663 {
1664     cairo_tor22_scan_converter_t *self = converter;
1665     cairo_status_t status;
1666 
1667     if ((status = setjmp (self->jmp)))
1668 	return _cairo_scan_converter_set_error (self, _cairo_error (status));
1669 
1670     glitter_scan_converter_render (self->converter,
1671 				   self->fill_rule == CAIRO_FILL_RULE_WINDING ? ~0 : 1,
1672 				   self->antialias != CAIRO_ANTIALIAS_NONE,
1673 				   renderer);
1674     return CAIRO_STATUS_SUCCESS;
1675 }
1676 
1677 cairo_scan_converter_t *
1678 _cairo_tor22_scan_converter_create (int			xmin,
1679 				  int			ymin,
1680 				  int			xmax,
1681 				  int			ymax,
1682 				  cairo_fill_rule_t	fill_rule,
1683 				  cairo_antialias_t	antialias)
1684 {
1685     cairo_tor22_scan_converter_t *self;
1686     cairo_status_t status;
1687 
1688     self = _cairo_malloc (sizeof(struct _cairo_tor22_scan_converter));
1689     if (unlikely (self == NULL)) {
1690 	status = _cairo_error (CAIRO_STATUS_NO_MEMORY);
1691 	goto bail_nomem;
1692     }
1693 
1694     self->base.destroy = _cairo_tor22_scan_converter_destroy;
1695     self->base.generate = _cairo_tor22_scan_converter_generate;
1696 
1697     _glitter_scan_converter_init (self->converter, &self->jmp);
1698     status = glitter_scan_converter_reset (self->converter,
1699 					   xmin, ymin, xmax, ymax);
1700     if (unlikely (status))
1701 	goto bail;
1702 
1703     self->fill_rule = fill_rule;
1704     self->antialias = antialias;
1705 
1706     return &self->base;
1707 
1708  bail:
1709     self->base.destroy(&self->base);
1710  bail_nomem:
1711     return _cairo_scan_converter_create_in_error (status);
1712 }
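/* A rough sketch of how this scan converter is driven from the rest of
 * cairo (illustrative only):
 *
 *   cairo_scan_converter_t *sc =
 *	_cairo_tor22_scan_converter_create (x0, y0, x1, y1,
 *					    CAIRO_FILL_RULE_WINDING,
 *					    CAIRO_ANTIALIAS_DEFAULT);
 *   status = _cairo_tor22_scan_converter_add_polygon (sc, &polygon);
 *   if (status == CAIRO_STATUS_SUCCESS)
 *	status = sc->generate (sc, renderer);
 *   sc->destroy (sc);
 */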
1713