1 /**************************************************************************
2  *
3  * Copyright 2007-2010 VMware, Inc.
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21  * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 
28 /*
29  * Rasterization for binned triangles within a tile
30  */
31 
32 
33 
34 /**
35  * Prototype for a 8 plane rasterizer function.  Will codegenerate
36  * several of these.
37  *
 * XXX: Variants for more/fewer planes.
39  * XXX: Need ways of dropping planes as we descend.
40  * XXX: SIMD
41  */
/**
 * Rasterize a 4x4 pixel block that is known to be partially covered:
 * evaluate every bound plane's edge function at each pixel (or at each
 * of 4 samples per pixel when MULTISAMPLE is defined) and pass the
 * resulting coverage mask to the fragment shader.
 *
 * \param task   per-thread rasterizer state (also provides the scene's
 *               fixed-point sample positions in the MULTISAMPLE path)
 * \param tri    the binned triangle; only tri->inputs is forwarded here
 * \param plane  array of NR_PLANES edge-function planes (c, dcdx, dcdy, eo)
 * \param x, y   framebuffer position of the block's top-left pixel
 * \param c      edge-function values at (x, y), one per plane
 */
static void
TAG(do_block_4)(struct lp_rasterizer_task *task,
                const struct lp_rast_triangle *tri,
                const struct lp_rast_plane *plane,
                int x, int y,
                const int64_t *c)
{
   int j;
#ifndef MULTISAMPLE
   /* one coverage bit per pixel of the 4x4 block */
   unsigned mask = 0xffff;
#else
   /* 16 bits per sample, 4 samples: 64 coverage bits total */
   uint64_t mask = UINT64_MAX;
#endif

   /* Knock out pixels/samples that fall outside any plane. */
   for (j = 0; j < NR_PLANES; j++) {
#ifndef MULTISAMPLE
#ifdef RASTER_64
      /* Drop the FIXED_ORDER sub-pixel bits so the per-pixel stepping
       * can be done in 32 bits; the -1 turns the ">= 0 inside" test
       * into a plain sign-bit test (see the fixup notes in
       * lp_rast_triangle below).
       */
      mask &= ~BUILD_MASK_LINEAR(((c[j] - 1) >> (int64_t)FIXED_ORDER),
                                 -plane[j].dcdx >> FIXED_ORDER,
                                 plane[j].dcdy >> FIXED_ORDER);
#else
      mask &= ~BUILD_MASK_LINEAR((c[j] - 1),
                                 -plane[j].dcdx,
                                 plane[j].dcdy);
#endif
#else
      /* Evaluate the edge function at each of the 4 sample offsets. */
      for (unsigned s = 0; s < 4; s++) {
         /* Offset c by the sample position projected onto the plane
          * gradient; fixed_sample_pos is in FIXED_ORDER sub-pixel units.
          */
         int64_t new_c = (c[j]) + ((IMUL64(task->scene->fixed_sample_pos[s][1], plane[j].dcdy) + IMUL64(task->scene->fixed_sample_pos[s][0], -plane[j].dcdx)) >> FIXED_ORDER);
         uint32_t build_mask;
#ifdef RASTER_64
         build_mask = BUILD_MASK_LINEAR((int32_t)((new_c - 1) >> (int64_t)FIXED_ORDER),
                                        -plane[j].dcdx >> FIXED_ORDER,
                                        plane[j].dcdy >> FIXED_ORDER);
#else
         build_mask = BUILD_MASK_LINEAR((new_c - 1),
                                        -plane[j].dcdx,
                                        plane[j].dcdy);
#endif
         /* Each sample owns its own 16-bit lane in the 64-bit mask. */
         mask &= ~((uint64_t)build_mask << (s * 16));
      }
#endif
   }

   /* Now pass to the shader:
    */
   if (mask)
      lp_rast_shade_quads_mask_sample(task, &tri->inputs, x, y, mask);
}
90 
91 /**
92  * Evaluate a 16x16 block of pixels to determine which 4x4 subblocks are in/out
93  * of the triangle's bounds.
94  */
/**
 * Evaluate a 16x16 block of pixels to determine which 4x4 subblocks are in/out
 * of the triangle's bounds.
 *
 * Builds two 16-bit masks over the 4x4 grid of subblocks:
 *  - outmask:  subblocks trivially rejected (fully outside some plane)
 *  - partmask: subblocks not trivially accepted by every plane
 * Fully-inside subblocks are shaded via block_full_4(); partially covered
 * ones descend to TAG(do_block_4)().
 *
 * \param c  edge-function values at (x, y), one per plane
 */
static void
TAG(do_block_16)(struct lp_rasterizer_task *task,
                 const struct lp_rast_triangle *tri,
                 const struct lp_rast_plane *plane,
                 int x, int y,
                 const int64_t *c)
{
   unsigned outmask, inmask, partmask, partial_mask;
   unsigned j;

   outmask = 0;                 /* outside one or more trivial reject planes */
   partmask = 0;                /* outside one or more trivial accept planes */

   for (j = 0; j < NR_PLANES; j++) {
#ifdef RASTER_64
      /* 32-bit path: shift away the FIXED_ORDER sub-pixel bits up front
       * (they never affect the sign — see the long comment in
       * lp_rast_triangle), then step in 4-pixel units (<< 2).
       */
      int32_t dcdx = -plane[j].dcdx >> FIXED_ORDER;
      int32_t dcdy = plane[j].dcdy >> FIXED_ORDER;
      const int32_t cox = plane[j].eo >> FIXED_ORDER;
      const int32_t ei = (dcdy + dcdx - cox) << 2;
      const int32_t cox_s = cox << 2;
      const int32_t co = (int32_t)(c[j] >> (int64_t)FIXED_ORDER) + cox_s;
      int32_t cdiff;
      /* cdiff carries the "-1 before shift" adjustment that the plain
       * 64-bit path gets for free; it differs from a simple -1 only when
       * c's low FIXED_ORDER bits are all zero.
       */
      cdiff = ei - cox_s + ((int32_t)((c[j] - 1) >> (int64_t)FIXED_ORDER) -
                            (int32_t)(c[j] >> (int64_t)FIXED_ORDER));
      dcdx <<= 2;
      dcdy <<= 2;
#else
      /* 64-bit path: keep full precision, step in 4-pixel units. */
      const int64_t dcdx = -IMUL64(plane[j].dcdx, 4);
      const int64_t dcdy = IMUL64(plane[j].dcdy, 4);
      const int64_t cox = IMUL64(plane[j].eo, 4);
      const int32_t ei = plane[j].dcdy - plane[j].dcdx - (int64_t)plane[j].eo;
      const int64_t cio = IMUL64(ei, 4) - 1;
      int32_t co, cdiff;
      co = c[j] + cox;
      cdiff = cio - cox;
#endif

      BUILD_MASKS(co, cdiff,
                  dcdx, dcdy,
                  &outmask,   /* sign bits from c[i][0..15] + cox */
                  &partmask); /* sign bits from c[i][0..15] + cio */
   }

   /* Whole 16x16 block rejected by some plane. */
   if (outmask == 0xffff)
      return;

   /* Mask of sub-blocks which are inside all trivial accept planes:
    */
   inmask = ~partmask & 0xffff;

   /* Mask of sub-blocks which are inside all trivial reject planes,
    * but outside at least one trivial accept plane:
    */
   partial_mask = partmask & ~outmask;

   assert((partial_mask & inmask) == 0);

   LP_COUNT_ADD(nr_empty_4, util_bitcount(0xffff & ~(partial_mask | inmask)));

   /* Iterate over partials:
    */
   while (partial_mask) {
      int i = ffs(partial_mask) - 1;
      int ix = (i & 3) * 4;       /* subblock offsets within the 16x16 tile */
      int iy = (i >> 2) * 4;
      int px = x + ix;
      int py = y + iy;
      int64_t cx[NR_PLANES];

      partial_mask &= ~(1 << i);

      LP_COUNT(nr_partially_covered_4);

      /* Translate each plane's edge-function value to the subblock origin. */
      for (j = 0; j < NR_PLANES; j++)
         cx[j] = (c[j]
                  - IMUL64(plane[j].dcdx, ix)
                  + IMUL64(plane[j].dcdy, iy));

      TAG(do_block_4)(task, tri, plane, px, py, cx);
   }

   /* Iterate over fulls:
    */
   while (inmask) {
      int i = ffs(inmask) - 1;
      int ix = (i & 3) * 4;
      int iy = (i >> 2) * 4;
      int px = x + ix;
      int py = y + iy;

      inmask &= ~(1 << i);

      LP_COUNT(nr_fully_covered_4);
      block_full_4(task, tri, px, py);
   }
}
191 
192 
193 /**
194  * Scan the tile in chunks and figure out which pixels to rasterize
195  * for this triangle.
196  */
/**
 * Scan the tile in chunks and figure out which pixels to rasterize
 * for this triangle.
 *
 * Top-level per-tile entry point for a binned triangle.  Compacts the
 * active planes (selected by arg.triangle.plane_mask) into a local
 * array, evaluates each edge function at the tile origin, classifies
 * the 16 16x16 subblocks as empty / partial / full, then recurses into
 * TAG(do_block_16) for partials and block_full_16 for fulls.
 */
void
TAG(lp_rast_triangle)(struct lp_rasterizer_task *task,
                      const union lp_rast_cmd_arg arg)
{
   const struct lp_rast_triangle *tri = arg.triangle.tri;
   unsigned plane_mask = arg.triangle.plane_mask;
   const struct lp_rast_plane *tri_plane = GET_PLANES(tri);
   const int x = task->x, y = task->y;
   struct lp_rast_plane plane[NR_PLANES];  /* compacted active planes */
   int64_t c[NR_PLANES];                   /* edge values at tile origin */
   unsigned outmask, inmask, partmask, partial_mask;
   unsigned j = 0;

   if (tri->inputs.disable) {
      /* This triangle was partially binned and has been disabled */
      return;
   }

   outmask = 0;                 /* outside one or more trivial reject planes */
   partmask = 0;                /* outside one or more trivial accept planes */

   /* Compact the planes named by plane_mask into plane[0..j) and
    * evaluate each at the tile origin.
    */
   while (plane_mask) {
      int i = ffs(plane_mask) - 1;
      plane[j] = tri_plane[i];
      plane_mask &= ~(1 << i);
      c[j] = plane[j].c + IMUL64(plane[j].dcdy, y) - IMUL64(plane[j].dcdx, x);

      {
#ifdef RASTER_64
         /*
          * Strip off lower FIXED_ORDER bits. Note that those bits from
          * dcdx, dcdy, eo are always 0 (by definition).
          * c values, however, are not. This means that for every
          * addition of the form c + n*dcdx the lower FIXED_ORDER bits will
          * NOT change. And those bits are not relevant to the sign bit (which
          * is only what we need!) that is,
          * sign(c + n*dcdx) == sign((c >> FIXED_ORDER) + n*(dcdx >> FIXED_ORDER))
          * This means we can get away with using 32bit math for the most part.
          * Only tricky part is the -1 adjustment for cdiff.
          */
         int32_t dcdx = -plane[j].dcdx >> FIXED_ORDER;
         int32_t dcdy = plane[j].dcdy >> FIXED_ORDER;
         const int32_t cox = plane[j].eo >> FIXED_ORDER;
         const int32_t ei = (dcdy + dcdx - cox) << 4;   /* 16-pixel steps */
         const int32_t cox_s = cox << 4;
         const int32_t co = (int32_t)(c[j] >> (int64_t)FIXED_ORDER) + cox_s;
         int32_t cdiff;
         /*
          * Plausibility check to ensure the 32bit math works.
          * Note that within a tile, the max we can move the edge function
          * is essentially dcdx * TILE_SIZE + dcdy * TILE_SIZE.
          * TILE_SIZE is 64, dcdx/dcdy are nominally 21 bit (for 8192 max size
          * and 8 subpixel bits), I'd be happy with 2 bits more too (1 for
          * increasing fb size to 16384, the required d3d11 value, another one
          * because I'm not quite sure we can't be _just_ above the max value
          * here). This gives us 30 bits max - hence if c would exceed that here
          * that means the plane is either trivial reject for the whole tile
          * (in which case the tri will not get binned), or trivial accept for
          * the whole tile (in which case plane_mask will not include it).
          */
#if 0
         assert((c[j] >> (int64_t)FIXED_ORDER) > (int32_t)0xb0000000 &&
                (c[j] >> (int64_t)FIXED_ORDER) < (int32_t)0x3fffffff);
#endif
         /*
          * Note the fixup part is constant throughout the tile - thus could
          * just calculate this and avoid _all_ 64bit math in rasterization
          * (except exactly this fixup calc).
          * In fact theoretically could move that even to setup, albeit that
          * seems tricky (pre-bin certainly can have values larger than 32bit,
          * and would need to communicate that fixup value through).
          * And if we want to support msaa, we'd probably don't want to do the
          * downscaling in setup in any case...
          */
         cdiff = ei - cox_s + ((int32_t)((c[j] - 1) >> (int64_t)FIXED_ORDER) -
                               (int32_t)(c[j] >> (int64_t)FIXED_ORDER));
         dcdx <<= 4;
         dcdy <<= 4;
#else
         /* 32-bit-clean path: step in 16-pixel units directly. */
         const int32_t dcdx = -plane[j].dcdx << 4;
         const int32_t dcdy = plane[j].dcdy << 4;
         const int32_t cox = plane[j].eo << 4;
         const int32_t ei = plane[j].dcdy - plane[j].dcdx - (int32_t)plane[j].eo;
         const int32_t cio = (ei << 4) - 1;
         int32_t co, cdiff;
         co = c[j] + cox;
         cdiff = cio - cox;
#endif
         BUILD_MASKS(co, cdiff,
                     dcdx, dcdy,
                     &outmask,   /* sign bits from c[i][0..15] + cox */
                     &partmask); /* sign bits from c[i][0..15] + cio */
      }

      j++;
   }

   /* Whole tile rejected by some plane. */
   if (outmask == 0xffff)
      return;

   /* Mask of sub-blocks which are inside all trivial accept planes:
    */
   inmask = ~partmask & 0xffff;

   /* Mask of sub-blocks which are inside all trivial reject planes,
    * but outside at least one trivial accept plane:
    */
   partial_mask = partmask & ~outmask;

   assert((partial_mask & inmask) == 0);

   LP_COUNT_ADD(nr_empty_16, util_bitcount(0xffff & ~(partial_mask | inmask)));

   /* Iterate over partials:
    */
   while (partial_mask) {
      int i = ffs(partial_mask) - 1;
      int ix = (i & 3) * 16;      /* 16x16 subblock offsets within the tile */
      int iy = (i >> 2) * 16;
      int px = x + ix;
      int py = y + iy;
      int64_t cx[NR_PLANES];

      /* Translate edge-function values to the subblock origin. */
      for (j = 0; j < NR_PLANES; j++)
         cx[j] = (c[j]
                  - IMUL64(plane[j].dcdx, ix)
                  + IMUL64(plane[j].dcdy, iy));

      partial_mask &= ~(1 << i);

      LP_COUNT(nr_partially_covered_16);
      TAG(do_block_16)(task, tri, plane, px, py, cx);
   }

   /* Iterate over fulls:
    */
   while (inmask) {
      int i = ffs(inmask) - 1;
      int ix = (i & 3) * 16;
      int iy = (i >> 2) * 16;
      int px = x + ix;
      int py = y + iy;

      inmask &= ~(1 << i);

      LP_COUNT(nr_fully_covered_16);
      block_full_16(task, tri, px, py);
   }
}
346 
347 #if defined(PIPE_ARCH_SSE) && defined(TRI_16)
348 /* XXX: special case this when intersection is not required.
349  *      - tile completely within bbox,
350  *      - bbox completely within tile.
351  */
/**
 * SSE fast path for a small triangle confined to one 16x16 region.
 *
 * The block's position within the tile is packed into
 * arg.triangle.plane_mask (low byte = x offset, next byte = y offset).
 * Builds per-plane SSE step tables for a 4x4 pixel grid, trivially
 * rejects 4x4 subblocks against the tile edge and each plane's "eo"
 * corner, then shades each surviving subblock with a per-pixel mask.
 *
 * NOTE(review): assumes a 64-pixel tile — the `>= 64` clamps below
 * encode TILE_SIZE; confirm against the includer's definition.
 */
void
TRI_16(struct lp_rasterizer_task *task,
       const union lp_rast_cmd_arg arg)
{
   const struct lp_rast_triangle *tri = arg.triangle.tri;
   const struct lp_rast_plane *plane = GET_PLANES(tri);
   unsigned mask = arg.triangle.plane_mask;
   unsigned outmask, partial_mask;
   unsigned j;
   __m128i cstep4[NR_PLANES][4];   /* per-row edge-function deltas, 4x4 grid */

   /* Unpack the block offset that was smuggled through plane_mask. */
   int x = (mask & 0xff);
   int y = (mask >> 8);

   outmask = 0;                 /* outside one or more trivial reject planes */

   /* Reject 4x4 subblocks that hang off the right/bottom tile edge. */
   if (x + 12 >= 64) {
      int i = ((x + 12) - 64) / 4;
      outmask |= right_mask_tab[i];
   }

   if (y + 12 >= 64) {
      int i = ((y + 12) - 64) / 4;
      outmask |= bottom_mask_tab[i];
   }

   /* Convert to framebuffer coordinates. */
   x += task->x;
   y += task->y;

   for (j = 0; j < NR_PLANES; j++) {
      const int dcdx = -plane[j].dcdx * 4;
      const int dcdy = plane[j].dcdy * 4;
      __m128i xdcdy = _mm_set1_epi32(dcdy);

      /* cstep4[j][row] holds the edge-function offsets of the 4 pixels
       * in that row, relative to the subblock origin.
       */
      cstep4[j][0] = _mm_setr_epi32(0, dcdx, dcdx*2, dcdx*3);
      cstep4[j][1] = _mm_add_epi32(cstep4[j][0], xdcdy);
      cstep4[j][2] = _mm_add_epi32(cstep4[j][1], xdcdy);
      cstep4[j][3] = _mm_add_epi32(cstep4[j][2], xdcdy);

      {
	 const int c = plane[j].c + plane[j].dcdy * y - plane[j].dcdx * x;
	 const int cox = plane[j].eo * 4;   /* trivial-reject corner offset */

	 outmask |= sign_bits4(cstep4[j], c + cox);
      }
   }

   if (outmask == 0xffff)
      return;


   /* Mask of sub-blocks which are inside all trivial reject planes,
    * but outside at least one trivial accept plane:
    */
   partial_mask = 0xffff & ~outmask;

   /* Iterate over partials:
    */
   while (partial_mask) {
      int i = ffs(partial_mask) - 1;
      int ix = (i & 3) * 4;
      int iy = (i >> 2) * 4;
      int px = x + ix;
      int py = y + iy;
      unsigned mask = 0xffff;   /* shadows outer `mask` intentionally */

      partial_mask &= ~(1 << i);

      for (j = 0; j < NR_PLANES; j++) {
         /* c-1 so "inside" (>= 0) becomes a pure sign-bit test; *4 keeps
          * the value in the same scale as the cstep4 tables.
          */
         const int cx = (plane[j].c - 1
			 - plane[j].dcdx * px
			 + plane[j].dcdy * py) * 4;

	 mask &= ~sign_bits4(cstep4[j], cx);
      }

      if (mask)
	 lp_rast_shade_quads_mask(task, &tri->inputs, px, py, mask);
   }
}
432 #endif
433 
434 #if defined(PIPE_ARCH_SSE) && defined(TRI_4)
/**
 * SSE fast path for a tiny triangle confined to a single 4x4 block.
 *
 * As with TRI_16, arg.triangle.plane_mask packs the block's offset
 * within the tile (low byte = x, next byte = y).  Each plane's edge
 * function is evaluated at all 16 pixels with SSE, the sign bits are
 * gathered via pack + movemask, and the combined coverage mask is
 * handed straight to the shader — no trivial accept/reject pass.
 */
void
TRI_4(struct lp_rasterizer_task *task,
      const union lp_rast_cmd_arg arg)
{
   const struct lp_rast_triangle *tri = arg.triangle.tri;
   const struct lp_rast_plane *plane = GET_PLANES(tri);
   unsigned mask = arg.triangle.plane_mask;
   /* Unpack the block offset and convert to framebuffer coordinates. */
   const int x = task->x + (mask & 0xff);
   const int y = task->y + (mask >> 8);
   unsigned j;

   /* Iterate over partials:
    */
   {
      unsigned mask = 0xffff;   /* shadows outer `mask` intentionally */

      for (j = 0; j < NR_PLANES; j++) {
	 const int cx = (plane[j].c
			 - plane[j].dcdx * x
			 + plane[j].dcdy * y);

	 const int dcdx = -plane[j].dcdx;
	 const int dcdy = plane[j].dcdy;
	 __m128i xdcdy = _mm_set1_epi32(dcdy);

	 /* cstepN = edge-function values for row N's four pixels. */
	 __m128i cstep0 = _mm_setr_epi32(cx, cx + dcdx, cx + dcdx*2, cx + dcdx*3);
	 __m128i cstep1 = _mm_add_epi32(cstep0, xdcdy);
	 __m128i cstep2 = _mm_add_epi32(cstep1, xdcdy);
	 __m128i cstep3 = _mm_add_epi32(cstep2, xdcdy);

	 /* Saturating packs 32->16->8 bits preserve the sign bit, so
	  * movemask yields one inside/outside bit per pixel.
	  */
	 __m128i cstep01 = _mm_packs_epi32(cstep0, cstep1);
	 __m128i cstep23 = _mm_packs_epi32(cstep2, cstep3);
	 __m128i result = _mm_packs_epi16(cstep01, cstep23);

	 /* Extract the sign bits
	  */
	 mask &= ~_mm_movemask_epi8(result);
      }

      if (mask)
	 lp_rast_shade_quads_mask(task, &tri->inputs, x, y, mask);
   }
}
478 #endif
479 
480 
481 
482 #undef TAG
483 #undef TRI_4
484 #undef TRI_16
485 #undef NR_PLANES
486 
487