/**************************************************************************
 *
 * Copyright 2007 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/*
 * Binning code for triangles
 */

#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/u_rect.h"
#include "util/u_sse.h"
#include "lp_perf.h"
#include "lp_setup_context.h"
#include "lp_rast.h"
#include "lp_state_fs.h"
#include "lp_state_setup.h"
#include "lp_context.h"

#include <inttypes.h>

#define NUM_CHANNELS 4

#if defined(PIPE_ARCH_SSE)
#include <emmintrin.h>
#elif defined(_ARCH_PWR8) && UTIL_ARCH_LITTLE_ENDIAN
#ifdef __clang__
#undef vector
#undef pixel
#undef bool
#endif
#include <altivec.h>
#include "util/u_pwr8.h"
#endif

#if !defined(PIPE_ARCH_SSE)

static inline int
subpixel_snap(float a)
{
   return util_iround(FIXED_ONE * a);
}

#endif

/* Position and area in fixed point coordinates */
struct fixed_position {
   int32_t x[4];
   int32_t y[4];
   int32_t dx01;
   int32_t dy01;
   int32_t dx20;
   int32_t dy20;
   int64_t area;
};
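
/*
 * Illustrative note (assuming FIXED_ORDER == 8, i.e. FIXED_ONE == 256):
 * the coordinates above are window-space positions snapped to a signed
 * fixed-point grid with FIXED_ORDER fractional bits, so a vertex at
 * x = 1.5 snaps to subpixel_snap(1.5) = 384.  The 64-bit 'area' field
 * holds the edge cross product, i.e. twice the signed triangle area in
 * those subpixel units, so its sign alone gives the winding direction.
 */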


/**
 * Alloc space for a new triangle plus the input.a0/dadx/dady arrays
 * immediately after it.
 * The memory is allocated from the per-scene pool, not per-tile.
 * \param nr_inputs  number of fragment shader inputs
 * \param tri_size  returns number of bytes allocated
 * \return pointer to triangle space
 */
struct lp_rast_triangle *
lp_setup_alloc_triangle(struct lp_scene *scene,
                        unsigned nr_inputs,
                        unsigned nr_planes,
                        unsigned *tri_size)
{
   unsigned input_array_sz = NUM_CHANNELS * (nr_inputs + 1) * sizeof(float);
   unsigned plane_sz = nr_planes * sizeof(struct lp_rast_plane);
   struct lp_rast_triangle *tri;

   STATIC_ASSERT(sizeof(struct lp_rast_plane) % 8 == 0);

   *tri_size = (sizeof(struct lp_rast_triangle) +
                3 * input_array_sz +
                plane_sz);

   tri = lp_scene_alloc_aligned( scene, *tri_size, 16 );
   if (!tri)
      return NULL;

   tri->inputs.stride = input_array_sz;

   {
      char *a = (char *)tri;
      char *b = (char *)&GET_PLANES(tri)[nr_planes];
      assert(b - a == *tri_size);
   }

   return tri;
}
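
/*
 * Illustrative sketch of the layout produced above (an assumption drawn
 * from the size computation and the GET_A0/GET_DADX/GET_DADY/GET_PLANES
 * accessors, not an authoritative description):
 *
 *    struct lp_rast_triangle   header
 *    float a0  [nr_inputs+1][4]   interpolant values    (input_array_sz)
 *    float dadx[nr_inputs+1][4]   d/dx of interpolants  (input_array_sz)
 *    float dady[nr_inputs+1][4]   d/dy of interpolants  (input_array_sz)
 *    struct lp_rast_plane planes[nr_planes]
 */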

void
lp_setup_print_vertex(struct lp_setup_context *setup,
                      const char *name,
                      const float (*v)[4])
{
   const struct lp_setup_variant_key *key = &setup->setup.variant->key;
   int i, j;

   debug_printf(" wpos (%s[0]) xyzw %f %f %f %f\n",
                name,
                v[0][0], v[0][1], v[0][2], v[0][3]);

   for (i = 0; i < key->num_inputs; i++) {
      const float *in = v[key->inputs[i].src_index];

      debug_printf(" in[%d] (%s[%d]) %s%s%s%s ",
                   i,
                   name, key->inputs[i].src_index,
                   (key->inputs[i].usage_mask & 0x1) ? "x" : " ",
                   (key->inputs[i].usage_mask & 0x2) ? "y" : " ",
                   (key->inputs[i].usage_mask & 0x4) ? "z" : " ",
                   (key->inputs[i].usage_mask & 0x8) ? "w" : " ");

      for (j = 0; j < 4; j++)
         if (key->inputs[i].usage_mask & (1<<j))
            debug_printf("%.5f ", in[j]);

      debug_printf("\n");
   }
}


/**
 * Print triangle vertex attribs (for debug).
 */
void
lp_setup_print_triangle(struct lp_setup_context *setup,
                        const float (*v0)[4],
                        const float (*v1)[4],
                        const float (*v2)[4])
{
   debug_printf("triangle\n");

   {
      const float ex = v0[0][0] - v2[0][0];
      const float ey = v0[0][1] - v2[0][1];
      const float fx = v1[0][0] - v2[0][0];
      const float fy = v1[0][1] - v2[0][1];

      /* det = cross(e,f).z */
      const float det = ex * fy - ey * fx;
      if (det < 0.0f)
         debug_printf(" - ccw\n");
      else if (det > 0.0f)
         debug_printf(" - cw\n");
      else
         debug_printf(" - zero area\n");
   }

   lp_setup_print_vertex(setup, "v0", v0);
   lp_setup_print_vertex(setup, "v1", v1);
   lp_setup_print_vertex(setup, "v2", v2);
}


#define MAX_PLANES 8
static unsigned
lp_rast_tri_tab[MAX_PLANES+1] = {
   0,               /* should be impossible */
   LP_RAST_OP_TRIANGLE_1,
   LP_RAST_OP_TRIANGLE_2,
   LP_RAST_OP_TRIANGLE_3,
   LP_RAST_OP_TRIANGLE_4,
   LP_RAST_OP_TRIANGLE_5,
   LP_RAST_OP_TRIANGLE_6,
   LP_RAST_OP_TRIANGLE_7,
   LP_RAST_OP_TRIANGLE_8
};

static unsigned
lp_rast_32_tri_tab[MAX_PLANES+1] = {
   0,               /* should be impossible */
   LP_RAST_OP_TRIANGLE_32_1,
   LP_RAST_OP_TRIANGLE_32_2,
   LP_RAST_OP_TRIANGLE_32_3,
   LP_RAST_OP_TRIANGLE_32_4,
   LP_RAST_OP_TRIANGLE_32_5,
   LP_RAST_OP_TRIANGLE_32_6,
   LP_RAST_OP_TRIANGLE_32_7,
   LP_RAST_OP_TRIANGLE_32_8
};

static unsigned
lp_rast_ms_tri_tab[MAX_PLANES+1] = {
   0,               /* should be impossible */
   LP_RAST_OP_MS_TRIANGLE_1,
   LP_RAST_OP_MS_TRIANGLE_2,
   LP_RAST_OP_MS_TRIANGLE_3,
   LP_RAST_OP_MS_TRIANGLE_4,
   LP_RAST_OP_MS_TRIANGLE_5,
   LP_RAST_OP_MS_TRIANGLE_6,
   LP_RAST_OP_MS_TRIANGLE_7,
   LP_RAST_OP_MS_TRIANGLE_8
};

/**
 * The primitive covers the whole tile - shade the whole tile.
 *
 * \param tx, ty  the tile position in tiles, not pixels
 */
static boolean
lp_setup_whole_tile(struct lp_setup_context *setup,
                    const struct lp_rast_shader_inputs *inputs,
                    int tx, int ty)
{
   struct lp_scene *scene = setup->scene;

   LP_COUNT(nr_fully_covered_64);

   /* if variant is opaque and scissor doesn't affect the tile */
   if (inputs->opaque) {
      /* Several things prevent this optimization from working:
       * - For layered rendering we can't determine if this covers the same
       *   layer as previous rendering (and in the case of clears, those
       *   actually always cover all layers, so the optimization is
       *   impossible).  Need to use fb_max_layer and not setup->layer_slot
       *   to determine this, since even if there's currently no slot
       *   assigned, previous rendering could have used one.
       * - If there were any Begin/End query commands in the scene then those
       *   would get removed, which would be very wrong.  Furthermore, if
       *   queries were just active we also can't do the optimization, since
       *   to get accurate query results we unfortunately need to execute the
       *   rendering commands.
       */
      if (!scene->fb.zsbuf && scene->fb_max_layer == 0 && !scene->had_queries) {
         /*
          * All previous rendering will be overwritten so reset the bin.
          */
         lp_scene_bin_reset( scene, tx, ty );
      }

      LP_COUNT(nr_shade_opaque_64);
      return lp_scene_bin_cmd_with_state( scene, tx, ty,
                                          setup->fs.stored,
                                          LP_RAST_OP_SHADE_TILE_OPAQUE,
                                          lp_rast_arg_inputs(inputs) );
   } else {
      LP_COUNT(nr_shade_64);
      return lp_scene_bin_cmd_with_state( scene, tx, ty,
                                          setup->fs.stored,
                                          LP_RAST_OP_SHADE_TILE,
                                          lp_rast_arg_inputs(inputs) );
   }
}


/**
 * Do basic setup for triangle rasterization and determine which
 * framebuffer tiles are touched.  Put the triangle in the scene's
 * bins for the tiles which we overlap.
 */
static boolean
do_triangle_ccw(struct lp_setup_context *setup,
                struct fixed_position *position,
                const float (*v0)[4],
                const float (*v1)[4],
                const float (*v2)[4],
                boolean frontfacing )
{
   struct lp_scene *scene = setup->scene;
   const struct lp_setup_variant_key *key = &setup->setup.variant->key;
   struct lp_rast_triangle *tri;
   struct lp_rast_plane *plane;
   const struct u_rect *scissor = NULL;
   struct u_rect bbox, bboxpos;
   boolean s_planes[4];
   unsigned tri_bytes;
   int nr_planes = 3;
   unsigned viewport_index = 0;
   unsigned layer = 0;
   const float (*pv)[4];

   /* Area should always be positive here */
   assert(position->area > 0);

   if (0)
      lp_setup_print_triangle(setup, v0, v1, v2);

   if (setup->flatshade_first) {
      pv = v0;
   }
   else {
      pv = v2;
   }
   if (setup->viewport_index_slot > 0) {
      unsigned *udata = (unsigned*)pv[setup->viewport_index_slot];
      viewport_index = lp_clamp_viewport_idx(*udata);
   }
   if (setup->layer_slot > 0) {
      layer = *(unsigned*)pv[setup->layer_slot];
      layer = MIN2(layer, scene->fb_max_layer);
   }

   /* Bounding rectangle (in pixels) */
   {
      /* Yes this is necessary to accurately calculate bounding boxes
       * with the two fill-conventions we support.  GL (normally) ends
       * up needing a bottom-left fill convention, which requires
       * slightly different rounding.
       */
      int adj = (setup->bottom_edge_rule != 0) ? 1 : 0;

      /* Inclusive x0, exclusive x1 */
      bbox.x0 =  MIN3(position->x[0], position->x[1], position->x[2]) >> FIXED_ORDER;
      bbox.x1 = (MAX3(position->x[0], position->x[1], position->x[2]) - 1) >> FIXED_ORDER;

      /* Inclusive / exclusive depending upon adj (bottom-left or top-right) */
      bbox.y0 = (MIN3(position->y[0], position->y[1], position->y[2]) + adj) >> FIXED_ORDER;
      bbox.y1 = (MAX3(position->y[0], position->y[1], position->y[2]) - 1 + adj) >> FIXED_ORDER;
   }
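
   /*
    * Worked example (illustrative, assuming FIXED_ORDER == 8): a triangle
    * whose x coordinates span 2.0..5.0 pixels has fixed-point extents
    * 512..1280, giving bbox.x0 = 512 >> 8 = 2 and
    * bbox.x1 = (1280 - 1) >> 8 = 4, i.e. pixel columns 2..4 inclusive;
    * an edge landing exactly on x = 5.0 does not pull in column 5.
    * The 'adj' term applies the analogous one-subpixel shift in y for the
    * bottom-left fill convention.
    */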
339
340 if (bbox.x1 < bbox.x0 ||
341 bbox.y1 < bbox.y0) {
342 if (0) debug_printf("empty bounding box\n");
343 LP_COUNT(nr_culled_tris);
344 return TRUE;
345 }
346
347 if (!u_rect_test_intersection(&setup->draw_regions[viewport_index], &bbox)) {
348 if (0) debug_printf("offscreen\n");
349 LP_COUNT(nr_culled_tris);
350 return TRUE;
351 }
352
353 bboxpos = bbox;
354
355 /* Can safely discard negative regions, but need to keep hold of
356 * information about when the triangle extends past screen
357 * boundaries. See trimmed_box in lp_setup_bin_triangle().
358 */
359 bboxpos.x0 = MAX2(bboxpos.x0, 0);
360 bboxpos.y0 = MAX2(bboxpos.y0, 0);
361
362 nr_planes = 3;
363 /*
364 * Determine how many scissor planes we need, that is drop scissor
365 * edges if the bounding box of the tri is fully inside that edge.
366 */
367 if (setup->scissor_test) {
368 /* why not just use draw_regions */
369 scissor = &setup->scissors[viewport_index];
370 scissor_planes_needed(s_planes, &bboxpos, scissor);
371 nr_planes += s_planes[0] + s_planes[1] + s_planes[2] + s_planes[3];
372 } else {
373 scissor = &setup->draw_regions[viewport_index];
374 scissor_planes_needed(s_planes, &bboxpos, scissor);
375 nr_planes += s_planes[0] + s_planes[1] + s_planes[2] + s_planes[3];
376 }
377
378 tri = lp_setup_alloc_triangle(scene,
379 key->num_inputs,
380 nr_planes,
381 &tri_bytes);
382 if (!tri)
383 return FALSE;
384
385 #ifdef DEBUG
386 tri->v[0][0] = v0[0][0];
387 tri->v[1][0] = v1[0][0];
388 tri->v[2][0] = v2[0][0];
389 tri->v[0][1] = v0[0][1];
390 tri->v[1][1] = v1[0][1];
391 tri->v[2][1] = v2[0][1];
392 #endif
393
394 LP_COUNT(nr_tris);
395
396 /* Setup parameter interpolants:
397 */
398 setup->setup.variant->jit_function(v0, v1, v2,
399 frontfacing,
400 GET_A0(&tri->inputs),
401 GET_DADX(&tri->inputs),
402 GET_DADY(&tri->inputs));
403
404 tri->inputs.frontfacing = frontfacing;
405 tri->inputs.disable = FALSE;
406 tri->inputs.opaque = setup->fs.current.variant->opaque;
407 tri->inputs.layer = layer;
408 tri->inputs.viewport_index = viewport_index;
409
410 if (0)
411 lp_dump_setup_coef(&setup->setup.variant->key,
412 (const float (*)[4])GET_A0(&tri->inputs),
413 (const float (*)[4])GET_DADX(&tri->inputs),
414 (const float (*)[4])GET_DADY(&tri->inputs));
415
416 plane = GET_PLANES(tri);

#if defined(PIPE_ARCH_SSE)
   if (1) {
      __m128i vertx, verty;
      __m128i shufx, shufy;
      __m128i dcdx, dcdy;
      __m128i cdx02, cdx13, cdy02, cdy13, c02, c13;
      __m128i c01, c23, unused;
      __m128i dcdx_neg_mask;
      __m128i dcdy_neg_mask;
      __m128i dcdx_zero_mask;
      __m128i top_left_flag, c_dec;
      __m128i eo, p0, p1, p2;
      __m128i zero = _mm_setzero_si128();

      vertx = _mm_load_si128((__m128i *)position->x); /* vertex x coords */
      verty = _mm_load_si128((__m128i *)position->y); /* vertex y coords */

      shufx = _mm_shuffle_epi32(vertx, _MM_SHUFFLE(3,0,2,1));
      shufy = _mm_shuffle_epi32(verty, _MM_SHUFFLE(3,0,2,1));

      dcdx = _mm_sub_epi32(verty, shufy);
      dcdy = _mm_sub_epi32(vertx, shufx);

      dcdx_neg_mask = _mm_srai_epi32(dcdx, 31);
      dcdx_zero_mask = _mm_cmpeq_epi32(dcdx, zero);
      dcdy_neg_mask = _mm_srai_epi32(dcdy, 31);

      top_left_flag = _mm_set1_epi32((setup->bottom_edge_rule == 0) ? ~0 : 0);

      c_dec = _mm_or_si128(dcdx_neg_mask,
                           _mm_and_si128(dcdx_zero_mask,
                                         _mm_xor_si128(dcdy_neg_mask,
                                                       top_left_flag)));

      /*
       * 64 bit arithmetic.
       * Note we need _signed_ mul (_mm_mul_epi32) which we emulate.
       */
      cdx02 = mm_mullohi_epi32(dcdx, vertx, &cdx13);
      cdy02 = mm_mullohi_epi32(dcdy, verty, &cdy13);
      c02 = _mm_sub_epi64(cdx02, cdy02);
      c13 = _mm_sub_epi64(cdx13, cdy13);
      c02 = _mm_sub_epi64(c02, _mm_shuffle_epi32(c_dec,
                                                 _MM_SHUFFLE(2,2,0,0)));
      c13 = _mm_sub_epi64(c13, _mm_shuffle_epi32(c_dec,
                                                 _MM_SHUFFLE(3,3,1,1)));

      /*
       * Useful for very small fbs/tris (or fewer subpixel bits) only:
       * c = _mm_sub_epi32(mm_mullo_epi32(dcdx, vertx),
       *                   mm_mullo_epi32(dcdy, verty));
       *
       * c = _mm_sub_epi32(c, c_dec);
       */

      /* Scale up to match c:
       */
      dcdx = _mm_slli_epi32(dcdx, FIXED_ORDER);
      dcdy = _mm_slli_epi32(dcdy, FIXED_ORDER);

      /*
       * Calculate trivial reject values:
       * Note eo cannot overflow even if dcdx/dcdy would already have
       * 31 bits (which they shouldn't have).  This is because eo
       * is never negative (albeit if we rely on that we need to be careful...)
       */
      eo = _mm_sub_epi32(_mm_andnot_si128(dcdy_neg_mask, dcdy),
                         _mm_and_si128(dcdx_neg_mask, dcdx));

      /* ei = _mm_sub_epi32(_mm_sub_epi32(dcdy, dcdx), eo); */

      /*
       * Pointless transpose which gets undone immediately in
       * rasterization.
       * It is actually difficult to do away with it - would essentially
       * need GET_PLANES_DX, GET_PLANES_DY etc., but the calculations
       * for this then would need to depend on the number of planes.
       * The transpose is quite special here due to c being 64bit...
       * The store has to be unaligned (unless we'd make the plane size
       * a multiple of 128), and of course storing eo separately...
       */
      c01 = _mm_unpacklo_epi64(c02, c13);
      c23 = _mm_unpackhi_epi64(c02, c13);
      transpose2_64_2_32(&c01, &c23, &dcdx, &dcdy,
                         &p0, &p1, &p2, &unused);
      _mm_storeu_si128((__m128i *)&plane[0], p0);
      plane[0].eo = (uint32_t)_mm_cvtsi128_si32(eo);
      _mm_storeu_si128((__m128i *)&plane[1], p1);
      eo = _mm_shuffle_epi32(eo, _MM_SHUFFLE(3,2,0,1));
      plane[1].eo = (uint32_t)_mm_cvtsi128_si32(eo);
      _mm_storeu_si128((__m128i *)&plane[2], p2);
      eo = _mm_shuffle_epi32(eo, _MM_SHUFFLE(0,0,0,2));
      plane[2].eo = (uint32_t)_mm_cvtsi128_si32(eo);
   } else
#elif defined(_ARCH_PWR8) && UTIL_ARCH_LITTLE_ENDIAN
   /*
    * XXX this code is effectively disabled for all practical purposes,
    * as the allowed fb size is tiny if FIXED_ORDER is 8.
    */
   if (setup->fb.width <= MAX_FIXED_LENGTH32 &&
       setup->fb.height <= MAX_FIXED_LENGTH32 &&
       (bbox.x1 - bbox.x0) <= MAX_FIXED_LENGTH32 &&
       (bbox.y1 - bbox.y0) <= MAX_FIXED_LENGTH32) {
      unsigned int bottom_edge;
      __m128i vertx, verty;
      __m128i shufx, shufy;
      __m128i dcdx, dcdy, c;
      __m128i unused;
      __m128i dcdx_neg_mask;
      __m128i dcdy_neg_mask;
      __m128i dcdx_zero_mask;
      __m128i top_left_flag;
      __m128i c_inc_mask, c_inc;
      __m128i eo, p0, p1, p2;
      __m128i_union vshuf_mask;
      __m128i zero = vec_splats((unsigned char) 0);
      PIPE_ALIGN_VAR(16) int32_t temp_vec[4];

#if UTIL_ARCH_LITTLE_ENDIAN
      vshuf_mask.i[0] = 0x07060504;
      vshuf_mask.i[1] = 0x0B0A0908;
      vshuf_mask.i[2] = 0x03020100;
      vshuf_mask.i[3] = 0x0F0E0D0C;
#else
      vshuf_mask.i[0] = 0x00010203;
      vshuf_mask.i[1] = 0x0C0D0E0F;
      vshuf_mask.i[2] = 0x04050607;
      vshuf_mask.i[3] = 0x08090A0B;
#endif

      /* vertex x coords */
      vertx = vec_load_si128((const uint32_t *) position->x);
      /* vertex y coords */
      verty = vec_load_si128((const uint32_t *) position->y);

      shufx = vec_perm (vertx, vertx, vshuf_mask.m128i);
      shufy = vec_perm (verty, verty, vshuf_mask.m128i);

      dcdx = vec_sub_epi32(verty, shufy);
      dcdy = vec_sub_epi32(vertx, shufx);

      dcdx_neg_mask = vec_srai_epi32(dcdx, 31);
      dcdx_zero_mask = vec_cmpeq_epi32(dcdx, zero);
      dcdy_neg_mask = vec_srai_epi32(dcdy, 31);

      bottom_edge = (setup->bottom_edge_rule == 0) ? ~0 : 0;
      top_left_flag = (__m128i) vec_splats(bottom_edge);

      c_inc_mask = vec_or(dcdx_neg_mask,
                          vec_and(dcdx_zero_mask,
                                  vec_xor(dcdy_neg_mask,
                                          top_left_flag)));

      c_inc = vec_srli_epi32(c_inc_mask, 31);

      c = vec_sub_epi32(vec_mullo_epi32(dcdx, vertx),
                        vec_mullo_epi32(dcdy, verty));

      c = vec_add_epi32(c, c_inc);

      /* Scale up to match c:
       */
      dcdx = vec_slli_epi32(dcdx, FIXED_ORDER);
      dcdy = vec_slli_epi32(dcdy, FIXED_ORDER);

      /* Calculate trivial reject values:
       */
      eo = vec_sub_epi32(vec_andnot_si128(dcdy_neg_mask, dcdy),
                         vec_and(dcdx_neg_mask, dcdx));

      /* ei = _mm_sub_epi32(_mm_sub_epi32(dcdy, dcdx), eo); */

      /* Pointless transpose which gets undone immediately in
       * rasterization:
       */
      transpose4_epi32(&c, &dcdx, &dcdy, &eo,
                       &p0, &p1, &p2, &unused);

#define STORE_PLANE(plane, vec) do {                  \
         vec_store_si128((uint32_t *)&temp_vec, vec); \
         plane.c = (int64_t)temp_vec[0];              \
         plane.dcdx = temp_vec[1];                    \
         plane.dcdy = temp_vec[2];                    \
         plane.eo = temp_vec[3];                      \
      } while(0)

      STORE_PLANE(plane[0], p0);
      STORE_PLANE(plane[1], p1);
      STORE_PLANE(plane[2], p2);
#undef STORE_PLANE
   } else
#endif
   {
      int i;
      plane[0].dcdy = position->dx01;
      plane[1].dcdy = position->x[1] - position->x[2];
      plane[2].dcdy = position->dx20;
      plane[0].dcdx = position->dy01;
      plane[1].dcdx = position->y[1] - position->y[2];
      plane[2].dcdx = position->dy20;

      for (i = 0; i < 3; i++) {
         /* half-edge constants, will be iterated over the whole render
          * target.
          */
         plane[i].c = IMUL64(plane[i].dcdx, position->x[i]) -
                      IMUL64(plane[i].dcdy, position->y[i]);

         /* correct for top-left vs. bottom-left fill convention.
          */
         if (plane[i].dcdx < 0) {
            /* both fill conventions want this - adjust for left edges */
            plane[i].c++;
         }
         else if (plane[i].dcdx == 0) {
            if (setup->bottom_edge_rule == 0) {
               /* correct for top-left fill convention:
                */
               if (plane[i].dcdy > 0) plane[i].c++;
            }
            else {
               /* correct for bottom-left fill convention:
                */
               if (plane[i].dcdy < 0) plane[i].c++;
            }
         }

         /* Scale up to match c:
          */
         assert((plane[i].dcdx << FIXED_ORDER) >> FIXED_ORDER == plane[i].dcdx);
         assert((plane[i].dcdy << FIXED_ORDER) >> FIXED_ORDER == plane[i].dcdy);
         plane[i].dcdx <<= FIXED_ORDER;
         plane[i].dcdy <<= FIXED_ORDER;

         /* find trivial reject offsets for each edge for a single-pixel
          * sized block.  These will be scaled up at each recursive level to
          * match the active blocksize.  Scaling in this way works best if
          * the blocks are square.
          */
         plane[i].eo = 0;
         if (plane[i].dcdx < 0) plane[i].eo -= plane[i].dcdx;
         if (plane[i].dcdy > 0) plane[i].eo += plane[i].dcdy;
      }
   }
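
   /*
    * Worked example for one edge (illustrative, assuming FIXED_ORDER == 8):
    * suppose dcdx = -2 and dcdy = 3 before the shift.  The left-edge rule
    * (dcdx < 0) bumps c by one, and after the shift dcdx = -512,
    * dcdy = 768.  Stepping one pixel in +x changes the edge value by
    * -dcdx = +512 and one pixel in +y by +dcdy = +768, so eo accumulates
    * only the increases: eo = 512 + 768 = 1280.  c + eo is then an upper
    * bound on the edge value over a single-pixel block, and the
    * trivial-reject test discards a block when even that bound is negative.
    */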

   if (0) {
      debug_printf("p0: %"PRIx64"/%08x/%08x/%08x\n",
                   plane[0].c,
                   plane[0].dcdx,
                   plane[0].dcdy,
                   plane[0].eo);

      debug_printf("p1: %"PRIx64"/%08x/%08x/%08x\n",
                   plane[1].c,
                   plane[1].dcdx,
                   plane[1].dcdy,
                   plane[1].eo);

      debug_printf("p2: %"PRIx64"/%08x/%08x/%08x\n",
                   plane[2].c,
                   plane[2].dcdx,
                   plane[2].dcdy,
                   plane[2].eo);
   }


   /*
    * When rasterizing scissored tris, use the intersection of the
    * triangle bounding box and the scissor rect to generate the
    * scissor planes.
    *
    * This permits us to cut off the triangle "tails" that are present
    * in the intermediate recursive levels caused when two of the
    * triangle's edges don't diverge quickly enough to trivially reject
    * exterior blocks from the triangle.
    *
    * It's not really clear if it's worth worrying about these tails,
    * but since we generate the planes for each scissored tri, it's
    * free to trim them in this case.
    *
    * Note that otherwise, the scissor planes only vary in 'C' value,
    * and even then only on state-changes.  Could alternatively store
    * these planes elsewhere.
    * (Or only store the c value together with a bit indicating which
    * scissor edge this is, so rasterization would treat them differently
    * (easier to evaluate) to ordinary planes.)
    */
   if (nr_planes > 3) {
      /* why not just use draw_regions */
      struct lp_rast_plane *plane_s = &plane[3];

      if (s_planes[0]) {
         plane_s->dcdx = ~0U << 8;
         plane_s->dcdy = 0;
         plane_s->c = (1-scissor->x0) << 8;
         plane_s->eo = 1 << 8;
         plane_s++;
      }
      if (s_planes[1]) {
         plane_s->dcdx = 1 << 8;
         plane_s->dcdy = 0;
         plane_s->c = (scissor->x1+1) << 8;
         plane_s->eo = 0 << 8;
         plane_s++;
      }
      if (s_planes[2]) {
         plane_s->dcdx = 0;
         plane_s->dcdy = 1 << 8;
         plane_s->c = (1-scissor->y0) << 8;
         plane_s->eo = 1 << 8;
         plane_s++;
      }
      if (s_planes[3]) {
         plane_s->dcdx = 0;
         plane_s->dcdy = ~0U << 8;
         plane_s->c = (scissor->y1+1) << 8;
         plane_s->eo = 0;
         plane_s++;
      }
      assert(plane_s == &plane[nr_planes]);
   }
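
   /*
    * Illustrative note: the scissor edges above are encoded in the same
    * edge-function form as the triangle edges (c, dcdx, dcdy in subpixel
    * units, hence the << 8, which assumes FIXED_ORDER == 8), so the
    * rasterizer walks them with exactly the same code.  For example, a
    * left scissor edge at x0 = 16 becomes dcdx = -256, dcdy = 0,
    * c = (1 - 16) << 8 = -3840, which evaluates positive only for pixel
    * columns x >= 16.
    */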

   return lp_setup_bin_triangle(setup, tri, &bbox, &bboxpos, nr_planes, viewport_index);
}

/*
 * Round down to the nearest power of two less than or equal to the input.
 *
 * Undefined if no bit is set, so code should check against 0 first.
 */
static inline uint32_t
floor_pot(uint32_t n)
{
#if defined(PIPE_CC_GCC) && (defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64))
   if (n == 0)
      return 0;

   __asm__("bsr %1,%0"
           : "=r" (n)
           : "rm" (n)
           : "cc");
   return 1 << n;
#else
   n |= (n >> 1);
   n |= (n >> 2);
   n |= (n >> 4);
   n |= (n >> 8);
   n |= (n >> 16);
   return n - (n >> 1);
#endif
}
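
/*
 * Illustrative examples: floor_pot(1) == 1, floor_pot(100) == 64,
 * floor_pot(4096) == 4096.  The portable fallback smears the highest set
 * bit into every lower position and then subtracts the lower half, leaving
 * just the most significant bit.
 */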


boolean
lp_setup_bin_triangle(struct lp_setup_context *setup,
                      struct lp_rast_triangle *tri,
                      const struct u_rect *bboxorig,
                      const struct u_rect *bbox,
                      int nr_planes,
                      unsigned viewport_index)
{
   struct lp_scene *scene = setup->scene;
   struct u_rect trimmed_box = *bbox;
   int i;
   unsigned cmd;

   /* What is the largest power-of-two boundary this triangle crosses:
    */
   int dx = floor_pot((bbox->x0 ^ bbox->x1) |
                      (bbox->y0 ^ bbox->y1));

   /* The largest dimension of the rasterized area of the triangle
    * (aligned to a 4x4 grid), rounded down to the nearest power of two:
    */
   int max_sz = ((bbox->x1 - (bbox->x0 & ~3)) |
                 (bbox->y1 - (bbox->y0 & ~3)));
   int sz = floor_pot(max_sz);
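
   /*
    * Worked example (illustrative): for bbox = (x0=5, x1=21, y0=2, y1=9),
    * dx = floor_pot((5 ^ 21) | (2 ^ 9)) = floor_pot(16 | 11) = 16, so the
    * whole box sits inside a single tile (dx < TILE_SIZE), while
    * max_sz = (21 - 4) | (9 - 0) = 17 | 9 = 25 and sz = 16, which is too
    * big for the 4x4 and 16x16 fast paths below.
    */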

   /*
    * NOTE: It is important to use the original bounding box
    * which might contain negative values here, because whether the
    * plane math may overflow with the 32bit rasterization
    * functions depends on the original extent of the triangle.
    */
   int max_szorig = ((bboxorig->x1 - (bboxorig->x0 & ~3)) |
                     (bboxorig->y1 - (bboxorig->y0 & ~3)));
   boolean use_32bits = max_szorig <= MAX_FIXED_LENGTH32;

   /* Now apply scissor, etc to the bounding box.  Could do this
    * earlier, but it confuses the logic for tri-16 and would force
    * the rasterizer to also respect scissor, etc, just for the rare
    * cases where a small triangle extends beyond the scissor.
    */
   u_rect_find_intersection(&setup->draw_regions[viewport_index],
                            &trimmed_box);

   /* Determine which tile(s) intersect the triangle's bounding box
    */
   if (dx < TILE_SIZE)
   {
      int ix0 = bbox->x0 / TILE_SIZE;
      int iy0 = bbox->y0 / TILE_SIZE;
      unsigned px = bbox->x0 & 63 & ~3;
      unsigned py = bbox->y0 & 63 & ~3;

      assert(iy0 == bbox->y1 / TILE_SIZE &&
             ix0 == bbox->x1 / TILE_SIZE);

      if (nr_planes == 3) {
         if (sz < 4)
         {
            /* Triangle is contained in a single 4x4 stamp:
             */
            assert(px + 4 <= TILE_SIZE);
            assert(py + 4 <= TILE_SIZE);
            if (setup->multisample)
               cmd = LP_RAST_OP_MS_TRIANGLE_3_4;
            else
               cmd = use_32bits ? LP_RAST_OP_TRIANGLE_32_3_4 : LP_RAST_OP_TRIANGLE_3_4;
            return lp_scene_bin_cmd_with_state( scene, ix0, iy0,
                                                setup->fs.stored, cmd,
                                                lp_rast_arg_triangle_contained(tri, px, py) );
         }

         if (sz < 16)
         {
            /* Triangle is contained in a single 16x16 block:
             */

            /*
             * The 16x16 block is only 4x4 aligned, and can exceed the tile
             * dimensions if the triangle is 16 pixels in one dimension but 4
             * in the other.  So budge the 16x16 back inside the tile.
             */
            px = MIN2(px, TILE_SIZE - 16);
            py = MIN2(py, TILE_SIZE - 16);

            assert(px + 16 <= TILE_SIZE);
            assert(py + 16 <= TILE_SIZE);

            if (setup->multisample)
               cmd = LP_RAST_OP_MS_TRIANGLE_3_16;
            else
               cmd = use_32bits ? LP_RAST_OP_TRIANGLE_32_3_16 : LP_RAST_OP_TRIANGLE_3_16;
            return lp_scene_bin_cmd_with_state( scene, ix0, iy0,
                                                setup->fs.stored, cmd,
                                                lp_rast_arg_triangle_contained(tri, px, py) );
         }
      }
      else if (nr_planes == 4 && sz < 16)
      {
         px = MIN2(px, TILE_SIZE - 16);
         py = MIN2(py, TILE_SIZE - 16);

         assert(px + 16 <= TILE_SIZE);
         assert(py + 16 <= TILE_SIZE);

         if (setup->multisample)
            cmd = LP_RAST_OP_MS_TRIANGLE_4_16;
         else
            cmd = use_32bits ? LP_RAST_OP_TRIANGLE_32_4_16 : LP_RAST_OP_TRIANGLE_4_16;
         return lp_scene_bin_cmd_with_state(scene, ix0, iy0,
                                            setup->fs.stored, cmd,
                                            lp_rast_arg_triangle_contained(tri, px, py));
      }


      /* Triangle is contained in a single tile:
       */
      if (setup->multisample)
         cmd = lp_rast_ms_tri_tab[nr_planes];
      else
         cmd = use_32bits ? lp_rast_32_tri_tab[nr_planes] : lp_rast_tri_tab[nr_planes];
      return lp_scene_bin_cmd_with_state(
         scene, ix0, iy0, setup->fs.stored, cmd,
         lp_rast_arg_triangle(tri, (1<<nr_planes)-1));
   }
   else
   {
      struct lp_rast_plane *plane = GET_PLANES(tri);
      int64_t c[MAX_PLANES];
      int64_t ei[MAX_PLANES];

      int64_t eo[MAX_PLANES];
      int64_t xstep[MAX_PLANES];
      int64_t ystep[MAX_PLANES];
      int x, y;

      int ix0 = trimmed_box.x0 / TILE_SIZE;
      int iy0 = trimmed_box.y0 / TILE_SIZE;
      int ix1 = trimmed_box.x1 / TILE_SIZE;
      int iy1 = trimmed_box.y1 / TILE_SIZE;

      for (i = 0; i < nr_planes; i++) {
         c[i] = (plane[i].c +
                 IMUL64(plane[i].dcdy, iy0) * TILE_SIZE -
                 IMUL64(plane[i].dcdx, ix0) * TILE_SIZE);

         ei[i] = (plane[i].dcdy -
                  plane[i].dcdx -
                  (int64_t)plane[i].eo) << TILE_ORDER;

         eo[i] = (int64_t)plane[i].eo << TILE_ORDER;
         xstep[i] = -(((int64_t)plane[i].dcdx) << TILE_ORDER);
         ystep[i] = ((int64_t)plane[i].dcdy) << TILE_ORDER;
      }
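
      /*
       * Illustrative summary of the per-tile tests driven by these values
       * (matching the loop below): c[i] tracks each edge function at the
       * current tile's reference corner, eo[i] is the largest possible
       * increase of that edge function across one tile, and ei[i] the
       * smallest (most negative).  So c + eo < 0 means the tile is entirely
       * outside that edge (trivial reject), while c + ei - 1 < 0 means the
       * tile is not entirely inside it and must be binned as a partial tile.
       */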

      /* Test tile-sized blocks against the triangle.
       * Discard blocks fully outside the tri.  If the block is fully
       * contained inside the tri, bin an lp_rast_shade_tile command.
       * Else, bin a lp_rast_triangle command.
       */
      for (y = iy0; y <= iy1; y++)
      {
         boolean in = FALSE;  /* are we inside the triangle? */
         int64_t cx[MAX_PLANES];

         for (i = 0; i < nr_planes; i++)
            cx[i] = c[i];

         for (x = ix0; x <= ix1; x++)
         {
            int out = 0;
            int partial = 0;

            for (i = 0; i < nr_planes; i++) {
               int64_t planeout = cx[i] + eo[i];
               int64_t planepartial = cx[i] + ei[i] - 1;
               out |= (int) (planeout >> 63);
               partial |= ((int) (planepartial >> 63)) & (1<<i);
            }

            if (out) {
               /* do nothing */
               if (in)
                  break;  /* exiting triangle, all done with this row */
               LP_COUNT(nr_empty_64);
            }
            else if (partial) {
               /* Not trivially accepted by at least one plane -
                * rasterize/shade partial tile
                */
               int count = util_bitcount(partial);
               in = TRUE;

               if (setup->multisample)
                  cmd = lp_rast_ms_tri_tab[count];
               else
                  cmd = use_32bits ? lp_rast_32_tri_tab[count] : lp_rast_tri_tab[count];
               if (!lp_scene_bin_cmd_with_state( scene, x, y,
                                                 setup->fs.stored, cmd,
                                                 lp_rast_arg_triangle(tri, partial) ))
                  goto fail;

               LP_COUNT(nr_partially_covered_64);
            }
            else {
               /* triangle covers the whole tile - shade the whole tile */
               LP_COUNT(nr_fully_covered_64);
               in = TRUE;
               if (!lp_setup_whole_tile(setup, &tri->inputs, x, y))
                  goto fail;
            }

            /* Iterate cx values across the region: */
            for (i = 0; i < nr_planes; i++)
               cx[i] += xstep[i];
         }

         /* Iterate c values down the region: */
         for (i = 0; i < nr_planes; i++)
            c[i] += ystep[i];
      }
   }

   return TRUE;

fail:
   /* Need to disable any partially binned triangle.  This is easier
    * than trying to locate all the triangle, shade-tile, etc,
    * commands which may have been binned.
    */
   tri->inputs.disable = TRUE;
   return FALSE;
}


/**
 * Try to draw the triangle, restart the scene on failure.
 */
static void retry_triangle_ccw( struct lp_setup_context *setup,
                                struct fixed_position *position,
                                const float (*v0)[4],
                                const float (*v1)[4],
                                const float (*v2)[4],
                                boolean front)
{
   if (!do_triangle_ccw( setup, position, v0, v1, v2, front ))
   {
      if (!lp_setup_flush_and_restart(setup))
         return;

      if (!do_triangle_ccw( setup, position, v0, v1, v2, front ))
         return;
   }
}

/**
 * Calculate fixed position data for a triangle.
 * It is unfortunate we need to do this here (as we need the area
 * calculated in fixed point), as there's quite some code duplication
 * with what is done in the jit setup prog.
 */
static inline void
calc_fixed_position(struct lp_setup_context *setup,
                    struct fixed_position *position,
                    const float (*v0)[4],
                    const float (*v1)[4],
                    const float (*v2)[4])
{
   float pixel_offset = setup->multisample ? 0.0 : setup->pixel_offset;
   /*
    * The rounding may not be quite the same with PIPE_ARCH_SSE
    * (util_iround right now only does nearest/even on x87,
    * otherwise nearest/away-from-zero).
    * Both should be acceptable, I think.
    */
#if defined(PIPE_ARCH_SSE)
   __m128 v0r, v1r;
   __m128 vxy0xy2, vxy1xy0;
   __m128i vxy0xy2i, vxy1xy0i;
   __m128i dxdy0120, x0x2y0y2, x1x0y1y0, x0120, y0120;
   __m128 pix_offset = _mm_set1_ps(pixel_offset);
   __m128 fixed_one = _mm_set1_ps((float)FIXED_ONE);
   v0r = _mm_castpd_ps(_mm_load_sd((double *)v0[0]));
   vxy0xy2 = _mm_loadh_pi(v0r, (__m64 *)v2[0]);
   v1r = _mm_castpd_ps(_mm_load_sd((double *)v1[0]));
   vxy1xy0 = _mm_movelh_ps(v1r, vxy0xy2);
   vxy0xy2 = _mm_sub_ps(vxy0xy2, pix_offset);
   vxy1xy0 = _mm_sub_ps(vxy1xy0, pix_offset);
   vxy0xy2 = _mm_mul_ps(vxy0xy2, fixed_one);
   vxy1xy0 = _mm_mul_ps(vxy1xy0, fixed_one);
   vxy0xy2i = _mm_cvtps_epi32(vxy0xy2);
   vxy1xy0i = _mm_cvtps_epi32(vxy1xy0);
   dxdy0120 = _mm_sub_epi32(vxy0xy2i, vxy1xy0i);
   _mm_store_si128((__m128i *)&position->dx01, dxdy0120);
   /*
    * For the mul, would need some more shuffles, plus emulation
    * for the signed mul (without sse41), so don't bother.
    */
   x0x2y0y2 = _mm_shuffle_epi32(vxy0xy2i, _MM_SHUFFLE(3,1,2,0));
   x1x0y1y0 = _mm_shuffle_epi32(vxy1xy0i, _MM_SHUFFLE(3,1,2,0));
   x0120 = _mm_unpacklo_epi32(x0x2y0y2, x1x0y1y0);
   y0120 = _mm_unpackhi_epi32(x0x2y0y2, x1x0y1y0);
   _mm_store_si128((__m128i *)&position->x[0], x0120);
   _mm_store_si128((__m128i *)&position->y[0], y0120);

#else
   position->x[0] = subpixel_snap(v0[0][0] - pixel_offset);
   position->x[1] = subpixel_snap(v1[0][0] - pixel_offset);
   position->x[2] = subpixel_snap(v2[0][0] - pixel_offset);
   position->x[3] = 0; /* should be unused */

   position->y[0] = subpixel_snap(v0[0][1] - pixel_offset);
   position->y[1] = subpixel_snap(v1[0][1] - pixel_offset);
   position->y[2] = subpixel_snap(v2[0][1] - pixel_offset);
   position->y[3] = 0; /* should be unused */

   position->dx01 = position->x[0] - position->x[1];
   position->dy01 = position->y[0] - position->y[1];

   position->dx20 = position->x[2] - position->x[0];
   position->dy20 = position->y[2] - position->y[0];
#endif

   position->area = IMUL64(position->dx01, position->dy20) -
                    IMUL64(position->dx20, position->dy01);
}
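
/*
 * Worked example (illustrative, assuming FIXED_ONE == 256 and a zero pixel
 * offset): vertices at (0,0), (1,0) and (0,1) snap to x = {0, 256, 0},
 * y = {0, 0, 256}, giving dx01 = -256, dy01 = 0, dx20 = 0, dy20 = 256 and
 * area = (-256 * 256) - (0 * 0) = -65536.  Since the area is negative,
 * triangle_both() below rotates the vertex order before handing the
 * triangle to retry_triangle_ccw(), which requires a positive area.
 */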


/**
 * Rotate a triangle, flipping its winding direction.
 * Swaps values for xy[0] and xy[1].
 */
static inline void
rotate_fixed_position_01( struct fixed_position *position )
{
   int x, y;

   x = position->x[1];
   y = position->y[1];
   position->x[1] = position->x[0];
   position->y[1] = position->y[0];
   position->x[0] = x;
   position->y[0] = y;

   position->dx01 = -position->dx01;
   position->dy01 = -position->dy01;
   position->dx20 = position->x[2] - position->x[0];
   position->dy20 = position->y[2] - position->y[0];

   position->area = -position->area;
}


/**
 * Rotate a triangle, flipping its winding direction.
 * Swaps values for xy[1] and xy[2].
 */
static inline void
rotate_fixed_position_12( struct fixed_position *position )
{
   int x, y;

   x = position->x[2];
   y = position->y[2];
   position->x[2] = position->x[1];
   position->y[2] = position->y[1];
   position->x[1] = x;
   position->y[1] = y;

   x = position->dx01;
   y = position->dy01;
   position->dx01 = -position->dx20;
   position->dy01 = -position->dy20;
   position->dx20 = -x;
   position->dy20 = -y;

   position->area = -position->area;
}


/**
 * Draw triangle if it's CW, cull otherwise.
 */
static void triangle_cw(struct lp_setup_context *setup,
                        const float (*v0)[4],
                        const float (*v1)[4],
                        const float (*v2)[4])
{
   PIPE_ALIGN_VAR(16) struct fixed_position position;
   struct llvmpipe_context *lp_context = (struct llvmpipe_context *)setup->pipe;

   if (lp_context->active_statistics_queries) {
      lp_context->pipeline_statistics.c_primitives++;
   }

   calc_fixed_position(setup, &position, v0, v1, v2);

   if (position.area < 0) {
      if (setup->flatshade_first) {
         rotate_fixed_position_12(&position);
         retry_triangle_ccw(setup, &position, v0, v2, v1, !setup->ccw_is_frontface);
      } else {
         rotate_fixed_position_01(&position);
         retry_triangle_ccw(setup, &position, v1, v0, v2, !setup->ccw_is_frontface);
      }
   }
}


static void triangle_ccw(struct lp_setup_context *setup,
                         const float (*v0)[4],
                         const float (*v1)[4],
                         const float (*v2)[4])
{
   PIPE_ALIGN_VAR(16) struct fixed_position position;
   struct llvmpipe_context *lp_context = (struct llvmpipe_context *)setup->pipe;

   if (lp_context->active_statistics_queries) {
      lp_context->pipeline_statistics.c_primitives++;
   }

   calc_fixed_position(setup, &position, v0, v1, v2);

   if (position.area > 0)
      retry_triangle_ccw(setup, &position, v0, v1, v2, setup->ccw_is_frontface);
}

/**
 * Draw triangle whether it's CW or CCW.
 */
static void triangle_both(struct lp_setup_context *setup,
                          const float (*v0)[4],
                          const float (*v1)[4],
                          const float (*v2)[4])
{
   PIPE_ALIGN_VAR(16) struct fixed_position position;
   struct llvmpipe_context *lp_context = (struct llvmpipe_context *)setup->pipe;

   if (lp_context->active_statistics_queries) {
      lp_context->pipeline_statistics.c_primitives++;
   }

   calc_fixed_position(setup, &position, v0, v1, v2);

   if (0) {
      assert(!util_is_inf_or_nan(v0[0][0]));
      assert(!util_is_inf_or_nan(v0[0][1]));
      assert(!util_is_inf_or_nan(v1[0][0]));
      assert(!util_is_inf_or_nan(v1[0][1]));
      assert(!util_is_inf_or_nan(v2[0][0]));
      assert(!util_is_inf_or_nan(v2[0][1]));
   }

   if (position.area > 0)
      retry_triangle_ccw( setup, &position, v0, v1, v2, setup->ccw_is_frontface );
   else if (position.area < 0) {
      if (setup->flatshade_first) {
         rotate_fixed_position_12( &position );
         retry_triangle_ccw( setup, &position, v0, v2, v1, !setup->ccw_is_frontface );
      } else {
         rotate_fixed_position_01( &position );
         retry_triangle_ccw( setup, &position, v1, v0, v2, !setup->ccw_is_frontface );
      }
   }
}


static void triangle_noop(struct lp_setup_context *setup,
                          const float (*v0)[4],
                          const float (*v1)[4],
                          const float (*v2)[4])
{
}


void
lp_setup_choose_triangle(struct lp_setup_context *setup)
{
   if (setup->rasterizer_discard) {
      setup->triangle = triangle_noop;
      return;
   }
   switch (setup->cullmode) {
   case PIPE_FACE_NONE:
      setup->triangle = triangle_both;
      break;
   case PIPE_FACE_BACK:
      setup->triangle = setup->ccw_is_frontface ? triangle_ccw : triangle_cw;
      break;
   case PIPE_FACE_FRONT:
      setup->triangle = setup->ccw_is_frontface ? triangle_cw : triangle_ccw;
      break;
   default:
      setup->triangle = triangle_noop;
      break;
   }
}