/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_build_pm4.h"
#include "util/u_upload_mgr.h"
#include "util/u_viewport.h"

#define SI_MAX_SCISSOR 16384

static void si_get_small_prim_cull_info(struct si_context *sctx,
                                        struct si_small_prim_cull_info *out)
{
   /* This is needed by the small primitive culling, because it's done
    * in screen space.
    */
   struct si_small_prim_cull_info info;
   unsigned num_samples = si_get_num_coverage_samples(sctx);
   assert(num_samples >= 1);

   info.scale[0] = sctx->viewports.states[0].scale[0];
   info.scale[1] = sctx->viewports.states[0].scale[1];
   info.translate[0] = sctx->viewports.states[0].translate[0];
   info.translate[1] = sctx->viewports.states[0].translate[1];

   /* The viewport shouldn't flip the X axis for the small prim culling to work. */
   assert(-info.scale[0] + info.translate[0] <= info.scale[0] + info.translate[0]);

   /* Compute the line width used by the rasterizer. */
   float line_width = sctx->queued.named.rasterizer->line_width;
   if (num_samples == 1)
      line_width = roundf(line_width);
   line_width = MAX2(line_width, 1);

   info.clip_half_line_width[0] = line_width * 0.5 / fabs(info.scale[0]);
   info.clip_half_line_width[1] = line_width * 0.5 / fabs(info.scale[1]);
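   /* Illustrative numbers (not taken from real state): a 2.0 px line on a
    * 1920 px wide viewport gives scale[0] = 960, so the half line width in
    * clip space is 2.0 * 0.5 / 960 = 1/960 of the viewport's half-width.
    */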

   /* If the Y axis is inverted (OpenGL default framebuffer), reverse it.
    * This is because the viewport transformation inverts the clip space
    * bounding box, so min becomes max, which breaks small primitive
    * culling.
    */
   if (sctx->viewport0_y_inverted) {
      info.scale[1] = -info.scale[1];
      info.translate[1] = -info.translate[1];
   }

   /* This is what the hardware does. */
   if (!sctx->queued.named.rasterizer->half_pixel_center) {
      info.translate[0] += 0.5;
      info.translate[1] += 0.5;
   }

   memcpy(info.scale_no_aa, info.scale, sizeof(info.scale));
   memcpy(info.translate_no_aa, info.translate, sizeof(info.translate));

   /* Scale the framebuffer up, so that samples become pixels and small
    * primitive culling is the same for all sample counts.
    * This only works with the standard DX sample positions, because
    * the samples are evenly spaced on both X and Y axes.
    */
   for (unsigned i = 0; i < 2; i++) {
      info.scale[i] *= num_samples;
      info.translate[i] *= num_samples;
   }

   /* Better subpixel precision increases the efficiency of small
    * primitive culling. (more precision means a tighter bounding box
    * around primitives and more accurate elimination)
    */
   unsigned quant_mode = sctx->viewports.as_scissor[0].quant_mode;

   if (quant_mode == SI_QUANT_MODE_12_12_FIXED_POINT_1_4096TH)
      info.small_prim_precision_no_aa = 1.0 / 4096.0;
   else if (quant_mode == SI_QUANT_MODE_14_10_FIXED_POINT_1_1024TH)
      info.small_prim_precision_no_aa = 1.0 / 1024.0;
   else
      info.small_prim_precision_no_aa = 1.0 / 256.0;

   info.small_prim_precision = num_samples * info.small_prim_precision_no_aa;
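   /* Worked example (illustrative): with 14.10 quantization the subpixel
    * precision is 1/1024, so with 4x MSAA the sample-scaled precision is
    * 4 * 1/1024 = 1/256, i.e. n = 8 in the 1 / 2^n form that
    * si_emit_cull_state packs into VS_STATE.
    */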

   *out = info;
}

static void si_emit_cull_state(struct si_context *sctx)
{
   assert(sctx->screen->use_ngg_culling);

   const unsigned upload_size = offsetof(struct si_small_prim_cull_info, small_prim_precision);
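   /* Presumably upload_size stops before small_prim_precision because that
    * field reaches shaders via VS_STATE.SMALL_PRIM_PRECISION below rather
    * than through this buffer.
    */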
   struct si_small_prim_cull_info info;
   si_get_small_prim_cull_info(sctx, &info);

   if (!sctx->small_prim_cull_info_buf ||
       memcmp(&info, &sctx->last_small_prim_cull_info, sizeof(info))) {
      unsigned offset = 0;

      u_upload_data(sctx->b.const_uploader, 0, upload_size,
                    si_optimal_tcc_alignment(sctx, upload_size), &info, &offset,
                    (struct pipe_resource **)&sctx->small_prim_cull_info_buf);

      sctx->small_prim_cull_info_address = sctx->small_prim_cull_info_buf->gpu_address + offset;
      sctx->last_small_prim_cull_info = info;
   }

   /* This will end up in SGPR6 as (value << 8), shifted by the hw. */
   radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, sctx->small_prim_cull_info_buf,
                             RADEON_USAGE_READ | RADEON_PRIO_CONST_BUFFER);
   radeon_begin(&sctx->gfx_cs);
   radeon_set_sh_reg(R_00B230_SPI_SHADER_USER_DATA_GS_0 + GFX9_SGPR_SMALL_PRIM_CULL_INFO * 4,
                     sctx->small_prim_cull_info_address);
   radeon_end();

   /* Set VS_STATE.SMALL_PRIM_PRECISION for NGG culling.
    *
    * small_prim_precision is 1 / 2^n. We only need n between 5 (1/32) and 12 (1/4096).
    * Such a floating point value can be packed into 4 bits as follows:
    * If we pass the first 4 bits of the exponent to the shader and set the next 3 bits
    * to 1, we'll get the number exactly because all other bits are always 0. See:
    *                                                               1
    * value  =  (0x70 | value.exponent[0:3]) << 23  =  ------------------------------
    *                                                  2 ^ (15 - value.exponent[0:3])
    *
    * So pass only the first 4 bits of the float exponent to the shader.
    */
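   /* Worked example (illustrative): small_prim_precision = 1/1024 = 2^-10
    * has the biased float exponent 127 - 10 = 117 = 0x75, whose low 4 bits
    * are 0x5. The shader reconstructs (0x70 | 0x5) << 23, i.e. exponent 0x75
    * again, which is exactly 1 / 2^(15 - 5) = 1/1024.
    */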
   sctx->current_vs_state &= C_VS_STATE_SMALL_PRIM_PRECISION;
   sctx->current_vs_state |= S_VS_STATE_SMALL_PRIM_PRECISION(fui(info.small_prim_precision) >> 23);
}

static void si_set_scissor_states(struct pipe_context *pctx, unsigned start_slot,
                                  unsigned num_scissors, const struct pipe_scissor_state *state)
{
   struct si_context *ctx = (struct si_context *)pctx;
   int i;

   for (i = 0; i < num_scissors; i++)
      ctx->scissors[start_slot + i] = state[i];

   if (!ctx->queued.named.rasterizer->scissor_enable)
      return;

   si_mark_atom_dirty(ctx, &ctx->atoms.s.scissors);
}

/* Since the guard band disables clipping, we have to clip per-pixel
 * using a scissor.
 */
static void si_get_scissor_from_viewport(struct si_context *ctx,
                                         const struct pipe_viewport_state *vp,
                                         struct si_signed_scissor *scissor)
{
   float tmp, minx, miny, maxx, maxy;

   /* Convert (-1, -1) and (1, 1) from clip space into window space. */
   minx = -vp->scale[0] + vp->translate[0];
   miny = -vp->scale[1] + vp->translate[1];
   maxx = vp->scale[0] + vp->translate[0];
   maxy = vp->scale[1] + vp->translate[1];
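   /* E.g. (illustrative): a 1920x1080 viewport anchored at the origin has
    * scale = (960, 540) and translate = (960, 540), which yields
    * (minx, miny) = (0, 0) and (maxx, maxy) = (1920, 1080).
    */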

   /* Handle inverted viewports. */
   if (minx > maxx) {
      tmp = minx;
      minx = maxx;
      maxx = tmp;
   }
   if (miny > maxy) {
      tmp = miny;
      miny = maxy;
      maxy = tmp;
   }

   /* Convert to integer and round up the max bounds. */
   scissor->minx = minx;
   scissor->miny = miny;
   scissor->maxx = ceilf(maxx);
   scissor->maxy = ceilf(maxy);
}

static void si_clamp_scissor(struct si_context *ctx, struct pipe_scissor_state *out,
                             struct si_signed_scissor *scissor)
{
   out->minx = CLAMP(scissor->minx, 0, SI_MAX_SCISSOR);
   out->miny = CLAMP(scissor->miny, 0, SI_MAX_SCISSOR);
   out->maxx = CLAMP(scissor->maxx, 0, SI_MAX_SCISSOR);
   out->maxy = CLAMP(scissor->maxy, 0, SI_MAX_SCISSOR);
}

static void si_clip_scissor(struct pipe_scissor_state *out, struct pipe_scissor_state *clip)
{
   out->minx = MAX2(out->minx, clip->minx);
   out->miny = MAX2(out->miny, clip->miny);
   out->maxx = MIN2(out->maxx, clip->maxx);
   out->maxy = MIN2(out->maxy, clip->maxy);
}

static void si_scissor_make_union(struct si_signed_scissor *out, struct si_signed_scissor *in)
{
   out->minx = MIN2(out->minx, in->minx);
   out->miny = MIN2(out->miny, in->miny);
   out->maxx = MAX2(out->maxx, in->maxx);
   out->maxy = MAX2(out->maxy, in->maxy);
   out->quant_mode = MIN2(out->quant_mode, in->quant_mode);
}

static void si_emit_one_scissor(struct si_context *ctx, struct radeon_cmdbuf *cs,
                                struct si_signed_scissor *vp_scissor,
                                struct pipe_scissor_state *scissor)
{
   struct pipe_scissor_state final;

   if (ctx->vs_disables_clipping_viewport) {
      final.minx = final.miny = 0;
      final.maxx = final.maxy = SI_MAX_SCISSOR;
   } else {
      si_clamp_scissor(ctx, &final, vp_scissor);
   }

   if (scissor)
      si_clip_scissor(&final, scissor);

   radeon_begin(cs);

   /* Workaround for a hw bug on GFX6 that occurs when
    * PA_SU_HARDWARE_SCREEN_OFFSET != 0 and any_scissor.BR_X/Y <= 0.
    */
   if (ctx->chip_class == GFX6 && (final.maxx == 0 || final.maxy == 0)) {
      radeon_emit(S_028250_TL_X(1) | S_028250_TL_Y(1) | S_028250_WINDOW_OFFSET_DISABLE(1));
      radeon_emit(S_028254_BR_X(1) | S_028254_BR_Y(1));
      radeon_end();
      return;
   }

   radeon_emit(S_028250_TL_X(final.minx) | S_028250_TL_Y(final.miny) |
                  S_028250_WINDOW_OFFSET_DISABLE(1));
   radeon_emit(S_028254_BR_X(final.maxx) | S_028254_BR_Y(final.maxy));
   radeon_end();
}

#define MAX_PA_SU_HARDWARE_SCREEN_OFFSET 8176

static void si_emit_guardband(struct si_context *ctx)
{
   const struct si_state_rasterizer *rs = ctx->queued.named.rasterizer;
   struct si_signed_scissor vp_as_scissor;
   struct pipe_viewport_state vp;
   float left, top, right, bottom, max_range, guardband_x, guardband_y;
   float discard_x, discard_y;

   if (ctx->vs_writes_viewport_index) {
      /* Shaders can draw to any viewport. Make a union of all
       * viewports. */
      vp_as_scissor = ctx->viewports.as_scissor[0];
      for (unsigned i = 1; i < SI_MAX_VIEWPORTS; i++) {
         si_scissor_make_union(&vp_as_scissor, &ctx->viewports.as_scissor[i]);
      }
   } else {
      vp_as_scissor = ctx->viewports.as_scissor[0];
   }

   /* Blits don't set the viewport state. The vertex shader determines
    * the viewport size by scaling the coordinates, so we don't know
    * how large the viewport is. Assume the worst case.
    */
   if (ctx->vs_disables_clipping_viewport)
      vp_as_scissor.quant_mode = SI_QUANT_MODE_16_8_FIXED_POINT_1_256TH;

   /* Determine the optimal hardware screen offset to center the viewport
    * within the viewport range in order to maximize the guardband size.
    */
   int hw_screen_offset_x = (vp_as_scissor.maxx + vp_as_scissor.minx) / 2;
   int hw_screen_offset_y = (vp_as_scissor.maxy + vp_as_scissor.miny) / 2;

   /* GFX6-GFX7 need to align the offset to an ubertile consisting of all SEs. */
   const unsigned hw_screen_offset_alignment =
      ctx->chip_class >= GFX8 ? 16 : MAX2(ctx->screen->se_tile_repeat, 16);

   /* Max viewport size indexed by quantization mode. The 16.8, 14.10, and
    * 12.12 fixed-point modes have 16, 14, and 12 integer bits, so the largest
    * representable coordinates are 65535, 16383, and 4095 respectively.
    */
   static int max_viewport_size[] = {65535, 16383, 4095};

   /* Ensure that the whole viewport stays representable in
    * absolute coordinates.
    * See comment in si_set_viewport_states.
    */
   assert(vp_as_scissor.maxx <= max_viewport_size[vp_as_scissor.quant_mode] &&
          vp_as_scissor.maxy <= max_viewport_size[vp_as_scissor.quant_mode]);

   hw_screen_offset_x = CLAMP(hw_screen_offset_x, 0, MAX_PA_SU_HARDWARE_SCREEN_OFFSET);
   hw_screen_offset_y = CLAMP(hw_screen_offset_y, 0, MAX_PA_SU_HARDWARE_SCREEN_OFFSET);

   /* Align the screen offset by dropping the low bits. */
   hw_screen_offset_x &= ~(hw_screen_offset_alignment - 1);
   hw_screen_offset_y &= ~(hw_screen_offset_alignment - 1);

   /* Apply the offset to center the viewport and maximize the guardband. */
   vp_as_scissor.minx -= hw_screen_offset_x;
   vp_as_scissor.maxx -= hw_screen_offset_x;
   vp_as_scissor.miny -= hw_screen_offset_y;
   vp_as_scissor.maxy -= hw_screen_offset_y;

   /* Reconstruct the viewport transformation from the scissor. */
   vp.translate[0] = (vp_as_scissor.minx + vp_as_scissor.maxx) / 2.0;
   vp.translate[1] = (vp_as_scissor.miny + vp_as_scissor.maxy) / 2.0;
   vp.scale[0] = vp_as_scissor.maxx - vp.translate[0];
   vp.scale[1] = vp_as_scissor.maxy - vp.translate[1];

   /* Treat a 0x0 viewport as 1x1 to prevent division by zero. */
   if (vp_as_scissor.minx == vp_as_scissor.maxx)
      vp.scale[0] = 0.5;
   if (vp_as_scissor.miny == vp_as_scissor.maxy)
      vp.scale[1] = 0.5;

   /* Find the biggest guard band that is inside the supported viewport
    * range. The guard band is specified as a horizontal and vertical
    * distance from (0,0) in clip space.
    *
    * This is done by applying the inverse viewport transformation
    * on the viewport limits to get those limits in clip space.
    *
    * The viewport range is [-max_viewport_size/2 - 1, max_viewport_size/2].
    * (-1 to the min coord because max_viewport_size is odd and ViewportBounds
    * Min/Max are -32768, 32767).
    */
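   /* Worked example (illustrative): a 0..1920 viewport in 16.8 mode is
    * centered to [-960, 960] by the screen offset, giving vp.scale[0] = 960
    * and vp.translate[0] = 0, so with max_range = 32767 the guardband
    * extends to about 32767 / 960 ~= 34 viewport half-widths per side.
    */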
   assert(vp_as_scissor.quant_mode < ARRAY_SIZE(max_viewport_size));
   max_range = max_viewport_size[vp_as_scissor.quant_mode] / 2;
   left = (-max_range - 1 - vp.translate[0]) / vp.scale[0];
   right = (max_range - vp.translate[0]) / vp.scale[0];
   top = (-max_range - 1 - vp.translate[1]) / vp.scale[1];
   bottom = (max_range - vp.translate[1]) / vp.scale[1];

   assert(left <= -1 && top <= -1 && right >= 1 && bottom >= 1);

   guardband_x = MIN2(-left, right);
   guardband_y = MIN2(-top, bottom);

   discard_x = 1.0;
   discard_y = 1.0;

   if (unlikely(util_prim_is_points_or_lines(ctx->current_rast_prim))) {
      /* When rendering wide points or lines, we need to be more
       * conservative about when to discard them entirely. */
      float pixels;

      if (ctx->current_rast_prim == PIPE_PRIM_POINTS)
         pixels = rs->max_point_size;
      else
         pixels = rs->line_width;

      /* Add half the point size / line width. */
      discard_x += pixels / (2.0 * vp.scale[0]);
      discard_y += pixels / (2.0 * vp.scale[1]);

      /* Discard primitives that would lie entirely outside the clip
       * region. */
      discard_x = MIN2(discard_x, guardband_x);
      discard_y = MIN2(discard_y, guardband_y);
   }

   /* If any of the GB registers is updated, all of them must be updated:
    * R_028BE8_PA_CL_GB_VERT_CLIP_ADJ, R_028BEC_PA_CL_GB_VERT_DISC_ADJ,
    * R_028BF0_PA_CL_GB_HORZ_CLIP_ADJ, R_028BF4_PA_CL_GB_HORZ_DISC_ADJ
    */
   radeon_begin(&ctx->gfx_cs);
   radeon_opt_set_context_reg4(ctx, R_028BE8_PA_CL_GB_VERT_CLIP_ADJ,
                               SI_TRACKED_PA_CL_GB_VERT_CLIP_ADJ, fui(guardband_y), fui(discard_y),
                               fui(guardband_x), fui(discard_x));
   radeon_opt_set_context_reg(ctx, R_028234_PA_SU_HARDWARE_SCREEN_OFFSET,
                              SI_TRACKED_PA_SU_HARDWARE_SCREEN_OFFSET,
                              S_028234_HW_SCREEN_OFFSET_X(hw_screen_offset_x >> 4) |
                                 S_028234_HW_SCREEN_OFFSET_Y(hw_screen_offset_y >> 4));
   radeon_opt_set_context_reg(
      ctx, R_028BE4_PA_SU_VTX_CNTL, SI_TRACKED_PA_SU_VTX_CNTL,
      S_028BE4_PIX_CENTER(rs->half_pixel_center) | S_028BE4_ROUND_MODE(V_028BE4_X_ROUND_TO_EVEN) |
         S_028BE4_QUANT_MODE(V_028BE4_X_16_8_FIXED_POINT_1_256TH + vp_as_scissor.quant_mode));
   radeon_end_update_context_roll(ctx);
}

static void si_emit_scissors(struct si_context *ctx)
{
   struct radeon_cmdbuf *cs = &ctx->gfx_cs;
   struct pipe_scissor_state *states = ctx->scissors;
   bool scissor_enabled = ctx->queued.named.rasterizer->scissor_enable;

   /* The simple case: Only 1 viewport is active. */
   if (!ctx->vs_writes_viewport_index) {
      struct si_signed_scissor *vp = &ctx->viewports.as_scissor[0];

      radeon_begin(cs);
      radeon_set_context_reg_seq(R_028250_PA_SC_VPORT_SCISSOR_0_TL, 2);
      radeon_end();

      si_emit_one_scissor(ctx, cs, vp, scissor_enabled ? &states[0] : NULL);
      return;
   }

   /* All registers in the array need to be updated if any of them is changed.
    * This is a hardware requirement.
    */
   radeon_begin(cs);
   radeon_set_context_reg_seq(R_028250_PA_SC_VPORT_SCISSOR_0_TL, SI_MAX_VIEWPORTS * 2);
   radeon_end();

   for (unsigned i = 0; i < SI_MAX_VIEWPORTS; i++) {
      si_emit_one_scissor(ctx, cs, &ctx->viewports.as_scissor[i],
                          scissor_enabled ? &states[i] : NULL);
   }
}

static void si_set_viewport_states(struct pipe_context *pctx, unsigned start_slot,
                                   unsigned num_viewports, const struct pipe_viewport_state *state)
{
   struct si_context *ctx = (struct si_context *)pctx;
   int i;

   for (i = 0; i < num_viewports; i++) {
      unsigned index = start_slot + i;
      struct si_signed_scissor *scissor = &ctx->viewports.as_scissor[index];

      ctx->viewports.states[index] = state[i];

      si_get_scissor_from_viewport(ctx, &state[i], scissor);

      int max_corner = MAX2(
         MAX2(abs(scissor->maxx), abs(scissor->maxy)),
         MAX2(abs(scissor->minx), abs(scissor->miny)));

      /* Determine the best quantization mode (subpixel precision),
       * but also leave enough space for the guardband.
       *
       * Note that primitive binning requires QUANT_MODE == 16_8 on Vega10
       * and Raven1 for line and rectangle primitive types to work correctly.
       * Always use 16_8 whenever primitive binning might be used.
       */
      if ((ctx->family == CHIP_VEGA10 || ctx->family == CHIP_RAVEN) && ctx->screen->dpbb_allowed)
         max_corner = 16384; /* Use QUANT_MODE == 16_8. */

      /* Another constraint is that all coordinates in the viewport
       * are representable in fixed point with respect to the
       * surface origin.
       *
       * It means that PA_SU_HARDWARE_SCREEN_OFFSET can't be given
       * an offset that would make the upper corner of the viewport
       * greater than the maximum representable number post
       * quantization, i.e. 2^quant_bits.
       *
       * This does not matter for 14.10 and 16.8 formats since the
       * offset is already limited at 8k, but it means we can't use
       * 12.12 if we are drawing to some pixels outside the lower
       * 4k x 4k of the render target.
       */

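      /* The thresholds below seem chosen so that each mode's representable
       * range is roughly 4x the viewport's max corner, leaving most of the
       * range for the guardband: 1024 * 4 = 4096 (12.12 maxes out at 4095)
       * and 4096 * 4 = 16384 (14.10 maxes out at 16383). This reading is an
       * inference from the numbers, not a documented rule.
       */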
      if (max_corner <= 1024) /* 4K scanline area for guardband */
         scissor->quant_mode = SI_QUANT_MODE_12_12_FIXED_POINT_1_4096TH;
      else if (max_corner <= 4096) /* 16K scanline area for guardband */
         scissor->quant_mode = SI_QUANT_MODE_14_10_FIXED_POINT_1_1024TH;
      else /* 64K scanline area for guardband */
         scissor->quant_mode = SI_QUANT_MODE_16_8_FIXED_POINT_1_256TH;
   }

   if (start_slot == 0) {
      ctx->viewport0_y_inverted =
         -state->scale[1] + state->translate[1] > state->scale[1] + state->translate[1];

      /* NGG cull state uses the viewport and quant mode. */
      if (ctx->screen->use_ngg_culling)
         si_mark_atom_dirty(ctx, &ctx->atoms.s.ngg_cull_state);
   }

   si_mark_atom_dirty(ctx, &ctx->atoms.s.viewports);
   si_mark_atom_dirty(ctx, &ctx->atoms.s.guardband);
   si_mark_atom_dirty(ctx, &ctx->atoms.s.scissors);
}

static void si_emit_one_viewport(struct si_context *ctx, struct pipe_viewport_state *state)
{
   struct radeon_cmdbuf *cs = &ctx->gfx_cs;

   radeon_begin(cs);
   radeon_emit(fui(state->scale[0]));
   radeon_emit(fui(state->translate[0]));
   radeon_emit(fui(state->scale[1]));
   radeon_emit(fui(state->translate[1]));
   radeon_emit(fui(state->scale[2]));
   radeon_emit(fui(state->translate[2]));
   radeon_end();
}

static void si_emit_viewports(struct si_context *ctx)
{
   struct radeon_cmdbuf *cs = &ctx->gfx_cs;
   struct pipe_viewport_state *states = ctx->viewports.states;

   /* The simple case: Only 1 viewport is active. */
   if (!ctx->vs_writes_viewport_index) {
      radeon_begin(cs);
      radeon_set_context_reg_seq(R_02843C_PA_CL_VPORT_XSCALE, 6);
      radeon_end();

      si_emit_one_viewport(ctx, &states[0]);
      return;
   }

   /* All registers in the array need to be updated if any of them is changed.
    * This is a hardware requirement.
    */
   radeon_begin(cs);
   radeon_set_context_reg_seq(R_02843C_PA_CL_VPORT_XSCALE, SI_MAX_VIEWPORTS * 6);
   radeon_end();

   for (unsigned i = 0; i < SI_MAX_VIEWPORTS; i++)
      si_emit_one_viewport(ctx, &states[i]);
}

static inline void si_viewport_zmin_zmax(const struct pipe_viewport_state *vp, bool halfz,
                                         bool window_space_position, float *zmin, float *zmax)
{
   if (window_space_position) {
      *zmin = 0;
      *zmax = 1;
      return;
   }
   util_viewport_zmin_zmax(vp, halfz, zmin, zmax);
}

static void si_emit_depth_ranges(struct si_context *ctx)
{
   struct radeon_cmdbuf *cs = &ctx->gfx_cs;
   struct pipe_viewport_state *states = ctx->viewports.states;
   bool clip_halfz = ctx->queued.named.rasterizer->clip_halfz;
   bool window_space = ctx->vs_disables_clipping_viewport;
   float zmin, zmax;

   /* The simple case: Only 1 viewport is active. */
   if (!ctx->vs_writes_viewport_index) {
      si_viewport_zmin_zmax(&states[0], clip_halfz, window_space, &zmin, &zmax);

      radeon_begin(cs);
      radeon_set_context_reg_seq(R_0282D0_PA_SC_VPORT_ZMIN_0, 2);
      radeon_emit(fui(zmin));
      radeon_emit(fui(zmax));
      radeon_end();
      return;
   }

   /* All registers in the array need to be updated if any of them is changed.
    * This is a hardware requirement.
    */
   radeon_begin(cs);
   radeon_set_context_reg_seq(R_0282D0_PA_SC_VPORT_ZMIN_0, SI_MAX_VIEWPORTS * 2);
   for (unsigned i = 0; i < SI_MAX_VIEWPORTS; i++) {
      si_viewport_zmin_zmax(&states[i], clip_halfz, window_space, &zmin, &zmax);
      radeon_emit(fui(zmin));
      radeon_emit(fui(zmax));
   }
   radeon_end();
}

static void si_emit_viewport_states(struct si_context *ctx)
{
   si_emit_viewports(ctx);
   si_emit_depth_ranges(ctx);
}

/**
 * This reacts to 2 state changes:
 * - VS.writes_viewport_index
 * - VS output position in window space (enable/disable)
 *
 * Normally, we only emit 1 viewport and 1 scissor if no shader is using
 * the VIEWPORT_INDEX output, and emitting the other viewports and scissors
 * is delayed. When a shader with VIEWPORT_INDEX appears, this should be
 * called to emit the rest.
 */
void si_update_vs_viewport_state(struct si_context *ctx)
{
   struct si_shader_info *info = si_get_vs_info(ctx);
   bool vs_window_space;

   if (!info)
      return;

   /* True when the VS disables clipping and the viewport transformation. */
   vs_window_space = info->stage == MESA_SHADER_VERTEX && info->base.vs.window_space_position;

   if (ctx->vs_disables_clipping_viewport != vs_window_space) {
      ctx->vs_disables_clipping_viewport = vs_window_space;
      si_mark_atom_dirty(ctx, &ctx->atoms.s.scissors);
      si_mark_atom_dirty(ctx, &ctx->atoms.s.viewports);
   }

   /* Viewport index handling. */
   if (ctx->vs_writes_viewport_index == info->writes_viewport_index)
      return;

   /* This changes how the guardband is computed. */
   ctx->vs_writes_viewport_index = info->writes_viewport_index;
   si_mark_atom_dirty(ctx, &ctx->atoms.s.guardband);

   /* Emit scissors and viewports that were enabled by having
    * the ViewportIndex output.
    */
   if (info->writes_viewport_index) {
      si_mark_atom_dirty(ctx, &ctx->atoms.s.scissors);
      si_mark_atom_dirty(ctx, &ctx->atoms.s.viewports);
   }
}

static void si_emit_window_rectangles(struct si_context *sctx)
{
   /* There are four clipping rectangles. Their corner coordinates are inclusive.
    * Every pixel is assigned a number from 0 to 15 by setting bits 0-3 depending
    * on whether the pixel is inside cliprects 0-3, respectively. For example,
    * if a pixel is inside cliprects 0 and 1, but outside 2 and 3, it is assigned
    * the number 3 (binary 0011).
    *
    * If CLIPRECT_RULE & (1 << number), the pixel is rasterized.
    */
   struct radeon_cmdbuf *cs = &sctx->gfx_cs;
   static const unsigned outside[4] = {
      /* outside rectangle 0 */
      V_02820C_OUT | V_02820C_IN_1 | V_02820C_IN_2 | V_02820C_IN_21 | V_02820C_IN_3 |
         V_02820C_IN_31 | V_02820C_IN_32 | V_02820C_IN_321,
      /* outside rectangles 0, 1 */
      V_02820C_OUT | V_02820C_IN_2 | V_02820C_IN_3 | V_02820C_IN_32,
      /* outside rectangles 0, 1, 2 */
      V_02820C_OUT | V_02820C_IN_3,
      /* outside rectangles 0, 1, 2, 3 */
      V_02820C_OUT,
   };
   const unsigned disabled = 0xffff; /* all inside and outside cases */
   unsigned num_rectangles = sctx->num_window_rectangles;
   struct pipe_scissor_state *rects = sctx->window_rectangles;
   unsigned rule;

   assert(num_rectangles <= 4);

   if (num_rectangles == 0)
      rule = disabled;
   else if (sctx->window_rectangles_include)
      rule = ~outside[num_rectangles - 1];
   else
      rule = outside[num_rectangles - 1];
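   /* E.g. (illustrative): with one exclusive rectangle, outside[0] selects
    * every pixel number whose bit 0 is clear, so only pixels outside
    * cliprect 0 are rasterized; include mode simply inverts that selection.
    */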

   radeon_begin(cs);
   radeon_opt_set_context_reg(sctx, R_02820C_PA_SC_CLIPRECT_RULE, SI_TRACKED_PA_SC_CLIPRECT_RULE,
                              rule);
   if (num_rectangles == 0) {
      radeon_end();
      return;
   }

   radeon_set_context_reg_seq(R_028210_PA_SC_CLIPRECT_0_TL, num_rectangles * 2);
   for (unsigned i = 0; i < num_rectangles; i++) {
      radeon_emit(S_028210_TL_X(rects[i].minx) | S_028210_TL_Y(rects[i].miny));
      radeon_emit(S_028214_BR_X(rects[i].maxx) | S_028214_BR_Y(rects[i].maxy));
   }
   radeon_end();
}

static void si_set_window_rectangles(struct pipe_context *ctx, bool include,
                                     unsigned num_rectangles,
                                     const struct pipe_scissor_state *rects)
{
   struct si_context *sctx = (struct si_context *)ctx;

   sctx->num_window_rectangles = num_rectangles;
   sctx->window_rectangles_include = include;
   if (num_rectangles) {
      memcpy(sctx->window_rectangles, rects, sizeof(*rects) * num_rectangles);
   }

   si_mark_atom_dirty(sctx, &sctx->atoms.s.window_rectangles);
}

void si_init_viewport_functions(struct si_context *ctx)
{
   ctx->atoms.s.guardband.emit = si_emit_guardband;
   ctx->atoms.s.scissors.emit = si_emit_scissors;
   ctx->atoms.s.viewports.emit = si_emit_viewport_states;
   ctx->atoms.s.window_rectangles.emit = si_emit_window_rectangles;
   ctx->atoms.s.ngg_cull_state.emit = si_emit_cull_state;

   ctx->b.set_scissor_states = si_set_scissor_states;
   ctx->b.set_viewport_states = si_set_viewport_states;
   ctx->b.set_window_rectangles = si_set_window_rectangles;

   for (unsigned i = 0; i < SI_MAX_VIEWPORTS; i++)
      ctx->viewports.as_scissor[i].quant_mode = SI_QUANT_MODE_16_8_FIXED_POINT_1_256TH;
}