1 /*
2  * Copyright © 2017 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included
12  * in all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20  * DEALINGS IN THE SOFTWARE.
21  */
22 
23 /**
24  * @file iris_state.c
25  *
26  * ============================= GENXML CODE =============================
27  *              [This file is compiled once per generation.]
28  * =======================================================================
29  *
30  * This is the main state upload code.
31  *
32  * Gallium uses Constant State Objects, or CSOs, for most state.  Large,
33  * complex, or highly reusable state can be created once, and bound and
34  * rebound multiple times.  This is modeled with the pipe->create_*_state()
35  * and pipe->bind_*_state() hooks.  Highly dynamic or inexpensive state is
36  * streamed out on the fly, via pipe->set_*_state() hooks.
37  *
38  * OpenGL involves frequently mutating context state, which is mirrored in
39  * core Mesa by highly mutable data structures.  However, most applications
40  * typically draw the same things over and over - from frame to frame, most
41  * of the same objects are still visible and need to be redrawn.  So, rather
42  * than inventing new state all the time, applications usually mutate to swap
43  * between known states that we've seen before.
44  *
45  * Gallium isolates us from this mutation by tracking API state, and
46  * distilling it into a set of Constant State Objects, or CSOs.  Large,
47  * complex, or typically reusable state can be created once, then reused
48  * multiple times.  Drivers can create and store their own associated data.
49  * This create/bind model corresponds to the pipe->create_*_state() and
50  * pipe->bind_*_state() driver hooks.
51  *
52  * Some state is cheap to create, or expected to be highly dynamic.  Rather
53  * than creating and caching piles of CSOs for these, Gallium simply streams
54  * them out, via the pipe->set_*_state() driver hooks.
55  *
56  * To reduce draw time overhead, we try to compute as much state at create
57  * time as possible.  Wherever possible, we translate the Gallium pipe state
58  * to 3DSTATE commands, and store those commands in the CSO.  At draw time,
59  * we can simply memcpy them into a batch buffer.
60  *
61  * No hardware matches the abstraction perfectly, so some commands require
62  * information from multiple CSOs.  In this case, we can store two copies
63  * of the packet (one in each CSO), and simply | together their DWords at
64  * draw time.  Sometimes the second set is trivial (one or two fields), so
65  * we simply pack it at draw time.
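 *
 * For example (a simplified sketch with hypothetical CSO field names, not
 * literal packet contents), merging two partial copies of a packet at
 * draw time looks roughly like:
 *
 *    uint32_t dwords[GENX(3DSTATE_SF_length)];
 *    for (int i = 0; i < GENX(3DSTATE_SF_length); i++)
 *       dwords[i] = cso_rast->sf[i] | cso_other_sf[i];
 *    iris_batch_emit(batch, dwords, sizeof(dwords));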
66  *
67  * There are two main components in the file below.  First, the CSO hooks
68  * create/bind/track state.  The second are the draw-time upload functions,
69  * iris_upload_render_state() and iris_upload_compute_state(), which read
70  * the context state and emit the commands into the actual batch.
71  */
72 
73 #include <stdio.h>
74 #include <errno.h>
75 
76 #if HAVE_VALGRIND
77 #include <valgrind.h>
78 #include <memcheck.h>
79 #define VG(x) x
80 #ifdef DEBUG
81 #define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))
82 #endif
83 #else
84 #define VG(x)
85 #endif
86 
87 #include "pipe/p_defines.h"
88 #include "pipe/p_state.h"
89 #include "pipe/p_context.h"
90 #include "pipe/p_screen.h"
91 #include "util/u_dual_blend.h"
92 #include "util/u_inlines.h"
93 #include "util/format/u_format.h"
94 #include "util/u_framebuffer.h"
95 #include "util/u_transfer.h"
96 #include "util/u_upload_mgr.h"
97 #include "util/u_viewport.h"
98 #include "util/u_memory.h"
99 #include "drm-uapi/i915_drm.h"
100 #include "nir.h"
101 #include "intel/compiler/brw_compiler.h"
102 #include "intel/common/intel_aux_map.h"
103 #include "intel/common/intel_l3_config.h"
104 #include "intel/common/intel_sample_positions.h"
105 #include "iris_batch.h"
106 #include "iris_context.h"
107 #include "iris_defines.h"
108 #include "iris_pipe.h"
109 #include "iris_resource.h"
110 
111 #include "iris_genx_macros.h"
112 #include "intel/common/intel_guardband.h"
113 
114 /**
115  * Statically assert that PIPE_* enums match the hardware packets.
116  * (As long as they match, we don't need to translate them.)
117  */
UNUSED static void
pipe_asserts()
119 {
120 #define PIPE_ASSERT(x) STATIC_ASSERT((int)x)
121 
122    /* pipe_logicop happens to match the hardware. */
123    PIPE_ASSERT(PIPE_LOGICOP_CLEAR == LOGICOP_CLEAR);
124    PIPE_ASSERT(PIPE_LOGICOP_NOR == LOGICOP_NOR);
125    PIPE_ASSERT(PIPE_LOGICOP_AND_INVERTED == LOGICOP_AND_INVERTED);
126    PIPE_ASSERT(PIPE_LOGICOP_COPY_INVERTED == LOGICOP_COPY_INVERTED);
127    PIPE_ASSERT(PIPE_LOGICOP_AND_REVERSE == LOGICOP_AND_REVERSE);
128    PIPE_ASSERT(PIPE_LOGICOP_INVERT == LOGICOP_INVERT);
129    PIPE_ASSERT(PIPE_LOGICOP_XOR == LOGICOP_XOR);
130    PIPE_ASSERT(PIPE_LOGICOP_NAND == LOGICOP_NAND);
131    PIPE_ASSERT(PIPE_LOGICOP_AND == LOGICOP_AND);
132    PIPE_ASSERT(PIPE_LOGICOP_EQUIV == LOGICOP_EQUIV);
133    PIPE_ASSERT(PIPE_LOGICOP_NOOP == LOGICOP_NOOP);
134    PIPE_ASSERT(PIPE_LOGICOP_OR_INVERTED == LOGICOP_OR_INVERTED);
135    PIPE_ASSERT(PIPE_LOGICOP_COPY == LOGICOP_COPY);
136    PIPE_ASSERT(PIPE_LOGICOP_OR_REVERSE == LOGICOP_OR_REVERSE);
137    PIPE_ASSERT(PIPE_LOGICOP_OR == LOGICOP_OR);
138    PIPE_ASSERT(PIPE_LOGICOP_SET == LOGICOP_SET);
139 
   /* pipe_blendfactor happens to match the hardware. */
141    PIPE_ASSERT(PIPE_BLENDFACTOR_ONE == BLENDFACTOR_ONE);
142    PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_COLOR == BLENDFACTOR_SRC_COLOR);
143    PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_ALPHA == BLENDFACTOR_SRC_ALPHA);
144    PIPE_ASSERT(PIPE_BLENDFACTOR_DST_ALPHA == BLENDFACTOR_DST_ALPHA);
145    PIPE_ASSERT(PIPE_BLENDFACTOR_DST_COLOR == BLENDFACTOR_DST_COLOR);
146    PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE == BLENDFACTOR_SRC_ALPHA_SATURATE);
147    PIPE_ASSERT(PIPE_BLENDFACTOR_CONST_COLOR == BLENDFACTOR_CONST_COLOR);
148    PIPE_ASSERT(PIPE_BLENDFACTOR_CONST_ALPHA == BLENDFACTOR_CONST_ALPHA);
149    PIPE_ASSERT(PIPE_BLENDFACTOR_SRC1_COLOR == BLENDFACTOR_SRC1_COLOR);
150    PIPE_ASSERT(PIPE_BLENDFACTOR_SRC1_ALPHA == BLENDFACTOR_SRC1_ALPHA);
151    PIPE_ASSERT(PIPE_BLENDFACTOR_ZERO == BLENDFACTOR_ZERO);
152    PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC_COLOR == BLENDFACTOR_INV_SRC_COLOR);
153    PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC_ALPHA == BLENDFACTOR_INV_SRC_ALPHA);
154    PIPE_ASSERT(PIPE_BLENDFACTOR_INV_DST_ALPHA == BLENDFACTOR_INV_DST_ALPHA);
155    PIPE_ASSERT(PIPE_BLENDFACTOR_INV_DST_COLOR == BLENDFACTOR_INV_DST_COLOR);
156    PIPE_ASSERT(PIPE_BLENDFACTOR_INV_CONST_COLOR == BLENDFACTOR_INV_CONST_COLOR);
157    PIPE_ASSERT(PIPE_BLENDFACTOR_INV_CONST_ALPHA == BLENDFACTOR_INV_CONST_ALPHA);
158    PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC1_COLOR == BLENDFACTOR_INV_SRC1_COLOR);
159    PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC1_ALPHA == BLENDFACTOR_INV_SRC1_ALPHA);
160 
161    /* pipe_blend_func happens to match the hardware. */
162    PIPE_ASSERT(PIPE_BLEND_ADD == BLENDFUNCTION_ADD);
163    PIPE_ASSERT(PIPE_BLEND_SUBTRACT == BLENDFUNCTION_SUBTRACT);
164    PIPE_ASSERT(PIPE_BLEND_REVERSE_SUBTRACT == BLENDFUNCTION_REVERSE_SUBTRACT);
165    PIPE_ASSERT(PIPE_BLEND_MIN == BLENDFUNCTION_MIN);
166    PIPE_ASSERT(PIPE_BLEND_MAX == BLENDFUNCTION_MAX);
167 
168    /* pipe_stencil_op happens to match the hardware. */
169    PIPE_ASSERT(PIPE_STENCIL_OP_KEEP == STENCILOP_KEEP);
170    PIPE_ASSERT(PIPE_STENCIL_OP_ZERO == STENCILOP_ZERO);
171    PIPE_ASSERT(PIPE_STENCIL_OP_REPLACE == STENCILOP_REPLACE);
172    PIPE_ASSERT(PIPE_STENCIL_OP_INCR == STENCILOP_INCRSAT);
173    PIPE_ASSERT(PIPE_STENCIL_OP_DECR == STENCILOP_DECRSAT);
174    PIPE_ASSERT(PIPE_STENCIL_OP_INCR_WRAP == STENCILOP_INCR);
175    PIPE_ASSERT(PIPE_STENCIL_OP_DECR_WRAP == STENCILOP_DECR);
176    PIPE_ASSERT(PIPE_STENCIL_OP_INVERT == STENCILOP_INVERT);
177 
178    /* pipe_sprite_coord_mode happens to match 3DSTATE_SBE */
179    PIPE_ASSERT(PIPE_SPRITE_COORD_UPPER_LEFT == UPPERLEFT);
180    PIPE_ASSERT(PIPE_SPRITE_COORD_LOWER_LEFT == LOWERLEFT);
181 #undef PIPE_ASSERT
182 }
183 
184 static unsigned
translate_prim_type(enum pipe_prim_type prim, uint8_t verts_per_patch)
186 {
187    static const unsigned map[] = {
188       [PIPE_PRIM_POINTS]                   = _3DPRIM_POINTLIST,
189       [PIPE_PRIM_LINES]                    = _3DPRIM_LINELIST,
190       [PIPE_PRIM_LINE_LOOP]                = _3DPRIM_LINELOOP,
191       [PIPE_PRIM_LINE_STRIP]               = _3DPRIM_LINESTRIP,
192       [PIPE_PRIM_TRIANGLES]                = _3DPRIM_TRILIST,
193       [PIPE_PRIM_TRIANGLE_STRIP]           = _3DPRIM_TRISTRIP,
194       [PIPE_PRIM_TRIANGLE_FAN]             = _3DPRIM_TRIFAN,
195       [PIPE_PRIM_QUADS]                    = _3DPRIM_QUADLIST,
196       [PIPE_PRIM_QUAD_STRIP]               = _3DPRIM_QUADSTRIP,
197       [PIPE_PRIM_POLYGON]                  = _3DPRIM_POLYGON,
198       [PIPE_PRIM_LINES_ADJACENCY]          = _3DPRIM_LINELIST_ADJ,
199       [PIPE_PRIM_LINE_STRIP_ADJACENCY]     = _3DPRIM_LINESTRIP_ADJ,
200       [PIPE_PRIM_TRIANGLES_ADJACENCY]      = _3DPRIM_TRILIST_ADJ,
201       [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
202       [PIPE_PRIM_PATCHES]                  = _3DPRIM_PATCHLIST_1 - 1,
203    };
204 
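   /* The PATCHLIST entry is _3DPRIM_PATCHLIST_1 - 1, so adding the patch
    * vertex count (1..32) below yields the matching _3DPRIM_PATCHLIST_n.
    */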
205    return map[prim] + (prim == PIPE_PRIM_PATCHES ? verts_per_patch : 0);
206 }
207 
208 static unsigned
translate_compare_func(enum pipe_compare_func pipe_func)
210 {
211    static const unsigned map[] = {
212       [PIPE_FUNC_NEVER]    = COMPAREFUNCTION_NEVER,
213       [PIPE_FUNC_LESS]     = COMPAREFUNCTION_LESS,
214       [PIPE_FUNC_EQUAL]    = COMPAREFUNCTION_EQUAL,
215       [PIPE_FUNC_LEQUAL]   = COMPAREFUNCTION_LEQUAL,
216       [PIPE_FUNC_GREATER]  = COMPAREFUNCTION_GREATER,
217       [PIPE_FUNC_NOTEQUAL] = COMPAREFUNCTION_NOTEQUAL,
218       [PIPE_FUNC_GEQUAL]   = COMPAREFUNCTION_GEQUAL,
219       [PIPE_FUNC_ALWAYS]   = COMPAREFUNCTION_ALWAYS,
220    };
221    return map[pipe_func];
222 }
223 
224 static unsigned
translate_shadow_func(enum pipe_compare_func pipe_func)
226 {
227    /* Gallium specifies the result of shadow comparisons as:
228     *
229     *    1 if ref <op> texel,
230     *    0 otherwise.
231     *
232     * The hardware does:
233     *
234     *    0 if texel <op> ref,
235     *    1 otherwise.
236     *
237     * So we need to flip the operator and also negate.
238     */
239    static const unsigned map[] = {
240       [PIPE_FUNC_NEVER]    = PREFILTEROP_ALWAYS,
241       [PIPE_FUNC_LESS]     = PREFILTEROP_LEQUAL,
242       [PIPE_FUNC_EQUAL]    = PREFILTEROP_NOTEQUAL,
243       [PIPE_FUNC_LEQUAL]   = PREFILTEROP_LESS,
244       [PIPE_FUNC_GREATER]  = PREFILTEROP_GEQUAL,
245       [PIPE_FUNC_NOTEQUAL] = PREFILTEROP_EQUAL,
246       [PIPE_FUNC_GEQUAL]   = PREFILTEROP_GREATER,
247       [PIPE_FUNC_ALWAYS]   = PREFILTEROP_NEVER,
248    };
249    return map[pipe_func];
250 }
251 
252 static unsigned
translate_cull_mode(unsigned pipe_face)
254 {
255    static const unsigned map[4] = {
256       [PIPE_FACE_NONE]           = CULLMODE_NONE,
257       [PIPE_FACE_FRONT]          = CULLMODE_FRONT,
258       [PIPE_FACE_BACK]           = CULLMODE_BACK,
259       [PIPE_FACE_FRONT_AND_BACK] = CULLMODE_BOTH,
260    };
261    return map[pipe_face];
262 }
263 
264 static unsigned
translate_fill_mode(unsigned pipe_polymode)
266 {
267    static const unsigned map[4] = {
268       [PIPE_POLYGON_MODE_FILL]           = FILL_MODE_SOLID,
269       [PIPE_POLYGON_MODE_LINE]           = FILL_MODE_WIREFRAME,
270       [PIPE_POLYGON_MODE_POINT]          = FILL_MODE_POINT,
271       [PIPE_POLYGON_MODE_FILL_RECTANGLE] = FILL_MODE_SOLID,
272    };
273    return map[pipe_polymode];
274 }
275 
276 static unsigned
translate_mip_filter(enum pipe_tex_mipfilter pipe_mip)
278 {
279    static const unsigned map[] = {
280       [PIPE_TEX_MIPFILTER_NEAREST] = MIPFILTER_NEAREST,
281       [PIPE_TEX_MIPFILTER_LINEAR]  = MIPFILTER_LINEAR,
282       [PIPE_TEX_MIPFILTER_NONE]    = MIPFILTER_NONE,
283    };
284    return map[pipe_mip];
285 }
286 
287 static uint32_t
translate_wrap(unsigned pipe_wrap)
289 {
290    static const unsigned map[] = {
291       [PIPE_TEX_WRAP_REPEAT]                 = TCM_WRAP,
292       [PIPE_TEX_WRAP_CLAMP]                  = TCM_HALF_BORDER,
293       [PIPE_TEX_WRAP_CLAMP_TO_EDGE]          = TCM_CLAMP,
294       [PIPE_TEX_WRAP_CLAMP_TO_BORDER]        = TCM_CLAMP_BORDER,
295       [PIPE_TEX_WRAP_MIRROR_REPEAT]          = TCM_MIRROR,
296       [PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE]   = TCM_MIRROR_ONCE,
297 
298       /* These are unsupported. */
299       [PIPE_TEX_WRAP_MIRROR_CLAMP]           = -1,
300       [PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER] = -1,
301    };
302    return map[pipe_wrap];
303 }
304 
305 /**
306  * Allocate space for some indirect state.
307  *
308  * Return a pointer to the map (to fill it out) and a state ref (for
309  * referring to the state in GPU commands).
310  */
311 static void *
upload_state(struct u_upload_mgr *uploader,
313              struct iris_state_ref *ref,
314              unsigned size,
315              unsigned alignment)
316 {
317    void *p = NULL;
318    u_upload_alloc(uploader, 0, size, alignment, &ref->offset, &ref->res, &p);
319    return p;
320 }
321 
322 /**
323  * Stream out temporary/short-lived state.
324  *
325  * This allocates space, pins the BO, and includes the BO address in the
326  * returned offset (which works because all state lives in 32-bit memory
327  * zones).
328  */
329 static uint32_t *
stream_state(struct iris_batch *batch,
331              struct u_upload_mgr *uploader,
332              struct pipe_resource **out_res,
333              unsigned size,
334              unsigned alignment,
335              uint32_t *out_offset)
336 {
337    void *ptr = NULL;
338 
339    u_upload_alloc(uploader, 0, size, alignment, out_offset, out_res, &ptr);
340 
341    struct iris_bo *bo = iris_resource_bo(*out_res);
342    iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_NONE);
343 
344    iris_record_state_size(batch->state_sizes,
345                           bo->address + *out_offset, size);
346 
347    *out_offset += iris_bo_offset_from_base_address(bo);
348 
349    return ptr;
350 }
351 
352 /**
353  * stream_state() + memcpy.
354  */
355 static uint32_t
emit_state(struct iris_batch *batch,
357            struct u_upload_mgr *uploader,
358            struct pipe_resource **out_res,
359            const void *data,
360            unsigned size,
361            unsigned alignment)
362 {
363    unsigned offset = 0;
364    uint32_t *map =
365       stream_state(batch, uploader, out_res, size, alignment, &offset);
366 
367    if (map)
368       memcpy(map, data, size);
369 
370    return offset;
371 }
372 
373 /**
374  * Did field 'x' change between 'old_cso' and 'new_cso'?
375  *
376  * (If so, we may want to set some dirty flags.)
377  */
378 #define cso_changed(x) (!old_cso || (old_cso->x != new_cso->x))
379 #define cso_changed_memcmp(x) \
380    (!old_cso || memcmp(old_cso->x, new_cso->x, sizeof(old_cso->x)) != 0)
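
/* Illustrative usage (the field/dirty-bit pairing here is hypothetical):
 *
 *    if (cso_changed(line_width))
 *       ice->state.dirty |= IRIS_DIRTY_RASTER;
 */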
381 
382 static void
flush_before_state_base_change(struct iris_batch *batch)
384 {
385    const struct intel_device_info *devinfo = &batch->screen->devinfo;
386 
387    /* Flush before emitting STATE_BASE_ADDRESS.
388     *
389     * This isn't documented anywhere in the PRM.  However, it seems to be
390     * necessary prior to changing the surface state base address.  We've
391     * seen issues in Vulkan where we get GPU hangs when using multi-level
392     * command buffers which clear depth, reset state base address, and then
393     * go render stuff.
394     *
395     * Normally, in GL, we would trust the kernel to do sufficient stalls
396     * and flushes prior to executing our batch.  However, it doesn't seem
397     * as if the kernel's flushing is always sufficient and we don't want to
398     * rely on it.
399     *
400     * We make this an end-of-pipe sync instead of a normal flush because we
401     * do not know the current status of the GPU.  On Haswell at least,
402     * having a fast-clear operation in flight at the same time as a normal
403     * rendering operation can cause hangs.  Since the kernel's flushing is
404     * insufficient, we need to ensure that any rendering operations from
405     * other processes are definitely complete before we try to do our own
406     * rendering.  It's a bit of a big hammer but it appears to work.
407     */
408    iris_emit_end_of_pipe_sync(batch,
409                               "change STATE_BASE_ADDRESS (flushes)",
410                               PIPE_CONTROL_RENDER_TARGET_FLUSH |
411                               PIPE_CONTROL_DEPTH_CACHE_FLUSH |
412                               PIPE_CONTROL_DATA_CACHE_FLUSH |
413                               /* Wa_1606662791:
414                                *
415                                *   Software must program PIPE_CONTROL command
416                                *   with "HDC Pipeline Flush" prior to
417                                *   programming of the below two non-pipeline
418                                *   state :
419                                *      * STATE_BASE_ADDRESS
420                                *      * 3DSTATE_BINDING_TABLE_POOL_ALLOC
421                                */
422                               ((GFX_VER == 12 && devinfo->revision == 0 /* A0 */ ?
423                                 PIPE_CONTROL_FLUSH_HDC : 0)));
424 }
425 
426 static void
flush_after_state_base_change(struct iris_batch *batch)
428 {
429    /* After re-setting the surface state base address, we have to do some
    * cache flushing so that the sampler engine will pick up the new
431     * SURFACE_STATE objects and binding tables. From the Broadwell PRM,
432     * Shared Function > 3D Sampler > State > State Caching (page 96):
433     *
434     *    Coherency with system memory in the state cache, like the texture
435     *    cache is handled partially by software. It is expected that the
436     *    command stream or shader will issue Cache Flush operation or
437     *    Cache_Flush sampler message to ensure that the L1 cache remains
438     *    coherent with system memory.
439     *
440     *    [...]
441     *
442     *    Whenever the value of the Dynamic_State_Base_Addr,
443     *    Surface_State_Base_Addr are altered, the L1 state cache must be
444     *    invalidated to ensure the new surface or sampler state is fetched
445     *    from system memory.
446     *
447     * The PIPE_CONTROL command has a "State Cache Invalidation Enable" bit
    * which, according to the PIPE_CONTROL instruction documentation in the
449     * Broadwell PRM:
450     *
451     *    Setting this bit is independent of any other bit in this packet.
452     *    This bit controls the invalidation of the L1 and L2 state caches
453     *    at the top of the pipe i.e. at the parsing time.
454     *
455     * Unfortunately, experimentation seems to indicate that state cache
    * invalidation through a PIPE_CONTROL does nothing whatsoever with
    * regard to surface state and binding tables.  Instead, it seems that
458     * invalidating the texture cache is what is actually needed.
459     *
460     * XXX:  As far as we have been able to determine through
    * experimentation, flushing the texture cache appears to be
462     * sufficient.  The theory here is that all of the sampling/rendering
463     * units cache the binding table in the texture cache.  However, we have
464     * yet to be able to actually confirm this.
465     */
466    iris_emit_end_of_pipe_sync(batch,
467                               "change STATE_BASE_ADDRESS (invalidates)",
468                               PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
469                               PIPE_CONTROL_CONST_CACHE_INVALIDATE |
470                               PIPE_CONTROL_STATE_CACHE_INVALIDATE);
471 }
472 
473 static void
_iris_emit_lri(struct iris_batch *batch, uint32_t reg, uint32_t val)
475 {
476    iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
477       lri.RegisterOffset = reg;
478       lri.DataDWord      = val;
479    }
480 }
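
/* Convenience wrapper that takes a genxml register name (e.g. CS_CHICKEN1)
 * and resolves its MMIO offset via the generated GENX(<reg>_num) constant.
 */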
481 #define iris_emit_lri(b, r, v) _iris_emit_lri(b, GENX(r##_num), v)
482 
483 static void
_iris_emit_lrr(struct iris_batch *batch, uint32_t dst, uint32_t src)
485 {
486    iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_REG), lrr) {
487       lrr.SourceRegisterAddress = src;
488       lrr.DestinationRegisterAddress = dst;
489    }
490 }
491 
492 static void
iris_load_register_reg32(struct iris_batch *batch, uint32_t dst,
494                          uint32_t src)
495 {
496    _iris_emit_lrr(batch, dst, src);
497 }
498 
499 static void
iris_load_register_reg64(struct iris_batch *batch, uint32_t dst,
501                          uint32_t src)
502 {
503    _iris_emit_lrr(batch, dst, src);
504    _iris_emit_lrr(batch, dst + 4, src + 4);
505 }
506 
507 static void
iris_load_register_imm32(struct iris_batch *batch, uint32_t reg,
509                          uint32_t val)
510 {
511    _iris_emit_lri(batch, reg, val);
512 }
513 
514 static void
iris_load_register_imm64(struct iris_batch *batch, uint32_t reg,
516                          uint64_t val)
517 {
518    _iris_emit_lri(batch, reg + 0, val & 0xffffffff);
519    _iris_emit_lri(batch, reg + 4, val >> 32);
520 }
521 
522 /**
523  * Emit MI_LOAD_REGISTER_MEM to load a 32-bit MMIO register from a buffer.
524  */
525 static void
iris_load_register_mem32(struct iris_batch *batch, uint32_t reg,
527                          struct iris_bo *bo, uint32_t offset)
528 {
529    iris_batch_sync_region_start(batch);
530    iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
531       lrm.RegisterAddress = reg;
532       lrm.MemoryAddress = ro_bo(bo, offset);
533    }
534    iris_batch_sync_region_end(batch);
535 }
536 
537 /**
538  * Load a 64-bit value from a buffer into a MMIO register via
539  * two MI_LOAD_REGISTER_MEM commands.
540  */
541 static void
iris_load_register_mem64(struct iris_batch *batch, uint32_t reg,
543                          struct iris_bo *bo, uint32_t offset)
544 {
545    iris_load_register_mem32(batch, reg + 0, bo, offset + 0);
546    iris_load_register_mem32(batch, reg + 4, bo, offset + 4);
547 }
548 
549 static void
iris_store_register_mem32(struct iris_batch *batch, uint32_t reg,
551                           struct iris_bo *bo, uint32_t offset,
552                           bool predicated)
553 {
554    iris_batch_sync_region_start(batch);
555    iris_emit_cmd(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
556       srm.RegisterAddress = reg;
557       srm.MemoryAddress = rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE);
558       srm.PredicateEnable = predicated;
559    }
560    iris_batch_sync_region_end(batch);
561 }
562 
563 static void
iris_store_register_mem64(struct iris_batch *batch, uint32_t reg,
565                           struct iris_bo *bo, uint32_t offset,
566                           bool predicated)
567 {
568    iris_store_register_mem32(batch, reg + 0, bo, offset + 0, predicated);
569    iris_store_register_mem32(batch, reg + 4, bo, offset + 4, predicated);
570 }
571 
572 static void
iris_store_data_imm32(struct iris_batch *batch,
574                       struct iris_bo *bo, uint32_t offset,
575                       uint32_t imm)
576 {
577    iris_batch_sync_region_start(batch);
578    iris_emit_cmd(batch, GENX(MI_STORE_DATA_IMM), sdi) {
579       sdi.Address = rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE);
580       sdi.ImmediateData = imm;
581    }
582    iris_batch_sync_region_end(batch);
583 }
584 
585 static void
iris_store_data_imm64(struct iris_batch *batch,
587                       struct iris_bo *bo, uint32_t offset,
588                       uint64_t imm)
589 {
590    /* Can't use iris_emit_cmd because MI_STORE_DATA_IMM has a length of
591     * 2 in genxml but it's actually variable length and we need 5 DWords.
592     */
593    void *map = iris_get_command_space(batch, 4 * 5);
594    iris_batch_sync_region_start(batch);
595    _iris_pack_command(batch, GENX(MI_STORE_DATA_IMM), map, sdi) {
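      /* The DWordLength field excludes the first two DWords of the packet,
       * hence the customary "total - 2" below.
       */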
596       sdi.DWordLength = 5 - 2;
597       sdi.Address = rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE);
598       sdi.ImmediateData = imm;
599    }
600    iris_batch_sync_region_end(batch);
601 }
602 
603 static void
iris_copy_mem_mem(struct iris_batch *batch,
605                   struct iris_bo *dst_bo, uint32_t dst_offset,
606                   struct iris_bo *src_bo, uint32_t src_offset,
607                   unsigned bytes)
608 {
609    /* MI_COPY_MEM_MEM operates on DWords. */
610    assert(bytes % 4 == 0);
611    assert(dst_offset % 4 == 0);
612    assert(src_offset % 4 == 0);
613    iris_batch_sync_region_start(batch);
614 
615    for (unsigned i = 0; i < bytes; i += 4) {
616       iris_emit_cmd(batch, GENX(MI_COPY_MEM_MEM), cp) {
617          cp.DestinationMemoryAddress = rw_bo(dst_bo, dst_offset + i,
618                                              IRIS_DOMAIN_OTHER_WRITE);
619          cp.SourceMemoryAddress = ro_bo(src_bo, src_offset + i);
620       }
621    }
622 
623    iris_batch_sync_region_end(batch);
624 }
625 
626 static void
emit_pipeline_select(struct iris_batch *batch, uint32_t pipeline)
628 {
629 #if GFX_VER >= 8 && GFX_VER < 10
630    /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
631     *
632     *   Software must clear the COLOR_CALC_STATE Valid field in
633     *   3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
634     *   with Pipeline Select set to GPGPU.
635     *
636     * The internal hardware docs recommend the same workaround for Gfx9
637     * hardware too.
638     */
639    if (pipeline == GPGPU)
640       iris_emit_cmd(batch, GENX(3DSTATE_CC_STATE_POINTERS), t);
641 #endif
642 
643 
644    /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
645     * PIPELINE_SELECT [DevBWR+]":
646     *
647     *    "Project: DEVSNB+
648     *
649     *     Software must ensure all the write caches are flushed through a
650     *     stalling PIPE_CONTROL command followed by another PIPE_CONTROL
651     *     command to invalidate read only caches prior to programming
652     *     MI_PIPELINE_SELECT command to change the Pipeline Select Mode."
653     */
654     iris_emit_pipe_control_flush(batch,
655                                  "workaround: PIPELINE_SELECT flushes (1/2)",
656                                  PIPE_CONTROL_RENDER_TARGET_FLUSH |
657                                  PIPE_CONTROL_DEPTH_CACHE_FLUSH |
658                                  PIPE_CONTROL_DATA_CACHE_FLUSH |
659                                  PIPE_CONTROL_CS_STALL);
660 
661     iris_emit_pipe_control_flush(batch,
662                                  "workaround: PIPELINE_SELECT flushes (2/2)",
663                                  PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
664                                  PIPE_CONTROL_CONST_CACHE_INVALIDATE |
665                                  PIPE_CONTROL_STATE_CACHE_INVALIDATE |
666                                  PIPE_CONTROL_INSTRUCTION_INVALIDATE);
667 
668    iris_emit_cmd(batch, GENX(PIPELINE_SELECT), sel) {
669 #if GFX_VER >= 9
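      /* MaskBits selects which fields of this command are actually written:
       * the Pipeline Selection bits, plus the Media Sampler DOP Clock Gate
       * Enable bit on Gfx12.
       */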
670       sel.MaskBits = GFX_VER >= 12 ? 0x13 : 3;
671       sel.MediaSamplerDOPClockGateEnable = GFX_VER >= 12;
672 #endif
673       sel.PipelineSelection = pipeline;
674    }
675 }
676 
677 UNUSED static void
init_glk_barrier_mode(struct iris_batch *batch, uint32_t value)
679 {
680 #if GFX_VER == 9
681    /* Project: DevGLK
682     *
683     *    "This chicken bit works around a hardware issue with barrier
684     *     logic encountered when switching between GPGPU and 3D pipelines.
685     *     To workaround the issue, this mode bit should be set after a
686     *     pipeline is selected."
687     */
688    iris_emit_reg(batch, GENX(SLICE_COMMON_ECO_CHICKEN1), reg) {
689       reg.GLKBarrierMode = value;
690       reg.GLKBarrierModeMask = 1;
691    }
692 #endif
693 }
694 
695 static void
init_state_base_address(struct iris_batch *batch)
697 {
698    struct isl_device *isl_dev = &batch->screen->isl_dev;
699    uint32_t mocs = isl_mocs(isl_dev, 0, false);
700    flush_before_state_base_change(batch);
701 
702    /* We program most base addresses once at context initialization time.
703     * Each base address points at a 4GB memory zone, and never needs to
704     * change.  See iris_bufmgr.h for a description of the memory zones.
705     *
706     * The one exception is Surface State Base Address, which needs to be
707     * updated occasionally.  See iris_binder.c for the details there.
708     */
709    iris_emit_cmd(batch, GENX(STATE_BASE_ADDRESS), sba) {
710       sba.GeneralStateMOCS            = mocs;
711       sba.StatelessDataPortAccessMOCS = mocs;
712       sba.DynamicStateMOCS            = mocs;
713       sba.IndirectObjectMOCS          = mocs;
714       sba.InstructionMOCS             = mocs;
715       sba.SurfaceStateMOCS            = mocs;
716 
717       sba.GeneralStateBaseAddressModifyEnable   = true;
718       sba.DynamicStateBaseAddressModifyEnable   = true;
719       sba.IndirectObjectBaseAddressModifyEnable = true;
720       sba.InstructionBaseAddressModifyEnable    = true;
721       sba.GeneralStateBufferSizeModifyEnable    = true;
722       sba.DynamicStateBufferSizeModifyEnable    = true;
723 #if (GFX_VER >= 9)
724       sba.BindlessSurfaceStateBaseAddress = ro_bo(NULL, IRIS_MEMZONE_BINDLESS_START);
725       sba.BindlessSurfaceStateSize = (IRIS_BINDLESS_SIZE >> 12) - 1;
726       sba.BindlessSurfaceStateBaseAddressModifyEnable = true;
727       sba.BindlessSurfaceStateMOCS    = mocs;
728 #endif
729       sba.IndirectObjectBufferSizeModifyEnable  = true;
730       sba.InstructionBuffersizeModifyEnable     = true;
731 
732       sba.InstructionBaseAddress  = ro_bo(NULL, IRIS_MEMZONE_SHADER_START);
733       sba.DynamicStateBaseAddress = ro_bo(NULL, IRIS_MEMZONE_DYNAMIC_START);
734 
735       sba.GeneralStateBufferSize   = 0xfffff;
736       sba.IndirectObjectBufferSize = 0xfffff;
737       sba.InstructionBufferSize    = 0xfffff;
738       sba.DynamicStateBufferSize   = 0xfffff;
739    }
740 
741    flush_after_state_base_change(batch);
742 }
743 
744 static void
iris_emit_l3_config(struct iris_batch *batch,
746                     const struct intel_l3_config *cfg)
747 {
748    assert(cfg || GFX_VER >= 12);
749 
750 #if GFX_VER >= 12
751 #define L3_ALLOCATION_REG GENX(L3ALLOC)
752 #define L3_ALLOCATION_REG_num GENX(L3ALLOC_num)
753 #else
754 #define L3_ALLOCATION_REG GENX(L3CNTLREG)
755 #define L3_ALLOCATION_REG_num GENX(L3CNTLREG_num)
756 #endif
757 
758    iris_emit_reg(batch, L3_ALLOCATION_REG, reg) {
759 #if GFX_VER < 11
760       reg.SLMEnable = cfg->n[INTEL_L3P_SLM] > 0;
761 #endif
762 #if GFX_VER == 11
763       /* Wa_1406697149: Bit 9 "Error Detection Behavior Control" must be set
764        * in L3CNTLREG register. The default setting of the bit is not the
765        * desirable behavior.
766        */
767       reg.ErrorDetectionBehaviorControl = true;
768       reg.UseFullWays = true;
769 #endif
770       if (GFX_VER < 12 || cfg) {
771          reg.URBAllocation = cfg->n[INTEL_L3P_URB];
772          reg.ROAllocation = cfg->n[INTEL_L3P_RO];
773          reg.DCAllocation = cfg->n[INTEL_L3P_DC];
774          reg.AllAllocation = cfg->n[INTEL_L3P_ALL];
775       } else {
776 #if GFX_VER >= 12
777          reg.L3FullWayAllocationEnable = true;
778 #endif
779       }
780    }
781 }
782 
783 #if GFX_VER == 9
784 static void
iris_enable_obj_preemption(struct iris_batch *batch, bool enable)
786 {
787    /* A fixed function pipe flush is required before modifying this field */
788    iris_emit_end_of_pipe_sync(batch, enable ? "enable preemption"
789                                             : "disable preemption",
790                               PIPE_CONTROL_RENDER_TARGET_FLUSH);
791 
792    /* enable object level preemption */
793    iris_emit_reg(batch, GENX(CS_CHICKEN1), reg) {
794       reg.ReplayMode = enable;
795       reg.ReplayModeMask = true;
796    }
797 }
798 #endif
799 
800 /**
801  * Compute an \p n x \p m pixel hashing table usable as slice, subslice or
802  * pixel pipe hashing table.  The resulting table is the cyclic repetition of
803  * a fixed pattern with periodicity equal to \p period.
804  *
805  * If \p index is specified to be equal to \p period, a 2-way hashing table
806  * will be generated such that indices 0 and 1 are returned for the following
807  * fractions of entries respectively:
808  *
809  *   p_0 = ceil(period / 2) / period
810  *   p_1 = floor(period / 2) / period
811  *
812  * If \p index is even and less than \p period, a 3-way hashing table will be
813  * generated such that indices 0, 1 and 2 are returned for the following
814  * fractions of entries:
815  *
816  *   p_0 = (ceil(period / 2) - 1) / period
817  *   p_1 = floor(period / 2) / period
818  *   p_2 = 1 / period
819  *
 * The equations above apply if \p flip is equal to 0; if it is equal to 1,
 * p_0 and p_1 will be swapped in the result.  Note that in the context of
 * pixel pipe hashing this can always be 0 on Gfx12 platforms, since the
 * hardware transparently remaps logical indices found in the table to
 * physical pixel pipe indices from the highest to lowest EU count.
825  */
826 UNUSED static void
calculate_pixel_hashing_table(unsigned n, unsigned m,
828                               unsigned period, unsigned index, bool flip,
829                               uint32_t *p)
830 {
831    for (unsigned i = 0; i < n; i++) {
832       for (unsigned j = 0; j < m; j++) {
833          const unsigned k = (i + j) % period;
834          p[j + m * i] = (k == index ? 2 : (k & 1) ^ flip);
835       }
836    }
837 }
838 
839 #if GFX_VER == 11
840 static void
gfx11_upload_pixel_hashing_tables(struct iris_batch *batch)
842 {
843    const struct intel_device_info *devinfo = &batch->screen->devinfo;
844    assert(devinfo->ppipe_subslices[2] == 0);
845 
846    if (devinfo->ppipe_subslices[0] == devinfo->ppipe_subslices[1])
847       return;
848 
849    struct iris_context *ice = batch->ice;
850    assert(&ice->batches[IRIS_BATCH_RENDER] == batch);
851 
852    unsigned size = GENX(SLICE_HASH_TABLE_length) * 4;
853    uint32_t hash_address;
854    struct pipe_resource *tmp = NULL;
855    uint32_t *map =
856       stream_state(batch, ice->state.dynamic_uploader, &tmp,
857                    size, 64, &hash_address);
858    pipe_resource_reference(&tmp, NULL);
859 
860    const bool flip = devinfo->ppipe_subslices[0] < devinfo->ppipe_subslices[1];
861    struct GENX(SLICE_HASH_TABLE) table;
862    calculate_pixel_hashing_table(16, 16, 3, 3, flip, table.Entry[0]);
863 
864    GENX(SLICE_HASH_TABLE_pack)(NULL, map, &table);
865 
866    iris_emit_cmd(batch, GENX(3DSTATE_SLICE_TABLE_STATE_POINTERS), ptr) {
867       ptr.SliceHashStatePointerValid = true;
868       ptr.SliceHashTableStatePointer = hash_address;
869    }
870 
871    iris_emit_cmd(batch, GENX(3DSTATE_3D_MODE), mode) {
872       mode.SliceHashingTableEnable = true;
873    }
874 }
875 #elif GFX_VERx10 == 120
876 static void
gfx12_upload_pixel_hashing_tables(struct iris_batch *batch)
878 {
879    const struct intel_device_info *devinfo = &batch->screen->devinfo;
880    /* For each n calculate ppipes_of[n], equal to the number of pixel pipes
881     * present with n active dual subslices.
882     */
883    unsigned ppipes_of[3] = {};
884 
885    for (unsigned n = 0; n < ARRAY_SIZE(ppipes_of); n++) {
886       for (unsigned p = 0; p < ARRAY_SIZE(devinfo->ppipe_subslices); p++)
887          ppipes_of[n] += (devinfo->ppipe_subslices[p] == n);
888    }
889 
890    /* Gfx12 has three pixel pipes. */
891    assert(ppipes_of[0] + ppipes_of[1] + ppipes_of[2] == 3);
892 
893    if (ppipes_of[2] == 3 || ppipes_of[0] == 2) {
894       /* All three pixel pipes have the maximum number of active dual
895        * subslices, or there is only one active pixel pipe: Nothing to do.
896        */
897       return;
898    }
899 
900    iris_emit_cmd(batch, GENX(3DSTATE_SUBSLICE_HASH_TABLE), p) {
901       p.SliceHashControl[0] = TABLE_0;
902 
903       if (ppipes_of[2] == 2 && ppipes_of[0] == 1)
904          calculate_pixel_hashing_table(8, 16, 2, 2, 0, p.TwoWayTableEntry[0]);
905       else if (ppipes_of[2] == 1 && ppipes_of[1] == 1 && ppipes_of[0] == 1)
906          calculate_pixel_hashing_table(8, 16, 3, 3, 0, p.TwoWayTableEntry[0]);
907 
908       if (ppipes_of[2] == 2 && ppipes_of[1] == 1)
909          calculate_pixel_hashing_table(8, 16, 5, 4, 0, p.ThreeWayTableEntry[0]);
910       else if (ppipes_of[2] == 2 && ppipes_of[0] == 1)
911          calculate_pixel_hashing_table(8, 16, 2, 2, 0, p.ThreeWayTableEntry[0]);
912       else if (ppipes_of[2] == 1 && ppipes_of[1] == 1 && ppipes_of[0] == 1)
913          calculate_pixel_hashing_table(8, 16, 3, 3, 0, p.ThreeWayTableEntry[0]);
914       else
915          unreachable("Illegal fusing.");
916    }
917 
918    iris_emit_cmd(batch, GENX(3DSTATE_3D_MODE), p) {
919       p.SubsliceHashingTableEnable = true;
920       p.SubsliceHashingTableEnableMask = true;
921    }
922 }
923 #endif
924 
925 static void
iris_alloc_push_constants(struct iris_batch *batch)
927 {
928    const struct intel_device_info *devinfo = &batch->screen->devinfo;
929 
930    /* For now, we set a static partitioning of the push constant area,
931     * assuming that all stages could be in use.
932     *
933     * TODO: Try lazily allocating the HS/DS/GS sections as needed, and
934     *       see if that improves performance by offering more space to
935     *       the VS/FS when those aren't in use.  Also, try dynamically
936     *       enabling/disabling it like i965 does.  This would be more
937     *       stalls and may not actually help; we don't know yet.
938     */
939 
940    /* Divide as equally as possible with any remainder given to FRAGMENT. */
941    const unsigned push_constant_kb = devinfo->max_constant_urb_size_kb;
942    const unsigned stage_size = push_constant_kb / 5;
943    const unsigned frag_size = push_constant_kb - 4 * stage_size;
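
   /* For example, with a 32KB push constant area this gives each of the
    * VS/HS/DS/GS stages 6KB and the fragment shader the remaining 8KB.
    */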
944 
945    for (int i = 0; i <= MESA_SHADER_FRAGMENT; i++) {
946       iris_emit_cmd(batch, GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), alloc) {
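         /* 3DSTATE_PUSH_CONSTANT_ALLOC_VS uses subopcode 18; the HS, DS, GS,
          * and PS variants follow sequentially, so adding the stage index
          * selects the command for stage i.
          */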
947          alloc._3DCommandSubOpcode = 18 + i;
948          alloc.ConstantBufferOffset = stage_size * i;
949          alloc.ConstantBufferSize = i == MESA_SHADER_FRAGMENT ? frag_size : stage_size;
950       }
951    }
952 }
953 
954 #if GFX_VER >= 12
955 static void
956 init_aux_map_state(struct iris_batch *batch);
957 #endif
958 
959 /**
960  * Upload initial GPU state for any kind of context.
961  *
962  * These need to happen for both render and compute.
963  */
964 static void
iris_init_common_context(struct iris_batch *batch)
966 {
967 #if GFX_VER == 11
968    iris_emit_reg(batch, GENX(SAMPLER_MODE), reg) {
969       reg.HeaderlessMessageforPreemptableContexts = 1;
970       reg.HeaderlessMessageforPreemptableContextsMask = 1;
971    }
972 
973    /* Bit 1 must be set in HALF_SLICE_CHICKEN7. */
974    iris_emit_reg(batch, GENX(HALF_SLICE_CHICKEN7), reg) {
975       reg.EnabledTexelOffsetPrecisionFix = 1;
976       reg.EnabledTexelOffsetPrecisionFixMask = 1;
977    }
978 #endif
979 }
980 
981 /**
982  * Upload the initial GPU state for a render context.
983  *
984  * This sets some invariant state that needs to be programmed a particular
 * way, but which we never actually change.
986  */
987 static void
iris_init_render_context(struct iris_batch *batch)
989 {
990    UNUSED const struct intel_device_info *devinfo = &batch->screen->devinfo;
991 
992    iris_batch_sync_region_start(batch);
993 
994    emit_pipeline_select(batch, _3D);
995 
996    iris_emit_l3_config(batch, batch->screen->l3_config_3d);
997 
998    init_state_base_address(batch);
999 
1000    iris_init_common_context(batch);
1001 
1002 #if GFX_VER >= 9
1003    iris_emit_reg(batch, GENX(CS_DEBUG_MODE2), reg) {
1004       reg.CONSTANT_BUFFERAddressOffsetDisable = true;
1005       reg.CONSTANT_BUFFERAddressOffsetDisableMask = true;
1006    }
1007 #else
1008    iris_emit_reg(batch, GENX(INSTPM), reg) {
1009       reg.CONSTANT_BUFFERAddressOffsetDisable = true;
1010       reg.CONSTANT_BUFFERAddressOffsetDisableMask = true;
1011    }
1012 #endif
1013 
1014 #if GFX_VER == 9
1015    iris_emit_reg(batch, GENX(CACHE_MODE_1), reg) {
1016       reg.FloatBlendOptimizationEnable = true;
1017       reg.FloatBlendOptimizationEnableMask = true;
1018       reg.MSCRAWHazardAvoidanceBit = true;
1019       reg.MSCRAWHazardAvoidanceBitMask = true;
1020       reg.PartialResolveDisableInVC = true;
1021       reg.PartialResolveDisableInVCMask = true;
1022    }
1023 
1024    if (devinfo->is_geminilake)
1025       init_glk_barrier_mode(batch, GLK_BARRIER_MODE_3D_HULL);
1026 #endif
1027 
1028 #if GFX_VER == 11
1029    iris_emit_reg(batch, GENX(TCCNTLREG), reg) {
1030       reg.L3DataPartialWriteMergingEnable = true;
1031       reg.ColorZPartialWriteMergingEnable = true;
1032       reg.URBPartialWriteMergingEnable = true;
1033       reg.TCDisable = true;
1034    }
1035 
   /* The hardware specification recommends disabling repacking for
    * compatibility with the decompression mechanism in the display
    * controller.
    */
1039    if (devinfo->disable_ccs_repack) {
1040       iris_emit_reg(batch, GENX(CACHE_MODE_0), reg) {
1041          reg.DisableRepackingforCompression = true;
1042          reg.DisableRepackingforCompressionMask = true;
1043       }
1044    }
1045 
1046    gfx11_upload_pixel_hashing_tables(batch);
1047 #endif
1048 
1049 #if GFX_VERx10 == 120
1050    gfx12_upload_pixel_hashing_tables(batch);
1051 #endif
1052 
1053    /* 3DSTATE_DRAWING_RECTANGLE is non-pipelined, so we want to avoid
1054     * changing it dynamically.  We set it to the maximum size here, and
1055     * instead include the render target dimensions in the viewport, so
1056     * viewport extents clipping takes care of pruning stray geometry.
1057     */
1058    iris_emit_cmd(batch, GENX(3DSTATE_DRAWING_RECTANGLE), rect) {
1059       rect.ClippedDrawingRectangleXMax = UINT16_MAX;
1060       rect.ClippedDrawingRectangleYMax = UINT16_MAX;
1061    }
1062 
1063    /* Set the initial MSAA sample positions. */
1064    iris_emit_cmd(batch, GENX(3DSTATE_SAMPLE_PATTERN), pat) {
1065       INTEL_SAMPLE_POS_1X(pat._1xSample);
1066       INTEL_SAMPLE_POS_2X(pat._2xSample);
1067       INTEL_SAMPLE_POS_4X(pat._4xSample);
1068       INTEL_SAMPLE_POS_8X(pat._8xSample);
1069 #if GFX_VER >= 9
1070       INTEL_SAMPLE_POS_16X(pat._16xSample);
1071 #endif
1072    }
1073 
1074    /* Use the legacy AA line coverage computation. */
1075    iris_emit_cmd(batch, GENX(3DSTATE_AA_LINE_PARAMETERS), foo);
1076 
1077    /* Disable chromakeying (it's for media) */
1078    iris_emit_cmd(batch, GENX(3DSTATE_WM_CHROMAKEY), foo);
1079 
1080    /* We want regular rendering, not special HiZ operations. */
1081    iris_emit_cmd(batch, GENX(3DSTATE_WM_HZ_OP), foo);
1082 
1083    /* No polygon stippling offsets are necessary. */
1084    /* TODO: may need to set an offset for origin-UL framebuffers */
1085    iris_emit_cmd(batch, GENX(3DSTATE_POLY_STIPPLE_OFFSET), foo);
1086 
1087    iris_alloc_push_constants(batch);
1088 
1089 
1090 #if GFX_VER >= 12
1091    init_aux_map_state(batch);
1092 #endif
1093 
1094    iris_batch_sync_region_end(batch);
1095 }
1096 
1097 static void
iris_init_compute_context(struct iris_batch *batch)
1099 {
1100    UNUSED const struct intel_device_info *devinfo = &batch->screen->devinfo;
1101 
1102    iris_batch_sync_region_start(batch);
1103 
1104    /* Wa_1607854226:
1105     *
1106     *  Start with pipeline in 3D mode to set the STATE_BASE_ADDRESS.
1107     */
1108 #if GFX_VER == 12
1109    emit_pipeline_select(batch, _3D);
1110 #else
1111    emit_pipeline_select(batch, GPGPU);
1112 #endif
1113 
1114    iris_emit_l3_config(batch, batch->screen->l3_config_cs);
1115 
1116    init_state_base_address(batch);
1117 
1118    iris_init_common_context(batch);
1119 
1120 #if GFX_VER == 12
1121    emit_pipeline_select(batch, GPGPU);
1122 #endif
1123 
1124 #if GFX_VER == 9
1125    if (devinfo->is_geminilake)
1126       init_glk_barrier_mode(batch, GLK_BARRIER_MODE_GPGPU);
1127 #endif
1128 
1129 #if GFX_VER >= 12
1130    init_aux_map_state(batch);
1131 #endif
1132 
1133    iris_batch_sync_region_end(batch);
1134 }
1135 
1136 struct iris_vertex_buffer_state {
1137    /** The VERTEX_BUFFER_STATE hardware structure. */
1138    uint32_t state[GENX(VERTEX_BUFFER_STATE_length)];
1139 
1140    /** The resource to source vertex data from. */
1141    struct pipe_resource *resource;
1142 
1143    int offset;
1144 };
1145 
1146 struct iris_depth_buffer_state {
1147    /* Depth/HiZ/Stencil related hardware packets. */
1148    uint32_t packets[GENX(3DSTATE_DEPTH_BUFFER_length) +
1149                     GENX(3DSTATE_STENCIL_BUFFER_length) +
1150                     GENX(3DSTATE_HIER_DEPTH_BUFFER_length) +
1151                     GENX(3DSTATE_CLEAR_PARAMS_length)];
1152 };
1153 
1154 #if GFX_VERx10 == 120
1155    enum iris_depth_reg_mode {
1156       IRIS_DEPTH_REG_MODE_HW_DEFAULT = 0,
1157       IRIS_DEPTH_REG_MODE_D16,
1158       IRIS_DEPTH_REG_MODE_UNKNOWN,
1159    };
1160 #endif
1161 
1162 /**
1163  * Generation-specific context state (ice->state.genx->...).
1164  *
1165  * Most state can go in iris_context directly, but these encode hardware
1166  * packets which vary by generation.
1167  */
1168 struct iris_genx_state {
1169    struct iris_vertex_buffer_state vertex_buffers[33];
1170    uint32_t last_index_buffer[GENX(3DSTATE_INDEX_BUFFER_length)];
1171 
1172    struct iris_depth_buffer_state depth_buffer;
1173 
1174    uint32_t so_buffers[4 * GENX(3DSTATE_SO_BUFFER_length)];
1175 
1176 #if GFX_VER == 8
1177    bool pma_fix_enabled;
1178 #endif
1179 
1180 #if GFX_VER == 9
1181    /* Is object level preemption enabled? */
1182    bool object_preemption;
1183 #endif
1184 
1185 #if GFX_VERx10 == 120
1186    enum iris_depth_reg_mode depth_reg_mode;
1187 #endif
1188 
1189    struct {
1190 #if GFX_VER == 8
1191       struct brw_image_param image_param[PIPE_MAX_SHADER_IMAGES];
1192 #endif
1193    } shaders[MESA_SHADER_STAGES];
1194 };
1195 
1196 /**
1197  * The pipe->set_blend_color() driver hook.
1198  *
1199  * This corresponds to our COLOR_CALC_STATE.
1200  */
1201 static void
iris_set_blend_color(struct pipe_context *ctx,
1203                      const struct pipe_blend_color *state)
1204 {
1205    struct iris_context *ice = (struct iris_context *) ctx;
1206 
1207    /* Our COLOR_CALC_STATE is exactly pipe_blend_color, so just memcpy */
1208    memcpy(&ice->state.blend_color, state, sizeof(struct pipe_blend_color));
1209    ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE;
1210 }
1211 
1212 /**
1213  * Gallium CSO for blend state (see pipe_blend_state).
1214  */
1215 struct iris_blend_state {
1216    /** Partial 3DSTATE_PS_BLEND */
1217    uint32_t ps_blend[GENX(3DSTATE_PS_BLEND_length)];
1218 
1219    /** Partial BLEND_STATE */
1220    uint32_t blend_state[GENX(BLEND_STATE_length) +
1221                         BRW_MAX_DRAW_BUFFERS * GENX(BLEND_STATE_ENTRY_length)];
1222 
1223    bool alpha_to_coverage; /* for shader key */
1224 
1225    /** Bitfield of whether blending is enabled for RT[i] - for aux resolves */
1226    uint8_t blend_enables;
1227 
1228    /** Bitfield of whether color writes are enabled for RT[i] */
1229    uint8_t color_write_enables;
1230 
1231    /** Does RT[0] use dual color blending? */
1232    bool dual_color_blending;
1233 };
1234 
1235 static enum pipe_blendfactor
fix_blendfactor(enum pipe_blendfactor f, bool alpha_to_one)
1237 {
1238    if (alpha_to_one) {
1239       if (f == PIPE_BLENDFACTOR_SRC1_ALPHA)
1240          return PIPE_BLENDFACTOR_ONE;
1241 
1242       if (f == PIPE_BLENDFACTOR_INV_SRC1_ALPHA)
1243          return PIPE_BLENDFACTOR_ZERO;
1244    }
1245 
1246    return f;
1247 }
1248 
1249 /**
1250  * The pipe->create_blend_state() driver hook.
1251  *
1252  * Translates a pipe_blend_state into iris_blend_state.
1253  */
1254 static void *
iris_create_blend_state(struct pipe_context *ctx,
1256                         const struct pipe_blend_state *state)
1257 {
1258    struct iris_blend_state *cso = malloc(sizeof(struct iris_blend_state));
1259    uint32_t *blend_entry = cso->blend_state + GENX(BLEND_STATE_length);
1260 
1261    cso->blend_enables = 0;
1262    cso->color_write_enables = 0;
1263    STATIC_ASSERT(BRW_MAX_DRAW_BUFFERS <= 8);
1264 
1265    cso->alpha_to_coverage = state->alpha_to_coverage;
1266 
1267    bool indep_alpha_blend = false;
1268 
1269    for (int i = 0; i < BRW_MAX_DRAW_BUFFERS; i++) {
1270       const struct pipe_rt_blend_state *rt =
1271          &state->rt[state->independent_blend_enable ? i : 0];
1272 
1273       enum pipe_blendfactor src_rgb =
1274          fix_blendfactor(rt->rgb_src_factor, state->alpha_to_one);
1275       enum pipe_blendfactor src_alpha =
1276          fix_blendfactor(rt->alpha_src_factor, state->alpha_to_one);
1277       enum pipe_blendfactor dst_rgb =
1278          fix_blendfactor(rt->rgb_dst_factor, state->alpha_to_one);
1279       enum pipe_blendfactor dst_alpha =
1280          fix_blendfactor(rt->alpha_dst_factor, state->alpha_to_one);
1281 
1282       if (rt->rgb_func != rt->alpha_func ||
1283           src_rgb != src_alpha || dst_rgb != dst_alpha)
1284          indep_alpha_blend = true;
1285 
1286       if (rt->blend_enable)
1287          cso->blend_enables |= 1u << i;
1288 
1289       if (rt->colormask)
1290          cso->color_write_enables |= 1u << i;
1291 
1292       iris_pack_state(GENX(BLEND_STATE_ENTRY), blend_entry, be) {
1293          be.LogicOpEnable = state->logicop_enable;
1294          be.LogicOpFunction = state->logicop_func;
1295 
1296          be.PreBlendSourceOnlyClampEnable = false;
1297          be.ColorClampRange = COLORCLAMP_RTFORMAT;
1298          be.PreBlendColorClampEnable = true;
1299          be.PostBlendColorClampEnable = true;
1300 
1301          be.ColorBufferBlendEnable = rt->blend_enable;
1302 
1303          be.ColorBlendFunction          = rt->rgb_func;
1304          be.AlphaBlendFunction          = rt->alpha_func;
1305 
1306          /* The casts prevent warnings about implicit enum type conversions. */
1307          be.SourceBlendFactor           = (int) src_rgb;
1308          be.SourceAlphaBlendFactor      = (int) src_alpha;
1309          be.DestinationBlendFactor      = (int) dst_rgb;
1310          be.DestinationAlphaBlendFactor = (int) dst_alpha;
1311 
1312          be.WriteDisableRed   = !(rt->colormask & PIPE_MASK_R);
1313          be.WriteDisableGreen = !(rt->colormask & PIPE_MASK_G);
1314          be.WriteDisableBlue  = !(rt->colormask & PIPE_MASK_B);
1315          be.WriteDisableAlpha = !(rt->colormask & PIPE_MASK_A);
1316       }
1317       blend_entry += GENX(BLEND_STATE_ENTRY_length);
1318    }
1319 
1320    iris_pack_command(GENX(3DSTATE_PS_BLEND), cso->ps_blend, pb) {
1321       /* pb.HasWriteableRT is filled in at draw time.
1322        * pb.AlphaTestEnable is filled in at draw time.
1323        *
1324        * pb.ColorBufferBlendEnable is filled in at draw time so we can avoid
1325        * setting it when dual color blending without an appropriate shader.
1326        */
1327 
1328       pb.AlphaToCoverageEnable = state->alpha_to_coverage;
1329       pb.IndependentAlphaBlendEnable = indep_alpha_blend;
1330 
1331       /* The casts prevent warnings about implicit enum type conversions. */
1332       pb.SourceBlendFactor =
1333          (int) fix_blendfactor(state->rt[0].rgb_src_factor, state->alpha_to_one);
1334       pb.SourceAlphaBlendFactor =
1335          (int) fix_blendfactor(state->rt[0].alpha_src_factor, state->alpha_to_one);
1336       pb.DestinationBlendFactor =
1337          (int) fix_blendfactor(state->rt[0].rgb_dst_factor, state->alpha_to_one);
1338       pb.DestinationAlphaBlendFactor =
1339          (int) fix_blendfactor(state->rt[0].alpha_dst_factor, state->alpha_to_one);
1340    }
1341 
1342    iris_pack_state(GENX(BLEND_STATE), cso->blend_state, bs) {
1343       bs.AlphaToCoverageEnable = state->alpha_to_coverage;
1344       bs.IndependentAlphaBlendEnable = indep_alpha_blend;
1345       bs.AlphaToOneEnable = state->alpha_to_one;
1346       bs.AlphaToCoverageDitherEnable = state->alpha_to_coverage;
1347       bs.ColorDitherEnable = state->dither;
      /* bs.AlphaTestEnable and bs.AlphaTestFunction are filled in later. */
1349    }
1350 
1351    cso->dual_color_blending = util_blend_state_is_dual(state, 0);
1352 
1353    return cso;
1354 }
1355 
1356 /**
1357  * The pipe->bind_blend_state() driver hook.
1358  *
1359  * Bind a blending CSO and flag related dirty bits.
1360  */
1361 static void
1362 iris_bind_blend_state(struct pipe_context *ctx, void *state)
1363 {
1364    struct iris_context *ice = (struct iris_context *) ctx;
1365    struct iris_blend_state *cso = state;
1366 
1367    ice->state.cso_blend = cso;
1368 
1369    ice->state.dirty |= IRIS_DIRTY_PS_BLEND;
1370    ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
1371    ice->state.stage_dirty |= ice->state.stage_dirty_for_nos[IRIS_NOS_BLEND];
1372 
1373    if (GFX_VER == 8)
1374       ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
1375 }
1376 
1377 /**
1378  * Return true if the FS writes to any color outputs which are not disabled
1379  * via color masking.
1380  */
1381 static bool
1382 has_writeable_rt(const struct iris_blend_state *cso_blend,
1383                  const struct shader_info *fs_info)
1384 {
1385    if (!fs_info)
1386       return false;
1387 
1388    unsigned rt_outputs = fs_info->outputs_written >> FRAG_RESULT_DATA0;
1389 
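   /* Writing gl_FragColor (FRAG_RESULT_COLOR) applies to every color
    * attachment, so count all render targets as written in that case.
    */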
1390    if (fs_info->outputs_written & BITFIELD64_BIT(FRAG_RESULT_COLOR))
1391       rt_outputs = (1 << BRW_MAX_DRAW_BUFFERS) - 1;
1392 
1393    return cso_blend->color_write_enables & rt_outputs;
1394 }
1395 
1396 /**
1397  * Gallium CSO for depth, stencil, and alpha testing state.
1398  */
1399 struct iris_depth_stencil_alpha_state {
1400    /** Partial 3DSTATE_WM_DEPTH_STENCIL. */
1401    uint32_t wmds[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
1402 
1403 #if GFX_VER >= 12
1404    uint32_t depth_bounds[GENX(3DSTATE_DEPTH_BOUNDS_length)];
1405 #endif
1406 
1407    /** Outbound to BLEND_STATE, 3DSTATE_PS_BLEND, COLOR_CALC_STATE. */
1408    unsigned alpha_enabled:1;
1409    unsigned alpha_func:3;     /**< PIPE_FUNC_x */
1410    float alpha_ref_value;     /**< reference value */
1411 
1412    /** Outbound to resolve and cache set tracking. */
1413    bool depth_writes_enabled;
1414    bool stencil_writes_enabled;
1415 
1416    /** Outbound to Gfx8-9 PMA stall equations */
1417    bool depth_test_enabled;
1418 };
1419 
1420 /**
1421  * The pipe->create_depth_stencil_alpha_state() driver hook.
1422  *
1423  * We encode most of 3DSTATE_WM_DEPTH_STENCIL, and just save off the alpha
1424  * testing state since we need pieces of it in a variety of places.
1425  */
1426 static void *
1427 iris_create_zsa_state(struct pipe_context *ctx,
1428                       const struct pipe_depth_stencil_alpha_state *state)
1429 {
1430    struct iris_depth_stencil_alpha_state *cso =
1431       malloc(sizeof(struct iris_depth_stencil_alpha_state));
1432 
1433    bool two_sided_stencil = state->stencil[1].enabled;
1434 
1435    cso->alpha_enabled = state->alpha_enabled;
1436    cso->alpha_func = state->alpha_func;
1437    cso->alpha_ref_value = state->alpha_ref_value;
1438    cso->depth_writes_enabled = state->depth_writemask;
1439    cso->depth_test_enabled = state->depth_enabled;
1440    cso->stencil_writes_enabled =
1441       state->stencil[0].writemask != 0 ||
1442       (two_sided_stencil && state->stencil[1].writemask != 0);
1443 
1444    /* gallium frontends need to optimize away EQUAL writes for us. */
1445    assert(!(state->depth_func == PIPE_FUNC_EQUAL && state->depth_writemask));
1446 
1447    iris_pack_command(GENX(3DSTATE_WM_DEPTH_STENCIL), cso->wmds, wmds) {
1448       wmds.StencilFailOp = state->stencil[0].fail_op;
1449       wmds.StencilPassDepthFailOp = state->stencil[0].zfail_op;
1450       wmds.StencilPassDepthPassOp = state->stencil[0].zpass_op;
1451       wmds.StencilTestFunction =
1452          translate_compare_func(state->stencil[0].func);
1453       wmds.BackfaceStencilFailOp = state->stencil[1].fail_op;
1454       wmds.BackfaceStencilPassDepthFailOp = state->stencil[1].zfail_op;
1455       wmds.BackfaceStencilPassDepthPassOp = state->stencil[1].zpass_op;
1456       wmds.BackfaceStencilTestFunction =
1457          translate_compare_func(state->stencil[1].func);
1458       wmds.DepthTestFunction = translate_compare_func(state->depth_func);
1459       wmds.DoubleSidedStencilEnable = two_sided_stencil;
1460       wmds.StencilTestEnable = state->stencil[0].enabled;
1461       wmds.StencilBufferWriteEnable =
1462          state->stencil[0].writemask != 0 ||
1463          (two_sided_stencil && state->stencil[1].writemask != 0);
1464       wmds.DepthTestEnable = state->depth_enabled;
1465       wmds.DepthBufferWriteEnable = state->depth_writemask;
1466       wmds.StencilTestMask = state->stencil[0].valuemask;
1467       wmds.StencilWriteMask = state->stencil[0].writemask;
1468       wmds.BackfaceStencilTestMask = state->stencil[1].valuemask;
1469       wmds.BackfaceStencilWriteMask = state->stencil[1].writemask;
1470       /* wmds.[Backface]StencilReferenceValue are merged later */
1471 #if GFX_VER >= 12
1472       wmds.StencilReferenceValueModifyDisable = true;
1473 #endif
1474    }
1475 
1476 #if GFX_VER >= 12
1477    iris_pack_command(GENX(3DSTATE_DEPTH_BOUNDS), cso->depth_bounds, depth_bounds) {
1478       depth_bounds.DepthBoundsTestValueModifyDisable = false;
1479       depth_bounds.DepthBoundsTestEnableModifyDisable = false;
1480       depth_bounds.DepthBoundsTestEnable = state->depth_bounds_test;
1481       depth_bounds.DepthBoundsTestMinValue = state->depth_bounds_min;
1482       depth_bounds.DepthBoundsTestMaxValue = state->depth_bounds_max;
1483    }
1484 #endif
1485 
1486    return cso;
1487 }
1488 
1489 /**
1490  * The pipe->bind_depth_stencil_alpha_state() driver hook.
1491  *
1492  * Bind a depth/stencil/alpha CSO and flag related dirty bits.
1493  */
1494 static void
1495 iris_bind_zsa_state(struct pipe_context *ctx, void *state)
1496 {
1497    struct iris_context *ice = (struct iris_context *) ctx;
1498    struct iris_depth_stencil_alpha_state *old_cso = ice->state.cso_zsa;
1499    struct iris_depth_stencil_alpha_state *new_cso = state;
1500 
1501    if (new_cso) {
1502       if (cso_changed(alpha_ref_value))
1503          ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE;
1504 
1505       if (cso_changed(alpha_enabled))
1506          ice->state.dirty |= IRIS_DIRTY_PS_BLEND | IRIS_DIRTY_BLEND_STATE;
1507 
1508       if (cso_changed(alpha_func))
1509          ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
1510 
1511       if (cso_changed(depth_writes_enabled) || cso_changed(stencil_writes_enabled))
1512          ice->state.dirty |= IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
1513 
1514       ice->state.depth_writes_enabled = new_cso->depth_writes_enabled;
1515       ice->state.stencil_writes_enabled = new_cso->stencil_writes_enabled;
1516 
1517 #if GFX_VER >= 12
1518       if (cso_changed(depth_bounds))
1519          ice->state.dirty |= IRIS_DIRTY_DEPTH_BOUNDS;
1520 #endif
1521    }
1522 
1523    ice->state.cso_zsa = new_cso;
1524    ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
1525    ice->state.dirty |= IRIS_DIRTY_WM_DEPTH_STENCIL;
1526    ice->state.stage_dirty |=
1527       ice->state.stage_dirty_for_nos[IRIS_NOS_DEPTH_STENCIL_ALPHA];
1528 
1529    if (GFX_VER == 8)
1530       ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
1531 }
1532 
1533 #if GFX_VER == 8
1534 static bool
1535 want_pma_fix(struct iris_context *ice)
1536 {
1537    UNUSED struct iris_screen *screen = (void *) ice->ctx.screen;
1538    UNUSED const struct intel_device_info *devinfo = &screen->devinfo;
1539    const struct brw_wm_prog_data *wm_prog_data = (void *)
1540       ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
1541    const struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
1542    const struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
1543    const struct iris_blend_state *cso_blend = ice->state.cso_blend;
1544 
1545    /* In very specific combinations of state, we can instruct Gfx8-9 hardware
1546     * to avoid stalling at the pixel mask array.  The state equations are
1547     * documented in these places:
1548     *
1549     * - Gfx8 Depth PMA Fix:   CACHE_MODE_1::NP_PMA_FIX_ENABLE
1550     * - Gfx9 Stencil PMA Fix: CACHE_MODE_0::STC PMA Optimization Enable
1551     *
1552     * Both equations share some common elements:
1553     *
1554     *    no_hiz_op =
1555     *       !(3DSTATE_WM_HZ_OP::DepthBufferClear ||
1556     *         3DSTATE_WM_HZ_OP::DepthBufferResolve ||
1557     *         3DSTATE_WM_HZ_OP::Hierarchical Depth Buffer Resolve Enable ||
1558     *         3DSTATE_WM_HZ_OP::StencilBufferClear) &&
1559     *
1560     *    killpixels =
1561     *       3DSTATE_WM::ForceKillPix != ForceOff &&
1562     *       (3DSTATE_PS_EXTRA::PixelShaderKillsPixels ||
1563     *        3DSTATE_PS_EXTRA::oMask Present to RenderTarget ||
1564     *        3DSTATE_PS_BLEND::AlphaToCoverageEnable ||
1565     *        3DSTATE_PS_BLEND::AlphaTestEnable ||
1566     *        3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable)
1567     *
1568     *    (Technically the stencil PMA treats ForceKillPix differently,
1569     *     but I think this is a documentation oversight, and we don't
1570     *     ever use it in this way, so it doesn't matter).
1571     *
1572     *    common_pma_fix =
1573     *       3DSTATE_WM::ForceThreadDispatch != 1 &&
1574     *       3DSTATE_RASTER::ForceSampleCount == NUMRASTSAMPLES_0 &&
1575     *       3DSTATE_DEPTH_BUFFER::SURFACE_TYPE != NULL &&
1576     *       3DSTATE_DEPTH_BUFFER::HIZ Enable &&
1577     *       3DSTATE_WM::EDSC_Mode != EDSC_PREPS &&
1578     *       3DSTATE_PS_EXTRA::PixelShaderValid &&
1579     *       no_hiz_op
1580     *
1581     * These are always true:
1582     *
1583     *    3DSTATE_RASTER::ForceSampleCount == NUMRASTSAMPLES_0
1584     *    3DSTATE_PS_EXTRA::PixelShaderValid
1585     *
1586     * Also, we never use the normal drawing path for HiZ ops; these are true:
1587     *
1588     *    !(3DSTATE_WM_HZ_OP::DepthBufferClear ||
1589     *      3DSTATE_WM_HZ_OP::DepthBufferResolve ||
1590     *      3DSTATE_WM_HZ_OP::Hierarchical Depth Buffer Resolve Enable ||
1591     *      3DSTATE_WM_HZ_OP::StencilBufferClear)
1592     *
1593     * This happens sometimes:
1594     *
1595     *    3DSTATE_WM::ForceThreadDispatch != 1
1596     *
1597     * However, we choose to ignore it as it either agrees with the signal
1598     * (dispatch was already enabled, so nothing out of the ordinary), or
1599     * there are no framebuffer attachments (so no depth or HiZ anyway,
1600     * meaning the PMA signal will already be disabled).
1601     */
1602 
1603    if (!cso_fb->zsbuf)
1604       return false;
1605 
1606    struct iris_resource *zres, *sres;
1607    iris_get_depth_stencil_resources(cso_fb->zsbuf->texture, &zres, &sres);
1608 
1609    /* 3DSTATE_DEPTH_BUFFER::SURFACE_TYPE != NULL &&
1610     * 3DSTATE_DEPTH_BUFFER::HIZ Enable &&
1611     */
1612    if (!zres || !iris_resource_level_has_hiz(zres, cso_fb->zsbuf->u.tex.level))
1613       return false;
1614 
1615    /* 3DSTATE_WM::EDSC_Mode != EDSC_PREPS */
1616    if (wm_prog_data->early_fragment_tests)
1617       return false;
1618 
1619    /* 3DSTATE_WM::ForceKillPix != ForceOff &&
1620     * (3DSTATE_PS_EXTRA::PixelShaderKillsPixels ||
1621     *  3DSTATE_PS_EXTRA::oMask Present to RenderTarget ||
1622     *  3DSTATE_PS_BLEND::AlphaToCoverageEnable ||
1623     *  3DSTATE_PS_BLEND::AlphaTestEnable ||
1624     *  3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable)
1625     */
1626    bool killpixels = wm_prog_data->uses_kill || wm_prog_data->uses_omask ||
1627                      cso_blend->alpha_to_coverage || cso_zsa->alpha_enabled;
1628 
1629    /* The Gfx8 depth PMA equation becomes:
1630     *
1631     *    depth_writes =
1632     *       3DSTATE_WM_DEPTH_STENCIL::DepthWriteEnable &&
1633     *       3DSTATE_DEPTH_BUFFER::DEPTH_WRITE_ENABLE
1634     *
1635     *    stencil_writes =
1636     *       3DSTATE_WM_DEPTH_STENCIL::Stencil Buffer Write Enable &&
1637     *       3DSTATE_DEPTH_BUFFER::STENCIL_WRITE_ENABLE &&
1638     *       3DSTATE_STENCIL_BUFFER::STENCIL_BUFFER_ENABLE
1639     *
1640     *    Z_PMA_OPT =
1641     *       common_pma_fix &&
1642     *       3DSTATE_WM_DEPTH_STENCIL::DepthTestEnable &&
1643     *       ((killpixels && (depth_writes || stencil_writes)) ||
1644     *        3DSTATE_PS_EXTRA::PixelShaderComputedDepthMode != PSCDEPTH_OFF)
1645     *
1646     */
1647    if (!cso_zsa->depth_test_enabled)
1648       return false;
1649 
1650    return wm_prog_data->computed_depth_mode != PSCDEPTH_OFF ||
1651           (killpixels && (cso_zsa->depth_writes_enabled ||
1652                           (sres && cso_zsa->stencil_writes_enabled)));
1653 }
1654 #endif
1655 
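/* A minimal usage sketch (illustrative only; the actual call site is in the
 * draw-time state upload code elsewhere in this file):
 *
 *    if (ice->state.dirty & IRIS_DIRTY_PMA_FIX)
 *       genX(update_pma_fix)(ice, batch, want_pma_fix(ice));
 */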
1656 void
1657 genX(update_pma_fix)(struct iris_context *ice,
1658                      struct iris_batch *batch,
1659                      bool enable)
1660 {
1661 #if GFX_VER == 8
1662    struct iris_genx_state *genx = ice->state.genx;
1663 
1664    if (genx->pma_fix_enabled == enable)
1665       return;
1666 
1667    genx->pma_fix_enabled = enable;
1668 
1669    /* According to the Broadwell PIPE_CONTROL documentation, software should
1670     * emit a PIPE_CONTROL with the CS Stall and Depth Cache Flush bits set
1671     * prior to the LRI.  If stencil buffer writes are enabled, then a Render
     * Cache Flush is also necessary.
1672     *
1673     * The Gfx9 docs say to use a depth stall rather than a command streamer
1674     * stall.  However, the hardware seems to violently disagree.  A full
1675     * command streamer stall seems to be needed in both cases.
1676     */
1677    iris_emit_pipe_control_flush(batch, "PMA fix change (1/2)",
1678                                 PIPE_CONTROL_CS_STALL |
1679                                 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
1680                                 PIPE_CONTROL_RENDER_TARGET_FLUSH);
1681 
1682    iris_emit_reg(batch, GENX(CACHE_MODE_1), reg) {
1683       reg.NPPMAFixEnable = enable;
1684       reg.NPEarlyZFailsDisable = enable;
1685       reg.NPPMAFixEnableMask = true;
1686       reg.NPEarlyZFailsDisableMask = true;
1687    }
1688 
1689    /* After the LRI, a PIPE_CONTROL with both the Depth Stall and Depth Cache
1690     * Flush bits is often necessary.  We do it regardless because it's easier.
1691     * The render cache flush is also necessary if stencil writes are enabled.
1692     *
1693     * Again, the Gfx9 docs give a different set of flushes but the Broadwell
1694     * flushes seem to work just as well.
1695     */
1696    iris_emit_pipe_control_flush(batch, "PMA fix change (2/2)",
1697                                 PIPE_CONTROL_DEPTH_STALL |
1698                                 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
1699                                 PIPE_CONTROL_RENDER_TARGET_FLUSH);
1700 #endif
1701 }
1702 
1703 /**
1704  * Gallium CSO for rasterizer state.
1705  */
1706 struct iris_rasterizer_state {
1707    uint32_t sf[GENX(3DSTATE_SF_length)];
1708    uint32_t clip[GENX(3DSTATE_CLIP_length)];
1709    uint32_t raster[GENX(3DSTATE_RASTER_length)];
1710    uint32_t wm[GENX(3DSTATE_WM_length)];
1711    uint32_t line_stipple[GENX(3DSTATE_LINE_STIPPLE_length)];
1712 
1713    uint8_t num_clip_plane_consts;
1714    bool clip_halfz; /* for CC_VIEWPORT */
1715    bool depth_clip_near; /* for CC_VIEWPORT */
1716    bool depth_clip_far; /* for CC_VIEWPORT */
1717    bool flatshade; /* for shader state */
1718    bool flatshade_first; /* for stream output */
1719    bool clamp_fragment_color; /* for shader state */
1720    bool light_twoside; /* for shader state */
1721    bool rasterizer_discard; /* for 3DSTATE_STREAMOUT and 3DSTATE_CLIP */
1722    bool half_pixel_center; /* for 3DSTATE_MULTISAMPLE */
1723    bool line_stipple_enable;
1724    bool poly_stipple_enable;
1725    bool multisample;
1726    bool force_persample_interp;
1727    bool conservative_rasterization;
1728    bool fill_mode_point;
1729    bool fill_mode_line;
1730    bool fill_mode_point_or_line;
1731    enum pipe_sprite_coord_mode sprite_coord_mode; /* PIPE_SPRITE_* */
1732    uint16_t sprite_coord_enable;
1733 };
1734 
1735 static float
1736 get_line_width(const struct pipe_rasterizer_state *state)
1737 {
1738    float line_width = state->line_width;
1739 
1740    /* From the OpenGL 4.4 spec:
1741     *
1742     * "The actual width of non-antialiased lines is determined by rounding
1743     *  the supplied width to the nearest integer, then clamping it to the
1744     *  implementation-dependent maximum non-antialiased line width."
1745     */
1746    if (!state->multisample && !state->line_smooth)
1747       line_width = roundf(state->line_width);
1748 
1749    if (!state->multisample && state->line_smooth && line_width < 1.5f) {
1750       /* For 1 pixel line thickness or less, the general anti-aliasing
1751        * algorithm gives up, and a garbage line is generated.  Setting a
1752        * Line Width of 0.0 specifies the rasterization of the "thinnest"
1753        * (one-pixel-wide), non-antialiased lines.
1754        *
1755        * Lines rendered with zero Line Width are rasterized using the
1756        * "Grid Intersection Quantization" rules as specified by the
1757        * "Zero-Width (Cosmetic) Line Rasterization" section of the docs.
1758        */
1759       line_width = 0.0f;
1760    }
1761 
1762    return line_width;
1763 }
1764 
1765 /**
1766  * The pipe->create_rasterizer_state() driver hook.
1767  */
1768 static void *
1769 iris_create_rasterizer_state(struct pipe_context *ctx,
1770                              const struct pipe_rasterizer_state *state)
1771 {
1772    struct iris_rasterizer_state *cso =
1773       malloc(sizeof(struct iris_rasterizer_state));
1774 
1775    cso->multisample = state->multisample;
1776    cso->force_persample_interp = state->force_persample_interp;
1777    cso->clip_halfz = state->clip_halfz;
1778    cso->depth_clip_near = state->depth_clip_near;
1779    cso->depth_clip_far = state->depth_clip_far;
1780    cso->flatshade = state->flatshade;
1781    cso->flatshade_first = state->flatshade_first;
1782    cso->clamp_fragment_color = state->clamp_fragment_color;
1783    cso->light_twoside = state->light_twoside;
1784    cso->rasterizer_discard = state->rasterizer_discard;
1785    cso->half_pixel_center = state->half_pixel_center;
1786    cso->sprite_coord_mode = state->sprite_coord_mode;
1787    cso->sprite_coord_enable = state->sprite_coord_enable;
1788    cso->line_stipple_enable = state->line_stipple_enable;
1789    cso->poly_stipple_enable = state->poly_stipple_enable;
1790    cso->conservative_rasterization =
1791       state->conservative_raster_mode == PIPE_CONSERVATIVE_RASTER_POST_SNAP;
1792 
1793    cso->fill_mode_point =
1794       state->fill_front == PIPE_POLYGON_MODE_POINT ||
1795       state->fill_back == PIPE_POLYGON_MODE_POINT;
1796    cso->fill_mode_line =
1797       state->fill_front == PIPE_POLYGON_MODE_LINE ||
1798       state->fill_back == PIPE_POLYGON_MODE_LINE;
1799    cso->fill_mode_point_or_line =
1800       cso->fill_mode_point ||
1801       cso->fill_mode_line;
1802 
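   /* For example, clip_plane_enable = 0x5 (planes 0 and 2) gives
    * util_logbase2(0x5) + 1 = 3 constants, covering the disabled plane 1
    * as well.
    */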
1803    if (state->clip_plane_enable != 0)
1804       cso->num_clip_plane_consts = util_logbase2(state->clip_plane_enable) + 1;
1805    else
1806       cso->num_clip_plane_consts = 0;
1807 
1808    float line_width = get_line_width(state);
1809 
1810    iris_pack_command(GENX(3DSTATE_SF), cso->sf, sf) {
1811       sf.StatisticsEnable = true;
1812       sf.AALineDistanceMode = AALINEDISTANCE_TRUE;
1813       sf.LineEndCapAntialiasingRegionWidth =
1814          state->line_smooth ? _10pixels : _05pixels;
1815       sf.LastPixelEnable = state->line_last_pixel;
1816       sf.LineWidth = line_width;
1817       sf.SmoothPointEnable = (state->point_smooth || state->multisample) &&
1818                              !state->point_quad_rasterization;
1819       sf.PointWidthSource = state->point_size_per_vertex ? Vertex : State;
1820       sf.PointWidth = CLAMP(state->point_size, 0.125f, 255.875f);
1821 
1822       if (state->flatshade_first) {
1823          sf.TriangleFanProvokingVertexSelect = 1;
1824       } else {
1825          sf.TriangleStripListProvokingVertexSelect = 2;
1826          sf.TriangleFanProvokingVertexSelect = 2;
1827          sf.LineStripListProvokingVertexSelect = 1;
1828       }
1829    }
1830 
1831    iris_pack_command(GENX(3DSTATE_RASTER), cso->raster, rr) {
1832       rr.FrontWinding = state->front_ccw ? CounterClockwise : Clockwise;
1833       rr.CullMode = translate_cull_mode(state->cull_face);
1834       rr.FrontFaceFillMode = translate_fill_mode(state->fill_front);
1835       rr.BackFaceFillMode = translate_fill_mode(state->fill_back);
1836       rr.DXMultisampleRasterizationEnable = state->multisample;
1837       rr.GlobalDepthOffsetEnableSolid = state->offset_tri;
1838       rr.GlobalDepthOffsetEnableWireframe = state->offset_line;
1839       rr.GlobalDepthOffsetEnablePoint = state->offset_point;
1840       rr.GlobalDepthOffsetConstant = state->offset_units * 2;
1841       rr.GlobalDepthOffsetScale = state->offset_scale;
1842       rr.GlobalDepthOffsetClamp = state->offset_clamp;
1843       rr.SmoothPointEnable = state->point_smooth;
1844       rr.AntialiasingEnable = state->line_smooth;
1845       rr.ScissorRectangleEnable = state->scissor;
1846 #if GFX_VER >= 9
1847       rr.ViewportZNearClipTestEnable = state->depth_clip_near;
1848       rr.ViewportZFarClipTestEnable = state->depth_clip_far;
1849       rr.ConservativeRasterizationEnable =
1850          cso->conservative_rasterization;
1851 #else
1852       rr.ViewportZClipTestEnable = (state->depth_clip_near || state->depth_clip_far);
1853 #endif
1854    }
1855 
1856    iris_pack_command(GENX(3DSTATE_CLIP), cso->clip, cl) {
1857       /* cl.NonPerspectiveBarycentricEnable is filled in at draw time from
1858        * the FS program; cl.ForceZeroRTAIndexEnable is filled in from the FB.
1859        */
1860       cl.EarlyCullEnable = true;
1861       cl.UserClipDistanceClipTestEnableBitmask = state->clip_plane_enable;
1862       cl.ForceUserClipDistanceClipTestEnableBitmask = true;
1863       cl.APIMode = state->clip_halfz ? APIMODE_D3D : APIMODE_OGL;
1864       cl.GuardbandClipTestEnable = true;
1865       cl.ClipEnable = true;
1866       cl.MinimumPointWidth = 0.125;
1867       cl.MaximumPointWidth = 255.875;
1868 
1869       if (state->flatshade_first) {
1870          cl.TriangleFanProvokingVertexSelect = 1;
1871       } else {
1872          cl.TriangleStripListProvokingVertexSelect = 2;
1873          cl.TriangleFanProvokingVertexSelect = 2;
1874          cl.LineStripListProvokingVertexSelect = 1;
1875       }
1876    }
1877 
1878    iris_pack_command(GENX(3DSTATE_WM), cso->wm, wm) {
1879       /* wm.BarycentricInterpolationMode and wm.EarlyDepthStencilControl are
1880        * filled in at draw time from the FS program.
1881        */
1882       wm.LineAntialiasingRegionWidth = _10pixels;
1883       wm.LineEndCapAntialiasingRegionWidth = _05pixels;
1884       wm.PointRasterizationRule = RASTRULE_UPPER_RIGHT;
1885       wm.LineStippleEnable = state->line_stipple_enable;
1886       wm.PolygonStippleEnable = state->poly_stipple_enable;
1887    }
1888 
1889    /* Remap from 0..255 back to 1..256 */
1890    const unsigned line_stipple_factor = state->line_stipple_factor + 1;
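   /* For example, an API repeat factor of 4 arrives as 3, becomes 4 here,
    * and is programmed below as LineStippleInverseRepeatCount = 0.25f.
    */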
1891 
1892    iris_pack_command(GENX(3DSTATE_LINE_STIPPLE), cso->line_stipple, line) {
1893       if (state->line_stipple_enable) {
1894          line.LineStipplePattern = state->line_stipple_pattern;
1895          line.LineStippleInverseRepeatCount = 1.0f / line_stipple_factor;
1896          line.LineStippleRepeatCount = line_stipple_factor;
1897       }
1898    }
1899 
1900    return cso;
1901 }
1902 
1903 /**
1904  * The pipe->bind_rasterizer_state() driver hook.
1905  *
1906  * Bind a rasterizer CSO and flag related dirty bits.
1907  */
1908 static void
1909 iris_bind_rasterizer_state(struct pipe_context *ctx, void *state)
1910 {
1911    struct iris_context *ice = (struct iris_context *) ctx;
1912    struct iris_rasterizer_state *old_cso = ice->state.cso_rast;
1913    struct iris_rasterizer_state *new_cso = state;
1914 
1915    if (new_cso) {
1916       /* Try to avoid re-emitting 3DSTATE_LINE_STIPPLE, it's non-pipelined */
1917       if (cso_changed_memcmp(line_stipple))
1918          ice->state.dirty |= IRIS_DIRTY_LINE_STIPPLE;
1919 
1920       if (cso_changed(half_pixel_center))
1921          ice->state.dirty |= IRIS_DIRTY_MULTISAMPLE;
1922 
1923       if (cso_changed(line_stipple_enable) || cso_changed(poly_stipple_enable))
1924          ice->state.dirty |= IRIS_DIRTY_WM;
1925 
1926       if (cso_changed(rasterizer_discard))
1927          ice->state.dirty |= IRIS_DIRTY_STREAMOUT | IRIS_DIRTY_CLIP;
1928 
1929       if (cso_changed(flatshade_first))
1930          ice->state.dirty |= IRIS_DIRTY_STREAMOUT;
1931 
1932       if (cso_changed(depth_clip_near) || cso_changed(depth_clip_far) ||
1933           cso_changed(clip_halfz))
1934          ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
1935 
1936       if (cso_changed(sprite_coord_enable) ||
1937           cso_changed(sprite_coord_mode) ||
1938           cso_changed(light_twoside))
1939          ice->state.dirty |= IRIS_DIRTY_SBE;
1940 
1941       if (cso_changed(conservative_rasterization))
1942          ice->state.stage_dirty |= IRIS_STAGE_DIRTY_FS;
1943    }
1944 
1945    ice->state.cso_rast = new_cso;
1946    ice->state.dirty |= IRIS_DIRTY_RASTER;
1947    ice->state.dirty |= IRIS_DIRTY_CLIP;
1948    ice->state.stage_dirty |=
1949       ice->state.stage_dirty_for_nos[IRIS_NOS_RASTERIZER];
1950 }
1951 
1952 /**
1953  * Return true if the given wrap mode requires the border color to exist.
1954  *
1955  * (We can skip uploading it if the sampler isn't going to use it.)
1956  */
1957 static bool
1958 wrap_mode_needs_border_color(unsigned wrap_mode)
1959 {
1960    return wrap_mode == TCM_CLAMP_BORDER || wrap_mode == TCM_HALF_BORDER;
1961 }
1962 
1963 /**
1964  * Gallium CSO for sampler state.
1965  */
1966 struct iris_sampler_state {
1967    union pipe_color_union border_color;
1968    bool needs_border_color;
1969 
1970    uint32_t sampler_state[GENX(SAMPLER_STATE_length)];
1971 };
1972 
1973 /**
1974  * The pipe->create_sampler_state() driver hook.
1975  *
1976  * We fill out SAMPLER_STATE (except for the border color pointer), and
1977  * store that on the CPU.  It doesn't make sense to upload it to a GPU
1978  * buffer object yet, because 3DSTATE_SAMPLER_STATE_POINTERS requires
1979  * all bound sampler states to be in contiguous memory.
1980  */
1981 static void *
1982 iris_create_sampler_state(struct pipe_context *ctx,
1983                           const struct pipe_sampler_state *state)
1984 {
1985    struct iris_sampler_state *cso = CALLOC_STRUCT(iris_sampler_state);
1986 
1987    if (!cso)
1988       return NULL;
1989 
1990    STATIC_ASSERT(PIPE_TEX_FILTER_NEAREST == MAPFILTER_NEAREST);
1991    STATIC_ASSERT(PIPE_TEX_FILTER_LINEAR == MAPFILTER_LINEAR);
1992 
1993    unsigned wrap_s = translate_wrap(state->wrap_s);
1994    unsigned wrap_t = translate_wrap(state->wrap_t);
1995    unsigned wrap_r = translate_wrap(state->wrap_r);
1996 
1997    memcpy(&cso->border_color, &state->border_color, sizeof(cso->border_color));
1998 
1999    cso->needs_border_color = wrap_mode_needs_border_color(wrap_s) ||
2000                              wrap_mode_needs_border_color(wrap_t) ||
2001                              wrap_mode_needs_border_color(wrap_r);
2002 
2003    float min_lod = state->min_lod;
2004    unsigned mag_img_filter = state->mag_img_filter;
2005 
2006    // XXX: explain this code ported from ilo...I don't get it at all...
2007    if (state->min_mip_filter == PIPE_TEX_MIPFILTER_NONE &&
2008        state->min_lod > 0.0f) {
2009       min_lod = 0.0f;
2010       mag_img_filter = state->min_img_filter;
2011    }
2012 
2013    iris_pack_state(GENX(SAMPLER_STATE), cso->sampler_state, samp) {
2014       samp.TCXAddressControlMode = wrap_s;
2015       samp.TCYAddressControlMode = wrap_t;
2016       samp.TCZAddressControlMode = wrap_r;
2017       samp.CubeSurfaceControlMode = state->seamless_cube_map;
2018       samp.NonnormalizedCoordinateEnable = !state->normalized_coords;
2019       samp.MinModeFilter = state->min_img_filter;
2020       samp.MagModeFilter = mag_img_filter;
2021       samp.MipModeFilter = translate_mip_filter(state->min_mip_filter);
2022       samp.MaximumAnisotropy = RATIO21;
2023 
2024       if (state->max_anisotropy >= 2) {
2025          if (state->min_img_filter == PIPE_TEX_FILTER_LINEAR) {
2026             samp.MinModeFilter = MAPFILTER_ANISOTROPIC;
2027             samp.AnisotropicAlgorithm = EWAApproximation;
2028          }
2029 
2030          if (state->mag_img_filter == PIPE_TEX_FILTER_LINEAR)
2031             samp.MagModeFilter = MAPFILTER_ANISOTROPIC;
2032 
2033          samp.MaximumAnisotropy =
2034             MIN2((state->max_anisotropy - 2) / 2, RATIO161);
2035       }
2036 
2037       /* Set address rounding bits if not using nearest filtering. */
2038       if (state->min_img_filter != PIPE_TEX_FILTER_NEAREST) {
2039          samp.UAddressMinFilterRoundingEnable = true;
2040          samp.VAddressMinFilterRoundingEnable = true;
2041          samp.RAddressMinFilterRoundingEnable = true;
2042       }
2043 
2044       if (state->mag_img_filter != PIPE_TEX_FILTER_NEAREST) {
2045          samp.UAddressMagFilterRoundingEnable = true;
2046          samp.VAddressMagFilterRoundingEnable = true;
2047          samp.RAddressMagFilterRoundingEnable = true;
2048       }
2049 
2050       if (state->compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE)
2051          samp.ShadowFunction = translate_shadow_func(state->compare_func);
2052 
2053       const float hw_max_lod = GFX_VER >= 7 ? 14 : 13;
2054 
2055       samp.LODPreClampMode = CLAMP_MODE_OGL;
2056       samp.MinLOD = CLAMP(min_lod, 0, hw_max_lod);
2057       samp.MaxLOD = CLAMP(state->max_lod, 0, hw_max_lod);
2058       samp.TextureLODBias = CLAMP(state->lod_bias, -16, 15);
2059 
2060       /* .BorderColorPointer is filled in by iris_bind_sampler_states. */
2061    }
2062 
2063    return cso;
2064 }
2065 
2066 /**
2067  * The pipe->bind_sampler_states() driver hook.
2068  */
2069 static void
2070 iris_bind_sampler_states(struct pipe_context *ctx,
2071                          enum pipe_shader_type p_stage,
2072                          unsigned start, unsigned count,
2073                          void **states)
2074 {
2075    struct iris_context *ice = (struct iris_context *) ctx;
2076    gl_shader_stage stage = stage_from_pipe(p_stage);
2077    struct iris_shader_state *shs = &ice->state.shaders[stage];
2078 
2079    assert(start + count <= IRIS_MAX_TEXTURE_SAMPLERS);
2080 
2081    bool dirty = false;
2082 
2083    for (int i = 0; i < count; i++) {
2084       struct iris_sampler_state *state = states ? states[i] : NULL;
2085       if (shs->samplers[start + i] != state) {
2086          shs->samplers[start + i] = state;
2087          dirty = true;
2088       }
2089    }
2090 
2091    if (dirty)
2092       ice->state.stage_dirty |= IRIS_STAGE_DIRTY_SAMPLER_STATES_VS << stage;
2093 }
2094 
2095 /**
2096  * Upload the sampler states into a contiguous area of GPU memory,
2097  * for 3DSTATE_SAMPLER_STATE_POINTERS_*.
2098  *
2099  * Also fill out the border color state pointers.
2100  */
2101 static void
2102 iris_upload_sampler_states(struct iris_context *ice, gl_shader_stage stage)
2103 {
2104    struct iris_shader_state *shs = &ice->state.shaders[stage];
2105    const struct shader_info *info = iris_get_shader_info(ice, stage);
2106 
2107    /* We assume gallium frontends will call pipe->bind_sampler_states()
2108     * if the program's number of textures changes.
2109     */
2110    unsigned count = info ? BITSET_LAST_BIT(info->textures_used) : 0;
2111 
2112    if (!count)
2113       return;
2114 
2115    /* Assemble the SAMPLER_STATEs into a contiguous table that lives
2116     * in the dynamic state memory zone, so we can point to it via the
2117     * 3DSTATE_SAMPLER_STATE_POINTERS_* commands.
2118     */
2119    unsigned size = count * 4 * GENX(SAMPLER_STATE_length);
2120    uint32_t *map =
2121       upload_state(ice->state.dynamic_uploader, &shs->sampler_table, size, 32);
2122    if (unlikely(!map))
2123       return;
2124 
2125    struct pipe_resource *res = shs->sampler_table.res;
2126    struct iris_bo *bo = iris_resource_bo(res);
2127 
2128    iris_record_state_size(ice->state.sizes,
2129                           bo->address + shs->sampler_table.offset, size);
2130 
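   /* The pointers in 3DSTATE_SAMPLER_STATE_POINTERS_* are relative to
    * Dynamic State Base Address, so fold the BO's offset from that base
    * into the table offset.
    */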
2131    shs->sampler_table.offset += iris_bo_offset_from_base_address(bo);
2132 
2133    /* Make sure all land in the same BO */
2134    iris_border_color_pool_reserve(ice, IRIS_MAX_TEXTURE_SAMPLERS);
2135 
2136    ice->state.need_border_colors &= ~(1 << stage);
2137 
2138    for (int i = 0; i < count; i++) {
2139       struct iris_sampler_state *state = shs->samplers[i];
2140       struct iris_sampler_view *tex = shs->textures[i];
2141 
2142       if (!state) {
2143          memset(map, 0, 4 * GENX(SAMPLER_STATE_length));
2144       } else if (!state->needs_border_color) {
2145          memcpy(map, state->sampler_state, 4 * GENX(SAMPLER_STATE_length));
2146       } else {
2147          ice->state.need_border_colors |= 1 << stage;
2148 
2149          /* We may need to swizzle the border color for format faking.
2150           * A/LA formats are faked as R/RG with 000R or R00G swizzles.
2151           * This means we need to move the border color's A channel into
2152           * the R or G channels so that those read swizzles will move it
2153           * back into A.
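          *
          * For example, an A8 border color (0, 0, 0, a) is stored as
          * (a, 0, 0, 0), so the 000R read swizzle returns 'a' in the alpha
          * channel again.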
2154           */
2155          union pipe_color_union *color = &state->border_color;
2156          union pipe_color_union tmp;
2157          if (tex) {
2158             enum pipe_format internal_format = tex->res->internal_format;
2159 
2160             if (util_format_is_alpha(internal_format)) {
2161                unsigned char swz[4] = {
2162                   PIPE_SWIZZLE_W, PIPE_SWIZZLE_0,
2163                   PIPE_SWIZZLE_0, PIPE_SWIZZLE_0
2164                };
2165                util_format_apply_color_swizzle(&tmp, color, swz, true);
2166                color = &tmp;
2167             } else if (util_format_is_luminance_alpha(internal_format) &&
2168                        internal_format != PIPE_FORMAT_L8A8_SRGB) {
2169                unsigned char swz[4] = {
2170                   PIPE_SWIZZLE_X, PIPE_SWIZZLE_W,
2171                   PIPE_SWIZZLE_0, PIPE_SWIZZLE_0
2172                };
2173                util_format_apply_color_swizzle(&tmp, color, swz, true);
2174                color = &tmp;
2175             }
2176          }
2177 
2178          /* Stream out the border color and merge the pointer. */
2179          uint32_t offset = iris_upload_border_color(ice, color);
2180 
2181          uint32_t dynamic[GENX(SAMPLER_STATE_length)];
2182          iris_pack_state(GENX(SAMPLER_STATE), dynamic, dyns) {
2183             dyns.BorderColorPointer = offset;
2184          }
2185 
2186          for (uint32_t j = 0; j < GENX(SAMPLER_STATE_length); j++)
2187             map[j] = state->sampler_state[j] | dynamic[j];
2188       }
2189 
2190       map += GENX(SAMPLER_STATE_length);
2191    }
2192 }
2193 
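/* Compose an API swizzle selector with the format's channel swizzle, so a
 * request for e.g. PIPE_SWIZZLE_X (red) returns whichever hardware channel
 * actually carries red for this format.
 */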
2194 static enum isl_channel_select
2195 fmt_swizzle(const struct iris_format_info *fmt, enum pipe_swizzle swz)
2196 {
2197    switch (swz) {
2198    case PIPE_SWIZZLE_X: return fmt->swizzle.r;
2199    case PIPE_SWIZZLE_Y: return fmt->swizzle.g;
2200    case PIPE_SWIZZLE_Z: return fmt->swizzle.b;
2201    case PIPE_SWIZZLE_W: return fmt->swizzle.a;
2202    case PIPE_SWIZZLE_1: return ISL_CHANNEL_SELECT_ONE;
2203    case PIPE_SWIZZLE_0: return ISL_CHANNEL_SELECT_ZERO;
2204    default: unreachable("invalid swizzle");
2205    }
2206 }
2207 
2208 static void
2209 fill_buffer_surface_state(struct isl_device *isl_dev,
2210                           struct iris_resource *res,
2211                           void *map,
2212                           enum isl_format format,
2213                           struct isl_swizzle swizzle,
2214                           unsigned offset,
2215                           unsigned size,
2216                           isl_surf_usage_flags_t usage)
2217 {
2218    const struct isl_format_layout *fmtl = isl_format_get_layout(format);
2219    const unsigned cpp = format == ISL_FORMAT_RAW ? 1 : fmtl->bpb / 8;
2220 
2221    /* The ARB_texture_buffer_specification says:
2222     *
2223     *    "The number of texels in the buffer texture's texel array is given by
2224     *
2225     *       floor(<buffer_size> / (<components> * sizeof(<base_type>)),
2226     *
2227     *     where <buffer_size> is the size of the buffer object, in basic
2228     *     machine units and <components> and <base_type> are the element count
2229     *     and base data type for elements, as specified in Table X.1.  The
2230     *     number of texels in the texel array is then clamped to the
2231     *     implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
2232     *
2233     * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
2234     * so that when ISL divides by stride to obtain the number of texels, that
2235     * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
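    *
    * For example, a hypothetical R32G32B32A32_FLOAT buffer view has a cpp
    * of 16, so the byte size is clamped to
    * IRIS_MAX_TEXTURE_BUFFER_SIZE * 16 before ISL divides by the stride.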
2236     */
2237    unsigned final_size =
2238       MIN3(size, res->bo->size - res->offset - offset,
2239            IRIS_MAX_TEXTURE_BUFFER_SIZE * cpp);
2240 
2241    isl_buffer_fill_state(isl_dev, map,
2242                          .address = res->bo->address + res->offset + offset,
2243                          .size_B = final_size,
2244                          .format = format,
2245                          .swizzle = swizzle,
2246                          .stride_B = cpp,
2247                          .mocs = iris_mocs(res->bo, isl_dev, usage));
2248 }
2249 
2250 #define SURFACE_STATE_ALIGNMENT 64
2251 
2252 /**
2253  * Allocate several contiguous SURFACE_STATE structures, one for each
2254  * supported auxiliary surface mode.  This only allocates the CPU-side
2255  * copy; they will need to be uploaded later, after they're filled in.
2256  */
2257 static void
2258 alloc_surface_states(struct iris_surface_state *surf_state,
2259                      unsigned aux_usages)
2260 {
2261    const unsigned surf_size = 4 * GENX(RENDER_SURFACE_STATE_length);
2262 
2263    /* If this changes, update this to explicitly align pointers */
2264    STATIC_ASSERT(surf_size == SURFACE_STATE_ALIGNMENT);
2265 
2266    assert(aux_usages != 0);
2267 
2268    /* In case we're re-allocating them... */
2269    free(surf_state->cpu);
2270 
2271    surf_state->num_states = util_bitcount(aux_usages);
2272    surf_state->cpu = calloc(surf_state->num_states, surf_size);
2273    surf_state->ref.offset = 0;
2274    pipe_resource_reference(&surf_state->ref.res, NULL);
2275 
2276    assert(surf_state->cpu);
2277 }
2278 
2279 /**
2280  * Upload the CPU side SURFACE_STATEs into a GPU buffer.
2281  */
2282 static void
2283 upload_surface_states(struct u_upload_mgr *mgr,
2284                       struct iris_surface_state *surf_state)
2285 {
2286    const unsigned surf_size = 4 * GENX(RENDER_SURFACE_STATE_length);
2287    const unsigned bytes = surf_state->num_states * surf_size;
2288 
2289    void *map =
2290       upload_state(mgr, &surf_state->ref, bytes, SURFACE_STATE_ALIGNMENT);
2291 
2292    surf_state->ref.offset +=
2293       iris_bo_offset_from_base_address(iris_resource_bo(surf_state->ref.res));
2294 
2295    if (map)
2296       memcpy(map, surf_state->cpu, bytes);
2297 }
2298 
2299 /**
2300  * Update resource addresses in a set of SURFACE_STATE descriptors,
2301  * and re-upload them if necessary.
2302  */
2303 static bool
2304 update_surface_state_addrs(struct u_upload_mgr *mgr,
2305                            struct iris_surface_state *surf_state,
2306                            struct iris_bo *bo)
2307 {
2308    if (surf_state->bo_address == bo->address)
2309       return false;
2310 
2311    STATIC_ASSERT(GENX(RENDER_SURFACE_STATE_SurfaceBaseAddress_start) % 64 == 0);
2312    STATIC_ASSERT(GENX(RENDER_SURFACE_STATE_SurfaceBaseAddress_bits) == 64);
2313 
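   /* surf_state->cpu is an array of DWords, so convert the bit offset of
    * Surface Base Address into a DWord index before taking its address.
    */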
2314    uint64_t *ss_addr = (uint64_t *) &surf_state->cpu[GENX(RENDER_SURFACE_STATE_SurfaceBaseAddress_start) / 32];
2315 
2316    /* First, update the CPU copies.  We assume no other fields exist in
2317     * the QWord containing Surface Base Address.
2318     */
2319    for (unsigned i = 0; i < surf_state->num_states; i++) {
2320       *ss_addr = *ss_addr - surf_state->bo_address + bo->address;
2321       ss_addr = ((void *) ss_addr) + SURFACE_STATE_ALIGNMENT;
2322    }
2323 
2324    /* Next, upload the updated copies to a GPU buffer. */
2325    upload_surface_states(mgr, surf_state);
2326 
2327    surf_state->bo_address = bo->address;
2328 
2329    return true;
2330 }
2331 
2332 static void
2333 fill_surface_state(struct isl_device *isl_dev,
2334                    void *map,
2335                    struct iris_resource *res,
2336                    struct isl_surf *surf,
2337                    struct isl_view *view,
2338                    unsigned aux_usage,
2339                    uint32_t extra_main_offset,
2340                    uint32_t tile_x_sa,
2341                    uint32_t tile_y_sa)
2342 {
2343    struct isl_surf_fill_state_info f = {
2344       .surf = surf,
2345       .view = view,
2346       .mocs = iris_mocs(res->bo, isl_dev, view->usage),
2347       .address = res->bo->address + res->offset + extra_main_offset,
2348       .x_offset_sa = tile_x_sa,
2349       .y_offset_sa = tile_y_sa,
2350    };
2351 
2352    if (aux_usage != ISL_AUX_USAGE_NONE) {
2353       f.aux_surf = &res->aux.surf;
2354       f.aux_usage = aux_usage;
2355       f.clear_color = res->aux.clear_color;
2356 
2357       if (res->aux.bo)
2358          f.aux_address = res->aux.bo->address + res->aux.offset;
2359 
2360       if (res->aux.clear_color_bo) {
2361          f.clear_address = res->aux.clear_color_bo->address +
2362                            res->aux.clear_color_offset;
2363          f.use_clear_address = isl_dev->info->ver > 9;
2364       }
2365    }
2366 
2367    isl_surf_fill_state_s(isl_dev, map, &f);
2368 }
2369 
2370 /**
2371  * The pipe->create_sampler_view() driver hook.
2372  */
2373 static struct pipe_sampler_view *
2374 iris_create_sampler_view(struct pipe_context *ctx,
2375                          struct pipe_resource *tex,
2376                          const struct pipe_sampler_view *tmpl)
2377 {
2378    struct iris_screen *screen = (struct iris_screen *)ctx->screen;
2379    const struct intel_device_info *devinfo = &screen->devinfo;
2380    struct iris_sampler_view *isv = calloc(1, sizeof(struct iris_sampler_view));
2381 
2382    if (!isv)
2383       return NULL;
2384 
2385    /* initialize base object */
2386    isv->base = *tmpl;
2387    isv->base.context = ctx;
2388    isv->base.texture = NULL;
2389    pipe_reference_init(&isv->base.reference, 1);
2390    pipe_resource_reference(&isv->base.texture, tex);
2391 
2392    if (util_format_is_depth_or_stencil(tmpl->format)) {
2393       struct iris_resource *zres, *sres;
2394       const struct util_format_description *desc =
2395          util_format_description(tmpl->format);
2396 
2397       iris_get_depth_stencil_resources(tex, &zres, &sres);
2398 
2399       tex = util_format_has_depth(desc) ? &zres->base.b : &sres->base.b;
2400    }
2401 
2402    isv->res = (struct iris_resource *) tex;
2403 
2404    alloc_surface_states(&isv->surface_state, isv->res->aux.sampler_usages);
2405 
2406    isv->surface_state.bo_address = isv->res->bo->address;
2407 
2408    isl_surf_usage_flags_t usage = ISL_SURF_USAGE_TEXTURE_BIT;
2409 
2410    if (isv->base.target == PIPE_TEXTURE_CUBE ||
2411        isv->base.target == PIPE_TEXTURE_CUBE_ARRAY)
2412       usage |= ISL_SURF_USAGE_CUBE_BIT;
2413 
2414    const struct iris_format_info fmt =
2415       iris_format_for_usage(devinfo, tmpl->format, usage);
2416 
2417    isv->clear_color = isv->res->aux.clear_color;
2418 
2419    isv->view = (struct isl_view) {
2420       .format = fmt.fmt,
2421       .swizzle = (struct isl_swizzle) {
2422          .r = fmt_swizzle(&fmt, tmpl->swizzle_r),
2423          .g = fmt_swizzle(&fmt, tmpl->swizzle_g),
2424          .b = fmt_swizzle(&fmt, tmpl->swizzle_b),
2425          .a = fmt_swizzle(&fmt, tmpl->swizzle_a),
2426       },
2427       .usage = usage,
2428    };
2429 
2430    void *map = isv->surface_state.cpu;
2431 
2432    /* Fill out SURFACE_STATE for this view. */
2433    if (tmpl->target != PIPE_BUFFER) {
2434       isv->view.base_level = tmpl->u.tex.first_level;
2435       isv->view.levels = tmpl->u.tex.last_level - tmpl->u.tex.first_level + 1;
2436 
2437       if (tmpl->target == PIPE_TEXTURE_3D) {
2438          isv->view.base_array_layer = 0;
2439          isv->view.array_len = 1;
2440       } else {
2441          isv->view.base_array_layer = tmpl->u.tex.first_layer;
2442          isv->view.array_len =
2443             tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1;
2444       }
2445 
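      /* Fill one SURFACE_STATE per supported auxiliary usage; the copies
       * sit back to back in the CPU buffer, SURFACE_STATE_ALIGNMENT bytes
       * apart.
       */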
2446       unsigned aux_modes = isv->res->aux.sampler_usages;
2447       while (aux_modes) {
2448          enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
2449 
2450          fill_surface_state(&screen->isl_dev, map, isv->res, &isv->res->surf,
2451                             &isv->view, aux_usage, 0, 0, 0);
2452 
2453          map += SURFACE_STATE_ALIGNMENT;
2454       }
2455    } else {
2456       fill_buffer_surface_state(&screen->isl_dev, isv->res, map,
2457                                 isv->view.format, isv->view.swizzle,
2458                                 tmpl->u.buf.offset, tmpl->u.buf.size,
2459                                 ISL_SURF_USAGE_TEXTURE_BIT);
2460    }
2461 
2462    return &isv->base;
2463 }
2464 
2465 static void
2466 iris_sampler_view_destroy(struct pipe_context *ctx,
2467                           struct pipe_sampler_view *state)
2468 {
2469    struct iris_sampler_view *isv = (void *) state;
2470    pipe_resource_reference(&state->texture, NULL);
2471    pipe_resource_reference(&isv->surface_state.ref.res, NULL);
2472    free(isv->surface_state.cpu);
2473    free(isv);
2474 }
2475 
2476 /**
2477  * The pipe->create_surface() driver hook.
2478  *
2479  * In Gallium nomenclature, "surfaces" are views of a resource that
2480  * can be bound as a render target or depth/stencil buffer.
2481  */
2482 static struct pipe_surface *
2483 iris_create_surface(struct pipe_context *ctx,
2484                     struct pipe_resource *tex,
2485                     const struct pipe_surface *tmpl)
2486 {
2487    struct iris_screen *screen = (struct iris_screen *)ctx->screen;
2488    const struct intel_device_info *devinfo = &screen->devinfo;
2489 
2490    isl_surf_usage_flags_t usage = 0;
2491    if (tmpl->writable)
2492       usage = ISL_SURF_USAGE_STORAGE_BIT;
2493    else if (util_format_is_depth_or_stencil(tmpl->format))
2494       usage = ISL_SURF_USAGE_DEPTH_BIT;
2495    else
2496       usage = ISL_SURF_USAGE_RENDER_TARGET_BIT;
2497 
2498    const struct iris_format_info fmt =
2499       iris_format_for_usage(devinfo, tmpl->format, usage);
2500 
2501    if ((usage & ISL_SURF_USAGE_RENDER_TARGET_BIT) &&
2502        !isl_format_supports_rendering(devinfo, fmt.fmt)) {
2503       /* Framebuffer validation will reject this invalid case, but it
2504        * hasn't had the opportunity yet.  In the meantime, we need to
2505        * avoid hitting ISL asserts about unsupported formats below.
2506        */
2507       return NULL;
2508    }
2509 
2510    struct iris_surface *surf = calloc(1, sizeof(struct iris_surface));
2511    struct pipe_surface *psurf = &surf->base;
2512    struct iris_resource *res = (struct iris_resource *) tex;
2513 
2514    if (!surf)
2515       return NULL;
2516 
2517    pipe_reference_init(&psurf->reference, 1);
2518    pipe_resource_reference(&psurf->texture, tex);
2519    psurf->context = ctx;
2520    psurf->format = tmpl->format;
2521    psurf->width = tex->width0;
2522    psurf->height = tex->height0;
2523    psurf->texture = tex;
2524    psurf->u.tex.first_layer = tmpl->u.tex.first_layer;
2525    psurf->u.tex.last_layer = tmpl->u.tex.last_layer;
2526    psurf->u.tex.level = tmpl->u.tex.level;
2527 
2528    uint32_t array_len = tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1;
2529 
2530    struct isl_view *view = &surf->view;
2531    *view = (struct isl_view) {
2532       .format = fmt.fmt,
2533       .base_level = tmpl->u.tex.level,
2534       .levels = 1,
2535       .base_array_layer = tmpl->u.tex.first_layer,
2536       .array_len = array_len,
2537       .swizzle = ISL_SWIZZLE_IDENTITY,
2538       .usage = usage,
2539    };
2540 
2541 #if GFX_VER == 8
2542    struct isl_view *read_view = &surf->read_view;
2543    *read_view = (struct isl_view) {
2544       .format = fmt.fmt,
2545       .base_level = tmpl->u.tex.level,
2546       .levels = 1,
2547       .base_array_layer = tmpl->u.tex.first_layer,
2548       .array_len = array_len,
2549       .swizzle = ISL_SWIZZLE_IDENTITY,
2550       .usage = ISL_SURF_USAGE_TEXTURE_BIT,
2551    };
2552 
2553    struct isl_surf read_surf = res->surf;
2554    uint64_t read_surf_offset_B = 0;
2555    uint32_t read_surf_tile_x_sa = 0, read_surf_tile_y_sa = 0;
2556    if (tex->target == PIPE_TEXTURE_3D && array_len == 1) {
2557       /* The minimum array element field of the surface state structure is
2558        * ignored by the sampler unit for 3D textures on some hardware.  If the
2559        * render buffer is a single slice of a 3D texture, create a 2D texture
2560        * covering that slice.
2561        *
2562        * TODO: This only handles the case where we're rendering to a single
2563        * slice of an array texture.  If we have layered rendering combined
2564        * with non-coherent FB fetch and a non-zero base_array_layer, then
2565        * we're going to run into problems.
2566        *
2567        * See https://gitlab.freedesktop.org/mesa/mesa/-/issues/4904
2568        */
2569       isl_surf_get_image_surf(&screen->isl_dev, &res->surf,
2570                               read_view->base_level,
2571                               0, read_view->base_array_layer,
2572                               &read_surf, &read_surf_offset_B,
2573                               &read_surf_tile_x_sa, &read_surf_tile_y_sa);
2574       read_view->base_level = 0;
2575       read_view->base_array_layer = 0;
2576       assert(read_view->array_len == 1);
2577    } else if (tex->target == PIPE_TEXTURE_1D_ARRAY) {
2578       /* Convert 1D array textures to 2D arrays because shaders always provide
2579        * the array index coordinate at the Z component to avoid recompiles
2580        * when changing the texture target of the framebuffer.
2581        */
2582       assert(read_surf.dim_layout == ISL_DIM_LAYOUT_GFX4_2D);
2583       read_surf.dim = ISL_SURF_DIM_2D;
2584    }
2585 #endif
2586 
2587    surf->clear_color = res->aux.clear_color;
2588 
2589    /* Bail early for depth/stencil - we don't want SURFACE_STATE for them. */
2590    if (res->surf.usage & (ISL_SURF_USAGE_DEPTH_BIT |
2591                           ISL_SURF_USAGE_STENCIL_BIT))
2592       return psurf;
2593 
2594 
2595    alloc_surface_states(&surf->surface_state, res->aux.possible_usages);
2596    surf->surface_state.bo_address = res->bo->address;
2597 
2598 #if GFX_VER == 8
2599    alloc_surface_states(&surf->surface_state_read, res->aux.possible_usages);
2600    surf->surface_state_read.bo_address = res->bo->address;
2601 #endif
2602 
2603    if (!isl_format_is_compressed(res->surf.format)) {
2604       void *map = surf->surface_state.cpu;
2605       UNUSED void *map_read = surf->surface_state_read.cpu;
2606 
2607       /* This is a normal surface.  Fill out a SURFACE_STATE for each possible
2608        * auxiliary surface mode and return the pipe_surface.
2609        */
2610       unsigned aux_modes = res->aux.possible_usages;
2611       while (aux_modes) {
2612          enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
2613          fill_surface_state(&screen->isl_dev, map, res, &res->surf,
2614                             view, aux_usage, 0, 0, 0);
2615          map += SURFACE_STATE_ALIGNMENT;
2616 
2617 #if GFX_VER == 8
2618          fill_surface_state(&screen->isl_dev, map_read, res,
2619                             &read_surf, read_view, aux_usage,
2620                             read_surf_offset_B,
2621                             read_surf_tile_x_sa, read_surf_tile_y_sa);
2622          map_read += SURFACE_STATE_ALIGNMENT;
2623 #endif
2624       }
2625 
2626       return psurf;
2627    }
2628 
2629    /* The resource has a compressed format, which is not renderable, but we
2630     * have a renderable view format.  We must be attempting to upload blocks
2631     * of compressed data via an uncompressed view.
2632     *
2633     * In this case, we can assume there are no auxiliary buffers, a single
2634     * miplevel, and that the resource is single-sampled.  Gallium may try
2635     * and create an uncompressed view with multiple layers, however.
2636     */
2637    assert(!isl_format_is_compressed(fmt.fmt));
2638    assert(res->aux.possible_usages == 1 << ISL_AUX_USAGE_NONE);
2639    assert(res->surf.samples == 1);
2640    assert(view->levels == 1);
2641 
2642    struct isl_surf isl_surf;
2643    uint64_t offset_B = 0;
2644    uint32_t tile_x_el = 0, tile_y_el = 0;
2645    bool ok = isl_surf_get_uncompressed_surf(&screen->isl_dev, &res->surf,
2646                                             view, &isl_surf, view,
2647                                             &offset_B, &tile_x_el, &tile_y_el);
2648    if (!ok) {
2649       free(surf);
2650       return NULL;
2651    }
2652 
2653    psurf->width = isl_surf.logical_level0_px.width;
2654    psurf->height = isl_surf.logical_level0_px.height;
2655 
2656    struct isl_surf_fill_state_info f = {
2657       .surf = &isl_surf,
2658       .view = view,
2659       .mocs = iris_mocs(res->bo, &screen->isl_dev,
2660                         ISL_SURF_USAGE_RENDER_TARGET_BIT),
2661       .address = res->bo->address + offset_B,
2662       .x_offset_sa = tile_x_el, /* Single-sampled, so el == sa */
2663       .y_offset_sa = tile_y_el, /* Single-sampled, so el == sa */
2664    };
2665 
2666    isl_surf_fill_state_s(&screen->isl_dev, surf->surface_state.cpu, &f);
2667 
2668    return psurf;
2669 }
2670 
2671 #if GFX_VER < 9
2672 static void
2673 fill_default_image_param(struct brw_image_param *param)
2674 {
2675    memset(param, 0, sizeof(*param));
2676    /* Set the swizzling shifts to all-ones to effectively disable swizzling --
2677     * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
2678     * detailed explanation of these parameters.
2679     */
2680    param->swizzling[0] = 0xff;
2681    param->swizzling[1] = 0xff;
2682 }
2683 
2684 static void
2685 fill_buffer_image_param(struct brw_image_param *param,
2686                         enum pipe_format pfmt,
2687                         unsigned size)
2688 {
2689    const unsigned cpp = util_format_get_blocksize(pfmt);
2690 
2691    fill_default_image_param(param);
2692    param->size[0] = size / cpp;
2693    param->stride[0] = cpp;
2694 }
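/* Worked example (illustrative, not from the original source): for a 4096-byte
 * buffer image with PIPE_FORMAT_R32G32B32A32_FLOAT (cpp = 16), this yields
 * size[0] = 256 texels and stride[0] = 16 bytes between texels.
 */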
2695 #else
2696 #define isl_surf_fill_image_param(x, ...)
2697 #define fill_default_image_param(x, ...)
2698 #define fill_buffer_image_param(x, ...)
2699 #endif
2700 
2701 /**
2702  * The pipe->set_shader_images() driver hook.
2703  */
2704 static void
2705 iris_set_shader_images(struct pipe_context *ctx,
2706                        enum pipe_shader_type p_stage,
2707                        unsigned start_slot, unsigned count,
2708                        unsigned unbind_num_trailing_slots,
2709                        const struct pipe_image_view *p_images)
2710 {
2711    struct iris_context *ice = (struct iris_context *) ctx;
2712    struct iris_screen *screen = (struct iris_screen *)ctx->screen;
2713    gl_shader_stage stage = stage_from_pipe(p_stage);
2714    struct iris_shader_state *shs = &ice->state.shaders[stage];
2715 #if GFX_VER == 8
2716    struct iris_genx_state *genx = ice->state.genx;
2717    struct brw_image_param *image_params = genx->shaders[stage].image_param;
2718 #endif
2719 
2720    shs->bound_image_views &=
2721       ~u_bit_consecutive(start_slot, count + unbind_num_trailing_slots);
2722 
2723    for (unsigned i = 0; i < count; i++) {
2724       struct iris_image_view *iv = &shs->image[start_slot + i];
2725 
2726       if (p_images && p_images[i].resource) {
2727          const struct pipe_image_view *img = &p_images[i];
2728          struct iris_resource *res = (void *) img->resource;
2729 
2730          util_copy_image_view(&iv->base, img);
2731 
2732          shs->bound_image_views |= 1 << (start_slot + i);
2733 
2734          res->bind_history |= PIPE_BIND_SHADER_IMAGE;
2735          res->bind_stages |= 1 << stage;
2736 
2737          enum isl_format isl_fmt = iris_image_view_get_format(ice, img);
2738 
2739          /* Render compression with images is only supported on gfx12+. */
2740          unsigned aux_usages = GFX_VER >= 12 ? res->aux.possible_usages :
2741             1 << ISL_AUX_USAGE_NONE;
2742 
2743          alloc_surface_states(&iv->surface_state, aux_usages);
2744          iv->surface_state.bo_address = res->bo->address;
2745 
2746          void *map = iv->surface_state.cpu;
2747 
2748          if (res->base.b.target != PIPE_BUFFER) {
2749             struct isl_view view = {
2750                .format = isl_fmt,
2751                .base_level = img->u.tex.level,
2752                .levels = 1,
2753                .base_array_layer = img->u.tex.first_layer,
2754                .array_len = img->u.tex.last_layer - img->u.tex.first_layer + 1,
2755                .swizzle = ISL_SWIZZLE_IDENTITY,
2756                .usage = ISL_SURF_USAGE_STORAGE_BIT,
2757             };
2758 
2759             /* If using the untyped (ISL_FORMAT_RAW) fallback, emit a buffer surface. */
2760             if (isl_fmt == ISL_FORMAT_RAW) {
2761                fill_buffer_surface_state(&screen->isl_dev, res, map,
2762                                          isl_fmt, ISL_SWIZZLE_IDENTITY,
2763                                          0, res->bo->size,
2764                                          ISL_SURF_USAGE_STORAGE_BIT);
2765             } else {
2766                unsigned aux_modes = aux_usages;
2767                while (aux_modes) {
2768                   enum isl_aux_usage usage = u_bit_scan(&aux_modes);
2769 
2770                   fill_surface_state(&screen->isl_dev, map, res, &res->surf,
2771                                      &view, usage, 0, 0, 0);
2772 
2773                   map += SURFACE_STATE_ALIGNMENT;
2774                }
2775             }
2776 
2777             isl_surf_fill_image_param(&screen->isl_dev,
2778                                       &image_params[start_slot + i],
2779                                       &res->surf, &view);
2780          } else {
2781             util_range_add(&res->base.b, &res->valid_buffer_range, img->u.buf.offset,
2782                            img->u.buf.offset + img->u.buf.size);
2783 
2784             fill_buffer_surface_state(&screen->isl_dev, res, map,
2785                                       isl_fmt, ISL_SWIZZLE_IDENTITY,
2786                                       img->u.buf.offset, img->u.buf.size,
2787                                       ISL_SURF_USAGE_STORAGE_BIT);
2788             fill_buffer_image_param(&image_params[start_slot + i],
2789                                     img->format, img->u.buf.size);
2790          }
2791 
2792          upload_surface_states(ice->state.surface_uploader, &iv->surface_state);
2793       } else {
2794          pipe_resource_reference(&iv->base.resource, NULL);
2795          pipe_resource_reference(&iv->surface_state.ref.res, NULL);
2796          fill_default_image_param(&image_params[start_slot + i]);
2797       }
2798    }
2799 
2800    ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << stage;
2801    ice->state.dirty |=
2802       stage == MESA_SHADER_COMPUTE ? IRIS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES
2803                                    : IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
2804 
2805    /* Broadwell also needs brw_image_params re-uploaded */
2806    if (GFX_VER < 9) {
2807       ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_VS << stage;
2808       shs->sysvals_need_upload = true;
2809    }
2810 
2811    if (unbind_num_trailing_slots) {
2812       iris_set_shader_images(ctx, p_stage, start_slot + count,
2813                              unbind_num_trailing_slots, 0, NULL);
2814    }
2815 }
2816 
2817 
2818 /**
2819  * The pipe->set_sampler_views() driver hook.
2820  */
2821 static void
2822 iris_set_sampler_views(struct pipe_context *ctx,
2823                        enum pipe_shader_type p_stage,
2824                        unsigned start, unsigned count,
2825                        unsigned unbind_num_trailing_slots,
2826                        bool take_ownership,
2827                        struct pipe_sampler_view **views)
2828 {
2829    struct iris_context *ice = (struct iris_context *) ctx;
2830    gl_shader_stage stage = stage_from_pipe(p_stage);
2831    struct iris_shader_state *shs = &ice->state.shaders[stage];
2832    unsigned i;
2833 
2834    shs->bound_sampler_views &=
2835       ~u_bit_consecutive(start, count + unbind_num_trailing_slots);
2836 
2837    for (i = 0; i < count; i++) {
2838       struct pipe_sampler_view *pview = views ? views[i] : NULL;
2839 
2840       if (take_ownership) {
2841          pipe_sampler_view_reference((struct pipe_sampler_view **)
2842                                      &shs->textures[start + i], NULL);
2843          shs->textures[start + i] = (struct iris_sampler_view *)pview;
2844       } else {
2845          pipe_sampler_view_reference((struct pipe_sampler_view **)
2846                                      &shs->textures[start + i], pview);
2847       }
2848       struct iris_sampler_view *view = (void *) pview;
2849       if (view) {
2850          view->res->bind_history |= PIPE_BIND_SAMPLER_VIEW;
2851          view->res->bind_stages |= 1 << stage;
2852 
2853          shs->bound_sampler_views |= 1 << (start + i);
2854 
2855          update_surface_state_addrs(ice->state.surface_uploader,
2856                                     &view->surface_state, view->res->bo);
2857       }
2858    }
2859    for (; i < count + unbind_num_trailing_slots; i++) {
2860       pipe_sampler_view_reference((struct pipe_sampler_view **)
2861                                   &shs->textures[start + i], NULL);
2862    }
2863 
2864    ice->state.stage_dirty |= (IRIS_STAGE_DIRTY_BINDINGS_VS << stage);
2865    ice->state.dirty |=
2866       stage == MESA_SHADER_COMPUTE ? IRIS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES
2867                                    : IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
2868 }
2869 
2870 static void
2871 iris_set_compute_resources(struct pipe_context *ctx,
2872                            unsigned start, unsigned count,
2873                            struct pipe_surface **resources)
2874 {
2875    assert(count == 0);
2876 }
2877 
2878 static void
2879 iris_set_global_binding(struct pipe_context *ctx,
2880                         unsigned start_slot, unsigned count,
2881                         struct pipe_resource **resources,
2882                         uint32_t **handles)
2883 {
2884    struct iris_context *ice = (struct iris_context *) ctx;
2885 
2886    assert(start_slot + count <= IRIS_MAX_GLOBAL_BINDINGS);
2887    for (unsigned i = 0; i < count; i++) {
2888       if (resources && resources[i]) {
2889          pipe_resource_reference(&ice->state.global_bindings[start_slot + i],
2890                                  resources[i]);
2891          struct iris_resource *res = (void *) resources[i];
2892          uint64_t addr = res->bo->address;
2893          memcpy(handles[i], &addr, sizeof(addr));
2894       } else {
2895          pipe_resource_reference(&ice->state.global_bindings[start_slot + i],
2896                                  NULL);
2897       }
2898    }
2899 
2900    ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_CS;
2901 }
2902 
2903 /**
2904  * The pipe->set_tess_state() driver hook.
2905  */
2906 static void
2907 iris_set_tess_state(struct pipe_context *ctx,
2908                     const float default_outer_level[4],
2909                     const float default_inner_level[2])
2910 {
2911    struct iris_context *ice = (struct iris_context *) ctx;
2912    struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_CTRL];
2913 
2914    memcpy(&ice->state.default_outer_level[0], &default_outer_level[0], 4 * sizeof(float));
2915    memcpy(&ice->state.default_inner_level[0], &default_inner_level[0], 2 * sizeof(float));
2916 
2917    ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_TCS;
2918    shs->sysvals_need_upload = true;
2919 }
2920 
2921 static void
2922 iris_set_patch_vertices(struct pipe_context *ctx, uint8_t patch_vertices)
2923 {
2924    struct iris_context *ice = (struct iris_context *) ctx;
2925 
2926    ice->state.patch_vertices = patch_vertices;
2927 }
2928 
2929 static void
2930 iris_surface_destroy(struct pipe_context *ctx, struct pipe_surface *p_surf)
2931 {
2932    struct iris_surface *surf = (void *) p_surf;
2933    pipe_resource_reference(&p_surf->texture, NULL);
2934    pipe_resource_reference(&surf->surface_state.ref.res, NULL);
2935    pipe_resource_reference(&surf->surface_state_read.ref.res, NULL);
2936    free(surf->surface_state.cpu);
2937    free(surf);
2938 }
2939 
2940 static void
2941 iris_set_clip_state(struct pipe_context *ctx,
2942                     const struct pipe_clip_state *state)
2943 {
2944    struct iris_context *ice = (struct iris_context *) ctx;
2945    struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_VERTEX];
2946    struct iris_shader_state *gshs = &ice->state.shaders[MESA_SHADER_GEOMETRY];
2947    struct iris_shader_state *tshs = &ice->state.shaders[MESA_SHADER_TESS_EVAL];
2948 
2949    memcpy(&ice->state.clip_planes, state, sizeof(*state));
2950 
2951    ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_VS |
2952                              IRIS_STAGE_DIRTY_CONSTANTS_GS |
2953                              IRIS_STAGE_DIRTY_CONSTANTS_TES;
2954    shs->sysvals_need_upload = true;
2955    gshs->sysvals_need_upload = true;
2956    tshs->sysvals_need_upload = true;
2957 }
2958 
2959 /**
2960  * The pipe->set_polygon_stipple() driver hook.
2961  */
2962 static void
2963 iris_set_polygon_stipple(struct pipe_context *ctx,
2964                          const struct pipe_poly_stipple *state)
2965 {
2966    struct iris_context *ice = (struct iris_context *) ctx;
2967    memcpy(&ice->state.poly_stipple, state, sizeof(*state));
2968    ice->state.dirty |= IRIS_DIRTY_POLYGON_STIPPLE;
2969 }
2970 
2971 /**
2972  * The pipe->set_sample_mask() driver hook.
2973  */
2974 static void
2975 iris_set_sample_mask(struct pipe_context *ctx, unsigned sample_mask)
2976 {
2977    struct iris_context *ice = (struct iris_context *) ctx;
2978 
2979    /* We only support 16x MSAA, so we have 16 bits of sample mask.
2980     * st/mesa may pass us 0xffffffff though, meaning "enable all samples".
2981     */
2982    ice->state.sample_mask = sample_mask & 0xffff;
2983    ice->state.dirty |= IRIS_DIRTY_SAMPLE_MASK;
2984 }
2985 
2986 /**
2987  * The pipe->set_scissor_states() driver hook.
2988  *
2989  * This corresponds to our SCISSOR_RECT state structures.  It's an
2990  * exact match, so we just store them, and memcpy them out later.
2991  */
2992 static void
2993 iris_set_scissor_states(struct pipe_context *ctx,
2994                         unsigned start_slot,
2995                         unsigned num_scissors,
2996                         const struct pipe_scissor_state *rects)
2997 {
2998    struct iris_context *ice = (struct iris_context *) ctx;
2999 
3000    for (unsigned i = 0; i < num_scissors; i++) {
3001       if (rects[i].minx == rects[i].maxx || rects[i].miny == rects[i].maxy) {
3002          /* If the scissor was out of bounds and got clamped to 0 width/height
3003           * at the bounds, the subtraction of 1 from maximums could produce a
3004           * negative number and thus not clip anything.  Instead, just provide
3005           * a min > max scissor inside the bounds, which produces the expected
3006           * no rendering.
3007           */
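         /* For example, an empty scissor with minx == maxx becomes
          * { minx = 1, maxx = 0 }; since the hardware test is inclusive
          * (min <= x <= max), no pixel can ever pass it.
          */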
3008          ice->state.scissors[start_slot + i] = (struct pipe_scissor_state) {
3009             .minx = 1, .maxx = 0, .miny = 1, .maxy = 0,
3010          };
3011       } else {
3012          ice->state.scissors[start_slot + i] = (struct pipe_scissor_state) {
3013             .minx = rects[i].minx,     .miny = rects[i].miny,
3014             .maxx = rects[i].maxx - 1, .maxy = rects[i].maxy - 1,
3015          };
3016       }
3017    }
3018 
3019    ice->state.dirty |= IRIS_DIRTY_SCISSOR_RECT;
3020 }
3021 
3022 /**
3023  * The pipe->set_stencil_ref() driver hook.
3024  *
3025  * This is added to 3DSTATE_WM_DEPTH_STENCIL dynamically at draw time.
3026  */
3027 static void
3028 iris_set_stencil_ref(struct pipe_context *ctx,
3029                      const struct pipe_stencil_ref state)
3030 {
3031    struct iris_context *ice = (struct iris_context *) ctx;
3032    memcpy(&ice->state.stencil_ref, &state, sizeof(state));
3033    if (GFX_VER >= 12)
3034       ice->state.dirty |= IRIS_DIRTY_STENCIL_REF;
3035    else if (GFX_VER >= 9)
3036       ice->state.dirty |= IRIS_DIRTY_WM_DEPTH_STENCIL;
3037    else
3038       ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE;
3039 }
3040 
3041 static float
3042 viewport_extent(const struct pipe_viewport_state *state, int axis, float sign)
3043 {
3044    return copysignf(state->scale[axis], sign) + state->translate[axis];
3045 }
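/* Worked example (illustrative): a viewport mapping NDC x in [-1, 1] to window
 * x in [0, 1920] has scale[0] = 960 and translate[0] = 960, so
 * viewport_extent(state, 0, -1.0f) returns 0.0f and
 * viewport_extent(state, 0, +1.0f) returns 1920.0f.
 */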
3046 
3047 /**
3048  * The pipe->set_viewport_states() driver hook.
3049  *
3050  * This corresponds to our SF_CLIP_VIEWPORT states.  We can't calculate
3051  * the guardband yet, as we need the framebuffer dimensions, but we can
3052  * at least fill out the rest.
3053  */
3054 static void
3055 iris_set_viewport_states(struct pipe_context *ctx,
3056                          unsigned start_slot,
3057                          unsigned count,
3058                          const struct pipe_viewport_state *states)
3059 {
3060    struct iris_context *ice = (struct iris_context *) ctx;
3061 
3062    memcpy(&ice->state.viewports[start_slot], states, sizeof(*states) * count);
3063 
3064    ice->state.dirty |= IRIS_DIRTY_SF_CL_VIEWPORT;
3065 
3066    if (ice->state.cso_rast && (!ice->state.cso_rast->depth_clip_near ||
3067                                !ice->state.cso_rast->depth_clip_far))
3068       ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
3069 }
3070 
3071 /**
3072  * The pipe->set_framebuffer_state() driver hook.
3073  *
3074  * Sets the current draw FBO, including color render targets, depth,
3075  * and stencil buffers.
3076  */
3077 static void
3078 iris_set_framebuffer_state(struct pipe_context *ctx,
3079                            const struct pipe_framebuffer_state *state)
3080 {
3081    struct iris_context *ice = (struct iris_context *) ctx;
3082    struct iris_screen *screen = (struct iris_screen *)ctx->screen;
3083    struct isl_device *isl_dev = &screen->isl_dev;
3084    struct pipe_framebuffer_state *cso = &ice->state.framebuffer;
3085    struct iris_resource *zres;
3086    struct iris_resource *stencil_res;
3087 
3088    unsigned samples = util_framebuffer_get_num_samples(state);
3089    unsigned layers = util_framebuffer_get_num_layers(state);
3090 
3091    if (cso->samples != samples) {
3092       ice->state.dirty |= IRIS_DIRTY_MULTISAMPLE;
3093 
3094       /* We need to toggle 3DSTATE_PS::32 Pixel Dispatch Enable */
3095       if (GFX_VER >= 9 && (cso->samples == 16 || samples == 16))
3096          ice->state.stage_dirty |= IRIS_STAGE_DIRTY_FS;
3097    }
3098 
3099    if (cso->nr_cbufs != state->nr_cbufs) {
3100       ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
3101    }
3102 
3103    if ((cso->layers == 0) != (layers == 0)) {
3104       ice->state.dirty |= IRIS_DIRTY_CLIP;
3105    }
3106 
3107    if (cso->width != state->width || cso->height != state->height) {
3108       ice->state.dirty |= IRIS_DIRTY_SF_CL_VIEWPORT;
3109    }
3110 
3111    if (cso->zsbuf || state->zsbuf) {
3112       ice->state.dirty |= IRIS_DIRTY_DEPTH_BUFFER;
3113    }
3114 
3115    util_copy_framebuffer_state(cso, state);
3116    cso->samples = samples;
3117    cso->layers = layers;
3118 
3119    struct iris_depth_buffer_state *cso_z = &ice->state.genx->depth_buffer;
3120 
3121    struct isl_view view = {
3122       .base_level = 0,
3123       .levels = 1,
3124       .base_array_layer = 0,
3125       .array_len = 1,
3126       .swizzle = ISL_SWIZZLE_IDENTITY,
3127    };
3128 
3129    struct isl_depth_stencil_hiz_emit_info info = { .view = &view };
3130 
3131    if (cso->zsbuf) {
3132       iris_get_depth_stencil_resources(cso->zsbuf->texture, &zres,
3133                                        &stencil_res);
3134 
3135       view.base_level = cso->zsbuf->u.tex.level;
3136       view.base_array_layer = cso->zsbuf->u.tex.first_layer;
3137       view.array_len =
3138          cso->zsbuf->u.tex.last_layer - cso->zsbuf->u.tex.first_layer + 1;
3139 
3140       if (zres) {
3141          view.usage |= ISL_SURF_USAGE_DEPTH_BIT;
3142 
3143          info.depth_surf = &zres->surf;
3144          info.depth_address = zres->bo->address + zres->offset;
3145          info.mocs = iris_mocs(zres->bo, isl_dev, view.usage);
3146 
3147          view.format = zres->surf.format;
3148 
3149          if (iris_resource_level_has_hiz(zres, view.base_level)) {
3150             info.hiz_usage = zres->aux.usage;
3151             info.hiz_surf = &zres->aux.surf;
3152             info.hiz_address = zres->aux.bo->address + zres->aux.offset;
3153          }
3154 
3155          ice->state.hiz_usage = info.hiz_usage;
3156       }
3157 
3158       if (stencil_res) {
3159          view.usage |= ISL_SURF_USAGE_STENCIL_BIT;
3160          info.stencil_aux_usage = stencil_res->aux.usage;
3161          info.stencil_surf = &stencil_res->surf;
3162          info.stencil_address = stencil_res->bo->address + stencil_res->offset;
3163          if (!zres) {
3164             view.format = stencil_res->surf.format;
3165             info.mocs = iris_mocs(stencil_res->bo, isl_dev, view.usage);
3166          }
3167       }
3168    }
3169 
3170    isl_emit_depth_stencil_hiz_s(isl_dev, cso_z->packets, &info);
3171 
3172    /* Make a null surface for unbound buffers */
3173    void *null_surf_map =
3174       upload_state(ice->state.surface_uploader, &ice->state.null_fb,
3175                    4 * GENX(RENDER_SURFACE_STATE_length), 64);
3176    isl_null_fill_state(&screen->isl_dev, null_surf_map,
3177                        .size = isl_extent3d(MAX2(cso->width, 1),
3178                                             MAX2(cso->height, 1),
3179                                             cso->layers ? cso->layers : 1));
3180    ice->state.null_fb.offset +=
3181       iris_bo_offset_from_base_address(iris_resource_bo(ice->state.null_fb.res));
3182 
3183    /* Render target change */
3184    ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_FS;
3185 
3186    ice->state.dirty |= IRIS_DIRTY_RENDER_BUFFER;
3187 
3188    ice->state.dirty |= IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
3189 
3190    ice->state.stage_dirty |=
3191       ice->state.stage_dirty_for_nos[IRIS_NOS_FRAMEBUFFER];
3192 
3193    if (GFX_VER == 8)
3194       ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
3195 }
3196 
3197 /**
3198  * The pipe->set_constant_buffer() driver hook.
3199  *
3200  * This uploads any constant data in user buffers, and references
3201  * any UBO resources containing constant data.
3202  */
3203 static void
3204 iris_set_constant_buffer(struct pipe_context *ctx,
3205                          enum pipe_shader_type p_stage, unsigned index,
3206                          bool take_ownership,
3207                          const struct pipe_constant_buffer *input)
3208 {
3209    struct iris_context *ice = (struct iris_context *) ctx;
3210    gl_shader_stage stage = stage_from_pipe(p_stage);
3211    struct iris_shader_state *shs = &ice->state.shaders[stage];
3212    struct pipe_shader_buffer *cbuf = &shs->constbuf[index];
3213 
3214    /* TODO: Only do this if the buffer changes? */
3215    pipe_resource_reference(&shs->constbuf_surf_state[index].res, NULL);
3216 
3217    if (input && input->buffer_size && (input->buffer || input->user_buffer)) {
3218       shs->bound_cbufs |= 1u << index;
3219 
3220       if (input->user_buffer) {
3221          void *map = NULL;
3222          pipe_resource_reference(&cbuf->buffer, NULL);
3223          u_upload_alloc(ice->ctx.const_uploader, 0, input->buffer_size, 64,
3224                         &cbuf->buffer_offset, &cbuf->buffer, (void **) &map);
3225 
3226          if (!cbuf->buffer) {
3227             /* Allocation was unsuccessful - just unbind */
3228             iris_set_constant_buffer(ctx, p_stage, index, false, NULL);
3229             return;
3230          }
3231 
3232          assert(map);
3233          memcpy(map, input->user_buffer, input->buffer_size);
3234       } else if (input->buffer) {
3235          if (cbuf->buffer != input->buffer) {
3236             ice->state.dirty |= (IRIS_DIRTY_RENDER_MISC_BUFFER_FLUSHES |
3237                                  IRIS_DIRTY_COMPUTE_MISC_BUFFER_FLUSHES);
3238             shs->dirty_cbufs |= 1u << index;
3239          }
3240 
3241          if (take_ownership) {
3242             pipe_resource_reference(&cbuf->buffer, NULL);
3243             cbuf->buffer = input->buffer;
3244          } else {
3245             pipe_resource_reference(&cbuf->buffer, input->buffer);
3246          }
3247 
3248          cbuf->buffer_offset = input->buffer_offset;
3249       }
3250 
3251       cbuf->buffer_size =
3252          MIN2(input->buffer_size,
3253               iris_resource_bo(cbuf->buffer)->size - cbuf->buffer_offset);
3254 
3255       struct iris_resource *res = (void *) cbuf->buffer;
3256       res->bind_history |= PIPE_BIND_CONSTANT_BUFFER;
3257       res->bind_stages |= 1 << stage;
3258    } else {
3259       shs->bound_cbufs &= ~(1u << index);
3260       pipe_resource_reference(&cbuf->buffer, NULL);
3261    }
3262 
3263    ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_VS << stage;
3264 }
3265 
3266 static void
3267 upload_sysvals(struct iris_context *ice,
3268                gl_shader_stage stage,
3269                const struct pipe_grid_info *grid)
3270 {
3271    UNUSED struct iris_genx_state *genx = ice->state.genx;
3272    struct iris_shader_state *shs = &ice->state.shaders[stage];
3273 
3274    struct iris_compiled_shader *shader = ice->shaders.prog[stage];
3275    if (!shader || (shader->num_system_values == 0 &&
3276                    shader->kernel_input_size == 0))
3277       return;
3278 
3279    assert(shader->num_cbufs > 0);
3280 
3281    unsigned sysval_cbuf_index = shader->num_cbufs - 1;
3282    struct pipe_shader_buffer *cbuf = &shs->constbuf[sysval_cbuf_index];
3283    unsigned system_values_start =
3284       ALIGN(shader->kernel_input_size, sizeof(uint32_t));
3285    unsigned upload_size = system_values_start +
3286                           shader->num_system_values * sizeof(uint32_t);
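   /* Illustrative layout: with kernel_input_size = 6 and 3 system values,
    * system_values_start = ALIGN(6, 4) = 8 and upload_size = 8 + 3 * 4 = 20;
    * kernel inputs come first, then one dword per system value.
    */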
3287    void *map = NULL;
3288 
3289    assert(sysval_cbuf_index < PIPE_MAX_CONSTANT_BUFFERS);
3290    u_upload_alloc(ice->ctx.const_uploader, 0, upload_size, 64,
3291                   &cbuf->buffer_offset, &cbuf->buffer, &map);
3292 
3293    if (shader->kernel_input_size > 0)
3294       memcpy(map, grid->input, shader->kernel_input_size);
3295 
3296    uint32_t *sysval_map = map + system_values_start;
3297    for (int i = 0; i < shader->num_system_values; i++) {
3298       uint32_t sysval = shader->system_values[i];
3299       uint32_t value = 0;
3300 
3301       if (BRW_PARAM_DOMAIN(sysval) == BRW_PARAM_DOMAIN_IMAGE) {
3302 #if GFX_VER == 8
3303          unsigned img = BRW_PARAM_IMAGE_IDX(sysval);
3304          unsigned offset = BRW_PARAM_IMAGE_OFFSET(sysval);
3305          struct brw_image_param *param =
3306             &genx->shaders[stage].image_param[img];
3307 
3308          assert(offset < sizeof(struct brw_image_param));
3309          value = ((uint32_t *) param)[offset];
3310 #endif
3311       } else if (sysval == BRW_PARAM_BUILTIN_ZERO) {
3312          value = 0;
3313       } else if (BRW_PARAM_BUILTIN_IS_CLIP_PLANE(sysval)) {
3314          int plane = BRW_PARAM_BUILTIN_CLIP_PLANE_IDX(sysval);
3315          int comp  = BRW_PARAM_BUILTIN_CLIP_PLANE_COMP(sysval);
3316          value = fui(ice->state.clip_planes.ucp[plane][comp]);
3317       } else if (sysval == BRW_PARAM_BUILTIN_PATCH_VERTICES_IN) {
3318          if (stage == MESA_SHADER_TESS_CTRL) {
3319             value = ice->state.vertices_per_patch;
3320          } else {
3321             assert(stage == MESA_SHADER_TESS_EVAL);
3322             const struct shader_info *tcs_info =
3323                iris_get_shader_info(ice, MESA_SHADER_TESS_CTRL);
3324             if (tcs_info)
3325                value = tcs_info->tess.tcs_vertices_out;
3326             else
3327                value = ice->state.vertices_per_patch;
3328          }
3329       } else if (sysval >= BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X &&
3330                  sysval <= BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_W) {
3331          unsigned i = sysval - BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X;
3332          value = fui(ice->state.default_outer_level[i]);
3333       } else if (sysval == BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X) {
3334          value = fui(ice->state.default_inner_level[0]);
3335       } else if (sysval == BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_Y) {
3336          value = fui(ice->state.default_inner_level[1]);
3337       } else if (sysval >= BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_X &&
3338                  sysval <= BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_Z) {
3339          unsigned i = sysval - BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_X;
3340          value = ice->state.last_block[i];
3341       } else if (sysval == BRW_PARAM_BUILTIN_WORK_DIM) {
3342          value = grid->work_dim;
3343       } else {
3344          assert(!"unhandled system value");
3345       }
3346 
3347       *sysval_map++ = value;
3348    }
3349 
3350    cbuf->buffer_size = upload_size;
3351    iris_upload_ubo_ssbo_surf_state(ice, cbuf,
3352                                    &shs->constbuf_surf_state[sysval_cbuf_index],
3353                                    ISL_SURF_USAGE_CONSTANT_BUFFER_BIT);
3354 
3355    shs->sysvals_need_upload = false;
3356 }
3357 
3358 /**
3359  * The pipe->set_shader_buffers() driver hook.
3360  *
3361  * This binds SSBOs and ABOs.  Unfortunately, we need to stream out
3362  * SURFACE_STATE here, as the buffer offset may change each time.
3363  */
3364 static void
3365 iris_set_shader_buffers(struct pipe_context *ctx,
3366                         enum pipe_shader_type p_stage,
3367                         unsigned start_slot, unsigned count,
3368                         const struct pipe_shader_buffer *buffers,
3369                         unsigned writable_bitmask)
3370 {
3371    struct iris_context *ice = (struct iris_context *) ctx;
3372    gl_shader_stage stage = stage_from_pipe(p_stage);
3373    struct iris_shader_state *shs = &ice->state.shaders[stage];
3374 
3375    unsigned modified_bits = u_bit_consecutive(start_slot, count);
3376 
3377    shs->bound_ssbos &= ~modified_bits;
3378    shs->writable_ssbos &= ~modified_bits;
3379    shs->writable_ssbos |= writable_bitmask << start_slot;
3380 
3381    for (unsigned i = 0; i < count; i++) {
3382       if (buffers && buffers[i].buffer) {
3383          struct iris_resource *res = (void *) buffers[i].buffer;
3384          struct pipe_shader_buffer *ssbo = &shs->ssbo[start_slot + i];
3385          struct iris_state_ref *surf_state =
3386             &shs->ssbo_surf_state[start_slot + i];
3387          pipe_resource_reference(&ssbo->buffer, &res->base.b);
3388          ssbo->buffer_offset = buffers[i].buffer_offset;
3389          ssbo->buffer_size =
3390             MIN2(buffers[i].buffer_size, res->bo->size - ssbo->buffer_offset);
3391 
3392          shs->bound_ssbos |= 1 << (start_slot + i);
3393 
3394          isl_surf_usage_flags_t usage = ISL_SURF_USAGE_STORAGE_BIT;
3395 
3396          iris_upload_ubo_ssbo_surf_state(ice, ssbo, surf_state, usage);
3397 
3398          res->bind_history |= PIPE_BIND_SHADER_BUFFER;
3399          res->bind_stages |= 1 << stage;
3400 
3401          util_range_add(&res->base.b, &res->valid_buffer_range, ssbo->buffer_offset,
3402                         ssbo->buffer_offset + ssbo->buffer_size);
3403       } else {
3404          pipe_resource_reference(&shs->ssbo[start_slot + i].buffer, NULL);
3405          pipe_resource_reference(&shs->ssbo_surf_state[start_slot + i].res,
3406                                  NULL);
3407       }
3408    }
3409 
3410    ice->state.dirty |= (IRIS_DIRTY_RENDER_MISC_BUFFER_FLUSHES |
3411                         IRIS_DIRTY_COMPUTE_MISC_BUFFER_FLUSHES);
3412    ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << stage;
3413 }
3414 
3415 static void
3416 iris_delete_state(struct pipe_context *ctx, void *state)
3417 {
3418    free(state);
3419 }
3420 
3421 /**
3422  * The pipe->set_vertex_buffers() driver hook.
3423  *
3424  * This translates pipe_vertex_buffer to our 3DSTATE_VERTEX_BUFFERS packet.
3425  */
3426 static void
3427 iris_set_vertex_buffers(struct pipe_context *ctx,
3428                         unsigned start_slot, unsigned count,
3429                         unsigned unbind_num_trailing_slots,
3430                         bool take_ownership,
3431                         const struct pipe_vertex_buffer *buffers)
3432 {
3433    struct iris_context *ice = (struct iris_context *) ctx;
3434    struct iris_screen *screen = (struct iris_screen *)ctx->screen;
3435    struct iris_genx_state *genx = ice->state.genx;
3436 
3437    ice->state.bound_vertex_buffers &=
3438       ~u_bit_consecutive64(start_slot, count + unbind_num_trailing_slots);
3439 
3440    for (unsigned i = 0; i < count; i++) {
3441       const struct pipe_vertex_buffer *buffer = buffers ? &buffers[i] : NULL;
3442       struct iris_vertex_buffer_state *state =
3443          &genx->vertex_buffers[start_slot + i];
3444 
3445       if (!buffer) {
3446          pipe_resource_reference(&state->resource, NULL);
3447          continue;
3448       }
3449 
3450       /* We may see user buffers that are NULL bindings. */
3451       assert(!(buffer->is_user_buffer && buffer->buffer.user != NULL));
3452 
3453       if (buffer->buffer.resource &&
3454           state->resource != buffer->buffer.resource)
3455          ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFER_FLUSHES;
3456 
3457       if (take_ownership) {
3458          pipe_resource_reference(&state->resource, NULL);
3459          state->resource = buffer->buffer.resource;
3460       } else {
3461          pipe_resource_reference(&state->resource, buffer->buffer.resource);
3462       }
3463       struct iris_resource *res = (void *) state->resource;
3464 
3465       state->offset = (int) buffer->buffer_offset;
3466 
3467       if (res) {
3468          ice->state.bound_vertex_buffers |= 1ull << (start_slot + i);
3469          res->bind_history |= PIPE_BIND_VERTEX_BUFFER;
3470       }
3471 
3472       iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) {
3473          vb.VertexBufferIndex = start_slot + i;
3474          vb.AddressModifyEnable = true;
3475          vb.BufferPitch = buffer->stride;
3476          if (res) {
3477             vb.BufferSize = res->base.b.width0 - (int) buffer->buffer_offset;
3478             vb.BufferStartingAddress =
3479                ro_bo(NULL, res->bo->address + (int) buffer->buffer_offset);
3480             vb.MOCS = iris_mocs(res->bo, &screen->isl_dev,
3481                                 ISL_SURF_USAGE_VERTEX_BUFFER_BIT);
3482 #if GFX_VER >= 12
3483             vb.L3BypassDisable       = true;
3484 #endif
3485          } else {
3486             vb.NullVertexBuffer = true;
3487          }
3488       }
3489    }
3490 
3491    for (unsigned i = 0; i < unbind_num_trailing_slots; i++) {
3492       struct iris_vertex_buffer_state *state =
3493          &genx->vertex_buffers[start_slot + count + i];
3494 
3495       pipe_resource_reference(&state->resource, NULL);
3496    }
3497 
3498    ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS;
3499 }
3500 
3501 /**
3502  * Gallium CSO for vertex elements.
3503  */
3504 struct iris_vertex_element_state {
3505    uint32_t vertex_elements[1 + 33 * GENX(VERTEX_ELEMENT_STATE_length)];
3506    uint32_t vf_instancing[33 * GENX(3DSTATE_VF_INSTANCING_length)];
3507    uint32_t edgeflag_ve[GENX(VERTEX_ELEMENT_STATE_length)];
3508    uint32_t edgeflag_vfi[GENX(3DSTATE_VF_INSTANCING_length)];
3509    unsigned count;
3510 };
3511 
3512 /**
3513  * The pipe->create_vertex_elements() driver hook.
3514  *
3515  * This translates pipe_vertex_element to our 3DSTATE_VERTEX_ELEMENTS
3516  * and 3DSTATE_VF_INSTANCING commands.  The vertex_elements and vf_instancing
3517  * arrays are ready to be emitted at draw time if no EdgeFlag or SGVs are
3518  * needed.  When they are, we require information that is only available at
3519  * draw time.  We set up edgeflag_ve and edgeflag_vfi as alternative versions
3520  * of the last 3DSTATE_VERTEX_ELEMENT and 3DSTATE_VF_INSTANCING, to be used
3521  * at draw time if we detect that the Vertex Shader needs EdgeFlag.
3522  */
3523 static void *
3524 iris_create_vertex_elements(struct pipe_context *ctx,
3525                             unsigned count,
3526                             const struct pipe_vertex_element *state)
3527 {
3528    struct iris_screen *screen = (struct iris_screen *)ctx->screen;
3529    const struct intel_device_info *devinfo = &screen->devinfo;
3530    struct iris_vertex_element_state *cso =
3531       malloc(sizeof(struct iris_vertex_element_state));
3532 
3533    cso->count = count;
3534 
3535    iris_pack_command(GENX(3DSTATE_VERTEX_ELEMENTS), cso->vertex_elements, ve) {
3536       ve.DWordLength =
3537          1 + GENX(VERTEX_ELEMENT_STATE_length) * MAX2(count, 1) - 2;
3538    }
3539 
3540    uint32_t *ve_pack_dest = &cso->vertex_elements[1];
3541    uint32_t *vfi_pack_dest = cso->vf_instancing;
3542 
3543    if (count == 0) {
3544       iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
3545          ve.Valid = true;
3546          ve.SourceElementFormat = ISL_FORMAT_R32G32B32A32_FLOAT;
3547          ve.Component0Control = VFCOMP_STORE_0;
3548          ve.Component1Control = VFCOMP_STORE_0;
3549          ve.Component2Control = VFCOMP_STORE_0;
3550          ve.Component3Control = VFCOMP_STORE_1_FP;
3551       }
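      /* Storing (0, 0, 0, 1) matches the conventional default attribute value;
       * note that 3DSTATE_VERTEX_ELEMENTS is sized with MAX2(count, 1) above,
       * so a dummy element is emitted even when no real elements exist.
       */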
3552 
3553       iris_pack_command(GENX(3DSTATE_VF_INSTANCING), vfi_pack_dest, vi) {
3554       }
3555    }
3556 
3557    for (int i = 0; i < count; i++) {
3558       const struct iris_format_info fmt =
3559          iris_format_for_usage(devinfo, state[i].src_format, 0);
3560       unsigned comp[4] = { VFCOMP_STORE_SRC, VFCOMP_STORE_SRC,
3561                            VFCOMP_STORE_SRC, VFCOMP_STORE_SRC };
3562 
3563       switch (isl_format_get_num_channels(fmt.fmt)) {
3564       case 0: comp[0] = VFCOMP_STORE_0; FALLTHROUGH;
3565       case 1: comp[1] = VFCOMP_STORE_0; FALLTHROUGH;
3566       case 2: comp[2] = VFCOMP_STORE_0; FALLTHROUGH;
3567       case 3:
3568          comp[3] = isl_format_has_int_channel(fmt.fmt) ? VFCOMP_STORE_1_INT
3569                                                        : VFCOMP_STORE_1_FP;
3570          break;
3571       }
3572       iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
3573          ve.EdgeFlagEnable = false;
3574          ve.VertexBufferIndex = state[i].vertex_buffer_index;
3575          ve.Valid = true;
3576          ve.SourceElementOffset = state[i].src_offset;
3577          ve.SourceElementFormat = fmt.fmt;
3578          ve.Component0Control = comp[0];
3579          ve.Component1Control = comp[1];
3580          ve.Component2Control = comp[2];
3581          ve.Component3Control = comp[3];
3582       }
3583 
3584       iris_pack_command(GENX(3DSTATE_VF_INSTANCING), vfi_pack_dest, vi) {
3585          vi.VertexElementIndex = i;
3586          vi.InstancingEnable = state[i].instance_divisor > 0;
3587          vi.InstanceDataStepRate = state[i].instance_divisor;
3588       }
3589 
3590       ve_pack_dest += GENX(VERTEX_ELEMENT_STATE_length);
3591       vfi_pack_dest += GENX(3DSTATE_VF_INSTANCING_length);
3592    }
3593 
3594    /* An alternative version of the last VE and VFI is stored so it
3595     * can be used at draw time in case the Vertex Shader uses EdgeFlag.
3596     */
3597    if (count) {
3598       const unsigned edgeflag_index = count - 1;
3599       const struct iris_format_info fmt =
3600          iris_format_for_usage(devinfo, state[edgeflag_index].src_format, 0);
3601       iris_pack_state(GENX(VERTEX_ELEMENT_STATE), cso->edgeflag_ve, ve) {
3602          ve.EdgeFlagEnable = true;
3603          ve.VertexBufferIndex = state[edgeflag_index].vertex_buffer_index;
3604          ve.Valid = true;
3605          ve.SourceElementOffset = state[edgeflag_index].src_offset;
3606          ve.SourceElementFormat = fmt.fmt;
3607          ve.Component0Control = VFCOMP_STORE_SRC;
3608          ve.Component1Control = VFCOMP_STORE_0;
3609          ve.Component2Control = VFCOMP_STORE_0;
3610          ve.Component3Control = VFCOMP_STORE_0;
3611       }
3612       iris_pack_command(GENX(3DSTATE_VF_INSTANCING), cso->edgeflag_vfi, vi) {
3613          /* The vi.VertexElementIndex of the EdgeFlag Vertex Element is filled
3614           * at draw time, as it should change if SGVs are emitted.
3615           */
3616          vi.InstancingEnable = state[edgeflag_index].instance_divisor > 0;
3617          vi.InstanceDataStepRate = state[edgeflag_index].instance_divisor;
3618       }
3619    }
3620 
3621    return cso;
3622 }
3623 
3624 /**
3625  * The pipe->bind_vertex_elements_state() driver hook.
3626  */
3627 static void
3628 iris_bind_vertex_elements_state(struct pipe_context *ctx, void *state)
3629 {
3630    struct iris_context *ice = (struct iris_context *) ctx;
3631    struct iris_vertex_element_state *old_cso = ice->state.cso_vertex_elements;
3632    struct iris_vertex_element_state *new_cso = state;
3633 
3634    /* 3DSTATE_VF_SGVs overrides the last VE, so if the count is changing,
3635     * we need to re-emit it to ensure we're overriding the right one.
3636     */
3637    if (new_cso && cso_changed(count))
3638       ice->state.dirty |= IRIS_DIRTY_VF_SGVS;
3639 
3640    ice->state.cso_vertex_elements = state;
3641    ice->state.dirty |= IRIS_DIRTY_VERTEX_ELEMENTS;
3642 }
3643 
3644 /**
3645  * The pipe->create_stream_output_target() driver hook.
3646  *
3647  * "Target" here refers to a destination buffer.  We translate this into
3648  * a 3DSTATE_SO_BUFFER packet.  We can handle most fields, but don't yet
3649  * know which buffer this represents, or whether we ought to zero the
3650  * write-offsets, or append.  Those are handled in the set() hook.
3651  */
3652 static struct pipe_stream_output_target *
3653 iris_create_stream_output_target(struct pipe_context *ctx,
3654                                  struct pipe_resource *p_res,
3655                                  unsigned buffer_offset,
3656                                  unsigned buffer_size)
3657 {
3658    struct iris_resource *res = (void *) p_res;
3659    struct iris_stream_output_target *cso = calloc(1, sizeof(*cso));
3660    if (!cso)
3661       return NULL;
3662 
3663    res->bind_history |= PIPE_BIND_STREAM_OUTPUT;
3664 
3665    pipe_reference_init(&cso->base.reference, 1);
3666    pipe_resource_reference(&cso->base.buffer, p_res);
3667    cso->base.buffer_offset = buffer_offset;
3668    cso->base.buffer_size = buffer_size;
3669    cso->base.context = ctx;
3670 
3671    util_range_add(&res->base.b, &res->valid_buffer_range, buffer_offset,
3672                   buffer_offset + buffer_size);
3673 
3674    return &cso->base;
3675 }
3676 
3677 static void
3678 iris_stream_output_target_destroy(struct pipe_context *ctx,
3679                                   struct pipe_stream_output_target *state)
3680 {
3681    struct iris_stream_output_target *cso = (void *) state;
3682 
3683    pipe_resource_reference(&cso->base.buffer, NULL);
3684    pipe_resource_reference(&cso->offset.res, NULL);
3685 
3686    free(cso);
3687 }
3688 
3689 /**
3690  * The pipe->set_stream_output_targets() driver hook.
3691  *
3692  * At this point, we know which targets are bound to a particular index,
3693  * and also whether we want to append or start over.  We can finish the
3694  * 3DSTATE_SO_BUFFER packets we started earlier.
3695  */
3696 static void
3697 iris_set_stream_output_targets(struct pipe_context *ctx,
3698                                unsigned num_targets,
3699                                struct pipe_stream_output_target **targets,
3700                                const unsigned *offsets)
3701 {
3702    struct iris_context *ice = (struct iris_context *) ctx;
3703    struct iris_genx_state *genx = ice->state.genx;
3704    uint32_t *so_buffers = genx->so_buffers;
3705    struct iris_screen *screen = (struct iris_screen *)ctx->screen;
3706 
3707    const bool active = num_targets > 0;
3708    if (ice->state.streamout_active != active) {
3709       ice->state.streamout_active = active;
3710       ice->state.dirty |= IRIS_DIRTY_STREAMOUT;
3711 
3712       /* We only emit 3DSTATE_SO_DECL_LIST when streamout is active, because
3713        * it's a non-pipelined command.  If we're switching streamout on, we
3714        * may have missed emitting it earlier, so do so now.  (We're already
3715        * taking a stall to update 3DSTATE_SO_BUFFERS anyway...)
3716        */
3717       if (active) {
3718          ice->state.dirty |= IRIS_DIRTY_SO_DECL_LIST;
3719       } else {
3720          uint32_t flush = 0;
3721          for (int i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
3722             struct iris_stream_output_target *tgt =
3723                (void *) ice->state.so_target[i];
3724             if (tgt) {
3725                struct iris_resource *res = (void *) tgt->base.buffer;
3726 
3727                flush |= iris_flush_bits_for_history(ice, res);
3728                iris_dirty_for_history(ice, res);
3729             }
3730          }
3731 #if GFX_VER >= 12
3732          /* SO draws require flushing of const cache to make SO data
3733           * observable when VB/IB are cached in L3.
3734           */
3735          if (flush & PIPE_CONTROL_VF_CACHE_INVALIDATE)
3736             flush |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
3737 #endif
3738          iris_emit_pipe_control_flush(&ice->batches[IRIS_BATCH_RENDER],
3739                                       "make streamout results visible", flush);
3740       }
3741    }
3742 
3743    for (int i = 0; i < 4; i++) {
3744       pipe_so_target_reference(&ice->state.so_target[i],
3745                                i < num_targets ? targets[i] : NULL);
3746    }
3747 
3748    /* No need to update 3DSTATE_SO_BUFFER unless SOL is active. */
3749    if (!active)
3750       return;
3751 
3752    for (unsigned i = 0; i < 4; i++,
3753         so_buffers += GENX(3DSTATE_SO_BUFFER_length)) {
3754 
3755       struct iris_stream_output_target *tgt = (void *) ice->state.so_target[i];
3756       unsigned offset = offsets[i];
3757 
3758       if (!tgt) {
3759          iris_pack_command(GENX(3DSTATE_SO_BUFFER), so_buffers, sob) {
3760 #if GFX_VER < 12
3761             sob.SOBufferIndex = i;
3762 #else
3763             sob._3DCommandOpcode = 0;
3764             sob._3DCommandSubOpcode = SO_BUFFER_INDEX_0_CMD + i;
3765 #endif
3766          }
3767          continue;
3768       }
3769 
3770       if (!tgt->offset.res)
3771          upload_state(ctx->const_uploader, &tgt->offset, sizeof(uint32_t), 4);
3772 
3773       struct iris_resource *res = (void *) tgt->base.buffer;
3774 
3775       /* Note that offsets[i] will either be 0, causing us to zero
3776        * the value in the buffer, or 0xFFFFFFFF, which happens to mean
3777        * "continue appending at the existing offset."
3778        */
3779       assert(offset == 0 || offset == 0xFFFFFFFF);
3780 
3781       /* When we're first called with an offset of 0, we want the next
3782        * 3DSTATE_SO_BUFFER packets to reset the offset to the beginning.
3783        * Any further times we emit those packets, we want to use 0xFFFFFFFF
3784        * to continue appending from the current offset.
3785        *
3786        * Note that we might be called by Begin (offset = 0), Pause, then
3787        * Resume (offset = 0xFFFFFFFF) before ever drawing (where these
3788        * commands will actually be sent to the GPU).  In this case, we
3789        * don't want to append - we still want to do our initial zeroing.
3790        */
3791       if (offset == 0)
3792          tgt->zero_offset = true;
3793 
3794       iris_pack_command(GENX(3DSTATE_SO_BUFFER), so_buffers, sob) {
3795 #if GFX_VER < 12
3796          sob.SOBufferIndex = i;
3797 #else
3798          sob._3DCommandOpcode = 0;
3799          sob._3DCommandSubOpcode = SO_BUFFER_INDEX_0_CMD + i;
3800 #endif
3801          sob.SurfaceBaseAddress =
3802             rw_bo(NULL, res->bo->address + tgt->base.buffer_offset,
3803                   IRIS_DOMAIN_OTHER_WRITE);
3804          sob.SOBufferEnable = true;
3805          sob.StreamOffsetWriteEnable = true;
3806          sob.StreamOutputBufferOffsetAddressEnable = true;
3807          sob.MOCS = iris_mocs(res->bo, &screen->isl_dev, 0);
3808 
3809          sob.SurfaceSize = MAX2(tgt->base.buffer_size / 4, 1) - 1;
3810          sob.StreamOutputBufferOffsetAddress =
3811             rw_bo(NULL, iris_resource_bo(tgt->offset.res)->address +
3812                         tgt->offset.offset, IRIS_DOMAIN_OTHER_WRITE);
3813          sob.StreamOffset = 0xFFFFFFFF; /* not offset, see above */
3814       }
3815    }
3816 
3817    ice->state.dirty |= IRIS_DIRTY_SO_BUFFERS;
3818 }
3819 
3820 /**
3821  * An iris-vtable helper for encoding the 3DSTATE_SO_DECL_LIST and
3822  * 3DSTATE_STREAMOUT packets.
3823  *
3824  * 3DSTATE_SO_DECL_LIST is a list of shader outputs we want the streamout
3825  * hardware to record.  We can create it entirely based on the shader, with
3826  * no dynamic state dependencies.
3827  *
3828  * 3DSTATE_STREAMOUT is an annoying mix of shader-based information and
3829  * state-based settings.  We capture the shader-related ones here, and merge
3830  * the rest in at draw time.
3831  */
3832 static uint32_t *
3833 iris_create_so_decl_list(const struct pipe_stream_output_info *info,
3834                          const struct brw_vue_map *vue_map)
3835 {
3836    struct GENX(SO_DECL) so_decl[MAX_VERTEX_STREAMS][128];
3837    int buffer_mask[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
3838    int next_offset[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
3839    int decls[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
3840    int max_decls = 0;
3841    STATIC_ASSERT(ARRAY_SIZE(so_decl[0]) >= MAX_PROGRAM_OUTPUTS);
3842 
3843    memset(so_decl, 0, sizeof(so_decl));
3844 
3845    /* Construct the list of SO_DECLs to be emitted.  The formatting of the
3846     * command feels strange -- each dword pair contains a SO_DECL per stream.
3847     */
3848    for (unsigned i = 0; i < info->num_outputs; i++) {
3849       const struct pipe_stream_output *output = &info->output[i];
3850       const int buffer = output->output_buffer;
3851       const int varying = output->register_index;
3852       const unsigned stream_id = output->stream;
3853       assert(stream_id < MAX_VERTEX_STREAMS);
3854 
3855       buffer_mask[stream_id] |= 1 << buffer;
3856 
3857       assert(vue_map->varying_to_slot[varying] >= 0);
3858 
3859       /* Mesa doesn't store entries for gl_SkipComponents in the Outputs[]
3860        * array.  Instead, it simply increments DstOffset for the following
3861        * input by the number of components that should be skipped.
3862        *
3863        * Our hardware is unusual in that it requires us to program SO_DECLs
3864        * for fake "hole" components, rather than simply taking the offset
3865        * for each real varying.  Each hole can have size 1, 2, 3, or 4; we
3866        * program as many size = 4 holes as we can, then a final hole to
3867        * accommodate the final 1, 2, or 3 remaining.
3868        */
3869       int skip_components = output->dst_offset - next_offset[buffer];
3870 
3871       while (skip_components > 0) {
3872          so_decl[stream_id][decls[stream_id]++] = (struct GENX(SO_DECL)) {
3873             .HoleFlag = 1,
3874             .OutputBufferSlot = output->output_buffer,
3875             .ComponentMask = (1 << MIN2(skip_components, 4)) - 1,
3876          };
3877          skip_components -= 4;
3878       }
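      /* Worked example (illustrative): skipping 7 components emits two holes,
       * one with ComponentMask = 0xf (4 components) and one with 0x7 (the
       * remaining 3), following the "as many size 4 holes as possible" rule.
       */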
3879 
3880       next_offset[buffer] = output->dst_offset + output->num_components;
3881 
3882       so_decl[stream_id][decls[stream_id]++] = (struct GENX(SO_DECL)) {
3883          .OutputBufferSlot = output->output_buffer,
3884          .RegisterIndex = vue_map->varying_to_slot[varying],
3885          .ComponentMask =
3886             ((1 << output->num_components) - 1) << output->start_component,
3887       };
3888 
3889       if (decls[stream_id] > max_decls)
3890          max_decls = decls[stream_id];
3891    }
3892 
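   /* 3DSTATE_SO_DECL_LIST needs 3 setup DWords plus one SO_DECL_ENTRY (2
    * DWords, holding one SO_DECL per stream) for each of the max_decls slots;
    * 3DSTATE_STREAMOUT is packed into the same allocation, in front of it.
    */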
3893    unsigned dwords = GENX(3DSTATE_STREAMOUT_length) + (3 + 2 * max_decls);
3894    uint32_t *map = ralloc_size(NULL, sizeof(uint32_t) * dwords);
3895    uint32_t *so_decl_map = map + GENX(3DSTATE_STREAMOUT_length);
3896 
3897    iris_pack_command(GENX(3DSTATE_STREAMOUT), map, sol) {
3898       int urb_entry_read_offset = 0;
3899       int urb_entry_read_length = (vue_map->num_slots + 1) / 2 -
3900          urb_entry_read_offset;
3901 
3902       /* We always read the whole vertex.  This could be reduced at some
3903        * point by reading less and offsetting the register index in the
3904        * SO_DECLs.
3905        */
3906       sol.Stream0VertexReadOffset = urb_entry_read_offset;
3907       sol.Stream0VertexReadLength = urb_entry_read_length - 1;
3908       sol.Stream1VertexReadOffset = urb_entry_read_offset;
3909       sol.Stream1VertexReadLength = urb_entry_read_length - 1;
3910       sol.Stream2VertexReadOffset = urb_entry_read_offset;
3911       sol.Stream2VertexReadLength = urb_entry_read_length - 1;
3912       sol.Stream3VertexReadOffset = urb_entry_read_offset;
3913       sol.Stream3VertexReadLength = urb_entry_read_length - 1;
3914 
3915       /* Set buffer pitches; 0 means unbound. */
3916       sol.Buffer0SurfacePitch = 4 * info->stride[0];
3917       sol.Buffer1SurfacePitch = 4 * info->stride[1];
3918       sol.Buffer2SurfacePitch = 4 * info->stride[2];
3919       sol.Buffer3SurfacePitch = 4 * info->stride[3];
3920    }
3921 
3922    iris_pack_command(GENX(3DSTATE_SO_DECL_LIST), so_decl_map, list) {
3923       list.DWordLength = 3 + 2 * max_decls - 2;
3924       list.StreamtoBufferSelects0 = buffer_mask[0];
3925       list.StreamtoBufferSelects1 = buffer_mask[1];
3926       list.StreamtoBufferSelects2 = buffer_mask[2];
3927       list.StreamtoBufferSelects3 = buffer_mask[3];
3928       list.NumEntries0 = decls[0];
3929       list.NumEntries1 = decls[1];
3930       list.NumEntries2 = decls[2];
3931       list.NumEntries3 = decls[3];
3932    }
3933 
3934    for (int i = 0; i < max_decls; i++) {
3935       iris_pack_state(GENX(SO_DECL_ENTRY), so_decl_map + 3 + i * 2, entry) {
3936          entry.Stream0Decl = so_decl[0][i];
3937          entry.Stream1Decl = so_decl[1][i];
3938          entry.Stream2Decl = so_decl[2][i];
3939          entry.Stream3Decl = so_decl[3][i];
3940       }
3941    }
3942 
3943    return map;
3944 }
3945 
3946 static void
3947 iris_compute_sbe_urb_read_interval(uint64_t fs_input_slots,
3948                                    const struct brw_vue_map *last_vue_map,
3949                                    bool two_sided_color,
3950                                    unsigned *out_offset,
3951                                    unsigned *out_length)
3952 {
3953    /* The compiler computes the first URB slot without considering COL/BFC
3954     * swizzling (because it doesn't know whether it's enabled), so we need
3955     * to do that here too.  This may result in a smaller offset, which
3956     * should be safe.
3957     */
3958    const unsigned first_slot =
3959       brw_compute_first_urb_slot_required(fs_input_slots, last_vue_map);
3960 
3961    /* This becomes the URB read offset (counted in pairs of slots). */
3962    assert(first_slot % 2 == 0);
3963    *out_offset = first_slot / 2;
3964 
3965    /* We need to adjust the inputs read to account for front/back color
3966     * swizzling, as it can make the URB length longer.
3967     */
3968    for (int c = 0; c <= 1; c++) {
3969       if (fs_input_slots & (VARYING_BIT_COL0 << c)) {
3970          /* If two sided color is enabled, the fragment shader's gl_Color
3971           * (COL0) input comes from either the gl_FrontColor (COL0) or
3972           * gl_BackColor (BFC0) input varyings.  Mark BFC as used, too.
3973           */
3974          if (two_sided_color)
3975             fs_input_slots |= (VARYING_BIT_BFC0 << c);
3976 
3977          /* If front color isn't written, we opt to give them back color
3978           * instead of an undefined value.  Switch from COL to BFC.
3979           */
3980          if (last_vue_map->varying_to_slot[VARYING_SLOT_COL0 + c] == -1) {
3981             fs_input_slots &= ~(VARYING_BIT_COL0 << c);
3982             fs_input_slots |= (VARYING_BIT_BFC0 << c);
3983          }
3984       }
3985    }
3986 
3987    /* Compute the minimum URB Read Length necessary for the FS inputs.
3988     *
3989     * From the Sandy Bridge PRM, Volume 2, Part 1, documentation for
3990     * 3DSTATE_SF DWord 1 bits 15:11, "Vertex URB Entry Read Length":
3991     *
3992     * "This field should be set to the minimum length required to read the
3993     *  maximum source attribute.  The maximum source attribute is indicated
3994     *  by the maximum value of the enabled Attribute # Source Attribute if
3995     *  Attribute Swizzle Enable is set, Number of Output Attributes-1 if
3996     *  enable is not set.
3997     *  read_length = ceiling((max_source_attr + 1) / 2)
3998     *
3999     *  [errata] Corruption/Hang possible if length programmed larger than
4000     *  recommended"
4001     *
4002     * Similar text exists for Ivy Bridge.
4003     *
4004     * We find the last URB slot that's actually read by the FS.
4005     */
4006    unsigned last_read_slot = last_vue_map->num_slots - 1;
4007    while (last_read_slot > first_slot && !(fs_input_slots &
4008           (1ull << last_vue_map->slot_to_varying[last_read_slot])))
4009       --last_read_slot;
4010 
4011    /* The URB read length is the difference of the two, counted in pairs. */
4012    *out_length = DIV_ROUND_UP(last_read_slot - first_slot + 1, 2);
4013 }
4014 
4015 static void
4016 iris_emit_sbe_swiz(struct iris_batch *batch,
4017                    const struct iris_context *ice,
4018                    const struct brw_vue_map *vue_map,
4019                    unsigned urb_read_offset,
4020                    unsigned sprite_coord_enables)
4021 {
4022    struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) attr_overrides[16] = {};
4023    const struct brw_wm_prog_data *wm_prog_data = (void *)
4024       ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
4025    const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
4026 
4027    /* XXX: this should be generated when putting programs in place */
4028 
4029    for (uint8_t idx = 0; idx < wm_prog_data->urb_setup_attribs_count; idx++) {
4030       const uint8_t fs_attr = wm_prog_data->urb_setup_attribs[idx];
4031       const int input_index = wm_prog_data->urb_setup[fs_attr];
4032       if (input_index < 0 || input_index >= 16)
4033          continue;
4034 
4035       struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) *attr =
4036          &attr_overrides[input_index];
4037       int slot = vue_map->varying_to_slot[fs_attr];
4038 
4039       /* Viewport and Layer are stored in the VUE header.  We need to override
4040        * them to zero if earlier stages didn't write them, as GL requires that
4041        * they read back as zero when not explicitly set.
4042        */
4043       switch (fs_attr) {
4044       case VARYING_SLOT_VIEWPORT:
4045       case VARYING_SLOT_LAYER:
4046          attr->ComponentOverrideX = true;
4047          attr->ComponentOverrideW = true;
4048          attr->ConstantSource = CONST_0000;
4049 
4050          if (!(vue_map->slots_valid & VARYING_BIT_LAYER))
4051             attr->ComponentOverrideY = true;
4052          if (!(vue_map->slots_valid & VARYING_BIT_VIEWPORT))
4053             attr->ComponentOverrideZ = true;
4054          continue;
4055 
4056       case VARYING_SLOT_PRIMITIVE_ID:
4057          /* Override if the previous shader stage didn't write gl_PrimitiveID. */
4058          if (slot == -1) {
4059             attr->ComponentOverrideX = true;
4060             attr->ComponentOverrideY = true;
4061             attr->ComponentOverrideZ = true;
4062             attr->ComponentOverrideW = true;
4063             attr->ConstantSource = PRIM_ID;
4064             continue;
4065          }
4066          break;
4067 
4068       default:
4069          break;
4070       }
4071 
4072       if (sprite_coord_enables & (1 << input_index))
4073          continue;
4074 
4075       /* If only the back color was written (not the front), use it
4076        * as the color instead of an undefined value.
4077        */
4078       if (slot == -1 && fs_attr == VARYING_SLOT_COL0)
4079          slot = vue_map->varying_to_slot[VARYING_SLOT_BFC0];
4080       if (slot == -1 && fs_attr == VARYING_SLOT_COL1)
4081          slot = vue_map->varying_to_slot[VARYING_SLOT_BFC1];
4082 
4083       /* Not written by the previous stage - undefined. */
4084       if (slot == -1) {
4085          attr->ComponentOverrideX = true;
4086          attr->ComponentOverrideY = true;
4087          attr->ComponentOverrideZ = true;
4088          attr->ComponentOverrideW = true;
4089          attr->ConstantSource = CONST_0001_FLOAT;
4090          continue;
4091       }
4092 
4093       /* Compute the location of the attribute relative to the read offset,
4094        * which is counted in 256-bit increments (two 128-bit VUE slots).
4095        */
4096       const int source_attr = slot - 2 * urb_read_offset;
4097       assert(source_attr >= 0 && source_attr <= 32);
4098       attr->SourceAttribute = source_attr;
4099 
4100       /* If we are doing two-sided color, and the VUE slot following this one
4101        * represents a back-facing color, then we need to instruct the SF unit
4102        * to do back-facing swizzling.
4103        */
4104       if (cso_rast->light_twoside &&
4105           ((vue_map->slot_to_varying[slot] == VARYING_SLOT_COL0 &&
4106             vue_map->slot_to_varying[slot+1] == VARYING_SLOT_BFC0) ||
4107            (vue_map->slot_to_varying[slot] == VARYING_SLOT_COL1 &&
4108             vue_map->slot_to_varying[slot+1] == VARYING_SLOT_BFC1)))
4109          attr->SwizzleSelect = INPUTATTR_FACING;
4110    }
4111 
4112    iris_emit_cmd(batch, GENX(3DSTATE_SBE_SWIZ), sbes) {
4113       for (int i = 0; i < 16; i++)
4114          sbes.Attribute[i] = attr_overrides[i];
4115    }
4116 }
4117 
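/**
 * Return true if the current draw will be rasterized as points: either the
 * rasterizer's fill mode is point, or the last geometry stage (GS or TES)
 * emits a point topology, or the primitive mode itself is POINTS.
 */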
4118 static bool
4119 iris_is_drawing_points(const struct iris_context *ice)
4120 {
4121    const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
4122 
4123    if (cso_rast->fill_mode_point) {
4124       return true;
4125    }
4126 
4127    if (ice->shaders.prog[MESA_SHADER_GEOMETRY]) {
4128       const struct brw_gs_prog_data *gs_prog_data =
4129          (void *) ice->shaders.prog[MESA_SHADER_GEOMETRY]->prog_data;
4130       return gs_prog_data->output_topology == _3DPRIM_POINTLIST;
4131    } else if (ice->shaders.prog[MESA_SHADER_TESS_EVAL]) {
4132       const struct brw_tes_prog_data *tes_data =
4133          (void *) ice->shaders.prog[MESA_SHADER_TESS_EVAL]->prog_data;
4134       return tes_data->output_topology == BRW_TESS_OUTPUT_TOPOLOGY_POINT;
4135    } else {
4136       return ice->state.prim_mode == PIPE_PRIM_POINTS;
4137    }
4138 }
4139 
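/**
 * Compute the bitmask of SBE attribute slots that should be replaced with
 * point sprite coordinates: gl_PointCoord (PNTC), plus any texture
 * coordinates enabled in the rasterizer's sprite_coord_enable mask.
 */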
4140 static unsigned
4141 iris_calculate_point_sprite_overrides(const struct brw_wm_prog_data *prog_data,
4142                                       const struct iris_rasterizer_state *cso)
4143 {
4144    unsigned overrides = 0;
4145 
4146    if (prog_data->urb_setup[VARYING_SLOT_PNTC] != -1)
4147       overrides |= 1 << prog_data->urb_setup[VARYING_SLOT_PNTC];
4148 
4149    for (int i = 0; i < 8; i++) {
4150       if ((cso->sprite_coord_enable & (1 << i)) &&
4151           prog_data->urb_setup[VARYING_SLOT_TEX0 + i] != -1)
4152          overrides |= 1 << prog_data->urb_setup[VARYING_SLOT_TEX0 + i];
4153    }
4154 
4155    return overrides;
4156 }
4157 
4158 static void
4159 iris_emit_sbe(struct iris_batch *batch, const struct iris_context *ice)
4160 {
4161    const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
4162    const struct brw_wm_prog_data *wm_prog_data = (void *)
4163       ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
4164    const struct shader_info *fs_info =
4165       iris_get_shader_info(ice, MESA_SHADER_FRAGMENT);
4166    const struct brw_vue_map *last_vue_map =
4167       &brw_vue_prog_data(ice->shaders.last_vue_shader->prog_data)->vue_map;
4168 
4169    unsigned urb_read_offset, urb_read_length;
4170    iris_compute_sbe_urb_read_interval(fs_info->inputs_read,
4171                                       last_vue_map,
4172                                       cso_rast->light_twoside,
4173                                       &urb_read_offset, &urb_read_length);
4174 
4175    unsigned sprite_coord_overrides =
4176       iris_is_drawing_points(ice) ?
4177       iris_calculate_point_sprite_overrides(wm_prog_data, cso_rast) : 0;
4178 
4179    iris_emit_cmd(batch, GENX(3DSTATE_SBE), sbe) {
4180       sbe.AttributeSwizzleEnable = true;
4181       sbe.NumberofSFOutputAttributes = wm_prog_data->num_varying_inputs;
4182       sbe.PointSpriteTextureCoordinateOrigin = cso_rast->sprite_coord_mode;
4183       sbe.VertexURBEntryReadOffset = urb_read_offset;
4184       sbe.VertexURBEntryReadLength = urb_read_length;
4185       sbe.ForceVertexURBEntryReadOffset = true;
4186       sbe.ForceVertexURBEntryReadLength = true;
4187       sbe.ConstantInterpolationEnable = wm_prog_data->flat_inputs;
4188       sbe.PointSpriteTextureCoordinateEnable = sprite_coord_overrides;
4189 #if GFX_VER >= 9
4190       for (int i = 0; i < 32; i++) {
4191          sbe.AttributeActiveComponentFormat[i] = ACTIVE_COMPONENT_XYZW;
4192       }
4193 #endif
4194    }
4195 
4196    iris_emit_sbe_swiz(batch, ice, last_vue_map, urb_read_offset,
4197                       sprite_coord_overrides);
4198 }
4199 
4200 /* ------------------------------------------------------------------- */
4201 
4202 /**
4203  * Populate VS program key fields based on the current state.
4204  */
4205 static void
4206 iris_populate_vs_key(const struct iris_context *ice,
4207                      const struct shader_info *info,
4208                      gl_shader_stage last_stage,
4209                      struct iris_vs_prog_key *key)
4210 {
4211    const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
4212 
4213    if (info->clip_distance_array_size == 0 &&
4214        (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
4215        last_stage == MESA_SHADER_VERTEX)
4216       key->vue.nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
4217 }
4218 
4219 /**
4220  * Populate TCS program key fields based on the current state.
4221  */
4222 static void
4223 iris_populate_tcs_key(const struct iris_context *ice,
4224                       struct iris_tcs_prog_key *key)
4225 {
4226 }
4227 
4228 /**
4229  * Populate TES program key fields based on the current state.
4230  */
4231 static void
4232 iris_populate_tes_key(const struct iris_context *ice,
4233                       const struct shader_info *info,
4234                       gl_shader_stage last_stage,
4235                       struct iris_tes_prog_key *key)
4236 {
4237    const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
4238 
4239    if (info->clip_distance_array_size == 0 &&
4240        (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
4241        last_stage == MESA_SHADER_TESS_EVAL)
4242       key->vue.nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
4243 }
4244 
4245 /**
4246  * Populate GS program key fields based on the current state.
4247  */
4248 static void
4249 iris_populate_gs_key(const struct iris_context *ice,
4250                      const struct shader_info *info,
4251                      gl_shader_stage last_stage,
4252                      struct iris_gs_prog_key *key)
4253 {
4254    const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
4255 
4256    if (info->clip_distance_array_size == 0 &&
4257        (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
4258        last_stage == MESA_SHADER_GEOMETRY)
4259       key->vue.nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
4260 }
4261 
4262 /**
4263  * Populate FS program key fields based on the current state.
4264  */
4265 static void
4266 iris_populate_fs_key(const struct iris_context *ice,
4267                      const struct shader_info *info,
4268                      struct iris_fs_prog_key *key)
4269 {
4270    struct iris_screen *screen = (void *) ice->ctx.screen;
4271    const struct pipe_framebuffer_state *fb = &ice->state.framebuffer;
4272    const struct iris_depth_stencil_alpha_state *zsa = ice->state.cso_zsa;
4273    const struct iris_rasterizer_state *rast = ice->state.cso_rast;
4274    const struct iris_blend_state *blend = ice->state.cso_blend;
4275 
4276    key->nr_color_regions = fb->nr_cbufs;
4277 
4278    key->clamp_fragment_color = rast->clamp_fragment_color;
4279 
4280    key->alpha_to_coverage = blend->alpha_to_coverage;
4281 
4282    key->alpha_test_replicate_alpha = fb->nr_cbufs > 1 && zsa->alpha_enabled;
4283 
4284    key->flat_shade = rast->flatshade &&
4285       (info->inputs_read & (VARYING_BIT_COL0 | VARYING_BIT_COL1));
4286 
4287    key->persample_interp = rast->force_persample_interp;
4288    key->multisample_fbo = rast->multisample && fb->samples > 1;
4289 
4290    key->coherent_fb_fetch = GFX_VER >= 9;
4291 
4292    key->force_dual_color_blend =
4293       screen->driconf.dual_color_blend_by_location &&
4294       (blend->blend_enables & 1) && blend->dual_color_blending;
4295 
4296    /* TODO: Respect glHint for key->high_quality_derivatives */
4297 }
4298 
4299 static void
4300 iris_populate_cs_key(const struct iris_context *ice,
4301                      struct iris_cs_prog_key *key)
4302 {
4303 }
4304 
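/**
 * Return the Kernel Start Pointer for a shader variant: the offset of its
 * assembly from the instruction base address.
 */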
4305 static uint64_t
4306 KSP(const struct iris_compiled_shader *shader)
4307 {
4308    struct iris_resource *res = (void *) shader->assembly.res;
4309    return iris_bo_offset_from_base_address(res->bo) + shader->assembly.offset;
4310 }
4311 
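/* Fill out the thread dispatch fields common to 3DSTATE_VS/HS/DS/GS:
 * kernel start pointer, binding table size, URB read offset/length,
 * statistics enable, and per-thread scratch space (when needed).
 */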
4312 #define INIT_THREAD_DISPATCH_FIELDS(pkt, prefix, stage)                   \
4313    pkt.KernelStartPointer = KSP(shader);                                  \
4314    pkt.BindingTableEntryCount = shader->bt.size_bytes / 4;                \
4315    pkt.FloatingPointMode = prog_data->use_alt_mode;                       \
4316                                                                           \
4317    pkt.DispatchGRFStartRegisterForURBData =                               \
4318       prog_data->dispatch_grf_start_reg;                                  \
4319    pkt.prefix##URBEntryReadLength = vue_prog_data->urb_read_length;       \
4320    pkt.prefix##URBEntryReadOffset = 0;                                    \
4321                                                                           \
4322    pkt.StatisticsEnable = true;                                           \
4323    pkt.Enable           = true;                                           \
4324                                                                           \
4325    if (prog_data->total_scratch) {                                        \
4326       INIT_THREAD_SCRATCH_SIZE(pkt)                                       \
4327    }
4328 
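/* On Gfx12.5+, scratch is referenced through a surface state (see
 * pin_scratch_space()), so there is no per-thread scratch size to encode
 * in the packet; older generations program a size and merge in a scratch
 * base address instead.
 */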
4329 #if GFX_VERx10 >= 125
4330 #define INIT_THREAD_SCRATCH_SIZE(pkt)
4331 #define MERGE_SCRATCH_ADDR(name)                                          \
4332 {                                                                         \
4333    uint32_t pkt2[GENX(name##_length)] = {0};                              \
4334    _iris_pack_command(batch, GENX(name), pkt2, p) {                       \
4335       p.ScratchSpaceBuffer = scratch_addr >> 4;                           \
4336    }                                                                      \
4337    iris_emit_merge(batch, pkt, pkt2, GENX(name##_length));                \
4338 }
4339 #else
4340 #define INIT_THREAD_SCRATCH_SIZE(pkt)                                     \
4341    pkt.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11;
4342 #define MERGE_SCRATCH_ADDR(name)                                          \
4343 {                                                                         \
4344    uint32_t pkt2[GENX(name##_length)] = {0};                              \
4345    _iris_pack_command(batch, GENX(name), pkt2, p) {                       \
4346       p.ScratchSpaceBasePointer =                                         \
4347          rw_bo(NULL, scratch_addr, IRIS_DOMAIN_NONE);                     \
4348    }                                                                      \
4349    iris_emit_merge(batch, pkt, pkt2, GENX(name##_length));                \
4350 }
4351 #endif
4352 
4353 
4354 /**
4355  * Encode most of 3DSTATE_VS based on the compiled shader.
4356  */
4357 static void
4358 iris_store_vs_state(const struct intel_device_info *devinfo,
4359                     struct iris_compiled_shader *shader)
4360 {
4361    struct brw_stage_prog_data *prog_data = shader->prog_data;
4362    struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
4363 
4364    iris_pack_command(GENX(3DSTATE_VS), shader->derived_data, vs) {
4365       INIT_THREAD_DISPATCH_FIELDS(vs, Vertex, MESA_SHADER_VERTEX);
4366       vs.MaximumNumberofThreads = devinfo->max_vs_threads - 1;
4367       vs.SIMD8DispatchEnable = true;
4368       vs.UserClipDistanceCullTestEnableBitmask =
4369          vue_prog_data->cull_distance_mask;
4370    }
4371 }
4372 
4373 /**
4374  * Encode most of 3DSTATE_HS based on the compiled shader.
4375  */
4376 static void
4377 iris_store_tcs_state(const struct intel_device_info *devinfo,
4378                      struct iris_compiled_shader *shader)
4379 {
4380    struct brw_stage_prog_data *prog_data = shader->prog_data;
4381    struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
4382    struct brw_tcs_prog_data *tcs_prog_data = (void *) prog_data;
4383 
4384    iris_pack_command(GENX(3DSTATE_HS), shader->derived_data, hs) {
4385       INIT_THREAD_DISPATCH_FIELDS(hs, Vertex, MESA_SHADER_TESS_CTRL);
4386 
4387 #if GFX_VER >= 12
4388       /* Wa_1604578095:
4389        *
4390        *    Hang occurs when the number of max threads is less than 2 times
4391        *    the number of instance count. The number of max threads must be
4392        *    more than 2 times the number of instance count.
4393        */
4394       assert((devinfo->max_tcs_threads / 2) > tcs_prog_data->instances);
4395       hs.DispatchGRFStartRegisterForURBData = prog_data->dispatch_grf_start_reg & 0x1f;
4396       hs.DispatchGRFStartRegisterForURBData5 = prog_data->dispatch_grf_start_reg >> 5;
4397 #endif
4398 
4399       hs.InstanceCount = tcs_prog_data->instances - 1;
4400       hs.MaximumNumberofThreads = devinfo->max_tcs_threads - 1;
4401       hs.IncludeVertexHandles = true;
4402 
4403 #if GFX_VER == 12
4404       /* Patch Count threshold specifies the maximum number of patches that
4405        * will be accumulated before a thread dispatch is forced.
4406        */
4407       hs.PatchCountThreshold = tcs_prog_data->patch_count_threshold;
4408 #endif
4409 
4410 #if GFX_VER >= 9
4411       hs.DispatchMode = vue_prog_data->dispatch_mode;
4412       hs.IncludePrimitiveID = tcs_prog_data->include_primitive_id;
4413 #endif
4414    }
4415 }
4416 
4417 /**
4418  * Encode 3DSTATE_TE and most of 3DSTATE_DS based on the compiled shader.
4419  */
4420 static void
4421 iris_store_tes_state(const struct intel_device_info *devinfo,
4422                      struct iris_compiled_shader *shader)
4423 {
4424    struct brw_stage_prog_data *prog_data = shader->prog_data;
4425    struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
4426    struct brw_tes_prog_data *tes_prog_data = (void *) prog_data;
4427 
4428    uint32_t *ds_state = (void *) shader->derived_data;
4429    uint32_t *te_state = ds_state + GENX(3DSTATE_DS_length);
4430 
4431    iris_pack_command(GENX(3DSTATE_DS), ds_state, ds) {
4432       INIT_THREAD_DISPATCH_FIELDS(ds, Patch, MESA_SHADER_TESS_EVAL);
4433 
4434       ds.DispatchMode = DISPATCH_MODE_SIMD8_SINGLE_PATCH;
4435       ds.MaximumNumberofThreads = devinfo->max_tes_threads - 1;
4436       ds.ComputeWCoordinateEnable =
4437          tes_prog_data->domain == BRW_TESS_DOMAIN_TRI;
4438 
4439       ds.UserClipDistanceCullTestEnableBitmask =
4440          vue_prog_data->cull_distance_mask;
4441    }
4442 
4443    iris_pack_command(GENX(3DSTATE_TE), te_state, te) {
4444       te.Partitioning = tes_prog_data->partitioning;
4445       te.OutputTopology = tes_prog_data->output_topology;
4446       te.TEDomain = tes_prog_data->domain;
4447       te.TEEnable = true;
4448       te.MaximumTessellationFactorOdd = 63.0;
4449       te.MaximumTessellationFactorNotOdd = 64.0;
4450    }
4451 }
4452 
4453 /**
4454  * Encode most of 3DSTATE_GS based on the compiled shader.
4455  */
4456 static void
4457 iris_store_gs_state(const struct intel_device_info *devinfo,
4458                     struct iris_compiled_shader *shader)
4459 {
4460    struct brw_stage_prog_data *prog_data = shader->prog_data;
4461    struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
4462    struct brw_gs_prog_data *gs_prog_data = (void *) prog_data;
4463 
4464    iris_pack_command(GENX(3DSTATE_GS), shader->derived_data, gs) {
4465       INIT_THREAD_DISPATCH_FIELDS(gs, Vertex, MESA_SHADER_GEOMETRY);
4466 
4467       gs.OutputVertexSize = gs_prog_data->output_vertex_size_hwords * 2 - 1;
4468       gs.OutputTopology = gs_prog_data->output_topology;
4469       gs.ControlDataHeaderSize =
4470          gs_prog_data->control_data_header_size_hwords;
4471       gs.InstanceControl = gs_prog_data->invocations - 1;
4472       gs.DispatchMode = DISPATCH_MODE_SIMD8;
4473       gs.IncludePrimitiveID = gs_prog_data->include_primitive_id;
4474       gs.ControlDataFormat = gs_prog_data->control_data_format;
4475       gs.ReorderMode = TRAILING;
4476       gs.ExpectedVertexCount = gs_prog_data->vertices_in;
4477       gs.MaximumNumberofThreads =
4478          GFX_VER == 8 ? (devinfo->max_gs_threads / 2 - 1)
4479                       : (devinfo->max_gs_threads - 1);
4480 
4481       if (gs_prog_data->static_vertex_count != -1) {
4482          gs.StaticOutput = true;
4483          gs.StaticOutputVertexCount = gs_prog_data->static_vertex_count;
4484       }
4485       gs.IncludeVertexHandles = vue_prog_data->include_vue_handles;
4486 
4487       gs.UserClipDistanceCullTestEnableBitmask =
4488          vue_prog_data->cull_distance_mask;
4489 
4490       const int urb_entry_write_offset = 1;
4491       const uint32_t urb_entry_output_length =
4492          DIV_ROUND_UP(vue_prog_data->vue_map.num_slots, 2) -
4493          urb_entry_write_offset;
4494 
4495       gs.VertexURBEntryOutputReadOffset = urb_entry_write_offset;
4496       gs.VertexURBEntryOutputLength = MAX2(urb_entry_output_length, 1);
4497    }
4498 }
4499 
4500 /**
4501  * Encode most of 3DSTATE_PS and 3DSTATE_PS_EXTRA based on the shader.
4502  */
4503 static void
4504 iris_store_fs_state(const struct intel_device_info *devinfo,
4505                     struct iris_compiled_shader *shader)
4506 {
4507    struct brw_stage_prog_data *prog_data = shader->prog_data;
4508    struct brw_wm_prog_data *wm_prog_data = (void *) shader->prog_data;
4509 
4510    uint32_t *ps_state = (void *) shader->derived_data;
4511    uint32_t *psx_state = ps_state + GENX(3DSTATE_PS_length);
4512 
4513    iris_pack_command(GENX(3DSTATE_PS), ps_state, ps) {
4514       ps.VectorMaskEnable = true;
4515       ps.BindingTableEntryCount = shader->bt.size_bytes / 4;
4516       ps.FloatingPointMode = prog_data->use_alt_mode;
4517       ps.MaximumNumberofThreadsPerPSD = 64 - (GFX_VER == 8 ? 2 : 1);
4518 
4519       ps.PushConstantEnable = prog_data->ubo_ranges[0].length > 0;
4520 
4521       /* From the documentation for this packet:
4522        * "If the PS kernel does not need the Position XY Offsets to
4523        *  compute a Position Value, then this field should be programmed
4524        *  to POSOFFSET_NONE."
4525        *
4526        * "SW Recommendation: If the PS kernel needs the Position Offsets
4527        *  to compute a Position XY value, this field should match Position
4528        *  ZW Interpolation Mode to ensure a consistent position.xyzw
4529        *  computation."
4530        *
4531        * We only require XY sample offsets, so this recommendation doesn't
4532        * look useful at the moment.  We might need it in the future.
4533        */
4534       ps.PositionXYOffsetSelect =
4535          wm_prog_data->uses_pos_offset ? POSOFFSET_SAMPLE : POSOFFSET_NONE;
4536 
4537       if (prog_data->total_scratch) {
4538          INIT_THREAD_SCRATCH_SIZE(ps);
4539       }
4540    }
4541 
4542    iris_pack_command(GENX(3DSTATE_PS_EXTRA), psx_state, psx) {
4543       psx.PixelShaderValid = true;
4544       psx.PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode;
4545       psx.PixelShaderKillsPixel = wm_prog_data->uses_kill;
4546       psx.AttributeEnable = wm_prog_data->num_varying_inputs != 0;
4547       psx.PixelShaderUsesSourceDepth = wm_prog_data->uses_src_depth;
4548       psx.PixelShaderUsesSourceW = wm_prog_data->uses_src_w;
4549       psx.PixelShaderIsPerSample = wm_prog_data->persample_dispatch;
4550       psx.oMaskPresenttoRenderTarget = wm_prog_data->uses_omask;
4551 
4552 #if GFX_VER >= 9
4553       psx.PixelShaderPullsBary = wm_prog_data->pulls_bary;
4554       psx.PixelShaderComputesStencil = wm_prog_data->computed_stencil;
4555 #endif
4556    }
4557 }
4558 
4559 /**
4560  * Encode most of INTERFACE_DESCRIPTOR_DATA based on the compiled shader.
4561  */
4564 static void
4565 iris_store_cs_state(const struct intel_device_info *devinfo,
4566                     struct iris_compiled_shader *shader)
4567 {
4568    struct brw_cs_prog_data *cs_prog_data = (void *) shader->prog_data;
4569    void *map = shader->derived_data;
4570 
4571    iris_pack_state(GENX(INTERFACE_DESCRIPTOR_DATA), map, desc) {
4572 #if GFX_VERx10 < 125
4573       desc.ConstantURBEntryReadLength = cs_prog_data->push.per_thread.regs;
4574       desc.CrossThreadConstantDataReadLength =
4575          cs_prog_data->push.cross_thread.regs;
4576 #else
4577       assert(cs_prog_data->push.per_thread.regs == 0);
4578       assert(cs_prog_data->push.cross_thread.regs == 0);
4579 #endif
4580       desc.BarrierEnable = cs_prog_data->uses_barrier;
4581 #if GFX_VER >= 12
4582       /* TODO: Check if we are missing workarounds and enable mid-thread
4583        * preemption.
4584        *
4585        * We still have issues with mid-thread preemption (it was already
4586        * disabled by the kernel on gfx11, due to missing workarounds). It's
4587        * possible that we are just missing some workarounds, and could enable
4588        * it later, but for now let's disable it to fix a GPU hang in
4589        * compute in Car Chase (and possibly more).
4590        */
4591       desc.ThreadPreemptionDisable = true;
4592 #endif
4593    }
4594 }
4595 
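/**
 * Compute the size of the derived data (shader command packets).
 *
 * This must match the data written by the iris_store_xs_state() functions.
 */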
4596 static unsigned
4597 iris_derived_program_state_size(enum iris_program_cache_id cache_id)
4598 {
4599    assert(cache_id <= IRIS_CACHE_BLORP);
4600 
4601    static const unsigned dwords[] = {
4602       [IRIS_CACHE_VS] = GENX(3DSTATE_VS_length),
4603       [IRIS_CACHE_TCS] = GENX(3DSTATE_HS_length),
4604       [IRIS_CACHE_TES] = GENX(3DSTATE_TE_length) + GENX(3DSTATE_DS_length),
4605       [IRIS_CACHE_GS] = GENX(3DSTATE_GS_length),
4606       [IRIS_CACHE_FS] =
4607          GENX(3DSTATE_PS_length) + GENX(3DSTATE_PS_EXTRA_length),
4608       [IRIS_CACHE_CS] = GENX(INTERFACE_DESCRIPTOR_DATA_length),
4609       [IRIS_CACHE_BLORP] = 0,
4610    };
4611 
4612    return sizeof(uint32_t) * dwords[cache_id];
4613 }
4614 
4615 /**
4616  * Create any state packets corresponding to the given shader stage
4617  * (i.e. 3DSTATE_VS) and save them as "derived data" in the shader variant.
4618  * This means that we can look up a program in the in-memory cache and
4619  * get most of the state packet without having to reconstruct it.
4620  */
4621 static void
4622 iris_store_derived_program_state(const struct intel_device_info *devinfo,
4623                                  enum iris_program_cache_id cache_id,
4624                                  struct iris_compiled_shader *shader)
4625 {
4626    switch (cache_id) {
4627    case IRIS_CACHE_VS:
4628       iris_store_vs_state(devinfo, shader);
4629       break;
4630    case IRIS_CACHE_TCS:
4631       iris_store_tcs_state(devinfo, shader);
4632       break;
4633    case IRIS_CACHE_TES:
4634       iris_store_tes_state(devinfo, shader);
4635       break;
4636    case IRIS_CACHE_GS:
4637       iris_store_gs_state(devinfo, shader);
4638       break;
4639    case IRIS_CACHE_FS:
4640       iris_store_fs_state(devinfo, shader);
4641       break;
4642    case IRIS_CACHE_CS:
4643       iris_store_cs_state(devinfo, shader);
4644       break;
4645    case IRIS_CACHE_BLORP:
4646       break;
4647    }
4648 }
4649 
4650 /* ------------------------------------------------------------------- */
4651 
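/* Opcodes of the per-stage 3DSTATE_CONSTANT_* packets, indexed by shader
 * stage.  Compute push constants use a different mechanism, so that entry
 * is left as zero.
 */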
4652 static const uint32_t push_constant_opcodes[] = {
4653    [MESA_SHADER_VERTEX]    = 21,
4654    [MESA_SHADER_TESS_CTRL] = 25, /* HS */
4655    [MESA_SHADER_TESS_EVAL] = 26, /* DS */
4656    [MESA_SHADER_GEOMETRY]  = 22,
4657    [MESA_SHADER_FRAGMENT]  = 23,
4658    [MESA_SHADER_COMPUTE]   = 0,
4659 };
4660 
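/**
 * Pin the "unbound texture" null SURFACE_STATE and return its offset, for
 * use as a binding table entry when nothing is bound at a given slot.
 */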
4661 static uint32_t
4662 use_null_surface(struct iris_batch *batch, struct iris_context *ice)
4663 {
4664    struct iris_bo *state_bo = iris_resource_bo(ice->state.unbound_tex.res);
4665 
4666    iris_use_pinned_bo(batch, state_bo, false, IRIS_DOMAIN_NONE);
4667 
4668    return ice->state.unbound_tex.offset;
4669 }
4670 
4671 static uint32_t
4672 use_null_fb_surface(struct iris_batch *batch, struct iris_context *ice)
4673 {
4674    /* If set_framebuffer_state() was never called, fall back to 1x1x1 */
4675    if (!ice->state.null_fb.res)
4676       return use_null_surface(batch, ice);
4677 
4678    struct iris_bo *state_bo = iris_resource_bo(ice->state.null_fb.res);
4679 
4680    iris_use_pinned_bo(batch, state_bo, false, IRIS_DOMAIN_NONE);
4681 
4682    return ice->state.null_fb.offset;
4683 }
4684 
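/**
 * Surface states for a resource are stored back-to-back, one copy per
 * possible aux usage.  Return the byte offset of the copy for the given
 * aux usage by counting how many enabled aux modes precede it (e.g. the
 * second enabled mode lives at 1 * SURFACE_STATE_ALIGNMENT).
 */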
4685 static uint32_t
4686 surf_state_offset_for_aux(struct iris_resource *res,
4687                           unsigned aux_modes,
4688                           enum isl_aux_usage aux_usage)
4689 {
4690    assert(aux_modes & (1 << aux_usage));
4691    return SURFACE_STATE_ALIGNMENT *
4692           util_bitcount(aux_modes & ((1 << aux_usage) - 1));
4693 }
4694 
4695 #if GFX_VER == 9
4696 static void
4697 surf_state_update_clear_value(struct iris_batch *batch,
4698                               struct iris_resource *res,
4699                               struct iris_state_ref *state,
4700                               unsigned aux_modes,
4701                               enum isl_aux_usage aux_usage)
4702 {
4703    struct isl_device *isl_dev = &batch->screen->isl_dev;
4704    struct iris_bo *state_bo = iris_resource_bo(state->res);
4705    uint64_t real_offset = state->offset + IRIS_MEMZONE_BINDER_START;
4706    uint32_t offset_into_bo = real_offset - state_bo->address;
4707    uint32_t clear_offset = offset_into_bo +
4708       isl_dev->ss.clear_value_offset +
4709       surf_state_offset_for_aux(res, aux_modes, aux_usage);
4710    uint32_t *color = res->aux.clear_color.u32;
4711 
4712    assert(isl_dev->ss.clear_value_size == 16);
4713 
4714    if (aux_usage == ISL_AUX_USAGE_HIZ) {
4715       iris_emit_pipe_control_write(batch, "update fast clear value (Z)",
4716                                    PIPE_CONTROL_WRITE_IMMEDIATE,
4717                                    state_bo, clear_offset, color[0]);
4718    } else {
4719       iris_emit_pipe_control_write(batch, "update fast clear color (RG__)",
4720                                    PIPE_CONTROL_WRITE_IMMEDIATE,
4721                                    state_bo, clear_offset,
4722                                    (uint64_t) color[0] |
4723                                    (uint64_t) color[1] << 32);
4724       iris_emit_pipe_control_write(batch, "update fast clear color (__BA)",
4725                                    PIPE_CONTROL_WRITE_IMMEDIATE,
4726                                    state_bo, clear_offset + 8,
4727                                    (uint64_t) color[2] |
4728                                    (uint64_t) color[3] << 32);
4729    }
4730 
4731    iris_emit_pipe_control_flush(batch,
4732                                 "update fast clear: state cache invalidate",
4733                                 PIPE_CONTROL_FLUSH_ENABLE |
4734                                 PIPE_CONTROL_STATE_CACHE_INVALIDATE);
4735 }
4736 #endif
4737 
4738 static void
4739 update_clear_value(struct iris_context *ice,
4740                    struct iris_batch *batch,
4741                    struct iris_resource *res,
4742                    struct iris_surface_state *surf_state,
4743                    unsigned all_aux_modes,
4744                    struct isl_view *view)
4745 {
4746    UNUSED struct isl_device *isl_dev = &batch->screen->isl_dev;
4747    UNUSED unsigned aux_modes = all_aux_modes;
4748 
4749    /* We only need to update the clear color in the surface state for gfx8 and
4750     * gfx9. Newer gens can read it directly from the clear color state buffer.
4751     */
4752 #if GFX_VER == 9
4753    /* Skip updating the ISL_AUX_USAGE_NONE surface state */
4754    aux_modes &= ~(1 << ISL_AUX_USAGE_NONE);
4755 
4756    while (aux_modes) {
4757       enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
4758 
4759       surf_state_update_clear_value(batch, res, &surf_state->ref,
4760                                     all_aux_modes, aux_usage);
4761    }
4762 #elif GFX_VER == 8
4763    /* TODO: Could update rather than re-filling */
4764    alloc_surface_states(surf_state, all_aux_modes);
4765 
4766    void *map = surf_state->cpu;
4767 
4768    while (aux_modes) {
4769       enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
4770       fill_surface_state(isl_dev, map, res, &res->surf, view, aux_usage,
4771                          0, 0, 0);
4772       map += SURFACE_STATE_ALIGNMENT;
4773    }
4774 
4775    upload_surface_states(ice->state.surface_uploader, surf_state);
4776 #endif
4777 }
4778 
4779 /**
4780  * Add a surface to the validation list, as well as the buffer containing
4781  * the corresponding SURFACE_STATE.
4782  *
4783  * Returns the binding table entry (offset to SURFACE_STATE).
4784  */
4785 static uint32_t
4786 use_surface(struct iris_context *ice,
4787             struct iris_batch *batch,
4788             struct pipe_surface *p_surf,
4789             bool writeable,
4790             enum isl_aux_usage aux_usage,
4791             bool is_read_surface,
4792             enum iris_domain access)
4793 {
4794    struct iris_surface *surf = (void *) p_surf;
4795    struct iris_resource *res = (void *) p_surf->texture;
4796    uint32_t offset = 0;
4797 
4798    if (GFX_VER == 8 && is_read_surface && !surf->surface_state_read.ref.res) {
4799       upload_surface_states(ice->state.surface_uploader,
4800                             &surf->surface_state_read);
4801    }
4802 
4803    if (!surf->surface_state.ref.res) {
4804       upload_surface_states(ice->state.surface_uploader,
4805                             &surf->surface_state);
4806    }
4807 
4808    if (memcmp(&res->aux.clear_color, &surf->clear_color,
4809               sizeof(surf->clear_color)) != 0) {
4810       update_clear_value(ice, batch, res, &surf->surface_state,
4811                          res->aux.possible_usages, &surf->view);
4812       if (GFX_VER == 8) {
4813          update_clear_value(ice, batch, res, &surf->surface_state_read,
4814                             res->aux.possible_usages, &surf->read_view);
4815       }
4816       surf->clear_color = res->aux.clear_color;
4817    }
4818 
4819    if (res->aux.clear_color_bo)
4820       iris_use_pinned_bo(batch, res->aux.clear_color_bo, false, access);
4821 
4822    if (res->aux.bo)
4823       iris_use_pinned_bo(batch, res->aux.bo, writeable, access);
4824 
4825    iris_use_pinned_bo(batch, res->bo, writeable, access);
4826 
4827    if (GFX_VER == 8 && is_read_surface) {
4828       iris_use_pinned_bo(batch, iris_resource_bo(surf->surface_state_read.ref.res), false,
4829                          IRIS_DOMAIN_NONE);
4830    } else {
4831       iris_use_pinned_bo(batch, iris_resource_bo(surf->surface_state.ref.res), false,
4832                          IRIS_DOMAIN_NONE);
4833    }
4834 
4835    offset = (GFX_VER == 8 && is_read_surface)
4836                ? surf->surface_state_read.ref.offset
4837                : surf->surface_state.ref.offset;
4838 
4839    return offset +
4840           surf_state_offset_for_aux(res, res->aux.possible_usages, aux_usage);
4841 }
4842 
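/**
 * Add a sampler view's buffers to the validation list, refreshing the
 * stored fast clear color if it changed, and return the binding table
 * entry (offset to its SURFACE_STATE).
 */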
4843 static uint32_t
4844 use_sampler_view(struct iris_context *ice,
4845                  struct iris_batch *batch,
4846                  struct iris_sampler_view *isv)
4847 {
4848    enum isl_aux_usage aux_usage =
4849       iris_resource_texture_aux_usage(ice, isv->res, isv->view.format);
4850 
4851    if (!isv->surface_state.ref.res)
4852       upload_surface_states(ice->state.surface_uploader, &isv->surface_state);
4853 
4854    if (memcmp(&isv->res->aux.clear_color, &isv->clear_color,
4855               sizeof(isv->clear_color)) != 0) {
4856       update_clear_value(ice, batch, isv->res, &isv->surface_state,
4857                          isv->res->aux.sampler_usages, &isv->view);
4858       isv->clear_color = isv->res->aux.clear_color;
4859    }
4860 
4861    if (isv->res->aux.clear_color_bo) {
4862       iris_use_pinned_bo(batch, isv->res->aux.clear_color_bo,
4863                          false, IRIS_DOMAIN_OTHER_READ);
4864    }
4865 
4866    if (isv->res->aux.bo) {
4867       iris_use_pinned_bo(batch, isv->res->aux.bo,
4868                          false, IRIS_DOMAIN_OTHER_READ);
4869    }
4870 
4871    iris_use_pinned_bo(batch, isv->res->bo, false, IRIS_DOMAIN_OTHER_READ);
4872    iris_use_pinned_bo(batch, iris_resource_bo(isv->surface_state.ref.res), false,
4873                       IRIS_DOMAIN_NONE);
4874 
4875    return isv->surface_state.ref.offset +
4876           surf_state_offset_for_aux(isv->res, isv->res->aux.sampler_usages,
4877                                     aux_usage);
4878 }
4879 
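/**
 * Add a UBO or SSBO to the validation list and return the binding table
 * entry for its SURFACE_STATE, falling back to the null surface when no
 * buffer is bound.
 */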
4880 static uint32_t
4881 use_ubo_ssbo(struct iris_batch *batch,
4882              struct iris_context *ice,
4883              struct pipe_shader_buffer *buf,
4884              struct iris_state_ref *surf_state,
4885              bool writable, enum iris_domain access)
4886 {
4887    if (!buf->buffer || !surf_state->res)
4888       return use_null_surface(batch, ice);
4889 
4890    iris_use_pinned_bo(batch, iris_resource_bo(buf->buffer), writable, access);
4891    iris_use_pinned_bo(batch, iris_resource_bo(surf_state->res), false,
4892                       IRIS_DOMAIN_NONE);
4893 
4894    return surf_state->offset;
4895 }
4896 
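/**
 * Add a shader image's resource (and any aux buffer) to the validation
 * list and return the binding table entry for its SURFACE_STATE.
 */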
4897 static uint32_t
4898 use_image(struct iris_batch *batch, struct iris_context *ice,
4899           struct iris_shader_state *shs, const struct shader_info *info,
4900           int i)
4901 {
4902    struct iris_image_view *iv = &shs->image[i];
4903    struct iris_resource *res = (void *) iv->base.resource;
4904 
4905    if (!res)
4906       return use_null_surface(batch, ice);
4907 
4908    bool write = iv->base.shader_access & PIPE_IMAGE_ACCESS_WRITE;
4909 
4910    iris_use_pinned_bo(batch, res->bo, write, IRIS_DOMAIN_NONE);
4911    iris_use_pinned_bo(batch, iris_resource_bo(iv->surface_state.ref.res),
4912                       false, IRIS_DOMAIN_NONE);
4913 
4914    if (res->aux.bo)
4915       iris_use_pinned_bo(batch, res->aux.bo, write, IRIS_DOMAIN_NONE);
4916 
4917    enum isl_aux_usage aux_usage =
4918       iris_image_view_aux_usage(ice, &iv->base, info);
4919 
4920    return iv->surface_state.ref.offset +
4921       surf_state_offset_for_aux(res, res->aux.possible_usages, aux_usage);
4922 }
4923 
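/* push_bt_entry() writes a binder-relative surface state offset into the
 * next binding table slot (unless we're only pinning BOs for residency);
 * bt_assert() checks that a surface group begins at its expected slot.
 */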
4924 #define push_bt_entry(addr) \
4925    assert(addr >= binder_addr); \
4926    assert(s < shader->bt.size_bytes / sizeof(uint32_t)); \
4927    if (!pin_only) bt_map[s++] = (addr) - binder_addr;
4928 
4929 #define bt_assert(section) \
4930    if (!pin_only && shader->bt.used_mask[section] != 0) \
4931       assert(shader->bt.offsets[section] == s);
4932 
4933 /**
4934  * Populate the binding table for a given shader stage.
4935  *
4936  * This fills out the table of pointers to surfaces required by the shader,
4937  * and also adds those buffers to the validation list so the kernel can make
4938  * resident before running our batch.
4939  */
4940 static void
4941 iris_populate_binding_table(struct iris_context *ice,
4942                             struct iris_batch *batch,
4943                             gl_shader_stage stage,
4944                             bool pin_only)
4945 {
4946    const struct iris_binder *binder = &ice->state.binder;
4947    struct iris_compiled_shader *shader = ice->shaders.prog[stage];
4948    if (!shader)
4949       return;
4950 
4951    struct iris_binding_table *bt = &shader->bt;
4952    UNUSED struct brw_stage_prog_data *prog_data = shader->prog_data;
4953    struct iris_shader_state *shs = &ice->state.shaders[stage];
4954    uint32_t binder_addr = binder->bo->address;
4955 
4956    uint32_t *bt_map = binder->map + binder->bt_offset[stage];
4957    int s = 0;
4958 
4959    const struct shader_info *info = iris_get_shader_info(ice, stage);
4960    if (!info) {
4961       /* TCS passthrough doesn't need a binding table. */
4962       assert(stage == MESA_SHADER_TESS_CTRL);
4963       return;
4964    }
4965 
4966    if (stage == MESA_SHADER_COMPUTE &&
4967        shader->bt.used_mask[IRIS_SURFACE_GROUP_CS_WORK_GROUPS]) {
4968       /* surface for gl_NumWorkGroups */
4969       struct iris_state_ref *grid_data = &ice->state.grid_size;
4970       struct iris_state_ref *grid_state = &ice->state.grid_surf_state;
4971       iris_use_pinned_bo(batch, iris_resource_bo(grid_data->res), false,
4972                          IRIS_DOMAIN_OTHER_READ);
4973       iris_use_pinned_bo(batch, iris_resource_bo(grid_state->res), false,
4974                          IRIS_DOMAIN_NONE);
4975       push_bt_entry(grid_state->offset);
4976    }
4977 
4978    if (stage == MESA_SHADER_FRAGMENT) {
4979       struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
4980       /* Note that cso_fb->nr_cbufs == fs_key->nr_color_regions. */
4981       if (cso_fb->nr_cbufs) {
4982          for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
4983             uint32_t addr;
4984             if (cso_fb->cbufs[i]) {
4985                addr = use_surface(ice, batch, cso_fb->cbufs[i], true,
4986                                   ice->state.draw_aux_usage[i], false,
4987                                   IRIS_DOMAIN_RENDER_WRITE);
4988             } else {
4989                addr = use_null_fb_surface(batch, ice);
4990             }
4991             push_bt_entry(addr);
4992          }
4993       } else if (GFX_VER < 11) {
4994          uint32_t addr = use_null_fb_surface(batch, ice);
4995          push_bt_entry(addr);
4996       }
4997    }
4998 
4999 #define foreach_surface_used(index, group) \
5000    bt_assert(group); \
5001    for (int index = 0; index < bt->sizes[group]; index++) \
5002       if (iris_group_index_to_bti(bt, group, index) != \
5003           IRIS_SURFACE_NOT_USED)
5004 
5005    foreach_surface_used(i, IRIS_SURFACE_GROUP_RENDER_TARGET_READ) {
5006       struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
5007       uint32_t addr;
5008       if (cso_fb->cbufs[i]) {
5009          addr = use_surface(ice, batch, cso_fb->cbufs[i],
5010                             false, ice->state.draw_aux_usage[i], true,
5011                             IRIS_DOMAIN_OTHER_READ);
5012          push_bt_entry(addr);
5013       }
5014    }
5015 
5016    foreach_surface_used(i, IRIS_SURFACE_GROUP_TEXTURE) {
5017       struct iris_sampler_view *view = shs->textures[i];
5018       uint32_t addr = view ? use_sampler_view(ice, batch, view)
5019                            : use_null_surface(batch, ice);
5020       push_bt_entry(addr);
5021    }
5022 
5023    foreach_surface_used(i, IRIS_SURFACE_GROUP_IMAGE) {
5024       uint32_t addr = use_image(batch, ice, shs, info, i);
5025       push_bt_entry(addr);
5026    }
5027 
5028    foreach_surface_used(i, IRIS_SURFACE_GROUP_UBO) {
5029       uint32_t addr = use_ubo_ssbo(batch, ice, &shs->constbuf[i],
5030                                    &shs->constbuf_surf_state[i], false,
5031                                    IRIS_DOMAIN_OTHER_READ);
5032       push_bt_entry(addr);
5033    }
5034 
5035    foreach_surface_used(i, IRIS_SURFACE_GROUP_SSBO) {
5036       uint32_t addr =
5037          use_ubo_ssbo(batch, ice, &shs->ssbo[i], &shs->ssbo_surf_state[i],
5038                       shs->writable_ssbos & (1u << i), IRIS_DOMAIN_NONE);
5039       push_bt_entry(addr);
5040    }
5041 
5042 #if 0
5043       /* XXX: YUV surfaces not implemented yet */
5044       bt_assert(plane_start[1], ...);
5045       bt_assert(plane_start[2], ...);
5046 #endif
5047 }
5048 
5049 static void
5050 iris_use_optional_res(struct iris_batch *batch,
5051                       struct pipe_resource *res,
5052                       bool writeable,
5053                       enum iris_domain access)
5054 {
5055    if (res) {
5056       struct iris_bo *bo = iris_resource_bo(res);
5057       iris_use_pinned_bo(batch, bo, writeable, access);
5058    }
5059 }
5060 
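/**
 * Pin the depth and stencil buffers (and any depth aux buffer), marking
 * them writable only when the bound depth/stencil state enables writes.
 */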
5061 static void
5062 pin_depth_and_stencil_buffers(struct iris_batch *batch,
5063                               struct pipe_surface *zsbuf,
5064                               struct iris_depth_stencil_alpha_state *cso_zsa)
5065 {
5066    if (!zsbuf)
5067       return;
5068 
5069    struct iris_resource *zres, *sres;
5070    iris_get_depth_stencil_resources(zsbuf->texture, &zres, &sres);
5071 
5072    if (zres) {
5073       const enum iris_domain access = cso_zsa->depth_writes_enabled ?
5074          IRIS_DOMAIN_DEPTH_WRITE : IRIS_DOMAIN_OTHER_READ;
5075       iris_use_pinned_bo(batch, zres->bo, cso_zsa->depth_writes_enabled,
5076                          access);
5077       if (zres->aux.bo) {
5078          iris_use_pinned_bo(batch, zres->aux.bo,
5079                             cso_zsa->depth_writes_enabled, access);
5080       }
5081    }
5082 
5083    if (sres) {
5084       const enum iris_domain access = cso_zsa->stencil_writes_enabled ?
5085          IRIS_DOMAIN_DEPTH_WRITE : IRIS_DOMAIN_OTHER_READ;
5086       iris_use_pinned_bo(batch, sres->bo, cso_zsa->stencil_writes_enabled,
5087                          access);
5088    }
5089 }
5090 
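/**
 * Pin a shader's scratch space BO (and, on Gfx12.5+, its scratch surface
 * state), returning the address to program: a bindless surface offset on
 * Gfx12.5+, or the BO's virtual address otherwise.
 */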
5091 static uint32_t
5092 pin_scratch_space(struct iris_context *ice,
5093                   struct iris_batch *batch,
5094                   const struct brw_stage_prog_data *prog_data,
5095                   gl_shader_stage stage)
5096 {
5097    uint32_t scratch_addr = 0;
5098 
5099    if (prog_data->total_scratch > 0) {
5100       struct iris_bo *scratch_bo =
5101          iris_get_scratch_space(ice, prog_data->total_scratch, stage);
5102       iris_use_pinned_bo(batch, scratch_bo, true, IRIS_DOMAIN_NONE);
5103 
5104 #if GFX_VERx10 >= 125
5105       const struct iris_state_ref *ref =
5106          iris_get_scratch_surf(ice, prog_data->total_scratch);
5107       iris_use_pinned_bo(batch, iris_resource_bo(ref->res),
5108                          false, IRIS_DOMAIN_NONE);
5109       scratch_addr = ref->offset +
5110                      iris_resource_bo(ref->res)->address -
5111                      IRIS_MEMZONE_BINDLESS_START;
5112       assert((scratch_addr & 0x3f) == 0 && scratch_addr < (1 << 26));
5113 #else
5114       scratch_addr = scratch_bo->address;
5115 #endif
5116    }
5117 
5118    return scratch_addr;
5119 }
5120 
5121 /* ------------------------------------------------------------------- */
5122 
5123 /**
5124  * Pin any BOs which were installed by a previous batch, and restored
5125  * via the hardware logical context mechanism.
5126  *
5127  * We don't need to re-emit all state every batch - the hardware context
5128  * mechanism will save and restore it for us.  This includes pointers to
5129  * various BOs...which won't exist unless we ask the kernel to pin them
5130  * by adding them to the validation list.
5131  *
5132  * We can skip buffers if we've re-emitted those packets, as we're
5133  * overwriting those stale pointers with new ones, and don't actually
5134  * refer to the old BOs.
5135  */
5136 static void
5137 iris_restore_render_saved_bos(struct iris_context *ice,
5138                               struct iris_batch *batch,
5139                               const struct pipe_draw_info *draw)
5140 {
5141    struct iris_genx_state *genx = ice->state.genx;
5142 
5143    const uint64_t clean = ~ice->state.dirty;
5144    const uint64_t stage_clean = ~ice->state.stage_dirty;
5145 
5146    if (clean & IRIS_DIRTY_CC_VIEWPORT) {
5147       iris_use_optional_res(batch, ice->state.last_res.cc_vp, false,
5148                             IRIS_DOMAIN_NONE);
5149    }
5150 
5151    if (clean & IRIS_DIRTY_SF_CL_VIEWPORT) {
5152       iris_use_optional_res(batch, ice->state.last_res.sf_cl_vp, false,
5153                             IRIS_DOMAIN_NONE);
5154    }
5155 
5156    if (clean & IRIS_DIRTY_BLEND_STATE) {
5157       iris_use_optional_res(batch, ice->state.last_res.blend, false,
5158                             IRIS_DOMAIN_NONE);
5159    }
5160 
5161    if (clean & IRIS_DIRTY_COLOR_CALC_STATE) {
5162       iris_use_optional_res(batch, ice->state.last_res.color_calc, false,
5163                             IRIS_DOMAIN_NONE);
5164    }
5165 
5166    if (clean & IRIS_DIRTY_SCISSOR_RECT) {
5167       iris_use_optional_res(batch, ice->state.last_res.scissor, false,
5168                             IRIS_DOMAIN_NONE);
5169    }
5170 
5171    if (ice->state.streamout_active && (clean & IRIS_DIRTY_SO_BUFFERS)) {
5172       for (int i = 0; i < 4; i++) {
5173          struct iris_stream_output_target *tgt =
5174             (void *) ice->state.so_target[i];
5175          if (tgt) {
5176             iris_use_pinned_bo(batch, iris_resource_bo(tgt->base.buffer),
5177                                true, IRIS_DOMAIN_OTHER_WRITE);
5178             iris_use_pinned_bo(batch, iris_resource_bo(tgt->offset.res),
5179                                true, IRIS_DOMAIN_OTHER_WRITE);
5180          }
5181       }
5182    }
5183 
5184    for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5185       if (!(stage_clean & (IRIS_STAGE_DIRTY_CONSTANTS_VS << stage)))
5186          continue;
5187 
5188       struct iris_shader_state *shs = &ice->state.shaders[stage];
5189       struct iris_compiled_shader *shader = ice->shaders.prog[stage];
5190 
5191       if (!shader)
5192          continue;
5193 
5194       struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
5195 
5196       for (int i = 0; i < 4; i++) {
5197          const struct brw_ubo_range *range = &prog_data->ubo_ranges[i];
5198 
5199          if (range->length == 0)
5200             continue;
5201 
5202          /* Range block is a binding table index, map back to UBO index. */
5203          unsigned block_index = iris_bti_to_group_index(
5204             &shader->bt, IRIS_SURFACE_GROUP_UBO, range->block);
5205          assert(block_index != IRIS_SURFACE_NOT_USED);
5206 
5207          struct pipe_shader_buffer *cbuf = &shs->constbuf[block_index];
5208          struct iris_resource *res = (void *) cbuf->buffer;
5209 
5210          if (res)
5211             iris_use_pinned_bo(batch, res->bo, false, IRIS_DOMAIN_OTHER_READ);
5212          else
5213             iris_use_pinned_bo(batch, batch->screen->workaround_bo, false,
5214                                IRIS_DOMAIN_OTHER_READ);
5215       }
5216    }
5217 
5218    for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5219       if (stage_clean & (IRIS_STAGE_DIRTY_BINDINGS_VS << stage)) {
5220          /* Re-pin any buffers referred to by the binding table. */
5221          iris_populate_binding_table(ice, batch, stage, true);
5222       }
5223    }
5224 
5225    for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5226       struct iris_shader_state *shs = &ice->state.shaders[stage];
5227       struct pipe_resource *res = shs->sampler_table.res;
5228       if (res)
5229          iris_use_pinned_bo(batch, iris_resource_bo(res), false,
5230                             IRIS_DOMAIN_NONE);
5231    }
5232 
5233    for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5234       if (stage_clean & (IRIS_STAGE_DIRTY_VS << stage)) {
5235          struct iris_compiled_shader *shader = ice->shaders.prog[stage];
5236 
5237          if (shader) {
5238             struct iris_bo *bo = iris_resource_bo(shader->assembly.res);
5239             iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_NONE);
5240 
5241             pin_scratch_space(ice, batch, shader->prog_data, stage);
5242          }
5243       }
5244    }
5245 
5246    if ((clean & IRIS_DIRTY_DEPTH_BUFFER) &&
5247        (clean & IRIS_DIRTY_WM_DEPTH_STENCIL)) {
5248       struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
5249       pin_depth_and_stencil_buffers(batch, cso_fb->zsbuf, ice->state.cso_zsa);
5250    }
5251 
5252    iris_use_optional_res(batch, ice->state.last_res.index_buffer, false,
5253                          IRIS_DOMAIN_VF_READ);
5254 
5255    if (clean & IRIS_DIRTY_VERTEX_BUFFERS) {
5256       uint64_t bound = ice->state.bound_vertex_buffers;
5257       while (bound) {
5258          const int i = u_bit_scan64(&bound);
5259          struct pipe_resource *res = genx->vertex_buffers[i].resource;
5260          iris_use_pinned_bo(batch, iris_resource_bo(res), false,
5261                             IRIS_DOMAIN_VF_READ);
5262       }
5263    }
5264 }
5265 
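/**
 * Re-pin any buffers which are still referenced by clean compute state,
 * mirroring iris_restore_render_saved_bos() for the compute batch.
 */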
5266 static void
5267 iris_restore_compute_saved_bos(struct iris_context *ice,
5268                                struct iris_batch *batch,
5269                                const struct pipe_grid_info *grid)
5270 {
5271    const uint64_t stage_clean = ~ice->state.stage_dirty;
5272 
5273    const int stage = MESA_SHADER_COMPUTE;
5274    struct iris_shader_state *shs = &ice->state.shaders[stage];
5275 
5276    if (stage_clean & IRIS_STAGE_DIRTY_BINDINGS_CS) {
5277       /* Re-pin any buffers referred to by the binding table. */
5278       iris_populate_binding_table(ice, batch, stage, true);
5279    }
5280 
5281    struct pipe_resource *sampler_res = shs->sampler_table.res;
5282    if (sampler_res)
5283       iris_use_pinned_bo(batch, iris_resource_bo(sampler_res), false,
5284                          IRIS_DOMAIN_NONE);
5285 
5286    if ((stage_clean & IRIS_STAGE_DIRTY_SAMPLER_STATES_CS) &&
5287        (stage_clean & IRIS_STAGE_DIRTY_BINDINGS_CS) &&
5288        (stage_clean & IRIS_STAGE_DIRTY_CONSTANTS_CS) &&
5289        (stage_clean & IRIS_STAGE_DIRTY_CS)) {
5290       iris_use_optional_res(batch, ice->state.last_res.cs_desc, false,
5291                             IRIS_DOMAIN_NONE);
5292    }
5293 
5294    if (stage_clean & IRIS_STAGE_DIRTY_CS) {
5295       struct iris_compiled_shader *shader = ice->shaders.prog[stage];
5296 
5297       if (shader) {
5298          struct iris_bo *bo = iris_resource_bo(shader->assembly.res);
5299          iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_NONE);
5300 
5301          if (GFX_VERx10 < 125) {
5302             struct iris_bo *curbe_bo =
5303                iris_resource_bo(ice->state.last_res.cs_thread_ids);
5304             iris_use_pinned_bo(batch, curbe_bo, false, IRIS_DOMAIN_NONE);
5305          }
5306 
5307          pin_scratch_space(ice, batch, shader->prog_data, stage);
5308       }
5309    }
5310 }
5311 
5312 /**
5313  * Possibly emit STATE_BASE_ADDRESS to update Surface State Base Address.
5314  */
5315 static void
5316 iris_update_surface_base_address(struct iris_batch *batch,
5317                                  struct iris_binder *binder)
5318 {
5319    if (batch->last_surface_base_address == binder->bo->address)
5320       return;
5321 
5322    struct isl_device *isl_dev = &batch->screen->isl_dev;
5323    uint32_t mocs = isl_mocs(isl_dev, 0, false);
5324 
5325    iris_batch_sync_region_start(batch);
5326 
5327    flush_before_state_base_change(batch);
5328 
5329 #if GFX_VER == 12
5330    /* Wa_1607854226:
5331     *
5332     *  Workaround the non-pipelined state not applying in MEDIA/GPGPU pipeline
5333     *  mode by putting the pipeline temporarily in 3D mode.
5334     */
5335    if (batch->name == IRIS_BATCH_COMPUTE)
5336       emit_pipeline_select(batch, _3D);
5337 #endif
5338 
5339    iris_emit_cmd(batch, GENX(STATE_BASE_ADDRESS), sba) {
5340       sba.SurfaceStateBaseAddressModifyEnable = true;
5341       sba.SurfaceStateBaseAddress = ro_bo(binder->bo, 0);
5342 
5343       /* The hardware appears to pay attention to the MOCS fields even
5344        * if you don't set the "Address Modify Enable" bit for the base.
5345        */
5346       sba.GeneralStateMOCS            = mocs;
5347       sba.StatelessDataPortAccessMOCS = mocs;
5348       sba.DynamicStateMOCS            = mocs;
5349       sba.IndirectObjectMOCS          = mocs;
5350       sba.InstructionMOCS             = mocs;
5351       sba.SurfaceStateMOCS            = mocs;
5352 #if GFX_VER >= 9
5353       sba.BindlessSurfaceStateMOCS    = mocs;
5354 #endif
5355    }
5356 
5357 #if GFX_VER == 12
5358    /* Wa_1607854226:
5359     *
5360     *  Put the pipeline back into compute mode.
5361     */
5362    if (batch->name == IRIS_BATCH_COMPUTE)
5363       emit_pipeline_select(batch, GPGPU);
5364 #endif
5365 
5366    flush_after_state_base_change(batch);
5367    iris_batch_sync_region_end(batch);
5368 
5369    batch->last_surface_base_address = binder->bo->address;
5370 }
5371 
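/**
 * Compute the viewport's depth range.  Window-space positions use the
 * full [0, 1] range; otherwise we defer to util_viewport_zmin_zmax().
 */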
5372 static inline void
5373 iris_viewport_zmin_zmax(const struct pipe_viewport_state *vp, bool halfz,
5374                         bool window_space_position, float *zmin, float *zmax)
5375 {
5376    if (window_space_position) {
5377       *zmin = 0.f;
5378       *zmax = 1.f;
5379       return;
5380    }
5381    util_viewport_zmin_zmax(vp, halfz, zmin, zmax);
5382 }
5383 
5384 #if GFX_VER >= 12
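/**
 * Invalidate the CCS aux-map TLB if the aux-map state number has changed
 * since this batch last programmed it.
 */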
5385 void
5386 genX(invalidate_aux_map_state)(struct iris_batch *batch)
5387 {
5388    struct iris_screen *screen = batch->screen;
5389    void *aux_map_ctx = iris_bufmgr_get_aux_map_context(screen->bufmgr);
5390    if (!aux_map_ctx)
5391       return;
5392    uint32_t aux_map_state_num = intel_aux_map_get_state_num(aux_map_ctx);
5393    if (batch->last_aux_map_state != aux_map_state_num) {
5394       /* HSD 1209978178: docs say that before programming the aux table:
5395        *
5396        *    "Driver must ensure that the engine is IDLE but ensure it doesn't
5397        *    add extra flushes in the case it knows that the engine is already
5398        *    IDLE."
5399        *
5400        * An end of pipe sync is needed here, otherwise we see GPU hangs in
5401        * dEQP-GLES31.functional.copy_image.* tests.
5402        */
5403       iris_emit_end_of_pipe_sync(batch, "Invalidate aux map table",
5404                                  PIPE_CONTROL_CS_STALL);
5405 
5406       /* If the aux-map state number increased, then we need to rewrite the
5407        * register. Rewriting the register is used to both set the aux-map
5408        * translation table address, and also to invalidate any previously
5409        * cached translations.
5410        */
5411       iris_load_register_imm32(batch, GENX(GFX_CCS_AUX_INV_num), 1);
5412       batch->last_aux_map_state = aux_map_state_num;
5413    }
5414 }
5415 
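/**
 * Program the aux-map translation table base address for this batch.
 */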
5416 static void
5417 init_aux_map_state(struct iris_batch *batch)
5418 {
5419    struct iris_screen *screen = batch->screen;
5420    void *aux_map_ctx = iris_bufmgr_get_aux_map_context(screen->bufmgr);
5421    if (!aux_map_ctx)
5422       return;
5423 
5424    uint64_t base_addr = intel_aux_map_get_base(aux_map_ctx);
5425    assert(base_addr != 0 && align64(base_addr, 32 * 1024) == base_addr);
5426    iris_load_register_imm64(batch, GENX(GFX_AUX_TABLE_BASE_ADDR_num),
5427                             base_addr);
5428 }
5429 #endif
5430 
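/**
 * Per-stage push constant buffer information gathered from the UBO ranges,
 * used to fill out the 3DSTATE_CONSTANT_* packets.
 */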
5431 struct push_bos {
5432    struct {
5433       struct iris_address addr;
5434       uint32_t length;
5435    } buffers[4];
5436    int buffer_count;
5437    uint32_t max_length;
5438 };
5439 
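/**
 * Gather the push constant buffer addresses and read lengths for a stage's
 * UBO ranges into a push_bos structure.
 */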
5440 static void
5441 setup_constant_buffers(struct iris_context *ice,
5442                        struct iris_batch *batch,
5443                        int stage,
5444                        struct push_bos *push_bos)
5445 {
5446    struct iris_shader_state *shs = &ice->state.shaders[stage];
5447    struct iris_compiled_shader *shader = ice->shaders.prog[stage];
5448    struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
5449 
5450    uint32_t push_range_sum = 0;
5451 
5452    int n = 0;
5453    for (int i = 0; i < 4; i++) {
5454       const struct brw_ubo_range *range = &prog_data->ubo_ranges[i];
5455 
5456       if (range->length == 0)
5457          continue;
5458 
5459       push_range_sum += range->length;
5460 
5461       if (range->length > push_bos->max_length)
5462          push_bos->max_length = range->length;
5463 
5464       /* Range block is a binding table index, map back to UBO index. */
5465       unsigned block_index = iris_bti_to_group_index(
5466          &shader->bt, IRIS_SURFACE_GROUP_UBO, range->block);
5467       assert(block_index != IRIS_SURFACE_NOT_USED);
5468 
5469       struct pipe_shader_buffer *cbuf = &shs->constbuf[block_index];
5470       struct iris_resource *res = (void *) cbuf->buffer;
5471 
5472       assert(cbuf->buffer_offset % 32 == 0);
5473 
5474       push_bos->buffers[n].length = range->length;
5475       push_bos->buffers[n].addr =
5476          res ? ro_bo(res->bo, range->start * 32 + cbuf->buffer_offset)
5477          : batch->screen->workaround_address;
5478       n++;
5479    }
5480 
5481    /* From the 3DSTATE_CONSTANT_XS and 3DSTATE_CONSTANT_ALL programming notes:
5482     *
5483     *    "The sum of all four read length fields must be less than or
5484     *    equal to the size of 64."
5485     */
5486    assert(push_range_sum <= 64);
5487 
5488    push_bos->buffer_count = n;
5489 }
5490 
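/**
 * Emit a 3DSTATE_CONSTANT_XS packet pointing at the gathered push buffers.
 */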
5491 static void
5492 emit_push_constant_packets(struct iris_context *ice,
5493                            struct iris_batch *batch,
5494                            int stage,
5495                            const struct push_bos *push_bos)
5496 {
5497    UNUSED struct isl_device *isl_dev = &batch->screen->isl_dev;
5498    struct iris_compiled_shader *shader = ice->shaders.prog[stage];
5499    struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
5500 
5501    iris_emit_cmd(batch, GENX(3DSTATE_CONSTANT_VS), pkt) {
5502       pkt._3DCommandSubOpcode = push_constant_opcodes[stage];
5503 #if GFX_VER >= 12
5504       pkt.MOCS = isl_mocs(isl_dev, 0, false);
5505 #endif
5506       if (prog_data) {
5507          /* The Skylake PRM contains the following restriction:
5508           *
5509           *    "The driver must ensure The following case does not occur
5510           *     without a flush to the 3D engine: 3DSTATE_CONSTANT_* with
5511           *     buffer 3 read length equal to zero committed followed by a
5512           *     3DSTATE_CONSTANT_* with buffer 0 read length not equal to
5513           *     zero committed."
5514           *
5515           * To avoid this, we program the buffers in the highest slots.
5516           * This way, slot 0 is only used if slot 3 is also used.
5517           */
5518          int n = push_bos->buffer_count;
5519          assert(n <= 4);
5520          const unsigned shift = 4 - n;
5521          for (int i = 0; i < n; i++) {
5522             pkt.ConstantBody.ReadLength[i + shift] =
5523                push_bos->buffers[i].length;
5524             pkt.ConstantBody.Buffer[i + shift] = push_bos->buffers[i].addr;
5525          }
5526       }
5527    }
5528 }
5529 
5530 #if GFX_VER >= 12
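/**
 * Emit a single 3DSTATE_CONSTANT_ALL packet covering the stages in
 * shader_mask.  When push_bos is NULL, no buffers are pointed at, which
 * clears the push constants for those stages.
 */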
5531 static void
5532 emit_push_constant_packet_all(struct iris_context *ice,
5533                               struct iris_batch *batch,
5534                               uint32_t shader_mask,
5535                               const struct push_bos *push_bos)
5536 {
5537    struct isl_device *isl_dev = &batch->screen->isl_dev;
5538 
5539    if (!push_bos) {
5540       iris_emit_cmd(batch, GENX(3DSTATE_CONSTANT_ALL), pc) {
5541          pc.ShaderUpdateEnable = shader_mask;
5542       }
5543       return;
5544    }
5545 
5546    const uint32_t n = push_bos->buffer_count;
5547    const uint32_t max_pointers = 4;
5548    const uint32_t num_dwords = 2 + 2 * n;
5549    uint32_t const_all[2 + 2 * max_pointers];
5550    uint32_t *dw = &const_all[0];
5551 
5552    assert(n <= max_pointers);
5553    iris_pack_command(GENX(3DSTATE_CONSTANT_ALL), dw, all) {
5554       all.DWordLength = num_dwords - 2;
5555       all.MOCS = isl_mocs(isl_dev, 0, false);
5556       all.ShaderUpdateEnable = shader_mask;
5557       all.PointerBufferMask = (1 << n) - 1;
5558    }
5559    dw += 2;
5560 
5561    for (int i = 0; i < n; i++) {
5562       _iris_pack_state(batch, GENX(3DSTATE_CONSTANT_ALL_DATA),
5563                        dw + i * 2, data) {
5564          data.PointerToConstantBuffer = push_bos->buffers[i].addr;
5565          data.ConstantBufferReadLength = push_bos->buffers[i].length;
5566       }
5567    }
5568    iris_batch_emit(batch, const_all, sizeof(uint32_t) * num_dwords);
5569 }
5570 #endif
5571 
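/**
 * On Gfx12.0, toggle the CHICKEN register workarounds (Wa_14010455700 and
 * Wa_1806527549) when switching to or from a D16_UNORM depth surface.
 */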
5572 void
5573 genX(emit_depth_state_workarounds)(struct iris_context *ice,
5574                                    struct iris_batch *batch,
5575                                    const struct isl_surf *surf)
5576 {
5577 #if GFX_VERx10 == 120
5578    const bool fmt_is_d16 = surf->format == ISL_FORMAT_R16_UNORM;
5579 
5580    switch (ice->state.genx->depth_reg_mode) {
5581    case IRIS_DEPTH_REG_MODE_HW_DEFAULT:
5582       if (!fmt_is_d16)
5583          return;
5584       break;
5585    case IRIS_DEPTH_REG_MODE_D16:
5586       if (fmt_is_d16)
5587          return;
5588       break;
5589    case IRIS_DEPTH_REG_MODE_UNKNOWN:
5590       break;
5591    }
5592 
5593    /* We'll change some CHICKEN registers depending on the depth surface
5594     * format. Do a depth flush and stall so the pipeline is not using these
5595     * settings while we change the registers.
5596     */
5597    iris_emit_end_of_pipe_sync(batch,
5598                               "Workaround: Stop pipeline for 14010455700",
5599                               PIPE_CONTROL_DEPTH_STALL |
5600                               PIPE_CONTROL_DEPTH_CACHE_FLUSH);
5601 
5602    /* Wa_14010455700
5603     *
5604     * To avoid sporadic corruptions “Set 0x7010[9] when Depth Buffer
5605     * Surface Format is D16_UNORM , surface type is not NULL & 1X_MSAA”.
5606     */
5607    iris_emit_reg(batch, GENX(COMMON_SLICE_CHICKEN1), reg) {
5608       reg.HIZPlaneOptimizationdisablebit = fmt_is_d16 && surf->samples == 1;
5609       reg.HIZPlaneOptimizationdisablebitMask = true;
5610    }
5611 
5612    /* Wa_1806527549
5613     *
5614     * Set HIZ_CHICKEN (7018h) bit 13 = 1 when depth buffer is D16_UNORM.
5615     */
5616    iris_emit_reg(batch, GENX(HIZ_CHICKEN), reg) {
5617       reg.HZDepthTestLEGEOptimizationDisable = fmt_is_d16;
5618       reg.HZDepthTestLEGEOptimizationDisableMask = true;
5619    }
5620 
5621    ice->state.genx->depth_reg_mode =
5622       fmt_is_d16 ? IRIS_DEPTH_REG_MODE_D16 : IRIS_DEPTH_REG_MODE_HW_DEFAULT;
5623 #endif
5624 }
5625 
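/**
 * Emit all 3D state which has been flagged dirty since the last draw,
 * re-pinning any buffers the new state refers to.
 */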
5626 static void
5627 iris_upload_dirty_render_state(struct iris_context *ice,
5628                                struct iris_batch *batch,
5629                                const struct pipe_draw_info *draw)
5630 {
5631    const uint64_t dirty = ice->state.dirty;
5632    const uint64_t stage_dirty = ice->state.stage_dirty;
5633 
5634    if (!(dirty & IRIS_ALL_DIRTY_FOR_RENDER) &&
5635        !(stage_dirty & IRIS_ALL_STAGE_DIRTY_FOR_RENDER))
5636       return;
5637 
5638    struct iris_genx_state *genx = ice->state.genx;
5639    struct iris_binder *binder = &ice->state.binder;
5640    struct brw_wm_prog_data *wm_prog_data = (void *)
5641       ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
5642 
5643    if (dirty & IRIS_DIRTY_CC_VIEWPORT) {
5644       const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
5645       uint32_t cc_vp_address;
5646 
5647       /* XXX: could avoid streaming for depth_clip [0,1] case. */
5648       uint32_t *cc_vp_map =
5649          stream_state(batch, ice->state.dynamic_uploader,
5650                       &ice->state.last_res.cc_vp,
5651                       4 * ice->state.num_viewports *
5652                       GENX(CC_VIEWPORT_length), 32, &cc_vp_address);
5653       for (int i = 0; i < ice->state.num_viewports; i++) {
5654          float zmin, zmax;
5655          iris_viewport_zmin_zmax(&ice->state.viewports[i], cso_rast->clip_halfz,
5656                                  ice->state.window_space_position,
5657                                  &zmin, &zmax);
5658          if (cso_rast->depth_clip_near)
5659             zmin = 0.0;
5660          if (cso_rast->depth_clip_far)
5661             zmax = 1.0;
5662 
5663          iris_pack_state(GENX(CC_VIEWPORT), cc_vp_map, ccv) {
5664             ccv.MinimumDepth = zmin;
5665             ccv.MaximumDepth = zmax;
5666          }
5667 
5668          cc_vp_map += GENX(CC_VIEWPORT_length);
5669       }
5670 
5671       iris_emit_cmd(batch, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC), ptr) {
5672          ptr.CCViewportPointer = cc_vp_address;
5673       }
5674    }
5675 
5676    if (dirty & IRIS_DIRTY_SF_CL_VIEWPORT) {
5677       struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
5678       uint32_t sf_cl_vp_address;
5679       uint32_t *vp_map =
5680          stream_state(batch, ice->state.dynamic_uploader,
5681                       &ice->state.last_res.sf_cl_vp,
5682                       4 * ice->state.num_viewports *
5683                       GENX(SF_CLIP_VIEWPORT_length), 64, &sf_cl_vp_address);
5684 
5685       for (unsigned i = 0; i < ice->state.num_viewports; i++) {
5686          const struct pipe_viewport_state *state = &ice->state.viewports[i];
5687          float gb_xmin, gb_xmax, gb_ymin, gb_ymax;
5688 
5689          float vp_xmin = viewport_extent(state, 0, -1.0f);
5690          float vp_xmax = viewport_extent(state, 0,  1.0f);
5691          float vp_ymin = viewport_extent(state, 1, -1.0f);
5692          float vp_ymax = viewport_extent(state, 1,  1.0f);
5693 
5694          intel_calculate_guardband_size(cso_fb->width, cso_fb->height,
5695                                         state->scale[0], state->scale[1],
5696                                         state->translate[0], state->translate[1],
5697                                         &gb_xmin, &gb_xmax, &gb_ymin, &gb_ymax);
5698 
5699          iris_pack_state(GENX(SF_CLIP_VIEWPORT), vp_map, vp) {
5700             vp.ViewportMatrixElementm00 = state->scale[0];
5701             vp.ViewportMatrixElementm11 = state->scale[1];
5702             vp.ViewportMatrixElementm22 = state->scale[2];
5703             vp.ViewportMatrixElementm30 = state->translate[0];
5704             vp.ViewportMatrixElementm31 = state->translate[1];
5705             vp.ViewportMatrixElementm32 = state->translate[2];
5706             vp.XMinClipGuardband = gb_xmin;
5707             vp.XMaxClipGuardband = gb_xmax;
5708             vp.YMinClipGuardband = gb_ymin;
5709             vp.YMaxClipGuardband = gb_ymax;
5710             vp.XMinViewPort = MAX2(vp_xmin, 0);
5711             vp.XMaxViewPort = MIN2(vp_xmax, cso_fb->width) - 1;
5712             vp.YMinViewPort = MAX2(vp_ymin, 0);
5713             vp.YMaxViewPort = MIN2(vp_ymax, cso_fb->height) - 1;
5714          }
5715 
5716          vp_map += GENX(SF_CLIP_VIEWPORT_length);
5717       }
5718 
5719       iris_emit_cmd(batch, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP), ptr) {
5720          ptr.SFClipViewportPointer = sf_cl_vp_address;
5721       }
5722    }
5723 
5724    if (dirty & IRIS_DIRTY_URB) {
5725       for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
5726          if (!ice->shaders.prog[i]) {
5727             ice->shaders.urb.size[i] = 1;
5728          } else {
5729             struct brw_vue_prog_data *vue_prog_data =
5730                (void *) ice->shaders.prog[i]->prog_data;
5731             ice->shaders.urb.size[i] = vue_prog_data->urb_entry_size;
5732          }
5733          assert(ice->shaders.urb.size[i] != 0);
5734       }
5735 
5736       intel_get_urb_config(&batch->screen->devinfo,
5737                            batch->screen->l3_config_3d,
5738                            ice->shaders.prog[MESA_SHADER_TESS_EVAL] != NULL,
5739                            ice->shaders.prog[MESA_SHADER_GEOMETRY] != NULL,
5740                            ice->shaders.urb.size,
5741                            ice->shaders.urb.entries,
5742                            ice->shaders.urb.start,
5743                            &ice->state.urb_deref_block_size,
5744                            &ice->shaders.urb.constrained);
5745 
5746       for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
5747          iris_emit_cmd(batch, GENX(3DSTATE_URB_VS), urb) {
5748             urb._3DCommandSubOpcode += i;
5749             urb.VSURBStartingAddress     = ice->shaders.urb.start[i];
5750             urb.VSURBEntryAllocationSize = ice->shaders.urb.size[i] - 1;
5751             urb.VSNumberofURBEntries     = ice->shaders.urb.entries[i];
5752          }
5753       }
5754    }
5755 
5756    if (dirty & IRIS_DIRTY_BLEND_STATE) {
5757       struct iris_blend_state *cso_blend = ice->state.cso_blend;
5758       struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
5759       struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
5760       const int header_dwords = GENX(BLEND_STATE_length);
5761 
5762       /* Always write at least one BLEND_STATE - the final RT message will
5763        * reference BLEND_STATE[0] even if there aren't color writes.  There
5764        * may still be alpha testing, computed depth, and so on.
5765        */
5766       const int rt_dwords =
5767          MAX2(cso_fb->nr_cbufs, 1) * GENX(BLEND_STATE_ENTRY_length);
5768 
5769       uint32_t blend_offset;
5770       uint32_t *blend_map =
5771          stream_state(batch, ice->state.dynamic_uploader,
5772                       &ice->state.last_res.blend,
5773                       4 * (header_dwords + rt_dwords), 64, &blend_offset);
5774 
5775       uint32_t blend_state_header;
5776       iris_pack_state(GENX(BLEND_STATE), &blend_state_header, bs) {
5777          bs.AlphaTestEnable = cso_zsa->alpha_enabled;
5778          bs.AlphaTestFunction = translate_compare_func(cso_zsa->alpha_func);
5779       }
5780 
5781       blend_map[0] = blend_state_header | cso_blend->blend_state[0];
5782       memcpy(&blend_map[1], &cso_blend->blend_state[1], 4 * rt_dwords);
5783 
5784       iris_emit_cmd(batch, GENX(3DSTATE_BLEND_STATE_POINTERS), ptr) {
5785          ptr.BlendStatePointer = blend_offset;
5786          ptr.BlendStatePointerValid = true;
5787       }
5788    }
5789 
5790    if (dirty & IRIS_DIRTY_COLOR_CALC_STATE) {
5791       struct iris_depth_stencil_alpha_state *cso = ice->state.cso_zsa;
5792 #if GFX_VER == 8
5793       struct pipe_stencil_ref *p_stencil_refs = &ice->state.stencil_ref;
5794 #endif
5795       uint32_t cc_offset;
5796       void *cc_map =
5797          stream_state(batch, ice->state.dynamic_uploader,
5798                       &ice->state.last_res.color_calc,
5799                       sizeof(uint32_t) * GENX(COLOR_CALC_STATE_length),
5800                       64, &cc_offset);
5801       iris_pack_state(GENX(COLOR_CALC_STATE), cc_map, cc) {
5802          cc.AlphaTestFormat = ALPHATEST_FLOAT32;
5803          cc.AlphaReferenceValueAsFLOAT32 = cso->alpha_ref_value;
5804          cc.BlendConstantColorRed   = ice->state.blend_color.color[0];
5805          cc.BlendConstantColorGreen = ice->state.blend_color.color[1];
5806          cc.BlendConstantColorBlue  = ice->state.blend_color.color[2];
5807          cc.BlendConstantColorAlpha = ice->state.blend_color.color[3];
5808 #if GFX_VER == 8
5809 	 cc.StencilReferenceValue = p_stencil_refs->ref_value[0];
5810 	 cc.BackfaceStencilReferenceValue = p_stencil_refs->ref_value[1];
5811 #endif
5812       }
5813       iris_emit_cmd(batch, GENX(3DSTATE_CC_STATE_POINTERS), ptr) {
5814          ptr.ColorCalcStatePointer = cc_offset;
5815          ptr.ColorCalcStatePointerValid = true;
5816       }
5817    }
5818 
5819    /* Wa_1604061319
5820     *
5821     *    3DSTATE_CONSTANT_* needs to be programmed before BTP_*
5822     *
5823     * Testing shows that all the 3DSTATE_CONSTANT_XS need to be emitted if
5824     * any stage has a dirty binding table.
5825     */
5826    const bool emit_const_wa = GFX_VER >= 11 &&
5827       ((dirty & IRIS_DIRTY_RENDER_BUFFER) ||
5828        (stage_dirty & IRIS_ALL_STAGE_DIRTY_BINDINGS_FOR_RENDER));
5829 
5830 #if GFX_VER >= 12
5831    uint32_t nobuffer_stages = 0;
5832 #endif
5833 
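   /* Emit 3DSTATE_CONSTANT_* for each stage with dirty push constants
    * (or for every stage when the Wa_1604061319 workaround applies).
    */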
5834    for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5835       if (!(stage_dirty & (IRIS_STAGE_DIRTY_CONSTANTS_VS << stage)) &&
5836           !emit_const_wa)
5837          continue;
5838 
5839       struct iris_shader_state *shs = &ice->state.shaders[stage];
5840       struct iris_compiled_shader *shader = ice->shaders.prog[stage];
5841 
5842       if (!shader)
5843          continue;
5844 
5845       if (shs->sysvals_need_upload)
5846          upload_sysvals(ice, stage, NULL);
5847 
5848       struct push_bos push_bos = {};
5849       setup_constant_buffers(ice, batch, stage, &push_bos);
5850 
5851 #if GFX_VER >= 12
5852       /* If this stage doesn't have any push constants, emit it later in a
5853        * single CONSTANT_ALL packet with all the other stages.
5854        */
5855       if (push_bos.buffer_count == 0) {
5856          nobuffer_stages |= 1 << stage;
5857          continue;
5858       }
5859 
5860       /* The Constant Buffer Read Length field from 3DSTATE_CONSTANT_ALL
5861        * contains only 5 bits, so we can only use it for buffers smaller than
5862        * 32.
5863        */
5864       if (push_bos.max_length < 32) {
5865          emit_push_constant_packet_all(ice, batch, 1 << stage, &push_bos);
5866          continue;
5867       }
5868 #endif
5869       emit_push_constant_packets(ice, batch, stage, &push_bos);
5870    }
5871 
5872 #if GFX_VER >= 12
5873    if (nobuffer_stages)
5874       emit_push_constant_packet_all(ice, batch, nobuffer_stages, NULL);
5875 #endif
5876 
5877    for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5878       /* Gfx9 requires 3DSTATE_BINDING_TABLE_POINTERS_XS to be re-emitted
5879        * in order to commit constants.  TODO: Investigate "Disable Gather
5880        * at Set Shader" to go back to legacy mode...
5881        */
5882       if (stage_dirty & ((IRIS_STAGE_DIRTY_BINDINGS_VS |
5883                           (GFX_VER == 9 ? IRIS_STAGE_DIRTY_CONSTANTS_VS : 0))
5884                             << stage)) {
5885          iris_emit_cmd(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), ptr) {
5886             ptr._3DCommandSubOpcode = 38 + stage;
5887             ptr.PointertoVSBindingTable = binder->bt_offset[stage];
5888          }
5889       }
5890    }
5891 
5892    if (GFX_VER >= 11 && (dirty & IRIS_DIRTY_RENDER_BUFFER)) {
5893       // XXX: we may want to flag IRIS_DIRTY_MULTISAMPLE (or SAMPLE_MASK?)
5894       // XXX: see commit 979fc1bc9bcc64027ff2cfafd285676f31b930a6
5895 
5896       /* The PIPE_CONTROL command description says:
5897        *
5898        *   "Whenever a Binding Table Index (BTI) used by a Render Target
5899        *    Message points to a different RENDER_SURFACE_STATE, SW must issue a
5900        *    Render Target Cache Flush by enabling this bit. When render target
5901        *    flush is set due to new association of BTI, PS Scoreboard Stall bit
5902        *    must be set in this packet."
5903        */
5904       // XXX: does this need to happen at 3DSTATE_BTP_PS time?
5905       iris_emit_pipe_control_flush(batch, "workaround: RT BTI change [draw]",
5906                                    PIPE_CONTROL_RENDER_TARGET_FLUSH |
5907                                    PIPE_CONTROL_STALL_AT_SCOREBOARD);
5908    }
5909 
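   /* Upload the binding tables for any stages with dirty bindings. */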
5910    for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5911       if (stage_dirty & (IRIS_STAGE_DIRTY_BINDINGS_VS << stage)) {
5912          iris_populate_binding_table(ice, batch, stage, false);
5913       }
5914    }
5915 
5916    for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5917       if (!(stage_dirty & (IRIS_STAGE_DIRTY_SAMPLER_STATES_VS << stage)) ||
5918           !ice->shaders.prog[stage])
5919          continue;
5920 
5921       iris_upload_sampler_states(ice, stage);
5922 
5923       struct iris_shader_state *shs = &ice->state.shaders[stage];
5924       struct pipe_resource *res = shs->sampler_table.res;
5925       if (res)
5926          iris_use_pinned_bo(batch, iris_resource_bo(res), false,
5927                             IRIS_DOMAIN_NONE);
5928 
5929       iris_emit_cmd(batch, GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS), ptr) {
5930          ptr._3DCommandSubOpcode = 43 + stage;
5931          ptr.PointertoVSSamplerState = shs->sampler_table.offset;
5932       }
5933    }
5934 
5935    if (ice->state.need_border_colors)
5936       iris_use_pinned_bo(batch, ice->state.border_color_pool.bo, false,
5937                          IRIS_DOMAIN_NONE);
5938 
5939    if (dirty & IRIS_DIRTY_MULTISAMPLE) {
5940       iris_emit_cmd(batch, GENX(3DSTATE_MULTISAMPLE), ms) {
5941          ms.PixelLocation =
5942             ice->state.cso_rast->half_pixel_center ? CENTER : UL_CORNER;
5943          if (ice->state.framebuffer.samples > 0)
5944             ms.NumberofMultisamples = ffs(ice->state.framebuffer.samples) - 1;
5945       }
5946    }
5947 
5948    if (dirty & IRIS_DIRTY_SAMPLE_MASK) {
5949       iris_emit_cmd(batch, GENX(3DSTATE_SAMPLE_MASK), ms) {
5950          ms.SampleMask = ice->state.sample_mask;
5951       }
5952    }
5953 
5954    for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5955       if (!(stage_dirty & (IRIS_STAGE_DIRTY_VS << stage)))
5956          continue;
5957 
5958       struct iris_compiled_shader *shader = ice->shaders.prog[stage];
5959 
5960       if (shader) {
5961          struct brw_stage_prog_data *prog_data = shader->prog_data;
5962          struct iris_resource *cache = (void *) shader->assembly.res;
5963          iris_use_pinned_bo(batch, cache->bo, false, IRIS_DOMAIN_NONE);
5964 
5965          uint32_t scratch_addr =
5966             pin_scratch_space(ice, batch, prog_data, stage);
5967 
5968          if (stage == MESA_SHADER_FRAGMENT) {
5969             UNUSED struct iris_rasterizer_state *cso = ice->state.cso_rast;
5970             struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
5971 
5972             uint32_t ps_state[GENX(3DSTATE_PS_length)] = {0};
5973             _iris_pack_command(batch, GENX(3DSTATE_PS), ps_state, ps) {
5974                ps._8PixelDispatchEnable = wm_prog_data->dispatch_8;
5975                ps._16PixelDispatchEnable = wm_prog_data->dispatch_16;
5976                ps._32PixelDispatchEnable = wm_prog_data->dispatch_32;
5977 
5978               /* The docs for 3DSTATE_PS::32 Pixel Dispatch Enable say:
5979                *
5980                *    "When NUM_MULTISAMPLES = 16 or FORCE_SAMPLE_COUNT = 16,
5981                *     SIMD32 Dispatch must not be enabled for PER_PIXEL dispatch
5982                *     mode."
5983                *
5984                * 16x MSAA only exists on Gfx9+, so we can skip this on Gfx8.
5985                */
5986                if (GFX_VER >= 9 && cso_fb->samples == 16 &&
5987                    !wm_prog_data->persample_dispatch) {
5988                   assert(ps._8PixelDispatchEnable || ps._16PixelDispatchEnable);
5989                   ps._32PixelDispatchEnable = false;
5990                }
5991 
5992                ps.DispatchGRFStartRegisterForConstantSetupData0 =
5993                   brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 0);
5994                ps.DispatchGRFStartRegisterForConstantSetupData1 =
5995                   brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 1);
5996                ps.DispatchGRFStartRegisterForConstantSetupData2 =
5997                   brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 2);
5998 
5999                ps.KernelStartPointer0 = KSP(shader) +
6000                   brw_wm_prog_data_prog_offset(wm_prog_data, ps, 0);
6001                ps.KernelStartPointer1 = KSP(shader) +
6002                   brw_wm_prog_data_prog_offset(wm_prog_data, ps, 1);
6003                ps.KernelStartPointer2 = KSP(shader) +
6004                   brw_wm_prog_data_prog_offset(wm_prog_data, ps, 2);
6005 
6006 #if GFX_VERx10 >= 125
6007                ps.ScratchSpaceBuffer = scratch_addr >> 4;
6008 #else
6009                ps.ScratchSpaceBasePointer =
6010                   rw_bo(NULL, scratch_addr, IRIS_DOMAIN_NONE);
6011 #endif
6012             }
6013 
6014             uint32_t psx_state[GENX(3DSTATE_PS_EXTRA_length)] = {0};
6015             iris_pack_command(GENX(3DSTATE_PS_EXTRA), psx_state, psx) {
6016 #if GFX_VER >= 9
6017                if (!wm_prog_data->uses_sample_mask)
6018                   psx.InputCoverageMaskState  = ICMS_NONE;
6019                else if (wm_prog_data->post_depth_coverage)
6020                   psx.InputCoverageMaskState = ICMS_DEPTH_COVERAGE;
6021                else if (wm_prog_data->inner_coverage &&
6022                         cso->conservative_rasterization)
6023                   psx.InputCoverageMaskState = ICMS_INNER_CONSERVATIVE;
6024                else
6025                   psx.InputCoverageMaskState = ICMS_NORMAL;
6026 #else
6027                psx.PixelShaderUsesInputCoverageMask =
6028                   wm_prog_data->uses_sample_mask;
6029 #endif
6030             }
6031 
6032             uint32_t *shader_ps = (uint32_t *) shader->derived_data;
6033             uint32_t *shader_psx = shader_ps + GENX(3DSTATE_PS_length);
6034             iris_emit_merge(batch, shader_ps, ps_state,
6035                             GENX(3DSTATE_PS_length));
6036             iris_emit_merge(batch, shader_psx, psx_state,
6037                             GENX(3DSTATE_PS_EXTRA_length));
6038          } else if (scratch_addr) {
6039             uint32_t *pkt = (uint32_t *) shader->derived_data;
6040             switch (stage) {
6041             case MESA_SHADER_VERTEX:    MERGE_SCRATCH_ADDR(3DSTATE_VS); break;
6042             case MESA_SHADER_TESS_CTRL: MERGE_SCRATCH_ADDR(3DSTATE_HS); break;
6043             case MESA_SHADER_TESS_EVAL: MERGE_SCRATCH_ADDR(3DSTATE_DS); break;
6044             case MESA_SHADER_GEOMETRY:  MERGE_SCRATCH_ADDR(3DSTATE_GS); break;
6045             }
6046          } else {
6047             iris_batch_emit(batch, shader->derived_data,
6048                             iris_derived_program_state_size(stage));
6049          }
6050       } else {
6051          if (stage == MESA_SHADER_TESS_EVAL) {
6052             iris_emit_cmd(batch, GENX(3DSTATE_HS), hs);
6053             iris_emit_cmd(batch, GENX(3DSTATE_TE), te);
6054             iris_emit_cmd(batch, GENX(3DSTATE_DS), ds);
6055          } else if (stage == MESA_SHADER_GEOMETRY) {
6056             iris_emit_cmd(batch, GENX(3DSTATE_GS), gs);
6057          }
6058       }
6059    }
6060 
6061    if (ice->state.streamout_active) {
6062       if (dirty & IRIS_DIRTY_SO_BUFFERS) {
6063          for (int i = 0; i < 4; i++) {
6064             struct iris_stream_output_target *tgt =
6065                (void *) ice->state.so_target[i];
6066             const uint32_t dwords = GENX(3DSTATE_SO_BUFFER_length);
6067             uint32_t *so_buffers = genx->so_buffers + i * dwords;
6068             bool zero_offset = false;
6069 
6070             if (tgt) {
6071                zero_offset = tgt->zero_offset;
6072                iris_use_pinned_bo(batch, iris_resource_bo(tgt->base.buffer),
6073                                   true, IRIS_DOMAIN_OTHER_WRITE);
6074                iris_use_pinned_bo(batch, iris_resource_bo(tgt->offset.res),
6075                                   true, IRIS_DOMAIN_OTHER_WRITE);
6076             }
6077 
6078             if (zero_offset) {
6079                /* Skip the last DWord which contains "Stream Offset" of
6080                 * 0xFFFFFFFF and instead emit a dword of zero directly.
6081                 */
6082                STATIC_ASSERT(GENX(3DSTATE_SO_BUFFER_StreamOffset_start) ==
6083                              32 * (dwords - 1));
6084                const uint32_t zero = 0;
6085                iris_batch_emit(batch, so_buffers, 4 * (dwords - 1));
6086                iris_batch_emit(batch, &zero, sizeof(zero));
6087                tgt->zero_offset = false;
6088             } else {
6089                iris_batch_emit(batch, so_buffers, 4 * dwords);
6090             }
6091          }
6092       }
6093 
6094       if ((dirty & IRIS_DIRTY_SO_DECL_LIST) && ice->state.streamout) {
6095          uint32_t *decl_list =
6096             ice->state.streamout + GENX(3DSTATE_STREAMOUT_length);
6097          iris_batch_emit(batch, decl_list, 4 * ((decl_list[0] & 0xff) + 2));
6098       }
6099 
6100       if (dirty & IRIS_DIRTY_STREAMOUT) {
6101          const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
6102 
6103          uint32_t dynamic_sol[GENX(3DSTATE_STREAMOUT_length)];
6104          iris_pack_command(GENX(3DSTATE_STREAMOUT), dynamic_sol, sol) {
6105             sol.SOFunctionEnable = true;
6106             sol.SOStatisticsEnable = true;
6107 
6108             sol.RenderingDisable = cso_rast->rasterizer_discard &&
6109                                    !ice->state.prims_generated_query_active;
6110             sol.ReorderMode = cso_rast->flatshade_first ? LEADING : TRAILING;
6111          }
6112 
6113          assert(ice->state.streamout);
6114 
6115          iris_emit_merge(batch, ice->state.streamout, dynamic_sol,
6116                          GENX(3DSTATE_STREAMOUT_length));
6117       }
6118    } else {
6119       if (dirty & IRIS_DIRTY_STREAMOUT) {
6120          iris_emit_cmd(batch, GENX(3DSTATE_STREAMOUT), sol);
6121       }
6122    }
6123 
6124    if (dirty & IRIS_DIRTY_CLIP) {
6125       struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
6126       struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
6127 
6128       bool gs_or_tes = ice->shaders.prog[MESA_SHADER_GEOMETRY] ||
6129                        ice->shaders.prog[MESA_SHADER_TESS_EVAL];
6130       bool points_or_lines = cso_rast->fill_mode_point_or_line ||
6131          (gs_or_tes ? ice->shaders.output_topology_is_points_or_lines
6132                     : ice->state.prim_is_points_or_lines);
6133 
6134       uint32_t dynamic_clip[GENX(3DSTATE_CLIP_length)];
6135       iris_pack_command(GENX(3DSTATE_CLIP), &dynamic_clip, cl) {
6136          cl.StatisticsEnable = ice->state.statistics_counters_enabled;
6137          if (cso_rast->rasterizer_discard)
6138             cl.ClipMode = CLIPMODE_REJECT_ALL;
6139          else if (ice->state.window_space_position)
6140             cl.ClipMode = CLIPMODE_ACCEPT_ALL;
6141          else
6142             cl.ClipMode = CLIPMODE_NORMAL;
6143 
6144          cl.PerspectiveDivideDisable = ice->state.window_space_position;
6145          cl.ViewportXYClipTestEnable = !points_or_lines;
6146 
6147          if (wm_prog_data->barycentric_interp_modes &
6148              BRW_BARYCENTRIC_NONPERSPECTIVE_BITS)
6149             cl.NonPerspectiveBarycentricEnable = true;
6150 
6151          cl.ForceZeroRTAIndexEnable = cso_fb->layers <= 1;
6152          cl.MaximumVPIndex = ice->state.num_viewports - 1;
6153       }
6154       iris_emit_merge(batch, cso_rast->clip, dynamic_clip,
6155                       ARRAY_SIZE(cso_rast->clip));
6156    }
6157 
6158    if (dirty & (IRIS_DIRTY_RASTER | IRIS_DIRTY_URB)) {
6159       struct iris_rasterizer_state *cso = ice->state.cso_rast;
6160       iris_batch_emit(batch, cso->raster, sizeof(cso->raster));
6161 
6162       uint32_t dynamic_sf[GENX(3DSTATE_SF_length)];
6163       iris_pack_command(GENX(3DSTATE_SF), &dynamic_sf, sf) {
6164          sf.ViewportTransformEnable = !ice->state.window_space_position;
6165 
6166 #if GFX_VER >= 12
6167          sf.DerefBlockSize = ice->state.urb_deref_block_size;
6168 #endif
6169       }
6170       iris_emit_merge(batch, cso->sf, dynamic_sf,
6171                       ARRAY_SIZE(dynamic_sf));
6172    }
6173 
6174    if (dirty & IRIS_DIRTY_WM) {
6175       struct iris_rasterizer_state *cso = ice->state.cso_rast;
6176       uint32_t dynamic_wm[GENX(3DSTATE_WM_length)];
6177 
6178       iris_pack_command(GENX(3DSTATE_WM), &dynamic_wm, wm) {
6179          wm.StatisticsEnable = ice->state.statistics_counters_enabled;
6180 
6181          wm.BarycentricInterpolationMode =
6182             wm_prog_data->barycentric_interp_modes;
6183 
6184          if (wm_prog_data->early_fragment_tests)
6185             wm.EarlyDepthStencilControl = EDSC_PREPS;
6186          else if (wm_prog_data->has_side_effects)
6187             wm.EarlyDepthStencilControl = EDSC_PSEXEC;
6188 
6189          /* We could skip this bit if color writes are enabled. */
6190          if (wm_prog_data->has_side_effects || wm_prog_data->uses_kill)
6191             wm.ForceThreadDispatchEnable = ForceON;
6192       }
6193       iris_emit_merge(batch, cso->wm, dynamic_wm, ARRAY_SIZE(cso->wm));
6194    }
6195 
6196    if (dirty & IRIS_DIRTY_SBE) {
6197       iris_emit_sbe(batch, ice);
6198    }
6199 
6200    if (dirty & IRIS_DIRTY_PS_BLEND) {
6201       struct iris_blend_state *cso_blend = ice->state.cso_blend;
6202       struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
6203       const struct shader_info *fs_info =
6204          iris_get_shader_info(ice, MESA_SHADER_FRAGMENT);
6205 
6206       uint32_t dynamic_pb[GENX(3DSTATE_PS_BLEND_length)];
6207       iris_pack_command(GENX(3DSTATE_PS_BLEND), &dynamic_pb, pb) {
6208          pb.HasWriteableRT = has_writeable_rt(cso_blend, fs_info);
6209          pb.AlphaTestEnable = cso_zsa->alpha_enabled;
6210 
6211          /* The dual source blending docs caution against using SRC1 factors
6212           * when the shader doesn't use a dual source render target write.
6213           * Empirically, this can lead to GPU hangs, and the results are
6214           * undefined anyway, so simply disable blending to avoid the hang.
6215           */
6216          pb.ColorBufferBlendEnable = (cso_blend->blend_enables & 1) &&
6217             (!cso_blend->dual_color_blending || wm_prog_data->dual_src_blend);
6218       }
6219 
6220       iris_emit_merge(batch, cso_blend->ps_blend, dynamic_pb,
6221                       ARRAY_SIZE(cso_blend->ps_blend));
6222    }
6223 
6224    if (dirty & IRIS_DIRTY_WM_DEPTH_STENCIL) {
6225       struct iris_depth_stencil_alpha_state *cso = ice->state.cso_zsa;
6226 #if GFX_VER >= 9 && GFX_VER < 12
6227       struct pipe_stencil_ref *p_stencil_refs = &ice->state.stencil_ref;
6228       uint32_t stencil_refs[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
6229       iris_pack_command(GENX(3DSTATE_WM_DEPTH_STENCIL), &stencil_refs, wmds) {
6230          wmds.StencilReferenceValue = p_stencil_refs->ref_value[0];
6231          wmds.BackfaceStencilReferenceValue = p_stencil_refs->ref_value[1];
6232       }
6233       iris_emit_merge(batch, cso->wmds, stencil_refs, ARRAY_SIZE(cso->wmds));
6234 #else
6235       /* Use modify disable fields which allow us to emit packets
6236        * directly instead of merging them later.
6237        */
6238       iris_batch_emit(batch, cso->wmds, sizeof(cso->wmds));
6239 #endif
6240 
6241 #if GFX_VER >= 12
6242       iris_batch_emit(batch, cso->depth_bounds, sizeof(cso->depth_bounds));
6243 #endif
6244    }
6245 
6246    if (dirty & IRIS_DIRTY_STENCIL_REF) {
6247 #if GFX_VER >= 12
6248       /* Use modify disable fields which allow us to emit packets
6249        * directly instead of merging them later.
6250        */
6251       struct pipe_stencil_ref *p_stencil_refs = &ice->state.stencil_ref;
6252       uint32_t stencil_refs[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
6253       iris_pack_command(GENX(3DSTATE_WM_DEPTH_STENCIL), &stencil_refs, wmds) {
6254          wmds.StencilReferenceValue = p_stencil_refs->ref_value[0];
6255          wmds.BackfaceStencilReferenceValue = p_stencil_refs->ref_value[1];
6256          wmds.StencilTestMaskModifyDisable = true;
6257          wmds.StencilWriteMaskModifyDisable = true;
6258          wmds.StencilStateModifyDisable = true;
6259          wmds.DepthStateModifyDisable = true;
6260       }
6261       iris_batch_emit(batch, stencil_refs, sizeof(stencil_refs));
6262 #endif
6263    }
6264 
6265    if (dirty & IRIS_DIRTY_SCISSOR_RECT) {
6266       /* Wa_1409725701:
6267        *    "The viewport-specific state used by the SF unit (SCISSOR_RECT) is
6268        *    stored as an array of up to 16 elements. The location of first
6269        *    element of the array, as specified by Pointer to SCISSOR_RECT,
6270        *    should be aligned to a 64-byte boundary."
6271        */
6272       uint32_t alignment = 64;
6273       uint32_t scissor_offset =
6274          emit_state(batch, ice->state.dynamic_uploader,
6275                     &ice->state.last_res.scissor,
6276                     ice->state.scissors,
6277                     sizeof(struct pipe_scissor_state) *
6278                     ice->state.num_viewports, alignment);
6279 
6280       iris_emit_cmd(batch, GENX(3DSTATE_SCISSOR_STATE_POINTERS), ptr) {
6281          ptr.ScissorRectPointer = scissor_offset;
6282       }
6283    }
6284 
6285    if (dirty & IRIS_DIRTY_DEPTH_BUFFER) {
6286       struct iris_depth_buffer_state *cso_z = &ice->state.genx->depth_buffer;
6287 
6288       /* Do not emit the cso yet. We may need to update clear params first. */
6289       struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
6290       struct iris_resource *zres = NULL, *sres = NULL;
6291       if (cso_fb->zsbuf) {
6292          iris_get_depth_stencil_resources(cso_fb->zsbuf->texture,
6293                                           &zres, &sres);
6294       }
6295 
6296       if (zres && ice->state.hiz_usage != ISL_AUX_USAGE_NONE) {
6297          uint32_t *clear_params =
6298             cso_z->packets + ARRAY_SIZE(cso_z->packets) -
6299             GENX(3DSTATE_CLEAR_PARAMS_length);
6300 
6301          iris_pack_command(GENX(3DSTATE_CLEAR_PARAMS), clear_params, clear) {
6302             clear.DepthClearValueValid = true;
6303             clear.DepthClearValue = zres->aux.clear_color.f32[0];
6304          }
6305       }
6306 
6307       iris_batch_emit(batch, cso_z->packets, sizeof(cso_z->packets));
6308 
6309       if (zres)
6310          genX(emit_depth_state_workarounds)(ice, batch, &zres->surf);
6311 
6312       if (GFX_VER >= 12) {
6313          /* Wa_1408224581
6314           *
6315           * Workaround: Gfx12LP A-step only. An additional pipe control with
6316           * post-sync = store dword operation would be required (the w/a is to
6317           * have an additional pipe control after the stencil state whenever
6318           * the surface state bits of this state are changing).
6319           */
6320          iris_emit_pipe_control_write(batch, "WA for stencil state",
6321                                       PIPE_CONTROL_WRITE_IMMEDIATE,
6322                                       batch->screen->workaround_address.bo,
6323                                       batch->screen->workaround_address.offset, 0);
6324       }
6325    }
6326 
6327    if (dirty & (IRIS_DIRTY_DEPTH_BUFFER | IRIS_DIRTY_WM_DEPTH_STENCIL)) {
6328       /* Listen for buffer changes, and also write enable changes. */
6329       struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
6330       pin_depth_and_stencil_buffers(batch, cso_fb->zsbuf, ice->state.cso_zsa);
6331    }
6332 
6333    if (dirty & IRIS_DIRTY_POLYGON_STIPPLE) {
6334       iris_emit_cmd(batch, GENX(3DSTATE_POLY_STIPPLE_PATTERN), poly) {
6335          for (int i = 0; i < 32; i++) {
6336             poly.PatternRow[i] = ice->state.poly_stipple.stipple[i];
6337          }
6338       }
6339    }
6340 
6341    if (dirty & IRIS_DIRTY_LINE_STIPPLE) {
6342       struct iris_rasterizer_state *cso = ice->state.cso_rast;
6343       iris_batch_emit(batch, cso->line_stipple, sizeof(cso->line_stipple));
6344    }
6345 
6346    if (dirty & IRIS_DIRTY_VF_TOPOLOGY) {
6347       iris_emit_cmd(batch, GENX(3DSTATE_VF_TOPOLOGY), topo) {
6348          topo.PrimitiveTopologyType =
6349             translate_prim_type(draw->mode, ice->state.vertices_per_patch);
6350       }
6351    }
6352 
6353    if (dirty & IRIS_DIRTY_VERTEX_BUFFERS) {
6354       int count = util_bitcount64(ice->state.bound_vertex_buffers);
6355       uint64_t dynamic_bound = ice->state.bound_vertex_buffers;
6356 
6357       if (ice->state.vs_uses_draw_params) {
6358          assert(ice->draw.draw_params.res);
6359 
6360          struct iris_vertex_buffer_state *state =
6361             &(ice->state.genx->vertex_buffers[count]);
6362          pipe_resource_reference(&state->resource, ice->draw.draw_params.res);
6363          struct iris_resource *res = (void *) state->resource;
6364 
6365          iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) {
6366             vb.VertexBufferIndex = count;
6367             vb.AddressModifyEnable = true;
6368             vb.BufferPitch = 0;
6369             vb.BufferSize = res->bo->size - ice->draw.draw_params.offset;
6370             vb.BufferStartingAddress =
6371                ro_bo(NULL, res->bo->address +
6372                            (int) ice->draw.draw_params.offset);
6373             vb.MOCS = iris_mocs(res->bo, &batch->screen->isl_dev,
6374                                 ISL_SURF_USAGE_VERTEX_BUFFER_BIT);
6375 #if GFX_VER >= 12
6376             vb.L3BypassDisable       = true;
6377 #endif
6378          }
6379          dynamic_bound |= 1ull << count;
6380          count++;
6381       }
6382 
6383       if (ice->state.vs_uses_derived_draw_params) {
6384          struct iris_vertex_buffer_state *state =
6385             &(ice->state.genx->vertex_buffers[count]);
6386          pipe_resource_reference(&state->resource,
6387                                  ice->draw.derived_draw_params.res);
6388          struct iris_resource *res = (void *) ice->draw.derived_draw_params.res;
6389 
6390          iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) {
6391              vb.VertexBufferIndex = count;
6392             vb.AddressModifyEnable = true;
6393             vb.BufferPitch = 0;
6394             vb.BufferSize =
6395                res->bo->size - ice->draw.derived_draw_params.offset;
6396             vb.BufferStartingAddress =
6397                ro_bo(NULL, res->bo->address +
6398                            (int) ice->draw.derived_draw_params.offset);
6399             vb.MOCS = iris_mocs(res->bo, &batch->screen->isl_dev,
6400                                 ISL_SURF_USAGE_VERTEX_BUFFER_BIT);
6401 #if GFX_VER >= 12
6402             vb.L3BypassDisable       = true;
6403 #endif
6404          }
6405          dynamic_bound |= 1ull << count;
6406          count++;
6407       }
6408 
6409       if (count) {
6410 #if GFX_VER >= 11
6411          /* Gfx11+ doesn't need the cache workaround below */
6412          uint64_t bound = dynamic_bound;
6413          while (bound) {
6414             const int i = u_bit_scan64(&bound);
6415             iris_use_optional_res(batch, genx->vertex_buffers[i].resource,
6416                                   false, IRIS_DOMAIN_VF_READ);
6417          }
6418 #else
6419          /* The VF cache designers cut corners, and made the cache key's
6420           * <VertexBufferIndex, Memory Address> tuple only consider the bottom
6421           * 32 bits of the address.  If you have two vertex buffers which get
6422           * placed exactly 4 GiB apart and use them in back-to-back draw calls,
6423           * you can get collisions (even within a single batch).
6424           *
6425           * So, we need to do a VF cache invalidate if the buffer for a VB
6426           * slot changes its [48:32] address bits from the previous time.
6427           */
6428          unsigned flush_flags = 0;
6429 
6430          uint64_t bound = dynamic_bound;
6431          while (bound) {
6432             const int i = u_bit_scan64(&bound);
6433             uint16_t high_bits = 0;
6434 
6435             struct iris_resource *res =
6436                (void *) genx->vertex_buffers[i].resource;
6437             if (res) {
6438                iris_use_pinned_bo(batch, res->bo, false, IRIS_DOMAIN_VF_READ);
6439 
6440                high_bits = res->bo->address >> 32ull;
6441                if (high_bits != ice->state.last_vbo_high_bits[i]) {
6442                   flush_flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE |
6443                                  PIPE_CONTROL_CS_STALL;
6444                   ice->state.last_vbo_high_bits[i] = high_bits;
6445                }
6446             }
6447          }
6448 
6449          if (flush_flags) {
6450             iris_emit_pipe_control_flush(batch,
6451                                          "workaround: VF cache 32-bit key [VB]",
6452                                          flush_flags);
6453          }
6454 #endif
6455 
6456          const unsigned vb_dwords = GENX(VERTEX_BUFFER_STATE_length);
6457 
6458          uint32_t *map =
6459             iris_get_command_space(batch, 4 * (1 + vb_dwords * count));
6460          _iris_pack_command(batch, GENX(3DSTATE_VERTEX_BUFFERS), map, vb) {
6461             vb.DWordLength = (vb_dwords * count + 1) - 2;
6462          }
6463          map += 1;
6464 
6465          bound = dynamic_bound;
6466          while (bound) {
6467             const int i = u_bit_scan64(&bound);
6468             memcpy(map, genx->vertex_buffers[i].state,
6469                    sizeof(uint32_t) * vb_dwords);
6470             map += vb_dwords;
6471          }
6472       }
6473    }
6474 
6475    if (dirty & IRIS_DIRTY_VERTEX_ELEMENTS) {
6476       struct iris_vertex_element_state *cso = ice->state.cso_vertex_elements;
6477       const unsigned entries = MAX2(cso->count, 1);
6478       if (!(ice->state.vs_needs_sgvs_element ||
6479             ice->state.vs_uses_derived_draw_params ||
6480             ice->state.vs_needs_edge_flag)) {
6481          iris_batch_emit(batch, cso->vertex_elements, sizeof(uint32_t) *
6482                          (1 + entries * GENX(VERTEX_ELEMENT_STATE_length)));
6483       } else {
6484          uint32_t dynamic_ves[1 + 33 * GENX(VERTEX_ELEMENT_STATE_length)];
6485          const unsigned dyn_count = cso->count +
6486             ice->state.vs_needs_sgvs_element +
6487             ice->state.vs_uses_derived_draw_params;
6488 
6489          iris_pack_command(GENX(3DSTATE_VERTEX_ELEMENTS),
6490                            &dynamic_ves, ve) {
6491             ve.DWordLength =
6492                1 + GENX(VERTEX_ELEMENT_STATE_length) * dyn_count - 2;
6493          }
6494          memcpy(&dynamic_ves[1], &cso->vertex_elements[1],
6495                 (cso->count - ice->state.vs_needs_edge_flag) *
6496                 GENX(VERTEX_ELEMENT_STATE_length) * sizeof(uint32_t));
6497          uint32_t *ve_pack_dest =
6498             &dynamic_ves[1 + (cso->count - ice->state.vs_needs_edge_flag) *
6499                          GENX(VERTEX_ELEMENT_STATE_length)];
6500 
6501          if (ice->state.vs_needs_sgvs_element) {
6502             uint32_t base_ctrl = ice->state.vs_uses_draw_params ?
6503                                  VFCOMP_STORE_SRC : VFCOMP_STORE_0;
6504             iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
6505                ve.Valid = true;
6506                ve.VertexBufferIndex =
6507                   util_bitcount64(ice->state.bound_vertex_buffers);
6508                ve.SourceElementFormat = ISL_FORMAT_R32G32_UINT;
6509                ve.Component0Control = base_ctrl;
6510                ve.Component1Control = base_ctrl;
6511                ve.Component2Control = VFCOMP_STORE_0;
6512                ve.Component3Control = VFCOMP_STORE_0;
6513             }
6514             ve_pack_dest += GENX(VERTEX_ELEMENT_STATE_length);
6515          }
6516          if (ice->state.vs_uses_derived_draw_params) {
6517             iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
6518                ve.Valid = true;
6519                ve.VertexBufferIndex =
6520                   util_bitcount64(ice->state.bound_vertex_buffers) +
6521                   ice->state.vs_uses_draw_params;
6522                ve.SourceElementFormat = ISL_FORMAT_R32G32_UINT;
6523                ve.Component0Control = VFCOMP_STORE_SRC;
6524                ve.Component1Control = VFCOMP_STORE_SRC;
6525                ve.Component2Control = VFCOMP_STORE_0;
6526                ve.Component3Control = VFCOMP_STORE_0;
6527             }
6528             ve_pack_dest += GENX(VERTEX_ELEMENT_STATE_length);
6529          }
6530          if (ice->state.vs_needs_edge_flag) {
6531             for (int i = 0; i < GENX(VERTEX_ELEMENT_STATE_length);  i++)
6532                ve_pack_dest[i] = cso->edgeflag_ve[i];
6533          }
6534 
6535          iris_batch_emit(batch, &dynamic_ves, sizeof(uint32_t) *
6536                          (1 + dyn_count * GENX(VERTEX_ELEMENT_STATE_length)));
6537       }
6538 
6539       if (!ice->state.vs_needs_edge_flag) {
6540          iris_batch_emit(batch, cso->vf_instancing, sizeof(uint32_t) *
6541                          entries * GENX(3DSTATE_VF_INSTANCING_length));
6542       } else {
6543          assert(cso->count > 0);
6544          const unsigned edgeflag_index = cso->count - 1;
6545          uint32_t dynamic_vfi[33 * GENX(3DSTATE_VF_INSTANCING_length)];
6546          memcpy(&dynamic_vfi[0], cso->vf_instancing, edgeflag_index *
6547                 GENX(3DSTATE_VF_INSTANCING_length) * sizeof(uint32_t));
6548 
6549          uint32_t *vfi_pack_dest = &dynamic_vfi[0] +
6550             edgeflag_index * GENX(3DSTATE_VF_INSTANCING_length);
6551          iris_pack_command(GENX(3DSTATE_VF_INSTANCING), vfi_pack_dest, vi) {
6552             vi.VertexElementIndex = edgeflag_index +
6553                ice->state.vs_needs_sgvs_element +
6554                ice->state.vs_uses_derived_draw_params;
6555          }
6556          for (int i = 0; i < GENX(3DSTATE_VF_INSTANCING_length);  i++)
6557             vfi_pack_dest[i] |= cso->edgeflag_vfi[i];
6558 
6559          iris_batch_emit(batch, &dynamic_vfi[0], sizeof(uint32_t) *
6560                          entries * GENX(3DSTATE_VF_INSTANCING_length));
6561       }
6562    }
6563 
6564    if (dirty & IRIS_DIRTY_VF_SGVS) {
6565       const struct brw_vs_prog_data *vs_prog_data = (void *)
6566          ice->shaders.prog[MESA_SHADER_VERTEX]->prog_data;
6567       struct iris_vertex_element_state *cso = ice->state.cso_vertex_elements;
6568 
6569       iris_emit_cmd(batch, GENX(3DSTATE_VF_SGVS), sgv) {
6570          if (vs_prog_data->uses_vertexid) {
6571             sgv.VertexIDEnable = true;
6572             sgv.VertexIDComponentNumber = 2;
6573             sgv.VertexIDElementOffset =
6574                cso->count - ice->state.vs_needs_edge_flag;
6575          }
6576 
6577          if (vs_prog_data->uses_instanceid) {
6578             sgv.InstanceIDEnable = true;
6579             sgv.InstanceIDComponentNumber = 3;
6580             sgv.InstanceIDElementOffset =
6581                cso->count - ice->state.vs_needs_edge_flag;
6582          }
6583       }
6584    }
6585 
6586    if (dirty & IRIS_DIRTY_VF) {
6587       iris_emit_cmd(batch, GENX(3DSTATE_VF), vf) {
6588          if (draw->primitive_restart) {
6589             vf.IndexedDrawCutIndexEnable = true;
6590             vf.CutIndex = draw->restart_index;
6591          }
6592       }
6593    }
6594 
6595    if (dirty & IRIS_DIRTY_VF_STATISTICS) {
6596       iris_emit_cmd(batch, GENX(3DSTATE_VF_STATISTICS), vf) {
6597          vf.StatisticsEnable = true;
6598       }
6599    }
6600 
6601 #if GFX_VER == 8
6602    if (dirty & IRIS_DIRTY_PMA_FIX) {
6603       bool enable = want_pma_fix(ice);
6604       genX(update_pma_fix)(ice, batch, enable);
6605    }
6606 #endif
6607 
6608    if (ice->state.current_hash_scale != 1)
6609       genX(emit_hashing_mode)(ice, batch, UINT_MAX, UINT_MAX, 1);
6610 
6611 #if GFX_VER >= 12
6612    genX(invalidate_aux_map_state)(batch);
6613 #endif
6614 }
6615 
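/**
 * Emit buffer barriers for every currently bound vertex buffer, ensuring
 * that prior writes to those buffers are visible to vertex fetch reads.
 */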
6616 static void
6617 flush_vbos(struct iris_context *ice, struct iris_batch *batch)
6618 {
6619    struct iris_genx_state *genx = ice->state.genx;
6620    uint64_t bound = ice->state.bound_vertex_buffers;
6621    while (bound) {
6622       const int i = u_bit_scan64(&bound);
6623       struct iris_bo *bo = iris_resource_bo(genx->vertex_buffers[i].resource);
6624       iris_emit_buffer_barrier_for(batch, bo, IRIS_DOMAIN_VF_READ);
6625    }
6626 }
6627 
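/**
 * Upload the remaining dirty render state and emit a 3DPRIMITIVE command
 * for the given draw, handling the index buffer, indirect draw parameters,
 * and predication for indirect multi-draws along the way.
 */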
6628 static void
6629 iris_upload_render_state(struct iris_context *ice,
6630                          struct iris_batch *batch,
6631                          const struct pipe_draw_info *draw,
6632                          unsigned drawid_offset,
6633                          const struct pipe_draw_indirect_info *indirect,
6634                          const struct pipe_draw_start_count_bias *sc)
6635 {
6636    bool use_predicate = ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT;
6637 
6638    if (ice->state.dirty & IRIS_DIRTY_VERTEX_BUFFER_FLUSHES)
6639       flush_vbos(ice, batch);
6640 
6641    iris_batch_sync_region_start(batch);
6642 
6643    /* Always pin the binder.  If we're emitting new binding table pointers,
6644     * we need it.  If not, we're probably inheriting old tables via the
6645     * context, and need it anyway.  Since true zero-bindings cases are
6646     * practically non-existent, just pin it and avoid last_res tracking.
6647     */
6648    iris_use_pinned_bo(batch, ice->state.binder.bo, false,
6649                       IRIS_DOMAIN_NONE);
6650 
6651    if (!batch->contains_draw) {
6652       if (GFX_VER == 12) {
6653          /* Re-emit constants when starting a new batch buffer in order to
6654           * work around push constant corruption on context switch.
6655           *
6656           * XXX - Provide hardware spec quotation when available.
6657           */
6658          ice->state.stage_dirty |= (IRIS_STAGE_DIRTY_CONSTANTS_VS  |
6659                                     IRIS_STAGE_DIRTY_CONSTANTS_TCS |
6660                                     IRIS_STAGE_DIRTY_CONSTANTS_TES |
6661                                     IRIS_STAGE_DIRTY_CONSTANTS_GS  |
6662                                     IRIS_STAGE_DIRTY_CONSTANTS_FS);
6663       }
6664       batch->contains_draw = true;
6665    }
6666 
6667    if (!batch->contains_draw_with_next_seqno) {
6668       iris_restore_render_saved_bos(ice, batch, draw);
6669       batch->contains_draw_with_next_seqno = true;
6670    }
6671 
6672    iris_upload_dirty_render_state(ice, batch, draw);
6673 
6674    if (draw->index_size > 0) {
6675       unsigned offset;
6676 
6677       if (draw->has_user_indices) {
6678          unsigned start_offset = draw->index_size * sc->start;
6679 
6680          u_upload_data(ice->ctx.const_uploader, start_offset,
6681                        sc->count * draw->index_size, 4,
6682                        (char*)draw->index.user + start_offset,
6683                        &offset, &ice->state.last_res.index_buffer);
6684          offset -= start_offset;
6685       } else {
6686          struct iris_resource *res = (void *) draw->index.resource;
6687          res->bind_history |= PIPE_BIND_INDEX_BUFFER;
6688 
6689          pipe_resource_reference(&ice->state.last_res.index_buffer,
6690                                  draw->index.resource);
6691          offset = 0;
6692 
6693          iris_emit_buffer_barrier_for(batch, res->bo, IRIS_DOMAIN_VF_READ);
6694       }
6695 
6696       struct iris_genx_state *genx = ice->state.genx;
6697       struct iris_bo *bo = iris_resource_bo(ice->state.last_res.index_buffer);
6698 
6699       uint32_t ib_packet[GENX(3DSTATE_INDEX_BUFFER_length)];
6700       iris_pack_command(GENX(3DSTATE_INDEX_BUFFER), ib_packet, ib) {
6701          ib.IndexFormat = draw->index_size >> 1;
6702          ib.MOCS = iris_mocs(bo, &batch->screen->isl_dev,
6703                              ISL_SURF_USAGE_INDEX_BUFFER_BIT);
6704          ib.BufferSize = bo->size - offset;
6705          ib.BufferStartingAddress = ro_bo(NULL, bo->address + offset);
6706 #if GFX_VER >= 12
6707          ib.L3BypassDisable       = true;
6708 #endif
6709       }
6710 
6711       if (memcmp(genx->last_index_buffer, ib_packet, sizeof(ib_packet)) != 0) {
6712          memcpy(genx->last_index_buffer, ib_packet, sizeof(ib_packet));
6713          iris_batch_emit(batch, ib_packet, sizeof(ib_packet));
6714          iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_VF_READ);
6715       }
6716 
6717 #if GFX_VER < 11
6718       /* The VF cache key only uses 32-bits, see vertex buffer comment above */
6719       uint16_t high_bits = bo->address >> 32ull;
6720       if (high_bits != ice->state.last_index_bo_high_bits) {
6721          iris_emit_pipe_control_flush(batch,
6722                                       "workaround: VF cache 32-bit key [IB]",
6723                                       PIPE_CONTROL_VF_CACHE_INVALIDATE |
6724                                       PIPE_CONTROL_CS_STALL);
6725          ice->state.last_index_bo_high_bits = high_bits;
6726       }
6727 #endif
6728    }
6729 
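/* MMIO registers that supply 3DPRIMITIVE's draw parameters when Indirect
 * Parameter Enable is set.  For indirect draws, we load them from the
 * indirect buffer with MI_LOAD_REGISTER_MEM below.
 */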
6730 #define _3DPRIM_END_OFFSET          0x2420
6731 #define _3DPRIM_START_VERTEX        0x2430
6732 #define _3DPRIM_VERTEX_COUNT        0x2434
6733 #define _3DPRIM_INSTANCE_COUNT      0x2438
6734 #define _3DPRIM_START_INSTANCE      0x243C
6735 #define _3DPRIM_BASE_VERTEX         0x2440
6736 
6737    if (indirect && !indirect->count_from_stream_output) {
6738       if (indirect->indirect_draw_count) {
6739          use_predicate = true;
6740 
6741          struct iris_bo *draw_count_bo =
6742             iris_resource_bo(indirect->indirect_draw_count);
6743          unsigned draw_count_offset =
6744             indirect->indirect_draw_count_offset;
6745 
6746          if (ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT) {
6747             struct mi_builder b;
6748             mi_builder_init(&b, &batch->screen->devinfo, batch);
6749 
6750             /* comparison = draw id < draw count */
6751             struct mi_value comparison =
6752                mi_ult(&b, mi_imm(drawid_offset),
6753                           mi_mem32(ro_bo(draw_count_bo, draw_count_offset)));
6754 
6755             /* predicate = comparison & conditional rendering predicate */
6756             mi_store(&b, mi_reg32(MI_PREDICATE_RESULT),
6757                          mi_iand(&b, comparison, mi_reg32(CS_GPR(15))));
6758          } else {
6759             uint32_t mi_predicate;
6760 
6761             /* Upload the id of the current primitive to MI_PREDICATE_SRC1. */
6762             iris_load_register_imm64(batch, MI_PREDICATE_SRC1, drawid_offset);
6763             /* Upload the current draw count from the draw parameters buffer
6764              * to MI_PREDICATE_SRC0.
6765              */
6766             iris_load_register_mem32(batch, MI_PREDICATE_SRC0,
6767                                      draw_count_bo, draw_count_offset);
6768             /* Zero the top 32-bits of MI_PREDICATE_SRC0 */
6769             iris_load_register_imm32(batch, MI_PREDICATE_SRC0 + 4, 0);
6770 
6771             if (drawid_offset == 0) {
6772                mi_predicate = MI_PREDICATE | MI_PREDICATE_LOADOP_LOADINV |
6773                               MI_PREDICATE_COMBINEOP_SET |
6774                               MI_PREDICATE_COMPAREOP_SRCS_EQUAL;
6775             } else {
6776                /* While draw_index < draw_count the predicate's result will be
6777                 *  (draw_index == draw_count) ^ TRUE = TRUE
6778                 * When draw_index == draw_count the result is
6779                 *  (TRUE) ^ TRUE = FALSE
6780                 * After this all results will be:
6781                 *  (FALSE) ^ FALSE = FALSE
6782                 */
6783                mi_predicate = MI_PREDICATE | MI_PREDICATE_LOADOP_LOAD |
6784                               MI_PREDICATE_COMBINEOP_XOR |
6785                               MI_PREDICATE_COMPAREOP_SRCS_EQUAL;
6786             }
6787             iris_batch_emit(batch, &mi_predicate, sizeof(uint32_t));
6788          }
6789       }
6790       struct iris_bo *bo = iris_resource_bo(indirect->buffer);
6791       assert(bo);
6792 
6793       iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6794          lrm.RegisterAddress = _3DPRIM_VERTEX_COUNT;
6795          lrm.MemoryAddress = ro_bo(bo, indirect->offset + 0);
6796       }
6797       iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6798          lrm.RegisterAddress = _3DPRIM_INSTANCE_COUNT;
6799          lrm.MemoryAddress = ro_bo(bo, indirect->offset + 4);
6800       }
6801       iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6802          lrm.RegisterAddress = _3DPRIM_START_VERTEX;
6803          lrm.MemoryAddress = ro_bo(bo, indirect->offset + 8);
6804       }
6805       if (draw->index_size) {
6806          iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6807             lrm.RegisterAddress = _3DPRIM_BASE_VERTEX;
6808             lrm.MemoryAddress = ro_bo(bo, indirect->offset + 12);
6809          }
6810          iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6811             lrm.RegisterAddress = _3DPRIM_START_INSTANCE;
6812             lrm.MemoryAddress = ro_bo(bo, indirect->offset + 16);
6813          }
6814       } else {
6815          iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6816             lrm.RegisterAddress = _3DPRIM_START_INSTANCE;
6817             lrm.MemoryAddress = ro_bo(bo, indirect->offset + 12);
6818          }
6819          iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
6820             lri.RegisterOffset = _3DPRIM_BASE_VERTEX;
6821             lri.DataDWord = 0;
6822          }
6823       }
6824    } else if (indirect && indirect->count_from_stream_output) {
6825       struct iris_stream_output_target *so =
6826          (void *) indirect->count_from_stream_output;
6827 
6828       /* XXX: Replace with actual cache tracking */
6829       iris_emit_pipe_control_flush(batch,
6830                                    "draw count from stream output stall",
6831                                    PIPE_CONTROL_CS_STALL);
6832 
6833       struct mi_builder b;
6834       mi_builder_init(&b, &batch->screen->devinfo, batch);
6835 
6836       struct iris_address addr =
6837          ro_bo(iris_resource_bo(so->offset.res), so->offset.offset);
6838       struct mi_value offset =
6839          mi_iadd_imm(&b, mi_mem32(addr), -so->base.buffer_offset);
6840 
6841       mi_store(&b, mi_reg32(_3DPRIM_VERTEX_COUNT),
6842                    mi_udiv32_imm(&b, offset, so->stride));
6843 
6844       _iris_emit_lri(batch, _3DPRIM_START_VERTEX, 0);
6845       _iris_emit_lri(batch, _3DPRIM_BASE_VERTEX, 0);
6846       _iris_emit_lri(batch, _3DPRIM_START_INSTANCE, 0);
6847       _iris_emit_lri(batch, _3DPRIM_INSTANCE_COUNT, draw->instance_count);
6848    }
6849 
6850    iris_measure_snapshot(ice, batch, INTEL_SNAPSHOT_DRAW, draw, indirect, sc);
6851 
6852    iris_emit_cmd(batch, GENX(3DPRIMITIVE), prim) {
6853       prim.VertexAccessType = draw->index_size > 0 ? RANDOM : SEQUENTIAL;
6854       prim.PredicateEnable = use_predicate;
6855 
6856       if (indirect) {
6857          prim.IndirectParameterEnable = true;
6858       } else {
6859          prim.StartInstanceLocation = draw->start_instance;
6860          prim.InstanceCount = draw->instance_count;
6861          prim.VertexCountPerInstance = sc->count;
6862 
6863          prim.StartVertexLocation = sc->start;
6864 
6865          if (draw->index_size) {
6866             prim.BaseVertexLocation += sc->index_bias;
6867          }
6868       }
6869    }
6870 
6871    iris_batch_sync_region_end(batch);
6872 }
6873 
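/**
 * Load the dispatch dimensions for an indirect compute grid from the
 * grid size buffer into the GPGPU_DISPATCHDIM{X,Y,Z} registers.
 */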
6874 static void
6875 iris_load_indirect_location(struct iris_context *ice,
6876                             struct iris_batch *batch,
6877                             const struct pipe_grid_info *grid)
6878 {
6879 #define GPGPU_DISPATCHDIMX 0x2500
6880 #define GPGPU_DISPATCHDIMY 0x2504
6881 #define GPGPU_DISPATCHDIMZ 0x2508
6882 
6883    assert(grid->indirect);
6884 
6885    struct iris_state_ref *grid_size = &ice->state.grid_size;
6886    struct iris_bo *bo = iris_resource_bo(grid_size->res);
6887    iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6888       lrm.RegisterAddress = GPGPU_DISPATCHDIMX;
6889       lrm.MemoryAddress = ro_bo(bo, grid_size->offset + 0);
6890    }
6891    iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6892       lrm.RegisterAddress = GPGPU_DISPATCHDIMY;
6893       lrm.MemoryAddress = ro_bo(bo, grid_size->offset + 4);
6894    }
6895    iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6896       lrm.RegisterAddress = GPGPU_DISPATCHDIMZ;
6897       lrm.MemoryAddress = ro_bo(bo, grid_size->offset + 8);
6898    }
6899 }
6900 
6901 #if GFX_VERx10 >= 125
6902 
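/**
 * Emit a compute dispatch using COMPUTE_WALKER (GFX_VERx10 >= 125),
 * re-emitting CFE_STATE when the compute shader has changed.
 */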
6903 static void
6904 iris_upload_compute_walker(struct iris_context *ice,
6905                            struct iris_batch *batch,
6906                            const struct pipe_grid_info *grid)
6907 {
6908    const uint64_t stage_dirty = ice->state.stage_dirty;
6909    struct iris_screen *screen = batch->screen;
6910    const struct intel_device_info *devinfo = &screen->devinfo;
6911    struct iris_binder *binder = &ice->state.binder;
6912    struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
6913    struct iris_compiled_shader *shader =
6914       ice->shaders.prog[MESA_SHADER_COMPUTE];
6915    struct brw_stage_prog_data *prog_data = shader->prog_data;
6916    struct brw_cs_prog_data *cs_prog_data = (void *) prog_data;
6917    const struct brw_cs_dispatch_info dispatch =
6918       brw_cs_get_dispatch_info(devinfo, cs_prog_data, grid->block);
6919 
6920    if (stage_dirty & IRIS_STAGE_DIRTY_CS) {
6921       iris_emit_cmd(batch, GENX(CFE_STATE), cfe) {
6922          cfe.MaximumNumberofThreads =
6923             devinfo->max_cs_threads * devinfo->subslice_total - 1;
6924          if (prog_data->total_scratch > 0) {
6925             cfe.ScratchSpaceBuffer =
6926                iris_get_scratch_surf(ice, prog_data->total_scratch)->offset >> 4;
6927          }
6928       }
6929    }
6930 
6931    if (grid->indirect)
6932       iris_load_indirect_location(ice, batch, grid);
6933 
6934    iris_emit_cmd(batch, GENX(COMPUTE_WALKER), cw) {
6935       cw.IndirectParameterEnable        = grid->indirect;
6936       cw.SIMDSize                       = dispatch.simd_size / 16;
6937       cw.LocalXMaximum                  = grid->block[0] - 1;
6938       cw.LocalYMaximum                  = grid->block[1] - 1;
6939       cw.LocalZMaximum                  = grid->block[2] - 1;
6940       cw.ThreadGroupIDXDimension        = grid->grid[0];
6941       cw.ThreadGroupIDYDimension        = grid->grid[1];
6942       cw.ThreadGroupIDZDimension        = grid->grid[2];
6943       cw.ExecutionMask                  = dispatch.right_mask;
6944 
6945       cw.InterfaceDescriptor = (struct GENX(INTERFACE_DESCRIPTOR_DATA)) {
6946          .KernelStartPointer = KSP(shader),
6947          .NumberofThreadsinGPGPUThreadGroup = dispatch.threads,
6948          .SharedLocalMemorySize =
6949             encode_slm_size(GFX_VER, prog_data->total_shared),
6950          .NumberOfBarriers = cs_prog_data->uses_barrier,
6951          .SamplerStatePointer = shs->sampler_table.offset,
6952          .BindingTablePointer = binder->bt_offset[MESA_SHADER_COMPUTE],
6953       };
6954 
6955       assert(brw_cs_push_const_total_size(cs_prog_data, dispatch.threads) == 0);
6956    }
6957 
6958 }
6959 
6960 #else /* #if GFX_VERx10 >= 125 */
6961 
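/**
 * Emit a compute dispatch using GPGPU_WALKER (GFX_VERx10 < 125),
 * re-emitting MEDIA_VFE_STATE, MEDIA_CURBE_LOAD, and
 * MEDIA_INTERFACE_DESCRIPTOR_LOAD as needed.
 */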
6962 static void
6963 iris_upload_gpgpu_walker(struct iris_context *ice,
6964                          struct iris_batch *batch,
6965                          const struct pipe_grid_info *grid)
6966 {
6967    const uint64_t stage_dirty = ice->state.stage_dirty;
6968    struct iris_screen *screen = batch->screen;
6969    const struct intel_device_info *devinfo = &screen->devinfo;
6970    struct iris_binder *binder = &ice->state.binder;
6971    struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
6972    struct iris_uncompiled_shader *ish =
6973       ice->shaders.uncompiled[MESA_SHADER_COMPUTE];
6974    struct iris_compiled_shader *shader =
6975       ice->shaders.prog[MESA_SHADER_COMPUTE];
6976    struct brw_stage_prog_data *prog_data = shader->prog_data;
6977    struct brw_cs_prog_data *cs_prog_data = (void *) prog_data;
6978    const struct brw_cs_dispatch_info dispatch =
6979       brw_cs_get_dispatch_info(devinfo, cs_prog_data, grid->block);
6980 
6981    if ((stage_dirty & IRIS_STAGE_DIRTY_CS) ||
6982        cs_prog_data->local_size[0] == 0 /* Variable local group size */) {
6983       /* The MEDIA_VFE_STATE documentation for Gfx8+ says:
6984        *
6985        *   "A stalling PIPE_CONTROL is required before MEDIA_VFE_STATE unless
6986        *    the only bits that are changed are scoreboard related: Scoreboard
6987        *    Enable, Scoreboard Type, Scoreboard Mask, Scoreboard Delta.  For
6988        *    these scoreboard related states, a MEDIA_STATE_FLUSH is
6989        *    sufficient."
6990        */
6991       iris_emit_pipe_control_flush(batch,
6992                                    "workaround: stall before MEDIA_VFE_STATE",
6993                                    PIPE_CONTROL_CS_STALL);
6994 
6995       iris_emit_cmd(batch, GENX(MEDIA_VFE_STATE), vfe) {
6996          if (prog_data->total_scratch) {
6997             uint32_t scratch_addr =
6998                pin_scratch_space(ice, batch, prog_data, MESA_SHADER_COMPUTE);
6999 
7000             vfe.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11;
7001             vfe.ScratchSpaceBasePointer =
7002                rw_bo(NULL, scratch_addr, IRIS_DOMAIN_NONE);
7003          }
7004 
7005          vfe.MaximumNumberofThreads =
7006             devinfo->max_cs_threads * devinfo->subslice_total - 1;
7007 #if GFX_VER < 11
7008          vfe.ResetGatewayTimer =
7009             Resettingrelativetimerandlatchingtheglobaltimestamp;
7010 #endif
7011 #if GFX_VER == 8
7012          vfe.BypassGatewayControl = true;
7013 #endif
7014          vfe.NumberofURBEntries = 2;
7015          vfe.URBEntryAllocationSize = 2;
7016 
7017          vfe.CURBEAllocationSize =
7018             ALIGN(cs_prog_data->push.per_thread.regs * dispatch.threads +
7019                   cs_prog_data->push.cross_thread.regs, 2);
7020       }
7021    }
7022 
7023    /* TODO: Combine subgroup-id with cbuf0 so we can push regular uniforms */
7024    if ((stage_dirty & IRIS_STAGE_DIRTY_CS) ||
7025        cs_prog_data->local_size[0] == 0 /* Variable local group size */) {
7026       uint32_t curbe_data_offset = 0;
7027       assert(cs_prog_data->push.cross_thread.dwords == 0 &&
7028              cs_prog_data->push.per_thread.dwords == 1 &&
7029              cs_prog_data->base.param[0] == BRW_PARAM_BUILTIN_SUBGROUP_ID);
7030       const unsigned push_const_size =
7031          brw_cs_push_const_total_size(cs_prog_data, dispatch.threads);
7032       uint32_t *curbe_data_map =
7033          stream_state(batch, ice->state.dynamic_uploader,
7034                       &ice->state.last_res.cs_thread_ids,
7035                       ALIGN(push_const_size, 64), 64,
7036                       &curbe_data_offset);
7037       assert(curbe_data_map);
7038       memset(curbe_data_map, 0x5a, ALIGN(push_const_size, 64));
7039       iris_fill_cs_push_const_buffer(cs_prog_data, dispatch.threads,
7040                                      curbe_data_map);
7041 
7042       iris_emit_cmd(batch, GENX(MEDIA_CURBE_LOAD), curbe) {
7043          curbe.CURBETotalDataLength = ALIGN(push_const_size, 64);
7044          curbe.CURBEDataStartAddress = curbe_data_offset;
7045       }
7046    }
7047 
7048    for (unsigned i = 0; i < IRIS_MAX_GLOBAL_BINDINGS; i++) {
7049       struct pipe_resource *res = ice->state.global_bindings[i];
7050       if (!res)
7051          continue;
7052 
7053       iris_use_pinned_bo(batch, iris_resource_bo(res),
7054                          true, IRIS_DOMAIN_NONE);
7055    }
7056 
7057    if (stage_dirty & (IRIS_STAGE_DIRTY_SAMPLER_STATES_CS |
7058                       IRIS_STAGE_DIRTY_BINDINGS_CS |
7059                       IRIS_STAGE_DIRTY_CONSTANTS_CS |
7060                       IRIS_STAGE_DIRTY_CS)) {
7061       uint32_t desc[GENX(INTERFACE_DESCRIPTOR_DATA_length)];
7062 
7063       iris_pack_state(GENX(INTERFACE_DESCRIPTOR_DATA), desc, idd) {
7064          idd.SharedLocalMemorySize =
7065             encode_slm_size(GFX_VER, ish->kernel_shared_size);
7066          idd.KernelStartPointer =
7067             KSP(shader) + brw_cs_prog_data_prog_offset(cs_prog_data,
7068                                                        dispatch.simd_size);
7069          idd.SamplerStatePointer = shs->sampler_table.offset;
7070          idd.BindingTablePointer = binder->bt_offset[MESA_SHADER_COMPUTE];
7071          idd.NumberofThreadsinGPGPUThreadGroup = dispatch.threads;
7072       }
7073 
7074       for (int i = 0; i < GENX(INTERFACE_DESCRIPTOR_DATA_length); i++)
7075          desc[i] |= ((uint32_t *) shader->derived_data)[i];
7076 
7077       iris_emit_cmd(batch, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), load) {
7078          load.InterfaceDescriptorTotalLength =
7079             GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
7080          load.InterfaceDescriptorDataStartAddress =
7081             emit_state(batch, ice->state.dynamic_uploader,
7082                        &ice->state.last_res.cs_desc, desc, sizeof(desc), 64);
7083       }
7084    }
7085 
7086    if (grid->indirect)
7087       iris_load_indirect_location(ice, batch, grid);
7088 
7089    iris_measure_snapshot(ice, batch, INTEL_SNAPSHOT_COMPUTE, NULL, NULL, NULL);
7090 
7091    iris_emit_cmd(batch, GENX(GPGPU_WALKER), ggw) {
7092       ggw.IndirectParameterEnable    = grid->indirect != NULL;
7093       ggw.SIMDSize                   = dispatch.simd_size / 16;
7094       ggw.ThreadDepthCounterMaximum  = 0;
7095       ggw.ThreadHeightCounterMaximum = 0;
7096       ggw.ThreadWidthCounterMaximum  = dispatch.threads - 1;
7097       ggw.ThreadGroupIDXDimension    = grid->grid[0];
7098       ggw.ThreadGroupIDYDimension    = grid->grid[1];
7099       ggw.ThreadGroupIDZDimension    = grid->grid[2];
7100       ggw.RightExecutionMask         = dispatch.right_mask;
7101       ggw.BottomExecutionMask        = 0xffffffff;
7102    }
7103 
7104    iris_emit_cmd(batch, GENX(MEDIA_STATE_FLUSH), msf);
7105 }
7106 
7107 #endif /* #if GFX_VERx10 >= 125 */
7108 
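/**
 * Upload dirty compute-related state (bindings, samplers, system values)
 * and dispatch the grid with the generation-appropriate walker command.
 */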
7109 static void
7110 iris_upload_compute_state(struct iris_context *ice,
7111                           struct iris_batch *batch,
7112                           const struct pipe_grid_info *grid)
7113 {
7114    const uint64_t stage_dirty = ice->state.stage_dirty;
7115    struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
7116    struct iris_compiled_shader *shader =
7117       ice->shaders.prog[MESA_SHADER_COMPUTE];
7118 
7119    iris_batch_sync_region_start(batch);
7120 
7121    /* Always pin the binder.  If we're emitting new binding table pointers,
7122     * we need it.  If not, we're probably inheriting old tables via the
7123     * context, and need it anyway.  Since true zero-bindings cases are
7124     * practically non-existent, just pin it and avoid last_res tracking.
7125     */
7126    iris_use_pinned_bo(batch, ice->state.binder.bo, false, IRIS_DOMAIN_NONE);
7127 
7128    if (((stage_dirty & IRIS_STAGE_DIRTY_CONSTANTS_CS) &&
7129         shs->sysvals_need_upload) ||
7130        shader->kernel_input_size > 0)
7131       upload_sysvals(ice, MESA_SHADER_COMPUTE, grid);
7132 
7133    if (stage_dirty & IRIS_STAGE_DIRTY_BINDINGS_CS)
7134       iris_populate_binding_table(ice, batch, MESA_SHADER_COMPUTE, false);
7135 
7136    if (stage_dirty & IRIS_STAGE_DIRTY_SAMPLER_STATES_CS)
7137       iris_upload_sampler_states(ice, MESA_SHADER_COMPUTE);
7138 
7139    iris_use_optional_res(batch, shs->sampler_table.res, false,
7140                          IRIS_DOMAIN_NONE);
7141    iris_use_pinned_bo(batch, iris_resource_bo(shader->assembly.res), false,
7142                       IRIS_DOMAIN_NONE);
7143 
7144    if (ice->state.need_border_colors)
7145       iris_use_pinned_bo(batch, ice->state.border_color_pool.bo, false,
7146                          IRIS_DOMAIN_NONE);
7147 
7148 #if GFX_VER >= 12
7149    genX(invalidate_aux_map_state)(batch);
7150 #endif
7151 
7152 #if GFX_VERx10 >= 125
7153    iris_upload_compute_walker(ice, batch, grid);
7154 #else
7155    iris_upload_gpgpu_walker(ice, batch, grid);
7156 #endif
7157 
7158    if (!batch->contains_draw_with_next_seqno) {
7159       iris_restore_compute_saved_bos(ice, batch, grid);
7160       batch->contains_draw_with_next_seqno = batch->contains_draw = true;
7161    }
7162 
7163    iris_batch_sync_region_end(batch);
7164 }
7165 
7166 /**
7167  * State module teardown.
7168  */
7169 static void
7170 iris_destroy_state(struct iris_context *ice)
7171 {
7172    struct iris_genx_state *genx = ice->state.genx;
7173 
7174    pipe_resource_reference(&ice->draw.draw_params.res, NULL);
7175    pipe_resource_reference(&ice->draw.derived_draw_params.res, NULL);
7176 
7177    /* Loop over all VBOs, including ones for draw parameters */
7178    for (unsigned i = 0; i < ARRAY_SIZE(genx->vertex_buffers); i++) {
7179       pipe_resource_reference(&genx->vertex_buffers[i].resource, NULL);
7180    }
7181 
7182    free(ice->state.genx);
7183 
7184    for (int i = 0; i < 4; i++) {
7185       pipe_so_target_reference(&ice->state.so_target[i], NULL);
7186    }
7187 
7188    for (unsigned i = 0; i < ice->state.framebuffer.nr_cbufs; i++) {
7189       pipe_surface_reference(&ice->state.framebuffer.cbufs[i], NULL);
7190    }
7191    pipe_surface_reference(&ice->state.framebuffer.zsbuf, NULL);
7192 
7193    for (int stage = 0; stage < MESA_SHADER_STAGES; stage++) {
7194       struct iris_shader_state *shs = &ice->state.shaders[stage];
7195       pipe_resource_reference(&shs->sampler_table.res, NULL);
7196       for (int i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++) {
7197          pipe_resource_reference(&shs->constbuf[i].buffer, NULL);
7198          pipe_resource_reference(&shs->constbuf_surf_state[i].res, NULL);
7199       }
7200       for (int i = 0; i < PIPE_MAX_SHADER_IMAGES; i++) {
7201          pipe_resource_reference(&shs->image[i].base.resource, NULL);
7202          pipe_resource_reference(&shs->image[i].surface_state.ref.res, NULL);
7203          free(shs->image[i].surface_state.cpu);
7204       }
7205       for (int i = 0; i < PIPE_MAX_SHADER_BUFFERS; i++) {
7206          pipe_resource_reference(&shs->ssbo[i].buffer, NULL);
7207          pipe_resource_reference(&shs->ssbo_surf_state[i].res, NULL);
7208       }
7209       for (int i = 0; i < IRIS_MAX_TEXTURE_SAMPLERS; i++) {
7210          pipe_sampler_view_reference((struct pipe_sampler_view **)
7211                                      &shs->textures[i], NULL);
7212       }
7213    }
7214 
7215    pipe_resource_reference(&ice->state.grid_size.res, NULL);
7216    pipe_resource_reference(&ice->state.grid_surf_state.res, NULL);
7217 
7218    pipe_resource_reference(&ice->state.null_fb.res, NULL);
7219    pipe_resource_reference(&ice->state.unbound_tex.res, NULL);
7220 
7221    pipe_resource_reference(&ice->state.last_res.cc_vp, NULL);
7222    pipe_resource_reference(&ice->state.last_res.sf_cl_vp, NULL);
7223    pipe_resource_reference(&ice->state.last_res.color_calc, NULL);
7224    pipe_resource_reference(&ice->state.last_res.scissor, NULL);
7225    pipe_resource_reference(&ice->state.last_res.blend, NULL);
7226    pipe_resource_reference(&ice->state.last_res.index_buffer, NULL);
7227    pipe_resource_reference(&ice->state.last_res.cs_thread_ids, NULL);
7228    pipe_resource_reference(&ice->state.last_res.cs_desc, NULL);
7229 }
7230 
7231 /* ------------------------------------------------------------------- */
7232 
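/**
 * Rewrite any cached state packets and surface states that reference a
 * buffer whose backing storage (and thus GPU address) has changed, and
 * flag the corresponding dirty bits so they get re-emitted.
 */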
7233 static void
7234 iris_rebind_buffer(struct iris_context *ice,
7235                    struct iris_resource *res)
7236 {
7237    struct pipe_context *ctx = &ice->ctx;
7238    struct iris_genx_state *genx = ice->state.genx;
7239 
7240    assert(res->base.b.target == PIPE_BUFFER);
7241 
7242    /* Buffers can't be framebuffer attachments, nor display related,
7243     * and we don't have upstream Clover support.
7244     */
7245    assert(!(res->bind_history & (PIPE_BIND_DEPTH_STENCIL |
7246                                  PIPE_BIND_RENDER_TARGET |
7247                                  PIPE_BIND_BLENDABLE |
7248                                  PIPE_BIND_DISPLAY_TARGET |
7249                                  PIPE_BIND_CURSOR |
7250                                  PIPE_BIND_COMPUTE_RESOURCE |
7251                                  PIPE_BIND_GLOBAL)));
7252 
7253    if (res->bind_history & PIPE_BIND_VERTEX_BUFFER) {
7254       uint64_t bound_vbs = ice->state.bound_vertex_buffers;
7255       while (bound_vbs) {
7256          const int i = u_bit_scan64(&bound_vbs);
7257          struct iris_vertex_buffer_state *state = &genx->vertex_buffers[i];
7258 
7259          /* Update the CPU struct */
7260          STATIC_ASSERT(GENX(VERTEX_BUFFER_STATE_BufferStartingAddress_start) == 32);
7261          STATIC_ASSERT(GENX(VERTEX_BUFFER_STATE_BufferStartingAddress_bits) == 64);
7262          uint64_t *addr = (uint64_t *) &state->state[1];
7263          struct iris_bo *bo = iris_resource_bo(state->resource);
7264 
7265          if (*addr != bo->address + state->offset) {
7266             *addr = bo->address + state->offset;
7267             ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS |
7268                                 IRIS_DIRTY_VERTEX_BUFFER_FLUSHES;
7269          }
7270       }
7271    }
7272 
7273    /* We don't need to handle PIPE_BIND_INDEX_BUFFER here: we re-emit
7274     * the 3DSTATE_INDEX_BUFFER packet whenever the address changes.
7275     *
7276     * There is also no need to handle these:
7277     * - PIPE_BIND_COMMAND_ARGS_BUFFER (emitted for every indirect draw)
7278     * - PIPE_BIND_QUERY_BUFFER (no persistent state references)
7279     */
7280 
7281    if (res->bind_history & PIPE_BIND_STREAM_OUTPUT) {
7282       uint32_t *so_buffers = genx->so_buffers;
7283       for (unsigned i = 0; i < 4; i++,
7284            so_buffers += GENX(3DSTATE_SO_BUFFER_length)) {
7285 
7286          /* There are no other fields in bits 127:64 */
7287          uint64_t *addr = (uint64_t *) &so_buffers[2];
7288          STATIC_ASSERT(GENX(3DSTATE_SO_BUFFER_SurfaceBaseAddress_start) == 66);
7289          STATIC_ASSERT(GENX(3DSTATE_SO_BUFFER_SurfaceBaseAddress_bits) == 46);
7290 
7291          struct pipe_stream_output_target *tgt = ice->state.so_target[i];
7292          if (tgt) {
7293             struct iris_bo *bo = iris_resource_bo(tgt->buffer);
7294             if (*addr != bo->address + tgt->buffer_offset) {
7295                *addr = bo->address + tgt->buffer_offset;
7296                ice->state.dirty |= IRIS_DIRTY_SO_BUFFERS;
7297             }
7298          }
7299       }
7300    }
7301 
7302    for (int s = MESA_SHADER_VERTEX; s < MESA_SHADER_STAGES; s++) {
7303       struct iris_shader_state *shs = &ice->state.shaders[s];
7304       enum pipe_shader_type p_stage = stage_to_pipe(s);
7305 
7306       if (!(res->bind_stages & (1 << s)))
7307          continue;
7308 
7309       if (res->bind_history & PIPE_BIND_CONSTANT_BUFFER) {
7310          /* Skip constant buffer 0, it's for regular uniforms, not UBOs */
7311          uint32_t bound_cbufs = shs->bound_cbufs & ~1u;
7312          while (bound_cbufs) {
7313             const int i = u_bit_scan(&bound_cbufs);
7314             struct pipe_shader_buffer *cbuf = &shs->constbuf[i];
7315             struct iris_state_ref *surf_state = &shs->constbuf_surf_state[i];
7316 
7317             if (res->bo == iris_resource_bo(cbuf->buffer)) {
7318                pipe_resource_reference(&surf_state->res, NULL);
7319                shs->dirty_cbufs |= 1u << i;
7320                ice->state.dirty |= (IRIS_DIRTY_RENDER_MISC_BUFFER_FLUSHES |
7321                                     IRIS_DIRTY_COMPUTE_MISC_BUFFER_FLUSHES);
7322                ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_VS << s;
7323             }
7324          }
7325       }
7326 
7327       if (res->bind_history & PIPE_BIND_SHADER_BUFFER) {
7328          uint32_t bound_ssbos = shs->bound_ssbos;
7329          while (bound_ssbos) {
7330             const int i = u_bit_scan(&bound_ssbos);
7331             struct pipe_shader_buffer *ssbo = &shs->ssbo[i];
7332 
7333             if (res->bo == iris_resource_bo(ssbo->buffer)) {
7334                struct pipe_shader_buffer buf = {
7335                   .buffer = &res->base.b,
7336                   .buffer_offset = ssbo->buffer_offset,
7337                   .buffer_size = ssbo->buffer_size,
7338                };
7339                iris_set_shader_buffers(ctx, p_stage, i, 1, &buf,
7340                                        (shs->writable_ssbos >> i) & 1);
7341             }
7342          }
7343       }
7344 
7345       if (res->bind_history & PIPE_BIND_SAMPLER_VIEW) {
7346          uint32_t bound_sampler_views = shs->bound_sampler_views;
7347          while (bound_sampler_views) {
7348             const int i = u_bit_scan(&bound_sampler_views);
7349             struct iris_sampler_view *isv = shs->textures[i];
7350             struct iris_bo *bo = isv->res->bo;
7351 
7352             if (update_surface_state_addrs(ice->state.surface_uploader,
7353                                            &isv->surface_state, bo)) {
7354                ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << s;
7355             }
7356          }
7357       }
7358 
7359       if (res->bind_history & PIPE_BIND_SHADER_IMAGE) {
7360          uint32_t bound_image_views = shs->bound_image_views;
7361          while (bound_image_views) {
7362             const int i = u_bit_scan(&bound_image_views);
7363             struct iris_image_view *iv = &shs->image[i];
7364             struct iris_bo *bo = iris_resource_bo(iv->base.resource);
7365 
7366             if (update_surface_state_addrs(ice->state.surface_uploader,
7367                                            &iv->surface_state, bo)) {
7368                ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << s;
7369             }
7370          }
7371       }
7372    }
7373 }
7374 
7375 /* ------------------------------------------------------------------- */
7376 
7377 /**
7378  * Introduce a batch synchronization boundary, and update its cache coherency
7379  * status to reflect the execution of a PIPE_CONTROL command with the
7380  * specified flags.
7381  */
7382 static void
7383 batch_mark_sync_for_pipe_control(struct iris_batch *batch, uint32_t flags)
7384 {
7385    iris_batch_sync_boundary(batch);
7386 
7387    if ((flags & PIPE_CONTROL_CS_STALL)) {
7388       if ((flags & PIPE_CONTROL_RENDER_TARGET_FLUSH))
7389          iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_RENDER_WRITE);
7390 
7391       if ((flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH))
7392          iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_DEPTH_WRITE);
7393 
7394       if ((flags & PIPE_CONTROL_DATA_CACHE_FLUSH))
7395          iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_DATA_WRITE);
7396 
7397       if ((flags & PIPE_CONTROL_FLUSH_ENABLE))
7398          iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_OTHER_WRITE);
7399 
7400       if ((flags & (PIPE_CONTROL_CACHE_FLUSH_BITS |
7401                     PIPE_CONTROL_STALL_AT_SCOREBOARD))) {
7402          iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_VF_READ);
7403          iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_OTHER_READ);
7404       }
7405    }
7406 
7407    if ((flags & PIPE_CONTROL_RENDER_TARGET_FLUSH))
7408       iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_RENDER_WRITE);
7409 
7410    if ((flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH))
7411       iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_DEPTH_WRITE);
7412 
7413    if ((flags & PIPE_CONTROL_DATA_CACHE_FLUSH))
7414       iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_DATA_WRITE);
7415 
7416    if ((flags & PIPE_CONTROL_FLUSH_ENABLE))
7417       iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_OTHER_WRITE);
7418 
7419    if ((flags & PIPE_CONTROL_VF_CACHE_INVALIDATE))
7420       iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_VF_READ);
7421 
7422    if ((flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE) &&
7423        (flags & PIPE_CONTROL_CONST_CACHE_INVALIDATE))
7424       iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_OTHER_READ);
7425 }
7426 
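/**
 * Translate PIPE_CONTROL write flags into the corresponding hardware
 * Post Sync Operation field value.
 */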
7427 static unsigned
7428 flags_to_post_sync_op(uint32_t flags)
7429 {
7430    if (flags & PIPE_CONTROL_WRITE_IMMEDIATE)
7431       return WriteImmediateData;
7432 
7433    if (flags & PIPE_CONTROL_WRITE_DEPTH_COUNT)
7434       return WritePSDepthCount;
7435 
7436    if (flags & PIPE_CONTROL_WRITE_TIMESTAMP)
7437       return WriteTimestamp;
7438 
7439    return 0;
7440 }
7441 
7442 /**
7443  * Do the given flags have a Post Sync or LRI Post Sync operation?
7444  */
7445 static enum pipe_control_flags
7446 get_post_sync_flags(enum pipe_control_flags flags)
7447 {
7448    flags &= PIPE_CONTROL_WRITE_IMMEDIATE |
7449             PIPE_CONTROL_WRITE_DEPTH_COUNT |
7450             PIPE_CONTROL_WRITE_TIMESTAMP |
7451             PIPE_CONTROL_LRI_POST_SYNC_OP;
7452 
7453    /* Only one "Post Sync Op" is allowed, and it's mutually exclusive with
7454     * "LRI Post Sync Operation".  So more than one bit set would be illegal.
7455     */
7456    assert(util_bitcount(flags) <= 1);
7457 
7458    return flags;
7459 }
7460 
7461 #define IS_COMPUTE_PIPELINE(batch) (batch->name == IRIS_BATCH_COMPUTE)
7462 
7463 /**
7464  * Emit a series of PIPE_CONTROL commands, taking into account any
7465  * workarounds necessary to actually accomplish the caller's request.
7466  *
7467  * Unless otherwise noted, spec quotations in this function come from:
7468  *
7469  * Synchronization of the 3D Pipeline > PIPE_CONTROL Command > Programming
7470  * Restrictions for PIPE_CONTROL.
7471  *
7472  * You should not use this function directly.  Use the helpers in
7473  * iris_pipe_control.c instead, which may split the pipe control further.
7474  */
7475 static void
7476 iris_emit_raw_pipe_control(struct iris_batch *batch,
7477                            const char *reason,
7478                            uint32_t flags,
7479                            struct iris_bo *bo,
7480                            uint32_t offset,
7481                            uint64_t imm)
7482 {
7483    UNUSED const struct intel_device_info *devinfo = &batch->screen->devinfo;
7484    enum pipe_control_flags post_sync_flags = get_post_sync_flags(flags);
7485    enum pipe_control_flags non_lri_post_sync_flags =
7486       post_sync_flags & ~PIPE_CONTROL_LRI_POST_SYNC_OP;
7487 
7488    /* Recursive PIPE_CONTROL workarounds --------------------------------
7489     * (http://knowyourmeme.com/memes/xzibit-yo-dawg)
7490     *
7491     * We do these first because we want to look at the original operation,
7492     * rather than any workarounds we set.
7493     */
7494    if (GFX_VER == 9 && (flags & PIPE_CONTROL_VF_CACHE_INVALIDATE)) {
7495       /* The PIPE_CONTROL "VF Cache Invalidation Enable" bit description
7496        * lists several workarounds:
7497        *
7498        *    "Project: SKL, KBL, BXT
7499        *
7500        *     If the VF Cache Invalidation Enable is set to a 1 in a
7501        *     PIPE_CONTROL, a separate Null PIPE_CONTROL, all bitfields
7502        *     sets to 0, with the VF Cache Invalidation Enable set to 0
7503        *     needs to be sent prior to the PIPE_CONTROL with VF Cache
7504        *     Invalidation Enable set to a 1."
7505        */
7506       iris_emit_raw_pipe_control(batch,
7507                                  "workaround: recursive VF cache invalidate",
7508                                  0, NULL, 0, 0);
7509    }
7510 
7511    /* Wa_1409226450, Wait for EU to be idle before pipe control which
7512     * invalidates the instruction cache
7513     */
7514    if (GFX_VER == 12 && (flags & PIPE_CONTROL_INSTRUCTION_INVALIDATE)) {
7515       iris_emit_raw_pipe_control(batch,
7516                                  "workaround: CS stall before instruction "
7517                                  "cache invalidate",
7518                                  PIPE_CONTROL_CS_STALL |
7519                                  PIPE_CONTROL_STALL_AT_SCOREBOARD, bo, offset,
7520                                  imm);
7521    }
7522 
7523    if ((GFX_VER == 9 || (GFX_VER == 12 && devinfo->revision == 0 /* A0*/)) &&
7524         IS_COMPUTE_PIPELINE(batch) && post_sync_flags) {
7525       /* Project: SKL / Argument: LRI Post Sync Operation [23]
7526        *
7527        * "PIPECONTROL command with “Command Streamer Stall Enable” must be
7528        *  programmed prior to programming a PIPECONTROL command with "LRI
7529        *  Post Sync Operation" in GPGPU mode of operation (i.e when
7530        *  PIPELINE_SELECT command is set to GPGPU mode of operation)."
7531        *
7532        * The same text exists a few rows below for Post Sync Op.
7533        *
7534        * On Gfx12 this is Wa_1607156449.
7535        */
7536       iris_emit_raw_pipe_control(batch,
7537                                  "workaround: CS stall before gpgpu post-sync",
7538                                  PIPE_CONTROL_CS_STALL, bo, offset, imm);
7539    }
7540 
7541    /* "Flush Types" workarounds ---------------------------------------------
7542     * We do these now because they may add post-sync operations or CS stalls.
7543     */
7544 
7545    if (GFX_VER < 11 && flags & PIPE_CONTROL_VF_CACHE_INVALIDATE) {
7546       /* Project: BDW, SKL+ (stopping at CNL) / Argument: VF Invalidate
7547        *
7548        * "'Post Sync Operation' must be enabled to 'Write Immediate Data' or
7549        *  'Write PS Depth Count' or 'Write Timestamp'."
7550        */
7551       if (!bo) {
7552          flags |= PIPE_CONTROL_WRITE_IMMEDIATE;
7553          post_sync_flags |= PIPE_CONTROL_WRITE_IMMEDIATE;
7554          non_lri_post_sync_flags |= PIPE_CONTROL_WRITE_IMMEDIATE;
7555          bo = batch->screen->workaround_address.bo;
7556          offset = batch->screen->workaround_address.offset;
7557       }
7558    }
7559 
7560    if (flags & PIPE_CONTROL_DEPTH_STALL) {
7561       /* From the PIPE_CONTROL instruction table, bit 13 (Depth Stall Enable):
7562        *
7563        *    "This bit must be DISABLED for operations other than writing
7564        *     PS_DEPTH_COUNT."
7565        *
7566        * This seems like nonsense.  An Ivybridge workaround requires us to
7567        * emit a PIPE_CONTROL with a depth stall and write immediate post-sync
7568        * operation.  Gfx8+ requires us to emit depth stalls and depth cache
7569        * flushes together.  So, it's hard to imagine this means anything other
7570        * than "we originally intended this to be used for PS_DEPTH_COUNT".
7571        *
7572        * We ignore the supposed restriction and do nothing.
7573        */
7574    }
7575 
7576    if (flags & (PIPE_CONTROL_RENDER_TARGET_FLUSH |
7577                 PIPE_CONTROL_STALL_AT_SCOREBOARD)) {
7578       /* From the PIPE_CONTROL instruction table, bit 12 and bit 1:
7579        *
7580        *    "This bit must be DISABLED for End-of-pipe (Read) fences,
7581        *     PS_DEPTH_COUNT or TIMESTAMP queries."
7582        *
7583        * TODO: Implement end-of-pipe checking.
7584        */
7585       assert(!(post_sync_flags & (PIPE_CONTROL_WRITE_DEPTH_COUNT |
7586                                   PIPE_CONTROL_WRITE_TIMESTAMP)));
7587    }
7588 
7589    if (GFX_VER < 11 && (flags & PIPE_CONTROL_STALL_AT_SCOREBOARD)) {
7590       /* From the PIPE_CONTROL instruction table, bit 1:
7591        *
7592        *    "This bit is ignored if Depth Stall Enable is set.
7593        *     Further, the render cache is not flushed even if Write Cache
7594        *     Flush Enable bit is set."
7595        *
7596        * We assert that the caller doesn't do this combination, to try and
7597        * prevent mistakes.  It shouldn't hurt the GPU, though.
7598        *
7599        * We skip this check on Gfx11+ as the "Stall at Pixel Scoreboard"
7600        * and "Render Target Flush" combo is explicitly required for BTI
7601        * update workarounds.
7602        */
7603       assert(!(flags & (PIPE_CONTROL_DEPTH_STALL |
7604                         PIPE_CONTROL_RENDER_TARGET_FLUSH)));
7605    }
7606 
7607    /* PIPE_CONTROL page workarounds ------------------------------------- */
7608 
7609    if (GFX_VER <= 8 && (flags & PIPE_CONTROL_STATE_CACHE_INVALIDATE)) {
7610       /* From the PIPE_CONTROL page itself:
7611        *
7612        *    "IVB, HSW, BDW
7613        *     Restriction: Pipe_control with CS-stall bit set must be issued
7614        *     before a pipe-control command that has the State Cache
7615        *     Invalidate bit set."
7616        */
7617       flags |= PIPE_CONTROL_CS_STALL;
7618    }
7619 
7620    if (flags & PIPE_CONTROL_FLUSH_LLC) {
7621       /* From the PIPE_CONTROL instruction table, bit 26 (Flush LLC):
7622        *
7623        *    "Project: ALL
7624        *     SW must always program Post-Sync Operation to "Write Immediate
7625        *     Data" when Flush LLC is set."
7626        *
7627        * For now, we just require the caller to do it.
7628        */
7629       assert(flags & PIPE_CONTROL_WRITE_IMMEDIATE);
7630    }
7631 
7632    /* "Post-Sync Operation" workarounds -------------------------------- */
7633 
7634    /* Project: All / Argument: Global Snapshot Count Reset [19]
7635     *
7636     * "This bit must not be exercised on any product.
7637     *  Requires stall bit ([20] of DW1) set."
7638     *
7639     * We don't use this, so we just assert that it isn't used.  The
7640     * PIPE_CONTROL instruction page indicates that they intended this
7641     * as a debug feature and don't think it is useful in production,
7642     * but it may actually be usable, should we ever want to.
7643     */
7644    assert((flags & PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET) == 0);
7645 
7646    if (flags & (PIPE_CONTROL_MEDIA_STATE_CLEAR |
7647                 PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE)) {
7648       /* Project: All / Arguments:
7649        *
7650        * - Generic Media State Clear [16]
7651        * - Indirect State Pointers Disable [16]
7652        *
7653        *    "Requires stall bit ([20] of DW1) set."
7654        *
7655        * Also, the PIPE_CONTROL instruction table, bit 16 (Generic Media
7656        * State Clear) says:
7657        *
7658        *    "PIPECONTROL command with “Command Streamer Stall Enable” must be
7659        *     programmed prior to programming a PIPECONTROL command with "Media
7660        *     State Clear" set in GPGPU mode of operation"
7661        *
7662        * This is a subset of the earlier rule, so there's nothing to do.
7663        */
7664       flags |= PIPE_CONTROL_CS_STALL;
7665    }
7666 
7667    if (flags & PIPE_CONTROL_STORE_DATA_INDEX) {
7668       /* Project: All / Argument: Store Data Index
7669        *
7670        * "Post-Sync Operation ([15:14] of DW1) must be set to something other
7671        *  than '0'."
7672        *
7673        * For now, we just assert that the caller does this.  We might want to
7674        * automatically add a write to the workaround BO...
7675        */
7676       assert(non_lri_post_sync_flags != 0);
7677    }
7678 
7679    if (flags & PIPE_CONTROL_SYNC_GFDT) {
7680       /* Project: All / Argument: Sync GFDT
7681        *
7682        * "Post-Sync Operation ([15:14] of DW1) must be set to something other
7683        *  than '0' or 0x2520[13] must be set."
7684        *
7685        * For now, we just assert that the caller does this.
7686        */
7687       assert(non_lri_post_sync_flags != 0);
7688    }
7689 
7690    if (flags & PIPE_CONTROL_TLB_INVALIDATE) {
7691       /* Project: IVB+ / Argument: TLB inv
7692        *
7693        *    "Requires stall bit ([20] of DW1) set."
7694        *
7695        * Also, from the PIPE_CONTROL instruction table:
7696        *
7697        *    "Project: SKL+
7698        *     Post Sync Operation or CS stall must be set to ensure a TLB
7699        *     invalidation occurs.  Otherwise no cycle will occur to the TLB
7700        *     cache to invalidate."
7701        *
7702        * This is not a subset of the earlier rule, so there's nothing to do.
7703        */
7704       flags |= PIPE_CONTROL_CS_STALL;
7705    }
7706 
7707    if (GFX_VER == 9 && devinfo->gt == 4) {
7708       /* TODO: The big Skylake GT4 post sync op workaround */
7709    }
7710 
7711    /* "GPGPU specific workarounds" (both post-sync and flush) ------------ */
7712 
7713    if (IS_COMPUTE_PIPELINE(batch)) {
7714       if (GFX_VER >= 9 && (flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE)) {
7715          /* Project: SKL+ / Argument: Tex Invalidate
7716           * "Requires stall bit ([20] of DW) set for all GPGPU Workloads."
7717           */
7718          flags |= PIPE_CONTROL_CS_STALL;
7719       }
7720 
7721       if (GFX_VER == 8 && (post_sync_flags ||
7722                            (flags & (PIPE_CONTROL_NOTIFY_ENABLE |
7723                                      PIPE_CONTROL_DEPTH_STALL |
7724                                      PIPE_CONTROL_RENDER_TARGET_FLUSH |
7725                                      PIPE_CONTROL_DEPTH_CACHE_FLUSH |
7726                                      PIPE_CONTROL_DATA_CACHE_FLUSH)))) {
7727          /* Project: BDW / Arguments:
7728           *
7729           * - LRI Post Sync Operation   [23]
7730           * - Post Sync Op              [15:14]
7731           * - Notify En                 [8]
7732           * - Depth Stall               [13]
7733           * - Render Target Cache Flush [12]
7734           * - Depth Cache Flush         [0]
7735           * - DC Flush Enable           [5]
7736           *
7737           *    "Requires stall bit ([20] of DW) set for all GPGPU and Media
7738           *     Workloads."
7739           */
7740          flags |= PIPE_CONTROL_CS_STALL;
7741 
7742          /* Also, from the PIPE_CONTROL instruction table, bit 20:
7743           *
7744           *    "Project: BDW
7745           *     This bit must be always set when PIPE_CONTROL command is
7746           *     programmed by GPGPU and MEDIA workloads, except for the cases
7747           *     when only Read Only Cache Invalidation bits are set (State
7748           *     Cache Invalidation Enable, Instruction cache Invalidation
7749           *     Enable, Texture Cache Invalidation Enable, Constant Cache
7750           *     Invalidation Enable). This is to WA FFDOP CG issue, this WA
7751           *     need not implemented when FF_DOP_CG is disable via "Fixed
7752           *     Function DOP Clock Gate Disable" bit in RC_PSMI_CTRL register."
7753           *
7754           * It sounds like we could avoid CS stalls in some cases, but we
7755          * don't currently bother.  Note that this list doesn't exactly match
7756          * the argument list above, either...
7757           */
7758       }
7759    }
7760 
7761    /* "Stall" workarounds ----------------------------------------------
7762     * These have to come after the earlier ones because we may have added
7763     * some additional CS stalls above.
7764     */
7765 
7766    if (GFX_VER < 9 && (flags & PIPE_CONTROL_CS_STALL)) {
7767       /* Project: PRE-SKL, VLV, CHV
7768        *
7769        * "[All Stepping][All SKUs]:
7770        *
7771        *  One of the following must also be set:
7772        *
7773        *  - Render Target Cache Flush Enable ([12] of DW1)
7774        *  - Depth Cache Flush Enable ([0] of DW1)
7775        *  - Stall at Pixel Scoreboard ([1] of DW1)
7776        *  - Depth Stall ([13] of DW1)
7777        *  - Post-Sync Operation ([15:14] of DW1)
7778        *  - DC Flush Enable ([5] of DW1)"
7779        *
7780        * If we don't already have one of those bits set, we choose to add
7781        * "Stall at Pixel Scoreboard".  Some of the other bits require a
7782        * CS stall as a workaround (see above), which would send us into
7783        * an infinite recursion of PIPE_CONTROLs.  "Stall at Pixel Scoreboard"
7784        * appears to be safe, so we choose that.
7785        */
7786       const uint32_t wa_bits = PIPE_CONTROL_RENDER_TARGET_FLUSH |
7787                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
7788                                PIPE_CONTROL_WRITE_IMMEDIATE |
7789                                PIPE_CONTROL_WRITE_DEPTH_COUNT |
7790                                PIPE_CONTROL_WRITE_TIMESTAMP |
7791                                PIPE_CONTROL_STALL_AT_SCOREBOARD |
7792                                PIPE_CONTROL_DEPTH_STALL |
7793                                PIPE_CONTROL_DATA_CACHE_FLUSH;
7794       if (!(flags & wa_bits))
7795          flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
7796    }
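
   /* Worked example of the rule above (assuming a Gfx8 part): a request for a
    * bare PIPE_CONTROL_CS_STALL has none of the wa_bits set, so it is emitted
    * as
    *
    *    PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD
    *
    * instead, which satisfies the "one of the following must also be set"
    * requirement without triggering any further workarounds.
    */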
7797 
7798    if (GFX_VER >= 12 && (flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH)) {
7799       /* Wa_1409600907:
7800        *
7801        * "PIPE_CONTROL with Depth Stall Enable bit must be set
7802        * with any PIPE_CONTROL with Depth Flush Enable bit set."
7803        */
7804       flags |= PIPE_CONTROL_DEPTH_STALL;
7805    }
7806 
7807    /* Emit --------------------------------------------------------------- */
7808 
7809    if (INTEL_DEBUG(DEBUG_PIPE_CONTROL)) {
7810       fprintf(stderr,
7811               "  PC [%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%"PRIx64"]: %s\n",
7812               (flags & PIPE_CONTROL_FLUSH_ENABLE) ? "PipeCon " : "",
7813               (flags & PIPE_CONTROL_CS_STALL) ? "CS " : "",
7814               (flags & PIPE_CONTROL_STALL_AT_SCOREBOARD) ? "Scoreboard " : "",
7815               (flags & PIPE_CONTROL_VF_CACHE_INVALIDATE) ? "VF " : "",
7816               (flags & PIPE_CONTROL_RENDER_TARGET_FLUSH) ? "RT " : "",
7817               (flags & PIPE_CONTROL_CONST_CACHE_INVALIDATE) ? "Const " : "",
7818               (flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE) ? "TC " : "",
7819               (flags & PIPE_CONTROL_DATA_CACHE_FLUSH) ? "DC " : "",
7820               (flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH) ? "ZFlush " : "",
7821               (flags & PIPE_CONTROL_TILE_CACHE_FLUSH) ? "Tile " : "",
7822               (flags & PIPE_CONTROL_DEPTH_STALL) ? "ZStall " : "",
7823               (flags & PIPE_CONTROL_STATE_CACHE_INVALIDATE) ? "State " : "",
7824               (flags & PIPE_CONTROL_TLB_INVALIDATE) ? "TLB " : "",
7825               (flags & PIPE_CONTROL_INSTRUCTION_INVALIDATE) ? "Inst " : "",
7826               (flags & PIPE_CONTROL_MEDIA_STATE_CLEAR) ? "MediaClear " : "",
7827               (flags & PIPE_CONTROL_NOTIFY_ENABLE) ? "Notify " : "",
7828               (flags & PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET) ?
7829                  "SnapRes" : "",
7830               (flags & PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE) ?
7831                   "ISPDis" : "",
7832               (flags & PIPE_CONTROL_WRITE_IMMEDIATE) ? "WriteImm " : "",
7833               (flags & PIPE_CONTROL_WRITE_DEPTH_COUNT) ? "WriteZCount " : "",
7834               (flags & PIPE_CONTROL_WRITE_TIMESTAMP) ? "WriteTimestamp " : "",
7835               (flags & PIPE_CONTROL_FLUSH_HDC) ? "HDC " : "",
7836               imm, reason);
7837    }
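
   /* The decode above is only printed when pipe-control debugging is enabled;
    * assuming the usual Mesa debug-variable spelling for this flag, that is
    * something like:
    *
    *    INTEL_DEBUG=pc ./my_gl_app
    */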
7838 
7839    batch_mark_sync_for_pipe_control(batch, flags);
7840    iris_batch_sync_region_start(batch);
7841 
7842    iris_emit_cmd(batch, GENX(PIPE_CONTROL), pc) {
7843 #if GFX_VER >= 12
7844       pc.TileCacheFlushEnable = flags & PIPE_CONTROL_TILE_CACHE_FLUSH;
7845 #endif
7846 #if GFX_VER >= 11
7847       pc.HDCPipelineFlushEnable = flags & PIPE_CONTROL_FLUSH_HDC;
7848 #endif
7849       pc.LRIPostSyncOperation = NoLRIOperation;
7850       pc.PipeControlFlushEnable = flags & PIPE_CONTROL_FLUSH_ENABLE;
7851       pc.DCFlushEnable = flags & PIPE_CONTROL_DATA_CACHE_FLUSH;
7852       pc.StoreDataIndex = 0;
7853       pc.CommandStreamerStallEnable = flags & PIPE_CONTROL_CS_STALL;
7854       pc.GlobalSnapshotCountReset =
7855          flags & PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET;
7856       pc.TLBInvalidate = flags & PIPE_CONTROL_TLB_INVALIDATE;
7857       pc.GenericMediaStateClear = flags & PIPE_CONTROL_MEDIA_STATE_CLEAR;
7858       pc.StallAtPixelScoreboard = flags & PIPE_CONTROL_STALL_AT_SCOREBOARD;
7859       pc.RenderTargetCacheFlushEnable =
7860          flags & PIPE_CONTROL_RENDER_TARGET_FLUSH;
7861       pc.DepthCacheFlushEnable = flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH;
7862       pc.StateCacheInvalidationEnable =
7863          flags & PIPE_CONTROL_STATE_CACHE_INVALIDATE;
7864       pc.VFCacheInvalidationEnable = flags & PIPE_CONTROL_VF_CACHE_INVALIDATE;
7865       pc.ConstantCacheInvalidationEnable =
7866          flags & PIPE_CONTROL_CONST_CACHE_INVALIDATE;
7867       pc.PostSyncOperation = flags_to_post_sync_op(flags);
7868       pc.DepthStallEnable = flags & PIPE_CONTROL_DEPTH_STALL;
7869       pc.InstructionCacheInvalidateEnable =
7870          flags & PIPE_CONTROL_INSTRUCTION_INVALIDATE;
7871       pc.NotifyEnable = flags & PIPE_CONTROL_NOTIFY_ENABLE;
7872       pc.IndirectStatePointersDisable =
7873          flags & PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE;
7874       pc.TextureCacheInvalidationEnable =
7875          flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
7876       pc.Address = rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE);
7877       pc.ImmediateData = imm;
7878    }
7879 
7880    iris_batch_sync_region_end(batch);
7881 }
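
/* A minimal usage sketch (not called anywhere; the helper name and flag
 * combination below are illustrative only): callers request the high-level
 * flush/invalidate bits they need and rely on the workaround logic above to
 * add whatever extra stalls the hardware requires.
 */
static inline void
example_flush_render_target_for_sampling(struct iris_batch *batch)
{
   iris_emit_raw_pipe_control(batch,
                              "example: flush RT before texturing from it",
                              PIPE_CONTROL_RENDER_TARGET_FLUSH |
                              PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE,
                              NULL, 0, 0);
}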
7882 
7883 #if GFX_VER == 9
7884 /**
7885  * Preemption on Gfx9 has to be enabled or disabled in various cases.
7886  *
7887  * See these workarounds for preemption:
7888  *  - WaDisableMidObjectPreemptionForGSLineStripAdj
7889  *  - WaDisableMidObjectPreemptionForTrifanOrPolygon
7890  *  - WaDisableMidObjectPreemptionForLineLoop
7891  *  - WA#0798
7892  *
7893  * We don't put this in the vtable because it's only used on Gfx9.
7894  */
7895 void
7896 gfx9_toggle_preemption(struct iris_context *ice,
7897                        struct iris_batch *batch,
7898                        const struct pipe_draw_info *draw)
7899 {
7900    struct iris_genx_state *genx = ice->state.genx;
7901    bool object_preemption = true;
7902 
7903    /* WaDisableMidObjectPreemptionForGSLineStripAdj
7904     *
7905     *    "WA: Disable mid-draw preemption when draw-call is a linestrip_adj
7906     *     and GS is enabled."
7907     */
7908    if (draw->mode == PIPE_PRIM_LINE_STRIP_ADJACENCY &&
7909        ice->shaders.prog[MESA_SHADER_GEOMETRY])
7910       object_preemption = false;
7911 
7912    /* WaDisableMidObjectPreemptionForTrifanOrPolygon
7913     *
7914     *    "TriFan miscompare in Execlist Preemption test. Cut index that is
7915     *     on a previous context. End the previous, then resume another context
7916     *     with a tri-fan or polygon, and the vertex count is corrupted. If we
7917     *     preempt again we will cause corruption.
7918     *
7919     *     WA: Disable mid-draw preemption when draw-call has a tri-fan."
7920     */
7921    if (draw->mode == PIPE_PRIM_TRIANGLE_FAN)
7922       object_preemption = false;
7923 
7924    /* WaDisableMidObjectPreemptionForLineLoop
7925     *
7926     *    "VF Stats Counters Missing a vertex when preemption enabled.
7927     *
7928     *     WA: Disable mid-draw preemption when the draw uses a lineloop
7929     *     topology."
7930     */
7931    if (draw->mode == PIPE_PRIM_LINE_LOOP)
7932       object_preemption = false;
7933 
7934    /* WA#0798
7935     *
7936     *    "VF is corrupting GAFS data when preempted on an instance boundary
7937     *     and replayed with instancing enabled.
7938     *
7939     *     WA: Disable preemption when using instancing."
7940     */
7941    if (draw->instance_count > 1)
7942       object_preemption = false;
7943 
7944    if (genx->object_preemption != object_preemption) {
7945       iris_enable_obj_preemption(batch, object_preemption);
7946       genx->object_preemption = object_preemption;
7947    }
7948 }
7949 #endif
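
/* A minimal sketch of how the helper above is expected to be used from the
 * Gfx9 draw path (the exact call site is illustrative, not quoted):
 *
 *    #if GFX_VER == 9
 *       gfx9_toggle_preemption(ice, batch, draw);
 *    #endif
 *
 * i.e. it runs once per draw, before the primitive is emitted, so the
 * object-level preemption setting always matches the current topology.
 */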
7950 
7951 static void
7952 iris_lost_genx_state(struct iris_context *ice, struct iris_batch *batch)
7953 {
7954    struct iris_genx_state *genx = ice->state.genx;
7955 
7956 #if GFX_VERx10 == 120
7957    genx->depth_reg_mode = IRIS_DEPTH_REG_MODE_UNKNOWN;
7958 #endif
7959 
7960    memset(genx->last_index_buffer, 0, sizeof(genx->last_index_buffer));
7961 }
7962 
7963 static void
7964 iris_emit_mi_report_perf_count(struct iris_batch *batch,
7965                                struct iris_bo *bo,
7966                                uint32_t offset_in_bytes,
7967                                uint32_t report_id)
7968 {
7969    iris_batch_sync_region_start(batch);
7970    iris_emit_cmd(batch, GENX(MI_REPORT_PERF_COUNT), mi_rpc) {
7971       mi_rpc.MemoryAddress = rw_bo(bo, offset_in_bytes,
7972                                    IRIS_DOMAIN_OTHER_WRITE);
7973       mi_rpc.ReportID = report_id;
7974    }
7975    iris_batch_sync_region_end(batch);
7976 }
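
/* Sketch of the intended usage (the perf-query code dispatches through the
 * screen vtable rather than calling the static helper directly; the BO,
 * offsets, and report IDs below are made up for illustration):
 *
 *    screen->vtbl.emit_mi_report_perf_count(batch, query_bo,
 *                                           begin_offset, begin_report_id);
 *    ...emit the workload being measured...
 *    screen->vtbl.emit_mi_report_perf_count(batch, query_bo,
 *                                           end_offset, end_report_id);
 */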
7977 
7978 /**
7979  * Update the pixel hashing modes that determine the balancing of PS threads
7980  * across subslices and slices.
7981  *
7982  * \param width Width bound of the rendering area (already scaled down if \p
7983  *              scale is greater than 1).
7984  * \param height Height bound of the rendering area (already scaled down if \p
7985  *               scale is greater than 1).
7986  * \param scale The number of framebuffer samples that could potentially be
7987  *              affected by an individual channel of the PS thread.  This is
7988  *              typically one for single-sampled rendering, but for operations
7989  *              like CCS resolves and fast clears a single PS invocation may
7990  *              update a huge number of pixels, in which case a finer
7991  *              balancing is desirable in order to maximally utilize the
7992  *              bandwidth available.  UINT_MAX can be used as shorthand for
7993  *              "finest hashing mode available".
7994  */
7995 void
7996 genX(emit_hashing_mode)(struct iris_context *ice, struct iris_batch *batch,
7997                         unsigned width, unsigned height, unsigned scale)
7998 {
7999 #if GFX_VER == 9
8000    const struct intel_device_info *devinfo = &batch->screen->devinfo;
8001    const unsigned slice_hashing[] = {
8002       /* Because all Gfx9 platforms with more than one slice require
8003        * three-way subslice hashing, a single "normal" 16x16 slice hashing
8004        * block is guaranteed to suffer from substantial imbalance, with one
8005        * subslice receiving twice as much work as the other two in the
8006        * slice.
8007        *
8008        * The performance impact of that would be particularly severe when
8009        * three-way hashing is also in use for slice balancing (which is the
8010        * case for all Gfx9 GT4 platforms), because one of the slices
8011        * receives one every three 16x16 blocks in either direction, which
8012        * is roughly the periodicity of the underlying subslice imbalance
8013        * pattern ("roughly" because in reality the hardware's
8014        * implementation of three-way hashing doesn't do exact modulo 3
8015        * arithmetic, which somewhat decreases the magnitude of this effect
8016        * in practice).  This leads to a systematic subslice imbalance
8017        * within that slice regardless of the size of the primitive.  The
8018        * 32x32 hashing mode guarantees that the subslice imbalance within a
8019        * single slice hashing block is minimal, largely eliminating this
8020        * effect.
8021        */
8022       _32x32,
8023       /* Finest slice hashing mode available. */
8024       NORMAL
8025    };
8026    const unsigned subslice_hashing[] = {
8027       /* 16x16 would provide a slight cache locality benefit especially
8028        * visible in the sampler L1 cache efficiency of low-bandwidth
8029        * non-LLC platforms, but it comes at the cost of greater subslice
8030        * imbalance for primitives of dimensions approximately intermediate
8031        * between 16x4 and 16x16.
8032        */
8033       _16x4,
8034       /* Finest subslice hashing mode available. */
8035       _8x4
8036    };
8037    /* Dimensions of the smallest hashing block of a given hashing mode.  If
8038     * the rendering area is smaller than this there can't possibly be any
8039     * benefit from switching to this mode, so we optimize out the
8040     * transition.
8041     */
8042    const unsigned min_size[][2] = {
8043       { 16, 4 },
8044       { 8, 4 }
8045    };
8046    const unsigned idx = scale > 1;
8047 
8048    if (width > min_size[idx][0] || height > min_size[idx][1]) {
8049       iris_emit_raw_pipe_control(batch,
8050                                  "workaround: CS stall before GT_MODE LRI",
8051                                  PIPE_CONTROL_STALL_AT_SCOREBOARD |
8052                                  PIPE_CONTROL_CS_STALL,
8053                                  NULL, 0, 0);
8054 
8055       iris_emit_reg(batch, GENX(GT_MODE), reg) {
8056          reg.SliceHashing = (devinfo->num_slices > 1 ? slice_hashing[idx] : 0);
8057          reg.SliceHashingMask = (devinfo->num_slices > 1 ? -1 : 0);
8058          reg.SubsliceHashing = subslice_hashing[idx];
8059          reg.SubsliceHashingMask = -1;
8060       };
8061 
8062       ice->state.current_hash_scale = scale;
8063    }
8064 #endif
8065 }
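
/* Illustrative callers (a sketch; the real callers compute their own bounds,
 * and fb_width/fb_height here are hypothetical variables): an ordinary
 * single-sampled draw asks for the coarse mode, while a fast clear or
 * resolve that touches many samples per PS channel asks for the finest mode
 * available:
 *
 *    genX(emit_hashing_mode)(ice, batch, fb_width, fb_height, 1);
 *    genX(emit_hashing_mode)(ice, batch, UINT_MAX, UINT_MAX, UINT_MAX);
 */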
8066 
8067 static void
8068 iris_set_frontend_noop(struct pipe_context *ctx, bool enable)
8069 {
8070    struct iris_context *ice = (struct iris_context *) ctx;
8071 
8072    if (iris_batch_prepare_noop(&ice->batches[IRIS_BATCH_RENDER], enable)) {
8073       ice->state.dirty |= IRIS_ALL_DIRTY_FOR_RENDER;
8074       ice->state.stage_dirty |= IRIS_ALL_STAGE_DIRTY_FOR_RENDER;
8075    }
8076 
8077    if (iris_batch_prepare_noop(&ice->batches[IRIS_BATCH_COMPUTE], enable)) {
8078       ice->state.dirty |= IRIS_ALL_DIRTY_FOR_COMPUTE;
8079       ice->state.stage_dirty |= IRIS_ALL_STAGE_DIRTY_FOR_COMPUTE;
8080    }
8081 }
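
/* A sketch of how a Gallium frontend drives the hook above (frontend-side
 * usage, not driver code): the whole context is flipped into no-op mode and
 * back, e.g. around rendering that is known to be discarded:
 *
 *    pipe->set_frontend_noop(pipe, true);
 *    ...calls whose GPU work should be skipped...
 *    pipe->set_frontend_noop(pipe, false);
 */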
8082 
8083 void
8084 genX(init_screen_state)(struct iris_screen *screen)
8085 {
8086    assert(screen->devinfo.verx10 == GFX_VERx10);
8087    screen->vtbl.destroy_state = iris_destroy_state;
8088    screen->vtbl.init_render_context = iris_init_render_context;
8089    screen->vtbl.init_compute_context = iris_init_compute_context;
8090    screen->vtbl.upload_render_state = iris_upload_render_state;
8091    screen->vtbl.update_surface_base_address = iris_update_surface_base_address;
8092    screen->vtbl.upload_compute_state = iris_upload_compute_state;
8093    screen->vtbl.emit_raw_pipe_control = iris_emit_raw_pipe_control;
8094    screen->vtbl.emit_mi_report_perf_count = iris_emit_mi_report_perf_count;
8095    screen->vtbl.rebind_buffer = iris_rebind_buffer;
8096    screen->vtbl.load_register_reg32 = iris_load_register_reg32;
8097    screen->vtbl.load_register_reg64 = iris_load_register_reg64;
8098    screen->vtbl.load_register_imm32 = iris_load_register_imm32;
8099    screen->vtbl.load_register_imm64 = iris_load_register_imm64;
8100    screen->vtbl.load_register_mem32 = iris_load_register_mem32;
8101    screen->vtbl.load_register_mem64 = iris_load_register_mem64;
8102    screen->vtbl.store_register_mem32 = iris_store_register_mem32;
8103    screen->vtbl.store_register_mem64 = iris_store_register_mem64;
8104    screen->vtbl.store_data_imm32 = iris_store_data_imm32;
8105    screen->vtbl.store_data_imm64 = iris_store_data_imm64;
8106    screen->vtbl.copy_mem_mem = iris_copy_mem_mem;
8107    screen->vtbl.derived_program_state_size = iris_derived_program_state_size;
8108    screen->vtbl.store_derived_program_state = iris_store_derived_program_state;
8109    screen->vtbl.create_so_decl_list = iris_create_so_decl_list;
8110    screen->vtbl.populate_vs_key = iris_populate_vs_key;
8111    screen->vtbl.populate_tcs_key = iris_populate_tcs_key;
8112    screen->vtbl.populate_tes_key = iris_populate_tes_key;
8113    screen->vtbl.populate_gs_key = iris_populate_gs_key;
8114    screen->vtbl.populate_fs_key = iris_populate_fs_key;
8115    screen->vtbl.populate_cs_key = iris_populate_cs_key;
8116    screen->vtbl.lost_genx_state = iris_lost_genx_state;
8117 }
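
/* The generation-independent parts of the driver reach the functions in this
 * file through the vtable filled in above.  A minimal dispatch sketch (the
 * surrounding variables are illustrative):
 *
 *    struct iris_screen *screen = batch->screen;
 *    screen->vtbl.emit_raw_pipe_control(batch, "reason", flags,
 *                                       bo, offset, imm);
 */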
8118 
8119 void
8120 genX(init_state)(struct iris_context *ice)
8121 {
8122    struct pipe_context *ctx = &ice->ctx;
8123    struct iris_screen *screen = (struct iris_screen *)ctx->screen;
8124 
8125    ctx->create_blend_state = iris_create_blend_state;
8126    ctx->create_depth_stencil_alpha_state = iris_create_zsa_state;
8127    ctx->create_rasterizer_state = iris_create_rasterizer_state;
8128    ctx->create_sampler_state = iris_create_sampler_state;
8129    ctx->create_sampler_view = iris_create_sampler_view;
8130    ctx->create_surface = iris_create_surface;
8131    ctx->create_vertex_elements_state = iris_create_vertex_elements;
8132    ctx->bind_blend_state = iris_bind_blend_state;
8133    ctx->bind_depth_stencil_alpha_state = iris_bind_zsa_state;
8134    ctx->bind_sampler_states = iris_bind_sampler_states;
8135    ctx->bind_rasterizer_state = iris_bind_rasterizer_state;
8136    ctx->bind_vertex_elements_state = iris_bind_vertex_elements_state;
8137    ctx->delete_blend_state = iris_delete_state;
8138    ctx->delete_depth_stencil_alpha_state = iris_delete_state;
8139    ctx->delete_rasterizer_state = iris_delete_state;
8140    ctx->delete_sampler_state = iris_delete_state;
8141    ctx->delete_vertex_elements_state = iris_delete_state;
8142    ctx->set_blend_color = iris_set_blend_color;
8143    ctx->set_clip_state = iris_set_clip_state;
8144    ctx->set_constant_buffer = iris_set_constant_buffer;
8145    ctx->set_shader_buffers = iris_set_shader_buffers;
8146    ctx->set_shader_images = iris_set_shader_images;
8147    ctx->set_sampler_views = iris_set_sampler_views;
8148    ctx->set_compute_resources = iris_set_compute_resources;
8149    ctx->set_global_binding = iris_set_global_binding;
8150    ctx->set_tess_state = iris_set_tess_state;
8151    ctx->set_patch_vertices = iris_set_patch_vertices;
8152    ctx->set_framebuffer_state = iris_set_framebuffer_state;
8153    ctx->set_polygon_stipple = iris_set_polygon_stipple;
8154    ctx->set_sample_mask = iris_set_sample_mask;
8155    ctx->set_scissor_states = iris_set_scissor_states;
8156    ctx->set_stencil_ref = iris_set_stencil_ref;
8157    ctx->set_vertex_buffers = iris_set_vertex_buffers;
8158    ctx->set_viewport_states = iris_set_viewport_states;
8159    ctx->sampler_view_destroy = iris_sampler_view_destroy;
8160    ctx->surface_destroy = iris_surface_destroy;
8161    ctx->draw_vbo = iris_draw_vbo;
8162    ctx->launch_grid = iris_launch_grid;
8163    ctx->create_stream_output_target = iris_create_stream_output_target;
8164    ctx->stream_output_target_destroy = iris_stream_output_target_destroy;
8165    ctx->set_stream_output_targets = iris_set_stream_output_targets;
8166    ctx->set_frontend_noop = iris_set_frontend_noop;
8167 
8168    ice->state.dirty = ~0ull;
8169    ice->state.stage_dirty = ~0ull;
8170 
8171    ice->state.statistics_counters_enabled = true;
8172 
8173    ice->state.sample_mask = 0xffff;
8174    ice->state.num_viewports = 1;
8175    ice->state.prim_mode = PIPE_PRIM_MAX;
8176    ice->state.genx = calloc(1, sizeof(struct iris_genx_state));
8177    ice->draw.derived_params.drawid = -1;
8178 
8179    /* Make a 1x1x1 null surface for unbound textures */
8180    void *null_surf_map =
8181       upload_state(ice->state.surface_uploader, &ice->state.unbound_tex,
8182                    4 * GENX(RENDER_SURFACE_STATE_length), 64);
8183    isl_null_fill_state(&screen->isl_dev, null_surf_map,
8184                        .size = isl_extent3d(1, 1, 1));
8185    ice->state.unbound_tex.offset +=
8186       iris_bo_offset_from_base_address(iris_resource_bo(ice->state.unbound_tex.res));
8187 
8188    /* Default all scissor rectangles to be empty regions. */
8189    for (int i = 0; i < IRIS_MAX_VIEWPORTS; i++) {
8190       ice->state.scissors[i] = (struct pipe_scissor_state) {
8191          .minx = 1, .maxx = 0, .miny = 1, .maxy = 0,
8192       };
8193    }
8194 }
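
/* A quick note on the "empty scissor" convention used above: with
 * minx > maxx and miny > maxy, any containment test of the form
 * "x >= minx && x <= maxx" fails for every pixel, so the default rectangles
 * reject everything until the frontend supplies real scissor state.
 */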
8195