1 /*
2  * Copyright © 2017 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included
12  * in all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20  * DEALINGS IN THE SOFTWARE.
21  */
22 
23 /**
24  * @file iris_state.c
25  *
26  * ============================= GENXML CODE =============================
27  *              [This file is compiled once per generation.]
28  * =======================================================================
29  *
30  * This is the main state upload code.
31  *
32  * Gallium uses Constant State Objects, or CSOs, for most state.  Large,
33  * complex, or highly reusable state can be created once, and bound and
34  * rebound multiple times.  This is modeled with the pipe->create_*_state()
35  * and pipe->bind_*_state() hooks.  Highly dynamic or inexpensive state is
36  * streamed out on the fly, via pipe->set_*_state() hooks.
37  *
38  * OpenGL involves frequently mutating context state, which is mirrored in
39  * core Mesa by highly mutable data structures.  However, most applications
40  * typically draw the same things over and over - from frame to frame, most
41  * of the same objects are still visible and need to be redrawn.  So, rather
42  * than inventing new state all the time, applications usually mutate to swap
43  * between known states that we've seen before.
44  *
45  * Gallium isolates us from this mutation by tracking API state, and
46  * distilling it into a set of Constant State Objects, or CSOs.  Large,
47  * complex, or typically reusable state can be created once, then reused
48  * multiple times.  Drivers can create and store their own associated data.
49  * This create/bind model corresponds to the pipe->create_*_state() and
50  * pipe->bind_*_state() driver hooks.
51  *
52  * Some state is cheap to create, or expected to be highly dynamic.  Rather
53  * than creating and caching piles of CSOs for these, Gallium simply streams
54  * them out, via the pipe->set_*_state() driver hooks.
55  *
56  * To reduce draw time overhead, we try to compute as much state at create
57  * time as possible.  Wherever possible, we translate the Gallium pipe state
58  * to 3DSTATE commands, and store those commands in the CSO.  At draw time,
59  * we can simply memcpy them into a batch buffer.
60  *
61  * No hardware matches the abstraction perfectly, so some commands require
62  * information from multiple CSOs.  In this case, we can store two copies
63  * of the packet (one in each CSO), and simply | together their DWords at
64  * draw time.  Sometimes the second set is trivial (one or two fields), so
65  * we simply pack it at draw time.
66  *
 * There are two main components in the file below.  First, the CSO hooks,
 * which create/bind/track state.  Second, the draw-time upload functions,
 * iris_upload_render_state() and iris_upload_compute_state(), which read
 * the context state and emit the commands into the actual batch.
71  */
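
/*
 * As a rough illustration of the "OR the DWords together" idea above (a
 * sketch only -- the packet and field names here are hypothetical, not
 * actual driver code):
 *
 *    uint32_t merged[PACKET_LENGTH];
 *    for (unsigned i = 0; i < PACKET_LENGTH; i++)
 *       merged[i] = cso_a->packet[i] | cso_b->packet[i];
 *    memcpy(batch_map, merged, sizeof(merged));
 *
 * Each CSO packs only the fields it owns and leaves the rest zero, so the
 * bitwise OR produces the complete command.
 */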
72 
73 #include <stdio.h>
74 #include <errno.h>
75 
76 #if HAVE_VALGRIND
77 #include <valgrind.h>
78 #include <memcheck.h>
79 #define VG(x) x
80 #ifdef DEBUG
81 #define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))
82 #endif
83 #else
84 #define VG(x)
85 #endif
86 
87 #include "pipe/p_defines.h"
88 #include "pipe/p_state.h"
89 #include "pipe/p_context.h"
90 #include "pipe/p_screen.h"
91 #include "util/u_dual_blend.h"
92 #include "util/u_inlines.h"
93 #include "util/format/u_format.h"
94 #include "util/u_framebuffer.h"
95 #include "util/u_transfer.h"
96 #include "util/u_upload_mgr.h"
97 #include "util/u_viewport.h"
98 #include "util/u_memory.h"
99 #include "util/u_trace_gallium.h"
100 #include "drm-uapi/i915_drm.h"
101 #include "nir.h"
102 #include "intel/compiler/brw_compiler.h"
103 #include "intel/common/intel_aux_map.h"
104 #include "intel/common/intel_l3_config.h"
105 #include "intel/common/intel_sample_positions.h"
106 #include "intel/ds/intel_tracepoints.h"
107 #include "iris_batch.h"
108 #include "iris_context.h"
109 #include "iris_defines.h"
110 #include "iris_pipe.h"
111 #include "iris_resource.h"
112 #include "iris_utrace.h"
113 
114 #include "iris_genx_macros.h"
115 #include "intel/common/intel_guardband.h"
116 #include "intel/common/intel_pixel_hash.h"
117 
118 /**
119  * Statically assert that PIPE_* enums match the hardware packets.
120  * (As long as they match, we don't need to translate them.)
121  */
UNUSED static void pipe_asserts()
123 {
124 #define PIPE_ASSERT(x) STATIC_ASSERT((int)x)
125 
126    /* pipe_logicop happens to match the hardware. */
127    PIPE_ASSERT(PIPE_LOGICOP_CLEAR == LOGICOP_CLEAR);
128    PIPE_ASSERT(PIPE_LOGICOP_NOR == LOGICOP_NOR);
129    PIPE_ASSERT(PIPE_LOGICOP_AND_INVERTED == LOGICOP_AND_INVERTED);
130    PIPE_ASSERT(PIPE_LOGICOP_COPY_INVERTED == LOGICOP_COPY_INVERTED);
131    PIPE_ASSERT(PIPE_LOGICOP_AND_REVERSE == LOGICOP_AND_REVERSE);
132    PIPE_ASSERT(PIPE_LOGICOP_INVERT == LOGICOP_INVERT);
133    PIPE_ASSERT(PIPE_LOGICOP_XOR == LOGICOP_XOR);
134    PIPE_ASSERT(PIPE_LOGICOP_NAND == LOGICOP_NAND);
135    PIPE_ASSERT(PIPE_LOGICOP_AND == LOGICOP_AND);
136    PIPE_ASSERT(PIPE_LOGICOP_EQUIV == LOGICOP_EQUIV);
137    PIPE_ASSERT(PIPE_LOGICOP_NOOP == LOGICOP_NOOP);
138    PIPE_ASSERT(PIPE_LOGICOP_OR_INVERTED == LOGICOP_OR_INVERTED);
139    PIPE_ASSERT(PIPE_LOGICOP_COPY == LOGICOP_COPY);
140    PIPE_ASSERT(PIPE_LOGICOP_OR_REVERSE == LOGICOP_OR_REVERSE);
141    PIPE_ASSERT(PIPE_LOGICOP_OR == LOGICOP_OR);
142    PIPE_ASSERT(PIPE_LOGICOP_SET == LOGICOP_SET);
143 
   /* pipe_blendfactor happens to match the hardware. */
145    PIPE_ASSERT(PIPE_BLENDFACTOR_ONE == BLENDFACTOR_ONE);
146    PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_COLOR == BLENDFACTOR_SRC_COLOR);
147    PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_ALPHA == BLENDFACTOR_SRC_ALPHA);
148    PIPE_ASSERT(PIPE_BLENDFACTOR_DST_ALPHA == BLENDFACTOR_DST_ALPHA);
149    PIPE_ASSERT(PIPE_BLENDFACTOR_DST_COLOR == BLENDFACTOR_DST_COLOR);
150    PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE == BLENDFACTOR_SRC_ALPHA_SATURATE);
151    PIPE_ASSERT(PIPE_BLENDFACTOR_CONST_COLOR == BLENDFACTOR_CONST_COLOR);
152    PIPE_ASSERT(PIPE_BLENDFACTOR_CONST_ALPHA == BLENDFACTOR_CONST_ALPHA);
153    PIPE_ASSERT(PIPE_BLENDFACTOR_SRC1_COLOR == BLENDFACTOR_SRC1_COLOR);
154    PIPE_ASSERT(PIPE_BLENDFACTOR_SRC1_ALPHA == BLENDFACTOR_SRC1_ALPHA);
155    PIPE_ASSERT(PIPE_BLENDFACTOR_ZERO == BLENDFACTOR_ZERO);
156    PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC_COLOR == BLENDFACTOR_INV_SRC_COLOR);
157    PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC_ALPHA == BLENDFACTOR_INV_SRC_ALPHA);
158    PIPE_ASSERT(PIPE_BLENDFACTOR_INV_DST_ALPHA == BLENDFACTOR_INV_DST_ALPHA);
159    PIPE_ASSERT(PIPE_BLENDFACTOR_INV_DST_COLOR == BLENDFACTOR_INV_DST_COLOR);
160    PIPE_ASSERT(PIPE_BLENDFACTOR_INV_CONST_COLOR == BLENDFACTOR_INV_CONST_COLOR);
161    PIPE_ASSERT(PIPE_BLENDFACTOR_INV_CONST_ALPHA == BLENDFACTOR_INV_CONST_ALPHA);
162    PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC1_COLOR == BLENDFACTOR_INV_SRC1_COLOR);
163    PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC1_ALPHA == BLENDFACTOR_INV_SRC1_ALPHA);
164 
165    /* pipe_blend_func happens to match the hardware. */
166    PIPE_ASSERT(PIPE_BLEND_ADD == BLENDFUNCTION_ADD);
167    PIPE_ASSERT(PIPE_BLEND_SUBTRACT == BLENDFUNCTION_SUBTRACT);
168    PIPE_ASSERT(PIPE_BLEND_REVERSE_SUBTRACT == BLENDFUNCTION_REVERSE_SUBTRACT);
169    PIPE_ASSERT(PIPE_BLEND_MIN == BLENDFUNCTION_MIN);
170    PIPE_ASSERT(PIPE_BLEND_MAX == BLENDFUNCTION_MAX);
171 
172    /* pipe_stencil_op happens to match the hardware. */
173    PIPE_ASSERT(PIPE_STENCIL_OP_KEEP == STENCILOP_KEEP);
174    PIPE_ASSERT(PIPE_STENCIL_OP_ZERO == STENCILOP_ZERO);
175    PIPE_ASSERT(PIPE_STENCIL_OP_REPLACE == STENCILOP_REPLACE);
176    PIPE_ASSERT(PIPE_STENCIL_OP_INCR == STENCILOP_INCRSAT);
177    PIPE_ASSERT(PIPE_STENCIL_OP_DECR == STENCILOP_DECRSAT);
178    PIPE_ASSERT(PIPE_STENCIL_OP_INCR_WRAP == STENCILOP_INCR);
179    PIPE_ASSERT(PIPE_STENCIL_OP_DECR_WRAP == STENCILOP_DECR);
180    PIPE_ASSERT(PIPE_STENCIL_OP_INVERT == STENCILOP_INVERT);
181 
182    /* pipe_sprite_coord_mode happens to match 3DSTATE_SBE */
183    PIPE_ASSERT(PIPE_SPRITE_COORD_UPPER_LEFT == UPPERLEFT);
184    PIPE_ASSERT(PIPE_SPRITE_COORD_LOWER_LEFT == LOWERLEFT);
185 #undef PIPE_ASSERT
186 }
187 
188 static unsigned
translate_prim_type(enum pipe_prim_type prim, uint8_t verts_per_patch)
190 {
191    static const unsigned map[] = {
192       [PIPE_PRIM_POINTS]                   = _3DPRIM_POINTLIST,
193       [PIPE_PRIM_LINES]                    = _3DPRIM_LINELIST,
194       [PIPE_PRIM_LINE_LOOP]                = _3DPRIM_LINELOOP,
195       [PIPE_PRIM_LINE_STRIP]               = _3DPRIM_LINESTRIP,
196       [PIPE_PRIM_TRIANGLES]                = _3DPRIM_TRILIST,
197       [PIPE_PRIM_TRIANGLE_STRIP]           = _3DPRIM_TRISTRIP,
198       [PIPE_PRIM_TRIANGLE_FAN]             = _3DPRIM_TRIFAN,
199       [PIPE_PRIM_QUADS]                    = _3DPRIM_QUADLIST,
200       [PIPE_PRIM_QUAD_STRIP]               = _3DPRIM_QUADSTRIP,
201       [PIPE_PRIM_POLYGON]                  = _3DPRIM_POLYGON,
202       [PIPE_PRIM_LINES_ADJACENCY]          = _3DPRIM_LINELIST_ADJ,
203       [PIPE_PRIM_LINE_STRIP_ADJACENCY]     = _3DPRIM_LINESTRIP_ADJ,
204       [PIPE_PRIM_TRIANGLES_ADJACENCY]      = _3DPRIM_TRILIST_ADJ,
205       [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
206       [PIPE_PRIM_PATCHES]                  = _3DPRIM_PATCHLIST_1 - 1,
207    };
208 
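   /* The PIPE_PRIM_PATCHES entry above is _3DPRIM_PATCHLIST_1 - 1, so that
    * adding verts_per_patch below yields the matching _3DPRIM_PATCHLIST_n
    * topology (assuming the PATCHLIST topology values are consecutive).
    */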
209    return map[prim] + (prim == PIPE_PRIM_PATCHES ? verts_per_patch : 0);
210 }
211 
212 static unsigned
translate_compare_func(enum pipe_compare_func pipe_func)
214 {
215    static const unsigned map[] = {
216       [PIPE_FUNC_NEVER]    = COMPAREFUNCTION_NEVER,
217       [PIPE_FUNC_LESS]     = COMPAREFUNCTION_LESS,
218       [PIPE_FUNC_EQUAL]    = COMPAREFUNCTION_EQUAL,
219       [PIPE_FUNC_LEQUAL]   = COMPAREFUNCTION_LEQUAL,
220       [PIPE_FUNC_GREATER]  = COMPAREFUNCTION_GREATER,
221       [PIPE_FUNC_NOTEQUAL] = COMPAREFUNCTION_NOTEQUAL,
222       [PIPE_FUNC_GEQUAL]   = COMPAREFUNCTION_GEQUAL,
223       [PIPE_FUNC_ALWAYS]   = COMPAREFUNCTION_ALWAYS,
224    };
225    return map[pipe_func];
226 }
227 
228 static unsigned
translate_shadow_func(enum pipe_compare_func pipe_func)
230 {
231    /* Gallium specifies the result of shadow comparisons as:
232     *
233     *    1 if ref <op> texel,
234     *    0 otherwise.
235     *
236     * The hardware does:
237     *
238     *    0 if texel <op> ref,
239     *    1 otherwise.
240     *
241     * So we need to flip the operator and also negate.
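    *
    * For example, PIPE_FUNC_LESS ("pass if ref < texel") maps to
    * PREFILTEROP_LEQUAL ("fail if texel <= ref"), which selects the same
    * set of texels.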
242     */
243    static const unsigned map[] = {
244       [PIPE_FUNC_NEVER]    = PREFILTEROP_ALWAYS,
245       [PIPE_FUNC_LESS]     = PREFILTEROP_LEQUAL,
246       [PIPE_FUNC_EQUAL]    = PREFILTEROP_NOTEQUAL,
247       [PIPE_FUNC_LEQUAL]   = PREFILTEROP_LESS,
248       [PIPE_FUNC_GREATER]  = PREFILTEROP_GEQUAL,
249       [PIPE_FUNC_NOTEQUAL] = PREFILTEROP_EQUAL,
250       [PIPE_FUNC_GEQUAL]   = PREFILTEROP_GREATER,
251       [PIPE_FUNC_ALWAYS]   = PREFILTEROP_NEVER,
252    };
253    return map[pipe_func];
254 }
255 
256 static unsigned
translate_cull_mode(unsigned pipe_face)
258 {
259    static const unsigned map[4] = {
260       [PIPE_FACE_NONE]           = CULLMODE_NONE,
261       [PIPE_FACE_FRONT]          = CULLMODE_FRONT,
262       [PIPE_FACE_BACK]           = CULLMODE_BACK,
263       [PIPE_FACE_FRONT_AND_BACK] = CULLMODE_BOTH,
264    };
265    return map[pipe_face];
266 }
267 
268 static unsigned
translate_fill_mode(unsigned pipe_polymode)
270 {
271    static const unsigned map[4] = {
272       [PIPE_POLYGON_MODE_FILL]           = FILL_MODE_SOLID,
273       [PIPE_POLYGON_MODE_LINE]           = FILL_MODE_WIREFRAME,
274       [PIPE_POLYGON_MODE_POINT]          = FILL_MODE_POINT,
275       [PIPE_POLYGON_MODE_FILL_RECTANGLE] = FILL_MODE_SOLID,
276    };
277    return map[pipe_polymode];
278 }
279 
280 static unsigned
translate_mip_filter(enum pipe_tex_mipfilter pipe_mip)
282 {
283    static const unsigned map[] = {
284       [PIPE_TEX_MIPFILTER_NEAREST] = MIPFILTER_NEAREST,
285       [PIPE_TEX_MIPFILTER_LINEAR]  = MIPFILTER_LINEAR,
286       [PIPE_TEX_MIPFILTER_NONE]    = MIPFILTER_NONE,
287    };
288    return map[pipe_mip];
289 }
290 
291 static uint32_t
translate_wrap(unsigned pipe_wrap)
293 {
294    static const unsigned map[] = {
295       [PIPE_TEX_WRAP_REPEAT]                 = TCM_WRAP,
296       [PIPE_TEX_WRAP_CLAMP]                  = TCM_HALF_BORDER,
297       [PIPE_TEX_WRAP_CLAMP_TO_EDGE]          = TCM_CLAMP,
298       [PIPE_TEX_WRAP_CLAMP_TO_BORDER]        = TCM_CLAMP_BORDER,
299       [PIPE_TEX_WRAP_MIRROR_REPEAT]          = TCM_MIRROR,
300       [PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE]   = TCM_MIRROR_ONCE,
301 
302       /* These are unsupported. */
303       [PIPE_TEX_WRAP_MIRROR_CLAMP]           = -1,
304       [PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER] = -1,
305    };
306    return map[pipe_wrap];
307 }
308 
309 /**
310  * Allocate space for some indirect state.
311  *
312  * Return a pointer to the map (to fill it out) and a state ref (for
313  * referring to the state in GPU commands).
314  */
315 static void *
upload_state(struct u_upload_mgr *uploader,
317              struct iris_state_ref *ref,
318              unsigned size,
319              unsigned alignment)
320 {
321    void *p = NULL;
322    u_upload_alloc(uploader, 0, size, alignment, &ref->offset, &ref->res, &p);
323    return p;
324 }
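
/*
 * Typical usage sketch for upload_state() (illustrative only -- the state
 * type, size, and alignment here are hypothetical):
 *
 *    struct iris_state_ref ref = {0};
 *    uint32_t *map =
 *       upload_state(uploader, &ref, 4 * GENX(BLEND_STATE_length), 64);
 *    if (map)
 *       memcpy(map, cso->blend_state, 4 * GENX(BLEND_STATE_length));
 *
 * ref.res and ref.offset can then be used to point GPU commands at the
 * uploaded data.
 */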
325 
326 /**
327  * Stream out temporary/short-lived state.
328  *
329  * This allocates space, pins the BO, and includes the BO address in the
330  * returned offset (which works because all state lives in 32-bit memory
331  * zones).
332  */
333 static uint32_t *
stream_state(struct iris_batch *batch,
335              struct u_upload_mgr *uploader,
336              struct pipe_resource **out_res,
337              unsigned size,
338              unsigned alignment,
339              uint32_t *out_offset)
340 {
341    void *ptr = NULL;
342 
343    u_upload_alloc(uploader, 0, size, alignment, out_offset, out_res, &ptr);
344 
345    struct iris_bo *bo = iris_resource_bo(*out_res);
346    iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_NONE);
347 
348    iris_record_state_size(batch->state_sizes,
349                           bo->address + *out_offset, size);
350 
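   /* Add the BO's offset from the relevant base address, so the returned
    * offset can be used directly in state pointer fields (see the comment
    * above about 32-bit memory zones).
    */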
351    *out_offset += iris_bo_offset_from_base_address(bo);
352 
353    return ptr;
354 }
355 
356 /**
357  * stream_state() + memcpy.
358  */
359 static uint32_t
emit_state(struct iris_batch *batch,
361            struct u_upload_mgr *uploader,
362            struct pipe_resource **out_res,
363            const void *data,
364            unsigned size,
365            unsigned alignment)
366 {
367    unsigned offset = 0;
368    uint32_t *map =
369       stream_state(batch, uploader, out_res, size, alignment, &offset);
370 
371    if (map)
372       memcpy(map, data, size);
373 
374    return offset;
375 }
376 
377 /**
378  * Did field 'x' change between 'old_cso' and 'new_cso'?
379  *
380  * (If so, we may want to set some dirty flags.)
381  */
382 #define cso_changed(x) (!old_cso || (old_cso->x != new_cso->x))
383 #define cso_changed_memcmp(x) \
384    (!old_cso || memcmp(old_cso->x, new_cso->x, sizeof(old_cso->x)) != 0)
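
/*
 * Usage sketch (illustrative only -- the field and dirty bit here are
 * hypothetical): a pipe->bind_*_state() hook with old_cso and new_cso in
 * scope might do:
 *
 *    if (cso_changed(line_width))
 *       ice->state.dirty |= IRIS_DIRTY_SOME_PACKET;
 */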
385 
386 static void
flush_before_state_base_change(struct iris_batch *batch)
388 {
389    /* Flush before emitting STATE_BASE_ADDRESS.
390     *
391     * This isn't documented anywhere in the PRM.  However, it seems to be
392     * necessary prior to changing the surface state base address.  We've
393     * seen issues in Vulkan where we get GPU hangs when using multi-level
394     * command buffers which clear depth, reset state base address, and then
395     * go render stuff.
396     *
397     * Normally, in GL, we would trust the kernel to do sufficient stalls
398     * and flushes prior to executing our batch.  However, it doesn't seem
399     * as if the kernel's flushing is always sufficient and we don't want to
400     * rely on it.
401     *
402     * We make this an end-of-pipe sync instead of a normal flush because we
403     * do not know the current status of the GPU.  On Haswell at least,
404     * having a fast-clear operation in flight at the same time as a normal
405     * rendering operation can cause hangs.  Since the kernel's flushing is
406     * insufficient, we need to ensure that any rendering operations from
407     * other processes are definitely complete before we try to do our own
408     * rendering.  It's a bit of a big hammer but it appears to work.
409     */
410    iris_emit_end_of_pipe_sync(batch,
411                               "change STATE_BASE_ADDRESS (flushes)",
412                               PIPE_CONTROL_RENDER_TARGET_FLUSH |
413                               PIPE_CONTROL_DEPTH_CACHE_FLUSH |
414                               PIPE_CONTROL_DATA_CACHE_FLUSH);
415 }
416 
417 static void
flush_after_state_base_change(struct iris_batch *batch)
419 {
420    /* After re-setting the surface state base address, we have to do some
    * cache flushing so that the sampler engine will pick up the new
422     * SURFACE_STATE objects and binding tables. From the Broadwell PRM,
423     * Shared Function > 3D Sampler > State > State Caching (page 96):
424     *
425     *    Coherency with system memory in the state cache, like the texture
426     *    cache is handled partially by software. It is expected that the
427     *    command stream or shader will issue Cache Flush operation or
428     *    Cache_Flush sampler message to ensure that the L1 cache remains
429     *    coherent with system memory.
430     *
431     *    [...]
432     *
433     *    Whenever the value of the Dynamic_State_Base_Addr,
434     *    Surface_State_Base_Addr are altered, the L1 state cache must be
435     *    invalidated to ensure the new surface or sampler state is fetched
436     *    from system memory.
437     *
438     * The PIPE_CONTROL command has a "State Cache Invalidation Enable" bit
    * which, according to the PIPE_CONTROL instruction documentation in the
440     * Broadwell PRM:
441     *
442     *    Setting this bit is independent of any other bit in this packet.
443     *    This bit controls the invalidation of the L1 and L2 state caches
444     *    at the top of the pipe i.e. at the parsing time.
445     *
446     * Unfortunately, experimentation seems to indicate that state cache
447     * invalidation through a PIPE_CONTROL does nothing whatsoever in
    * regards to surface state and binding tables.  Instead, it seems that
    * invalidating the texture cache is what is actually needed.
    *
    * XXX:  As far as we have been able to determine through
    * experimentation, flushing the texture cache appears to be
    * sufficient.  The theory here is that all of the sampling/rendering
454     * units cache the binding table in the texture cache.  However, we have
455     * yet to be able to actually confirm this.
456     *
457     * Wa_14013910100:
458     *
459     *  "DG2 128/256/512-A/B: S/W must program STATE_BASE_ADDRESS command twice
460     *   or program pipe control with Instruction cache invalidate post
461     *   STATE_BASE_ADDRESS command"
462     */
463    iris_emit_end_of_pipe_sync(batch,
464                               "change STATE_BASE_ADDRESS (invalidates)",
465                               PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
466                               PIPE_CONTROL_CONST_CACHE_INVALIDATE |
467                               PIPE_CONTROL_STATE_CACHE_INVALIDATE |
468                               (GFX_VERx10 != 125 ? 0 :
469                                PIPE_CONTROL_INSTRUCTION_INVALIDATE));
470 }
471 
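/**
 * Copy the value of one 32-bit MMIO register into another, using MI
 * commands emitted via the mi_builder helpers.
 */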
472 static void
iris_load_register_reg32(struct iris_batch *batch, uint32_t dst,
474                          uint32_t src)
475 {
476    struct mi_builder b;
477    mi_builder_init(&b, &batch->screen->devinfo, batch);
478    mi_store(&b, mi_reg32(dst), mi_reg32(src));
479 }
480 
481 static void
iris_load_register_reg64(struct iris_batch *batch, uint32_t dst,
483                          uint32_t src)
484 {
485    struct mi_builder b;
486    mi_builder_init(&b, &batch->screen->devinfo, batch);
487    mi_store(&b, mi_reg64(dst), mi_reg64(src));
488 }
489 
490 static void
iris_load_register_imm32(struct iris_batch *batch, uint32_t reg,
492                          uint32_t val)
493 {
494    struct mi_builder b;
495    mi_builder_init(&b, &batch->screen->devinfo, batch);
496    mi_store(&b, mi_reg32(reg), mi_imm(val));
497 }
498 
499 static void
iris_load_register_imm64(struct iris_batch *batch, uint32_t reg,
501                          uint64_t val)
502 {
503    struct mi_builder b;
504    mi_builder_init(&b, &batch->screen->devinfo, batch);
505    mi_store(&b, mi_reg64(reg), mi_imm(val));
506 }
507 
508 /**
509  * Emit MI_LOAD_REGISTER_MEM to load a 32-bit MMIO register from a buffer.
510  */
511 static void
iris_load_register_mem32(struct iris_batch *batch, uint32_t reg,
513                          struct iris_bo *bo, uint32_t offset)
514 {
515    iris_batch_sync_region_start(batch);
516    struct mi_builder b;
517    mi_builder_init(&b, &batch->screen->devinfo, batch);
518    struct mi_value src = mi_mem32(ro_bo(bo, offset));
519    mi_store(&b, mi_reg32(reg), src);
520    iris_batch_sync_region_end(batch);
521 }
522 
523 /**
524  * Load a 64-bit value from a buffer into a MMIO register via
525  * two MI_LOAD_REGISTER_MEM commands.
526  */
527 static void
iris_load_register_mem64(struct iris_batch *batch, uint32_t reg,
529                          struct iris_bo *bo, uint32_t offset)
530 {
531    iris_batch_sync_region_start(batch);
532    struct mi_builder b;
533    mi_builder_init(&b, &batch->screen->devinfo, batch);
534    struct mi_value src = mi_mem64(ro_bo(bo, offset));
535    mi_store(&b, mi_reg64(reg), src);
536    iris_batch_sync_region_end(batch);
537 }
538 
539 static void
iris_store_register_mem32(struct iris_batch *batch, uint32_t reg,
541                           struct iris_bo *bo, uint32_t offset,
542                           bool predicated)
543 {
544    iris_batch_sync_region_start(batch);
545    struct mi_builder b;
546    mi_builder_init(&b, &batch->screen->devinfo, batch);
547    struct mi_value dst = mi_mem32(rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE));
548    struct mi_value src = mi_reg32(reg);
549    if (predicated)
550       mi_store_if(&b, dst, src);
551    else
552       mi_store(&b, dst, src);
553    iris_batch_sync_region_end(batch);
554 }
555 
556 static void
iris_store_register_mem64(struct iris_batch *batch, uint32_t reg,
558                           struct iris_bo *bo, uint32_t offset,
559                           bool predicated)
560 {
561    iris_batch_sync_region_start(batch);
562    struct mi_builder b;
563    mi_builder_init(&b, &batch->screen->devinfo, batch);
564    struct mi_value dst = mi_mem64(rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE));
565    struct mi_value src = mi_reg64(reg);
566    if (predicated)
567       mi_store_if(&b, dst, src);
568    else
569       mi_store(&b, dst, src);
570    iris_batch_sync_region_end(batch);
571 }
572 
573 static void
iris_store_data_imm32(struct iris_batch *batch,
575                       struct iris_bo *bo, uint32_t offset,
576                       uint32_t imm)
577 {
578    iris_batch_sync_region_start(batch);
579    struct mi_builder b;
580    mi_builder_init(&b, &batch->screen->devinfo, batch);
581    struct mi_value dst = mi_mem32(rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE));
582    struct mi_value src = mi_imm(imm);
583    mi_store(&b, dst, src);
584    iris_batch_sync_region_end(batch);
585 }
586 
587 static void
iris_store_data_imm64(struct iris_batch *batch,
589                       struct iris_bo *bo, uint32_t offset,
590                       uint64_t imm)
591 {
592    iris_batch_sync_region_start(batch);
593    struct mi_builder b;
594    mi_builder_init(&b, &batch->screen->devinfo, batch);
595    struct mi_value dst = mi_mem64(rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE));
596    struct mi_value src = mi_imm(imm);
597    mi_store(&b, dst, src);
598    iris_batch_sync_region_end(batch);
599 }
600 
601 static void
iris_copy_mem_mem(struct iris_batch *batch,
603                   struct iris_bo *dst_bo, uint32_t dst_offset,
604                   struct iris_bo *src_bo, uint32_t src_offset,
605                   unsigned bytes)
606 {
607    /* MI_COPY_MEM_MEM operates on DWords. */
608    assert(bytes % 4 == 0);
609    assert(dst_offset % 4 == 0);
610    assert(src_offset % 4 == 0);
611    iris_batch_sync_region_start(batch);
612 
613    for (unsigned i = 0; i < bytes; i += 4) {
614       iris_emit_cmd(batch, GENX(MI_COPY_MEM_MEM), cp) {
615          cp.DestinationMemoryAddress = rw_bo(dst_bo, dst_offset + i,
616                                              IRIS_DOMAIN_OTHER_WRITE);
617          cp.SourceMemoryAddress = ro_bo(src_bo, src_offset + i);
618       }
619    }
620 
621    iris_batch_sync_region_end(batch);
622 }
623 
624 static void
emit_pipeline_select(struct iris_batch *batch, uint32_t pipeline)
626 {
627 #if GFX_VER >= 8 && GFX_VER < 10
628    /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
629     *
630     *   Software must clear the COLOR_CALC_STATE Valid field in
631     *   3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
632     *   with Pipeline Select set to GPGPU.
633     *
634     * The internal hardware docs recommend the same workaround for Gfx9
635     * hardware too.
636     */
637    if (pipeline == GPGPU)
638       iris_emit_cmd(batch, GENX(3DSTATE_CC_STATE_POINTERS), t);
639 #endif
640 
641 
642    /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
643     * PIPELINE_SELECT [DevBWR+]":
644     *
645     *    "Project: DEVSNB+
646     *
647     *     Software must ensure all the write caches are flushed through a
648     *     stalling PIPE_CONTROL command followed by another PIPE_CONTROL
649     *     command to invalidate read only caches prior to programming
650     *     MI_PIPELINE_SELECT command to change the Pipeline Select Mode."
651     */
652     iris_emit_pipe_control_flush(batch,
653                                  "workaround: PIPELINE_SELECT flushes (1/2)",
654                                  PIPE_CONTROL_RENDER_TARGET_FLUSH |
655                                  PIPE_CONTROL_DEPTH_CACHE_FLUSH |
656                                  PIPE_CONTROL_DATA_CACHE_FLUSH |
657                                  PIPE_CONTROL_CS_STALL);
658 
659     iris_emit_pipe_control_flush(batch,
660                                  "workaround: PIPELINE_SELECT flushes (2/2)",
661                                  PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
662                                  PIPE_CONTROL_CONST_CACHE_INVALIDATE |
663                                  PIPE_CONTROL_STATE_CACHE_INVALIDATE |
664                                  PIPE_CONTROL_INSTRUCTION_INVALIDATE);
665 
666    iris_emit_cmd(batch, GENX(PIPELINE_SELECT), sel) {
667 #if GFX_VER >= 9
668       sel.MaskBits = GFX_VER >= 12 ? 0x13 : 3;
669       sel.MediaSamplerDOPClockGateEnable = GFX_VER >= 12;
670 #endif
671       sel.PipelineSelection = pipeline;
672    }
673 }
674 
675 UNUSED static void
init_glk_barrier_mode(struct iris_batch *batch, uint32_t value)
677 {
678 #if GFX_VER == 9
679    /* Project: DevGLK
680     *
681     *    "This chicken bit works around a hardware issue with barrier
682     *     logic encountered when switching between GPGPU and 3D pipelines.
683     *     To workaround the issue, this mode bit should be set after a
684     *     pipeline is selected."
685     */
686    iris_emit_reg(batch, GENX(SLICE_COMMON_ECO_CHICKEN1), reg) {
687       reg.GLKBarrierMode = value;
688       reg.GLKBarrierModeMask = 1;
689    }
690 #endif
691 }
692 
693 static void
init_state_base_address(struct iris_batch *batch)
695 {
696    struct isl_device *isl_dev = &batch->screen->isl_dev;
697    uint32_t mocs = isl_mocs(isl_dev, 0, false);
698    flush_before_state_base_change(batch);
699 
700    /* We program most base addresses once at context initialization time.
701     * Each base address points at a 4GB memory zone, and never needs to
702     * change.  See iris_bufmgr.h for a description of the memory zones.
703     *
704     * The one exception is Surface State Base Address, which needs to be
705     * updated occasionally.  See iris_binder.c for the details there.
706     */
707    iris_emit_cmd(batch, GENX(STATE_BASE_ADDRESS), sba) {
708       sba.GeneralStateMOCS            = mocs;
709       sba.StatelessDataPortAccessMOCS = mocs;
710       sba.DynamicStateMOCS            = mocs;
711       sba.IndirectObjectMOCS          = mocs;
712       sba.InstructionMOCS             = mocs;
713       sba.SurfaceStateMOCS            = mocs;
714 
715       sba.GeneralStateBaseAddressModifyEnable   = true;
716       sba.DynamicStateBaseAddressModifyEnable   = true;
717       sba.IndirectObjectBaseAddressModifyEnable = true;
718       sba.InstructionBaseAddressModifyEnable    = true;
719       sba.GeneralStateBufferSizeModifyEnable    = true;
720       sba.DynamicStateBufferSizeModifyEnable    = true;
721       sba.SurfaceStateBaseAddressModifyEnable   = true;
722 #if GFX_VER >= 9
723       sba.BindlessSurfaceStateBaseAddress = ro_bo(NULL, IRIS_MEMZONE_BINDLESS_START);
724       sba.BindlessSurfaceStateSize = (IRIS_BINDLESS_SIZE >> 12) - 1;
725       sba.BindlessSurfaceStateBaseAddressModifyEnable = true;
726       sba.BindlessSurfaceStateMOCS    = mocs;
727 #endif
728 #if GFX_VER >= 11
729       sba.BindlessSamplerStateMOCS    = mocs;
730 #endif
731       sba.IndirectObjectBufferSizeModifyEnable  = true;
732       sba.InstructionBuffersizeModifyEnable     = true;
733 
734       sba.InstructionBaseAddress  = ro_bo(NULL, IRIS_MEMZONE_SHADER_START);
735       sba.DynamicStateBaseAddress = ro_bo(NULL, IRIS_MEMZONE_DYNAMIC_START);
736       sba.SurfaceStateBaseAddress = ro_bo(NULL, IRIS_MEMZONE_BINDER_START);
737 
738       sba.GeneralStateBufferSize   = 0xfffff;
739       sba.IndirectObjectBufferSize = 0xfffff;
740       sba.InstructionBufferSize    = 0xfffff;
741       sba.DynamicStateBufferSize   = 0xfffff;
742    }
743 
744    flush_after_state_base_change(batch);
745 }
746 
747 static void
iris_emit_l3_config(struct iris_batch *batch,
749                     const struct intel_l3_config *cfg)
750 {
751    assert(cfg || GFX_VER >= 12);
752 
753 #if GFX_VER >= 12
754 #define L3_ALLOCATION_REG GENX(L3ALLOC)
755 #define L3_ALLOCATION_REG_num GENX(L3ALLOC_num)
756 #else
757 #define L3_ALLOCATION_REG GENX(L3CNTLREG)
758 #define L3_ALLOCATION_REG_num GENX(L3CNTLREG_num)
759 #endif
760 
761    iris_emit_reg(batch, L3_ALLOCATION_REG, reg) {
762 #if GFX_VER < 11
763       reg.SLMEnable = cfg->n[INTEL_L3P_SLM] > 0;
764 #endif
765 #if GFX_VER == 11
766       /* Wa_1406697149: Bit 9 "Error Detection Behavior Control" must be set
767        * in L3CNTLREG register. The default setting of the bit is not the
768        * desirable behavior.
769        */
770       reg.ErrorDetectionBehaviorControl = true;
771       reg.UseFullWays = true;
772 #endif
773       if (GFX_VER < 12 || cfg) {
774          reg.URBAllocation = cfg->n[INTEL_L3P_URB];
775          reg.ROAllocation = cfg->n[INTEL_L3P_RO];
776          reg.DCAllocation = cfg->n[INTEL_L3P_DC];
777          reg.AllAllocation = cfg->n[INTEL_L3P_ALL];
778       } else {
779 #if GFX_VER >= 12
780          reg.L3FullWayAllocationEnable = true;
781 #endif
782       }
783    }
784 }
785 
786 #if GFX_VER == 9
787 static void
iris_enable_obj_preemption(struct iris_batch *batch, bool enable)
789 {
790    /* A fixed function pipe flush is required before modifying this field */
791    iris_emit_end_of_pipe_sync(batch, enable ? "enable preemption"
792                                             : "disable preemption",
793                               PIPE_CONTROL_RENDER_TARGET_FLUSH);
794 
795    /* enable object level preemption */
796    iris_emit_reg(batch, GENX(CS_CHICKEN1), reg) {
797       reg.ReplayMode = enable;
798       reg.ReplayModeMask = true;
799    }
800 }
801 #endif
802 
803 static void
upload_pixel_hashing_tables(struct iris_batch *batch)
805 {
806    UNUSED const struct intel_device_info *devinfo = &batch->screen->devinfo;
807    UNUSED struct iris_context *ice = batch->ice;
808    assert(&ice->batches[IRIS_BATCH_RENDER] == batch);
809 
810 #if GFX_VER == 11
811    /* Gfx11 hardware has two pixel pipes at most. */
812    for (unsigned i = 2; i < ARRAY_SIZE(devinfo->ppipe_subslices); i++)
813       assert(devinfo->ppipe_subslices[i] == 0);
814 
815    if (devinfo->ppipe_subslices[0] == devinfo->ppipe_subslices[1])
816       return;
817 
818    unsigned size = GENX(SLICE_HASH_TABLE_length) * 4;
819    uint32_t hash_address;
820    struct pipe_resource *tmp = NULL;
821    uint32_t *map =
822       stream_state(batch, ice->state.dynamic_uploader, &tmp,
823                    size, 64, &hash_address);
824    pipe_resource_reference(&tmp, NULL);
825 
826    const bool flip = devinfo->ppipe_subslices[0] < devinfo->ppipe_subslices[1];
827    struct GENX(SLICE_HASH_TABLE) table;
828    intel_compute_pixel_hash_table_3way(16, 16, 3, 3, flip, table.Entry[0]);
829 
830    GENX(SLICE_HASH_TABLE_pack)(NULL, map, &table);
831 
832    iris_emit_cmd(batch, GENX(3DSTATE_SLICE_TABLE_STATE_POINTERS), ptr) {
833       ptr.SliceHashStatePointerValid = true;
834       ptr.SliceHashTableStatePointer = hash_address;
835    }
836 
837    iris_emit_cmd(batch, GENX(3DSTATE_3D_MODE), mode) {
838       mode.SliceHashingTableEnable = true;
839    }
840 
841 #elif GFX_VERx10 == 120
842    /* For each n calculate ppipes_of[n], equal to the number of pixel pipes
843     * present with n active dual subslices.
844     */
845    unsigned ppipes_of[3] = {};
846 
847    for (unsigned n = 0; n < ARRAY_SIZE(ppipes_of); n++) {
848       for (unsigned p = 0; p < 3; p++)
849          ppipes_of[n] += (devinfo->ppipe_subslices[p] == n);
850    }
851 
852    /* Gfx12 has three pixel pipes. */
853    for (unsigned p = 3; p < ARRAY_SIZE(devinfo->ppipe_subslices); p++)
854       assert(devinfo->ppipe_subslices[p] == 0);
855 
856    if (ppipes_of[2] == 3 || ppipes_of[0] == 2) {
857       /* All three pixel pipes have the maximum number of active dual
858        * subslices, or there is only one active pixel pipe: Nothing to do.
859        */
860       return;
861    }
862 
863    iris_emit_cmd(batch, GENX(3DSTATE_SUBSLICE_HASH_TABLE), p) {
864       p.SliceHashControl[0] = TABLE_0;
865 
866       if (ppipes_of[2] == 2 && ppipes_of[0] == 1)
867          intel_compute_pixel_hash_table_3way(8, 16, 2, 2, 0, p.TwoWayTableEntry[0]);
868       else if (ppipes_of[2] == 1 && ppipes_of[1] == 1 && ppipes_of[0] == 1)
869          intel_compute_pixel_hash_table_3way(8, 16, 3, 3, 0, p.TwoWayTableEntry[0]);
870 
871       if (ppipes_of[2] == 2 && ppipes_of[1] == 1)
872          intel_compute_pixel_hash_table_3way(8, 16, 5, 4, 0, p.ThreeWayTableEntry[0]);
873       else if (ppipes_of[2] == 2 && ppipes_of[0] == 1)
874          intel_compute_pixel_hash_table_3way(8, 16, 2, 2, 0, p.ThreeWayTableEntry[0]);
875       else if (ppipes_of[2] == 1 && ppipes_of[1] == 1 && ppipes_of[0] == 1)
876          intel_compute_pixel_hash_table_3way(8, 16, 3, 3, 0, p.ThreeWayTableEntry[0]);
877       else
878          unreachable("Illegal fusing.");
879    }
880 
881    iris_emit_cmd(batch, GENX(3DSTATE_3D_MODE), p) {
882       p.SubsliceHashingTableEnable = true;
883       p.SubsliceHashingTableEnableMask = true;
884    }
885 
886 #elif GFX_VERx10 == 125
887    struct pipe_screen *pscreen = &batch->screen->base;
888    const unsigned size = GENX(SLICE_HASH_TABLE_length) * 4;
889    const struct pipe_resource tmpl = {
890      .target = PIPE_BUFFER,
891      .format = PIPE_FORMAT_R8_UNORM,
892      .bind = PIPE_BIND_CUSTOM,
893      .usage = PIPE_USAGE_IMMUTABLE,
894      .flags = IRIS_RESOURCE_FLAG_DYNAMIC_MEMZONE,
895      .width0 = size,
896      .height0 = 1,
897      .depth0 = 1,
898      .array_size = 1
899    };
900 
901    pipe_resource_reference(&ice->state.pixel_hashing_tables, NULL);
902    ice->state.pixel_hashing_tables = pscreen->resource_create(pscreen, &tmpl);
903 
904    struct iris_resource *res = (struct iris_resource *)ice->state.pixel_hashing_tables;
905    struct pipe_transfer *transfer = NULL;
906    uint32_t *map = pipe_buffer_map_range(&ice->ctx, ice->state.pixel_hashing_tables,
907                                          0, size, PIPE_MAP_WRITE,
908                                          &transfer);
909 
910    uint32_t ppipe_mask = 0;
911    for (unsigned p = 0; p < ARRAY_SIZE(devinfo->ppipe_subslices); p++) {
912       if (devinfo->ppipe_subslices[p])
913          ppipe_mask |= (1u << p);
914    }
915    assert(ppipe_mask);
916 
917    struct GENX(SLICE_HASH_TABLE) table;
918 
   /* Note that the hardware expects an array with 7 tables, each table
    * intended to specify the pixel pipe hashing behavior for every
    * possible slice count between 2 and 8.  However, that doesn't
    * actually work, among other reasons due to hardware bugs that will
    * cause the GPU to erroneously access the table at the wrong index in
    * some cases, so in practice all 7 tables need to be initialized to
    * the same value.
    */
927    for (unsigned i = 0; i < 7; i++)
928      intel_compute_pixel_hash_table_nway(16, 16, ppipe_mask, table.Entry[i][0]);
929 
930    GENX(SLICE_HASH_TABLE_pack)(NULL, map, &table);
931 
932    pipe_buffer_unmap(&ice->ctx, transfer);
933 
934    iris_use_pinned_bo(batch, res->bo, false, IRIS_DOMAIN_NONE);
935    iris_record_state_size(batch->state_sizes, res->bo->address + res->offset, size);
936 
937    iris_emit_cmd(batch, GENX(3DSTATE_SLICE_TABLE_STATE_POINTERS), ptr) {
938       ptr.SliceHashStatePointerValid = true;
939       ptr.SliceHashTableStatePointer = iris_bo_offset_from_base_address(res->bo) +
940                                        res->offset;
941    }
942 
943    iris_emit_cmd(batch, GENX(3DSTATE_3D_MODE), mode) {
944       mode.SliceHashingTableEnable = true;
945       mode.SliceHashingTableEnableMask = true;
946       mode.CrossSliceHashingMode = (util_bitcount(ppipe_mask) > 1 ?
947                                     hashing32x32 : NormalMode);
948       mode.CrossSliceHashingModeMask = -1;
949    }
950 #endif
951 }
952 
953 static void
iris_alloc_push_constants(struct iris_batch *batch)
955 {
956    const struct intel_device_info *devinfo = &batch->screen->devinfo;
957 
958    /* For now, we set a static partitioning of the push constant area,
959     * assuming that all stages could be in use.
960     *
961     * TODO: Try lazily allocating the HS/DS/GS sections as needed, and
962     *       see if that improves performance by offering more space to
963     *       the VS/FS when those aren't in use.  Also, try dynamically
964     *       enabling/disabling it like i965 does.  This would be more
965     *       stalls and may not actually help; we don't know yet.
966     */
967 
968    /* Divide as equally as possible with any remainder given to FRAGMENT. */
969    const unsigned push_constant_kb = devinfo->max_constant_urb_size_kb;
970    const unsigned stage_size = push_constant_kb / 5;
971    const unsigned frag_size = push_constant_kb - 4 * stage_size;
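   /* For example (illustrative arithmetic), with a 32KB push constant area:
    * stage_size = 32 / 5 = 6KB for VS/HS/DS/GS, and
    * frag_size = 32 - 4 * 6 = 8KB for the fragment shader.
    */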
972 
973    for (int i = 0; i <= MESA_SHADER_FRAGMENT; i++) {
974       iris_emit_cmd(batch, GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), alloc) {
975          alloc._3DCommandSubOpcode = 18 + i;
976          alloc.ConstantBufferOffset = stage_size * i;
977          alloc.ConstantBufferSize = i == MESA_SHADER_FRAGMENT ? frag_size : stage_size;
978       }
979    }
980 
981 #if GFX_VERx10 == 125
982    /* Wa_22011440098
983     *
984     * In 3D mode, after programming push constant alloc command immediately
985     * program push constant command(ZERO length) without any commit between
986     * them.
987     */
988    if (intel_device_info_is_dg2(devinfo)) {
989       iris_emit_cmd(batch, GENX(3DSTATE_CONSTANT_ALL), c) {
990          c.MOCS = iris_mocs(NULL, &batch->screen->isl_dev, 0);
991       }
992    }
993 #endif
994 }
995 
996 #if GFX_VER >= 12
997 static void
998 init_aux_map_state(struct iris_batch *batch);
999 #endif
1000 
1001 /**
1002  * Upload initial GPU state for any kind of context.
1003  *
1004  * These need to happen for both render and compute.
1005  */
1006 static void
iris_init_common_context(struct iris_batch *batch)
1008 {
1009 #if GFX_VER == 11
1010    iris_emit_reg(batch, GENX(SAMPLER_MODE), reg) {
1011       reg.HeaderlessMessageforPreemptableContexts = 1;
1012       reg.HeaderlessMessageforPreemptableContextsMask = 1;
1013    }
1014 
1015    /* Bit 1 must be set in HALF_SLICE_CHICKEN7. */
1016    iris_emit_reg(batch, GENX(HALF_SLICE_CHICKEN7), reg) {
1017       reg.EnabledTexelOffsetPrecisionFix = 1;
1018       reg.EnabledTexelOffsetPrecisionFixMask = 1;
1019    }
1020 #endif
1021 
1022    /* Select 256B-aligned binding table mode on Icelake through Tigerlake,
1023     * which gives us larger binding table pointers, at the cost of higher
1024     * alignment requirements (bits 18:8 are valid instead of 15:5).  When
1025     * using this mode, we have to shift binding table pointers by 3 bits,
1026     * as they're still stored in the same bit-location in the field.
1027     */
1028 #if GFX_VER >= 11 && GFX_VERx10 < 125
1029    iris_emit_reg(batch, GENX(GT_MODE), reg) {
1030       reg.BindingTableAlignment = BTP_18_8;
1031       reg.BindingTableAlignmentMask = true;
1032    }
1033 #define IRIS_BT_OFFSET_SHIFT 3
1034 #else
1035 #define IRIS_BT_OFFSET_SHIFT 0
1036 #endif
1037 }
1038 
1039 /**
1040  * Upload the initial GPU state for a render context.
1041  *
 * This sets some invariant state that needs to be programmed a particular
 * way, but that we never actually change afterwards.
1044  */
1045 static void
iris_init_render_context(struct iris_batch *batch)
1047 {
1048    UNUSED const struct intel_device_info *devinfo = &batch->screen->devinfo;
1049 
1050    iris_batch_sync_region_start(batch);
1051 
1052    emit_pipeline_select(batch, _3D);
1053 
1054    iris_emit_l3_config(batch, batch->screen->l3_config_3d);
1055 
1056    init_state_base_address(batch);
1057 
1058    iris_init_common_context(batch);
1059 
1060 #if GFX_VER >= 9
1061    iris_emit_reg(batch, GENX(CS_DEBUG_MODE2), reg) {
1062       reg.CONSTANT_BUFFERAddressOffsetDisable = true;
1063       reg.CONSTANT_BUFFERAddressOffsetDisableMask = true;
1064    }
1065 #else
1066    iris_emit_reg(batch, GENX(INSTPM), reg) {
1067       reg.CONSTANT_BUFFERAddressOffsetDisable = true;
1068       reg.CONSTANT_BUFFERAddressOffsetDisableMask = true;
1069    }
1070 #endif
1071 
1072 #if GFX_VER == 9
1073    iris_emit_reg(batch, GENX(CACHE_MODE_1), reg) {
1074       reg.FloatBlendOptimizationEnable = true;
1075       reg.FloatBlendOptimizationEnableMask = true;
1076       reg.MSCRAWHazardAvoidanceBit = true;
1077       reg.MSCRAWHazardAvoidanceBitMask = true;
1078       reg.PartialResolveDisableInVC = true;
1079       reg.PartialResolveDisableInVCMask = true;
1080    }
1081 
1082    if (devinfo->platform == INTEL_PLATFORM_GLK)
1083       init_glk_barrier_mode(batch, GLK_BARRIER_MODE_3D_HULL);
1084 #endif
1085 
1086 #if GFX_VER == 11
1087    iris_emit_reg(batch, GENX(TCCNTLREG), reg) {
1088       reg.L3DataPartialWriteMergingEnable = true;
1089       reg.ColorZPartialWriteMergingEnable = true;
1090       reg.URBPartialWriteMergingEnable = true;
1091       reg.TCDisable = true;
1092    }
1093 
   /* The hardware specification recommends disabling repacking for
    * compatibility with the decompression mechanism in the display
    * controller.
    */
1097    if (devinfo->disable_ccs_repack) {
1098       iris_emit_reg(batch, GENX(CACHE_MODE_0), reg) {
1099          reg.DisableRepackingforCompression = true;
1100          reg.DisableRepackingforCompressionMask = true;
1101       }
1102    }
1103 #endif
1104 
1105    upload_pixel_hashing_tables(batch);
1106 
1107    /* 3DSTATE_DRAWING_RECTANGLE is non-pipelined, so we want to avoid
1108     * changing it dynamically.  We set it to the maximum size here, and
1109     * instead include the render target dimensions in the viewport, so
1110     * viewport extents clipping takes care of pruning stray geometry.
1111     */
1112    iris_emit_cmd(batch, GENX(3DSTATE_DRAWING_RECTANGLE), rect) {
1113       rect.ClippedDrawingRectangleXMax = UINT16_MAX;
1114       rect.ClippedDrawingRectangleYMax = UINT16_MAX;
1115    }
1116 
1117    /* Set the initial MSAA sample positions. */
1118    iris_emit_cmd(batch, GENX(3DSTATE_SAMPLE_PATTERN), pat) {
1119       INTEL_SAMPLE_POS_1X(pat._1xSample);
1120       INTEL_SAMPLE_POS_2X(pat._2xSample);
1121       INTEL_SAMPLE_POS_4X(pat._4xSample);
1122       INTEL_SAMPLE_POS_8X(pat._8xSample);
1123 #if GFX_VER >= 9
1124       INTEL_SAMPLE_POS_16X(pat._16xSample);
1125 #endif
1126    }
1127 
1128    /* Use the legacy AA line coverage computation. */
1129    iris_emit_cmd(batch, GENX(3DSTATE_AA_LINE_PARAMETERS), foo);
1130 
1131    /* Disable chromakeying (it's for media) */
1132    iris_emit_cmd(batch, GENX(3DSTATE_WM_CHROMAKEY), foo);
1133 
1134    /* We want regular rendering, not special HiZ operations. */
1135    iris_emit_cmd(batch, GENX(3DSTATE_WM_HZ_OP), foo);
1136 
1137    /* No polygon stippling offsets are necessary. */
1138    /* TODO: may need to set an offset for origin-UL framebuffers */
1139    iris_emit_cmd(batch, GENX(3DSTATE_POLY_STIPPLE_OFFSET), foo);
1140 
1141    iris_alloc_push_constants(batch);
1142 
1143 
1144 #if GFX_VER >= 12
1145    init_aux_map_state(batch);
1146 #endif
1147 
1148    iris_batch_sync_region_end(batch);
1149 }
1150 
1151 static void
iris_init_compute_context(struct iris_batch *batch)
1153 {
1154    UNUSED const struct intel_device_info *devinfo = &batch->screen->devinfo;
1155 
1156    iris_batch_sync_region_start(batch);
1157 
1158    /* Wa_1607854226:
1159     *
1160     *  Start with pipeline in 3D mode to set the STATE_BASE_ADDRESS.
1161     */
1162 #if GFX_VERx10 == 120
1163    emit_pipeline_select(batch, _3D);
1164 #else
1165    emit_pipeline_select(batch, GPGPU);
1166 #endif
1167 
1168    iris_emit_l3_config(batch, batch->screen->l3_config_cs);
1169 
1170    init_state_base_address(batch);
1171 
1172    iris_init_common_context(batch);
1173 
1174 #if GFX_VERx10 == 120
1175    emit_pipeline_select(batch, GPGPU);
1176 #endif
1177 
1178 #if GFX_VER == 9
1179    if (devinfo->platform == INTEL_PLATFORM_GLK)
1180       init_glk_barrier_mode(batch, GLK_BARRIER_MODE_GPGPU);
1181 #endif
1182 
1183 #if GFX_VER >= 12
1184    init_aux_map_state(batch);
1185 #endif
1186 
1187    iris_batch_sync_region_end(batch);
1188 }
1189 
1190 struct iris_vertex_buffer_state {
1191    /** The VERTEX_BUFFER_STATE hardware structure. */
1192    uint32_t state[GENX(VERTEX_BUFFER_STATE_length)];
1193 
1194    /** The resource to source vertex data from. */
1195    struct pipe_resource *resource;
1196 
1197    int offset;
1198 };
1199 
1200 struct iris_depth_buffer_state {
1201    /* Depth/HiZ/Stencil related hardware packets. */
1202    uint32_t packets[GENX(3DSTATE_DEPTH_BUFFER_length) +
1203                     GENX(3DSTATE_STENCIL_BUFFER_length) +
1204                     GENX(3DSTATE_HIER_DEPTH_BUFFER_length) +
1205                     GENX(3DSTATE_CLEAR_PARAMS_length)];
1206 };
1207 
1208 #if GFX_VERx10 == 120
1209    enum iris_depth_reg_mode {
1210       IRIS_DEPTH_REG_MODE_HW_DEFAULT = 0,
1211       IRIS_DEPTH_REG_MODE_D16,
1212       IRIS_DEPTH_REG_MODE_UNKNOWN,
1213    };
1214 #endif
1215 
1216 /**
1217  * Generation-specific context state (ice->state.genx->...).
1218  *
1219  * Most state can go in iris_context directly, but these encode hardware
1220  * packets which vary by generation.
1221  */
1222 struct iris_genx_state {
1223    struct iris_vertex_buffer_state vertex_buffers[33];
1224    uint32_t last_index_buffer[GENX(3DSTATE_INDEX_BUFFER_length)];
1225 
1226    struct iris_depth_buffer_state depth_buffer;
1227 
1228    uint32_t so_buffers[4 * GENX(3DSTATE_SO_BUFFER_length)];
1229 
1230 #if GFX_VER == 8
1231    bool pma_fix_enabled;
1232 #endif
1233 
1234 #if GFX_VER == 9
1235    /* Is object level preemption enabled? */
1236    bool object_preemption;
1237 #endif
1238 
1239 #if GFX_VERx10 == 120
1240    enum iris_depth_reg_mode depth_reg_mode;
1241 #endif
1242 
1243    struct {
1244 #if GFX_VER == 8
1245       struct brw_image_param image_param[PIPE_MAX_SHADER_IMAGES];
1246 #endif
1247    } shaders[MESA_SHADER_STAGES];
1248 };
1249 
1250 /**
1251  * The pipe->set_blend_color() driver hook.
1252  *
1253  * This corresponds to our COLOR_CALC_STATE.
1254  */
1255 static void
iris_set_blend_color(struct pipe_context *ctx,
1257                      const struct pipe_blend_color *state)
1258 {
1259    struct iris_context *ice = (struct iris_context *) ctx;
1260 
1261    /* Our COLOR_CALC_STATE is exactly pipe_blend_color, so just memcpy */
1262    memcpy(&ice->state.blend_color, state, sizeof(struct pipe_blend_color));
1263    ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE;
1264 }
1265 
1266 /**
1267  * Gallium CSO for blend state (see pipe_blend_state).
1268  */
1269 struct iris_blend_state {
1270    /** Partial 3DSTATE_PS_BLEND */
1271    uint32_t ps_blend[GENX(3DSTATE_PS_BLEND_length)];
1272 
1273    /** Partial BLEND_STATE */
1274    uint32_t blend_state[GENX(BLEND_STATE_length) +
1275                         BRW_MAX_DRAW_BUFFERS * GENX(BLEND_STATE_ENTRY_length)];
1276 
1277    bool alpha_to_coverage; /* for shader key */
1278 
1279    /** Bitfield of whether blending is enabled for RT[i] - for aux resolves */
1280    uint8_t blend_enables;
1281 
1282    /** Bitfield of whether color writes are enabled for RT[i] */
1283    uint8_t color_write_enables;
1284 
1285    /** Does RT[0] use dual color blending? */
1286    bool dual_color_blending;
1287 };
1288 
1289 static enum pipe_blendfactor
fix_blendfactor(enum pipe_blendfactor f, bool alpha_to_one)
1291 {
1292    if (alpha_to_one) {
1293       if (f == PIPE_BLENDFACTOR_SRC1_ALPHA)
1294          return PIPE_BLENDFACTOR_ONE;
1295 
1296       if (f == PIPE_BLENDFACTOR_INV_SRC1_ALPHA)
1297          return PIPE_BLENDFACTOR_ZERO;
1298    }
1299 
1300    return f;
1301 }
1302 
1303 /**
1304  * The pipe->create_blend_state() driver hook.
1305  *
1306  * Translates a pipe_blend_state into iris_blend_state.
1307  */
1308 static void *
iris_create_blend_state(struct pipe_context *ctx,
1310                         const struct pipe_blend_state *state)
1311 {
1312    struct iris_blend_state *cso = malloc(sizeof(struct iris_blend_state));
1313    uint32_t *blend_entry = cso->blend_state + GENX(BLEND_STATE_length);
1314 
1315    cso->blend_enables = 0;
1316    cso->color_write_enables = 0;
1317    STATIC_ASSERT(BRW_MAX_DRAW_BUFFERS <= 8);
1318 
1319    cso->alpha_to_coverage = state->alpha_to_coverage;
1320 
1321    bool indep_alpha_blend = false;
1322 
1323    for (int i = 0; i < BRW_MAX_DRAW_BUFFERS; i++) {
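      /* Without independent blending, RT[0]'s blend state applies to every
       * render target.
       */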
1324       const struct pipe_rt_blend_state *rt =
1325          &state->rt[state->independent_blend_enable ? i : 0];
1326 
1327       enum pipe_blendfactor src_rgb =
1328          fix_blendfactor(rt->rgb_src_factor, state->alpha_to_one);
1329       enum pipe_blendfactor src_alpha =
1330          fix_blendfactor(rt->alpha_src_factor, state->alpha_to_one);
1331       enum pipe_blendfactor dst_rgb =
1332          fix_blendfactor(rt->rgb_dst_factor, state->alpha_to_one);
1333       enum pipe_blendfactor dst_alpha =
1334          fix_blendfactor(rt->alpha_dst_factor, state->alpha_to_one);
1335 
1336       if (rt->rgb_func != rt->alpha_func ||
1337           src_rgb != src_alpha || dst_rgb != dst_alpha)
1338          indep_alpha_blend = true;
1339 
1340       if (rt->blend_enable)
1341          cso->blend_enables |= 1u << i;
1342 
1343       if (rt->colormask)
1344          cso->color_write_enables |= 1u << i;
1345 
1346       iris_pack_state(GENX(BLEND_STATE_ENTRY), blend_entry, be) {
1347          be.LogicOpEnable = state->logicop_enable;
1348          be.LogicOpFunction = state->logicop_func;
1349 
1350          be.PreBlendSourceOnlyClampEnable = false;
1351          be.ColorClampRange = COLORCLAMP_RTFORMAT;
1352          be.PreBlendColorClampEnable = true;
1353          be.PostBlendColorClampEnable = true;
1354 
1355          be.ColorBufferBlendEnable = rt->blend_enable;
1356 
1357          be.ColorBlendFunction          = rt->rgb_func;
1358          be.AlphaBlendFunction          = rt->alpha_func;
1359 
1360          /* The casts prevent warnings about implicit enum type conversions. */
1361          be.SourceBlendFactor           = (int) src_rgb;
1362          be.SourceAlphaBlendFactor      = (int) src_alpha;
1363          be.DestinationBlendFactor      = (int) dst_rgb;
1364          be.DestinationAlphaBlendFactor = (int) dst_alpha;
1365 
1366          be.WriteDisableRed   = !(rt->colormask & PIPE_MASK_R);
1367          be.WriteDisableGreen = !(rt->colormask & PIPE_MASK_G);
1368          be.WriteDisableBlue  = !(rt->colormask & PIPE_MASK_B);
1369          be.WriteDisableAlpha = !(rt->colormask & PIPE_MASK_A);
1370       }
1371       blend_entry += GENX(BLEND_STATE_ENTRY_length);
1372    }
1373 
1374    iris_pack_command(GENX(3DSTATE_PS_BLEND), cso->ps_blend, pb) {
1375       /* pb.HasWriteableRT is filled in at draw time.
1376        * pb.AlphaTestEnable is filled in at draw time.
1377        *
1378        * pb.ColorBufferBlendEnable is filled in at draw time so we can avoid
1379        * setting it when dual color blending without an appropriate shader.
1380        */
1381 
1382       pb.AlphaToCoverageEnable = state->alpha_to_coverage;
1383       pb.IndependentAlphaBlendEnable = indep_alpha_blend;
1384 
1385       /* The casts prevent warnings about implicit enum type conversions. */
1386       pb.SourceBlendFactor =
1387          (int) fix_blendfactor(state->rt[0].rgb_src_factor, state->alpha_to_one);
1388       pb.SourceAlphaBlendFactor =
1389          (int) fix_blendfactor(state->rt[0].alpha_src_factor, state->alpha_to_one);
1390       pb.DestinationBlendFactor =
1391          (int) fix_blendfactor(state->rt[0].rgb_dst_factor, state->alpha_to_one);
1392       pb.DestinationAlphaBlendFactor =
1393          (int) fix_blendfactor(state->rt[0].alpha_dst_factor, state->alpha_to_one);
1394    }
1395 
1396    iris_pack_state(GENX(BLEND_STATE), cso->blend_state, bs) {
1397       bs.AlphaToCoverageEnable = state->alpha_to_coverage;
1398       bs.IndependentAlphaBlendEnable = indep_alpha_blend;
1399       bs.AlphaToOneEnable = state->alpha_to_one;
1400       bs.AlphaToCoverageDitherEnable = state->alpha_to_coverage;
1401       bs.ColorDitherEnable = state->dither;
1402       /* bs.AlphaTestEnable and bs.AlphaTestFunction are filled in later. */
1403    }
1404 
1405    cso->dual_color_blending = util_blend_state_is_dual(state, 0);
1406 
1407    return cso;
1408 }
1409 
1410 /**
1411  * The pipe->bind_blend_state() driver hook.
1412  *
1413  * Bind a blending CSO and flag related dirty bits.
1414  */
1415 static void
1416 iris_bind_blend_state(struct pipe_context *ctx, void *state)
1417 {
1418    struct iris_context *ice = (struct iris_context *) ctx;
1419    struct iris_blend_state *cso = state;
1420 
1421    ice->state.cso_blend = cso;
1422 
1423    ice->state.dirty |= IRIS_DIRTY_PS_BLEND;
1424    ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
1425    ice->state.stage_dirty |= ice->state.stage_dirty_for_nos[IRIS_NOS_BLEND];
1426 
1427    if (GFX_VER == 8)
1428       ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
1429 }
1430 
1431 /**
1432  * Return true if the FS writes to any color outputs which are not disabled
1433  * via color masking.
1434  */
1435 static bool
1436 has_writeable_rt(const struct iris_blend_state *cso_blend,
1437                  const struct shader_info *fs_info)
1438 {
1439    if (!fs_info)
1440       return false;
1441 
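   /* outputs_written is indexed by FRAG_RESULT_*; shifting by
    * FRAG_RESULT_DATA0 leaves one bit per color attachment, so a shader
    * writing DATA0 and DATA1 yields rt_outputs == 0x3.
    */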
1442    unsigned rt_outputs = fs_info->outputs_written >> FRAG_RESULT_DATA0;
1443 
1444    if (fs_info->outputs_written & BITFIELD64_BIT(FRAG_RESULT_COLOR))
1445       rt_outputs = (1 << BRW_MAX_DRAW_BUFFERS) - 1;
1446 
1447    return cso_blend->color_write_enables & rt_outputs;
1448 }
1449 
1450 /**
1451  * Gallium CSO for depth, stencil, and alpha testing state.
1452  */
1453 struct iris_depth_stencil_alpha_state {
1454    /** Partial 3DSTATE_WM_DEPTH_STENCIL. */
1455    uint32_t wmds[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
1456 
1457 #if GFX_VER >= 12
1458    uint32_t depth_bounds[GENX(3DSTATE_DEPTH_BOUNDS_length)];
1459 #endif
1460 
1461    /** Outbound to BLEND_STATE, 3DSTATE_PS_BLEND, COLOR_CALC_STATE. */
1462    unsigned alpha_enabled:1;
1463    unsigned alpha_func:3;     /**< PIPE_FUNC_x */
1464    float alpha_ref_value;     /**< reference value */
1465 
1466    /** Outbound to resolve and cache set tracking. */
1467    bool depth_writes_enabled;
1468    bool stencil_writes_enabled;
1469 
1470    /** Outbound to Gfx8-9 PMA stall equations */
1471    bool depth_test_enabled;
1472 };
1473 
1474 /**
1475  * The pipe->create_depth_stencil_alpha_state() driver hook.
1476  *
1477  * We encode most of 3DSTATE_WM_DEPTH_STENCIL, and just save off the alpha
1478  * testing state since we need pieces of it in a variety of places.
1479  */
1480 static void *
1481 iris_create_zsa_state(struct pipe_context *ctx,
1482                       const struct pipe_depth_stencil_alpha_state *state)
1483 {
1484    struct iris_depth_stencil_alpha_state *cso =
1485       malloc(sizeof(struct iris_depth_stencil_alpha_state));
1486 
1487    bool two_sided_stencil = state->stencil[1].enabled;
1488 
1489    cso->alpha_enabled = state->alpha_enabled;
1490    cso->alpha_func = state->alpha_func;
1491    cso->alpha_ref_value = state->alpha_ref_value;
1492    cso->depth_writes_enabled = state->depth_writemask;
1493    cso->depth_test_enabled = state->depth_enabled;
1494    cso->stencil_writes_enabled =
1495       state->stencil[0].writemask != 0 ||
1496       (two_sided_stencil && state->stencil[1].writemask != 0);
1497 
1498    /* gallium frontends need to optimize away EQUAL writes for us. */
1499    assert(!(state->depth_func == PIPE_FUNC_EQUAL && state->depth_writemask));
1500 
1501    iris_pack_command(GENX(3DSTATE_WM_DEPTH_STENCIL), cso->wmds, wmds) {
1502       wmds.StencilFailOp = state->stencil[0].fail_op;
1503       wmds.StencilPassDepthFailOp = state->stencil[0].zfail_op;
1504       wmds.StencilPassDepthPassOp = state->stencil[0].zpass_op;
1505       wmds.StencilTestFunction =
1506          translate_compare_func(state->stencil[0].func);
1507       wmds.BackfaceStencilFailOp = state->stencil[1].fail_op;
1508       wmds.BackfaceStencilPassDepthFailOp = state->stencil[1].zfail_op;
1509       wmds.BackfaceStencilPassDepthPassOp = state->stencil[1].zpass_op;
1510       wmds.BackfaceStencilTestFunction =
1511          translate_compare_func(state->stencil[1].func);
1512       wmds.DepthTestFunction = translate_compare_func(state->depth_func);
1513       wmds.DoubleSidedStencilEnable = two_sided_stencil;
1514       wmds.StencilTestEnable = state->stencil[0].enabled;
1515       wmds.StencilBufferWriteEnable =
1516          state->stencil[0].writemask != 0 ||
1517          (two_sided_stencil && state->stencil[1].writemask != 0);
1518       wmds.DepthTestEnable = state->depth_enabled;
1519       wmds.DepthBufferWriteEnable = state->depth_writemask;
1520       wmds.StencilTestMask = state->stencil[0].valuemask;
1521       wmds.StencilWriteMask = state->stencil[0].writemask;
1522       wmds.BackfaceStencilTestMask = state->stencil[1].valuemask;
1523       wmds.BackfaceStencilWriteMask = state->stencil[1].writemask;
1524       /* wmds.[Backface]StencilReferenceValue are merged later */
1525 #if GFX_VER >= 12
1526       wmds.StencilReferenceValueModifyDisable = true;
1527 #endif
1528    }
1529 
1530 #if GFX_VER >= 12
1531    iris_pack_command(GENX(3DSTATE_DEPTH_BOUNDS), cso->depth_bounds, depth_bounds) {
1532       depth_bounds.DepthBoundsTestValueModifyDisable = false;
1533       depth_bounds.DepthBoundsTestEnableModifyDisable = false;
1534       depth_bounds.DepthBoundsTestEnable = state->depth_bounds_test;
1535       depth_bounds.DepthBoundsTestMinValue = state->depth_bounds_min;
1536       depth_bounds.DepthBoundsTestMaxValue = state->depth_bounds_max;
1537    }
1538 #endif
1539 
1540    return cso;
1541 }
1542 
1543 /**
1544  * The pipe->bind_depth_stencil_alpha_state() driver hook.
1545  *
1546  * Bind a depth/stencil/alpha CSO and flag related dirty bits.
1547  */
1548 static void
1549 iris_bind_zsa_state(struct pipe_context *ctx, void *state)
1550 {
1551    struct iris_context *ice = (struct iris_context *) ctx;
1552    struct iris_depth_stencil_alpha_state *old_cso = ice->state.cso_zsa;
1553    struct iris_depth_stencil_alpha_state *new_cso = state;
1554 
1555    if (new_cso) {
1556       if (cso_changed(alpha_ref_value))
1557          ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE;
1558 
1559       if (cso_changed(alpha_enabled))
1560          ice->state.dirty |= IRIS_DIRTY_PS_BLEND | IRIS_DIRTY_BLEND_STATE;
1561 
1562       if (cso_changed(alpha_func))
1563          ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
1564 
1565       if (cso_changed(depth_writes_enabled) || cso_changed(stencil_writes_enabled))
1566          ice->state.dirty |= IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
1567 
1568       ice->state.depth_writes_enabled = new_cso->depth_writes_enabled;
1569       ice->state.stencil_writes_enabled = new_cso->stencil_writes_enabled;
1570 
1571 #if GFX_VER >= 12
1572       if (cso_changed(depth_bounds))
1573          ice->state.dirty |= IRIS_DIRTY_DEPTH_BOUNDS;
1574 #endif
1575    }
1576 
1577    ice->state.cso_zsa = new_cso;
1578    ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
1579    ice->state.dirty |= IRIS_DIRTY_WM_DEPTH_STENCIL;
1580    ice->state.stage_dirty |=
1581       ice->state.stage_dirty_for_nos[IRIS_NOS_DEPTH_STENCIL_ALPHA];
1582 
1583    if (GFX_VER == 8)
1584       ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
1585 }
1586 
1587 #if GFX_VER == 8
1588 static bool
1589 want_pma_fix(struct iris_context *ice)
1590 {
1591    UNUSED struct iris_screen *screen = (void *) ice->ctx.screen;
1592    UNUSED const struct intel_device_info *devinfo = &screen->devinfo;
1593    const struct brw_wm_prog_data *wm_prog_data = (void *)
1594       ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
1595    const struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
1596    const struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
1597    const struct iris_blend_state *cso_blend = ice->state.cso_blend;
1598 
1599    /* In very specific combinations of state, we can instruct Gfx8-9 hardware
1600     * to avoid stalling at the pixel mask array.  The state equations are
1601     * documented in these places:
1602     *
1603     * - Gfx8 Depth PMA Fix:   CACHE_MODE_1::NP_PMA_FIX_ENABLE
1604     * - Gfx9 Stencil PMA Fix: CACHE_MODE_0::STC PMA Optimization Enable
1605     *
1606     * Both equations share some common elements:
1607     *
1608     *    no_hiz_op =
1609     *       !(3DSTATE_WM_HZ_OP::DepthBufferClear ||
1610     *         3DSTATE_WM_HZ_OP::DepthBufferResolve ||
1611     *         3DSTATE_WM_HZ_OP::Hierarchical Depth Buffer Resolve Enable ||
1612     *         3DSTATE_WM_HZ_OP::StencilBufferClear) &&
1613     *
1614     *    killpixels =
1615     *       3DSTATE_WM::ForceKillPix != ForceOff &&
1616     *       (3DSTATE_PS_EXTRA::PixelShaderKillsPixels ||
1617     *        3DSTATE_PS_EXTRA::oMask Present to RenderTarget ||
1618     *        3DSTATE_PS_BLEND::AlphaToCoverageEnable ||
1619     *        3DSTATE_PS_BLEND::AlphaTestEnable ||
1620     *        3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable)
1621     *
1622     *    (Technically the stencil PMA treats ForceKillPix differently,
1623     *     but I think this is a documentation oversight, and we don't
1624     *     ever use it in this way, so it doesn't matter).
1625     *
1626     *    common_pma_fix =
1627     *       3DSTATE_WM::ForceThreadDispatch != 1 &&
1628     *       3DSTATE_RASTER::ForceSampleCount == NUMRASTSAMPLES_0 &&
1629     *       3DSTATE_DEPTH_BUFFER::SURFACE_TYPE != NULL &&
1630     *       3DSTATE_DEPTH_BUFFER::HIZ Enable &&
1631     *       3DSTATE_WM::EDSC_Mode != EDSC_PREPS &&
1632     *       3DSTATE_PS_EXTRA::PixelShaderValid &&
1633     *       no_hiz_op
1634     *
1635     * These are always true:
1636     *
1637     *    3DSTATE_RASTER::ForceSampleCount == NUMRASTSAMPLES_0
1638     *    3DSTATE_PS_EXTRA::PixelShaderValid
1639     *
1640     * Also, we never use the normal drawing path for HiZ ops; these are true:
1641     *
1642     *    !(3DSTATE_WM_HZ_OP::DepthBufferClear ||
1643     *      3DSTATE_WM_HZ_OP::DepthBufferResolve ||
1644     *      3DSTATE_WM_HZ_OP::Hierarchical Depth Buffer Resolve Enable ||
1645     *      3DSTATE_WM_HZ_OP::StencilBufferClear)
1646     *
1647     * This happens sometimes:
1648     *
1649     *    3DSTATE_WM::ForceThreadDispatch != 1
1650     *
1651     * However, we choose to ignore it as it either agrees with the signal
1652     * (dispatch was already enabled, so nothing out of the ordinary), or
1653     * there are no framebuffer attachments (so no depth or HiZ anyway,
1654     * meaning the PMA signal will already be disabled).
1655     */
1656 
1657    if (!cso_fb->zsbuf)
1658       return false;
1659 
1660    struct iris_resource *zres, *sres;
1661    iris_get_depth_stencil_resources(cso_fb->zsbuf->texture, &zres, &sres);
1662 
1663    /* 3DSTATE_DEPTH_BUFFER::SURFACE_TYPE != NULL &&
1664     * 3DSTATE_DEPTH_BUFFER::HIZ Enable &&
1665     */
1666    if (!zres || !iris_resource_level_has_hiz(zres, cso_fb->zsbuf->u.tex.level))
1667       return false;
1668 
1669    /* 3DSTATE_WM::EDSC_Mode != EDSC_PREPS */
1670    if (wm_prog_data->early_fragment_tests)
1671       return false;
1672 
1673    /* 3DSTATE_WM::ForceKillPix != ForceOff &&
1674     * (3DSTATE_PS_EXTRA::PixelShaderKillsPixels ||
1675     *  3DSTATE_PS_EXTRA::oMask Present to RenderTarget ||
1676     *  3DSTATE_PS_BLEND::AlphaToCoverageEnable ||
1677     *  3DSTATE_PS_BLEND::AlphaTestEnable ||
1678     *  3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable)
1679     */
1680    bool killpixels = wm_prog_data->uses_kill || wm_prog_data->uses_omask ||
1681                      cso_blend->alpha_to_coverage || cso_zsa->alpha_enabled;
1682 
1683    /* The Gfx8 depth PMA equation becomes:
1684     *
1685     *    depth_writes =
1686     *       3DSTATE_WM_DEPTH_STENCIL::DepthWriteEnable &&
1687     *       3DSTATE_DEPTH_BUFFER::DEPTH_WRITE_ENABLE
1688     *
1689     *    stencil_writes =
1690     *       3DSTATE_WM_DEPTH_STENCIL::Stencil Buffer Write Enable &&
1691     *       3DSTATE_DEPTH_BUFFER::STENCIL_WRITE_ENABLE &&
1692     *       3DSTATE_STENCIL_BUFFER::STENCIL_BUFFER_ENABLE
1693     *
1694     *    Z_PMA_OPT =
1695     *       common_pma_fix &&
1696     *       3DSTATE_WM_DEPTH_STENCIL::DepthTestEnable &&
1697     *       ((killpixels && (depth_writes || stencil_writes)) ||
1698     *        3DSTATE_PS_EXTRA::PixelShaderComputedDepthMode != PSCDEPTH_OFF)
1699     *
1700     */
1701    if (!cso_zsa->depth_test_enabled)
1702       return false;
1703 
1704    return wm_prog_data->computed_depth_mode != PSCDEPTH_OFF ||
1705           (killpixels && (cso_zsa->depth_writes_enabled ||
1706                           (sres && cso_zsa->stencil_writes_enabled)));
1707 }
1708 #endif
1709 
1710 void
1711 genX(update_pma_fix)(struct iris_context *ice,
1712                      struct iris_batch *batch,
1713                      bool enable)
1714 {
1715 #if GFX_VER == 8
1716    struct iris_genx_state *genx = ice->state.genx;
1717 
1718    if (genx->pma_fix_enabled == enable)
1719       return;
1720 
1721    genx->pma_fix_enabled = enable;
1722 
1723    /* According to the Broadwell PIPE_CONTROL documentation, software should
1724     * emit a PIPE_CONTROL with the CS Stall and Depth Cache Flush bits set
1725     * prior to the LRI.  If stencil buffer writes are enabled, then a Render Cache Flush is also necessary.
1726     *
1727     * The Gfx9 docs say to use a depth stall rather than a command streamer
1728     * stall.  However, the hardware seems to violently disagree.  A full
1729     * command streamer stall seems to be needed in both cases.
1730     */
1731    iris_emit_pipe_control_flush(batch, "PMA fix change (1/2)",
1732                                 PIPE_CONTROL_CS_STALL |
1733                                 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
1734                                 PIPE_CONTROL_RENDER_TARGET_FLUSH);
1735 
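   /* CACHE_MODE_1 is a masked MMIO register: a control bit only takes effect
    * if the corresponding mask bit in the upper half is also set in the same
    * LRI write, hence the *Mask fields below.
    */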
1736    iris_emit_reg(batch, GENX(CACHE_MODE_1), reg) {
1737       reg.NPPMAFixEnable = enable;
1738       reg.NPEarlyZFailsDisable = enable;
1739       reg.NPPMAFixEnableMask = true;
1740       reg.NPEarlyZFailsDisableMask = true;
1741    }
1742 
1743    /* After the LRI, a PIPE_CONTROL with both the Depth Stall and Depth Cache
1744     * Flush bits is often necessary.  We do it regardless because it's easier.
1745     * The render cache flush is also necessary if stencil writes are enabled.
1746     *
1747     * Again, the Gfx9 docs give a different set of flushes but the Broadwell
1748     * flushes seem to work just as well.
1749     */
1750    iris_emit_pipe_control_flush(batch, "PMA fix change (2/2)",
1751                                 PIPE_CONTROL_DEPTH_STALL |
1752                                 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
1753                                 PIPE_CONTROL_RENDER_TARGET_FLUSH);
1754 #endif
1755 }
1756 
1757 /**
1758  * Gallium CSO for rasterizer state.
1759  */
1760 struct iris_rasterizer_state {
1761    uint32_t sf[GENX(3DSTATE_SF_length)];
1762    uint32_t clip[GENX(3DSTATE_CLIP_length)];
1763    uint32_t raster[GENX(3DSTATE_RASTER_length)];
1764    uint32_t wm[GENX(3DSTATE_WM_length)];
1765    uint32_t line_stipple[GENX(3DSTATE_LINE_STIPPLE_length)];
1766 
1767    uint8_t num_clip_plane_consts;
1768    bool clip_halfz; /* for CC_VIEWPORT */
1769    bool depth_clip_near; /* for CC_VIEWPORT */
1770    bool depth_clip_far; /* for CC_VIEWPORT */
1771    bool flatshade; /* for shader state */
1772    bool flatshade_first; /* for stream output */
1773    bool clamp_fragment_color; /* for shader state */
1774    bool light_twoside; /* for shader state */
1775    bool rasterizer_discard; /* for 3DSTATE_STREAMOUT and 3DSTATE_CLIP */
1776    bool half_pixel_center; /* for 3DSTATE_MULTISAMPLE */
1777    bool line_stipple_enable;
1778    bool poly_stipple_enable;
1779    bool multisample;
1780    bool force_persample_interp;
1781    bool conservative_rasterization;
1782    bool fill_mode_point;
1783    bool fill_mode_line;
1784    bool fill_mode_point_or_line;
1785    enum pipe_sprite_coord_mode sprite_coord_mode; /* PIPE_SPRITE_* */
1786    uint16_t sprite_coord_enable;
1787 };
1788 
1789 static float
1790 get_line_width(const struct pipe_rasterizer_state *state)
1791 {
1792    float line_width = state->line_width;
1793 
1794    /* From the OpenGL 4.4 spec:
1795     *
1796     * "The actual width of non-antialiased lines is determined by rounding
1797     *  the supplied width to the nearest integer, then clamping it to the
1798     *  implementation-dependent maximum non-antialiased line width."
1799     */
1800    if (!state->multisample && !state->line_smooth)
1801       line_width = roundf(state->line_width);
1802 
1803    if (!state->multisample && state->line_smooth && line_width < 1.5f) {
1804       /* For 1 pixel line thickness or less, the general anti-aliasing
1805        * algorithm gives up, and a garbage line is generated.  Setting a
1806        * Line Width of 0.0 specifies the rasterization of the "thinnest"
1807        * (one-pixel-wide), non-antialiased lines.
1808        *
1809        * Lines rendered with zero Line Width are rasterized using the
1810        * "Grid Intersection Quantization" rules as specified by the
1811        * "Zero-Width (Cosmetic) Line Rasterization" section of the docs.
1812        */
1813       line_width = 0.0f;
1814    }
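   /* For example, an aliased width of 1.4 rounds to 1.0 above, while a
    * smoothed 1.4-wide line falls below the 1.5 threshold and is drawn as a
    * zero-width ("cosmetic") line instead.
    */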
1815 
1816    return line_width;
1817 }
1818 
1819 /**
1820  * The pipe->create_rasterizer_state() driver hook.
1821  */
1822 static void *
1823 iris_create_rasterizer_state(struct pipe_context *ctx,
1824                              const struct pipe_rasterizer_state *state)
1825 {
1826    struct iris_rasterizer_state *cso =
1827       malloc(sizeof(struct iris_rasterizer_state));
1828 
1829    cso->multisample = state->multisample;
1830    cso->force_persample_interp = state->force_persample_interp;
1831    cso->clip_halfz = state->clip_halfz;
1832    cso->depth_clip_near = state->depth_clip_near;
1833    cso->depth_clip_far = state->depth_clip_far;
1834    cso->flatshade = state->flatshade;
1835    cso->flatshade_first = state->flatshade_first;
1836    cso->clamp_fragment_color = state->clamp_fragment_color;
1837    cso->light_twoside = state->light_twoside;
1838    cso->rasterizer_discard = state->rasterizer_discard;
1839    cso->half_pixel_center = state->half_pixel_center;
1840    cso->sprite_coord_mode = state->sprite_coord_mode;
1841    cso->sprite_coord_enable = state->sprite_coord_enable;
1842    cso->line_stipple_enable = state->line_stipple_enable;
1843    cso->poly_stipple_enable = state->poly_stipple_enable;
1844    cso->conservative_rasterization =
1845       state->conservative_raster_mode == PIPE_CONSERVATIVE_RASTER_POST_SNAP;
1846 
1847    cso->fill_mode_point =
1848       state->fill_front == PIPE_POLYGON_MODE_POINT ||
1849       state->fill_back == PIPE_POLYGON_MODE_POINT;
1850    cso->fill_mode_line =
1851       state->fill_front == PIPE_POLYGON_MODE_LINE ||
1852       state->fill_back == PIPE_POLYGON_MODE_LINE;
1853    cso->fill_mode_point_or_line =
1854       cso->fill_mode_point ||
1855       cso->fill_mode_line;
1856 
1857    if (state->clip_plane_enable != 0)
1858       cso->num_clip_plane_consts = util_logbase2(state->clip_plane_enable) + 1;
1859    else
1860       cso->num_clip_plane_consts = 0;
1861 
1862    float line_width = get_line_width(state);
1863 
1864    iris_pack_command(GENX(3DSTATE_SF), cso->sf, sf) {
1865       sf.StatisticsEnable = true;
1866       sf.AALineDistanceMode = AALINEDISTANCE_TRUE;
1867       sf.LineEndCapAntialiasingRegionWidth =
1868          state->line_smooth ? _10pixels : _05pixels;
1869       sf.LastPixelEnable = state->line_last_pixel;
1870       sf.LineWidth = line_width;
1871       sf.SmoothPointEnable = (state->point_smooth || state->multisample) &&
1872                              !state->point_quad_rasterization;
1873       sf.PointWidthSource = state->point_size_per_vertex ? Vertex : State;
1874       sf.PointWidth = CLAMP(state->point_size, 0.125f, 255.875f);
1875 
1876       if (state->flatshade_first) {
1877          sf.TriangleFanProvokingVertexSelect = 1;
1878       } else {
1879          sf.TriangleStripListProvokingVertexSelect = 2;
1880          sf.TriangleFanProvokingVertexSelect = 2;
1881          sf.LineStripListProvokingVertexSelect = 1;
1882       }
1883    }
1884 
1885    iris_pack_command(GENX(3DSTATE_RASTER), cso->raster, rr) {
1886       rr.FrontWinding = state->front_ccw ? CounterClockwise : Clockwise;
1887       rr.CullMode = translate_cull_mode(state->cull_face);
1888       rr.FrontFaceFillMode = translate_fill_mode(state->fill_front);
1889       rr.BackFaceFillMode = translate_fill_mode(state->fill_back);
1890       rr.DXMultisampleRasterizationEnable = state->multisample;
1891       rr.GlobalDepthOffsetEnableSolid = state->offset_tri;
1892       rr.GlobalDepthOffsetEnableWireframe = state->offset_line;
1893       rr.GlobalDepthOffsetEnablePoint = state->offset_point;
1894       rr.GlobalDepthOffsetConstant = state->offset_units * 2;
1895       rr.GlobalDepthOffsetScale = state->offset_scale;
1896       rr.GlobalDepthOffsetClamp = state->offset_clamp;
1897       rr.SmoothPointEnable = state->point_smooth;
1898       rr.AntialiasingEnable = state->line_smooth;
1899       rr.ScissorRectangleEnable = state->scissor;
1900 #if GFX_VER >= 9
1901       rr.ViewportZNearClipTestEnable = state->depth_clip_near;
1902       rr.ViewportZFarClipTestEnable = state->depth_clip_far;
1903       rr.ConservativeRasterizationEnable =
1904          cso->conservative_rasterization;
1905 #else
1906       rr.ViewportZClipTestEnable = (state->depth_clip_near || state->depth_clip_far);
1907 #endif
1908    }
1909 
1910    iris_pack_command(GENX(3DSTATE_CLIP), cso->clip, cl) {
1911       /* cl.NonPerspectiveBarycentricEnable is filled in at draw time from
1912        * the FS program; cl.ForceZeroRTAIndexEnable is filled in from the FB.
1913        */
1914       cl.EarlyCullEnable = true;
1915       cl.UserClipDistanceClipTestEnableBitmask = state->clip_plane_enable;
1916       cl.ForceUserClipDistanceClipTestEnableBitmask = true;
1917       cl.APIMode = state->clip_halfz ? APIMODE_D3D : APIMODE_OGL;
1918       cl.GuardbandClipTestEnable = true;
1919       cl.ClipEnable = true;
1920       cl.MinimumPointWidth = 0.125;
1921       cl.MaximumPointWidth = 255.875;
1922 
1923       if (state->flatshade_first) {
1924          cl.TriangleFanProvokingVertexSelect = 1;
1925       } else {
1926          cl.TriangleStripListProvokingVertexSelect = 2;
1927          cl.TriangleFanProvokingVertexSelect = 2;
1928          cl.LineStripListProvokingVertexSelect = 1;
1929       }
1930    }
1931 
1932    iris_pack_command(GENX(3DSTATE_WM), cso->wm, wm) {
1933       /* wm.BarycentricInterpolationMode and wm.EarlyDepthStencilControl are
1934        * filled in at draw time from the FS program.
1935        */
1936       wm.LineAntialiasingRegionWidth = _10pixels;
1937       wm.LineEndCapAntialiasingRegionWidth = _05pixels;
1938       wm.PointRasterizationRule = RASTRULE_UPPER_RIGHT;
1939       wm.LineStippleEnable = state->line_stipple_enable;
1940       wm.PolygonStippleEnable = state->poly_stipple_enable;
1941    }
1942 
1943    /* Remap from 0..255 back to 1..256 */
1944    const unsigned line_stipple_factor = state->line_stipple_factor + 1;
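   /* For example, an API factor of 0 becomes a repeat count of 1 with an
    * inverse repeat count of 1.0; a factor of 3 becomes 4 and 0.25.
    */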
1945 
1946    iris_pack_command(GENX(3DSTATE_LINE_STIPPLE), cso->line_stipple, line) {
1947       if (state->line_stipple_enable) {
1948          line.LineStipplePattern = state->line_stipple_pattern;
1949          line.LineStippleInverseRepeatCount = 1.0f / line_stipple_factor;
1950          line.LineStippleRepeatCount = line_stipple_factor;
1951       }
1952    }
1953 
1954    return cso;
1955 }
1956 
1957 /**
1958  * The pipe->bind_rasterizer_state() driver hook.
1959  *
1960  * Bind a rasterizer CSO and flag related dirty bits.
1961  */
1962 static void
1963 iris_bind_rasterizer_state(struct pipe_context *ctx, void *state)
1964 {
1965    struct iris_context *ice = (struct iris_context *) ctx;
1966    struct iris_rasterizer_state *old_cso = ice->state.cso_rast;
1967    struct iris_rasterizer_state *new_cso = state;
1968 
1969    if (new_cso) {
1970       /* Try to avoid re-emitting 3DSTATE_LINE_STIPPLE, it's non-pipelined */
1971       if (cso_changed_memcmp(line_stipple))
1972          ice->state.dirty |= IRIS_DIRTY_LINE_STIPPLE;
1973 
1974       if (cso_changed(half_pixel_center))
1975          ice->state.dirty |= IRIS_DIRTY_MULTISAMPLE;
1976 
1977       if (cso_changed(line_stipple_enable) || cso_changed(poly_stipple_enable))
1978          ice->state.dirty |= IRIS_DIRTY_WM;
1979 
1980       if (cso_changed(rasterizer_discard))
1981          ice->state.dirty |= IRIS_DIRTY_STREAMOUT | IRIS_DIRTY_CLIP;
1982 
1983       if (cso_changed(flatshade_first))
1984          ice->state.dirty |= IRIS_DIRTY_STREAMOUT;
1985 
1986       if (cso_changed(depth_clip_near) || cso_changed(depth_clip_far) ||
1987           cso_changed(clip_halfz))
1988          ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
1989 
1990       if (cso_changed(sprite_coord_enable) ||
1991           cso_changed(sprite_coord_mode) ||
1992           cso_changed(light_twoside))
1993          ice->state.dirty |= IRIS_DIRTY_SBE;
1994 
1995       if (cso_changed(conservative_rasterization))
1996          ice->state.stage_dirty |= IRIS_STAGE_DIRTY_FS;
1997    }
1998 
1999    ice->state.cso_rast = new_cso;
2000    ice->state.dirty |= IRIS_DIRTY_RASTER;
2001    ice->state.dirty |= IRIS_DIRTY_CLIP;
2002    ice->state.stage_dirty |=
2003       ice->state.stage_dirty_for_nos[IRIS_NOS_RASTERIZER];
2004 }
2005 
2006 /**
2007  * Return true if the given wrap mode requires the border color to exist.
2008  *
2009  * (We can skip uploading it if the sampler isn't going to use it.)
2010  */
2011 static bool
2012 wrap_mode_needs_border_color(unsigned wrap_mode)
2013 {
2014    return wrap_mode == TCM_CLAMP_BORDER || wrap_mode == TCM_HALF_BORDER;
2015 }
2016 
2017 /**
2018  * Gallium CSO for sampler state.
2019  */
2020 struct iris_sampler_state {
2021    union pipe_color_union border_color;
2022    bool needs_border_color;
2023 
2024    uint32_t sampler_state[GENX(SAMPLER_STATE_length)];
2025 
2026 #if GFX_VERx10 == 125
2027    /* Sampler state structure to use for 3D textures in order to
2028     * implement Wa_14014414195.
2029     */
2030    uint32_t sampler_state_3d[GENX(SAMPLER_STATE_length)];
2031 #endif
2032 };
2033 
2034 static void
2035 fill_sampler_state(uint32_t *sampler_state,
2036                    const struct pipe_sampler_state *state,
2037                    unsigned max_anisotropy)
2038 {
2039    float min_lod = state->min_lod;
2040    unsigned mag_img_filter = state->mag_img_filter;
2041 
2042    // XXX: explain this code ported from ilo...I don't get it at all...
2043    if (state->min_mip_filter == PIPE_TEX_MIPFILTER_NONE &&
2044        state->min_lod > 0.0f) {
2045       min_lod = 0.0f;
2046       mag_img_filter = state->min_img_filter;
2047    }
2048 
2049    iris_pack_state(GENX(SAMPLER_STATE), sampler_state, samp) {
2050       samp.TCXAddressControlMode = translate_wrap(state->wrap_s);
2051       samp.TCYAddressControlMode = translate_wrap(state->wrap_t);
2052       samp.TCZAddressControlMode = translate_wrap(state->wrap_r);
2053       samp.CubeSurfaceControlMode = state->seamless_cube_map;
2054       samp.NonnormalizedCoordinateEnable = !state->normalized_coords;
2055       samp.MinModeFilter = state->min_img_filter;
2056       samp.MagModeFilter = mag_img_filter;
2057       samp.MipModeFilter = translate_mip_filter(state->min_mip_filter);
2058       samp.MaximumAnisotropy = RATIO21;
2059 
2060       if (max_anisotropy >= 2) {
2061          if (state->min_img_filter == PIPE_TEX_FILTER_LINEAR) {
2062             samp.MinModeFilter = MAPFILTER_ANISOTROPIC;
2063             samp.AnisotropicAlgorithm = EWAApproximation;
2064          }
2065 
2066          if (state->mag_img_filter == PIPE_TEX_FILTER_LINEAR)
2067             samp.MagModeFilter = MAPFILTER_ANISOTROPIC;
2068 
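         /* The ratio field is encoded in steps of two starting at 2:1
          * (RATIO21 == 0), so e.g. max_anisotropy == 16 maps to
          * (16 - 2) / 2 == 7 == RATIO161.
          */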
2069          samp.MaximumAnisotropy =
2070             MIN2((max_anisotropy - 2) / 2, RATIO161);
2071       }
2072 
2073       /* Set address rounding bits if not using nearest filtering. */
2074       if (state->min_img_filter != PIPE_TEX_FILTER_NEAREST) {
2075          samp.UAddressMinFilterRoundingEnable = true;
2076          samp.VAddressMinFilterRoundingEnable = true;
2077          samp.RAddressMinFilterRoundingEnable = true;
2078       }
2079 
2080       if (state->mag_img_filter != PIPE_TEX_FILTER_NEAREST) {
2081          samp.UAddressMagFilterRoundingEnable = true;
2082          samp.VAddressMagFilterRoundingEnable = true;
2083          samp.RAddressMagFilterRoundingEnable = true;
2084       }
2085 
2086       if (state->compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE)
2087          samp.ShadowFunction = translate_shadow_func(state->compare_func);
2088 
2089       const float hw_max_lod = GFX_VER >= 7 ? 14 : 13;
2090 
2091       samp.LODPreClampMode = CLAMP_MODE_OGL;
2092       samp.MinLOD = CLAMP(min_lod, 0, hw_max_lod);
2093       samp.MaxLOD = CLAMP(state->max_lod, 0, hw_max_lod);
2094       samp.TextureLODBias = CLAMP(state->lod_bias, -16, 15);
2095 
2096       /* .BorderColorPointer is filled in by iris_bind_sampler_states. */
2097    }
2098 }
2099 
2100 /**
2101  * The pipe->create_sampler_state() driver hook.
2102  *
2103  * We fill out SAMPLER_STATE (except for the border color pointer), and
2104  * store that on the CPU.  It doesn't make sense to upload it to a GPU
2105  * buffer object yet, because 3DSTATE_SAMPLER_STATE_POINTERS requires
2106  * all bound sampler states to be in contiguous memory.
2107  */
2108 static void *
2109 iris_create_sampler_state(struct pipe_context *ctx,
2110                           const struct pipe_sampler_state *state)
2111 {
2112    UNUSED struct iris_screen *screen = (void *)ctx->screen;
2113    UNUSED const struct intel_device_info *devinfo = &screen->devinfo;
2114    struct iris_sampler_state *cso = CALLOC_STRUCT(iris_sampler_state);
2115 
2116    if (!cso)
2117       return NULL;
2118 
2119    STATIC_ASSERT(PIPE_TEX_FILTER_NEAREST == MAPFILTER_NEAREST);
2120    STATIC_ASSERT(PIPE_TEX_FILTER_LINEAR == MAPFILTER_LINEAR);
2121 
2122    unsigned wrap_s = translate_wrap(state->wrap_s);
2123    unsigned wrap_t = translate_wrap(state->wrap_t);
2124    unsigned wrap_r = translate_wrap(state->wrap_r);
2125 
2126    memcpy(&cso->border_color, &state->border_color, sizeof(cso->border_color));
2127 
2128    cso->needs_border_color = wrap_mode_needs_border_color(wrap_s) ||
2129                              wrap_mode_needs_border_color(wrap_t) ||
2130                              wrap_mode_needs_border_color(wrap_r);
2131 
2132    fill_sampler_state(cso->sampler_state, state, state->max_anisotropy);
2133 
2134 #if GFX_VERx10 == 125
2135    /* Fill an extra sampler state structure with anisotropic filtering
2136     * disabled, used to implement Wa_14014414195.
2137     */
2138    fill_sampler_state(cso->sampler_state_3d, state, 0);
2139 #endif
2140 
2141    return cso;
2142 }
2143 
2144 /**
2145  * The pipe->bind_sampler_states() driver hook.
2146  */
2147 static void
2148 iris_bind_sampler_states(struct pipe_context *ctx,
2149                          enum pipe_shader_type p_stage,
2150                          unsigned start, unsigned count,
2151                          void **states)
2152 {
2153    struct iris_context *ice = (struct iris_context *) ctx;
2154    gl_shader_stage stage = stage_from_pipe(p_stage);
2155    struct iris_shader_state *shs = &ice->state.shaders[stage];
2156 
2157    assert(start + count <= IRIS_MAX_TEXTURE_SAMPLERS);
2158 
2159    bool dirty = false;
2160 
2161    for (int i = 0; i < count; i++) {
2162       struct iris_sampler_state *state = states ? states[i] : NULL;
2163       if (shs->samplers[start + i] != state) {
2164          shs->samplers[start + i] = state;
2165          dirty = true;
2166       }
2167    }
2168 
2169    if (dirty)
2170       ice->state.stage_dirty |= IRIS_STAGE_DIRTY_SAMPLER_STATES_VS << stage;
2171 }
2172 
2173 /**
2174  * Upload the sampler states into a contiguous area of GPU memory, for
2175  * 3DSTATE_SAMPLER_STATE_POINTERS_*.
2176  *
2177  * Also fill out the border color state pointers.
2178  */
2179 static void
2180 iris_upload_sampler_states(struct iris_context *ice, gl_shader_stage stage)
2181 {
2182    struct iris_screen *screen = (struct iris_screen *) ice->ctx.screen;
2183    struct iris_shader_state *shs = &ice->state.shaders[stage];
2184    const struct shader_info *info = iris_get_shader_info(ice, stage);
2185    struct iris_border_color_pool *border_color_pool =
2186       iris_bufmgr_get_border_color_pool(screen->bufmgr);
2187 
2188    /* We assume gallium frontends will call pipe->bind_sampler_states()
2189     * if the program's number of textures changes.
2190     */
2191    unsigned count = info ? BITSET_LAST_BIT(info->textures_used) : 0;
2192 
2193    if (!count)
2194       return;
2195 
2196    /* Assemble the SAMPLER_STATEs into a contiguous table that lives
2197     * in the dynamic state memory zone, so we can point to it via the
2198     * 3DSTATE_SAMPLER_STATE_POINTERS_* commands.
2199     */
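   /* GENX(SAMPLER_STATE_length) is in DWords, so each sampler occupies
    * 4 * length bytes of the table.
    */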
2200    unsigned size = count * 4 * GENX(SAMPLER_STATE_length);
2201    uint32_t *map =
2202       upload_state(ice->state.dynamic_uploader, &shs->sampler_table, size, 32);
2203    if (unlikely(!map))
2204       return;
2205 
2206    struct pipe_resource *res = shs->sampler_table.res;
2207    struct iris_bo *bo = iris_resource_bo(res);
2208 
2209    iris_record_state_size(ice->state.sizes,
2210                           bo->address + shs->sampler_table.offset, size);
2211 
2212    shs->sampler_table.offset += iris_bo_offset_from_base_address(bo);
2213 
2214    ice->state.need_border_colors &= ~(1 << stage);
2215 
2216    for (int i = 0; i < count; i++) {
2217       struct iris_sampler_state *state = shs->samplers[i];
2218       struct iris_sampler_view *tex = shs->textures[i];
2219 
2220       if (!state) {
2221          memset(map, 0, 4 * GENX(SAMPLER_STATE_length));
2222       } else {
2223          const uint32_t *sampler_state = state->sampler_state;
2224 #if GFX_VERx10 == 125
2225          if (tex && tex->res->base.b.target == PIPE_TEXTURE_3D)
2226             sampler_state = state->sampler_state_3d;
2227 #endif
2228 
2229          if (!state->needs_border_color) {
2230             memcpy(map, sampler_state, 4 * GENX(SAMPLER_STATE_length));
2231          } else {
2232             ice->state.need_border_colors |= 1 << stage;
2233 
2234             /* We may need to swizzle the border color for format faking.
2235              * A/LA formats are faked as R/RG with 000R or R00G swizzles.
2236              * This means we need to move the border color's A channel into
2237              * the R or G channels so that those read swizzles will move it
2238              * back into A.
2239              */
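            /* For example, a border color (0, 0, 0, a) for a faked A8
             * texture is stored as (a, 0, 0, 0), so the 000R read swizzle
             * returns it as alpha again.
             */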
2240             union pipe_color_union *color = &state->border_color;
2241             union pipe_color_union tmp;
2242             if (tex) {
2243                enum pipe_format internal_format = tex->res->internal_format;
2244 
2245                if (util_format_is_alpha(internal_format)) {
2246                   unsigned char swz[4] = {
2247                      PIPE_SWIZZLE_W, PIPE_SWIZZLE_0,
2248                      PIPE_SWIZZLE_0, PIPE_SWIZZLE_0
2249                   };
2250                   util_format_apply_color_swizzle(&tmp, color, swz, true);
2251                   color = &tmp;
2252                } else if (util_format_is_luminance_alpha(internal_format) &&
2253                           internal_format != PIPE_FORMAT_L8A8_SRGB) {
2254                   unsigned char swz[4] = {
2255                      PIPE_SWIZZLE_X, PIPE_SWIZZLE_W,
2256                      PIPE_SWIZZLE_0, PIPE_SWIZZLE_0
2257                   };
2258                   util_format_apply_color_swizzle(&tmp, color, swz, true);
2259                   color = &tmp;
2260                }
2261             }
2262 
2263             /* Stream out the border color and merge the pointer. */
2264             uint32_t offset = iris_upload_border_color(border_color_pool,
2265                                                        color);
2266 
2267             uint32_t dynamic[GENX(SAMPLER_STATE_length)];
2268             iris_pack_state(GENX(SAMPLER_STATE), dynamic, dyns) {
2269                dyns.BorderColorPointer = offset;
2270             }
2271 
2272             for (uint32_t j = 0; j < GENX(SAMPLER_STATE_length); j++)
2273                map[j] = sampler_state[j] | dynamic[j];
2274          }
2275       }
2276 
2277       map += GENX(SAMPLER_STATE_length);
2278    }
2279 }
2280 
2281 static enum isl_channel_select
2282 fmt_swizzle(const struct iris_format_info *fmt, enum pipe_swizzle swz)
2283 {
2284    switch (swz) {
2285    case PIPE_SWIZZLE_X: return fmt->swizzle.r;
2286    case PIPE_SWIZZLE_Y: return fmt->swizzle.g;
2287    case PIPE_SWIZZLE_Z: return fmt->swizzle.b;
2288    case PIPE_SWIZZLE_W: return fmt->swizzle.a;
2289    case PIPE_SWIZZLE_1: return ISL_CHANNEL_SELECT_ONE;
2290    case PIPE_SWIZZLE_0: return ISL_CHANNEL_SELECT_ZERO;
2291    default: unreachable("invalid swizzle");
2292    }
2293 }
2294 
2295 static void
2296 fill_buffer_surface_state(struct isl_device *isl_dev,
2297                           struct iris_resource *res,
2298                           void *map,
2299                           enum isl_format format,
2300                           struct isl_swizzle swizzle,
2301                           unsigned offset,
2302                           unsigned size,
2303                           isl_surf_usage_flags_t usage)
2304 {
2305    const struct isl_format_layout *fmtl = isl_format_get_layout(format);
2306    const unsigned cpp = format == ISL_FORMAT_RAW ? 1 : fmtl->bpb / 8;
2307 
2308    /* The ARB_texture_buffer_specification says:
2309     *
2310     *    "The number of texels in the buffer texture's texel array is given by
2311     *
2312     *       floor(<buffer_size> / (<components> * sizeof(<base_type>)),
2313     *
2314     *     where <buffer_size> is the size of the buffer object, in basic
2315     *     machine units and <components> and <base_type> are the element count
2316     *     and base data type for elements, as specified in Table X.1.  The
2317     *     number of texels in the texel array is then clamped to the
2318     *     implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
2319     *
2320     * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
2321     * so that when ISL divides by stride to obtain the number of texels, that
2322     * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
2323     */
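   /* As a worked example, an ISL_FORMAT_R32G32B32A32_FLOAT texel buffer has
    * cpp == 16, so the byte size is capped at
    * IRIS_MAX_TEXTURE_BUFFER_SIZE * 16 (and at the space remaining in the
    * underlying BO).
    */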
2324    unsigned final_size =
2325       MIN3(size, res->bo->size - res->offset - offset,
2326            IRIS_MAX_TEXTURE_BUFFER_SIZE * cpp);
2327 
2328    isl_buffer_fill_state(isl_dev, map,
2329                          .address = res->bo->address + res->offset + offset,
2330                          .size_B = final_size,
2331                          .format = format,
2332                          .swizzle = swizzle,
2333                          .stride_B = cpp,
2334                          .mocs = iris_mocs(res->bo, isl_dev, usage));
2335 }
2336 
2337 #define SURFACE_STATE_ALIGNMENT 64
2338 
2339 /**
2340  * Allocate several contiguous SURFACE_STATE structures, one for each
2341  * supported auxiliary surface mode.  This only allocates the CPU-side
2342  * copy, they will need to be uploaded later after they're filled in.
2343  */
2344 static void
2345 alloc_surface_states(struct iris_surface_state *surf_state,
2346                      unsigned aux_usages)
2347 {
2348    const unsigned surf_size = 4 * GENX(RENDER_SURFACE_STATE_length);
2349 
2350    /* If this changes, update this to explicitly align pointers */
2351    STATIC_ASSERT(surf_size == SURFACE_STATE_ALIGNMENT);
2352 
2353    assert(aux_usages != 0);
2354 
2355    /* In case we're re-allocating them... */
2356    free(surf_state->cpu);
2357 
2358    surf_state->aux_usages = aux_usages;
2359    surf_state->num_states = util_bitcount(aux_usages);
2360    surf_state->cpu = calloc(surf_state->num_states, surf_size);
2361    surf_state->ref.offset = 0;
2362    pipe_resource_reference(&surf_state->ref.res, NULL);
2363 
2364    assert(surf_state->cpu);
2365 }
2366 
2367 /**
2368  * Upload the CPU side SURFACE_STATEs into a GPU buffer.
2369  */
2370 static void
2371 upload_surface_states(struct u_upload_mgr *mgr,
2372                       struct iris_surface_state *surf_state)
2373 {
2374    const unsigned surf_size = 4 * GENX(RENDER_SURFACE_STATE_length);
2375    const unsigned bytes = surf_state->num_states * surf_size;
2376 
2377    void *map =
2378       upload_state(mgr, &surf_state->ref, bytes, SURFACE_STATE_ALIGNMENT);
2379 
2380    surf_state->ref.offset +=
2381       iris_bo_offset_from_base_address(iris_resource_bo(surf_state->ref.res));
2382 
2383    if (map)
2384       memcpy(map, surf_state->cpu, bytes);
2385 }
2386 
2387 /**
2388  * Update resource addresses in a set of SURFACE_STATE descriptors,
2389  * and re-upload them if necessary.
2390  */
2391 static bool
2392 update_surface_state_addrs(struct u_upload_mgr *mgr,
2393                            struct iris_surface_state *surf_state,
2394                            struct iris_bo *bo)
2395 {
2396    if (surf_state->bo_address == bo->address)
2397       return false;
2398 
2399    STATIC_ASSERT(GENX(RENDER_SURFACE_STATE_SurfaceBaseAddress_start) % 64 == 0);
2400    STATIC_ASSERT(GENX(RENDER_SURFACE_STATE_SurfaceBaseAddress_bits) == 64);
2401 
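   /* SurfaceBaseAddress_start is a bit offset within the surface state, so
    * dividing by 32 gives the index of the DWord holding the base address
    * in the uint32_t CPU copy.
    */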
2402    uint64_t *ss_addr = (uint64_t *) &surf_state->cpu[GENX(RENDER_SURFACE_STATE_SurfaceBaseAddress_start) / 32];
2403 
2404    /* First, update the CPU copies.  We assume no other fields exist in
2405     * the QWord containing Surface Base Address.
2406     */
2407    for (unsigned i = 0; i < surf_state->num_states; i++) {
2408       *ss_addr = *ss_addr - surf_state->bo_address + bo->address;
2409       ss_addr = ((void *) ss_addr) + SURFACE_STATE_ALIGNMENT;
2410    }
2411 
2412    /* Next, upload the updated copies to a GPU buffer. */
2413    upload_surface_states(mgr, surf_state);
2414 
2415    surf_state->bo_address = bo->address;
2416 
2417    return true;
2418 }
2419 
2420 static void
2421 fill_surface_state(struct isl_device *isl_dev,
2422                    void *map,
2423                    struct iris_resource *res,
2424                    struct isl_surf *surf,
2425                    struct isl_view *view,
2426                    unsigned aux_usage,
2427                    uint32_t extra_main_offset,
2428                    uint32_t tile_x_sa,
2429                    uint32_t tile_y_sa)
2430 {
2431    struct isl_surf_fill_state_info f = {
2432       .surf = surf,
2433       .view = view,
2434       .mocs = iris_mocs(res->bo, isl_dev, view->usage),
2435       .address = res->bo->address + res->offset + extra_main_offset,
2436       .x_offset_sa = tile_x_sa,
2437       .y_offset_sa = tile_y_sa,
2438    };
2439 
2440    if (aux_usage != ISL_AUX_USAGE_NONE) {
2441       f.aux_surf = &res->aux.surf;
2442       f.aux_usage = aux_usage;
2443       f.clear_color = res->aux.clear_color;
2444 
2445       if (aux_usage == ISL_AUX_USAGE_MC)
2446          f.mc_format = iris_format_for_usage(isl_dev->info,
2447                                              res->external_format,
2448                                              surf->usage).fmt;
2449 
2450       if (res->aux.bo)
2451          f.aux_address = res->aux.bo->address + res->aux.offset;
2452 
2453       if (res->aux.clear_color_bo) {
2454          f.clear_address = res->aux.clear_color_bo->address +
2455                            res->aux.clear_color_offset;
2456          f.use_clear_address = isl_dev->info->ver > 9;
2457       }
2458    }
2459 
2460    isl_surf_fill_state_s(isl_dev, map, &f);
2461 }
2462 
2463 static void
2464 fill_surface_states(struct isl_device *isl_dev,
2465                     struct iris_surface_state *surf_state,
2466                     struct iris_resource *res,
2467                     struct isl_surf *surf,
2468                     struct isl_view *view,
2469                     uint64_t extra_main_offset,
2470                     uint32_t tile_x_sa,
2471                     uint32_t tile_y_sa)
2472 {
2473    void *map = surf_state->cpu;
2474    unsigned aux_modes = surf_state->aux_usages;
2475 
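   /* Write one SURFACE_STATE per supported aux usage; u_bit_scan() walks the
    * aux_usages bitfield in increasing order, and each state lands
    * SURFACE_STATE_ALIGNMENT bytes after the previous one in the CPU copy.
    */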
2476    while (aux_modes) {
2477       enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
2478 
2479       fill_surface_state(isl_dev, map, res, surf, view, aux_usage,
2480                          extra_main_offset, tile_x_sa, tile_y_sa);
2481 
2482       map += SURFACE_STATE_ALIGNMENT;
2483    }
2484 }
2485 
2486 /**
2487  * The pipe->create_sampler_view() driver hook.
2488  */
2489 static struct pipe_sampler_view *
2490 iris_create_sampler_view(struct pipe_context *ctx,
2491                          struct pipe_resource *tex,
2492                          const struct pipe_sampler_view *tmpl)
2493 {
2494    struct iris_screen *screen = (struct iris_screen *)ctx->screen;
2495    const struct intel_device_info *devinfo = &screen->devinfo;
2496    struct iris_sampler_view *isv = calloc(1, sizeof(struct iris_sampler_view));
2497 
2498    if (!isv)
2499       return NULL;
2500 
2501    /* initialize base object */
2502    isv->base = *tmpl;
2503    isv->base.context = ctx;
2504    isv->base.texture = NULL;
2505    pipe_reference_init(&isv->base.reference, 1);
2506    pipe_resource_reference(&isv->base.texture, tex);
2507 
2508    if (util_format_is_depth_or_stencil(tmpl->format)) {
2509       struct iris_resource *zres, *sres;
2510       const struct util_format_description *desc =
2511          util_format_description(tmpl->format);
2512 
2513       iris_get_depth_stencil_resources(tex, &zres, &sres);
2514 
2515       tex = util_format_has_depth(desc) ? &zres->base.b : &sres->base.b;
2516    }
2517 
2518    isv->res = (struct iris_resource *) tex;
2519 
2520    isl_surf_usage_flags_t usage = ISL_SURF_USAGE_TEXTURE_BIT;
2521 
2522    if (isv->base.target == PIPE_TEXTURE_CUBE ||
2523        isv->base.target == PIPE_TEXTURE_CUBE_ARRAY)
2524       usage |= ISL_SURF_USAGE_CUBE_BIT;
2525 
2526    const struct iris_format_info fmt =
2527       iris_format_for_usage(devinfo, tmpl->format, usage);
2528 
2529    isv->clear_color = isv->res->aux.clear_color;
2530 
2531    isv->view = (struct isl_view) {
2532       .format = fmt.fmt,
2533       .swizzle = (struct isl_swizzle) {
2534          .r = fmt_swizzle(&fmt, tmpl->swizzle_r),
2535          .g = fmt_swizzle(&fmt, tmpl->swizzle_g),
2536          .b = fmt_swizzle(&fmt, tmpl->swizzle_b),
2537          .a = fmt_swizzle(&fmt, tmpl->swizzle_a),
2538       },
2539       .usage = usage,
2540    };
2541 
2542    unsigned aux_usages = 0;
2543 
2544    if ((isv->res->aux.usage == ISL_AUX_USAGE_CCS_D ||
2545         isv->res->aux.usage == ISL_AUX_USAGE_CCS_E ||
2546         isv->res->aux.usage == ISL_AUX_USAGE_GFX12_CCS_E) &&
2547        !isl_format_supports_ccs_e(devinfo, isv->view.format)) {
2548       aux_usages = 1 << ISL_AUX_USAGE_NONE;
2549    } else if (isl_aux_usage_has_hiz(isv->res->aux.usage) &&
2550               !iris_sample_with_depth_aux(devinfo, isv->res)) {
2551       aux_usages = 1 << ISL_AUX_USAGE_NONE;
2552    } else {
2553       aux_usages = 1 << ISL_AUX_USAGE_NONE |
2554                    1 << isv->res->aux.usage;
2555    }
2556 
2557    alloc_surface_states(&isv->surface_state, aux_usages);
2558    isv->surface_state.bo_address = isv->res->bo->address;
2559 
2560    /* Fill out SURFACE_STATE for this view. */
2561    if (tmpl->target != PIPE_BUFFER) {
2562       isv->view.base_level = tmpl->u.tex.first_level;
2563       isv->view.levels = tmpl->u.tex.last_level - tmpl->u.tex.first_level + 1;
2564 
2565       if (tmpl->target == PIPE_TEXTURE_3D) {
2566          isv->view.base_array_layer = 0;
2567          isv->view.array_len = 1;
2568       } else {
2569 #if GFX_VER < 9
2570          /* Hardware older than skylake ignores this value */
2571          assert(tex->target != PIPE_TEXTURE_3D || !tmpl->u.tex.first_layer);
2572 #endif
2573          isv->view.base_array_layer = tmpl->u.tex.first_layer;
2574          isv->view.array_len =
2575             tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1;
2576       }
2577 
2578       fill_surface_states(&screen->isl_dev, &isv->surface_state, isv->res,
2579                           &isv->res->surf, &isv->view, 0, 0, 0);
2580    } else {
2581       fill_buffer_surface_state(&screen->isl_dev, isv->res,
2582                                 isv->surface_state.cpu,
2583                                 isv->view.format, isv->view.swizzle,
2584                                 tmpl->u.buf.offset, tmpl->u.buf.size,
2585                                 ISL_SURF_USAGE_TEXTURE_BIT);
2586    }
2587 
2588    return &isv->base;
2589 }
2590 
2591 static void
2592 iris_sampler_view_destroy(struct pipe_context *ctx,
2593                           struct pipe_sampler_view *state)
2594 {
2595    struct iris_sampler_view *isv = (void *) state;
2596    pipe_resource_reference(&state->texture, NULL);
2597    pipe_resource_reference(&isv->surface_state.ref.res, NULL);
2598    free(isv->surface_state.cpu);
2599    free(isv);
2600 }
2601 
2602 /**
2603  * The pipe->create_surface() driver hook.
2604  *
2605  * In Gallium nomenclature, "surfaces" are a view of a resource that
2606  * can be bound as a render target or depth/stencil buffer.
2607  */
2608 static struct pipe_surface *
2609 iris_create_surface(struct pipe_context *ctx,
2610                     struct pipe_resource *tex,
2611                     const struct pipe_surface *tmpl)
2612 {
2613    struct iris_screen *screen = (struct iris_screen *)ctx->screen;
2614    const struct intel_device_info *devinfo = &screen->devinfo;
2615 
2616    isl_surf_usage_flags_t usage = 0;
2617    if (tmpl->writable)
2618       usage = ISL_SURF_USAGE_STORAGE_BIT;
2619    else if (util_format_is_depth_or_stencil(tmpl->format))
2620       usage = ISL_SURF_USAGE_DEPTH_BIT;
2621    else
2622       usage = ISL_SURF_USAGE_RENDER_TARGET_BIT;
2623 
2624    const struct iris_format_info fmt =
2625       iris_format_for_usage(devinfo, tmpl->format, usage);
2626 
2627    if ((usage & ISL_SURF_USAGE_RENDER_TARGET_BIT) &&
2628        !isl_format_supports_rendering(devinfo, fmt.fmt)) {
2629       /* Framebuffer validation will reject this invalid case, but it
2630        * hasn't had the opportunity yet.  In the meantime, we need to
2631        * avoid hitting ISL asserts about unsupported formats below.
2632        */
2633       return NULL;
2634    }
2635 
2636    struct iris_surface *surf = calloc(1, sizeof(struct iris_surface));
2637    struct pipe_surface *psurf = &surf->base;
2638    struct iris_resource *res = (struct iris_resource *) tex;
2639 
2640    if (!surf)
2641       return NULL;
2642 
2643    pipe_reference_init(&psurf->reference, 1);
2644    pipe_resource_reference(&psurf->texture, tex);
2645    psurf->context = ctx;
2646    psurf->format = tmpl->format;
2647    psurf->width = tex->width0;
2648    psurf->height = tex->height0;
2649    psurf->texture = tex;
2650    psurf->u.tex.first_layer = tmpl->u.tex.first_layer;
2651    psurf->u.tex.last_layer = tmpl->u.tex.last_layer;
2652    psurf->u.tex.level = tmpl->u.tex.level;
2653 
2654    uint32_t array_len = tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1;
2655 
2656    struct isl_view *view = &surf->view;
2657    *view = (struct isl_view) {
2658       .format = fmt.fmt,
2659       .base_level = tmpl->u.tex.level,
2660       .levels = 1,
2661       .base_array_layer = tmpl->u.tex.first_layer,
2662       .array_len = array_len,
2663       .swizzle = ISL_SWIZZLE_IDENTITY,
2664       .usage = usage,
2665    };
2666 
2667 #if GFX_VER == 8
2668    struct isl_view *read_view = &surf->read_view;
2669    *read_view = (struct isl_view) {
2670       .format = fmt.fmt,
2671       .base_level = tmpl->u.tex.level,
2672       .levels = 1,
2673       .base_array_layer = tmpl->u.tex.first_layer,
2674       .array_len = array_len,
2675       .swizzle = ISL_SWIZZLE_IDENTITY,
2676       .usage = ISL_SURF_USAGE_TEXTURE_BIT,
2677    };
2678 
2679    struct isl_surf read_surf = res->surf;
2680    uint64_t read_surf_offset_B = 0;
2681    uint32_t read_surf_tile_x_sa = 0, read_surf_tile_y_sa = 0;
2682    if (tex->target == PIPE_TEXTURE_3D && array_len == 1) {
2683       /* The minimum array element field of the surface state structure is
2684        * ignored by the sampler unit for 3D textures on some hardware.  If the
2685        * render buffer is a single slice of a 3D texture, create a 2D texture
2686        * covering that slice.
2687        *
2688        * TODO: This only handles the case where we're rendering to a single
2689        * slice of an array texture.  If we have layered rendering combined
2690        * with non-coherent FB fetch and a non-zero base_array_layer, then
2691        * we're going to run into problems.
2692        *
2693        * See https://gitlab.freedesktop.org/mesa/mesa/-/issues/4904
2694        */
2695       isl_surf_get_image_surf(&screen->isl_dev, &res->surf,
2696                               read_view->base_level,
2697                               0, read_view->base_array_layer,
2698                               &read_surf, &read_surf_offset_B,
2699                               &read_surf_tile_x_sa, &read_surf_tile_y_sa);
2700       read_view->base_level = 0;
2701       read_view->base_array_layer = 0;
2702       assert(read_view->array_len == 1);
2703    } else if (tex->target == PIPE_TEXTURE_1D_ARRAY) {
2704       /* Convert 1D array textures to 2D arrays because shaders always provide
2705        * the array index coordinate at the Z component to avoid recompiles
2706        * when changing the texture target of the framebuffer.
2707        */
2708       assert(read_surf.dim_layout == ISL_DIM_LAYOUT_GFX4_2D);
2709       read_surf.dim = ISL_SURF_DIM_2D;
2710    }
2711 #endif
2712 
2713    surf->clear_color = res->aux.clear_color;
2714 
2715    /* Bail early for depth/stencil - we don't want SURFACE_STATE for them. */
2716    if (res->surf.usage & (ISL_SURF_USAGE_DEPTH_BIT |
2717                           ISL_SURF_USAGE_STENCIL_BIT))
2718       return psurf;
2719 
2720    unsigned aux_usages = 0;
2721 
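   /* Build the set of aux usages we'll need SURFACE_STATEs for: always
    * ISL_AUX_USAGE_NONE, plus the resource's aux usage unless it is a CCS_E
    * mode that the view format can't support for rendering.
    */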
2722    if ((res->aux.usage == ISL_AUX_USAGE_CCS_E ||
2723         res->aux.usage == ISL_AUX_USAGE_GFX12_CCS_E) &&
2724        !isl_format_supports_ccs_e(devinfo, view->format)) {
2725       aux_usages = 1 << ISL_AUX_USAGE_NONE;
2726    } else {
2727       aux_usages = 1 << ISL_AUX_USAGE_NONE |
2728                    1 << res->aux.usage;
2729    }
2730 
2731    alloc_surface_states(&surf->surface_state, aux_usages);
2732    surf->surface_state.bo_address = res->bo->address;
2733 
2734 #if GFX_VER == 8
2735    alloc_surface_states(&surf->surface_state_read, aux_usages);
2736    surf->surface_state_read.bo_address = res->bo->address;
2737 #endif
2738 
2739    if (!isl_format_is_compressed(res->surf.format)) {
2740       /* This is a normal surface.  Fill out a SURFACE_STATE for each possible
2741        * auxiliary surface mode and return the pipe_surface.
2742        */
2743       fill_surface_states(&screen->isl_dev, &surf->surface_state, res,
2744                           &res->surf, view, 0, 0, 0);
2745 #if GFX_VER == 8
2746       fill_surface_states(&screen->isl_dev, &surf->surface_state_read, res,
2747                           &read_surf, read_view, read_surf_offset_B,
2748                           read_surf_tile_x_sa, read_surf_tile_y_sa);
2749 #endif
2750       return psurf;
2751    }
2752 
2753    /* The resource has a compressed format, which is not renderable, but we
2754     * have a renderable view format.  We must be attempting to upload blocks
2755     * of compressed data via an uncompressed view.
2756     *
2757     * In this case, we can assume there are no auxiliary buffers, a single
2758     * miplevel, and that the resource is single-sampled.  Gallium may try
2759     * to create an uncompressed view with multiple layers, however.
2760     */
2761    assert(!isl_format_is_compressed(fmt.fmt));
2762    assert(res->aux.usage == ISL_AUX_USAGE_NONE);
2763    assert(res->surf.samples == 1);
2764    assert(view->levels == 1);
2765 
2766    struct isl_surf isl_surf;
2767    uint64_t offset_B = 0;
2768    uint32_t tile_x_el = 0, tile_y_el = 0;
2769    bool ok = isl_surf_get_uncompressed_surf(&screen->isl_dev, &res->surf,
2770                                             view, &isl_surf, view,
2771                                             &offset_B, &tile_x_el, &tile_y_el);
2772    if (!ok) {
2773       free(surf);
2774       return NULL;
2775    }
2776 
2777    psurf->width = isl_surf.logical_level0_px.width;
2778    psurf->height = isl_surf.logical_level0_px.height;
2779 
2780    struct isl_surf_fill_state_info f = {
2781       .surf = &isl_surf,
2782       .view = view,
2783       .mocs = iris_mocs(res->bo, &screen->isl_dev,
2784                         ISL_SURF_USAGE_RENDER_TARGET_BIT),
2785       .address = res->bo->address + offset_B,
2786       .x_offset_sa = tile_x_el, /* Single-sampled, so el == sa */
2787       .y_offset_sa = tile_y_el, /* Single-sampled, so el == sa */
2788    };
2789 
2790    isl_surf_fill_state_s(&screen->isl_dev, surf->surface_state.cpu, &f);
2791 
2792    return psurf;
2793 }
2794 
2795 #if GFX_VER < 9
2796 static void
2797 fill_default_image_param(struct brw_image_param *param)
2798 {
2799    memset(param, 0, sizeof(*param));
2800    /* Set the swizzling shifts to all-ones to effectively disable swizzling --
2801     * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
2802     * detailed explanation of these parameters.
2803     */
2804    param->swizzling[0] = 0xff;
2805    param->swizzling[1] = 0xff;
2806 }
2807 
2808 static void
2809 fill_buffer_image_param(struct brw_image_param *param,
2810                         enum pipe_format pfmt,
2811                         unsigned size)
2812 {
2813    const unsigned cpp = util_format_get_blocksize(pfmt);
2814 
2815    fill_default_image_param(param);
2816    param->size[0] = size / cpp;
2817    param->stride[0] = cpp;
2818 }
2819 #else
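/* Gfx9+ never fills brw_image_param (only Gfx8's lowered image access needs
 * these parameters), so the helpers compile away to nothing.
 */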
2820 #define isl_surf_fill_image_param(x, ...)
2821 #define fill_default_image_param(x, ...)
2822 #define fill_buffer_image_param(x, ...)
2823 #endif
2824 
2825 /**
2826  * The pipe->set_shader_images() driver hook.
2827  */
2828 static void
2829 iris_set_shader_images(struct pipe_context *ctx,
2830                        enum pipe_shader_type p_stage,
2831                        unsigned start_slot, unsigned count,
2832                        unsigned unbind_num_trailing_slots,
2833                        const struct pipe_image_view *p_images)
2834 {
2835    struct iris_context *ice = (struct iris_context *) ctx;
2836    struct iris_screen *screen = (struct iris_screen *)ctx->screen;
2837    gl_shader_stage stage = stage_from_pipe(p_stage);
2838    struct iris_shader_state *shs = &ice->state.shaders[stage];
2839 #if GFX_VER == 8
2840    struct iris_genx_state *genx = ice->state.genx;
2841    struct brw_image_param *image_params = genx->shaders[stage].image_param;
2842 #endif
2843 
2844    shs->bound_image_views &=
2845       ~u_bit_consecutive(start_slot, count + unbind_num_trailing_slots);
2846 
2847    for (unsigned i = 0; i < count; i++) {
2848       struct iris_image_view *iv = &shs->image[start_slot + i];
2849 
2850       if (p_images && p_images[i].resource) {
2851          const struct pipe_image_view *img = &p_images[i];
2852          struct iris_resource *res = (void *) img->resource;
2853 
2854          util_copy_image_view(&iv->base, img);
2855 
2856          shs->bound_image_views |= 1 << (start_slot + i);
2857 
2858          res->bind_history |= PIPE_BIND_SHADER_IMAGE;
2859          res->bind_stages |= 1 << stage;
2860 
2861          enum isl_format isl_fmt = iris_image_view_get_format(ice, img);
2862 
2863          unsigned aux_usages = 1 << ISL_AUX_USAGE_NONE;
2864 
2865          /* Gfx12+ supports render compression for images */
2866          if (GFX_VER >= 12)
2867             aux_usages |= 1 << res->aux.usage;
2868 
2869          alloc_surface_states(&iv->surface_state, aux_usages);
2870          iv->surface_state.bo_address = res->bo->address;
2871 
2872          if (res->base.b.target != PIPE_BUFFER) {
2873             struct isl_view view = {
2874                .format = isl_fmt,
2875                .base_level = img->u.tex.level,
2876                .levels = 1,
2877                .base_array_layer = img->u.tex.first_layer,
2878                .array_len = img->u.tex.last_layer - img->u.tex.first_layer + 1,
2879                .swizzle = ISL_SWIZZLE_IDENTITY,
2880                .usage = ISL_SURF_USAGE_STORAGE_BIT,
2881             };
2882 
2883             /* Untyped fallback: no typed format is available, so set up
2883              * a raw buffer surface instead.
2883              */
2884             if (isl_fmt == ISL_FORMAT_RAW) {
2885                fill_buffer_surface_state(&screen->isl_dev, res,
2886                                          iv->surface_state.cpu,
2887                                          isl_fmt, ISL_SWIZZLE_IDENTITY,
2888                                          0, res->bo->size,
2889                                          ISL_SURF_USAGE_STORAGE_BIT);
2890             } else {
2891                fill_surface_states(&screen->isl_dev, &iv->surface_state, res,
2892                                    &res->surf, &view, 0, 0, 0);
2893             }
2894 
2895             isl_surf_fill_image_param(&screen->isl_dev,
2896                                       &image_params[start_slot + i],
2897                                       &res->surf, &view);
2898          } else {
2899             util_range_add(&res->base.b, &res->valid_buffer_range, img->u.buf.offset,
2900                            img->u.buf.offset + img->u.buf.size);
2901 
2902             fill_buffer_surface_state(&screen->isl_dev, res,
2903                                       iv->surface_state.cpu,
2904                                       isl_fmt, ISL_SWIZZLE_IDENTITY,
2905                                       img->u.buf.offset, img->u.buf.size,
2906                                       ISL_SURF_USAGE_STORAGE_BIT);
2907             fill_buffer_image_param(&image_params[start_slot + i],
2908                                     img->format, img->u.buf.size);
2909          }
2910 
2911          upload_surface_states(ice->state.surface_uploader, &iv->surface_state);
2912       } else {
2913          pipe_resource_reference(&iv->base.resource, NULL);
2914          pipe_resource_reference(&iv->surface_state.ref.res, NULL);
2915          fill_default_image_param(&image_params[start_slot + i]);
2916       }
2917    }
2918 
2919    ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << stage;
2920    ice->state.dirty |=
2921       stage == MESA_SHADER_COMPUTE ? IRIS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES
2922                                    : IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
2923 
2924    /* Broadwell also needs brw_image_params re-uploaded */
2925    if (GFX_VER < 9) {
2926       ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_VS << stage;
2927       shs->sysvals_need_upload = true;
2928    }
2929 
2930    if (unbind_num_trailing_slots) {
2931       iris_set_shader_images(ctx, p_stage, start_slot + count,
2932                              unbind_num_trailing_slots, 0, NULL);
2933    }
2934 }
2935 
2936 UNUSED static bool
2937 is_sampler_view_3d(const struct iris_sampler_view *view)
2938 {
2939    return view && view->res->base.b.target == PIPE_TEXTURE_3D;
2940 }
2941 
2942 /**
2943  * The pipe->set_sampler_views() driver hook.
2944  */
2945 static void
2946 iris_set_sampler_views(struct pipe_context *ctx,
2947                        enum pipe_shader_type p_stage,
2948                        unsigned start, unsigned count,
2949                        unsigned unbind_num_trailing_slots,
2950                        bool take_ownership,
2951                        struct pipe_sampler_view **views)
2952 {
2953    struct iris_context *ice = (struct iris_context *) ctx;
2954    UNUSED struct iris_screen *screen = (void *) ctx->screen;
2955    UNUSED const struct intel_device_info *devinfo = &screen->devinfo;
2956    gl_shader_stage stage = stage_from_pipe(p_stage);
2957    struct iris_shader_state *shs = &ice->state.shaders[stage];
2958    unsigned i;
2959 
2960    shs->bound_sampler_views &=
2961       ~u_bit_consecutive(start, count + unbind_num_trailing_slots);
2962 
2963    for (i = 0; i < count; i++) {
2964       struct pipe_sampler_view *pview = views ? views[i] : NULL;
2965       struct iris_sampler_view *view = (void *) pview;
2966 
2967 #if GFX_VERx10 == 125
2968       if (is_sampler_view_3d(shs->textures[start + i]) !=
2969           is_sampler_view_3d(view))
2970          ice->state.stage_dirty |= IRIS_STAGE_DIRTY_SAMPLER_STATES_VS << stage;
2971 #endif
2972 
2973       if (take_ownership) {
2974          pipe_sampler_view_reference((struct pipe_sampler_view **)
2975                                      &shs->textures[start + i], NULL);
2976          shs->textures[start + i] = (struct iris_sampler_view *)pview;
2977       } else {
2978          pipe_sampler_view_reference((struct pipe_sampler_view **)
2979                                      &shs->textures[start + i], pview);
2980       }
2981       if (view) {
2982          view->res->bind_history |= PIPE_BIND_SAMPLER_VIEW;
2983          view->res->bind_stages |= 1 << stage;
2984 
2985          shs->bound_sampler_views |= 1 << (start + i);
2986 
2987          update_surface_state_addrs(ice->state.surface_uploader,
2988                                     &view->surface_state, view->res->bo);
2989       }
2990    }
2991    for (; i < count + unbind_num_trailing_slots; i++) {
2992       pipe_sampler_view_reference((struct pipe_sampler_view **)
2993                                   &shs->textures[start + i], NULL);
2994    }
2995 
2996    ice->state.stage_dirty |= (IRIS_STAGE_DIRTY_BINDINGS_VS << stage);
2997    ice->state.dirty |=
2998       stage == MESA_SHADER_COMPUTE ? IRIS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES
2999                                    : IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
3000 }
3001 
3002 static void
3003 iris_set_compute_resources(struct pipe_context *ctx,
3004                            unsigned start, unsigned count,
3005                            struct pipe_surface **resources)
3006 {
3007    assert(count == 0);
3008 }
3009 
3010 static void
3011 iris_set_global_binding(struct pipe_context *ctx,
3012                         unsigned start_slot, unsigned count,
3013                         struct pipe_resource **resources,
3014                         uint32_t **handles)
3015 {
3016    struct iris_context *ice = (struct iris_context *) ctx;
3017 
3018    assert(start_slot + count <= IRIS_MAX_GLOBAL_BINDINGS);
3019    for (unsigned i = 0; i < count; i++) {
3020       if (resources && resources[i]) {
3021          pipe_resource_reference(&ice->state.global_bindings[start_slot + i],
3022                                  resources[i]);
3023          struct iris_resource *res = (void *) resources[i];
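         /* Hand the buffer's GPU address back through the caller-provided
          * handle pointer, for use as a raw global pointer by compute kernels.
          */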
3024          uint64_t addr = res->bo->address;
3025          memcpy(handles[i], &addr, sizeof(addr));
3026       } else {
3027          pipe_resource_reference(&ice->state.global_bindings[start_slot + i],
3028                                  NULL);
3029       }
3030    }
3031 
3032    ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_CS;
3033 }
3034 
3035 /**
3036  * The pipe->set_tess_state() driver hook.
3037  */
3038 static void
3039 iris_set_tess_state(struct pipe_context *ctx,
3040                     const float default_outer_level[4],
3041                     const float default_inner_level[2])
3042 {
3043    struct iris_context *ice = (struct iris_context *) ctx;
3044    struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_CTRL];
3045 
3046    memcpy(&ice->state.default_outer_level[0], &default_outer_level[0], 4 * sizeof(float));
3047    memcpy(&ice->state.default_inner_level[0], &default_inner_level[0], 2 * sizeof(float));
3048 
3049    ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_TCS;
3050    shs->sysvals_need_upload = true;
3051 }
3052 
3053 static void
3054 iris_set_patch_vertices(struct pipe_context *ctx, uint8_t patch_vertices)
3055 {
3056    struct iris_context *ice = (struct iris_context *) ctx;
3057 
3058    ice->state.patch_vertices = patch_vertices;
3059 }
3060 
3061 static void
3062 iris_surface_destroy(struct pipe_context *ctx, struct pipe_surface *p_surf)
3063 {
3064    struct iris_surface *surf = (void *) p_surf;
3065    pipe_resource_reference(&p_surf->texture, NULL);
3066    pipe_resource_reference(&surf->surface_state.ref.res, NULL);
3067    pipe_resource_reference(&surf->surface_state_read.ref.res, NULL);
3068    free(surf->surface_state.cpu);
3069    free(surf->surface_state_read.cpu);
3070    free(surf);
3071 }
3072 
3073 static void
3074 iris_set_clip_state(struct pipe_context *ctx,
3075                     const struct pipe_clip_state *state)
3076 {
3077    struct iris_context *ice = (struct iris_context *) ctx;
3078    struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_VERTEX];
3079    struct iris_shader_state *gshs = &ice->state.shaders[MESA_SHADER_GEOMETRY];
3080    struct iris_shader_state *tshs = &ice->state.shaders[MESA_SHADER_TESS_EVAL];
3081 
3082    memcpy(&ice->state.clip_planes, state, sizeof(*state));
3083 
3084    ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_VS |
3085                              IRIS_STAGE_DIRTY_CONSTANTS_GS |
3086                              IRIS_STAGE_DIRTY_CONSTANTS_TES;
3087    shs->sysvals_need_upload = true;
3088    gshs->sysvals_need_upload = true;
3089    tshs->sysvals_need_upload = true;
3090 }
3091 
3092 /**
3093  * The pipe->set_polygon_stipple() driver hook.
3094  */
3095 static void
3096 iris_set_polygon_stipple(struct pipe_context *ctx,
3097                          const struct pipe_poly_stipple *state)
3098 {
3099    struct iris_context *ice = (struct iris_context *) ctx;
3100    memcpy(&ice->state.poly_stipple, state, sizeof(*state));
3101    ice->state.dirty |= IRIS_DIRTY_POLYGON_STIPPLE;
3102 }
3103 
3104 /**
3105  * The pipe->set_sample_mask() driver hook.
3106  */
3107 static void
3108 iris_set_sample_mask(struct pipe_context *ctx, unsigned sample_mask)
3109 {
3110    struct iris_context *ice = (struct iris_context *) ctx;
3111 
3112    /* We only support 16x MSAA, so we have 16 bits of sample mask.
3113     * st/mesa may pass us 0xffffffff though, meaning "enable all samples".
3114     */
3115    ice->state.sample_mask = sample_mask & 0xffff;
3116    ice->state.dirty |= IRIS_DIRTY_SAMPLE_MASK;
3117 }
3118 
3119 /**
3120  * The pipe->set_scissor_states() driver hook.
3121  *
3122  * This corresponds to our SCISSOR_RECT state structures.  It's an
3123  * exact match, so we just store them, and memcpy them out later.
3124  */
3125 static void
3126 iris_set_scissor_states(struct pipe_context *ctx,
3127                         unsigned start_slot,
3128                         unsigned num_scissors,
3129                         const struct pipe_scissor_state *rects)
3130 {
3131    struct iris_context *ice = (struct iris_context *) ctx;
3132 
3133    for (unsigned i = 0; i < num_scissors; i++) {
3134       if (rects[i].minx == rects[i].maxx || rects[i].miny == rects[i].maxy) {
3135          /* If the scissor was out of bounds and got clamped to 0 width/height
3136           * at the bounds, the subtraction of 1 from maximums could produce a
3137           * negative number and thus not clip anything.  Instead, just provide
3138           * a min > max scissor inside the bounds, which produces the expected
3139           * no rendering.
3140           */
3141          ice->state.scissors[start_slot + i] = (struct pipe_scissor_state) {
3142             .minx = 1, .maxx = 0, .miny = 1, .maxy = 0,
3143          };
3144       } else {
3145          ice->state.scissors[start_slot + i] = (struct pipe_scissor_state) {
3146             .minx = rects[i].minx,     .miny = rects[i].miny,
3147             .maxx = rects[i].maxx - 1, .maxy = rects[i].maxy - 1,
3148          };
3149       }
3150    }
3151 
3152    ice->state.dirty |= IRIS_DIRTY_SCISSOR_RECT;
3153 }
3154 
3155 /**
3156  * The pipe->set_stencil_ref() driver hook.
3157  *
3158  * This is added to 3DSTATE_WM_DEPTH_STENCIL dynamically at draw time.
3159  */
3160 static void
3161 iris_set_stencil_ref(struct pipe_context *ctx,
3162                      const struct pipe_stencil_ref state)
3163 {
3164    struct iris_context *ice = (struct iris_context *) ctx;
3165    memcpy(&ice->state.stencil_ref, &state, sizeof(state));
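   /* Flag whichever state actually carries the reference value on this
    * generation: IRIS_DIRTY_STENCIL_REF on Gfx12+, 3DSTATE_WM_DEPTH_STENCIL
    * on Gfx9-11, and COLOR_CALC_STATE on Gfx8.
    */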
3166    if (GFX_VER >= 12)
3167       ice->state.dirty |= IRIS_DIRTY_STENCIL_REF;
3168    else if (GFX_VER >= 9)
3169       ice->state.dirty |= IRIS_DIRTY_WM_DEPTH_STENCIL;
3170    else
3171       ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE;
3172 }
3173 
3174 static float
3175 viewport_extent(const struct pipe_viewport_state *state, int axis, float sign)
3176 {
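   /* One edge of the viewport along the given axis: the center (translate)
    * plus or minus the half-extent (scale), i.e. translate ± |scale|.
    */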
3177    return copysignf(state->scale[axis], sign) + state->translate[axis];
3178 }
3179 
3180 /**
3181  * The pipe->set_viewport_states() driver hook.
3182  *
3183  * This corresponds to our SF_CLIP_VIEWPORT states.  We can't calculate
3184  * the guardband yet, as we need the framebuffer dimensions, but we can
3185  * at least fill out the rest.
3186  */
3187 static void
3188 iris_set_viewport_states(struct pipe_context *ctx,
3189                          unsigned start_slot,
3190                          unsigned count,
3191                          const struct pipe_viewport_state *states)
3192 {
3193    struct iris_context *ice = (struct iris_context *) ctx;
3194 
3195    memcpy(&ice->state.viewports[start_slot], states, sizeof(*states) * count);
3196 
3197    ice->state.dirty |= IRIS_DIRTY_SF_CL_VIEWPORT;
3198 
3199    if (ice->state.cso_rast && (!ice->state.cso_rast->depth_clip_near ||
3200                                !ice->state.cso_rast->depth_clip_far))
3201       ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
3202 }
3203 
3204 /**
3205  * The pipe->set_framebuffer_state() driver hook.
3206  *
3207  * Sets the current draw FBO, including color render targets, depth,
3208  * and stencil buffers.
3209  */
3210 static void
3211 iris_set_framebuffer_state(struct pipe_context *ctx,
3212                            const struct pipe_framebuffer_state *state)
3213 {
3214    struct iris_context *ice = (struct iris_context *) ctx;
3215    struct iris_screen *screen = (struct iris_screen *)ctx->screen;
3216    struct isl_device *isl_dev = &screen->isl_dev;
3217    struct pipe_framebuffer_state *cso = &ice->state.framebuffer;
3218    struct iris_resource *zres;
3219    struct iris_resource *stencil_res;
3220 
3221    unsigned samples = util_framebuffer_get_num_samples(state);
3222    unsigned layers = util_framebuffer_get_num_layers(state);
3223 
3224    if (cso->samples != samples) {
3225       ice->state.dirty |= IRIS_DIRTY_MULTISAMPLE;
3226 
3227       /* We need to toggle 3DSTATE_PS::32 Pixel Dispatch Enable */
3228       if (GFX_VER >= 9 && (cso->samples == 16 || samples == 16))
3229          ice->state.stage_dirty |= IRIS_STAGE_DIRTY_FS;
3230    }
3231 
3232    if (cso->nr_cbufs != state->nr_cbufs) {
3233       ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
3234    }
3235 
3236    if ((cso->layers == 0) != (layers == 0)) {
3237       ice->state.dirty |= IRIS_DIRTY_CLIP;
3238    }
3239 
3240    if (cso->width != state->width || cso->height != state->height) {
3241       ice->state.dirty |= IRIS_DIRTY_SF_CL_VIEWPORT;
3242    }
3243 
3244    if (cso->zsbuf || state->zsbuf) {
3245       ice->state.dirty |= IRIS_DIRTY_DEPTH_BUFFER;
3246    }
3247 
3248    util_copy_framebuffer_state(cso, state);
3249    cso->samples = samples;
3250    cso->layers = layers;
3251 
3252    struct iris_depth_buffer_state *cso_z = &ice->state.genx->depth_buffer;
3253 
3254    struct isl_view view = {
3255       .base_level = 0,
3256       .levels = 1,
3257       .base_array_layer = 0,
3258       .array_len = 1,
3259       .swizzle = ISL_SWIZZLE_IDENTITY,
3260    };
3261 
3262    struct isl_depth_stencil_hiz_emit_info info = {
3263       .view = &view,
3264       .mocs = iris_mocs(NULL, isl_dev, ISL_SURF_USAGE_DEPTH_BIT),
3265    };
3266 
3267    if (cso->zsbuf) {
3268       iris_get_depth_stencil_resources(cso->zsbuf->texture, &zres,
3269                                        &stencil_res);
3270 
3271       view.base_level = cso->zsbuf->u.tex.level;
3272       view.base_array_layer = cso->zsbuf->u.tex.first_layer;
3273       view.array_len =
3274          cso->zsbuf->u.tex.last_layer - cso->zsbuf->u.tex.first_layer + 1;
3275 
3276       if (zres) {
3277          view.usage |= ISL_SURF_USAGE_DEPTH_BIT;
3278 
3279          info.depth_surf = &zres->surf;
3280          info.depth_address = zres->bo->address + zres->offset;
3281          info.mocs = iris_mocs(zres->bo, isl_dev, view.usage);
3282 
3283          view.format = zres->surf.format;
3284 
3285          if (iris_resource_level_has_hiz(zres, view.base_level)) {
3286             info.hiz_usage = zres->aux.usage;
3287             info.hiz_surf = &zres->aux.surf;
3288             info.hiz_address = zres->aux.bo->address + zres->aux.offset;
3289          }
3290 
3291          ice->state.hiz_usage = info.hiz_usage;
3292       }
3293 
3294       if (stencil_res) {
3295          view.usage |= ISL_SURF_USAGE_STENCIL_BIT;
3296          info.stencil_aux_usage = stencil_res->aux.usage;
3297          info.stencil_surf = &stencil_res->surf;
3298          info.stencil_address = stencil_res->bo->address + stencil_res->offset;
3299          if (!zres) {
3300             view.format = stencil_res->surf.format;
3301             info.mocs = iris_mocs(stencil_res->bo, isl_dev, view.usage);
3302          }
3303       }
3304    }
3305 
3306    isl_emit_depth_stencil_hiz_s(isl_dev, cso_z->packets, &info);
3307 
3308    /* Make a null surface for unbound buffers */
3309    void *null_surf_map =
3310       upload_state(ice->state.surface_uploader, &ice->state.null_fb,
3311                    4 * GENX(RENDER_SURFACE_STATE_length), 64);
3312    isl_null_fill_state(&screen->isl_dev, null_surf_map,
3313                        .size = isl_extent3d(MAX2(cso->width, 1),
3314                                             MAX2(cso->height, 1),
3315                                             cso->layers ? cso->layers : 1));
3316    ice->state.null_fb.offset +=
3317       iris_bo_offset_from_base_address(iris_resource_bo(ice->state.null_fb.res));
3318 
3319    /* Render target change */
3320    ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_FS;
3321 
3322    ice->state.dirty |= IRIS_DIRTY_RENDER_BUFFER;
3323 
3324    ice->state.dirty |= IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
3325 
3326    ice->state.stage_dirty |=
3327       ice->state.stage_dirty_for_nos[IRIS_NOS_FRAMEBUFFER];
3328 
3329    if (GFX_VER == 8)
3330       ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
3331 }
3332 
3333 /**
3334  * The pipe->set_constant_buffer() driver hook.
3335  *
3336  * This uploads any constant data in user buffers, and references
3337  * any UBO resources containing constant data.
3338  */
3339 static void
3340 iris_set_constant_buffer(struct pipe_context *ctx,
3341                          enum pipe_shader_type p_stage, unsigned index,
3342                          bool take_ownership,
3343                          const struct pipe_constant_buffer *input)
3344 {
3345    struct iris_context *ice = (struct iris_context *) ctx;
3346    gl_shader_stage stage = stage_from_pipe(p_stage);
3347    struct iris_shader_state *shs = &ice->state.shaders[stage];
3348    struct pipe_shader_buffer *cbuf = &shs->constbuf[index];
3349 
3350    /* TODO: Only do this if the buffer changes? */
3351    pipe_resource_reference(&shs->constbuf_surf_state[index].res, NULL);
3352 
3353    if (input && input->buffer_size && (input->buffer || input->user_buffer)) {
3354       shs->bound_cbufs |= 1u << index;
3355 
3356       if (input->user_buffer) {
3357          void *map = NULL;
3358          pipe_resource_reference(&cbuf->buffer, NULL);
3359          u_upload_alloc(ice->ctx.const_uploader, 0, input->buffer_size, 64,
3360                         &cbuf->buffer_offset, &cbuf->buffer, (void **) &map);
3361 
3362          if (!cbuf->buffer) {
3363             /* Allocation was unsuccessful - just unbind */
3364             iris_set_constant_buffer(ctx, p_stage, index, false, NULL);
3365             return;
3366          }
3367 
3368          assert(map);
3369          memcpy(map, input->user_buffer, input->buffer_size);
3370       } else if (input->buffer) {
3371          if (cbuf->buffer != input->buffer) {
3372             ice->state.dirty |= (IRIS_DIRTY_RENDER_MISC_BUFFER_FLUSHES |
3373                                  IRIS_DIRTY_COMPUTE_MISC_BUFFER_FLUSHES);
3374             shs->dirty_cbufs |= 1u << index;
3375          }
3376 
3377          if (take_ownership) {
3378             pipe_resource_reference(&cbuf->buffer, NULL);
3379             cbuf->buffer = input->buffer;
3380          } else {
3381             pipe_resource_reference(&cbuf->buffer, input->buffer);
3382          }
3383 
3384          cbuf->buffer_offset = input->buffer_offset;
3385       }
3386 
3387       cbuf->buffer_size =
3388          MIN2(input->buffer_size,
3389               iris_resource_bo(cbuf->buffer)->size - cbuf->buffer_offset);
3390 
3391       struct iris_resource *res = (void *) cbuf->buffer;
3392       res->bind_history |= PIPE_BIND_CONSTANT_BUFFER;
3393       res->bind_stages |= 1 << stage;
3394    } else {
3395       shs->bound_cbufs &= ~(1u << index);
3396       pipe_resource_reference(&cbuf->buffer, NULL);
3397    }
3398 
3399    ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_VS << stage;
3400 }
3401 
3402 static void
3403 upload_sysvals(struct iris_context *ice,
3404                gl_shader_stage stage,
3405                const struct pipe_grid_info *grid)
3406 {
3407    UNUSED struct iris_genx_state *genx = ice->state.genx;
3408    struct iris_shader_state *shs = &ice->state.shaders[stage];
3409 
3410    struct iris_compiled_shader *shader = ice->shaders.prog[stage];
3411    if (!shader || (shader->num_system_values == 0 &&
3412                    shader->kernel_input_size == 0))
3413       return;
3414 
3415    assert(shader->num_cbufs > 0);
3416 
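   /* System values live in the shader's last constant buffer: kernel inputs
    * (compute only) come first, followed by one dword per system value.
    */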
3417    unsigned sysval_cbuf_index = shader->num_cbufs - 1;
3418    struct pipe_shader_buffer *cbuf = &shs->constbuf[sysval_cbuf_index];
3419    unsigned system_values_start =
3420       ALIGN(shader->kernel_input_size, sizeof(uint32_t));
3421    unsigned upload_size = system_values_start +
3422                           shader->num_system_values * sizeof(uint32_t);
3423    void *map = NULL;
3424 
3425    assert(sysval_cbuf_index < PIPE_MAX_CONSTANT_BUFFERS);
3426    u_upload_alloc(ice->ctx.const_uploader, 0, upload_size, 64,
3427                   &cbuf->buffer_offset, &cbuf->buffer, &map);
3428 
3429    if (shader->kernel_input_size > 0)
3430       memcpy(map, grid->input, shader->kernel_input_size);
3431 
3432    uint32_t *sysval_map = map + system_values_start;
3433    for (int i = 0; i < shader->num_system_values; i++) {
3434       uint32_t sysval = shader->system_values[i];
3435       uint32_t value = 0;
3436 
3437       if (BRW_PARAM_DOMAIN(sysval) == BRW_PARAM_DOMAIN_IMAGE) {
3438 #if GFX_VER == 8
3439          unsigned img = BRW_PARAM_IMAGE_IDX(sysval);
3440          unsigned offset = BRW_PARAM_IMAGE_OFFSET(sysval);
3441          struct brw_image_param *param =
3442             &genx->shaders[stage].image_param[img];
3443 
3444          assert(offset < sizeof(struct brw_image_param));
3445          value = ((uint32_t *) param)[offset];
3446 #endif
3447       } else if (sysval == BRW_PARAM_BUILTIN_ZERO) {
3448          value = 0;
3449       } else if (BRW_PARAM_BUILTIN_IS_CLIP_PLANE(sysval)) {
3450          int plane = BRW_PARAM_BUILTIN_CLIP_PLANE_IDX(sysval);
3451          int comp  = BRW_PARAM_BUILTIN_CLIP_PLANE_COMP(sysval);
3452          value = fui(ice->state.clip_planes.ucp[plane][comp]);
3453       } else if (sysval == BRW_PARAM_BUILTIN_PATCH_VERTICES_IN) {
3454          if (stage == MESA_SHADER_TESS_CTRL) {
3455             value = ice->state.vertices_per_patch;
3456          } else {
3457             assert(stage == MESA_SHADER_TESS_EVAL);
3458             const struct shader_info *tcs_info =
3459                iris_get_shader_info(ice, MESA_SHADER_TESS_CTRL);
3460             if (tcs_info)
3461                value = tcs_info->tess.tcs_vertices_out;
3462             else
3463                value = ice->state.vertices_per_patch;
3464          }
3465       } else if (sysval >= BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X &&
3466                  sysval <= BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_W) {
3467          unsigned i = sysval - BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X;
3468          value = fui(ice->state.default_outer_level[i]);
3469       } else if (sysval == BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X) {
3470          value = fui(ice->state.default_inner_level[0]);
3471       } else if (sysval == BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_Y) {
3472          value = fui(ice->state.default_inner_level[1]);
3473       } else if (sysval >= BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_X &&
3474                  sysval <= BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_Z) {
3475          unsigned i = sysval - BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_X;
3476          value = ice->state.last_block[i];
3477       } else if (sysval == BRW_PARAM_BUILTIN_WORK_DIM) {
3478          value = grid->work_dim;
3479       } else {
3480          assert(!"unhandled system value");
3481       }
3482 
3483       *sysval_map++ = value;
3484    }
3485 
3486    cbuf->buffer_size = upload_size;
3487    iris_upload_ubo_ssbo_surf_state(ice, cbuf,
3488                                    &shs->constbuf_surf_state[sysval_cbuf_index],
3489                                    ISL_SURF_USAGE_CONSTANT_BUFFER_BIT);
3490 
3491    shs->sysvals_need_upload = false;
3492 }
3493 
3494 /**
3495  * The pipe->set_shader_buffers() driver hook.
3496  *
3497  * This binds SSBOs and ABOs.  Unfortunately, we need to stream out
3498  * SURFACE_STATE here, as the buffer offset may change each time.
3499  */
3500 static void
3501 iris_set_shader_buffers(struct pipe_context *ctx,
3502                         enum pipe_shader_type p_stage,
3503                         unsigned start_slot, unsigned count,
3504                         const struct pipe_shader_buffer *buffers,
3505                         unsigned writable_bitmask)
3506 {
3507    struct iris_context *ice = (struct iris_context *) ctx;
3508    gl_shader_stage stage = stage_from_pipe(p_stage);
3509    struct iris_shader_state *shs = &ice->state.shaders[stage];
3510 
3511    unsigned modified_bits = u_bit_consecutive(start_slot, count);
3512 
3513    shs->bound_ssbos &= ~modified_bits;
3514    shs->writable_ssbos &= ~modified_bits;
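   /* writable_bitmask is relative to start_slot, so shift it into place. */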
3515    shs->writable_ssbos |= writable_bitmask << start_slot;
3516 
3517    for (unsigned i = 0; i < count; i++) {
3518       if (buffers && buffers[i].buffer) {
3519          struct iris_resource *res = (void *) buffers[i].buffer;
3520          struct pipe_shader_buffer *ssbo = &shs->ssbo[start_slot + i];
3521          struct iris_state_ref *surf_state =
3522             &shs->ssbo_surf_state[start_slot + i];
3523          pipe_resource_reference(&ssbo->buffer, &res->base.b);
3524          ssbo->buffer_offset = buffers[i].buffer_offset;
3525          ssbo->buffer_size =
3526             MIN2(buffers[i].buffer_size, res->bo->size - ssbo->buffer_offset);
3527 
3528          shs->bound_ssbos |= 1 << (start_slot + i);
3529 
3530          isl_surf_usage_flags_t usage = ISL_SURF_USAGE_STORAGE_BIT;
3531 
3532          iris_upload_ubo_ssbo_surf_state(ice, ssbo, surf_state, usage);
3533 
3534          res->bind_history |= PIPE_BIND_SHADER_BUFFER;
3535          res->bind_stages |= 1 << stage;
3536 
3537          util_range_add(&res->base.b, &res->valid_buffer_range, ssbo->buffer_offset,
3538                         ssbo->buffer_offset + ssbo->buffer_size);
3539       } else {
3540          pipe_resource_reference(&shs->ssbo[start_slot + i].buffer, NULL);
3541          pipe_resource_reference(&shs->ssbo_surf_state[start_slot + i].res,
3542                                  NULL);
3543       }
3544    }
3545 
3546    ice->state.dirty |= (IRIS_DIRTY_RENDER_MISC_BUFFER_FLUSHES |
3547                         IRIS_DIRTY_COMPUTE_MISC_BUFFER_FLUSHES);
3548    ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << stage;
3549 }
3550 
3551 static void
3552 iris_delete_state(struct pipe_context *ctx, void *state)
3553 {
3554    free(state);
3555 }
3556 
3557 /**
3558  * The pipe->set_vertex_buffers() driver hook.
3559  *
3560  * This translates pipe_vertex_buffer to our 3DSTATE_VERTEX_BUFFERS packet.
3561  */
3562 static void
3563 iris_set_vertex_buffers(struct pipe_context *ctx,
3564                         unsigned start_slot, unsigned count,
3565                         unsigned unbind_num_trailing_slots,
3566                         bool take_ownership,
3567                         const struct pipe_vertex_buffer *buffers)
3568 {
3569    struct iris_context *ice = (struct iris_context *) ctx;
3570    struct iris_screen *screen = (struct iris_screen *)ctx->screen;
3571    struct iris_genx_state *genx = ice->state.genx;
3572 
3573    ice->state.bound_vertex_buffers &=
3574       ~u_bit_consecutive64(start_slot, count + unbind_num_trailing_slots);
3575 
3576    for (unsigned i = 0; i < count; i++) {
3577       const struct pipe_vertex_buffer *buffer = buffers ? &buffers[i] : NULL;
3578       struct iris_vertex_buffer_state *state =
3579          &genx->vertex_buffers[start_slot + i];
3580 
3581       if (!buffer) {
3582          pipe_resource_reference(&state->resource, NULL);
3583          continue;
3584       }
3585 
3586       /* We may see user buffers that are NULL bindings. */
3587       assert(!(buffer->is_user_buffer && buffer->buffer.user != NULL));
3588 
3589       if (buffer->buffer.resource &&
3590           state->resource != buffer->buffer.resource)
3591          ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFER_FLUSHES;
3592 
3593       if (take_ownership) {
3594          pipe_resource_reference(&state->resource, NULL);
3595          state->resource = buffer->buffer.resource;
3596       } else {
3597          pipe_resource_reference(&state->resource, buffer->buffer.resource);
3598       }
3599       struct iris_resource *res = (void *) state->resource;
3600 
3601       state->offset = (int) buffer->buffer_offset;
3602 
3603       if (res) {
3604          ice->state.bound_vertex_buffers |= 1ull << (start_slot + i);
3605          res->bind_history |= PIPE_BIND_VERTEX_BUFFER;
3606       }
3607 
3608       iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) {
3609          vb.VertexBufferIndex = start_slot + i;
3610          vb.AddressModifyEnable = true;
3611          vb.BufferPitch = buffer->stride;
3612          if (res) {
3613             vb.BufferSize = res->base.b.width0 - (int) buffer->buffer_offset;
3614             vb.BufferStartingAddress =
3615                ro_bo(NULL, res->bo->address + (int) buffer->buffer_offset);
3616             vb.MOCS = iris_mocs(res->bo, &screen->isl_dev,
3617                                 ISL_SURF_USAGE_VERTEX_BUFFER_BIT);
3618 #if GFX_VER >= 12
3619             vb.L3BypassDisable       = true;
3620 #endif
3621          } else {
3622             vb.NullVertexBuffer = true;
3623             vb.MOCS = iris_mocs(NULL, &screen->isl_dev,
3624                                 ISL_SURF_USAGE_VERTEX_BUFFER_BIT);
3625          }
3626       }
3627    }
3628 
3629    for (unsigned i = 0; i < unbind_num_trailing_slots; i++) {
3630       struct iris_vertex_buffer_state *state =
3631          &genx->vertex_buffers[start_slot + count + i];
3632 
3633       pipe_resource_reference(&state->resource, NULL);
3634    }
3635 
3636    ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS;
3637 }
3638 
3639 /**
3640  * Gallium CSO for vertex elements.
3641  */
3642 struct iris_vertex_element_state {
3643    uint32_t vertex_elements[1 + 33 * GENX(VERTEX_ELEMENT_STATE_length)];
3644    uint32_t vf_instancing[33 * GENX(3DSTATE_VF_INSTANCING_length)];
3645    uint32_t edgeflag_ve[GENX(VERTEX_ELEMENT_STATE_length)];
3646    uint32_t edgeflag_vfi[GENX(3DSTATE_VF_INSTANCING_length)];
3647    unsigned count;
3648 };
3649 
3650 /**
3651  * The pipe->create_vertex_elements() driver hook.
3652  *
3653  * This translates pipe_vertex_element to our 3DSTATE_VERTEX_ELEMENTS
3654  * and 3DSTATE_VF_INSTANCING commands. The vertex_elements and vf_instancing
3655  * arrays are ready to be emitted at draw time if no EdgeFlag or SGVs are
3656  * needed; if they are, some information is only available at draw time.
3657  * We set up edgeflag_ve and edgeflag_vfi as alternative versions of the
3658  * last VERTEX_ELEMENT_STATE and 3DSTATE_VF_INSTANCING, to be used at draw
3659  * time if we detect that the Vertex Shader needs EdgeFlag.
3660  */
3661 static void *
3662 iris_create_vertex_elements(struct pipe_context *ctx,
3663                             unsigned count,
3664                             const struct pipe_vertex_element *state)
3665 {
3666    struct iris_screen *screen = (struct iris_screen *)ctx->screen;
3667    const struct intel_device_info *devinfo = &screen->devinfo;
3668    struct iris_vertex_element_state *cso =
3669       malloc(sizeof(struct iris_vertex_element_state));
3670 
3671    cso->count = count;
3672 
3673    iris_pack_command(GENX(3DSTATE_VERTEX_ELEMENTS), cso->vertex_elements, ve) {
3674       ve.DWordLength =
3675          1 + GENX(VERTEX_ELEMENT_STATE_length) * MAX2(count, 1) - 2;
3676    }
3677 
3678    uint32_t *ve_pack_dest = &cso->vertex_elements[1];
3679    uint32_t *vfi_pack_dest = cso->vf_instancing;
3680 
3681    if (count == 0) {
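      /* With no vertex elements, emit one dummy element sourcing the constant
       * (0, 0, 0, 1.0) so the VF unit still has something valid to fetch.
       */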
3682       iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
3683          ve.Valid = true;
3684          ve.SourceElementFormat = ISL_FORMAT_R32G32B32A32_FLOAT;
3685          ve.Component0Control = VFCOMP_STORE_0;
3686          ve.Component1Control = VFCOMP_STORE_0;
3687          ve.Component2Control = VFCOMP_STORE_0;
3688          ve.Component3Control = VFCOMP_STORE_1_FP;
3689       }
3690 
3691       iris_pack_command(GENX(3DSTATE_VF_INSTANCING), vfi_pack_dest, vi) {
3692       }
3693    }
3694 
3695    for (int i = 0; i < count; i++) {
3696       const struct iris_format_info fmt =
3697          iris_format_for_usage(devinfo, state[i].src_format, 0);
3698       unsigned comp[4] = { VFCOMP_STORE_SRC, VFCOMP_STORE_SRC,
3699                            VFCOMP_STORE_SRC, VFCOMP_STORE_SRC };
3700 
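      /* Components missing from the source format read back as 0, except the
       * fourth, which reads back as 1 (integer or float to match the format).
       */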
3701       switch (isl_format_get_num_channels(fmt.fmt)) {
3702       case 0: comp[0] = VFCOMP_STORE_0; FALLTHROUGH;
3703       case 1: comp[1] = VFCOMP_STORE_0; FALLTHROUGH;
3704       case 2: comp[2] = VFCOMP_STORE_0; FALLTHROUGH;
3705       case 3:
3706          comp[3] = isl_format_has_int_channel(fmt.fmt) ? VFCOMP_STORE_1_INT
3707                                                        : VFCOMP_STORE_1_FP;
3708          break;
3709       }
3710       iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
3711          ve.EdgeFlagEnable = false;
3712          ve.VertexBufferIndex = state[i].vertex_buffer_index;
3713          ve.Valid = true;
3714          ve.SourceElementOffset = state[i].src_offset;
3715          ve.SourceElementFormat = fmt.fmt;
3716          ve.Component0Control = comp[0];
3717          ve.Component1Control = comp[1];
3718          ve.Component2Control = comp[2];
3719          ve.Component3Control = comp[3];
3720       }
3721 
3722       iris_pack_command(GENX(3DSTATE_VF_INSTANCING), vfi_pack_dest, vi) {
3723          vi.VertexElementIndex = i;
3724          vi.InstancingEnable = state[i].instance_divisor > 0;
3725          vi.InstanceDataStepRate = state[i].instance_divisor;
3726       }
3727 
3728       ve_pack_dest += GENX(VERTEX_ELEMENT_STATE_length);
3729       vfi_pack_dest += GENX(3DSTATE_VF_INSTANCING_length);
3730    }
3731 
3732    /* An alternative version of the last VE and VFI is stored so it
3733     * can be used at draw time in case the Vertex Shader uses EdgeFlag.
3734     */
3735    if (count) {
3736       const unsigned edgeflag_index = count - 1;
3737       const struct iris_format_info fmt =
3738          iris_format_for_usage(devinfo, state[edgeflag_index].src_format, 0);
3739       iris_pack_state(GENX(VERTEX_ELEMENT_STATE), cso->edgeflag_ve, ve) {
3740          ve.EdgeFlagEnable = true;
3741          ve.VertexBufferIndex = state[edgeflag_index].vertex_buffer_index;
3742          ve.Valid = true;
3743          ve.SourceElementOffset = state[edgeflag_index].src_offset;
3744          ve.SourceElementFormat = fmt.fmt;
3745          ve.Component0Control = VFCOMP_STORE_SRC;
3746          ve.Component1Control = VFCOMP_STORE_0;
3747          ve.Component2Control = VFCOMP_STORE_0;
3748          ve.Component3Control = VFCOMP_STORE_0;
3749       }
3750       iris_pack_command(GENX(3DSTATE_VF_INSTANCING), cso->edgeflag_vfi, vi) {
3751          /* The vi.VertexElementIndex of the EdgeFlag Vertex Element is filled
3752           * at draw time, as it should change if SGVs are emitted.
3753           */
3754          vi.InstancingEnable = state[edgeflag_index].instance_divisor > 0;
3755          vi.InstanceDataStepRate = state[edgeflag_index].instance_divisor;
3756       }
3757    }
3758 
3759    return cso;
3760 }
3761 
3762 /**
3763  * The pipe->bind_vertex_elements_state() driver hook.
3764  */
3765 static void
3766 iris_bind_vertex_elements_state(struct pipe_context *ctx, void *state)
3767 {
3768    struct iris_context *ice = (struct iris_context *) ctx;
3769    struct iris_vertex_element_state *old_cso = ice->state.cso_vertex_elements;
3770    struct iris_vertex_element_state *new_cso = state;
3771 
3772    /* 3DSTATE_VF_SGVs overrides the last VE, so if the count is changing,
3773     * we need to re-emit it to ensure we're overriding the right one.
3774     */
3775    if (new_cso && cso_changed(count))
3776       ice->state.dirty |= IRIS_DIRTY_VF_SGVS;
3777 
3778    ice->state.cso_vertex_elements = state;
3779    ice->state.dirty |= IRIS_DIRTY_VERTEX_ELEMENTS;
3780 }
3781 
3782 /**
3783  * The pipe->create_stream_output_target() driver hook.
3784  *
3785  * "Target" here refers to a destination buffer.  We translate this into
3786  * a 3DSTATE_SO_BUFFER packet.  We can handle most fields, but don't yet
3787  * know which buffer this represents, or whether we ought to zero the
3788  * write-offsets, or append.  Those are handled in the set() hook.
3789  */
3790 static struct pipe_stream_output_target *
3791 iris_create_stream_output_target(struct pipe_context *ctx,
3792                                  struct pipe_resource *p_res,
3793                                  unsigned buffer_offset,
3794                                  unsigned buffer_size)
3795 {
3796    struct iris_resource *res = (void *) p_res;
3797    struct iris_stream_output_target *cso = calloc(1, sizeof(*cso));
3798    if (!cso)
3799       return NULL;
3800 
3801    res->bind_history |= PIPE_BIND_STREAM_OUTPUT;
3802 
3803    pipe_reference_init(&cso->base.reference, 1);
3804    pipe_resource_reference(&cso->base.buffer, p_res);
3805    cso->base.buffer_offset = buffer_offset;
3806    cso->base.buffer_size = buffer_size;
3807    cso->base.context = ctx;
3808 
3809    util_range_add(&res->base.b, &res->valid_buffer_range, buffer_offset,
3810                   buffer_offset + buffer_size);
3811 
3812    return &cso->base;
3813 }
3814 
3815 static void
3816 iris_stream_output_target_destroy(struct pipe_context *ctx,
3817                                   struct pipe_stream_output_target *state)
3818 {
3819    struct iris_stream_output_target *cso = (void *) state;
3820 
3821    pipe_resource_reference(&cso->base.buffer, NULL);
3822    pipe_resource_reference(&cso->offset.res, NULL);
3823 
3824    free(cso);
3825 }
3826 
3827 /**
3828  * The pipe->set_stream_output_targets() driver hook.
3829  *
3830  * At this point, we know which targets are bound to a particular index,
3831  * and also whether we want to append or start over.  We can finish the
3832  * 3DSTATE_SO_BUFFER packets we started earlier.
3833  */
3834 static void
3835 iris_set_stream_output_targets(struct pipe_context *ctx,
3836                                unsigned num_targets,
3837                                struct pipe_stream_output_target **targets,
3838                                const unsigned *offsets)
3839 {
3840    struct iris_context *ice = (struct iris_context *) ctx;
3841    struct iris_genx_state *genx = ice->state.genx;
3842    uint32_t *so_buffers = genx->so_buffers;
3843    struct iris_screen *screen = (struct iris_screen *)ctx->screen;
3844 
3845    const bool active = num_targets > 0;
3846    if (ice->state.streamout_active != active) {
3847       ice->state.streamout_active = active;
3848       ice->state.dirty |= IRIS_DIRTY_STREAMOUT;
3849 
3850       /* We only emit 3DSTATE_SO_DECL_LIST when streamout is active, because
3851        * it's a non-pipelined command.  If we're switching streamout on, we
3852        * may have missed emitting it earlier, so do so now.  (We're already
3853        * taking a stall to update 3DSTATE_SO_BUFFERS anyway...)
3854        */
3855       if (active) {
3856          ice->state.dirty |= IRIS_DIRTY_SO_DECL_LIST;
3857       } else {
3858          uint32_t flush = 0;
3859          for (int i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
3860             struct iris_stream_output_target *tgt =
3861                (void *) ice->state.so_target[i];
3862             if (tgt) {
3863                struct iris_resource *res = (void *) tgt->base.buffer;
3864 
3865                flush |= iris_flush_bits_for_history(ice, res);
3866                iris_dirty_for_history(ice, res);
3867             }
3868          }
3869 #if GFX_VER >= 12
3870          /* SO draws require flushing of const cache to make SO data
3871           * observable when VB/IB are cached in L3.
3872           */
3873          if (flush & PIPE_CONTROL_VF_CACHE_INVALIDATE)
3874             flush |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
3875 #endif
3876          iris_emit_pipe_control_flush(&ice->batches[IRIS_BATCH_RENDER],
3877                                       "make streamout results visible", flush);
3878       }
3879    }
3880 
3881    for (int i = 0; i < 4; i++) {
3882       pipe_so_target_reference(&ice->state.so_target[i],
3883                                i < num_targets ? targets[i] : NULL);
3884    }
3885 
3886    /* No need to update 3DSTATE_SO_BUFFER unless SOL is active. */
3887    if (!active)
3888       return;
3889 
3890    for (unsigned i = 0; i < 4; i++,
3891         so_buffers += GENX(3DSTATE_SO_BUFFER_length)) {
3892 
3893       struct iris_stream_output_target *tgt = (void *) ice->state.so_target[i];
3894       unsigned offset = offsets[i];
3895 
3896       if (!tgt) {
3897          iris_pack_command(GENX(3DSTATE_SO_BUFFER), so_buffers, sob) {
3898 #if GFX_VER < 12
3899             sob.SOBufferIndex = i;
3900 #else
3901             sob._3DCommandOpcode = 0;
3902             sob._3DCommandSubOpcode = SO_BUFFER_INDEX_0_CMD + i;
3903 #endif
3904             sob.MOCS = iris_mocs(NULL, &screen->isl_dev, 0);
3905          }
3906          continue;
3907       }
3908 
3909       if (!tgt->offset.res)
3910          upload_state(ctx->const_uploader, &tgt->offset, sizeof(uint32_t), 4);
3911 
3912       struct iris_resource *res = (void *) tgt->base.buffer;
3913 
3914       /* Note that offsets[i] will either be 0, causing us to zero
3915        * the value in the buffer, or 0xFFFFFFFF, which happens to mean
3916        * "continue appending at the existing offset."
3917        */
3918       assert(offset == 0 || offset == 0xFFFFFFFF);
3919 
3920       /* When we're first called with an offset of 0, we want the next
3921        * 3DSTATE_SO_BUFFER packets to reset the offset to the beginning.
3922        * Any further times we emit those packets, we want to use 0xFFFFFFFF
3923        * to continue appending from the current offset.
3924        *
3925        * Note that we might be called by Begin (offset = 0), Pause, then
3926        * Resume (offset = 0xFFFFFFFF) before ever drawing (where these
3927        * commands will actually be sent to the GPU).  In this case, we
3928        * don't want to append - we still want to do our initial zeroing.
3929        */
3930       if (offset == 0)
3931          tgt->zero_offset = true;
3932 
3933       iris_pack_command(GENX(3DSTATE_SO_BUFFER), so_buffers, sob) {
3934 #if GFX_VER < 12
3935          sob.SOBufferIndex = i;
3936 #else
3937          sob._3DCommandOpcode = 0;
3938          sob._3DCommandSubOpcode = SO_BUFFER_INDEX_0_CMD + i;
3939 #endif
3940          sob.SurfaceBaseAddress =
3941             rw_bo(NULL, res->bo->address + tgt->base.buffer_offset,
3942                   IRIS_DOMAIN_OTHER_WRITE);
3943          sob.SOBufferEnable = true;
3944          sob.StreamOffsetWriteEnable = true;
3945          sob.StreamOutputBufferOffsetAddressEnable = true;
3946          sob.MOCS = iris_mocs(res->bo, &screen->isl_dev, 0);
3947 
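         /* The Surface Size field is expressed in DWords, minus one. */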
3948          sob.SurfaceSize = MAX2(tgt->base.buffer_size / 4, 1) - 1;
3949          sob.StreamOutputBufferOffsetAddress =
3950             rw_bo(NULL, iris_resource_bo(tgt->offset.res)->address +
3951                         tgt->offset.offset, IRIS_DOMAIN_OTHER_WRITE);
3952          sob.StreamOffset = 0xFFFFFFFF; /* not offset, see above */
3953       }
3954    }
3955 
3956    ice->state.dirty |= IRIS_DIRTY_SO_BUFFERS;
3957 }
3958 
3959 /**
3960  * An iris-vtable helper for encoding the 3DSTATE_SO_DECL_LIST and
3961  * 3DSTATE_STREAMOUT packets.
3962  *
3963  * 3DSTATE_SO_DECL_LIST is a list of shader outputs we want the streamout
3964  * hardware to record.  We can create it entirely based on the shader, with
3965  * no dynamic state dependencies.
3966  *
3967  * 3DSTATE_STREAMOUT is an annoying mix of shader-based information and
3968  * state-based settings.  We capture the shader-related ones here, and merge
3969  * the rest in at draw time.
3970  */
3971 static uint32_t *
3972 iris_create_so_decl_list(const struct pipe_stream_output_info *info,
3973                          const struct brw_vue_map *vue_map)
3974 {
3975    struct GENX(SO_DECL) so_decl[MAX_VERTEX_STREAMS][128];
3976    int buffer_mask[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
3977    int next_offset[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
3978    int decls[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
3979    int max_decls = 0;
3980    STATIC_ASSERT(ARRAY_SIZE(so_decl[0]) >= MAX_PROGRAM_OUTPUTS);
3981 
3982    memset(so_decl, 0, sizeof(so_decl));
3983 
3984    /* Construct the list of SO_DECLs to be emitted.  The formatting of the
3985     * command feels strange -- each dword pair contains a SO_DECL per stream.
3986     */
3987    for (unsigned i = 0; i < info->num_outputs; i++) {
3988       const struct pipe_stream_output *output = &info->output[i];
3989       const int buffer = output->output_buffer;
3990       const int varying = output->register_index;
3991       const unsigned stream_id = output->stream;
3992       assert(stream_id < MAX_VERTEX_STREAMS);
3993 
3994       buffer_mask[stream_id] |= 1 << buffer;
3995 
3996       assert(vue_map->varying_to_slot[varying] >= 0);
3997 
3998       /* Mesa doesn't store entries for gl_SkipComponents in the Outputs[]
3999        * array.  Instead, it simply increments DstOffset for the following
4000        * input by the number of components that should be skipped.
4001        *
4002        * Our hardware is unusual in that it requires us to program SO_DECLs
4003        * for fake "hole" components, rather than simply taking the offset
4004        * for each real varying.  Each hole can have size 1, 2, 3, or 4; we
4005        * program as many size = 4 holes as we can, then a final hole to
4006        * accommodate the final 1, 2, or 3 remaining.
4007        */
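      /* For example, a seven-component gap would be emitted as one size-4
       * hole followed by a size-3 hole.
       */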
4008       int skip_components = output->dst_offset - next_offset[buffer];
4009 
4010       while (skip_components > 0) {
4011          so_decl[stream_id][decls[stream_id]++] = (struct GENX(SO_DECL)) {
4012             .HoleFlag = 1,
4013             .OutputBufferSlot = output->output_buffer,
4014             .ComponentMask = (1 << MIN2(skip_components, 4)) - 1,
4015          };
4016          skip_components -= 4;
4017       }
4018 
4019       next_offset[buffer] = output->dst_offset + output->num_components;
4020 
4021       so_decl[stream_id][decls[stream_id]++] = (struct GENX(SO_DECL)) {
4022          .OutputBufferSlot = output->output_buffer,
4023          .RegisterIndex = vue_map->varying_to_slot[varying],
4024          .ComponentMask =
4025             ((1 << output->num_components) - 1) << output->start_component,
4026       };
4027 
4028       if (decls[stream_id] > max_decls)
4029          max_decls = decls[stream_id];
4030    }
4031 
4032    unsigned dwords = GENX(3DSTATE_STREAMOUT_length) + (3 + 2 * max_decls);
4033    uint32_t *map = ralloc_size(NULL, sizeof(uint32_t) * dwords);
4034    uint32_t *so_decl_map = map + GENX(3DSTATE_STREAMOUT_length);
4035 
4036    iris_pack_command(GENX(3DSTATE_STREAMOUT), map, sol) {
4037       int urb_entry_read_offset = 0;
4038       int urb_entry_read_length = (vue_map->num_slots + 1) / 2 -
4039          urb_entry_read_offset;
4040 
4041       /* We always read the whole vertex.  This could be reduced at some
4042        * point by reading less and offsetting the register index in the
4043        * SO_DECLs.
4044        */
4045       sol.Stream0VertexReadOffset = urb_entry_read_offset;
4046       sol.Stream0VertexReadLength = urb_entry_read_length - 1;
4047       sol.Stream1VertexReadOffset = urb_entry_read_offset;
4048       sol.Stream1VertexReadLength = urb_entry_read_length - 1;
4049       sol.Stream2VertexReadOffset = urb_entry_read_offset;
4050       sol.Stream2VertexReadLength = urb_entry_read_length - 1;
4051       sol.Stream3VertexReadOffset = urb_entry_read_offset;
4052       sol.Stream3VertexReadLength = urb_entry_read_length - 1;
4053 
4054       /* Set buffer pitches; 0 means unbound. */
4055       sol.Buffer0SurfacePitch = 4 * info->stride[0];
4056       sol.Buffer1SurfacePitch = 4 * info->stride[1];
4057       sol.Buffer2SurfacePitch = 4 * info->stride[2];
4058       sol.Buffer3SurfacePitch = 4 * info->stride[3];
4059    }
4060 
4061    iris_pack_command(GENX(3DSTATE_SO_DECL_LIST), so_decl_map, list) {
4062       list.DWordLength = 3 + 2 * max_decls - 2;
4063       list.StreamtoBufferSelects0 = buffer_mask[0];
4064       list.StreamtoBufferSelects1 = buffer_mask[1];
4065       list.StreamtoBufferSelects2 = buffer_mask[2];
4066       list.StreamtoBufferSelects3 = buffer_mask[3];
4067       list.NumEntries0 = decls[0];
4068       list.NumEntries1 = decls[1];
4069       list.NumEntries2 = decls[2];
4070       list.NumEntries3 = decls[3];
4071    }
4072 
4073    for (int i = 0; i < max_decls; i++) {
4074       iris_pack_state(GENX(SO_DECL_ENTRY), so_decl_map + 3 + i * 2, entry) {
4075          entry.Stream0Decl = so_decl[0][i];
4076          entry.Stream1Decl = so_decl[1][i];
4077          entry.Stream2Decl = so_decl[2][i];
4078          entry.Stream3Decl = so_decl[3][i];
4079       }
4080    }
4081 
4082    return map;
4083 }
4084 
4085 static void
4086 iris_compute_sbe_urb_read_interval(uint64_t fs_input_slots,
4087                                    const struct brw_vue_map *last_vue_map,
4088                                    bool two_sided_color,
4089                                    unsigned *out_offset,
4090                                    unsigned *out_length)
4091 {
4092    /* The compiler computes the first URB slot without considering COL/BFC
4093     * swizzling (because it doesn't know whether it's enabled), so we need
4094     * to do that here too.  This may result in a smaller offset, which
4095     * should be safe.
4096     */
4097    const unsigned first_slot =
4098       brw_compute_first_urb_slot_required(fs_input_slots, last_vue_map);
4099 
4100    /* This becomes the URB read offset (counted in pairs of slots). */
4101    assert(first_slot % 2 == 0);
4102    *out_offset = first_slot / 2;
4103 
4104    /* We need to adjust the inputs read to account for front/back color
4105     * swizzling, as it can make the URB length longer.
4106     */
4107    for (int c = 0; c <= 1; c++) {
4108       if (fs_input_slots & (VARYING_BIT_COL0 << c)) {
4109          /* If two sided color is enabled, the fragment shader's gl_Color
4110           * (COL0) input comes from either the gl_FrontColor (COL0) or
4111           * gl_BackColor (BFC0) input varyings.  Mark BFC as used, too.
4112           */
4113          if (two_sided_color)
4114             fs_input_slots |= (VARYING_BIT_BFC0 << c);
4115 
4116          /* If front color isn't written, we opt to give them back color
4117           * instead of an undefined value.  Switch from COL to BFC.
4118           */
4119          if (last_vue_map->varying_to_slot[VARYING_SLOT_COL0 + c] == -1) {
4120             fs_input_slots &= ~(VARYING_BIT_COL0 << c);
4121             fs_input_slots |= (VARYING_BIT_BFC0 << c);
4122          }
4123       }
4124    }
4125 
4126    /* Compute the minimum URB Read Length necessary for the FS inputs.
4127     *
4128     * From the Sandy Bridge PRM, Volume 2, Part 1, documentation for
4129     * 3DSTATE_SF DWord 1 bits 15:11, "Vertex URB Entry Read Length":
4130     *
4131     * "This field should be set to the minimum length required to read the
4132     *  maximum source attribute.  The maximum source attribute is indicated
4133     *  by the maximum value of the enabled Attribute # Source Attribute if
4134     *  Attribute Swizzle Enable is set, Number of Output Attributes-1 if
4135     *  enable is not set.
4136     *  read_length = ceiling((max_source_attr + 1) / 2)
4137     *
4138     *  [errata] Corruption/Hang possible if length programmed larger than
4139     *  recommended"
4140     *
4141     * Similar text exists for Ivy Bridge.
4142     *
4143     * We find the last URB slot that's actually read by the FS.
4144     */
4145    unsigned last_read_slot = last_vue_map->num_slots - 1;
4146    while (last_read_slot > first_slot && !(fs_input_slots &
4147           (1ull << last_vue_map->slot_to_varying[last_read_slot])))
4148       --last_read_slot;
4149 
4150    /* The URB read length is the difference of the two, counted in pairs. */
4151    *out_length = DIV_ROUND_UP(last_read_slot - first_slot + 1, 2);
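
   /* As an example, first_slot == 2 and last_read_slot == 5 would yield
    * *out_offset == 1 and *out_length == 2 (i.e. two pairs of slots).
    */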
4152 }
4153 
4154 static void
4155 iris_emit_sbe_swiz(struct iris_batch *batch,
4156                    const struct iris_context *ice,
4157                    const struct brw_vue_map *vue_map,
4158                    unsigned urb_read_offset,
4159                    unsigned sprite_coord_enables)
4160 {
4161    struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) attr_overrides[16] = {};
4162    const struct brw_wm_prog_data *wm_prog_data = (void *)
4163       ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
4164    const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
4165 
4166    /* XXX: this should be generated when putting programs in place */
4167 
4168    for (uint8_t idx = 0; idx < wm_prog_data->urb_setup_attribs_count; idx++) {
4169       const uint8_t fs_attr = wm_prog_data->urb_setup_attribs[idx];
4170       const int input_index = wm_prog_data->urb_setup[fs_attr];
4171       if (input_index < 0 || input_index >= 16)
4172          continue;
4173 
4174       struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) *attr =
4175          &attr_overrides[input_index];
4176       int slot = vue_map->varying_to_slot[fs_attr];
4177 
4178       /* Viewport and Layer are stored in the VUE header.  We need to override
4179        * them to zero if earlier stages didn't write them, as GL requires that
4180        * they read back as zero when not explicitly set.
4181        */
4182       switch (fs_attr) {
4183       case VARYING_SLOT_VIEWPORT:
4184       case VARYING_SLOT_LAYER:
4185          attr->ComponentOverrideX = true;
4186          attr->ComponentOverrideW = true;
4187          attr->ConstantSource = CONST_0000;
4188 
4189          if (!(vue_map->slots_valid & VARYING_BIT_LAYER))
4190             attr->ComponentOverrideY = true;
4191          if (!(vue_map->slots_valid & VARYING_BIT_VIEWPORT))
4192             attr->ComponentOverrideZ = true;
4193          continue;
4194 
4195       default:
4196          break;
4197       }
4198 
4199       if (sprite_coord_enables & (1 << input_index))
4200          continue;
4201 
4202       /* If there was only a back color written but not front, use back
4203        * as the color instead of undefined.
4204        */
4205       if (slot == -1 && fs_attr == VARYING_SLOT_COL0)
4206          slot = vue_map->varying_to_slot[VARYING_SLOT_BFC0];
4207       if (slot == -1 && fs_attr == VARYING_SLOT_COL1)
4208          slot = vue_map->varying_to_slot[VARYING_SLOT_BFC1];
4209 
4210       /* Not written by the previous stage - undefined. */
4211       if (slot == -1) {
4212          attr->ComponentOverrideX = true;
4213          attr->ComponentOverrideY = true;
4214          attr->ComponentOverrideZ = true;
4215          attr->ComponentOverrideW = true;
4216          attr->ConstantSource = CONST_0001_FLOAT;
4217          continue;
4218       }
4219 
4220       /* Compute the location of the attribute relative to the read offset,
4221        * which is counted in 256-bit increments (two 128-bit VUE slots).
4222        */
4223       const int source_attr = slot - 2 * urb_read_offset;
4224       assert(source_attr >= 0 && source_attr <= 32);
4225       attr->SourceAttribute = source_attr;
4226 
4227       /* If we are doing two-sided color, and the VUE slot following this one
4228        * represents a back-facing color, then we need to instruct the SF unit
4229        * to do back-facing swizzling.
4230        */
4231       if (cso_rast->light_twoside &&
4232           ((vue_map->slot_to_varying[slot] == VARYING_SLOT_COL0 &&
4233             vue_map->slot_to_varying[slot+1] == VARYING_SLOT_BFC0) ||
4234            (vue_map->slot_to_varying[slot] == VARYING_SLOT_COL1 &&
4235             vue_map->slot_to_varying[slot+1] == VARYING_SLOT_BFC1)))
4236          attr->SwizzleSelect = INPUTATTR_FACING;
4237    }
4238 
4239    iris_emit_cmd(batch, GENX(3DSTATE_SBE_SWIZ), sbes) {
4240       for (int i = 0; i < 16; i++)
4241          sbes.Attribute[i] = attr_overrides[i];
4242    }
4243 }
4244 
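/**
 * Determine whether points will reach the rasterizer, based on the point
 * fill mode and the output topology of the last pre-rasterization stage
 * (GS, TES, or the primitive mode itself).
 */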
4245 static bool
4246 iris_is_drawing_points(const struct iris_context *ice)
4247 {
4248    const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
4249 
4250    if (cso_rast->fill_mode_point) {
4251       return true;
4252    }
4253 
4254    if (ice->shaders.prog[MESA_SHADER_GEOMETRY]) {
4255       const struct brw_gs_prog_data *gs_prog_data =
4256          (void *) ice->shaders.prog[MESA_SHADER_GEOMETRY]->prog_data;
4257       return gs_prog_data->output_topology == _3DPRIM_POINTLIST;
4258    } else if (ice->shaders.prog[MESA_SHADER_TESS_EVAL]) {
4259       const struct brw_tes_prog_data *tes_data =
4260          (void *) ice->shaders.prog[MESA_SHADER_TESS_EVAL]->prog_data;
4261       return tes_data->output_topology == BRW_TESS_OUTPUT_TOPOLOGY_POINT;
4262    } else {
4263       return ice->state.prim_mode == PIPE_PRIM_POINTS;
4264    }
4265 }
4266 
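/**
 * Compute the 3DSTATE_SBE PointSpriteTextureCoordinateEnable bitmask: one
 * bit per FS input slot that should be replaced with point sprite
 * coordinates (gl_PointCoord, plus any sprite-coord-enabled TEXn inputs).
 */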
4267 static unsigned
4268 iris_calculate_point_sprite_overrides(const struct brw_wm_prog_data *prog_data,
4269                                       const struct iris_rasterizer_state *cso)
4270 {
4271    unsigned overrides = 0;
4272 
4273    if (prog_data->urb_setup[VARYING_SLOT_PNTC] != -1)
4274       overrides |= 1 << prog_data->urb_setup[VARYING_SLOT_PNTC];
4275 
4276    for (int i = 0; i < 8; i++) {
4277       if ((cso->sprite_coord_enable & (1 << i)) &&
4278           prog_data->urb_setup[VARYING_SLOT_TEX0 + i] != -1)
4279          overrides |= 1 << prog_data->urb_setup[VARYING_SLOT_TEX0 + i];
4280    }
4281 
4282    return overrides;
4283 }
4284 
4285 static void
4286 iris_emit_sbe(struct iris_batch *batch, const struct iris_context *ice)
4287 {
4288    const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
4289    const struct brw_wm_prog_data *wm_prog_data = (void *)
4290       ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
4291    const struct brw_vue_map *last_vue_map =
4292       &brw_vue_prog_data(ice->shaders.last_vue_shader->prog_data)->vue_map;
4293 
4294    unsigned urb_read_offset, urb_read_length;
4295    iris_compute_sbe_urb_read_interval(wm_prog_data->inputs,
4296                                       last_vue_map,
4297                                       cso_rast->light_twoside,
4298                                       &urb_read_offset, &urb_read_length);
4299 
4300    unsigned sprite_coord_overrides =
4301       iris_is_drawing_points(ice) ?
4302       iris_calculate_point_sprite_overrides(wm_prog_data, cso_rast) : 0;
4303 
4304    iris_emit_cmd(batch, GENX(3DSTATE_SBE), sbe) {
4305       sbe.AttributeSwizzleEnable = true;
4306       sbe.NumberofSFOutputAttributes = wm_prog_data->num_varying_inputs;
4307       sbe.PointSpriteTextureCoordinateOrigin = cso_rast->sprite_coord_mode;
4308       sbe.VertexURBEntryReadOffset = urb_read_offset;
4309       sbe.VertexURBEntryReadLength = urb_read_length;
4310       sbe.ForceVertexURBEntryReadOffset = true;
4311       sbe.ForceVertexURBEntryReadLength = true;
4312       sbe.ConstantInterpolationEnable = wm_prog_data->flat_inputs;
4313       sbe.PointSpriteTextureCoordinateEnable = sprite_coord_overrides;
4314 #if GFX_VER >= 9
4315       for (int i = 0; i < 32; i++) {
4316          sbe.AttributeActiveComponentFormat[i] = ACTIVE_COMPONENT_XYZW;
4317       }
4318 #endif
4319 
4320       /* Ask the hardware to supply PrimitiveID if the fragment shader
4321        * reads it but a previous stage didn't write one.
4322        */
4323       if ((wm_prog_data->inputs & VARYING_BIT_PRIMITIVE_ID) &&
4324           last_vue_map->varying_to_slot[VARYING_SLOT_PRIMITIVE_ID] == -1) {
4325          sbe.PrimitiveIDOverrideAttributeSelect =
4326             wm_prog_data->urb_setup[VARYING_SLOT_PRIMITIVE_ID];
4327          sbe.PrimitiveIDOverrideComponentX = true;
4328          sbe.PrimitiveIDOverrideComponentY = true;
4329          sbe.PrimitiveIDOverrideComponentZ = true;
4330          sbe.PrimitiveIDOverrideComponentW = true;
4331       }
4332    }
4333 
4334    iris_emit_sbe_swiz(batch, ice, last_vue_map, urb_read_offset,
4335                       sprite_coord_overrides);
4336 }
4337 
4338 /* ------------------------------------------------------------------- */
4339 
4340 /**
4341  * Populate VS program key fields based on the current state.
4342  */
4343 static void
4344 iris_populate_vs_key(const struct iris_context *ice,
4345                      const struct shader_info *info,
4346                      gl_shader_stage last_stage,
4347                      struct iris_vs_prog_key *key)
4348 {
4349    const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
4350 
4351    if (info->clip_distance_array_size == 0 &&
4352        (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
4353        last_stage == MESA_SHADER_VERTEX)
4354       key->vue.nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
4355 }
4356 
4357 /**
4358  * Populate TCS program key fields based on the current state.
4359  */
4360 static void
4361 iris_populate_tcs_key(const struct iris_context *ice,
4362                       struct iris_tcs_prog_key *key)
4363 {
4364 }
4365 
4366 /**
4367  * Populate TES program key fields based on the current state.
4368  */
4369 static void
4370 iris_populate_tes_key(const struct iris_context *ice,
4371                       const struct shader_info *info,
4372                       gl_shader_stage last_stage,
4373                       struct iris_tes_prog_key *key)
4374 {
4375    const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
4376 
4377    if (info->clip_distance_array_size == 0 &&
4378        (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
4379        last_stage == MESA_SHADER_TESS_EVAL)
4380       key->vue.nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
4381 }
4382 
4383 /**
4384  * Populate GS program key fields based on the current state.
4385  */
4386 static void
4387 iris_populate_gs_key(const struct iris_context *ice,
4388                      const struct shader_info *info,
4389                      gl_shader_stage last_stage,
4390                      struct iris_gs_prog_key *key)
4391 {
4392    const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
4393 
4394    if (info->clip_distance_array_size == 0 &&
4395        (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
4396        last_stage == MESA_SHADER_GEOMETRY)
4397       key->vue.nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
4398 }
4399 
4400 /**
4401  * Populate FS program key fields based on the current state.
4402  */
4403 static void
4404 iris_populate_fs_key(const struct iris_context *ice,
4405                      const struct shader_info *info,
4406                      struct iris_fs_prog_key *key)
4407 {
4408    struct iris_screen *screen = (void *) ice->ctx.screen;
4409    const struct pipe_framebuffer_state *fb = &ice->state.framebuffer;
4410    const struct iris_depth_stencil_alpha_state *zsa = ice->state.cso_zsa;
4411    const struct iris_rasterizer_state *rast = ice->state.cso_rast;
4412    const struct iris_blend_state *blend = ice->state.cso_blend;
4413 
4414    key->nr_color_regions = fb->nr_cbufs;
4415 
4416    key->clamp_fragment_color = rast->clamp_fragment_color;
4417 
4418    key->alpha_to_coverage = blend->alpha_to_coverage;
4419 
4420    key->alpha_test_replicate_alpha = fb->nr_cbufs > 1 && zsa->alpha_enabled;
4421 
4422    key->flat_shade = rast->flatshade &&
4423       (info->inputs_read & (VARYING_BIT_COL0 | VARYING_BIT_COL1));
4424 
4425    key->persample_interp = rast->force_persample_interp;
4426    key->multisample_fbo = rast->multisample && fb->samples > 1;
4427 
4428    key->coherent_fb_fetch = GFX_VER >= 9;
4429 
4430    key->force_dual_color_blend =
4431       screen->driconf.dual_color_blend_by_location &&
4432       (blend->blend_enables & 1) && blend->dual_color_blending;
4433 }
4434 
4435 static void
4436 iris_populate_cs_key(const struct iris_context *ice,
4437                      struct iris_cs_prog_key *key)
4438 {
4439 }
4440 
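/**
 * Return the Kernel Start Pointer (KSP) for a shader variant: the offset
 * of its assembly relative to the base address, used to fill the
 * KernelStartPointer fields in the 3DSTATE_*S packets below.
 */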
4441 static uint64_t
4442 KSP(const struct iris_compiled_shader *shader)
4443 {
4444    struct iris_resource *res = (void *) shader->assembly.res;
4445    return iris_bo_offset_from_base_address(res->bo) + shader->assembly.offset;
4446 }
4447 
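/* Fill the thread dispatch fields that 3DSTATE_VS/HS/DS/GS have in common:
 * kernel start pointer, binding table size, URB read interval, and (when
 * the shader needs it) per-thread scratch space.
 */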
4448 #define INIT_THREAD_DISPATCH_FIELDS(pkt, prefix, stage)                   \
4449    pkt.KernelStartPointer = KSP(shader);                                  \
4450    pkt.BindingTableEntryCount = shader->bt.size_bytes / 4;                \
4451    pkt.FloatingPointMode = prog_data->use_alt_mode;                       \
4452                                                                           \
4453    pkt.DispatchGRFStartRegisterForURBData =                               \
4454       prog_data->dispatch_grf_start_reg;                                  \
4455    pkt.prefix##URBEntryReadLength = vue_prog_data->urb_read_length;       \
4456    pkt.prefix##URBEntryReadOffset = 0;                                    \
4457                                                                           \
4458    pkt.StatisticsEnable = true;                                           \
4459    pkt.Enable           = true;                                           \
4460                                                                           \
4461    if (prog_data->total_scratch) {                                        \
4462       INIT_THREAD_SCRATCH_SIZE(pkt)                                       \
4463    }
4464 
4465 #if GFX_VERx10 >= 125
4466 #define INIT_THREAD_SCRATCH_SIZE(pkt)
4467 #define MERGE_SCRATCH_ADDR(name)                                          \
4468 {                                                                         \
4469    uint32_t pkt2[GENX(name##_length)] = {0};                              \
4470    _iris_pack_command(batch, GENX(name), pkt2, p) {                       \
4471       p.ScratchSpaceBuffer = scratch_addr >> 4;                           \
4472    }                                                                      \
4473    iris_emit_merge(batch, pkt, pkt2, GENX(name##_length));                \
4474 }
4475 #else
4476 #define INIT_THREAD_SCRATCH_SIZE(pkt)                                     \
4477    pkt.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11;
4478 #define MERGE_SCRATCH_ADDR(name)                                          \
4479 {                                                                         \
4480    uint32_t pkt2[GENX(name##_length)] = {0};                              \
4481    _iris_pack_command(batch, GENX(name), pkt2, p) {                       \
4482       p.ScratchSpaceBasePointer =                                         \
4483          rw_bo(NULL, scratch_addr, IRIS_DOMAIN_NONE);                     \
4484    }                                                                      \
4485    iris_emit_merge(batch, pkt, pkt2, GENX(name##_length));                \
4486 }
4487 #endif
4488 
4489 
4490 /**
4491  * Encode most of 3DSTATE_VS based on the compiled shader.
4492  */
4493 static void
4494 iris_store_vs_state(const struct intel_device_info *devinfo,
4495                     struct iris_compiled_shader *shader)
4496 {
4497    struct brw_stage_prog_data *prog_data = shader->prog_data;
4498    struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
4499 
4500    iris_pack_command(GENX(3DSTATE_VS), shader->derived_data, vs) {
4501       INIT_THREAD_DISPATCH_FIELDS(vs, Vertex, MESA_SHADER_VERTEX);
4502       vs.MaximumNumberofThreads = devinfo->max_vs_threads - 1;
4503       vs.SIMD8DispatchEnable = true;
4504       vs.UserClipDistanceCullTestEnableBitmask =
4505          vue_prog_data->cull_distance_mask;
4506    }
4507 }
4508 
4509 /**
4510  * Encode most of 3DSTATE_HS based on the compiled shader.
4511  */
4512 static void
4513 iris_store_tcs_state(const struct intel_device_info *devinfo,
4514                      struct iris_compiled_shader *shader)
4515 {
4516    struct brw_stage_prog_data *prog_data = shader->prog_data;
4517    struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
4518    struct brw_tcs_prog_data *tcs_prog_data = (void *) prog_data;
4519 
4520    iris_pack_command(GENX(3DSTATE_HS), shader->derived_data, hs) {
4521       INIT_THREAD_DISPATCH_FIELDS(hs, Vertex, MESA_SHADER_TESS_CTRL);
4522 
4523 #if GFX_VER >= 12
4524       /* Wa_1604578095:
4525        *
4526        *    Hang occurs when the number of max threads is less than 2 times
4527        *    the number of instance count. The number of max threads must be
4528        *    more than 2 times the number of instance count.
4529        */
4530       assert((devinfo->max_tcs_threads / 2) > tcs_prog_data->instances);
4531       hs.DispatchGRFStartRegisterForURBData = prog_data->dispatch_grf_start_reg & 0x1f;
4532       hs.DispatchGRFStartRegisterForURBData5 = prog_data->dispatch_grf_start_reg >> 5;
4533 #endif
4534 
4535       hs.InstanceCount = tcs_prog_data->instances - 1;
4536       hs.MaximumNumberofThreads = devinfo->max_tcs_threads - 1;
4537       hs.IncludeVertexHandles = true;
4538 
4539 #if GFX_VER == 12
4540       /* Patch Count threshold specifies the maximum number of patches that
4541        * will be accumulated before a thread dispatch is forced.
4542        */
4543       hs.PatchCountThreshold = tcs_prog_data->patch_count_threshold;
4544 #endif
4545 
4546 #if GFX_VER >= 9
4547       hs.DispatchMode = vue_prog_data->dispatch_mode;
4548       hs.IncludePrimitiveID = tcs_prog_data->include_primitive_id;
4549 #endif
4550    }
4551 }
4552 
4553 /**
4554  * Encode 3DSTATE_TE and most of 3DSTATE_DS based on the compiled shader.
4555  */
4556 static void
4557 iris_store_tes_state(const struct intel_device_info *devinfo,
4558                      struct iris_compiled_shader *shader)
4559 {
4560    struct brw_stage_prog_data *prog_data = shader->prog_data;
4561    struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
4562    struct brw_tes_prog_data *tes_prog_data = (void *) prog_data;
4563 
4564    uint32_t *ds_state = (void *) shader->derived_data;
4565    uint32_t *te_state = ds_state + GENX(3DSTATE_DS_length);
4566 
4567    iris_pack_command(GENX(3DSTATE_DS), ds_state, ds) {
4568       INIT_THREAD_DISPATCH_FIELDS(ds, Patch, MESA_SHADER_TESS_EVAL);
4569 
4570       ds.DispatchMode = DISPATCH_MODE_SIMD8_SINGLE_PATCH;
4571       ds.MaximumNumberofThreads = devinfo->max_tes_threads - 1;
4572       ds.ComputeWCoordinateEnable =
4573          tes_prog_data->domain == BRW_TESS_DOMAIN_TRI;
4574 
4575 #if GFX_VER >= 12
4576       ds.PrimitiveIDNotRequired = !tes_prog_data->include_primitive_id;
4577 #endif
4578       ds.UserClipDistanceCullTestEnableBitmask =
4579          vue_prog_data->cull_distance_mask;
4580    }
4581 
4582    iris_pack_command(GENX(3DSTATE_TE), te_state, te) {
4583       te.Partitioning = tes_prog_data->partitioning;
4584       te.OutputTopology = tes_prog_data->output_topology;
4585       te.TEDomain = tes_prog_data->domain;
4586       te.TEEnable = true;
4587       te.MaximumTessellationFactorOdd = 63.0;
4588       te.MaximumTessellationFactorNotOdd = 64.0;
4589 #if GFX_VERx10 >= 125
4590       te.TessellationDistributionMode = TEDMODE_RR_FREE;
4591       te.TessellationDistributionLevel = TEDLEVEL_PATCH;
4592       /* 64_TRIANGLES */
4593       te.SmallPatchThreshold = 3;
4594       /* 1K_TRIANGLES */
4595       te.TargetBlockSize = 8;
4596       /* 1K_TRIANGLES */
4597       te.LocalBOPAccumulatorThreshold = 1;
4598 #endif
4599    }
4600 }
4601 
4602 /**
4603  * Encode most of 3DSTATE_GS based on the compiled shader.
4604  */
4605 static void
4606 iris_store_gs_state(const struct intel_device_info *devinfo,
4607                     struct iris_compiled_shader *shader)
4608 {
4609    struct brw_stage_prog_data *prog_data = shader->prog_data;
4610    struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
4611    struct brw_gs_prog_data *gs_prog_data = (void *) prog_data;
4612 
4613    iris_pack_command(GENX(3DSTATE_GS), shader->derived_data, gs) {
4614       INIT_THREAD_DISPATCH_FIELDS(gs, Vertex, MESA_SHADER_GEOMETRY);
4615 
4616       gs.OutputVertexSize = gs_prog_data->output_vertex_size_hwords * 2 - 1;
4617       gs.OutputTopology = gs_prog_data->output_topology;
4618       gs.ControlDataHeaderSize =
4619          gs_prog_data->control_data_header_size_hwords;
4620       gs.InstanceControl = gs_prog_data->invocations - 1;
4621       gs.DispatchMode = DISPATCH_MODE_SIMD8;
4622       gs.IncludePrimitiveID = gs_prog_data->include_primitive_id;
4623       gs.ControlDataFormat = gs_prog_data->control_data_format;
4624       gs.ReorderMode = TRAILING;
4625       gs.ExpectedVertexCount = gs_prog_data->vertices_in;
4626       gs.MaximumNumberofThreads =
4627          GFX_VER == 8 ? (devinfo->max_gs_threads / 2 - 1)
4628                       : (devinfo->max_gs_threads - 1);
4629 
4630       if (gs_prog_data->static_vertex_count != -1) {
4631          gs.StaticOutput = true;
4632          gs.StaticOutputVertexCount = gs_prog_data->static_vertex_count;
4633       }
4634       gs.IncludeVertexHandles = vue_prog_data->include_vue_handles;
4635 
4636       gs.UserClipDistanceCullTestEnableBitmask =
4637          vue_prog_data->cull_distance_mask;
4638 
4639       const int urb_entry_write_offset = 1;
4640       const uint32_t urb_entry_output_length =
4641          DIV_ROUND_UP(vue_prog_data->vue_map.num_slots, 2) -
4642          urb_entry_write_offset;
4643 
4644       gs.VertexURBEntryOutputReadOffset = urb_entry_write_offset;
4645       gs.VertexURBEntryOutputLength = MAX2(urb_entry_output_length, 1);
4646    }
4647 }
4648 
4649 /**
4650  * Encode most of 3DSTATE_PS and 3DSTATE_PS_EXTRA based on the shader.
4651  */
4652 static void
4653 iris_store_fs_state(const struct intel_device_info *devinfo,
4654                     struct iris_compiled_shader *shader)
4655 {
4656    struct brw_stage_prog_data *prog_data = shader->prog_data;
4657    struct brw_wm_prog_data *wm_prog_data = (void *) shader->prog_data;
4658 
4659    uint32_t *ps_state = (void *) shader->derived_data;
4660    uint32_t *psx_state = ps_state + GENX(3DSTATE_PS_length);
4661 
4662    iris_pack_command(GENX(3DSTATE_PS), ps_state, ps) {
4663       ps.VectorMaskEnable = true;
4664       ps.BindingTableEntryCount = shader->bt.size_bytes / 4;
4665       ps.FloatingPointMode = prog_data->use_alt_mode;
4666       ps.MaximumNumberofThreadsPerPSD =
4667          devinfo->max_threads_per_psd - (GFX_VER == 8 ? 2 : 1);
4668 
4669       ps.PushConstantEnable = prog_data->ubo_ranges[0].length > 0;
4670 
4671       /* From the documentation for this packet:
4672        * "If the PS kernel does not need the Position XY Offsets to
4673        *  compute a Position Value, then this field should be programmed
4674        *  to POSOFFSET_NONE."
4675        *
4676        * "SW Recommendation: If the PS kernel needs the Position Offsets
4677        *  to compute a Position XY value, this field should match Position
4678        *  ZW Interpolation Mode to ensure a consistent position.xyzw
4679        *  computation."
4680        *
4681        * We only require XY sample offsets. So, this recommendation doesn't
4682        * look useful at the moment.  We might need this in the future.
4683        */
4684       ps.PositionXYOffsetSelect =
4685          wm_prog_data->uses_pos_offset ? POSOFFSET_SAMPLE : POSOFFSET_NONE;
4686 
4687       if (prog_data->total_scratch) {
4688          INIT_THREAD_SCRATCH_SIZE(ps);
4689       }
4690    }
4691 
4692    iris_pack_command(GENX(3DSTATE_PS_EXTRA), psx_state, psx) {
4693       psx.PixelShaderValid = true;
4694       psx.PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode;
4695       psx.PixelShaderKillsPixel = wm_prog_data->uses_kill;
4696       psx.AttributeEnable = wm_prog_data->num_varying_inputs != 0;
4697       psx.PixelShaderUsesSourceDepth = wm_prog_data->uses_src_depth;
4698       psx.PixelShaderUsesSourceW = wm_prog_data->uses_src_w;
4699       psx.PixelShaderIsPerSample = wm_prog_data->persample_dispatch;
4700       psx.oMaskPresenttoRenderTarget = wm_prog_data->uses_omask;
4701 
4702 #if GFX_VER >= 9
4703       psx.PixelShaderPullsBary = wm_prog_data->pulls_bary;
4704       psx.PixelShaderComputesStencil = wm_prog_data->computed_stencil;
4705 #endif
4706    }
4707 }
4708 
4709 /**
4710  * Encode the portions of INTERFACE_DESCRIPTOR_DATA that depend only on
4711  * the compiled compute shader.  (The amount of derived data written here
4712  * must match iris_derived_program_state_size().)
4713  */
4714 static void
4715 iris_store_cs_state(const struct intel_device_info *devinfo,
4716                     struct iris_compiled_shader *shader)
4717 {
4718    struct brw_cs_prog_data *cs_prog_data = (void *) shader->prog_data;
4719    void *map = shader->derived_data;
4720 
4721    iris_pack_state(GENX(INTERFACE_DESCRIPTOR_DATA), map, desc) {
4722 #if GFX_VERx10 < 125
4723       desc.ConstantURBEntryReadLength = cs_prog_data->push.per_thread.regs;
4724       desc.CrossThreadConstantDataReadLength =
4725          cs_prog_data->push.cross_thread.regs;
4726 #else
4727       assert(cs_prog_data->push.per_thread.regs == 0);
4728       assert(cs_prog_data->push.cross_thread.regs == 0);
4729 #endif
4730       desc.BarrierEnable = cs_prog_data->uses_barrier;
4731 #if GFX_VER >= 12
4732       /* TODO: Check if we are missing workarounds and enable mid-thread
4733        * preemption.
4734        *
4735        * We still have issues with mid-thread preemption (it was already
4736        * disabled by the kernel on gfx11, due to missing workarounds). It's
4737        * possible that we are just missing some workarounds, and could enable
4738       * it later, but for now let's disable it to fix a GPU hang in compute
4739       * in Car Chase (and possibly more).
4740        */
4741       desc.ThreadPreemptionDisable = true;
4742 #endif
4743    }
4744 }
4745 
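/**
 * Return the size in bytes of the derived data (shader command packets)
 * for a given program cache ID.  This must match what the
 * iris_store_*_state() functions above actually write.
 */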
4746 static unsigned
4747 iris_derived_program_state_size(enum iris_program_cache_id cache_id)
4748 {
4749    assert(cache_id <= IRIS_CACHE_BLORP);
4750 
4751    static const unsigned dwords[] = {
4752       [IRIS_CACHE_VS] = GENX(3DSTATE_VS_length),
4753       [IRIS_CACHE_TCS] = GENX(3DSTATE_HS_length),
4754       [IRIS_CACHE_TES] = GENX(3DSTATE_TE_length) + GENX(3DSTATE_DS_length),
4755       [IRIS_CACHE_GS] = GENX(3DSTATE_GS_length),
4756       [IRIS_CACHE_FS] =
4757          GENX(3DSTATE_PS_length) + GENX(3DSTATE_PS_EXTRA_length),
4758       [IRIS_CACHE_CS] = GENX(INTERFACE_DESCRIPTOR_DATA_length),
4759       [IRIS_CACHE_BLORP] = 0,
4760    };
4761 
4762    return sizeof(uint32_t) * dwords[cache_id];
4763 }
4764 
4765 /**
4766  * Create any state packets corresponding to the given shader stage
4767  * (i.e. 3DSTATE_VS) and save them as "derived data" in the shader variant.
4768  * This means that we can look up a program in the in-memory cache and
4769  * get most of the state packet without having to reconstruct it.
4770  */
4771 static void
4772 iris_store_derived_program_state(const struct intel_device_info *devinfo,
4773                                  enum iris_program_cache_id cache_id,
4774                                  struct iris_compiled_shader *shader)
4775 {
4776    switch (cache_id) {
4777    case IRIS_CACHE_VS:
4778       iris_store_vs_state(devinfo, shader);
4779       break;
4780    case IRIS_CACHE_TCS:
4781       iris_store_tcs_state(devinfo, shader);
4782       break;
4783    case IRIS_CACHE_TES:
4784       iris_store_tes_state(devinfo, shader);
4785       break;
4786    case IRIS_CACHE_GS:
4787       iris_store_gs_state(devinfo, shader);
4788       break;
4789    case IRIS_CACHE_FS:
4790       iris_store_fs_state(devinfo, shader);
4791       break;
4792    case IRIS_CACHE_CS:
4793       iris_store_cs_state(devinfo, shader);
4794       break;
4795    case IRIS_CACHE_BLORP:
4796       break;
4797    }
4798 }
4799 
4800 /* ------------------------------------------------------------------- */
4801 
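/* _3DCommandSubOpcode values for the per-stage 3DSTATE_CONSTANT_* packets
 * (e.g. 21 is 3DSTATE_CONSTANT_VS); compute has no such packet.
 */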
4802 static const uint32_t push_constant_opcodes[] = {
4803    [MESA_SHADER_VERTEX]    = 21,
4804    [MESA_SHADER_TESS_CTRL] = 25, /* HS */
4805    [MESA_SHADER_TESS_EVAL] = 26, /* DS */
4806    [MESA_SHADER_GEOMETRY]  = 22,
4807    [MESA_SHADER_FRAGMENT]  = 23,
4808    [MESA_SHADER_COMPUTE]   = 0,
4809 };
4810 
4811 static uint32_t
4812 use_null_surface(struct iris_batch *batch, struct iris_context *ice)
4813 {
4814    struct iris_bo *state_bo = iris_resource_bo(ice->state.unbound_tex.res);
4815 
4816    iris_use_pinned_bo(batch, state_bo, false, IRIS_DOMAIN_NONE);
4817 
4818    return ice->state.unbound_tex.offset;
4819 }
4820 
4821 static uint32_t
4822 use_null_fb_surface(struct iris_batch *batch, struct iris_context *ice)
4823 {
4824    /* If set_framebuffer_state() was never called, fall back to 1x1x1 */
4825    if (!ice->state.null_fb.res)
4826       return use_null_surface(batch, ice);
4827 
4828    struct iris_bo *state_bo = iris_resource_bo(ice->state.null_fb.res);
4829 
4830    iris_use_pinned_bo(batch, state_bo, false, IRIS_DOMAIN_NONE);
4831 
4832    return ice->state.null_fb.offset;
4833 }
4834 
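/* SURFACE_STATEs for a resource are uploaded back-to-back, one per
 * supported aux usage.  The offset of a particular aux usage's state is
 * therefore the number of lower-numbered enabled modes times the
 * SURFACE_STATE alignment.
 */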
4835 static uint32_t
4836 surf_state_offset_for_aux(unsigned aux_modes,
4837                           enum isl_aux_usage aux_usage)
4838 {
4839    assert(aux_modes & (1 << aux_usage));
4840    return SURFACE_STATE_ALIGNMENT *
4841           util_bitcount(aux_modes & ((1 << aux_usage) - 1));
4842 }
4843 
4844 #if GFX_VER == 9
4845 static void
4846 surf_state_update_clear_value(struct iris_batch *batch,
4847                               struct iris_resource *res,
4848                               struct iris_surface_state *surf_state,
4849                               enum isl_aux_usage aux_usage)
4850 {
4851    struct isl_device *isl_dev = &batch->screen->isl_dev;
4852    struct iris_bo *state_bo = iris_resource_bo(surf_state->ref.res);
4853    uint64_t real_offset = surf_state->ref.offset + IRIS_MEMZONE_BINDER_START;
4854    uint32_t offset_into_bo = real_offset - state_bo->address;
4855    uint32_t clear_offset = offset_into_bo +
4856       isl_dev->ss.clear_value_offset +
4857       surf_state_offset_for_aux(surf_state->aux_usages, aux_usage);
4858    uint32_t *color = res->aux.clear_color.u32;
4859 
4860    assert(isl_dev->ss.clear_value_size == 16);
4861 
4862    if (aux_usage == ISL_AUX_USAGE_HIZ) {
4863       iris_emit_pipe_control_write(batch, "update fast clear value (Z)",
4864                                    PIPE_CONTROL_WRITE_IMMEDIATE,
4865                                    state_bo, clear_offset, color[0]);
4866    } else {
4867       iris_emit_pipe_control_write(batch, "update fast clear color (RG__)",
4868                                    PIPE_CONTROL_WRITE_IMMEDIATE,
4869                                    state_bo, clear_offset,
4870                                    (uint64_t) color[0] |
4871                                    (uint64_t) color[1] << 32);
4872       iris_emit_pipe_control_write(batch, "update fast clear color (__BA)",
4873                                    PIPE_CONTROL_WRITE_IMMEDIATE,
4874                                    state_bo, clear_offset + 8,
4875                                    (uint64_t) color[2] |
4876                                    (uint64_t) color[3] << 32);
4877    }
4878 
4879    iris_emit_pipe_control_flush(batch,
4880                                 "update fast clear: state cache invalidate",
4881                                 PIPE_CONTROL_FLUSH_ENABLE |
4882                                 PIPE_CONTROL_STATE_CACHE_INVALIDATE);
4883 }
4884 #endif
4885 
4886 static void
4887 update_clear_value(struct iris_context *ice,
4888                    struct iris_batch *batch,
4889                    struct iris_resource *res,
4890                    struct iris_surface_state *surf_state,
4891                    struct isl_view *view)
4892 {
4893    UNUSED struct isl_device *isl_dev = &batch->screen->isl_dev;
4894    UNUSED unsigned aux_modes = surf_state->aux_usages;
4895 
4896    /* We only need to update the clear color in the surface state for gfx8 and
4897     * gfx9. Newer gens can read it directly from the clear color state buffer.
4898     */
4899 #if GFX_VER == 9
4900    /* Skip updating the ISL_AUX_USAGE_NONE surface state */
4901    aux_modes &= ~(1 << ISL_AUX_USAGE_NONE);
4902 
4903    while (aux_modes) {
4904       enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
4905 
4906       surf_state_update_clear_value(batch, res, surf_state, aux_usage);
4907    }
4908 #elif GFX_VER == 8
4909    /* TODO: Could update rather than re-filling */
4910    alloc_surface_states(surf_state, surf_state->aux_usages);
4911 
4912    fill_surface_states(isl_dev, surf_state, res, &res->surf, view, 0, 0, 0);
4913 
4914    upload_surface_states(ice->state.surface_uploader, surf_state);
4915 #endif
4916 }
4917 
4918 static uint32_t
4919 use_surface_state(struct iris_batch *batch,
4920                   struct iris_surface_state *surf_state,
4921                   enum isl_aux_usage aux_usage)
4922 {
4923    iris_use_pinned_bo(batch, iris_resource_bo(surf_state->ref.res), false,
4924                       IRIS_DOMAIN_NONE);
4925 
4926    return surf_state->ref.offset +
4927           surf_state_offset_for_aux(surf_state->aux_usages, aux_usage);
4928 }
4929 
4930 /**
4931  * Add a surface to the validation list, as well as the buffer containing
4932  * the corresponding SURFACE_STATE.
4933  *
4934  * Returns the binding table entry (offset to SURFACE_STATE).
4935  */
4936 static uint32_t
4937 use_surface(struct iris_context *ice,
4938             struct iris_batch *batch,
4939             struct pipe_surface *p_surf,
4940             bool writeable,
4941             enum isl_aux_usage aux_usage,
4942             bool is_read_surface,
4943             enum iris_domain access)
4944 {
4945    struct iris_surface *surf = (void *) p_surf;
4946    struct iris_resource *res = (void *) p_surf->texture;
4947 
4948    if (GFX_VER == 8 && is_read_surface && !surf->surface_state_read.ref.res) {
4949       upload_surface_states(ice->state.surface_uploader,
4950                             &surf->surface_state_read);
4951    }
4952 
4953    if (!surf->surface_state.ref.res) {
4954       upload_surface_states(ice->state.surface_uploader,
4955                             &surf->surface_state);
4956    }
4957 
4958    if (memcmp(&res->aux.clear_color, &surf->clear_color,
4959               sizeof(surf->clear_color)) != 0) {
4960       update_clear_value(ice, batch, res, &surf->surface_state, &surf->view);
4961       if (GFX_VER == 8) {
4962          update_clear_value(ice, batch, res, &surf->surface_state_read,
4963                             &surf->read_view);
4964       }
4965       surf->clear_color = res->aux.clear_color;
4966    }
4967 
4968    if (res->aux.clear_color_bo)
4969       iris_use_pinned_bo(batch, res->aux.clear_color_bo, false, access);
4970 
4971    if (res->aux.bo)
4972       iris_use_pinned_bo(batch, res->aux.bo, writeable, access);
4973 
4974    iris_use_pinned_bo(batch, res->bo, writeable, access);
4975 
4976    if (GFX_VER == 8 && is_read_surface) {
4977       return use_surface_state(batch, &surf->surface_state_read, aux_usage);
4978    } else {
4979       return use_surface_state(batch, &surf->surface_state, aux_usage);
4980    }
4981 }
4982 
4983 static uint32_t
4984 use_sampler_view(struct iris_context *ice,
4985                  struct iris_batch *batch,
4986                  struct iris_sampler_view *isv)
4987 {
4988    enum isl_aux_usage aux_usage =
4989       iris_resource_texture_aux_usage(ice, isv->res, isv->view.format);
4990 
4991    if (!isv->surface_state.ref.res)
4992       upload_surface_states(ice->state.surface_uploader, &isv->surface_state);
4993 
4994    if (memcmp(&isv->res->aux.clear_color, &isv->clear_color,
4995               sizeof(isv->clear_color)) != 0) {
4996       update_clear_value(ice, batch, isv->res, &isv->surface_state,
4997                          &isv->view);
4998       isv->clear_color = isv->res->aux.clear_color;
4999    }
5000 
5001    if (isv->res->aux.clear_color_bo) {
5002       iris_use_pinned_bo(batch, isv->res->aux.clear_color_bo,
5003                          false, IRIS_DOMAIN_OTHER_READ);
5004    }
5005 
5006    if (isv->res->aux.bo) {
5007       iris_use_pinned_bo(batch, isv->res->aux.bo,
5008                          false, IRIS_DOMAIN_OTHER_READ);
5009    }
5010 
5011    iris_use_pinned_bo(batch, isv->res->bo, false, IRIS_DOMAIN_OTHER_READ);
5012 
5013    return use_surface_state(batch, &isv->surface_state, aux_usage);
5014 }
5015 
5016 static uint32_t
5017 use_ubo_ssbo(struct iris_batch *batch,
5018              struct iris_context *ice,
5019              struct pipe_shader_buffer *buf,
5020              struct iris_state_ref *surf_state,
5021              bool writable, enum iris_domain access)
5022 {
5023    if (!buf->buffer || !surf_state->res)
5024       return use_null_surface(batch, ice);
5025 
5026    iris_use_pinned_bo(batch, iris_resource_bo(buf->buffer), writable, access);
5027    iris_use_pinned_bo(batch, iris_resource_bo(surf_state->res), false,
5028                       IRIS_DOMAIN_NONE);
5029 
5030    return surf_state->offset;
5031 }
5032 
5033 static uint32_t
5034 use_image(struct iris_batch *batch, struct iris_context *ice,
5035           struct iris_shader_state *shs, const struct shader_info *info,
5036           int i)
5037 {
5038    struct iris_image_view *iv = &shs->image[i];
5039    struct iris_resource *res = (void *) iv->base.resource;
5040 
5041    if (!res)
5042       return use_null_surface(batch, ice);
5043 
5044    bool write = iv->base.shader_access & PIPE_IMAGE_ACCESS_WRITE;
5045 
5046    iris_use_pinned_bo(batch, res->bo, write, IRIS_DOMAIN_NONE);
5047 
5048    if (res->aux.bo)
5049       iris_use_pinned_bo(batch, res->aux.bo, write, IRIS_DOMAIN_NONE);
5050 
5051    enum isl_aux_usage aux_usage =
5052       iris_image_view_aux_usage(ice, &iv->base, info);
5053 
5054    return use_surface_state(batch, &iv->surface_state, aux_usage);
5055 }
5056 
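/* Binding table helpers: push_bt_entry() appends an entry pointing at a
 * SURFACE_STATE (made relative to the binder base on Gfx8-10), skipping
 * the write when we're only pinning BOs; bt_assert() checks that each
 * surface group starts at the index the compiled layout expects.
 */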
5057 #define push_bt_entry(addr) \
5058    assert(addr >= surf_base_offset); \
5059    assert(s < shader->bt.size_bytes / sizeof(uint32_t)); \
5060    if (!pin_only) bt_map[s++] = (addr) - surf_base_offset;
5061 
5062 #define bt_assert(section) \
5063    if (!pin_only && shader->bt.used_mask[section] != 0) \
5064       assert(shader->bt.offsets[section] == s);
5065 
5066 /**
5067  * Populate the binding table for a given shader stage.
5068  *
5069  * This fills out the table of pointers to surfaces required by the shader,
5070  * and also adds those buffers to the validation list so the kernel can make
5071  * resident before running our batch.
5072  */
5073 static void
5074 iris_populate_binding_table(struct iris_context *ice,
5075                             struct iris_batch *batch,
5076                             gl_shader_stage stage,
5077                             bool pin_only)
5078 {
5079    const struct iris_binder *binder = &ice->state.binder;
5080    struct iris_compiled_shader *shader = ice->shaders.prog[stage];
5081    if (!shader)
5082       return;
5083 
5084    struct iris_binding_table *bt = &shader->bt;
5085    UNUSED struct brw_stage_prog_data *prog_data = shader->prog_data;
5086    struct iris_shader_state *shs = &ice->state.shaders[stage];
5087    uint32_t surf_base_offset = GFX_VER < 11 ? binder->bo->address : 0;
5088 
5089    uint32_t *bt_map = binder->map + binder->bt_offset[stage];
5090    int s = 0;
5091 
5092    const struct shader_info *info = iris_get_shader_info(ice, stage);
5093    if (!info) {
5094       /* TCS passthrough doesn't need a binding table. */
5095       assert(stage == MESA_SHADER_TESS_CTRL);
5096       return;
5097    }
5098 
5099    if (stage == MESA_SHADER_COMPUTE &&
5100        shader->bt.used_mask[IRIS_SURFACE_GROUP_CS_WORK_GROUPS]) {
5101       /* surface for gl_NumWorkGroups */
5102       struct iris_state_ref *grid_data = &ice->state.grid_size;
5103       struct iris_state_ref *grid_state = &ice->state.grid_surf_state;
5104       iris_use_pinned_bo(batch, iris_resource_bo(grid_data->res), false,
5105                          IRIS_DOMAIN_OTHER_READ);
5106       iris_use_pinned_bo(batch, iris_resource_bo(grid_state->res), false,
5107                          IRIS_DOMAIN_NONE);
5108       push_bt_entry(grid_state->offset);
5109    }
5110 
5111    if (stage == MESA_SHADER_FRAGMENT) {
5112       struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
5113       /* Note that cso_fb->nr_cbufs == fs_key->nr_color_regions. */
5114       if (cso_fb->nr_cbufs) {
5115          for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
5116             uint32_t addr;
5117             if (cso_fb->cbufs[i]) {
5118                addr = use_surface(ice, batch, cso_fb->cbufs[i], true,
5119                                   ice->state.draw_aux_usage[i], false,
5120                                   IRIS_DOMAIN_RENDER_WRITE);
5121             } else {
5122                addr = use_null_fb_surface(batch, ice);
5123             }
5124             push_bt_entry(addr);
5125          }
5126       } else if (GFX_VER < 11) {
5127          uint32_t addr = use_null_fb_surface(batch, ice);
5128          push_bt_entry(addr);
5129       }
5130    }
5131 
5132 #define foreach_surface_used(index, group) \
5133    bt_assert(group); \
5134    for (int index = 0; index < bt->sizes[group]; index++) \
5135       if (iris_group_index_to_bti(bt, group, index) != \
5136           IRIS_SURFACE_NOT_USED)
5137 
5138    foreach_surface_used(i, IRIS_SURFACE_GROUP_RENDER_TARGET_READ) {
5139       struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
5140       uint32_t addr;
5141       if (cso_fb->cbufs[i]) {
5142          addr = use_surface(ice, batch, cso_fb->cbufs[i],
5143                             false, ice->state.draw_aux_usage[i], true,
5144                             IRIS_DOMAIN_OTHER_READ);
5145          push_bt_entry(addr);
5146       }
5147    }
5148 
5149    foreach_surface_used(i, IRIS_SURFACE_GROUP_TEXTURE) {
5150       struct iris_sampler_view *view = shs->textures[i];
5151       uint32_t addr = view ? use_sampler_view(ice, batch, view)
5152                            : use_null_surface(batch, ice);
5153       push_bt_entry(addr);
5154    }
5155 
5156    foreach_surface_used(i, IRIS_SURFACE_GROUP_IMAGE) {
5157       uint32_t addr = use_image(batch, ice, shs, info, i);
5158       push_bt_entry(addr);
5159    }
5160 
5161    foreach_surface_used(i, IRIS_SURFACE_GROUP_UBO) {
5162       uint32_t addr = use_ubo_ssbo(batch, ice, &shs->constbuf[i],
5163                                    &shs->constbuf_surf_state[i], false,
5164                                    IRIS_DOMAIN_OTHER_READ);
5165       push_bt_entry(addr);
5166    }
5167 
5168    foreach_surface_used(i, IRIS_SURFACE_GROUP_SSBO) {
5169       uint32_t addr =
5170          use_ubo_ssbo(batch, ice, &shs->ssbo[i], &shs->ssbo_surf_state[i],
5171                       shs->writable_ssbos & (1u << i), IRIS_DOMAIN_NONE);
5172       push_bt_entry(addr);
5173    }
5174 
5175 #if 0
5176       /* XXX: YUV surfaces not implemented yet */
5177       bt_assert(plane_start[1], ...);
5178       bt_assert(plane_start[2], ...);
5179 #endif
5180 }
5181 
5182 static void
5183 iris_use_optional_res(struct iris_batch *batch,
5184                       struct pipe_resource *res,
5185                       bool writeable,
5186                       enum iris_domain access)
5187 {
5188    if (res) {
5189       struct iris_bo *bo = iris_resource_bo(res);
5190       iris_use_pinned_bo(batch, bo, writeable, access);
5191    }
5192 }
5193 
5194 static void
5195 pin_depth_and_stencil_buffers(struct iris_batch *batch,
5196                               struct pipe_surface *zsbuf,
5197                               struct iris_depth_stencil_alpha_state *cso_zsa)
5198 {
5199    if (!zsbuf)
5200       return;
5201 
5202    struct iris_resource *zres, *sres;
5203    iris_get_depth_stencil_resources(zsbuf->texture, &zres, &sres);
5204 
5205    if (zres) {
5206       const enum iris_domain access = cso_zsa->depth_writes_enabled ?
5207          IRIS_DOMAIN_DEPTH_WRITE : IRIS_DOMAIN_OTHER_READ;
5208       iris_use_pinned_bo(batch, zres->bo, cso_zsa->depth_writes_enabled,
5209                          access);
5210       if (zres->aux.bo) {
5211          iris_use_pinned_bo(batch, zres->aux.bo,
5212                             cso_zsa->depth_writes_enabled, access);
5213       }
5214    }
5215 
5216    if (sres) {
5217       const enum iris_domain access = cso_zsa->stencil_writes_enabled ?
5218          IRIS_DOMAIN_DEPTH_WRITE : IRIS_DOMAIN_OTHER_READ;
5219       iris_use_pinned_bo(batch, sres->bo, cso_zsa->stencil_writes_enabled,
5220                          access);
5221    }
5222 }
5223 
5224 static uint32_t
5225 pin_scratch_space(struct iris_context *ice,
5226                   struct iris_batch *batch,
5227                   const struct brw_stage_prog_data *prog_data,
5228                   gl_shader_stage stage)
5229 {
5230    uint32_t scratch_addr = 0;
5231 
5232    if (prog_data->total_scratch > 0) {
5233       struct iris_bo *scratch_bo =
5234          iris_get_scratch_space(ice, prog_data->total_scratch, stage);
5235       iris_use_pinned_bo(batch, scratch_bo, true, IRIS_DOMAIN_NONE);
5236 
5237 #if GFX_VERx10 >= 125
5238       const struct iris_state_ref *ref =
5239          iris_get_scratch_surf(ice, prog_data->total_scratch);
5240       iris_use_pinned_bo(batch, iris_resource_bo(ref->res),
5241                          false, IRIS_DOMAIN_NONE);
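           /* On GFX_VERx10 >= 125 this appears to hand the shader a
            * bindless-heap-relative offset to the scratch surface state
            * rather than a raw address, hence the IRIS_MEMZONE_BINDLESS_START
            * subtraction and the 64-byte-alignment / 26-bit-range assert
            * below.
            */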
5242       scratch_addr = ref->offset +
5243                      iris_resource_bo(ref->res)->address -
5244                      IRIS_MEMZONE_BINDLESS_START;
5245       assert((scratch_addr & 0x3f) == 0 && scratch_addr < (1 << 26));
5246 #else
5247       scratch_addr = scratch_bo->address;
5248 #endif
5249    }
5250 
5251    return scratch_addr;
5252 }
5253 
5254 /* ------------------------------------------------------------------- */
5255 
5256 /**
5257  * Pin any BOs which were installed by a previous batch, and restored
5258  * via the hardware logical context mechanism.
5259  *
5260  * We don't need to re-emit all state every batch - the hardware context
5261  * mechanism will save and restore it for us.  This includes pointers to
5262  * various BOs...which won't exist unless we ask the kernel to pin them
5263  * by adding them to the validation list.
5264  *
5265  * We can skip buffers if we've re-emitted those packets, as we're
5266  * overwriting those stale pointers with new ones, and don't actually
5267  * refer to the old BOs.
5268  */
5269 static void
5270 iris_restore_render_saved_bos(struct iris_context *ice,
5271                               struct iris_batch *batch,
5272                               const struct pipe_draw_info *draw)
5273 {
5274    struct iris_genx_state *genx = ice->state.genx;
5275 
5276    const uint64_t clean = ~ice->state.dirty;
5277    const uint64_t stage_clean = ~ice->state.stage_dirty;
5278 
5279    if (clean & IRIS_DIRTY_CC_VIEWPORT) {
5280       iris_use_optional_res(batch, ice->state.last_res.cc_vp, false,
5281                             IRIS_DOMAIN_NONE);
5282    }
5283 
5284    if (clean & IRIS_DIRTY_SF_CL_VIEWPORT) {
5285       iris_use_optional_res(batch, ice->state.last_res.sf_cl_vp, false,
5286                             IRIS_DOMAIN_NONE);
5287    }
5288 
5289    if (clean & IRIS_DIRTY_BLEND_STATE) {
5290       iris_use_optional_res(batch, ice->state.last_res.blend, false,
5291                             IRIS_DOMAIN_NONE);
5292    }
5293 
5294    if (clean & IRIS_DIRTY_COLOR_CALC_STATE) {
5295       iris_use_optional_res(batch, ice->state.last_res.color_calc, false,
5296                             IRIS_DOMAIN_NONE);
5297    }
5298 
5299    if (clean & IRIS_DIRTY_SCISSOR_RECT) {
5300       iris_use_optional_res(batch, ice->state.last_res.scissor, false,
5301                             IRIS_DOMAIN_NONE);
5302    }
5303 
5304    if (ice->state.streamout_active && (clean & IRIS_DIRTY_SO_BUFFERS)) {
5305       for (int i = 0; i < 4; i++) {
5306          struct iris_stream_output_target *tgt =
5307             (void *) ice->state.so_target[i];
5308          if (tgt) {
5309             iris_use_pinned_bo(batch, iris_resource_bo(tgt->base.buffer),
5310                                true, IRIS_DOMAIN_OTHER_WRITE);
5311             iris_use_pinned_bo(batch, iris_resource_bo(tgt->offset.res),
5312                                true, IRIS_DOMAIN_OTHER_WRITE);
5313          }
5314       }
5315    }
5316 
5317    for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5318       if (!(stage_clean & (IRIS_STAGE_DIRTY_CONSTANTS_VS << stage)))
5319          continue;
5320 
5321       struct iris_shader_state *shs = &ice->state.shaders[stage];
5322       struct iris_compiled_shader *shader = ice->shaders.prog[stage];
5323 
5324       if (!shader)
5325          continue;
5326 
5327       struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
5328 
5329       for (int i = 0; i < 4; i++) {
5330          const struct brw_ubo_range *range = &prog_data->ubo_ranges[i];
5331 
5332          if (range->length == 0)
5333             continue;
5334 
5335          /* Range block is a binding table index, map back to UBO index. */
5336          unsigned block_index = iris_bti_to_group_index(
5337             &shader->bt, IRIS_SURFACE_GROUP_UBO, range->block);
5338          assert(block_index != IRIS_SURFACE_NOT_USED);
5339 
5340          struct pipe_shader_buffer *cbuf = &shs->constbuf[block_index];
5341          struct iris_resource *res = (void *) cbuf->buffer;
5342 
5343          if (res)
5344             iris_use_pinned_bo(batch, res->bo, false, IRIS_DOMAIN_OTHER_READ);
5345          else
5346             iris_use_pinned_bo(batch, batch->screen->workaround_bo, false,
5347                                IRIS_DOMAIN_OTHER_READ);
5348       }
5349    }
5350 
5351    for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5352       if (stage_clean & (IRIS_STAGE_DIRTY_BINDINGS_VS << stage)) {
5353          /* Re-pin any buffers referred to by the binding table. */
5354          iris_populate_binding_table(ice, batch, stage, true);
5355       }
5356    }
5357 
5358    for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5359       struct iris_shader_state *shs = &ice->state.shaders[stage];
5360       struct pipe_resource *res = shs->sampler_table.res;
5361       if (res)
5362          iris_use_pinned_bo(batch, iris_resource_bo(res), false,
5363                             IRIS_DOMAIN_NONE);
5364    }
5365 
5366    for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5367       if (stage_clean & (IRIS_STAGE_DIRTY_VS << stage)) {
5368          struct iris_compiled_shader *shader = ice->shaders.prog[stage];
5369 
5370          if (shader) {
5371             struct iris_bo *bo = iris_resource_bo(shader->assembly.res);
5372             iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_NONE);
5373 
5374             pin_scratch_space(ice, batch, shader->prog_data, stage);
5375          }
5376       }
5377    }
5378 
5379    if ((clean & IRIS_DIRTY_DEPTH_BUFFER) &&
5380        (clean & IRIS_DIRTY_WM_DEPTH_STENCIL)) {
5381       struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
5382       pin_depth_and_stencil_buffers(batch, cso_fb->zsbuf, ice->state.cso_zsa);
5383    }
5384 
5385    iris_use_optional_res(batch, ice->state.last_res.index_buffer, false,
5386                          IRIS_DOMAIN_VF_READ);
5387 
5388    if (clean & IRIS_DIRTY_VERTEX_BUFFERS) {
5389       uint64_t bound = ice->state.bound_vertex_buffers;
5390       while (bound) {
5391          const int i = u_bit_scan64(&bound);
5392          struct pipe_resource *res = genx->vertex_buffers[i].resource;
5393          iris_use_pinned_bo(batch, iris_resource_bo(res), false,
5394                             IRIS_DOMAIN_VF_READ);
5395       }
5396    }
5397 
5398 #if GFX_VERx10 == 125
5399    iris_use_pinned_bo(batch, iris_resource_bo(ice->state.pixel_hashing_tables),
5400                       false, IRIS_DOMAIN_NONE);
5401 #else
5402    assert(!ice->state.pixel_hashing_tables);
5403 #endif
5404 }
5405 
5406 static void
5407 iris_restore_compute_saved_bos(struct iris_context *ice,
5408                                struct iris_batch *batch,
5409                                const struct pipe_grid_info *grid)
5410 {
5411    const uint64_t stage_clean = ~ice->state.stage_dirty;
5412 
5413    const int stage = MESA_SHADER_COMPUTE;
5414    struct iris_shader_state *shs = &ice->state.shaders[stage];
5415 
5416    if (stage_clean & IRIS_STAGE_DIRTY_BINDINGS_CS) {
5417       /* Re-pin any buffers referred to by the binding table. */
5418       iris_populate_binding_table(ice, batch, stage, true);
5419    }
5420 
5421    struct pipe_resource *sampler_res = shs->sampler_table.res;
5422    if (sampler_res)
5423       iris_use_pinned_bo(batch, iris_resource_bo(sampler_res), false,
5424                          IRIS_DOMAIN_NONE);
5425 
5426    if ((stage_clean & IRIS_STAGE_DIRTY_SAMPLER_STATES_CS) &&
5427        (stage_clean & IRIS_STAGE_DIRTY_BINDINGS_CS) &&
5428        (stage_clean & IRIS_STAGE_DIRTY_CONSTANTS_CS) &&
5429        (stage_clean & IRIS_STAGE_DIRTY_CS)) {
5430       iris_use_optional_res(batch, ice->state.last_res.cs_desc, false,
5431                             IRIS_DOMAIN_NONE);
5432    }
5433 
5434    if (stage_clean & IRIS_STAGE_DIRTY_CS) {
5435       struct iris_compiled_shader *shader = ice->shaders.prog[stage];
5436 
5437       if (shader) {
5438          struct iris_bo *bo = iris_resource_bo(shader->assembly.res);
5439          iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_NONE);
5440 
5441          if (GFX_VERx10 < 125) {
5442             struct iris_bo *curbe_bo =
5443                iris_resource_bo(ice->state.last_res.cs_thread_ids);
5444             iris_use_pinned_bo(batch, curbe_bo, false, IRIS_DOMAIN_NONE);
5445          }
5446 
5447          pin_scratch_space(ice, batch, shader->prog_data, stage);
5448       }
5449    }
5450 }
5451 
5452 /**
5453  * Possibly emit STATE_BASE_ADDRESS to update Surface State Base Address.
5454  */
5455 static void
5456 iris_update_binder_address(struct iris_batch *batch,
5457                            struct iris_binder *binder)
5458 {
5459    if (batch->last_binder_address == binder->bo->address)
5460       return;
5461 
5462    struct isl_device *isl_dev = &batch->screen->isl_dev;
5463    uint32_t mocs = isl_mocs(isl_dev, 0, false);
5464 
5465    iris_batch_sync_region_start(batch);
5466 
5467 #if GFX_VER >= 11
5468    /* Use 3DSTATE_BINDING_TABLE_POOL_ALLOC on Icelake and later */
5469 
5470 #if GFX_VERx10 == 120
5471    /* Wa_1607854226:
5472     *
5473     *  Workaround the non-pipelined state not applying in MEDIA/GPGPU pipeline
5474     *  mode by putting the pipeline temporarily in 3D mode.
5475     */
5476    if (batch->name == IRIS_BATCH_COMPUTE)
5477       emit_pipeline_select(batch, _3D);
5478 #endif
5479 
5480    iris_emit_pipe_control_flush(batch, "Stall for binder realloc",
5481                                 PIPE_CONTROL_CS_STALL);
5482 
5483    iris_emit_cmd(batch, GENX(3DSTATE_BINDING_TABLE_POOL_ALLOC), btpa) {
5484       btpa.BindingTablePoolBaseAddress = ro_bo(binder->bo, 0);
5485       btpa.BindingTablePoolBufferSize = binder->size / 4096;
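           /* The pool size appears to be programmed in 4KB pages, hence the
            * division by 4096.
            */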
5486 #if GFX_VERx10 < 125
5487       btpa.BindingTablePoolEnable = true;
5488 #endif
5489       btpa.MOCS = mocs;
5490    }
5491 
5492 #if GFX_VERx10 == 120
5493    /* Wa_1607854226:
5494     *
5495     *  Put the pipeline back into compute mode.
5496     */
5497    if (batch->name == IRIS_BATCH_COMPUTE)
5498       emit_pipeline_select(batch, GPGPU);
5499 #endif
5500 #else
5501    /* Use STATE_BASE_ADDRESS on older platforms */
5502    flush_before_state_base_change(batch);
5503 
5504    iris_emit_cmd(batch, GENX(STATE_BASE_ADDRESS), sba) {
5505       sba.SurfaceStateBaseAddressModifyEnable = true;
5506       sba.SurfaceStateBaseAddress = ro_bo(binder->bo, 0);
5507 
5508       /* The hardware appears to pay attention to the MOCS fields even
5509        * if you don't set the "Address Modify Enable" bit for the base.
5510        */
5511       sba.GeneralStateMOCS            = mocs;
5512       sba.StatelessDataPortAccessMOCS = mocs;
5513       sba.DynamicStateMOCS            = mocs;
5514       sba.IndirectObjectMOCS          = mocs;
5515       sba.InstructionMOCS             = mocs;
5516       sba.SurfaceStateMOCS            = mocs;
5517 #if GFX_VER >= 9
5518       sba.BindlessSurfaceStateMOCS    = mocs;
5519 #endif
5520    }
5521 #endif
5522 
5523    flush_after_state_base_change(batch);
5524    iris_batch_sync_region_end(batch);
5525 
5526    batch->last_binder_address = binder->bo->address;
5527 }
5528 
5529 static inline void
5530 iris_viewport_zmin_zmax(const struct pipe_viewport_state *vp, bool halfz,
5531                         bool window_space_position, float *zmin, float *zmax)
5532 {
5533    if (window_space_position) {
5534       *zmin = 0.f;
5535       *zmax = 1.f;
5536       return;
5537    }
5538    util_viewport_zmin_zmax(vp, halfz, zmin, zmax);
5539 }
5540 
5541 #if GFX_VER >= 12
5542 void
5543 genX(invalidate_aux_map_state)(struct iris_batch *batch)
5544 {
5545    struct iris_screen *screen = batch->screen;
5546    void *aux_map_ctx = iris_bufmgr_get_aux_map_context(screen->bufmgr);
5547    if (!aux_map_ctx)
5548       return;
5549    uint32_t aux_map_state_num = intel_aux_map_get_state_num(aux_map_ctx);
5550    if (batch->last_aux_map_state != aux_map_state_num) {
5551       /* HSD 1209978178: docs say that before programming the aux table:
5552        *
5553        *    "Driver must ensure that the engine is IDLE but ensure it doesn't
5554        *    add extra flushes in the case it knows that the engine is already
5555        *    IDLE."
5556        *
5557        * An end of pipe sync is needed here, otherwise we see GPU hangs in
5558        * dEQP-GLES31.functional.copy_image.* tests.
5559        */
5560       iris_emit_end_of_pipe_sync(batch, "Invalidate aux map table",
5561                                  PIPE_CONTROL_CS_STALL);
5562 
5563       /* If the aux-map state number increased, then we need to rewrite the
5564        * register. Rewriting the register is used to both set the aux-map
5565        * translation table address, and also to invalidate any previously
5566        * cached translations.
5567        */
5568       iris_load_register_imm32(batch, GENX(GFX_CCS_AUX_INV_num), 1);
5569       batch->last_aux_map_state = aux_map_state_num;
5570    }
5571 }
5572 
5573 static void
5574 init_aux_map_state(struct iris_batch *batch)
5575 {
5576    struct iris_screen *screen = batch->screen;
5577    void *aux_map_ctx = iris_bufmgr_get_aux_map_context(screen->bufmgr);
5578    if (!aux_map_ctx)
5579       return;
5580 
5581    uint64_t base_addr = intel_aux_map_get_base(aux_map_ctx);
5582    assert(base_addr != 0 && align64(base_addr, 32 * 1024) == base_addr);
5583    iris_load_register_imm64(batch, GENX(GFX_AUX_TABLE_BASE_ADDR_num),
5584                             base_addr);
5585 }
5586 #endif
5587 
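     /* Gathered UBO ranges for one stage's push constants.  At most four
      * entries are needed, matching the four prog_data->ubo_ranges slots the
      * 3DSTATE_CONSTANT_* packets can address.
      */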
5588 struct push_bos {
5589    struct {
5590       struct iris_address addr;
5591       uint32_t length;
5592    } buffers[4];
5593    int buffer_count;
5594    uint32_t max_length;
5595 };
5596 
5597 static void
5598 setup_constant_buffers(struct iris_context *ice,
5599                        struct iris_batch *batch,
5600                        int stage,
5601                        struct push_bos *push_bos)
5602 {
5603    struct iris_shader_state *shs = &ice->state.shaders[stage];
5604    struct iris_compiled_shader *shader = ice->shaders.prog[stage];
5605    struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
5606 
5607    uint32_t push_range_sum = 0;
5608 
5609    int n = 0;
5610    for (int i = 0; i < 4; i++) {
5611       const struct brw_ubo_range *range = &prog_data->ubo_ranges[i];
5612 
5613       if (range->length == 0)
5614          continue;
5615 
5616       push_range_sum += range->length;
5617 
5618       if (range->length > push_bos->max_length)
5619          push_bos->max_length = range->length;
5620 
5621       /* Range block is a binding table index, map back to UBO index. */
5622       unsigned block_index = iris_bti_to_group_index(
5623          &shader->bt, IRIS_SURFACE_GROUP_UBO, range->block);
5624       assert(block_index != IRIS_SURFACE_NOT_USED);
5625 
5626       struct pipe_shader_buffer *cbuf = &shs->constbuf[block_index];
5627       struct iris_resource *res = (void *) cbuf->buffer;
5628 
5629       assert(cbuf->buffer_offset % 32 == 0);
5630 
5631       push_bos->buffers[n].length = range->length;
5632       push_bos->buffers[n].addr =
5633          res ? ro_bo(res->bo, range->start * 32 + cbuf->buffer_offset)
5634          : batch->screen->workaround_address;
5635       n++;
5636    }
5637 
5638    /* From the 3DSTATE_CONSTANT_XS and 3DSTATE_CONSTANT_ALL programming notes:
5639     *
5640     *    "The sum of all four read length fields must be less than or
5641     *    equal to the size of 64."
5642     */
5643    assert(push_range_sum <= 64);
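        /* For example, four ranges of 16 registers each (4 * 16 = 64) exactly
         * fill that budget; any larger combination cannot be pushed.
         */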
5644 
5645    push_bos->buffer_count = n;
5646 }
5647 
5648 static void
5649 emit_push_constant_packets(struct iris_context *ice,
5650                            struct iris_batch *batch,
5651                            int stage,
5652                            const struct push_bos *push_bos)
5653 {
5654    UNUSED struct isl_device *isl_dev = &batch->screen->isl_dev;
5655    struct iris_compiled_shader *shader = ice->shaders.prog[stage];
5656    struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
5657 
5658    iris_emit_cmd(batch, GENX(3DSTATE_CONSTANT_VS), pkt) {
5659       pkt._3DCommandSubOpcode = push_constant_opcodes[stage];
5660 
5661 #if GFX_VER >= 9
5662       pkt.MOCS = isl_mocs(isl_dev, 0, false);
5663 #endif
5664 
5665       if (prog_data) {
5666          /* The Skylake PRM contains the following restriction:
5667           *
5668           *    "The driver must ensure The following case does not occur
5669           *     without a flush to the 3D engine: 3DSTATE_CONSTANT_* with
5670           *     buffer 3 read length equal to zero committed followed by a
5671           *     3DSTATE_CONSTANT_* with buffer 0 read length not equal to
5672           *     zero committed."
5673           *
5674           * To avoid this, we program the buffers in the highest slots.
5675           * This way, slot 0 is only used if slot 3 is also used.
5676           */
5677          int n = push_bos->buffer_count;
5678          assert(n <= 4);
5679          const unsigned shift = 4 - n;
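              /* For example, with n == 2 the shift is 2, so the two buffers
               * land in ReadLength[2]/[3] and Buffer[2]/[3], leaving slots 0
               * and 1 unprogrammed; slot 0 is only ever used once slot 3 is.
               */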
5680          for (int i = 0; i < n; i++) {
5681             pkt.ConstantBody.ReadLength[i + shift] =
5682                push_bos->buffers[i].length;
5683             pkt.ConstantBody.Buffer[i + shift] = push_bos->buffers[i].addr;
5684          }
5685       }
5686    }
5687 }
5688 
5689 #if GFX_VER >= 12
5690 static void
5691 emit_push_constant_packet_all(struct iris_context *ice,
5692                               struct iris_batch *batch,
5693                               uint32_t shader_mask,
5694                               const struct push_bos *push_bos)
5695 {
5696    struct isl_device *isl_dev = &batch->screen->isl_dev;
5697 
5698    if (!push_bos) {
5699       iris_emit_cmd(batch, GENX(3DSTATE_CONSTANT_ALL), pc) {
5700          pc.ShaderUpdateEnable = shader_mask;
5701          pc.MOCS = iris_mocs(NULL, isl_dev, 0);
5702       }
5703       return;
5704    }
5705 
5706    const uint32_t n = push_bos->buffer_count;
5707    const uint32_t max_pointers = 4;
5708    const uint32_t num_dwords = 2 + 2 * n;
5709    uint32_t const_all[2 + 2 * max_pointers];
5710    uint32_t *dw = &const_all[0];
5711 
5712    assert(n <= max_pointers);
5713    iris_pack_command(GENX(3DSTATE_CONSTANT_ALL), dw, all) {
5714       all.DWordLength = num_dwords - 2;
5715       all.MOCS = isl_mocs(isl_dev, 0, false);
5716       all.ShaderUpdateEnable = shader_mask;
5717       all.PointerBufferMask = (1 << n) - 1;
5718    }
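        /* The packed header occupies the first two DWords; each buffer then
         * adds a two-DWord pointer/length pair, so e.g. n == 3 gives
         * PointerBufferMask 0b111 and num_dwords == 8.
         */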
5719    dw += 2;
5720 
5721    for (int i = 0; i < n; i++) {
5722       _iris_pack_state(batch, GENX(3DSTATE_CONSTANT_ALL_DATA),
5723                        dw + i * 2, data) {
5724          data.PointerToConstantBuffer = push_bos->buffers[i].addr;
5725          data.ConstantBufferReadLength = push_bos->buffers[i].length;
5726       }
5727    }
5728    iris_batch_emit(batch, const_all, sizeof(uint32_t) * num_dwords);
5729 }
5730 #endif
5731 
5732 void
5733 genX(emit_depth_state_workarounds)(struct iris_context *ice,
5734                                    struct iris_batch *batch,
5735                                    const struct isl_surf *surf)
5736 {
5737 #if GFX_VERx10 == 120
5738    const bool fmt_is_d16 = surf->format == ISL_FORMAT_R16_UNORM;
5739 
5740    switch (ice->state.genx->depth_reg_mode) {
5741    case IRIS_DEPTH_REG_MODE_HW_DEFAULT:
5742       if (!fmt_is_d16)
5743          return;
5744       break;
5745    case IRIS_DEPTH_REG_MODE_D16:
5746       if (fmt_is_d16)
5747          return;
5748       break;
5749    case IRIS_DEPTH_REG_MODE_UNKNOWN:
5750       break;
5751    }
5752 
5753    /* We'll change some CHICKEN registers depending on the depth surface
5754     * format. Do a depth flush and stall so the pipeline is not using these
5755     * settings while we change the registers.
5756     */
5757    iris_emit_end_of_pipe_sync(batch,
5758                               "Workaround: Stop pipeline for 14010455700",
5759                               PIPE_CONTROL_DEPTH_STALL |
5760                               PIPE_CONTROL_DEPTH_CACHE_FLUSH);
5761 
5762    /* Wa_14010455700
5763     *
5764     * To avoid sporadic corruptions “Set 0x7010[9] when Depth Buffer
5765     * Surface Format is D16_UNORM , surface type is not NULL & 1X_MSAA”.
5766     */
5767    iris_emit_reg(batch, GENX(COMMON_SLICE_CHICKEN1), reg) {
5768       reg.HIZPlaneOptimizationdisablebit = fmt_is_d16 && surf->samples == 1;
5769       reg.HIZPlaneOptimizationdisablebitMask = true;
5770    }
5771 
5772    /* Wa_1806527549
5773     *
5774     * Set HIZ_CHICKEN (7018h) bit 13 = 1 when depth buffer is D16_UNORM.
5775     */
5776    iris_emit_reg(batch, GENX(HIZ_CHICKEN), reg) {
5777       reg.HZDepthTestLEGEOptimizationDisable = fmt_is_d16;
5778       reg.HZDepthTestLEGEOptimizationDisableMask = true;
5779    }
5780 
5781    ice->state.genx->depth_reg_mode =
5782       fmt_is_d16 ? IRIS_DEPTH_REG_MODE_D16 : IRIS_DEPTH_REG_MODE_HW_DEFAULT;
5783 #endif
5784 }
5785 
5786 static void
5787 iris_upload_dirty_render_state(struct iris_context *ice,
5788                                struct iris_batch *batch,
5789                                const struct pipe_draw_info *draw)
5790 {
5791    struct iris_screen *screen = batch->screen;
5792    struct iris_border_color_pool *border_color_pool =
5793       iris_bufmgr_get_border_color_pool(screen->bufmgr);
5794    const uint64_t dirty = ice->state.dirty;
5795    const uint64_t stage_dirty = ice->state.stage_dirty;
5796 
5797    if (!(dirty & IRIS_ALL_DIRTY_FOR_RENDER) &&
5798        !(stage_dirty & IRIS_ALL_STAGE_DIRTY_FOR_RENDER))
5799       return;
5800 
5801    struct iris_genx_state *genx = ice->state.genx;
5802    struct iris_binder *binder = &ice->state.binder;
5803    struct brw_wm_prog_data *wm_prog_data = (void *)
5804       ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
5805 
5806    if (dirty & IRIS_DIRTY_CC_VIEWPORT) {
5807       const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
5808       uint32_t cc_vp_address;
5809 
5810       /* XXX: could avoid streaming for depth_clip [0,1] case. */
5811       uint32_t *cc_vp_map =
5812          stream_state(batch, ice->state.dynamic_uploader,
5813                       &ice->state.last_res.cc_vp,
5814                       4 * ice->state.num_viewports *
5815                       GENX(CC_VIEWPORT_length), 32, &cc_vp_address);
5816       for (int i = 0; i < ice->state.num_viewports; i++) {
5817          float zmin, zmax;
5818          iris_viewport_zmin_zmax(&ice->state.viewports[i], cso_rast->clip_halfz,
5819                                  ice->state.window_space_position,
5820                                  &zmin, &zmax);
5821          if (cso_rast->depth_clip_near)
5822             zmin = 0.0;
5823          if (cso_rast->depth_clip_far)
5824             zmax = 1.0;
5825 
5826          iris_pack_state(GENX(CC_VIEWPORT), cc_vp_map, ccv) {
5827             ccv.MinimumDepth = zmin;
5828             ccv.MaximumDepth = zmax;
5829          }
5830 
5831          cc_vp_map += GENX(CC_VIEWPORT_length);
5832       }
5833 
5834       iris_emit_cmd(batch, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC), ptr) {
5835          ptr.CCViewportPointer = cc_vp_address;
5836       }
5837    }
5838 
5839    if (dirty & IRIS_DIRTY_SF_CL_VIEWPORT) {
5840       struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
5841       uint32_t sf_cl_vp_address;
5842       uint32_t *vp_map =
5843          stream_state(batch, ice->state.dynamic_uploader,
5844                       &ice->state.last_res.sf_cl_vp,
5845                       4 * ice->state.num_viewports *
5846                       GENX(SF_CLIP_VIEWPORT_length), 64, &sf_cl_vp_address);
5847 
5848       for (unsigned i = 0; i < ice->state.num_viewports; i++) {
5849          const struct pipe_viewport_state *state = &ice->state.viewports[i];
5850          float gb_xmin, gb_xmax, gb_ymin, gb_ymax;
5851 
5852          float vp_xmin = viewport_extent(state, 0, -1.0f);
5853          float vp_xmax = viewport_extent(state, 0,  1.0f);
5854          float vp_ymin = viewport_extent(state, 1, -1.0f);
5855          float vp_ymax = viewport_extent(state, 1,  1.0f);
5856 
5857          intel_calculate_guardband_size(0, cso_fb->width, 0, cso_fb->height,
5858                                         state->scale[0], state->scale[1],
5859                                         state->translate[0], state->translate[1],
5860                                         &gb_xmin, &gb_xmax, &gb_ymin, &gb_ymax);
5861 
5862          iris_pack_state(GENX(SF_CLIP_VIEWPORT), vp_map, vp) {
5863             vp.ViewportMatrixElementm00 = state->scale[0];
5864             vp.ViewportMatrixElementm11 = state->scale[1];
5865             vp.ViewportMatrixElementm22 = state->scale[2];
5866             vp.ViewportMatrixElementm30 = state->translate[0];
5867             vp.ViewportMatrixElementm31 = state->translate[1];
5868             vp.ViewportMatrixElementm32 = state->translate[2];
5869             vp.XMinClipGuardband = gb_xmin;
5870             vp.XMaxClipGuardband = gb_xmax;
5871             vp.YMinClipGuardband = gb_ymin;
5872             vp.YMaxClipGuardband = gb_ymax;
5873             vp.XMinViewPort = MAX2(vp_xmin, 0);
5874             vp.XMaxViewPort = MIN2(vp_xmax, cso_fb->width) - 1;
5875             vp.YMinViewPort = MAX2(vp_ymin, 0);
5876             vp.YMaxViewPort = MIN2(vp_ymax, cso_fb->height) - 1;
5877          }
5878 
5879          vp_map += GENX(SF_CLIP_VIEWPORT_length);
5880       }
5881 
5882       iris_emit_cmd(batch, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP), ptr) {
5883          ptr.SFClipViewportPointer = sf_cl_vp_address;
5884       }
5885    }
5886 
5887    if (dirty & IRIS_DIRTY_URB) {
5888       for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
5889          if (!ice->shaders.prog[i]) {
5890             ice->shaders.urb.size[i] = 1;
5891          } else {
5892             struct brw_vue_prog_data *vue_prog_data =
5893                (void *) ice->shaders.prog[i]->prog_data;
5894             ice->shaders.urb.size[i] = vue_prog_data->urb_entry_size;
5895          }
5896          assert(ice->shaders.urb.size[i] != 0);
5897       }
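           /* Unused stages keep the size of 1 assigned above: the URB entry
            * allocation field is programmed as (size - 1) further down, so a
            * size of 0 would underflow it, hence the assert.
            */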
5898 
5899       intel_get_urb_config(&screen->devinfo,
5900                            screen->l3_config_3d,
5901                            ice->shaders.prog[MESA_SHADER_TESS_EVAL] != NULL,
5902                            ice->shaders.prog[MESA_SHADER_GEOMETRY] != NULL,
5903                            ice->shaders.urb.size,
5904                            ice->shaders.urb.entries,
5905                            ice->shaders.urb.start,
5906                            &ice->state.urb_deref_block_size,
5907                            &ice->shaders.urb.constrained);
5908 
5909       for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
5910          iris_emit_cmd(batch, GENX(3DSTATE_URB_VS), urb) {
5911             urb._3DCommandSubOpcode += i;
5912             urb.VSURBStartingAddress     = ice->shaders.urb.start[i];
5913             urb.VSURBEntryAllocationSize = ice->shaders.urb.size[i] - 1;
5914             urb.VSNumberofURBEntries     = ice->shaders.urb.entries[i];
5915          }
5916       }
5917    }
5918 
5919    if (dirty & IRIS_DIRTY_BLEND_STATE) {
5920       struct iris_blend_state *cso_blend = ice->state.cso_blend;
5921       struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
5922       struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
5923       const int header_dwords = GENX(BLEND_STATE_length);
5924 
5925       /* Always write at least one BLEND_STATE - the final RT message will
5926        * reference BLEND_STATE[0] even if there aren't color writes.  There
5927        * may still be alpha testing, computed depth, and so on.
5928        */
5929       const int rt_dwords =
5930          MAX2(cso_fb->nr_cbufs, 1) * GENX(BLEND_STATE_ENTRY_length);
5931 
5932       uint32_t blend_offset;
5933       uint32_t *blend_map =
5934          stream_state(batch, ice->state.dynamic_uploader,
5935                       &ice->state.last_res.blend,
5936                       4 * (header_dwords + rt_dwords), 64, &blend_offset);
5937 
5938       uint32_t blend_state_header;
5939       iris_pack_state(GENX(BLEND_STATE), &blend_state_header, bs) {
5940          bs.AlphaTestEnable = cso_zsa->alpha_enabled;
5941          bs.AlphaTestFunction = translate_compare_func(cso_zsa->alpha_func);
5942       }
5943 
5944       blend_map[0] = blend_state_header | cso_blend->blend_state[0];
5945       memcpy(&blend_map[1], &cso_blend->blend_state[1], 4 * rt_dwords);
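           /* DWord 0 ORs the freshly packed alpha test header into the CSO's
            * precomputed first BLEND_STATE DWord; the per-RT entries that
            * follow are copied verbatim from the CSO.
            */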
5946 
5947       iris_emit_cmd(batch, GENX(3DSTATE_BLEND_STATE_POINTERS), ptr) {
5948          ptr.BlendStatePointer = blend_offset;
5949          ptr.BlendStatePointerValid = true;
5950       }
5951    }
5952 
5953    if (dirty & IRIS_DIRTY_COLOR_CALC_STATE) {
5954       struct iris_depth_stencil_alpha_state *cso = ice->state.cso_zsa;
5955 #if GFX_VER == 8
5956       struct pipe_stencil_ref *p_stencil_refs = &ice->state.stencil_ref;
5957 #endif
5958       uint32_t cc_offset;
5959       void *cc_map =
5960          stream_state(batch, ice->state.dynamic_uploader,
5961                       &ice->state.last_res.color_calc,
5962                       sizeof(uint32_t) * GENX(COLOR_CALC_STATE_length),
5963                       64, &cc_offset);
5964       iris_pack_state(GENX(COLOR_CALC_STATE), cc_map, cc) {
5965          cc.AlphaTestFormat = ALPHATEST_FLOAT32;
5966          cc.AlphaReferenceValueAsFLOAT32 = cso->alpha_ref_value;
5967          cc.BlendConstantColorRed   = ice->state.blend_color.color[0];
5968          cc.BlendConstantColorGreen = ice->state.blend_color.color[1];
5969          cc.BlendConstantColorBlue  = ice->state.blend_color.color[2];
5970          cc.BlendConstantColorAlpha = ice->state.blend_color.color[3];
5971 #if GFX_VER == 8
5972          cc.StencilReferenceValue = p_stencil_refs->ref_value[0];
5973          cc.BackfaceStencilReferenceValue = p_stencil_refs->ref_value[1];
5974 #endif
5975       }
5976       iris_emit_cmd(batch, GENX(3DSTATE_CC_STATE_POINTERS), ptr) {
5977          ptr.ColorCalcStatePointer = cc_offset;
5978          ptr.ColorCalcStatePointerValid = true;
5979       }
5980    }
5981 
5982    /* Wa_1604061319
5983     *
5984     *    3DSTATE_CONSTANT_* needs to be programmed before BTP_*
5985     *
5986     * Testing shows that all the 3DSTATE_CONSTANT_XS need to be emitted if
5987     * any stage has a dirty binding table.
5988     */
5989    const bool emit_const_wa = GFX_VER >= 11 &&
5990       ((dirty & IRIS_DIRTY_RENDER_BUFFER) ||
5991        (stage_dirty & IRIS_ALL_STAGE_DIRTY_BINDINGS_FOR_RENDER));
5992 
5993 #if GFX_VER >= 12
5994    uint32_t nobuffer_stages = 0;
5995 #endif
5996 
5997    for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5998       if (!(stage_dirty & (IRIS_STAGE_DIRTY_CONSTANTS_VS << stage)) &&
5999           !emit_const_wa)
6000          continue;
6001 
6002       struct iris_shader_state *shs = &ice->state.shaders[stage];
6003       struct iris_compiled_shader *shader = ice->shaders.prog[stage];
6004 
6005       if (!shader)
6006          continue;
6007 
6008       if (shs->sysvals_need_upload)
6009          upload_sysvals(ice, stage, NULL);
6010 
6011       struct push_bos push_bos = {};
6012       setup_constant_buffers(ice, batch, stage, &push_bos);
6013 
6014 #if GFX_VER >= 12
6015       /* If this stage doesn't have any push constants, emit it later in a
6016        * single CONSTANT_ALL packet with all the other stages.
6017        */
6018       if (push_bos.buffer_count == 0) {
6019          nobuffer_stages |= 1 << stage;
6020          continue;
6021       }
6022 
6023       /* The Constant Buffer Read Length field in 3DSTATE_CONSTANT_ALL is
6024        * only 5 bits wide, so we can only use it when every range's read
6025        * length is smaller than 32.
6026        */
6027       if (push_bos.max_length < 32) {
6028          emit_push_constant_packet_all(ice, batch, 1 << stage, &push_bos);
6029          continue;
6030       }
6031 #endif
6032       emit_push_constant_packets(ice, batch, stage, &push_bos);
6033    }
6034 
6035 #if GFX_VER >= 12
6036    if (nobuffer_stages)
6037       emit_push_constant_packet_all(ice, batch, nobuffer_stages, NULL);
6038 #endif
6039 
6040    for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
6041       /* Gfx9 requires 3DSTATE_BINDING_TABLE_POINTERS_XS to be re-emitted
6042        * in order to commit constants.  TODO: Investigate "Disable Gather
6043        * at Set Shader" to go back to legacy mode...
6044        */
6045       if (stage_dirty & ((IRIS_STAGE_DIRTY_BINDINGS_VS |
6046                           (GFX_VER == 9 ? IRIS_STAGE_DIRTY_CONSTANTS_VS : 0))
6047                             << stage)) {
6048          iris_emit_cmd(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), ptr) {
6049             ptr._3DCommandSubOpcode = 38 + stage;
6050             ptr.PointertoVSBindingTable =
6051                binder->bt_offset[stage] >> IRIS_BT_OFFSET_SHIFT;
6052          }
6053       }
6054    }
6055 
6056    if (GFX_VER >= 11 && (dirty & IRIS_DIRTY_RENDER_BUFFER)) {
6057       // XXX: we may want to flag IRIS_DIRTY_MULTISAMPLE (or SAMPLE_MASK?)
6058       // XXX: see commit 979fc1bc9bcc64027ff2cfafd285676f31b930a6
6059 
6060       /* The PIPE_CONTROL command description says:
6061        *
6062        *   "Whenever a Binding Table Index (BTI) used by a Render Target
6063        *    Message points to a different RENDER_SURFACE_STATE, SW must issue a
6064        *    Render Target Cache Flush by enabling this bit. When render target
6065        *    flush is set due to new association of BTI, PS Scoreboard Stall bit
6066        *    must be set in this packet."
6067        */
6068       // XXX: does this need to happen at 3DSTATE_BTP_PS time?
6069       iris_emit_pipe_control_flush(batch, "workaround: RT BTI change [draw]",
6070                                    PIPE_CONTROL_RENDER_TARGET_FLUSH |
6071                                    PIPE_CONTROL_STALL_AT_SCOREBOARD);
6072    }
6073 
6074    if (dirty & IRIS_DIRTY_RENDER_BUFFER)
6075       trace_framebuffer_state(&batch->trace, batch, &ice->state.framebuffer);
6076 
6077    for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
6078       if (stage_dirty & (IRIS_STAGE_DIRTY_BINDINGS_VS << stage)) {
6079          iris_populate_binding_table(ice, batch, stage, false);
6080       }
6081    }
6082 
6083    for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
6084       if (!(stage_dirty & (IRIS_STAGE_DIRTY_SAMPLER_STATES_VS << stage)) ||
6085           !ice->shaders.prog[stage])
6086          continue;
6087 
6088       iris_upload_sampler_states(ice, stage);
6089 
6090       struct iris_shader_state *shs = &ice->state.shaders[stage];
6091       struct pipe_resource *res = shs->sampler_table.res;
6092       if (res)
6093          iris_use_pinned_bo(batch, iris_resource_bo(res), false,
6094                             IRIS_DOMAIN_NONE);
6095 
6096       iris_emit_cmd(batch, GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS), ptr) {
6097          ptr._3DCommandSubOpcode = 43 + stage;
6098          ptr.PointertoVSSamplerState = shs->sampler_table.offset;
6099       }
6100    }
6101 
6102    if (ice->state.need_border_colors)
6103       iris_use_pinned_bo(batch, border_color_pool->bo, false, IRIS_DOMAIN_NONE);
6104 
6105    if (dirty & IRIS_DIRTY_MULTISAMPLE) {
6106       iris_emit_cmd(batch, GENX(3DSTATE_MULTISAMPLE), ms) {
6107          ms.PixelLocation =
6108             ice->state.cso_rast->half_pixel_center ? CENTER : UL_CORNER;
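              /* NumberofMultisamples is log2-encoded: ffs(samples) - 1 yields
               * that for power-of-two sample counts, e.g. 4x MSAA packs as 2.
               */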
6109          if (ice->state.framebuffer.samples > 0)
6110             ms.NumberofMultisamples = ffs(ice->state.framebuffer.samples) - 1;
6111       }
6112    }
6113 
6114    if (dirty & IRIS_DIRTY_SAMPLE_MASK) {
6115       iris_emit_cmd(batch, GENX(3DSTATE_SAMPLE_MASK), ms) {
6116          ms.SampleMask = ice->state.sample_mask;
6117       }
6118    }
6119 
6120    for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
6121       if (!(stage_dirty & (IRIS_STAGE_DIRTY_VS << stage)))
6122          continue;
6123 
6124       struct iris_compiled_shader *shader = ice->shaders.prog[stage];
6125 
6126       if (shader) {
6127          struct brw_stage_prog_data *prog_data = shader->prog_data;
6128          struct iris_resource *cache = (void *) shader->assembly.res;
6129          iris_use_pinned_bo(batch, cache->bo, false, IRIS_DOMAIN_NONE);
6130 
6131          uint32_t scratch_addr =
6132             pin_scratch_space(ice, batch, prog_data, stage);
6133 
6134          if (stage == MESA_SHADER_FRAGMENT) {
6135             UNUSED struct iris_rasterizer_state *cso = ice->state.cso_rast;
6136             struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
6137 
6138             uint32_t ps_state[GENX(3DSTATE_PS_length)] = {0};
6139             _iris_pack_command(batch, GENX(3DSTATE_PS), ps_state, ps) {
6140                ps._8PixelDispatchEnable = wm_prog_data->dispatch_8;
6141                ps._16PixelDispatchEnable = wm_prog_data->dispatch_16;
6142                ps._32PixelDispatchEnable = wm_prog_data->dispatch_32;
6143 
6144               /* The docs for 3DSTATE_PS::32 Pixel Dispatch Enable say:
6145                *
6146                *    "When NUM_MULTISAMPLES = 16 or FORCE_SAMPLE_COUNT = 16,
6147                *     SIMD32 Dispatch must not be enabled for PER_PIXEL dispatch
6148                *     mode."
6149                *
6150                * 16x MSAA only exists on Gfx9+, so we can skip this on Gfx8.
6151                */
6152                if (GFX_VER >= 9 && cso_fb->samples == 16 &&
6153                    !wm_prog_data->persample_dispatch) {
6154                   assert(ps._8PixelDispatchEnable || ps._16PixelDispatchEnable);
6155                   ps._32PixelDispatchEnable = false;
6156                }
6157 
6158                ps.DispatchGRFStartRegisterForConstantSetupData0 =
6159                   brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 0);
6160                ps.DispatchGRFStartRegisterForConstantSetupData1 =
6161                   brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 1);
6162                ps.DispatchGRFStartRegisterForConstantSetupData2 =
6163                   brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 2);
6164 
6165                ps.KernelStartPointer0 = KSP(shader) +
6166                   brw_wm_prog_data_prog_offset(wm_prog_data, ps, 0);
6167                ps.KernelStartPointer1 = KSP(shader) +
6168                   brw_wm_prog_data_prog_offset(wm_prog_data, ps, 1);
6169                ps.KernelStartPointer2 = KSP(shader) +
6170                   brw_wm_prog_data_prog_offset(wm_prog_data, ps, 2);
6171 
6172 #if GFX_VERx10 >= 125
6173                ps.ScratchSpaceBuffer = scratch_addr >> 4;
6174 #else
6175                ps.ScratchSpaceBasePointer =
6176                   rw_bo(NULL, scratch_addr, IRIS_DOMAIN_NONE);
6177 #endif
6178             }
6179 
6180             uint32_t psx_state[GENX(3DSTATE_PS_EXTRA_length)] = {0};
6181             iris_pack_command(GENX(3DSTATE_PS_EXTRA), psx_state, psx) {
6182 #if GFX_VER >= 9
6183                if (!wm_prog_data->uses_sample_mask)
6184                   psx.InputCoverageMaskState  = ICMS_NONE;
6185                else if (wm_prog_data->post_depth_coverage)
6186                   psx.InputCoverageMaskState = ICMS_DEPTH_COVERAGE;
6187                else if (wm_prog_data->inner_coverage &&
6188                         cso->conservative_rasterization)
6189                   psx.InputCoverageMaskState = ICMS_INNER_CONSERVATIVE;
6190                else
6191                   psx.InputCoverageMaskState = ICMS_NORMAL;
6192 #else
6193                psx.PixelShaderUsesInputCoverageMask =
6194                   wm_prog_data->uses_sample_mask;
6195 #endif
6196             }
6197 
6198             uint32_t *shader_ps = (uint32_t *) shader->derived_data;
6199             uint32_t *shader_psx = shader_ps + GENX(3DSTATE_PS_length);
6200             iris_emit_merge(batch, shader_ps, ps_state,
6201                             GENX(3DSTATE_PS_length));
6202             iris_emit_merge(batch, shader_psx, psx_state,
6203                             GENX(3DSTATE_PS_EXTRA_length));
6204          } else if (scratch_addr) {
6205             uint32_t *pkt = (uint32_t *) shader->derived_data;
6206             switch (stage) {
6207             case MESA_SHADER_VERTEX:    MERGE_SCRATCH_ADDR(3DSTATE_VS); break;
6208             case MESA_SHADER_TESS_CTRL: MERGE_SCRATCH_ADDR(3DSTATE_HS); break;
6209             case MESA_SHADER_TESS_EVAL: MERGE_SCRATCH_ADDR(3DSTATE_DS); break;
6210             case MESA_SHADER_GEOMETRY:  MERGE_SCRATCH_ADDR(3DSTATE_GS); break;
6211             }
6212          } else {
6213             iris_batch_emit(batch, shader->derived_data,
6214                             iris_derived_program_state_size(stage));
6215          }
6216       } else {
6217          if (stage == MESA_SHADER_TESS_EVAL) {
6218             iris_emit_cmd(batch, GENX(3DSTATE_HS), hs);
6219             iris_emit_cmd(batch, GENX(3DSTATE_TE), te);
6220             iris_emit_cmd(batch, GENX(3DSTATE_DS), ds);
6221          } else if (stage == MESA_SHADER_GEOMETRY) {
6222             iris_emit_cmd(batch, GENX(3DSTATE_GS), gs);
6223          }
6224       }
6225    }
6226 
6227    if (ice->state.streamout_active) {
6228       if (dirty & IRIS_DIRTY_SO_BUFFERS) {
6229          /* Wa_16011411144
6230           * SW must insert a PIPE_CONTROL cmd before and after the
6231           * 3dstate_so_buffer_index_0/1/2/3 states to ensure so_buffer_index_* state is
6232           * not combined with other state changes.
6233           */
6234          if (intel_device_info_is_dg2(&batch->screen->devinfo)) {
6235             iris_emit_pipe_control_flush(batch,
6236                                          "SO pre change stall WA",
6237                                          PIPE_CONTROL_CS_STALL);
6238          }
6239 
6240          for (int i = 0; i < 4; i++) {
6241             struct iris_stream_output_target *tgt =
6242                (void *) ice->state.so_target[i];
6243             const uint32_t dwords = GENX(3DSTATE_SO_BUFFER_length);
6244             uint32_t *so_buffers = genx->so_buffers + i * dwords;
6245             bool zero_offset = false;
6246 
6247             if (tgt) {
6248                zero_offset = tgt->zero_offset;
6249                iris_use_pinned_bo(batch, iris_resource_bo(tgt->base.buffer),
6250                                   true, IRIS_DOMAIN_OTHER_WRITE);
6251                iris_use_pinned_bo(batch, iris_resource_bo(tgt->offset.res),
6252                                   true, IRIS_DOMAIN_OTHER_WRITE);
6253             }
6254 
6255             if (zero_offset) {
6256                /* Skip the last DWord which contains "Stream Offset" of
6257                 * 0xFFFFFFFF and instead emit a dword of zero directly.
6258                 */
6259                STATIC_ASSERT(GENX(3DSTATE_SO_BUFFER_StreamOffset_start) ==
6260                              32 * (dwords - 1));
6261                const uint32_t zero = 0;
6262                iris_batch_emit(batch, so_buffers, 4 * (dwords - 1));
6263                iris_batch_emit(batch, &zero, sizeof(zero));
6264                tgt->zero_offset = false;
6265             } else {
6266                iris_batch_emit(batch, so_buffers, 4 * dwords);
6267             }
6268          }
6269 
6270          /* Wa_16011411144 */
6271          if (intel_device_info_is_dg2(&batch->screen->devinfo)) {
6272             iris_emit_pipe_control_flush(batch,
6273                                          "SO post change stall WA",
6274                                          PIPE_CONTROL_CS_STALL);
6275          }
6276       }
6277 
6278       if ((dirty & IRIS_DIRTY_SO_DECL_LIST) && ice->state.streamout) {
6279          /* Wa_16011773973:
6280           * If SOL is enabled and SO_DECL state has to be programmed,
6281           *    1. Send 3D State SOL state with SOL disabled
6282           *    2. Send SO_DECL NP state
6283           *    3. Send 3D State SOL with SOL Enabled
6284           */
6285          if (intel_device_info_is_dg2(&batch->screen->devinfo))
6286             iris_emit_cmd(batch, GENX(3DSTATE_STREAMOUT), sol);
6287 
6288          uint32_t *decl_list =
6289             ice->state.streamout + GENX(3DSTATE_STREAMOUT_length);
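              /* The emit size comes from the packed header itself: the low
               * bits of DWord 0 hold DWordLength, which is biased by 2, giving
               * ((decl_list[0] & 0xff) + 2) DWords of 4 bytes each.
               */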
6290          iris_batch_emit(batch, decl_list, 4 * ((decl_list[0] & 0xff) + 2));
6291       }
6292 
6293       if (dirty & IRIS_DIRTY_STREAMOUT) {
6294          const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
6295 
6296          uint32_t dynamic_sol[GENX(3DSTATE_STREAMOUT_length)];
6297          iris_pack_command(GENX(3DSTATE_STREAMOUT), dynamic_sol, sol) {
6298             sol.SOFunctionEnable = true;
6299             sol.SOStatisticsEnable = true;
6300 
6301             sol.RenderingDisable = cso_rast->rasterizer_discard &&
6302                                    !ice->state.prims_generated_query_active;
6303             sol.ReorderMode = cso_rast->flatshade_first ? LEADING : TRAILING;
6304          }
6305 
6306          assert(ice->state.streamout);
6307 
6308          iris_emit_merge(batch, ice->state.streamout, dynamic_sol,
6309                          GENX(3DSTATE_STREAMOUT_length));
6310       }
6311    } else {
6312       if (dirty & IRIS_DIRTY_STREAMOUT) {
6313          iris_emit_cmd(batch, GENX(3DSTATE_STREAMOUT), sol);
6314       }
6315    }
6316 
6317    if (dirty & IRIS_DIRTY_CLIP) {
6318       struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
6319       struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
6320 
6321       bool gs_or_tes = ice->shaders.prog[MESA_SHADER_GEOMETRY] ||
6322                        ice->shaders.prog[MESA_SHADER_TESS_EVAL];
6323       bool points_or_lines = cso_rast->fill_mode_point_or_line ||
6324          (gs_or_tes ? ice->shaders.output_topology_is_points_or_lines
6325                     : ice->state.prim_is_points_or_lines);
6326 
6327       uint32_t dynamic_clip[GENX(3DSTATE_CLIP_length)];
6328       iris_pack_command(GENX(3DSTATE_CLIP), &dynamic_clip, cl) {
6329          cl.StatisticsEnable = ice->state.statistics_counters_enabled;
6330          if (cso_rast->rasterizer_discard)
6331             cl.ClipMode = CLIPMODE_REJECT_ALL;
6332          else if (ice->state.window_space_position)
6333             cl.ClipMode = CLIPMODE_ACCEPT_ALL;
6334          else
6335             cl.ClipMode = CLIPMODE_NORMAL;
6336 
6337          cl.PerspectiveDivideDisable = ice->state.window_space_position;
6338          cl.ViewportXYClipTestEnable = !points_or_lines;
6339 
6340          if (wm_prog_data->barycentric_interp_modes &
6341              BRW_BARYCENTRIC_NONPERSPECTIVE_BITS)
6342             cl.NonPerspectiveBarycentricEnable = true;
6343 
6344          cl.ForceZeroRTAIndexEnable = cso_fb->layers <= 1;
6345          cl.MaximumVPIndex = ice->state.num_viewports - 1;
6346       }
6347       iris_emit_merge(batch, cso_rast->clip, dynamic_clip,
6348                       ARRAY_SIZE(cso_rast->clip));
6349    }
6350 
6351    if (dirty & (IRIS_DIRTY_RASTER | IRIS_DIRTY_URB)) {
6352       struct iris_rasterizer_state *cso = ice->state.cso_rast;
6353       iris_batch_emit(batch, cso->raster, sizeof(cso->raster));
6354 
6355       uint32_t dynamic_sf[GENX(3DSTATE_SF_length)];
6356       iris_pack_command(GENX(3DSTATE_SF), &dynamic_sf, sf) {
6357          sf.ViewportTransformEnable = !ice->state.window_space_position;
6358 
6359 #if GFX_VER >= 12
6360          sf.DerefBlockSize = ice->state.urb_deref_block_size;
6361 #endif
6362       }
6363       iris_emit_merge(batch, cso->sf, dynamic_sf,
6364                       ARRAY_SIZE(dynamic_sf));
6365    }
6366 
6367    if (dirty & IRIS_DIRTY_WM) {
6368       struct iris_rasterizer_state *cso = ice->state.cso_rast;
6369       uint32_t dynamic_wm[GENX(3DSTATE_WM_length)];
6370 
6371       iris_pack_command(GENX(3DSTATE_WM), &dynamic_wm, wm) {
6372          wm.StatisticsEnable = ice->state.statistics_counters_enabled;
6373 
6374          wm.BarycentricInterpolationMode =
6375             wm_prog_data->barycentric_interp_modes;
6376 
6377          if (wm_prog_data->early_fragment_tests)
6378             wm.EarlyDepthStencilControl = EDSC_PREPS;
6379          else if (wm_prog_data->has_side_effects)
6380             wm.EarlyDepthStencilControl = EDSC_PSEXEC;
6381 
6382          /* We could skip this bit if color writes are enabled. */
6383          if (wm_prog_data->has_side_effects || wm_prog_data->uses_kill)
6384             wm.ForceThreadDispatchEnable = ForceON;
6385       }
6386       iris_emit_merge(batch, cso->wm, dynamic_wm, ARRAY_SIZE(cso->wm));
6387    }
6388 
6389    if (dirty & IRIS_DIRTY_SBE) {
6390       iris_emit_sbe(batch, ice);
6391    }
6392 
6393    if (dirty & IRIS_DIRTY_PS_BLEND) {
6394       struct iris_blend_state *cso_blend = ice->state.cso_blend;
6395       struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
6396       const struct shader_info *fs_info =
6397          iris_get_shader_info(ice, MESA_SHADER_FRAGMENT);
6398 
6399       uint32_t dynamic_pb[GENX(3DSTATE_PS_BLEND_length)];
6400       iris_pack_command(GENX(3DSTATE_PS_BLEND), &dynamic_pb, pb) {
6401          pb.HasWriteableRT = has_writeable_rt(cso_blend, fs_info);
6402          pb.AlphaTestEnable = cso_zsa->alpha_enabled;
6403 
6404          /* The dual source blending docs caution against using SRC1 factors
6405           * when the shader doesn't use a dual source render target write.
6406           * Empirically, this can lead to GPU hangs, and the results are
6407           * undefined anyway, so simply disable blending to avoid the hang.
6408           */
6409          pb.ColorBufferBlendEnable = (cso_blend->blend_enables & 1) &&
6410             (!cso_blend->dual_color_blending || wm_prog_data->dual_src_blend);
6411       }
6412 
6413       iris_emit_merge(batch, cso_blend->ps_blend, dynamic_pb,
6414                       ARRAY_SIZE(cso_blend->ps_blend));
6415    }
6416 
6417    if (dirty & IRIS_DIRTY_WM_DEPTH_STENCIL) {
6418       struct iris_depth_stencil_alpha_state *cso = ice->state.cso_zsa;
6419 #if GFX_VER >= 9 && GFX_VER < 12
6420       struct pipe_stencil_ref *p_stencil_refs = &ice->state.stencil_ref;
6421       uint32_t stencil_refs[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
6422       iris_pack_command(GENX(3DSTATE_WM_DEPTH_STENCIL), &stencil_refs, wmds) {
6423          wmds.StencilReferenceValue = p_stencil_refs->ref_value[0];
6424          wmds.BackfaceStencilReferenceValue = p_stencil_refs->ref_value[1];
6425       }
6426       iris_emit_merge(batch, cso->wmds, stencil_refs, ARRAY_SIZE(cso->wmds));
6427 #else
6428       /* Use modify disable fields which allow us to emit packets
6429        * directly instead of merging them later.
6430        */
6431       iris_batch_emit(batch, cso->wmds, sizeof(cso->wmds));
6432 #endif
6433 
6434 #if GFX_VER >= 12
6435       iris_batch_emit(batch, cso->depth_bounds, sizeof(cso->depth_bounds));
6436 #endif
6437    }
6438 
6439    if (dirty & IRIS_DIRTY_STENCIL_REF) {
6440 #if GFX_VER >= 12
6441       /* Use modify disable fields which allow us to emit packets
6442        * directly instead of merging them later.
6443        */
6444       struct pipe_stencil_ref *p_stencil_refs = &ice->state.stencil_ref;
6445       uint32_t stencil_refs[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
6446       iris_pack_command(GENX(3DSTATE_WM_DEPTH_STENCIL), &stencil_refs, wmds) {
6447          wmds.StencilReferenceValue = p_stencil_refs->ref_value[0];
6448          wmds.BackfaceStencilReferenceValue = p_stencil_refs->ref_value[1];
6449          wmds.StencilTestMaskModifyDisable = true;
6450          wmds.StencilWriteMaskModifyDisable = true;
6451          wmds.StencilStateModifyDisable = true;
6452          wmds.DepthStateModifyDisable = true;
6453       }
6454       iris_batch_emit(batch, stencil_refs, sizeof(stencil_refs));
6455 #endif
6456    }
6457 
6458    if (dirty & IRIS_DIRTY_SCISSOR_RECT) {
6459       /* Wa_1409725701:
6460        *    "The viewport-specific state used by the SF unit (SCISSOR_RECT) is
6461        *    stored as an array of up to 16 elements. The location of first
6462        *    element of the array, as specified by Pointer to SCISSOR_RECT,
6463        *    should be aligned to a 64-byte boundary."
6464        */
6465       uint32_t alignment = 64;
6466       uint32_t scissor_offset =
6467          emit_state(batch, ice->state.dynamic_uploader,
6468                     &ice->state.last_res.scissor,
6469                     ice->state.scissors,
6470                     sizeof(struct pipe_scissor_state) *
6471                     ice->state.num_viewports, alignment);
6472 
6473       iris_emit_cmd(batch, GENX(3DSTATE_SCISSOR_STATE_POINTERS), ptr) {
6474          ptr.ScissorRectPointer = scissor_offset;
6475       }
6476    }
6477 
6478    if (dirty & IRIS_DIRTY_DEPTH_BUFFER) {
6479       struct iris_depth_buffer_state *cso_z = &ice->state.genx->depth_buffer;
6480 
6481       /* Do not emit the cso yet. We may need to update clear params first. */
6482       struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
6483       struct iris_resource *zres = NULL, *sres = NULL;
6484       if (cso_fb->zsbuf) {
6485          iris_get_depth_stencil_resources(cso_fb->zsbuf->texture,
6486                                           &zres, &sres);
6487       }
6488 
6489       if (zres && ice->state.hiz_usage != ISL_AUX_USAGE_NONE) {
6490          uint32_t *clear_params =
6491             cso_z->packets + ARRAY_SIZE(cso_z->packets) -
6492             GENX(3DSTATE_CLEAR_PARAMS_length);
6493 
6494          iris_pack_command(GENX(3DSTATE_CLEAR_PARAMS), clear_params, clear) {
6495             clear.DepthClearValueValid = true;
6496             clear.DepthClearValue = zres->aux.clear_color.f32[0];
6497          }
6498       }
6499 
6500       iris_batch_emit(batch, cso_z->packets, sizeof(cso_z->packets));
6501 
6502       if (zres)
6503          genX(emit_depth_state_workarounds)(ice, batch, &zres->surf);
6504 
6505       if (GFX_VER >= 12) {
6506          /* Wa_1408224581
6507           *
6508          * Workaround: Gfx12LP A-step only.  An additional pipe control with
6509          * post-sync = store dword operation is required (the w/a is to
6510          * emit an additional pipe control after the stencil state whenever
6511          * the surface state bits of this state change).
6512           *
6513           * This also seems sufficient to handle Wa_14014148106.
6514           */
6515          iris_emit_pipe_control_write(batch, "WA for stencil state",
6516                                       PIPE_CONTROL_WRITE_IMMEDIATE,
6517                                       screen->workaround_address.bo,
6518                                       screen->workaround_address.offset, 0);
6519       }
6520    }
6521 
6522    if (dirty & (IRIS_DIRTY_DEPTH_BUFFER | IRIS_DIRTY_WM_DEPTH_STENCIL)) {
6523       /* Listen for buffer changes, and also write enable changes. */
6524       struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
6525       pin_depth_and_stencil_buffers(batch, cso_fb->zsbuf, ice->state.cso_zsa);
6526    }
6527 
6528    if (dirty & IRIS_DIRTY_POLYGON_STIPPLE) {
6529       iris_emit_cmd(batch, GENX(3DSTATE_POLY_STIPPLE_PATTERN), poly) {
6530          for (int i = 0; i < 32; i++) {
6531             poly.PatternRow[i] = ice->state.poly_stipple.stipple[i];
6532          }
6533       }
6534    }
6535 
6536    if (dirty & IRIS_DIRTY_LINE_STIPPLE) {
6537       struct iris_rasterizer_state *cso = ice->state.cso_rast;
6538       iris_batch_emit(batch, cso->line_stipple, sizeof(cso->line_stipple));
6539    }
6540 
6541    if (dirty & IRIS_DIRTY_VF_TOPOLOGY) {
6542       iris_emit_cmd(batch, GENX(3DSTATE_VF_TOPOLOGY), topo) {
6543          topo.PrimitiveTopologyType =
6544             translate_prim_type(draw->mode, ice->state.vertices_per_patch);
6545       }
6546    }
6547 
6548    if (dirty & IRIS_DIRTY_VERTEX_BUFFERS) {
6549       int count = util_bitcount64(ice->state.bound_vertex_buffers);
6550       uint64_t dynamic_bound = ice->state.bound_vertex_buffers;
6551 
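      /* Draw-parameter buffers are appended after the application's vertex
       * buffers, so they occupy the next free VB slots starting at 'count'.
       */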
6552       if (ice->state.vs_uses_draw_params) {
6553          assert(ice->draw.draw_params.res);
6554 
6555          struct iris_vertex_buffer_state *state =
6556             &(ice->state.genx->vertex_buffers[count]);
6557          pipe_resource_reference(&state->resource, ice->draw.draw_params.res);
6558          struct iris_resource *res = (void *) state->resource;
6559 
6560          iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) {
6561             vb.VertexBufferIndex = count;
6562             vb.AddressModifyEnable = true;
6563             vb.BufferPitch = 0;
6564             vb.BufferSize = res->bo->size - ice->draw.draw_params.offset;
6565             vb.BufferStartingAddress =
6566                ro_bo(NULL, res->bo->address +
6567                            (int) ice->draw.draw_params.offset);
6568             vb.MOCS = iris_mocs(res->bo, &screen->isl_dev,
6569                                 ISL_SURF_USAGE_VERTEX_BUFFER_BIT);
6570 #if GFX_VER >= 12
6571             vb.L3BypassDisable       = true;
6572 #endif
6573          }
6574          dynamic_bound |= 1ull << count;
6575          count++;
6576       }
6577 
6578       if (ice->state.vs_uses_derived_draw_params) {
6579          struct iris_vertex_buffer_state *state =
6580             &(ice->state.genx->vertex_buffers[count]);
6581          pipe_resource_reference(&state->resource,
6582                                  ice->draw.derived_draw_params.res);
6583          struct iris_resource *res = (void *) ice->draw.derived_draw_params.res;
6584 
6585          iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) {
6586             vb.VertexBufferIndex = count;
6587             vb.AddressModifyEnable = true;
6588             vb.BufferPitch = 0;
6589             vb.BufferSize =
6590                res->bo->size - ice->draw.derived_draw_params.offset;
6591             vb.BufferStartingAddress =
6592                ro_bo(NULL, res->bo->address +
6593                            (int) ice->draw.derived_draw_params.offset);
6594             vb.MOCS = iris_mocs(res->bo, &screen->isl_dev,
6595                                 ISL_SURF_USAGE_VERTEX_BUFFER_BIT);
6596 #if GFX_VER >= 12
6597             vb.L3BypassDisable       = true;
6598 #endif
6599          }
6600          dynamic_bound |= 1ull << count;
6601          count++;
6602       }
6603 
6604       if (count) {
6605 #if GFX_VER >= 11
6606          /* Gfx11+ doesn't need the cache workaround below */
6607          uint64_t bound = dynamic_bound;
6608          while (bound) {
6609             const int i = u_bit_scan64(&bound);
6610             iris_use_optional_res(batch, genx->vertex_buffers[i].resource,
6611                                   false, IRIS_DOMAIN_VF_READ);
6612          }
6613 #else
6614          /* The VF cache designers cut corners, and made the cache key's
6615           * <VertexBufferIndex, Memory Address> tuple only consider the bottom
6616           * 32 bits of the address.  If you have two vertex buffers which get
6617           * placed exactly 4 GiB apart and use them in back-to-back draw calls,
6618           * you can get collisions (even within a single batch).
6619           *
6620           * So, we need to do a VF cache invalidate if the buffer for a VB
6621          * slot changes its [48:32] address bits from the previous time.
6622           */
6623          unsigned flush_flags = 0;
6624 
6625          uint64_t bound = dynamic_bound;
6626          while (bound) {
6627             const int i = u_bit_scan64(&bound);
6628             uint16_t high_bits = 0;
6629 
6630             struct iris_resource *res =
6631                (void *) genx->vertex_buffers[i].resource;
6632             if (res) {
6633                iris_use_pinned_bo(batch, res->bo, false, IRIS_DOMAIN_VF_READ);
6634 
6635                high_bits = res->bo->address >> 32ull;
6636                if (high_bits != ice->state.last_vbo_high_bits[i]) {
6637                   flush_flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE |
6638                                  PIPE_CONTROL_CS_STALL;
6639                   ice->state.last_vbo_high_bits[i] = high_bits;
6640                }
6641             }
6642          }
6643 
6644          if (flush_flags) {
6645             iris_emit_pipe_control_flush(batch,
6646                                          "workaround: VF cache 32-bit key [VB]",
6647                                          flush_flags);
6648          }
6649 #endif
6650 
6651          const unsigned vb_dwords = GENX(VERTEX_BUFFER_STATE_length);
6652 
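         /* Emit the 3DSTATE_VERTEX_BUFFERS header ourselves so DWordLength
          * can cover a variable number of buffers (total command DWords
          * minus the usual bias of 2), then memcpy each pre-packed
          * VERTEX_BUFFER_STATE after it.
          */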
6653          uint32_t *map =
6654             iris_get_command_space(batch, 4 * (1 + vb_dwords * count));
6655          _iris_pack_command(batch, GENX(3DSTATE_VERTEX_BUFFERS), map, vb) {
6656             vb.DWordLength = (vb_dwords * count + 1) - 2;
6657          }
6658          map += 1;
6659 
6660          bound = dynamic_bound;
6661          while (bound) {
6662             const int i = u_bit_scan64(&bound);
6663             memcpy(map, genx->vertex_buffers[i].state,
6664                    sizeof(uint32_t) * vb_dwords);
6665             map += vb_dwords;
6666          }
6667       }
6668    }
6669 
6670    if (dirty & IRIS_DIRTY_VERTEX_ELEMENTS) {
6671       struct iris_vertex_element_state *cso = ice->state.cso_vertex_elements;
6672       const unsigned entries = MAX2(cso->count, 1);
6673       if (!(ice->state.vs_needs_sgvs_element ||
6674             ice->state.vs_uses_derived_draw_params ||
6675             ice->state.vs_needs_edge_flag)) {
6676          iris_batch_emit(batch, cso->vertex_elements, sizeof(uint32_t) *
6677                          (1 + entries * GENX(VERTEX_ELEMENT_STATE_length)));
6678       } else {
6679          uint32_t dynamic_ves[1 + 33 * GENX(VERTEX_ELEMENT_STATE_length)];
6680          const unsigned dyn_count = cso->count +
6681             ice->state.vs_needs_sgvs_element +
6682             ice->state.vs_uses_derived_draw_params;
6683 
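         /* Pack a fresh 3DSTATE_VERTEX_ELEMENTS header sized for the CSO's
          * elements plus any SGVS / derived-draw-parameter elements
          * (DWordLength is total DWords minus 2), then copy in the CSO's
          * elements and append the extras below.
          */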
6684          iris_pack_command(GENX(3DSTATE_VERTEX_ELEMENTS),
6685                            &dynamic_ves, ve) {
6686             ve.DWordLength =
6687                1 + GENX(VERTEX_ELEMENT_STATE_length) * dyn_count - 2;
6688          }
6689          memcpy(&dynamic_ves[1], &cso->vertex_elements[1],
6690                 (cso->count - ice->state.vs_needs_edge_flag) *
6691                 GENX(VERTEX_ELEMENT_STATE_length) * sizeof(uint32_t));
6692          uint32_t *ve_pack_dest =
6693             &dynamic_ves[1 + (cso->count - ice->state.vs_needs_edge_flag) *
6694                          GENX(VERTEX_ELEMENT_STATE_length)];
6695 
6696          if (ice->state.vs_needs_sgvs_element) {
6697             uint32_t base_ctrl = ice->state.vs_uses_draw_params ?
6698                                  VFCOMP_STORE_SRC : VFCOMP_STORE_0;
6699             iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
6700                ve.Valid = true;
6701                ve.VertexBufferIndex =
6702                   util_bitcount64(ice->state.bound_vertex_buffers);
6703                ve.SourceElementFormat = ISL_FORMAT_R32G32_UINT;
6704                ve.Component0Control = base_ctrl;
6705                ve.Component1Control = base_ctrl;
6706                ve.Component2Control = VFCOMP_STORE_0;
6707                ve.Component3Control = VFCOMP_STORE_0;
6708             }
6709             ve_pack_dest += GENX(VERTEX_ELEMENT_STATE_length);
6710          }
6711          if (ice->state.vs_uses_derived_draw_params) {
6712             iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
6713                ve.Valid = true;
6714                ve.VertexBufferIndex =
6715                   util_bitcount64(ice->state.bound_vertex_buffers) +
6716                   ice->state.vs_uses_draw_params;
6717                ve.SourceElementFormat = ISL_FORMAT_R32G32_UINT;
6718                ve.Component0Control = VFCOMP_STORE_SRC;
6719                ve.Component1Control = VFCOMP_STORE_SRC;
6720                ve.Component2Control = VFCOMP_STORE_0;
6721                ve.Component3Control = VFCOMP_STORE_0;
6722             }
6723             ve_pack_dest += GENX(VERTEX_ELEMENT_STATE_length);
6724          }
6725          if (ice->state.vs_needs_edge_flag) {
6726             for (int i = 0; i < GENX(VERTEX_ELEMENT_STATE_length);  i++)
6727                ve_pack_dest[i] = cso->edgeflag_ve[i];
6728          }
6729 
6730          iris_batch_emit(batch, &dynamic_ves, sizeof(uint32_t) *
6731                          (1 + dyn_count * GENX(VERTEX_ELEMENT_STATE_length)));
6732       }
6733 
6734       if (!ice->state.vs_needs_edge_flag) {
6735          iris_batch_emit(batch, cso->vf_instancing, sizeof(uint32_t) *
6736                          entries * GENX(3DSTATE_VF_INSTANCING_length));
6737       } else {
6738          assert(cso->count > 0);
6739          const unsigned edgeflag_index = cso->count - 1;
6740          uint32_t dynamic_vfi[33 * GENX(3DSTATE_VF_INSTANCING_length)];
6741          memcpy(&dynamic_vfi[0], cso->vf_instancing, edgeflag_index *
6742                 GENX(3DSTATE_VF_INSTANCING_length) * sizeof(uint32_t));
6743 
6744          uint32_t *vfi_pack_dest = &dynamic_vfi[0] +
6745             edgeflag_index * GENX(3DSTATE_VF_INSTANCING_length);
6746          iris_pack_command(GENX(3DSTATE_VF_INSTANCING), vfi_pack_dest, vi) {
6747             vi.VertexElementIndex = edgeflag_index +
6748                ice->state.vs_needs_sgvs_element +
6749                ice->state.vs_uses_derived_draw_params;
6750          }
6751          for (int i = 0; i < GENX(3DSTATE_VF_INSTANCING_length);  i++)
6752             vfi_pack_dest[i] |= cso->edgeflag_vfi[i];
6753 
6754          iris_batch_emit(batch, &dynamic_vfi[0], sizeof(uint32_t) *
6755                          entries * GENX(3DSTATE_VF_INSTANCING_length));
6756       }
6757    }
6758 
6759    if (dirty & IRIS_DIRTY_VF_SGVS) {
6760       const struct brw_vs_prog_data *vs_prog_data = (void *)
6761          ice->shaders.prog[MESA_SHADER_VERTEX]->prog_data;
6762       struct iris_vertex_element_state *cso = ice->state.cso_vertex_elements;
6763 
6764       iris_emit_cmd(batch, GENX(3DSTATE_VF_SGVS), sgv) {
6765          if (vs_prog_data->uses_vertexid) {
6766             sgv.VertexIDEnable = true;
6767             sgv.VertexIDComponentNumber = 2;
6768             sgv.VertexIDElementOffset =
6769                cso->count - ice->state.vs_needs_edge_flag;
6770          }
6771 
6772          if (vs_prog_data->uses_instanceid) {
6773             sgv.InstanceIDEnable = true;
6774             sgv.InstanceIDComponentNumber = 3;
6775             sgv.InstanceIDElementOffset =
6776                cso->count - ice->state.vs_needs_edge_flag;
6777          }
6778       }
6779    }
6780 
6781    if (dirty & IRIS_DIRTY_VF) {
6782       iris_emit_cmd(batch, GENX(3DSTATE_VF), vf) {
6783 #if GFX_VERx10 >= 125
6784          vf.GeometryDistributionEnable = true;
6785 #endif
6786          if (draw->primitive_restart) {
6787             vf.IndexedDrawCutIndexEnable = true;
6788             vf.CutIndex = draw->restart_index;
6789          }
6790       }
6791    }
6792 
6793 #if GFX_VERx10 >= 125
6794    if (dirty & IRIS_DIRTY_VFG) {
6795       iris_emit_cmd(batch, GENX(3DSTATE_VFG), vfg) {
6796          /* If 3DSTATE_TE: TE Enable == 1 then RR_STRICT else RR_FREE */
6797          vfg.DistributionMode =
6798             ice->shaders.prog[MESA_SHADER_TESS_EVAL] != NULL ? RR_STRICT :
6799                                                                RR_FREE;
6800          vfg.DistributionGranularity = BatchLevelGranularity;
6801          /* Wa_14014890652 */
6802          if (intel_device_info_is_dg2(&batch->screen->devinfo))
6803             vfg.GranularityThresholdDisable = 1;
6804          vfg.ListCutIndexEnable = draw->primitive_restart;
6805          /* 192 vertices for TRILIST_ADJ */
6806          vfg.ListNBatchSizeScale = 0;
6807          /* Batch size of 384 vertices */
6808          vfg.List3BatchSizeScale = 2;
6809          /* Batch size of 128 vertices */
6810          vfg.List2BatchSizeScale = 1;
6811          /* Batch size of 128 vertices */
6812          vfg.List1BatchSizeScale = 2;
6813          /* Batch size of 256 vertices for STRIP topologies */
6814          vfg.StripBatchSizeScale = 3;
6815          /* 192 control points for PATCHLIST_3 */
6816          vfg.PatchBatchSizeScale = 1;
6817          /* 192 control points for PATCHLIST_3 */
6818          vfg.PatchBatchSizeMultiplier = 31;
6819       }
6820    }
6821 #endif
6822 
6823    if (dirty & IRIS_DIRTY_VF_STATISTICS) {
6824       iris_emit_cmd(batch, GENX(3DSTATE_VF_STATISTICS), vf) {
6825          vf.StatisticsEnable = true;
6826       }
6827    }
6828 
6829 #if GFX_VER == 8
6830    if (dirty & IRIS_DIRTY_PMA_FIX) {
6831       bool enable = want_pma_fix(ice);
6832       genX(update_pma_fix)(ice, batch, enable);
6833    }
6834 #endif
6835 
6836    if (ice->state.current_hash_scale != 1)
6837       genX(emit_hashing_mode)(ice, batch, UINT_MAX, UINT_MAX, 1);
6838 
6839 #if GFX_VER >= 12
6840    genX(invalidate_aux_map_state)(batch);
6841 #endif
6842 }
6843 
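/**
 * Emit any buffer barriers needed so the VF unit can safely read the
 * currently bound vertex buffers (tracked as IRIS_DOMAIN_VF_READ).
 */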
6844 static void
6845 flush_vbos(struct iris_context *ice, struct iris_batch *batch)
6846 {
6847    struct iris_genx_state *genx = ice->state.genx;
6848    uint64_t bound = ice->state.bound_vertex_buffers;
6849    while (bound) {
6850       const int i = u_bit_scan64(&bound);
6851       struct iris_bo *bo = iris_resource_bo(genx->vertex_buffers[i].resource);
6852       iris_emit_buffer_barrier_for(batch, bo, IRIS_DOMAIN_VF_READ);
6853    }
6854 }
6855 
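/**
 * Upload any dirty render state and emit the 3DPRIMITIVE command for a
 * draw, handling index buffer setup and direct, indirect, and
 * stream-output-sourced draw parameters.
 */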
6856 static void
6857 iris_upload_render_state(struct iris_context *ice,
6858                          struct iris_batch *batch,
6859                          const struct pipe_draw_info *draw,
6860                          unsigned drawid_offset,
6861                          const struct pipe_draw_indirect_info *indirect,
6862                          const struct pipe_draw_start_count_bias *sc)
6863 {
6864    bool use_predicate = ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT;
6865 
6866    trace_intel_begin_draw(&batch->trace, batch);
6867 
6868    if (ice->state.dirty & IRIS_DIRTY_VERTEX_BUFFER_FLUSHES)
6869       flush_vbos(ice, batch);
6870 
6871    iris_batch_sync_region_start(batch);
6872 
6873    /* Always pin the binder.  If we're emitting new binding table pointers,
6874     * we need it.  If not, we're probably inheriting old tables via the
6875     * context, and need it anyway.  Since true zero-bindings cases are
6876     * practically non-existent, just pin it and avoid last_res tracking.
6877     */
6878    iris_use_pinned_bo(batch, ice->state.binder.bo, false,
6879                       IRIS_DOMAIN_NONE);
6880 
6881    if (!batch->contains_draw) {
6882       if (GFX_VER == 12) {
6883          /* Re-emit constants when starting a new batch buffer in order to
6884           * work around push constant corruption on context switch.
6885           *
6886           * XXX - Provide hardware spec quotation when available.
6887           */
6888          ice->state.stage_dirty |= (IRIS_STAGE_DIRTY_CONSTANTS_VS  |
6889                                     IRIS_STAGE_DIRTY_CONSTANTS_TCS |
6890                                     IRIS_STAGE_DIRTY_CONSTANTS_TES |
6891                                     IRIS_STAGE_DIRTY_CONSTANTS_GS  |
6892                                     IRIS_STAGE_DIRTY_CONSTANTS_FS);
6893       }
6894       batch->contains_draw = true;
6895    }
6896 
6897    if (!batch->contains_draw_with_next_seqno) {
6898       iris_restore_render_saved_bos(ice, batch, draw);
6899       batch->contains_draw_with_next_seqno = true;
6900    }
6901 
6902    iris_upload_dirty_render_state(ice, batch, draw);
6903 
6904    if (draw->index_size > 0) {
6905       unsigned offset;
6906 
6907       if (draw->has_user_indices) {
6908          unsigned start_offset = draw->index_size * sc->start;
6909 
6910          u_upload_data(ice->ctx.const_uploader, start_offset,
6911                        sc->count * draw->index_size, 4,
6912                        (char*)draw->index.user + start_offset,
6913                        &offset, &ice->state.last_res.index_buffer);
6914          offset -= start_offset;
6915       } else {
6916          struct iris_resource *res = (void *) draw->index.resource;
6917          res->bind_history |= PIPE_BIND_INDEX_BUFFER;
6918 
6919          pipe_resource_reference(&ice->state.last_res.index_buffer,
6920                                  draw->index.resource);
6921          offset = 0;
6922 
6923          iris_emit_buffer_barrier_for(batch, res->bo, IRIS_DOMAIN_VF_READ);
6924       }
6925 
6926       struct iris_genx_state *genx = ice->state.genx;
6927       struct iris_bo *bo = iris_resource_bo(ice->state.last_res.index_buffer);
6928 
6929       uint32_t ib_packet[GENX(3DSTATE_INDEX_BUFFER_length)];
6930       iris_pack_command(GENX(3DSTATE_INDEX_BUFFER), ib_packet, ib) {
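         /* index_size is 1, 2, or 4 bytes; >> 1 maps that to the hardware
          * index format encoding (0 = byte, 1 = word, 2 = dword).
          */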
6931          ib.IndexFormat = draw->index_size >> 1;
6932          ib.MOCS = iris_mocs(bo, &batch->screen->isl_dev,
6933                              ISL_SURF_USAGE_INDEX_BUFFER_BIT);
6934          ib.BufferSize = bo->size - offset;
6935          ib.BufferStartingAddress = ro_bo(NULL, bo->address + offset);
6936 #if GFX_VER >= 12
6937          ib.L3BypassDisable       = true;
6938 #endif
6939       }
6940 
6941       if (memcmp(genx->last_index_buffer, ib_packet, sizeof(ib_packet)) != 0) {
6942          memcpy(genx->last_index_buffer, ib_packet, sizeof(ib_packet));
6943          iris_batch_emit(batch, ib_packet, sizeof(ib_packet));
6944          iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_VF_READ);
6945       }
6946 
6947 #if GFX_VER < 11
6948       /* The VF cache key only uses 32 bits; see the vertex buffer comment above. */
6949       uint16_t high_bits = bo->address >> 32ull;
6950       if (high_bits != ice->state.last_index_bo_high_bits) {
6951          iris_emit_pipe_control_flush(batch,
6952                                       "workaround: VF cache 32-bit key [IB]",
6953                                       PIPE_CONTROL_VF_CACHE_INVALIDATE |
6954                                       PIPE_CONTROL_CS_STALL);
6955          ice->state.last_index_bo_high_bits = high_bits;
6956       }
6957 #endif
6958    }
6959 
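/* MMIO registers that provide the 3DPRIMITIVE arguments when "Indirect
 * Parameter Enable" is set.  For indirect draws we load them from the
 * application's indirect buffer via the MI builder below.
 */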
6960 #define _3DPRIM_END_OFFSET          0x2420
6961 #define _3DPRIM_START_VERTEX        0x2430
6962 #define _3DPRIM_VERTEX_COUNT        0x2434
6963 #define _3DPRIM_INSTANCE_COUNT      0x2438
6964 #define _3DPRIM_START_INSTANCE      0x243C
6965 #define _3DPRIM_BASE_VERTEX         0x2440
6966 
6967    struct mi_builder b;
6968    mi_builder_init(&b, &batch->screen->devinfo, batch);
6969 
6970    if (indirect && !indirect->count_from_stream_output) {
6971       if (indirect->indirect_draw_count) {
6972          use_predicate = true;
6973 
6974          struct iris_bo *draw_count_bo =
6975             iris_resource_bo(indirect->indirect_draw_count);
6976          unsigned draw_count_offset =
6977             indirect->indirect_draw_count_offset;
6978 
6979          if (ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT) {
6980             /* comparison = draw id < draw count */
6981             struct mi_value comparison =
6982                mi_ult(&b, mi_imm(drawid_offset),
6983                           mi_mem32(ro_bo(draw_count_bo, draw_count_offset)));
6984 
6985             /* predicate = comparison & conditional rendering predicate */
6986             mi_store(&b, mi_reg32(MI_PREDICATE_RESULT),
6987                          mi_iand(&b, comparison, mi_reg32(CS_GPR(15))));
6988          } else {
6989             uint32_t mi_predicate;
6990 
6991             /* Upload the id of the current primitive to MI_PREDICATE_SRC1. */
6992             mi_store(&b, mi_reg64(MI_PREDICATE_SRC1), mi_imm(drawid_offset));
6993             /* Upload the current draw count from the draw parameters buffer
6994              * to MI_PREDICATE_SRC0. Zero the top 32-bits of
6995              * MI_PREDICATE_SRC0.
6996              */
6997             mi_store(&b, mi_reg64(MI_PREDICATE_SRC0),
6998                      mi_mem32(ro_bo(draw_count_bo, draw_count_offset)));
6999 
7000             if (drawid_offset == 0) {
7001                mi_predicate = MI_PREDICATE | MI_PREDICATE_LOADOP_LOADINV |
7002                               MI_PREDICATE_COMBINEOP_SET |
7003                               MI_PREDICATE_COMPAREOP_SRCS_EQUAL;
7004             } else {
7005                /* While draw_index < draw_count the predicate's result will be
7006                 *  (draw_index == draw_count) ^ TRUE = TRUE
7007                 * When draw_index == draw_count the result is
7008                 *  (TRUE) ^ TRUE = FALSE
7009                 * After this all results will be:
7010                 *  (FALSE) ^ FALSE = FALSE
7011                 */
7012                mi_predicate = MI_PREDICATE | MI_PREDICATE_LOADOP_LOAD |
7013                               MI_PREDICATE_COMBINEOP_XOR |
7014                               MI_PREDICATE_COMPAREOP_SRCS_EQUAL;
7015             }
7016             iris_batch_emit(batch, &mi_predicate, sizeof(uint32_t));
7017          }
7018       }
7019       struct iris_bo *bo = iris_resource_bo(indirect->buffer);
7020       assert(bo);
7021 
7022       mi_store(&b, mi_reg32(_3DPRIM_VERTEX_COUNT),
7023                mi_mem32(ro_bo(bo, indirect->offset + 0)));
7024       mi_store(&b, mi_reg32(_3DPRIM_INSTANCE_COUNT),
7025                mi_mem32(ro_bo(bo, indirect->offset + 4)));
7026       mi_store(&b, mi_reg32(_3DPRIM_START_VERTEX),
7027                mi_mem32(ro_bo(bo, indirect->offset + 8)));
7028       if (draw->index_size) {
7029          mi_store(&b, mi_reg32(_3DPRIM_BASE_VERTEX),
7030                   mi_mem32(ro_bo(bo, indirect->offset + 12)));
7031          mi_store(&b, mi_reg32(_3DPRIM_START_INSTANCE),
7032                   mi_mem32(ro_bo(bo, indirect->offset + 16)));
7033       } else {
7034          mi_store(&b, mi_reg32(_3DPRIM_START_INSTANCE),
7035                   mi_mem32(ro_bo(bo, indirect->offset + 12)));
7036          mi_store(&b, mi_reg32(_3DPRIM_BASE_VERTEX), mi_imm(0));
7037       }
7038    } else if (indirect && indirect->count_from_stream_output) {
7039       struct iris_stream_output_target *so =
7040          (void *) indirect->count_from_stream_output;
7041 
7042       /* XXX: Replace with actual cache tracking */
7043       iris_emit_pipe_control_flush(batch,
7044                                    "draw count from stream output stall",
7045                                    PIPE_CONTROL_CS_STALL);
7046 
7047       struct iris_address addr =
7048          ro_bo(iris_resource_bo(so->offset.res), so->offset.offset);
7049       struct mi_value offset =
7050          mi_iadd_imm(&b, mi_mem32(addr), -so->base.buffer_offset);
7051       mi_store(&b, mi_reg32(_3DPRIM_VERTEX_COUNT),
7052                    mi_udiv32_imm(&b, offset, so->stride));
7053       mi_store(&b, mi_reg32(_3DPRIM_START_VERTEX), mi_imm(0));
7054       mi_store(&b, mi_reg32(_3DPRIM_BASE_VERTEX), mi_imm(0));
7055       mi_store(&b, mi_reg32(_3DPRIM_START_INSTANCE), mi_imm(0));
7056       mi_store(&b, mi_reg32(_3DPRIM_INSTANCE_COUNT),
7057                mi_imm(draw->instance_count));
7058    }
7059 
7060    iris_measure_snapshot(ice, batch, INTEL_SNAPSHOT_DRAW, draw, indirect, sc);
7061 
7062    iris_emit_cmd(batch, GENX(3DPRIMITIVE), prim) {
7063       prim.VertexAccessType = draw->index_size > 0 ? RANDOM : SEQUENTIAL;
7064       prim.PredicateEnable = use_predicate;
7065 
7066       if (indirect) {
7067          prim.IndirectParameterEnable = true;
7068       } else {
7069          prim.StartInstanceLocation = draw->start_instance;
7070          prim.InstanceCount = draw->instance_count;
7071          prim.VertexCountPerInstance = sc->count;
7072 
7073          prim.StartVertexLocation = sc->start;
7074 
7075          if (draw->index_size) {
7076             prim.BaseVertexLocation += sc->index_bias;
7077          }
7078       }
7079    }
7080 
7081    iris_batch_sync_region_end(batch);
7082 
7083    trace_intel_end_draw(&batch->trace, batch, 0);
7084 }
7085 
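/**
 * Load the indirect dispatch dimensions from the grid-size buffer into the
 * GPGPU_DISPATCHDIM{X,Y,Z} registers consumed by the walker command.
 */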
7086 static void
7087 iris_load_indirect_location(struct iris_context *ice,
7088                             struct iris_batch *batch,
7089                             const struct pipe_grid_info *grid)
7090 {
7091 #define GPGPU_DISPATCHDIMX 0x2500
7092 #define GPGPU_DISPATCHDIMY 0x2504
7093 #define GPGPU_DISPATCHDIMZ 0x2508
7094 
7095    assert(grid->indirect);
7096 
7097    struct iris_state_ref *grid_size = &ice->state.grid_size;
7098    struct iris_bo *bo = iris_resource_bo(grid_size->res);
7099    struct mi_builder b;
7100    mi_builder_init(&b, &batch->screen->devinfo, batch);
7101    struct mi_value size_x = mi_mem32(ro_bo(bo, grid_size->offset + 0));
7102    struct mi_value size_y = mi_mem32(ro_bo(bo, grid_size->offset + 4));
7103    struct mi_value size_z = mi_mem32(ro_bo(bo, grid_size->offset + 8));
7104    mi_store(&b, mi_reg32(GPGPU_DISPATCHDIMX), size_x);
7105    mi_store(&b, mi_reg32(GPGPU_DISPATCHDIMY), size_y);
7106    mi_store(&b, mi_reg32(GPGPU_DISPATCHDIMZ), size_z);
7107 }
7108 
7109 #if GFX_VERx10 >= 125
7110 
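/**
 * Dispatch compute work on XeHP+ using CFE_STATE and COMPUTE_WALKER, which
 * replace the MEDIA_VFE_STATE / GPGPU_WALKER path used on older hardware.
 */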
7111 static void
7112 iris_upload_compute_walker(struct iris_context *ice,
7113                            struct iris_batch *batch,
7114                            const struct pipe_grid_info *grid)
7115 {
7116    const uint64_t stage_dirty = ice->state.stage_dirty;
7117    struct iris_screen *screen = batch->screen;
7118    const struct intel_device_info *devinfo = &screen->devinfo;
7119    struct iris_binder *binder = &ice->state.binder;
7120    struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
7121    struct iris_compiled_shader *shader =
7122       ice->shaders.prog[MESA_SHADER_COMPUTE];
7123    struct brw_stage_prog_data *prog_data = shader->prog_data;
7124    struct brw_cs_prog_data *cs_prog_data = (void *) prog_data;
7125    const struct brw_cs_dispatch_info dispatch =
7126       brw_cs_get_dispatch_info(devinfo, cs_prog_data, grid->block);
7127 
7128    trace_intel_begin_compute(&batch->trace, batch);
7129 
7130    if (stage_dirty & IRIS_STAGE_DIRTY_CS) {
7131       iris_emit_cmd(batch, GENX(CFE_STATE), cfe) {
7132          cfe.MaximumNumberofThreads =
7133             devinfo->max_cs_threads * devinfo->subslice_total - 1;
7134          uint32_t scratch_addr = pin_scratch_space(ice, batch, prog_data,
7135                                                    MESA_SHADER_COMPUTE);
7136          cfe.ScratchSpaceBuffer = scratch_addr >> 4;
7137       }
7138    }
7139 
7140    if (grid->indirect)
7141       iris_load_indirect_location(ice, batch, grid);
7142 
7143    iris_emit_cmd(batch, GENX(COMPUTE_WALKER), cw) {
7144       cw.IndirectParameterEnable        = grid->indirect;
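      /* SIMDSize encodes the dispatch width as 0, 1, or 2 for SIMD8/16/32,
       * which simd_size / 16 produces directly.
       */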
7145       cw.SIMDSize                       = dispatch.simd_size / 16;
7146       cw.LocalXMaximum                  = grid->block[0] - 1;
7147       cw.LocalYMaximum                  = grid->block[1] - 1;
7148       cw.LocalZMaximum                  = grid->block[2] - 1;
7149       cw.ThreadGroupIDXDimension        = grid->grid[0];
7150       cw.ThreadGroupIDYDimension        = grid->grid[1];
7151       cw.ThreadGroupIDZDimension        = grid->grid[2];
7152       cw.ExecutionMask                  = dispatch.right_mask;
7153       cw.PostSync.MOCS                  = iris_mocs(NULL, &screen->isl_dev, 0);
7154 
7155       cw.InterfaceDescriptor = (struct GENX(INTERFACE_DESCRIPTOR_DATA)) {
7156          .KernelStartPointer = KSP(shader),
7157          .NumberofThreadsinGPGPUThreadGroup = dispatch.threads,
7158          .SharedLocalMemorySize =
7159             encode_slm_size(GFX_VER, prog_data->total_shared),
7160          .NumberOfBarriers = cs_prog_data->uses_barrier,
7161          .SamplerStatePointer = shs->sampler_table.offset,
7162          .BindingTablePointer = binder->bt_offset[MESA_SHADER_COMPUTE],
7163       };
7164 
7165       assert(brw_cs_push_const_total_size(cs_prog_data, dispatch.threads) == 0);
7166    }
7167 
7168    trace_intel_end_compute(&batch->trace, batch, grid->grid[0], grid->grid[1], grid->grid[2]);
7169 }
7170 
7171 #else /* #if GFX_VERx10 >= 125 */
7172 
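/**
 * Dispatch compute work on pre-XeHP hardware using the legacy media
 * pipeline: MEDIA_VFE_STATE, MEDIA_CURBE_LOAD,
 * MEDIA_INTERFACE_DESCRIPTOR_LOAD, and GPGPU_WALKER.
 */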
7173 static void
7174 iris_upload_gpgpu_walker(struct iris_context *ice,
7175                          struct iris_batch *batch,
7176                          const struct pipe_grid_info *grid)
7177 {
7178    const uint64_t stage_dirty = ice->state.stage_dirty;
7179    struct iris_screen *screen = batch->screen;
7180    const struct intel_device_info *devinfo = &screen->devinfo;
7181    struct iris_binder *binder = &ice->state.binder;
7182    struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
7183    struct iris_uncompiled_shader *ish =
7184       ice->shaders.uncompiled[MESA_SHADER_COMPUTE];
7185    struct iris_compiled_shader *shader =
7186       ice->shaders.prog[MESA_SHADER_COMPUTE];
7187    struct brw_stage_prog_data *prog_data = shader->prog_data;
7188    struct brw_cs_prog_data *cs_prog_data = (void *) prog_data;
7189    const struct brw_cs_dispatch_info dispatch =
7190       brw_cs_get_dispatch_info(devinfo, cs_prog_data, grid->block);
7191 
7192    trace_intel_begin_compute(&batch->trace, batch);
7193 
7194    if ((stage_dirty & IRIS_STAGE_DIRTY_CS) ||
7195        cs_prog_data->local_size[0] == 0 /* Variable local group size */) {
7196       /* The MEDIA_VFE_STATE documentation for Gfx8+ says:
7197        *
7198        *   "A stalling PIPE_CONTROL is required before MEDIA_VFE_STATE unless
7199        *    the only bits that are changed are scoreboard related: Scoreboard
7200        *    Enable, Scoreboard Type, Scoreboard Mask, Scoreboard Delta.  For
7201        *    these scoreboard related states, a MEDIA_STATE_FLUSH is
7202        *    sufficient."
7203        */
7204       iris_emit_pipe_control_flush(batch,
7205                                    "workaround: stall before MEDIA_VFE_STATE",
7206                                    PIPE_CONTROL_CS_STALL);
7207 
7208       iris_emit_cmd(batch, GENX(MEDIA_VFE_STATE), vfe) {
7209          if (prog_data->total_scratch) {
7210             uint32_t scratch_addr =
7211                pin_scratch_space(ice, batch, prog_data, MESA_SHADER_COMPUTE);
7212 
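            /* The hardware encodes per-thread scratch as a power of two:
             * 1kB << PerThreadScratchSpace, hence log2(total_scratch) - 10.
             */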
7213             vfe.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11;
7214             vfe.ScratchSpaceBasePointer =
7215                rw_bo(NULL, scratch_addr, IRIS_DOMAIN_NONE);
7216          }
7217 
7218          vfe.MaximumNumberofThreads =
7219             devinfo->max_cs_threads * devinfo->subslice_total - 1;
7220 #if GFX_VER < 11
7221          vfe.ResetGatewayTimer =
7222             Resettingrelativetimerandlatchingtheglobaltimestamp;
7223 #endif
7224 #if GFX_VER == 8
7225          vfe.BypassGatewayControl = true;
7226 #endif
7227          vfe.NumberofURBEntries = 2;
7228          vfe.URBEntryAllocationSize = 2;
7229 
7230          vfe.CURBEAllocationSize =
7231             ALIGN(cs_prog_data->push.per_thread.regs * dispatch.threads +
7232                   cs_prog_data->push.cross_thread.regs, 2);
7233       }
7234    }
7235 
7236    /* TODO: Combine subgroup-id with cbuf0 so we can push regular uniforms */
7237    if ((stage_dirty & IRIS_STAGE_DIRTY_CS) ||
7238        cs_prog_data->local_size[0] == 0 /* Variable local group size */) {
7239       uint32_t curbe_data_offset = 0;
7240       assert(cs_prog_data->push.cross_thread.dwords == 0 &&
7241              cs_prog_data->push.per_thread.dwords == 1 &&
7242              cs_prog_data->base.param[0] == BRW_PARAM_BUILTIN_SUBGROUP_ID);
7243       const unsigned push_const_size =
7244          brw_cs_push_const_total_size(cs_prog_data, dispatch.threads);
7245       uint32_t *curbe_data_map =
7246          stream_state(batch, ice->state.dynamic_uploader,
7247                       &ice->state.last_res.cs_thread_ids,
7248                       ALIGN(push_const_size, 64), 64,
7249                       &curbe_data_offset);
7250       assert(curbe_data_map);
7251       memset(curbe_data_map, 0x5a, ALIGN(push_const_size, 64));
7252       iris_fill_cs_push_const_buffer(cs_prog_data, dispatch.threads,
7253                                      curbe_data_map);
7254 
7255       iris_emit_cmd(batch, GENX(MEDIA_CURBE_LOAD), curbe) {
7256          curbe.CURBETotalDataLength = ALIGN(push_const_size, 64);
7257          curbe.CURBEDataStartAddress = curbe_data_offset;
7258       }
7259    }
7260 
7261    for (unsigned i = 0; i < IRIS_MAX_GLOBAL_BINDINGS; i++) {
7262       struct pipe_resource *res = ice->state.global_bindings[i];
7263       if (!res)
7264          continue;
7265 
7266       iris_use_pinned_bo(batch, iris_resource_bo(res),
7267                          true, IRIS_DOMAIN_NONE);
7268    }
7269 
7270    if (stage_dirty & (IRIS_STAGE_DIRTY_SAMPLER_STATES_CS |
7271                       IRIS_STAGE_DIRTY_BINDINGS_CS |
7272                       IRIS_STAGE_DIRTY_CONSTANTS_CS |
7273                       IRIS_STAGE_DIRTY_CS)) {
7274       uint32_t desc[GENX(INTERFACE_DESCRIPTOR_DATA_length)];
7275 
7276       iris_pack_state(GENX(INTERFACE_DESCRIPTOR_DATA), desc, idd) {
7277          idd.SharedLocalMemorySize =
7278             encode_slm_size(GFX_VER, ish->kernel_shared_size);
7279          idd.KernelStartPointer =
7280             KSP(shader) + brw_cs_prog_data_prog_offset(cs_prog_data,
7281                                                        dispatch.simd_size);
7282          idd.SamplerStatePointer = shs->sampler_table.offset;
7283          idd.BindingTablePointer =
7284             binder->bt_offset[MESA_SHADER_COMPUTE] >> IRIS_BT_OFFSET_SHIFT;
7285          idd.NumberofThreadsinGPGPUThreadGroup = dispatch.threads;
7286       }
7287 
7288       for (int i = 0; i < GENX(INTERFACE_DESCRIPTOR_DATA_length); i++)
7289          desc[i] |= ((uint32_t *) shader->derived_data)[i];
7290 
7291       iris_emit_cmd(batch, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), load) {
7292          load.InterfaceDescriptorTotalLength =
7293             GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
7294          load.InterfaceDescriptorDataStartAddress =
7295             emit_state(batch, ice->state.dynamic_uploader,
7296                        &ice->state.last_res.cs_desc, desc, sizeof(desc), 64);
7297       }
7298    }
7299 
7300    if (grid->indirect)
7301       iris_load_indirect_location(ice, batch, grid);
7302 
7303    iris_measure_snapshot(ice, batch, INTEL_SNAPSHOT_COMPUTE, NULL, NULL, NULL);
7304 
7305    iris_emit_cmd(batch, GENX(GPGPU_WALKER), ggw) {
7306       ggw.IndirectParameterEnable    = grid->indirect != NULL;
7307       ggw.SIMDSize                   = dispatch.simd_size / 16;
7308       ggw.ThreadDepthCounterMaximum  = 0;
7309       ggw.ThreadHeightCounterMaximum = 0;
7310       ggw.ThreadWidthCounterMaximum  = dispatch.threads - 1;
7311       ggw.ThreadGroupIDXDimension    = grid->grid[0];
7312       ggw.ThreadGroupIDYDimension    = grid->grid[1];
7313       ggw.ThreadGroupIDZDimension    = grid->grid[2];
7314       ggw.RightExecutionMask         = dispatch.right_mask;
7315       ggw.BottomExecutionMask        = 0xffffffff;
7316    }
7317 
7318    iris_emit_cmd(batch, GENX(MEDIA_STATE_FLUSH), msf);
7319 
7320    trace_intel_end_compute(&batch->trace, batch, grid->grid[0], grid->grid[1], grid->grid[2]);
7321 }
7322 
7323 #endif /* #if GFX_VERx10 >= 125 */
7324 
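/**
 * Upload all dirty compute state and launch a grid, delegating the actual
 * walker programming to the generation-specific helpers above.
 */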
7325 static void
7326 iris_upload_compute_state(struct iris_context *ice,
7327                           struct iris_batch *batch,
7328                           const struct pipe_grid_info *grid)
7329 {
7330    struct iris_screen *screen = batch->screen;
7331    const uint64_t stage_dirty = ice->state.stage_dirty;
7332    struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
7333    struct iris_compiled_shader *shader =
7334       ice->shaders.prog[MESA_SHADER_COMPUTE];
7335    struct iris_border_color_pool *border_color_pool =
7336       iris_bufmgr_get_border_color_pool(screen->bufmgr);
7337 
7338    iris_batch_sync_region_start(batch);
7339 
7340    /* Always pin the binder.  If we're emitting new binding table pointers,
7341     * we need it.  If not, we're probably inheriting old tables via the
7342     * context, and need it anyway.  Since true zero-bindings cases are
7343     * practically non-existent, just pin it and avoid last_res tracking.
7344     */
7345    iris_use_pinned_bo(batch, ice->state.binder.bo, false, IRIS_DOMAIN_NONE);
7346 
7347    if (((stage_dirty & IRIS_STAGE_DIRTY_CONSTANTS_CS) &&
7348         shs->sysvals_need_upload) ||
7349        shader->kernel_input_size > 0)
7350       upload_sysvals(ice, MESA_SHADER_COMPUTE, grid);
7351 
7352    if (stage_dirty & IRIS_STAGE_DIRTY_BINDINGS_CS)
7353       iris_populate_binding_table(ice, batch, MESA_SHADER_COMPUTE, false);
7354 
7355    if (stage_dirty & IRIS_STAGE_DIRTY_SAMPLER_STATES_CS)
7356       iris_upload_sampler_states(ice, MESA_SHADER_COMPUTE);
7357 
7358    iris_use_optional_res(batch, shs->sampler_table.res, false,
7359                          IRIS_DOMAIN_NONE);
7360    iris_use_pinned_bo(batch, iris_resource_bo(shader->assembly.res), false,
7361                       IRIS_DOMAIN_NONE);
7362 
7363    if (ice->state.need_border_colors)
7364       iris_use_pinned_bo(batch, border_color_pool->bo, false,
7365                          IRIS_DOMAIN_NONE);
7366 
7367 #if GFX_VER >= 12
7368    genX(invalidate_aux_map_state)(batch);
7369 #endif
7370 
7371 #if GFX_VERx10 >= 125
7372    iris_upload_compute_walker(ice, batch, grid);
7373 #else
7374    iris_upload_gpgpu_walker(ice, batch, grid);
7375 #endif
7376 
7377    if (!batch->contains_draw_with_next_seqno) {
7378       iris_restore_compute_saved_bos(ice, batch, grid);
7379       batch->contains_draw_with_next_seqno = batch->contains_draw = true;
7380    }
7381 
7382    iris_batch_sync_region_end(batch);
7383 }
7384 
7385 /**
7386  * State module teardown.
7387  */
7388 static void
7389 iris_destroy_state(struct iris_context *ice)
7390 {
7391    struct iris_genx_state *genx = ice->state.genx;
7392 
7393    pipe_resource_reference(&ice->state.pixel_hashing_tables, NULL);
7394 
7395    pipe_resource_reference(&ice->draw.draw_params.res, NULL);
7396    pipe_resource_reference(&ice->draw.derived_draw_params.res, NULL);
7397 
7398    /* Loop over all VBOs, including ones for draw parameters */
7399    for (unsigned i = 0; i < ARRAY_SIZE(genx->vertex_buffers); i++) {
7400       pipe_resource_reference(&genx->vertex_buffers[i].resource, NULL);
7401    }
7402 
7403    free(ice->state.genx);
7404 
7405    for (int i = 0; i < 4; i++) {
7406       pipe_so_target_reference(&ice->state.so_target[i], NULL);
7407    }
7408 
7409    for (unsigned i = 0; i < ice->state.framebuffer.nr_cbufs; i++) {
7410       pipe_surface_reference(&ice->state.framebuffer.cbufs[i], NULL);
7411    }
7412    pipe_surface_reference(&ice->state.framebuffer.zsbuf, NULL);
7413 
7414    for (int stage = 0; stage < MESA_SHADER_STAGES; stage++) {
7415       struct iris_shader_state *shs = &ice->state.shaders[stage];
7416       pipe_resource_reference(&shs->sampler_table.res, NULL);
7417       for (int i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++) {
7418          pipe_resource_reference(&shs->constbuf[i].buffer, NULL);
7419          pipe_resource_reference(&shs->constbuf_surf_state[i].res, NULL);
7420       }
7421       for (int i = 0; i < PIPE_MAX_SHADER_IMAGES; i++) {
7422          pipe_resource_reference(&shs->image[i].base.resource, NULL);
7423          pipe_resource_reference(&shs->image[i].surface_state.ref.res, NULL);
7424          free(shs->image[i].surface_state.cpu);
7425       }
7426       for (int i = 0; i < PIPE_MAX_SHADER_BUFFERS; i++) {
7427          pipe_resource_reference(&shs->ssbo[i].buffer, NULL);
7428          pipe_resource_reference(&shs->ssbo_surf_state[i].res, NULL);
7429       }
7430       for (int i = 0; i < IRIS_MAX_TEXTURE_SAMPLERS; i++) {
7431          pipe_sampler_view_reference((struct pipe_sampler_view **)
7432                                      &shs->textures[i], NULL);
7433       }
7434    }
7435 
7436    pipe_resource_reference(&ice->state.grid_size.res, NULL);
7437    pipe_resource_reference(&ice->state.grid_surf_state.res, NULL);
7438 
7439    pipe_resource_reference(&ice->state.null_fb.res, NULL);
7440    pipe_resource_reference(&ice->state.unbound_tex.res, NULL);
7441 
7442    pipe_resource_reference(&ice->state.last_res.cc_vp, NULL);
7443    pipe_resource_reference(&ice->state.last_res.sf_cl_vp, NULL);
7444    pipe_resource_reference(&ice->state.last_res.color_calc, NULL);
7445    pipe_resource_reference(&ice->state.last_res.scissor, NULL);
7446    pipe_resource_reference(&ice->state.last_res.blend, NULL);
7447    pipe_resource_reference(&ice->state.last_res.index_buffer, NULL);
7448    pipe_resource_reference(&ice->state.last_res.cs_thread_ids, NULL);
7449    pipe_resource_reference(&ice->state.last_res.cs_desc, NULL);
7450 }
7451 
7452 /* ------------------------------------------------------------------- */
7453 
7454 static void
7455 iris_rebind_buffer(struct iris_context *ice,
7456                    struct iris_resource *res)
7457 {
7458    struct pipe_context *ctx = &ice->ctx;
7459    struct iris_genx_state *genx = ice->state.genx;
7460 
7461    assert(res->base.b.target == PIPE_BUFFER);
7462 
7463    /* Buffers can't be framebuffer attachments, nor display related,
7464     * and we don't have upstream Clover support.
7465     */
7466    assert(!(res->bind_history & (PIPE_BIND_DEPTH_STENCIL |
7467                                  PIPE_BIND_RENDER_TARGET |
7468                                  PIPE_BIND_BLENDABLE |
7469                                  PIPE_BIND_DISPLAY_TARGET |
7470                                  PIPE_BIND_CURSOR |
7471                                  PIPE_BIND_COMPUTE_RESOURCE |
7472                                  PIPE_BIND_GLOBAL)));
7473 
7474    if (res->bind_history & PIPE_BIND_VERTEX_BUFFER) {
7475       uint64_t bound_vbs = ice->state.bound_vertex_buffers;
7476       while (bound_vbs) {
7477          const int i = u_bit_scan64(&bound_vbs);
7478          struct iris_vertex_buffer_state *state = &genx->vertex_buffers[i];
7479 
7480          /* Update the CPU struct */
7481          STATIC_ASSERT(GENX(VERTEX_BUFFER_STATE_BufferStartingAddress_start) == 32);
7482          STATIC_ASSERT(GENX(VERTEX_BUFFER_STATE_BufferStartingAddress_bits) == 64);
7483          uint64_t *addr = (uint64_t *) &state->state[1];
7484          struct iris_bo *bo = iris_resource_bo(state->resource);
7485 
7486          if (*addr != bo->address + state->offset) {
7487             *addr = bo->address + state->offset;
7488             ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS |
7489                                 IRIS_DIRTY_VERTEX_BUFFER_FLUSHES;
7490          }
7491       }
7492    }
7493 
7494    /* We don't need to handle PIPE_BIND_INDEX_BUFFER here: we re-emit
7495     * the 3DSTATE_INDEX_BUFFER packet whenever the address changes.
7496     *
7497     * There is also no need to handle these:
7498     * - PIPE_BIND_COMMAND_ARGS_BUFFER (emitted for every indirect draw)
7499     * - PIPE_BIND_QUERY_BUFFER (no persistent state references)
7500     */
7501 
7502    if (res->bind_history & PIPE_BIND_STREAM_OUTPUT) {
7503       uint32_t *so_buffers = genx->so_buffers;
7504       for (unsigned i = 0; i < 4; i++,
7505            so_buffers += GENX(3DSTATE_SO_BUFFER_length)) {
7506 
7507          /* There are no other fields in bits 127:64 */
7508          uint64_t *addr = (uint64_t *) &so_buffers[2];
7509          STATIC_ASSERT(GENX(3DSTATE_SO_BUFFER_SurfaceBaseAddress_start) == 66);
7510          STATIC_ASSERT(GENX(3DSTATE_SO_BUFFER_SurfaceBaseAddress_bits) == 46);
7511 
7512          struct pipe_stream_output_target *tgt = ice->state.so_target[i];
7513          if (tgt) {
7514             struct iris_bo *bo = iris_resource_bo(tgt->buffer);
7515             if (*addr != bo->address + tgt->buffer_offset) {
7516                *addr = bo->address + tgt->buffer_offset;
7517                ice->state.dirty |= IRIS_DIRTY_SO_BUFFERS;
7518             }
7519          }
7520       }
7521    }
7522 
7523    for (int s = MESA_SHADER_VERTEX; s < MESA_SHADER_STAGES; s++) {
7524       struct iris_shader_state *shs = &ice->state.shaders[s];
7525       enum pipe_shader_type p_stage = stage_to_pipe(s);
7526 
7527       if (!(res->bind_stages & (1 << s)))
7528          continue;
7529 
7530       if (res->bind_history & PIPE_BIND_CONSTANT_BUFFER) {
7531          /* Skip constant buffer 0, it's for regular uniforms, not UBOs */
7532          uint32_t bound_cbufs = shs->bound_cbufs & ~1u;
7533          while (bound_cbufs) {
7534             const int i = u_bit_scan(&bound_cbufs);
7535             struct pipe_shader_buffer *cbuf = &shs->constbuf[i];
7536             struct iris_state_ref *surf_state = &shs->constbuf_surf_state[i];
7537 
7538             if (res->bo == iris_resource_bo(cbuf->buffer)) {
7539                pipe_resource_reference(&surf_state->res, NULL);
7540                shs->dirty_cbufs |= 1u << i;
7541                ice->state.dirty |= (IRIS_DIRTY_RENDER_MISC_BUFFER_FLUSHES |
7542                                     IRIS_DIRTY_COMPUTE_MISC_BUFFER_FLUSHES);
7543                ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_VS << s;
7544             }
7545          }
7546       }
7547 
7548       if (res->bind_history & PIPE_BIND_SHADER_BUFFER) {
7549          uint32_t bound_ssbos = shs->bound_ssbos;
7550          while (bound_ssbos) {
7551             const int i = u_bit_scan(&bound_ssbos);
7552             struct pipe_shader_buffer *ssbo = &shs->ssbo[i];
7553 
7554             if (res->bo == iris_resource_bo(ssbo->buffer)) {
7555                struct pipe_shader_buffer buf = {
7556                   .buffer = &res->base.b,
7557                   .buffer_offset = ssbo->buffer_offset,
7558                   .buffer_size = ssbo->buffer_size,
7559                };
7560                iris_set_shader_buffers(ctx, p_stage, i, 1, &buf,
7561                                        (shs->writable_ssbos >> i) & 1);
7562             }
7563          }
7564       }
7565 
7566       if (res->bind_history & PIPE_BIND_SAMPLER_VIEW) {
7567          uint32_t bound_sampler_views = shs->bound_sampler_views;
7568          while (bound_sampler_views) {
7569             const int i = u_bit_scan(&bound_sampler_views);
7570             struct iris_sampler_view *isv = shs->textures[i];
7571             struct iris_bo *bo = isv->res->bo;
7572 
7573             if (update_surface_state_addrs(ice->state.surface_uploader,
7574                                            &isv->surface_state, bo)) {
7575                ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << s;
7576             }
7577          }
7578       }
7579 
7580       if (res->bind_history & PIPE_BIND_SHADER_IMAGE) {
7581          uint32_t bound_image_views = shs->bound_image_views;
7582          while (bound_image_views) {
7583             const int i = u_bit_scan(&bound_image_views);
7584             struct iris_image_view *iv = &shs->image[i];
7585             struct iris_bo *bo = iris_resource_bo(iv->base.resource);
7586 
7587             if (update_surface_state_addrs(ice->state.surface_uploader,
7588                                            &iv->surface_state, bo)) {
7589                ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << s;
7590             }
7591          }
7592       }
7593    }
7594 }
7595 
7596 /* ------------------------------------------------------------------- */
7597 
7598 /**
7599  * Introduce a batch synchronization boundary, and update its cache coherency
7600  * status to reflect the execution of a PIPE_CONTROL command with the
7601  * specified flags.
7602  */
7603 static void
7604 batch_mark_sync_for_pipe_control(struct iris_batch *batch, uint32_t flags)
7605 {
7606    iris_batch_sync_boundary(batch);
7607 
7608    if ((flags & PIPE_CONTROL_CS_STALL)) {
7609       if ((flags & PIPE_CONTROL_RENDER_TARGET_FLUSH))
7610          iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_RENDER_WRITE);
7611 
7612       if ((flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH))
7613          iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_DEPTH_WRITE);
7614 
7615       if ((flags & PIPE_CONTROL_DATA_CACHE_FLUSH))
7616          iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_DATA_WRITE);
7617 
7618       if ((flags & PIPE_CONTROL_FLUSH_ENABLE))
7619          iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_OTHER_WRITE);
7620 
7621       if ((flags & (PIPE_CONTROL_CACHE_FLUSH_BITS |
7622                     PIPE_CONTROL_STALL_AT_SCOREBOARD))) {
7623          iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_VF_READ);
7624          iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_OTHER_READ);
7625       }
7626    }
7627 
7628    if ((flags & PIPE_CONTROL_RENDER_TARGET_FLUSH))
7629       iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_RENDER_WRITE);
7630 
7631    if ((flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH))
7632       iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_DEPTH_WRITE);
7633 
7634    if ((flags & PIPE_CONTROL_DATA_CACHE_FLUSH))
7635       iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_DATA_WRITE);
7636 
7637    if ((flags & PIPE_CONTROL_FLUSH_ENABLE))
7638       iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_OTHER_WRITE);
7639 
7640    if ((flags & PIPE_CONTROL_VF_CACHE_INVALIDATE))
7641       iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_VF_READ);
7642 
7643    if ((flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE) &&
7644        (flags & PIPE_CONTROL_CONST_CACHE_INVALIDATE))
7645       iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_OTHER_READ);
7646 }
7647 
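/**
 * Translate PIPE_CONTROL write flags into the hardware "Post-Sync
 * Operation" field encoding (0 means no post-sync operation).
 */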
7648 static unsigned
7649 flags_to_post_sync_op(uint32_t flags)
7650 {
7651    if (flags & PIPE_CONTROL_WRITE_IMMEDIATE)
7652       return WriteImmediateData;
7653 
7654    if (flags & PIPE_CONTROL_WRITE_DEPTH_COUNT)
7655       return WritePSDepthCount;
7656 
7657    if (flags & PIPE_CONTROL_WRITE_TIMESTAMP)
7658       return WriteTimestamp;
7659 
7660    return 0;
7661 }
7662 
7663 /**
7664  * Do the given flags have a Post Sync or LRI Post Sync operation?
7665  */
7666 static enum pipe_control_flags
7667 get_post_sync_flags(enum pipe_control_flags flags)
7668 {
7669    flags &= PIPE_CONTROL_WRITE_IMMEDIATE |
7670             PIPE_CONTROL_WRITE_DEPTH_COUNT |
7671             PIPE_CONTROL_WRITE_TIMESTAMP |
7672             PIPE_CONTROL_LRI_POST_SYNC_OP;
7673 
7674    /* Only one "Post Sync Op" is allowed, and it's mutually exclusive with
7675     * "LRI Post Sync Operation".  So more than one bit set would be illegal.
7676     */
7677    assert(util_bitcount(flags) <= 1);
7678 
7679    return flags;
7680 }
7681 
7682 #define IS_COMPUTE_PIPELINE(batch) (batch->name == IRIS_BATCH_COMPUTE)
7683 
7684 /**
7685  * Emit a series of PIPE_CONTROL commands, taking into account any
7686  * workarounds necessary to actually accomplish the caller's request.
7687  *
7688  * Unless otherwise noted, spec quotations in this function come from:
7689  *
7690  * Synchronization of the 3D Pipeline > PIPE_CONTROL Command > Programming
7691  * Restrictions for PIPE_CONTROL.
7692  *
7693  * You should not use this function directly.  Use the helpers in
7694  * iris_pipe_control.c instead, which may split the pipe control further.
7695  */
7696 static void
7697 iris_emit_raw_pipe_control(struct iris_batch *batch,
7698                            const char *reason,
7699                            uint32_t flags,
7700                            struct iris_bo *bo,
7701                            uint32_t offset,
7702                            uint64_t imm)
7703 {
7704    UNUSED const struct intel_device_info *devinfo = &batch->screen->devinfo;
7705    enum pipe_control_flags post_sync_flags = get_post_sync_flags(flags);
7706    enum pipe_control_flags non_lri_post_sync_flags =
7707       post_sync_flags & ~PIPE_CONTROL_LRI_POST_SYNC_OP;
7708 
7709 #if GFX_VER >= 12
7710    if (batch->name == IRIS_BATCH_BLITTER) {
7711       batch_mark_sync_for_pipe_control(batch, flags);
7712       iris_batch_sync_region_start(batch);
7713 
7714       assert(!(flags & PIPE_CONTROL_WRITE_DEPTH_COUNT));
7715 
7716       /* The blitter doesn't actually use PIPE_CONTROL; rather it uses the
7717        * MI_FLUSH_DW command.  However, all of our code is set up to flush
7718        * via emitting a pipe control, so we just translate it at this point,
7719        * even if it is a bit hacky.
7720        */
7721       iris_emit_cmd(batch, GENX(MI_FLUSH_DW), fd) {
7722          fd.Address = rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE);
7723          fd.ImmediateData = imm;
7724          fd.PostSyncOperation = flags_to_post_sync_op(flags);
7725 #if GFX_VERx10 >= 125
7726          /* TODO: This may not always be necessary */
7727          fd.FlushCCS = true;
7728 #endif
7729       }
7730       iris_batch_sync_region_end(batch);
7731       return;
7732    }
7733 #endif
7734 
7735    /* Recursive PIPE_CONTROL workarounds --------------------------------
7736     * (http://knowyourmeme.com/memes/xzibit-yo-dawg)
7737     *
7738     * We do these first because we want to look at the original operation,
7739     * rather than any workarounds we set.
7740     */
7741    if (GFX_VER == 9 && (flags & PIPE_CONTROL_VF_CACHE_INVALIDATE)) {
7742       /* The PIPE_CONTROL "VF Cache Invalidation Enable" bit description
7743        * lists several workarounds:
7744        *
7745        *    "Project: SKL, KBL, BXT
7746        *
7747        *     If the VF Cache Invalidation Enable is set to a 1 in a
7748        *     PIPE_CONTROL, a separate Null PIPE_CONTROL, all bitfields
7749        *     sets to 0, with the VF Cache Invalidation Enable set to 0
7750        *     needs to be sent prior to the PIPE_CONTROL with VF Cache
7751        *     Invalidation Enable set to a 1."
7752        */
7753       iris_emit_raw_pipe_control(batch,
7754                                  "workaround: recursive VF cache invalidate",
7755                                  0, NULL, 0, 0);
7756    }
7757 
7758    /* Wa_1409226450, Wait for EU to be idle before pipe control which
7759     * invalidates the instruction cache
7760     */
7761    if (GFX_VER == 12 && (flags & PIPE_CONTROL_INSTRUCTION_INVALIDATE)) {
7762       iris_emit_raw_pipe_control(batch,
7763                                  "workaround: CS stall before instruction "
7764                                  "cache invalidate",
7765                                  PIPE_CONTROL_CS_STALL |
7766                                  PIPE_CONTROL_STALL_AT_SCOREBOARD, bo, offset,
7767                                  imm);
7768    }
7769 
7770    if (GFX_VER == 9 && IS_COMPUTE_PIPELINE(batch) && post_sync_flags) {
7771       /* Project: SKL / Argument: LRI Post Sync Operation [23]
7772        *
7773        * "PIPECONTROL command with “Command Streamer Stall Enable” must be
7774        *  programmed prior to programming a PIPECONTROL command with "LRI
7775        *  Post Sync Operation" in GPGPU mode of operation (i.e when
7776        *  PIPELINE_SELECT command is set to GPGPU mode of operation)."
7777        *
7778        * The same text exists a few rows below for Post Sync Op.
7779        */
7780       iris_emit_raw_pipe_control(batch,
7781                                  "workaround: CS stall before gpgpu post-sync",
7782                                  PIPE_CONTROL_CS_STALL, bo, offset, imm);
7783    }
7784 
7785    /* "Flush Types" workarounds ---------------------------------------------
7786     * We do these now because they may add post-sync operations or CS stalls.
7787     */
7788 
7789    if (GFX_VER < 11 && flags & PIPE_CONTROL_VF_CACHE_INVALIDATE) {
7790       /* Project: BDW, SKL+ (stopping at CNL) / Argument: VF Invalidate
7791        *
7792        * "'Post Sync Operation' must be enabled to 'Write Immediate Data' or
7793        *  'Write PS Depth Count' or 'Write Timestamp'."
7794        */
7795       if (!bo) {
7796          flags |= PIPE_CONTROL_WRITE_IMMEDIATE;
7797          post_sync_flags |= PIPE_CONTROL_WRITE_IMMEDIATE;
7798          non_lri_post_sync_flags |= PIPE_CONTROL_WRITE_IMMEDIATE;
7799          bo = batch->screen->workaround_address.bo;
7800          offset = batch->screen->workaround_address.offset;
7801       }
7802    }
7803 
7804    if (flags & PIPE_CONTROL_DEPTH_STALL) {
7805       /* From the PIPE_CONTROL instruction table, bit 13 (Depth Stall Enable):
7806        *
7807        *    "This bit must be DISABLED for operations other than writing
7808        *     PS_DEPTH_COUNT."
7809        *
7810        * This seems like nonsense.  An Ivybridge workaround requires us to
7811        * emit a PIPE_CONTROL with a depth stall and write immediate post-sync
7812        * operation.  Gfx8+ requires us to emit depth stalls and depth cache
7813        * flushes together.  So, it's hard to imagine this means anything other
7814        * than "we originally intended this to be used for PS_DEPTH_COUNT".
7815        *
7816        * We ignore the supposed restriction and do nothing.
7817        */
7818    }
7819 
7820    if (flags & (PIPE_CONTROL_RENDER_TARGET_FLUSH |
7821                 PIPE_CONTROL_STALL_AT_SCOREBOARD)) {
7822       /* From the PIPE_CONTROL instruction table, bit 12 and bit 1:
7823        *
7824        *    "This bit must be DISABLED for End-of-pipe (Read) fences,
7825        *     PS_DEPTH_COUNT or TIMESTAMP queries."
7826        *
7827        * TODO: Implement end-of-pipe checking.
7828        */
7829       assert(!(post_sync_flags & (PIPE_CONTROL_WRITE_DEPTH_COUNT |
7830                                   PIPE_CONTROL_WRITE_TIMESTAMP)));
7831    }
7832 
7833    if (GFX_VER < 11 && (flags & PIPE_CONTROL_STALL_AT_SCOREBOARD)) {
7834       /* From the PIPE_CONTROL instruction table, bit 1:
7835        *
7836        *    "This bit is ignored if Depth Stall Enable is set.
7837        *     Further, the render cache is not flushed even if Write Cache
7838        *     Flush Enable bit is set."
7839        *
7840        * We assert that the caller doesn't do this combination, to try and
7841        * prevent mistakes.  It shouldn't hurt the GPU, though.
7842        *
7843        * We skip this check on Gfx11+ as the "Stall at Pixel Scoreboard"
7844        * and "Render Target Flush" combo is explicitly required for BTI
7845        * update workarounds.
7846        */
7847       assert(!(flags & (PIPE_CONTROL_DEPTH_STALL |
7848                         PIPE_CONTROL_RENDER_TARGET_FLUSH)));
7849    }
7850 
7851    /* PIPE_CONTROL page workarounds ------------------------------------- */
7852 
7853    if (GFX_VER <= 8 && (flags & PIPE_CONTROL_STATE_CACHE_INVALIDATE)) {
7854       /* From the PIPE_CONTROL page itself:
7855        *
7856        *    "IVB, HSW, BDW
7857        *     Restriction: Pipe_control with CS-stall bit set must be issued
7858        *     before a pipe-control command that has the State Cache
7859        *     Invalidate bit set."
7860        */
7861       flags |= PIPE_CONTROL_CS_STALL;
7862    }
7863 
7864    if (flags & PIPE_CONTROL_FLUSH_LLC) {
7865       /* From the PIPE_CONTROL instruction table, bit 26 (Flush LLC):
7866        *
7867        *    "Project: ALL
7868        *     SW must always program Post-Sync Operation to "Write Immediate
7869        *     Data" when Flush LLC is set."
7870        *
7871        * For now, we just require the caller to do it.
7872        */
7873       assert(flags & PIPE_CONTROL_WRITE_IMMEDIATE);
7874    }
7875 
7876    /* "Post-Sync Operation" workarounds -------------------------------- */
7877 
7878    /* Project: All / Argument: Global Snapshot Count Reset [19]
7879     *
7880     * "This bit must not be exercised on any product.
7881     *  Requires stall bit ([20] of DW1) set."
7882     *
7883     * We don't use this, so we just assert that it isn't used.  The
7884     * PIPE_CONTROL instruction page indicates that they intended this
7885     * as a debug feature and don't think it is useful in production,
7886     * but it may actually be usable, should we ever want to.
7887     */
7888    assert((flags & PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET) == 0);
7889 
7890    if (flags & (PIPE_CONTROL_MEDIA_STATE_CLEAR |
7891                 PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE)) {
7892       /* Project: All / Arguments:
7893        *
7894        * - Generic Media State Clear [16]
7895        * - Indirect State Pointers Disable [16]
7896        *
7897        *    "Requires stall bit ([20] of DW1) set."
7898        *
7899        * Also, the PIPE_CONTROL instruction table, bit 16 (Generic Media
7900        * State Clear) says:
7901        *
7902        *    "PIPECONTROL command with “Command Streamer Stall Enable” must be
7903        *     programmed prior to programming a PIPECONTROL command with "Media
7904        *     State Clear" set in GPGPU mode of operation"
7905        *
7906        * This is a subset of the earlier rule, so there's nothing to do.
7907        */
7908       flags |= PIPE_CONTROL_CS_STALL;
7909    }
7910 
7911    if (flags & PIPE_CONTROL_STORE_DATA_INDEX) {
7912       /* Project: All / Argument: Store Data Index
7913        *
7914        * "Post-Sync Operation ([15:14] of DW1) must be set to something other
7915        *  than '0'."
7916        *
7917        * For now, we just assert that the caller does this.  We might want to
7918        * automatically add a write to the workaround BO...
7919        */
7920       assert(non_lri_post_sync_flags != 0);
7921    }
7922 
7923    if (flags & PIPE_CONTROL_SYNC_GFDT) {
7924       /* Project: All / Argument: Sync GFDT
7925        *
7926        * "Post-Sync Operation ([15:14] of DW1) must be set to something other
7927        *  than '0' or 0x2520[13] must be set."
7928        *
7929        * For now, we just assert that the caller does this.
7930        */
7931       assert(non_lri_post_sync_flags != 0);
7932    }
7933 
7934    if (flags & PIPE_CONTROL_TLB_INVALIDATE) {
7935       /* Project: IVB+ / Argument: TLB inv
7936        *
7937        *    "Requires stall bit ([20] of DW1) set."
7938        *
7939        * Also, from the PIPE_CONTROL instruction table:
7940        *
7941        *    "Project: SKL+
7942        *     Post Sync Operation or CS stall must be set to ensure a TLB
7943        *     invalidation occurs.  Otherwise no cycle will occur to the TLB
7944        *     cache to invalidate."
7945        *
7946        * This is not a subset of the earlier rule, so there's nothing to do.
7947        */
7948       flags |= PIPE_CONTROL_CS_STALL;
7949    }
7950 
7951    if (GFX_VER == 9 && devinfo->gt == 4) {
7952       /* TODO: The big Skylake GT4 post sync op workaround */
7953    }
7954 
7955    /* "GPGPU specific workarounds" (both post-sync and flush) ------------ */
7956 
7957    if (IS_COMPUTE_PIPELINE(batch)) {
7958       if (GFX_VER >= 9 && (flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE)) {
7959          /* Project: SKL+ / Argument: Tex Invalidate
7960           * "Requires stall bit ([20] of DW) set for all GPGPU Workloads."
7961           */
7962          flags |= PIPE_CONTROL_CS_STALL;
7963       }
7964 
7965       if (GFX_VER == 8 && (post_sync_flags ||
7966                            (flags & (PIPE_CONTROL_NOTIFY_ENABLE |
7967                                      PIPE_CONTROL_DEPTH_STALL |
7968                                      PIPE_CONTROL_RENDER_TARGET_FLUSH |
7969                                      PIPE_CONTROL_DEPTH_CACHE_FLUSH |
7970                                      PIPE_CONTROL_DATA_CACHE_FLUSH)))) {
7971          /* Project: BDW / Arguments:
7972           *
7973           * - LRI Post Sync Operation   [23]
7974           * - Post Sync Op              [15:14]
7975           * - Notify En                 [8]
7976           * - Depth Stall               [13]
7977           * - Render Target Cache Flush [12]
7978           * - Depth Cache Flush         [0]
7979           * - DC Flush Enable           [5]
7980           *
7981           *    "Requires stall bit ([20] of DW) set for all GPGPU and Media
7982           *     Workloads."
7983           */
7984          flags |= PIPE_CONTROL_CS_STALL;
7985 
7986          /* Also, from the PIPE_CONTROL instruction table, bit 20:
7987           *
7988           *    "Project: BDW
7989           *     This bit must be always set when PIPE_CONTROL command is
7990           *     programmed by GPGPU and MEDIA workloads, except for the cases
7991           *     when only Read Only Cache Invalidation bits are set (State
7992           *     Cache Invalidation Enable, Instruction cache Invalidation
7993           *     Enable, Texture Cache Invalidation Enable, Constant Cache
7994           *     Invalidation Enable). This is to WA FFDOP CG issue, this WA
7995           *     need not implemented when FF_DOP_CG is disable via "Fixed
7996           *     Function DOP Clock Gate Disable" bit in RC_PSMI_CTRL register."
7997           *
7998           * It sounds like we could avoid CS stalls in some cases, but we
7999           * don't currently bother.  This list isn't exactly the list above,
8000           * either...
8001           */
8002       }
8003    }
8004 
8005    /* "Stall" workarounds ----------------------------------------------
8006     * These have to come after the earlier ones because we may have added
8007     * some additional CS stalls above.
8008     */
8009 
8010    if (GFX_VER < 9 && (flags & PIPE_CONTROL_CS_STALL)) {
8011       /* Project: PRE-SKL, VLV, CHV
8012        *
8013        * "[All Stepping][All SKUs]:
8014        *
8015        *  One of the following must also be set:
8016        *
8017        *  - Render Target Cache Flush Enable ([12] of DW1)
8018        *  - Depth Cache Flush Enable ([0] of DW1)
8019        *  - Stall at Pixel Scoreboard ([1] of DW1)
8020        *  - Depth Stall ([13] of DW1)
8021        *  - Post-Sync Operation ([13] of DW1)
8022        *  - DC Flush Enable ([5] of DW1)"
8023        *
8024        * If we don't already have one of those bits set, we choose to add
8025        * "Stall at Pixel Scoreboard".  Some of the other bits require a
8026        * CS stall as a workaround (see above), which would send us into
8027        * an infinite recursion of PIPE_CONTROLs.  "Stall at Pixel Scoreboard"
8028        * appears to be safe, so we choose that.
8029        */
8030       const uint32_t wa_bits = PIPE_CONTROL_RENDER_TARGET_FLUSH |
8031                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
8032                                PIPE_CONTROL_WRITE_IMMEDIATE |
8033                                PIPE_CONTROL_WRITE_DEPTH_COUNT |
8034                                PIPE_CONTROL_WRITE_TIMESTAMP |
8035                                PIPE_CONTROL_STALL_AT_SCOREBOARD |
8036                                PIPE_CONTROL_DEPTH_STALL |
8037                                PIPE_CONTROL_DATA_CACHE_FLUSH;
8038       if (!(flags & wa_bits))
8039          flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
8040    }
8041 
8042    if (GFX_VER >= 12 && (flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH)) {
8043       /* Wa_1409600907:
8044        *
8045        * "PIPE_CONTROL with Depth Stall Enable bit must be set
8046        * with any PIPE_CONTROL with Depth Flush Enable bit set."
8047        */
8048       flags |= PIPE_CONTROL_DEPTH_STALL;
8049    }
8050 
8051    /* Emit --------------------------------------------------------------- */
8052 
8053    if (INTEL_DEBUG(DEBUG_PIPE_CONTROL)) {
8054       fprintf(stderr,
8055               "  PC [%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%"PRIx64"]: %s\n",
8056               (flags & PIPE_CONTROL_FLUSH_ENABLE) ? "PipeCon " : "",
8057               (flags & PIPE_CONTROL_CS_STALL) ? "CS " : "",
8058               (flags & PIPE_CONTROL_STALL_AT_SCOREBOARD) ? "Scoreboard " : "",
8059               (flags & PIPE_CONTROL_VF_CACHE_INVALIDATE) ? "VF " : "",
8060               (flags & PIPE_CONTROL_RENDER_TARGET_FLUSH) ? "RT " : "",
8061               (flags & PIPE_CONTROL_CONST_CACHE_INVALIDATE) ? "Const " : "",
8062               (flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE) ? "TC " : "",
8063               (flags & PIPE_CONTROL_DATA_CACHE_FLUSH) ? "DC " : "",
8064               (flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH) ? "ZFlush " : "",
8065               (flags & PIPE_CONTROL_TILE_CACHE_FLUSH) ? "Tile " : "",
8066               (flags & PIPE_CONTROL_DEPTH_STALL) ? "ZStall " : "",
8067               (flags & PIPE_CONTROL_STATE_CACHE_INVALIDATE) ? "State " : "",
8068               (flags & PIPE_CONTROL_TLB_INVALIDATE) ? "TLB " : "",
8069               (flags & PIPE_CONTROL_INSTRUCTION_INVALIDATE) ? "Inst " : "",
8070               (flags & PIPE_CONTROL_MEDIA_STATE_CLEAR) ? "MediaClear " : "",
8071               (flags & PIPE_CONTROL_NOTIFY_ENABLE) ? "Notify " : "",
8072               (flags & PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET) ?
8073                  "SnapRes" : "",
8074               (flags & PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE) ?
8075                   "ISPDis" : "",
8076               (flags & PIPE_CONTROL_WRITE_IMMEDIATE) ? "WriteImm " : "",
8077               (flags & PIPE_CONTROL_WRITE_DEPTH_COUNT) ? "WriteZCount " : "",
8078               (flags & PIPE_CONTROL_WRITE_TIMESTAMP) ? "WriteTimestamp " : "",
8079               (flags & PIPE_CONTROL_FLUSH_HDC) ? "HDC " : "",
8080               (flags & PIPE_CONTROL_PSS_STALL_SYNC) ? "PSS " : "",
8081               imm, reason);
8082    }
8083 
8084    batch_mark_sync_for_pipe_control(batch, flags);
8085    iris_batch_sync_region_start(batch);
8086 
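   /* Only record a utrace stall event when this PIPE_CONTROL actually
    * flushes or invalidates caches.
    */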
8087    const bool trace_pc =
8088       (flags & (PIPE_CONTROL_CACHE_FLUSH_BITS | PIPE_CONTROL_CACHE_INVALIDATE_BITS)) != 0;
8089 
8090    if (trace_pc)
8091       trace_intel_begin_stall(&batch->trace, batch);
8092 
8093    iris_emit_cmd(batch, GENX(PIPE_CONTROL), pc) {
8094 #if GFX_VERx10 >= 125
8095       pc.PSSStallSyncEnable = flags & PIPE_CONTROL_PSS_STALL_SYNC;
8096 #endif
8097 #if GFX_VER >= 12
8098       pc.TileCacheFlushEnable = flags & PIPE_CONTROL_TILE_CACHE_FLUSH;
8099 #endif
8100 #if GFX_VER >= 11
8101       pc.HDCPipelineFlushEnable = flags & PIPE_CONTROL_FLUSH_HDC;
8102 #endif
8103       pc.LRIPostSyncOperation = NoLRIOperation;
8104       pc.PipeControlFlushEnable = flags & PIPE_CONTROL_FLUSH_ENABLE;
8105       pc.DCFlushEnable = flags & PIPE_CONTROL_DATA_CACHE_FLUSH;
8106       pc.StoreDataIndex = 0;
8107       pc.CommandStreamerStallEnable = flags & PIPE_CONTROL_CS_STALL;
8108       pc.GlobalSnapshotCountReset =
8109          flags & PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET;
8110       pc.TLBInvalidate = flags & PIPE_CONTROL_TLB_INVALIDATE;
8111       pc.GenericMediaStateClear = flags & PIPE_CONTROL_MEDIA_STATE_CLEAR;
8112       pc.StallAtPixelScoreboard = flags & PIPE_CONTROL_STALL_AT_SCOREBOARD;
8113       pc.RenderTargetCacheFlushEnable =
8114          flags & PIPE_CONTROL_RENDER_TARGET_FLUSH;
8115       pc.DepthCacheFlushEnable = flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH;
8116       pc.StateCacheInvalidationEnable =
8117          flags & PIPE_CONTROL_STATE_CACHE_INVALIDATE;
8118 #if GFX_VER >= 12
8119       /* Invalidates the L3 cache part in which index & vertex data is loaded
8120        * when VERTEX_BUFFER_STATE::L3BypassDisable is set.
8121        */
8122       pc.L3ReadOnlyCacheInvalidationEnable =
8123          flags & PIPE_CONTROL_VF_CACHE_INVALIDATE;
8124 #endif
8125       pc.VFCacheInvalidationEnable = flags & PIPE_CONTROL_VF_CACHE_INVALIDATE;
8126       pc.ConstantCacheInvalidationEnable =
8127          flags & PIPE_CONTROL_CONST_CACHE_INVALIDATE;
8128       pc.PostSyncOperation = flags_to_post_sync_op(flags);
8129       pc.DepthStallEnable = flags & PIPE_CONTROL_DEPTH_STALL;
8130       pc.InstructionCacheInvalidateEnable =
8131          flags & PIPE_CONTROL_INSTRUCTION_INVALIDATE;
8132       pc.NotifyEnable = flags & PIPE_CONTROL_NOTIFY_ENABLE;
8133       pc.IndirectStatePointersDisable =
8134          flags & PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE;
8135       pc.TextureCacheInvalidationEnable =
8136          flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
8137       pc.Address = rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE);
8138       pc.ImmediateData = imm;
8139    }
8140 
8141    if (trace_pc) {
8142       trace_intel_end_stall(&batch->trace, batch, flags,
8143                             iris_utrace_pipe_flush_bit_to_ds_stall_flag,
8144                             reason);
8145    }
8146 
8147    iris_batch_sync_region_end(batch);
8148 }
8149 
8150 #if GFX_VER == 9
8151 /**
8152  * Preemption on Gfx9 has to be enabled or disabled in various cases.
8153  *
8154  * See these workarounds for preemption:
8155  *  - WaDisableMidObjectPreemptionForGSLineStripAdj
8156  *  - WaDisableMidObjectPreemptionForTrifanOrPolygon
8157  *  - WaDisableMidObjectPreemptionForLineLoop
8158  *  - WA#0798
8159  *
8160  * We don't put this in the vtable because it's only used on Gfx9.
8161  */
8162 void
8163 gfx9_toggle_preemption(struct iris_context *ice,
8164                        struct iris_batch *batch,
8165                        const struct pipe_draw_info *draw)
8166 {
8167    struct iris_genx_state *genx = ice->state.genx;
8168    bool object_preemption = true;
8169 
8170    /* WaDisableMidObjectPreemptionForGSLineStripAdj
8171     *
8172     *    "WA: Disable mid-draw preemption when draw-call is a linestrip_adj
8173     *     and GS is enabled."
8174     */
8175    if (draw->mode == PIPE_PRIM_LINE_STRIP_ADJACENCY &&
8176        ice->shaders.prog[MESA_SHADER_GEOMETRY])
8177       object_preemption = false;
8178 
8179    /* WaDisableMidObjectPreemptionForTrifanOrPolygon
8180     *
8181     *    "TriFan miscompare in Execlist Preemption test. Cut index that is
8182     *     on a previous context. End the previous, the resume another context
8183     *     with a tri-fan or polygon, and the vertex count is corrupted. If we
8184     *     prempt again we will cause corruption.
8185     *
8186     *     WA: Disable mid-draw preemption when draw-call has a tri-fan."
8187     */
8188    if (draw->mode == PIPE_PRIM_TRIANGLE_FAN)
8189       object_preemption = false;
8190 
8191    /* WaDisableMidObjectPreemptionForLineLoop
8192     *
8193     *    "VF Stats Counters Missing a vertex when preemption enabled.
8194     *
8195     *     WA: Disable mid-draw preemption when the draw uses a lineloop
8196     *     topology."
8197     */
8198    if (draw->mode == PIPE_PRIM_LINE_LOOP)
8199       object_preemption = false;
8200 
8201    /* WA#0798
8202     *
8203     *    "VF is corrupting GAFS data when preempted on an instance boundary
8204     *     and replayed with instancing enabled.
8205     *
8206     *     WA: Disable preemption when using instanceing."
8207     */
8208    if (draw->instance_count > 1)
8209       object_preemption = false;
8210 
8211    if (genx->object_preemption != object_preemption) {
8212       iris_enable_obj_preemption(batch, object_preemption);
8213       genx->object_preemption = object_preemption;
8214    }
8215 }
8216 #endif
8217 
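/**
 * Forget any generation-specific hardware state we were tracking for this
 * batch (depth register mode, last index buffer), forcing it to be
 * re-emitted.
 */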
8218 static void
8219 iris_lost_genx_state(struct iris_context *ice, struct iris_batch *batch)
8220 {
8221    struct iris_genx_state *genx = ice->state.genx;
8222 
8223 #if GFX_VERx10 == 120
8224    genx->depth_reg_mode = IRIS_DEPTH_REG_MODE_UNKNOWN;
8225 #endif
8226 
8227    memset(genx->last_index_buffer, 0, sizeof(genx->last_index_buffer));
8228 }
8229 
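/**
 * Emit MI_REPORT_PERF_COUNT, which writes a snapshot of the performance
 * counters to the given buffer at the given offset, tagged with report_id.
 */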
8230 static void
8231 iris_emit_mi_report_perf_count(struct iris_batch *batch,
8232                                struct iris_bo *bo,
8233                                uint32_t offset_in_bytes,
8234                                uint32_t report_id)
8235 {
8236    iris_batch_sync_region_start(batch);
8237    iris_emit_cmd(batch, GENX(MI_REPORT_PERF_COUNT), mi_rpc) {
8238       mi_rpc.MemoryAddress = rw_bo(bo, offset_in_bytes,
8239                                    IRIS_DOMAIN_OTHER_WRITE);
8240       mi_rpc.ReportID = report_id;
8241    }
8242    iris_batch_sync_region_end(batch);
8243 }
8244 
8245 /**
8246  * Update the pixel hashing modes that determine the balancing of PS threads
8247  * across subslices and slices.
8248  *
8249  * \param width Width bound of the rendering area (already scaled down if \p
8250  *              scale is greater than 1).
8251  * \param height Height bound of the rendering area (already scaled down if \p
8252  *               scale is greater than 1).
8253  * \param scale The number of framebuffer samples that could potentially be
8254  *              affected by an individual channel of the PS thread.  This is
8255  *              typically one for single-sampled rendering, but for operations
8256  *              like CCS resolves and fast clears a single PS invocation may
8257  *              update a huge number of pixels, in which case a finer
8258  *              balancing is desirable in order to maximally utilize the
8259  *              bandwidth available.  UINT_MAX can be used as shorthand for
8260  *              "finest hashing mode available".
8261  */
8262 void
8263 genX(emit_hashing_mode)(struct iris_context *ice, struct iris_batch *batch,
8264                         unsigned width, unsigned height, unsigned scale)
8265 {
8266 #if GFX_VER == 9
8267    const struct intel_device_info *devinfo = &batch->screen->devinfo;
8268    const unsigned slice_hashing[] = {
8269       /* Because all Gfx9 platforms with more than one slice require
8270        * three-way subslice hashing, a single "normal" 16x16 slice hashing
8271        * block is guaranteed to suffer from substantial imbalance, with one
8272        * subslice receiving twice as much work as the other two in the
8273        * slice.
8274        *
8275        * The performance impact of that would be particularly severe when
8276        * three-way hashing is also in use for slice balancing (which is the
8277        * case for all Gfx9 GT4 platforms), because one of the slices
8278        * receives one every three 16x16 blocks in either direction, which
8279        * is roughly the periodicity of the underlying subslice imbalance
8280        * pattern ("roughly" because in reality the hardware's
8281        * implementation of three-way hashing doesn't do exact modulo 3
8282        * arithmetic, which somewhat decreases the magnitude of this effect
8283        * in practice).  This leads to a systematic subslice imbalance
8284        * within that slice regardless of the size of the primitive.  The
8285        * 32x32 hashing mode guarantees that the subslice imbalance within a
8286        * single slice hashing block is minimal, largely eliminating this
8287        * effect.
8288        */
8289       _32x32,
8290       /* Finest slice hashing mode available. */
8291       NORMAL
8292    };
8293    const unsigned subslice_hashing[] = {
8294       /* 16x16 would provide a slight cache locality benefit especially
8295        * visible in the sampler L1 cache efficiency of low-bandwidth
8296        * non-LLC platforms, but it comes at the cost of greater subslice
8297        * imbalance for primitives of dimensions approximately intermediate
8298        * between 16x4 and 16x16.
8299        */
8300       _16x4,
8301       /* Finest subslice hashing mode available. */
8302       _8x4
8303    };
8304    /* Dimensions of the smallest hashing block of a given hashing mode.  If
8305     * the rendering area is smaller than this there can't possibly be any
8306     * benefit from switching to this mode, so we optimize out the
8307     * transition.
8308     */
8309    const unsigned min_size[][2] = {
8310       { 16, 4 },
8311       { 8, 4 }
8312    };
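   /* scale > 1 selects the finer hashing modes (the second entry in each of
    * the tables above).
    */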
8313    const unsigned idx = scale > 1;
8314 
8315    if (width > min_size[idx][0] || height > min_size[idx][1]) {
8316       iris_emit_raw_pipe_control(batch,
8317                                  "workaround: CS stall before GT_MODE LRI",
8318                                  PIPE_CONTROL_STALL_AT_SCOREBOARD |
8319                                  PIPE_CONTROL_CS_STALL,
8320                                  NULL, 0, 0);
8321 
8322       iris_emit_reg(batch, GENX(GT_MODE), reg) {
8323          reg.SliceHashing = (devinfo->num_slices > 1 ? slice_hashing[idx] : 0);
8324          reg.SliceHashingMask = (devinfo->num_slices > 1 ? -1 : 0);
8325          reg.SubsliceHashing = subslice_hashing[idx];
8326          reg.SubsliceHashingMask = -1;
8327       };
8328 
8329       ice->state.current_hash_scale = scale;
8330    }
8331 #endif
8332 }
8333 
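/**
 * Switch the render and compute batches in or out of no-op mode, marking
 * all state dirty so it gets re-emitted when drawing is re-enabled.
 */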
8334 static void
8335 iris_set_frontend_noop(struct pipe_context *ctx, bool enable)
8336 {
8337    struct iris_context *ice = (struct iris_context *) ctx;
8338 
8339    if (iris_batch_prepare_noop(&ice->batches[IRIS_BATCH_RENDER], enable)) {
8340       ice->state.dirty |= IRIS_ALL_DIRTY_FOR_RENDER;
8341       ice->state.stage_dirty |= IRIS_ALL_STAGE_DIRTY_FOR_RENDER;
8342    }
8343 
8344    if (iris_batch_prepare_noop(&ice->batches[IRIS_BATCH_COMPUTE], enable)) {
8345       ice->state.dirty |= IRIS_ALL_DIRTY_FOR_COMPUTE;
8346       ice->state.stage_dirty |= IRIS_ALL_STAGE_DIRTY_FOR_COMPUTE;
8347    }
8348 }
8349 
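/**
 * Fill out the screen's per-generation vtable with the state upload and
 * command emission functions defined in this file.
 */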
8350 void
8351 genX(init_screen_state)(struct iris_screen *screen)
8352 {
8353    assert(screen->devinfo.verx10 == GFX_VERx10);
8354    screen->vtbl.destroy_state = iris_destroy_state;
8355    screen->vtbl.init_render_context = iris_init_render_context;
8356    screen->vtbl.init_compute_context = iris_init_compute_context;
8357    screen->vtbl.upload_render_state = iris_upload_render_state;
8358    screen->vtbl.update_binder_address = iris_update_binder_address;
8359    screen->vtbl.upload_compute_state = iris_upload_compute_state;
8360    screen->vtbl.emit_raw_pipe_control = iris_emit_raw_pipe_control;
8361    screen->vtbl.emit_mi_report_perf_count = iris_emit_mi_report_perf_count;
8362    screen->vtbl.rebind_buffer = iris_rebind_buffer;
8363    screen->vtbl.load_register_reg32 = iris_load_register_reg32;
8364    screen->vtbl.load_register_reg64 = iris_load_register_reg64;
8365    screen->vtbl.load_register_imm32 = iris_load_register_imm32;
8366    screen->vtbl.load_register_imm64 = iris_load_register_imm64;
8367    screen->vtbl.load_register_mem32 = iris_load_register_mem32;
8368    screen->vtbl.load_register_mem64 = iris_load_register_mem64;
8369    screen->vtbl.store_register_mem32 = iris_store_register_mem32;
8370    screen->vtbl.store_register_mem64 = iris_store_register_mem64;
8371    screen->vtbl.store_data_imm32 = iris_store_data_imm32;
8372    screen->vtbl.store_data_imm64 = iris_store_data_imm64;
8373    screen->vtbl.copy_mem_mem = iris_copy_mem_mem;
8374    screen->vtbl.derived_program_state_size = iris_derived_program_state_size;
8375    screen->vtbl.store_derived_program_state = iris_store_derived_program_state;
8376    screen->vtbl.create_so_decl_list = iris_create_so_decl_list;
8377    screen->vtbl.populate_vs_key = iris_populate_vs_key;
8378    screen->vtbl.populate_tcs_key = iris_populate_tcs_key;
8379    screen->vtbl.populate_tes_key = iris_populate_tes_key;
8380    screen->vtbl.populate_gs_key = iris_populate_gs_key;
8381    screen->vtbl.populate_fs_key = iris_populate_fs_key;
8382    screen->vtbl.populate_cs_key = iris_populate_cs_key;
8383    screen->vtbl.lost_genx_state = iris_lost_genx_state;
8384 }
8385 
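/**
 * Install the Gallium context hooks implemented in this file and set up
 * initial context state: dirty bits, the default sample mask, the null
 * surface for unbound textures, and empty scissor rectangles.
 */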
8386 void
8387 genX(init_state)(struct iris_context *ice)
8388 {
8389    struct pipe_context *ctx = &ice->ctx;
8390    struct iris_screen *screen = (struct iris_screen *)ctx->screen;
8391 
8392    ctx->create_blend_state = iris_create_blend_state;
8393    ctx->create_depth_stencil_alpha_state = iris_create_zsa_state;
8394    ctx->create_rasterizer_state = iris_create_rasterizer_state;
8395    ctx->create_sampler_state = iris_create_sampler_state;
8396    ctx->create_sampler_view = iris_create_sampler_view;
8397    ctx->create_surface = iris_create_surface;
8398    ctx->create_vertex_elements_state = iris_create_vertex_elements;
8399    ctx->bind_blend_state = iris_bind_blend_state;
8400    ctx->bind_depth_stencil_alpha_state = iris_bind_zsa_state;
8401    ctx->bind_sampler_states = iris_bind_sampler_states;
8402    ctx->bind_rasterizer_state = iris_bind_rasterizer_state;
8403    ctx->bind_vertex_elements_state = iris_bind_vertex_elements_state;
8404    ctx->delete_blend_state = iris_delete_state;
8405    ctx->delete_depth_stencil_alpha_state = iris_delete_state;
8406    ctx->delete_rasterizer_state = iris_delete_state;
8407    ctx->delete_sampler_state = iris_delete_state;
8408    ctx->delete_vertex_elements_state = iris_delete_state;
8409    ctx->set_blend_color = iris_set_blend_color;
8410    ctx->set_clip_state = iris_set_clip_state;
8411    ctx->set_constant_buffer = iris_set_constant_buffer;
8412    ctx->set_shader_buffers = iris_set_shader_buffers;
8413    ctx->set_shader_images = iris_set_shader_images;
8414    ctx->set_sampler_views = iris_set_sampler_views;
8415    ctx->set_compute_resources = iris_set_compute_resources;
8416    ctx->set_global_binding = iris_set_global_binding;
8417    ctx->set_tess_state = iris_set_tess_state;
8418    ctx->set_patch_vertices = iris_set_patch_vertices;
8419    ctx->set_framebuffer_state = iris_set_framebuffer_state;
8420    ctx->set_polygon_stipple = iris_set_polygon_stipple;
8421    ctx->set_sample_mask = iris_set_sample_mask;
8422    ctx->set_scissor_states = iris_set_scissor_states;
8423    ctx->set_stencil_ref = iris_set_stencil_ref;
8424    ctx->set_vertex_buffers = iris_set_vertex_buffers;
8425    ctx->set_viewport_states = iris_set_viewport_states;
8426    ctx->sampler_view_destroy = iris_sampler_view_destroy;
8427    ctx->surface_destroy = iris_surface_destroy;
8428    ctx->draw_vbo = iris_draw_vbo;
8429    ctx->launch_grid = iris_launch_grid;
8430    ctx->create_stream_output_target = iris_create_stream_output_target;
8431    ctx->stream_output_target_destroy = iris_stream_output_target_destroy;
8432    ctx->set_stream_output_targets = iris_set_stream_output_targets;
8433    ctx->set_frontend_noop = iris_set_frontend_noop;
8434 
8435    ice->state.dirty = ~0ull;
8436    ice->state.stage_dirty = ~0ull;
8437 
8438    ice->state.statistics_counters_enabled = true;
8439 
8440    ice->state.sample_mask = 0xffff;
8441    ice->state.num_viewports = 1;
8442    ice->state.prim_mode = PIPE_PRIM_MAX;
8443    ice->state.genx = calloc(1, sizeof(struct iris_genx_state));
8444    ice->draw.derived_params.drawid = -1;
8445 
8446    /* Make a 1x1x1 null surface for unbound textures */
8447    void *null_surf_map =
8448       upload_state(ice->state.surface_uploader, &ice->state.unbound_tex,
8449                    4 * GENX(RENDER_SURFACE_STATE_length), 64);
8450    isl_null_fill_state(&screen->isl_dev, null_surf_map,
8451                        .size = isl_extent3d(1, 1, 1));
8452    ice->state.unbound_tex.offset +=
8453       iris_bo_offset_from_base_address(iris_resource_bo(ice->state.unbound_tex.res));
8454 
8455    /* Default all scissor rectangles to be empty regions. */
8456    for (int i = 0; i < IRIS_MAX_VIEWPORTS; i++) {
8457       ice->state.scissors[i] = (struct pipe_scissor_state) {
8458          .minx = 1, .maxx = 0, .miny = 1, .maxy = 0,
8459       };
8460    }
8461 }
8462