/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef ANV_PRIVATE_H
#define ANV_PRIVATE_H

#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <pthread.h>
#include <assert.h>
#include <stdint.h>
#include "drm-uapi/i915_drm.h"
#include "drm-uapi/drm_fourcc.h"

#ifdef HAVE_VALGRIND
#include <valgrind.h>
#include <memcheck.h>
#define VG(x) x
#ifndef NDEBUG
#define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))
#endif
#else
#define VG(x) ((void)0)
#endif

#include "common/intel_clflush.h"
#include "common/intel_decoder.h"
#include "common/intel_gem.h"
#include "common/intel_l3_config.h"
#include "common/intel_measure.h"
#include "dev/intel_device_info.h"
#include "blorp/blorp.h"
#include "compiler/brw_compiler.h"
#include "compiler/brw_rt.h"
#include "ds/intel_driver_ds.h"
#include "util/bitset.h"
#include "util/bitscan.h"
#include "util/macros.h"
#include "util/hash_table.h"
#include "util/list.h"
#include "util/perf/u_trace.h"
#include "util/sparse_array.h"
#include "util/u_atomic.h"
#include "util/u_vector.h"
#include "util/u_math.h"
#include "util/vma.h"
#include "util/xmlconfig.h"
#include "vk_alloc.h"
#include "vk_command_buffer.h"
#include "vk_command_pool.h"
#include "vk_debug_report.h"
#include "vk_device.h"
#include "vk_drm_syncobj.h"
#include "vk_enum_defines.h"
#include "vk_framebuffer.h"
#include "vk_image.h"
#include "vk_instance.h"
#include "vk_physical_device.h"
#include "vk_shader_module.h"
#include "vk_sync.h"
#include "vk_sync_timeline.h"
#include "vk_util.h"
#include "vk_queue.h"
#include "vk_log.h"

/* Pre-declarations needed for WSI entrypoints */
struct wl_surface;
struct wl_display;
typedef struct xcb_connection_t xcb_connection_t;
typedef uint32_t xcb_visualid_t;
typedef uint32_t xcb_window_t;

struct anv_batch;
struct anv_buffer;
struct anv_buffer_view;
struct anv_image_view;
struct anv_acceleration_structure;
struct anv_instance;

struct intel_aux_map_context;
struct intel_perf_config;
struct intel_perf_counter_pass;
struct intel_perf_query_result;

#include <vulkan/vulkan.h>
#include <vulkan/vk_icd.h>

#include "anv_android.h"
#include "anv_entrypoints.h"
#include "isl/isl.h"

#include "dev/intel_debug.h"
#undef MESA_LOG_TAG
#define MESA_LOG_TAG "MESA-INTEL"
#include "util/log.h"
#include "wsi_common.h"

#define NSEC_PER_SEC 1000000000ull

/* anv Virtual Memory Layout
 * =========================
 *
 * When the anv driver is determining the virtual graphics addresses of memory
 * objects itself using the softpin mechanism, the following memory ranges
 * will be used.
 *
 * Three special considerations to notice:
 *
 * (1) the dynamic state pool is located within the same 4 GiB as the low
 * heap. This is to work around a VF cache issue described in a comment in
 * anv_physical_device_init_heaps.
 *
 * (2) the binding table pool is located at lower addresses than the surface
 * state pool, within a 4 GiB range. This allows surface state base addresses
 * to cover both binding tables (16 bit offsets) and surface states (32 bit
 * offsets).
 *
 * (3) the last 4 GiB of the address space is withheld from the high
 * heap. Various hardware units will read past the end of an object for
 * various reasons. This healthy margin prevents reads from wrapping around
 * 48-bit addresses.
 */
#define GENERAL_STATE_POOL_MIN_ADDRESS     0x000000200000ULL /* 2 MiB */
#define GENERAL_STATE_POOL_MAX_ADDRESS     0x00003fffffffULL
#define LOW_HEAP_MIN_ADDRESS               0x000040000000ULL /* 1 GiB */
#define LOW_HEAP_MAX_ADDRESS               0x00007fffffffULL
#define DYNAMIC_STATE_POOL_MIN_ADDRESS     0x0000c0000000ULL /* 3 GiB */
#define DYNAMIC_STATE_POOL_MAX_ADDRESS     0x0000ffffffffULL
#define BINDING_TABLE_POOL_MIN_ADDRESS     0x000100000000ULL /* 4 GiB */
#define BINDING_TABLE_POOL_MAX_ADDRESS     0x00013fffffffULL
#define SURFACE_STATE_POOL_MIN_ADDRESS     0x000140000000ULL /* 5 GiB */
#define SURFACE_STATE_POOL_MAX_ADDRESS     0x00017fffffffULL
#define INSTRUCTION_STATE_POOL_MIN_ADDRESS 0x000180000000ULL /* 6 GiB */
#define INSTRUCTION_STATE_POOL_MAX_ADDRESS 0x0001bfffffffULL
#define CLIENT_VISIBLE_HEAP_MIN_ADDRESS    0x0001c0000000ULL /* 7 GiB */
#define CLIENT_VISIBLE_HEAP_MAX_ADDRESS    0x0002bfffffffULL
#define HIGH_HEAP_MIN_ADDRESS              0x0002c0000000ULL /* 11 GiB */

#define GENERAL_STATE_POOL_SIZE     \
   (GENERAL_STATE_POOL_MAX_ADDRESS - GENERAL_STATE_POOL_MIN_ADDRESS + 1)
#define LOW_HEAP_SIZE               \
   (LOW_HEAP_MAX_ADDRESS - LOW_HEAP_MIN_ADDRESS + 1)
#define DYNAMIC_STATE_POOL_SIZE     \
   (DYNAMIC_STATE_POOL_MAX_ADDRESS - DYNAMIC_STATE_POOL_MIN_ADDRESS + 1)
#define BINDING_TABLE_POOL_SIZE     \
   (BINDING_TABLE_POOL_MAX_ADDRESS - BINDING_TABLE_POOL_MIN_ADDRESS + 1)
#define BINDING_TABLE_POOL_BLOCK_SIZE (65536)
#define SURFACE_STATE_POOL_SIZE     \
   (SURFACE_STATE_POOL_MAX_ADDRESS - SURFACE_STATE_POOL_MIN_ADDRESS + 1)
#define INSTRUCTION_STATE_POOL_SIZE \
   (INSTRUCTION_STATE_POOL_MAX_ADDRESS - INSTRUCTION_STATE_POOL_MIN_ADDRESS + 1)
#define CLIENT_VISIBLE_HEAP_SIZE    \
   (CLIENT_VISIBLE_HEAP_MAX_ADDRESS - CLIENT_VISIBLE_HEAP_MIN_ADDRESS + 1)
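
/* Two illustrative compile-time checks for the layout rules above — a
 * minimal sketch assuming C11 _Static_assert is available, not part of the
 * driver proper. They encode considerations (1) and (2): the dynamic state
 * pool shares the first 4 GiB with the low heap, and binding tables sit
 * below surface states within a single 4 GiB range.
 */
_Static_assert(DYNAMIC_STATE_POOL_MAX_ADDRESS < (1ULL << 32),
               "dynamic state pool must share the low 4 GiB with the low heap");
_Static_assert(BINDING_TABLE_POOL_MIN_ADDRESS < SURFACE_STATE_POOL_MIN_ADDRESS &&
               SURFACE_STATE_POOL_MAX_ADDRESS - BINDING_TABLE_POOL_MIN_ADDRESS <
               (1ULL << 32),
               "binding tables must precede surface states within 4 GiB");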
175
/* Allowing different clear colors requires us to perform a depth resolve at
 * the end of certain render passes. This is because while slow clears store
 * the clear color in the HiZ buffer, fast clears (without a resolve) don't.
 * See the PRMs for examples describing when additional resolves would be
 * necessary. To enable fast clears without requiring extra resolves, we set
 * the clear value to a globally-defined one. We could allow different values
 * if the user doesn't expect coherent data during or after a render pass
 * (VK_ATTACHMENT_STORE_OP_DONT_CARE), but such users (aside from the CTS)
 * don't seem to exist yet. In almost all Vulkan applications tested thus far,
 * 1.0f seems to be the only value used. The only application that doesn't set
 * this value does so through the usage of a seemingly uninitialized clear
 * value.
 */
#define ANV_HZ_FC_VAL 1.0f
190
/* 3DSTATE_VERTEX_BUFFER supports 33 VBs, we use 2 for base & drawid SGVs */
#define MAX_VBS (33 - 2)

/* 3DSTATE_VERTEX_ELEMENTS supports up to 34 VEs, but our backend compiler
 * only supports the push model of VS inputs, and we only have 128 GRFs,
 * minus the g0 and g1 payload, which gives us a maximum of 31 VEs. Plus,
 * we use two of them for SGVs.
 */
#define MAX_VES (31 - 2)

#define MAX_XFB_BUFFERS 4
#define MAX_XFB_STREAMS 4
#define MAX_SETS 32
#define MAX_RTS 8
#define MAX_VIEWPORTS 16
#define MAX_SCISSORS 16
#define MAX_PUSH_CONSTANTS_SIZE 128
#define MAX_DYNAMIC_BUFFERS 16
#define MAX_IMAGES 64
#define MAX_PUSH_DESCRIPTORS 32 /* Minimum requirement */
#define MAX_INLINE_UNIFORM_BLOCK_SIZE 4096
#define MAX_INLINE_UNIFORM_BLOCK_DESCRIPTORS 32
/* We need 16 for UBO block reads to work and 32 for push UBOs. However, we
 * use 64 here to avoid cache issues. This could most likely bring it back to
 * 32 if we had different virtual addresses for the different views on a given
 * GEM object.
 */
#define ANV_UBO_ALIGNMENT 64
#define ANV_SSBO_ALIGNMENT 4
#define ANV_SSBO_BOUNDS_CHECK_ALIGNMENT 4
#define MAX_VIEWS_FOR_PRIMITIVE_REPLICATION 16
#define MAX_SAMPLE_LOCATIONS 16

/* From the Skylake PRM Vol. 7 "Binding Table Surface State Model":
 *
 *    "The surface state model is used when a Binding Table Index (specified
 *    in the message descriptor) of less than 240 is specified. In this model,
 *    the Binding Table Index is used to index into the binding table, and the
 *    binding table entry contains a pointer to the SURFACE_STATE."
 *
 * Binding table values above 240 are used for various things in the hardware
 * such as stateless, stateless with incoherent cache, SLM, and bindless.
 */
#define MAX_BINDING_TABLE_SIZE 240
/* The kernel relocation API has a limitation of a 32-bit delta value
 * applied to the address before it is written which, in spite of it being
 * unsigned, is treated as signed. Because of the way that this maps to
 * the Vulkan API, we cannot handle an offset into a buffer that does not
 * fit into signed 32 bits. The only mechanism we have for dealing with
 * this at the moment is to limit all VkDeviceMemory objects to a maximum
 * of 2GB each. The Vulkan spec allows us to do this:
 *
 *    "Some platforms may have a limit on the maximum size of a single
 *    allocation. For example, certain systems may fail to create
 *    allocations with a size greater than or equal to 4GB. Such a limit is
 *    implementation-dependent, and if such a failure occurs then the error
 *    VK_ERROR_OUT_OF_DEVICE_MEMORY should be returned."
 */
#define MAX_MEMORY_ALLOCATION_SIZE (1ull << 31)
251
#define ANV_SVGS_VB_INDEX   MAX_VBS
#define ANV_DRAWID_VB_INDEX (MAX_VBS + 1)

/* We reserve this MI ALU register for the purpose of handling predication.
 * Other code which uses the MI ALU should leave it alone.
 */
#define ANV_PREDICATE_RESULT_REG 0x2678 /* MI_ALU_REG15 */

/* We reserve this MI ALU register to pass around an offset computed from
 * VkPerformanceQuerySubmitInfoKHR::counterPassIndex (VK_KHR_performance_query).
 * Other code which uses the MI ALU should leave it alone.
 */
#define ANV_PERF_QUERY_OFFSET_REG 0x2670 /* MI_ALU_REG14 */

#define ANV_GRAPHICS_SHADER_STAGE_COUNT (MESA_SHADER_MESH + 1)
267
/* For gfx12 we set the streamout buffers using 4 separate commands
 * (3DSTATE_SO_BUFFER_INDEX_*) instead of 3DSTATE_SO_BUFFER. However the layout
 * of the 3DSTATE_SO_BUFFER_INDEX_* commands is identical to that of
 * 3DSTATE_SO_BUFFER apart from the SOBufferIndex field, so for now we use the
 * 3DSTATE_SO_BUFFER command, but change the 3DCommandSubOpcode.
 * SO_BUFFER_INDEX_0_CMD is actually the 3DCommandSubOpcode for
 * 3DSTATE_SO_BUFFER_INDEX_0.
 */
#define SO_BUFFER_INDEX_0_CMD 0x60

#define anv_printflike(a, b) __attribute__((__format__(__printf__, a, b)))
278
static inline uint32_t
align_down_npot_u32(uint32_t v, uint32_t a)
{
   return v - (v % a);
}

static inline uint32_t
align_down_u32(uint32_t v, uint32_t a)
{
   assert(a != 0 && a == (a & -a));
   return v & ~(a - 1);
}

static inline uint32_t
align_u32(uint32_t v, uint32_t a)
{
   assert(a != 0 && a == (a & -a));
   return align_down_u32(v + a - 1, a);
}

static inline uint64_t
align_down_u64(uint64_t v, uint64_t a)
{
   assert(a != 0 && a == (a & -a));
   return v & ~(a - 1);
}

static inline uint64_t
align_u64(uint64_t v, uint64_t a)
{
   return align_down_u64(v + a - 1, a);
}

static inline int32_t
align_i32(int32_t v, int32_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

/** Alignment must be a power of 2. */
static inline bool
anv_is_aligned(uintmax_t n, uintmax_t a)
{
   assert(a == (a & -a));
   return (n & (a - 1)) == 0;
}
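
/* A small sanity-check sketch (hypothetical helper, not driver code)
 * showing what the alignment helpers above compute.
 */
static inline void
anv_example_alignment_checks(void)
{
   assert(align_u32(13, 16) == 16);           /* round up to a power of 2 */
   assert(align_down_u32(13, 16) == 0);       /* round down to a power of 2 */
   assert(align_down_npot_u32(13, 12) == 12); /* non-power-of-two alignment */
   assert(anv_is_aligned(64, 16));            /* 64 is a multiple of 16 */
}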
326
static inline uint32_t
anv_minify(uint32_t n, uint32_t levels)
{
   if (unlikely(n == 0))
      return 0;
   else
      return MAX2(n >> levels, 1);
}
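
/* Sketch: anv_minify() gives mipmap level extents, clamping at 1 so deep
 * levels of small surfaces stay valid (hypothetical helper, not driver
 * code).
 */
static inline void
anv_example_minify_checks(void)
{
   assert(anv_minify(256, 4) == 16); /* level 4 of a 256-wide surface */
   assert(anv_minify(256, 10) == 1); /* clamps to 1 past the last level */
   assert(anv_minify(0, 2) == 0);    /* 0 stays 0 */
}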
335
static inline float
anv_clamp_f(float f, float min, float max)
{
   assert(min < max);

   if (f > max)
      return max;
   else if (f < min)
      return min;
   else
      return f;
}

static inline bool
anv_clear_mask(uint32_t *inout_mask, uint32_t clear_mask)
{
   if (*inout_mask & clear_mask) {
      *inout_mask &= ~clear_mask;
      return true;
   } else {
      return false;
   }
}
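
/* Sketch: anv_clear_mask() consumes groups of bits out of a dirty mask,
 * returning whether any were set. Hypothetical example, not driver code.
 */
static inline uint32_t
anv_example_consume_mask(uint32_t mask)
{
   uint32_t groups_handled = 0;
   if (anv_clear_mask(&mask, 0x3)) /* handle and clear bits 0-1 */
      groups_handled++;
   if (anv_clear_mask(&mask, 0xc)) /* handle and clear bits 2-3 */
      groups_handled++;
   return groups_handled;
}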
359
static inline union isl_color_value
vk_to_isl_color(VkClearColorValue color)
{
   return (union isl_color_value) {
      .u32 = {
         color.uint32[0],
         color.uint32[1],
         color.uint32[2],
         color.uint32[3],
      },
   };
}

static inline union isl_color_value
vk_to_isl_color_with_format(VkClearColorValue color, enum isl_format format)
{
   const struct isl_format_layout *fmtl = isl_format_get_layout(format);
   union isl_color_value isl_color = { .u32 = {0, } };

#define COPY_COLOR_CHANNEL(c, i) \
   if (fmtl->channels.c.bits) \
      isl_color.u32[i] = color.uint32[i]

   COPY_COLOR_CHANNEL(r, 0);
   COPY_COLOR_CHANNEL(g, 1);
   COPY_COLOR_CHANNEL(b, 2);
   COPY_COLOR_CHANNEL(a, 3);

#undef COPY_COLOR_CHANNEL

   return isl_color;
}

static inline void *anv_unpack_ptr(uintptr_t ptr, int bits, int *flags)
{
   uintptr_t mask = (1ull << bits) - 1;
   *flags = ptr & mask;
   return (void *) (ptr & ~mask);
}

static inline uintptr_t anv_pack_ptr(void *ptr, int bits, int flags)
{
   uintptr_t value = (uintptr_t) ptr;
   uintptr_t mask = (1ull << bits) - 1;
   return value | (mask & flags);
}
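
/* Round-trip sketch for the pointer packing helpers above, assuming the
 * pointer is at least 8-byte aligned so its low 3 bits are free for flags.
 * Hypothetical helper, not driver code.
 */
static inline bool
anv_example_pack_ptr_roundtrip(void *ptr)
{
   uintptr_t packed = anv_pack_ptr(ptr, 3, 0x5); /* stash flags 0b101 */
   int flags;
   void *unpacked = anv_unpack_ptr(packed, 3, &flags);
   return unpacked == ptr && flags == 0x5;
}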
406
/**
 * Warn on ignored extension structs.
 *
 * The Vulkan spec requires us to ignore unsupported or unknown structs in
 * a pNext chain. In debug mode, emitting warnings for ignored structs may
 * help us discover structs that we should not have ignored.
 *
 * From the Vulkan 1.0.38 spec:
 *
 *    Any component of the implementation (the loader, any enabled layers,
 *    and drivers) must skip over, without processing (other than reading the
 *    sType and pNext members) any chained structures with sType values not
 *    defined by extensions supported by that component.
 */
#define anv_debug_ignored_stype(sType) \
   mesa_logd("%s: ignored VkStructureType %u\n", __func__, (sType))

void __anv_perf_warn(struct anv_device *device,
                     const struct vk_object_base *object,
                     const char *file, int line, const char *format, ...)
   anv_printflike(5, 6);

/**
 * Print a FINISHME message, including its source location.
 */
#define anv_finishme(format, ...) \
   do { \
      static bool reported = false; \
      if (!reported) { \
         mesa_logw("%s:%d: FINISHME: " format, __FILE__, __LINE__, \
                   ##__VA_ARGS__); \
         reported = true; \
      } \
   } while (0)

/**
 * Print a perf warning message. Set INTEL_DEBUG=perf to see these.
 */
#define anv_perf_warn(objects_macro, format, ...) \
   do { \
      static bool reported = false; \
      if (!reported && INTEL_DEBUG(DEBUG_PERF)) { \
         __vk_log(VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT, \
                  VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT, \
                  objects_macro, __FILE__, __LINE__, \
                  format, ## __VA_ARGS__); \
         reported = true; \
      } \
   } while (0)

/* A non-fatal assert. Useful for debugging. */
#ifdef DEBUG
#define anv_assert(x) ({ \
   if (unlikely(!(x))) \
      mesa_loge("%s:%d ASSERT: %s", __FILE__, __LINE__, #x); \
})
#else
#define anv_assert(x)
#endif
467
struct anv_bo {
   const char *name;

   uint32_t gem_handle;

   uint32_t refcount;

   /* Index into the current validation list. This is used by the
    * validation list building algorithm to track which buffers are already
    * in the validation list so that we can ensure uniqueness.
    */
   uint32_t exec_obj_index;
480
   /* Index for use with util_sparse_array_free_list */
   uint32_t free_index;

   /* Last known offset. This value is provided by the kernel when we
    * execbuf and is used as the presumed offset for the next bunch of
    * relocations.
    */
   uint64_t offset;

   /** Size of the buffer not including implicit aux */
   uint64_t size;

   /* Map for internally mapped BOs.
    *
    * If ANV_BO_ALLOC_MAPPED is set in flags, this is the map for the whole
    * BO. If ANV_BO_WRAPPER is set in flags, map points to the wrapped BO.
    */
   void *map;

   /** Size of the implicit CCS range at the end of the buffer
    *
    * On Gfx12, CCS data is always a direct 1/256 scale-down. A single 64K
    * page of main surface data maps to a 256B chunk of CCS data and that
    * mapping is provided on TGL-LP by the AUX table which maps virtual memory
    * addresses in the main surface to virtual memory addresses for CCS data.
    *
    * Because we can't change these maps around easily and because Vulkan
    * allows two VkImages to be bound to overlapping memory regions (as long
    * as the app is careful), it's not feasible to make this mapping part of
    * the image. (On Gfx11 and earlier, the mapping was provided via
    * RENDER_SURFACE_STATE so each image had its own main -> CCS mapping.)
    * Instead, we attach the CCS data directly to the buffer object and setup
    * the AUX table mapping at BO creation time.
    *
    * This field is for internal tracking use by the BO allocator only and
    * should not be touched by other parts of the code. If something wants to
    * know if a BO has implicit CCS data, it should instead look at the
    * has_implicit_ccs boolean below.
    *
    * This data is not included in maps of this buffer.
    */
   uint32_t _ccs_size;

   /** Flags to pass to the kernel through drm_i915_exec_object2::flags */
   uint32_t flags;

   /** True if this BO may be shared with other processes */
   bool is_external:1;

   /** True if this BO is a wrapper
    *
    * When set to true, none of the fields in this BO are meaningful except
    * for anv_bo::is_wrapper and anv_bo::map which points to the actual BO.
    * See also anv_bo_unwrap(). Wrapper BOs are not allowed when use_softpin
    * is set in the physical device.
    */
   bool is_wrapper:1;

   /** See also ANV_BO_ALLOC_FIXED_ADDRESS */
   bool has_fixed_address:1;

   /** True if this BO wraps a host pointer */
   bool from_host_ptr:1;

   /** See also ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS */
   bool has_client_visible_address:1;

   /** True if this BO has implicit CCS data attached to it */
   bool has_implicit_ccs:1;
};
551
static inline struct anv_bo *
anv_bo_ref(struct anv_bo *bo)
{
   p_atomic_inc(&bo->refcount);
   return bo;
}

static inline struct anv_bo *
anv_bo_unwrap(struct anv_bo *bo)
{
   while (bo->is_wrapper)
      bo = bo->map;
   return bo;
}

static inline bool
anv_bo_is_pinned(struct anv_bo *bo)
{
#if defined(GFX_VERx10) && GFX_VERx10 >= 90
   /* Sky Lake and later always uses softpin */
   assert(bo->flags & EXEC_OBJECT_PINNED);
   return true;
#elif defined(GFX_VERx10) && GFX_VERx10 < 80
   /* Haswell and earlier never use softpin */
   assert(!(bo->flags & EXEC_OBJECT_PINNED));
   assert(!bo->has_fixed_address);
   return false;
#else
   /* If we don't have a GFX_VERx10 #define, we need to look at the BO. Also,
    * for GFX version 8, we need to look at the BO because Broadwell softpins
    * but Cherryview doesn't.
    */
   assert((bo->flags & EXEC_OBJECT_PINNED) || !bo->has_fixed_address);
   return (bo->flags & EXEC_OBJECT_PINNED) != 0;
#endif
}
588
struct anv_address {
   struct anv_bo *bo;
   int64_t offset;
};

#define ANV_NULL_ADDRESS ((struct anv_address) { NULL, 0 })

static inline struct anv_address
anv_address_from_u64(uint64_t addr_u64)
{
   assert(addr_u64 == intel_canonical_address(addr_u64));
   return (struct anv_address) {
      .bo = NULL,
      .offset = addr_u64,
   };
}

static inline bool
anv_address_is_null(struct anv_address addr)
{
   return addr.bo == NULL && addr.offset == 0;
}

static inline uint64_t
anv_address_physical(struct anv_address addr)
{
   if (addr.bo && anv_bo_is_pinned(addr.bo)) {
      return intel_canonical_address(addr.bo->offset + addr.offset);
   } else {
      return intel_canonical_address(addr.offset);
   }
}

static inline struct anv_address
anv_address_add(struct anv_address addr, uint64_t offset)
{
   addr.offset += offset;
   return addr;
}
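
/* Sketch: composing a GPU address for a location inside a BO. Assumes the
 * BO is pinned (softpin); hypothetical helper, not driver code.
 */
static inline uint64_t
anv_example_gpu_address(struct anv_bo *bo, uint64_t offset)
{
   struct anv_address addr = { .bo = bo, .offset = 0 };
   return anv_address_physical(anv_address_add(addr, offset));
}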
628
/* Represents a lock-free linked list of "free" things. This is used by
 * both the block pool and the state pools. Unfortunately, in order to
 * solve the ABA problem, we can't use a single uint32_t head.
 */
union anv_free_list {
   struct {
      uint32_t offset;

      /* A simple count that is incremented every time the head changes. */
      uint32_t count;
   };
   /* Make sure it's aligned to 64 bits. This will make atomic operations
    * faster on 32 bit platforms.
    */
   uint64_t u64 __attribute__ ((aligned (8)));
};

#define ANV_FREE_LIST_EMPTY ((union anv_free_list) { { UINT32_MAX, 0 } })
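
/* A minimal sketch of how the (offset, count) pair defeats the ABA problem:
 * a 64-bit compare-and-swap replaces the head only if neither the offset nor
 * the generation count changed. Hypothetical helper using the GCC __sync
 * builtin; the real implementation is anv_free_list_pop() below.
 */
static inline bool
anv_example_free_list_cas(union anv_free_list *list,
                          union anv_free_list old_head,
                          uint32_t new_offset)
{
   union anv_free_list new_head;
   new_head.offset = new_offset;
   new_head.count = old_head.count + 1; /* bump generation to foil racers */
   return __sync_bool_compare_and_swap(&list->u64, old_head.u64,
                                       new_head.u64);
}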
647
struct anv_block_state {
   union {
      struct {
         uint32_t next;
         uint32_t end;
      };
      /* Make sure it's aligned to 64 bits. This will make atomic operations
       * faster on 32 bit platforms.
       */
      uint64_t u64 __attribute__ ((aligned (8)));
   };
};

#define anv_block_pool_foreach_bo(bo, pool) \
   for (struct anv_bo **_pp_bo = (pool)->bos, *bo; \
        _pp_bo != &(pool)->bos[(pool)->nbos] && (bo = *_pp_bo, true); \
        _pp_bo++)

#define ANV_MAX_BLOCK_POOL_BOS 20

struct anv_block_pool {
   const char *name;

   struct anv_device *device;
   bool use_relocations;

   /* Wrapper BO for use in relocation lists. This BO is simply a wrapper
    * around the actual BO so that we grow the pool after the wrapper BO has
    * been put in a relocation list. This is only used in the non-softpin
    * case.
    */
   struct anv_bo wrapper_bo;

   struct anv_bo *bos[ANV_MAX_BLOCK_POOL_BOS];
   struct anv_bo *bo;
   uint32_t nbos;

   uint64_t size;

   /* The address where the start of the pool is pinned. The various bos that
    * are created as the pool grows will have addresses in the range
    * [start_address, start_address + BLOCK_POOL_MEMFD_SIZE).
    */
   uint64_t start_address;

   /* The offset from the start of the bo to the "center" of the block
    * pool. Pointers to allocated blocks are given by
    * bo.map + center_bo_offset + offsets.
    */
   uint32_t center_bo_offset;

   /* Current memory map of the block pool. This pointer may or may not
    * point to the actual beginning of the block pool memory. If
    * anv_block_pool_alloc_back has ever been called, then this pointer
    * will point to the "center" position of the buffer and all offsets
    * (negative or positive) given out by the block pool alloc functions
    * will be valid relative to this pointer.
    *
    * In particular, map == bo.map + center_offset
    *
    * DO NOT access this pointer directly. Use anv_block_pool_map() instead,
    * since it will handle the softpin case as well, where this points to NULL.
    */
   void *map;
   int fd;

   /**
    * Array of mmaps and gem handles owned by the block pool, reclaimed when
    * the block pool is destroyed.
    */
   struct u_vector mmap_cleanups;

   struct anv_block_state state;

   struct anv_block_state back_state;
};

/* Block pools are backed by a fixed-size 1GB memfd */
#define BLOCK_POOL_MEMFD_SIZE (1ul << 30)

/* The center of the block pool is also the middle of the memfd. This may
 * change in the future if we decide differently for some reason.
 */
#define BLOCK_POOL_MEMFD_CENTER (BLOCK_POOL_MEMFD_SIZE / 2)

static inline uint32_t
anv_block_pool_size(struct anv_block_pool *pool)
{
   return pool->state.end + pool->back_state.end;
}
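
/* Sketch: anv_block_pool_foreach_bo() declares the iterator itself, so
 * walking every backing BO looks like this (hypothetical helper, not
 * driver code).
 */
static inline uint64_t
anv_example_block_pool_bo_bytes(struct anv_block_pool *pool)
{
   uint64_t total = 0;
   anv_block_pool_foreach_bo(bo, pool)
      total += bo->size;
   return total;
}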
738
struct anv_state {
   int32_t offset;
   uint32_t alloc_size;
   void *map;
   uint32_t idx;
};

#define ANV_STATE_NULL ((struct anv_state) { .alloc_size = 0 })

struct anv_fixed_size_state_pool {
   union anv_free_list free_list;
   struct anv_block_state block;
};

#define ANV_MIN_STATE_SIZE_LOG2 6
#define ANV_MAX_STATE_SIZE_LOG2 21

#define ANV_STATE_BUCKETS (ANV_MAX_STATE_SIZE_LOG2 - ANV_MIN_STATE_SIZE_LOG2 + 1)

struct anv_free_entry {
   uint32_t next;
   struct anv_state state;
};

struct anv_state_table {
   struct anv_device *device;
   int fd;
   struct anv_free_entry *map;
   uint32_t size;
   struct anv_block_state state;
   struct u_vector cleanups;
};

struct anv_state_pool {
   struct anv_block_pool block_pool;

   /* Offset into the relevant state base address where the state pool starts
    * allocating memory.
    */
   int32_t start_offset;

   struct anv_state_table table;

   /* The size of blocks which will be allocated from the block pool */
   uint32_t block_size;

   /** Free list for "back" allocations */
   union anv_free_list back_alloc_free_list;

   struct anv_fixed_size_state_pool buckets[ANV_STATE_BUCKETS];
};

struct anv_state_reserved_pool {
   struct anv_state_pool *pool;
   union anv_free_list reserved_blocks;
   uint32_t count;
};

struct anv_state_stream {
   struct anv_state_pool *state_pool;

   /* The size of blocks to allocate from the state pool */
   uint32_t block_size;

   /* Current block we're allocating from */
   struct anv_state block;

   /* Offset into the current block at which to allocate the next state */
   uint32_t next;

   /* List of all blocks allocated from this pool */
   struct util_dynarray all_blocks;
};
812
/* The block_pool functions are exported for testing only. The block pool
 * should only be used via a state pool (see below).
 */
VkResult anv_block_pool_init(struct anv_block_pool *pool,
                             struct anv_device *device,
                             const char *name,
                             uint64_t start_address,
                             uint32_t initial_size);
void anv_block_pool_finish(struct anv_block_pool *pool);
int32_t anv_block_pool_alloc(struct anv_block_pool *pool,
                             uint32_t block_size, uint32_t *padding);
int32_t anv_block_pool_alloc_back(struct anv_block_pool *pool,
                                  uint32_t block_size);
void *anv_block_pool_map(struct anv_block_pool *pool, int32_t offset,
                         uint32_t size);

VkResult anv_state_pool_init(struct anv_state_pool *pool,
                             struct anv_device *device,
                             const char *name,
                             uint64_t base_address,
                             int32_t start_offset,
                             uint32_t block_size);
void anv_state_pool_finish(struct anv_state_pool *pool);
struct anv_state anv_state_pool_alloc(struct anv_state_pool *pool,
                                      uint32_t state_size, uint32_t alignment);
struct anv_state anv_state_pool_alloc_back(struct anv_state_pool *pool);
void anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state);
void anv_state_stream_init(struct anv_state_stream *stream,
                           struct anv_state_pool *state_pool,
                           uint32_t block_size);
void anv_state_stream_finish(struct anv_state_stream *stream);
struct anv_state anv_state_stream_alloc(struct anv_state_stream *stream,
                                        uint32_t size, uint32_t alignment);

void anv_state_reserved_pool_init(struct anv_state_reserved_pool *pool,
                                  struct anv_state_pool *parent,
                                  uint32_t count, uint32_t size,
                                  uint32_t alignment);
void anv_state_reserved_pool_finish(struct anv_state_reserved_pool *pool);
struct anv_state anv_state_reserved_pool_alloc(struct anv_state_reserved_pool *pool);
void anv_state_reserved_pool_free(struct anv_state_reserved_pool *pool,
                                  struct anv_state state);

VkResult anv_state_table_init(struct anv_state_table *table,
                              struct anv_device *device,
                              uint32_t initial_entries);
void anv_state_table_finish(struct anv_state_table *table);
VkResult anv_state_table_add(struct anv_state_table *table, uint32_t *idx,
                             uint32_t count);
void anv_free_list_push(union anv_free_list *list,
                        struct anv_state_table *table,
                        uint32_t idx, uint32_t count);
struct anv_state *anv_free_list_pop(union anv_free_list *list,
                                    struct anv_state_table *table);

static inline struct anv_state *
anv_state_table_get(struct anv_state_table *table, uint32_t idx)
{
   return &table->map[idx].state;
}

/**
 * Implements a pool of re-usable BOs. The interface is identical to that
 * of block_pool except that each block is its own BO.
 */
struct anv_bo_pool {
   const char *name;

   struct anv_device *device;

   struct util_sparse_array_free_list free_list[16];
};

void anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device,
                      const char *name);
void anv_bo_pool_finish(struct anv_bo_pool *pool);
VkResult anv_bo_pool_alloc(struct anv_bo_pool *pool, uint32_t size,
                           struct anv_bo **bo_out);
void anv_bo_pool_free(struct anv_bo_pool *pool, struct anv_bo *bo);
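
/* Illustrative allocation round-trip on a BO pool (sketch, assuming the
 * pool was initialized with anv_bo_pool_init(); not driver code). Freed
 * BOs go back on the pool's free lists rather than to the kernel.
 */
static inline VkResult
anv_example_bo_pool_roundtrip(struct anv_bo_pool *pool)
{
   struct anv_bo *bo;
   VkResult result = anv_bo_pool_alloc(pool, 4096, &bo);
   if (result == VK_SUCCESS)
      anv_bo_pool_free(pool, bo);
   return result;
}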
892
struct anv_scratch_pool {
   /* Indexed by Per-Thread Scratch Space number (the hardware value) and stage */
   struct anv_bo *bos[16][MESA_SHADER_STAGES];
   uint32_t surfs[16];
   struct anv_state surf_states[16];
};

void anv_scratch_pool_init(struct anv_device *device,
                           struct anv_scratch_pool *pool);
void anv_scratch_pool_finish(struct anv_device *device,
                             struct anv_scratch_pool *pool);
struct anv_bo *anv_scratch_pool_alloc(struct anv_device *device,
                                      struct anv_scratch_pool *pool,
                                      gl_shader_stage stage,
                                      unsigned per_thread_scratch);
uint32_t anv_scratch_pool_get_surf(struct anv_device *device,
                                   struct anv_scratch_pool *pool,
                                   unsigned per_thread_scratch);

/** Implements a BO cache that ensures a 1-1 mapping of GEM BOs to anv_bos */
struct anv_bo_cache {
   struct util_sparse_array bo_map;
   pthread_mutex_t mutex;
};

VkResult anv_bo_cache_init(struct anv_bo_cache *cache,
                           struct anv_device *device);
void anv_bo_cache_finish(struct anv_bo_cache *cache);

struct anv_queue_family {
   /* Standard bits passed on to the client */
   VkQueueFlags queueFlags;
   uint32_t queueCount;

   /* Driver internal information */
   enum drm_i915_gem_engine_class engine_class;
};

#define ANV_MAX_QUEUE_FAMILIES 3

struct anv_memory_type {
   /* Standard bits passed on to the client */
   VkMemoryPropertyFlags propertyFlags;
   uint32_t heapIndex;
};

struct anv_memory_heap {
   /* Standard bits passed on to the client */
   VkDeviceSize size;
   VkMemoryHeapFlags flags;

   /** Driver-internal book-keeping.
    *
    * Align it to 64 bits to make atomic operations faster on 32 bit platforms.
    */
   VkDeviceSize used __attribute__ ((aligned (8)));

   bool is_local_mem;
};

struct anv_memregion {
   struct drm_i915_gem_memory_class_instance region;
   uint64_t size;
   uint64_t available;
};
958
struct anv_physical_device {
   struct vk_physical_device vk;

   /* Link in anv_instance::physical_devices */
   struct list_head link;

   struct anv_instance *instance;
   char path[20];
   struct intel_device_info info;
   /** Amount of "GPU memory" we want to advertise
    *
    * Clearly, this value is bogus since Intel is a UMA architecture. On
    * gfx7 platforms, we are limited by GTT size unless we want to implement
    * fine-grained tracking and GTT splitting. On Broadwell and above we are
    * practically unlimited. However, we will never report more than 3/4 of
    * the total system ram to try and avoid running out of RAM.
    */
   bool supports_48bit_addresses;
   struct brw_compiler *compiler;
   struct isl_device isl_dev;
   struct intel_perf_config *perf;
   /* True if hardware support is incomplete/alpha */
   bool is_alpha;
   /*
    * Number of commands required to implement a performance query begin +
    * end.
    */
   uint32_t n_perf_query_commands;
   int cmd_parser_version;
   bool has_exec_async;
   bool has_exec_capture;
   int max_context_priority;
   bool has_context_isolation;
   bool has_mmap_offset;
   bool has_userptr_probe;
   uint64_t gtt_size;

   bool use_relocations;
   bool use_softpin;
   bool always_use_bindless;
   bool use_call_secondary;

   /** True if we can access buffers using A64 messages */
   bool has_a64_buffer_access;
   /** True if we can use bindless access for images */
   bool has_bindless_images;
   /** True if we can use bindless access for samplers */
   bool has_bindless_samplers;
   /** True if we can use timeline semaphores through execbuf */
   bool has_exec_timeline;

   /** True if we can read the GPU timestamp register
    *
    * When running in a virtual context, the timestamp register is unreadable
    * on Gfx12+.
    */
   bool has_reg_timestamp;

   /** True if this device has implicit AUX
    *
    * If true, CCS is handled as an implicit attachment to the BO rather than
    * as an explicitly bound surface.
    */
   bool has_implicit_ccs;

   bool always_flush_cache;

   struct {
      uint32_t family_count;
      struct anv_queue_family families[ANV_MAX_QUEUE_FAMILIES];
   } queue;

   struct {
      uint32_t type_count;
      struct anv_memory_type types[VK_MAX_MEMORY_TYPES];
      uint32_t heap_count;
      struct anv_memory_heap heaps[VK_MAX_MEMORY_HEAPS];
      bool need_clflush;
   } memory;

   struct anv_memregion vram;
   struct anv_memregion sys;
   uint8_t driver_build_sha1[20];
   uint8_t pipeline_cache_uuid[VK_UUID_SIZE];
   uint8_t driver_uuid[VK_UUID_SIZE];
   uint8_t device_uuid[VK_UUID_SIZE];

   struct vk_sync_type sync_syncobj_type;
   struct vk_sync_timeline_type sync_timeline_type;
   const struct vk_sync_type *sync_types[4];

   struct disk_cache *disk_cache;

   struct wsi_device wsi_device;
   int local_fd;
   bool has_local;
   int64_t local_major;
   int64_t local_minor;
   int master_fd;
   bool has_master;
   int64_t master_major;
   int64_t master_minor;
   struct drm_i915_query_engine_info *engine_info;

   void (*cmd_emit_timestamp)(struct anv_batch *, struct anv_device *,
                              struct anv_address, bool);
   struct intel_measure_device measure_device;
};
1066
struct anv_app_info {
   const char *app_name;
   uint32_t app_version;
   const char *engine_name;
   uint32_t engine_version;
   uint32_t api_version;
};

struct anv_instance {
   struct vk_instance vk;

   bool physical_devices_enumerated;
   struct list_head physical_devices;

   bool pipeline_cache_enabled;

   struct driOptionCache dri_options;
   struct driOptionCache available_dri_options;
};

VkResult anv_init_wsi(struct anv_physical_device *physical_device);
void anv_finish_wsi(struct anv_physical_device *physical_device);

struct anv_queue {
   struct vk_queue vk;

   struct anv_device *device;

   const struct anv_queue_family *family;

   uint32_t index_in_family;

   uint32_t exec_flags;

   struct intel_ds_queue *ds;
};

struct anv_pipeline_cache {
   struct vk_object_base base;
   struct anv_device *device;
   pthread_mutex_t mutex;

   struct hash_table *nir_cache;

   struct hash_table *cache;

   bool external_sync;
};

struct nir_xfb_info;
struct anv_pipeline_bind_map;

void anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
                             struct anv_device *device,
                             bool cache_enabled,
                             bool external_sync);
void anv_pipeline_cache_finish(struct anv_pipeline_cache *cache);

struct anv_shader_bin *
anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
                          const void *key, uint32_t key_size);
struct anv_shader_bin *
anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
                                 gl_shader_stage stage,
                                 const void *key_data, uint32_t key_size,
                                 const void *kernel_data, uint32_t kernel_size,
                                 const struct brw_stage_prog_data *prog_data,
                                 uint32_t prog_data_size,
                                 const struct brw_compile_stats *stats,
                                 uint32_t num_stats,
                                 const struct nir_xfb_info *xfb_info,
                                 const struct anv_pipeline_bind_map *bind_map);

struct anv_shader_bin *
anv_device_search_for_kernel(struct anv_device *device,
                             struct anv_pipeline_cache *cache,
                             const void *key_data, uint32_t key_size,
                             bool *user_cache_bit);

struct anv_shader_bin *
anv_device_upload_kernel(struct anv_device *device,
                         struct anv_pipeline_cache *cache,
                         gl_shader_stage stage,
                         const void *key_data, uint32_t key_size,
                         const void *kernel_data, uint32_t kernel_size,
                         const struct brw_stage_prog_data *prog_data,
                         uint32_t prog_data_size,
                         const struct brw_compile_stats *stats,
                         uint32_t num_stats,
                         const struct nir_xfb_info *xfb_info,
                         const struct anv_pipeline_bind_map *bind_map);

struct nir_shader;
struct nir_shader_compiler_options;

struct nir_shader *
anv_device_search_for_nir(struct anv_device *device,
                          struct anv_pipeline_cache *cache,
                          const struct nir_shader_compiler_options *nir_options,
                          unsigned char sha1_key[20],
                          void *mem_ctx);

void
anv_device_upload_nir(struct anv_device *device,
                      struct anv_pipeline_cache *cache,
                      const struct nir_shader *nir,
                      unsigned char sha1_key[20]);
1174
struct anv_device {
   struct vk_device vk;

   struct anv_physical_device *physical;
   struct intel_device_info info;
   struct isl_device isl_dev;
   int context_id;
   int fd;
   bool can_chain_batches;
   bool robust_buffer_access;

   pthread_mutex_t vma_mutex;
   struct util_vma_heap vma_lo;
   struct util_vma_heap vma_cva;
   struct util_vma_heap vma_hi;

   /** List of all anv_device_memory objects */
   struct list_head memory_objects;

   struct anv_bo_pool batch_bo_pool;

   struct anv_bo_cache bo_cache;

   struct anv_state_pool general_state_pool;
   struct anv_state_pool dynamic_state_pool;
   struct anv_state_pool instruction_state_pool;
   struct anv_state_pool binding_table_pool;
   struct anv_state_pool surface_state_pool;

   struct anv_state_reserved_pool custom_border_colors;

   /** BO used for various workarounds
    *
    * There are a number of workarounds on our hardware which require writing
    * data somewhere and it doesn't really matter where. For that, we use
    * this BO and just write to the first dword or so.
    *
    * We also need to be able to handle NULL buffers bound as pushed UBOs.
    * For that, we use the high bytes (>= 1024) of the workaround BO.
    */
   struct anv_bo *workaround_bo;
   struct anv_address workaround_address;

   struct anv_bo *trivial_batch_bo;
   struct anv_state null_surface_state;

   struct anv_pipeline_cache default_pipeline_cache;
   struct blorp_context blorp;

   struct anv_state border_colors;

   struct anv_state slice_hash;
1227
   /** An array of CPS_STATE structures grouped by MAX_VIEWPORTS elements
    *
    * We need to emit CPS_STATE structures for each viewport accessible by a
    * pipeline. So rather than write many identical CPS_STATE structures
    * dynamically, we can enumerate all possible combinations and then just
    * emit a 3DSTATE_CPS_POINTERS instruction with the right offset into this
    * array.
    */
   struct anv_state cps_states;

   uint32_t queue_count;
   struct anv_queue *queues;

   struct anv_scratch_pool scratch_pool;
   struct anv_bo *rt_scratch_bos[16];

   /** Shadow ray query BO
    *
    * The ray_query_bo only holds the current ray being traced. When using
    * more than 1 ray query per thread, we cannot fit all the queries in
    * there, so we need another buffer to hold query data that is not
    * currently being used by the HW for tracing, similar to a scratch space.
    *
    * The size of the shadow buffer depends on the number of queries per
    * shader.
    */
   struct anv_bo *ray_query_shadow_bos[16];
   /** Ray query buffer used to communicate with the HW unit. */
   struct anv_bo *ray_query_bo;

   struct anv_shader_bin *rt_trampoline;
   struct anv_shader_bin *rt_trivial_return;

   pthread_mutex_t mutex;
   pthread_cond_t queue_submit;

   struct intel_batch_decode_ctx decoder_ctx;
   /*
    * When decoding an anv_cmd_buffer, we might need to search for BOs through
    * the cmd_buffer's list.
    */
   struct anv_cmd_buffer *cmd_buffer_being_decoded;

   int perf_fd; /* -1 if not opened */
   uint64_t perf_metric; /* 0 if unset */
1274
   struct intel_aux_map_context *aux_map_ctx;

   const struct intel_l3_config *l3_config;

   struct intel_debug_block_frame *debug_frame_desc;

   struct intel_ds_device ds;
};
1283
#if defined(GFX_VERx10) && GFX_VERx10 >= 90
#define ANV_ALWAYS_SOFTPIN true
#else
#define ANV_ALWAYS_SOFTPIN false
#endif

static inline bool
anv_use_relocations(const struct anv_physical_device *pdevice)
{
#if defined(GFX_VERx10) && GFX_VERx10 >= 90
   /* Sky Lake and later always uses softpin */
   assert(!pdevice->use_relocations);
   return false;
#elif defined(GFX_VERx10) && GFX_VERx10 < 80
   /* Haswell and earlier never use softpin */
   assert(pdevice->use_relocations);
   return true;
#else
   /* If we don't have a GFX_VERx10 #define, we need to look at the physical
    * device. Also, for GFX version 8, we need to look at the physical
    * device because Broadwell softpins but Cherryview doesn't.
    */
   return pdevice->use_relocations;
#endif
}

static inline struct anv_state_pool *
anv_binding_table_pool(struct anv_device *device)
{
   if (anv_use_relocations(device->physical))
      return &device->surface_state_pool;
   else
      return &device->binding_table_pool;
}

static inline struct anv_state
anv_binding_table_pool_alloc(struct anv_device *device)
{
   if (anv_use_relocations(device->physical))
      return anv_state_pool_alloc_back(&device->surface_state_pool);
   else
      return anv_state_pool_alloc(&device->binding_table_pool,
                                  device->binding_table_pool.block_size, 0);
}

static inline void
anv_binding_table_pool_free(struct anv_device *device, struct anv_state state)
{
   anv_state_pool_free(anv_binding_table_pool(device), state);
}

static inline uint32_t
anv_mocs(const struct anv_device *device,
         const struct anv_bo *bo,
         isl_surf_usage_flags_t usage)
{
   return isl_mocs(&device->isl_dev, usage, bo && bo->is_external);
}
1341
void anv_device_init_blorp(struct anv_device *device);
void anv_device_finish_blorp(struct anv_device *device);

enum anv_bo_alloc_flags {
   /** Specifies that the BO must have a 32-bit address
    *
    * This is the opposite of EXEC_OBJECT_SUPPORTS_48B_ADDRESS.
    */
   ANV_BO_ALLOC_32BIT_ADDRESS = (1 << 0),

   /** Specifies that the BO may be shared externally */
   ANV_BO_ALLOC_EXTERNAL = (1 << 1),

   /** Specifies that the BO should be mapped */
   ANV_BO_ALLOC_MAPPED = (1 << 2),

   /** Specifies that the BO should be snooped so we get coherency */
   ANV_BO_ALLOC_SNOOPED = (1 << 3),

   /** Specifies that the BO should be captured in error states */
   ANV_BO_ALLOC_CAPTURE = (1 << 4),

   /** Specifies that the BO will have an address assigned by the caller
    *
    * Such BOs do not exist in any VMA heap.
    */
   ANV_BO_ALLOC_FIXED_ADDRESS = (1 << 5),

   /** Enables implicit synchronization on the BO
    *
    * This is the opposite of EXEC_OBJECT_ASYNC.
    */
   ANV_BO_ALLOC_IMPLICIT_SYNC = (1 << 6),

   /** Enables implicit synchronization on the BO
    *
    * This is equivalent to EXEC_OBJECT_WRITE.
    */
   ANV_BO_ALLOC_IMPLICIT_WRITE = (1 << 7),

   /** Has an address which is visible to the client */
   ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS = (1 << 8),

   /** This buffer has implicit CCS data attached to it */
   ANV_BO_ALLOC_IMPLICIT_CCS = (1 << 9),

   /** This buffer is allocated from local memory */
   ANV_BO_ALLOC_LOCAL_MEM = (1 << 10),
};

VkResult anv_device_alloc_bo(struct anv_device *device,
                             const char *name, uint64_t size,
                             enum anv_bo_alloc_flags alloc_flags,
                             uint64_t explicit_address,
                             struct anv_bo **bo);
VkResult anv_device_map_bo(struct anv_device *device,
                           struct anv_bo *bo,
                           uint64_t offset,
                           size_t size,
                           uint32_t gem_flags,
                           void **map_out);
void anv_device_unmap_bo(struct anv_device *device,
                         struct anv_bo *bo,
                         void *map, size_t map_size);
VkResult anv_device_import_bo_from_host_ptr(struct anv_device *device,
                                            void *host_ptr, uint32_t size,
                                            enum anv_bo_alloc_flags alloc_flags,
                                            uint64_t client_address,
                                            struct anv_bo **bo_out);
VkResult anv_device_import_bo(struct anv_device *device, int fd,
                              enum anv_bo_alloc_flags alloc_flags,
                              uint64_t client_address,
                              struct anv_bo **bo);
VkResult anv_device_export_bo(struct anv_device *device,
                              struct anv_bo *bo, int *fd_out);
VkResult anv_device_get_bo_tiling(struct anv_device *device,
                                  struct anv_bo *bo,
                                  enum isl_tiling *tiling_out);
VkResult anv_device_set_bo_tiling(struct anv_device *device,
                                  struct anv_bo *bo,
                                  uint32_t row_pitch_B,
                                  enum isl_tiling tiling);
void anv_device_release_bo(struct anv_device *device,
                           struct anv_bo *bo);

static inline struct anv_bo *
anv_device_lookup_bo(struct anv_device *device, uint32_t gem_handle)
{
   return util_sparse_array_get(&device->bo_cache.bo_map, gem_handle);
}

VkResult anv_device_wait(struct anv_device *device, struct anv_bo *bo,
                         int64_t timeout);

VkResult anv_queue_init(struct anv_device *device, struct anv_queue *queue,
                        uint32_t exec_flags,
                        const VkDeviceQueueCreateInfo *pCreateInfo,
                        uint32_t index_in_family);
void anv_queue_finish(struct anv_queue *queue);

VkResult anv_queue_submit(struct vk_queue *queue,
                          struct vk_queue_submit *submit);
VkResult anv_queue_submit_simple_batch(struct anv_queue *queue,
                                       struct anv_batch *batch);
1446
void *anv_gem_mmap(struct anv_device *device,
                   uint32_t gem_handle, uint64_t offset, uint64_t size,
                   uint32_t flags);
void anv_gem_munmap(struct anv_device *device, void *p, uint64_t size);
uint32_t anv_gem_create(struct anv_device *device, uint64_t size);
void anv_gem_close(struct anv_device *device, uint32_t gem_handle);
uint32_t anv_gem_create_regions(struct anv_device *device, uint64_t anv_bo_size,
                                uint32_t num_regions,
                                struct drm_i915_gem_memory_class_instance *regions);
uint32_t anv_gem_userptr(struct anv_device *device, void *mem, size_t size);
int anv_gem_busy(struct anv_device *device, uint32_t gem_handle);
int anv_gem_wait(struct anv_device *device, uint32_t gem_handle,
                 int64_t *timeout_ns);
int anv_gem_execbuffer(struct anv_device *device,
                       struct drm_i915_gem_execbuffer2 *execbuf);
int anv_gem_set_tiling(struct anv_device *device, uint32_t gem_handle,
                       uint32_t stride, uint32_t tiling);
int anv_gem_create_context(struct anv_device *device);
bool anv_gem_has_context_priority(int fd, int priority);
int anv_gem_destroy_context(struct anv_device *device, int context);
int anv_gem_set_context_param(int fd, int context, uint32_t param,
                              uint64_t value);
int anv_gem_get_param(int fd, uint32_t param);
int anv_gem_get_tiling(struct anv_device *device, uint32_t gem_handle);
int anv_gem_context_get_reset_stats(int fd, int context,
                                    uint32_t *active, uint32_t *pending);
int anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle);
int anv_gem_reg_read(int fd, uint32_t offset, uint64_t *result);
uint32_t anv_gem_fd_to_handle(struct anv_device *device, int fd);
int anv_gem_set_caching(struct anv_device *device, uint32_t gem_handle,
                        uint32_t caching);
int anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
                       uint32_t read_domains, uint32_t write_domain);
int anv_i915_query(int fd, uint64_t query_id, void *buffer,
                   int32_t *buffer_len);
struct drm_i915_query_engine_info *anv_gem_get_engine_info(int fd);

uint64_t anv_vma_alloc(struct anv_device *device,
                       uint64_t size, uint64_t align,
                       enum anv_bo_alloc_flags alloc_flags,
                       uint64_t client_address);
void anv_vma_free(struct anv_device *device,
                  uint64_t address, uint64_t size);
1487
struct anv_reloc_list {
   uint32_t num_relocs;
   uint32_t array_length;
   struct drm_i915_gem_relocation_entry *relocs;
   struct anv_bo **reloc_bos;
   uint32_t dep_words;
   BITSET_WORD *deps;
};

VkResult anv_reloc_list_init(struct anv_reloc_list *list,
                             const VkAllocationCallbacks *alloc);
void anv_reloc_list_finish(struct anv_reloc_list *list,
                           const VkAllocationCallbacks *alloc);

VkResult anv_reloc_list_add(struct anv_reloc_list *list,
                            const VkAllocationCallbacks *alloc,
                            uint32_t offset, struct anv_bo *target_bo,
                            uint32_t delta, uint64_t *address_u64_out);

VkResult anv_reloc_list_add_bo(struct anv_reloc_list *list,
                               const VkAllocationCallbacks *alloc,
                               struct anv_bo *target_bo);
1510
struct anv_batch_bo {
   /* Link in the anv_cmd_buffer.owned_batch_bos list */
   struct list_head link;

   struct anv_bo *bo;

   /* Bytes actually consumed in this batch BO */
   uint32_t length;

   /* When this batch BO is used as part of a primary batch buffer, this
    * tracks whether it is chained to another primary batch buffer.
    *
    * If this is the case, the relocation list's last entry points to the
    * location of the MI_BATCH_BUFFER_START chaining to the next batch.
    */
   bool chained;

   struct anv_reloc_list relocs;
};
1530
struct anv_batch {
   const VkAllocationCallbacks *alloc;

   struct anv_address start_addr;

   void *start;
   void *end;
   void *next;

   struct anv_reloc_list *relocs;

   /* This callback is called (with the associated user data) in the event
    * that the batch runs out of space.
    */
   VkResult (*extend_cb)(struct anv_batch *, void *);
   void *user_data;

   /**
    * Current error status of the command buffer. Used to track inconsistent
    * or incomplete command buffer states that are the consequence of run-time
    * errors such as out of memory scenarios. We want to track this in the
    * batch because the command buffer object is not visible to some parts
    * of the driver.
    */
   VkResult status;
};

void *anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords);
void anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other);
struct anv_address anv_batch_address(struct anv_batch *batch,
                                     void *batch_location);

static inline void
anv_batch_set_storage(struct anv_batch *batch, struct anv_address addr,
                      void *map, size_t size)
{
   batch->start_addr = addr;
   batch->next = batch->start = map;
   batch->end = map + size;
}
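
/* Sketch: pointing a batch at caller-provided storage, as the driver does
 * for freshly allocated batch BOs. ANV_NULL_ADDRESS stands in for a real
 * GPU address here; hypothetical helper, not driver code.
 */
static inline void
anv_example_batch_from_storage(struct anv_batch *batch,
                               void *map, size_t size)
{
   anv_batch_set_storage(batch, ANV_NULL_ADDRESS, map, size);
   batch->status = VK_SUCCESS; /* batches start out error-free */
}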
1570
static inline VkResult
anv_batch_set_error(struct anv_batch *batch, VkResult error)
{
   assert(error != VK_SUCCESS);
   if (batch->status == VK_SUCCESS)
      batch->status = error;
   return batch->status;
}

static inline bool
anv_batch_has_error(struct anv_batch *batch)
{
   return batch->status != VK_SUCCESS;
}

static inline uint64_t
anv_batch_emit_reloc(struct anv_batch *batch,
                     void *location, struct anv_bo *bo, uint32_t delta)
{
   uint64_t address_u64 = 0;
   VkResult result;

   if (ANV_ALWAYS_SOFTPIN) {
      address_u64 = bo->offset + delta;
      result = anv_reloc_list_add_bo(batch->relocs, batch->alloc, bo);
   } else {
      result = anv_reloc_list_add(batch->relocs, batch->alloc,
                                  location - batch->start, bo, delta,
                                  &address_u64);
   }
   if (unlikely(result != VK_SUCCESS)) {
      anv_batch_set_error(batch, result);
      return 0;
   }

   return address_u64;
}
1608
static inline void
write_reloc(const struct anv_device *device, void *p, uint64_t v, bool flush)
{
   unsigned reloc_size = 0;
   if (device->info.ver >= 8) {
      reloc_size = sizeof(uint64_t);
      *(uint64_t *)p = intel_canonical_address(v);
   } else {
      reloc_size = sizeof(uint32_t);
      *(uint32_t *)p = v;
   }

   if (flush && !device->info.has_llc)
      intel_flush_range(p, reloc_size);
}

static inline uint64_t
_anv_combine_address(struct anv_batch *batch, void *location,
                     const struct anv_address address, uint32_t delta)
{
   if (address.bo == NULL) {
      return address.offset + delta;
   } else if (batch == NULL) {
      assert(anv_bo_is_pinned(address.bo));
      return anv_address_physical(anv_address_add(address, delta));
   } else {
      assert(batch->start <= location && location < batch->end);
      /* i915 relocations are signed. */
      assert(INT32_MIN <= address.offset && address.offset <= INT32_MAX);
      return anv_batch_emit_reloc(batch, location, address.bo,
                                  address.offset + delta);
   }
}
1641
1642 #define __gen_address_type struct anv_address
1643 #define __gen_user_data struct anv_batch
1644 #define __gen_combine_address _anv_combine_address
1645
1646 /* Wrapper macros needed to work around preprocessor argument issues. In
1647 * particular, arguments don't get pre-evaluated if they are concatenated.
1648 * This means that, if you pass GENX(3DSTATE_PS) into the emit macro, the
1649 * GENX macro won't get evaluated if the emit macro contains "cmd ## foo".
1650 * We can work around this easily enough with these helpers.
1651 */
1652 #define __anv_cmd_length(cmd) cmd ## _length
1653 #define __anv_cmd_length_bias(cmd) cmd ## _length_bias
1654 #define __anv_cmd_header(cmd) cmd ## _header
1655 #define __anv_cmd_pack(cmd) cmd ## _pack
1656 #define __anv_reg_num(reg) reg ## _num
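
/* For example (illustrative, assuming GENX(cmd) expands to GFX9_##cmd):
 *
 *    __anv_cmd_length(GENX(3DSTATE_PS))
 *       -> __anv_cmd_length(GFX9_3DSTATE_PS)   // GENX evaluated first
 *       -> GFX9_3DSTATE_PS_length
 *
 * whereas writing GENX(3DSTATE_PS) ## _length directly would paste the
 * tokens before GENX() could be expanded.
 */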
1657
1658 #define anv_pack_struct(dst, struc, ...) do { \
1659 struct struc __template = { \
1660 __VA_ARGS__ \
1661 }; \
1662 __anv_cmd_pack(struc)(NULL, dst, &__template); \
1663 VG(VALGRIND_CHECK_MEM_IS_DEFINED(dst, __anv_cmd_length(struc) * 4)); \
1664 } while (0)
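
/* Usage sketch (illustrative; field names vary by generation): pack a
 * template into a stack-allocated dword array, e.g. for later merging with
 * anv_batch_emit_merge():
 *
 *    uint32_t dw[GENX(3DSTATE_SF_length)];
 *    anv_pack_struct(dw, GENX(3DSTATE_SF), .LineWidth = 1.0f);
 */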
1665
1666 #define anv_batch_emitn(batch, n, cmd, ...) ({ \
1667 void *__dst = anv_batch_emit_dwords(batch, n); \
1668 if (__dst) { \
1669 struct cmd __template = { \
1670 __anv_cmd_header(cmd), \
1671 .DWordLength = n - __anv_cmd_length_bias(cmd), \
1672 __VA_ARGS__ \
1673 }; \
1674 __anv_cmd_pack(cmd)(batch, __dst, &__template); \
1675 } \
1676 __dst; \
1677 })
1678
1679 #define anv_batch_emit_merge(batch, dwords0, dwords1) \
1680 do { \
1681 uint32_t *dw; \
1682 \
1683 STATIC_ASSERT(ARRAY_SIZE(dwords0) == ARRAY_SIZE(dwords1)); \
1684 dw = anv_batch_emit_dwords((batch), ARRAY_SIZE(dwords0)); \
1685 if (!dw) \
1686 break; \
1687 for (uint32_t i = 0; i < ARRAY_SIZE(dwords0); i++) \
1688 dw[i] = (dwords0)[i] | (dwords1)[i]; \
1689 VG(VALGRIND_CHECK_MEM_IS_DEFINED(dw, ARRAY_SIZE(dwords0) * 4));\
1690 } while (0)
1691
1692 #define anv_batch_emit(batch, cmd, name) \
1693 for (struct cmd name = { __anv_cmd_header(cmd) }, \
1694 *_dst = anv_batch_emit_dwords(batch, __anv_cmd_length(cmd)); \
1695 __builtin_expect(_dst != NULL, 1); \
1696 ({ __anv_cmd_pack(cmd)(batch, _dst, &name); \
1697 VG(VALGRIND_CHECK_MEM_IS_DEFINED(_dst, __anv_cmd_length(cmd) * 4)); \
1698 _dst = NULL; \
1699 }))
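
/* Usage sketch (illustrative; exact fields vary by generation):
 *
 *    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
 *       pc.CommandStreamerStallEnable = true;
 *    }
 *
 * The for-based construct gives the caller a block in which to fill out
 * 'pc'; the struct is packed into the batch when the block exits.
 */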
1700
1701 #define anv_batch_write_reg(batch, reg, name) \
1702 for (struct reg name = {}, *_cont = (struct reg *)1; _cont != NULL; \
1703 ({ \
1704 uint32_t _dw[__anv_cmd_length(reg)]; \
1705 __anv_cmd_pack(reg)(NULL, _dw, &name); \
1706 for (unsigned i = 0; i < __anv_cmd_length(reg); i++) { \
1707 anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM), lri) { \
1708 lri.RegisterOffset = __anv_reg_num(reg) + i * 4; \
1709 lri.DataDWord = _dw[i]; \
1710 } \
1711 } \
1712 _cont = NULL; \
1713 }))
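
/* Usage sketch (illustrative; SOME_REG and its field are hypothetical):
 *
 *    anv_batch_write_reg(batch, GENX(SOME_REG), r) {
 *       r.SomeField = true;
 *    }
 *
 * Each dword of the packed register value is written with its own
 * MI_LOAD_REGISTER_IMM at __anv_reg_num(reg) + i * 4.
 */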
1714
1715 /* #define __gen_get_batch_dwords anv_batch_emit_dwords */
1716 /* #define __gen_get_batch_address anv_batch_address */
1717 /* #define __gen_address_value anv_address_physical */
1718 /* #define __gen_address_offset anv_address_add */
1719
1720 struct anv_device_memory {
1721 struct vk_object_base base;
1722
1723 struct list_head link;
1724
1725 struct anv_bo * bo;
1726 const struct anv_memory_type * type;
1727
1728 void * map;
1729 size_t map_size;
1730
1731 /* The map, from the user's PoV, is map + map_delta */
1732 uint64_t map_delta;
1733
1734 /* If set, we are holding a reference to an AHardwareBuffer which we
1735 * must release when the memory is freed.
1736 */
1737 struct AHardwareBuffer * ahw;
1738
1739 /* If set, this memory comes from a host pointer. */
1740 void * host_ptr;
1741 };
1742
1743 /**
1744 * Header for Vertex URB Entry (VUE)
1745 */
1746 struct anv_vue_header {
1747 uint32_t Reserved;
1748 uint32_t RTAIndex; /* RenderTargetArrayIndex */
1749 uint32_t ViewportIndex;
1750 float PointWidth;
1751 };
1752
1753 /** Struct representing a sampled image descriptor
1754 *
1755 * This descriptor layout is used for sampled images, bare samplers, and
1756 * combined image/sampler descriptors.
1757 */
1758 struct anv_sampled_image_descriptor {
1759 /** Bindless image handle
1760 *
1761 * This is expected to already be shifted such that the 20-bit
1762 * SURFACE_STATE table index is in the top 20 bits.
1763 */
1764 uint32_t image;
1765
1766 /** Bindless sampler handle
1767 *
1768 * This is assumed to be a 32B-aligned SAMPLER_STATE pointer relative
1769 * to the dynamic state base address.
1770 */
1771 uint32_t sampler;
1772 };
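
/* Illustrative arithmetic for the handle layout above: a SURFACE_STATE
 * table index N occupies the top 20 bits of the 32-bit handle, so it is
 * stored pre-shifted as (N << 12), leaving the low 12 bits free
 * (32 - 20 = 12).
 */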
1773
1774 struct anv_texture_swizzle_descriptor {
1775 /** Texture swizzle
1776 *
1777 * See also nir_intrinsic_channel_select_intel
1778 */
1779 uint8_t swizzle[4];
1780
1781 /** Unused padding to ensure the struct is a multiple of 64 bits */
1782 uint32_t _pad;
1783 };
1784
1785 /** Struct representing a storage image descriptor */
1786 struct anv_storage_image_descriptor {
1787 /** Bindless image handles
1788 *
1789 * These are expected to already be shifted such that the 20-bit
1790 * SURFACE_STATE table index is in the top 20 bits.
1791 */
1792 uint32_t vanilla;
1793 uint32_t lowered;
1794 };
1795
1796 /** Struct representing an address/range descriptor
1797 *
1798 * The fields of this struct correspond directly to the data layout of
1799 * nir_address_format_64bit_bounded_global addresses. The last field is the
1800 * offset portion of the NIR address, and it must be zero so that loading
1801 * the descriptor yields a pointer to the start of the range.
1802 */
1803 struct anv_address_range_descriptor {
1804 uint64_t address;
1805 uint32_t range;
1806 uint32_t zero;
1807 };
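
/* Filling sketch (illustrative, assuming a pinned BO so the physical
 * address is known):
 *
 *    struct anv_address_range_descriptor desc = {
 *       .address = anv_address_physical(addr),
 *       .range   = range,
 *       .zero    = 0,
 *    };
 */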
1808
1809 enum anv_descriptor_data {
1810 /** The descriptor contains a BTI reference to a surface state */
1811 ANV_DESCRIPTOR_SURFACE_STATE = (1 << 0),
1812 /** The descriptor contains a BTI reference to a sampler state */
1813 ANV_DESCRIPTOR_SAMPLER_STATE = (1 << 1),
1814 /** The descriptor contains an actual buffer view */
1815 ANV_DESCRIPTOR_BUFFER_VIEW = (1 << 2),
1816 /** The descriptor contains auxiliary image layout data */
1817 ANV_DESCRIPTOR_IMAGE_PARAM = (1 << 3),
1818 /** The descriptor contains inline uniform data */
1819 ANV_DESCRIPTOR_INLINE_UNIFORM = (1 << 4),
1820 /** anv_address_range_descriptor with a buffer address and range */
1821 ANV_DESCRIPTOR_ADDRESS_RANGE = (1 << 5),
1822 /** Bindless surface handle */
1823 ANV_DESCRIPTOR_SAMPLED_IMAGE = (1 << 6),
1824 /** Storage image handles */
1825 ANV_DESCRIPTOR_STORAGE_IMAGE = (1 << 7),
1826 /** Texture swizzle data */
1827 ANV_DESCRIPTOR_TEXTURE_SWIZZLE = (1 << 8),
1828 };
1829
1830 struct anv_descriptor_set_binding_layout {
1831 /* The type of the descriptors in this binding */
1832 VkDescriptorType type;
1833
1834 /* Flags provided when this binding was created */
1835 VkDescriptorBindingFlagsEXT flags;
1836
1837 /* Bitfield representing the type of data this descriptor contains */
1838 enum anv_descriptor_data data;
1839
1840 /* Maximum number of YCbCr texture/sampler planes */
1841 uint8_t max_plane_count;
1842
1843 /* Number of array elements in this binding (or size in bytes for inline
1844 * uniform data)
1845 */
1846 uint32_t array_size;
1847
1848 /* Index into the flattened descriptor set */
1849 uint32_t descriptor_index;
1850
1851 /* Index into the dynamic state array for a dynamic buffer */
1852 int16_t dynamic_offset_index;
1853
1854 /* Index into the descriptor set buffer views */
1855 int32_t buffer_view_index;
1856
1857 /* Offset into the descriptor buffer where this descriptor lives */
1858 uint32_t descriptor_offset;
1859
1860 /* Precomputed descriptor stride */
1861 unsigned descriptor_stride;
1862
1863 /* Immutable samplers (or NULL if no immutable samplers) */
1864 struct anv_sampler **immutable_samplers;
1865 };
1866
1867 bool anv_descriptor_supports_bindless(const struct anv_physical_device *pdevice,
1868 const struct anv_descriptor_set_binding_layout *binding,
1869 bool sampler);
1870
1871 bool anv_descriptor_requires_bindless(const struct anv_physical_device *pdevice,
1872 const struct anv_descriptor_set_binding_layout *binding,
1873 bool sampler);
1874
1875 struct anv_descriptor_set_layout {
1876 struct vk_object_base base;
1877
1878 /* Descriptor set layouts can be destroyed at almost any time */
1879 uint32_t ref_cnt;
1880
1881 /* Number of bindings in this descriptor set */
1882 uint32_t binding_count;
1883
1884 /* Total number of descriptors */
1885 uint32_t descriptor_count;
1886
1887 /* Shader stages affected by this descriptor set */
1888 uint16_t shader_stages;
1889
1890 /* Number of buffer views in this descriptor set */
1891 uint32_t buffer_view_count;
1892
1893 /* Number of dynamic offsets used by this descriptor set */
1894 uint16_t dynamic_offset_count;
1895
1896 /* For each dynamic buffer, which VkShaderStageFlagBits stages are using
1897 * this buffer
1898 */
1899 VkShaderStageFlags dynamic_offset_stages[MAX_DYNAMIC_BUFFERS];
1900
1901 /* Size of the descriptor buffer for this descriptor set */
1902 uint32_t descriptor_buffer_size;
1903
1904 /* Bindings in this descriptor set */
1905 struct anv_descriptor_set_binding_layout binding[0];
1906 };
1907
1908 void anv_descriptor_set_layout_destroy(struct anv_device *device,
1909 struct anv_descriptor_set_layout *layout);
1910
1911 static inline void
1912 anv_descriptor_set_layout_ref(struct anv_descriptor_set_layout *layout)
1913 {
1914 assert(layout && layout->ref_cnt >= 1);
1915 p_atomic_inc(&layout->ref_cnt);
1916 }
1917
1918 static inline void
1919 anv_descriptor_set_layout_unref(struct anv_device *device,
1920 struct anv_descriptor_set_layout *layout)
1921 {
1922 assert(layout && layout->ref_cnt >= 1);
1923 if (p_atomic_dec_zero(&layout->ref_cnt))
1924 anv_descriptor_set_layout_destroy(device, layout);
1925 }
1926
1927 struct anv_descriptor {
1928 VkDescriptorType type;
1929
1930 union {
1931 struct {
1932 VkImageLayout layout;
1933 struct anv_image_view *image_view;
1934 struct anv_sampler *sampler;
1935 };
1936
1937 struct {
1938 struct anv_buffer_view *set_buffer_view;
1939 struct anv_buffer *buffer;
1940 uint64_t offset;
1941 uint64_t range;
1942 };
1943
1944 struct anv_buffer_view *buffer_view;
1945
1946 struct anv_acceleration_structure *accel_struct;
1947 };
1948 };
1949
1950 struct anv_descriptor_set {
1951 struct vk_object_base base;
1952
1953 struct anv_descriptor_pool *pool;
1954 struct anv_descriptor_set_layout *layout;
1955
1956 /* Amount of space occupied in the pool by this descriptor set. It can
1957 * be larger than the size of the descriptor set.
1958 */
1959 uint32_t size;
1960
1961 /* State relative to anv_descriptor_pool::bo */
1962 struct anv_state desc_mem;
1963 /* Surface state for the descriptor buffer */
1964 struct anv_state desc_surface_state;
1965
1966 /* Descriptor set address. */
1967 struct anv_address desc_addr;
1968
1969 uint32_t buffer_view_count;
1970 struct anv_buffer_view *buffer_views;
1971
1972 /* Link to the descriptor pool's desc_sets list. */
1973 struct list_head pool_link;
1974
1975 uint32_t descriptor_count;
1976 struct anv_descriptor descriptors[0];
1977 };
1978
1979 static inline bool
1980 anv_descriptor_set_is_push(struct anv_descriptor_set *set)
1981 {
1982 return set->pool == NULL;
1983 }
1984
1985 struct anv_buffer_view {
1986 struct vk_object_base base;
1987
1988 enum isl_format format; /**< VkBufferViewCreateInfo::format */
1989 uint64_t range; /**< VkBufferViewCreateInfo::range */
1990
1991 struct anv_address address;
1992
1993 struct anv_state surface_state;
1994 struct anv_state storage_surface_state;
1995 struct anv_state lowered_storage_surface_state;
1996
1997 struct brw_image_param lowered_storage_image_param;
1998 };
1999
2000 struct anv_push_descriptor_set {
2001 struct anv_descriptor_set set;
2002
2003 /* Put this field right after anv_descriptor_set so it backs the
2004 * flexible descriptors[0] array at the end of the set. */
2005 struct anv_descriptor descriptors[MAX_PUSH_DESCRIPTORS];
2006
2007 /** True if the descriptor set buffer has been referenced by a draw or
2008 * dispatch command.
2009 */
2010 bool set_used_on_gpu;
2011
2012 struct anv_buffer_view buffer_views[MAX_PUSH_DESCRIPTORS];
2013 };
2014
2015 static inline struct anv_address
2016 anv_descriptor_set_address(struct anv_descriptor_set *set)
2017 {
2018 if (anv_descriptor_set_is_push(set)) {
2019 /* We have to flag the push descriptor set as used on the GPU so
2020 * that the next time we push descriptors, we grab new memory.
2021 */
2022 struct anv_push_descriptor_set *push_set =
2023 (struct anv_push_descriptor_set *)set;
2024 push_set->set_used_on_gpu = true;
2025 }
2026
2027 return set->desc_addr;
2028 }
2029
2030 struct anv_descriptor_pool {
2031 struct vk_object_base base;
2032
2033 uint32_t size;
2034 uint32_t next;
2035 uint32_t free_list;
2036
2037 struct anv_bo *bo;
2038 struct util_vma_heap bo_heap;
2039
2040 struct anv_state_stream surface_state_stream;
2041 void *surface_state_free_list;
2042
2043 struct list_head desc_sets;
2044
2045 bool host_only;
2046
2047 char data[0];
2048 };
2049
2050 enum anv_descriptor_template_entry_type {
2051 ANV_DESCRIPTOR_TEMPLATE_ENTRY_TYPE_IMAGE,
2052 ANV_DESCRIPTOR_TEMPLATE_ENTRY_TYPE_BUFFER,
2053 ANV_DESCRIPTOR_TEMPLATE_ENTRY_TYPE_BUFFER_VIEW
2054 };
2055
2056 struct anv_descriptor_template_entry {
2057 /* The type of descriptor in this entry */
2058 VkDescriptorType type;
2059
2060 /* Binding in the descriptor set */
2061 uint32_t binding;
2062
2063 /* Offset at which to write into the descriptor set binding */
2064 uint32_t array_element;
2065
2066 /* Number of elements to write into the descriptor set binding */
2067 uint32_t array_count;
2068
2069 /* Offset into the user provided data */
2070 size_t offset;
2071
2072 /* Stride between elements into the user provided data */
2073 size_t stride;
2074 };
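
/* Worked example (illustrative): an entry with binding = 1,
 * array_element = 0, array_count = 4, offset = 16 and stride = 24 reads
 * element i from (const char *)data + 16 + i * 24 and writes it to
 * binding 1, array element i of the destination set.
 */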
2075
2076 struct anv_descriptor_update_template {
2077 struct vk_object_base base;
2078
2079 VkPipelineBindPoint bind_point;
2080
2081 /* The descriptor set this template corresponds to. This value is only
2082 * valid if the template was created with the templateType
2083 * VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET.
2084 */
2085 uint8_t set;
2086
2087 /* Number of entries in this template */
2088 uint32_t entry_count;
2089
2090 /* Entries of the template */
2091 struct anv_descriptor_template_entry entries[0];
2092 };
2093
2094 size_t
2095 anv_descriptor_set_layout_size(const struct anv_descriptor_set_layout *layout,
2096 uint32_t var_desc_count);
2097
2098 uint32_t
2099 anv_descriptor_set_layout_descriptor_buffer_size(const struct anv_descriptor_set_layout *set_layout,
2100 uint32_t var_desc_count);
2101
2102 void
2103 anv_descriptor_set_write_image_view(struct anv_device *device,
2104 struct anv_descriptor_set *set,
2105 const VkDescriptorImageInfo * const info,
2106 VkDescriptorType type,
2107 uint32_t binding,
2108 uint32_t element);
2109
2110 void
2111 anv_descriptor_set_write_buffer_view(struct anv_device *device,
2112 struct anv_descriptor_set *set,
2113 VkDescriptorType type,
2114 struct anv_buffer_view *buffer_view,
2115 uint32_t binding,
2116 uint32_t element);
2117
2118 void
2119 anv_descriptor_set_write_buffer(struct anv_device *device,
2120 struct anv_descriptor_set *set,
2121 struct anv_state_stream *alloc_stream,
2122 VkDescriptorType type,
2123 struct anv_buffer *buffer,
2124 uint32_t binding,
2125 uint32_t element,
2126 VkDeviceSize offset,
2127 VkDeviceSize range);
2128
2129 void
2130 anv_descriptor_set_write_acceleration_structure(struct anv_device *device,
2131 struct anv_descriptor_set *set,
2132 struct anv_acceleration_structure *accel,
2133 uint32_t binding,
2134 uint32_t element);
2135
2136 void
2137 anv_descriptor_set_write_inline_uniform_data(struct anv_device *device,
2138 struct anv_descriptor_set *set,
2139 uint32_t binding,
2140 const void *data,
2141 size_t offset,
2142 size_t size);
2143
2144 void
2145 anv_descriptor_set_write_template(struct anv_device *device,
2146 struct anv_descriptor_set *set,
2147 struct anv_state_stream *alloc_stream,
2148 const struct anv_descriptor_update_template *template,
2149 const void *data);
2150
2151 #define ANV_DESCRIPTOR_SET_NULL (UINT8_MAX - 5)
2152 #define ANV_DESCRIPTOR_SET_PUSH_CONSTANTS (UINT8_MAX - 4)
2153 #define ANV_DESCRIPTOR_SET_DESCRIPTORS (UINT8_MAX - 3)
2154 #define ANV_DESCRIPTOR_SET_NUM_WORK_GROUPS (UINT8_MAX - 2)
2155 #define ANV_DESCRIPTOR_SET_SHADER_CONSTANTS (UINT8_MAX - 1)
2156 #define ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS UINT8_MAX
2157
2158 struct anv_pipeline_binding {
2159 /** Index in the descriptor set
2160 *
2161 * This is a flattened index; the descriptor set layout is already taken
2162 * into account.
2163 */
2164 uint32_t index;
2165
2166 /** The descriptor set this surface corresponds to.
2167 *
2168 * The special ANV_DESCRIPTOR_SET_* values above indicate that this
2169 * binding is not a normal descriptor set but something else.
2170 */
2171 uint8_t set;
2172
2173 union {
2174 /** Plane in the binding index for images */
2175 uint8_t plane;
2176
2177 /** Dynamic offset index (for dynamic UBOs and SSBOs) */
2178 uint8_t dynamic_offset_index;
2179 };
2180
2181 /** For a storage image, whether it requires a lowered surface */
2182 uint8_t lowered_storage_surface;
2183
2184 /** Pad to 64 bits so that there are no holes and we can safely memcmp
2185 * assuming POD zero-initialization.
2186 */
2187 uint8_t pad;
2188 };
2189
2190 struct anv_push_range {
2191 /** Index in the descriptor set */
2192 uint32_t index;
2193
2194 /** Descriptor set index */
2195 uint8_t set;
2196
2197 /** Dynamic offset index (for dynamic UBOs) */
2198 uint8_t dynamic_offset_index;
2199
2200 /** Start offset in units of 32B */
2201 uint8_t start;
2202
2203 /** Range in units of 32B */
2204 uint8_t length;
2205 };
2206
2207 struct anv_pipeline_layout {
2208 struct vk_object_base base;
2209
2210 struct {
2211 struct anv_descriptor_set_layout *layout;
2212 uint32_t dynamic_offset_start;
2213 } set[MAX_SETS];
2214
2215 uint32_t num_sets;
2216
2217 unsigned char sha1[20];
2218 };
2219
2220 struct anv_buffer {
2221 struct vk_object_base base;
2222
2223 struct anv_device * device;
2224 VkDeviceSize size;
2225
2226 VkBufferCreateFlags create_flags;
2227 VkBufferUsageFlags usage;
2228
2229 /* Set when bound */
2230 struct anv_address address;
2231 };
2232
2233 static inline uint64_t
2234 anv_buffer_get_range(struct anv_buffer *buffer, uint64_t offset, uint64_t range)
2235 {
2236 assert(offset <= buffer->size);
2237 if (range == VK_WHOLE_SIZE) {
2238 return buffer->size - offset;
2239 } else {
2240 assert(range + offset >= range);
2241 assert(range + offset <= buffer->size);
2242 return range;
2243 }
2244 }
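
/* Example (illustrative): resolving VK_WHOLE_SIZE against a 4 KiB buffer.
 *
 *    uint64_t r = anv_buffer_get_range(buffer, 256, VK_WHOLE_SIZE);
 *    // r == buffer->size - 256 == 3840 when buffer->size == 4096
 */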
2245
2246 enum anv_cmd_dirty_bits {
2247 ANV_CMD_DIRTY_DYNAMIC_VIEWPORT = 1 << 0, /* VK_DYNAMIC_STATE_VIEWPORT */
2248 ANV_CMD_DIRTY_DYNAMIC_SCISSOR = 1 << 1, /* VK_DYNAMIC_STATE_SCISSOR */
2249 ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH = 1 << 2, /* VK_DYNAMIC_STATE_LINE_WIDTH */
2250 ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS = 1 << 3, /* VK_DYNAMIC_STATE_DEPTH_BIAS */
2251 ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS = 1 << 4, /* VK_DYNAMIC_STATE_BLEND_CONSTANTS */
2252 ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS = 1 << 5, /* VK_DYNAMIC_STATE_DEPTH_BOUNDS */
2253 ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK = 1 << 6, /* VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK */
2254 ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK = 1 << 7, /* VK_DYNAMIC_STATE_STENCIL_WRITE_MASK */
2255 ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE = 1 << 8, /* VK_DYNAMIC_STATE_STENCIL_REFERENCE */
2256 ANV_CMD_DIRTY_PIPELINE = 1 << 9,
2257 ANV_CMD_DIRTY_INDEX_BUFFER = 1 << 10,
2258 ANV_CMD_DIRTY_RENDER_TARGETS = 1 << 11,
2259 ANV_CMD_DIRTY_XFB_ENABLE = 1 << 12,
2260 ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE = 1 << 13, /* VK_DYNAMIC_STATE_LINE_STIPPLE_EXT */
2261 ANV_CMD_DIRTY_DYNAMIC_CULL_MODE = 1 << 14, /* VK_DYNAMIC_STATE_CULL_MODE_EXT */
2262 ANV_CMD_DIRTY_DYNAMIC_FRONT_FACE = 1 << 15, /* VK_DYNAMIC_STATE_FRONT_FACE_EXT */
2263 ANV_CMD_DIRTY_DYNAMIC_PRIMITIVE_TOPOLOGY = 1 << 16, /* VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT */
2264 ANV_CMD_DIRTY_DYNAMIC_VERTEX_INPUT_BINDING_STRIDE = 1 << 17, /* VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT */
2265 ANV_CMD_DIRTY_DYNAMIC_DEPTH_TEST_ENABLE = 1 << 18, /* VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT */
2266 ANV_CMD_DIRTY_DYNAMIC_DEPTH_WRITE_ENABLE = 1 << 19, /* VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT */
2267 ANV_CMD_DIRTY_DYNAMIC_DEPTH_COMPARE_OP = 1 << 20, /* VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT */
2268 ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS_TEST_ENABLE = 1 << 21, /* VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT */
2269 ANV_CMD_DIRTY_DYNAMIC_STENCIL_TEST_ENABLE = 1 << 22, /* VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT */
2270 ANV_CMD_DIRTY_DYNAMIC_STENCIL_OP = 1 << 23, /* VK_DYNAMIC_STATE_STENCIL_OP_EXT */
2271 ANV_CMD_DIRTY_DYNAMIC_SAMPLE_LOCATIONS = 1 << 24, /* VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT */
2272 ANV_CMD_DIRTY_DYNAMIC_COLOR_BLEND_STATE = 1 << 25, /* VK_DYNAMIC_STATE_COLOR_WRITE_ENABLE_EXT */
2273 ANV_CMD_DIRTY_DYNAMIC_SHADING_RATE = 1 << 26, /* VK_DYNAMIC_STATE_FRAGMENT_SHADING_RATE_KHR */
2274 ANV_CMD_DIRTY_DYNAMIC_RASTERIZER_DISCARD_ENABLE = 1 << 27, /* VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE_EXT */
2275 ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS_ENABLE = 1 << 28, /* VK_DYNAMIC_STATE_DEPTH_BIAS_ENABLE_EXT */
2276 ANV_CMD_DIRTY_DYNAMIC_LOGIC_OP = 1 << 29, /* VK_DYNAMIC_STATE_LOGIC_OP_EXT */
2277 ANV_CMD_DIRTY_DYNAMIC_PRIMITIVE_RESTART_ENABLE = 1 << 30, /* VK_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE_EXT */
2278 };
2279 typedef uint32_t anv_cmd_dirty_mask_t;
2280
2281 #define ANV_CMD_DIRTY_DYNAMIC_ALL \
2282 (ANV_CMD_DIRTY_DYNAMIC_VIEWPORT | \
2283 ANV_CMD_DIRTY_DYNAMIC_SCISSOR | \
2284 ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH | \
2285 ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS | \
2286 ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS | \
2287 ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS | \
2288 ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK | \
2289 ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK | \
2290 ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE | \
2291 ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE | \
2292 ANV_CMD_DIRTY_DYNAMIC_CULL_MODE | \
2293 ANV_CMD_DIRTY_DYNAMIC_FRONT_FACE | \
2294 ANV_CMD_DIRTY_DYNAMIC_PRIMITIVE_TOPOLOGY | \
2295 ANV_CMD_DIRTY_DYNAMIC_VERTEX_INPUT_BINDING_STRIDE | \
2296 ANV_CMD_DIRTY_DYNAMIC_DEPTH_TEST_ENABLE | \
2297 ANV_CMD_DIRTY_DYNAMIC_DEPTH_WRITE_ENABLE | \
2298 ANV_CMD_DIRTY_DYNAMIC_DEPTH_COMPARE_OP | \
2299 ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS_TEST_ENABLE | \
2300 ANV_CMD_DIRTY_DYNAMIC_STENCIL_TEST_ENABLE | \
2301 ANV_CMD_DIRTY_DYNAMIC_STENCIL_OP | \
2302 ANV_CMD_DIRTY_DYNAMIC_SAMPLE_LOCATIONS | \
2303 ANV_CMD_DIRTY_DYNAMIC_COLOR_BLEND_STATE | \
2304 ANV_CMD_DIRTY_DYNAMIC_SHADING_RATE | \
2305 ANV_CMD_DIRTY_DYNAMIC_RASTERIZER_DISCARD_ENABLE | \
2306 ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS_ENABLE | \
2307 ANV_CMD_DIRTY_DYNAMIC_LOGIC_OP | \
2308 ANV_CMD_DIRTY_DYNAMIC_PRIMITIVE_RESTART_ENABLE)
2309
2310 static inline enum anv_cmd_dirty_bits
2311 anv_cmd_dirty_bit_for_vk_dynamic_state(VkDynamicState vk_state)
2312 {
2313 switch (vk_state) {
2314 case VK_DYNAMIC_STATE_VIEWPORT:
2315 case VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT:
2316 return ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
2317 case VK_DYNAMIC_STATE_SCISSOR:
2318 case VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT:
2319 return ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
2320 case VK_DYNAMIC_STATE_LINE_WIDTH:
2321 return ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
2322 case VK_DYNAMIC_STATE_DEPTH_BIAS:
2323 return ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
2324 case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
2325 return ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
2326 case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
2327 return ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
2328 case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
2329 return ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
2330 case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
2331 return ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
2332 case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
2333 return ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
2334 case VK_DYNAMIC_STATE_LINE_STIPPLE_EXT:
2335 return ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE;
2336 case VK_DYNAMIC_STATE_CULL_MODE_EXT:
2337 return ANV_CMD_DIRTY_DYNAMIC_CULL_MODE;
2338 case VK_DYNAMIC_STATE_FRONT_FACE_EXT:
2339 return ANV_CMD_DIRTY_DYNAMIC_FRONT_FACE;
2340 case VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT:
2341 return ANV_CMD_DIRTY_DYNAMIC_PRIMITIVE_TOPOLOGY;
2342 case VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT:
2343 return ANV_CMD_DIRTY_DYNAMIC_VERTEX_INPUT_BINDING_STRIDE;
2344 case VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT:
2345 return ANV_CMD_DIRTY_DYNAMIC_DEPTH_TEST_ENABLE;
2346 case VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT:
2347 return ANV_CMD_DIRTY_DYNAMIC_DEPTH_WRITE_ENABLE;
2348 case VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT:
2349 return ANV_CMD_DIRTY_DYNAMIC_DEPTH_COMPARE_OP;
2350 case VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT:
2351 return ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS_TEST_ENABLE;
2352 case VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT:
2353 return ANV_CMD_DIRTY_DYNAMIC_STENCIL_TEST_ENABLE;
2354 case VK_DYNAMIC_STATE_STENCIL_OP_EXT:
2355 return ANV_CMD_DIRTY_DYNAMIC_STENCIL_OP;
2356 case VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT:
2357 return ANV_CMD_DIRTY_DYNAMIC_SAMPLE_LOCATIONS;
2358 case VK_DYNAMIC_STATE_COLOR_WRITE_ENABLE_EXT:
2359 return ANV_CMD_DIRTY_DYNAMIC_COLOR_BLEND_STATE;
2360 case VK_DYNAMIC_STATE_FRAGMENT_SHADING_RATE_KHR:
2361 return ANV_CMD_DIRTY_DYNAMIC_SHADING_RATE;
2362 case VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE_EXT:
2363 return ANV_CMD_DIRTY_DYNAMIC_RASTERIZER_DISCARD_ENABLE;
2364 case VK_DYNAMIC_STATE_DEPTH_BIAS_ENABLE_EXT:
2365 return ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS_ENABLE;
2366 case VK_DYNAMIC_STATE_LOGIC_OP_EXT:
2367 return ANV_CMD_DIRTY_DYNAMIC_LOGIC_OP;
2368 case VK_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE_EXT:
2369 return ANV_CMD_DIRTY_DYNAMIC_PRIMITIVE_RESTART_ENABLE;
2370 default:
2371 assert(!"Unsupported dynamic state");
2372 return 0;
2373 }
2374 }
2375
2376
2377 enum anv_pipe_bits {
2378 ANV_PIPE_DEPTH_CACHE_FLUSH_BIT = (1 << 0),
2379 ANV_PIPE_STALL_AT_SCOREBOARD_BIT = (1 << 1),
2380 ANV_PIPE_STATE_CACHE_INVALIDATE_BIT = (1 << 2),
2381 ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT = (1 << 3),
2382 ANV_PIPE_VF_CACHE_INVALIDATE_BIT = (1 << 4),
2383 ANV_PIPE_DATA_CACHE_FLUSH_BIT = (1 << 5),
2384 ANV_PIPE_TILE_CACHE_FLUSH_BIT = (1 << 6),
2385 ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT = (1 << 10),
2386 ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT = (1 << 11),
2387 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT = (1 << 12),
2388 ANV_PIPE_DEPTH_STALL_BIT = (1 << 13),
2389
2390 /* ANV_PIPE_HDC_PIPELINE_FLUSH_BIT is a precise way to ensure prior data
2391 * cache work has completed. Available on Gfx12+. For earlier Gfx we
2392 * must reinterpret this flush as ANV_PIPE_DATA_CACHE_FLUSH_BIT.
2393 */
2394 ANV_PIPE_HDC_PIPELINE_FLUSH_BIT = (1 << 14),
2395 ANV_PIPE_PSS_STALL_SYNC_BIT = (1 << 15),
2396 ANV_PIPE_CS_STALL_BIT = (1 << 20),
2397 ANV_PIPE_END_OF_PIPE_SYNC_BIT = (1 << 21),
2398
2399 /* This bit does not exist directly in PIPE_CONTROL. Instead it means that
2400 * a flush has happened but not a CS stall. The next time we do any sort
2401 * of invalidation we need to insert a CS stall at that time. Otherwise,
2402 * we would have to CS stall on every flush, which would be expensive.
2403 */
2404 ANV_PIPE_NEEDS_END_OF_PIPE_SYNC_BIT = (1 << 22),
2405
2406 /* This bit does not exist directly in PIPE_CONTROL. It means that render
2407 * target operations related to transfer commands with VkBuffer as
2408 * destination are ongoing. Some operations like copies on the command
2409 * streamer might need to be aware of this to trigger the appropriate stall
2410 * before they can proceed with the copy.
2411 */
2412 ANV_PIPE_RENDER_TARGET_BUFFER_WRITES = (1 << 23),
2413
2414 /* This bit does not exist directly in PIPE_CONTROL. It means that Gfx12
2415 * AUX-TT data has changed and we need to invalidate AUX-TT data. This is
2416 * done by writing the AUX-TT register.
2417 */
2418 ANV_PIPE_AUX_TABLE_INVALIDATE_BIT = (1 << 24),
2419
2420 /* This bit does not exist directly in PIPE_CONTROL. It means that a
2421 * PIPE_CONTROL with a post-sync operation will follow. This is used to
2422 * implement a workaround for Gfx9.
2423 */
2424 ANV_PIPE_POST_SYNC_BIT = (1 << 25),
2425 };
2426
2427 #define ANV_PIPE_FLUSH_BITS ( \
2428 ANV_PIPE_DEPTH_CACHE_FLUSH_BIT | \
2429 ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
2430 ANV_PIPE_HDC_PIPELINE_FLUSH_BIT | \
2431 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | \
2432 ANV_PIPE_TILE_CACHE_FLUSH_BIT)
2433
2434 #define ANV_PIPE_STALL_BITS ( \
2435 ANV_PIPE_STALL_AT_SCOREBOARD_BIT | \
2436 ANV_PIPE_DEPTH_STALL_BIT | \
2437 ANV_PIPE_CS_STALL_BIT)
2438
2439 #define ANV_PIPE_INVALIDATE_BITS ( \
2440 ANV_PIPE_STATE_CACHE_INVALIDATE_BIT | \
2441 ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT | \
2442 ANV_PIPE_VF_CACHE_INVALIDATE_BIT | \
2443 ANV_PIPE_HDC_PIPELINE_FLUSH_BIT | \
2444 ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT | \
2445 ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT | \
2446 ANV_PIPE_AUX_TABLE_INVALIDATE_BIT)
2447
2448 enum intel_ds_stall_flag
2449 anv_pipe_flush_bit_to_ds_stall_flag(enum anv_pipe_bits bits);
2450
2451 static inline enum anv_pipe_bits
2452 anv_pipe_flush_bits_for_access_flags(struct anv_device *device,
2453 VkAccessFlags2KHR flags)
2454 {
2455 enum anv_pipe_bits pipe_bits = 0;
2456
2457 u_foreach_bit64(b, flags) {
2458 switch ((VkAccessFlags2KHR)BITFIELD64_BIT(b)) {
2459 case VK_ACCESS_2_SHADER_WRITE_BIT_KHR:
2460 case VK_ACCESS_2_SHADER_STORAGE_WRITE_BIT_KHR:
2461 /* We're transitioning a buffer that was previously used as a write
2462 * destination through the data port. To make its content available
2463 * to future operations, flush the HDC pipeline.
2464 */
2465 pipe_bits |= ANV_PIPE_HDC_PIPELINE_FLUSH_BIT;
2466 break;
2467 case VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT_KHR:
2468 /* We're transitioning a buffer that was previously used as render
2469 * target. To make its content available to future operations, flush
2470 * the render target cache.
2471 */
2472 pipe_bits |= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
2473 break;
2474 case VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT_KHR:
2475 /* We're transitioning a buffer that was previously used as depth
2476 * buffer. To make its content available to future operations, flush
2477 * the depth cache.
2478 */
2479 pipe_bits |= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
2480 break;
2481 case VK_ACCESS_2_TRANSFER_WRITE_BIT_KHR:
2482 /* We're transitioning a buffer that was previously used as a
2483 * transfer write destination. Generic write operations include color
2484 * & depth operations as well as buffer operations like :
2485 * - vkCmdClearColorImage()
2486 * - vkCmdClearDepthStencilImage()
2487 * - vkCmdBlitImage()
2488 * - vkCmdCopy*(), vkCmdUpdate*(), vkCmdFill*()
2489 *
2490 * Most of these operations are implemented using Blorp which writes
2491 * through the render target, so flush that cache to make it visible
2492 * to future operations. And for depth related operations we also
2493 * need to flush the depth cache.
2494 */
2495 pipe_bits |= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
2496 pipe_bits |= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
2497 break;
2498 case VK_ACCESS_2_MEMORY_WRITE_BIT_KHR:
2499 /* We're transitioning a buffer for generic write operations. Flush
2500 * all the caches.
2501 */
2502 pipe_bits |= ANV_PIPE_FLUSH_BITS;
2503 break;
2504 case VK_ACCESS_2_HOST_WRITE_BIT_KHR:
2505 /* We're transitioning a buffer for access by the CPU. Invalidate
2506 * all the caches. Since the data and tile caches don't have an
2507 * invalidate operation, we are forced to flush those as well.
2508 */
2509 pipe_bits |= ANV_PIPE_FLUSH_BITS;
2510 pipe_bits |= ANV_PIPE_INVALIDATE_BITS;
2511 break;
2512 case VK_ACCESS_2_TRANSFORM_FEEDBACK_WRITE_BIT_EXT:
2513 case VK_ACCESS_2_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT:
2514 /* We're transitioning a buffer written either from the VS stage or
2515 * from the command streamer (see CmdEndTransformFeedbackEXT), so we
2516 * just need to stall the CS.
2517 */
2518 pipe_bits |= ANV_PIPE_CS_STALL_BIT;
2519 break;
2520 default:
2521 break; /* Nothing to do */
2522 }
2523 }
2524
2525 return pipe_bits;
2526 }
2527
2528 static inline enum anv_pipe_bits
2529 anv_pipe_invalidate_bits_for_access_flags(struct anv_device *device,
2530 VkAccessFlags2KHR flags)
2531 {
2532 enum anv_pipe_bits pipe_bits = 0;
2533
2534 u_foreach_bit64(b, flags) {
2535 switch ((VkAccessFlags2KHR)BITFIELD64_BIT(b)) {
2536 case VK_ACCESS_2_INDIRECT_COMMAND_READ_BIT_KHR:
2537 /* Indirect draw commands take a buffer as input that we're going to
2538 * read from the command streamer to load some of the HW registers
2539 * (see genX_cmd_buffer.c:load_indirect_parameters). This requires a
2540 * command streamer stall so that all the cache flushes have
2541 * completed before the command streamer loads from memory.
2542 */
2543 pipe_bits |= ANV_PIPE_CS_STALL_BIT;
2544 /* Indirect draw commands also set gl_BaseVertex & gl_BaseIndex
2545 * through a vertex buffer, so invalidate that cache.
2546 */
2547 pipe_bits |= ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
2548 /* For vkCmdDispatchIndirect, we also load gl_NumWorkGroups through a
2549 * UBO from the buffer, so we need to invalidate the constant cache.
2550 */
2551 pipe_bits |= ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT;
2552 pipe_bits |= ANV_PIPE_DATA_CACHE_FLUSH_BIT;
2553 /* A tile cache flush is needed for vkCmdDispatchIndirect since the
2554 * command streamer and vertex fetch aren't L3 coherent.
2555 */
2556 pipe_bits |= ANV_PIPE_TILE_CACHE_FLUSH_BIT;
2557 break;
2558 case VK_ACCESS_2_INDEX_READ_BIT_KHR:
2559 case VK_ACCESS_2_VERTEX_ATTRIBUTE_READ_BIT_KHR:
2560 /* We're transitioning a buffer to be used as input for vkCmdDraw*
2561 * commands, so we invalidate the VF cache to make sure there is no
2562 * stale data when we start rendering.
2563 */
2564 pipe_bits |= ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
2565 break;
2566 case VK_ACCESS_2_UNIFORM_READ_BIT_KHR:
2567 /* We're transitioning a buffer to be used as uniform data. Because
2568 * uniforms are accessed through the data port & sampler, we need to
2569 * invalidate the texture cache (sampler) & constant cache (data
2570 * port) to avoid stale data.
2571 */
2572 pipe_bits |= ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT;
2573 if (device->physical->compiler->indirect_ubos_use_sampler)
2574 pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
2575 else
2576 pipe_bits |= ANV_PIPE_HDC_PIPELINE_FLUSH_BIT;
2577 break;
2578 case VK_ACCESS_2_SHADER_READ_BIT_KHR:
2579 case VK_ACCESS_2_INPUT_ATTACHMENT_READ_BIT_KHR:
2580 case VK_ACCESS_2_TRANSFER_READ_BIT_KHR:
2581 /* Transitioning a buffer to be read through the sampler, so
2582 * invalidate the texture cache; we don't want any stale data.
2583 */
2584 pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
2585 break;
2586 case VK_ACCESS_2_MEMORY_READ_BIT_KHR:
2587 /* Transitioning a buffer for generic read, invalidate all the
2588 * caches.
2589 */
2590 pipe_bits |= ANV_PIPE_INVALIDATE_BITS;
2591 break;
2592 case VK_ACCESS_2_MEMORY_WRITE_BIT_KHR:
2593 /* Generic write, make sure all previously written things land in
2594 * memory.
2595 */
2596 pipe_bits |= ANV_PIPE_FLUSH_BITS;
2597 break;
2598 case VK_ACCESS_2_CONDITIONAL_RENDERING_READ_BIT_EXT:
2599 case VK_ACCESS_2_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT:
2600 /* Transitioning a buffer for conditional rendering or transform
2601 * feedback. We'll load the content of this buffer into HW registers
2602 * using the command streamer, so we need to stall the command
2603 * streamer to make sure any in-flight flush operations have
2604 * completed.
2605 */
2606 pipe_bits |= ANV_PIPE_CS_STALL_BIT;
2607 pipe_bits |= ANV_PIPE_TILE_CACHE_FLUSH_BIT;
2608 pipe_bits |= ANV_PIPE_DATA_CACHE_FLUSH_BIT;
2609 break;
2610 case VK_ACCESS_2_HOST_READ_BIT_KHR:
2611 /* We're transitioning a buffer that was written by the CPU. Flush
2612 * all the caches.
2613 */
2614 pipe_bits |= ANV_PIPE_FLUSH_BITS;
2615 break;
2616 default:
2617 break; /* Nothing to do */
2618 }
2619 }
2620
2621 return pipe_bits;
2622 }
2623
2624 #define VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV ( \
2625 VK_IMAGE_ASPECT_COLOR_BIT | \
2626 VK_IMAGE_ASPECT_PLANE_0_BIT | \
2627 VK_IMAGE_ASPECT_PLANE_1_BIT | \
2628 VK_IMAGE_ASPECT_PLANE_2_BIT)
2629 #define VK_IMAGE_ASPECT_PLANES_BITS_ANV ( \
2630 VK_IMAGE_ASPECT_PLANE_0_BIT | \
2631 VK_IMAGE_ASPECT_PLANE_1_BIT | \
2632 VK_IMAGE_ASPECT_PLANE_2_BIT)
2633
2634 struct anv_vertex_binding {
2635 struct anv_buffer * buffer;
2636 VkDeviceSize offset;
2637 VkDeviceSize stride;
2638 VkDeviceSize size;
2639 };
2640
2641 struct anv_xfb_binding {
2642 struct anv_buffer * buffer;
2643 VkDeviceSize offset;
2644 VkDeviceSize size;
2645 };
2646
2647 struct anv_push_constants {
2648 /** Push constant data provided by the client through vkPushConstants */
2649 uint8_t client_data[MAX_PUSH_CONSTANTS_SIZE];
2650
2651 /** Dynamic offsets for dynamic UBOs and SSBOs */
2652 uint32_t dynamic_offsets[MAX_DYNAMIC_BUFFERS];
2653
2654 /* Robust access pushed registers. */
2655 uint64_t push_reg_mask[MESA_SHADER_STAGES];
2656
2657 /** Ray query globals (RT_DISPATCH_GLOBALS) */
2658 uint64_t ray_query_globals;
2659
2660 /* Base addresses for descriptor sets */
2661 uint64_t desc_sets[MAX_SETS];
2662
2663 struct {
2664 /** Base workgroup ID
2665 *
2666 * Used for vkCmdDispatchBase.
2667 */
2668 uint32_t base_work_group_id[3];
2669
2670 /** Subgroup ID
2671 *
2672 * This is never set by software but is implicitly filled out when
2673 * uploading the push constants for compute shaders.
2674 */
2675 uint32_t subgroup_id;
2676 } cs;
2677 };
2678
2679 struct anv_dynamic_state {
2680 struct {
2681 uint32_t count;
2682 VkViewport viewports[MAX_VIEWPORTS];
2683 } viewport;
2684
2685 struct {
2686 uint32_t count;
2687 VkRect2D scissors[MAX_SCISSORS];
2688 } scissor;
2689
2690 float line_width;
2691
2692 struct {
2693 float bias;
2694 float clamp;
2695 float slope;
2696 } depth_bias;
2697
2698 float blend_constants[4];
2699
2700 struct {
2701 float min;
2702 float max;
2703 } depth_bounds;
2704
2705 struct {
2706 uint32_t front;
2707 uint32_t back;
2708 } stencil_compare_mask;
2709
2710 struct {
2711 uint32_t front;
2712 uint32_t back;
2713 } stencil_write_mask;
2714
2715 struct {
2716 uint32_t front;
2717 uint32_t back;
2718 } stencil_reference;
2719
2720 struct {
2721 struct {
2722 VkStencilOp fail_op;
2723 VkStencilOp pass_op;
2724 VkStencilOp depth_fail_op;
2725 VkCompareOp compare_op;
2726 } front;
2727 struct {
2728 VkStencilOp fail_op;
2729 VkStencilOp pass_op;
2730 VkStencilOp depth_fail_op;
2731 VkCompareOp compare_op;
2732 } back;
2733 } stencil_op;
2734
2735 struct {
2736 uint32_t factor;
2737 uint16_t pattern;
2738 } line_stipple;
2739
2740 struct {
2741 VkSampleLocationEXT locations[MAX_SAMPLE_LOCATIONS];
2742 } sample_locations;
2743
2744 struct {
2745 VkExtent2D rate;
2746 VkFragmentShadingRateCombinerOpKHR ops[2];
2747 } fragment_shading_rate;
2748
2749 VkCullModeFlags cull_mode;
2750 VkFrontFace front_face;
2751 VkPrimitiveTopology primitive_topology;
2752 bool depth_test_enable;
2753 bool depth_write_enable;
2754 VkCompareOp depth_compare_op;
2755 bool depth_bounds_test_enable;
2756 bool stencil_test_enable;
2757 bool raster_discard;
2758 bool depth_bias_enable;
2759 bool primitive_restart_enable;
2760 VkLogicOp logic_op;
2761 bool dyn_vbo_stride;
2762 bool dyn_vbo_size;
2763
2764 /* Bitfield, one bit per render target */
2765 uint8_t color_writes;
2766 };
2767
2768 extern const struct anv_dynamic_state default_dynamic_state;
2769
2770 uint32_t anv_dynamic_state_copy(struct anv_dynamic_state *dest,
2771 const struct anv_dynamic_state *src,
2772 uint32_t copy_mask);
2773
2774 struct anv_surface_state {
2775 struct anv_state state;
2776 /** Address of the surface referred to by this state
2777 *
2778 * This address is relative to the start of the BO.
2779 */
2780 struct anv_address address;
2781 /* Address of the aux surface, if any
2782 *
2783 * This field is ANV_NULL_ADDRESS if and only if no aux surface exists.
2784 *
2785 * With the exception of gfx8, the bottom 12 bits of this address' offset
2786 * include extra aux information.
2787 */
2788 struct anv_address aux_address;
2789 /* Address of the clear color, if any
2790 *
2791 * This address is relative to the start of the BO.
2792 */
2793 struct anv_address clear_address;
2794 };
2795
2796 struct anv_attachment {
2797 VkFormat vk_format;
2798 const struct anv_image_view *iview;
2799 VkImageLayout layout;
2800 enum isl_aux_usage aux_usage;
2801 struct anv_surface_state surface_state;
2802
2803 VkResolveModeFlagBits resolve_mode;
2804 const struct anv_image_view *resolve_iview;
2805 VkImageLayout resolve_layout;
2806 };
2807
2808 /** State tracking for vertex buffer flushes
2809 *
2810 * On Gfx8-9, the VF cache only considers the bottom 32 bits of memory
2811 * addresses. If you happen to have two vertex buffers which get placed
2812 * exactly 4 GiB apart and use them in back-to-back draw calls, you can get
2813 * collisions. In order to solve this problem, we track the vertex address
2814 * ranges which are live in the cache and invalidate the cache whenever a
2815 * range ends up spanning more than 32 bits of address space.
2816 */
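
/* Worked example (illustrative): a vertex buffer at 0x100001000 and another
 * at 0x200001000 differ by exactly 4 GiB, so both truncate to 0x00001000 in
 * a 32-bit tag and would alias in the VF cache. Tracking the combined dirty
 * range lets us invalidate only when it grows past 1ull << 32 bytes.
 */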
2817 struct anv_vb_cache_range {
2818 /* Virtual address at which the live vertex buffer cache range starts for
2819 * this vertex buffer index.
2820 */
2821 uint64_t start;
2822
2823 /* Virtual address of the byte after the end of the vertex buffer cache
2824 * range; exclusive, so that end - start is the size of the range.
2825 */
2826 uint64_t end;
2827 };
2828
2829 /* Check whether we need to apply the Gfx8-9 vertex buffer workaround */
2830 static inline bool
2831 anv_gfx8_9_vb_cache_range_needs_workaround(struct anv_vb_cache_range *bound,
2832 struct anv_vb_cache_range *dirty,
2833 struct anv_address vb_address,
2834 uint32_t vb_size)
2835 {
2836 if (vb_size == 0) {
2837 bound->start = 0;
2838 bound->end = 0;
2839 return false;
2840 }
2841
2842 assert(vb_address.bo && anv_bo_is_pinned(vb_address.bo));
2843 bound->start = intel_48b_address(anv_address_physical(vb_address));
2844 bound->end = bound->start + vb_size;
2845 assert(bound->end > bound->start); /* No overflow */
2846
2847 /* Align everything to a cache line */
2848 bound->start &= ~(64ull - 1ull);
2849 bound->end = align_u64(bound->end, 64);
2850
2851 /* Compute the dirty range */
2852 dirty->start = MIN2(dirty->start, bound->start);
2853 dirty->end = MAX2(dirty->end, bound->end);
2854
2855 /* If our range is larger than 32 bits, we have to flush */
2856 assert(bound->end - bound->start <= (1ull << 32));
2857 return (dirty->end - dirty->start) > (1ull << 32);
2858 }
2859
2860 /** State tracking for a particular pipeline bind point
2861 *
2862 * This struct is the base struct for anv_cmd_graphics_state and
2863 * anv_cmd_compute_state. These are used to track state which is bound to a
2864 * particular type of pipeline. Generic state that applies per-stage such as
2865 * binding table offsets and push constants is tracked generically with a
2866 * per-stage array in anv_cmd_state.
2867 */
2868 struct anv_cmd_pipeline_state {
2869 struct anv_descriptor_set *descriptors[MAX_SETS];
2870 struct anv_push_descriptor_set *push_descriptors[MAX_SETS];
2871
2872 struct anv_push_constants push_constants;
2873
2874 /* Push constant state allocated when flushing push constants. */
2875 struct anv_state push_constants_state;
2876 };
2877
2878 /** State tracking for graphics pipeline
2879 *
2880 * This has anv_cmd_pipeline_state as a base struct to track things which get
2881 * bound to a graphics pipeline. Along with general pipeline bind point state
2882 * which is in the anv_cmd_pipeline_state base struct, it also contains other
2883 * state which is graphics-specific.
2884 */
2885 struct anv_cmd_graphics_state {
2886 struct anv_cmd_pipeline_state base;
2887
2888 struct anv_graphics_pipeline *pipeline;
2889
2890 VkRenderingFlags rendering_flags;
2891 VkRect2D render_area;
2892 uint32_t layer_count;
2893 uint32_t samples;
2894 uint32_t view_mask;
2895 uint32_t color_att_count;
2896 struct anv_state att_states;
2897 struct anv_attachment color_att[MAX_RTS];
2898 struct anv_attachment depth_att;
2899 struct anv_attachment stencil_att;
2900 struct anv_state null_surface_state;
2901
2902 anv_cmd_dirty_mask_t dirty;
2903 uint32_t vb_dirty;
2904
2905 struct anv_vb_cache_range ib_bound_range;
2906 struct anv_vb_cache_range ib_dirty_range;
2907 struct anv_vb_cache_range vb_bound_ranges[33];
2908 struct anv_vb_cache_range vb_dirty_ranges[33];
2909
2910 VkShaderStageFlags push_constant_stages;
2911
2912 struct anv_dynamic_state dynamic;
2913
2914 uint32_t primitive_topology;
2915
2916 struct {
2917 struct anv_buffer *index_buffer;
2918 uint32_t index_type; /**< 3DSTATE_INDEX_BUFFER.IndexFormat */
2919 uint32_t index_offset;
2920 } gfx7;
2921 };
2922
2923 enum anv_depth_reg_mode {
2924 ANV_DEPTH_REG_MODE_UNKNOWN = 0,
2925 ANV_DEPTH_REG_MODE_HW_DEFAULT,
2926 ANV_DEPTH_REG_MODE_D16,
2927 };
2928
2929 /** State tracking for compute pipeline
2930 *
2931 * This has anv_cmd_pipeline_state as a base struct to track things which get
2932 * bound to a compute pipeline. Along with general pipeline bind point state
2933 * which is in the anv_cmd_pipeline_state base struct, it also contains other
2934 * state which is compute-specific.
2935 */
2936 struct anv_cmd_compute_state {
2937 struct anv_cmd_pipeline_state base;
2938
2939 struct anv_compute_pipeline *pipeline;
2940
2941 bool pipeline_dirty;
2942
2943 struct anv_state push_data;
2944
2945 struct anv_address num_workgroups;
2946 };
2947
2948 struct anv_cmd_ray_tracing_state {
2949 struct anv_cmd_pipeline_state base;
2950
2951 struct anv_ray_tracing_pipeline *pipeline;
2952
2953 bool pipeline_dirty;
2954
2955 struct {
2956 struct anv_bo *bo;
2957 struct brw_rt_scratch_layout layout;
2958 } scratch;
2959 };
2960
2961 /** State required while building cmd buffer */
2962 struct anv_cmd_state {
2963 /* PIPELINE_SELECT.PipelineSelection */
2964 uint32_t current_pipeline;
2965 const struct intel_l3_config * current_l3_config;
2966 uint32_t last_aux_map_state;
2967
2968 struct anv_cmd_graphics_state gfx;
2969 struct anv_cmd_compute_state compute;
2970 struct anv_cmd_ray_tracing_state rt;
2971
2972 enum anv_pipe_bits pending_pipe_bits;
2973 VkShaderStageFlags descriptors_dirty;
2974 VkShaderStageFlags push_constants_dirty;
2975
2976 uint32_t restart_index;
2977 struct anv_vertex_binding vertex_bindings[MAX_VBS];
2978 bool xfb_enabled;
2979 struct anv_xfb_binding xfb_bindings[MAX_XFB_BUFFERS];
2980 struct anv_state binding_tables[MESA_VULKAN_SHADER_STAGES];
2981 struct anv_state samplers[MESA_VULKAN_SHADER_STAGES];
2982
2983 unsigned char sampler_sha1s[MESA_VULKAN_SHADER_STAGES][20];
2984 unsigned char surface_sha1s[MESA_VULKAN_SHADER_STAGES][20];
2985 unsigned char push_sha1s[MESA_VULKAN_SHADER_STAGES][20];
2986
2987 /**
2988 * Whether or not the gfx8 PMA fix is enabled. We ensure that it is
2989 * disabled at the top of every command buffer by disabling it in
2990 * EndCommandBuffer and before invoking a secondary in ExecuteCommands.
2991 */
2992 bool pma_fix_enabled;
2993
2994 /**
2995 * Whether or not we know for certain that HiZ is enabled for the current
2996 * subpass. If, for whatever reason, we are unsure as to whether HiZ is
2997 * enabled or not, this will be false.
2998 */
2999 bool hiz_enabled;
3000
3001 /* We ensure the registers for the gfx12 D16 fix are initialized at the
3002 * first non-NULL depth stencil packet emission of every command buffer.
3003 * For secondary command buffer execution, we transfer the state from the
3004 * last command buffer to the primary (if known).
3005 */
3006 enum anv_depth_reg_mode depth_reg_mode;
3007
3008 bool conditional_render_enabled;
3009
3010 /**
3011 * Last rendering scale argument provided to
3012 * genX(cmd_buffer_emit_hashing_mode)().
3013 */
3014 unsigned current_hash_scale;
3015
3016 /**
3017 * A buffer used for spill/fill of ray queries.
3018 */
3019 struct anv_bo * ray_query_shadow_bo;
3020 };
3021
3022 #define ANV_MIN_CMD_BUFFER_BATCH_SIZE 8192
3023 #define ANV_MAX_CMD_BUFFER_BATCH_SIZE (16 * 1024 * 1024)
3024
3025 enum anv_cmd_buffer_exec_mode {
3026 ANV_CMD_BUFFER_EXEC_MODE_PRIMARY,
3027 ANV_CMD_BUFFER_EXEC_MODE_EMIT,
3028 ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT,
3029 ANV_CMD_BUFFER_EXEC_MODE_CHAIN,
3030 ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN,
3031 ANV_CMD_BUFFER_EXEC_MODE_CALL_AND_RETURN,
3032 };
3033
3034 struct anv_measure_batch;
3035
3036 struct anv_cmd_buffer {
3037 struct vk_command_buffer vk;
3038
3039 struct anv_device * device;
3040 struct anv_queue_family * queue_family;
3041
3042 struct anv_batch batch;
3043
3044 /* Pointer to the location in the batch where MI_BATCH_BUFFER_END was
3045 * recorded upon calling vkEndCommandBuffer(). This is useful if we need to
3046 * rewrite the end to chain multiple batches together at vkQueueSubmit().
3047 */
3048 void * batch_end;
3049
3050 /* Fields required for the actual chain of anv_batch_bo's.
3051 *
3052 * These fields are initialized by anv_cmd_buffer_init_batch_bo_chain().
3053 */
3054 struct list_head batch_bos;
3055 enum anv_cmd_buffer_exec_mode exec_mode;
3056
3057 /* A vector of anv_batch_bo pointers for every batch or surface buffer
3058 * referenced by this command buffer
3059 *
3060 * initialized by anv_cmd_buffer_init_batch_bo_chain()
3061 */
3062 struct u_vector seen_bbos;
3063
3064 /* A vector of int32_t's for every block of binding tables.
3065 *
3066 * initialized by anv_cmd_buffer_init_batch_bo_chain()
3067 */
3068 struct u_vector bt_block_states;
3069 struct anv_state bt_next;
3070
3071 struct anv_reloc_list surface_relocs;
3072 /** Last seen surface state block pool center bo offset */
3073 uint32_t last_ss_pool_center;
3074
3075 /* Serial for tracking buffer completion */
3076 uint32_t serial;
3077
3078 /* Stream objects for storing temporary data */
3079 struct anv_state_stream surface_state_stream;
3080 struct anv_state_stream dynamic_state_stream;
3081 struct anv_state_stream general_state_stream;
3082
3083 VkCommandBufferUsageFlags usage_flags;
3084
3085 struct anv_query_pool *perf_query_pool;
3086
3087 struct anv_cmd_state state;
3088
3089 struct anv_address return_addr;
3090
3091 /* Set by SetPerformanceMarkerINTEL, written into queries by CmdBeginQuery */
3092 uint64_t intel_perf_marker;
3093
3094 struct anv_measure_batch *measure;
3095
3096 /**
3097 * KHR_performance_query requires self-modifying command buffers, and this
3098 * array holds the locations of the modifying commands for the query begin
3099 * and end instructions storing performance counters. The array length is
3100 * anv_physical_device::n_perf_query_commands.
3101 */
3102 struct mi_address_token *self_mod_locations;
3103
3104 /**
3105 * Index tracking which of the self_mod_locations items have already been
3106 * used.
3107 */
3108 uint32_t perf_reloc_idx;
3109
3110 /**
3111 * Sum of all the anv_batch_bo sizes allocated for this command buffer.
3112 * Used to increase allocation size for long command buffers.
3113 */
3114 uint32_t total_batch_size;
3115
3116 /**
3117 * u_trace context used to record driver trace points for this command
3118 * buffer. */
3119 struct u_trace trace;
3120 };
3121
3122 /* Determine whether we can chain a given cmd_buffer to another one. We need
3123 * softpin and we also need to make sure that we can edit the end of the batch
3124 * to point to the next one, which requires the command buffer to not be used
3125 * simultaneously.
3126 */
3127 static inline bool
3128 anv_cmd_buffer_is_chainable(struct anv_cmd_buffer *cmd_buffer)
3129 {
3130 return !anv_use_relocations(cmd_buffer->device->physical) &&
3131 !(cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT);
3132 }
3133
3134 VkResult anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
3135 void anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
3136 void anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
3137 void anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer);
3138 void anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
3139 struct anv_cmd_buffer *secondary);
3140 void anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer);
3141 VkResult anv_cmd_buffer_execbuf(struct anv_queue *queue,
3142 struct anv_cmd_buffer *cmd_buffer,
3143 const VkSemaphore *in_semaphores,
3144 const uint64_t *in_wait_values,
3145 uint32_t num_in_semaphores,
3146 const VkSemaphore *out_semaphores,
3147 const uint64_t *out_signal_values,
3148 uint32_t num_out_semaphores,
3149 VkFence fence,
3150 int perf_query_pass);
3151
3152 VkResult anv_cmd_buffer_reset(struct anv_cmd_buffer *cmd_buffer);
3153
3154 struct anv_state anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
3155 const void *data, uint32_t size, uint32_t alignment);
3156 struct anv_state anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
3157 uint32_t *a, uint32_t *b,
3158 uint32_t dwords, uint32_t alignment);
3159
3160 struct anv_address
3161 anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer);
3162 struct anv_state
3163 anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
3164 uint32_t entries, uint32_t *state_offset);
3165 struct anv_state
3166 anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer);
3167 struct anv_state
3168 anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
3169 uint32_t size, uint32_t alignment);
3170
3171 VkResult
3172 anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer);
3173
3174 void anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer);
3175
3176 struct anv_state
3177 anv_cmd_buffer_gfx_push_constants(struct anv_cmd_buffer *cmd_buffer);
3178 struct anv_state
3179 anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer);
3180
3181 VkResult
3182 anv_cmd_buffer_alloc_blorp_binding_table(struct anv_cmd_buffer *cmd_buffer,
3183 uint32_t num_entries,
3184 uint32_t *state_offset,
3185 struct anv_state *bt_state);
3186
3187 void anv_cmd_buffer_dump(struct anv_cmd_buffer *cmd_buffer);
3188
3189 void anv_cmd_emit_conditional_render_predicate(struct anv_cmd_buffer *cmd_buffer);
3190
3191 enum anv_bo_sync_state {
3192 /** Indicates that this is a new (or newly reset) fence */
3193 ANV_BO_SYNC_STATE_RESET,
3194
3195 /** Indicates that this fence has been submitted to the GPU but is still
3196 * (as far as we know) in use by the GPU.
3197 */
3198 ANV_BO_SYNC_STATE_SUBMITTED,
3199
3200 ANV_BO_SYNC_STATE_SIGNALED,
3201 };
3202
3203 struct anv_bo_sync {
3204 struct vk_sync sync;
3205
3206 enum anv_bo_sync_state state;
3207 struct anv_bo *bo;
3208 };
3209
3210 extern const struct vk_sync_type anv_bo_sync_type;
3211
3212 static inline bool
3213 vk_sync_is_anv_bo_sync(const struct vk_sync *sync)
3214 {
3215 return sync->type == &anv_bo_sync_type;
3216 }
3217
3218 VkResult anv_create_sync_for_memory(struct vk_device *device,
3219 VkDeviceMemory memory,
3220 bool signal_memory,
3221 struct vk_sync **sync_out);
3222
3223 struct anv_event {
3224 struct vk_object_base base;
3225 uint64_t semaphore;
3226 struct anv_state state;
3227 };
3228
3229 #define ANV_STAGE_MASK ((1 << MESA_VULKAN_SHADER_STAGES) - 1)
3230
3231 #define anv_foreach_stage(stage, stage_bits) \
3232 for (gl_shader_stage stage, \
3233 __tmp = (gl_shader_stage)((stage_bits) & ANV_STAGE_MASK); \
3234 stage = __builtin_ffs(__tmp) - 1, __tmp; \
3235 __tmp &= ~(1 << (stage)))
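
/* Usage sketch (illustrative): iterate the stages set in a stage mask. The
 * macro relies on VK_SHADER_STAGE_* bit positions matching gl_shader_stage.
 *
 *    anv_foreach_stage(s, VK_SHADER_STAGE_VERTEX_BIT |
 *                         VK_SHADER_STAGE_FRAGMENT_BIT) {
 *       // s == MESA_SHADER_VERTEX, then MESA_SHADER_FRAGMENT
 *    }
 */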
3236
3237 struct anv_pipeline_bind_map {
3238 unsigned char surface_sha1[20];
3239 unsigned char sampler_sha1[20];
3240 unsigned char push_sha1[20];
3241
3242 uint32_t surface_count;
3243 uint32_t sampler_count;
3244
3245 struct anv_pipeline_binding * surface_to_descriptor;
3246 struct anv_pipeline_binding * sampler_to_descriptor;
3247
3248 struct anv_push_range push_ranges[4];
3249 };
3250
3251 struct anv_shader_bin_key {
3252 uint32_t size;
3253 uint8_t data[0];
3254 };
3255
3256 struct anv_shader_bin {
3257 uint32_t ref_cnt;
3258
3259 gl_shader_stage stage;
3260
3261 const struct anv_shader_bin_key *key;
3262
3263 struct anv_state kernel;
3264 uint32_t kernel_size;
3265
3266 const struct brw_stage_prog_data *prog_data;
3267 uint32_t prog_data_size;
3268
3269 struct brw_compile_stats stats[3];
3270 uint32_t num_stats;
3271
3272 struct nir_xfb_info *xfb_info;
3273
3274 struct anv_pipeline_bind_map bind_map;
3275 };
3276
3277 struct anv_shader_bin *
3278 anv_shader_bin_create(struct anv_device *device,
3279 gl_shader_stage stage,
3280 const void *key, uint32_t key_size,
3281 const void *kernel, uint32_t kernel_size,
3282 const struct brw_stage_prog_data *prog_data,
3283 uint32_t prog_data_size,
3284 const struct brw_compile_stats *stats, uint32_t num_stats,
3285 const struct nir_xfb_info *xfb_info,
3286 const struct anv_pipeline_bind_map *bind_map);
3287
3288 void
3289 anv_shader_bin_destroy(struct anv_device *device, struct anv_shader_bin *shader);
3290
3291 static inline void
anv_shader_bin_ref(struct anv_shader_bin * shader)3292 anv_shader_bin_ref(struct anv_shader_bin *shader)
3293 {
3294 assert(shader && shader->ref_cnt >= 1);
3295 p_atomic_inc(&shader->ref_cnt);
3296 }
3297
3298 static inline void
anv_shader_bin_unref(struct anv_device * device,struct anv_shader_bin * shader)3299 anv_shader_bin_unref(struct anv_device *device, struct anv_shader_bin *shader)
3300 {
3301 assert(shader && shader->ref_cnt >= 1);
3302 if (p_atomic_dec_zero(&shader->ref_cnt))
3303 anv_shader_bin_destroy(device, shader);
3304 }
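
/* Usage sketch (illustrative): anv_shader_bin objects are reference counted,
 * so sharing a compiled shader between pipelines amounts to taking a
 * reference and dropping it on teardown. `bin` below is a hypothetical
 * pointer obtained from anv_shader_bin_create() or a pipeline cache lookup.
 *
 *    anv_shader_bin_ref(bin);           // new owner
 *    ...
 *    anv_shader_bin_unref(device, bin); // destroys when refcount hits zero
 */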

#define anv_shader_bin_get_bsr(bin, local_arg_offset) ({             \
   assert((local_arg_offset) % 8 == 0);                              \
   const struct brw_bs_prog_data *prog_data =                        \
      brw_bs_prog_data_const(bin->prog_data);                        \
   assert(prog_data->simd_size == 8 || prog_data->simd_size == 16);  \
                                                                     \
   (struct GFX_BINDLESS_SHADER_RECORD) {                             \
      .OffsetToLocalArguments = (local_arg_offset) / 8,              \
      .BindlessShaderDispatchMode =                                  \
         prog_data->simd_size == 16 ? RT_SIMD16 : RT_SIMD8,          \
      .KernelStartPointer = bin->kernel.offset,                      \
   };                                                                \
})

struct anv_pipeline_executable {
   gl_shader_stage stage;

   struct brw_compile_stats stats;

   char *nir;
   char *disasm;
};

enum anv_pipeline_type {
   ANV_PIPELINE_GRAPHICS,
   ANV_PIPELINE_COMPUTE,
   ANV_PIPELINE_RAY_TRACING,
};

struct anv_pipeline {
   struct vk_object_base base;

   struct anv_device * device;

   struct anv_batch batch;
   struct anv_reloc_list batch_relocs;

   void * mem_ctx;

   enum anv_pipeline_type type;
   VkPipelineCreateFlags flags;

   uint32_t ray_queries;

   struct util_dynarray executables;

   const struct intel_l3_config * l3_config;
};

struct anv_graphics_pipeline {
   struct anv_pipeline base;

   uint32_t batch_data[512];

   /* States that are part of batch_data and should not be emitted
    * dynamically.
    */
   anv_cmd_dirty_mask_t static_state_mask;

   /* States that need to be reemitted in cmd_buffer_flush_dynamic_state().
    * This might cover more than the dynamic states specified at pipeline
    * creation.
    */
   anv_cmd_dirty_mask_t dynamic_state_mask;

   struct anv_dynamic_state dynamic_state;

   /* States declared dynamic at pipeline creation. */
   anv_cmd_dirty_mask_t dynamic_states;

   uint32_t topology;

   /* These fields are required with dynamic primitive topology;
    * rasterization_samples is used only with gen < 8.
    */
   VkLineRasterizationModeEXT line_mode;
   VkPolygonMode polygon_mode;
   uint32_t rasterization_samples;

   VkColorComponentFlags color_comp_writes[MAX_RTS];

   struct anv_shader_bin * shaders[ANV_GRAPHICS_SHADER_STAGE_COUNT];

   VkShaderStageFlags active_stages;
   uint32_t view_mask;

   bool writes_depth;
   bool depth_test_enable;
   bool writes_stencil;
   bool stencil_test_enable;
   bool depth_clamp_enable;
   bool depth_clip_enable;
   bool kill_pixel;
   bool depth_bounds_test_enable;
   bool force_fragment_thread_dispatch;
   bool negative_one_to_one;

   /* When primitive replication is used, subpass->view_mask will describe what
    * views to replicate.
    */
   bool use_primitive_replication;

   struct anv_state blend_state;

   uint32_t vb_used;
   struct anv_pipeline_vertex_binding {
      uint32_t stride;
      bool instanced;
      uint32_t instance_divisor;
   } vb[MAX_VBS];

   struct {
      uint32_t sf[7];
      uint32_t depth_stencil_state[3];
      uint32_t clip[4];
      uint32_t xfb_bo_pitch[4];
      uint32_t wm[3];
      uint32_t blend_state[MAX_RTS * 2];
      uint32_t streamout_state[3];
   } gfx7;

   struct {
      uint32_t sf[4];
      uint32_t raster[5];
      uint32_t wm_depth_stencil[3];
      uint32_t wm[2];
      uint32_t ps_blend[2];
      uint32_t blend_state[1 + MAX_RTS * 2];
      uint32_t streamout_state[5];
   } gfx8;

   struct {
      uint32_t wm_depth_stencil[4];
   } gfx9;
};

struct anv_compute_pipeline {
   struct anv_pipeline base;

   struct anv_shader_bin * cs;
   uint32_t batch_data[9];
   uint32_t interface_descriptor_data[8];
};

struct anv_rt_shader_group {
   VkRayTracingShaderGroupTypeKHR type;

   struct anv_shader_bin *general;
   struct anv_shader_bin *closest_hit;
   struct anv_shader_bin *any_hit;
   struct anv_shader_bin *intersection;

   /* VK_KHR_ray_tracing requires shaderGroupHandleSize == 32 */
   uint32_t handle[8];
};

struct anv_ray_tracing_pipeline {
   struct anv_pipeline base;

   /* All shaders in the pipeline */
   struct util_dynarray shaders;

   uint32_t group_count;
   struct anv_rt_shader_group * groups;

   /* If non-zero, this is the default computed stack size as per the stack
    * size computation in the Vulkan spec. If zero, that indicates that the
    * client has requested a dynamic stack size.
    */
   uint32_t stack_size;
};

#define ANV_DECL_PIPELINE_DOWNCAST(pipe_type, pipe_enum)             \
   static inline struct anv_##pipe_type##_pipeline *                 \
   anv_pipeline_to_##pipe_type(struct anv_pipeline *pipeline)        \
   {                                                                 \
      assert(pipeline->type == pipe_enum);                           \
      return (struct anv_##pipe_type##_pipeline *) pipeline;         \
   }

ANV_DECL_PIPELINE_DOWNCAST(graphics, ANV_PIPELINE_GRAPHICS)
ANV_DECL_PIPELINE_DOWNCAST(compute, ANV_PIPELINE_COMPUTE)
ANV_DECL_PIPELINE_DOWNCAST(ray_tracing, ANV_PIPELINE_RAY_TRACING)
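
/* Usage sketch (illustrative): the downcasts generated above are how generic
 * anv_pipeline pointers are narrowed once the type enum has been checked.
 * `pipeline` is a hypothetical struct anv_pipeline pointer.
 *
 *    if (pipeline->type == ANV_PIPELINE_GRAPHICS) {
 *       struct anv_graphics_pipeline *gfx =
 *          anv_pipeline_to_graphics(pipeline);
 *       ...
 *    }
 */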

static inline bool
anv_pipeline_has_stage(const struct anv_graphics_pipeline *pipeline,
                       gl_shader_stage stage)
{
   return (pipeline->active_stages & mesa_to_vk_shader_stage(stage)) != 0;
}

static inline bool
anv_pipeline_is_primitive(const struct anv_graphics_pipeline *pipeline)
{
   return anv_pipeline_has_stage(pipeline, MESA_SHADER_VERTEX);
}

static inline bool
anv_pipeline_is_mesh(const struct anv_graphics_pipeline *pipeline)
{
   return anv_pipeline_has_stage(pipeline, MESA_SHADER_MESH);
}

static inline bool
anv_cmd_buffer_all_color_write_masked(const struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_cmd_graphics_state *state = &cmd_buffer->state.gfx;
   uint8_t color_writes = state->dynamic.color_writes;

   /* All writes disabled through vkCmdSetColorWriteEnableEXT */
   if ((color_writes & ((1u << state->color_att_count) - 1)) == 0)
      return true;

   /* Or all write masks are empty */
   for (uint32_t i = 0; i < state->color_att_count; i++) {
      if (state->pipeline->color_comp_writes[i] != 0)
         return false;
   }

   return true;
}

#define ANV_DECL_GET_GRAPHICS_PROG_DATA_FUNC(prefix, stage)               \
   static inline const struct brw_##prefix##_prog_data *                  \
   get_##prefix##_prog_data(const struct anv_graphics_pipeline *pipeline) \
   {                                                                      \
      if (anv_pipeline_has_stage(pipeline, stage)) {                      \
         return (const struct brw_##prefix##_prog_data *)                 \
                pipeline->shaders[stage]->prog_data;                      \
      } else {                                                            \
         return NULL;                                                     \
      }                                                                   \
   }

ANV_DECL_GET_GRAPHICS_PROG_DATA_FUNC(vs, MESA_SHADER_VERTEX)
ANV_DECL_GET_GRAPHICS_PROG_DATA_FUNC(tcs, MESA_SHADER_TESS_CTRL)
ANV_DECL_GET_GRAPHICS_PROG_DATA_FUNC(tes, MESA_SHADER_TESS_EVAL)
ANV_DECL_GET_GRAPHICS_PROG_DATA_FUNC(gs, MESA_SHADER_GEOMETRY)
ANV_DECL_GET_GRAPHICS_PROG_DATA_FUNC(wm, MESA_SHADER_FRAGMENT)
ANV_DECL_GET_GRAPHICS_PROG_DATA_FUNC(mesh, MESA_SHADER_MESH)
ANV_DECL_GET_GRAPHICS_PROG_DATA_FUNC(task, MESA_SHADER_TASK)
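
/* Usage sketch (illustrative): each macro expansion above yields a
 * NULL-returning getter, so callers guard on the stage being present. For
 * example, with a hypothetical graphics pipeline pointer:
 *
 *    const struct brw_wm_prog_data *wm = get_wm_prog_data(pipeline);
 *    if (wm != NULL) {
 *       // fragment shader is present; wm->... is valid
 *    }
 */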

static inline const struct brw_cs_prog_data *
get_cs_prog_data(const struct anv_compute_pipeline *pipeline)
{
   assert(pipeline->cs);
   return (const struct brw_cs_prog_data *) pipeline->cs->prog_data;
}

static inline const struct brw_vue_prog_data *
anv_pipeline_get_last_vue_prog_data(const struct anv_graphics_pipeline *pipeline)
{
   if (anv_pipeline_has_stage(pipeline, MESA_SHADER_GEOMETRY))
      return &get_gs_prog_data(pipeline)->base;
   else if (anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL))
      return &get_tes_prog_data(pipeline)->base;
   else
      return &get_vs_prog_data(pipeline)->base;
}

static inline bool
anv_cmd_buffer_needs_dynamic_state(const struct anv_cmd_buffer *cmd_buffer,
                                   anv_cmd_dirty_mask_t mask)
{
   /* Only dynamic state */
   assert((mask & ANV_CMD_DIRTY_PIPELINE) == 0);

   /* If all the state is statically put into the pipeline batch, nothing to
    * do.
    */
   if ((cmd_buffer->state.gfx.pipeline->static_state_mask & mask) == mask)
      return false;

   /* Dynamic state affected by vkCmd* commands */
   if (cmd_buffer->state.gfx.dirty & mask)
      return true;

   /* For all other states we might have part of the information in the
    * anv_graphics_pipeline::dynamic_state not emitted as part of the pipeline
    * batch so we need to reemit the packet associated with this state if the
    * pipeline changed.
    */
   return (cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_PIPELINE) != 0;
}

VkResult
anv_device_init_rt_shaders(struct anv_device *device);

void
anv_device_finish_rt_shaders(struct anv_device *device);

VkResult
anv_pipeline_init(struct anv_pipeline *pipeline,
                  struct anv_device *device,
                  enum anv_pipeline_type type,
                  VkPipelineCreateFlags flags,
                  const VkAllocationCallbacks *pAllocator);

void
anv_pipeline_finish(struct anv_pipeline *pipeline,
                    struct anv_device *device,
                    const VkAllocationCallbacks *pAllocator);

VkResult
anv_graphics_pipeline_init(struct anv_graphics_pipeline *pipeline, struct anv_device *device,
                           struct anv_pipeline_cache *cache,
                           const VkGraphicsPipelineCreateInfo *pCreateInfo,
                           const VkPipelineRenderingCreateInfoKHR *rendering_info,
                           const VkAllocationCallbacks *alloc);

VkResult
anv_pipeline_compile_cs(struct anv_compute_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkComputePipelineCreateInfo *info,
                        const struct vk_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info);

VkResult
anv_ray_tracing_pipeline_init(struct anv_ray_tracing_pipeline *pipeline,
                              struct anv_device *device,
                              struct anv_pipeline_cache *cache,
                              const VkRayTracingPipelineCreateInfoKHR *pCreateInfo,
                              const VkAllocationCallbacks *alloc);

struct anv_format_plane {
   enum isl_format isl_format:16;
   struct isl_swizzle swizzle;

   /* Whether this plane contains chroma channels */
   bool has_chroma;

   /* For downscaling of YUV planes */
   uint8_t denominator_scales[2];

   /* How to map sampled ycbcr planes to a single 4 component element. */
   struct isl_swizzle ycbcr_swizzle;

   /* What aspect is associated to this plane */
   VkImageAspectFlags aspect;
};


struct anv_format {
   struct anv_format_plane planes[3];
   VkFormat vk_format;
   uint8_t n_planes;
   bool can_ycbcr;
};

static inline void
anv_assert_valid_aspect_set(VkImageAspectFlags aspects)
{
   if (util_bitcount(aspects) == 1) {
      assert(aspects & (VK_IMAGE_ASPECT_COLOR_BIT |
                        VK_IMAGE_ASPECT_DEPTH_BIT |
                        VK_IMAGE_ASPECT_STENCIL_BIT |
                        VK_IMAGE_ASPECT_PLANE_0_BIT |
                        VK_IMAGE_ASPECT_PLANE_1_BIT |
                        VK_IMAGE_ASPECT_PLANE_2_BIT));
   } else if (aspects & VK_IMAGE_ASPECT_PLANES_BITS_ANV) {
      assert(aspects == VK_IMAGE_ASPECT_PLANE_0_BIT ||
             aspects == (VK_IMAGE_ASPECT_PLANE_0_BIT |
                         VK_IMAGE_ASPECT_PLANE_1_BIT) ||
             aspects == (VK_IMAGE_ASPECT_PLANE_0_BIT |
                         VK_IMAGE_ASPECT_PLANE_1_BIT |
                         VK_IMAGE_ASPECT_PLANE_2_BIT));
   } else {
      assert(aspects == (VK_IMAGE_ASPECT_DEPTH_BIT |
                         VK_IMAGE_ASPECT_STENCIL_BIT));
   }
}

/**
 * Return the aspect's plane relative to all_aspects. For an image, for
 * instance, all_aspects would be the set of aspects in the image. For
 * an image view, all_aspects would be the subset of aspects represented
 * by that particular view.
 */
static inline uint32_t
anv_aspect_to_plane(VkImageAspectFlags all_aspects,
                    VkImageAspectFlagBits aspect)
{
   anv_assert_valid_aspect_set(all_aspects);
   assert(util_bitcount(aspect) == 1);
   assert(!(aspect & ~all_aspects));

   /* Because we always put image and view planes in aspect-bit-order, the
    * plane index is the number of bits in all_aspects before aspect.
    */
   return util_bitcount(all_aspects & (aspect - 1));
}
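
/* Worked example (illustrative): for a depth/stencil image, all_aspects is
 * DEPTH_BIT | STENCIL_BIT. Passing aspect = STENCIL_BIT counts the aspect
 * bits below it, i.e. util_bitcount(all_aspects & (STENCIL_BIT - 1)) == 1,
 * so stencil lands on plane 1, while depth, having no aspect bits below it,
 * lands on plane 0.
 */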

#define anv_foreach_image_aspect_bit(b, image, aspects) \
   u_foreach_bit(b, vk_image_expand_aspect_mask(&(image)->vk, aspects))

const struct anv_format *
anv_get_format(VkFormat format);

static inline uint32_t
anv_get_format_planes(VkFormat vk_format)
{
   const struct anv_format *format = anv_get_format(vk_format);

   return format != NULL ? format->n_planes : 0;
}

struct anv_format_plane
anv_get_format_plane(const struct intel_device_info *devinfo,
                     VkFormat vk_format, uint32_t plane,
                     VkImageTiling tiling);

struct anv_format_plane
anv_get_format_aspect(const struct intel_device_info *devinfo,
                      VkFormat vk_format,
                      VkImageAspectFlagBits aspect, VkImageTiling tiling);

static inline enum isl_format
anv_get_isl_format(const struct intel_device_info *devinfo, VkFormat vk_format,
                   VkImageAspectFlags aspect, VkImageTiling tiling)
{
   return anv_get_format_aspect(devinfo, vk_format, aspect, tiling).isl_format;
}

bool anv_formats_ccs_e_compatible(const struct intel_device_info *devinfo,
                                  VkImageCreateFlags create_flags,
                                  VkFormat vk_format, VkImageTiling vk_tiling,
                                  VkImageUsageFlags vk_usage,
                                  const VkImageFormatListCreateInfoKHR *fmt_list);

extern VkFormat
vk_format_from_android(unsigned android_format, unsigned android_usage);

static inline struct isl_swizzle
anv_swizzle_for_render(struct isl_swizzle swizzle)
{
   /* Sometimes the swizzle will have alpha mapped to one. We do this to
    * fake RGB as RGBA for texturing.
    */
   assert(swizzle.a == ISL_CHANNEL_SELECT_ONE ||
          swizzle.a == ISL_CHANNEL_SELECT_ALPHA);

   /* But it doesn't matter what we render to that channel */
   swizzle.a = ISL_CHANNEL_SELECT_ALPHA;

   return swizzle;
}

void
anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm);

/**
 * Describes how each part of anv_image will be bound to memory.
 */
struct anv_image_memory_range {
   /**
    * Disjoint bindings into which each portion of the image will be bound.
    *
    * Binding images to memory can be complicated and involve binding
    * different portions of the image to different memory objects or
    * regions. For most images, everything lives in the MAIN binding and
    * gets bound by vkBindImageMemory. For disjoint multi-planar images,
    * each plane has a unique, disjoint binding and gets bound by
    * vkBindImageMemory2 with VkBindImagePlaneMemoryInfo. There may also
    * exist bits of memory which are implicit or driver-managed and live in
    * special-case bindings.
    */
   enum anv_image_memory_binding {
      /**
       * Used if and only if image is not multi-planar disjoint. Bound by
       * vkBindImageMemory2 without VkBindImagePlaneMemoryInfo.
       */
      ANV_IMAGE_MEMORY_BINDING_MAIN,

      /**
       * Used if and only if image is multi-planar disjoint. Bound by
       * vkBindImageMemory2 with VkBindImagePlaneMemoryInfo.
       */
      ANV_IMAGE_MEMORY_BINDING_PLANE_0,
      ANV_IMAGE_MEMORY_BINDING_PLANE_1,
      ANV_IMAGE_MEMORY_BINDING_PLANE_2,

      /**
       * Driver-private bo. In special cases we may store the aux surface
       * and/or aux state in this binding.
       */
      ANV_IMAGE_MEMORY_BINDING_PRIVATE,

      /** Sentinel */
      ANV_IMAGE_MEMORY_BINDING_END,
   } binding;

   /**
    * Offset is relative to the start of the binding created by
    * vkBindImageMemory, not to the start of the bo.
    */
   uint64_t offset;

   uint64_t size;
   uint32_t alignment;
};

/**
 * Subsurface of an anv_image.
 */
struct anv_surface {
   struct isl_surf isl;
   struct anv_image_memory_range memory_range;
};

static inline bool MUST_CHECK
anv_surface_is_valid(const struct anv_surface *surface)
{
   return surface->isl.size_B > 0 && surface->memory_range.size > 0;
}

struct anv_image {
   struct vk_image vk;

   uint32_t n_planes;

   /**
    * Image has multi-planar format and was created with
    * VK_IMAGE_CREATE_DISJOINT_BIT.
    */
   bool disjoint;

   /**
    * Image is a WSI image
    */
   bool from_wsi;

   /**
    * Image was imported from a struct AHardwareBuffer. We have to delay
    * final image creation until bind time.
    */
   bool from_ahb;

   /**
    * Image was imported from gralloc with VkNativeBufferANDROID. The gralloc bo
    * must be released when the image is destroyed.
    */
   bool from_gralloc;

   /**
    * The memory bindings created by vkCreateImage and vkBindImageMemory.
    *
    * For details on the image's memory layout, see check_memory_bindings().
    *
    * vkCreateImage constructs the `memory_range` for each
    * anv_image_memory_binding. After vkCreateImage, each binding is valid if
    * and only if `memory_range::size > 0`.
    *
    * vkBindImageMemory binds each valid `memory_range` to an `address`.
    * Usually, the app will provide the address via the parameters of
    * vkBindImageMemory. However, special-case bindings may be bound to
    * driver-private memory.
    */
   struct anv_image_binding {
      struct anv_image_memory_range memory_range;
      struct anv_address address;
   } bindings[ANV_IMAGE_MEMORY_BINDING_END];

   /**
    * Image subsurfaces
    *
    * For a given aspect, anv_image::planes[x].surface is valid if and only
    * if anv_image::aspects contains that aspect. Refer to
    * anv_image_aspect_to_plane() to determine the plane index x associated
    * with a given aspect.
    *
    * The hardware requires that the depth buffer and stencil buffer be
    * separate surfaces. From Vulkan's perspective, though, depth and stencil
    * reside in the same VkImage. To satisfy both the hardware and Vulkan, we
    * allocate the depth and stencil buffers as separate surfaces in the same
    * bo.
    */
   struct anv_image_plane {
      struct anv_surface primary_surface;

      /**
       * A surface which shadows the main surface and may have different
       * tiling. This is used for sampling using a tiling that isn't supported
       * for other operations.
       */
      struct anv_surface shadow_surface;

      /**
       * The base aux usage for this image. For color images, this can be
       * either CCS_E or CCS_D depending on whether or not we can reliably
       * leave CCS on all the time.
       */
      enum isl_aux_usage aux_usage;

      struct anv_surface aux_surface;

      /** Location of the fast clear state. */
      struct anv_image_memory_range fast_clear_memory_range;
   } planes[3];
};

static inline bool
anv_image_is_externally_shared(const struct anv_image *image)
{
   return image->vk.drm_format_mod != DRM_FORMAT_MOD_INVALID ||
          image->vk.external_handle_types != 0;
}

static inline bool
anv_image_has_private_binding(const struct anv_image *image)
{
   const struct anv_image_binding private_binding =
      image->bindings[ANV_IMAGE_MEMORY_BINDING_PRIVATE];
   return private_binding.memory_range.size != 0;
}

/* The ordering of this enum is important */
enum anv_fast_clear_type {
   /** Image does not have/support any fast-clear blocks */
   ANV_FAST_CLEAR_NONE = 0,
   /** Image has/supports fast-clear but only to the default value */
   ANV_FAST_CLEAR_DEFAULT_VALUE = 1,
   /** Image has/supports fast-clear with an arbitrary fast-clear value */
   ANV_FAST_CLEAR_ANY = 2,
};
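
/* The explicit values above matter because callers compare levels, not just
 * equality. A hypothetical sketch of the intended use, with
 * anv_layout_to_fast_clear_type() declared later in this header:
 *
 *    enum anv_fast_clear_type allowed =
 *       anv_layout_to_fast_clear_type(devinfo, image, aspect, layout);
 *    if (allowed >= ANV_FAST_CLEAR_DEFAULT_VALUE) {
 *       // at least a default-value fast clear is permitted here
 *    }
 */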

/**
 * Return the aspect's _format_ plane, not its _memory_ plane (using the
 * vocabulary of VK_EXT_image_drm_format_modifier). As a consequence, \a
 * aspect_mask may contain VK_IMAGE_ASPECT_PLANE_*, but must not contain
 * VK_IMAGE_ASPECT_MEMORY_PLANE_*.
 */
static inline uint32_t
anv_image_aspect_to_plane(const struct anv_image *image,
                          VkImageAspectFlagBits aspect)
{
   return anv_aspect_to_plane(image->vk.aspects, aspect);
}

/* Returns the number of auxiliary buffer levels attached to an image. */
static inline uint8_t
anv_image_aux_levels(const struct anv_image * const image,
                     VkImageAspectFlagBits aspect)
{
   uint32_t plane = anv_image_aspect_to_plane(image, aspect);
   if (image->planes[plane].aux_usage == ISL_AUX_USAGE_NONE)
      return 0;

   return image->vk.mip_levels;
}

/* Returns the number of auxiliary buffer layers attached to an image. */
static inline uint32_t
anv_image_aux_layers(const struct anv_image * const image,
                     VkImageAspectFlagBits aspect,
                     const uint8_t miplevel)
{
   assert(image);

   /* The miplevel must exist in the main buffer. */
   assert(miplevel < image->vk.mip_levels);

   if (miplevel >= anv_image_aux_levels(image, aspect)) {
      /* There are no layers with auxiliary data because the miplevel has no
       * auxiliary data.
       */
      return 0;
   }

   return MAX2(image->vk.array_layers, image->vk.extent.depth >> miplevel);
}

static inline struct anv_address MUST_CHECK
anv_image_address(const struct anv_image *image,
                  const struct anv_image_memory_range *mem_range)
{
   const struct anv_image_binding *binding = &image->bindings[mem_range->binding];
   assert(binding->memory_range.offset == 0);

   if (mem_range->size == 0)
      return ANV_NULL_ADDRESS;

   return anv_address_add(binding->address, mem_range->offset);
}

static inline struct anv_address
anv_image_get_clear_color_addr(UNUSED const struct anv_device *device,
                               const struct anv_image *image,
                               VkImageAspectFlagBits aspect)
{
   assert(image->vk.aspects & (VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV |
                               VK_IMAGE_ASPECT_DEPTH_BIT));

   uint32_t plane = anv_image_aspect_to_plane(image, aspect);
   const struct anv_image_memory_range *mem_range =
      &image->planes[plane].fast_clear_memory_range;

   return anv_image_address(image, mem_range);
}

static inline struct anv_address
anv_image_get_fast_clear_type_addr(const struct anv_device *device,
                                   const struct anv_image *image,
                                   VkImageAspectFlagBits aspect)
{
   struct anv_address addr =
      anv_image_get_clear_color_addr(device, image, aspect);

   const unsigned clear_color_state_size = device->info.ver >= 10 ?
      device->isl_dev.ss.clear_color_state_size :
      device->isl_dev.ss.clear_value_size;
   return anv_address_add(addr, clear_color_state_size);
}

static inline struct anv_address
anv_image_get_compression_state_addr(const struct anv_device *device,
                                     const struct anv_image *image,
                                     VkImageAspectFlagBits aspect,
                                     uint32_t level, uint32_t array_layer)
{
   assert(level < anv_image_aux_levels(image, aspect));
   assert(array_layer < anv_image_aux_layers(image, aspect, level));
   UNUSED uint32_t plane = anv_image_aspect_to_plane(image, aspect);
   assert(image->planes[plane].aux_usage == ISL_AUX_USAGE_CCS_E);

   /* Relative to start of the plane's fast clear memory range */
   uint32_t offset;

   offset = 4; /* Go past the fast clear type */

   if (image->vk.image_type == VK_IMAGE_TYPE_3D) {
      for (uint32_t l = 0; l < level; l++)
         offset += anv_minify(image->vk.extent.depth, l) * 4;
   } else {
      offset += level * image->vk.array_layers * 4;
   }

   offset += array_layer * 4;

   assert(offset < image->planes[plane].fast_clear_memory_range.size);

   return anv_address_add(
      anv_image_get_fast_clear_type_addr(device, image, aspect),
      offset);
}
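
/* Worked example (illustrative): for a 2D image with array_layers == 4, the
 * compression state dword for level 2, layer 1 lives at offset
 * 4 + 2 * 4 * 4 + 1 * 4 == 40 bytes past the clear color entry, i.e. one
 * dword per (level, layer) pair preceded by the 4-byte fast clear type.
 */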

/* Returns true if a HiZ-enabled depth buffer can be sampled from. */
static inline bool
anv_can_sample_with_hiz(const struct intel_device_info * const devinfo,
                        const struct anv_image *image)
{
   if (!(image->vk.aspects & VK_IMAGE_ASPECT_DEPTH_BIT))
      return false;

   /* For Gfx8-11, there are some restrictions around sampling from HiZ.
    * The Skylake PRM docs for RENDER_SURFACE_STATE::AuxiliarySurfaceMode
    * say:
    *
    *    "If this field is set to AUX_HIZ, Number of Multisamples must
    *    be MULTISAMPLECOUNT_1, and Surface Type cannot be SURFTYPE_3D."
    */
   if (image->vk.image_type == VK_IMAGE_TYPE_3D)
      return false;

   /* Allow this feature on BDW even though it is disabled in the BDW devinfo
    * struct. There's documentation which suggests that this feature actually
    * reduces performance on BDW, but it has only been observed to help so
    * far. Sampling fast-cleared blocks on BDW must also be handled with care
    * (see depth_stencil_attachment_compute_aux_usage() for more info).
    */
   if (devinfo->ver != 8 && !devinfo->has_sample_with_hiz)
      return false;

   return image->vk.samples == 1;
}

/* Returns true if an MCS-enabled buffer can be sampled from. */
static inline bool
anv_can_sample_mcs_with_clear(const struct intel_device_info * const devinfo,
                              const struct anv_image *image)
{
   assert(image->vk.aspects == VK_IMAGE_ASPECT_COLOR_BIT);
   const uint32_t plane =
      anv_image_aspect_to_plane(image, VK_IMAGE_ASPECT_COLOR_BIT);

   assert(isl_aux_usage_has_mcs(image->planes[plane].aux_usage));

   const struct anv_surface *anv_surf = &image->planes[plane].primary_surface;

   /* On TGL, the sampler has an issue with some 8 and 16bpp MSAA fast clears.
    * See HSD 1707282275, wa_14013111325. Due to the use of
    * format-reinterpretation, a simplified workaround is implemented.
    */
   if (devinfo->ver >= 12 &&
       isl_format_get_layout(anv_surf->isl.format)->bpb <= 16) {
      return false;
   }

   return true;
}

static inline bool
anv_image_plane_uses_aux_map(const struct anv_device *device,
                             const struct anv_image *image,
                             uint32_t plane)
{
   return device->info.has_aux_map &&
          isl_aux_usage_has_ccs(image->planes[plane].aux_usage);
}

void
anv_cmd_buffer_mark_image_written(struct anv_cmd_buffer *cmd_buffer,
                                  const struct anv_image *image,
                                  VkImageAspectFlagBits aspect,
                                  enum isl_aux_usage aux_usage,
                                  uint32_t level,
                                  uint32_t base_layer,
                                  uint32_t layer_count);

void
anv_image_clear_color(struct anv_cmd_buffer *cmd_buffer,
                      const struct anv_image *image,
                      VkImageAspectFlagBits aspect,
                      enum isl_aux_usage aux_usage,
                      enum isl_format format, struct isl_swizzle swizzle,
                      uint32_t level, uint32_t base_layer, uint32_t layer_count,
                      VkRect2D area, union isl_color_value clear_color);
void
anv_image_clear_depth_stencil(struct anv_cmd_buffer *cmd_buffer,
                              const struct anv_image *image,
                              VkImageAspectFlags aspects,
                              enum isl_aux_usage depth_aux_usage,
                              uint32_t level,
                              uint32_t base_layer, uint32_t layer_count,
                              VkRect2D area,
                              float depth_value, uint8_t stencil_value);
void
anv_image_msaa_resolve(struct anv_cmd_buffer *cmd_buffer,
                       const struct anv_image *src_image,
                       enum isl_aux_usage src_aux_usage,
                       uint32_t src_level, uint32_t src_base_layer,
                       const struct anv_image *dst_image,
                       enum isl_aux_usage dst_aux_usage,
                       uint32_t dst_level, uint32_t dst_base_layer,
                       VkImageAspectFlagBits aspect,
                       uint32_t src_x, uint32_t src_y,
                       uint32_t dst_x, uint32_t dst_y,
                       uint32_t width, uint32_t height,
                       uint32_t layer_count,
                       enum blorp_filter filter);
void
anv_image_hiz_op(struct anv_cmd_buffer *cmd_buffer,
                 const struct anv_image *image,
                 VkImageAspectFlagBits aspect, uint32_t level,
                 uint32_t base_layer, uint32_t layer_count,
                 enum isl_aux_op hiz_op);
void
anv_image_hiz_clear(struct anv_cmd_buffer *cmd_buffer,
                    const struct anv_image *image,
                    VkImageAspectFlags aspects,
                    uint32_t level,
                    uint32_t base_layer, uint32_t layer_count,
                    VkRect2D area, uint8_t stencil_value);
void
anv_image_mcs_op(struct anv_cmd_buffer *cmd_buffer,
                 const struct anv_image *image,
                 enum isl_format format, struct isl_swizzle swizzle,
                 VkImageAspectFlagBits aspect,
                 uint32_t base_layer, uint32_t layer_count,
                 enum isl_aux_op mcs_op, union isl_color_value *clear_value,
                 bool predicate);
void
anv_image_ccs_op(struct anv_cmd_buffer *cmd_buffer,
                 const struct anv_image *image,
                 enum isl_format format, struct isl_swizzle swizzle,
                 VkImageAspectFlagBits aspect, uint32_t level,
                 uint32_t base_layer, uint32_t layer_count,
                 enum isl_aux_op ccs_op, union isl_color_value *clear_value,
                 bool predicate);

void
anv_image_copy_to_shadow(struct anv_cmd_buffer *cmd_buffer,
                         const struct anv_image *image,
                         VkImageAspectFlagBits aspect,
                         uint32_t base_level, uint32_t level_count,
                         uint32_t base_layer, uint32_t layer_count);

enum isl_aux_state ATTRIBUTE_PURE
anv_layout_to_aux_state(const struct intel_device_info * const devinfo,
                        const struct anv_image *image,
                        const VkImageAspectFlagBits aspect,
                        const VkImageLayout layout);

enum isl_aux_usage ATTRIBUTE_PURE
anv_layout_to_aux_usage(const struct intel_device_info * const devinfo,
                        const struct anv_image *image,
                        const VkImageAspectFlagBits aspect,
                        const VkImageUsageFlagBits usage,
                        const VkImageLayout layout);

enum anv_fast_clear_type ATTRIBUTE_PURE
anv_layout_to_fast_clear_type(const struct intel_device_info * const devinfo,
                              const struct anv_image * const image,
                              const VkImageAspectFlagBits aspect,
                              const VkImageLayout layout);

static inline bool
anv_image_aspects_compatible(VkImageAspectFlags aspects1,
                             VkImageAspectFlags aspects2)
{
   if (aspects1 == aspects2)
      return true;

   /* Only color aspects are compatible with one another, and only when the
    * two masks contain the same number of planes.
    */
   if ((aspects1 & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) != 0 &&
       (aspects2 & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) != 0 &&
       util_bitcount(aspects1) == util_bitcount(aspects2))
      return true;

   return false;
}

struct anv_image_view {
   struct vk_image_view vk;

   const struct anv_image *image; /**< VkImageViewCreateInfo::image */

   unsigned n_planes;
   struct {
      uint32_t image_plane;

      struct isl_view isl;

      /**
       * RENDER_SURFACE_STATE when using image as a sampler surface with an
       * image layout of SHADER_READ_ONLY_OPTIMAL or
       * DEPTH_STENCIL_READ_ONLY_OPTIMAL.
       */
      struct anv_surface_state optimal_sampler_surface_state;

      /**
       * RENDER_SURFACE_STATE when using image as a sampler surface with an
       * image layout of GENERAL.
       */
      struct anv_surface_state general_sampler_surface_state;

      /**
       * RENDER_SURFACE_STATE when using image as a storage image. Separate
       * states for vanilla (with the original format) and one which has been
       * lowered to a format suitable for reading. This may be a raw surface
       * in extreme cases or simply a surface with a different format where we
       * expect some conversion to be done in the shader.
       */
      struct anv_surface_state storage_surface_state;
      struct anv_surface_state lowered_storage_surface_state;

      struct brw_image_param lowered_storage_image_param;
   } planes[3];
};

enum anv_image_view_state_flags {
   ANV_IMAGE_VIEW_STATE_STORAGE_LOWERED = (1 << 0),
   ANV_IMAGE_VIEW_STATE_TEXTURE_OPTIMAL = (1 << 1),
};

void anv_image_fill_surface_state(struct anv_device *device,
                                  const struct anv_image *image,
                                  VkImageAspectFlagBits aspect,
                                  const struct isl_view *view,
                                  isl_surf_usage_flags_t view_usage,
                                  enum isl_aux_usage aux_usage,
                                  const union isl_color_value *clear_color,
                                  enum anv_image_view_state_flags flags,
                                  struct anv_surface_state *state_inout,
                                  struct brw_image_param *image_param_out);

struct anv_image_create_info {
   const VkImageCreateInfo *vk_info;

   /** An opt-in bitmask which filters an ISL-mapping of the Vulkan tiling. */
   isl_tiling_flags_t isl_tiling_flags;

   /** These flags will be added to any derived from VkImageCreateInfo. */
   isl_surf_usage_flags_t isl_extra_usage_flags;
};

VkResult anv_image_init(struct anv_device *device, struct anv_image *image,
                        const struct anv_image_create_info *create_info);

void anv_image_finish(struct anv_image *image);

void anv_image_get_memory_requirements(struct anv_device *device,
                                       struct anv_image *image,
                                       VkImageAspectFlags aspects,
                                       VkMemoryRequirements2 *pMemoryRequirements);

enum isl_format
anv_isl_format_for_descriptor_type(const struct anv_device *device,
                                   VkDescriptorType type);

static inline VkExtent3D
anv_sanitize_image_extent(const VkImageType imageType,
                          const VkExtent3D imageExtent)
{
   switch (imageType) {
   case VK_IMAGE_TYPE_1D:
      return (VkExtent3D) { imageExtent.width, 1, 1 };
   case VK_IMAGE_TYPE_2D:
      return (VkExtent3D) { imageExtent.width, imageExtent.height, 1 };
   case VK_IMAGE_TYPE_3D:
      return imageExtent;
   default:
      unreachable("invalid image type");
   }
}

static inline VkOffset3D
anv_sanitize_image_offset(const VkImageType imageType,
                          const VkOffset3D imageOffset)
{
   switch (imageType) {
   case VK_IMAGE_TYPE_1D:
      return (VkOffset3D) { imageOffset.x, 0, 0 };
   case VK_IMAGE_TYPE_2D:
      return (VkOffset3D) { imageOffset.x, imageOffset.y, 0 };
   case VK_IMAGE_TYPE_3D:
      return imageOffset;
   default:
      unreachable("invalid image type");
   }
}

static inline uint32_t
anv_rasterization_aa_mode(VkPolygonMode raster_mode,
                          VkLineRasterizationModeEXT line_mode)
{
   if (raster_mode == VK_POLYGON_MODE_LINE &&
       line_mode == VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT)
      return true;
   return false;
}

VkFormatFeatureFlags2KHR
anv_get_image_format_features2(const struct intel_device_info *devinfo,
                               VkFormat vk_format,
                               const struct anv_format *anv_format,
                               VkImageTiling vk_tiling,
                               const struct isl_drm_modifier_info *isl_mod_info);

void anv_fill_buffer_surface_state(struct anv_device *device,
                                   struct anv_state state,
                                   enum isl_format format,
                                   isl_surf_usage_flags_t usage,
                                   struct anv_address address,
                                   uint32_t range, uint32_t stride);


/* Haswell border color is a bit of a disaster. Float and unorm formats use a
 * straightforward 32-bit float color in the first 64 bytes. Instead of using
 * a nice float/integer union like Gfx8+, Haswell specifies the integer border
 * color as a separate entry /after/ the float color. The layout of this entry
 * also depends on the format's bpp (with extra hacks for RG32), and overlaps.
 *
 * Since we don't know the format/bpp, we can't make any of the border colors
 * containing '1' work for all formats, as it would be in the wrong place for
 * some of them. We opt to make 32-bit integers work as this seems like the
 * most common option. Fortunately, transparent black works regardless, as
 * all zeroes is the same in every bit-size.
 */
struct hsw_border_color {
   float float32[4];
   uint32_t _pad0[12];
   uint32_t uint32[4];
   uint32_t _pad1[108];
};

struct gfx8_border_color {
   union {
      float float32[4];
      uint32_t uint32[4];
   };
   /* Pad out to 64 bytes */
   uint32_t _pad[12];
};
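
/* Size check (illustrative): the padding above is chosen so the structs
 * match the hardware's border color entry sizes. Summing the members,
 * sizeof(struct hsw_border_color) is 16 + 48 + 16 + 432 = 512 bytes and
 * sizeof(struct gfx8_border_color) is 16 + 48 = 64 bytes.
 */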

struct anv_ycbcr_conversion {
   struct vk_object_base base;

   const struct anv_format * format;
   VkSamplerYcbcrModelConversion ycbcr_model;
   VkSamplerYcbcrRange ycbcr_range;
   VkComponentSwizzle mapping[4];
   VkChromaLocation chroma_offsets[2];
   VkFilter chroma_filter;
   bool chroma_reconstruction;
};

struct anv_sampler {
   struct vk_object_base base;

   uint32_t state[3][4];
   uint32_t n_planes;
   struct anv_ycbcr_conversion *conversion;

   /* Blob of sampler state data which is guaranteed to be 32-byte aligned
    * and with a 32-byte stride for use as bindless samplers.
    */
   struct anv_state bindless_state;

   struct anv_state custom_border_color;
};

#define ANV_PIPELINE_STATISTICS_MASK 0x000007ff

struct anv_query_pool {
   struct vk_object_base base;

   VkQueryType type;
   VkQueryPipelineStatisticFlags pipeline_statistics;
   /** Stride between slots, in bytes */
   uint32_t stride;
   /** Number of slots in this query pool */
   uint32_t slots;
   struct anv_bo * bo;

   /* KHR perf queries: */
   uint32_t pass_size;
   uint32_t data_offset;
   uint32_t snapshot_size;
   uint32_t n_counters;
   struct intel_perf_counter_pass *counter_pass;
   uint32_t n_passes;
   struct intel_perf_query_info **pass_query;
};

static inline uint32_t khr_perf_query_preamble_offset(const struct anv_query_pool *pool,
                                                      uint32_t pass)
{
   return pool->pass_size * pass + 8;
}

struct anv_acceleration_structure {
   struct vk_object_base base;

   VkDeviceSize size;
   struct anv_address address;
};

int anv_get_instance_entrypoint_index(const char *name);
int anv_get_device_entrypoint_index(const char *name);
int anv_get_physical_device_entrypoint_index(const char *name);

const char *anv_get_instance_entry_name(int index);
const char *anv_get_physical_device_entry_name(int index);
const char *anv_get_device_entry_name(int index);

bool
anv_instance_entrypoint_is_enabled(int index, uint32_t core_version,
                                   const struct vk_instance_extension_table *instance);
bool
anv_physical_device_entrypoint_is_enabled(int index, uint32_t core_version,
                                          const struct vk_instance_extension_table *instance);
bool
anv_device_entrypoint_is_enabled(int index, uint32_t core_version,
                                 const struct vk_instance_extension_table *instance,
                                 const struct vk_device_extension_table *device);

const struct vk_device_dispatch_table *
anv_get_device_dispatch_table(const struct intel_device_info *devinfo);

void
anv_dump_pipe_bits(enum anv_pipe_bits bits);

static inline void
anv_add_pending_pipe_bits(struct anv_cmd_buffer *cmd_buffer,
                          enum anv_pipe_bits bits,
                          const char *reason)
{
   cmd_buffer->state.pending_pipe_bits |= bits;
   if (INTEL_DEBUG(DEBUG_PIPE_CONTROL) && bits) {
      fputs("pc: add ", stderr);
      anv_dump_pipe_bits(bits);
      fprintf(stderr, "reason: %s\n", reason);
   }
}
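
/* Usage sketch (illustrative): callers queue flushes together with a
 * human-readable reason string, which is printed only when the pipe-control
 * debug flag is set. The specific bit below is an assumption taken from the
 * anv_pipe_bits enum defined earlier in this header.
 *
 *    anv_add_pending_pipe_bits(cmd_buffer,
 *                              ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT,
 *                              "after fast clear");
 */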

struct anv_performance_configuration_intel {
   struct vk_object_base base;

   struct intel_perf_registers *register_config;

   uint64_t config_id;
};

void anv_physical_device_init_perf(struct anv_physical_device *device, int fd);
void anv_device_perf_init(struct anv_device *device);
void anv_perf_write_pass_results(struct intel_perf_config *perf,
                                 struct anv_query_pool *pool, uint32_t pass,
                                 const struct intel_perf_query_result *accumulated_results,
                                 union VkPerformanceCounterResultKHR *results);

/* Use to emit a series of memcpy operations */
struct anv_memcpy_state {
   struct anv_device *device;
   struct anv_batch *batch;

   struct anv_vb_cache_range vb_bound;
   struct anv_vb_cache_range vb_dirty;
};

struct anv_utrace_flush_copy {
   /* Needs to be the first field */
   struct intel_ds_flush_data ds;

   /* Batch stuff to implement a copy of timestamps recorded in another
    * buffer.
    */
   struct anv_reloc_list relocs;
   struct anv_batch batch;
   struct anv_bo *batch_bo;

   /* Buffer of 64-bit timestamps */
   struct anv_bo *trace_bo;

   /* Syncobj to be signaled when the batch completes */
   struct vk_sync *sync;

   /* Queue on which all the recorded traces are submitted */
   struct anv_queue *queue;

   struct anv_memcpy_state memcpy_state;
};

void anv_device_utrace_init(struct anv_device *device);
void anv_device_utrace_finish(struct anv_device *device);
VkResult
anv_device_utrace_flush_cmd_buffers(struct anv_queue *queue,
                                    uint32_t cmd_buffer_count,
                                    struct anv_cmd_buffer **cmd_buffers,
                                    struct anv_utrace_flush_copy **out_flush_data);

#ifdef HAVE_PERFETTO
void anv_perfetto_init(void);
uint64_t anv_perfetto_begin_submit(struct anv_queue *queue);
void anv_perfetto_end_submit(struct anv_queue *queue, uint32_t submission_id,
                             uint64_t start_ts);
#else
static inline void anv_perfetto_init(void)
{
}
static inline uint64_t anv_perfetto_begin_submit(struct anv_queue *queue)
{
   return 0;
}
static inline void anv_perfetto_end_submit(struct anv_queue *queue,
                                           uint32_t submission_id,
                                           uint64_t start_ts)
{}
#endif


#define ANV_FROM_HANDLE(__anv_type, __name, __handle) \
   VK_FROM_HANDLE(__anv_type, __name, __handle)

VK_DEFINE_HANDLE_CASTS(anv_cmd_buffer, vk.base, VkCommandBuffer,
                       VK_OBJECT_TYPE_COMMAND_BUFFER)
VK_DEFINE_HANDLE_CASTS(anv_device, vk.base, VkDevice, VK_OBJECT_TYPE_DEVICE)
VK_DEFINE_HANDLE_CASTS(anv_instance, vk.base, VkInstance, VK_OBJECT_TYPE_INSTANCE)
VK_DEFINE_HANDLE_CASTS(anv_physical_device, vk.base, VkPhysicalDevice,
                       VK_OBJECT_TYPE_PHYSICAL_DEVICE)
VK_DEFINE_HANDLE_CASTS(anv_queue, vk.base, VkQueue, VK_OBJECT_TYPE_QUEUE)

VK_DEFINE_NONDISP_HANDLE_CASTS(anv_acceleration_structure, base,
                               VkAccelerationStructureKHR,
                               VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer, base, VkBuffer,
                               VK_OBJECT_TYPE_BUFFER)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer_view, base, VkBufferView,
                               VK_OBJECT_TYPE_BUFFER_VIEW)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_pool, base, VkDescriptorPool,
                               VK_OBJECT_TYPE_DESCRIPTOR_POOL)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set, base, VkDescriptorSet,
                               VK_OBJECT_TYPE_DESCRIPTOR_SET)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set_layout, base,
                               VkDescriptorSetLayout,
                               VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_update_template, base,
                               VkDescriptorUpdateTemplate,
                               VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_device_memory, base, VkDeviceMemory,
                               VK_OBJECT_TYPE_DEVICE_MEMORY)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_event, base, VkEvent, VK_OBJECT_TYPE_EVENT)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_image, vk.base, VkImage, VK_OBJECT_TYPE_IMAGE)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_image_view, vk.base, VkImageView,
                               VK_OBJECT_TYPE_IMAGE_VIEW);
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_cache, base, VkPipelineCache,
                               VK_OBJECT_TYPE_PIPELINE_CACHE)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline, base, VkPipeline,
                               VK_OBJECT_TYPE_PIPELINE)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_layout, base, VkPipelineLayout,
                               VK_OBJECT_TYPE_PIPELINE_LAYOUT)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_query_pool, base, VkQueryPool,
                               VK_OBJECT_TYPE_QUERY_POOL)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_sampler, base, VkSampler,
                               VK_OBJECT_TYPE_SAMPLER)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_ycbcr_conversion, base,
                               VkSamplerYcbcrConversion,
                               VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_performance_configuration_intel, base,
                               VkPerformanceConfigurationINTEL,
                               VK_OBJECT_TYPE_PERFORMANCE_CONFIGURATION_INTEL)

#define anv_genX(devinfo, thing) ({                  \
   __typeof(&gfx9_##thing) genX_thing;               \
   switch ((devinfo)->verx10) {                      \
   case 70:                                          \
      genX_thing = &gfx7_##thing;                    \
      break;                                         \
   case 75:                                          \
      genX_thing = &gfx75_##thing;                   \
      break;                                         \
   case 80:                                          \
      genX_thing = &gfx8_##thing;                    \
      break;                                         \
   case 90:                                          \
      genX_thing = &gfx9_##thing;                    \
      break;                                         \
   case 110:                                         \
      genX_thing = &gfx11_##thing;                   \
      break;                                         \
   case 120:                                         \
      genX_thing = &gfx12_##thing;                   \
      break;                                         \
   case 125:                                         \
      genX_thing = &gfx125_##thing;                  \
      break;                                         \
   default:                                          \
      unreachable("Unknown hardware generation");    \
   }                                                 \
   genX_thing;                                       \
})
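
/* Usage sketch (illustrative): anv_genX selects the per-generation variant
 * of a symbol at runtime from devinfo->verx10. The symbol name below is
 * hypothetical; any `thing` compiled once per generation works:
 *
 *    anv_genX(&device->info, thing)(cmd_buffer);
 *
 * The statement-expression yields a pointer with gfx9_##thing's type, so
 * all generation variants must share one signature.
 */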

/* Gen-specific function declarations */
#ifdef genX
#  include "anv_genX.h"
#else
#  define genX(x) gfx7_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gfx75_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gfx8_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gfx9_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gfx11_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gfx12_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gfx125_##x
#  include "anv_genX.h"
#  undef genX
#endif

#endif /* ANV_PRIVATE_H */