1 /*
2  * Copyright © 2016 Red Hat.
3  * Copyright © 2016 Bas Nieuwenhuizen
4  *
5  * based in part on anv driver which is:
6  * Copyright © 2015 Intel Corporation
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a
9  * copy of this software and associated documentation files (the "Software"),
10  * to deal in the Software without restriction, including without limitation
11  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12  * and/or sell copies of the Software, and to permit persons to whom the
13  * Software is furnished to do so, subject to the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the next
16  * paragraph) shall be included in all copies or substantial portions of the
17  * Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
22  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25  * IN THE SOFTWARE.
26  */
27 
28 #ifndef RADV_PRIVATE_H
29 #define RADV_PRIVATE_H
30 
31 #include <stdlib.h>
32 #include <stdio.h>
33 #include <stdbool.h>
34 #include <pthread.h>
35 #include <assert.h>
36 #include <stdint.h>
37 #include <string.h>
38 #ifdef HAVE_VALGRIND
39 #include <valgrind.h>
40 #include <memcheck.h>
41 #define VG(x) x
42 #else
43 #define VG(x) ((void)0)
44 #endif
45 
46 #include "c11/threads.h"
47 #include <amdgpu.h>
48 #include "compiler/shader_enums.h"
49 #include "util/macros.h"
50 #include "util/list.h"
51 #include "util/xmlconfig.h"
52 #include "vk_alloc.h"
53 #include "vk_debug_report.h"
54 #include "vk_object.h"
55 
56 #include "radv_radeon_winsys.h"
57 #include "ac_binary.h"
58 #include "ac_nir_to_llvm.h"
59 #include "ac_gpu_info.h"
60 #include "ac_surface.h"
61 #include "ac_llvm_build.h"
62 #include "ac_llvm_util.h"
63 #include "radv_constants.h"
64 #include "radv_descriptor_set.h"
65 #include "radv_extensions.h"
66 #include "sid.h"
67 
68 /* Pre-declarations needed for WSI entrypoints */
69 struct wl_surface;
70 struct wl_display;
71 typedef struct xcb_connection_t xcb_connection_t;
72 typedef uint32_t xcb_visualid_t;
73 typedef uint32_t xcb_window_t;
74 
75 #include <vulkan/vulkan.h>
76 #include <vulkan/vulkan_intel.h>
77 #include <vulkan/vulkan_android.h>
78 #include <vulkan/vk_icd.h>
79 #include <vulkan/vk_android_native_buffer.h>
80 
81 #include "radv_entrypoints.h"
82 
83 #include "wsi_common.h"
84 #include "wsi_common_display.h"
85 
86 /* Helper to determine if we should compile
87  * any of the Android AHB support.
88  *
 * To actually enable the extension we also need
90  * the necessary kernel support.
91  */
92 #if defined(ANDROID) && ANDROID_API_LEVEL >= 26
93 #define RADV_SUPPORT_ANDROID_HARDWARE_BUFFER 1
94 #else
95 #define RADV_SUPPORT_ANDROID_HARDWARE_BUFFER 0
96 #endif
97 
98 #define radv_printflike(a, b) __attribute__((__format__(__printf__, a, b)))
99 
100 static inline uint32_t
align_u32(uint32_t v, uint32_t a)
102 {
103 	assert(a != 0 && a == (a & -a));
104 	return (v + a - 1) & ~(a - 1);
105 }
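/* Usage sketch (illustrative values): the alignment must be a power of two,
 * so e.g. align_u32(13, 8) yields 16 and align_u32(16, 8) stays 16.
 * For non-power-of-two alignments use align_u32_npot() below.
 */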
106 
107 static inline uint32_t
align_u32_npot(uint32_t v, uint32_t a)
109 {
110 	return (v + a - 1) / a * a;
111 }
112 
113 static inline uint64_t
align_u64(uint64_t v, uint64_t a)
115 {
116 	assert(a != 0 && a == (a & -a));
117 	return (v + a - 1) & ~(a - 1);
118 }
119 
120 static inline int32_t
align_i32(int32_t v, int32_t a)
122 {
123 	assert(a != 0 && a == (a & -a));
124 	return (v + a - 1) & ~(a - 1);
125 }
126 
127 /** Alignment must be a power of 2. */
128 static inline bool
radv_is_aligned(uintmax_t n, uintmax_t a)
130 {
131 	assert(a == (a & -a));
132 	return (n & (a - 1)) == 0;
133 }
134 
135 static inline uint32_t
round_up_u32(uint32_t v, uint32_t a)
137 {
138 	return (v + a - 1) / a;
139 }
140 
141 static inline uint64_t
round_up_u64(uint64_t v, uint64_t a)
143 {
144 	return (v + a - 1) / a;
145 }
146 
147 static inline uint32_t
radv_minify(uint32_t n, uint32_t levels)
149 {
150 	if (unlikely(n == 0))
151 		return 0;
152 	else
153 		return MAX2(n >> levels, 1);
154 }
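/* Usage sketch (illustrative values): radv_minify(1024, 3) gives the width of
 * mip level 3 of a 1024-wide image, i.e. 128; the result is clamped to 1 for
 * fully minified levels, and 0 is returned only for a 0-sized input.
 */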
155 static inline float
radv_clamp_f(float f, float min, float max)
157 {
158 	assert(min < max);
159 
160 	if (f > max)
161 		return max;
162 	else if (f < min)
163 		return min;
164 	else
165 		return f;
166 }
167 
168 static inline bool
radv_clear_mask(uint32_t *inout_mask, uint32_t clear_mask)
170 {
171 	if (*inout_mask & clear_mask) {
172 		*inout_mask &= ~clear_mask;
173 		return true;
174 	} else {
175 		return false;
176 	}
177 }
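/* Usage sketch; the local mask copy and the handler are illustrative only:
 *
 *    uint32_t dirty = cmd_buffer->state.dirty;
 *    if (radv_clear_mask(&dirty, RADV_CMD_DIRTY_PIPELINE))
 *            emit_pipeline_state();   bit was set and has now been cleared
 */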
178 
179 #define for_each_bit(b, dword)                          \
180 	for (uint32_t __dword = (dword);		\
181 	     (b) = __builtin_ffs(__dword) - 1, __dword;	\
182 	     __dword &= ~(1 << (b)))
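/* Usage sketch (the mask and callee are illustrative): iterate the index of
 * every set bit, lowest first:
 *
 *    uint32_t b;
 *    for_each_bit(b, attachment_mask)
 *            process_attachment(b);
 */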
183 
184 #define typed_memcpy(dest, src, count) ({				\
185 			STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \
186 			memcpy((dest), (src), (count) * sizeof(*(src))); \
187 		})
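/* Usage sketch (illustrative): copies `count' elements and fails to compile
 * if the destination and source element sizes differ:
 *
 *    typed_memcpy(dest->viewports, src->viewports, viewport_count);
 */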
188 
189 /* Whenever we generate an error, pass it through this function. Useful for
190  * debugging, where we can break on it. Only call at error site, not when
191  * propagating errors. Might be useful to plug in a stack trace here.
192  */
193 
194 struct radv_image_view;
195 struct radv_instance;
196 
197 VkResult __vk_errorf(struct radv_instance *instance, VkResult error, const char *file, int line, const char *format, ...);
198 
199 #define vk_error(instance, error) __vk_errorf(instance, error, __FILE__, __LINE__, NULL);
200 #define vk_errorf(instance, error, format, ...) __vk_errorf(instance, error, __FILE__, __LINE__, format, ## __VA_ARGS__);
201 
202 void __radv_finishme(const char *file, int line, const char *format, ...)
203 	radv_printflike(3, 4);
204 void radv_loge(const char *format, ...) radv_printflike(1, 2);
205 void radv_loge_v(const char *format, va_list va);
206 void radv_logi(const char *format, ...) radv_printflike(1, 2);
207 void radv_logi_v(const char *format, va_list va);
208 
209 /**
210  * Print a FINISHME message, including its source location.
211  */
212 #define radv_finishme(format, ...)					\
213 	do { \
214 		static bool reported = false; \
215 		if (!reported) { \
216 			__radv_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__); \
217 			reported = true; \
218 		} \
219 	} while (0)
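/* Usage sketch (illustrative format string): logs the message together with
 * its source location, once per call site:
 *
 *    radv_finishme("unsupported tiling mode %u", tiling);
 */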
220 
221 /* A non-fatal assert.  Useful for debugging. */
222 #ifdef DEBUG
223 #define radv_assert(x) ({						\
224 			if (unlikely(!(x)))				\
225 				fprintf(stderr, "%s:%d ASSERT: %s\n", __FILE__, __LINE__, #x); \
226 		})
227 #else
228 #define radv_assert(x) do {} while(0)
229 #endif
230 
231 #define stub_return(v)					\
232 	do {						\
233 		radv_finishme("stub %s", __func__);	\
234 		return (v);				\
235 	} while (0)
236 
237 #define stub()						\
238 	do {						\
239 		radv_finishme("stub %s", __func__);	\
240 		return;					\
241 	} while (0)
242 
243 int radv_get_instance_entrypoint_index(const char *name);
244 int radv_get_device_entrypoint_index(const char *name);
245 int radv_get_physical_device_entrypoint_index(const char *name);
246 
247 const char *radv_get_instance_entry_name(int index);
248 const char *radv_get_physical_device_entry_name(int index);
249 const char *radv_get_device_entry_name(int index);
250 
251 bool radv_instance_entrypoint_is_enabled(int index, uint32_t core_version,
252 					 const struct radv_instance_extension_table *instance);
253 bool radv_physical_device_entrypoint_is_enabled(int index, uint32_t core_version,
254 						const struct radv_instance_extension_table *instance);
255 bool radv_device_entrypoint_is_enabled(int index, uint32_t core_version,
256 				       const struct radv_instance_extension_table *instance,
257 				       const struct radv_device_extension_table *device);
258 
259 void *radv_lookup_entrypoint(const char *name);
260 
261 struct radv_physical_device {
262 	VK_LOADER_DATA                              _loader_data;
263 
264 	/* Link in radv_instance::physical_devices */
265 	struct list_head                            link;
266 
267 	struct radv_instance *                       instance;
268 
269 	struct radeon_winsys *ws;
270 	struct radeon_info rad_info;
271 	char                                        name[VK_MAX_PHYSICAL_DEVICE_NAME_SIZE];
272 	uint8_t                                     driver_uuid[VK_UUID_SIZE];
273 	uint8_t                                     device_uuid[VK_UUID_SIZE];
274 	uint8_t                                     cache_uuid[VK_UUID_SIZE];
275 
276 	int local_fd;
277 	int master_fd;
278 	struct wsi_device                       wsi_device;
279 
280 	bool out_of_order_rast_allowed;
281 
282 	/* Whether DCC should be enabled for MSAA textures. */
283 	bool dcc_msaa_allowed;
284 
285 	/* Whether to enable NGG. */
286 	bool use_ngg;
287 
288 	/* Whether to enable NGG GS. */
289 	bool use_ngg_gs;
290 
291 	/* Whether to enable NGG streamout. */
292 	bool use_ngg_streamout;
293 
294 	/* Number of threads per wave. */
295 	uint8_t ps_wave_size;
296 	uint8_t cs_wave_size;
297 	uint8_t ge_wave_size;
298 
299 	/* Whether to use the LLVM compiler backend */
300 	bool use_llvm;
301 
	/* This is the driver's on-disk cache used as a fallback as opposed to
303 	 * the pipeline cache defined by apps.
304 	 */
305 	struct disk_cache *                          disk_cache;
306 
307 	VkPhysicalDeviceMemoryProperties memory_properties;
308 	enum radeon_bo_domain memory_domains[VK_MAX_MEMORY_TYPES];
309 	enum radeon_bo_flag memory_flags[VK_MAX_MEMORY_TYPES];
310 
311 	drmPciBusInfo bus_info;
312 
313 	struct radv_device_extension_table supported_extensions;
314 };
315 
316 struct radv_instance {
317 	struct vk_object_base                       base;
318 
319 	VkAllocationCallbacks                       alloc;
320 
321 	uint32_t                                    apiVersion;
322 
323 	char *                                      applicationName;
324 	uint32_t                                    applicationVersion;
325 	char *                                      engineName;
326 	uint32_t                                    engineVersion;
327 
328 	uint64_t debug_flags;
329 	uint64_t perftest_flags;
330 
331 	struct vk_debug_report_instance             debug_report_callbacks;
332 
333 	struct radv_instance_extension_table enabled_extensions;
334 	struct radv_instance_dispatch_table          dispatch;
335 	struct radv_physical_device_dispatch_table   physical_device_dispatch;
336 	struct radv_device_dispatch_table            device_dispatch;
337 
338 	bool                                        physical_devices_enumerated;
339 	struct list_head                            physical_devices;
340 
341 	struct driOptionCache dri_options;
342 	struct driOptionCache available_dri_options;
343 
344 	/**
345 	 * Workarounds for game bugs.
346 	 */
347 	bool enable_mrt_output_nan_fixup;
348 };
349 
350 VkResult radv_init_wsi(struct radv_physical_device *physical_device);
351 void radv_finish_wsi(struct radv_physical_device *physical_device);
352 
353 bool radv_instance_extension_supported(const char *name);
354 uint32_t radv_physical_device_api_version(struct radv_physical_device *dev);
355 bool radv_physical_device_extension_supported(struct radv_physical_device *dev,
356 					      const char *name);
357 
358 struct cache_entry;
359 
360 struct radv_pipeline_cache {
361 	struct vk_object_base                        base;
362 	struct radv_device *                         device;
363 	pthread_mutex_t                              mutex;
364 	VkPipelineCacheCreateFlags                   flags;
365 
366 	uint32_t                                     total_size;
367 	uint32_t                                     table_size;
368 	uint32_t                                     kernel_count;
369 	struct cache_entry **                        hash_table;
370 	bool                                         modified;
371 
372 	VkAllocationCallbacks                        alloc;
373 };
374 
375 struct radv_pipeline_key {
376 	uint32_t instance_rate_inputs;
377 	uint32_t instance_rate_divisors[MAX_VERTEX_ATTRIBS];
378 	uint8_t vertex_attribute_formats[MAX_VERTEX_ATTRIBS];
379 	uint32_t vertex_attribute_bindings[MAX_VERTEX_ATTRIBS];
380 	uint32_t vertex_attribute_offsets[MAX_VERTEX_ATTRIBS];
381 	uint32_t vertex_attribute_strides[MAX_VERTEX_ATTRIBS];
382 	uint64_t vertex_alpha_adjust;
383 	uint32_t vertex_post_shuffle;
384 	unsigned tess_input_vertices;
385 	uint32_t col_format;
386 	uint32_t is_int8;
387 	uint32_t is_int10;
388 	uint8_t log2_ps_iter_samples;
389 	uint8_t num_samples;
390 	bool is_dual_src;
391 	uint32_t has_multiview_view_index : 1;
392 	uint32_t optimisations_disabled : 1;
393 	uint8_t topology;
394 
395 	/* Non-zero if a required subgroup size is specified via
396 	 * VK_EXT_subgroup_size_control.
397 	 */
398 	uint8_t compute_subgroup_size;
399 };
400 
401 struct radv_shader_binary;
402 struct radv_shader_variant;
403 
404 void
405 radv_pipeline_cache_init(struct radv_pipeline_cache *cache,
406 			 struct radv_device *device);
407 void
408 radv_pipeline_cache_finish(struct radv_pipeline_cache *cache);
409 bool
410 radv_pipeline_cache_load(struct radv_pipeline_cache *cache,
411 			 const void *data, size_t size);
412 
413 bool
414 radv_create_shader_variants_from_pipeline_cache(struct radv_device *device,
415 					        struct radv_pipeline_cache *cache,
416 					        const unsigned char *sha1,
417 					        struct radv_shader_variant **variants,
418 						bool *found_in_application_cache);
419 
420 void
421 radv_pipeline_cache_insert_shaders(struct radv_device *device,
422 				   struct radv_pipeline_cache *cache,
423 				   const unsigned char *sha1,
424 				   struct radv_shader_variant **variants,
425 				   struct radv_shader_binary *const *binaries);
426 
427 enum radv_blit_ds_layout {
428 	RADV_BLIT_DS_LAYOUT_TILE_ENABLE,
429 	RADV_BLIT_DS_LAYOUT_TILE_DISABLE,
430 	RADV_BLIT_DS_LAYOUT_COUNT,
431 };
432 
static inline enum radv_blit_ds_layout radv_meta_blit_ds_to_type(VkImageLayout layout)
434 {
435 	return (layout == VK_IMAGE_LAYOUT_GENERAL) ? RADV_BLIT_DS_LAYOUT_TILE_DISABLE : RADV_BLIT_DS_LAYOUT_TILE_ENABLE;
436 }
437 
static inline VkImageLayout radv_meta_blit_ds_to_layout(enum radv_blit_ds_layout ds_layout)
439 {
440 	return ds_layout == RADV_BLIT_DS_LAYOUT_TILE_ENABLE ? VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL : VK_IMAGE_LAYOUT_GENERAL;
441 }
442 
443 enum radv_meta_dst_layout {
444 	RADV_META_DST_LAYOUT_GENERAL,
445 	RADV_META_DST_LAYOUT_OPTIMAL,
446 	RADV_META_DST_LAYOUT_COUNT,
447 };
448 
static inline enum radv_meta_dst_layout radv_meta_dst_layout_from_layout(VkImageLayout layout)
450 {
451 	return (layout == VK_IMAGE_LAYOUT_GENERAL) ? RADV_META_DST_LAYOUT_GENERAL : RADV_META_DST_LAYOUT_OPTIMAL;
452 }
453 
static inline VkImageLayout radv_meta_dst_layout_to_layout(enum radv_meta_dst_layout layout)
455 {
456 	return layout == RADV_META_DST_LAYOUT_OPTIMAL ? VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL : VK_IMAGE_LAYOUT_GENERAL;
457 }
458 
459 struct radv_meta_state {
460 	VkAllocationCallbacks alloc;
461 
462 	struct radv_pipeline_cache cache;
463 
464 	/*
465 	 * For on-demand pipeline creation, makes sure that
466 	 * only one thread tries to build a pipeline at the same time.
467 	 */
468 	mtx_t mtx;
469 
470 	/**
471 	 * Use array element `i` for images with `2^i` samples.
472 	 */
473 	struct {
474 		VkRenderPass render_pass[NUM_META_FS_KEYS];
475 		VkPipeline color_pipelines[NUM_META_FS_KEYS];
476 
477 		VkRenderPass depthstencil_rp;
478 		VkPipeline depth_only_pipeline[NUM_DEPTH_CLEAR_PIPELINES];
479 		VkPipeline stencil_only_pipeline[NUM_DEPTH_CLEAR_PIPELINES];
480 		VkPipeline depthstencil_pipeline[NUM_DEPTH_CLEAR_PIPELINES];
481 
482 		VkPipeline depth_only_unrestricted_pipeline[NUM_DEPTH_CLEAR_PIPELINES];
483 		VkPipeline stencil_only_unrestricted_pipeline[NUM_DEPTH_CLEAR_PIPELINES];
484 		VkPipeline depthstencil_unrestricted_pipeline[NUM_DEPTH_CLEAR_PIPELINES];
485 	} clear[MAX_SAMPLES_LOG2];
486 
487 	VkPipelineLayout                          clear_color_p_layout;
488 	VkPipelineLayout                          clear_depth_p_layout;
489 	VkPipelineLayout                          clear_depth_unrestricted_p_layout;
490 
491 	/* Optimized compute fast HTILE clear for stencil or depth only. */
492 	VkPipeline clear_htile_mask_pipeline;
493 	VkPipelineLayout clear_htile_mask_p_layout;
494 	VkDescriptorSetLayout clear_htile_mask_ds_layout;
495 
496 	struct {
497 		VkRenderPass render_pass[NUM_META_FS_KEYS][RADV_META_DST_LAYOUT_COUNT];
498 
499 		/** Pipeline that blits from a 1D image. */
500 		VkPipeline pipeline_1d_src[NUM_META_FS_KEYS];
501 
502 		/** Pipeline that blits from a 2D image. */
503 		VkPipeline pipeline_2d_src[NUM_META_FS_KEYS];
504 
505 		/** Pipeline that blits from a 3D image. */
506 		VkPipeline pipeline_3d_src[NUM_META_FS_KEYS];
507 
508 		VkRenderPass depth_only_rp[RADV_BLIT_DS_LAYOUT_COUNT];
509 		VkPipeline depth_only_1d_pipeline;
510 		VkPipeline depth_only_2d_pipeline;
511 		VkPipeline depth_only_3d_pipeline;
512 
513 		VkRenderPass stencil_only_rp[RADV_BLIT_DS_LAYOUT_COUNT];
514 		VkPipeline stencil_only_1d_pipeline;
515 		VkPipeline stencil_only_2d_pipeline;
516 		VkPipeline stencil_only_3d_pipeline;
517 		VkPipelineLayout                          pipeline_layout;
518 		VkDescriptorSetLayout                     ds_layout;
519 	} blit;
520 
521 	struct {
522 		VkPipelineLayout p_layouts[5];
523 		VkDescriptorSetLayout ds_layouts[5];
524 		VkPipeline pipelines[5][NUM_META_FS_KEYS];
525 
526 		VkPipeline depth_only_pipeline[5];
527 
528 		VkPipeline stencil_only_pipeline[5];
529 	} blit2d[MAX_SAMPLES_LOG2];
530 
531 	VkRenderPass blit2d_render_passes[NUM_META_FS_KEYS][RADV_META_DST_LAYOUT_COUNT];
532 	VkRenderPass blit2d_depth_only_rp[RADV_BLIT_DS_LAYOUT_COUNT];
533 	VkRenderPass blit2d_stencil_only_rp[RADV_BLIT_DS_LAYOUT_COUNT];
534 
535 	struct {
536 		VkPipelineLayout                          img_p_layout;
537 		VkDescriptorSetLayout                     img_ds_layout;
538 		VkPipeline pipeline;
539 		VkPipeline pipeline_3d;
540 	} itob;
541 	struct {
542 		VkPipelineLayout                          img_p_layout;
543 		VkDescriptorSetLayout                     img_ds_layout;
544 		VkPipeline pipeline;
545 		VkPipeline pipeline_3d;
546 	} btoi;
547 	struct {
548 		VkPipelineLayout                          img_p_layout;
549 		VkDescriptorSetLayout                     img_ds_layout;
550 		VkPipeline pipeline;
551 	} btoi_r32g32b32;
552 	struct {
553 		VkPipelineLayout                          img_p_layout;
554 		VkDescriptorSetLayout                     img_ds_layout;
555 		VkPipeline pipeline;
556 		VkPipeline pipeline_3d;
557 	} itoi;
558 	struct {
559 		VkPipelineLayout                          img_p_layout;
560 		VkDescriptorSetLayout                     img_ds_layout;
561 		VkPipeline pipeline;
562 	} itoi_r32g32b32;
563 	struct {
564 		VkPipelineLayout                          img_p_layout;
565 		VkDescriptorSetLayout                     img_ds_layout;
566 		VkPipeline pipeline;
567 		VkPipeline pipeline_3d;
568 	} cleari;
569 	struct {
570 		VkPipelineLayout                          img_p_layout;
571 		VkDescriptorSetLayout                     img_ds_layout;
572 		VkPipeline pipeline;
573 	} cleari_r32g32b32;
574 
575 	struct {
576 		VkPipelineLayout                          p_layout;
577 		VkPipeline                                pipeline[NUM_META_FS_KEYS];
578 		VkRenderPass                              pass[NUM_META_FS_KEYS];
579 	} resolve;
580 
581 	struct {
582 		VkDescriptorSetLayout                     ds_layout;
583 		VkPipelineLayout                          p_layout;
584 		struct {
585 			VkPipeline                                pipeline;
586 			VkPipeline                                i_pipeline;
587 			VkPipeline                                srgb_pipeline;
588 		} rc[MAX_SAMPLES_LOG2];
589 
590 		VkPipeline depth_zero_pipeline;
591 		struct {
592 			VkPipeline average_pipeline;
593 			VkPipeline max_pipeline;
594 			VkPipeline min_pipeline;
595 		} depth[MAX_SAMPLES_LOG2];
596 
597 		VkPipeline stencil_zero_pipeline;
598 		struct {
599 			VkPipeline max_pipeline;
600 			VkPipeline min_pipeline;
601 		} stencil[MAX_SAMPLES_LOG2];
602 	} resolve_compute;
603 
604 	struct {
605 		VkDescriptorSetLayout                     ds_layout;
606 		VkPipelineLayout                          p_layout;
607 
608 		struct {
609 			VkRenderPass render_pass[NUM_META_FS_KEYS][RADV_META_DST_LAYOUT_COUNT];
610 			VkPipeline   pipeline[NUM_META_FS_KEYS];
611 		} rc[MAX_SAMPLES_LOG2];
612 
613 		VkRenderPass depth_render_pass;
614 		VkPipeline depth_zero_pipeline;
615 		struct {
616 			VkPipeline average_pipeline;
617 			VkPipeline max_pipeline;
618 			VkPipeline min_pipeline;
619 		} depth[MAX_SAMPLES_LOG2];
620 
621 		VkRenderPass stencil_render_pass;
622 		VkPipeline stencil_zero_pipeline;
623 		struct {
624 			VkPipeline max_pipeline;
625 			VkPipeline min_pipeline;
626 		} stencil[MAX_SAMPLES_LOG2];
627 	} resolve_fragment;
628 
629 	struct {
630 		VkPipelineLayout                          p_layout;
631 		VkPipeline                                decompress_pipeline[NUM_DEPTH_DECOMPRESS_PIPELINES];
632 		VkPipeline                                resummarize_pipeline;
633 		VkRenderPass                              pass;
634 	} depth_decomp[MAX_SAMPLES_LOG2];
635 
636 	struct {
637 		VkPipelineLayout                          p_layout;
638 		VkPipeline                                cmask_eliminate_pipeline;
639 		VkPipeline                                fmask_decompress_pipeline;
640 		VkPipeline                                dcc_decompress_pipeline;
641 		VkRenderPass                              pass;
642 
643 		VkDescriptorSetLayout                     dcc_decompress_compute_ds_layout;
644 		VkPipelineLayout                          dcc_decompress_compute_p_layout;
645 		VkPipeline                                dcc_decompress_compute_pipeline;
646 	} fast_clear_flush;
647 
648 	struct {
649 		VkPipelineLayout fill_p_layout;
650 		VkPipelineLayout copy_p_layout;
651 		VkDescriptorSetLayout fill_ds_layout;
652 		VkDescriptorSetLayout copy_ds_layout;
653 		VkPipeline fill_pipeline;
654 		VkPipeline copy_pipeline;
655 	} buffer;
656 
657 	struct {
658 		VkDescriptorSetLayout ds_layout;
659 		VkPipelineLayout p_layout;
660 		VkPipeline occlusion_query_pipeline;
661 		VkPipeline pipeline_statistics_query_pipeline;
662 		VkPipeline tfb_query_pipeline;
663 		VkPipeline timestamp_query_pipeline;
664 	} query;
665 
666 	struct {
667 		VkDescriptorSetLayout ds_layout;
668 		VkPipelineLayout p_layout;
669 		VkPipeline pipeline[MAX_SAMPLES_LOG2];
670 	} fmask_expand;
671 };
672 
673 /* queue types */
674 #define RADV_QUEUE_GENERAL 0
675 #define RADV_QUEUE_COMPUTE 1
676 #define RADV_QUEUE_TRANSFER 2
677 
678 #define RADV_MAX_QUEUE_FAMILIES 3
679 
680 struct radv_deferred_queue_submission;
681 
682 enum ring_type radv_queue_family_to_ring(int f);
683 
684 struct radv_queue {
685 	VK_LOADER_DATA                              _loader_data;
686 	struct radv_device *                         device;
687 	struct radeon_winsys_ctx                    *hw_ctx;
688 	enum radeon_ctx_priority                     priority;
689 	uint32_t queue_family_index;
690 	int queue_idx;
691 	VkDeviceQueueCreateFlags flags;
692 
693 	uint32_t scratch_size_per_wave;
694 	uint32_t scratch_waves;
695 	uint32_t compute_scratch_size_per_wave;
696 	uint32_t compute_scratch_waves;
697 	uint32_t esgs_ring_size;
698 	uint32_t gsvs_ring_size;
699 	bool has_tess_rings;
700 	bool has_gds;
701 	bool has_gds_oa;
702 	bool has_sample_positions;
703 
704 	struct radeon_winsys_bo *scratch_bo;
705 	struct radeon_winsys_bo *descriptor_bo;
706 	struct radeon_winsys_bo *compute_scratch_bo;
707 	struct radeon_winsys_bo *esgs_ring_bo;
708 	struct radeon_winsys_bo *gsvs_ring_bo;
709 	struct radeon_winsys_bo *tess_rings_bo;
710 	struct radeon_winsys_bo *gds_bo;
711 	struct radeon_winsys_bo *gds_oa_bo;
712 	struct radeon_cmdbuf *initial_preamble_cs;
713 	struct radeon_cmdbuf *initial_full_flush_preamble_cs;
714 	struct radeon_cmdbuf *continue_preamble_cs;
715 
716 	struct list_head pending_submissions;
717 	pthread_mutex_t pending_mutex;
718 
719 	pthread_mutex_t thread_mutex;
720 	pthread_cond_t thread_cond;
721 	struct radv_deferred_queue_submission *thread_submission;
722 	pthread_t submission_thread;
723 	bool thread_exit;
724 	bool thread_running;
725 };
726 
727 struct radv_bo_list {
728 	struct radv_winsys_bo_list list;
729 	unsigned capacity;
730 	pthread_rwlock_t rwlock;
731 };
732 
733 VkResult radv_bo_list_add(struct radv_device *device,
734 			  struct radeon_winsys_bo *bo);
735 void radv_bo_list_remove(struct radv_device *device,
736 			 struct radeon_winsys_bo *bo);
737 
738 #define RADV_BORDER_COLOR_COUNT       4096
739 #define RADV_BORDER_COLOR_BUFFER_SIZE (sizeof(VkClearColorValue) * RADV_BORDER_COLOR_COUNT)
740 
741 struct radv_device_border_color_data {
742 	bool 			 used[RADV_BORDER_COLOR_COUNT];
743 
744 	struct radeon_winsys_bo *bo;
745 	VkClearColorValue       *colors_gpu_ptr;
746 
747 	/* Mutex is required to guarantee vkCreateSampler thread safety
748 	 * given that we are writing to a buffer and checking color occupation */
749 	pthread_mutex_t          mutex;
750 };
751 
752 struct radv_device {
753 	struct vk_device vk;
754 
755 	struct radv_instance *                       instance;
756 	struct radeon_winsys *ws;
757 
758 	struct radv_meta_state                       meta_state;
759 
760 	struct radv_queue *queues[RADV_MAX_QUEUE_FAMILIES];
761 	int queue_count[RADV_MAX_QUEUE_FAMILIES];
762 	struct radeon_cmdbuf *empty_cs[RADV_MAX_QUEUE_FAMILIES];
763 
764 	bool always_use_syncobj;
765 	bool pbb_allowed;
766 	bool dfsm_allowed;
767 	uint32_t tess_offchip_block_dw_size;
768 	uint32_t scratch_waves;
769 	uint32_t dispatch_initiator;
770 
771 	uint32_t gs_table_depth;
772 
773 	/* MSAA sample locations.
774 	 * The first index is the sample index.
775 	 * The second index is the coordinate: X, Y. */
776 	float sample_locations_1x[1][2];
777 	float sample_locations_2x[2][2];
778 	float sample_locations_4x[4][2];
779 	float sample_locations_8x[8][2];
780 
781 	/* GFX7 and later */
782 	uint32_t gfx_init_size_dw;
783 	struct radeon_winsys_bo                      *gfx_init;
784 
785 	struct radeon_winsys_bo                      *trace_bo;
786 	uint32_t                                     *trace_id_ptr;
787 
788 	/* Whether to keep shader debug info, for tracing or VK_AMD_shader_info */
789 	bool                                         keep_shader_info;
790 
791 	struct radv_physical_device                  *physical_device;
792 
793 	/* Backup in-memory cache to be used if the app doesn't provide one */
794 	struct radv_pipeline_cache *                mem_cache;
795 
796 	/*
797 	 * use different counters so MSAA MRTs get consecutive surface indices,
798 	 * even if MASK is allocated in between.
799 	 */
800 	uint32_t image_mrt_offset_counter;
801 	uint32_t fmask_mrt_offset_counter;
802 	struct list_head shader_slabs;
803 	mtx_t shader_slab_mutex;
804 
805 	/* For detecting VM faults reported by dmesg. */
806 	uint64_t dmesg_timestamp;
807 
808 	struct radv_device_extension_table enabled_extensions;
809 	struct radv_device_dispatch_table dispatch;
810 
811 	/* Whether the app has enabled the robustBufferAccess feature. */
812 	bool robust_buffer_access;
813 
814 	/* Whether the driver uses a global BO list. */
815 	bool use_global_bo_list;
816 
817 	struct radv_bo_list bo_list;
818 
819 	/* Whether anisotropy is forced with RADV_TEX_ANISO (-1 is disabled). */
820 	int force_aniso;
821 
822 	struct radv_device_border_color_data border_color_data;
823 
824 	/* Condition variable for legacy timelines, to notify waiters when a
825 	 * new point gets submitted. */
826 	pthread_cond_t timeline_cond;
827 
828 	/* Thread trace. */
829 	struct radeon_cmdbuf *thread_trace_start_cs[2];
830 	struct radeon_cmdbuf *thread_trace_stop_cs[2];
831 	struct radeon_winsys_bo *thread_trace_bo;
832 	void *thread_trace_ptr;
833 	uint32_t thread_trace_buffer_size;
834 	int thread_trace_start_frame;
835 
836 	/* Overallocation. */
837 	bool overallocation_disallowed;
838 	uint64_t allocated_memory_size[VK_MAX_MEMORY_HEAPS];
839 	mtx_t overallocation_mutex;
840 };
841 
842 struct radv_device_memory {
843 	struct vk_object_base                        base;
844 	struct radeon_winsys_bo                      *bo;
845 	/* for dedicated allocations */
846 	struct radv_image                            *image;
847 	struct radv_buffer                           *buffer;
848 	uint32_t                                     heap_index;
849 	uint64_t                                     alloc_size;
850 	void *                                       map;
851 	void *                                       user_ptr;
852 
853 #if RADV_SUPPORT_ANDROID_HARDWARE_BUFFER
854 	struct AHardwareBuffer *                    android_hardware_buffer;
855 #endif
856 };
857 
858 
859 struct radv_descriptor_range {
860 	uint64_t va;
861 	uint32_t size;
862 };
863 
864 struct radv_descriptor_set {
865 	struct vk_object_base base;
866 	const struct radv_descriptor_set_layout *layout;
867 	uint32_t size;
868 	uint32_t buffer_count;
869 
870 	struct radeon_winsys_bo *bo;
871 	uint64_t va;
872 	uint32_t *mapped_ptr;
873 	struct radv_descriptor_range *dynamic_descriptors;
874 
875 	struct radeon_winsys_bo *descriptors[0];
876 };
877 
878 struct radv_push_descriptor_set
879 {
880 	struct radv_descriptor_set set;
881 	uint32_t capacity;
882 };
883 
884 struct radv_descriptor_pool_entry {
885 	uint32_t offset;
886 	uint32_t size;
887 	struct radv_descriptor_set *set;
888 };
889 
890 struct radv_descriptor_pool {
891 	struct vk_object_base base;
892 	struct radeon_winsys_bo *bo;
893 	uint8_t *mapped_ptr;
894 	uint64_t current_offset;
895 	uint64_t size;
896 
897 	uint8_t *host_memory_base;
898 	uint8_t *host_memory_ptr;
899 	uint8_t *host_memory_end;
900 
901 	uint32_t entry_count;
902 	uint32_t max_entry_count;
903 	struct radv_descriptor_pool_entry entries[0];
904 };
905 
906 struct radv_descriptor_update_template_entry {
907 	VkDescriptorType descriptor_type;
908 
909 	/* The number of descriptors to update */
910 	uint32_t descriptor_count;
911 
912 	/* Into mapped_ptr or dynamic_descriptors, in units of the respective array */
913 	uint32_t dst_offset;
914 
915 	/* In dwords. Not valid/used for dynamic descriptors */
916 	uint32_t dst_stride;
917 
918 	uint32_t buffer_offset;
919 
920 	/* Only valid for combined image samplers and samplers */
921 	uint8_t has_sampler;
922 	uint8_t sampler_offset;
923 
924 	/* In bytes */
925 	size_t src_offset;
926 	size_t src_stride;
927 
928 	/* For push descriptors */
929 	const uint32_t *immutable_samplers;
930 };
931 
932 struct radv_descriptor_update_template {
933 	struct vk_object_base base;
934 	uint32_t entry_count;
935 	VkPipelineBindPoint bind_point;
936 	struct radv_descriptor_update_template_entry entry[0];
937 };
938 
939 struct radv_buffer {
940 	struct vk_object_base                        base;
941 	VkDeviceSize                                 size;
942 
943 	VkBufferUsageFlags                           usage;
944 	VkBufferCreateFlags                          flags;
945 
946 	/* Set when bound */
947 	struct radeon_winsys_bo *                      bo;
948 	VkDeviceSize                                 offset;
949 
950 	bool shareable;
951 };
952 
953 enum radv_dynamic_state_bits {
954 	RADV_DYNAMIC_VIEWPORT				= 1 << 0,
955 	RADV_DYNAMIC_SCISSOR				= 1 << 1,
956 	RADV_DYNAMIC_LINE_WIDTH				= 1 << 2,
957 	RADV_DYNAMIC_DEPTH_BIAS				= 1 << 3,
958 	RADV_DYNAMIC_BLEND_CONSTANTS			= 1 << 4,
959 	RADV_DYNAMIC_DEPTH_BOUNDS			= 1 << 5,
960 	RADV_DYNAMIC_STENCIL_COMPARE_MASK		= 1 << 6,
961 	RADV_DYNAMIC_STENCIL_WRITE_MASK			= 1 << 7,
962 	RADV_DYNAMIC_STENCIL_REFERENCE			= 1 << 8,
963 	RADV_DYNAMIC_DISCARD_RECTANGLE			= 1 << 9,
964 	RADV_DYNAMIC_SAMPLE_LOCATIONS			= 1 << 10,
965 	RADV_DYNAMIC_LINE_STIPPLE			= 1 << 11,
966 	RADV_DYNAMIC_CULL_MODE				= 1 << 12,
967 	RADV_DYNAMIC_FRONT_FACE				= 1 << 13,
968 	RADV_DYNAMIC_PRIMITIVE_TOPOLOGY			= 1 << 14,
969 	RADV_DYNAMIC_DEPTH_TEST_ENABLE			= 1 << 15,
970 	RADV_DYNAMIC_DEPTH_WRITE_ENABLE			= 1 << 16,
971 	RADV_DYNAMIC_DEPTH_COMPARE_OP			= 1 << 17,
972 	RADV_DYNAMIC_DEPTH_BOUNDS_TEST_ENABLE		= 1 << 18,
973 	RADV_DYNAMIC_STENCIL_TEST_ENABLE		= 1 << 19,
974 	RADV_DYNAMIC_STENCIL_OP				= 1 << 20,
975 	RADV_DYNAMIC_VERTEX_INPUT_BINDING_STRIDE        = 1 << 21,
976 	RADV_DYNAMIC_ALL				= (1 << 22) - 1,
977 };
978 
979 enum radv_cmd_dirty_bits {
980 	/* Keep the dynamic state dirty bits in sync with
981 	 * enum radv_dynamic_state_bits */
982 	RADV_CMD_DIRTY_DYNAMIC_VIEWPORT				= 1 << 0,
983 	RADV_CMD_DIRTY_DYNAMIC_SCISSOR				= 1 << 1,
984 	RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH			= 1 << 2,
985 	RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS			= 1 << 3,
986 	RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS			= 1 << 4,
987 	RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS			= 1 << 5,
988 	RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK		= 1 << 6,
989 	RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK		= 1 << 7,
990 	RADV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE		= 1 << 8,
991 	RADV_CMD_DIRTY_DYNAMIC_DISCARD_RECTANGLE		= 1 << 9,
992 	RADV_CMD_DIRTY_DYNAMIC_SAMPLE_LOCATIONS			= 1 << 10,
993 	RADV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE			= 1 << 11,
994 	RADV_CMD_DIRTY_DYNAMIC_CULL_MODE			= 1 << 12,
995 	RADV_CMD_DIRTY_DYNAMIC_FRONT_FACE			= 1 << 13,
996 	RADV_CMD_DIRTY_DYNAMIC_PRIMITIVE_TOPOLOGY		= 1 << 14,
997 	RADV_CMD_DIRTY_DYNAMIC_DEPTH_TEST_ENABLE		= 1 << 15,
998 	RADV_CMD_DIRTY_DYNAMIC_DEPTH_WRITE_ENABLE		= 1 << 16,
999 	RADV_CMD_DIRTY_DYNAMIC_DEPTH_COMPARE_OP			= 1 << 17,
1000 	RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS_TEST_ENABLE		= 1 << 18,
1001 	RADV_CMD_DIRTY_DYNAMIC_STENCIL_TEST_ENABLE		= 1 << 19,
1002 	RADV_CMD_DIRTY_DYNAMIC_STENCIL_OP			= 1 << 20,
1003 	RADV_CMD_DIRTY_DYNAMIC_VERTEX_INPUT_BINDING_STRIDE      = 1 << 21,
1004 	RADV_CMD_DIRTY_DYNAMIC_ALL				= (1 << 22) - 1,
1005 	RADV_CMD_DIRTY_PIPELINE					= 1 << 22,
1006 	RADV_CMD_DIRTY_INDEX_BUFFER				= 1 << 23,
1007 	RADV_CMD_DIRTY_FRAMEBUFFER				= 1 << 24,
1008 	RADV_CMD_DIRTY_VERTEX_BUFFER				= 1 << 25,
1009 	RADV_CMD_DIRTY_STREAMOUT_BUFFER				= 1 << 26,
1010 };
1011 
1012 enum radv_cmd_flush_bits {
1013 	/* Instruction cache. */
1014 	RADV_CMD_FLAG_INV_ICACHE			 = 1 << 0,
1015 	/* Scalar L1 cache. */
1016 	RADV_CMD_FLAG_INV_SCACHE			 = 1 << 1,
1017 	/* Vector L1 cache. */
1018 	RADV_CMD_FLAG_INV_VCACHE			 = 1 << 2,
1019 	/* L2 cache + L2 metadata cache writeback & invalidate.
1020 	 * GFX6-8: Used by shaders only. GFX9-10: Used by everything. */
1021 	RADV_CMD_FLAG_INV_L2				 = 1 << 3,
1022 	/* L2 writeback (write dirty L2 lines to memory for non-L2 clients).
1023 	 * Only used for coherency with non-L2 clients like CB, DB, CP on GFX6-8.
1024 	 * GFX6-7 will do complete invalidation, because the writeback is unsupported. */
1025 	RADV_CMD_FLAG_WB_L2				 = 1 << 4,
1026 	/* Framebuffer caches */
1027 	RADV_CMD_FLAG_FLUSH_AND_INV_CB_META		 = 1 << 5,
1028 	RADV_CMD_FLAG_FLUSH_AND_INV_DB_META		 = 1 << 6,
1029 	RADV_CMD_FLAG_FLUSH_AND_INV_DB			 = 1 << 7,
1030 	RADV_CMD_FLAG_FLUSH_AND_INV_CB			 = 1 << 8,
1031 	/* Engine synchronization. */
1032 	RADV_CMD_FLAG_VS_PARTIAL_FLUSH			 = 1 << 9,
1033 	RADV_CMD_FLAG_PS_PARTIAL_FLUSH			 = 1 << 10,
1034 	RADV_CMD_FLAG_CS_PARTIAL_FLUSH			 = 1 << 11,
1035 	RADV_CMD_FLAG_VGT_FLUSH				 = 1 << 12,
1036 	/* Pipeline query controls. */
1037 	RADV_CMD_FLAG_START_PIPELINE_STATS		 = 1 << 13,
1038 	RADV_CMD_FLAG_STOP_PIPELINE_STATS		 = 1 << 14,
1039 	RADV_CMD_FLAG_VGT_STREAMOUT_SYNC		 = 1 << 15,
1040 
1041 	RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER = (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
1042 					      RADV_CMD_FLAG_FLUSH_AND_INV_CB_META |
1043 					      RADV_CMD_FLAG_FLUSH_AND_INV_DB |
1044 					      RADV_CMD_FLAG_FLUSH_AND_INV_DB_META)
1045 };
1046 
1047 struct radv_vertex_binding {
1048 	struct radv_buffer *                          buffer;
1049 	VkDeviceSize                                 offset;
1050 	VkDeviceSize size;
1051 	VkDeviceSize stride;
1052 };
1053 
1054 struct radv_streamout_binding {
1055 	struct radv_buffer *buffer;
1056 	VkDeviceSize offset;
1057 	VkDeviceSize size;
1058 };
1059 
1060 struct radv_streamout_state {
1061 	/* Mask of bound streamout buffers. */
1062 	uint8_t enabled_mask;
1063 
	/* External state that comes from the last vertex stage; it must be
	 * set explicitly when binding a new graphics pipeline.
1066 	 */
1067 	uint16_t stride_in_dw[MAX_SO_BUFFERS];
1068 	uint32_t enabled_stream_buffers_mask; /* stream0 buffers0-3 in 4 LSB */
1069 
1070 	/* State of VGT_STRMOUT_BUFFER_(CONFIG|END) */
1071 	uint32_t hw_enabled_mask;
1072 
1073 	/* State of VGT_STRMOUT_(CONFIG|EN) */
1074 	bool streamout_enabled;
1075 };
1076 
1077 struct radv_viewport_state {
1078 	uint32_t                                          count;
1079 	VkViewport                                        viewports[MAX_VIEWPORTS];
1080 };
1081 
1082 struct radv_scissor_state {
1083 	uint32_t                                          count;
1084 	VkRect2D                                          scissors[MAX_SCISSORS];
1085 };
1086 
1087 struct radv_discard_rectangle_state {
1088 	uint32_t                                          count;
1089 	VkRect2D                                          rectangles[MAX_DISCARD_RECTANGLES];
1090 };
1091 
1092 struct radv_sample_locations_state {
1093 	VkSampleCountFlagBits per_pixel;
1094 	VkExtent2D grid_size;
1095 	uint32_t count;
1096 	VkSampleLocationEXT locations[MAX_SAMPLE_LOCATIONS];
1097 };
1098 
1099 struct radv_dynamic_state {
1100 	/**
1101 	 * Bitmask of (1 << VK_DYNAMIC_STATE_*).
1102 	 * Defines the set of saved dynamic state.
1103 	 */
1104 	uint32_t mask;
1105 
1106 	struct radv_viewport_state                        viewport;
1107 
1108 	struct radv_scissor_state                         scissor;
1109 
1110 	float                                        line_width;
1111 
1112 	struct {
1113 		float                                     bias;
1114 		float                                     clamp;
1115 		float                                     slope;
1116 	} depth_bias;
1117 
1118 	float                                        blend_constants[4];
1119 
1120 	struct {
1121 		float                                     min;
1122 		float                                     max;
1123 	} depth_bounds;
1124 
1125 	struct {
1126 		uint32_t                                  front;
1127 		uint32_t                                  back;
1128 	} stencil_compare_mask;
1129 
1130 	struct {
1131 		uint32_t                                  front;
1132 		uint32_t                                  back;
1133 	} stencil_write_mask;
1134 
1135 	struct {
1136 		struct {
1137 			VkStencilOp fail_op;
1138 			VkStencilOp pass_op;
1139 			VkStencilOp depth_fail_op;
1140 			VkCompareOp compare_op;
1141 		} front;
1142 
1143 		struct {
1144 			VkStencilOp fail_op;
1145 			VkStencilOp pass_op;
1146 			VkStencilOp depth_fail_op;
1147 			VkCompareOp compare_op;
1148 		} back;
1149 	} stencil_op;
1150 
1151 	struct {
1152 		uint32_t                                  front;
1153 		uint32_t                                  back;
1154 	} stencil_reference;
1155 
1156 	struct radv_discard_rectangle_state               discard_rectangle;
1157 
1158 	struct radv_sample_locations_state                sample_location;
1159 
1160 	struct {
1161 		uint32_t factor;
1162 		uint16_t pattern;
1163 	} line_stipple;
1164 
1165 	VkCullModeFlags cull_mode;
1166 	VkFrontFace front_face;
1167 	unsigned primitive_topology;
1168 
1169 	bool depth_test_enable;
1170 	bool depth_write_enable;
1171 	VkCompareOp depth_compare_op;
1172 	bool depth_bounds_test_enable;
1173 	bool stencil_test_enable;
1174 };
1175 
1176 extern const struct radv_dynamic_state default_dynamic_state;
1177 
1178 const char *
1179 radv_get_debug_option_name(int id);
1180 
1181 const char *
1182 radv_get_perftest_option_name(int id);
1183 
1184 struct radv_color_buffer_info {
1185 	uint64_t cb_color_base;
1186 	uint64_t cb_color_cmask;
1187 	uint64_t cb_color_fmask;
1188 	uint64_t cb_dcc_base;
1189 	uint32_t cb_color_slice;
1190 	uint32_t cb_color_view;
1191 	uint32_t cb_color_info;
1192 	uint32_t cb_color_attrib;
1193 	uint32_t cb_color_attrib2; /* GFX9 and later */
1194 	uint32_t cb_color_attrib3; /* GFX10 and later */
1195 	uint32_t cb_dcc_control;
1196 	uint32_t cb_color_cmask_slice;
1197 	uint32_t cb_color_fmask_slice;
1198 	union {
1199 		uint32_t cb_color_pitch; // GFX6-GFX8
1200 		uint32_t cb_mrt_epitch; // GFX9+
1201 	};
1202 };
1203 
1204 struct radv_ds_buffer_info {
1205 	uint64_t db_z_read_base;
1206 	uint64_t db_stencil_read_base;
1207 	uint64_t db_z_write_base;
1208 	uint64_t db_stencil_write_base;
1209 	uint64_t db_htile_data_base;
1210 	uint32_t db_depth_info;
1211 	uint32_t db_z_info;
1212 	uint32_t db_stencil_info;
1213 	uint32_t db_depth_view;
1214 	uint32_t db_depth_size;
1215 	uint32_t db_depth_slice;
1216 	uint32_t db_htile_surface;
1217 	uint32_t pa_su_poly_offset_db_fmt_cntl;
1218 	uint32_t db_z_info2; /* GFX9 only */
1219 	uint32_t db_stencil_info2; /* GFX9 only */
1220 	float offset_scale;
1221 };
1222 
1223 void
1224 radv_initialise_color_surface(struct radv_device *device,
1225 			      struct radv_color_buffer_info *cb,
1226 			      struct radv_image_view *iview);
1227 void
1228 radv_initialise_ds_surface(struct radv_device *device,
1229 			   struct radv_ds_buffer_info *ds,
1230 			   struct radv_image_view *iview);
1231 
1232 /**
1233  * Attachment state when recording a renderpass instance.
1234  *
1235  * The clear value is valid only if there exists a pending clear.
1236  */
1237 struct radv_attachment_state {
1238 	VkImageAspectFlags                           pending_clear_aspects;
1239 	uint32_t                                     cleared_views;
1240 	VkClearValue                                 clear_value;
1241 	VkImageLayout                                current_layout;
1242 	VkImageLayout                                current_stencil_layout;
1243 	bool                                         current_in_render_loop;
1244 	struct radv_sample_locations_state	     sample_location;
1245 
1246 	union {
1247 		struct radv_color_buffer_info cb;
1248 		struct radv_ds_buffer_info ds;
1249 	};
1250 	struct radv_image_view *iview;
1251 };
1252 
1253 struct radv_descriptor_state {
1254 	struct radv_descriptor_set *sets[MAX_SETS];
1255 	uint32_t dirty;
1256 	uint32_t valid;
1257 	struct radv_push_descriptor_set push_set;
1258 	bool push_dirty;
1259 	uint32_t dynamic_buffers[4 * MAX_DYNAMIC_BUFFERS];
1260 };
1261 
1262 struct radv_subpass_sample_locs_state {
1263 	uint32_t subpass_idx;
1264 	struct radv_sample_locations_state sample_location;
1265 };
1266 
1267 struct radv_cmd_state {
1268 	/* Vertex descriptors */
1269 	uint64_t                                      vb_va;
1270 	unsigned                                      vb_size;
1271 
1272 	bool predicating;
1273 	uint32_t                                      dirty;
1274 
1275 	uint32_t                                      prefetch_L2_mask;
1276 
1277 	struct radv_pipeline *                        pipeline;
1278 	struct radv_pipeline *                        emitted_pipeline;
1279 	struct radv_pipeline *                        compute_pipeline;
1280 	struct radv_pipeline *                        emitted_compute_pipeline;
1281 	struct radv_framebuffer *                     framebuffer;
1282 	struct radv_render_pass *                     pass;
1283 	const struct radv_subpass *                         subpass;
1284 	struct radv_dynamic_state                     dynamic;
1285 	struct radv_attachment_state *                attachments;
1286 	struct radv_streamout_state                  streamout;
1287 	VkRect2D                                     render_area;
1288 
1289 	uint32_t                                     num_subpass_sample_locs;
1290 	struct radv_subpass_sample_locs_state *      subpass_sample_locs;
1291 
1292 	/* Index buffer */
1293 	struct radv_buffer                           *index_buffer;
1294 	uint64_t                                     index_offset;
1295 	uint32_t                                     index_type;
1296 	uint32_t                                     max_index_count;
1297 	uint64_t                                     index_va;
1298 	int32_t                                      last_index_type;
1299 
1300 	int32_t                                      last_primitive_reset_en;
1301 	uint32_t                                     last_primitive_reset_index;
1302 	enum radv_cmd_flush_bits                     flush_bits;
1303 	unsigned                                     active_occlusion_queries;
1304 	bool                                         perfect_occlusion_queries_enabled;
1305 	unsigned                                     active_pipeline_queries;
1306 	unsigned                                     active_pipeline_gds_queries;
1307 	float					     offset_scale;
1308 	uint32_t                                      trace_id;
1309 	uint32_t                                      last_ia_multi_vgt_param;
1310 
1311 	uint32_t last_num_instances;
1312 	uint32_t last_first_instance;
1313 	uint32_t last_vertex_offset;
1314 
1315 	uint32_t last_sx_ps_downconvert;
1316 	uint32_t last_sx_blend_opt_epsilon;
1317 	uint32_t last_sx_blend_opt_control;
1318 
1319 	/* Whether CP DMA is busy/idle. */
1320 	bool dma_is_busy;
1321 
1322 	/* Conditional rendering info. */
1323 	int predication_type; /* -1: disabled, 0: normal, 1: inverted */
1324 	uint64_t predication_va;
1325 
1326 	/* Inheritance info. */
1327 	VkQueryPipelineStatisticFlags inherited_pipeline_statistics;
1328 
1329 	bool context_roll_without_scissor_emitted;
1330 
1331 	/* SQTT related state. */
1332 	uint32_t current_event_type;
1333 	uint32_t num_events;
1334 	uint32_t num_layout_transitions;
1335 };
1336 
1337 struct radv_cmd_pool {
1338 	struct vk_object_base                        base;
1339 	VkAllocationCallbacks                        alloc;
1340 	struct list_head                             cmd_buffers;
1341 	struct list_head                             free_cmd_buffers;
1342 	uint32_t queue_family_index;
1343 };
1344 
1345 struct radv_cmd_buffer_upload {
1346 	uint8_t *map;
1347 	unsigned offset;
1348 	uint64_t size;
1349 	struct radeon_winsys_bo *upload_bo;
1350 	struct list_head list;
1351 };
1352 
1353 enum radv_cmd_buffer_status {
1354 	RADV_CMD_BUFFER_STATUS_INVALID,
1355 	RADV_CMD_BUFFER_STATUS_INITIAL,
1356 	RADV_CMD_BUFFER_STATUS_RECORDING,
1357 	RADV_CMD_BUFFER_STATUS_EXECUTABLE,
1358 	RADV_CMD_BUFFER_STATUS_PENDING,
1359 };
1360 
1361 struct radv_cmd_buffer {
1362 	struct vk_object_base                         base;
1363 
1364 	struct radv_device *                          device;
1365 
1366 	struct radv_cmd_pool *                        pool;
1367 	struct list_head                             pool_link;
1368 
1369 	VkCommandBufferUsageFlags                    usage_flags;
1370 	VkCommandBufferLevel                         level;
1371 	enum radv_cmd_buffer_status status;
1372 	struct radeon_cmdbuf *cs;
1373 	struct radv_cmd_state state;
1374 	struct radv_vertex_binding                   vertex_bindings[MAX_VBS];
1375 	struct radv_streamout_binding                streamout_bindings[MAX_SO_BUFFERS];
1376 	uint32_t queue_family_index;
1377 
1378 	uint8_t push_constants[MAX_PUSH_CONSTANTS_SIZE];
1379 	VkShaderStageFlags push_constant_stages;
1380 	struct radv_descriptor_set meta_push_descriptors;
1381 
1382 	struct radv_descriptor_state descriptors[MAX_BIND_POINTS];
1383 
1384 	struct radv_cmd_buffer_upload upload;
1385 
1386 	uint32_t scratch_size_per_wave_needed;
1387 	uint32_t scratch_waves_wanted;
1388 	uint32_t compute_scratch_size_per_wave_needed;
1389 	uint32_t compute_scratch_waves_wanted;
1390 	uint32_t esgs_ring_size_needed;
1391 	uint32_t gsvs_ring_size_needed;
1392 	bool tess_rings_needed;
1393 	bool gds_needed; /* for GFX10 streamout and NGG GS queries */
1394 	bool gds_oa_needed; /* for GFX10 streamout */
1395 	bool sample_positions_needed;
1396 
1397 	VkResult record_result;
1398 
1399 	uint64_t gfx9_fence_va;
1400 	uint32_t gfx9_fence_idx;
1401 	uint64_t gfx9_eop_bug_va;
1402 
1403 	/**
	 * Whether a query pool has been reset and we have to flush caches.
1405 	 */
1406 	bool pending_reset_query;
1407 
1408 	/**
1409 	 * Bitmask of pending active query flushes.
1410 	 */
1411 	enum radv_cmd_flush_bits active_query_flush_bits;
1412 };
1413 
1414 struct radv_image;
1415 struct radv_image_view;
1416 
1417 bool radv_cmd_buffer_uses_mec(struct radv_cmd_buffer *cmd_buffer);
1418 
1419 void si_emit_graphics(struct radv_device *device,
1420 		      struct radeon_cmdbuf *cs);
1421 void si_emit_compute(struct radv_device *device,
1422 		      struct radeon_cmdbuf *cs);
1423 
1424 void cik_create_gfx_config(struct radv_device *device);
1425 
1426 void si_write_viewport(struct radeon_cmdbuf *cs, int first_vp,
1427 		       int count, const VkViewport *viewports);
1428 void si_write_scissors(struct radeon_cmdbuf *cs, int first,
1429 		       int count, const VkRect2D *scissors,
1430 		       const VkViewport *viewports, bool can_use_guardband);
1431 uint32_t si_get_ia_multi_vgt_param(struct radv_cmd_buffer *cmd_buffer,
1432 				   bool instanced_draw, bool indirect_draw,
1433 				   bool count_from_stream_output,
1434 				   uint32_t draw_vertex_count,
1435 				   unsigned topology);
1436 void si_cs_emit_write_event_eop(struct radeon_cmdbuf *cs,
1437 				enum chip_class chip_class,
1438 				bool is_mec,
1439 				unsigned event, unsigned event_flags,
1440 				unsigned dst_sel, unsigned data_sel,
1441 				uint64_t va,
1442 				uint32_t new_fence,
1443 				uint64_t gfx9_eop_bug_va);
1444 
1445 void radv_cp_wait_mem(struct radeon_cmdbuf *cs, uint32_t op, uint64_t va,
1446 		      uint32_t ref, uint32_t mask);
1447 void si_cs_emit_cache_flush(struct radeon_cmdbuf *cs,
1448 			    enum chip_class chip_class,
1449 			    uint32_t *fence_ptr, uint64_t va,
1450 			    bool is_mec,
1451 			    enum radv_cmd_flush_bits flush_bits,
1452 			    uint64_t gfx9_eop_bug_va);
1453 void si_emit_cache_flush(struct radv_cmd_buffer *cmd_buffer);
1454 void si_emit_set_predication_state(struct radv_cmd_buffer *cmd_buffer,
1455 				   bool inverted, uint64_t va);
1456 void si_cp_dma_buffer_copy(struct radv_cmd_buffer *cmd_buffer,
1457 			   uint64_t src_va, uint64_t dest_va,
1458 			   uint64_t size);
1459 void si_cp_dma_prefetch(struct radv_cmd_buffer *cmd_buffer, uint64_t va,
1460                         unsigned size);
1461 void si_cp_dma_clear_buffer(struct radv_cmd_buffer *cmd_buffer, uint64_t va,
1462 			    uint64_t size, unsigned value);
1463 void si_cp_dma_wait_for_idle(struct radv_cmd_buffer *cmd_buffer);
1464 
1465 void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer);
1466 bool
1467 radv_cmd_buffer_upload_alloc(struct radv_cmd_buffer *cmd_buffer,
1468 			     unsigned size,
1469 			     unsigned alignment,
1470 			     unsigned *out_offset,
1471 			     void **ptr);
1472 void
1473 radv_cmd_buffer_set_subpass(struct radv_cmd_buffer *cmd_buffer,
1474 			    const struct radv_subpass *subpass);
1475 bool
1476 radv_cmd_buffer_upload_data(struct radv_cmd_buffer *cmd_buffer,
			    unsigned size, unsigned alignment,
1478 			    const void *data, unsigned *out_offset);
1479 
1480 void radv_cmd_buffer_clear_subpass(struct radv_cmd_buffer *cmd_buffer);
1481 void radv_cmd_buffer_resolve_subpass(struct radv_cmd_buffer *cmd_buffer);
1482 void radv_cmd_buffer_resolve_subpass_cs(struct radv_cmd_buffer *cmd_buffer);
1483 void radv_depth_stencil_resolve_subpass_cs(struct radv_cmd_buffer *cmd_buffer,
1484 					   VkImageAspectFlags aspects,
1485 					   VkResolveModeFlagBits resolve_mode);
1486 void radv_cmd_buffer_resolve_subpass_fs(struct radv_cmd_buffer *cmd_buffer);
1487 void radv_depth_stencil_resolve_subpass_fs(struct radv_cmd_buffer *cmd_buffer,
1488 					   VkImageAspectFlags aspects,
1489 					   VkResolveModeFlagBits resolve_mode);
1490 void radv_emit_default_sample_locations(struct radeon_cmdbuf *cs, int nr_samples);
1491 unsigned radv_get_default_max_sample_dist(int log_samples);
1492 void radv_device_init_msaa(struct radv_device *device);
1493 
1494 void radv_update_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
1495 				   const struct radv_image_view *iview,
1496 				   VkClearDepthStencilValue ds_clear_value,
1497 				   VkImageAspectFlags aspects);
1498 
1499 void radv_update_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
1500 				      const struct radv_image_view *iview,
1501 				      int cb_idx,
1502 				      uint32_t color_values[2]);
1503 
1504 void radv_update_fce_metadata(struct radv_cmd_buffer *cmd_buffer,
1505 			      struct radv_image *image,
1506 			      const VkImageSubresourceRange *range, bool value);
1507 
1508 void radv_update_dcc_metadata(struct radv_cmd_buffer *cmd_buffer,
1509 			      struct radv_image *image,
1510 			      const VkImageSubresourceRange *range, bool value);
1511 
1512 uint32_t radv_fill_buffer(struct radv_cmd_buffer *cmd_buffer,
1513 			  struct radeon_winsys_bo *bo,
1514 			  uint64_t offset, uint64_t size, uint32_t value);
1515 void radv_cmd_buffer_trace_emit(struct radv_cmd_buffer *cmd_buffer);
1516 bool radv_get_memory_fd(struct radv_device *device,
1517 			struct radv_device_memory *memory,
1518 			int *pFD);
1519 void radv_free_memory(struct radv_device *device,
1520 		      const VkAllocationCallbacks* pAllocator,
1521 		      struct radv_device_memory *mem);
1522 
1523 static inline void
radv_emit_shader_pointer_head(struct radeon_cmdbuf *cs,
1525 			      unsigned sh_offset, unsigned pointer_count,
1526 			      bool use_32bit_pointers)
1527 {
1528 	radeon_emit(cs, PKT3(PKT3_SET_SH_REG, pointer_count * (use_32bit_pointers ? 1 : 2), 0));
1529 	radeon_emit(cs, (sh_offset - SI_SH_REG_OFFSET) >> 2);
1530 }
1531 
1532 static inline void
radv_emit_shader_pointer_body(struct radv_device *device,
1534 			      struct radeon_cmdbuf *cs,
1535 			      uint64_t va, bool use_32bit_pointers)
1536 {
1537 	radeon_emit(cs, va);
1538 
1539 	if (use_32bit_pointers) {
1540 		assert(va == 0 ||
1541 		       (va >> 32) == device->physical_device->rad_info.address32_hi);
1542 	} else {
1543 		radeon_emit(cs, va >> 32);
1544 	}
1545 }
1546 
1547 static inline void
radv_emit_shader_pointer(struct radv_device *device,
1549 			 struct radeon_cmdbuf *cs,
1550 			 uint32_t sh_offset, uint64_t va, bool global)
1551 {
1552 	bool use_32bit_pointers = !global;
1553 
1554 	radv_emit_shader_pointer_head(cs, sh_offset, 1, use_32bit_pointers);
1555 	radv_emit_shader_pointer_body(device, cs, va, use_32bit_pointers);
1556 }
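/* Usage sketch, assuming a user-data SGPR register offset from sid.h and a
 * descriptor VA already known to the caller: emits SET_SH_REG with a 32-bit
 * pointer, or the full 64-bit address when global is true:
 *
 *    radv_emit_shader_pointer(device, cs,
 *                             R_00B130_SPI_SHADER_USER_DATA_VS_0, va, false);
 */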
1557 
1558 static inline struct radv_descriptor_state *
radv_get_descriptors_state(struct radv_cmd_buffer *cmd_buffer,
1560 			   VkPipelineBindPoint bind_point)
1561 {
1562 	assert(bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS ||
1563 	       bind_point == VK_PIPELINE_BIND_POINT_COMPUTE);
1564 	return &cmd_buffer->descriptors[bind_point];
1565 }
1566 
1567 /*
1568  * Takes x,y,z as exact numbers of invocations, instead of blocks.
1569  *
1570  * Limitations: Can't call normal dispatch functions without binding or rebinding
1571  *              the compute pipeline.
1572  */
1573 void radv_unaligned_dispatch(
1574 	struct radv_cmd_buffer                      *cmd_buffer,
1575 	uint32_t                                    x,
1576 	uint32_t                                    y,
1577 	uint32_t                                    z);
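
/* Example (sketch): dispatching an exact 100x100x1 invocation grid from a
 * meta operation, assuming the caller has already bound a compute pipeline:
 *
 *	radv_unaligned_dispatch(cmd_buffer, 100, 100, 1);
 *
 * Because the helper programs the partial last block itself, a following
 * regular vkCmdDispatch() needs the compute pipeline to be rebound, as
 * noted above.
 */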
1578 
1579 struct radv_event {
1580 	struct vk_object_base base;
1581 	struct radeon_winsys_bo *bo;
1582 	uint64_t *map;
1583 };
1584 
1585 struct radv_shader_module;
1586 
1587 #define RADV_HASH_SHADER_NO_NGG              (1 << 0)
1588 #define RADV_HASH_SHADER_CS_WAVE32           (1 << 1)
1589 #define RADV_HASH_SHADER_PS_WAVE32           (1 << 2)
1590 #define RADV_HASH_SHADER_GE_WAVE32           (1 << 3)
1591 #define RADV_HASH_SHADER_LLVM                (1 << 4)
1592 #define RADV_HASH_SHADER_DISCARD_TO_DEMOTE   (1 << 5)
1593 #define RADV_HASH_SHADER_MRT_NAN_FIXUP       (1 << 6)
1594 
1595 void
1596 radv_hash_shaders(unsigned char *hash,
1597 		  const VkPipelineShaderStageCreateInfo **stages,
1598 		  const struct radv_pipeline_layout *layout,
1599 		  const struct radv_pipeline_key *key,
1600 		  uint32_t flags);
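
/* Example (sketch): building the flags argument for radv_hash_shaders().
 * The conditions and variable names (use_llvm, cs_wave_size, pStages, key)
 * are illustrative; the real driver derives them from debug options and
 * per-device defaults.
 *
 *	uint32_t hash_flags = 0;
 *	if (use_llvm)
 *		hash_flags |= RADV_HASH_SHADER_LLVM;
 *	if (cs_wave_size == 32)
 *		hash_flags |= RADV_HASH_SHADER_CS_WAVE32;
 *	radv_hash_shaders(hash, pStages, pipeline->layout, &key, hash_flags);
 */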
1601 
1602 static inline gl_shader_stage
1603 vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage)
1604 {
1605 	assert(__builtin_popcount(vk_stage) == 1);
1606 	return ffs(vk_stage) - 1;
1607 }
1608 
1609 static inline VkShaderStageFlagBits
1610 mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)
1611 {
1612 	return (1 << mesa_stage);
1613 }
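
/* Example (sketch): the two helpers are inverses for single-bit stage masks:
 *
 *	gl_shader_stage s = vk_to_mesa_shader_stage(VK_SHADER_STAGE_FRAGMENT_BIT);
 *	assert(s == MESA_SHADER_FRAGMENT);
 *	assert(mesa_to_vk_shader_stage(s) == VK_SHADER_STAGE_FRAGMENT_BIT);
 */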
1614 
1615 #define RADV_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)
1616 
1617 #define radv_foreach_stage(stage, stage_bits)				\
1618 	for (gl_shader_stage stage,					\
1619 		     __tmp = (gl_shader_stage)((stage_bits) & RADV_STAGE_MASK);	\
1620 	     stage = __builtin_ffs(__tmp) - 1, __tmp;			\
1621 	     __tmp &= ~(1 << (stage)))
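
/* Example (sketch): iterating over every graphics stage that is active in a
 * pipeline, e.g. to inspect each compiled variant:
 *
 *	radv_foreach_stage(stage, pipeline->active_stages) {
 *		struct radv_shader_variant *variant = pipeline->shaders[stage];
 *		...
 *	}
 */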
1622 
1623 extern const VkFormat radv_fs_key_format_exemplars[NUM_META_FS_KEYS];
1624 unsigned radv_format_meta_fs_key(VkFormat format);
1625 
1626 struct radv_multisample_state {
1627 	uint32_t db_eqaa;
1628 	uint32_t pa_sc_mode_cntl_0;
1629 	uint32_t pa_sc_mode_cntl_1;
1630 	uint32_t pa_sc_aa_config;
1631 	uint32_t pa_sc_aa_mask[2];
1632 	unsigned num_samples;
1633 };
1634 
1635 struct radv_prim_vertex_count {
1636 	uint8_t min;
1637 	uint8_t incr;
1638 };
1639 
1640 struct radv_ia_multi_vgt_param_helpers {
1641 	uint32_t base;
1642 	bool partial_es_wave;
1643 	uint8_t primgroup_size;
1644 	bool ia_switch_on_eoi;
1645 	bool partial_vs_wave;
1646 };
1647 
1648 struct radv_binning_state {
1649 	uint32_t pa_sc_binner_cntl_0;
1650 	uint32_t db_dfsm_control;
1651 };
1652 
1653 #define SI_GS_PER_ES 128
1654 
1655 struct radv_pipeline {
1656 	struct vk_object_base                         base;
1657 	struct radv_device *                          device;
1658 	struct radv_dynamic_state                     dynamic_state;
1659 
1660 	struct radv_pipeline_layout *                 layout;
1661 
1662 	bool					     need_indirect_descriptor_sets;
1663 	struct radv_shader_variant *                 shaders[MESA_SHADER_STAGES];
1664 	struct radv_shader_variant *gs_copy_shader;
1665 	VkShaderStageFlags                           active_stages;
1666 
1667 	struct radeon_cmdbuf                      cs;
1668 	uint32_t                                  ctx_cs_hash;
1669 	struct radeon_cmdbuf                      ctx_cs;
1670 
1671 	uint32_t                                     binding_stride[MAX_VBS];
1672 	uint8_t                                      num_vertex_bindings;
1673 
1674 	uint32_t user_data_0[MESA_SHADER_STAGES];
1675 	union {
1676 		struct {
1677 			struct radv_multisample_state ms;
1678 			struct radv_binning_state binning;
1679 			uint32_t spi_baryc_cntl;
1680 			bool prim_restart_enable;
1681 			unsigned esgs_ring_size;
1682 			unsigned gsvs_ring_size;
1683 			uint32_t vtx_base_sgpr;
1684 			struct radv_ia_multi_vgt_param_helpers ia_multi_vgt_param;
1685 			uint8_t vtx_emit_num;
1686 			bool can_use_guardband;
1687 			uint32_t needed_dynamic_state;
1688 			bool disable_out_of_order_rast_for_occlusion;
1689 			unsigned tess_patch_control_points;
1690 			unsigned pa_su_sc_mode_cntl;
1691 			unsigned db_depth_control;
1692 			bool uses_dynamic_stride;
1693 
1694 			/* Used for rbplus */
1695 			uint32_t col_format;
1696 			uint32_t cb_target_mask;
1697 		} graphics;
1698 	};
1699 
1700 	unsigned max_waves;
1701 	unsigned scratch_bytes_per_wave;
1702 
1703 	/* Not NULL if graphics pipeline uses streamout. */
1704 	struct radv_shader_variant *streamout_shader;
1705 };
1706 
1707 static inline bool radv_pipeline_has_gs(const struct radv_pipeline *pipeline)
1708 {
1709 	return pipeline->shaders[MESA_SHADER_GEOMETRY] ? true : false;
1710 }
1711 
1712 static inline bool radv_pipeline_has_tess(const struct radv_pipeline *pipeline)
1713 {
1714 	return pipeline->shaders[MESA_SHADER_TESS_CTRL] ? true : false;
1715 }
1716 
1717 bool radv_pipeline_has_ngg(const struct radv_pipeline *pipeline);
1718 
1719 bool radv_pipeline_has_ngg_passthrough(const struct radv_pipeline *pipeline);
1720 
1721 bool radv_pipeline_has_gs_copy_shader(const struct radv_pipeline *pipeline);
1722 
1723 struct radv_userdata_info *radv_lookup_user_sgpr(struct radv_pipeline *pipeline,
1724 						 gl_shader_stage stage,
1725 						 int idx);
1726 
1727 struct radv_shader_variant *radv_get_shader(const struct radv_pipeline *pipeline,
1728 					    gl_shader_stage stage);
1729 
1730 struct radv_graphics_pipeline_create_info {
1731 	bool use_rectlist;
1732 	bool db_depth_clear;
1733 	bool db_stencil_clear;
1734 	bool db_depth_disable_expclear;
1735 	bool db_stencil_disable_expclear;
1736 	bool depth_compress_disable;
1737 	bool stencil_compress_disable;
1738 	bool resummarize_enable;
1739 	uint32_t custom_blend_mode;
1740 };
1741 
1742 VkResult
1743 radv_graphics_pipeline_create(VkDevice device,
1744 			      VkPipelineCache cache,
1745 			      const VkGraphicsPipelineCreateInfo *pCreateInfo,
1746 			      const struct radv_graphics_pipeline_create_info *extra,
1747 			      const VkAllocationCallbacks *alloc,
1748 			      VkPipeline *pPipeline);
1749 
1750 struct radv_binning_settings {
1751 	unsigned context_states_per_bin; /* allowed range: [1, 6] */
1752 	unsigned persistent_states_per_bin; /* allowed range: [1, 32] */
1753 	unsigned fpovs_per_batch; /* allowed range: [0, 255], 0 = unlimited */
1754 };
1755 
1756 struct radv_binning_settings
1757 radv_get_binning_settings(const struct radv_physical_device *pdev);
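
/* Example (sketch): values a GFX9+ part might use, staying inside the
 * documented ranges above. The exact numbers are illustrative only; real
 * values come from radv_get_binning_settings().
 *
 *	struct radv_binning_settings settings = {
 *		.context_states_per_bin = 1,
 *		.persistent_states_per_bin = 1,
 *		.fpovs_per_batch = 63,
 *	};
 */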
1758 
1759 struct vk_format_description;
1760 uint32_t radv_translate_buffer_dataformat(const struct vk_format_description *desc,
1761 					  int first_non_void);
1762 uint32_t radv_translate_buffer_numformat(const struct vk_format_description *desc,
1763 					 int first_non_void);
1764 bool radv_is_buffer_format_supported(VkFormat format, bool *scaled);
1765 uint32_t radv_translate_colorformat(VkFormat format);
1766 uint32_t radv_translate_color_numformat(VkFormat format,
1767 					const struct vk_format_description *desc,
1768 					int first_non_void);
1769 uint32_t radv_colorformat_endian_swap(uint32_t colorformat);
1770 unsigned radv_translate_colorswap(VkFormat format, bool do_endian_swap);
1771 uint32_t radv_translate_dbformat(VkFormat format);
1772 uint32_t radv_translate_tex_dataformat(VkFormat format,
1773 				       const struct vk_format_description *desc,
1774 				       int first_non_void);
1775 uint32_t radv_translate_tex_numformat(VkFormat format,
1776 				      const struct vk_format_description *desc,
1777 				      int first_non_void);
1778 bool radv_format_pack_clear_color(VkFormat format,
1779 				  uint32_t clear_vals[2],
1780 				  VkClearColorValue *value);
1781 bool radv_is_colorbuffer_format_supported(VkFormat format, bool *blendable);
1782 bool radv_dcc_formats_compatible(VkFormat format1,
1783                                  VkFormat format2);
1784 bool radv_device_supports_etc(struct radv_physical_device *physical_device);
1785 
1786 struct radv_image_plane {
1787 	VkFormat format;
1788 	struct radeon_surf surface;
1789 	uint64_t offset;
1790 };
1791 
1792 struct radv_image {
1793 	struct vk_object_base base;
1794 	VkImageType type;
1795 	/* The original VkFormat provided by the client.  This may not match any
1796 	 * of the actual surface formats.
1797 	 */
1798 	VkFormat vk_format;
1799 	VkImageAspectFlags aspects;
1800 	VkImageUsageFlags usage; /**< Superset of VkImageCreateInfo::usage. */
1801 	struct ac_surf_info info;
1802 	VkImageTiling tiling; /**< VkImageCreateInfo::tiling */
1803 	VkImageCreateFlags flags; /**< VkImageCreateInfo::flags */
1804 
1805 	VkDeviceSize size;
1806 	uint32_t alignment;
1807 
1808 	unsigned queue_family_mask;
1809 	bool exclusive;
1810 	bool shareable;
1811 
1812 	/* Set when bound */
1813 	struct radeon_winsys_bo *bo;
1814 	VkDeviceSize offset;
1815 	bool tc_compatible_htile;
1816 	bool tc_compatible_cmask;
1817 
1818 	uint64_t clear_value_offset;
1819 	uint64_t fce_pred_offset;
1820 	uint64_t dcc_pred_offset;
1821 
1822 	/*
1823 	 * Metadata for the TC-compat zrange workaround. If the 32-bit value
1824 	 * stored at this offset is UINT_MAX, the driver will emit
1825 	 * DB_Z_INFO.ZRANGE_PRECISION=0, otherwise it will skip the
1826 	 * SET_CONTEXT_REG packet.
1827 	 */
1828 	uint64_t tc_compat_zrange_offset;
1829 
1830 	/* For VK_ANDROID_native_buffer, the WSI image owns the memory. */
1831 	VkDeviceMemory owned_memory;
1832 
1833 	unsigned plane_count;
1834 	struct radv_image_plane planes[0];
1835 };
1836 
1837 /* Whether the image has HTILE metadata that is known to be consistent with
1838  * the contents of the image and is allowed to be in compressed form.
1839  *
1840  * If this is false, reads that don't use HTILE should still be able to
1841  * return correct results.
1842  */
1843 bool radv_layout_is_htile_compressed(const struct radv_image *image,
1844                                      VkImageLayout layout,
1845                                      bool in_render_loop,
1846                                      unsigned queue_mask);
1847 
1848 bool radv_layout_can_fast_clear(const struct radv_image *image,
1849 			        VkImageLayout layout,
1850 			        bool in_render_loop,
1851 			        unsigned queue_mask);
1852 
1853 bool radv_layout_dcc_compressed(const struct radv_device *device,
1854 				const struct radv_image *image,
1855 			        VkImageLayout layout,
1856 			        bool in_render_loop,
1857 			        unsigned queue_mask);
1858 
1859 /**
1860  * Return whether the image has CMASK metadata for color surfaces.
1861  */
1862 static inline bool
1863 radv_image_has_cmask(const struct radv_image *image)
1864 {
1865 	return image->planes[0].surface.cmask_offset;
1866 }
1867 
1868 /**
1869  * Return whether the image has FMASK metadata for color surfaces.
1870  */
1871 static inline bool
1872 radv_image_has_fmask(const struct radv_image *image)
1873 {
1874 	return image->planes[0].surface.fmask_offset;
1875 }
1876 
1877 /**
1878  * Return whether the image has DCC metadata for color surfaces.
1879  */
1880 static inline bool
1881 radv_image_has_dcc(const struct radv_image *image)
1882 {
1883 	return image->planes[0].surface.dcc_size;
1884 }
1885 
1886 /**
1887  * Return whether the image is TC-compatible CMASK.
1888  */
1889 static inline bool
1890 radv_image_is_tc_compat_cmask(const struct radv_image *image)
1891 {
1892 	return radv_image_has_fmask(image) && image->tc_compatible_cmask;
1893 }
1894 
1895 /**
1896  * Return whether DCC metadata is enabled for a level.
1897  */
1898 static inline bool
1899 radv_dcc_enabled(const struct radv_image *image, unsigned level)
1900 {
1901 	return radv_image_has_dcc(image) &&
1902 	       level < image->planes[0].surface.num_dcc_levels;
1903 }
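
/* Example (sketch): a typical per-level guard before touching DCC, e.g. when
 * deciding whether a compressed path applies to a given mip of a subresource
 * range (the surrounding logic is illustrative):
 *
 *	if (radv_dcc_enabled(image, range->baseMipLevel)) {
 *		... fast-clear / decompress handling ...
 *	}
 */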
1904 
1905 /**
1906  * Return whether the image has CB metadata.
1907  */
1908 static inline bool
1909 radv_image_has_CB_metadata(const struct radv_image *image)
1910 {
1911 	return radv_image_has_cmask(image) ||
1912 	       radv_image_has_fmask(image) ||
1913 	       radv_image_has_dcc(image);
1914 }
1915 
1916 /**
1917  * Return whether the image has HTILE metadata for depth surfaces.
1918  */
1919 static inline bool
1920 radv_image_has_htile(const struct radv_image *image)
1921 {
1922 	return image->planes[0].surface.htile_size;
1923 }
1924 
1925 /**
1926  * Return whether HTILE metadata is enabled for a level.
1927  */
1928 static inline bool
1929 radv_htile_enabled(const struct radv_image *image, unsigned level)
1930 {
1931 	return radv_image_has_htile(image) && level == 0;
1932 }
1933 
1934 /**
1935  * Return whether the image is TC-compatible HTILE.
1936  */
1937 static inline bool
1938 radv_image_is_tc_compat_htile(const struct radv_image *image)
1939 {
1940 	return radv_image_has_htile(image) && image->tc_compatible_htile;
1941 }
1942 
1943 static inline uint64_t
1944 radv_image_get_fast_clear_va(const struct radv_image *image,
1945 			     uint32_t base_level)
1946 {
1947 	uint64_t va = radv_buffer_get_va(image->bo);
1948 	va += image->offset + image->clear_value_offset + base_level * 8;
1949 	return va;
1950 }
1951 
1952 static inline uint64_t
1953 radv_image_get_fce_pred_va(const struct radv_image *image,
1954 			   uint32_t base_level)
1955 {
1956 	uint64_t va = radv_buffer_get_va(image->bo);
1957 	va += image->offset + image->fce_pred_offset + base_level * 8;
1958 	return va;
1959 }
1960 
1961 static inline uint64_t
1962 radv_image_get_dcc_pred_va(const struct radv_image *image,
1963 			   uint32_t base_level)
1964 {
1965 	uint64_t va = radv_buffer_get_va(image->bo);
1966 	va += image->offset + image->dcc_pred_offset + base_level * 8;
1967 	return va;
1968 }
1969 
1970 static inline uint64_t
1971 radv_get_tc_compat_zrange_va(const struct radv_image *image,
1972 			     uint32_t base_level)
1973 {
1974 	uint64_t va = radv_buffer_get_va(image->bo);
1975 	va += image->offset + image->tc_compat_zrange_offset + base_level * 4;
1976 	return va;
1977 }
1978 
1979 static inline uint64_t
1980 radv_get_ds_clear_value_va(const struct radv_image *image,
1981 			   uint32_t base_level)
1982 {
1983 	uint64_t va = radv_buffer_get_va(image->bo);
1984 	va += image->offset + image->clear_value_offset + base_level * 8;
1985 	return va;
1986 }
1987 
1988 unsigned radv_image_queue_family_mask(const struct radv_image *image, uint32_t family, uint32_t queue_family);
1989 
1990 static inline uint32_t
1991 radv_get_layerCount(const struct radv_image *image,
1992 		    const VkImageSubresourceRange *range)
1993 {
1994 	return range->layerCount == VK_REMAINING_ARRAY_LAYERS ?
1995 		image->info.array_size - range->baseArrayLayer : range->layerCount;
1996 }
1997 
1998 static inline uint32_t
1999 radv_get_levelCount(const struct radv_image *image,
2000 		    const VkImageSubresourceRange *range)
2001 {
2002 	return range->levelCount == VK_REMAINING_MIP_LEVELS ?
2003 		image->info.levels - range->baseMipLevel : range->levelCount;
2004 }
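
/* Example (sketch): resolving VK_REMAINING_MIP_LEVELS against an image with
 * 10 mip levels:
 *
 *	VkImageSubresourceRange range = {
 *		.baseMipLevel = 2,
 *		.levelCount = VK_REMAINING_MIP_LEVELS,
 *	};
 *	// radv_get_levelCount(image, &range) == image->info.levels - 2 == 8
 */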
2005 
2006 struct radeon_bo_metadata;
2007 void
2008 radv_init_metadata(struct radv_device *device,
2009 		   struct radv_image *image,
2010 		   struct radeon_bo_metadata *metadata);
2011 
2012 void
2013 radv_image_override_offset_stride(struct radv_device *device,
2014                                   struct radv_image *image,
2015                                   uint64_t offset, uint32_t stride);
2016 
2017 union radv_descriptor {
2018 	struct {
2019 		uint32_t plane0_descriptor[8];
2020 		uint32_t fmask_descriptor[8];
2021 	};
2022 	struct {
2023 		uint32_t plane_descriptors[3][8];
2024 	};
2025 };
2026 
2027 struct radv_image_view {
2028 	struct vk_object_base base;
2029 	struct radv_image *image; /**< VkImageViewCreateInfo::image */
2030 	struct radeon_winsys_bo *bo;
2031 
2032 	VkImageViewType type;
2033 	VkImageAspectFlags aspect_mask;
2034 	VkFormat vk_format;
2035 	unsigned plane_id;
2036 	bool multiple_planes;
2037 	uint32_t base_layer;
2038 	uint32_t layer_count;
2039 	uint32_t base_mip;
2040 	uint32_t level_count;
2041 	VkExtent3D extent; /**< Extent of VkImageViewCreateInfo::baseMipLevel. */
2042 
2043 	union radv_descriptor descriptor;
2044 
2045 	/* Descriptor for use as a storage image as opposed to a sampled image.
2046 	 * This has a few differences for cube maps (e.g. type).
2047 	 */
2048 	union radv_descriptor storage_descriptor;
2049 };
2050 
2051 struct radv_image_create_info {
2052 	const VkImageCreateInfo *vk_info;
2053 	bool scanout;
2054 	bool no_metadata_planes;
2055 	const struct radeon_bo_metadata *bo_metadata;
2056 };
2057 
2058 VkResult
2059 radv_image_create_layout(struct radv_device *device,
2060                          struct radv_image_create_info create_info,
2061                          struct radv_image *image);
2062 
2063 VkResult radv_image_create(VkDevice _device,
2064 			   const struct radv_image_create_info *info,
2065 			   const VkAllocationCallbacks* alloc,
2066 			   VkImage *pImage);
2067 
2068 bool vi_alpha_is_on_msb(struct radv_device *device, VkFormat format);
2069 
2070 VkResult
2071 radv_image_from_gralloc(VkDevice device_h,
2072                        const VkImageCreateInfo *base_info,
2073                        const VkNativeBufferANDROID *gralloc_info,
2074                        const VkAllocationCallbacks *alloc,
2075                        VkImage *out_image_h);
2076 uint64_t
2077 radv_ahb_usage_from_vk_usage(const VkImageCreateFlags vk_create,
2078                              const VkImageUsageFlags vk_usage);
2079 VkResult
2080 radv_import_ahb_memory(struct radv_device *device,
2081                        struct radv_device_memory *mem,
2082                        unsigned priority,
2083                        const VkImportAndroidHardwareBufferInfoANDROID *info);
2084 VkResult
2085 radv_create_ahb_memory(struct radv_device *device,
2086                        struct radv_device_memory *mem,
2087                        unsigned priority,
2088                        const VkMemoryAllocateInfo *pAllocateInfo);
2089 
2090 VkFormat
2091 radv_select_android_external_format(const void *next, VkFormat default_format);
2092 
2093 bool radv_android_gralloc_supports_format(VkFormat format, VkImageUsageFlagBits usage);
2094 
2095 struct radv_image_view_extra_create_info {
2096 	bool disable_compression;
2097 };
2098 
2099 void radv_image_view_init(struct radv_image_view *view,
2100 			  struct radv_device *device,
2101 			  const VkImageViewCreateInfo *pCreateInfo,
2102 			  const struct radv_image_view_extra_create_info* extra_create_info);
2103 
2104 VkFormat radv_get_aspect_format(struct radv_image *image, VkImageAspectFlags mask);
2105 
2106 struct radv_sampler_ycbcr_conversion {
2107 	struct vk_object_base base;
2108 	VkFormat format;
2109 	VkSamplerYcbcrModelConversion ycbcr_model;
2110 	VkSamplerYcbcrRange ycbcr_range;
2111 	VkComponentMapping components;
2112 	VkChromaLocation chroma_offsets[2];
2113 	VkFilter chroma_filter;
2114 };
2115 
2116 struct radv_buffer_view {
2117 	struct vk_object_base base;
2118 	struct radeon_winsys_bo *bo;
2119 	VkFormat vk_format;
2120 	uint64_t range; /**< VkBufferViewCreateInfo::range */
2121 	uint32_t state[4];
2122 };
2123 void radv_buffer_view_init(struct radv_buffer_view *view,
2124 			   struct radv_device *device,
2125 			   const VkBufferViewCreateInfo* pCreateInfo);
2126 
2127 static inline struct VkExtent3D
2128 radv_sanitize_image_extent(const VkImageType imageType,
2129 			   const struct VkExtent3D imageExtent)
2130 {
2131 	switch (imageType) {
2132 	case VK_IMAGE_TYPE_1D:
2133 		return (VkExtent3D) { imageExtent.width, 1, 1 };
2134 	case VK_IMAGE_TYPE_2D:
2135 		return (VkExtent3D) { imageExtent.width, imageExtent.height, 1 };
2136 	case VK_IMAGE_TYPE_3D:
2137 		return imageExtent;
2138 	default:
2139 		unreachable("invalid image type");
2140 	}
2141 }
2142 
2143 static inline struct VkOffset3D
2144 radv_sanitize_image_offset(const VkImageType imageType,
2145 			   const struct VkOffset3D imageOffset)
2146 {
2147 	switch (imageType) {
2148 	case VK_IMAGE_TYPE_1D:
2149 		return (VkOffset3D) { imageOffset.x, 0, 0 };
2150 	case VK_IMAGE_TYPE_2D:
2151 		return (VkOffset3D) { imageOffset.x, imageOffset.y, 0 };
2152 	case VK_IMAGE_TYPE_3D:
2153 		return imageOffset;
2154 	default:
2155 		unreachable("invalid image type");
2156 	}
2157 }
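
/* Example (sketch): for a 1D image the helpers clamp the unused dimensions,
 * so copies never pick up stray height/depth values from the API structs:
 *
 *	VkExtent3D e = radv_sanitize_image_extent(VK_IMAGE_TYPE_1D,
 *						  (VkExtent3D){ 256, 7, 3 });
 *	// e == { 256, 1, 1 }
 */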
2158 
2159 static inline bool
2160 radv_image_extent_compare(const struct radv_image *image,
2161 			  const VkExtent3D *extent)
2162 {
2163 	if (extent->width != image->info.width ||
2164 	    extent->height != image->info.height ||
2165 	    extent->depth != image->info.depth)
2166 		return false;
2167 	return true;
2168 }
2169 
2170 struct radv_sampler {
2171 	struct vk_object_base base;
2172 	uint32_t state[4];
2173 	struct radv_sampler_ycbcr_conversion *ycbcr_sampler;
2174 	uint32_t border_color_slot;
2175 };
2176 
2177 struct radv_framebuffer {
2178 	struct vk_object_base                        base;
2179 	uint32_t                                     width;
2180 	uint32_t                                     height;
2181 	uint32_t                                     layers;
2182 
2183 	uint32_t                                     attachment_count;
2184 	struct radv_image_view                       *attachments[0];
2185 };
2186 
2187 struct radv_subpass_barrier {
2188 	VkPipelineStageFlags src_stage_mask;
2189 	VkAccessFlags        src_access_mask;
2190 	VkAccessFlags        dst_access_mask;
2191 };
2192 
2193 void radv_subpass_barrier(struct radv_cmd_buffer *cmd_buffer,
2194 			  const struct radv_subpass_barrier *barrier);
2195 
2196 struct radv_subpass_attachment {
2197 	uint32_t         attachment;
2198 	VkImageLayout    layout;
2199 	VkImageLayout    stencil_layout;
2200 	bool             in_render_loop;
2201 };
2202 
2203 struct radv_subpass {
2204 	uint32_t                                     attachment_count;
2205 	struct radv_subpass_attachment *             attachments;
2206 
2207 	uint32_t                                     input_count;
2208 	uint32_t                                     color_count;
2209 	struct radv_subpass_attachment *             input_attachments;
2210 	struct radv_subpass_attachment *             color_attachments;
2211 	struct radv_subpass_attachment *             resolve_attachments;
2212 	struct radv_subpass_attachment *             depth_stencil_attachment;
2213 	struct radv_subpass_attachment *             ds_resolve_attachment;
2214 	VkResolveModeFlagBits                        depth_resolve_mode;
2215 	VkResolveModeFlagBits                        stencil_resolve_mode;
2216 
2217 	/** Subpass has at least one color resolve attachment */
2218 	bool                                         has_color_resolve;
2219 
2220 	/** Subpass has at least one color attachment */
2221 	bool                                         has_color_att;
2222 
2223 	struct radv_subpass_barrier                  start_barrier;
2224 
2225 	uint32_t                                     view_mask;
2226 
2227 	VkSampleCountFlagBits                        color_sample_count;
2228 	VkSampleCountFlagBits                        depth_sample_count;
2229 	VkSampleCountFlagBits                        max_sample_count;
2230 };
2231 
2232 uint32_t
2233 radv_get_subpass_id(struct radv_cmd_buffer *cmd_buffer);
2234 
2235 struct radv_render_pass_attachment {
2236 	VkFormat                                     format;
2237 	uint32_t                                     samples;
2238 	VkAttachmentLoadOp                           load_op;
2239 	VkAttachmentLoadOp                           stencil_load_op;
2240 	VkImageLayout                                initial_layout;
2241 	VkImageLayout                                final_layout;
2242 	VkImageLayout                                stencil_initial_layout;
2243 	VkImageLayout                                stencil_final_layout;
2244 
2245 	/* The subpass id in which the attachment will be used first/last. */
2246 	uint32_t				     first_subpass_idx;
2247 	uint32_t                                     last_subpass_idx;
2248 };
2249 
2250 struct radv_render_pass {
2251 	struct vk_object_base                        base;
2252 	uint32_t                                     attachment_count;
2253 	uint32_t                                     subpass_count;
2254 	struct radv_subpass_attachment *             subpass_attachments;
2255 	struct radv_render_pass_attachment *         attachments;
2256 	struct radv_subpass_barrier                  end_barrier;
2257 	struct radv_subpass                          subpasses[0];
2258 };
2259 
2260 VkResult radv_device_init_meta(struct radv_device *device);
2261 void radv_device_finish_meta(struct radv_device *device);
2262 
2263 struct radv_query_pool {
2264 	struct vk_object_base base;
2265 	struct radeon_winsys_bo *bo;
2266 	uint32_t stride;
2267 	uint32_t availability_offset;
2268 	uint64_t size;
2269 	char *ptr;
2270 	VkQueryType type;
2271 	uint32_t pipeline_stats_mask;
2272 };
2273 
2274 typedef enum {
2275 	RADV_SEMAPHORE_NONE,
2276 	RADV_SEMAPHORE_WINSYS,
2277 	RADV_SEMAPHORE_SYNCOBJ,
2278 	RADV_SEMAPHORE_TIMELINE_SYNCOBJ,
2279 	RADV_SEMAPHORE_TIMELINE,
2280 } radv_semaphore_kind;
2281 
2282 struct radv_deferred_queue_submission;
2283 
2284 struct radv_timeline_waiter {
2285 	struct list_head list;
2286 	struct radv_deferred_queue_submission *submission;
2287 	uint64_t value;
2288 };
2289 
2290 struct radv_timeline_point {
2291 	struct list_head list;
2292 
2293 	uint64_t value;
2294 	uint32_t syncobj;
2295 
2296 	/* Separate from the list to accommodate CPU waits being async, as well
2297 	 * as to prevent point deletion during submission. */
2298 	unsigned wait_count;
2299 };
2300 
2301 struct radv_timeline {
2302 	/* Using a pthread mutex to be compatible with condition variables. */
2303 	pthread_mutex_t mutex;
2304 
2305 	uint64_t highest_signaled;
2306 	uint64_t highest_submitted;
2307 
2308 	struct list_head points;
2309 
2310 	/* Keep free points on hand so we do not have to recreate syncobjs all
2311 	 * the time. */
2312 	struct list_head free_points;
2313 
2314 	/* Submissions that are deferred waiting for a specific value to be
2315 	 * submitted. */
2316 	struct list_head waiters;
2317 };
2318 
2319 struct radv_timeline_syncobj {
2320 	/* Keep syncobj first, so common code can just handle this as a
2321 	 * non-timeline syncobj. */
2322 	uint32_t syncobj;
2323 	uint64_t max_point; /* max submitted point. */
2324 };
2325 
2326 struct radv_semaphore_part {
2327 	radv_semaphore_kind kind;
2328 	union {
2329 		uint32_t syncobj;
2330 		struct radeon_winsys_sem *ws_sem;
2331 		struct radv_timeline timeline;
2332 		struct radv_timeline_syncobj timeline_syncobj;
2333 	};
2334 };
2335 
2336 struct radv_semaphore {
2337 	struct vk_object_base base;
2338 	struct radv_semaphore_part permanent;
2339 	struct radv_semaphore_part temporary;
2340 };
2341 
2342 bool radv_queue_internal_submit(struct radv_queue *queue,
2343 				struct radeon_cmdbuf *cs);
2344 
2345 void radv_set_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
2346 			     VkPipelineBindPoint bind_point,
2347 			     struct radv_descriptor_set *set,
2348 			     unsigned idx);
2349 
2350 void
2351 radv_update_descriptor_sets(struct radv_device *device,
2352                             struct radv_cmd_buffer *cmd_buffer,
2353                             VkDescriptorSet overrideSet,
2354                             uint32_t descriptorWriteCount,
2355                             const VkWriteDescriptorSet *pDescriptorWrites,
2356                             uint32_t descriptorCopyCount,
2357                             const VkCopyDescriptorSet *pDescriptorCopies);
2358 
2359 void
2360 radv_update_descriptor_set_with_template(struct radv_device *device,
2361                                          struct radv_cmd_buffer *cmd_buffer,
2362                                          struct radv_descriptor_set *set,
2363                                          VkDescriptorUpdateTemplate descriptorUpdateTemplate,
2364                                          const void *pData);
2365 
2366 void radv_meta_push_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
2367                                    VkPipelineBindPoint pipelineBindPoint,
2368                                    VkPipelineLayout _layout,
2369                                    uint32_t set,
2370                                    uint32_t descriptorWriteCount,
2371                                    const VkWriteDescriptorSet *pDescriptorWrites);
2372 
2373 void radv_initialize_dcc(struct radv_cmd_buffer *cmd_buffer,
2374 			 struct radv_image *image,
2375 			 const VkImageSubresourceRange *range, uint32_t value);
2376 
2377 void radv_initialize_fmask(struct radv_cmd_buffer *cmd_buffer,
2378 			   struct radv_image *image,
2379 			   const VkImageSubresourceRange *range);
2380 
2381 typedef enum {
2382 	RADV_FENCE_NONE,
2383 	RADV_FENCE_WINSYS,
2384 	RADV_FENCE_SYNCOBJ,
2385 	RADV_FENCE_WSI,
2386 } radv_fence_kind;
2387 
2388 struct radv_fence_part {
2389 	radv_fence_kind kind;
2390 
2391 	union {
2392 		/* AMDGPU winsys fence. */
2393 		struct radeon_winsys_fence *fence;
2394 
2395 		/* DRM syncobj handle for syncobj-based fences. */
2396 		uint32_t syncobj;
2397 
2398 		/* WSI fence. */
2399 		struct wsi_fence *fence_wsi;
2400 	};
2401 };
2402 
2403 struct radv_fence {
2404 	struct vk_object_base base;
2405 	struct radv_fence_part permanent;
2406 	struct radv_fence_part temporary;
2407 };
2408 
2409 /* radv_nir_to_llvm.c */
2410 struct radv_shader_args;
2411 
2412 void llvm_compile_shader(struct radv_device *device,
2413 			 unsigned shader_count,
2414 			 struct nir_shader *const *shaders,
2415 			 struct radv_shader_binary **binary,
2416 			 struct radv_shader_args *args);
2417 
2418 unsigned radv_nir_get_max_workgroup_size(enum chip_class chip_class,
2419 					 gl_shader_stage stage,
2420 					 const struct nir_shader *nir);
2421 
2422 /* radv_shader_info.h */
2423 struct radv_shader_info;
2424 struct radv_shader_variant_key;
2425 
2426 void radv_nir_shader_info_pass(const struct nir_shader *nir,
2427 			       const struct radv_pipeline_layout *layout,
2428 			       const struct radv_shader_variant_key *key,
2429 			       struct radv_shader_info *info,
2430 			       bool use_llvm);
2431 
2432 void radv_nir_shader_info_init(struct radv_shader_info *info);
2433 
2434 /* radv_sqtt.c */
2435 struct radv_thread_trace_info {
2436 	uint32_t cur_offset;
2437 	uint32_t trace_status;
2438 	union {
2439 		uint32_t gfx9_write_counter;
2440 		uint32_t gfx10_dropped_cntr;
2441 	};
2442 };
2443 
2444 struct radv_thread_trace_se {
2445 	struct radv_thread_trace_info info;
2446 	void *data_ptr;
2447 	uint32_t shader_engine;
2448 	uint32_t compute_unit;
2449 };
2450 
2451 struct radv_thread_trace {
2452 	uint32_t num_traces;
2453 	struct radv_thread_trace_se traces[4];
2454 };
2455 
2456 bool radv_thread_trace_init(struct radv_device *device);
2457 void radv_thread_trace_finish(struct radv_device *device);
2458 bool radv_begin_thread_trace(struct radv_queue *queue);
2459 bool radv_end_thread_trace(struct radv_queue *queue);
2460 bool radv_get_thread_trace(struct radv_queue *queue,
2461 			   struct radv_thread_trace *thread_trace);
2462 void radv_emit_thread_trace_userdata(struct radeon_cmdbuf *cs,
2463 				     const void *data, uint32_t num_dwords);
2464 
2465 /* radv_rgp.c */
2466 int radv_dump_thread_trace(struct radv_device *device,
2467 			   const struct radv_thread_trace *trace);
2468 
2469 /* radv_sqtt_layer.c */
2470 struct radv_barrier_data {
2471 	union {
2472 		struct {
2473 			uint16_t depth_stencil_expand : 1;
2474 			uint16_t htile_hiz_range_expand : 1;
2475 			uint16_t depth_stencil_resummarize : 1;
2476 			uint16_t dcc_decompress : 1;
2477 			uint16_t fmask_decompress : 1;
2478 			uint16_t fast_clear_eliminate : 1;
2479 			uint16_t fmask_color_expand : 1;
2480 			uint16_t init_mask_ram : 1;
2481 			uint16_t reserved : 8;
2482 		};
2483 		uint16_t all;
2484 	} layout_transitions;
2485 };
2486 
2487 /**
2488  * Value for the reason field of an RGP barrier start marker originating from
2489  * the Vulkan client (does not include PAL-defined values). (Table 15)
2490  */
2491 enum rgp_barrier_reason {
2492 	RGP_BARRIER_UNKNOWN_REASON = 0xFFFFFFFF,
2493 
2494 	/* External app-generated barrier reasons, i.e. API synchronization
2495 	 * commands. Range of valid values: [0x00000001 ... 0x7FFFFFFF].
2496 	 */
2497 	RGP_BARRIER_EXTERNAL_CMD_PIPELINE_BARRIER = 0x00000001,
2498 	RGP_BARRIER_EXTERNAL_RENDER_PASS_SYNC	  = 0x00000002,
2499 	RGP_BARRIER_EXTERNAL_CMD_WAIT_EVENTS	  = 0x00000003,
2500 
2501 	/* Internal barrier reasons, i.e. implicit synchronization inserted by
2502 	 * the Vulkan driver. Range of valid values: [0xC0000000 ... 0xFFFFFFFE].
2503 	 */
2504 	RGP_BARRIER_INTERNAL_BASE                             = 0xC0000000,
2505 	RGP_BARRIER_INTERNAL_PRE_RESET_QUERY_POOL_SYNC        = RGP_BARRIER_INTERNAL_BASE + 0,
2506 	RGP_BARRIER_INTERNAL_POST_RESET_QUERY_POOL_SYNC       = RGP_BARRIER_INTERNAL_BASE + 1,
2507 	RGP_BARRIER_INTERNAL_GPU_EVENT_RECYCLE_STALL	      = RGP_BARRIER_INTERNAL_BASE + 2,
2508 	RGP_BARRIER_INTERNAL_PRE_COPY_QUERY_POOL_RESULTS_SYNC = RGP_BARRIER_INTERNAL_BASE + 3
2509 };
2510 
2511 void radv_describe_begin_cmd_buffer(struct radv_cmd_buffer *cmd_buffer);
2512 void radv_describe_end_cmd_buffer(struct radv_cmd_buffer *cmd_buffer);
2513 void radv_describe_draw(struct radv_cmd_buffer *cmd_buffer);
2514 void radv_describe_dispatch(struct radv_cmd_buffer *cmd_buffer, int x, int y, int z);
2515 void radv_describe_begin_render_pass_clear(struct radv_cmd_buffer *cmd_buffer,
2516 					   VkImageAspectFlagBits aspects);
2517 void radv_describe_end_render_pass_clear(struct radv_cmd_buffer *cmd_buffer);
2518 void radv_describe_barrier_start(struct radv_cmd_buffer *cmd_buffer,
2519 				 enum rgp_barrier_reason reason);
2520 void radv_describe_barrier_end(struct radv_cmd_buffer *cmd_buffer);
2521 void radv_describe_layout_transition(struct radv_cmd_buffer *cmd_buffer,
2522 				     const struct radv_barrier_data *barrier);
2523 
2524 struct radeon_winsys_sem;
2525 
2526 uint64_t radv_get_current_time(void);
2527 
2528 static inline uint32_t
2529 si_conv_gl_prim_to_vertices(unsigned gl_prim)
2530 {
2531 	switch (gl_prim) {
2532 	case 0: /* GL_POINTS */
2533 		return 1;
2534 	case 1: /* GL_LINES */
2535 	case 3: /* GL_LINE_STRIP */
2536 		return 2;
2537 	case 4: /* GL_TRIANGLES */
2538 	case 5: /* GL_TRIANGLE_STRIP */
2539 		return 3;
2540 	case 0xA: /* GL_LINE_STRIP_ADJACENCY_ARB */
2541 		return 4;
2542 	case 0xc: /* GL_TRIANGLES_ADJACENCY_ARB */
2543 		return 6;
2544 	case 7: /* GL_QUADS */
2545 		return V_028A6C_OUTPRIM_TYPE_TRISTRIP;
2546 	default:
2547 		assert(0);
2548 		return 0;
2549 	}
2550 }
2551 
2552 void radv_cmd_buffer_begin_render_pass(struct radv_cmd_buffer *cmd_buffer,
2553 				       const VkRenderPassBeginInfo *pRenderPassBegin);
2554 void radv_cmd_buffer_end_render_pass(struct radv_cmd_buffer *cmd_buffer);
2555 
2556 static inline uint32_t si_translate_prim(unsigned topology)
2557 {
2558 	switch (topology) {
2559 	case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
2560 		return V_008958_DI_PT_POINTLIST;
2561 	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
2562 		return V_008958_DI_PT_LINELIST;
2563 	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
2564 		return V_008958_DI_PT_LINESTRIP;
2565 	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
2566 		return V_008958_DI_PT_TRILIST;
2567 	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
2568 		return V_008958_DI_PT_TRISTRIP;
2569 	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
2570 		return V_008958_DI_PT_TRIFAN;
2571 	case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
2572 		return V_008958_DI_PT_LINELIST_ADJ;
2573 	case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
2574 		return V_008958_DI_PT_LINESTRIP_ADJ;
2575 	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
2576 		return V_008958_DI_PT_TRILIST_ADJ;
2577 	case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
2578 		return V_008958_DI_PT_TRISTRIP_ADJ;
2579 	case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
2580 		return V_008958_DI_PT_PATCH;
2581 	default:
2582 		assert(0);
2583 		return 0;
2584 	}
2585 }
2586 
2587 static inline uint32_t si_translate_stencil_op(enum VkStencilOp op)
2588 {
2589 	switch (op) {
2590 	case VK_STENCIL_OP_KEEP:
2591 		return V_02842C_STENCIL_KEEP;
2592 	case VK_STENCIL_OP_ZERO:
2593 		return V_02842C_STENCIL_ZERO;
2594 	case VK_STENCIL_OP_REPLACE:
2595 		return V_02842C_STENCIL_REPLACE_TEST;
2596 	case VK_STENCIL_OP_INCREMENT_AND_CLAMP:
2597 		return V_02842C_STENCIL_ADD_CLAMP;
2598 	case VK_STENCIL_OP_DECREMENT_AND_CLAMP:
2599 		return V_02842C_STENCIL_SUB_CLAMP;
2600 	case VK_STENCIL_OP_INVERT:
2601 		return V_02842C_STENCIL_INVERT;
2602 	case VK_STENCIL_OP_INCREMENT_AND_WRAP:
2603 		return V_02842C_STENCIL_ADD_WRAP;
2604 	case VK_STENCIL_OP_DECREMENT_AND_WRAP:
2605 		return V_02842C_STENCIL_SUB_WRAP;
2606 	default:
2607 		return 0;
2608 	}
2609 }
2610 
2611 #define RADV_DEFINE_HANDLE_CASTS(__radv_type, __VkType)		\
2612 								\
2613 	static inline struct __radv_type *			\
2614 	__radv_type ## _from_handle(__VkType _handle)		\
2615 	{							\
2616 		return (struct __radv_type *) _handle;		\
2617 	}							\
2618 								\
2619 	static inline __VkType					\
2620 	__radv_type ## _to_handle(struct __radv_type *_obj)	\
2621 	{							\
2622 		return (__VkType) _obj;				\
2623 	}
2624 
2625 #define RADV_DEFINE_NONDISP_HANDLE_CASTS(__radv_type, __VkType)		\
2626 									\
2627 	static inline struct __radv_type *				\
2628 	__radv_type ## _from_handle(__VkType _handle)			\
2629 	{								\
2630 		return (struct __radv_type *)(uintptr_t) _handle;	\
2631 	}								\
2632 									\
2633 	static inline __VkType						\
2634 	__radv_type ## _to_handle(struct __radv_type *_obj)		\
2635 	{								\
2636 		return (__VkType)(uintptr_t) _obj;			\
2637 	}
2638 
2639 #define RADV_FROM_HANDLE(__radv_type, __name, __handle)			\
2640 	struct __radv_type *__name = __radv_type ## _from_handle(__handle)
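
/* Example (sketch): typical use at the top of an entrypoint, converting the
 * Vulkan handles back into driver structs. The entrypoint name below is
 * hypothetical and only illustrates the pattern:
 *
 *	VkResult radv_SomeEntrypoint(VkDevice _device, VkImage _image)
 *	{
 *		RADV_FROM_HANDLE(radv_device, device, _device);
 *		RADV_FROM_HANDLE(radv_image, image, _image);
 *		...
 *	}
 */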
2641 
2642 RADV_DEFINE_HANDLE_CASTS(radv_cmd_buffer, VkCommandBuffer)
2643 RADV_DEFINE_HANDLE_CASTS(radv_device, VkDevice)
2644 RADV_DEFINE_HANDLE_CASTS(radv_instance, VkInstance)
2645 RADV_DEFINE_HANDLE_CASTS(radv_physical_device, VkPhysicalDevice)
2646 RADV_DEFINE_HANDLE_CASTS(radv_queue, VkQueue)
2647 
2648 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_cmd_pool, VkCommandPool)
2649 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_buffer, VkBuffer)
2650 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_buffer_view, VkBufferView)
2651 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_descriptor_pool, VkDescriptorPool)
2652 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_descriptor_set, VkDescriptorSet)
2653 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_descriptor_set_layout, VkDescriptorSetLayout)
2654 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_descriptor_update_template, VkDescriptorUpdateTemplate)
2655 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_device_memory, VkDeviceMemory)
2656 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_fence, VkFence)
2657 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_event, VkEvent)
2658 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_framebuffer, VkFramebuffer)
2659 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_image, VkImage)
2660 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_image_view, VkImageView)
2661 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_pipeline_cache, VkPipelineCache)
2662 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_pipeline, VkPipeline)
2663 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_pipeline_layout, VkPipelineLayout)
2664 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_query_pool, VkQueryPool)
2665 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_render_pass, VkRenderPass)
2666 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_sampler, VkSampler)
2667 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_sampler_ycbcr_conversion, VkSamplerYcbcrConversion)
2668 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_shader_module, VkShaderModule)
2669 RADV_DEFINE_NONDISP_HANDLE_CASTS(radv_semaphore, VkSemaphore)
2670 
2671 #endif /* RADV_PRIVATE_H */
2672