1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 */
27
28 #include <fcntl.h>
29 #include <stdbool.h>
30 #include <string.h>
31
32 #if defined(__FreeBSD__) || defined(__DragonFly__)
33 #include <sys/types.h>
34 #endif
35 #ifdef MAJOR_IN_MKDEV
36 #include <sys/mkdev.h>
37 #endif
38 #ifdef MAJOR_IN_SYSMACROS
39 #include <sys/sysmacros.h>
40 #endif
41
42 #ifdef __linux__
43 #include <sys/inotify.h>
44 #endif
45
46 #include "util/debug.h"
47 #include "util/disk_cache.h"
48 #include "radv_cs.h"
49 #include "radv_debug.h"
50 #include "radv_private.h"
51 #include "radv_shader.h"
52 #include "vk_util.h"
53 #ifdef _WIN32
54 typedef void *drmDevicePtr;
55 #include <io.h>
56 #else
57 #include <amdgpu.h>
58 #include <xf86drm.h>
59 #include "drm-uapi/amdgpu_drm.h"
60 #include "winsys/amdgpu/radv_amdgpu_winsys_public.h"
61 #endif
62 #include "util/build_id.h"
63 #include "util/debug.h"
64 #include "util/driconf.h"
65 #include "util/mesa-sha1.h"
66 #include "util/timespec.h"
67 #include "util/u_atomic.h"
68 #include "winsys/null/radv_null_winsys_public.h"
69 #include "git_sha1.h"
70 #include "sid.h"
71 #include "vk_format.h"
72 #include "vk_sync.h"
73 #include "vk_sync_dummy.h"
74 #include "vulkan/vk_icd.h"
75
76 #ifdef LLVM_AVAILABLE
77 #include "ac_llvm_util.h"
78 #endif
79
80 /* The number of IBs per submit isn't infinite; it depends on the ring type
81  * (i.e. some initial setup is needed for a submit) and the number of IBs (4 DW each).
82 * This limit is arbitrary but should be safe for now. Ideally, we should get
83 * this limit from the KMD.
84 */
85 #define RADV_MAX_IBS_PER_SUBMIT 192
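/* Rough illustration (not a KMD-provided figure): each IB reference is assumed to
 * cost about 4 DW in the submission, so 192 IBs amount to roughly 768 DW on top of
 * the per-ring setup, which is assumed to stay well within what the kernel accepts. */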
86
87 /* The "RAW" clocks on Linux are called "FAST" on FreeBSD */
88 #if !defined(CLOCK_MONOTONIC_RAW) && defined(CLOCK_MONOTONIC_FAST)
89 #define CLOCK_MONOTONIC_RAW CLOCK_MONOTONIC_FAST
90 #endif
91
92 static VkResult radv_queue_submit(struct vk_queue *vqueue, struct vk_queue_submit *submission);
93
94 uint64_t
95 radv_get_current_time(void)
96 {
97 return os_time_get_nano();
98 }
99
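/* Derive the pipeline-cache UUID as the first VK_UUID_SIZE bytes of a SHA-1 over
 * the build id of the code containing this function (plus LLVM's when the LLVM
 * backend is used), the GPU family and the pointer size, so caches are invalidated
 * whenever the driver, the compiler or the target changes. */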
100 static int
101 radv_device_get_cache_uuid(struct radv_physical_device *pdevice, void *uuid)
102 {
103 enum radeon_family family = pdevice->rad_info.family;
104 struct mesa_sha1 ctx;
105 unsigned char sha1[20];
106 unsigned ptr_size = sizeof(void *);
107
108 memset(uuid, 0, VK_UUID_SIZE);
109 _mesa_sha1_init(&ctx);
110
111 if (!disk_cache_get_function_identifier(radv_device_get_cache_uuid, &ctx)
112 #ifdef LLVM_AVAILABLE
113 || (pdevice->use_llvm &&
114 !disk_cache_get_function_identifier(LLVMInitializeAMDGPUTargetInfo, &ctx))
115 #endif
116 )
117 return -1;
118
119 _mesa_sha1_update(&ctx, &family, sizeof(family));
120 _mesa_sha1_update(&ctx, &ptr_size, sizeof(ptr_size));
121 _mesa_sha1_final(&ctx, sha1);
122
123 memcpy(uuid, sha1, VK_UUID_SIZE);
124 return 0;
125 }
126
127 static void
128 radv_get_driver_uuid(void *uuid)
129 {
130 ac_compute_driver_uuid(uuid, VK_UUID_SIZE);
131 }
132
133 static void
134 radv_get_device_uuid(struct radeon_info *info, void *uuid)
135 {
136 ac_compute_device_uuid(info, uuid, VK_UUID_SIZE);
137 }
138
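/* VRAM size as reported to applications: the override_vram_size drirc option
 * (in MiB) can shrink it, but it never grows beyond the real size. */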
139 static uint64_t
140 radv_get_adjusted_vram_size(struct radv_physical_device *device)
141 {
142 int ov = driQueryOptioni(&device->instance->dri_options, "override_vram_size");
143 if (ov >= 0)
144 return MIN2(device->rad_info.vram_size, (uint64_t)ov << 20);
145 return device->rad_info.vram_size;
146 }
147
148 static uint64_t
149 radv_get_visible_vram_size(struct radv_physical_device *device)
150 {
151 return MIN2(radv_get_adjusted_vram_size(device), device->rad_info.vram_vis_size);
152 }
153
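/* Size of the CPU-invisible portion of VRAM (0 when all of VRAM is CPU-visible). */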
154 static uint64_t
155 radv_get_vram_size(struct radv_physical_device *device)
156 {
157 uint64_t total_size = radv_get_adjusted_vram_size(device);
158 return total_size - MIN2(total_size, device->rad_info.vram_vis_size);
159 }
160
161 enum radv_heap {
162 RADV_HEAP_VRAM = 1 << 0,
163 RADV_HEAP_GTT = 1 << 1,
164 RADV_HEAP_VRAM_VIS = 1 << 2,
165 RADV_HEAP_MAX = 1 << 3,
166 };
167
168 static void
169 radv_physical_device_init_mem_types(struct radv_physical_device *device)
170 {
171 uint64_t visible_vram_size = radv_get_visible_vram_size(device);
172 uint64_t vram_size = radv_get_vram_size(device);
173 uint64_t gtt_size = device->rad_info.gart_size;
174 int vram_index = -1, visible_vram_index = -1, gart_index = -1;
175
176 device->memory_properties.memoryHeapCount = 0;
177 device->heaps = 0;
178
179 if (!device->rad_info.has_dedicated_vram) {
180 /* On APUs, the carveout is usually too small for games that request a minimum VRAM size
181        * greater than it. To work around this, we compute the total available memory size (GTT +
182 * visible VRAM size) and report 2/3 as VRAM and 1/3 as GTT.
183 */
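      /* Worked example with hypothetical sizes: a 512 MiB carveout plus 7.5 GiB of
       * GTT gives an 8 GiB total, reported as ~5.33 GiB of visible VRAM (aligned to
       * the GART page size) and the remaining ~2.67 GiB as GTT. */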
184 const uint64_t total_size = gtt_size + visible_vram_size;
185 visible_vram_size = align64((total_size * 2) / 3, device->rad_info.gart_page_size);
186 gtt_size = total_size - visible_vram_size;
187 vram_size = 0;
188 }
189
190 /* Only get a VRAM heap if it is significant, not if it is a 16 MiB
191 * remainder above visible VRAM. */
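   /* "Significant" here means the CPU-invisible part is at least 1/9th of the
    * visible VRAM size. */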
192 if (vram_size > 0 && vram_size * 9 >= visible_vram_size) {
193 vram_index = device->memory_properties.memoryHeapCount++;
194 device->heaps |= RADV_HEAP_VRAM;
195 device->memory_properties.memoryHeaps[vram_index] = (VkMemoryHeap){
196 .size = vram_size,
197 .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
198 };
199 }
200
201 if (gtt_size > 0) {
202 gart_index = device->memory_properties.memoryHeapCount++;
203 device->heaps |= RADV_HEAP_GTT;
204 device->memory_properties.memoryHeaps[gart_index] = (VkMemoryHeap){
205 .size = gtt_size,
206 .flags = 0,
207 };
208 }
209
210 if (visible_vram_size) {
211 visible_vram_index = device->memory_properties.memoryHeapCount++;
212 device->heaps |= RADV_HEAP_VRAM_VIS;
213 device->memory_properties.memoryHeaps[visible_vram_index] = (VkMemoryHeap){
214 .size = visible_vram_size,
215 .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
216 };
217 }
218
219 unsigned type_count = 0;
220
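   /* When the corresponding heaps exist, memory types are advertised in this order:
    * device-local VRAM without CPU access, write-combined GTT, host-visible VRAM,
    * and cached GTT. */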
221 if (vram_index >= 0 || visible_vram_index >= 0) {
222 device->memory_domains[type_count] = RADEON_DOMAIN_VRAM;
223 device->memory_flags[type_count] = RADEON_FLAG_NO_CPU_ACCESS;
224 device->memory_properties.memoryTypes[type_count++] = (VkMemoryType){
225 .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
226 .heapIndex = vram_index >= 0 ? vram_index : visible_vram_index,
227 };
228 }
229
230 if (gart_index >= 0) {
231 device->memory_domains[type_count] = RADEON_DOMAIN_GTT;
232 device->memory_flags[type_count] = RADEON_FLAG_GTT_WC | RADEON_FLAG_CPU_ACCESS;
233 device->memory_properties.memoryTypes[type_count++] = (VkMemoryType){
234 .propertyFlags =
235 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
236 .heapIndex = gart_index,
237 };
238 }
239 if (visible_vram_index >= 0) {
240 device->memory_domains[type_count] = RADEON_DOMAIN_VRAM;
241 device->memory_flags[type_count] = RADEON_FLAG_CPU_ACCESS;
242 device->memory_properties.memoryTypes[type_count++] = (VkMemoryType){
243 .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
244 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
245 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
246 .heapIndex = visible_vram_index,
247 };
248 }
249
250 if (gart_index >= 0) {
251 device->memory_domains[type_count] = RADEON_DOMAIN_GTT;
252 device->memory_flags[type_count] = RADEON_FLAG_CPU_ACCESS;
253 device->memory_properties.memoryTypes[type_count++] = (VkMemoryType){
254 .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
255 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
256 .heapIndex = gart_index,
257 };
258 }
259 device->memory_properties.memoryTypeCount = type_count;
260
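   /* Duplicate every host-visible type (and the plain device-local one) with the
    * DEVICE_COHERENT/DEVICE_UNCACHED flags added; they use the same heaps but are
    * mapped with an uncached virtual address range. */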
261 if (device->rad_info.has_l2_uncached) {
262 for (int i = 0; i < device->memory_properties.memoryTypeCount; i++) {
263 VkMemoryType mem_type = device->memory_properties.memoryTypes[i];
264
265 if ((mem_type.propertyFlags &
266 (VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)) ||
267 mem_type.propertyFlags == VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) {
268
269 VkMemoryPropertyFlags property_flags = mem_type.propertyFlags |
270 VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD |
271 VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD;
272
273 device->memory_domains[type_count] = device->memory_domains[i];
274 device->memory_flags[type_count] = device->memory_flags[i] | RADEON_FLAG_VA_UNCACHED;
275 device->memory_properties.memoryTypes[type_count++] = (VkMemoryType){
276 .propertyFlags = property_flags,
277 .heapIndex = mem_type.heapIndex,
278 };
279 }
280 }
281 device->memory_properties.memoryTypeCount = type_count;
282 }
283 }
284
285 static const char *
286 radv_get_compiler_string(struct radv_physical_device *pdevice)
287 {
288 if (!pdevice->use_llvm) {
289 /* Some games like SotTR apply shader workarounds if the LLVM
290 * version is too old or if the LLVM version string is
291        * missing. This gives a 2-5% performance improvement with SotTR and ACO.
292 */
293 if (driQueryOptionb(&pdevice->instance->dri_options, "radv_report_llvm9_version_string")) {
294 return " (LLVM 9.0.1)";
295 }
296
297 return "";
298 }
299
300 #ifdef LLVM_AVAILABLE
301 return " (LLVM " MESA_LLVM_VERSION_STRING ")";
302 #else
303 unreachable("LLVM is not available");
304 #endif
305 }
306
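/* Parse an integer (decimal, hex or octal, as accepted by strtol with base 0) from
 * the environment variable `name`, falling back to default_value when it is unset
 * or contains no digits. */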
307 int
308 radv_get_int_debug_option(const char *name, int default_value)
309 {
310 const char *str;
311 int result;
312
313 str = getenv(name);
314 if (!str) {
315 result = default_value;
316 } else {
317 char *endptr;
318
319 result = strtol(str, &endptr, 0);
320 if (str == endptr) {
321          /* No digits found. */
322 result = default_value;
323 }
324 }
325
326 return result;
327 }
328
329 static bool
330 radv_thread_trace_enabled()
331 {
332 return radv_get_int_debug_option("RADV_THREAD_TRACE", -1) >= 0 ||
333 getenv("RADV_THREAD_TRACE_TRIGGER");
334 }
335
336 static bool
337 radv_spm_trace_enabled()
338 {
339 return radv_thread_trace_enabled() &&
340 debug_get_bool_option("RADV_THREAD_TRACE_CACHE_COUNTERS", false);
341 }
342
343 #if defined(VK_USE_PLATFORM_WAYLAND_KHR) || defined(VK_USE_PLATFORM_XCB_KHR) || \
344 defined(VK_USE_PLATFORM_XLIB_KHR) || defined(VK_USE_PLATFORM_DISPLAY_KHR)
345 #define RADV_USE_WSI_PLATFORM
346 #endif
347
348 #ifdef ANDROID
349 #define RADV_API_VERSION VK_MAKE_VERSION(1, 1, VK_HEADER_VERSION)
350 #else
351 #define RADV_API_VERSION VK_MAKE_VERSION(1, 3, VK_HEADER_VERSION)
352 #endif
353
354 VKAPI_ATTR VkResult VKAPI_CALL
355 radv_EnumerateInstanceVersion(uint32_t *pApiVersion)
356 {
357 *pApiVersion = RADV_API_VERSION;
358 return VK_SUCCESS;
359 }
360
361 static const struct vk_instance_extension_table radv_instance_extensions_supported = {
362 .KHR_device_group_creation = true,
363 .KHR_external_fence_capabilities = true,
364 .KHR_external_memory_capabilities = true,
365 .KHR_external_semaphore_capabilities = true,
366 .KHR_get_physical_device_properties2 = true,
367 .EXT_debug_report = true,
368 /* EXT_debug_utils is exposed only if thread trace is enabled. See radv_CreateInstance */
369
370 #ifdef RADV_USE_WSI_PLATFORM
371 .KHR_get_surface_capabilities2 = true,
372 .KHR_surface = true,
373 .KHR_surface_protected_capabilities = true,
374 #endif
375 #ifdef VK_USE_PLATFORM_WAYLAND_KHR
376 .KHR_wayland_surface = true,
377 #endif
378 #ifdef VK_USE_PLATFORM_XCB_KHR
379 .KHR_xcb_surface = true,
380 #endif
381 #ifdef VK_USE_PLATFORM_XLIB_KHR
382 .KHR_xlib_surface = true,
383 #endif
384 #ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT
385 .EXT_acquire_xlib_display = true,
386 #endif
387 #ifdef VK_USE_PLATFORM_DISPLAY_KHR
388 .KHR_display = true,
389 .KHR_get_display_properties2 = true,
390 .EXT_direct_mode_display = true,
391 .EXT_display_surface_counter = true,
392 .EXT_acquire_drm_display = true,
393 #endif
394 };
395
396 static void
397 radv_physical_device_get_supported_extensions(const struct radv_physical_device *device,
398 struct vk_device_extension_table *ext)
399 {
400 *ext = (struct vk_device_extension_table){
401 .KHR_8bit_storage = true,
402 .KHR_16bit_storage = true,
403 .KHR_acceleration_structure = !!(device->instance->perftest_flags & RADV_PERFTEST_RT),
404 .KHR_bind_memory2 = true,
405 .KHR_buffer_device_address = true,
406 .KHR_copy_commands2 = true,
407 .KHR_create_renderpass2 = true,
408 .KHR_dedicated_allocation = true,
409 .KHR_deferred_host_operations = true,
410 .KHR_depth_stencil_resolve = true,
411 .KHR_descriptor_update_template = true,
412 .KHR_device_group = true,
413 .KHR_draw_indirect_count = true,
414 .KHR_driver_properties = true,
415 .KHR_dynamic_rendering = true,
416 .KHR_external_fence = true,
417 .KHR_external_fence_fd = true,
418 .KHR_external_memory = true,
419 .KHR_external_memory_fd = true,
420 .KHR_external_semaphore = true,
421 .KHR_external_semaphore_fd = true,
422 .KHR_format_feature_flags2 = true,
423 .KHR_fragment_shading_rate = device->rad_info.chip_class >= GFX10_3,
424 .KHR_get_memory_requirements2 = true,
425 .KHR_image_format_list = true,
426 .KHR_imageless_framebuffer = true,
427 #ifdef RADV_USE_WSI_PLATFORM
428 .KHR_incremental_present = true,
429 #endif
430 .KHR_maintenance1 = true,
431 .KHR_maintenance2 = true,
432 .KHR_maintenance3 = true,
433 .KHR_maintenance4 = true,
434 .KHR_multiview = true,
435 .KHR_pipeline_executable_properties = true,
436 .KHR_pipeline_library =
437 (device->instance->perftest_flags & RADV_PERFTEST_RT) && !device->use_llvm,
438 .KHR_push_descriptor = true,
439 .KHR_ray_query =
440 (device->instance->perftest_flags & RADV_PERFTEST_RT) && !device->use_llvm,
441 .KHR_ray_tracing_pipeline =
442 (device->instance->perftest_flags & RADV_PERFTEST_RT) && !device->use_llvm,
443 .KHR_relaxed_block_layout = true,
444 .KHR_sampler_mirror_clamp_to_edge = true,
445 .KHR_sampler_ycbcr_conversion = true,
446 .KHR_separate_depth_stencil_layouts = true,
447 .KHR_shader_atomic_int64 = true,
448 .KHR_shader_clock = true,
449 .KHR_shader_draw_parameters = true,
450 .KHR_shader_float16_int8 = true,
451 .KHR_shader_float_controls = true,
452 .KHR_shader_integer_dot_product = true,
453 .KHR_shader_non_semantic_info = true,
454 .KHR_shader_subgroup_extended_types = true,
455 .KHR_shader_subgroup_uniform_control_flow = true,
456 .KHR_shader_terminate_invocation = true,
457 .KHR_spirv_1_4 = true,
458 .KHR_storage_buffer_storage_class = true,
459 #ifdef RADV_USE_WSI_PLATFORM
460 .KHR_swapchain = true,
461 .KHR_swapchain_mutable_format = true,
462 #endif
463 .KHR_synchronization2 = true,
464 .KHR_timeline_semaphore = true,
465 .KHR_uniform_buffer_standard_layout = true,
466 .KHR_variable_pointers = true,
467 .KHR_vulkan_memory_model = true,
468 .KHR_workgroup_memory_explicit_layout = true,
469 .KHR_zero_initialize_workgroup_memory = true,
470 .EXT_4444_formats = true,
471 .EXT_buffer_device_address = true,
472 .EXT_calibrated_timestamps = RADV_SUPPORT_CALIBRATED_TIMESTAMPS,
473 .EXT_color_write_enable = true,
474 .EXT_conditional_rendering = true,
475 .EXT_conservative_rasterization = device->rad_info.chip_class >= GFX9,
476 .EXT_custom_border_color = true,
477 .EXT_debug_marker = radv_thread_trace_enabled(),
478 .EXT_depth_clip_control = true,
479 .EXT_depth_clip_enable = true,
480 .EXT_depth_range_unrestricted = true,
481 .EXT_descriptor_indexing = true,
482 .EXT_discard_rectangles = true,
483 #ifdef VK_USE_PLATFORM_DISPLAY_KHR
484 .EXT_display_control = true,
485 #endif
486 .EXT_extended_dynamic_state = true,
487 .EXT_extended_dynamic_state2 = true,
488 .EXT_external_memory_dma_buf = true,
489 .EXT_external_memory_host = device->rad_info.has_userptr,
490 .EXT_global_priority = true,
491 .EXT_global_priority_query = true,
492 .EXT_host_query_reset = true,
493 .EXT_image_drm_format_modifier = device->rad_info.chip_class >= GFX9,
494 .EXT_image_robustness = true,
495 .EXT_image_view_min_lod = true,
496 .EXT_index_type_uint8 = device->rad_info.chip_class >= GFX8,
497 .EXT_inline_uniform_block = true,
498 .EXT_line_rasterization = true,
499 .EXT_memory_budget = true,
500 .EXT_memory_priority = true,
501 .EXT_multi_draw = true,
502 .EXT_pci_bus_info = true,
503 #ifndef _WIN32
504 .EXT_physical_device_drm = true,
505 #endif
506 .EXT_pipeline_creation_cache_control = true,
507 .EXT_pipeline_creation_feedback = true,
508 .EXT_post_depth_coverage = device->rad_info.chip_class >= GFX10,
509 .EXT_primitive_topology_list_restart = true,
510 .EXT_private_data = true,
511 .EXT_provoking_vertex = true,
512 .EXT_queue_family_foreign = true,
513 .EXT_robustness2 = true,
514 .EXT_sample_locations = device->rad_info.chip_class < GFX10,
515 .EXT_sampler_filter_minmax = true,
516 .EXT_scalar_block_layout = device->rad_info.chip_class >= GFX7,
517 .EXT_shader_atomic_float = true,
518 #ifdef LLVM_AVAILABLE
519 .EXT_shader_atomic_float2 = !device->use_llvm || LLVM_VERSION_MAJOR >= 14,
520 #else
521 .EXT_shader_atomic_float2 = true,
522 #endif
523 .EXT_shader_demote_to_helper_invocation = true,
524 .EXT_shader_image_atomic_int64 = true,
525 .EXT_shader_stencil_export = true,
526 .EXT_shader_subgroup_ballot = true,
527 .EXT_shader_subgroup_vote = true,
528 .EXT_shader_viewport_index_layer = true,
529 .EXT_subgroup_size_control = true,
530 .EXT_texel_buffer_alignment = true,
531 .EXT_transform_feedback = true,
532 .EXT_vertex_attribute_divisor = true,
533 .EXT_vertex_input_dynamic_state = !device->use_llvm,
534 .EXT_ycbcr_image_arrays = true,
535 .AMD_buffer_marker = true,
536 .AMD_device_coherent_memory = true,
537 .AMD_draw_indirect_count = true,
538 .AMD_gcn_shader = true,
539 .AMD_gpu_shader_half_float = device->rad_info.has_packed_math_16bit,
540 .AMD_gpu_shader_int16 = device->rad_info.has_packed_math_16bit,
541 .AMD_memory_overallocation_behavior = true,
542 .AMD_mixed_attachment_samples = true,
543 .AMD_rasterization_order = device->rad_info.has_out_of_order_rast,
544 .AMD_shader_ballot = true,
545 .AMD_shader_core_properties = true,
546 .AMD_shader_core_properties2 = true,
547 .AMD_shader_explicit_vertex_parameter = true,
548 .AMD_shader_fragment_mask = true,
549 .AMD_shader_image_load_store_lod = true,
550 .AMD_shader_trinary_minmax = true,
551 .AMD_texture_gather_bias_lod = true,
552 #ifdef ANDROID
553 .ANDROID_external_memory_android_hardware_buffer = RADV_SUPPORT_ANDROID_HARDWARE_BUFFER,
554 .ANDROID_native_buffer = true,
555 #endif
556 .GOOGLE_decorate_string = true,
557 .GOOGLE_hlsl_functionality1 = true,
558 .GOOGLE_user_type = true,
559 .NV_compute_shader_derivatives = true,
560 .NV_mesh_shader = device->use_ngg && device->rad_info.chip_class >= GFX10_3 &&
561 device->instance->perftest_flags & RADV_PERFTEST_NV_MS && !device->use_llvm,
562 /* Undocumented extension purely for vkd3d-proton. This check is to prevent anyone else from
563 * using it.
564 */
565 .VALVE_descriptor_set_host_mapping =
566 device->vk.instance->app_info.engine_name &&
567 strcmp(device->vk.instance->app_info.engine_name, "vkd3d") == 0,
568 .VALVE_mutable_descriptor_type = true,
569 };
570 }
571
572 static bool
573 radv_is_conformant(const struct radv_physical_device *pdevice)
574 {
575 return pdevice->rad_info.chip_class >= GFX8;
576 }
577
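/* Build the mapping from Vulkan queue family index to RADV queue type: family 0 is
 * always the GENERAL (gfx) queue, a COMPUTE family follows when the kernel exposes
 * compute rings and it is not disabled, and unused slots keep an out-of-range
 * sentinel value. */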
578 static void
579 radv_physical_device_init_queue_table(struct radv_physical_device *pdevice)
580 {
581 int idx = 0;
582 pdevice->vk_queue_to_radv[idx] = RADV_QUEUE_GENERAL;
583 idx++;
584
585 for (unsigned i = 1; i < RADV_MAX_QUEUE_FAMILIES; i++)
586 pdevice->vk_queue_to_radv[i] = RADV_MAX_QUEUE_FAMILIES + 1;
587
588 if (pdevice->rad_info.num_rings[RING_COMPUTE] > 0 &&
589 !(pdevice->instance->debug_flags & RADV_DEBUG_NO_COMPUTE_QUEUE)) {
590 pdevice->vk_queue_to_radv[idx] = RADV_QUEUE_COMPUTE;
591 idx++;
592 }
593 pdevice->num_queues = idx;
594 }
595
596 static VkResult
597 radv_physical_device_try_create(struct radv_instance *instance, drmDevicePtr drm_device,
598 struct radv_physical_device **device_out)
599 {
600 VkResult result;
601 int fd = -1;
602 int master_fd = -1;
603
604 #ifdef _WIN32
605 assert(drm_device == NULL);
606 #else
607 if (drm_device) {
608 const char *path = drm_device->nodes[DRM_NODE_RENDER];
609 drmVersionPtr version;
610
611 fd = open(path, O_RDWR | O_CLOEXEC);
612 if (fd < 0) {
613 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
614 "Could not open device %s: %m", path);
615 }
616
617 version = drmGetVersion(fd);
618 if (!version) {
619 close(fd);
620
621 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
622 "Could not get the kernel driver version for device %s: %m", path);
623 }
624
625 if (strcmp(version->name, "amdgpu")) {
626 drmFreeVersion(version);
627 close(fd);
628
629 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
630 "Device '%s' is not using the AMDGPU kernel driver: %m", path);
631 }
632 drmFreeVersion(version);
633
634 if (instance->debug_flags & RADV_DEBUG_STARTUP)
635 radv_logi("Found compatible device '%s'.", path);
636 }
637 #endif
638
639 struct radv_physical_device *device = vk_zalloc2(&instance->vk.alloc, NULL, sizeof(*device), 8,
640 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
641 if (!device) {
642 result = vk_error(instance, VK_ERROR_OUT_OF_HOST_MEMORY);
643 goto fail_fd;
644 }
645
646 struct vk_physical_device_dispatch_table dispatch_table;
647 vk_physical_device_dispatch_table_from_entrypoints(&dispatch_table,
648 &radv_physical_device_entrypoints, true);
649 vk_physical_device_dispatch_table_from_entrypoints(&dispatch_table,
650 &wsi_physical_device_entrypoints, false);
651
652 result = vk_physical_device_init(&device->vk, &instance->vk, NULL, &dispatch_table);
653 if (result != VK_SUCCESS) {
654 goto fail_alloc;
655 }
656
657 device->instance = instance;
658
659 #ifdef _WIN32
660 device->ws = radv_null_winsys_create();
661 #else
662 if (drm_device) {
663 bool reserve_vmid = radv_thread_trace_enabled();
664
665 device->ws = radv_amdgpu_winsys_create(fd, instance->debug_flags, instance->perftest_flags,
666 reserve_vmid);
667 } else {
668 device->ws = radv_null_winsys_create();
669 }
670 #endif
671
672 if (!device->ws) {
673 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED, "failed to initialize winsys");
674 goto fail_base;
675 }
676
677 device->vk.supported_sync_types = device->ws->get_sync_types(device->ws);
678
679 #ifndef _WIN32
680 if (drm_device && instance->vk.enabled_extensions.KHR_display) {
681 master_fd = open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
682 if (master_fd >= 0) {
683 uint32_t accel_working = 0;
684 struct drm_amdgpu_info request = {.return_pointer = (uintptr_t)&accel_working,
685 .return_size = sizeof(accel_working),
686 .query = AMDGPU_INFO_ACCEL_WORKING};
687
688 if (drmCommandWrite(master_fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info)) <
689 0 ||
690 !accel_working) {
691 close(master_fd);
692 master_fd = -1;
693 }
694 }
695 }
696 #endif
697
698 device->master_fd = master_fd;
699 device->local_fd = fd;
700 device->ws->query_info(device->ws, &device->rad_info);
701
702 device->use_llvm = instance->debug_flags & RADV_DEBUG_LLVM;
703 #ifndef LLVM_AVAILABLE
704 if (device->use_llvm) {
705 fprintf(stderr, "ERROR: LLVM compiler backend selected for radv, but LLVM support was not "
706 "enabled at build time.\n");
707 abort();
708 }
709 #endif
710
711 #ifdef ANDROID
712 device->emulate_etc2 = !radv_device_supports_etc(device);
713 #else
714 device->emulate_etc2 = !radv_device_supports_etc(device) &&
715 driQueryOptionb(&device->instance->dri_options, "radv_require_etc2");
716 #endif
717
718 snprintf(device->name, sizeof(device->name), "AMD RADV %s%s", device->rad_info.name,
719 radv_get_compiler_string(device));
720
721 #ifdef ENABLE_SHADER_CACHE
722 if (radv_device_get_cache_uuid(device, device->cache_uuid)) {
723 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED, "cannot generate UUID");
724 goto fail_wsi;
725 }
726
727    /* The GPU id is already embedded in the UUID, so we just pass "radv"
728 * when creating the cache.
729 */
730 char buf[VK_UUID_SIZE * 2 + 1];
731 disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
732 device->disk_cache = disk_cache_create(device->name, buf, 0);
733 #endif
734
735 if (!radv_is_conformant(device))
736 vk_warn_non_conformant_implementation("radv");
737
738 radv_get_driver_uuid(&device->driver_uuid);
739 radv_get_device_uuid(&device->rad_info, &device->device_uuid);
740
741 device->out_of_order_rast_allowed =
742 device->rad_info.has_out_of_order_rast &&
743 !(device->instance->debug_flags & RADV_DEBUG_NO_OUT_OF_ORDER);
744
745 device->dcc_msaa_allowed = (device->instance->perftest_flags & RADV_PERFTEST_DCC_MSAA);
746
747 device->use_ngg = device->rad_info.chip_class >= GFX10 &&
748 device->rad_info.family != CHIP_NAVI14 &&
749 !(device->instance->debug_flags & RADV_DEBUG_NO_NGG);
750
751 device->use_ngg_culling =
752 device->use_ngg &&
753 device->rad_info.max_render_backends > 1 &&
754 (device->rad_info.chip_class >= GFX10_3 ||
755 (device->instance->perftest_flags & RADV_PERFTEST_NGGC)) &&
756 !(device->instance->debug_flags & RADV_DEBUG_NO_NGGC);
757
758 device->use_ngg_streamout = false;
759
760 /* Determine the number of threads per wave for all stages. */
761 device->cs_wave_size = 64;
762 device->ps_wave_size = 64;
763 device->ge_wave_size = 64;
764 device->rt_wave_size = 64;
765
766 if (device->rad_info.chip_class >= GFX10) {
767 if (device->instance->perftest_flags & RADV_PERFTEST_CS_WAVE_32)
768 device->cs_wave_size = 32;
769
770       /* For pixel shaders, wave64 is recommended. */
771 if (device->instance->perftest_flags & RADV_PERFTEST_PS_WAVE_32)
772 device->ps_wave_size = 32;
773
774 if (device->instance->perftest_flags & RADV_PERFTEST_GE_WAVE_32)
775 device->ge_wave_size = 32;
776
777 if (!(device->instance->perftest_flags & RADV_PERFTEST_RT_WAVE_64))
778 device->rt_wave_size = 32;
779 }
780
781 radv_physical_device_init_mem_types(device);
782
783 radv_physical_device_get_supported_extensions(device, &device->vk.supported_extensions);
784
785 radv_get_nir_options(device);
786
787 #ifndef _WIN32
788 if (drm_device) {
789 struct stat primary_stat = {0}, render_stat = {0};
790
791 device->available_nodes = drm_device->available_nodes;
792 device->bus_info = *drm_device->businfo.pci;
793
794 if ((drm_device->available_nodes & (1 << DRM_NODE_PRIMARY)) &&
795 stat(drm_device->nodes[DRM_NODE_PRIMARY], &primary_stat) != 0) {
796 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
797 "failed to stat DRM primary node %s",
798 drm_device->nodes[DRM_NODE_PRIMARY]);
799 goto fail_disk_cache;
800 }
801 device->primary_devid = primary_stat.st_rdev;
802
803 if ((drm_device->available_nodes & (1 << DRM_NODE_RENDER)) &&
804 stat(drm_device->nodes[DRM_NODE_RENDER], &render_stat) != 0) {
805 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
806 "failed to stat DRM render node %s",
807 drm_device->nodes[DRM_NODE_RENDER]);
808 goto fail_disk_cache;
809 }
810 device->render_devid = render_stat.st_rdev;
811 }
812 #endif
813
814 if ((device->instance->debug_flags & RADV_DEBUG_INFO))
815 ac_print_gpu_info(&device->rad_info, stdout);
816
817 radv_physical_device_init_queue_table(device);
818
819 /* The WSI is structured as a layer on top of the driver, so this has
820 * to be the last part of initialization (at least until we get other
821 * semi-layers).
822 */
823 result = radv_init_wsi(device);
824 if (result != VK_SUCCESS) {
825 vk_error(instance, result);
826 goto fail_disk_cache;
827 }
828
829 *device_out = device;
830
831 return VK_SUCCESS;
832
833 fail_disk_cache:
834 disk_cache_destroy(device->disk_cache);
835 #ifdef ENABLE_SHADER_CACHE
836 fail_wsi:
837 #endif
838 device->ws->destroy(device->ws);
839 fail_base:
840 vk_physical_device_finish(&device->vk);
841 fail_alloc:
842 vk_free(&instance->vk.alloc, device);
843 fail_fd:
844 if (fd != -1)
845 close(fd);
846 if (master_fd != -1)
847 close(master_fd);
848 return result;
849 }
850
851 static void
852 radv_physical_device_destroy(struct radv_physical_device *device)
853 {
854 radv_finish_wsi(device);
855 device->ws->destroy(device->ws);
856 disk_cache_destroy(device->disk_cache);
857 if (device->local_fd != -1)
858 close(device->local_fd);
859 if (device->master_fd != -1)
860 close(device->master_fd);
861 vk_physical_device_finish(&device->vk);
862 vk_free(&device->instance->vk.alloc, device);
863 }
864
865 static const struct debug_control radv_debug_options[] = {
866 {"nofastclears", RADV_DEBUG_NO_FAST_CLEARS},
867 {"nodcc", RADV_DEBUG_NO_DCC},
868 {"shaders", RADV_DEBUG_DUMP_SHADERS},
869 {"nocache", RADV_DEBUG_NO_CACHE},
870 {"shaderstats", RADV_DEBUG_DUMP_SHADER_STATS},
871 {"nohiz", RADV_DEBUG_NO_HIZ},
872 {"nocompute", RADV_DEBUG_NO_COMPUTE_QUEUE},
873 {"allbos", RADV_DEBUG_ALL_BOS},
874 {"noibs", RADV_DEBUG_NO_IBS},
875 {"spirv", RADV_DEBUG_DUMP_SPIRV},
876 {"vmfaults", RADV_DEBUG_VM_FAULTS},
877 {"zerovram", RADV_DEBUG_ZERO_VRAM},
878 {"syncshaders", RADV_DEBUG_SYNC_SHADERS},
879 {"preoptir", RADV_DEBUG_PREOPTIR},
880 {"nodynamicbounds", RADV_DEBUG_NO_DYNAMIC_BOUNDS},
881 {"nooutoforder", RADV_DEBUG_NO_OUT_OF_ORDER},
882 {"info", RADV_DEBUG_INFO},
883 {"startup", RADV_DEBUG_STARTUP},
884 {"checkir", RADV_DEBUG_CHECKIR},
885 {"nobinning", RADV_DEBUG_NOBINNING},
886 {"nongg", RADV_DEBUG_NO_NGG},
887 {"metashaders", RADV_DEBUG_DUMP_META_SHADERS},
888 {"nomemorycache", RADV_DEBUG_NO_MEMORY_CACHE},
889 {"discardtodemote", RADV_DEBUG_DISCARD_TO_DEMOTE},
890 {"llvm", RADV_DEBUG_LLVM},
891 {"forcecompress", RADV_DEBUG_FORCE_COMPRESS},
892 {"hang", RADV_DEBUG_HANG},
893 {"img", RADV_DEBUG_IMG},
894 {"noumr", RADV_DEBUG_NO_UMR},
895 {"invariantgeom", RADV_DEBUG_INVARIANT_GEOM},
896 {"splitfma", RADV_DEBUG_SPLIT_FMA},
897 {"nodisplaydcc", RADV_DEBUG_NO_DISPLAY_DCC},
898 {"notccompatcmask", RADV_DEBUG_NO_TC_COMPAT_CMASK},
899 {"novrsflatshading", RADV_DEBUG_NO_VRS_FLAT_SHADING},
900 {"noatocdithering", RADV_DEBUG_NO_ATOC_DITHERING},
901 {"nonggc", RADV_DEBUG_NO_NGGC},
902 {"prologs", RADV_DEBUG_DUMP_PROLOGS},
903 {"nodma", RADV_DEBUG_NO_DMA_BLIT},
904 {NULL, 0}};
905
906 const char *
907 radv_get_debug_option_name(int id)
908 {
909 assert(id < ARRAY_SIZE(radv_debug_options) - 1);
910 return radv_debug_options[id].string;
911 }
912
913 static const struct debug_control radv_perftest_options[] = {{"localbos", RADV_PERFTEST_LOCAL_BOS},
914 {"dccmsaa", RADV_PERFTEST_DCC_MSAA},
915 {"bolist", RADV_PERFTEST_BO_LIST},
916 {"cswave32", RADV_PERFTEST_CS_WAVE_32},
917 {"pswave32", RADV_PERFTEST_PS_WAVE_32},
918 {"gewave32", RADV_PERFTEST_GE_WAVE_32},
919 {"nosam", RADV_PERFTEST_NO_SAM},
920 {"sam", RADV_PERFTEST_SAM},
921 {"rt", RADV_PERFTEST_RT},
922 {"nggc", RADV_PERFTEST_NGGC},
923 {"force_emulate_rt", RADV_PERFTEST_FORCE_EMULATE_RT},
924 {"nv_ms", RADV_PERFTEST_NV_MS},
925 {"rtwave64", RADV_PERFTEST_RT_WAVE_64},
926 {NULL, 0}};
927
928 const char *
929 radv_get_perftest_option_name(int id)
930 {
931 assert(id < ARRAY_SIZE(radv_perftest_options) - 1);
932 return radv_perftest_options[id].string;
933 }
934
935 // clang-format off
936 static const driOptionDescription radv_dri_options[] = {
937 DRI_CONF_SECTION_PERFORMANCE
938 DRI_CONF_ADAPTIVE_SYNC(true)
939 DRI_CONF_VK_X11_OVERRIDE_MIN_IMAGE_COUNT(0)
940 DRI_CONF_VK_X11_STRICT_IMAGE_COUNT(false)
941 DRI_CONF_VK_X11_ENSURE_MIN_IMAGE_COUNT(false)
942 DRI_CONF_VK_XWAYLAND_WAIT_READY(true)
943 DRI_CONF_RADV_REPORT_LLVM9_VERSION_STRING(false)
944 DRI_CONF_RADV_ENABLE_MRT_OUTPUT_NAN_FIXUP(false)
945 DRI_CONF_RADV_DISABLE_SHRINK_IMAGE_STORE(false)
946 DRI_CONF_RADV_NO_DYNAMIC_BOUNDS(false)
947 DRI_CONF_RADV_ABSOLUTE_DEPTH_BIAS(false)
948 DRI_CONF_RADV_OVERRIDE_UNIFORM_OFFSET_ALIGNMENT(0)
949 DRI_CONF_SECTION_END
950
951 DRI_CONF_SECTION_DEBUG
952 DRI_CONF_OVERRIDE_VRAM_SIZE()
953 DRI_CONF_VK_WSI_FORCE_BGRA8_UNORM_FIRST(false)
954 DRI_CONF_RADV_ZERO_VRAM(false)
955 DRI_CONF_RADV_LOWER_DISCARD_TO_DEMOTE(false)
956 DRI_CONF_RADV_INVARIANT_GEOM(false)
957 DRI_CONF_RADV_SPLIT_FMA(false)
958 DRI_CONF_RADV_DISABLE_TC_COMPAT_HTILE_GENERAL(false)
959 DRI_CONF_RADV_DISABLE_DCC(false)
960 DRI_CONF_RADV_REPORT_APU_AS_DGPU(false)
961 DRI_CONF_RADV_REQUIRE_ETC2(false)
962 DRI_CONF_RADV_DISABLE_HTILE_LAYERS(false)
963 DRI_CONF_RADV_DISABLE_ANISO_SINGLE_LEVEL(false)
964 DRI_CONF_SECTION_END
965 };
966 // clang-format on
967
968 static void
969 radv_init_dri_options(struct radv_instance *instance)
970 {
971 driParseOptionInfo(&instance->available_dri_options, radv_dri_options,
972 ARRAY_SIZE(radv_dri_options));
973 driParseConfigFiles(&instance->dri_options, &instance->available_dri_options, 0, "radv", NULL, NULL,
974 instance->vk.app_info.app_name, instance->vk.app_info.app_version,
975 instance->vk.app_info.engine_name, instance->vk.app_info.engine_version);
976
977 instance->enable_mrt_output_nan_fixup =
978 driQueryOptionb(&instance->dri_options, "radv_enable_mrt_output_nan_fixup");
979
980 instance->disable_shrink_image_store =
981 driQueryOptionb(&instance->dri_options, "radv_disable_shrink_image_store");
982
983 instance->absolute_depth_bias =
984 driQueryOptionb(&instance->dri_options, "radv_absolute_depth_bias");
985
986 instance->disable_tc_compat_htile_in_general =
987 driQueryOptionb(&instance->dri_options, "radv_disable_tc_compat_htile_general");
988
989 if (driQueryOptionb(&instance->dri_options, "radv_no_dynamic_bounds"))
990 instance->debug_flags |= RADV_DEBUG_NO_DYNAMIC_BOUNDS;
991
992 if (driQueryOptionb(&instance->dri_options, "radv_lower_discard_to_demote"))
993 instance->debug_flags |= RADV_DEBUG_DISCARD_TO_DEMOTE;
994
995 if (driQueryOptionb(&instance->dri_options, "radv_invariant_geom"))
996 instance->debug_flags |= RADV_DEBUG_INVARIANT_GEOM;
997
998 if (driQueryOptionb(&instance->dri_options, "radv_split_fma"))
999 instance->debug_flags |= RADV_DEBUG_SPLIT_FMA;
1000
1001 if (driQueryOptionb(&instance->dri_options, "radv_disable_dcc"))
1002 instance->debug_flags |= RADV_DEBUG_NO_DCC;
1003
1004 instance->zero_vram =
1005 driQueryOptionb(&instance->dri_options, "radv_zero_vram");
1006
1007 instance->report_apu_as_dgpu =
1008 driQueryOptionb(&instance->dri_options, "radv_report_apu_as_dgpu");
1009
1010 instance->disable_htile_layers =
1011 driQueryOptionb(&instance->dri_options, "radv_disable_htile_layers");
1012
1013 instance->disable_aniso_single_level =
1014 driQueryOptionb(&instance->dri_options, "radv_disable_aniso_single_level");
1015 }
1016
1017 VKAPI_ATTR VkResult VKAPI_CALL
1018 radv_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
1019 const VkAllocationCallbacks *pAllocator, VkInstance *pInstance)
1020 {
1021 struct radv_instance *instance;
1022 VkResult result;
1023
1024 if (!pAllocator)
1025 pAllocator = vk_default_allocator();
1026
1027 instance = vk_zalloc(pAllocator, sizeof(*instance), 8, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
1028 if (!instance)
1029 return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
1030
1031 struct vk_instance_dispatch_table dispatch_table;
1032 vk_instance_dispatch_table_from_entrypoints(&dispatch_table, &radv_instance_entrypoints, true);
1033 vk_instance_dispatch_table_from_entrypoints(&dispatch_table, &wsi_instance_entrypoints, false);
1034 struct vk_instance_extension_table extensions_supported = radv_instance_extensions_supported;
1035 if (radv_thread_trace_enabled())
1036 extensions_supported.EXT_debug_utils = true;
1037 result = vk_instance_init(&instance->vk, &extensions_supported, &dispatch_table,
1038 pCreateInfo, pAllocator);
1039 if (result != VK_SUCCESS) {
1040 vk_free(pAllocator, instance);
1041 return vk_error(instance, result);
1042 }
1043
1044 instance->debug_flags = parse_debug_string(getenv("RADV_DEBUG"), radv_debug_options);
1045 instance->perftest_flags = parse_debug_string(getenv("RADV_PERFTEST"), radv_perftest_options);
1046
1047 if (instance->debug_flags & RADV_DEBUG_STARTUP)
1048 radv_logi("Created an instance");
1049
1050 instance->physical_devices_enumerated = false;
1051 list_inithead(&instance->physical_devices);
1052
1053 VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
1054
1055 radv_init_dri_options(instance);
1056
1057 *pInstance = radv_instance_to_handle(instance);
1058
1059 return VK_SUCCESS;
1060 }
1061
1062 VKAPI_ATTR void VKAPI_CALL
1063 radv_DestroyInstance(VkInstance _instance, const VkAllocationCallbacks *pAllocator)
1064 {
1065 RADV_FROM_HANDLE(radv_instance, instance, _instance);
1066
1067 if (!instance)
1068 return;
1069
1070 list_for_each_entry_safe(struct radv_physical_device, pdevice, &instance->physical_devices, link)
1071 {
1072 radv_physical_device_destroy(pdevice);
1073 }
1074
1075 VG(VALGRIND_DESTROY_MEMPOOL(instance));
1076
1077 driDestroyOptionCache(&instance->dri_options);
1078 driDestroyOptionInfo(&instance->available_dri_options);
1079
1080 vk_instance_finish(&instance->vk);
1081 vk_free(&instance->vk.alloc, instance);
1082 }
1083
1084 static VkResult
1085 radv_enumerate_physical_devices(struct radv_instance *instance)
1086 {
1087 if (instance->physical_devices_enumerated)
1088 return VK_SUCCESS;
1089
1090 instance->physical_devices_enumerated = true;
1091
1092 VkResult result = VK_SUCCESS;
1093
1094 if (getenv("RADV_FORCE_FAMILY")) {
1095       /* When RADV_FORCE_FAMILY is set, the driver creates a null
1096        * device that allows testing the compiler without having an
1097        * AMDGPU instance.
1098 */
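      /* Usage sketch (the family name below is only an assumed example; the set of
       * accepted names is defined by the null winsys, not here):
       *   RADV_FORCE_FAMILY=GFX1030 vulkaninfo
       */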
1099 struct radv_physical_device *pdevice;
1100
1101 result = radv_physical_device_try_create(instance, NULL, &pdevice);
1102 if (result != VK_SUCCESS)
1103 return result;
1104
1105 list_addtail(&pdevice->link, &instance->physical_devices);
1106 return VK_SUCCESS;
1107 }
1108
1109 #ifndef _WIN32
1110    /* TODO: Check for more devices? */
1111 drmDevicePtr devices[8];
1112 int max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
1113
1114 if (instance->debug_flags & RADV_DEBUG_STARTUP)
1115 radv_logi("Found %d drm nodes", max_devices);
1116
1117 if (max_devices < 1)
1118 return vk_error(instance, VK_SUCCESS);
1119
1120 for (unsigned i = 0; i < (unsigned)max_devices; i++) {
1121 if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
1122 devices[i]->bustype == DRM_BUS_PCI &&
1123 devices[i]->deviceinfo.pci->vendor_id == ATI_VENDOR_ID) {
1124
1125 struct radv_physical_device *pdevice;
1126 result = radv_physical_device_try_create(instance, devices[i], &pdevice);
1127 /* Incompatible DRM device, skip. */
1128 if (result == VK_ERROR_INCOMPATIBLE_DRIVER) {
1129 result = VK_SUCCESS;
1130 continue;
1131 }
1132
1133 /* Error creating the physical device, report the error. */
1134 if (result != VK_SUCCESS)
1135 break;
1136
1137 list_addtail(&pdevice->link, &instance->physical_devices);
1138 }
1139 }
1140 drmFreeDevices(devices, max_devices);
1141 #endif
1142
1143 /* If we successfully enumerated any devices, call it success */
1144 return result;
1145 }
1146
1147 VKAPI_ATTR VkResult VKAPI_CALL
1148 radv_EnumeratePhysicalDevices(VkInstance _instance, uint32_t *pPhysicalDeviceCount,
1149 VkPhysicalDevice *pPhysicalDevices)
1150 {
1151 RADV_FROM_HANDLE(radv_instance, instance, _instance);
1152 VK_OUTARRAY_MAKE_TYPED(VkPhysicalDevice, out, pPhysicalDevices, pPhysicalDeviceCount);
1153
1154 VkResult result = radv_enumerate_physical_devices(instance);
1155 if (result != VK_SUCCESS)
1156 return result;
1157
1158 list_for_each_entry(struct radv_physical_device, pdevice, &instance->physical_devices, link)
1159 {
1160 vk_outarray_append_typed(VkPhysicalDevice, &out, i)
1161 {
1162 *i = radv_physical_device_to_handle(pdevice);
1163 }
1164 }
1165
1166 return vk_outarray_status(&out);
1167 }
1168
1169 VKAPI_ATTR VkResult VKAPI_CALL
1170 radv_EnumeratePhysicalDeviceGroups(VkInstance _instance, uint32_t *pPhysicalDeviceGroupCount,
1171 VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
1172 {
1173 RADV_FROM_HANDLE(radv_instance, instance, _instance);
1174 VK_OUTARRAY_MAKE_TYPED(VkPhysicalDeviceGroupProperties, out, pPhysicalDeviceGroupProperties,
1175 pPhysicalDeviceGroupCount);
1176
1177 VkResult result = radv_enumerate_physical_devices(instance);
1178 if (result != VK_SUCCESS)
1179 return result;
1180
1181 list_for_each_entry(struct radv_physical_device, pdevice, &instance->physical_devices, link)
1182 {
1183 vk_outarray_append_typed(VkPhysicalDeviceGroupProperties, &out, p)
1184 {
1185 p->physicalDeviceCount = 1;
1186 memset(p->physicalDevices, 0, sizeof(p->physicalDevices));
1187 p->physicalDevices[0] = radv_physical_device_to_handle(pdevice);
1188 p->subsetAllocation = false;
1189 }
1190 }
1191
1192 return vk_outarray_status(&out);
1193 }
1194
1195 VKAPI_ATTR void VKAPI_CALL
1196 radv_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures *pFeatures)
1197 {
1198 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
1199 memset(pFeatures, 0, sizeof(*pFeatures));
1200
1201 *pFeatures = (VkPhysicalDeviceFeatures){
1202 .robustBufferAccess = true,
1203 .fullDrawIndexUint32 = true,
1204 .imageCubeArray = true,
1205 .independentBlend = true,
1206 .geometryShader = true,
1207 .tessellationShader = true,
1208 .sampleRateShading = true,
1209 .dualSrcBlend = true,
1210 .logicOp = true,
1211 .multiDrawIndirect = true,
1212 .drawIndirectFirstInstance = true,
1213 .depthClamp = true,
1214 .depthBiasClamp = true,
1215 .fillModeNonSolid = true,
1216 .depthBounds = true,
1217 .wideLines = true,
1218 .largePoints = true,
1219 .alphaToOne = false,
1220 .multiViewport = true,
1221 .samplerAnisotropy = true,
1222 .textureCompressionETC2 = radv_device_supports_etc(pdevice) || pdevice->emulate_etc2,
1223 .textureCompressionASTC_LDR = false,
1224 .textureCompressionBC = true,
1225 .occlusionQueryPrecise = true,
1226 .pipelineStatisticsQuery = true,
1227 .vertexPipelineStoresAndAtomics = true,
1228 .fragmentStoresAndAtomics = true,
1229 .shaderTessellationAndGeometryPointSize = true,
1230 .shaderImageGatherExtended = true,
1231 .shaderStorageImageExtendedFormats = true,
1232 .shaderStorageImageMultisample = true,
1233 .shaderUniformBufferArrayDynamicIndexing = true,
1234 .shaderSampledImageArrayDynamicIndexing = true,
1235 .shaderStorageBufferArrayDynamicIndexing = true,
1236 .shaderStorageImageArrayDynamicIndexing = true,
1237 .shaderStorageImageReadWithoutFormat = true,
1238 .shaderStorageImageWriteWithoutFormat = true,
1239 .shaderClipDistance = true,
1240 .shaderCullDistance = true,
1241 .shaderFloat64 = true,
1242 .shaderInt64 = true,
1243 .shaderInt16 = true,
1244 .sparseBinding = true,
1245 .sparseResidencyBuffer = pdevice->rad_info.family >= CHIP_POLARIS10,
1246 .sparseResidencyImage2D = pdevice->rad_info.family >= CHIP_POLARIS10,
1247 .sparseResidencyAliased = pdevice->rad_info.family >= CHIP_POLARIS10,
1248 .variableMultisampleRate = true,
1249 .shaderResourceMinLod = true,
1250 .shaderResourceResidency = true,
1251 .inheritedQueries = true,
1252 };
1253 }
1254
1255 static void
1256 radv_get_physical_device_features_1_1(struct radv_physical_device *pdevice,
1257 VkPhysicalDeviceVulkan11Features *f)
1258 {
1259 assert(f->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES);
1260
1261 f->storageBuffer16BitAccess = true;
1262 f->uniformAndStorageBuffer16BitAccess = true;
1263 f->storagePushConstant16 = true;
1264 f->storageInputOutput16 = pdevice->rad_info.has_packed_math_16bit;
1265 f->multiview = true;
1266 f->multiviewGeometryShader = true;
1267 f->multiviewTessellationShader = true;
1268 f->variablePointersStorageBuffer = true;
1269 f->variablePointers = true;
1270 f->protectedMemory = false;
1271 f->samplerYcbcrConversion = true;
1272 f->shaderDrawParameters = true;
1273 }
1274
1275 static void
1276 radv_get_physical_device_features_1_2(struct radv_physical_device *pdevice,
1277 VkPhysicalDeviceVulkan12Features *f)
1278 {
1279 assert(f->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES);
1280
1281 f->samplerMirrorClampToEdge = true;
1282 f->drawIndirectCount = true;
1283 f->storageBuffer8BitAccess = true;
1284 f->uniformAndStorageBuffer8BitAccess = true;
1285 f->storagePushConstant8 = true;
1286 f->shaderBufferInt64Atomics = true;
1287 f->shaderSharedInt64Atomics = true;
1288 f->shaderFloat16 = pdevice->rad_info.has_packed_math_16bit;
1289 f->shaderInt8 = true;
1290
1291 f->descriptorIndexing = true;
1292 f->shaderInputAttachmentArrayDynamicIndexing = true;
1293 f->shaderUniformTexelBufferArrayDynamicIndexing = true;
1294 f->shaderStorageTexelBufferArrayDynamicIndexing = true;
1295 f->shaderUniformBufferArrayNonUniformIndexing = true;
1296 f->shaderSampledImageArrayNonUniformIndexing = true;
1297 f->shaderStorageBufferArrayNonUniformIndexing = true;
1298 f->shaderStorageImageArrayNonUniformIndexing = true;
1299 f->shaderInputAttachmentArrayNonUniformIndexing = true;
1300 f->shaderUniformTexelBufferArrayNonUniformIndexing = true;
1301 f->shaderStorageTexelBufferArrayNonUniformIndexing = true;
1302 f->descriptorBindingUniformBufferUpdateAfterBind = true;
1303 f->descriptorBindingSampledImageUpdateAfterBind = true;
1304 f->descriptorBindingStorageImageUpdateAfterBind = true;
1305 f->descriptorBindingStorageBufferUpdateAfterBind = true;
1306 f->descriptorBindingUniformTexelBufferUpdateAfterBind = true;
1307 f->descriptorBindingStorageTexelBufferUpdateAfterBind = true;
1308 f->descriptorBindingUpdateUnusedWhilePending = true;
1309 f->descriptorBindingPartiallyBound = true;
1310 f->descriptorBindingVariableDescriptorCount = true;
1311 f->runtimeDescriptorArray = true;
1312
1313 f->samplerFilterMinmax = true;
1314 f->scalarBlockLayout = pdevice->rad_info.chip_class >= GFX7;
1315 f->imagelessFramebuffer = true;
1316 f->uniformBufferStandardLayout = true;
1317 f->shaderSubgroupExtendedTypes = true;
1318 f->separateDepthStencilLayouts = true;
1319 f->hostQueryReset = true;
1320    f->timelineSemaphore = true;
        f->bufferDeviceAddress = true;
1321 f->bufferDeviceAddressCaptureReplay = true;
1322 f->bufferDeviceAddressMultiDevice = false;
1323 f->vulkanMemoryModel = true;
1324 f->vulkanMemoryModelDeviceScope = true;
1325 f->vulkanMemoryModelAvailabilityVisibilityChains = false;
1326 f->shaderOutputViewportIndex = true;
1327 f->shaderOutputLayer = true;
1328 f->subgroupBroadcastDynamicId = true;
1329 }
1330
1331 static void
1332 radv_get_physical_device_features_1_3(struct radv_physical_device *pdevice,
1333 VkPhysicalDeviceVulkan13Features *f)
1334 {
1335 assert(f->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_FEATURES);
1336
1337 f->robustImageAccess = true;
1338 f->inlineUniformBlock = true;
1339 f->descriptorBindingInlineUniformBlockUpdateAfterBind = true;
1340 f->pipelineCreationCacheControl = true;
1341 f->privateData = true;
1342 f->shaderDemoteToHelperInvocation = true;
1343 f->shaderTerminateInvocation = true;
1344 f->subgroupSizeControl = true;
1345 f->computeFullSubgroups = true;
1346 f->synchronization2 = true;
1347 f->textureCompressionASTC_HDR = false;
1348 f->shaderZeroInitializeWorkgroupMemory = true;
1349 f->dynamicRendering = true;
1350 f->shaderIntegerDotProduct = true;
1351 f->maintenance4 = true;
1352 }
1353
1354 VKAPI_ATTR void VKAPI_CALL
1355 radv_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
1356 VkPhysicalDeviceFeatures2 *pFeatures)
1357 {
1358 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
1359 radv_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
1360
1361 VkPhysicalDeviceVulkan11Features core_1_1 = {
1362 .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES,
1363 };
1364 radv_get_physical_device_features_1_1(pdevice, &core_1_1);
1365
1366 VkPhysicalDeviceVulkan12Features core_1_2 = {
1367 .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES,
1368 };
1369 radv_get_physical_device_features_1_2(pdevice, &core_1_2);
1370
1371 VkPhysicalDeviceVulkan13Features core_1_3 = {
1372 .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_FEATURES,
1373 };
1374 radv_get_physical_device_features_1_3(pdevice, &core_1_3);
1375
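/* Copy a feature bit from the core_1_x structs filled above into the extension
 * struct currently being walked. */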
1376 #define CORE_FEATURE(major, minor, feature) features->feature = core_##major##_##minor.feature
1377
1378 vk_foreach_struct(ext, pFeatures->pNext)
1379 {
1380 if (vk_get_physical_device_core_1_1_feature_ext(ext, &core_1_1))
1381 continue;
1382 if (vk_get_physical_device_core_1_2_feature_ext(ext, &core_1_2))
1383 continue;
1384 if (vk_get_physical_device_core_1_3_feature_ext(ext, &core_1_3))
1385 continue;
1386
1387 switch (ext->sType) {
1388 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
1389 VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
1390 (VkPhysicalDeviceConditionalRenderingFeaturesEXT *)ext;
1391 features->conditionalRendering = true;
1392 features->inheritedConditionalRendering = false;
1393 break;
1394 }
1395 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT: {
1396 VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *features =
1397 (VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *)ext;
1398 features->vertexAttributeInstanceRateDivisor = true;
1399 features->vertexAttributeInstanceRateZeroDivisor = true;
1400 break;
1401 }
1402 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT: {
1403 VkPhysicalDeviceTransformFeedbackFeaturesEXT *features =
1404 (VkPhysicalDeviceTransformFeedbackFeaturesEXT *)ext;
1405 features->transformFeedback = true;
1406 features->geometryStreams = !pdevice->use_ngg_streamout;
1407 break;
1408 }
1409 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES: {
1410 VkPhysicalDeviceScalarBlockLayoutFeatures *features =
1411 (VkPhysicalDeviceScalarBlockLayoutFeatures *)ext;
1412 CORE_FEATURE(1, 2, scalarBlockLayout);
1413 break;
1414 }
1415 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT: {
1416 VkPhysicalDeviceMemoryPriorityFeaturesEXT *features =
1417 (VkPhysicalDeviceMemoryPriorityFeaturesEXT *)ext;
1418 features->memoryPriority = true;
1419 break;
1420 }
1421 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT: {
1422 VkPhysicalDeviceBufferDeviceAddressFeaturesEXT *features =
1423 (VkPhysicalDeviceBufferDeviceAddressFeaturesEXT *)ext;
1424 CORE_FEATURE(1, 2, bufferDeviceAddress);
1425 CORE_FEATURE(1, 2, bufferDeviceAddressCaptureReplay);
1426 CORE_FEATURE(1, 2, bufferDeviceAddressMultiDevice);
1427 break;
1428 }
1429 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT: {
1430 VkPhysicalDeviceDepthClipEnableFeaturesEXT *features =
1431 (VkPhysicalDeviceDepthClipEnableFeaturesEXT *)ext;
1432 features->depthClipEnable = true;
1433 break;
1434 }
1435 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV: {
1436 VkPhysicalDeviceComputeShaderDerivativesFeaturesNV *features =
1437 (VkPhysicalDeviceComputeShaderDerivativesFeaturesNV *)ext;
1438 features->computeDerivativeGroupQuads = false;
1439 features->computeDerivativeGroupLinear = true;
1440 break;
1441 }
1442 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_IMAGE_ARRAYS_FEATURES_EXT: {
1443 VkPhysicalDeviceYcbcrImageArraysFeaturesEXT *features =
1444 (VkPhysicalDeviceYcbcrImageArraysFeaturesEXT *)ext;
1445 features->ycbcrImageArrays = true;
1446 break;
1447 }
1448 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT: {
1449 VkPhysicalDeviceIndexTypeUint8FeaturesEXT *features =
1450 (VkPhysicalDeviceIndexTypeUint8FeaturesEXT *)ext;
1451 features->indexTypeUint8 = pdevice->rad_info.chip_class >= GFX8;
1452 break;
1453 }
1454 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR: {
1455 VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR *features =
1456 (VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR *)ext;
1457 features->pipelineExecutableInfo = true;
1458 break;
1459 }
1460 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CLOCK_FEATURES_KHR: {
1461 VkPhysicalDeviceShaderClockFeaturesKHR *features =
1462 (VkPhysicalDeviceShaderClockFeaturesKHR *)ext;
1463 features->shaderSubgroupClock = true;
1464 features->shaderDeviceClock = pdevice->rad_info.chip_class >= GFX8;
1465 break;
1466 }
1467 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT: {
1468 VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT *features =
1469 (VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT *)ext;
1470 features->texelBufferAlignment = true;
1471 break;
1472 }
1473 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD: {
1474 VkPhysicalDeviceCoherentMemoryFeaturesAMD *features =
1475 (VkPhysicalDeviceCoherentMemoryFeaturesAMD *)ext;
1476 features->deviceCoherentMemory = pdevice->rad_info.has_l2_uncached;
1477 break;
1478 }
1479 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT: {
1480 VkPhysicalDeviceLineRasterizationFeaturesEXT *features =
1481 (VkPhysicalDeviceLineRasterizationFeaturesEXT *)ext;
1482 features->rectangularLines = false;
1483 features->bresenhamLines = true;
1484 features->smoothLines = false;
1485 features->stippledRectangularLines = false;
1486          /* FIXME: Some stippled Bresenham CTS tests fail on Vega10
1487           * but pass on Raven.
1488 */
1489 features->stippledBresenhamLines = pdevice->rad_info.chip_class != GFX9;
1490 features->stippledSmoothLines = false;
1491 break;
1492 }
1493 case VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD: {
1494 VkDeviceMemoryOverallocationCreateInfoAMD *features =
1495 (VkDeviceMemoryOverallocationCreateInfoAMD *)ext;
1496 features->overallocationBehavior = true;
1497 break;
1498 }
1499 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT: {
1500 VkPhysicalDeviceRobustness2FeaturesEXT *features =
1501 (VkPhysicalDeviceRobustness2FeaturesEXT *)ext;
1502 features->robustBufferAccess2 = true;
1503 features->robustImageAccess2 = true;
1504 features->nullDescriptor = true;
1505 break;
1506 }
1507 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT: {
1508 VkPhysicalDeviceCustomBorderColorFeaturesEXT *features =
1509 (VkPhysicalDeviceCustomBorderColorFeaturesEXT *)ext;
1510 features->customBorderColors = true;
1511 features->customBorderColorWithoutFormat = true;
1512 break;
1513 }
1514 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT: {
1515 VkPhysicalDeviceExtendedDynamicStateFeaturesEXT *features =
1516 (VkPhysicalDeviceExtendedDynamicStateFeaturesEXT *)ext;
1517 features->extendedDynamicState = true;
1518 break;
1519 }
1520 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_FEATURES_EXT: {
1521 VkPhysicalDeviceShaderAtomicFloatFeaturesEXT *features =
1522 (VkPhysicalDeviceShaderAtomicFloatFeaturesEXT *)ext;
1523 features->shaderBufferFloat32Atomics = true;
1524 features->shaderBufferFloat32AtomicAdd = false;
1525 features->shaderBufferFloat64Atomics = true;
1526 features->shaderBufferFloat64AtomicAdd = false;
1527 features->shaderSharedFloat32Atomics = true;
1528 features->shaderSharedFloat32AtomicAdd = pdevice->rad_info.chip_class >= GFX8;
1529 features->shaderSharedFloat64Atomics = true;
1530 features->shaderSharedFloat64AtomicAdd = false;
1531 features->shaderImageFloat32Atomics = true;
1532 features->shaderImageFloat32AtomicAdd = false;
1533 features->sparseImageFloat32Atomics = true;
1534 features->sparseImageFloat32AtomicAdd = false;
1535 break;
1536 }
1537 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT: {
1538 VkPhysicalDevice4444FormatsFeaturesEXT *features =
1539 (VkPhysicalDevice4444FormatsFeaturesEXT *)ext;
1540 features->formatA4R4G4B4 = true;
1541 features->formatA4B4G4R4 = true;
1542 break;
1543 }
1544 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_ATOMIC_INT64_FEATURES_EXT: {
1545 VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT *features =
1546 (VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT *)ext;
1547 features->shaderImageInt64Atomics = true;
1548 features->sparseImageInt64Atomics = true;
1549 break;
1550 }
1551 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_VALVE: {
1552 VkPhysicalDeviceMutableDescriptorTypeFeaturesVALVE *features =
1553 (VkPhysicalDeviceMutableDescriptorTypeFeaturesVALVE *)ext;
1554 features->mutableDescriptorType = true;
1555 break;
1556 }
1557 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_FEATURES_KHR: {
1558 VkPhysicalDeviceFragmentShadingRateFeaturesKHR *features =
1559 (VkPhysicalDeviceFragmentShadingRateFeaturesKHR *)ext;
1560 features->pipelineFragmentShadingRate = true;
1561 features->primitiveFragmentShadingRate = true;
1562 features->attachmentFragmentShadingRate = !(pdevice->instance->debug_flags & RADV_DEBUG_NO_HIZ);
1563 break;
1564 }
1565 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_WORKGROUP_MEMORY_EXPLICIT_LAYOUT_FEATURES_KHR: {
1566 VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR *features =
1567 (VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR *)ext;
1568 features->workgroupMemoryExplicitLayout = true;
1569 features->workgroupMemoryExplicitLayoutScalarBlockLayout = true;
1570 features->workgroupMemoryExplicitLayout8BitAccess = true;
1571 features->workgroupMemoryExplicitLayout16BitAccess = true;
1572 break;
1573 }
1574 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_FEATURES_EXT: {
1575 VkPhysicalDeviceProvokingVertexFeaturesEXT *features =
1576 (VkPhysicalDeviceProvokingVertexFeaturesEXT *)ext;
1577 features->provokingVertexLast = true;
1578 features->transformFeedbackPreservesProvokingVertex = true;
1579 break;
1580 }
1581 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_2_FEATURES_EXT: {
1582 VkPhysicalDeviceExtendedDynamicState2FeaturesEXT *features =
1583 (VkPhysicalDeviceExtendedDynamicState2FeaturesEXT *)ext;
1584 features->extendedDynamicState2 = true;
1585 features->extendedDynamicState2LogicOp = true;
1586 features->extendedDynamicState2PatchControlPoints = false;
1587 break;
1588 }
1589 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES_EXT: {
1590 VkPhysicalDeviceGlobalPriorityQueryFeaturesEXT *features =
1591 (VkPhysicalDeviceGlobalPriorityQueryFeaturesEXT *)ext;
1592 features->globalPriorityQuery = true;
1593 break;
1594 }
1595 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_FEATURES_KHR: {
1596 VkPhysicalDeviceAccelerationStructureFeaturesKHR *features =
1597 (VkPhysicalDeviceAccelerationStructureFeaturesKHR *)ext;
1598 features->accelerationStructure = true;
1599 features->accelerationStructureCaptureReplay = false;
1600 features->accelerationStructureIndirectBuild = false;
1601 features->accelerationStructureHostCommands = true;
1602 features->descriptorBindingAccelerationStructureUpdateAfterBind = true;
1603 break;
1604 }
1605 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_UNIFORM_CONTROL_FLOW_FEATURES_KHR: {
1606 VkPhysicalDeviceShaderSubgroupUniformControlFlowFeaturesKHR *features =
1607 (VkPhysicalDeviceShaderSubgroupUniformControlFlowFeaturesKHR *)ext;
1608 features->shaderSubgroupUniformControlFlow = true;
1609 break;
1610 }
1611 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTI_DRAW_FEATURES_EXT: {
1612 VkPhysicalDeviceMultiDrawFeaturesEXT *features = (VkPhysicalDeviceMultiDrawFeaturesEXT *)ext;
1613 features->multiDraw = true;
1614 break;
1615 }
1616 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COLOR_WRITE_ENABLE_FEATURES_EXT: {
1617 VkPhysicalDeviceColorWriteEnableFeaturesEXT *features =
1618 (VkPhysicalDeviceColorWriteEnableFeaturesEXT *)ext;
1619 features->colorWriteEnable = true;
1620 break;
1621 }
1622 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_2_FEATURES_EXT: {
1623 VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT *features =
1624 (VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT *)ext;
1625 bool has_shader_buffer_float_minmax = ((pdevice->rad_info.chip_class == GFX6 ||
1626 pdevice->rad_info.chip_class == GFX7) &&
1627 !pdevice->use_llvm) ||
1628 pdevice->rad_info.chip_class >= GFX10;
1629 bool has_shader_image_float_minmax = pdevice->rad_info.chip_class != GFX8 &&
1630 pdevice->rad_info.chip_class != GFX9;
1631 features->shaderBufferFloat16Atomics = false;
1632 features->shaderBufferFloat16AtomicAdd = false;
1633 features->shaderBufferFloat16AtomicMinMax = false;
1634 features->shaderBufferFloat32AtomicMinMax = has_shader_buffer_float_minmax;
1635 features->shaderBufferFloat64AtomicMinMax = has_shader_buffer_float_minmax;
1636 features->shaderSharedFloat16Atomics = false;
1637 features->shaderSharedFloat16AtomicAdd = false;
1638 features->shaderSharedFloat16AtomicMinMax = false;
1639 features->shaderSharedFloat32AtomicMinMax = true;
1640 features->shaderSharedFloat64AtomicMinMax = true;
1641 features->shaderImageFloat32AtomicMinMax = has_shader_image_float_minmax;
1642 features->sparseImageFloat32AtomicMinMax = has_shader_image_float_minmax;
1643 break;
1644 }
1645 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIMITIVE_TOPOLOGY_LIST_RESTART_FEATURES_EXT: {
1646 VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT *features =
1647 (VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT *)ext;
1648 features->primitiveTopologyListRestart = true;
1649 features->primitiveTopologyPatchListRestart = false;
1650 break;
1651 }
1652 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_QUERY_FEATURES_KHR: {
1653 VkPhysicalDeviceRayQueryFeaturesKHR *features =
1654 (VkPhysicalDeviceRayQueryFeaturesKHR *)ext;
1655 features->rayQuery = true;
1656 break;
1657 }
1658 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_FEATURES_KHR: {
1659 VkPhysicalDeviceRayTracingPipelineFeaturesKHR *features =
1660 (VkPhysicalDeviceRayTracingPipelineFeaturesKHR *)ext;
1661 features->rayTracingPipeline = true;
1662 features->rayTracingPipelineShaderGroupHandleCaptureReplay = false;
1663 features->rayTracingPipelineShaderGroupHandleCaptureReplayMixed = false;
1664 features->rayTracingPipelineTraceRaysIndirect = false;
1665 features->rayTraversalPrimitiveCulling = false;
1666 break;
1667 }
1668 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_FEATURES_KHR: {
1669 VkPhysicalDeviceMaintenance4FeaturesKHR *features =
1670 (VkPhysicalDeviceMaintenance4FeaturesKHR *)ext;
1671 features->maintenance4 = true;
1672 break;
1673 }
1674 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_INPUT_DYNAMIC_STATE_FEATURES_EXT: {
1675 VkPhysicalDeviceVertexInputDynamicStateFeaturesEXT *features =
1676 (VkPhysicalDeviceVertexInputDynamicStateFeaturesEXT *)ext;
1677 features->vertexInputDynamicState = true;
1678 break;
1679 }
1680 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_MIN_LOD_FEATURES_EXT: {
1681 VkPhysicalDeviceImageViewMinLodFeaturesEXT *features =
1682 (VkPhysicalDeviceImageViewMinLodFeaturesEXT *)ext;
1683 features->minLod = true;
1684 break;
1685 }
1686 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES_KHR: {
1687 VkPhysicalDeviceSynchronization2FeaturesKHR *features =
1688 (VkPhysicalDeviceSynchronization2FeaturesKHR *)ext;
1689 features->synchronization2 = true;
1690 break;
1691 }
1692 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES_KHR: {
1693 VkPhysicalDeviceDynamicRenderingFeaturesKHR *features =
1694 (VkPhysicalDeviceDynamicRenderingFeaturesKHR *)ext;
1695 features->dynamicRendering = true;
1696 break;
1697 }
1698 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_FEATURES_NV: {
1699 VkPhysicalDeviceMeshShaderFeaturesNV *features =
1700 (VkPhysicalDeviceMeshShaderFeaturesNV *)ext;
1701 features->meshShader = true;
1702 features->taskShader = false; /* TODO */
1703 break;
1704 }
1705 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES: {
1706 VkPhysicalDeviceTextureCompressionASTCHDRFeatures *features =
1707 (VkPhysicalDeviceTextureCompressionASTCHDRFeatures *)ext;
1708 features->textureCompressionASTC_HDR = false;
1709 break;
1710 }
1711 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_SET_HOST_MAPPING_FEATURES_VALVE: {
1712 VkPhysicalDeviceDescriptorSetHostMappingFeaturesVALVE *features =
1713 (VkPhysicalDeviceDescriptorSetHostMappingFeaturesVALVE *)ext;
1714 features->descriptorSetHostMapping = true;
1715 break;
1716 }
1717 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_CONTROL_FEATURES_EXT: {
1718 VkPhysicalDeviceDepthClipControlFeaturesEXT *features =
1719 (VkPhysicalDeviceDepthClipControlFeaturesEXT *)ext;
1720 features->depthClipControl = true;
1721 break;
1722 }
1723 default:
1724 break;
1725 }
1726 }
1727 }
1728
1729 static size_t
1730 radv_max_descriptor_set_size()
1731 {
1732 /* Make sure that the entire descriptor set is addressable with a signed
1733 * 32-bit int, so the sum of all limits scaled by descriptor size has to
1734 * be at most 2 GiB. A combined image & sampler object counts as one of
1735 * each. This limit is for the pipeline layout, not for the set layout, but
1736 * there is no set limit, so we just set a pipeline limit. No application is
1737 * expected to hit this any time soon. */
1738 return ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS -
1739 MAX_INLINE_UNIFORM_BLOCK_SIZE * MAX_INLINE_UNIFORM_BLOCK_COUNT) /
1740 (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
1741 32 /* storage buffer, 32 due to potential space wasted on alignment */ +
1742 32 /* sampler, largest when combined with image */ + 64 /* sampled image */ +
1743 64 /* storage image */);
1744 }
1745
1746 static uint32_t
1747 radv_uniform_buffer_offset_alignment(const struct radv_physical_device *pdevice)
1748 {
1749 uint32_t uniform_offset_alignment =
1750 driQueryOptioni(&pdevice->instance->dri_options, "radv_override_uniform_offset_alignment");
1751 if (!util_is_power_of_two_or_zero(uniform_offset_alignment)) {
1752 fprintf(stderr,
1753 "ERROR: invalid radv_override_uniform_offset_alignment setting %d: "
1754 "not a power of two\n",
1755 uniform_offset_alignment);
1756 uniform_offset_alignment = 0;
1757 }
1758
1759 /* Take at least the hardware limit. */
1760 return MAX2(uniform_offset_alignment, 4);
1761 }
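/* Hedged usage sketch (the option name comes from the query above; the XML
 * layout below is the usual driconf shape and is shown only as an assumed
 * example): a user could force a larger alignment for a single application
 * from ~/.drirc along these lines:
 *
 *   <driconf>
 *     <device driver="radv">
 *       <application name="example" executable="example">
 *         <option name="radv_override_uniform_offset_alignment" value="256"/>
 *       </application>
 *     </device>
 *   </driconf>
 *
 * Non-power-of-two values are rejected by the check above and the alignment
 * falls back to the hardware minimum of 4.
 */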
1762
1763 VKAPI_ATTR void VKAPI_CALL
1764 radv_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
1765 VkPhysicalDeviceProperties *pProperties)
1766 {
1767 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
1768 VkSampleCountFlags sample_counts = 0xf;
1769
1770 size_t max_descriptor_set_size = radv_max_descriptor_set_size();
1771
1772 VkPhysicalDeviceLimits limits = {
1773 .maxImageDimension1D = (1 << 14),
1774 .maxImageDimension2D = (1 << 14),
1775 .maxImageDimension3D = (1 << 11),
1776 .maxImageDimensionCube = (1 << 14),
1777 .maxImageArrayLayers = (1 << 11),
1778 .maxTexelBufferElements = UINT32_MAX,
1779 .maxUniformBufferRange = UINT32_MAX,
1780 .maxStorageBufferRange = UINT32_MAX,
1781 .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
1782 .maxMemoryAllocationCount = UINT32_MAX,
1783 .maxSamplerAllocationCount = 64 * 1024,
1784 .bufferImageGranularity = 1,
1785 .sparseAddressSpaceSize = RADV_MAX_MEMORY_ALLOCATION_SIZE, /* buffer max size */
1786 .maxBoundDescriptorSets = MAX_SETS,
1787 .maxPerStageDescriptorSamplers = max_descriptor_set_size,
1788 .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
1789 .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
1790 .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
1791 .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
1792 .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
1793 .maxPerStageResources = max_descriptor_set_size,
1794 .maxDescriptorSetSamplers = max_descriptor_set_size,
1795 .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
1796 .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
1797 .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
1798 .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
1799 .maxDescriptorSetSampledImages = max_descriptor_set_size,
1800 .maxDescriptorSetStorageImages = max_descriptor_set_size,
1801 .maxDescriptorSetInputAttachments = max_descriptor_set_size,
1802 .maxVertexInputAttributes = MAX_VERTEX_ATTRIBS,
1803 .maxVertexInputBindings = MAX_VBS,
1804 .maxVertexInputAttributeOffset = UINT32_MAX,
1805 .maxVertexInputBindingStride = 2048,
1806 .maxVertexOutputComponents = 128,
1807 .maxTessellationGenerationLevel = 64,
1808 .maxTessellationPatchSize = 32,
1809 .maxTessellationControlPerVertexInputComponents = 128,
1810 .maxTessellationControlPerVertexOutputComponents = 128,
1811 .maxTessellationControlPerPatchOutputComponents = 120,
1812 .maxTessellationControlTotalOutputComponents = 4096,
1813 .maxTessellationEvaluationInputComponents = 128,
1814 .maxTessellationEvaluationOutputComponents = 128,
1815 .maxGeometryShaderInvocations = 127,
1816 .maxGeometryInputComponents = 64,
1817 .maxGeometryOutputComponents = 128,
1818 .maxGeometryOutputVertices = 256,
1819 .maxGeometryTotalOutputComponents = 1024,
1820 .maxFragmentInputComponents = 128,
1821 .maxFragmentOutputAttachments = 8,
1822 .maxFragmentDualSrcAttachments = 1,
1823 .maxFragmentCombinedOutputResources = max_descriptor_set_size,
1824 .maxComputeSharedMemorySize = pdevice->rad_info.chip_class >= GFX7 ? 65536 : 32768,
1825 .maxComputeWorkGroupCount = {65535, 65535, 65535},
1826 .maxComputeWorkGroupInvocations = 1024,
1827 .maxComputeWorkGroupSize = {1024, 1024, 1024},
1828 .subPixelPrecisionBits = 8,
1829 .subTexelPrecisionBits = 8,
1830 .mipmapPrecisionBits = 8,
1831 .maxDrawIndexedIndexValue = UINT32_MAX,
1832 .maxDrawIndirectCount = UINT32_MAX,
1833 .maxSamplerLodBias = 16,
1834 .maxSamplerAnisotropy = 16,
1835 .maxViewports = MAX_VIEWPORTS,
1836 .maxViewportDimensions = {(1 << 14), (1 << 14)},
1837 .viewportBoundsRange = {INT16_MIN, INT16_MAX},
1838 .viewportSubPixelBits = 8,
1839 .minMemoryMapAlignment = 4096, /* A page */
1840 .minTexelBufferOffsetAlignment = 4,
1841 .minUniformBufferOffsetAlignment = radv_uniform_buffer_offset_alignment(pdevice),
1842 .minStorageBufferOffsetAlignment = 4,
1843 .minTexelOffset = -32,
1844 .maxTexelOffset = 31,
1845 .minTexelGatherOffset = -32,
1846 .maxTexelGatherOffset = 31,
1847 .minInterpolationOffset = -2,
1848 .maxInterpolationOffset = 2,
1849 .subPixelInterpolationOffsetBits = 8,
1850 .maxFramebufferWidth = MAX_FRAMEBUFFER_WIDTH,
1851 .maxFramebufferHeight = MAX_FRAMEBUFFER_HEIGHT,
1852 .maxFramebufferLayers = (1 << 10),
1853 .framebufferColorSampleCounts = sample_counts,
1854 .framebufferDepthSampleCounts = sample_counts,
1855 .framebufferStencilSampleCounts = sample_counts,
1856 .framebufferNoAttachmentsSampleCounts = sample_counts,
1857 .maxColorAttachments = MAX_RTS,
1858 .sampledImageColorSampleCounts = sample_counts,
1859 .sampledImageIntegerSampleCounts = sample_counts,
1860 .sampledImageDepthSampleCounts = sample_counts,
1861 .sampledImageStencilSampleCounts = sample_counts,
1862 .storageImageSampleCounts = sample_counts,
1863 .maxSampleMaskWords = 1,
1864 .timestampComputeAndGraphics = true,
1865 .timestampPeriod = 1000000.0 / pdevice->rad_info.clock_crystal_freq,
1866 .maxClipDistances = 8,
1867 .maxCullDistances = 8,
1868 .maxCombinedClipAndCullDistances = 8,
1869 .discreteQueuePriorities = 2,
1870 .pointSizeRange = {0.0, 8191.875},
1871 .lineWidthRange = {0.0, 8191.875},
1872 .pointSizeGranularity = (1.0 / 8.0),
1873 .lineWidthGranularity = (1.0 / 8.0),
1874 .strictLines = false, /* FINISHME */
1875 .standardSampleLocations = true,
1876 .optimalBufferCopyOffsetAlignment = 1,
1877 .optimalBufferCopyRowPitchAlignment = 1,
1878 .nonCoherentAtomSize = 64,
1879 };
1880
1881 VkPhysicalDeviceType device_type;
1882
1883 if (pdevice->rad_info.has_dedicated_vram || pdevice->instance->report_apu_as_dgpu) {
1884 device_type = VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU;
1885 } else {
1886 device_type = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
1887 }
1888
1889 *pProperties = (VkPhysicalDeviceProperties){
1890 .apiVersion = RADV_API_VERSION,
1891 .driverVersion = vk_get_driver_version(),
1892 .vendorID = ATI_VENDOR_ID,
1893 .deviceID = pdevice->rad_info.pci_id,
1894 .deviceType = device_type,
1895 .limits = limits,
1896 .sparseProperties =
1897 {
1898 .residencyNonResidentStrict = pdevice->rad_info.family >= CHIP_POLARIS10,
1899 .residencyStandard2DBlockShape = pdevice->rad_info.family >= CHIP_POLARIS10,
1900 },
1901 };
1902
1903 strcpy(pProperties->deviceName, pdevice->name);
1904 memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
1905 }
1906
1907 static void
1908 radv_get_physical_device_properties_1_1(struct radv_physical_device *pdevice,
1909 VkPhysicalDeviceVulkan11Properties *p)
1910 {
1911 assert(p->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES);
1912
1913 memcpy(p->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
1914 memcpy(p->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
1915 memset(p->deviceLUID, 0, VK_LUID_SIZE);
1916 /* The LUID is for Windows. */
1917 p->deviceLUIDValid = false;
1918 p->deviceNodeMask = 0;
1919
1920 p->subgroupSize = RADV_SUBGROUP_SIZE;
1921 p->subgroupSupportedStages = VK_SHADER_STAGE_ALL_GRAPHICS | VK_SHADER_STAGE_COMPUTE_BIT;
1922 p->subgroupSupportedOperations =
1923 VK_SUBGROUP_FEATURE_BASIC_BIT | VK_SUBGROUP_FEATURE_VOTE_BIT |
1924 VK_SUBGROUP_FEATURE_ARITHMETIC_BIT | VK_SUBGROUP_FEATURE_BALLOT_BIT |
1925 VK_SUBGROUP_FEATURE_CLUSTERED_BIT | VK_SUBGROUP_FEATURE_QUAD_BIT |
1926 VK_SUBGROUP_FEATURE_SHUFFLE_BIT | VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT;
1927 p->subgroupQuadOperationsInAllStages = true;
1928
1929 p->pointClippingBehavior = VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES;
1930 p->maxMultiviewViewCount = MAX_VIEWS;
1931 p->maxMultiviewInstanceIndex = INT_MAX;
1932 p->protectedNoFault = false;
1933 p->maxPerSetDescriptors = RADV_MAX_PER_SET_DESCRIPTORS;
1934 p->maxMemoryAllocationSize = RADV_MAX_MEMORY_ALLOCATION_SIZE;
1935 }
1936
1937 static void
1938 radv_get_physical_device_properties_1_2(struct radv_physical_device *pdevice,
1939 VkPhysicalDeviceVulkan12Properties *p)
1940 {
1941 assert(p->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES);
1942
1943 p->driverID = VK_DRIVER_ID_MESA_RADV;
1944 snprintf(p->driverName, VK_MAX_DRIVER_NAME_SIZE, "radv");
1945 snprintf(p->driverInfo, VK_MAX_DRIVER_INFO_SIZE, "Mesa " PACKAGE_VERSION MESA_GIT_SHA1 "%s",
1946 radv_get_compiler_string(pdevice));
1947
1948 if (radv_is_conformant(pdevice)) {
1949 if (pdevice->rad_info.chip_class >= GFX10_3) {
1950 p->conformanceVersion = (VkConformanceVersion){
1951 .major = 1,
1952 .minor = 3,
1953 .subminor = 0,
1954 .patch = 0,
1955 };
1956 } else {
1957 p->conformanceVersion = (VkConformanceVersion){
1958 .major = 1,
1959 .minor = 2,
1960 .subminor = 7,
1961 .patch = 1,
1962 };
1963 }
1964 } else {
1965 p->conformanceVersion = (VkConformanceVersion){
1966 .major = 0,
1967 .minor = 0,
1968 .subminor = 0,
1969 .patch = 0,
1970 };
1971 }
1972
1973 /* On AMD hardware, denormals and rounding modes for fp16/fp64 are
1974 * controlled by the same config register.
1975 */
1976 if (pdevice->rad_info.has_packed_math_16bit) {
1977 p->denormBehaviorIndependence = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY_KHR;
1978 p->roundingModeIndependence = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY_KHR;
1979 } else {
1980 p->denormBehaviorIndependence = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL_KHR;
1981 p->roundingModeIndependence = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL_KHR;
1982 }
1983
1984 /* With LLVM, do not allow both preserving and flushing denorms because
1985 * different shaders in the same pipeline can have different settings and
1986 * this won't work for merged shaders. To make it work, this requires LLVM
1987 * support for changing the register. The same logic applies for the
1988 * rounding modes because they are configured with the same config
1989 * register.
1990 */
1991 p->shaderDenormFlushToZeroFloat32 = true;
1992 p->shaderDenormPreserveFloat32 = !pdevice->use_llvm;
1993 p->shaderRoundingModeRTEFloat32 = true;
1994 p->shaderRoundingModeRTZFloat32 = !pdevice->use_llvm;
1995 p->shaderSignedZeroInfNanPreserveFloat32 = true;
1996
1997 p->shaderDenormFlushToZeroFloat16 =
1998 pdevice->rad_info.has_packed_math_16bit && !pdevice->use_llvm;
1999 p->shaderDenormPreserveFloat16 = pdevice->rad_info.has_packed_math_16bit;
2000 p->shaderRoundingModeRTEFloat16 = pdevice->rad_info.has_packed_math_16bit;
2001 p->shaderRoundingModeRTZFloat16 = pdevice->rad_info.has_packed_math_16bit && !pdevice->use_llvm;
2002 p->shaderSignedZeroInfNanPreserveFloat16 = pdevice->rad_info.has_packed_math_16bit;
2003
2004 p->shaderDenormFlushToZeroFloat64 = pdevice->rad_info.chip_class >= GFX8 && !pdevice->use_llvm;
2005 p->shaderDenormPreserveFloat64 = pdevice->rad_info.chip_class >= GFX8;
2006 p->shaderRoundingModeRTEFloat64 = pdevice->rad_info.chip_class >= GFX8;
2007 p->shaderRoundingModeRTZFloat64 = pdevice->rad_info.chip_class >= GFX8 && !pdevice->use_llvm;
2008 p->shaderSignedZeroInfNanPreserveFloat64 = pdevice->rad_info.chip_class >= GFX8;
2009
2010 p->maxUpdateAfterBindDescriptorsInAllPools = UINT32_MAX / 64;
2011 p->shaderUniformBufferArrayNonUniformIndexingNative = false;
2012 p->shaderSampledImageArrayNonUniformIndexingNative = false;
2013 p->shaderStorageBufferArrayNonUniformIndexingNative = false;
2014 p->shaderStorageImageArrayNonUniformIndexingNative = false;
2015 p->shaderInputAttachmentArrayNonUniformIndexingNative = false;
2016 p->robustBufferAccessUpdateAfterBind = true;
2017 p->quadDivergentImplicitLod = false;
2018
2019 size_t max_descriptor_set_size =
2020 ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS -
2021 MAX_INLINE_UNIFORM_BLOCK_SIZE * MAX_INLINE_UNIFORM_BLOCK_COUNT) /
2022 (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
2023 32 /* storage buffer, 32 due to potential space wasted on alignment */ +
2024 32 /* sampler, largest when combined with image */ + 64 /* sampled image */ +
2025 64 /* storage image */);
2026 p->maxPerStageDescriptorUpdateAfterBindSamplers = max_descriptor_set_size;
2027 p->maxPerStageDescriptorUpdateAfterBindUniformBuffers = max_descriptor_set_size;
2028 p->maxPerStageDescriptorUpdateAfterBindStorageBuffers = max_descriptor_set_size;
2029 p->maxPerStageDescriptorUpdateAfterBindSampledImages = max_descriptor_set_size;
2030 p->maxPerStageDescriptorUpdateAfterBindStorageImages = max_descriptor_set_size;
2031 p->maxPerStageDescriptorUpdateAfterBindInputAttachments = max_descriptor_set_size;
2032 p->maxPerStageUpdateAfterBindResources = max_descriptor_set_size;
2033 p->maxDescriptorSetUpdateAfterBindSamplers = max_descriptor_set_size;
2034 p->maxDescriptorSetUpdateAfterBindUniformBuffers = max_descriptor_set_size;
2035 p->maxDescriptorSetUpdateAfterBindUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS;
2036 p->maxDescriptorSetUpdateAfterBindStorageBuffers = max_descriptor_set_size;
2037 p->maxDescriptorSetUpdateAfterBindStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS;
2038 p->maxDescriptorSetUpdateAfterBindSampledImages = max_descriptor_set_size;
2039 p->maxDescriptorSetUpdateAfterBindStorageImages = max_descriptor_set_size;
2040 p->maxDescriptorSetUpdateAfterBindInputAttachments = max_descriptor_set_size;
2041
2042 /* We support all of the depth resolve modes */
2043 p->supportedDepthResolveModes = VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR |
2044 VK_RESOLVE_MODE_AVERAGE_BIT_KHR | VK_RESOLVE_MODE_MIN_BIT_KHR |
2045 VK_RESOLVE_MODE_MAX_BIT_KHR;
2046
2047 /* Average doesn't make sense for stencil so we don't support that */
2048 p->supportedStencilResolveModes = VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR |
2049 VK_RESOLVE_MODE_MIN_BIT_KHR | VK_RESOLVE_MODE_MAX_BIT_KHR;
2050
2051 p->independentResolveNone = true;
2052 p->independentResolve = true;
2053
2054 /* GFX6-8 only support single channel min/max filter. */
2055 p->filterMinmaxImageComponentMapping = pdevice->rad_info.chip_class >= GFX9;
2056 p->filterMinmaxSingleComponentFormats = true;
2057
2058 p->maxTimelineSemaphoreValueDifference = UINT64_MAX;
2059
2060 p->framebufferIntegerColorSampleCounts = VK_SAMPLE_COUNT_1_BIT;
2061 }
2062
2063 static void
2064 radv_get_physical_device_properties_1_3(struct radv_physical_device *pdevice,
2065 VkPhysicalDeviceVulkan13Properties *p)
2066 {
2067 assert(p->sType == VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_PROPERTIES);
2068
2069 p->minSubgroupSize = 64;
2070 p->maxSubgroupSize = 64;
2071 p->maxComputeWorkgroupSubgroups = UINT32_MAX;
2072 p->requiredSubgroupSizeStages = 0;
2073 if (pdevice->rad_info.chip_class >= GFX10) {
2074 /* Only GFX10+ supports wave32. */
2075 p->minSubgroupSize = 32;
2076 p->requiredSubgroupSizeStages = VK_SHADER_STAGE_COMPUTE_BIT;
2077 }
2078
2079 p->maxInlineUniformBlockSize = MAX_INLINE_UNIFORM_BLOCK_SIZE;
2080 p->maxPerStageDescriptorInlineUniformBlocks = MAX_INLINE_UNIFORM_BLOCK_SIZE * MAX_SETS;
2081 p->maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks = MAX_INLINE_UNIFORM_BLOCK_SIZE * MAX_SETS;
2082 p->maxDescriptorSetInlineUniformBlocks = MAX_INLINE_UNIFORM_BLOCK_COUNT;
2083 p->maxDescriptorSetUpdateAfterBindInlineUniformBlocks = MAX_INLINE_UNIFORM_BLOCK_COUNT;
2084 p->maxInlineUniformTotalSize = UINT16_MAX;
2085
2086 bool accel = pdevice->rad_info.has_accelerated_dot_product;
2087 p->integerDotProduct8BitUnsignedAccelerated = accel;
2088 p->integerDotProduct8BitSignedAccelerated = accel;
2089 p->integerDotProduct8BitMixedSignednessAccelerated = false;
2090 p->integerDotProduct4x8BitPackedUnsignedAccelerated = accel;
2091 p->integerDotProduct4x8BitPackedSignedAccelerated = accel;
2092 p->integerDotProduct4x8BitPackedMixedSignednessAccelerated = false;
2093 p->integerDotProduct16BitUnsignedAccelerated = accel;
2094 p->integerDotProduct16BitSignedAccelerated = accel;
2095 p->integerDotProduct16BitMixedSignednessAccelerated = false;
2096 p->integerDotProduct32BitUnsignedAccelerated = false;
2097 p->integerDotProduct32BitSignedAccelerated = false;
2098 p->integerDotProduct32BitMixedSignednessAccelerated = false;
2099 p->integerDotProduct64BitUnsignedAccelerated = false;
2100 p->integerDotProduct64BitSignedAccelerated = false;
2101 p->integerDotProduct64BitMixedSignednessAccelerated = false;
2102 p->integerDotProductAccumulatingSaturating8BitUnsignedAccelerated = accel;
2103 p->integerDotProductAccumulatingSaturating8BitSignedAccelerated = accel;
2104 p->integerDotProductAccumulatingSaturating8BitMixedSignednessAccelerated = false;
2105 p->integerDotProductAccumulatingSaturating4x8BitPackedUnsignedAccelerated = accel;
2106 p->integerDotProductAccumulatingSaturating4x8BitPackedSignedAccelerated = accel;
2107 p->integerDotProductAccumulatingSaturating4x8BitPackedMixedSignednessAccelerated = false;
2108 p->integerDotProductAccumulatingSaturating16BitUnsignedAccelerated = accel;
2109 p->integerDotProductAccumulatingSaturating16BitSignedAccelerated = accel;
2110 p->integerDotProductAccumulatingSaturating16BitMixedSignednessAccelerated = false;
2111 p->integerDotProductAccumulatingSaturating32BitUnsignedAccelerated = false;
2112 p->integerDotProductAccumulatingSaturating32BitSignedAccelerated = false;
2113 p->integerDotProductAccumulatingSaturating32BitMixedSignednessAccelerated = false;
2114 p->integerDotProductAccumulatingSaturating64BitUnsignedAccelerated = false;
2115 p->integerDotProductAccumulatingSaturating64BitSignedAccelerated = false;
2116 p->integerDotProductAccumulatingSaturating64BitMixedSignednessAccelerated = false;
2117
2118 p->storageTexelBufferOffsetAlignmentBytes = 4;
2119 p->storageTexelBufferOffsetSingleTexelAlignment = true;
2120 p->uniformTexelBufferOffsetAlignmentBytes = 4;
2121 p->uniformTexelBufferOffsetSingleTexelAlignment = true;
2122
2123 p->maxBufferSize = RADV_MAX_MEMORY_ALLOCATION_SIZE;
2124 }
2125
2126 VKAPI_ATTR void VKAPI_CALL
2127 radv_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
2128 VkPhysicalDeviceProperties2 *pProperties)
2129 {
2130 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
2131 radv_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
2132
2133 VkPhysicalDeviceVulkan11Properties core_1_1 = {
2134 .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES,
2135 };
2136 radv_get_physical_device_properties_1_1(pdevice, &core_1_1);
2137
2138 VkPhysicalDeviceVulkan12Properties core_1_2 = {
2139 .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES,
2140 };
2141 radv_get_physical_device_properties_1_2(pdevice, &core_1_2);
2142
2143 VkPhysicalDeviceVulkan13Properties core_1_3 = {
2144 .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_PROPERTIES,
2145 };
2146 radv_get_physical_device_properties_1_3(pdevice, &core_1_3);
2147
2148 vk_foreach_struct(ext, pProperties->pNext)
2149 {
2150 if (vk_get_physical_device_core_1_1_property_ext(ext, &core_1_1))
2151 continue;
2152 if (vk_get_physical_device_core_1_2_property_ext(ext, &core_1_2))
2153 continue;
2154 if (vk_get_physical_device_core_1_3_property_ext(ext, &core_1_3))
2155 continue;
2156
2157 switch (ext->sType) {
2158 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
2159 VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
2160 (VkPhysicalDevicePushDescriptorPropertiesKHR *)ext;
2161 properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
2162 break;
2163 }
2164 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT: {
2165 VkPhysicalDeviceDiscardRectanglePropertiesEXT *properties =
2166 (VkPhysicalDeviceDiscardRectanglePropertiesEXT *)ext;
2167 properties->maxDiscardRectangles = MAX_DISCARD_RECTANGLES;
2168 break;
2169 }
2170 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT: {
2171 VkPhysicalDeviceExternalMemoryHostPropertiesEXT *properties =
2172 (VkPhysicalDeviceExternalMemoryHostPropertiesEXT *)ext;
2173 properties->minImportedHostPointerAlignment = 4096;
2174 break;
2175 }
2176 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD: {
2177 VkPhysicalDeviceShaderCorePropertiesAMD *properties =
2178 (VkPhysicalDeviceShaderCorePropertiesAMD *)ext;
2179
2180 /* Shader engines. */
2181 properties->shaderEngineCount = pdevice->rad_info.max_se;
2182 properties->shaderArraysPerEngineCount = pdevice->rad_info.max_sa_per_se;
2183 properties->computeUnitsPerShaderArray = pdevice->rad_info.min_good_cu_per_sa;
2184 properties->simdPerComputeUnit = pdevice->rad_info.num_simd_per_compute_unit;
2185 properties->wavefrontsPerSimd = pdevice->rad_info.max_wave64_per_simd;
2186 properties->wavefrontSize = 64;
2187
2188 /* SGPR. */
2189 properties->sgprsPerSimd = pdevice->rad_info.num_physical_sgprs_per_simd;
2190 properties->minSgprAllocation = pdevice->rad_info.min_sgpr_alloc;
2191 properties->maxSgprAllocation = pdevice->rad_info.max_sgpr_alloc;
2192 properties->sgprAllocationGranularity = pdevice->rad_info.sgpr_alloc_granularity;
2193
2194 /* VGPR. */
2195 properties->vgprsPerSimd = pdevice->rad_info.num_physical_wave64_vgprs_per_simd;
2196 properties->minVgprAllocation = pdevice->rad_info.min_wave64_vgpr_alloc;
2197 properties->maxVgprAllocation = pdevice->rad_info.max_vgpr_alloc;
2198 properties->vgprAllocationGranularity = pdevice->rad_info.wave64_vgpr_alloc_granularity;
2199 break;
2200 }
2201 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_2_AMD: {
2202 VkPhysicalDeviceShaderCoreProperties2AMD *properties =
2203 (VkPhysicalDeviceShaderCoreProperties2AMD *)ext;
2204
2205 properties->shaderCoreFeatures = 0;
2206 properties->activeComputeUnitCount = pdevice->rad_info.num_good_compute_units;
2207 break;
2208 }
2209 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT: {
2210 VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *properties =
2211 (VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *)ext;
2212 properties->maxVertexAttribDivisor = UINT32_MAX;
2213 break;
2214 }
2215 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT: {
2216 VkPhysicalDeviceConservativeRasterizationPropertiesEXT *properties =
2217 (VkPhysicalDeviceConservativeRasterizationPropertiesEXT *)ext;
2218 properties->primitiveOverestimationSize = 0;
2219 properties->maxExtraPrimitiveOverestimationSize = 0;
2220 properties->extraPrimitiveOverestimationSizeGranularity = 0;
2221 properties->primitiveUnderestimation = false;
2222 properties->conservativePointAndLineRasterization = false;
2223 properties->degenerateTrianglesRasterized = true;
2224 properties->degenerateLinesRasterized = false;
2225 properties->fullyCoveredFragmentShaderInputVariable = false;
2226 properties->conservativeRasterizationPostDepthCoverage = false;
2227 break;
2228 }
2229 #ifndef _WIN32
2230 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT: {
2231 VkPhysicalDevicePCIBusInfoPropertiesEXT *properties =
2232 (VkPhysicalDevicePCIBusInfoPropertiesEXT *)ext;
2233 properties->pciDomain = pdevice->bus_info.domain;
2234 properties->pciBus = pdevice->bus_info.bus;
2235 properties->pciDevice = pdevice->bus_info.dev;
2236 properties->pciFunction = pdevice->bus_info.func;
2237 break;
2238 }
2239 #endif
2240 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT: {
2241 VkPhysicalDeviceTransformFeedbackPropertiesEXT *properties =
2242 (VkPhysicalDeviceTransformFeedbackPropertiesEXT *)ext;
2243 properties->maxTransformFeedbackStreams = MAX_SO_STREAMS;
2244 properties->maxTransformFeedbackBuffers = MAX_SO_BUFFERS;
2245 properties->maxTransformFeedbackBufferSize = UINT32_MAX;
2246 properties->maxTransformFeedbackStreamDataSize = 512;
2247 properties->maxTransformFeedbackBufferDataSize = 512;
2248 properties->maxTransformFeedbackBufferDataStride = 512;
2249 properties->transformFeedbackQueries = !pdevice->use_ngg_streamout;
2250 properties->transformFeedbackStreamsLinesTriangles = !pdevice->use_ngg_streamout;
2251 properties->transformFeedbackRasterizationStreamSelect = false;
2252 properties->transformFeedbackDraw = true;
2253 break;
2254 }
2255 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT: {
2256 VkPhysicalDeviceSampleLocationsPropertiesEXT *properties =
2257 (VkPhysicalDeviceSampleLocationsPropertiesEXT *)ext;
2258 properties->sampleLocationSampleCounts = VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT |
2259 VK_SAMPLE_COUNT_8_BIT;
2260 properties->maxSampleLocationGridSize = (VkExtent2D){2, 2};
2261 properties->sampleLocationCoordinateRange[0] = 0.0f;
2262 properties->sampleLocationCoordinateRange[1] = 0.9375f;
2263 properties->sampleLocationSubPixelBits = 4;
2264 properties->variableSampleLocations = false;
2265 break;
2266 }
2267 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT: {
2268 VkPhysicalDeviceLineRasterizationPropertiesEXT *props =
2269 (VkPhysicalDeviceLineRasterizationPropertiesEXT *)ext;
2270 props->lineSubPixelPrecisionBits = 4;
2271 break;
2272 }
2273 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT: {
2274 VkPhysicalDeviceRobustness2PropertiesEXT *properties =
2275 (VkPhysicalDeviceRobustness2PropertiesEXT *)ext;
2276 properties->robustStorageBufferAccessSizeAlignment = 4;
2277 properties->robustUniformBufferAccessSizeAlignment = 4;
2278 break;
2279 }
2280 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_PROPERTIES_EXT: {
2281 VkPhysicalDeviceCustomBorderColorPropertiesEXT *props =
2282 (VkPhysicalDeviceCustomBorderColorPropertiesEXT *)ext;
2283 props->maxCustomBorderColorSamplers = RADV_BORDER_COLOR_COUNT;
2284 break;
2285 }
2286 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_PROPERTIES_KHR: {
2287 VkPhysicalDeviceFragmentShadingRatePropertiesKHR *props =
2288 (VkPhysicalDeviceFragmentShadingRatePropertiesKHR *)ext;
2289 props->minFragmentShadingRateAttachmentTexelSize = (VkExtent2D){8, 8};
2290 props->maxFragmentShadingRateAttachmentTexelSize = (VkExtent2D){8, 8};
2291 props->maxFragmentShadingRateAttachmentTexelSizeAspectRatio = 1;
2292 props->primitiveFragmentShadingRateWithMultipleViewports = true;
2293 props->layeredShadingRateAttachments = false; /* TODO */
2294 props->fragmentShadingRateNonTrivialCombinerOps = true;
2295 props->maxFragmentSize = (VkExtent2D){2, 2};
2296 props->maxFragmentSizeAspectRatio = 2;
2297 props->maxFragmentShadingRateCoverageSamples = 32;
2298 props->maxFragmentShadingRateRasterizationSamples = VK_SAMPLE_COUNT_8_BIT;
2299 props->fragmentShadingRateWithShaderDepthStencilWrites = false;
2300 props->fragmentShadingRateWithSampleMask = true;
2301 props->fragmentShadingRateWithShaderSampleMask = false;
2302 props->fragmentShadingRateWithConservativeRasterization = true;
2303 props->fragmentShadingRateWithFragmentShaderInterlock = false;
2304 props->fragmentShadingRateWithCustomSampleLocations = false;
2305 props->fragmentShadingRateStrictMultiplyCombiner = true;
2306 break;
2307 }
2308 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_PROPERTIES_EXT: {
2309 VkPhysicalDeviceProvokingVertexPropertiesEXT *props =
2310 (VkPhysicalDeviceProvokingVertexPropertiesEXT *)ext;
2311 props->provokingVertexModePerPipeline = true;
2312 props->transformFeedbackPreservesTriangleFanProvokingVertex = true;
2313 break;
2314 }
2315 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_PROPERTIES_KHR: {
2316 VkPhysicalDeviceAccelerationStructurePropertiesKHR *props =
2317 (VkPhysicalDeviceAccelerationStructurePropertiesKHR *)ext;
2318 props->maxGeometryCount = (1 << 24) - 1;
2319 props->maxInstanceCount = (1 << 24) - 1;
2320 props->maxPrimitiveCount = (1 << 29) - 1;
2321 props->maxPerStageDescriptorAccelerationStructures =
2322 pProperties->properties.limits.maxPerStageDescriptorStorageBuffers;
2323 props->maxPerStageDescriptorUpdateAfterBindAccelerationStructures =
2324 pProperties->properties.limits.maxPerStageDescriptorStorageBuffers;
2325 props->maxDescriptorSetAccelerationStructures =
2326 pProperties->properties.limits.maxDescriptorSetStorageBuffers;
2327 props->maxDescriptorSetUpdateAfterBindAccelerationStructures =
2328 pProperties->properties.limits.maxDescriptorSetStorageBuffers;
2329 props->minAccelerationStructureScratchOffsetAlignment = 128;
2330 break;
2331 }
2332 #ifndef _WIN32
2333 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRM_PROPERTIES_EXT: {
2334 VkPhysicalDeviceDrmPropertiesEXT *props = (VkPhysicalDeviceDrmPropertiesEXT *)ext;
2335 if (pdevice->available_nodes & (1 << DRM_NODE_PRIMARY)) {
2336 props->hasPrimary = true;
2337 props->primaryMajor = (int64_t)major(pdevice->primary_devid);
2338 props->primaryMinor = (int64_t)minor(pdevice->primary_devid);
2339 } else {
2340 props->hasPrimary = false;
2341 }
2342 if (pdevice->available_nodes & (1 << DRM_NODE_RENDER)) {
2343 props->hasRender = true;
2344 props->renderMajor = (int64_t)major(pdevice->render_devid);
2345 props->renderMinor = (int64_t)minor(pdevice->render_devid);
2346 } else {
2347 props->hasRender = false;
2348 }
2349 break;
2350 }
2351 #endif
2352 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTI_DRAW_PROPERTIES_EXT: {
2353 VkPhysicalDeviceMultiDrawPropertiesEXT *props = (VkPhysicalDeviceMultiDrawPropertiesEXT *)ext;
2354 props->maxMultiDrawCount = 2048;
2355 break;
2356 }
2357 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_PROPERTIES_KHR: {
2358 VkPhysicalDeviceRayTracingPipelinePropertiesKHR *props =
2359 (VkPhysicalDeviceRayTracingPipelinePropertiesKHR *)ext;
2360 props->shaderGroupHandleSize = RADV_RT_HANDLE_SIZE;
2361 props->maxRayRecursionDepth = 31; /* Minimum allowed for DXR. */
2362 props->maxShaderGroupStride = 16384; /* dummy */
2363 props->shaderGroupBaseAlignment = 16;
2364 props->shaderGroupHandleCaptureReplaySize = 16;
2365 props->maxRayDispatchInvocationCount = 1024 * 1024 * 64;
2366 props->shaderGroupHandleAlignment = 16;
2367 props->maxRayHitAttributeSize = RADV_MAX_HIT_ATTRIB_SIZE;
2368 break;
2369 }
2370 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_PROPERTIES_KHR: {
2371 VkPhysicalDeviceMaintenance4PropertiesKHR *properties =
2372 (VkPhysicalDeviceMaintenance4PropertiesKHR *)ext;
2373 properties->maxBufferSize = RADV_MAX_MEMORY_ALLOCATION_SIZE;
2374 break;
2375 }
2376 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_NV: {
2377 VkPhysicalDeviceMeshShaderPropertiesNV *properties =
2378 (VkPhysicalDeviceMeshShaderPropertiesNV *)ext;
2379
2380 /* Task shader limitations:
2381 * Same as compute, because TS are compiled to CS.
2382 */
2383 properties->maxDrawMeshTasksCount = 65535;
2384 properties->maxTaskTotalMemorySize = 65536;
2385 properties->maxTaskWorkGroupInvocations = 1024;
2386 properties->maxTaskWorkGroupSize[0] = 1024;
2387 properties->maxTaskWorkGroupSize[1] = 1024;
2388 properties->maxTaskWorkGroupSize[2] = 1024;
2389 properties->maxTaskOutputCount = 65535;
2390
2391 /* Mesh shader limitations:
2392 * Same as NGG, because MS are compiled to NGG.
2393 */
2394 properties->maxMeshMultiviewViewCount = MAX_VIEWS;
2395 properties->maxMeshOutputPrimitives = 256;
2396 properties->maxMeshOutputVertices = 256;
2397 properties->maxMeshTotalMemorySize = 31 * 1024; /* Reserve 1K for prim indices, etc. */
2398 properties->maxMeshWorkGroupInvocations = 256;
2399 properties->maxMeshWorkGroupSize[0] = 256;
2400 properties->maxMeshWorkGroupSize[1] = 256;
2401 properties->maxMeshWorkGroupSize[2] = 256;
2402 properties->meshOutputPerPrimitiveGranularity = 1;
2403 properties->meshOutputPerVertexGranularity = 1;
2404
2405 break;
2406 }
2407 default:
2408 break;
2409 }
2410 }
2411 }
2412
2413 static void
2414 radv_get_physical_device_queue_family_properties(struct radv_physical_device *pdevice,
2415 uint32_t *pCount,
2416 VkQueueFamilyProperties **pQueueFamilyProperties)
2417 {
2418 int num_queue_families = 1;
2419 int idx;
2420 if (pdevice->rad_info.num_rings[RING_COMPUTE] > 0 &&
2421 !(pdevice->instance->debug_flags & RADV_DEBUG_NO_COMPUTE_QUEUE))
2422 num_queue_families++;
2423
2424 if (pQueueFamilyProperties == NULL) {
2425 *pCount = num_queue_families;
2426 return;
2427 }
2428
2429 if (!*pCount)
2430 return;
2431
2432 idx = 0;
2433 if (*pCount >= 1) {
2434 *pQueueFamilyProperties[idx] = (VkQueueFamilyProperties){
2435 .queueFlags = VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT |
2436 VK_QUEUE_SPARSE_BINDING_BIT,
2437 .queueCount = 1,
2438 .timestampValidBits = 64,
2439 .minImageTransferGranularity = (VkExtent3D){1, 1, 1},
2440 };
2441 idx++;
2442 }
2443
2444 if (pdevice->rad_info.num_rings[RING_COMPUTE] > 0 &&
2445 !(pdevice->instance->debug_flags & RADV_DEBUG_NO_COMPUTE_QUEUE)) {
2446 if (*pCount > idx) {
2447 *pQueueFamilyProperties[idx] = (VkQueueFamilyProperties){
2448 .queueFlags =
2449 VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT | VK_QUEUE_SPARSE_BINDING_BIT,
2450 .queueCount = pdevice->rad_info.num_rings[RING_COMPUTE],
2451 .timestampValidBits = 64,
2452 .minImageTransferGranularity = (VkExtent3D){1, 1, 1},
2453 };
2454 idx++;
2455 }
2456 }
2457 *pCount = idx;
2458 }
2459
2460 static const VkQueueGlobalPriorityEXT radv_global_queue_priorities[] = {
2461 VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT,
2462 VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT,
2463 VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT,
2464 VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT,
2465 };
2466
2467 VKAPI_ATTR void VKAPI_CALL
2468 radv_GetPhysicalDeviceQueueFamilyProperties2(VkPhysicalDevice physicalDevice, uint32_t *pCount,
2469 VkQueueFamilyProperties2 *pQueueFamilyProperties)
2470 {
2471 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
2472 if (!pQueueFamilyProperties) {
2473 radv_get_physical_device_queue_family_properties(pdevice, pCount, NULL);
2474 return;
2475 }
2476 VkQueueFamilyProperties *properties[] = {
2477 &pQueueFamilyProperties[0].queueFamilyProperties,
2478 &pQueueFamilyProperties[1].queueFamilyProperties,
2479 &pQueueFamilyProperties[2].queueFamilyProperties,
2480 };
2481 radv_get_physical_device_queue_family_properties(pdevice, pCount, properties);
2482 assert(*pCount <= 3);
2483
2484 for (uint32_t i = 0; i < *pCount; i++) {
2485 vk_foreach_struct(ext, pQueueFamilyProperties[i].pNext)
2486 {
2487 switch (ext->sType) {
2488 case VK_STRUCTURE_TYPE_QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES_EXT: {
2489 VkQueueFamilyGlobalPriorityPropertiesEXT *prop =
2490 (VkQueueFamilyGlobalPriorityPropertiesEXT *)ext;
2491 STATIC_ASSERT(ARRAY_SIZE(radv_global_queue_priorities) <= VK_MAX_GLOBAL_PRIORITY_SIZE_EXT);
2492 prop->priorityCount = ARRAY_SIZE(radv_global_queue_priorities);
2493 memcpy(&prop->priorities, radv_global_queue_priorities, sizeof(radv_global_queue_priorities));
2494 break;
2495 }
2496 default:
2497 break;
2498 }
2499 }
2500 }
2501 }
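/* Illustrative application-side sketch (not driver code): the entry point
 * above follows the standard Vulkan two-call idiom, so callers query the
 * count first and then the data, e.g.:
 *
 *   uint32_t count = 0;
 *   vkGetPhysicalDeviceQueueFamilyProperties2(phys_dev, &count, NULL);
 *   VkQueueFamilyProperties2 props[3] = {0};
 *   for (uint32_t i = 0; i < count; i++)
 *      props[i].sType = VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2;
 *   vkGetPhysicalDeviceQueueFamilyProperties2(phys_dev, &count, props);
 *
 * A fixed array of three entries is enough here because the helper above
 * never reports more than three families (see the assert).
 */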
2502
2503 static void
2504 radv_get_memory_budget_properties(VkPhysicalDevice physicalDevice,
2505 VkPhysicalDeviceMemoryBudgetPropertiesEXT *memoryBudget)
2506 {
2507 RADV_FROM_HANDLE(radv_physical_device, device, physicalDevice);
2508 VkPhysicalDeviceMemoryProperties *memory_properties = &device->memory_properties;
2509
2510 /* For all memory heaps, the computation of the budget is as follows:
2511 * heap_budget = heap_size - global_heap_usage + app_heap_usage
2512 *
2513 * The Vulkan spec 1.1.97 says that the budget should include any
2514 * currently allocated device memory.
2515 *
2516 * Note that the application heap usages are not really accurate (e.g.
2517 * in the presence of shared buffers).
2518 */
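/* Illustrative numbers only (not measured values): on a heap of 8 GiB with
 * 5 GiB allocated system-wide and 1 GiB of that belonging to this process,
 * the formula above yields a budget of 8 - 5 + 1 = 4 GiB, i.e. the process
 * keeps what it already owns plus whatever is still free.
 */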
2519 if (!device->rad_info.has_dedicated_vram) {
2520 /* On APUs, the driver exposes fake heaps to the application because the carveout is
2521 * usually too small for games, and the budgets need to be redistributed accordingly.
2522 */
2523
2524 assert(device->heaps == (RADV_HEAP_GTT | RADV_HEAP_VRAM_VIS));
2525 assert(device->memory_properties.memoryHeaps[0].flags == 0); /* GTT */
2526 assert(device->memory_properties.memoryHeaps[1].flags == VK_MEMORY_HEAP_DEVICE_LOCAL_BIT);
2527 uint8_t gtt_heap_idx = 0, vram_vis_heap_idx = 1;
2528
2529 /* Get the visible VRAM/GTT heap sizes and internal usages. */
2530 uint64_t gtt_heap_size = device->memory_properties.memoryHeaps[gtt_heap_idx].size;
2531 uint64_t vram_vis_heap_size = device->memory_properties.memoryHeaps[vram_vis_heap_idx].size;
2532
2533 uint64_t vram_vis_internal_usage = device->ws->query_value(device->ws, RADEON_ALLOCATED_VRAM_VIS) +
2534 device->ws->query_value(device->ws, RADEON_ALLOCATED_VRAM);
2535 uint64_t gtt_internal_usage = device->ws->query_value(device->ws, RADEON_ALLOCATED_GTT);
2536
2537 /* Compute the total heap size, internal and system usage. */
2538 uint64_t total_heap_size = vram_vis_heap_size + gtt_heap_size;
2539 uint64_t total_internal_usage = vram_vis_internal_usage + gtt_internal_usage;
2540 uint64_t total_system_usage = device->ws->query_value(device->ws, RADEON_VRAM_VIS_USAGE) +
2541 device->ws->query_value(device->ws, RADEON_GTT_USAGE);
2542
2543 uint64_t total_usage = MAX2(total_internal_usage, total_system_usage);
2544
2545 /* Compute the total free space that can be allocated for this process across all heaps. */
2546 uint64_t total_free_space = total_heap_size - MIN2(total_heap_size, total_usage);
2547
2548 /* Compute the remaining visible VRAM size for this process. */
2549 uint64_t vram_vis_free_space = vram_vis_heap_size - MIN2(vram_vis_heap_size, vram_vis_internal_usage);
2550
2551 /* Distribute the total free space (2/3rd as VRAM and 1/3rd as GTT) to match the heap sizes,
2552 * and align down to the page size to be conservative.
2553 */
2554 vram_vis_free_space = ROUND_DOWN_TO(MIN2((total_free_space * 2) / 3, vram_vis_free_space),
2555 device->rad_info.gart_page_size);
2556 uint64_t gtt_free_space = total_free_space - vram_vis_free_space;
2557
2558 memoryBudget->heapBudget[vram_vis_heap_idx] = vram_vis_free_space + vram_vis_internal_usage;
2559 memoryBudget->heapUsage[vram_vis_heap_idx] = vram_vis_internal_usage;
2560 memoryBudget->heapBudget[gtt_heap_idx] = gtt_free_space + gtt_internal_usage;
2561 memoryBudget->heapUsage[gtt_heap_idx] = gtt_internal_usage;
2562 } else {
2563 unsigned mask = device->heaps;
2564 unsigned heap = 0;
2565 while (mask) {
2566 uint64_t internal_usage = 0, system_usage = 0;
2567 unsigned type = 1u << u_bit_scan(&mask);
2568
2569 switch (type) {
2570 case RADV_HEAP_VRAM:
2571 internal_usage = device->ws->query_value(device->ws, RADEON_ALLOCATED_VRAM);
2572 system_usage = device->ws->query_value(device->ws, RADEON_VRAM_USAGE);
2573 break;
2574 case RADV_HEAP_VRAM_VIS:
2575 internal_usage = device->ws->query_value(device->ws, RADEON_ALLOCATED_VRAM_VIS);
2576 if (!(device->heaps & RADV_HEAP_VRAM))
2577 internal_usage += device->ws->query_value(device->ws, RADEON_ALLOCATED_VRAM);
2578 system_usage = device->ws->query_value(device->ws, RADEON_VRAM_VIS_USAGE);
2579 break;
2580 case RADV_HEAP_GTT:
2581 internal_usage = device->ws->query_value(device->ws, RADEON_ALLOCATED_GTT);
2582 system_usage = device->ws->query_value(device->ws, RADEON_GTT_USAGE);
2583 break;
2584 }
2585
2586 uint64_t total_usage = MAX2(internal_usage, system_usage);
2587
2588 uint64_t free_space = device->memory_properties.memoryHeaps[heap].size -
2589 MIN2(device->memory_properties.memoryHeaps[heap].size, total_usage);
2590 memoryBudget->heapBudget[heap] = free_space + internal_usage;
2591 memoryBudget->heapUsage[heap] = internal_usage;
2592 ++heap;
2593 }
2594
2595 assert(heap == memory_properties->memoryHeapCount);
2596 }
2597
2598 /* The heapBudget and heapUsage values must be zero for array elements
2599 * greater than or equal to
2600 * VkPhysicalDeviceMemoryProperties::memoryHeapCount.
2601 */
2602 for (uint32_t i = memory_properties->memoryHeapCount; i < VK_MAX_MEMORY_HEAPS; i++) {
2603 memoryBudget->heapBudget[i] = 0;
2604 memoryBudget->heapUsage[i] = 0;
2605 }
2606 }
2607
2608 VKAPI_ATTR void VKAPI_CALL
2609 radv_GetPhysicalDeviceMemoryProperties2(VkPhysicalDevice physicalDevice,
2610 VkPhysicalDeviceMemoryProperties2 *pMemoryProperties)
2611 {
2612 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
2613
2614 pMemoryProperties->memoryProperties.memoryTypeCount = pdevice->memory_properties.memoryTypeCount;
2615 for (uint32_t i = 0; i < pdevice->memory_properties.memoryTypeCount; i++) {
2616 pMemoryProperties->memoryProperties.memoryTypes[i] = (VkMemoryType) {
2617 .propertyFlags = pdevice->memory_properties.memoryTypes[i].propertyFlags,
2618 .heapIndex = pdevice->memory_properties.memoryTypes[i].heapIndex,
2619 };
2620 }
2621
2622 pMemoryProperties->memoryProperties.memoryHeapCount = pdevice->memory_properties.memoryHeapCount;
2623 for (uint32_t i = 0; i < pdevice->memory_properties.memoryHeapCount; i++) {
2624 pMemoryProperties->memoryProperties.memoryHeaps[i] = (VkMemoryHeap) {
2625 .size = pdevice->memory_properties.memoryHeaps[i].size,
2626 .flags = pdevice->memory_properties.memoryHeaps[i].flags,
2627 };
2628 }
2629
2630 VkPhysicalDeviceMemoryBudgetPropertiesEXT *memory_budget =
2631 vk_find_struct(pMemoryProperties->pNext, PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT);
2632 if (memory_budget)
2633 radv_get_memory_budget_properties(physicalDevice, memory_budget);
2634 }
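/* Illustrative application-side sketch (not driver code): the budget path is
 * only taken when the caller chains the EXT struct, e.g.:
 *
 *   VkPhysicalDeviceMemoryBudgetPropertiesEXT budget = {
 *      .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT};
 *   VkPhysicalDeviceMemoryProperties2 mem_props = {
 *      .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2,
 *      .pNext = &budget};
 *   vkGetPhysicalDeviceMemoryProperties2(phys_dev, &mem_props);
 *
 * after which budget.heapBudget[] and budget.heapUsage[] hold the per-heap
 * values filled in by radv_get_memory_budget_properties().
 */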
2635
2636 VKAPI_ATTR VkResult VKAPI_CALL
2637 radv_GetMemoryHostPointerPropertiesEXT(
2638 VkDevice _device, VkExternalMemoryHandleTypeFlagBits handleType, const void *pHostPointer,
2639 VkMemoryHostPointerPropertiesEXT *pMemoryHostPointerProperties)
2640 {
2641 RADV_FROM_HANDLE(radv_device, device, _device);
2642
2643 switch (handleType) {
2644 case VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT: {
2645 const struct radv_physical_device *physical_device = device->physical_device;
2646 uint32_t memoryTypeBits = 0;
2647 for (int i = 0; i < physical_device->memory_properties.memoryTypeCount; i++) {
2648 if (physical_device->memory_domains[i] == RADEON_DOMAIN_GTT &&
2649 !(physical_device->memory_flags[i] & RADEON_FLAG_GTT_WC)) {
2650 memoryTypeBits = (1 << i);
2651 break;
2652 }
2653 }
2654 pMemoryHostPointerProperties->memoryTypeBits = memoryTypeBits;
2655 return VK_SUCCESS;
2656 }
2657 default:
2658 return VK_ERROR_INVALID_EXTERNAL_HANDLE;
2659 }
2660 }
2661
2662 static enum radeon_ctx_priority
2663 radv_get_queue_global_priority(const VkDeviceQueueGlobalPriorityCreateInfoEXT *pObj)
2664 {
2665 /* Default to MEDIUM when a specific global priority isn't requested */
2666 if (!pObj)
2667 return RADEON_CTX_PRIORITY_MEDIUM;
2668
2669 switch (pObj->globalPriority) {
2670 case VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT:
2671 return RADEON_CTX_PRIORITY_REALTIME;
2672 case VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT:
2673 return RADEON_CTX_PRIORITY_HIGH;
2674 case VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT:
2675 return RADEON_CTX_PRIORITY_MEDIUM;
2676 case VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT:
2677 return RADEON_CTX_PRIORITY_LOW;
2678 default:
2679 unreachable("Illegal global priority value");
2680 return RADEON_CTX_PRIORITY_INVALID;
2681 }
2682 }
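/* Illustrative application-side sketch (not driver code, and the variable
 * names are placeholders): the struct handled above is chained into the
 * queue create info at vkCreateDevice() time, e.g.:
 *
 *   VkDeviceQueueGlobalPriorityCreateInfoEXT prio = {
 *      .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT,
 *      .globalPriority = VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT};
 *   VkDeviceQueueCreateInfo queue_info = {
 *      .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
 *      .pNext = &prio,
 *      .queueFamilyIndex = family,
 *      .queueCount = 1,
 *      .pQueuePriorities = &queue_priority};
 *
 * When nothing is chained, the helper above defaults to MEDIUM.
 */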
2683
2684 int
2685 radv_queue_init(struct radv_device *device, struct radv_queue *queue, int idx,
2686 const VkDeviceQueueCreateInfo *create_info,
2687 const VkDeviceQueueGlobalPriorityCreateInfoEXT *global_priority)
2688 {
2689 queue->device = device;
2690 queue->priority = radv_get_queue_global_priority(global_priority);
2691 queue->hw_ctx = device->hw_ctx[queue->priority];
2692 queue->qf = vk_queue_to_radv(device->physical_device, create_info->queueFamilyIndex);
2693
2694 VkResult result = vk_queue_init(&queue->vk, &device->vk, create_info, idx);
2695 if (result != VK_SUCCESS)
2696 return result;
2697
2698 queue->vk.driver_submit = radv_queue_submit;
2699
2700 return VK_SUCCESS;
2701 }
2702
2703 static void
2704 radv_queue_finish(struct radv_queue *queue)
2705 {
2706 if (queue->initial_full_flush_preamble_cs)
2707 queue->device->ws->cs_destroy(queue->initial_full_flush_preamble_cs);
2708 if (queue->initial_preamble_cs)
2709 queue->device->ws->cs_destroy(queue->initial_preamble_cs);
2710 if (queue->continue_preamble_cs)
2711 queue->device->ws->cs_destroy(queue->continue_preamble_cs);
2712 if (queue->descriptor_bo)
2713 queue->device->ws->buffer_destroy(queue->device->ws, queue->descriptor_bo);
2714 if (queue->scratch_bo)
2715 queue->device->ws->buffer_destroy(queue->device->ws, queue->scratch_bo);
2716 if (queue->esgs_ring_bo)
2717 queue->device->ws->buffer_destroy(queue->device->ws, queue->esgs_ring_bo);
2718 if (queue->gsvs_ring_bo)
2719 queue->device->ws->buffer_destroy(queue->device->ws, queue->gsvs_ring_bo);
2720 if (queue->tess_rings_bo)
2721 queue->device->ws->buffer_destroy(queue->device->ws, queue->tess_rings_bo);
2722 if (queue->gds_bo)
2723 queue->device->ws->buffer_destroy(queue->device->ws, queue->gds_bo);
2724 if (queue->gds_oa_bo)
2725 queue->device->ws->buffer_destroy(queue->device->ws, queue->gds_oa_bo);
2726 if (queue->compute_scratch_bo)
2727 queue->device->ws->buffer_destroy(queue->device->ws, queue->compute_scratch_bo);
2728
2729 vk_queue_finish(&queue->vk);
2730 }
2731
2732 static void
2733 radv_device_init_gs_info(struct radv_device *device)
2734 {
2735 device->gs_table_depth = ac_get_gs_table_depth(device->physical_device->rad_info.chip_class,
2736 device->physical_device->rad_info.family);
2737 }
2738
2739 static VkResult
2740 radv_device_init_border_color(struct radv_device *device)
2741 {
2742 VkResult result;
2743
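/* The custom border color palette lives in a small CPU-visible VRAM buffer that
 * is kept resident and persistently mapped for the device's lifetime.
 */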
2744 result = device->ws->buffer_create(
2745 device->ws, RADV_BORDER_COLOR_BUFFER_SIZE, 4096, RADEON_DOMAIN_VRAM,
2746 RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_READ_ONLY | RADEON_FLAG_NO_INTERPROCESS_SHARING,
2747 RADV_BO_PRIORITY_SHADER, 0, &device->border_color_data.bo);
2748
2749 if (result != VK_SUCCESS)
2750 return vk_error(device, result);
2751
2752 result = device->ws->buffer_make_resident(device->ws, device->border_color_data.bo, true);
2753 if (result != VK_SUCCESS)
2754 return vk_error(device, result);
2755
2756 device->border_color_data.colors_gpu_ptr = device->ws->buffer_map(device->border_color_data.bo);
2757 if (!device->border_color_data.colors_gpu_ptr)
2758 return vk_error(device, VK_ERROR_OUT_OF_DEVICE_MEMORY);
2759 mtx_init(&device->border_color_data.mutex, mtx_plain);
2760
2761 return VK_SUCCESS;
2762 }
2763
2764 static void
2765 radv_device_finish_border_color(struct radv_device *device)
2766 {
2767 if (device->border_color_data.bo) {
2768 device->ws->buffer_make_resident(device->ws, device->border_color_data.bo, false);
2769 device->ws->buffer_destroy(device->ws, device->border_color_data.bo);
2770
2771 mtx_destroy(&device->border_color_data.mutex);
2772 }
2773 }
2774
2775 static VkResult
2776 radv_device_init_vs_prologs(struct radv_device *device)
2777 {
2778 u_rwlock_init(&device->vs_prologs_lock);
2779 device->vs_prologs = _mesa_hash_table_create(NULL, &radv_hash_vs_prolog, &radv_cmp_vs_prolog);
2780 if (!device->vs_prologs)
2781 return vk_error(device->physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2782
2783 /* don't pre-compile prologs if we want to print them */
2784 if (device->instance->debug_flags & RADV_DEBUG_DUMP_PROLOGS)
2785 return VK_SUCCESS;
2786
2787 struct radv_vs_input_state state;
2788 state.nontrivial_divisors = 0;
2789 memset(state.offsets, 0, sizeof(state.offsets));
2790 state.alpha_adjust_lo = 0;
2791 state.alpha_adjust_hi = 0;
2792 memset(state.formats, 0, sizeof(state.formats));
2793
2794 struct radv_vs_prolog_key key;
2795 key.state = &state;
2796 key.misaligned_mask = 0;
2797 key.as_ls = false;
2798 key.is_ngg = device->physical_device->use_ngg;
2799 key.next_stage = MESA_SHADER_VERTEX;
2800 key.wave32 = device->physical_device->ge_wave_size == 32;
2801
2802 for (unsigned i = 1; i <= MAX_VERTEX_ATTRIBS; i++) {
2803 state.attribute_mask = BITFIELD_MASK(i);
2804 state.instance_rate_inputs = 0;
2805
2806 key.num_attributes = i;
2807
2808 device->simple_vs_prologs[i - 1] = radv_create_vs_prolog(device, &key);
2809 if (!device->simple_vs_prologs[i - 1])
2810 return vk_error(device->physical_device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
2811 }
2812
2813 unsigned idx = 0;
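/* Pre-compile a prolog for every contiguous range of instance-rate attributes;
 * the assert below checks that radv_instance_rate_prolog_index() maps each
 * (num_attributes, instance_rate_inputs) pair to the same flat index used here.
 */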
2814 for (unsigned num_attributes = 1; num_attributes <= 16; num_attributes++) {
2815 state.attribute_mask = BITFIELD_MASK(num_attributes);
2816
2817 for (unsigned i = 0; i < num_attributes; i++)
2818 state.divisors[i] = 1;
2819
2820 for (unsigned count = 1; count <= num_attributes; count++) {
2821 for (unsigned start = 0; start <= (num_attributes - count); start++) {
2822 state.instance_rate_inputs = u_bit_consecutive(start, count);
2823
2824 key.num_attributes = num_attributes;
2825
2826 struct radv_shader_prolog *prolog = radv_create_vs_prolog(device, &key);
2827 if (!prolog)
2828 return vk_error(device->physical_device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
2829
2830 assert(idx ==
2831 radv_instance_rate_prolog_index(num_attributes, state.instance_rate_inputs));
2832 device->instance_rate_vs_prologs[idx++] = prolog;
2833 }
2834 }
2835 }
2836 assert(idx == ARRAY_SIZE(device->instance_rate_vs_prologs));
2837
2838 return VK_SUCCESS;
2839 }
2840
2841 static void
2842 radv_device_finish_vs_prologs(struct radv_device *device)
2843 {
2844 if (device->vs_prologs) {
2845 hash_table_foreach(device->vs_prologs, entry)
2846 {
2847 free((void *)entry->key);
2848 radv_prolog_destroy(device, entry->data);
2849 }
2850 _mesa_hash_table_destroy(device->vs_prologs, NULL);
2851 }
2852
2853 for (unsigned i = 0; i < ARRAY_SIZE(device->simple_vs_prologs); i++)
2854 radv_prolog_destroy(device, device->simple_vs_prologs[i]);
2855
2856 for (unsigned i = 0; i < ARRAY_SIZE(device->instance_rate_vs_prologs); i++)
2857 radv_prolog_destroy(device, device->instance_rate_vs_prologs[i]);
2858 }
2859
2860 VkResult
2861 radv_device_init_vrs_state(struct radv_device *device)
2862 {
2863 /* FIXME: 4k depth buffers should be large enough for now but we might want to adjust this
2864 * dynamically at some point.
2865 */
2866 uint32_t width = 4096, height = 4096;
2867 VkDeviceMemory mem;
2868 VkBuffer buffer;
2869 VkResult result;
2870 VkImage image;
2871
2872 VkImageCreateInfo image_create_info = {
2873 .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
2874 .imageType = VK_IMAGE_TYPE_2D,
2875 .format = VK_FORMAT_D16_UNORM,
2876 .extent = {width, height, 1},
2877 .mipLevels = 1,
2878 .arrayLayers = 1,
2879 .samples = VK_SAMPLE_COUNT_1_BIT,
2880 .tiling = VK_IMAGE_TILING_OPTIMAL,
2881 .usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
2882 .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
2883 .queueFamilyIndexCount = 0,
2884 .pQueueFamilyIndices = NULL,
2885 .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
2886 };
2887
2888 result = radv_CreateImage(radv_device_to_handle(device), &image_create_info,
2889 &device->meta_state.alloc, &image);
2890 if (result != VK_SUCCESS)
2891 return result;
2892
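/* The backing buffer is sized from the image's metadata (HTILE) surface rather
 * than the full image; presumably only the metadata is needed to hold the
 * forced shading rates.
 */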
2893 VkBufferCreateInfo buffer_create_info = {
2894 .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
2895 .size = radv_image_from_handle(image)->planes[0].surface.meta_size,
2896 .usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT,
2897 .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
2898 };
2899
2900 result = radv_CreateBuffer(radv_device_to_handle(device), &buffer_create_info,
2901 &device->meta_state.alloc, &buffer);
2902 if (result != VK_SUCCESS)
2903 goto fail_create;
2904
2905 VkBufferMemoryRequirementsInfo2 info = {
2906 .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2,
2907 .buffer = buffer,
2908 };
2909 VkMemoryRequirements2 mem_req = {
2910 .sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2,
2911 };
2912 radv_GetBufferMemoryRequirements2(radv_device_to_handle(device), &info, &mem_req);
2913
2914 VkMemoryAllocateInfo alloc_info = {
2915 .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
2916 .allocationSize = mem_req.memoryRequirements.size,
2917 };
2918
2919 result = radv_AllocateMemory(radv_device_to_handle(device), &alloc_info,
2920 &device->meta_state.alloc, &mem);
2921 if (result != VK_SUCCESS)
2922 goto fail_alloc;
2923
2924 VkBindBufferMemoryInfo bind_info = {
2925 .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,
2926 .buffer = buffer,
2927 .memory = mem,
2928 .memoryOffset = 0
2929 };
2930
2931 result = radv_BindBufferMemory2(radv_device_to_handle(device), 1, &bind_info);
2932 if (result != VK_SUCCESS)
2933 goto fail_bind;
2934
2935 device->vrs.image = radv_image_from_handle(image);
2936 device->vrs.buffer = radv_buffer_from_handle(buffer);
2937 device->vrs.mem = radv_device_memory_from_handle(mem);
2938
2939 return VK_SUCCESS;
2940
2941 fail_bind:
2942 radv_FreeMemory(radv_device_to_handle(device), mem, &device->meta_state.alloc);
2943 fail_alloc:
2944 radv_DestroyBuffer(radv_device_to_handle(device), buffer, &device->meta_state.alloc);
2945 fail_create:
2946 radv_DestroyImage(radv_device_to_handle(device), image, &device->meta_state.alloc);
2947
2948 return result;
2949 }
2950
2951 static void
2952 radv_device_finish_vrs_image(struct radv_device *device)
2953 {
2954 if (!device->vrs.image)
2955 return;
2956
2957 radv_FreeMemory(radv_device_to_handle(device), radv_device_memory_to_handle(device->vrs.mem),
2958 &device->meta_state.alloc);
2959 radv_DestroyBuffer(radv_device_to_handle(device), radv_buffer_to_handle(device->vrs.buffer),
2960 &device->meta_state.alloc);
2961 radv_DestroyImage(radv_device_to_handle(device), radv_image_to_handle(device->vrs.image),
2962 &device->meta_state.alloc);
2963 }
2964
2965 static VkResult
2966 radv_create_sync_for_memory(struct vk_device *device,
2967 VkDeviceMemory memory,
2968 bool signal_memory,
2969 struct vk_sync **sync_out)
2970 {
2971 return vk_sync_create(device, &vk_sync_dummy_type, 0, 1, sync_out);
2972 }
2973
2974 static enum radv_force_vrs
2975 radv_parse_vrs_rates(const char *str)
2976 {
2977 if (!strcmp(str, "2x2")) {
2978 return RADV_FORCE_VRS_2x2;
2979 } else if (!strcmp(str, "2x1")) {
2980 return RADV_FORCE_VRS_2x1;
2981 } else if (!strcmp(str, "1x2")) {
2982 return RADV_FORCE_VRS_1x2;
2983 } else if (!strcmp(str, "1x1")) {
2984 return RADV_FORCE_VRS_1x1;
2985 }
2986
2987 fprintf(stderr, "radv: Invalid VRS rates specified (valid values are 2x2, 2x1, 1x2 and 1x1)\n");
2988 return RADV_FORCE_VRS_1x1;
2989 }
2990
2991 static const char *
2992 radv_get_force_vrs_config_file(void)
2993 {
2994 return getenv("RADV_FORCE_VRS_CONFIG_FILE");
2995 }
2996
2997 static enum radv_force_vrs
2998 radv_parse_force_vrs_config_file(const char *config_file)
2999 {
3000 enum radv_force_vrs force_vrs = RADV_FORCE_VRS_1x1;
3001 char buf[4];
3002 FILE *f;
3003
3004 f = fopen(config_file, "r");
3005 if (!f) {
3006 fprintf(stderr, "radv: Can't open file: '%s'.\n", config_file);
3007 return force_vrs;
3008 }
3009
3010 if (fread(buf, sizeof(buf), 1, f) == 1) {
3011 buf[3] = '\0';
3012 force_vrs = radv_parse_vrs_rates(buf);
3013 }
3014
3015 fclose(f);
3016 return force_vrs;
3017 }
3018
3019 #ifdef __linux__
3020
3021 #define BUF_LEN ((10 * (sizeof(struct inotify_event) + NAME_MAX + 1)))
3022
3023 static int
3024 radv_notifier_thread_run(void *data)
3025 {
3026 struct radv_device *device = data;
3027 struct radv_notifier *notifier = &device->notifier;
3028 char buf[BUF_LEN];
3029
3030 while (!notifier->quit) {
3031 const char *file = radv_get_force_vrs_config_file();
3032 struct timespec tm = { .tv_nsec = 100000000 }; /* 100ms */
3033 int length, i = 0;
3034
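/* The inotify fd was created with IN_NONBLOCK, so this read() returns
 * immediately when no events are pending and the loop just sleeps below.
 */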
3035 length = read(notifier->fd, buf, BUF_LEN);
3036 while (i < length) {
3037 struct inotify_event *event = (struct inotify_event *)&buf[i];
3038
3039 i += sizeof(struct inotify_event) + event->len;
3040 if (event->mask & IN_MODIFY || event->mask & IN_DELETE_SELF) {
3041 /* Sleep 100ms for editors that use a temporary file and delete the original. */
3042 thrd_sleep(&tm, NULL);
3043 device->force_vrs = radv_parse_force_vrs_config_file(file);
3044
3045 fprintf(stderr, "radv: Updated the per-vertex VRS rate to '%d'.\n", device->force_vrs);
3046
3047 if (event->mask & IN_DELETE_SELF) {
3048 inotify_rm_watch(notifier->fd, notifier->watch);
3049 notifier->watch = inotify_add_watch(notifier->fd, file, IN_MODIFY | IN_DELETE_SELF);
3050 }
3051 }
3052 }
3053
3054 thrd_sleep(&tm, NULL);
3055 }
3056
3057 return 0;
3058 }
3059
3060 #endif
3061
3062 static int
3063 radv_device_init_notifier(struct radv_device *device)
3064 {
3065 #ifndef __linux__
3066 return true;
3067 #else
3068 struct radv_notifier *notifier = &device->notifier;
3069 const char *file = radv_get_force_vrs_config_file();
3070 int ret;
3071
3072 notifier->fd = inotify_init1(IN_NONBLOCK);
3073 if (notifier->fd < 0)
3074 return false;
3075
3076 notifier->watch = inotify_add_watch(notifier->fd, file, IN_MODIFY | IN_DELETE_SELF);
3077 if (notifier->watch < 0)
3078 goto fail_watch;
3079
3080 ret = thrd_create(&notifier->thread, radv_notifier_thread_run, device);
3081 if (ret)
3082 goto fail_thread;
3083
3084 return true;
3085
3086 fail_thread:
3087 inotify_rm_watch(notifier->fd, notifier->watch);
3088 fail_watch:
3089 close(notifier->fd);
3090
3091 return false;
3092 #endif
3093 }
3094
3095 static void
3096 radv_device_finish_notifier(struct radv_device *device)
3097 {
3098 #ifdef __linux__
3099 struct radv_notifier *notifier = &device->notifier;
3100
3101 if (!notifier->thread)
3102 return;
3103
3104 notifier->quit = true;
3105 thrd_join(notifier->thread, NULL);
3106 inotify_rm_watch(notifier->fd, notifier->watch);
3107 close(notifier->fd);
3108 #endif
3109 }
3110
3111 VKAPI_ATTR VkResult VKAPI_CALL
3112 radv_CreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCreateInfo,
3113 const VkAllocationCallbacks *pAllocator, VkDevice *pDevice)
3114 {
3115 RADV_FROM_HANDLE(radv_physical_device, physical_device, physicalDevice);
3116 VkResult result;
3117 struct radv_device *device;
3118
3119 bool keep_shader_info = false;
3120 bool robust_buffer_access = false;
3121 bool robust_buffer_access2 = false;
3122 bool overallocation_disallowed = false;
3123 bool custom_border_colors = false;
3124 bool attachment_vrs_enabled = false;
3125 bool image_float32_atomics = false;
3126 bool vs_prologs = false;
3127
3128 /* Check enabled features */
3129 if (pCreateInfo->pEnabledFeatures) {
3130 if (pCreateInfo->pEnabledFeatures->robustBufferAccess)
3131 robust_buffer_access = true;
3132 }
3133
3134 vk_foreach_struct_const(ext, pCreateInfo->pNext)
3135 {
3136 switch (ext->sType) {
3137 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2: {
3138 const VkPhysicalDeviceFeatures2 *features = (const void *)ext;
3139 if (features->features.robustBufferAccess)
3140 robust_buffer_access = true;
3141 break;
3142 }
3143 case VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD: {
3144 const VkDeviceMemoryOverallocationCreateInfoAMD *overallocation = (const void *)ext;
3145 if (overallocation->overallocationBehavior ==
3146 VK_MEMORY_OVERALLOCATION_BEHAVIOR_DISALLOWED_AMD)
3147 overallocation_disallowed = true;
3148 break;
3149 }
3150 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT: {
3151 const VkPhysicalDeviceCustomBorderColorFeaturesEXT *border_color_features =
3152 (const void *)ext;
3153 custom_border_colors = border_color_features->customBorderColors;
3154 break;
3155 }
3156 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_FEATURES_KHR: {
3157 const VkPhysicalDeviceFragmentShadingRateFeaturesKHR *vrs = (const void *)ext;
3158 attachment_vrs_enabled = vrs->attachmentFragmentShadingRate;
3159 break;
3160 }
3161 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT: {
3162 const VkPhysicalDeviceRobustness2FeaturesEXT *features = (const void *)ext;
3163 if (features->robustBufferAccess2)
3164 robust_buffer_access2 = true;
3165 break;
3166 }
3167 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_FEATURES_EXT: {
3168 const VkPhysicalDeviceShaderAtomicFloatFeaturesEXT *features = (const void *)ext;
3169 if (features->shaderImageFloat32Atomics ||
3170 features->sparseImageFloat32Atomics)
3171 image_float32_atomics = true;
3172 break;
3173 }
3174 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_2_FEATURES_EXT: {
3175 const VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT *features = (const void *)ext;
3176 if (features->shaderImageFloat32AtomicMinMax ||
3177 features->sparseImageFloat32AtomicMinMax)
3178 image_float32_atomics = true;
3179 break;
3180 }
3181 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_INPUT_DYNAMIC_STATE_FEATURES_EXT: {
3182 const VkPhysicalDeviceVertexInputDynamicStateFeaturesEXT *features = (const void *)ext;
3183 if (features->vertexInputDynamicState)
3184 vs_prologs = true;
3185 break;
3186 }
3187 default:
3188 break;
3189 }
3190 }
3191
3192 device = vk_zalloc2(&physical_device->instance->vk.alloc, pAllocator, sizeof(*device), 8,
3193 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
3194 if (!device)
3195 return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
3196
3197 struct vk_device_dispatch_table dispatch_table;
3198
3199 if (physical_device->instance->vk.app_info.app_name &&
3200 !strcmp(physical_device->instance->vk.app_info.app_name, "metroexodus")) {
3201 /* Metro Exodus (Linux native) calls vkGetSemaphoreCounterValue() with a NULL semaphore and it
3202 * crashes sometimes. Workaround this game bug by enabling an internal layer. Remove this
3203 * when the game is fixed.
3204 */
3205 vk_device_dispatch_table_from_entrypoints(&dispatch_table, &metro_exodus_device_entrypoints, true);
3206 vk_device_dispatch_table_from_entrypoints(&dispatch_table, &radv_device_entrypoints, false);
3207 } else if (radv_thread_trace_enabled()) {
3208 vk_device_dispatch_table_from_entrypoints(&dispatch_table, &sqtt_device_entrypoints, true);
3209 vk_device_dispatch_table_from_entrypoints(&dispatch_table, &radv_device_entrypoints, false);
3210 } else {
3211 vk_device_dispatch_table_from_entrypoints(&dispatch_table, &radv_device_entrypoints, true);
3212 }
3213 vk_device_dispatch_table_from_entrypoints(&dispatch_table, &wsi_device_entrypoints, false);
3214
3215 result =
3216 vk_device_init(&device->vk, &physical_device->vk, &dispatch_table, pCreateInfo, pAllocator);
3217 if (result != VK_SUCCESS) {
3218 vk_free(&device->vk.alloc, device);
3219 return result;
3220 }
3221
3222 device->instance = physical_device->instance;
3223 device->physical_device = physical_device;
3224 simple_mtx_init(&device->trace_mtx, mtx_plain);
3225
3226 device->ws = physical_device->ws;
3227 device->vk.create_sync_for_memory = radv_create_sync_for_memory;
3228 vk_device_set_drm_fd(&device->vk, device->ws->get_fd(device->ws));
3229
3230 /* With update after bind we can't attach bo's to the command buffer
3231 * from the descriptor set anymore, so we have to use a global BO list.
3232 */
3233 device->use_global_bo_list = (device->instance->perftest_flags & RADV_PERFTEST_BO_LIST) ||
3234 device->vk.enabled_extensions.EXT_descriptor_indexing ||
3235 device->vk.enabled_extensions.EXT_buffer_device_address ||
3236 device->vk.enabled_extensions.KHR_buffer_device_address ||
3237 device->vk.enabled_extensions.KHR_ray_tracing_pipeline ||
3238 device->vk.enabled_extensions.KHR_acceleration_structure ||
3239 device->vk.enabled_extensions.VALVE_descriptor_set_host_mapping;
3240
3241 device->robust_buffer_access = robust_buffer_access || robust_buffer_access2;
3242 device->robust_buffer_access2 = robust_buffer_access2;
3243
3244 device->attachment_vrs_enabled = attachment_vrs_enabled;
3245
3246 device->image_float32_atomics = image_float32_atomics;
3247
3248 radv_init_shader_arenas(device);
3249
3250 device->overallocation_disallowed = overallocation_disallowed;
3251 mtx_init(&device->overallocation_mutex, mtx_plain);
3252
3253 /* Create one context per queue priority. */
3254 for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
3255 const VkDeviceQueueCreateInfo *queue_create = &pCreateInfo->pQueueCreateInfos[i];
3256 const VkDeviceQueueGlobalPriorityCreateInfoEXT *global_priority =
3257 vk_find_struct_const(queue_create->pNext, DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT);
3258 enum radeon_ctx_priority priority = radv_get_queue_global_priority(global_priority);
3259
3260 if (device->hw_ctx[priority])
3261 continue;
3262
3263 result = device->ws->ctx_create(device->ws, priority, &device->hw_ctx[priority]);
3264 if (result != VK_SUCCESS)
3265 goto fail;
3266 }
3267
3268 for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
3269 const VkDeviceQueueCreateInfo *queue_create = &pCreateInfo->pQueueCreateInfos[i];
3270 uint32_t qfi = queue_create->queueFamilyIndex;
3271 const VkDeviceQueueGlobalPriorityCreateInfoEXT *global_priority =
3272 vk_find_struct_const(queue_create->pNext, DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT);
3273
3274 device->queues[qfi] =
3275 vk_alloc(&device->vk.alloc, queue_create->queueCount * sizeof(struct radv_queue), 8,
3276 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
3277 if (!device->queues[qfi]) {
3278 result = VK_ERROR_OUT_OF_HOST_MEMORY;
3279 goto fail;
3280 }
3281
3282 memset(device->queues[qfi], 0, queue_create->queueCount * sizeof(struct radv_queue));
3283
3284 device->queue_count[qfi] = queue_create->queueCount;
3285
3286 for (unsigned q = 0; q < queue_create->queueCount; q++) {
3287 result = radv_queue_init(device, &device->queues[qfi][q], q, queue_create, global_priority);
3288 if (result != VK_SUCCESS)
3289 goto fail;
3290 }
3291 }
3292 device->private_sdma_queue = VK_NULL_HANDLE;
3293
3294 device->pbb_allowed = device->physical_device->rad_info.chip_class >= GFX9 &&
3295 !(device->instance->debug_flags & RADV_DEBUG_NOBINNING);
3296
3297 /* The maximum number of scratch waves. Scratch space isn't divided
3298 * evenly between CUs. The number is only a function of the number of CUs.
3299 * We can decrease the constant to decrease the scratch buffer size.
3300 *
3301 * sctx->scratch_waves must be >= the maximum possible size of
3302 * 1 threadgroup, so that the hw doesn't hang from being unable
3303 * to start any.
3304 *
3305 * The recommended value is 4 per CU at most. Higher numbers don't
3306 * bring much benefit, but they still occupy chip resources (think
3307 * async compute). I've seen ~2% performance difference between 4 and 32.
3308 */
3309 uint32_t max_threads_per_block = 2048;
3310 device->scratch_waves =
3311 MAX2(32 * physical_device->rad_info.num_good_compute_units, max_threads_per_block / 64);
3312
3313 device->dispatch_initiator = S_00B800_COMPUTE_SHADER_EN(1);
3314
3315 if (device->physical_device->rad_info.chip_class >= GFX7) {
3316 /* If the KMD allows it (there is a KMD hw register for it),
3317 * allow launching waves out-of-order.
3318 */
3319 device->dispatch_initiator |= S_00B800_ORDER_MODE(1);
3320 }
3321
3322 radv_device_init_gs_info(device);
3323
3324 device->tess_offchip_block_dw_size =
3325 device->physical_device->rad_info.family == CHIP_HAWAII ? 4096 : 8192;
3326
3327 if (device->instance->debug_flags & RADV_DEBUG_HANG) {
3328 /* Enable GPU hangs detection and dump logs if a GPU hang is
3329 * detected.
3330 */
3331 keep_shader_info = true;
3332
3333 if (!radv_init_trace(device))
3334 goto fail;
3335
3336 fprintf(stderr,
3337 "*****************************************************************************\n");
3338 fprintf(stderr,
3339 "* WARNING: RADV_DEBUG=hang is costly and should only be used for debugging! *\n");
3340 fprintf(stderr,
3341 "*****************************************************************************\n");
3342
3343 /* Wait for idle after every draw/dispatch to identify the
3344 * first bad call.
3345 */
3346 device->instance->debug_flags |= RADV_DEBUG_SYNC_SHADERS;
3347
3348 radv_dump_enabled_options(device, stderr);
3349 }
3350
3351 if (radv_thread_trace_enabled()) {
3352 if (device->physical_device->rad_info.chip_class < GFX8 ||
3353 device->physical_device->rad_info.chip_class > GFX10_3) {
3354 fprintf(stderr, "GPU hardware not supported: refer to "
3355 "the RGP documentation for the list of "
3356 "supported GPUs!\n");
3357 abort();
3358 }
3359
3360 if (!radv_thread_trace_init(device))
3361 goto fail;
3362
3363 fprintf(stderr, "radv: Thread trace support is enabled (initial buffer size: %u MiB, "
3364 "instruction timing: %s, cache counters: %s).\n",
3365 device->thread_trace.buffer_size / (1024 * 1024),
3366 radv_is_instruction_timing_enabled() ? "enabled" : "disabled",
3367 radv_spm_trace_enabled() ? "enabled" : "disabled");
3368
3369 if (radv_spm_trace_enabled()) {
3370 if (device->physical_device->rad_info.chip_class < GFX10) {
3371 fprintf(stderr, "SPM isn't supported for this GPU!\n");
3372 abort();
3373 }
3374
3375 if (!radv_spm_init(device))
3376 goto fail;
3377 }
3378 }
3379
3380 if (getenv("RADV_TRAP_HANDLER")) {
3381 /* TODO: Add support for more hardware. */
3382 assert(device->physical_device->rad_info.chip_class == GFX8);
3383
3384 fprintf(stderr, "**********************************************************************\n");
3385 fprintf(stderr, "* WARNING: RADV_TRAP_HANDLER is experimental and only for debugging! *\n");
3386 fprintf(stderr, "**********************************************************************\n");
3387
3388 /* To get the disassembly of the faulty shaders, we have to
3389 * keep some shader info around.
3390 */
3391 keep_shader_info = true;
3392
3393 if (!radv_trap_handler_init(device))
3394 goto fail;
3395 }
3396
3397 if (device->physical_device->rad_info.chip_class >= GFX10_3) {
3398 if (getenv("RADV_FORCE_VRS_CONFIG_FILE")) {
3399 const char *file = radv_get_force_vrs_config_file();
3400
3401 device->force_vrs = radv_parse_force_vrs_config_file(file);
3402
3403 if (radv_device_init_notifier(device)) {
3404 device->force_vrs_enabled = true;
3405 } else {
3406 fprintf(stderr, "radv: Failed to initialize the notifier for RADV_FORCE_VRS_CONFIG_FILE!\n");
3407 }
3408 } else if (getenv("RADV_FORCE_VRS")) {
3409 const char *vrs_rates = getenv("RADV_FORCE_VRS");
3410
3411 device->force_vrs = radv_parse_vrs_rates(vrs_rates);
3412 device->force_vrs_enabled = device->force_vrs != RADV_FORCE_VRS_1x1;
3413 }
3414 }
3415
3416 device->adjust_frag_coord_z =
3417 (device->vk.enabled_extensions.KHR_fragment_shading_rate || device->force_vrs_enabled) &&
3418 (device->physical_device->rad_info.family == CHIP_SIENNA_CICHLID ||
3419 device->physical_device->rad_info.family == CHIP_NAVY_FLOUNDER ||
3420 device->physical_device->rad_info.family == CHIP_VANGOGH);
3421
3422 /* PKT3_LOAD_SH_REG_INDEX is supported on GFX8+, but it hangs with compute queues until GFX10.3. */
3423 device->load_grid_size_from_user_sgpr = device->physical_device->rad_info.chip_class >= GFX10_3;
3424
3425 device->keep_shader_info = keep_shader_info;
3426 result = radv_device_init_meta(device);
3427 if (result != VK_SUCCESS)
3428 goto fail;
3429
3430 radv_device_init_msaa(device);
3431
3432 /* If the border color extension is enabled, let's create the buffer we need. */
3433 if (custom_border_colors) {
3434 result = radv_device_init_border_color(device);
3435 if (result != VK_SUCCESS)
3436 goto fail;
3437 }
3438
3439 if (vs_prologs) {
3440 result = radv_device_init_vs_prologs(device);
3441 if (result != VK_SUCCESS)
3442 goto fail;
3443 }
3444
3445 if (device->physical_device->rad_info.chip_class >= GFX7)
3446 cik_create_gfx_config(device);
3447
3448 VkPipelineCacheCreateInfo ci;
3449 ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
3450 ci.pNext = NULL;
3451 ci.flags = 0;
3452 ci.pInitialData = NULL;
3453 ci.initialDataSize = 0;
3454 VkPipelineCache pc;
3455 result = radv_CreatePipelineCache(radv_device_to_handle(device), &ci, NULL, &pc);
3456 if (result != VK_SUCCESS)
3457 goto fail_meta;
3458
3459 device->mem_cache = radv_pipeline_cache_from_handle(pc);
3460
3461 device->force_aniso = MIN2(16, radv_get_int_debug_option("RADV_TEX_ANISO", -1));
3462 if (device->force_aniso >= 0) {
3463 fprintf(stderr, "radv: Forcing anisotropy filter to %ix\n",
3464 1 << util_logbase2(device->force_aniso));
3465 }
3466
3467 *pDevice = radv_device_to_handle(device);
3468 return VK_SUCCESS;
3469
3470 fail_meta:
3471 radv_device_finish_meta(device);
3472 fail:
3473 radv_thread_trace_finish(device);
3474
3475 radv_spm_finish(device);
3476
3477 radv_trap_handler_finish(device);
3478 radv_finish_trace(device);
3479
3480 if (device->gfx_init)
3481 device->ws->buffer_destroy(device->ws, device->gfx_init);
3482
3483 radv_device_finish_notifier(device);
3484 radv_device_finish_vs_prologs(device);
3485 radv_device_finish_border_color(device);
3486
3487 for (unsigned i = 0; i < RADV_MAX_QUEUE_FAMILIES; i++) {
3488 for (unsigned q = 0; q < device->queue_count[i]; q++)
3489 radv_queue_finish(&device->queues[i][q]);
3490 if (device->queue_count[i])
3491 vk_free(&device->vk.alloc, device->queues[i]);
3492 }
3493
3494 for (unsigned i = 0; i < RADV_NUM_HW_CTX; i++) {
3495 if (device->hw_ctx[i])
3496 device->ws->ctx_destroy(device->hw_ctx[i]);
3497 }
3498
3499 simple_mtx_destroy(&device->trace_mtx);
3500 mtx_destroy(&device->overallocation_mutex);
3501
3502 vk_device_finish(&device->vk);
3503 vk_free(&device->vk.alloc, device);
3504 return result;
3505 }
3506
3507 VKAPI_ATTR void VKAPI_CALL
3508 radv_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
3509 {
3510 RADV_FROM_HANDLE(radv_device, device, _device);
3511
3512 if (!device)
3513 return;
3514
3515 if (device->gfx_init)
3516 device->ws->buffer_destroy(device->ws, device->gfx_init);
3517
3518 radv_device_finish_notifier(device);
3519 radv_device_finish_vs_prologs(device);
3520 radv_device_finish_border_color(device);
3521 radv_device_finish_vrs_image(device);
3522
3523 for (unsigned i = 0; i < RADV_MAX_QUEUE_FAMILIES; i++) {
3524 for (unsigned q = 0; q < device->queue_count[i]; q++)
3525 radv_queue_finish(&device->queues[i][q]);
3526 if (device->queue_count[i])
3527 vk_free(&device->vk.alloc, device->queues[i]);
3528 }
3529 if (device->private_sdma_queue != VK_NULL_HANDLE) {
3530 radv_queue_finish(device->private_sdma_queue);
3531 vk_free(&device->vk.alloc, device->private_sdma_queue);
3532 }
3533
3534 for (unsigned i = 0; i < RADV_NUM_HW_CTX; i++) {
3535 if (device->hw_ctx[i])
3536 device->ws->ctx_destroy(device->hw_ctx[i]);
3537 }
3538
3539 mtx_destroy(&device->overallocation_mutex);
3540 simple_mtx_destroy(&device->trace_mtx);
3541
3542 radv_device_finish_meta(device);
3543
3544 VkPipelineCache pc = radv_pipeline_cache_to_handle(device->mem_cache);
3545 radv_DestroyPipelineCache(radv_device_to_handle(device), pc, NULL);
3546
3547 radv_trap_handler_finish(device);
3548 radv_finish_trace(device);
3549
3550 radv_destroy_shader_arenas(device);
3551
3552 radv_thread_trace_finish(device);
3553
3554 radv_spm_finish(device);
3555
3556 vk_device_finish(&device->vk);
3557 vk_free(&device->vk.alloc, device);
3558 }
3559
3560 VKAPI_ATTR VkResult VKAPI_CALL
3561 radv_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount, VkLayerProperties *pProperties)
3562 {
3563 if (pProperties == NULL) {
3564 *pPropertyCount = 0;
3565 return VK_SUCCESS;
3566 }
3567
3568 /* None supported at this time */
3569 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
3570 }
3571
3572 VKAPI_ATTR VkResult VKAPI_CALL
3573 radv_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
3574 VkLayerProperties *pProperties)
3575 {
3576 if (pProperties == NULL) {
3577 *pPropertyCount = 0;
3578 return VK_SUCCESS;
3579 }
3580
3581 /* None supported at this time */
3582 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
3583 }
3584
3585 static void
3586 fill_geom_tess_rings(struct radv_queue *queue, uint32_t *map, bool add_sample_positions,
3587 uint32_t esgs_ring_size, struct radeon_winsys_bo *esgs_ring_bo,
3588 uint32_t gsvs_ring_size, struct radeon_winsys_bo *gsvs_ring_bo,
3589 uint32_t tess_factor_ring_size, uint32_t tess_offchip_ring_offset,
3590 uint32_t tess_offchip_ring_size, struct radeon_winsys_bo *tess_rings_bo)
3591 {
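/* Layout of the descriptor BO: the first 4 dwords are reserved for the scratch
 * rsrc (2 dwords used, 2 padding); the ring descriptors start at dword 4 with
 * one pair of 4-dword buffer descriptors per ring (ESGS, GSVS, tess), followed
 * by the 1x/2x/4x/8x sample positions.
 */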
3592 uint32_t *desc = &map[4];
3593
3594 if (esgs_ring_bo) {
3595 uint64_t esgs_va = radv_buffer_get_va(esgs_ring_bo);
3596
3597 /* stride 0, num records - size, add tid, swizzle, elsize4,
3598 index stride 64 */
3599 desc[0] = esgs_va;
3600 desc[1] = S_008F04_BASE_ADDRESS_HI(esgs_va >> 32) | S_008F04_SWIZZLE_ENABLE(true);
3601 desc[2] = esgs_ring_size;
3602 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
3603 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
3604 S_008F0C_INDEX_STRIDE(3) | S_008F0C_ADD_TID_ENABLE(1);
3605
3606 if (queue->device->physical_device->rad_info.chip_class >= GFX10) {
3607 desc[3] |= S_008F0C_FORMAT(V_008F0C_GFX10_FORMAT_32_FLOAT) |
3608 S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_DISABLED) | S_008F0C_RESOURCE_LEVEL(1);
3609 } else {
3610 desc[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
3611 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) | S_008F0C_ELEMENT_SIZE(1);
3612 }
3613
3614 /* GS entry for ES->GS ring */
3615 /* stride 0, num records - size, elsize0,
3616 index stride 0 */
3617 desc[4] = esgs_va;
3618 desc[5] = S_008F04_BASE_ADDRESS_HI(esgs_va >> 32);
3619 desc[6] = esgs_ring_size;
3620 desc[7] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
3621 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
3622
3623 if (queue->device->physical_device->rad_info.chip_class >= GFX10) {
3624 desc[7] |= S_008F0C_FORMAT(V_008F0C_GFX10_FORMAT_32_FLOAT) |
3625 S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_DISABLED) | S_008F0C_RESOURCE_LEVEL(1);
3626 } else {
3627 desc[7] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
3628 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
3629 }
3630 }
3631
3632 desc += 8;
3633
3634 if (gsvs_ring_bo) {
3635 uint64_t gsvs_va = radv_buffer_get_va(gsvs_ring_bo);
3636
3637 /* VS entry for GS->VS ring */
3638 /* stride 0, num records - size, elsize0,
3639 index stride 0 */
3640 desc[0] = gsvs_va;
3641 desc[1] = S_008F04_BASE_ADDRESS_HI(gsvs_va >> 32);
3642 desc[2] = gsvs_ring_size;
3643 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
3644 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
3645
3646 if (queue->device->physical_device->rad_info.chip_class >= GFX10) {
3647 desc[3] |= S_008F0C_FORMAT(V_008F0C_GFX10_FORMAT_32_FLOAT) |
3648 S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_DISABLED) | S_008F0C_RESOURCE_LEVEL(1);
3649 } else {
3650 desc[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
3651 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
3652 }
3653
3654 /* stride gsvs_itemsize, num records 64
3655 elsize 4, index stride 16 */
3656 /* shader will patch stride and desc[2] */
3657 desc[4] = gsvs_va;
3658 desc[5] = S_008F04_BASE_ADDRESS_HI(gsvs_va >> 32) | S_008F04_SWIZZLE_ENABLE(1);
3659 desc[6] = 0;
3660 desc[7] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
3661 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
3662 S_008F0C_INDEX_STRIDE(1) | S_008F0C_ADD_TID_ENABLE(true);
3663
3664 if (queue->device->physical_device->rad_info.chip_class >= GFX10) {
3665 desc[7] |= S_008F0C_FORMAT(V_008F0C_GFX10_FORMAT_32_FLOAT) |
3666 S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_DISABLED) | S_008F0C_RESOURCE_LEVEL(1);
3667 } else {
3668 desc[7] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
3669 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) | S_008F0C_ELEMENT_SIZE(1);
3670 }
3671 }
3672
3673 desc += 8;
3674
3675 if (tess_rings_bo) {
3676 uint64_t tess_va = radv_buffer_get_va(tess_rings_bo);
3677 uint64_t tess_offchip_va = tess_va + tess_offchip_ring_offset;
3678
3679 desc[0] = tess_va;
3680 desc[1] = S_008F04_BASE_ADDRESS_HI(tess_va >> 32);
3681 desc[2] = tess_factor_ring_size;
3682 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
3683 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
3684
3685 if (queue->device->physical_device->rad_info.chip_class >= GFX10) {
3686 desc[3] |= S_008F0C_FORMAT(V_008F0C_GFX10_FORMAT_32_FLOAT) |
3687 S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) | S_008F0C_RESOURCE_LEVEL(1);
3688 } else {
3689 desc[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
3690 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
3691 }
3692
3693 desc[4] = tess_offchip_va;
3694 desc[5] = S_008F04_BASE_ADDRESS_HI(tess_offchip_va >> 32);
3695 desc[6] = tess_offchip_ring_size;
3696 desc[7] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
3697 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
3698
3699 if (queue->device->physical_device->rad_info.chip_class >= GFX10) {
3700 desc[7] |= S_008F0C_FORMAT(V_008F0C_GFX10_FORMAT_32_FLOAT) |
3701 S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) | S_008F0C_RESOURCE_LEVEL(1);
3702 } else {
3703 desc[7] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
3704 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
3705 }
3706 }
3707
3708 desc += 8;
3709
3710 if (add_sample_positions) {
3711 /* add sample positions after all rings */
3712 memcpy(desc, queue->device->sample_locations_1x, 8);
3713 desc += 2;
3714 memcpy(desc, queue->device->sample_locations_2x, 16);
3715 desc += 4;
3716 memcpy(desc, queue->device->sample_locations_4x, 32);
3717 desc += 8;
3718 memcpy(desc, queue->device->sample_locations_8x, 64);
3719 }
3720 }
3721
3722 static unsigned
3723 radv_get_hs_offchip_param(struct radv_device *device, uint32_t *max_offchip_buffers_p)
3724 {
3725 bool double_offchip_buffers = device->physical_device->rad_info.chip_class >= GFX7 &&
3726 device->physical_device->rad_info.family != CHIP_CARRIZO &&
3727 device->physical_device->rad_info.family != CHIP_STONEY;
3728 unsigned max_offchip_buffers_per_se = double_offchip_buffers ? 128 : 64;
3729 unsigned max_offchip_buffers;
3730 unsigned offchip_granularity;
3731 unsigned hs_offchip_param;
3732
3733 /*
3734 * Per RadeonSI:
3735 * This must be one less than the maximum number due to a hw limitation.
3736 * Various hardware bugs need this.
3737 *
3738 * Per AMDVLK:
3739 * Vega10 should limit max_offchip_buffers to 508 (4 * 127).
3740 * Gfx7 should limit max_offchip_buffers to 508
3741 * Gfx6 should limit max_offchip_buffers to 126 (2 * 63)
3742 *
3743 * Follow AMDVLK here.
3744 */
3745 if (device->physical_device->rad_info.chip_class >= GFX10) {
3746 max_offchip_buffers_per_se = 128;
3747 } else if (device->physical_device->rad_info.family == CHIP_VEGA10 ||
3748 device->physical_device->rad_info.chip_class == GFX7 ||
3749 device->physical_device->rad_info.chip_class == GFX6)
3750 --max_offchip_buffers_per_se;
3751
3752 max_offchip_buffers = max_offchip_buffers_per_se * device->physical_device->rad_info.max_se;
3753
3754 /* Hawaii has a bug with offchip buffers > 256 that can be worked
3755 * around by setting 4K granularity.
3756 */
3757 if (device->tess_offchip_block_dw_size == 4096) {
3758 assert(device->physical_device->rad_info.family == CHIP_HAWAII);
3759 offchip_granularity = V_03093C_X_4K_DWORDS;
3760 } else {
3761 assert(device->tess_offchip_block_dw_size == 8192);
3762 offchip_granularity = V_03093C_X_8K_DWORDS;
3763 }
3764
3765 switch (device->physical_device->rad_info.chip_class) {
3766 case GFX6:
3767 max_offchip_buffers = MIN2(max_offchip_buffers, 126);
3768 break;
3769 case GFX7:
3770 case GFX8:
3771 case GFX9:
3772 max_offchip_buffers = MIN2(max_offchip_buffers, 508);
3773 break;
3774 case GFX10:
3775 break;
3776 default:
3777 break;
3778 }
3779
3780 *max_offchip_buffers_p = max_offchip_buffers;
3781 if (device->physical_device->rad_info.chip_class >= GFX10_3) {
3782 hs_offchip_param = S_03093C_OFFCHIP_BUFFERING_GFX103(max_offchip_buffers - 1) |
3783 S_03093C_OFFCHIP_GRANULARITY_GFX103(offchip_granularity);
3784 } else if (device->physical_device->rad_info.chip_class >= GFX7) {
3785 if (device->physical_device->rad_info.chip_class >= GFX8)
3786 --max_offchip_buffers;
3787 hs_offchip_param = S_03093C_OFFCHIP_BUFFERING_GFX7(max_offchip_buffers) |
3788 S_03093C_OFFCHIP_GRANULARITY_GFX7(offchip_granularity);
3789 } else {
3790 hs_offchip_param = S_0089B0_OFFCHIP_BUFFERING(max_offchip_buffers);
3791 }
3792 return hs_offchip_param;
3793 }
3794
3795 static void
3796 radv_emit_gs_ring_sizes(struct radv_queue *queue, struct radeon_cmdbuf *cs,
3797 struct radeon_winsys_bo *esgs_ring_bo, uint32_t esgs_ring_size,
3798 struct radeon_winsys_bo *gsvs_ring_bo, uint32_t gsvs_ring_size)
3799 {
3800 if (!esgs_ring_bo && !gsvs_ring_bo)
3801 return;
3802
3803 if (esgs_ring_bo)
3804 radv_cs_add_buffer(queue->device->ws, cs, esgs_ring_bo);
3805
3806 if (gsvs_ring_bo)
3807 radv_cs_add_buffer(queue->device->ws, cs, gsvs_ring_bo);
3808
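/* The ring sizes are programmed in 256-byte granularity, hence the >> 8. */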
3809 if (queue->device->physical_device->rad_info.chip_class >= GFX7) {
3810 radeon_set_uconfig_reg_seq(cs, R_030900_VGT_ESGS_RING_SIZE, 2);
3811 radeon_emit(cs, esgs_ring_size >> 8);
3812 radeon_emit(cs, gsvs_ring_size >> 8);
3813 } else {
3814 radeon_set_config_reg_seq(cs, R_0088C8_VGT_ESGS_RING_SIZE, 2);
3815 radeon_emit(cs, esgs_ring_size >> 8);
3816 radeon_emit(cs, gsvs_ring_size >> 8);
3817 }
3818 }
3819
3820 static void
3821 radv_emit_tess_factor_ring(struct radv_queue *queue, struct radeon_cmdbuf *cs,
3822 unsigned hs_offchip_param, unsigned tf_ring_size,
3823 struct radeon_winsys_bo *tess_rings_bo)
3824 {
3825 uint64_t tf_va;
3826
3827 if (!tess_rings_bo)
3828 return;
3829
3830 tf_va = radv_buffer_get_va(tess_rings_bo);
3831
3832 radv_cs_add_buffer(queue->device->ws, cs, tess_rings_bo);
3833
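/* VGT_TF_RING_SIZE takes the size in dwords (bytes / 4) and VGT_TF_MEMORY_BASE
 * takes the address in 256-byte units (va >> 8).
 */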
3834 if (queue->device->physical_device->rad_info.chip_class >= GFX7) {
3835 radeon_set_uconfig_reg(cs, R_030938_VGT_TF_RING_SIZE, S_030938_SIZE(tf_ring_size / 4));
3836 radeon_set_uconfig_reg(cs, R_030940_VGT_TF_MEMORY_BASE, tf_va >> 8);
3837
3838 if (queue->device->physical_device->rad_info.chip_class >= GFX10) {
3839 radeon_set_uconfig_reg(cs, R_030984_VGT_TF_MEMORY_BASE_HI,
3840 S_030984_BASE_HI(tf_va >> 40));
3841 } else if (queue->device->physical_device->rad_info.chip_class == GFX9) {
3842 radeon_set_uconfig_reg(cs, R_030944_VGT_TF_MEMORY_BASE_HI, S_030944_BASE_HI(tf_va >> 40));
3843 }
3844 radeon_set_uconfig_reg(cs, R_03093C_VGT_HS_OFFCHIP_PARAM, hs_offchip_param);
3845 } else {
3846 radeon_set_config_reg(cs, R_008988_VGT_TF_RING_SIZE, S_008988_SIZE(tf_ring_size / 4));
3847 radeon_set_config_reg(cs, R_0089B8_VGT_TF_MEMORY_BASE, tf_va >> 8);
3848 radeon_set_config_reg(cs, R_0089B0_VGT_HS_OFFCHIP_PARAM, hs_offchip_param);
3849 }
3850 }
3851
3852 static void
3853 radv_emit_graphics_scratch(struct radv_queue *queue, struct radeon_cmdbuf *cs,
3854 uint32_t size_per_wave, uint32_t waves,
3855 struct radeon_winsys_bo *scratch_bo)
3856 {
3857 if (queue->qf != RADV_QUEUE_GENERAL)
3858 return;
3859
3860 if (!scratch_bo)
3861 return;
3862
3863 radv_cs_add_buffer(queue->device->ws, cs, scratch_bo);
3864
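/* SPI_TMPRING_SIZE.WAVESIZE is expressed in 1 KiB units, so round the per-wave
 * scratch size up accordingly.
 */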
3865 radeon_set_context_reg(
3866 cs, R_0286E8_SPI_TMPRING_SIZE,
3867 S_0286E8_WAVES(waves) | S_0286E8_WAVESIZE(round_up_u32(size_per_wave, 1024)));
3868 }
3869
3870 static void
3871 radv_emit_compute_scratch(struct radv_queue *queue, struct radeon_cmdbuf *cs,
3872 uint32_t size_per_wave, uint32_t waves,
3873 struct radeon_winsys_bo *compute_scratch_bo)
3874 {
3875 uint64_t scratch_va;
3876
3877 if (!compute_scratch_bo)
3878 return;
3879
3880 scratch_va = radv_buffer_get_va(compute_scratch_bo);
3881
3882 radv_cs_add_buffer(queue->device->ws, cs, compute_scratch_bo);
3883
3884 radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0, 2);
3885 radeon_emit(cs, scratch_va);
3886 radeon_emit(cs, S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) | S_008F04_SWIZZLE_ENABLE(1));
3887
3888 radeon_set_sh_reg(cs, R_00B860_COMPUTE_TMPRING_SIZE,
3889 S_00B860_WAVES(waves) | S_00B860_WAVESIZE(round_up_u32(size_per_wave, 1024)));
3890 }
3891
3892 static void
3893 radv_emit_global_shader_pointers(struct radv_queue *queue, struct radeon_cmdbuf *cs,
3894 struct radeon_winsys_bo *descriptor_bo)
3895 {
3896 uint64_t va;
3897
3898 if (!descriptor_bo)
3899 return;
3900
3901 va = radv_buffer_get_va(descriptor_bo);
3902
3903 radv_cs_add_buffer(queue->device->ws, cs, descriptor_bo);
3904
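/* GFX9+ merges LS into HS and ES into GS, so only the PS/VS/GS/HS user-data
 * base registers are programmed; older ASICs get all six stages.
 */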
3905 if (queue->device->physical_device->rad_info.chip_class >= GFX10) {
3906 uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0, R_00B130_SPI_SHADER_USER_DATA_VS_0,
3907 R_00B208_SPI_SHADER_USER_DATA_ADDR_LO_GS,
3908 R_00B408_SPI_SHADER_USER_DATA_ADDR_LO_HS};
3909
3910 for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
3911 radv_emit_shader_pointer(queue->device, cs, regs[i], va, true);
3912 }
3913 } else if (queue->device->physical_device->rad_info.chip_class == GFX9) {
3914 uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0, R_00B130_SPI_SHADER_USER_DATA_VS_0,
3915 R_00B208_SPI_SHADER_USER_DATA_ADDR_LO_GS,
3916 R_00B408_SPI_SHADER_USER_DATA_ADDR_LO_HS};
3917
3918 for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
3919 radv_emit_shader_pointer(queue->device, cs, regs[i], va, true);
3920 }
3921 } else {
3922 uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0, R_00B130_SPI_SHADER_USER_DATA_VS_0,
3923 R_00B230_SPI_SHADER_USER_DATA_GS_0, R_00B330_SPI_SHADER_USER_DATA_ES_0,
3924 R_00B430_SPI_SHADER_USER_DATA_HS_0, R_00B530_SPI_SHADER_USER_DATA_LS_0};
3925
3926 for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
3927 radv_emit_shader_pointer(queue->device, cs, regs[i], va, true);
3928 }
3929 }
3930 }
3931
3932 static void
3933 radv_init_graphics_state(struct radeon_cmdbuf *cs, struct radv_queue *queue)
3934 {
3935 struct radv_device *device = queue->device;
3936
3937 if (device->gfx_init) {
3938 uint64_t va = radv_buffer_get_va(device->gfx_init);
3939
3940 radeon_emit(cs, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
3941 radeon_emit(cs, va);
3942 radeon_emit(cs, va >> 32);
3943 radeon_emit(cs, device->gfx_init_size_dw & 0xffff);
3944
3945 radv_cs_add_buffer(device->ws, cs, device->gfx_init);
3946 } else {
3947 si_emit_graphics(device, cs);
3948 }
3949 }
3950
3951 static void
3952 radv_init_compute_state(struct radeon_cmdbuf *cs, struct radv_queue *queue)
3953 {
3954 si_emit_compute(queue->device, cs);
3955 }
3956
3957 static VkResult
3958 radv_get_preamble_cs(struct radv_queue *queue, uint32_t scratch_size_per_wave,
3959 uint32_t scratch_waves, uint32_t compute_scratch_size_per_wave,
3960 uint32_t compute_scratch_waves, uint32_t esgs_ring_size,
3961 uint32_t gsvs_ring_size, bool needs_tess_rings, bool needs_gds,
3962 bool needs_gds_oa, bool needs_sample_positions,
3963 struct radeon_cmdbuf **initial_full_flush_preamble_cs,
3964 struct radeon_cmdbuf **initial_preamble_cs,
3965 struct radeon_cmdbuf **continue_preamble_cs)
3966 {
3967 struct radeon_winsys_bo *scratch_bo = NULL;
3968 struct radeon_winsys_bo *descriptor_bo = NULL;
3969 struct radeon_winsys_bo *compute_scratch_bo = NULL;
3970 struct radeon_winsys_bo *esgs_ring_bo = NULL;
3971 struct radeon_winsys_bo *gsvs_ring_bo = NULL;
3972 struct radeon_winsys_bo *tess_rings_bo = NULL;
3973 struct radeon_winsys_bo *gds_bo = NULL;
3974 struct radeon_winsys_bo *gds_oa_bo = NULL;
3975 struct radeon_cmdbuf *dest_cs[3] = {0};
3976 bool add_tess_rings = false, add_gds = false, add_gds_oa = false, add_sample_positions = false;
3977 unsigned tess_factor_ring_size = 0, tess_offchip_ring_size = 0;
3978 unsigned max_offchip_buffers;
3979 unsigned hs_offchip_param = 0;
3980 unsigned tess_offchip_ring_offset;
3981 uint32_t ring_bo_flags = RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING;
3982 VkResult result = VK_SUCCESS;
3983 if (queue->qf == RADV_QUEUE_TRANSFER)
3984 return VK_SUCCESS;
3985
3986 if (!queue->has_tess_rings) {
3987 if (needs_tess_rings)
3988 add_tess_rings = true;
3989 }
3990 if (!queue->has_gds) {
3991 if (needs_gds)
3992 add_gds = true;
3993 }
3994 if (!queue->has_gds_oa) {
3995 if (needs_gds_oa)
3996 add_gds_oa = true;
3997 }
3998 if (!queue->has_sample_positions) {
3999 if (needs_sample_positions)
4000 add_sample_positions = true;
4001 }
4002 tess_factor_ring_size = 32768 * queue->device->physical_device->rad_info.max_se;
4003 hs_offchip_param = radv_get_hs_offchip_param(queue->device, &max_offchip_buffers);
4004 tess_offchip_ring_offset = align(tess_factor_ring_size, 64 * 1024);
4005 tess_offchip_ring_size = max_offchip_buffers * queue->device->tess_offchip_block_dw_size * 4;
4006
4007 scratch_size_per_wave = MAX2(scratch_size_per_wave, queue->scratch_size_per_wave);
4008 if (scratch_size_per_wave)
4009 scratch_waves = MIN2(scratch_waves, UINT32_MAX / scratch_size_per_wave);
4010 else
4011 scratch_waves = 0;
4012
4013 compute_scratch_size_per_wave =
4014 MAX2(compute_scratch_size_per_wave, queue->compute_scratch_size_per_wave);
4015 if (compute_scratch_size_per_wave)
4016 compute_scratch_waves =
4017 MIN2(compute_scratch_waves, UINT32_MAX / compute_scratch_size_per_wave);
4018 else
4019 compute_scratch_waves = 0;
4020
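/* Fast path: if the queue's cached scratch and rings already satisfy this
 * request and nothing new is needed, reuse the existing preambles.
 */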
4021 if (scratch_size_per_wave <= queue->scratch_size_per_wave &&
4022 scratch_waves <= queue->scratch_waves &&
4023 compute_scratch_size_per_wave <= queue->compute_scratch_size_per_wave &&
4024 compute_scratch_waves <= queue->compute_scratch_waves &&
4025 esgs_ring_size <= queue->esgs_ring_size && gsvs_ring_size <= queue->gsvs_ring_size &&
4026 !add_tess_rings && !add_gds && !add_gds_oa && !add_sample_positions &&
4027 queue->initial_preamble_cs) {
4028 *initial_full_flush_preamble_cs = queue->initial_full_flush_preamble_cs;
4029 *initial_preamble_cs = queue->initial_preamble_cs;
4030 *continue_preamble_cs = queue->continue_preamble_cs;
4031 if (!scratch_size_per_wave && !compute_scratch_size_per_wave && !esgs_ring_size &&
4032 !gsvs_ring_size && !needs_tess_rings && !needs_gds && !needs_gds_oa &&
4033 !needs_sample_positions)
4034 *continue_preamble_cs = NULL;
4035 return VK_SUCCESS;
4036 }
4037
4038 uint32_t scratch_size = scratch_size_per_wave * scratch_waves;
4039 uint32_t queue_scratch_size = queue->scratch_size_per_wave * queue->scratch_waves;
4040 if (scratch_size > queue_scratch_size) {
4041 result =
4042 queue->device->ws->buffer_create(queue->device->ws, scratch_size, 4096, RADEON_DOMAIN_VRAM,
4043 ring_bo_flags, RADV_BO_PRIORITY_SCRATCH, 0, &scratch_bo);
4044 if (result != VK_SUCCESS)
4045 goto fail;
4046 } else
4047 scratch_bo = queue->scratch_bo;
4048
4049 uint32_t compute_scratch_size = compute_scratch_size_per_wave * compute_scratch_waves;
4050 uint32_t compute_queue_scratch_size =
4051 queue->compute_scratch_size_per_wave * queue->compute_scratch_waves;
4052 if (compute_scratch_size > compute_queue_scratch_size) {
4053 result = queue->device->ws->buffer_create(queue->device->ws, compute_scratch_size, 4096,
4054 RADEON_DOMAIN_VRAM, ring_bo_flags,
4055 RADV_BO_PRIORITY_SCRATCH, 0, &compute_scratch_bo);
4056 if (result != VK_SUCCESS)
4057 goto fail;
4058
4059 } else
4060 compute_scratch_bo = queue->compute_scratch_bo;
4061
4062 if (esgs_ring_size > queue->esgs_ring_size) {
4063 result = queue->device->ws->buffer_create(queue->device->ws, esgs_ring_size, 4096,
4064 RADEON_DOMAIN_VRAM, ring_bo_flags,
4065 RADV_BO_PRIORITY_SCRATCH, 0, &esgs_ring_bo);
4066 if (result != VK_SUCCESS)
4067 goto fail;
4068 } else {
4069 esgs_ring_bo = queue->esgs_ring_bo;
4070 esgs_ring_size = queue->esgs_ring_size;
4071 }
4072
4073 if (gsvs_ring_size > queue->gsvs_ring_size) {
4074 result = queue->device->ws->buffer_create(queue->device->ws, gsvs_ring_size, 4096,
4075 RADEON_DOMAIN_VRAM, ring_bo_flags,
4076 RADV_BO_PRIORITY_SCRATCH, 0, &gsvs_ring_bo);
4077 if (result != VK_SUCCESS)
4078 goto fail;
4079 } else {
4080 gsvs_ring_bo = queue->gsvs_ring_bo;
4081 gsvs_ring_size = queue->gsvs_ring_size;
4082 }
4083
4084 if (add_tess_rings) {
4085 result = queue->device->ws->buffer_create(
4086 queue->device->ws, tess_offchip_ring_offset + tess_offchip_ring_size, 256,
4087 RADEON_DOMAIN_VRAM, ring_bo_flags, RADV_BO_PRIORITY_SCRATCH, 0, &tess_rings_bo);
4088 if (result != VK_SUCCESS)
4089 goto fail;
4090 } else {
4091 tess_rings_bo = queue->tess_rings_bo;
4092 }
4093
4094 if (add_gds) {
4095 assert(queue->device->physical_device->rad_info.chip_class >= GFX10);
4096
4097 /* 4 streamout GDS counters.
4098 * We need 256B (64 dw) of GDS, otherwise streamout hangs.
4099 */
4100 result =
4101 queue->device->ws->buffer_create(queue->device->ws, 256, 4, RADEON_DOMAIN_GDS,
4102 ring_bo_flags, RADV_BO_PRIORITY_SCRATCH, 0, &gds_bo);
4103 if (result != VK_SUCCESS)
4104 goto fail;
4105 } else {
4106 gds_bo = queue->gds_bo;
4107 }
4108
4109 if (add_gds_oa) {
4110 assert(queue->device->physical_device->rad_info.chip_class >= GFX10);
4111
4112 result =
4113 queue->device->ws->buffer_create(queue->device->ws, 4, 1, RADEON_DOMAIN_OA, ring_bo_flags,
4114 RADV_BO_PRIORITY_SCRATCH, 0, &gds_oa_bo);
4115 if (result != VK_SUCCESS)
4116 goto fail;
4117 } else {
4118 gds_oa_bo = queue->gds_oa_bo;
4119 }
4120
4121 if (scratch_bo != queue->scratch_bo || esgs_ring_bo != queue->esgs_ring_bo ||
4122 gsvs_ring_bo != queue->gsvs_ring_bo || tess_rings_bo != queue->tess_rings_bo ||
4123 add_sample_positions) {
4124 uint32_t size = 0;
4125 if (gsvs_ring_bo || esgs_ring_bo || tess_rings_bo || add_sample_positions) {
4126 size = 112; /* 2 dword + 2 padding + 4 dword * 6 */
4127 if (add_sample_positions)
4128 size += 128; /* 8+16+32+64 = 120 bytes of sample positions, padded to 128 */
4129 } else if (scratch_bo)
4130 size = 8; /* 2 dword */
4131
4132 result = queue->device->ws->buffer_create(
4133 queue->device->ws, size, 4096, RADEON_DOMAIN_VRAM,
4134 RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_READ_ONLY,
4135 RADV_BO_PRIORITY_DESCRIPTOR, 0, &descriptor_bo);
4136 if (result != VK_SUCCESS)
4137 goto fail;
4138 } else
4139 descriptor_bo = queue->descriptor_bo;
4140
4141 if (descriptor_bo != queue->descriptor_bo) {
4142 uint32_t *map = (uint32_t *)queue->device->ws->buffer_map(descriptor_bo);
4143 if (!map)
4144 goto fail;
4145
4146 if (scratch_bo) {
4147 uint64_t scratch_va = radv_buffer_get_va(scratch_bo);
4148 uint32_t rsrc1 = S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) | S_008F04_SWIZZLE_ENABLE(1);
4149 map[0] = scratch_va;
4150 map[1] = rsrc1;
4151 }
4152
4153 if (esgs_ring_bo || gsvs_ring_bo || tess_rings_bo || add_sample_positions)
4154 fill_geom_tess_rings(queue, map, add_sample_positions, esgs_ring_size, esgs_ring_bo,
4155 gsvs_ring_size, gsvs_ring_bo, tess_factor_ring_size,
4156 tess_offchip_ring_offset, tess_offchip_ring_size, tess_rings_bo);
4157
4158 queue->device->ws->buffer_unmap(descriptor_bo);
4159 }
4160
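/* Build the three preamble variants: dest_cs[0] becomes the initial preamble
 * with a full cache flush, dest_cs[1] the initial preamble without the full
 * flush, and dest_cs[2] the continue preamble used between submission chunks. */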
4161 for (int i = 0; i < 3; ++i) {
4162 enum rgp_flush_bits sqtt_flush_bits = 0;
4163 struct radeon_cmdbuf *cs = NULL;
4164 cs = queue->device->ws->cs_create(queue->device->ws,
4165 radv_queue_ring(queue));
4166 if (!cs) {
4167 result = VK_ERROR_OUT_OF_HOST_MEMORY;
4168 goto fail;
4169 }
4170
4171 dest_cs[i] = cs;
4172
4173 if (scratch_bo)
4174 radv_cs_add_buffer(queue->device->ws, cs, scratch_bo);
4175
4176 /* Emit initial configuration. */
4177 switch (queue->qf) {
4178 case RADV_QUEUE_GENERAL:
4179 radv_init_graphics_state(cs, queue);
4180 break;
4181 case RADV_QUEUE_COMPUTE:
4182 radv_init_compute_state(cs, queue);
4183 break;
4184 case RADV_QUEUE_TRANSFER:
4185 default:
4186 break;
4187 }
4188
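/* Wait for outstanding VS work and flush the VGT before the ring registers
 * below are reprogrammed. */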
4189 if (esgs_ring_bo || gsvs_ring_bo || tess_rings_bo) {
4190 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
4191 radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
4192
4193 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
4194 radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
4195 }
4196
4197 radv_emit_gs_ring_sizes(queue, cs, esgs_ring_bo, esgs_ring_size, gsvs_ring_bo,
4198 gsvs_ring_size);
4199 radv_emit_tess_factor_ring(queue, cs, hs_offchip_param, tess_factor_ring_size, tess_rings_bo);
4200 radv_emit_global_shader_pointers(queue, cs, descriptor_bo);
4201 radv_emit_compute_scratch(queue, cs, compute_scratch_size_per_wave, compute_scratch_waves,
4202 compute_scratch_bo);
4203 radv_emit_graphics_scratch(queue, cs, scratch_size_per_wave, scratch_waves, scratch_bo);
4204
4205 if (gds_bo)
4206 radv_cs_add_buffer(queue->device->ws, cs, gds_bo);
4207 if (gds_oa_bo)
4208 radv_cs_add_buffer(queue->device->ws, cs, gds_oa_bo);
4209
4210 if (i == 0) {
4211 si_cs_emit_cache_flush(
4212 cs, queue->device->physical_device->rad_info.chip_class, NULL, 0,
4213 queue->qf == RADV_QUEUE_COMPUTE &&
4214 queue->device->physical_device->rad_info.chip_class >= GFX7,
4215 (queue->qf == RADV_QUEUE_COMPUTE
4216 ? RADV_CMD_FLAG_CS_PARTIAL_FLUSH
4217 : (RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH)) |
4218 RADV_CMD_FLAG_INV_ICACHE | RADV_CMD_FLAG_INV_SCACHE | RADV_CMD_FLAG_INV_VCACHE |
4219 RADV_CMD_FLAG_INV_L2 | RADV_CMD_FLAG_START_PIPELINE_STATS,
4220 &sqtt_flush_bits, 0);
4221 } else if (i == 1) {
4222 si_cs_emit_cache_flush(cs, queue->device->physical_device->rad_info.chip_class, NULL, 0,
4223 queue->qf == RADV_QUEUE_COMPUTE &&
4224 queue->device->physical_device->rad_info.chip_class >= GFX7,
4225 RADV_CMD_FLAG_INV_ICACHE | RADV_CMD_FLAG_INV_SCACHE |
4226 RADV_CMD_FLAG_INV_VCACHE | RADV_CMD_FLAG_INV_L2 |
4227 RADV_CMD_FLAG_START_PIPELINE_STATS,
4228 &sqtt_flush_bits, 0);
4229 }
4230
4231 result = queue->device->ws->cs_finalize(cs);
4232 if (result != VK_SUCCESS)
4233 goto fail;
4234 }
4235
4236 if (queue->initial_full_flush_preamble_cs)
4237 queue->device->ws->cs_destroy(queue->initial_full_flush_preamble_cs);
4238
4239 if (queue->initial_preamble_cs)
4240 queue->device->ws->cs_destroy(queue->initial_preamble_cs);
4241
4242 if (queue->continue_preamble_cs)
4243 queue->device->ws->cs_destroy(queue->continue_preamble_cs);
4244
4245 queue->initial_full_flush_preamble_cs = dest_cs[0];
4246 queue->initial_preamble_cs = dest_cs[1];
4247 queue->continue_preamble_cs = dest_cs[2];
4248
4249 if (scratch_bo != queue->scratch_bo) {
4250 if (queue->scratch_bo)
4251 queue->device->ws->buffer_destroy(queue->device->ws, queue->scratch_bo);
4252 queue->scratch_bo = scratch_bo;
4253 }
4254 queue->scratch_size_per_wave = scratch_size_per_wave;
4255 queue->scratch_waves = scratch_waves;
4256
4257 if (compute_scratch_bo != queue->compute_scratch_bo) {
4258 if (queue->compute_scratch_bo)
4259 queue->device->ws->buffer_destroy(queue->device->ws, queue->compute_scratch_bo);
4260 queue->compute_scratch_bo = compute_scratch_bo;
4261 }
4262 queue->compute_scratch_size_per_wave = compute_scratch_size_per_wave;
4263 queue->compute_scratch_waves = compute_scratch_waves;
4264
4265 if (esgs_ring_bo != queue->esgs_ring_bo) {
4266 if (queue->esgs_ring_bo)
4267 queue->device->ws->buffer_destroy(queue->device->ws, queue->esgs_ring_bo);
4268 queue->esgs_ring_bo = esgs_ring_bo;
4269 queue->esgs_ring_size = esgs_ring_size;
4270 }
4271
4272 if (gsvs_ring_bo != queue->gsvs_ring_bo) {
4273 if (queue->gsvs_ring_bo)
4274 queue->device->ws->buffer_destroy(queue->device->ws, queue->gsvs_ring_bo);
4275 queue->gsvs_ring_bo = gsvs_ring_bo;
4276 queue->gsvs_ring_size = gsvs_ring_size;
4277 }
4278
4279 if (tess_rings_bo != queue->tess_rings_bo) {
4280 queue->tess_rings_bo = tess_rings_bo;
4281 queue->has_tess_rings = true;
4282 }
4283
4284 if (gds_bo != queue->gds_bo) {
4285 queue->gds_bo = gds_bo;
4286 queue->has_gds = true;
4287 }
4288
4289 if (gds_oa_bo != queue->gds_oa_bo) {
4290 queue->gds_oa_bo = gds_oa_bo;
4291 queue->has_gds_oa = true;
4292 }
4293
4294 if (descriptor_bo != queue->descriptor_bo) {
4295 if (queue->descriptor_bo)
4296 queue->device->ws->buffer_destroy(queue->device->ws, queue->descriptor_bo);
4297
4298 queue->descriptor_bo = descriptor_bo;
4299 }
4300
4301 if (add_sample_positions)
4302 queue->has_sample_positions = true;
4303
4304 *initial_full_flush_preamble_cs = queue->initial_full_flush_preamble_cs;
4305 *initial_preamble_cs = queue->initial_preamble_cs;
4306 *continue_preamble_cs = queue->continue_preamble_cs;
4307 if (!scratch_size && !compute_scratch_size && !esgs_ring_size && !gsvs_ring_size)
4308 *continue_preamble_cs = NULL;
4309 return VK_SUCCESS;
4310 fail:
4311 for (int i = 0; i < ARRAY_SIZE(dest_cs); ++i)
4312 if (dest_cs[i])
4313 queue->device->ws->cs_destroy(dest_cs[i]);
4314 if (descriptor_bo && descriptor_bo != queue->descriptor_bo)
4315 queue->device->ws->buffer_destroy(queue->device->ws, descriptor_bo);
4316 if (scratch_bo && scratch_bo != queue->scratch_bo)
4317 queue->device->ws->buffer_destroy(queue->device->ws, scratch_bo);
4318 if (compute_scratch_bo && compute_scratch_bo != queue->compute_scratch_bo)
4319 queue->device->ws->buffer_destroy(queue->device->ws, compute_scratch_bo);
4320 if (esgs_ring_bo && esgs_ring_bo != queue->esgs_ring_bo)
4321 queue->device->ws->buffer_destroy(queue->device->ws, esgs_ring_bo);
4322 if (gsvs_ring_bo && gsvs_ring_bo != queue->gsvs_ring_bo)
4323 queue->device->ws->buffer_destroy(queue->device->ws, gsvs_ring_bo);
4324 if (tess_rings_bo && tess_rings_bo != queue->tess_rings_bo)
4325 queue->device->ws->buffer_destroy(queue->device->ws, tess_rings_bo);
4326 if (gds_bo && gds_bo != queue->gds_bo)
4327 queue->device->ws->buffer_destroy(queue->device->ws, gds_bo);
4328 if (gds_oa_bo && gds_oa_bo != queue->gds_oa_bo)
4329 queue->device->ws->buffer_destroy(queue->device->ws, gds_oa_bo);
4330
4331 return vk_error(queue, result);
4332 }
4333
4334 static VkResult
4335 radv_sparse_buffer_bind_memory(struct radv_device *device, const VkSparseBufferMemoryBindInfo *bind)
4336 {
4337 RADV_FROM_HANDLE(radv_buffer, buffer, bind->buffer);
4338 VkResult result;
4339
4340 for (uint32_t i = 0; i < bind->bindCount; ++i) {
4341 struct radv_device_memory *mem = NULL;
4342
4343 if (bind->pBinds[i].memory != VK_NULL_HANDLE)
4344 mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
4345
4346 result = device->ws->buffer_virtual_bind(device->ws, buffer->bo,
4347 bind->pBinds[i].resourceOffset, bind->pBinds[i].size,
4348 mem ? mem->bo : NULL, bind->pBinds[i].memoryOffset);
4349 if (result != VK_SUCCESS)
4350 return result;
4351 }
4352
4353 return VK_SUCCESS;
4354 }
4355
4356 static VkResult
4357 radv_sparse_image_opaque_bind_memory(struct radv_device *device,
4358 const VkSparseImageOpaqueMemoryBindInfo *bind)
4359 {
4360 RADV_FROM_HANDLE(radv_image, image, bind->image);
4361 VkResult result;
4362
4363 for (uint32_t i = 0; i < bind->bindCount; ++i) {
4364 struct radv_device_memory *mem = NULL;
4365
4366 if (bind->pBinds[i].memory != VK_NULL_HANDLE)
4367 mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
4368
4369 result = device->ws->buffer_virtual_bind(device->ws, image->bo,
4370 bind->pBinds[i].resourceOffset, bind->pBinds[i].size,
4371 mem ? mem->bo : NULL, bind->pBinds[i].memoryOffset);
4372 if (result != VK_SUCCESS)
4373 return result;
4374 }
4375
4376 return VK_SUCCESS;
4377 }
4378
4379 static VkResult
4380 radv_sparse_image_bind_memory(struct radv_device *device, const VkSparseImageMemoryBindInfo *bind)
4381 {
4382 RADV_FROM_HANDLE(radv_image, image, bind->image);
4383 struct radeon_surf *surface = &image->planes[0].surface;
4384 uint32_t bs = vk_format_get_blocksize(image->vk_format);
4385 VkResult result;
4386
4387 for (uint32_t i = 0; i < bind->bindCount; ++i) {
4388 struct radv_device_memory *mem = NULL;
4389 uint32_t offset, pitch;
4390 uint32_t mem_offset = bind->pBinds[i].memoryOffset;
4391 const uint32_t layer = bind->pBinds[i].subresource.arrayLayer;
4392 const uint32_t level = bind->pBinds[i].subresource.mipLevel;
4393
4394 VkExtent3D bind_extent = bind->pBinds[i].extent;
4395 bind_extent.width =
4396 DIV_ROUND_UP(bind_extent.width, vk_format_get_blockwidth(image->vk_format));
4397 bind_extent.height =
4398 DIV_ROUND_UP(bind_extent.height, vk_format_get_blockheight(image->vk_format));
4399
4400 VkOffset3D bind_offset = bind->pBinds[i].offset;
4401 bind_offset.x /= vk_format_get_blockwidth(image->vk_format);
4402 bind_offset.y /= vk_format_get_blockheight(image->vk_format);
4403
4404 if (bind->pBinds[i].memory != VK_NULL_HANDLE)
4405 mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
4406
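/* Compute the byte offset and row pitch of the bound mip level/layer: GFX9+
 * exposes PRT-specific layout info, older generations use the legacy level
 * array. */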
4407 if (device->physical_device->rad_info.chip_class >= GFX9) {
4408 offset = surface->u.gfx9.surf_slice_size * layer + surface->u.gfx9.prt_level_offset[level];
4409 pitch = surface->u.gfx9.prt_level_pitch[level];
4410 } else {
4411 offset = (uint64_t)surface->u.legacy.level[level].offset_256B * 256 +
4412 surface->u.legacy.level[level].slice_size_dw * 4 * layer;
4413 pitch = surface->u.legacy.level[level].nblk_x;
4414 }
4415
4416 offset += (bind_offset.y * pitch * bs) + (bind_offset.x * surface->prt_tile_height * bs);
4417
4418 uint32_t aligned_extent_width = ALIGN(bind_extent.width, surface->prt_tile_width);
4419
4420 bool whole_subres = bind_offset.x == 0 && aligned_extent_width == pitch;
4421
4422 if (whole_subres) {
4423 uint32_t aligned_extent_height = ALIGN(bind_extent.height, surface->prt_tile_height);
4424
4425 uint32_t size = aligned_extent_width * aligned_extent_height * bs;
4426 result = device->ws->buffer_virtual_bind(device->ws, image->bo, offset, size,
4427 mem ? mem->bo : NULL, mem_offset);
4428 if (result != VK_SUCCESS)
4429 return result;
4430 } else {
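/* The bound memory is laid out with a tighter pitch than the image, so bind
 * it one tile row at a time. */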
4431 uint32_t img_increment = pitch * bs;
4432 uint32_t mem_increment = aligned_extent_width * bs;
4433 uint32_t size = mem_increment * surface->prt_tile_height;
4434 for (unsigned y = 0; y < bind_extent.height; y += surface->prt_tile_height) {
4435 result = device->ws->buffer_virtual_bind(
4436 device->ws, image->bo, offset + img_increment * y, size, mem ? mem->bo : NULL,
4437 mem_offset + mem_increment * y);
4438 if (result != VK_SUCCESS)
4439 return result;
4440 }
4441 }
4442 }
4443
4444 return VK_SUCCESS;
4445 }
4446
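/* Gather the worst-case scratch/ring requirements of a set of command buffers
 * and (re)build the queue preambles to match. */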
4447 static VkResult
4448 radv_get_preambles(struct radv_queue *queue, struct vk_command_buffer *const *cmd_buffers,
4449 uint32_t cmd_buffer_count, struct radeon_cmdbuf **initial_full_flush_preamble_cs,
4450 struct radeon_cmdbuf **initial_preamble_cs,
4451 struct radeon_cmdbuf **continue_preamble_cs)
4452 {
4453 uint32_t scratch_size_per_wave = 0, waves_wanted = 0;
4454 uint32_t compute_scratch_size_per_wave = 0, compute_waves_wanted = 0;
4455 uint32_t esgs_ring_size = 0, gsvs_ring_size = 0;
4456 bool tess_rings_needed = false;
4457 bool gds_needed = false;
4458 bool gds_oa_needed = false;
4459 bool sample_positions_needed = false;
4460
4461 for (uint32_t j = 0; j < cmd_buffer_count; j++) {
4462 struct radv_cmd_buffer *cmd_buffer = container_of(cmd_buffers[j], struct radv_cmd_buffer, vk);
4463
4464 scratch_size_per_wave = MAX2(scratch_size_per_wave, cmd_buffer->scratch_size_per_wave_needed);
4465 waves_wanted = MAX2(waves_wanted, cmd_buffer->scratch_waves_wanted);
4466 compute_scratch_size_per_wave =
4467 MAX2(compute_scratch_size_per_wave, cmd_buffer->compute_scratch_size_per_wave_needed);
4468 compute_waves_wanted = MAX2(compute_waves_wanted, cmd_buffer->compute_scratch_waves_wanted);
4469 esgs_ring_size = MAX2(esgs_ring_size, cmd_buffer->esgs_ring_size_needed);
4470 gsvs_ring_size = MAX2(gsvs_ring_size, cmd_buffer->gsvs_ring_size_needed);
4471 tess_rings_needed |= cmd_buffer->tess_rings_needed;
4472 gds_needed |= cmd_buffer->gds_needed;
4473 gds_oa_needed |= cmd_buffer->gds_oa_needed;
4474 sample_positions_needed |= cmd_buffer->sample_positions_needed;
4475 }
4476
4477 return radv_get_preamble_cs(queue, scratch_size_per_wave, waves_wanted,
4478 compute_scratch_size_per_wave, compute_waves_wanted, esgs_ring_size,
4479 gsvs_ring_size, tess_rings_needed, gds_needed, gds_oa_needed,
4480 sample_positions_needed, initial_full_flush_preamble_cs,
4481 initial_preamble_cs, continue_preamble_cs);
4482 }
4483
4484 struct radv_deferred_queue_submission {
4485 struct radv_queue *queue;
4486 VkCommandBuffer *cmd_buffers;
4487 uint32_t cmd_buffer_count;
4488
4489 /* Sparse bindings that happen on a queue. */
4490 VkSparseBufferMemoryBindInfo *buffer_binds;
4491 uint32_t buffer_bind_count;
4492 VkSparseImageOpaqueMemoryBindInfo *image_opaque_binds;
4493 uint32_t image_opaque_bind_count;
4494 VkSparseImageMemoryBindInfo *image_binds;
4495 uint32_t image_bind_count;
4496
4497 bool flush_caches;
4498 VkPipelineStageFlags2KHR wait_dst_stage_mask;
4499 struct radv_semaphore_part **wait_semaphores;
4500 uint32_t wait_semaphore_count;
4501 struct radv_semaphore_part **signal_semaphores;
4502 uint32_t signal_semaphore_count;
4503 VkFence fence;
4504
4505 uint64_t *wait_values;
4506 uint64_t *signal_values;
4507
4508 struct radv_semaphore_part *temporary_semaphore_parts;
4509 uint32_t temporary_semaphore_part_count;
4510
4511 struct list_head queue_pending_list;
4512 uint32_t submission_wait_count;
4513
4514 struct list_head processing_list;
4515 };
4516
4517 static VkResult
4518 radv_queue_submit(struct vk_queue *vqueue, struct vk_queue_submit *submission)
4519 {
4520 struct radv_queue *queue = (struct radv_queue *)vqueue;
4521 struct radeon_winsys_ctx *ctx = queue->hw_ctx;
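/* With a trace BO, submit one IB at a time so a hang can be attributed to a
 * specific command buffer. */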
4522 uint32_t max_cs_submission = queue->device->trace_bo ? 1 : RADV_MAX_IBS_PER_SUBMIT;
4523 bool can_patch = true;
4524 uint32_t advance;
4525 VkResult result;
4526 struct radeon_cmdbuf *initial_preamble_cs = NULL;
4527 struct radeon_cmdbuf *initial_flush_preamble_cs = NULL;
4528 struct radeon_cmdbuf *continue_preamble_cs = NULL;
4529 enum ring_type ring = radv_queue_ring(queue);
4530
4531 result =
4532 radv_get_preambles(queue, submission->command_buffers, submission->command_buffer_count,
4533 &initial_flush_preamble_cs, &initial_preamble_cs, &continue_preamble_cs);
4534 if (result != VK_SUCCESS)
4535 goto fail;
4536
4537 for (uint32_t i = 0; i < submission->buffer_bind_count; ++i) {
4538 result = radv_sparse_buffer_bind_memory(queue->device, submission->buffer_binds + i);
4539 if (result != VK_SUCCESS)
4540 goto fail;
4541 }
4542
4543 for (uint32_t i = 0; i < submission->image_opaque_bind_count; ++i) {
4544 result =
4545 radv_sparse_image_opaque_bind_memory(queue->device, submission->image_opaque_binds + i);
4546 if (result != VK_SUCCESS)
4547 goto fail;
4548 }
4549
4550 for (uint32_t i = 0; i < submission->image_bind_count; ++i) {
4551 result = radv_sparse_image_bind_memory(queue->device, submission->image_binds + i);
4552 if (result != VK_SUCCESS)
4553 goto fail;
4554 }
4555
4556 if (!submission->command_buffer_count && !submission->wait_count && !submission->signal_count)
4557 return VK_SUCCESS;
4558
4559 if (!submission->command_buffer_count) {
4560 result = queue->device->ws->cs_submit(ctx, ring,
4561 queue->vk.index_in_family, NULL, 0, NULL, NULL,
4562 submission->wait_count, submission->waits,
4563 submission->signal_count, submission->signals, false);
4564 if (result != VK_SUCCESS)
4565 goto fail;
4566 } else {
4567 if (queue->device->trace_bo)
4568 simple_mtx_lock(&queue->device->trace_mtx);
4569
4570 struct radeon_cmdbuf **cs_array =
4571 malloc(sizeof(struct radeon_cmdbuf *) * (submission->command_buffer_count));
4572
4573 for (uint32_t j = 0; j < submission->command_buffer_count; j++) {
4574 struct radv_cmd_buffer *cmd_buffer =
4575 (struct radv_cmd_buffer *)submission->command_buffers[j];
4576 assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
4577
4578 cs_array[j] = cmd_buffer->cs;
4579 if ((cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT))
4580 can_patch = false;
4581
4582 cmd_buffer->status = RADV_CMD_BUFFER_STATUS_PENDING;
4583 }
4584
4585 for (uint32_t j = 0; j < submission->command_buffer_count; j += advance) {
4586 /* For fences on the same queue/vm, amdgpu doesn't wait until all processing is
4587  * finished before starting the next cmdbuffer, so we handle the wait here by
      * using the full-flush preamble for the first chunk. */
4588 bool need_wait = !j && submission->wait_count > 0;
4589 struct radeon_cmdbuf *initial_preamble =
4590 need_wait ? initial_flush_preamble_cs : initial_preamble_cs;
4591 advance = MIN2(max_cs_submission, submission->command_buffer_count - j);
4592 bool last_submit = j + advance == submission->command_buffer_count;
4593
4594 if (queue->device->trace_bo)
4595 *queue->device->trace_id_ptr = 0;
4596
4597 result = queue->device->ws->cs_submit(
4598 ctx, ring, queue->vk.index_in_family, cs_array + j, advance,
4599 initial_preamble, continue_preamble_cs, j == 0 ? submission->wait_count : 0,
4600 submission->waits, last_submit ? submission->signal_count : 0, submission->signals,
4601 can_patch);
4602 if (result != VK_SUCCESS) {
4603 free(cs_array);
4604 if (queue->device->trace_bo)
4605 simple_mtx_unlock(&queue->device->trace_mtx);
4606 goto fail;
4607 }
4608
4609 if (queue->device->trace_bo) {
4610 radv_check_gpu_hangs(queue, cs_array[j]);
4611 }
4612
4613 if (queue->device->tma_bo) {
4614 radv_check_trap_handler(queue);
4615 }
4616 }
4617
4618 free(cs_array);
4619 if (queue->device->trace_bo)
4620 simple_mtx_unlock(&queue->device->trace_mtx);
4621 }
4622
4623 fail:
4624 if (result != VK_SUCCESS && result != VK_ERROR_DEVICE_LOST) {
4625 /* When something bad happened during the submission, such as
4626 * an out of memory issue, it might be hard to recover from
4627 * this inconsistent state. To avoid this sort of problem, we
4628 * assume that we are in a really bad situation and return
4629 * VK_ERROR_DEVICE_LOST to ensure the clients do not attempt
4630 * to submit the same job again to this device.
4631 */
4632 result = vk_device_set_lost(&queue->device->vk, "vkQueueSubmit() failed");
4633 }
4634 return result;
4635 }
4636
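/* Submit a driver-internal command stream directly, bypassing the common
 * vk_queue submission path. */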
4637 bool
4638 radv_queue_internal_submit(struct radv_queue *queue, struct radeon_cmdbuf *cs)
4639 {
4640 struct radeon_winsys_ctx *ctx = queue->hw_ctx;
4641
4642 VkResult result =
4643 queue->device->ws->cs_submit(ctx, radv_queue_ring(queue), queue->vk.index_in_family,
4644 &cs, 1, NULL, NULL, 0, NULL, 0, NULL, false);
4645 if (result != VK_SUCCESS)
4646 return false;
4647
4648 return true;
4649 }
4650
4651 VKAPI_ATTR VkResult VKAPI_CALL
4652 radv_EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pPropertyCount,
4653 VkExtensionProperties *pProperties)
4654 {
4655 if (pLayerName)
4656 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
4657
4658 return vk_enumerate_instance_extension_properties(&radv_instance_extensions_supported,
4659 pPropertyCount, pProperties);
4660 }
4661
4662 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
4663 radv_GetInstanceProcAddr(VkInstance _instance, const char *pName)
4664 {
4665 RADV_FROM_HANDLE(radv_instance, instance, _instance);
4666
4667 /* The Vulkan 1.0 spec for vkGetInstanceProcAddr has a table specifying exactly
4668  * when we must return a valid function pointer, when we must return NULL, and
4669  * when the behavior is left undefined. See that table for the exact details.
4670  */
4671 if (pName == NULL)
4672 return NULL;
4673
4674 #define LOOKUP_RADV_ENTRYPOINT(entrypoint) \
4675 if (strcmp(pName, "vk" #entrypoint) == 0) \
4676 return (PFN_vkVoidFunction)radv_##entrypoint
4677
4678 LOOKUP_RADV_ENTRYPOINT(EnumerateInstanceExtensionProperties);
4679 LOOKUP_RADV_ENTRYPOINT(EnumerateInstanceLayerProperties);
4680 LOOKUP_RADV_ENTRYPOINT(EnumerateInstanceVersion);
4681 LOOKUP_RADV_ENTRYPOINT(CreateInstance);
4682
4683 /* GetInstanceProcAddr() can also be called with a NULL instance.
4684 * See https://gitlab.khronos.org/vulkan/vulkan/issues/2057
4685 */
4686 LOOKUP_RADV_ENTRYPOINT(GetInstanceProcAddr);
4687
4688 #undef LOOKUP_RADV_ENTRYPOINT
4689
4690 if (instance == NULL)
4691 return NULL;
4692
4693 return vk_instance_get_proc_addr(&instance->vk, &radv_instance_entrypoints, pName);
4694 }
4695
4696 /* Windows will use a dll definition file to avoid build errors. */
4697 #ifdef _WIN32
4698 #undef PUBLIC
4699 #define PUBLIC
4700 #endif
4701
4702 /* The loader wants us to expose a second GetInstanceProcAddr function
4703 * to work around certain LD_PRELOAD issues seen in apps.
4704 */
4705 PUBLIC
4706 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
4707 vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
4708 {
4709 return radv_GetInstanceProcAddr(instance, pName);
4710 }
4711
4712 PUBLIC
4713 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
4714 vk_icdGetPhysicalDeviceProcAddr(VkInstance _instance, const char *pName)
4715 {
4716 RADV_FROM_HANDLE(radv_instance, instance, _instance);
4717 return vk_instance_get_physical_device_proc_addr(&instance->vk, pName);
4718 }
4719
4720 bool
4721 radv_get_memory_fd(struct radv_device *device, struct radv_device_memory *memory, int *pFD)
4722 {
4723 /* Only set BO metadata for the first plane */
4724 if (memory->image && memory->image->offset == 0) {
4725 struct radeon_bo_metadata metadata;
4726 radv_init_metadata(device, memory->image, &metadata);
4727 device->ws->buffer_set_metadata(device->ws, memory->bo, &metadata);
4728 }
4729
4730 return device->ws->buffer_get_fd(device->ws, memory->bo, pFD);
4731 }
4732
4733 void
4734 radv_device_memory_init(struct radv_device_memory *mem, struct radv_device *device,
4735 struct radeon_winsys_bo *bo)
4736 {
4737 memset(mem, 0, sizeof(*mem));
4738 vk_object_base_init(&device->vk, &mem->base, VK_OBJECT_TYPE_DEVICE_MEMORY);
4739
4740 mem->bo = bo;
4741 }
4742
4743 void
4744 radv_device_memory_finish(struct radv_device_memory *mem)
4745 {
4746 vk_object_base_finish(&mem->base);
4747 }
4748
4749 void
4750 radv_free_memory(struct radv_device *device, const VkAllocationCallbacks *pAllocator,
4751 struct radv_device_memory *mem)
4752 {
4753 if (mem == NULL)
4754 return;
4755
4756 #if RADV_SUPPORT_ANDROID_HARDWARE_BUFFER
4757 if (mem->android_hardware_buffer)
4758 AHardwareBuffer_release(mem->android_hardware_buffer);
4759 #endif
4760
4761 if (mem->bo) {
4762 if (device->overallocation_disallowed) {
4763 mtx_lock(&device->overallocation_mutex);
4764 device->allocated_memory_size[mem->heap_index] -= mem->alloc_size;
4765 mtx_unlock(&device->overallocation_mutex);
4766 }
4767
4768 if (device->use_global_bo_list)
4769 device->ws->buffer_make_resident(device->ws, mem->bo, false);
4770 device->ws->buffer_destroy(device->ws, mem->bo);
4771 mem->bo = NULL;
4772 }
4773
4774 radv_device_memory_finish(mem);
4775 vk_free2(&device->vk.alloc, pAllocator, mem);
4776 }
4777
4778 static VkResult
4779 radv_alloc_memory(struct radv_device *device, const VkMemoryAllocateInfo *pAllocateInfo,
4780 const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMem)
4781 {
4782 struct radv_device_memory *mem;
4783 VkResult result;
4784 enum radeon_bo_domain domain;
4785 uint32_t flags = 0;
4786
4787 assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
4788
4789 const VkImportMemoryFdInfoKHR *import_info =
4790 vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
4791 const VkMemoryDedicatedAllocateInfo *dedicate_info =
4792 vk_find_struct_const(pAllocateInfo->pNext, MEMORY_DEDICATED_ALLOCATE_INFO);
4793 const VkExportMemoryAllocateInfo *export_info =
4794 vk_find_struct_const(pAllocateInfo->pNext, EXPORT_MEMORY_ALLOCATE_INFO);
4795 const struct VkImportAndroidHardwareBufferInfoANDROID *ahb_import_info =
4796 vk_find_struct_const(pAllocateInfo->pNext, IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID);
4797 const VkImportMemoryHostPointerInfoEXT *host_ptr_info =
4798 vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_HOST_POINTER_INFO_EXT);
4799
4800 const struct wsi_memory_allocate_info *wsi_info =
4801 vk_find_struct_const(pAllocateInfo->pNext, WSI_MEMORY_ALLOCATE_INFO_MESA);
4802
4803 if (pAllocateInfo->allocationSize == 0 && !ahb_import_info &&
4804 !(export_info && (export_info->handleTypes &
4805 VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID))) {
4806 /* Apparently, this is allowed */
4807 *pMem = VK_NULL_HANDLE;
4808 return VK_SUCCESS;
4809 }
4810
4811 mem =
4812 vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*mem), 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
4813 if (mem == NULL)
4814 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
4815
4816 radv_device_memory_init(mem, device, NULL);
4817
4818 if (wsi_info) {
4819 if (wsi_info->implicit_sync)
4820 flags |= RADEON_FLAG_IMPLICIT_SYNC;
4821
4822 /* In the PRIME case, the linear buffer is allocated in the default heap,
4823  * which is VRAM. When the display is driven by the iGPU while rendering
4824  * happens on the dGPU, the DDX function amdgpu_present_check_flip() fails
4825  * and presentation falls back to a blit instead of a flip. Setting
4826  * RADEON_FLAG_GTT_WC lets the kernel allocate GTT memory on hardware that
4827  * can scan out directly from GTT. The wsi_info check ensures the flag is
4828  * only set for memory allocated by the driver itself.
4829  */
4830 flags |= RADEON_FLAG_GTT_WC;
4831 }
4832
4833 if (dedicate_info) {
4834 mem->image = radv_image_from_handle(dedicate_info->image);
4835 mem->buffer = radv_buffer_from_handle(dedicate_info->buffer);
4836 } else {
4837 mem->image = NULL;
4838 mem->buffer = NULL;
4839 }
4840
4841 if (wsi_info && wsi_info->implicit_sync && mem->buffer) {
4842 /* Mark the linear prime buffer (aka the destination of the prime blit)
4843  * as uncached.
4844  */
4845 flags |= RADEON_FLAG_VA_UNCACHED;
4846 }
4847
4848 float priority_float = 0.5;
4849 const struct VkMemoryPriorityAllocateInfoEXT *priority_ext =
4850 vk_find_struct_const(pAllocateInfo->pNext, MEMORY_PRIORITY_ALLOCATE_INFO_EXT);
4851 if (priority_ext)
4852 priority_float = priority_ext->priority;
4853
4854 uint64_t replay_address = 0;
4855 const VkMemoryOpaqueCaptureAddressAllocateInfo *replay_info =
4856 vk_find_struct_const(pAllocateInfo->pNext, MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO);
4857 if (replay_info && replay_info->opaqueCaptureAddress)
4858 replay_address = replay_info->opaqueCaptureAddress;
4859
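/* Map the normalized [0, 1] Vulkan memory priority onto the integer
 * application priority range used by the winsys. */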
4860 unsigned priority = MIN2(RADV_BO_PRIORITY_APPLICATION_MAX - 1,
4861 (int)(priority_float * RADV_BO_PRIORITY_APPLICATION_MAX));
4862
4863 mem->user_ptr = NULL;
4864
4865 #if RADV_SUPPORT_ANDROID_HARDWARE_BUFFER
4866 mem->android_hardware_buffer = NULL;
4867 #endif
4868
4869 if (ahb_import_info) {
4870 result = radv_import_ahb_memory(device, mem, priority, ahb_import_info);
4871 if (result != VK_SUCCESS)
4872 goto fail;
4873 } else if (export_info && (export_info->handleTypes &
4874 VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID)) {
4875 result = radv_create_ahb_memory(device, mem, priority, pAllocateInfo);
4876 if (result != VK_SUCCESS)
4877 goto fail;
4878 } else if (import_info) {
4879 assert(import_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
4880 import_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
4881 result = device->ws->buffer_from_fd(device->ws, import_info->fd, priority, &mem->bo, NULL);
4882 if (result != VK_SUCCESS) {
4883 goto fail;
4884 } else {
4885 close(import_info->fd);
4886 }
4887
4888 if (mem->image && mem->image->plane_count == 1 &&
4889 !vk_format_is_depth_or_stencil(mem->image->vk_format) && mem->image->info.samples == 1 &&
4890 mem->image->tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
4891 struct radeon_bo_metadata metadata;
4892 device->ws->buffer_get_metadata(device->ws, mem->bo, &metadata);
4893
4894 struct radv_image_create_info create_info = {.no_metadata_planes = true,
4895 .bo_metadata = &metadata};
4896
4897 /* This gives a basic ability to import radeonsi images
4898 * that don't have DCC. This is not guaranteed by any
4899 * spec and can be removed after we support modifiers. */
4900 result = radv_image_create_layout(device, create_info, NULL, mem->image);
4901 if (result != VK_SUCCESS) {
4902 device->ws->buffer_destroy(device->ws, mem->bo);
4903 goto fail;
4904 }
4905 }
4906 } else if (host_ptr_info) {
4907 assert(host_ptr_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT);
4908 result = device->ws->buffer_from_ptr(device->ws, host_ptr_info->pHostPointer,
4909 pAllocateInfo->allocationSize, priority, &mem->bo);
4910 if (result != VK_SUCCESS) {
4911 goto fail;
4912 } else {
4913 mem->user_ptr = host_ptr_info->pHostPointer;
4914 }
4915 } else {
4916 uint64_t alloc_size = align_u64(pAllocateInfo->allocationSize, 4096);
4917 uint32_t heap_index;
4918
4919 heap_index =
4920 device->physical_device->memory_properties.memoryTypes[pAllocateInfo->memoryTypeIndex]
4921 .heapIndex;
4922 domain = device->physical_device->memory_domains[pAllocateInfo->memoryTypeIndex];
4923 flags |= device->physical_device->memory_flags[pAllocateInfo->memoryTypeIndex];
4924
4925 if (!import_info && (!export_info || !export_info->handleTypes)) {
4926 flags |= RADEON_FLAG_NO_INTERPROCESS_SHARING;
4927 if (device->use_global_bo_list) {
4928 flags |= RADEON_FLAG_PREFER_LOCAL_BO;
4929 }
4930 }
4931
4932 const VkMemoryAllocateFlagsInfo *flags_info = vk_find_struct_const(pAllocateInfo->pNext, MEMORY_ALLOCATE_FLAGS_INFO);
4933 if (flags_info && flags_info->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT)
4934 flags |= RADEON_FLAG_REPLAYABLE;
4935
4936 if (device->instance->zero_vram)
4937 flags |= RADEON_FLAG_ZERO_VRAM;
4938
4939 if (device->overallocation_disallowed) {
4940 uint64_t total_size =
4941 device->physical_device->memory_properties.memoryHeaps[heap_index].size;
4942
4943 mtx_lock(&device->overallocation_mutex);
4944 if (device->allocated_memory_size[heap_index] + alloc_size > total_size) {
4945 mtx_unlock(&device->overallocation_mutex);
4946 result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
4947 goto fail;
4948 }
4949 device->allocated_memory_size[heap_index] += alloc_size;
4950 mtx_unlock(&device->overallocation_mutex);
4951 }
4952
4953 result = device->ws->buffer_create(device->ws, alloc_size,
4954 device->physical_device->rad_info.max_alignment, domain,
4955 flags, priority, replay_address, &mem->bo);
4956
4957 if (result != VK_SUCCESS) {
4958 if (device->overallocation_disallowed) {
4959 mtx_lock(&device->overallocation_mutex);
4960 device->allocated_memory_size[heap_index] -= alloc_size;
4961 mtx_unlock(&device->overallocation_mutex);
4962 }
4963 goto fail;
4964 }
4965
4966 mem->heap_index = heap_index;
4967 mem->alloc_size = alloc_size;
4968 }
4969
4970 if (!wsi_info) {
4971 if (device->use_global_bo_list) {
4972 result = device->ws->buffer_make_resident(device->ws, mem->bo, true);
4973 if (result != VK_SUCCESS)
4974 goto fail;
4975 }
4976 }
4977
4978 *pMem = radv_device_memory_to_handle(mem);
4979
4980 return VK_SUCCESS;
4981
4982 fail:
4983 radv_free_memory(device, pAllocator, mem);
4984
4985 return result;
4986 }
4987
4988 VKAPI_ATTR VkResult VKAPI_CALL
4989 radv_AllocateMemory(VkDevice _device, const VkMemoryAllocateInfo *pAllocateInfo,
4990 const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMem)
4991 {
4992 RADV_FROM_HANDLE(radv_device, device, _device);
4993 return radv_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
4994 }
4995
4996 VKAPI_ATTR void VKAPI_CALL
4997 radv_FreeMemory(VkDevice _device, VkDeviceMemory _mem, const VkAllocationCallbacks *pAllocator)
4998 {
4999 RADV_FROM_HANDLE(radv_device, device, _device);
5000 RADV_FROM_HANDLE(radv_device_memory, mem, _mem);
5001
5002 radv_free_memory(device, pAllocator, mem);
5003 }
5004
5005 VKAPI_ATTR VkResult VKAPI_CALL
5006 radv_MapMemory(VkDevice _device, VkDeviceMemory _memory, VkDeviceSize offset, VkDeviceSize size,
5007 VkMemoryMapFlags flags, void **ppData)
5008 {
5009 RADV_FROM_HANDLE(radv_device, device, _device);
5010 RADV_FROM_HANDLE(radv_device_memory, mem, _memory);
5011
5012 if (mem->user_ptr)
5013 *ppData = mem->user_ptr;
5014 else
5015 *ppData = device->ws->buffer_map(mem->bo);
5016
5017 if (*ppData) {
5018 *ppData = (uint8_t *)*ppData + offset;
5019 return VK_SUCCESS;
5020 }
5021
5022 return vk_error(device, VK_ERROR_MEMORY_MAP_FAILED);
5023 }
5024
5025 VKAPI_ATTR void VKAPI_CALL
5026 radv_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
5027 {
5028 RADV_FROM_HANDLE(radv_device, device, _device);
5029 RADV_FROM_HANDLE(radv_device_memory, mem, _memory);
5030
5031 if (mem->user_ptr == NULL)
5032 device->ws->buffer_unmap(mem->bo);
5033 }
5034
5035 VKAPI_ATTR VkResult VKAPI_CALL
5036 radv_FlushMappedMemoryRanges(VkDevice _device, uint32_t memoryRangeCount,
5037 const VkMappedMemoryRange *pMemoryRanges)
5038 {
5039 return VK_SUCCESS;
5040 }
5041
5042 VKAPI_ATTR VkResult VKAPI_CALL
5043 radv_InvalidateMappedMemoryRanges(VkDevice _device, uint32_t memoryRangeCount,
5044 const VkMappedMemoryRange *pMemoryRanges)
5045 {
5046 return VK_SUCCESS;
5047 }
5048
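/* Buffers can live in any memory type; sparse buffers need page (4 KiB)
 * alignment, everything else gets a conservative 16-byte alignment. */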
5049 static void
5050 radv_get_buffer_memory_requirements(struct radv_device *device,
5051 VkDeviceSize size,
5052 VkBufferCreateFlags flags,
5053 VkMemoryRequirements2 *pMemoryRequirements)
5054 {
5055 pMemoryRequirements->memoryRequirements.memoryTypeBits =
5056 (1u << device->physical_device->memory_properties.memoryTypeCount) - 1;
5057
5058 if (flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)
5059 pMemoryRequirements->memoryRequirements.alignment = 4096;
5060 else
5061 pMemoryRequirements->memoryRequirements.alignment = 16;
5062
5063 pMemoryRequirements->memoryRequirements.size =
5064 align64(size, pMemoryRequirements->memoryRequirements.alignment);
5065
5066 vk_foreach_struct(ext, pMemoryRequirements->pNext)
5067 {
5068 switch (ext->sType) {
5069 case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS: {
5070 VkMemoryDedicatedRequirements *req = (VkMemoryDedicatedRequirements *)ext;
5071 req->requiresDedicatedAllocation = false;
5072 req->prefersDedicatedAllocation = req->requiresDedicatedAllocation;
5073 break;
5074 }
5075 default:
5076 break;
5077 }
5078 }
5079 }
5080
5081 VKAPI_ATTR void VKAPI_CALL
5082 radv_GetBufferMemoryRequirements2(VkDevice _device, const VkBufferMemoryRequirementsInfo2 *pInfo,
5083 VkMemoryRequirements2 *pMemoryRequirements)
5084 {
5085 RADV_FROM_HANDLE(radv_device, device, _device);
5086 RADV_FROM_HANDLE(radv_buffer, buffer, pInfo->buffer);
5087
5088 radv_get_buffer_memory_requirements(device, buffer->size, buffer->flags, pMemoryRequirements);
5089 }
5090
5091 VKAPI_ATTR void VKAPI_CALL
5092 radv_GetDeviceBufferMemoryRequirementsKHR(VkDevice _device,
5093 const VkDeviceBufferMemoryRequirementsKHR *pInfo,
5094 VkMemoryRequirements2 *pMemoryRequirements)
5095 {
5096 RADV_FROM_HANDLE(radv_device, device, _device);
5097
5098 radv_get_buffer_memory_requirements(device, pInfo->pCreateInfo->size, pInfo->pCreateInfo->flags,
5099 pMemoryRequirements);
5100 }
5101
5102 VKAPI_ATTR void VKAPI_CALL
5103 radv_GetImageMemoryRequirements2(VkDevice _device, const VkImageMemoryRequirementsInfo2 *pInfo,
5104 VkMemoryRequirements2 *pMemoryRequirements)
5105 {
5106 RADV_FROM_HANDLE(radv_device, device, _device);
5107 RADV_FROM_HANDLE(radv_image, image, pInfo->image);
5108
5109 pMemoryRequirements->memoryRequirements.memoryTypeBits =
5110 (1u << device->physical_device->memory_properties.memoryTypeCount) - 1;
5111
5112 pMemoryRequirements->memoryRequirements.size = image->size;
5113 pMemoryRequirements->memoryRequirements.alignment = image->alignment;
5114
5115 vk_foreach_struct(ext, pMemoryRequirements->pNext)
5116 {
5117 switch (ext->sType) {
5118 case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS: {
5119 VkMemoryDedicatedRequirements *req = (VkMemoryDedicatedRequirements *)ext;
5120 req->requiresDedicatedAllocation =
5121 image->shareable && image->tiling != VK_IMAGE_TILING_LINEAR;
5122 req->prefersDedicatedAllocation = req->requiresDedicatedAllocation;
5123 break;
5124 }
5125 default:
5126 break;
5127 }
5128 }
5129 }
5130
5131 VKAPI_ATTR void VKAPI_CALL
5132 radv_GetDeviceImageMemoryRequirementsKHR(VkDevice device,
5133 const VkDeviceImageMemoryRequirementsKHR *pInfo,
5134 VkMemoryRequirements2 *pMemoryRequirements)
5135 {
5136 UNUSED VkResult result;
5137 VkImage image;
5138
5139 /* Determining the image size/alignment requires creating a surface, which is
5140  * complicated without creating an image.
5141  * TODO: Avoid creating an image.
5142  */
5143 result = radv_CreateImage(device, pInfo->pCreateInfo, NULL, &image);
5144 assert(result == VK_SUCCESS);
5145
5146 VkImageMemoryRequirementsInfo2 info2 = {
5147 .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2,
5148 .image = image,
5149 };
5150
5151 radv_GetImageMemoryRequirements2(device, &info2, pMemoryRequirements);
5152
5153 radv_DestroyImage(device, image, NULL);
5154 }
5155
5156 VKAPI_ATTR void VKAPI_CALL
5157 radv_GetDeviceMemoryCommitment(VkDevice device, VkDeviceMemory memory,
5158 VkDeviceSize *pCommittedMemoryInBytes)
5159 {
5160 *pCommittedMemoryInBytes = 0;
5161 }
5162
5163 VKAPI_ATTR VkResult VKAPI_CALL
5164 radv_BindBufferMemory2(VkDevice _device, uint32_t bindInfoCount,
5165 const VkBindBufferMemoryInfo *pBindInfos)
5166 {
5167 RADV_FROM_HANDLE(radv_device, device, _device);
5168
5169 for (uint32_t i = 0; i < bindInfoCount; ++i) {
5170 RADV_FROM_HANDLE(radv_device_memory, mem, pBindInfos[i].memory);
5171 RADV_FROM_HANDLE(radv_buffer, buffer, pBindInfos[i].buffer);
5172
5173 if (mem->alloc_size) {
5174 VkBufferMemoryRequirementsInfo2 info = {
5175 .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2,
5176 .buffer = pBindInfos[i].buffer,
5177 };
5178 VkMemoryRequirements2 reqs = {
5179 .sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2,
5180 };
5181
5182 radv_GetBufferMemoryRequirements2(_device, &info, &reqs);
5183
5184 if (pBindInfos[i].memoryOffset + reqs.memoryRequirements.size > mem->alloc_size) {
5185 return vk_errorf(device, VK_ERROR_UNKNOWN,
5186 "Device memory object too small for the buffer.\n");
5187 }
5188 }
5189
5190 buffer->bo = mem->bo;
5191 buffer->offset = pBindInfos[i].memoryOffset;
5192 }
5193 return VK_SUCCESS;
5194 }
5195
5196 VKAPI_ATTR VkResult VKAPI_CALL
5197 radv_BindImageMemory2(VkDevice _device, uint32_t bindInfoCount,
5198 const VkBindImageMemoryInfo *pBindInfos)
5199 {
5200 RADV_FROM_HANDLE(radv_device, device, _device);
5201
5202 for (uint32_t i = 0; i < bindInfoCount; ++i) {
5203 RADV_FROM_HANDLE(radv_device_memory, mem, pBindInfos[i].memory);
5204 RADV_FROM_HANDLE(radv_image, image, pBindInfos[i].image);
5205
5206 if (mem->alloc_size) {
5207 VkImageMemoryRequirementsInfo2 info = {
5208 .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2,
5209 .image = pBindInfos[i].image,
5210 };
5211 VkMemoryRequirements2 reqs = {
5212 .sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2,
5213 };
5214
5215 radv_GetImageMemoryRequirements2(_device, &info, &reqs);
5216
5217 if (pBindInfos[i].memoryOffset + reqs.memoryRequirements.size > mem->alloc_size) {
5218 return vk_errorf(device, VK_ERROR_UNKNOWN,
5219 "Device memory object too small for the image.\n");
5220 }
5221 }
5222
5223 image->bo = mem->bo;
5224 image->offset = pBindInfos[i].memoryOffset;
5225 }
5226 return VK_SUCCESS;
5227 }
5228
5229 static void
5230 radv_destroy_event(struct radv_device *device, const VkAllocationCallbacks *pAllocator,
5231 struct radv_event *event)
5232 {
5233 if (event->bo)
5234 device->ws->buffer_destroy(device->ws, event->bo);
5235
5236 vk_object_base_finish(&event->base);
5237 vk_free2(&device->vk.alloc, pAllocator, event);
5238 }
5239
5240 VKAPI_ATTR VkResult VKAPI_CALL
5241 radv_CreateEvent(VkDevice _device, const VkEventCreateInfo *pCreateInfo,
5242 const VkAllocationCallbacks *pAllocator, VkEvent *pEvent)
5243 {
5244 RADV_FROM_HANDLE(radv_device, device, _device);
5245 enum radeon_bo_domain bo_domain;
5246 enum radeon_bo_flag bo_flags;
5247 struct radv_event *event;
5248 VkResult result;
5249
5250 event = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*event), 8,
5251 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
5252 if (!event)
5253 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
5254
5255 vk_object_base_init(&device->vk, &event->base, VK_OBJECT_TYPE_EVENT);
5256
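/* Device-only events are never mapped by the host, so keep them in VRAM;
 * otherwise use CPU-visible GTT so vkSetEvent()/vkGetEventStatus() can access
 * the mapping directly. */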
5257 if (pCreateInfo->flags & VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR) {
5258 bo_domain = RADEON_DOMAIN_VRAM;
5259 bo_flags = RADEON_FLAG_NO_CPU_ACCESS;
5260 } else {
5261 bo_domain = RADEON_DOMAIN_GTT;
5262 bo_flags = RADEON_FLAG_CPU_ACCESS;
5263 }
5264
5265 result = device->ws->buffer_create(
5266 device->ws, 8, 8, bo_domain,
5267 RADEON_FLAG_VA_UNCACHED | RADEON_FLAG_NO_INTERPROCESS_SHARING | bo_flags,
5268 RADV_BO_PRIORITY_FENCE, 0, &event->bo);
5269 if (result != VK_SUCCESS) {
5270 radv_destroy_event(device, pAllocator, event);
5271 return vk_error(device, result);
5272 }
5273
5274 if (!(pCreateInfo->flags & VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR)) {
5275 event->map = (uint64_t *)device->ws->buffer_map(event->bo);
5276 if (!event->map) {
5277 radv_destroy_event(device, pAllocator, event);
5278 return vk_error(device, VK_ERROR_OUT_OF_DEVICE_MEMORY);
5279 }
5280 }
5281
5282 *pEvent = radv_event_to_handle(event);
5283
5284 return VK_SUCCESS;
5285 }
5286
5287 VKAPI_ATTR void VKAPI_CALL
5288 radv_DestroyEvent(VkDevice _device, VkEvent _event, const VkAllocationCallbacks *pAllocator)
5289 {
5290 RADV_FROM_HANDLE(radv_device, device, _device);
5291 RADV_FROM_HANDLE(radv_event, event, _event);
5292
5293 if (!event)
5294 return;
5295
5296 radv_destroy_event(device, pAllocator, event);
5297 }
5298
5299 VKAPI_ATTR VkResult VKAPI_CALL
5300 radv_GetEventStatus(VkDevice _device, VkEvent _event)
5301 {
5302 RADV_FROM_HANDLE(radv_device, device, _device);
5303 RADV_FROM_HANDLE(radv_event, event, _event);
5304
5305 if (vk_device_is_lost(&device->vk))
5306 return VK_ERROR_DEVICE_LOST;
5307
5308 if (*event->map == 1)
5309 return VK_EVENT_SET;
5310 return VK_EVENT_RESET;
5311 }
5312
5313 VKAPI_ATTR VkResult VKAPI_CALL
5314 radv_SetEvent(VkDevice _device, VkEvent _event)
5315 {
5316 RADV_FROM_HANDLE(radv_event, event, _event);
5317 *event->map = 1;
5318
5319 return VK_SUCCESS;
5320 }
5321
5322 VKAPI_ATTR VkResult VKAPI_CALL
5323 radv_ResetEvent(VkDevice _device, VkEvent _event)
5324 {
5325 RADV_FROM_HANDLE(radv_event, event, _event);
5326 *event->map = 0;
5327
5328 return VK_SUCCESS;
5329 }
5330
5331 void
5332 radv_buffer_init(struct radv_buffer *buffer, struct radv_device *device,
5333 struct radeon_winsys_bo *bo, uint64_t size,
5334 uint64_t offset)
5335 {
5336 vk_object_base_init(&device->vk, &buffer->base, VK_OBJECT_TYPE_BUFFER);
5337
5338 buffer->usage = 0;
5339 buffer->flags = 0;
5340 buffer->bo = bo;
5341 buffer->size = size;
5342 buffer->offset = offset;
5343 }
5344
5345 void
5346 radv_buffer_finish(struct radv_buffer *buffer)
5347 {
5348 vk_object_base_finish(&buffer->base);
5349 }
5350
5351 static void
5352 radv_destroy_buffer(struct radv_device *device, const VkAllocationCallbacks *pAllocator,
5353 struct radv_buffer *buffer)
5354 {
5355 if ((buffer->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) && buffer->bo)
5356 device->ws->buffer_destroy(device->ws, buffer->bo);
5357
5358 radv_buffer_finish(buffer);
5359 vk_free2(&device->vk.alloc, pAllocator, buffer);
5360 }
5361
5362 VKAPI_ATTR VkResult VKAPI_CALL
5363 radv_CreateBuffer(VkDevice _device, const VkBufferCreateInfo *pCreateInfo,
5364 const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer)
5365 {
5366 RADV_FROM_HANDLE(radv_device, device, _device);
5367 struct radv_buffer *buffer;
5368
5369 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
5370
5371 buffer = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*buffer), 8,
5372 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
5373 if (buffer == NULL)
5374 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
5375
5376 radv_buffer_init(buffer, device, NULL, pCreateInfo->size, 0);
5377
5378 buffer->usage = pCreateInfo->usage;
5379 buffer->flags = pCreateInfo->flags;
5380
5381 if (pCreateInfo->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) {
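/* Sparse buffers only reserve a virtual address range here; physical pages
 * are bound later through vkQueueBindSparse(). */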
5382 enum radeon_bo_flag flags = RADEON_FLAG_VIRTUAL;
5383 if (pCreateInfo->flags & VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT)
5384 flags |= RADEON_FLAG_REPLAYABLE;
5385
5386 uint64_t replay_address = 0;
5387 const VkBufferOpaqueCaptureAddressCreateInfo *replay_info =
5388 vk_find_struct_const(pCreateInfo->pNext, BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO);
5389 if (replay_info && replay_info->opaqueCaptureAddress)
5390 replay_address = replay_info->opaqueCaptureAddress;
5391
5392 VkResult result = device->ws->buffer_create(device->ws, align64(buffer->size, 4096), 4096, 0,
5393 flags, RADV_BO_PRIORITY_VIRTUAL,
5394 replay_address, &buffer->bo);
5395 if (result != VK_SUCCESS) {
5396 radv_destroy_buffer(device, pAllocator, buffer);
5397 return vk_error(device, result);
5398 }
5399 }
5400
5401 *pBuffer = radv_buffer_to_handle(buffer);
5402
5403 return VK_SUCCESS;
5404 }
5405
5406 VKAPI_ATTR void VKAPI_CALL
5407 radv_DestroyBuffer(VkDevice _device, VkBuffer _buffer, const VkAllocationCallbacks *pAllocator)
5408 {
5409 RADV_FROM_HANDLE(radv_device, device, _device);
5410 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
5411
5412 if (!buffer)
5413 return;
5414
5415 radv_destroy_buffer(device, pAllocator, buffer);
5416 }
5417
5418 VKAPI_ATTR VkDeviceAddress VKAPI_CALL
5419 radv_GetBufferDeviceAddress(VkDevice device, const VkBufferDeviceAddressInfo *pInfo)
5420 {
5421 RADV_FROM_HANDLE(radv_buffer, buffer, pInfo->buffer);
5422 return radv_buffer_get_va(buffer->bo) + buffer->offset;
5423 }
5424
5425 VKAPI_ATTR uint64_t VKAPI_CALL
5426 radv_GetBufferOpaqueCaptureAddress(VkDevice device, const VkBufferDeviceAddressInfo *pInfo)
5427 {
5428 RADV_FROM_HANDLE(radv_buffer, buffer, pInfo->buffer);
5429 return buffer->bo ? radv_buffer_get_va(buffer->bo) + buffer->offset : 0;
5430 }
5431
5432 VKAPI_ATTR uint64_t VKAPI_CALL
5433 radv_GetDeviceMemoryOpaqueCaptureAddress(VkDevice device,
5434 const VkDeviceMemoryOpaqueCaptureAddressInfo *pInfo)
5435 {
5436 RADV_FROM_HANDLE(radv_device_memory, mem, pInfo->memory);
5437 return radv_buffer_get_va(mem->bo);
5438 }
5439
5440 static inline unsigned
5441 si_tile_mode_index(const struct radv_image_plane *plane, unsigned level, bool stencil)
5442 {
5443 if (stencil)
5444 return plane->surface.u.legacy.zs.stencil_tiling_index[level];
5445 else
5446 return plane->surface.u.legacy.tiling_index[level];
5447 }
5448
5449 static uint32_t
5450 radv_surface_max_layer_count(struct radv_image_view *iview)
5451 {
5452 return iview->type == VK_IMAGE_VIEW_TYPE_3D ? iview->extent.depth
5453 : (iview->base_layer + iview->layer_count);
5454 }
5455
5456 static unsigned
5457 get_dcc_max_uncompressed_block_size(const struct radv_device *device,
5458 const struct radv_image_view *iview)
5459 {
5460 if (device->physical_device->rad_info.chip_class < GFX10 && iview->image->info.samples > 1) {
5461 if (iview->image->planes[0].surface.bpe == 1)
5462 return V_028C78_MAX_BLOCK_SIZE_64B;
5463 else if (iview->image->planes[0].surface.bpe == 2)
5464 return V_028C78_MAX_BLOCK_SIZE_128B;
5465 }
5466
5467 return V_028C78_MAX_BLOCK_SIZE_256B;
5468 }
5469
5470 static unsigned
5471 get_dcc_min_compressed_block_size(const struct radv_device *device)
5472 {
5473 if (!device->physical_device->rad_info.has_dedicated_vram) {
5474 /* amdvlk: [min-compressed-block-size] should be set to 32 for
5475 * dGPU and 64 for APU because all of our APUs to date use
5476 * DIMMs which have a request granularity size of 64B while all
5477 * other chips have a 32B request size.
5478 */
5479 return V_028C78_MIN_BLOCK_SIZE_64B;
5480 }
5481
5482 return V_028C78_MIN_BLOCK_SIZE_32B;
5483 }
5484
5485 static uint32_t
5486 radv_init_dcc_control_reg(struct radv_device *device, struct radv_image_view *iview)
5487 {
5488 unsigned max_uncompressed_block_size = get_dcc_max_uncompressed_block_size(device, iview);
5489 unsigned min_compressed_block_size = get_dcc_min_compressed_block_size(device);
5490 unsigned max_compressed_block_size;
5491 unsigned independent_128b_blocks;
5492 unsigned independent_64b_blocks;
5493
5494 if (!radv_dcc_enabled(iview->image, iview->base_mip))
5495 return 0;
5496
5497 /* For GFX9+ ac_surface computes values for us (except min_compressed
5498 * and max_uncompressed) */
5499 if (device->physical_device->rad_info.chip_class >= GFX9) {
5500 max_compressed_block_size =
5501 iview->image->planes[0].surface.u.gfx9.color.dcc.max_compressed_block_size;
5502 independent_128b_blocks = iview->image->planes[0].surface.u.gfx9.color.dcc.independent_128B_blocks;
5503 independent_64b_blocks = iview->image->planes[0].surface.u.gfx9.color.dcc.independent_64B_blocks;
5504 } else {
5505 independent_128b_blocks = 0;
5506
5507 if (iview->image->usage & (VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
5508 VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT)) {
5509 /* If this DCC image is potentially going to be used in texture
5510 * fetches, we need some special settings.
5511 */
5512 independent_64b_blocks = 1;
5513 max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
5514 } else {
5515 /* MAX_UNCOMPRESSED_BLOCK_SIZE must be >=
5516 * MAX_COMPRESSED_BLOCK_SIZE. Set MAX_COMPRESSED_BLOCK_SIZE as
5517 * big as possible for better compression state.
5518 */
5519 independent_64b_blocks = 0;
5520 max_compressed_block_size = max_uncompressed_block_size;
5521 }
5522 }
5523
5524 return S_028C78_MAX_UNCOMPRESSED_BLOCK_SIZE(max_uncompressed_block_size) |
5525 S_028C78_MAX_COMPRESSED_BLOCK_SIZE(max_compressed_block_size) |
5526 S_028C78_MIN_COMPRESSED_BLOCK_SIZE(min_compressed_block_size) |
5527 S_028C78_INDEPENDENT_64B_BLOCKS(independent_64b_blocks) |
5528 S_028C78_INDEPENDENT_128B_BLOCKS(independent_128b_blocks);
5529 }
5530
5531 void
5532 radv_initialise_color_surface(struct radv_device *device, struct radv_color_buffer_info *cb,
5533 struct radv_image_view *iview)
5534 {
5535 const struct util_format_description *desc;
5536 unsigned ntype, format, swap, endian;
5537 unsigned blend_clamp = 0, blend_bypass = 0;
5538 uint64_t va;
5539 const struct radv_image_plane *plane = &iview->image->planes[iview->plane_id];
5540 const struct radeon_surf *surf = &plane->surface;
5541
5542 desc = vk_format_description(iview->vk_format);
5543
5544 memset(cb, 0, sizeof(*cb));
5545
5546 /* Intensity is implemented as Red, so treat it that way. */
5547 cb->cb_color_attrib = S_028C74_FORCE_DST_ALPHA_1(desc->swizzle[3] == PIPE_SWIZZLE_1);
5548
5549 va = radv_buffer_get_va(iview->image->bo) + iview->image->offset;
5550
5551 cb->cb_color_base = va >> 8;
5552
5553 if (device->physical_device->rad_info.chip_class >= GFX9) {
5554 if (device->physical_device->rad_info.chip_class >= GFX10) {
5555 cb->cb_color_attrib3 |= S_028EE0_COLOR_SW_MODE(surf->u.gfx9.swizzle_mode) |
5556 S_028EE0_FMASK_SW_MODE(surf->u.gfx9.color.fmask_swizzle_mode) |
5557 S_028EE0_CMASK_PIPE_ALIGNED(1) |
5558 S_028EE0_DCC_PIPE_ALIGNED(surf->u.gfx9.color.dcc.pipe_aligned);
5559 } else {
5560 struct gfx9_surf_meta_flags meta = {
5561 .rb_aligned = 1,
5562 .pipe_aligned = 1,
5563 };
5564
5565 if (surf->meta_offset)
5566 meta = surf->u.gfx9.color.dcc;
5567
5568 cb->cb_color_attrib |= S_028C74_COLOR_SW_MODE(surf->u.gfx9.swizzle_mode) |
5569 S_028C74_FMASK_SW_MODE(surf->u.gfx9.color.fmask_swizzle_mode) |
5570 S_028C74_RB_ALIGNED(meta.rb_aligned) |
5571 S_028C74_PIPE_ALIGNED(meta.pipe_aligned);
5572 cb->cb_mrt_epitch = S_0287A0_EPITCH(surf->u.gfx9.epitch);
5573 }
5574
5575 cb->cb_color_base += surf->u.gfx9.surf_offset >> 8;
5576 cb->cb_color_base |= surf->tile_swizzle;
5577 } else {
5578 const struct legacy_surf_level *level_info = &surf->u.legacy.level[iview->base_mip];
5579 unsigned pitch_tile_max, slice_tile_max, tile_mode_index;
5580
5581 cb->cb_color_base += level_info->offset_256B;
5582 if (level_info->mode == RADEON_SURF_MODE_2D)
5583 cb->cb_color_base |= surf->tile_swizzle;
5584
5585 pitch_tile_max = level_info->nblk_x / 8 - 1;
5586 slice_tile_max = (level_info->nblk_x * level_info->nblk_y) / 64 - 1;
5587 tile_mode_index = si_tile_mode_index(plane, iview->base_mip, false);
5588
5589 cb->cb_color_pitch = S_028C64_TILE_MAX(pitch_tile_max);
5590 cb->cb_color_slice = S_028C68_TILE_MAX(slice_tile_max);
5591 cb->cb_color_cmask_slice = surf->u.legacy.color.cmask_slice_tile_max;
5592
5593 cb->cb_color_attrib |= S_028C74_TILE_MODE_INDEX(tile_mode_index);
5594
5595 if (radv_image_has_fmask(iview->image)) {
5596 if (device->physical_device->rad_info.chip_class >= GFX7)
5597 cb->cb_color_pitch |=
5598 S_028C64_FMASK_TILE_MAX(surf->u.legacy.color.fmask.pitch_in_pixels / 8 - 1);
5599 cb->cb_color_attrib |= S_028C74_FMASK_TILE_MODE_INDEX(surf->u.legacy.color.fmask.tiling_index);
5600 cb->cb_color_fmask_slice = S_028C88_TILE_MAX(surf->u.legacy.color.fmask.slice_tile_max);
5601 } else {
5602 /* This must be set for fast clear to work without FMASK. */
5603 if (device->physical_device->rad_info.chip_class >= GFX7)
5604 cb->cb_color_pitch |= S_028C64_FMASK_TILE_MAX(pitch_tile_max);
5605 cb->cb_color_attrib |= S_028C74_FMASK_TILE_MODE_INDEX(tile_mode_index);
5606 cb->cb_color_fmask_slice = S_028C88_TILE_MAX(slice_tile_max);
5607 }
5608 }
5609
5610 /* CMASK variables */
5611 va = radv_buffer_get_va(iview->image->bo) + iview->image->offset;
5612 va += surf->cmask_offset;
5613 cb->cb_color_cmask = va >> 8;
5614
5615 va = radv_buffer_get_va(iview->image->bo) + iview->image->offset;
5616 va += surf->meta_offset;
5617
5618 if (radv_dcc_enabled(iview->image, iview->base_mip) &&
5619 device->physical_device->rad_info.chip_class <= GFX8)
5620 va += plane->surface.u.legacy.color.dcc_level[iview->base_mip].dcc_offset;
5621
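/* Keep only the tile swizzle bits that fall within the DCC metadata alignment;
 * cb_dcc_base is in 256-byte units, hence the extra >> 8.
 */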
5622 unsigned dcc_tile_swizzle = surf->tile_swizzle;
5623 dcc_tile_swizzle &= ((1 << surf->meta_alignment_log2) - 1) >> 8;
5624
5625 cb->cb_dcc_base = va >> 8;
5626 cb->cb_dcc_base |= dcc_tile_swizzle;
5627
5628 /* GFX10 field has the same base shift as the GFX6 field. */
5629 uint32_t max_slice = radv_surface_max_layer_count(iview) - 1;
5630 cb->cb_color_view =
5631 S_028C6C_SLICE_START(iview->base_layer) | S_028C6C_SLICE_MAX_GFX10(max_slice);
5632
5633 if (iview->image->info.samples > 1) {
5634 unsigned log_samples = util_logbase2(iview->image->info.samples);
5635
5636 cb->cb_color_attrib |=
5637 S_028C74_NUM_SAMPLES(log_samples) | S_028C74_NUM_FRAGMENTS(log_samples);
5638 }
5639
5640 if (radv_image_has_fmask(iview->image)) {
5641 va = radv_buffer_get_va(iview->image->bo) + iview->image->offset + surf->fmask_offset;
5642 cb->cb_color_fmask = va >> 8;
5643 cb->cb_color_fmask |= surf->fmask_tile_swizzle;
5644 } else {
5645 cb->cb_color_fmask = cb->cb_color_base;
5646 }
5647
5648 ntype = radv_translate_color_numformat(iview->vk_format, desc,
5649 vk_format_get_first_non_void_channel(iview->vk_format));
5650 format = radv_translate_colorformat(iview->vk_format);
5651 assert(format != V_028C70_COLOR_INVALID);
5652
5653 swap = radv_translate_colorswap(iview->vk_format, false);
5654 endian = radv_colorformat_endian_swap(format);
5655
5656 /* blend clamp should be set for all NORM/SRGB types */
5657 if (ntype == V_028C70_NUMBER_UNORM || ntype == V_028C70_NUMBER_SNORM ||
5658 ntype == V_028C70_NUMBER_SRGB)
5659 blend_clamp = 1;
5660
5661 /* set blend bypass according to docs for SINT/UINT formats
5662 or the 8/24 COLOR variants */
5663 if (ntype == V_028C70_NUMBER_UINT || ntype == V_028C70_NUMBER_SINT ||
5664 format == V_028C70_COLOR_8_24 || format == V_028C70_COLOR_24_8 ||
5665 format == V_028C70_COLOR_X24_8_32_FLOAT) {
5666 blend_clamp = 0;
5667 blend_bypass = 1;
5668 }
5669 #if 0
5670 if ((ntype == V_028C70_NUMBER_UINT || ntype == V_028C70_NUMBER_SINT) &&
5671 (format == V_028C70_COLOR_8 ||
5672 format == V_028C70_COLOR_8_8 ||
5673 format == V_028C70_COLOR_8_8_8_8))
5674 ->color_is_int8 = true;
5675 #endif
5676 cb->cb_color_info =
5677 S_028C70_FORMAT(format) | S_028C70_COMP_SWAP(swap) | S_028C70_BLEND_CLAMP(blend_clamp) |
5678 S_028C70_BLEND_BYPASS(blend_bypass) | S_028C70_SIMPLE_FLOAT(1) |
5679 S_028C70_ROUND_MODE(ntype != V_028C70_NUMBER_UNORM && ntype != V_028C70_NUMBER_SNORM &&
5680 ntype != V_028C70_NUMBER_SRGB && format != V_028C70_COLOR_8_24 &&
5681 format != V_028C70_COLOR_24_8) |
5682 S_028C70_NUMBER_TYPE(ntype) | S_028C70_ENDIAN(endian);
5683 if (radv_image_has_fmask(iview->image)) {
5684 cb->cb_color_info |= S_028C70_COMPRESSION(1);
5685 if (device->physical_device->rad_info.chip_class == GFX6) {
5686 unsigned fmask_bankh = util_logbase2(surf->u.legacy.color.fmask.bankh);
5687 cb->cb_color_attrib |= S_028C74_FMASK_BANK_HEIGHT(fmask_bankh);
5688 }
5689
5690 if (radv_image_is_tc_compat_cmask(iview->image)) {
5691 /* Allow the texture block to read FMASK directly
5692 * without decompressing it. This bit must be cleared
5693 * when performing FMASK_DECOMPRESS or DCC_COMPRESS,
5694 * otherwise the operation doesn't happen.
5695 */
5696 cb->cb_color_info |= S_028C70_FMASK_COMPRESS_1FRAG_ONLY(1);
5697
5698 if (device->physical_device->rad_info.chip_class == GFX8) {
5699 /* Set CMASK into a tiling format that allows
5700 * the texture block to read it.
5701 */
5702 cb->cb_color_info |= S_028C70_CMASK_ADDR_TYPE(2);
5703 }
5704 }
5705 }
5706
5707 if (radv_image_has_cmask(iview->image) &&
5708 !(device->instance->debug_flags & RADV_DEBUG_NO_FAST_CLEARS))
5709 cb->cb_color_info |= S_028C70_FAST_CLEAR(1);
5710
5711 if (radv_dcc_enabled(iview->image, iview->base_mip))
5712 cb->cb_color_info |= S_028C70_DCC_ENABLE(1);
5713
5714 cb->cb_dcc_control = radv_init_dcc_control_reg(device, iview);
5715
5716 /* This must be set for fast clear to work without FMASK. */
5717 if (!radv_image_has_fmask(iview->image) &&
5718 device->physical_device->rad_info.chip_class == GFX6) {
5719 unsigned bankh = util_logbase2(surf->u.legacy.bankh);
5720 cb->cb_color_attrib |= S_028C74_FMASK_BANK_HEIGHT(bankh);
5721 }
5722
5723 if (device->physical_device->rad_info.chip_class >= GFX9) {
5724 unsigned mip0_depth = iview->image->type == VK_IMAGE_TYPE_3D
5725 ? (iview->extent.depth - 1)
5726 : (iview->image->info.array_size - 1);
5727 unsigned width =
5728 vk_format_get_plane_width(iview->image->vk_format, iview->plane_id, iview->extent.width);
5729 unsigned height =
5730 vk_format_get_plane_height(iview->image->vk_format, iview->plane_id, iview->extent.height);
5731
5732 if (device->physical_device->rad_info.chip_class >= GFX10) {
5733 cb->cb_color_view |= S_028C6C_MIP_LEVEL_GFX10(iview->base_mip);
5734
5735 cb->cb_color_attrib3 |= S_028EE0_MIP0_DEPTH(mip0_depth) |
5736 S_028EE0_RESOURCE_TYPE(surf->u.gfx9.resource_type) |
5737 S_028EE0_RESOURCE_LEVEL(1);
5738 } else {
5739 cb->cb_color_view |= S_028C6C_MIP_LEVEL_GFX9(iview->base_mip);
5740 cb->cb_color_attrib |=
5741 S_028C74_MIP0_DEPTH(mip0_depth) | S_028C74_RESOURCE_TYPE(surf->u.gfx9.resource_type);
5742 }
5743
5744 cb->cb_color_attrib2 = S_028C68_MIP0_WIDTH(width - 1) | S_028C68_MIP0_HEIGHT(height - 1) |
5745 S_028C68_MAX_MIP(iview->image->info.levels - 1);
5746 }
5747 }
5748
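/* Compute the value programmed into DECOMPRESS_ON_N_ZPLANES for TC-compatible
 * HTILE, based on chip generation, depth format, sample count and known
 * hardware bugs.
 */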
5749 static unsigned
5750 radv_calc_decompress_on_z_planes(struct radv_device *device, struct radv_image_view *iview)
5751 {
5752 unsigned max_zplanes = 0;
5753
5754 assert(radv_image_is_tc_compat_htile(iview->image));
5755
5756 if (device->physical_device->rad_info.chip_class >= GFX9) {
5757 /* Default value for 32-bit depth surfaces. */
5758 max_zplanes = 4;
5759
5760 if (iview->vk_format == VK_FORMAT_D16_UNORM && iview->image->info.samples > 1)
5761 max_zplanes = 2;
5762
5763 /* Workaround for a DB hang when ITERATE_256 is set to 1. Only affects 4X MSAA D/S images. */
5764 if (device->physical_device->rad_info.has_two_planes_iterate256_bug &&
5765 radv_image_get_iterate256(device, iview->image) &&
5766 !radv_image_tile_stencil_disabled(device, iview->image) &&
5767 iview->image->info.samples == 4) {
5768 max_zplanes = 1;
5769 }
5770
5771 max_zplanes = max_zplanes + 1;
5772 } else {
5773 if (iview->vk_format == VK_FORMAT_D16_UNORM) {
5774 /* Do not enable Z plane compression for 16-bit depth
5775 * surfaces because it isn't supported on GFX8. Only
5776 * 32-bit depth surfaces are supported by the hardware.
5777 * This allows us to maintain shader compatibility and to
5778 * reduce the number of depth decompressions.
5779 */
5780 max_zplanes = 1;
5781 } else {
5782 if (iview->image->info.samples <= 1)
5783 max_zplanes = 5;
5784 else if (iview->image->info.samples <= 4)
5785 max_zplanes = 3;
5786 else
5787 max_zplanes = 2;
5788 }
5789 }
5790
5791 return max_zplanes;
5792 }
5793
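/* Initialize a depth-surface descriptor that is only used as a VRS rate image:
 * D16 format, no stencil, and HTILE with the 4-bit VRS encoding backed by the
 * given htile_buffer.
 */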
5794 void
5795 radv_initialise_vrs_surface(struct radv_image *image, struct radv_buffer *htile_buffer,
5796 struct radv_ds_buffer_info *ds)
5797 {
5798 const struct radeon_surf *surf = &image->planes[0].surface;
5799
5800 assert(image->vk_format == VK_FORMAT_D16_UNORM);
5801 memset(ds, 0, sizeof(*ds));
5802
5803 ds->pa_su_poly_offset_db_fmt_cntl = S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS(-16);
5804
5805 ds->db_z_info = S_028038_FORMAT(V_028040_Z_16) |
5806 S_028038_SW_MODE(surf->u.gfx9.swizzle_mode) |
5807 S_028038_ZRANGE_PRECISION(1) |
5808 S_028038_TILE_SURFACE_ENABLE(1);
5809 ds->db_stencil_info = S_02803C_FORMAT(V_028044_STENCIL_INVALID);
5810
5811 ds->db_depth_size = S_02801C_X_MAX(image->info.width - 1) |
5812 S_02801C_Y_MAX(image->info.height - 1);
5813
5814 ds->db_htile_data_base = radv_buffer_get_va(htile_buffer->bo) >> 8;
5815 ds->db_htile_surface = S_028ABC_FULL_CACHE(1) | S_028ABC_PIPE_ALIGNED(1) |
5816 S_028ABC_VRS_HTILE_ENCODING(V_028ABC_VRS_HTILE_4BIT_ENCODING);
5817 }
5818
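/* Fill the DB_* register state (radv_ds_buffer_info) for a depth/stencil image
 * view: depth/stencil base addresses, formats, tiling and HTILE setup, with
 * separate paths for GFX9+ and the legacy GFX6-8 layout.
 */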
5819 void
5820 radv_initialise_ds_surface(struct radv_device *device, struct radv_ds_buffer_info *ds,
5821 struct radv_image_view *iview)
5822 {
5823 unsigned level = iview->base_mip;
5824 unsigned format, stencil_format;
5825 uint64_t va, s_offs, z_offs;
5826 bool stencil_only = iview->image->vk_format == VK_FORMAT_S8_UINT;
5827 const struct radv_image_plane *plane = &iview->image->planes[0];
5828 const struct radeon_surf *surf = &plane->surface;
5829
5830 assert(vk_format_get_plane_count(iview->image->vk_format) == 1);
5831
5832 memset(ds, 0, sizeof(*ds));
5833 if (!device->instance->absolute_depth_bias) {
5834 switch (iview->image->vk_format) {
5835 case VK_FORMAT_D24_UNORM_S8_UINT:
5836 case VK_FORMAT_X8_D24_UNORM_PACK32:
5837 ds->pa_su_poly_offset_db_fmt_cntl = S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS(-24);
5838 break;
5839 case VK_FORMAT_D16_UNORM:
5840 case VK_FORMAT_D16_UNORM_S8_UINT:
5841 ds->pa_su_poly_offset_db_fmt_cntl = S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS(-16);
5842 break;
5843 case VK_FORMAT_D32_SFLOAT:
5844 case VK_FORMAT_D32_SFLOAT_S8_UINT:
5845 ds->pa_su_poly_offset_db_fmt_cntl =
5846 S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS(-23) | S_028B78_POLY_OFFSET_DB_IS_FLOAT_FMT(1);
5847 break;
5848 default:
5849 break;
5850 }
5851 }
5852
5853 format = radv_translate_dbformat(iview->image->vk_format);
5854 stencil_format = surf->has_stencil ? V_028044_STENCIL_8 : V_028044_STENCIL_INVALID;
5855
5856 uint32_t max_slice = radv_surface_max_layer_count(iview) - 1;
5857 ds->db_depth_view = S_028008_SLICE_START(iview->base_layer) | S_028008_SLICE_MAX(max_slice);
5858 if (device->physical_device->rad_info.chip_class >= GFX10) {
5859 ds->db_depth_view |=
5860 S_028008_SLICE_START_HI(iview->base_layer >> 11) | S_028008_SLICE_MAX_HI(max_slice >> 11);
5861 }
5862
5863 ds->db_htile_data_base = 0;
5864 ds->db_htile_surface = 0;
5865
5866 va = radv_buffer_get_va(iview->image->bo) + iview->image->offset;
5867 s_offs = z_offs = va;
5868
5869 if (device->physical_device->rad_info.chip_class >= GFX9) {
5870 assert(surf->u.gfx9.surf_offset == 0);
5871 s_offs += surf->u.gfx9.zs.stencil_offset;
5872
5873 ds->db_z_info = S_028038_FORMAT(format) |
5874 S_028038_NUM_SAMPLES(util_logbase2(iview->image->info.samples)) |
5875 S_028038_SW_MODE(surf->u.gfx9.swizzle_mode) |
5876 S_028038_MAXMIP(iview->image->info.levels - 1) | S_028038_ZRANGE_PRECISION(1);
5877 ds->db_stencil_info =
5878 S_02803C_FORMAT(stencil_format) | S_02803C_SW_MODE(surf->u.gfx9.zs.stencil_swizzle_mode);
5879
5880 if (device->physical_device->rad_info.chip_class == GFX9) {
5881 ds->db_z_info2 = S_028068_EPITCH(surf->u.gfx9.epitch);
5882 ds->db_stencil_info2 = S_02806C_EPITCH(surf->u.gfx9.zs.stencil_epitch);
5883 }
5884
5885 ds->db_depth_view |= S_028008_MIPID(level);
5886 ds->db_depth_size = S_02801C_X_MAX(iview->image->info.width - 1) |
5887 S_02801C_Y_MAX(iview->image->info.height - 1);
5888
5889 if (radv_htile_enabled(iview->image, level)) {
5890 ds->db_z_info |= S_028038_TILE_SURFACE_ENABLE(1);
5891
5892 if (radv_image_is_tc_compat_htile(iview->image)) {
5893 unsigned max_zplanes = radv_calc_decompress_on_z_planes(device, iview);
5894
5895 ds->db_z_info |= S_028038_DECOMPRESS_ON_N_ZPLANES(max_zplanes);
5896
5897 if (device->physical_device->rad_info.chip_class >= GFX10) {
5898 bool iterate256 = radv_image_get_iterate256(device, iview->image);
5899
5900 ds->db_z_info |= S_028040_ITERATE_FLUSH(1);
5901 ds->db_stencil_info |= S_028044_ITERATE_FLUSH(1);
5902 ds->db_z_info |= S_028040_ITERATE_256(iterate256);
5903 ds->db_stencil_info |= S_028044_ITERATE_256(iterate256);
5904 } else {
5905 ds->db_z_info |= S_028038_ITERATE_FLUSH(1);
5906 ds->db_stencil_info |= S_02803C_ITERATE_FLUSH(1);
5907 }
5908 }
5909
5910 if (radv_image_tile_stencil_disabled(device, iview->image)) {
5911 ds->db_stencil_info |= S_02803C_TILE_STENCIL_DISABLE(1);
5912 }
5913
5914 va = radv_buffer_get_va(iview->image->bo) + iview->image->offset + surf->meta_offset;
5915 ds->db_htile_data_base = va >> 8;
5916 ds->db_htile_surface = S_028ABC_FULL_CACHE(1) | S_028ABC_PIPE_ALIGNED(1);
5917
5918 if (device->physical_device->rad_info.chip_class == GFX9) {
5919 ds->db_htile_surface |= S_028ABC_RB_ALIGNED(1);
5920 }
5921
5922 if (radv_image_has_vrs_htile(device, iview->image)) {
5923 ds->db_htile_surface |= S_028ABC_VRS_HTILE_ENCODING(V_028ABC_VRS_HTILE_4BIT_ENCODING);
5924 }
5925 }
5926 } else {
5927 const struct legacy_surf_level *level_info = &surf->u.legacy.level[level];
5928
5929 if (stencil_only)
5930 level_info = &surf->u.legacy.zs.stencil_level[level];
5931
5932 z_offs += (uint64_t)surf->u.legacy.level[level].offset_256B * 256;
5933 s_offs += (uint64_t)surf->u.legacy.zs.stencil_level[level].offset_256B * 256;
5934
5935 ds->db_depth_info = S_02803C_ADDR5_SWIZZLE_MASK(!radv_image_is_tc_compat_htile(iview->image));
5936 ds->db_z_info = S_028040_FORMAT(format) | S_028040_ZRANGE_PRECISION(1);
5937 ds->db_stencil_info = S_028044_FORMAT(stencil_format);
5938
5939 if (iview->image->info.samples > 1)
5940 ds->db_z_info |= S_028040_NUM_SAMPLES(util_logbase2(iview->image->info.samples));
5941
5942 if (device->physical_device->rad_info.chip_class >= GFX7) {
5943 struct radeon_info *info = &device->physical_device->rad_info;
5944 unsigned tiling_index = surf->u.legacy.tiling_index[level];
5945 unsigned stencil_index = surf->u.legacy.zs.stencil_tiling_index[level];
5946 unsigned macro_index = surf->u.legacy.macro_tile_index;
5947 unsigned tile_mode = info->si_tile_mode_array[tiling_index];
5948 unsigned stencil_tile_mode = info->si_tile_mode_array[stencil_index];
5949 unsigned macro_mode = info->cik_macrotile_mode_array[macro_index];
5950
5951 if (stencil_only)
5952 tile_mode = stencil_tile_mode;
5953
5954 ds->db_depth_info |= S_02803C_ARRAY_MODE(G_009910_ARRAY_MODE(tile_mode)) |
5955 S_02803C_PIPE_CONFIG(G_009910_PIPE_CONFIG(tile_mode)) |
5956 S_02803C_BANK_WIDTH(G_009990_BANK_WIDTH(macro_mode)) |
5957 S_02803C_BANK_HEIGHT(G_009990_BANK_HEIGHT(macro_mode)) |
5958 S_02803C_MACRO_TILE_ASPECT(G_009990_MACRO_TILE_ASPECT(macro_mode)) |
5959 S_02803C_NUM_BANKS(G_009990_NUM_BANKS(macro_mode));
5960 ds->db_z_info |= S_028040_TILE_SPLIT(G_009910_TILE_SPLIT(tile_mode));
5961 ds->db_stencil_info |= S_028044_TILE_SPLIT(G_009910_TILE_SPLIT(stencil_tile_mode));
5962 } else {
5963 unsigned tile_mode_index = si_tile_mode_index(&iview->image->planes[0], level, false);
5964 ds->db_z_info |= S_028040_TILE_MODE_INDEX(tile_mode_index);
5965 tile_mode_index = si_tile_mode_index(&iview->image->planes[0], level, true);
5966 ds->db_stencil_info |= S_028044_TILE_MODE_INDEX(tile_mode_index);
5967 if (stencil_only)
5968 ds->db_z_info |= S_028040_TILE_MODE_INDEX(tile_mode_index);
5969 }
5970
5971 ds->db_depth_size = S_028058_PITCH_TILE_MAX((level_info->nblk_x / 8) - 1) |
5972 S_028058_HEIGHT_TILE_MAX((level_info->nblk_y / 8) - 1);
5973 ds->db_depth_slice =
5974 S_02805C_SLICE_TILE_MAX((level_info->nblk_x * level_info->nblk_y) / 64 - 1);
5975
5976 if (radv_htile_enabled(iview->image, level)) {
5977 ds->db_z_info |= S_028040_TILE_SURFACE_ENABLE(1);
5978
5979 if (radv_image_tile_stencil_disabled(device, iview->image)) {
5980 ds->db_stencil_info |= S_028044_TILE_STENCIL_DISABLE(1);
5981 }
5982
5983 va = radv_buffer_get_va(iview->image->bo) + iview->image->offset + surf->meta_offset;
5984 ds->db_htile_data_base = va >> 8;
5985 ds->db_htile_surface = S_028ABC_FULL_CACHE(1);
5986
5987 if (radv_image_is_tc_compat_htile(iview->image)) {
5988 unsigned max_zplanes = radv_calc_decompress_on_z_planes(device, iview);
5989
5990 ds->db_htile_surface |= S_028ABC_TC_COMPATIBLE(1);
5991 ds->db_z_info |= S_028040_DECOMPRESS_ON_N_ZPLANES(max_zplanes);
5992 }
5993 }
5994 }
5995
5996 ds->db_z_read_base = ds->db_z_write_base = z_offs >> 8;
5997 ds->db_stencil_read_base = ds->db_stencil_write_base = s_offs >> 8;
5998 }
5999
6000 VKAPI_ATTR VkResult VKAPI_CALL
6001 radv_CreateFramebuffer(VkDevice _device, const VkFramebufferCreateInfo *pCreateInfo,
6002 const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer)
6003 {
6004 RADV_FROM_HANDLE(radv_device, device, _device);
6005 struct radv_framebuffer *framebuffer;
6006 const VkFramebufferAttachmentsCreateInfo *imageless_create_info =
6007 vk_find_struct_const(pCreateInfo->pNext, FRAMEBUFFER_ATTACHMENTS_CREATE_INFO);
6008
6009 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
6010
6011 size_t size = sizeof(*framebuffer);
6012 if (!imageless_create_info)
6013 size += sizeof(struct radv_image_view *) * pCreateInfo->attachmentCount;
6014 framebuffer =
6015 vk_alloc2(&device->vk.alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
6016 if (framebuffer == NULL)
6017 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
6018
6019 vk_object_base_init(&device->vk, &framebuffer->base, VK_OBJECT_TYPE_FRAMEBUFFER);
6020
6021 framebuffer->attachment_count = pCreateInfo->attachmentCount;
6022 framebuffer->width = pCreateInfo->width;
6023 framebuffer->height = pCreateInfo->height;
6024 framebuffer->layers = pCreateInfo->layers;
6025
6026 if (!imageless_create_info) {
6027 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
6028 VkImageView _iview = pCreateInfo->pAttachments[i];
6029 struct radv_image_view *iview = radv_image_view_from_handle(_iview);
6030 framebuffer->attachments[i] = iview;
6031 }
6032 }
6033
6034 *pFramebuffer = radv_framebuffer_to_handle(framebuffer);
6035 return VK_SUCCESS;
6036 }
6037
6038 VKAPI_ATTR void VKAPI_CALL
6039 radv_DestroyFramebuffer(VkDevice _device, VkFramebuffer _fb,
6040 const VkAllocationCallbacks *pAllocator)
6041 {
6042 RADV_FROM_HANDLE(radv_device, device, _device);
6043 RADV_FROM_HANDLE(radv_framebuffer, fb, _fb);
6044
6045 if (!fb)
6046 return;
6047 vk_object_base_finish(&fb->base);
6048 vk_free2(&device->vk.alloc, pAllocator, fb);
6049 }
6050
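/* Translate a Vulkan sampler address mode to the hardware SQ_TEX_WRAP enum. */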
6051 static unsigned
6052 radv_tex_wrap(VkSamplerAddressMode address_mode)
6053 {
6054 switch (address_mode) {
6055 case VK_SAMPLER_ADDRESS_MODE_REPEAT:
6056 return V_008F30_SQ_TEX_WRAP;
6057 case VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT:
6058 return V_008F30_SQ_TEX_MIRROR;
6059 case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE:
6060 return V_008F30_SQ_TEX_CLAMP_LAST_TEXEL;
6061 case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER:
6062 return V_008F30_SQ_TEX_CLAMP_BORDER;
6063 case VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE:
6064 return V_008F30_SQ_TEX_MIRROR_ONCE_LAST_TEXEL;
6065 default:
6066 unreachable("illegal tex wrap mode");
6067 break;
6068 }
6069 }
6070
6071 static unsigned
6072 radv_tex_compare(VkCompareOp op)
6073 {
6074 switch (op) {
6075 case VK_COMPARE_OP_NEVER:
6076 return V_008F30_SQ_TEX_DEPTH_COMPARE_NEVER;
6077 case VK_COMPARE_OP_LESS:
6078 return V_008F30_SQ_TEX_DEPTH_COMPARE_LESS;
6079 case VK_COMPARE_OP_EQUAL:
6080 return V_008F30_SQ_TEX_DEPTH_COMPARE_EQUAL;
6081 case VK_COMPARE_OP_LESS_OR_EQUAL:
6082 return V_008F30_SQ_TEX_DEPTH_COMPARE_LESSEQUAL;
6083 case VK_COMPARE_OP_GREATER:
6084 return V_008F30_SQ_TEX_DEPTH_COMPARE_GREATER;
6085 case VK_COMPARE_OP_NOT_EQUAL:
6086 return V_008F30_SQ_TEX_DEPTH_COMPARE_NOTEQUAL;
6087 case VK_COMPARE_OP_GREATER_OR_EQUAL:
6088 return V_008F30_SQ_TEX_DEPTH_COMPARE_GREATEREQUAL;
6089 case VK_COMPARE_OP_ALWAYS:
6090 return V_008F30_SQ_TEX_DEPTH_COMPARE_ALWAYS;
6091 default:
6092 unreachable("illegal compare mode");
6093 break;
6094 }
6095 }
6096
6097 static unsigned
6098 radv_tex_filter(VkFilter filter, unsigned max_aniso)
6099 {
6100 switch (filter) {
6101 case VK_FILTER_NEAREST:
6102 return (max_aniso > 1 ? V_008F38_SQ_TEX_XY_FILTER_ANISO_POINT
6103 : V_008F38_SQ_TEX_XY_FILTER_POINT);
6104 case VK_FILTER_LINEAR:
6105 return (max_aniso > 1 ? V_008F38_SQ_TEX_XY_FILTER_ANISO_BILINEAR
6106 : V_008F38_SQ_TEX_XY_FILTER_BILINEAR);
6107 case VK_FILTER_CUBIC_IMG:
6108 default:
6109 fprintf(stderr, "illegal texture filter");
6110 return 0;
6111 }
6112 }
6113
6114 static unsigned
6115 radv_tex_mipfilter(VkSamplerMipmapMode mode)
6116 {
6117 switch (mode) {
6118 case VK_SAMPLER_MIPMAP_MODE_NEAREST:
6119 return V_008F38_SQ_TEX_Z_FILTER_POINT;
6120 case VK_SAMPLER_MIPMAP_MODE_LINEAR:
6121 return V_008F38_SQ_TEX_Z_FILTER_LINEAR;
6122 default:
6123 return V_008F38_SQ_TEX_Z_FILTER_NONE;
6124 }
6125 }
6126
6127 static unsigned
6128 radv_tex_bordercolor(VkBorderColor bcolor)
6129 {
6130 switch (bcolor) {
6131 case VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK:
6132 case VK_BORDER_COLOR_INT_TRANSPARENT_BLACK:
6133 return V_008F3C_SQ_TEX_BORDER_COLOR_TRANS_BLACK;
6134 case VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK:
6135 case VK_BORDER_COLOR_INT_OPAQUE_BLACK:
6136 return V_008F3C_SQ_TEX_BORDER_COLOR_OPAQUE_BLACK;
6137 case VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE:
6138 case VK_BORDER_COLOR_INT_OPAQUE_WHITE:
6139 return V_008F3C_SQ_TEX_BORDER_COLOR_OPAQUE_WHITE;
6140 case VK_BORDER_COLOR_FLOAT_CUSTOM_EXT:
6141 case VK_BORDER_COLOR_INT_CUSTOM_EXT:
6142 return V_008F3C_SQ_TEX_BORDER_COLOR_REGISTER;
6143 default:
6144 break;
6145 }
6146 return 0;
6147 }
6148
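/* Map the requested max anisotropy to the hardware ratio field:
 * 1 -> 0 (1x), 2-3 -> 1 (2x), 4-7 -> 2 (4x), 8-15 -> 3 (8x), 16+ -> 4 (16x).
 */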
6149 static unsigned
6150 radv_tex_aniso_filter(unsigned filter)
6151 {
6152 if (filter < 2)
6153 return 0;
6154 if (filter < 4)
6155 return 1;
6156 if (filter < 8)
6157 return 2;
6158 if (filter < 16)
6159 return 3;
6160 return 4;
6161 }
6162
6163 static unsigned
6164 radv_tex_filter_mode(VkSamplerReductionMode mode)
6165 {
6166 switch (mode) {
6167 case VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_EXT:
6168 return V_008F30_SQ_IMG_FILTER_MODE_BLEND;
6169 case VK_SAMPLER_REDUCTION_MODE_MIN_EXT:
6170 return V_008F30_SQ_IMG_FILTER_MODE_MIN;
6171 case VK_SAMPLER_REDUCTION_MODE_MAX_EXT:
6172 return V_008F30_SQ_IMG_FILTER_MODE_MAX;
6173 default:
6174 break;
6175 }
6176 return 0;
6177 }
6178
6179 static uint32_t
6180 radv_get_max_anisotropy(struct radv_device *device, const VkSamplerCreateInfo *pCreateInfo)
6181 {
6182 if (device->force_aniso >= 0)
6183 return device->force_aniso;
6184
6185 if (pCreateInfo->anisotropyEnable && pCreateInfo->maxAnisotropy > 1.0f)
6186 return (uint32_t)pCreateInfo->maxAnisotropy;
6187
6188 return 0;
6189 }
6190
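/* Reserve a slot in the device's custom border color table and upload the
 * color to the GPU-visible buffer. Returns RADV_BORDER_COLOR_COUNT when the
 * table is full; the caller then falls back to transparent black.
 */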
6191 static uint32_t
6192 radv_register_border_color(struct radv_device *device, VkClearColorValue value)
6193 {
6194 uint32_t slot;
6195
6196 mtx_lock(&device->border_color_data.mutex);
6197
6198 for (slot = 0; slot < RADV_BORDER_COLOR_COUNT; slot++) {
6199 if (!device->border_color_data.used[slot]) {
6200 /* Copy to the GPU wrt endian-ness. */
6201 util_memcpy_cpu_to_le32(&device->border_color_data.colors_gpu_ptr[slot], &value,
6202 sizeof(VkClearColorValue));
6203
6204 device->border_color_data.used[slot] = true;
6205 break;
6206 }
6207 }
6208
6209 mtx_unlock(&device->border_color_data.mutex);
6210
6211 return slot;
6212 }
6213
6214 static void
6215 radv_unregister_border_color(struct radv_device *device, uint32_t slot)
6216 {
6217 mtx_lock(&device->border_color_data.mutex);
6218
6219 device->border_color_data.used[slot] = false;
6220
6221 mtx_unlock(&device->border_color_data.mutex);
6222 }
6223
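/* Build the four hardware sampler dwords (sampler->state[0..3]) from the
 * VkSamplerCreateInfo: wrap modes, anisotropy, depth compare, LOD clamp/bias,
 * filters, reduction mode and (possibly custom) border color.
 */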
6224 static void
6225 radv_init_sampler(struct radv_device *device, struct radv_sampler *sampler,
6226 const VkSamplerCreateInfo *pCreateInfo)
6227 {
6228 uint32_t max_aniso = radv_get_max_anisotropy(device, pCreateInfo);
6229 uint32_t max_aniso_ratio = radv_tex_aniso_filter(max_aniso);
6230 bool compat_mode = device->physical_device->rad_info.chip_class == GFX8 ||
6231 device->physical_device->rad_info.chip_class == GFX9;
6232 unsigned filter_mode = V_008F30_SQ_IMG_FILTER_MODE_BLEND;
6233 unsigned depth_compare_func = V_008F30_SQ_TEX_DEPTH_COMPARE_NEVER;
6234 bool trunc_coord =
6235 pCreateInfo->minFilter == VK_FILTER_NEAREST && pCreateInfo->magFilter == VK_FILTER_NEAREST;
6236 bool uses_border_color = pCreateInfo->addressModeU == VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER ||
6237 pCreateInfo->addressModeV == VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER ||
6238 pCreateInfo->addressModeW == VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
6239 VkBorderColor border_color =
6240 uses_border_color ? pCreateInfo->borderColor : VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK;
6241 uint32_t border_color_ptr;
6242
6243 const struct VkSamplerReductionModeCreateInfo *sampler_reduction =
6244 vk_find_struct_const(pCreateInfo->pNext, SAMPLER_REDUCTION_MODE_CREATE_INFO);
6245 if (sampler_reduction)
6246 filter_mode = radv_tex_filter_mode(sampler_reduction->reductionMode);
6247
6248 if (pCreateInfo->compareEnable)
6249 depth_compare_func = radv_tex_compare(pCreateInfo->compareOp);
6250
6251 sampler->border_color_slot = RADV_BORDER_COLOR_COUNT;
6252
6253 if (border_color == VK_BORDER_COLOR_FLOAT_CUSTOM_EXT ||
6254 border_color == VK_BORDER_COLOR_INT_CUSTOM_EXT) {
6255 const VkSamplerCustomBorderColorCreateInfoEXT *custom_border_color =
6256 vk_find_struct_const(pCreateInfo->pNext, SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT);
6257
6258 assert(custom_border_color);
6259
6260 sampler->border_color_slot =
6261 radv_register_border_color(device, custom_border_color->customBorderColor);
6262
6263 /* Did we fail to find a slot? */
6264 if (sampler->border_color_slot == RADV_BORDER_COLOR_COUNT) {
6265 fprintf(stderr, "WARNING: no free border color slots, defaulting to TRANS_BLACK.\n");
6266 border_color = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK;
6267 }
6268 }
6269
6270 /* If we don't have a custom color, set the ptr to 0 */
6271 border_color_ptr =
6272 sampler->border_color_slot != RADV_BORDER_COLOR_COUNT ? sampler->border_color_slot : 0;
6273
6274 sampler->state[0] =
6275 (S_008F30_CLAMP_X(radv_tex_wrap(pCreateInfo->addressModeU)) |
6276 S_008F30_CLAMP_Y(radv_tex_wrap(pCreateInfo->addressModeV)) |
6277 S_008F30_CLAMP_Z(radv_tex_wrap(pCreateInfo->addressModeW)) |
6278 S_008F30_MAX_ANISO_RATIO(max_aniso_ratio) | S_008F30_DEPTH_COMPARE_FUNC(depth_compare_func) |
6279 S_008F30_FORCE_UNNORMALIZED(pCreateInfo->unnormalizedCoordinates ? 1 : 0) |
6280 S_008F30_ANISO_THRESHOLD(max_aniso_ratio >> 1) | S_008F30_ANISO_BIAS(max_aniso_ratio) |
6281 S_008F30_DISABLE_CUBE_WRAP(0) | S_008F30_COMPAT_MODE(compat_mode) |
6282 S_008F30_FILTER_MODE(filter_mode) | S_008F30_TRUNC_COORD(trunc_coord));
6283 sampler->state[1] = (S_008F34_MIN_LOD(radv_float_to_ufixed(CLAMP(pCreateInfo->minLod, 0, 15), 8)) |
6284 S_008F34_MAX_LOD(radv_float_to_ufixed(CLAMP(pCreateInfo->maxLod, 0, 15), 8)) |
6285 S_008F34_PERF_MIP(max_aniso_ratio ? max_aniso_ratio + 6 : 0));
6286 sampler->state[2] = (S_008F38_LOD_BIAS(radv_float_to_sfixed(CLAMP(pCreateInfo->mipLodBias, -16, 16), 8)) |
6287 S_008F38_XY_MAG_FILTER(radv_tex_filter(pCreateInfo->magFilter, max_aniso)) |
6288 S_008F38_XY_MIN_FILTER(radv_tex_filter(pCreateInfo->minFilter, max_aniso)) |
6289 S_008F38_MIP_FILTER(radv_tex_mipfilter(pCreateInfo->mipmapMode)) |
6290 S_008F38_MIP_POINT_PRECLAMP(0));
6291 sampler->state[3] = (S_008F3C_BORDER_COLOR_PTR(border_color_ptr) |
6292 S_008F3C_BORDER_COLOR_TYPE(radv_tex_bordercolor(border_color)));
6293
6294 if (device->physical_device->rad_info.chip_class >= GFX10) {
6295 sampler->state[2] |=
6296 S_008F38_ANISO_OVERRIDE_GFX10(device->instance->disable_aniso_single_level);
6297 } else {
6298 sampler->state[2] |=
6299 S_008F38_DISABLE_LSB_CEIL(device->physical_device->rad_info.chip_class <= GFX8) |
6300 S_008F38_FILTER_PREC_FIX(1) |
6301 S_008F38_ANISO_OVERRIDE_GFX8(device->instance->disable_aniso_single_level &&
6302 device->physical_device->rad_info.chip_class >= GFX8);
6303 }
6304 }
6305
6306 VKAPI_ATTR VkResult VKAPI_CALL
6307 radv_CreateSampler(VkDevice _device, const VkSamplerCreateInfo *pCreateInfo,
6308 const VkAllocationCallbacks *pAllocator, VkSampler *pSampler)
6309 {
6310 RADV_FROM_HANDLE(radv_device, device, _device);
6311 struct radv_sampler *sampler;
6312
6313 const struct VkSamplerYcbcrConversionInfo *ycbcr_conversion =
6314 vk_find_struct_const(pCreateInfo->pNext, SAMPLER_YCBCR_CONVERSION_INFO);
6315
6316 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
6317
6318 sampler = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*sampler), 8,
6319 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
6320 if (!sampler)
6321 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
6322
6323 vk_object_base_init(&device->vk, &sampler->base, VK_OBJECT_TYPE_SAMPLER);
6324
6325 radv_init_sampler(device, sampler, pCreateInfo);
6326
6327 sampler->ycbcr_sampler =
6328 ycbcr_conversion ? radv_sampler_ycbcr_conversion_from_handle(ycbcr_conversion->conversion)
6329 : NULL;
6330 *pSampler = radv_sampler_to_handle(sampler);
6331
6332 return VK_SUCCESS;
6333 }
6334
6335 VKAPI_ATTR void VKAPI_CALL
6336 radv_DestroySampler(VkDevice _device, VkSampler _sampler, const VkAllocationCallbacks *pAllocator)
6337 {
6338 RADV_FROM_HANDLE(radv_device, device, _device);
6339 RADV_FROM_HANDLE(radv_sampler, sampler, _sampler);
6340
6341 if (!sampler)
6342 return;
6343
6344 if (sampler->border_color_slot != RADV_BORDER_COLOR_COUNT)
6345 radv_unregister_border_color(device, sampler->border_color_slot);
6346
6347 vk_object_base_finish(&sampler->base);
6348 vk_free2(&device->vk.alloc, pAllocator, sampler);
6349 }
6350
6351 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
6352 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
6353 {
6354 /* For the full details on loader interface versioning, see
6355 * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
6356 * What follows is a condensed summary, to help you navigate the large and
6357 * confusing official doc.
6358 *
6359 * - Loader interface v0 is incompatible with later versions. We don't
6360 * support it.
6361 *
6362 * - In loader interface v1:
6363 * - The first ICD entrypoint called by the loader is
6364 * vk_icdGetInstanceProcAddr(). The ICD must statically expose this
6365 * entrypoint.
6366 * - The ICD must statically expose no other Vulkan symbol unless it is
6367 * linked with -Bsymbolic.
6368 * - Each dispatchable Vulkan handle created by the ICD must be
6369 * a pointer to a struct whose first member is VK_LOADER_DATA. The
6370 * ICD must initialize VK_LOADER_DATA.loadMagic to ICD_LOADER_MAGIC.
6371 * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
6372 * vkDestroySurfaceKHR(). The ICD must be capable of working with
6373 * such loader-managed surfaces.
6374 *
6375 * - Loader interface v2 differs from v1 in:
6376 * - The first ICD entrypoint called by the loader is
6377 * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
6378 * statically expose this entrypoint.
6379 *
6380 * - Loader interface v3 differs from v2 in:
6381 * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
6382 * vkDestroySurfaceKHR(), and other API which uses VkSurfaceKHR,
6383 * because the loader no longer does so.
6384 *
6385 * - Loader interface v4 differs from v3 in:
6386 * - The ICD must implement vk_icdGetPhysicalDeviceProcAddr().
6387 *
6388 * - Loader interface v5 differs from v4 in:
6389 * - The ICD must support Vulkan API version 1.1 and must not return
6390 * VK_ERROR_INCOMPATIBLE_DRIVER from vkCreateInstance() unless a
6391 * Vulkan Loader with interface v4 or smaller is being used and the
6392 * application provides an API version that is greater than 1.0.
6393 */
6394 *pSupportedVersion = MIN2(*pSupportedVersion, 5u);
6395 return VK_SUCCESS;
6396 }
6397
6398 VKAPI_ATTR VkResult VKAPI_CALL
6399 radv_GetMemoryFdKHR(VkDevice _device, const VkMemoryGetFdInfoKHR *pGetFdInfo, int *pFD)
6400 {
6401 RADV_FROM_HANDLE(radv_device, device, _device);
6402 RADV_FROM_HANDLE(radv_device_memory, memory, pGetFdInfo->memory);
6403
6404 assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
6405
6406 /* At the moment, we support only the below handle types. */
6407 assert(pGetFdInfo->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
6408 pGetFdInfo->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
6409
6410 bool ret = radv_get_memory_fd(device, memory, pFD);
6411 if (ret == false)
6412 return vk_error(device, VK_ERROR_OUT_OF_DEVICE_MEMORY);
6413 return VK_SUCCESS;
6414 }
6415
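/* Return the memory type bits whose domains match the BO's domains exactly
 * (restricted to VRAM/GDS/OA) and whose flags match once ignore_flags are
 * masked out.
 */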
6416 static uint32_t
6417 radv_compute_valid_memory_types_attempt(struct radv_physical_device *dev,
6418 enum radeon_bo_domain domains, enum radeon_bo_flag flags,
6419 enum radeon_bo_flag ignore_flags)
6420 {
6421 /* Don't count GTT/CPU as relevant:
6422 *
6423 * - We're not fully consistent between the two.
6424 * - Sometimes VRAM gets VRAM|GTT.
6425 */
6426 const enum radeon_bo_domain relevant_domains =
6427 RADEON_DOMAIN_VRAM | RADEON_DOMAIN_GDS | RADEON_DOMAIN_OA;
6428 uint32_t bits = 0;
6429 for (unsigned i = 0; i < dev->memory_properties.memoryTypeCount; ++i) {
6430 if ((domains & relevant_domains) != (dev->memory_domains[i] & relevant_domains))
6431 continue;
6432
6433 if ((flags & ~ignore_flags) != (dev->memory_flags[i] & ~ignore_flags))
6434 continue;
6435
6436 bits |= 1u << i;
6437 }
6438
6439 return bits;
6440 }
6441
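/* Compute memoryTypeBits for an imported BO, progressively ignoring GTT_WC and
 * then NO_CPU_ACCESS flag mismatches until at least one memory type matches.
 */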
6442 static uint32_t
6443 radv_compute_valid_memory_types(struct radv_physical_device *dev, enum radeon_bo_domain domains,
6444 enum radeon_bo_flag flags)
6445 {
6446 enum radeon_bo_flag ignore_flags = ~(RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_GTT_WC);
6447 uint32_t bits = radv_compute_valid_memory_types_attempt(dev, domains, flags, ignore_flags);
6448
6449 if (!bits) {
6450 ignore_flags |= RADEON_FLAG_GTT_WC;
6451 bits = radv_compute_valid_memory_types_attempt(dev, domains, flags, ignore_flags);
6452 }
6453
6454 if (!bits) {
6455 ignore_flags |= RADEON_FLAG_NO_CPU_ACCESS;
6456 bits = radv_compute_valid_memory_types_attempt(dev, domains, flags, ignore_flags);
6457 }
6458
6459 return bits;
6460 }
6461 VKAPI_ATTR VkResult VKAPI_CALL
6462 radv_GetMemoryFdPropertiesKHR(VkDevice _device, VkExternalMemoryHandleTypeFlagBits handleType,
6463 int fd, VkMemoryFdPropertiesKHR *pMemoryFdProperties)
6464 {
6465 RADV_FROM_HANDLE(radv_device, device, _device);
6466
6467 switch (handleType) {
6468 case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT: {
6469 enum radeon_bo_domain domains;
6470 enum radeon_bo_flag flags;
6471 if (!device->ws->buffer_get_flags_from_fd(device->ws, fd, &domains, &flags))
6472 return vk_error(device, VK_ERROR_INVALID_EXTERNAL_HANDLE);
6473
6474 pMemoryFdProperties->memoryTypeBits =
6475 radv_compute_valid_memory_types(device->physical_device, domains, flags);
6476 return VK_SUCCESS;
6477 }
6478 default:
6479 /* The valid usage section for this function says:
6480 *
6481 * "handleType must not be one of the handle types defined as
6482 * opaque."
6483 *
6484 * So opaque handle types fall into the default "unsupported" case.
6485 */
6486 return vk_error(device, VK_ERROR_INVALID_EXTERNAL_HANDLE);
6487 }
6488 }
6489
6490 VKAPI_ATTR void VKAPI_CALL
6491 radv_GetDeviceGroupPeerMemoryFeatures(VkDevice device, uint32_t heapIndex,
6492 uint32_t localDeviceIndex, uint32_t remoteDeviceIndex,
6493 VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
6494 {
6495 assert(localDeviceIndex == remoteDeviceIndex);
6496
6497 *pPeerMemoryFeatures =
6498 VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT | VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
6499 VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT | VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
6500 }
6501
6502 static const VkTimeDomainEXT radv_time_domains[] = {
6503 VK_TIME_DOMAIN_DEVICE_EXT,
6504 VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT,
6505 #ifdef CLOCK_MONOTONIC_RAW
6506 VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT,
6507 #endif
6508 };
6509
6510 VKAPI_ATTR VkResult VKAPI_CALL
6511 radv_GetPhysicalDeviceCalibrateableTimeDomainsEXT(VkPhysicalDevice physicalDevice,
6512 uint32_t *pTimeDomainCount,
6513 VkTimeDomainEXT *pTimeDomains)
6514 {
6515 int d;
6516 VK_OUTARRAY_MAKE_TYPED(VkTimeDomainEXT, out, pTimeDomains, pTimeDomainCount);
6517
6518 for (d = 0; d < ARRAY_SIZE(radv_time_domains); d++) {
6519 vk_outarray_append_typed(VkTimeDomainEXT, &out, i)
6520 {
6521 *i = radv_time_domains[d];
6522 }
6523 }
6524
6525 return vk_outarray_status(&out);
6526 }
6527
6528 #ifndef _WIN32
6529 static uint64_t
6530 radv_clock_gettime(clockid_t clock_id)
6531 {
6532 struct timespec current;
6533 int ret;
6534
6535 ret = clock_gettime(clock_id, &current);
6536 #ifdef CLOCK_MONOTONIC_RAW
6537 if (ret < 0 && clock_id == CLOCK_MONOTONIC_RAW)
6538 ret = clock_gettime(CLOCK_MONOTONIC, &current);
6539 #endif
6540 if (ret < 0)
6541 return 0;
6542
6543 return (uint64_t)current.tv_sec * 1000000000ULL + current.tv_nsec;
6544 }
6545
6546 VKAPI_ATTR VkResult VKAPI_CALL
6547 radv_GetCalibratedTimestampsEXT(VkDevice _device, uint32_t timestampCount,
6548 const VkCalibratedTimestampInfoEXT *pTimestampInfos,
6549 uint64_t *pTimestamps, uint64_t *pMaxDeviation)
6550 {
6551 RADV_FROM_HANDLE(radv_device, device, _device);
6552 uint32_t clock_crystal_freq = device->physical_device->rad_info.clock_crystal_freq;
6553 int d;
6554 uint64_t begin, end;
6555 uint64_t max_clock_period = 0;
6556
6557 #ifdef CLOCK_MONOTONIC_RAW
6558 begin = radv_clock_gettime(CLOCK_MONOTONIC_RAW);
6559 #else
6560 begin = radv_clock_gettime(CLOCK_MONOTONIC);
6561 #endif
6562
6563 for (d = 0; d < timestampCount; d++) {
6564 switch (pTimestampInfos[d].timeDomain) {
6565 case VK_TIME_DOMAIN_DEVICE_EXT:
6566 pTimestamps[d] = device->ws->query_value(device->ws, RADEON_TIMESTAMP);
6567 uint64_t device_period = DIV_ROUND_UP(1000000, clock_crystal_freq);
6568 max_clock_period = MAX2(max_clock_period, device_period);
6569 break;
6570 case VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT:
6571 pTimestamps[d] = radv_clock_gettime(CLOCK_MONOTONIC);
6572 max_clock_period = MAX2(max_clock_period, 1);
6573 break;
6574
6575 #ifdef CLOCK_MONOTONIC_RAW
6576 case VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT:
6577 pTimestamps[d] = begin;
6578 break;
6579 #endif
6580 default:
6581 pTimestamps[d] = 0;
6582 break;
6583 }
6584 }
6585
6586 #ifdef CLOCK_MONOTONIC_RAW
6587 end = radv_clock_gettime(CLOCK_MONOTONIC_RAW);
6588 #else
6589 end = radv_clock_gettime(CLOCK_MONOTONIC);
6590 #endif
6591
6592 /*
6593 * The maximum deviation is the sum of the interval over which we
6594 * perform the sampling and the maximum period of any sampled
6595 * clock. That's because the maximum skew between any two sampled
6596 * clock edges is when the sampled clock with the largest period is
6597 * sampled at the end of that period but right at the beginning of the
6598 * sampling interval and some other clock is sampled right at the
6599 * beginning of its sampling period and right at the end of the
6600 * sampling interval. Let's assume the GPU has the longest clock
6601 * period and that the application is sampling GPU and monotonic:
6602 *
6603 * s e
6604 * w x y z 0 1 2 3 4 5 6 7 8 9 a b c d e f
6605 * Raw -_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-
6606 *
6607 * g
6608 * 0 1 2 3
6609 * GPU -----_____-----_____-----_____-----_____
6610 *
6611 * m
6612 * x y z 0 1 2 3 4 5 6 7 8 9 a b c
6613 * Monotonic -_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-
6614 *
6615 * Interval <----------------->
6616 * Deviation <-------------------------->
6617 *
6618 * s = read(raw) 2
6619 * g = read(GPU) 1
6620 * m = read(monotonic) 2
6621 * e = read(raw) b
6622 *
6623 * We round the sample interval up by one tick to cover sampling error
6624 * in the interval clock.
6625 */
6626
6627 uint64_t sample_interval = end - begin + 1;
6628
6629 *pMaxDeviation = sample_interval + max_clock_period;
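/* Illustrative numbers only: with a 25000 kHz crystal the GPU tick is
 * DIV_ROUND_UP(1000000, 25000) = 40 ns, so a 1000 ns sampling window would
 * report a max deviation of (1000 + 1) + 40 = 1041 ns.
 */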
6630
6631 return VK_SUCCESS;
6632 }
6633 #endif
6634
6635 VKAPI_ATTR void VKAPI_CALL
6636 radv_GetPhysicalDeviceMultisamplePropertiesEXT(VkPhysicalDevice physicalDevice,
6637 VkSampleCountFlagBits samples,
6638 VkMultisamplePropertiesEXT *pMultisampleProperties)
6639 {
6640 VkSampleCountFlagBits supported_samples = VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT |
6641 VK_SAMPLE_COUNT_8_BIT;
6642
6643 if (samples & supported_samples) {
6644 pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){2, 2};
6645 } else {
6646 pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){0, 0};
6647 }
6648 }
6649
6650 VKAPI_ATTR VkResult VKAPI_CALL
6651 radv_GetPhysicalDeviceFragmentShadingRatesKHR(
6652 VkPhysicalDevice physicalDevice, uint32_t *pFragmentShadingRateCount,
6653 VkPhysicalDeviceFragmentShadingRateKHR *pFragmentShadingRates)
6654 {
6655 VK_OUTARRAY_MAKE_TYPED(VkPhysicalDeviceFragmentShadingRateKHR, out, pFragmentShadingRates,
6656 pFragmentShadingRateCount);
6657
6658 #define append_rate(w, h, s) \
6659 { \
6660 VkPhysicalDeviceFragmentShadingRateKHR rate = { \
6661 .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_PROPERTIES_KHR, \
6662 .sampleCounts = s, \
6663 .fragmentSize = {.width = w, .height = h}, \
6664 }; \
6665 vk_outarray_append_typed(VkPhysicalDeviceFragmentShadingRateKHR, &out, r) *r = rate; \
6666 }
6667
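/* Advertise the 2x2, 2x1, 1x2 and 1x1 fragment shading rates; the 1x1 rate is
 * reported as supporting all sample counts (~0).
 */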
6668 for (uint32_t x = 2; x >= 1; x--) {
6669 for (uint32_t y = 2; y >= 1; y--) {
6670 VkSampleCountFlagBits samples;
6671
6672 if (x == 1 && y == 1) {
6673 samples = ~0;
6674 } else {
6675 samples = VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_2_BIT |
6676 VK_SAMPLE_COUNT_4_BIT | VK_SAMPLE_COUNT_8_BIT;
6677 }
6678
6679 append_rate(x, y, samples);
6680 }
6681 }
6682 #undef append_rate
6683
6684 return vk_outarray_status(&out);
6685 }
6686