/*
 * Copyright © 2021 Bas Nieuwenhuizen
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "radv_acceleration_structure.h"
#include "radv_private.h"

#include "util/format/format_utils.h"
#include "util/half_float.h"
#include "nir_builder.h"
#include "radv_cs.h"
#include "radv_meta.h"

VKAPI_ATTR void VKAPI_CALL
radv_GetAccelerationStructureBuildSizesKHR(
   VkDevice _device, VkAccelerationStructureBuildTypeKHR buildType,
   const VkAccelerationStructureBuildGeometryInfoKHR *pBuildInfo,
   const uint32_t *pMaxPrimitiveCounts, VkAccelerationStructureBuildSizesInfoKHR *pSizeInfo)
{
   uint64_t triangles = 0, boxes = 0, instances = 0;

   STATIC_ASSERT(sizeof(struct radv_bvh_triangle_node) == 64);
   STATIC_ASSERT(sizeof(struct radv_bvh_aabb_node) == 64);
   STATIC_ASSERT(sizeof(struct radv_bvh_instance_node) == 128);
   STATIC_ASSERT(sizeof(struct radv_bvh_box16_node) == 64);
   STATIC_ASSERT(sizeof(struct radv_bvh_box32_node) == 128);

   for (uint32_t i = 0; i < pBuildInfo->geometryCount; ++i) {
      const VkAccelerationStructureGeometryKHR *geometry;
      if (pBuildInfo->pGeometries)
         geometry = &pBuildInfo->pGeometries[i];
      else
         geometry = pBuildInfo->ppGeometries[i];

      switch (geometry->geometryType) {
      case VK_GEOMETRY_TYPE_TRIANGLES_KHR:
         triangles += pMaxPrimitiveCounts[i];
         break;
      case VK_GEOMETRY_TYPE_AABBS_KHR:
         boxes += pMaxPrimitiveCounts[i];
         break;
      case VK_GEOMETRY_TYPE_INSTANCES_KHR:
         instances += pMaxPrimitiveCounts[i];
         break;
      case VK_GEOMETRY_TYPE_MAX_ENUM_KHR:
         unreachable("VK_GEOMETRY_TYPE_MAX_ENUM_KHR unhandled");
      }
   }

   uint64_t children = boxes + instances + triangles;
   uint64_t internal_nodes = 0;
   while (children > 1) {
      children = DIV_ROUND_UP(children, 4);
      internal_nodes += children;
   }
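   /* E.g. 1000 leaves need DIV_ROUND_UP(1000, 4) = 250 nodes at the first
    * internal level, then 63, 16, 4 and finally 1 root: 334 in total. */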

   /* The stray 192 at the end reserves space for the 64-byte aligned header
    * (which holds metadata like the total AABB of the BVH) plus the 128-byte
    * root node slot that follows it. */
   uint64_t size = boxes * 128 + instances * 128 + triangles * 64 + internal_nodes * 128 + 192;

   pSizeInfo->accelerationStructureSize = size;

   /* 2x the max number of nodes in a BVH layer (one uint32_t each) */
   pSizeInfo->updateScratchSize = pSizeInfo->buildScratchSize =
      MAX2(4096, 2 * (boxes + instances + triangles) * sizeof(uint32_t));
}

VKAPI_ATTR VkResult VKAPI_CALL
radv_CreateAccelerationStructureKHR(VkDevice _device,
                                    const VkAccelerationStructureCreateInfoKHR *pCreateInfo,
                                    const VkAllocationCallbacks *pAllocator,
                                    VkAccelerationStructureKHR *pAccelerationStructure)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   RADV_FROM_HANDLE(radv_buffer, buffer, pCreateInfo->buffer);
   struct radv_acceleration_structure *accel;

   accel = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*accel), 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (accel == NULL)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &accel->base, VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR);

   accel->mem_offset = buffer->offset + pCreateInfo->offset;
   accel->size = pCreateInfo->size;
   accel->bo = buffer->bo;

   *pAccelerationStructure = radv_acceleration_structure_to_handle(accel);
   return VK_SUCCESS;
}

VKAPI_ATTR void VKAPI_CALL
radv_DestroyAccelerationStructureKHR(VkDevice _device,
                                     VkAccelerationStructureKHR accelerationStructure,
                                     const VkAllocationCallbacks *pAllocator)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   RADV_FROM_HANDLE(radv_acceleration_structure, accel, accelerationStructure);

   if (!accel)
      return;

   vk_object_base_finish(&accel->base);
   vk_free2(&device->vk.alloc, pAllocator, accel);
}

VKAPI_ATTR VkDeviceAddress VKAPI_CALL
radv_GetAccelerationStructureDeviceAddressKHR(
   VkDevice _device, const VkAccelerationStructureDeviceAddressInfoKHR *pInfo)
{
   RADV_FROM_HANDLE(radv_acceleration_structure, accel, pInfo->accelerationStructure);
   return radv_accel_struct_get_va(accel);
}

VKAPI_ATTR VkResult VKAPI_CALL
radv_WriteAccelerationStructuresPropertiesKHR(
   VkDevice _device, uint32_t accelerationStructureCount,
   const VkAccelerationStructureKHR *pAccelerationStructures, VkQueryType queryType,
   size_t dataSize, void *pData, size_t stride)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   char *data_out = (char *)pData;

   for (uint32_t i = 0; i < accelerationStructureCount; ++i) {
      RADV_FROM_HANDLE(radv_acceleration_structure, accel, pAccelerationStructures[i]);
      const char *base_ptr = (const char *)device->ws->buffer_map(accel->bo);
      if (!base_ptr)
         return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

      const struct radv_accel_struct_header *header = (const void *)(base_ptr + accel->mem_offset);
      if (stride * i + sizeof(VkDeviceSize) <= dataSize) {
         uint64_t value;
         switch (queryType) {
         case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR:
            value = header->compacted_size;
            break;
         case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR:
            value = header->serialization_size;
            break;
         default:
            unreachable("Unhandled acceleration structure query");
         }
         *(VkDeviceSize *)(data_out + stride * i) = value;
      }
      device->ws->buffer_unmap(accel->bo);
   }
   return VK_SUCCESS;
}

struct radv_bvh_build_ctx {
   uint32_t *write_scratch;
   char *base;
   char *curr_ptr;
};
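
/* Node ids are (byte offset of the node from the BVH base) >> 3, with the
 * node type in the low 3 bits: 0 = triangle, 5 = box32, 6 = instance,
 * 7 = aabb (4 presumably being box16, which this host builder never emits).
 * All nodes are at least 64-byte aligned, so the offset and type bits never
 * overlap. */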

static void
build_triangles(struct radv_bvh_build_ctx *ctx, const VkAccelerationStructureGeometryKHR *geom,
                const VkAccelerationStructureBuildRangeInfoKHR *range, unsigned geometry_id)
{
   const VkAccelerationStructureGeometryTrianglesDataKHR *tri_data = &geom->geometry.triangles;
   VkTransformMatrixKHR matrix;
   const char *index_data = (const char *)tri_data->indexData.hostAddress + range->primitiveOffset;

   if (tri_data->transformData.hostAddress) {
      matrix = *(const VkTransformMatrixKHR *)((const char *)tri_data->transformData.hostAddress +
                                               range->transformOffset);
   } else {
      matrix = (VkTransformMatrixKHR){
         .matrix = {{1.0, 0.0, 0.0, 0.0}, {0.0, 1.0, 0.0, 0.0}, {0.0, 0.0, 1.0, 0.0}}};
   }

   for (uint32_t p = 0; p < range->primitiveCount; ++p, ctx->curr_ptr += 64) {
      struct radv_bvh_triangle_node *node = (void *)ctx->curr_ptr;
      uint32_t node_offset = ctx->curr_ptr - ctx->base;
      uint32_t node_id = node_offset >> 3;
      *ctx->write_scratch++ = node_id;

      for (unsigned v = 0; v < 3; ++v) {
         uint32_t v_index = range->firstVertex;
         switch (tri_data->indexType) {
         case VK_INDEX_TYPE_NONE_KHR:
            v_index += p * 3 + v;
            break;
         case VK_INDEX_TYPE_UINT8_EXT:
            v_index += *(const uint8_t *)index_data;
            index_data += 1;
            break;
         case VK_INDEX_TYPE_UINT16:
            v_index += *(const uint16_t *)index_data;
            index_data += 2;
            break;
         case VK_INDEX_TYPE_UINT32:
            v_index += *(const uint32_t *)index_data;
            index_data += 4;
            break;
         case VK_INDEX_TYPE_MAX_ENUM:
            unreachable("Unhandled VK_INDEX_TYPE_MAX_ENUM");
            break;
         }

         const char *v_data =
            (const char *)tri_data->vertexData.hostAddress + v_index * tri_data->vertexStride;
         float coords[4];
         switch (tri_data->vertexFormat) {
         case VK_FORMAT_R32G32_SFLOAT:
            coords[0] = *(const float *)(v_data + 0);
            coords[1] = *(const float *)(v_data + 4);
            coords[2] = 0.0f;
            coords[3] = 1.0f;
            break;
         case VK_FORMAT_R32G32B32_SFLOAT:
            coords[0] = *(const float *)(v_data + 0);
            coords[1] = *(const float *)(v_data + 4);
            coords[2] = *(const float *)(v_data + 8);
            coords[3] = 1.0f;
            break;
         case VK_FORMAT_R32G32B32A32_SFLOAT:
            coords[0] = *(const float *)(v_data + 0);
            coords[1] = *(const float *)(v_data + 4);
            coords[2] = *(const float *)(v_data + 8);
            coords[3] = *(const float *)(v_data + 12);
            break;
         case VK_FORMAT_R16G16_SFLOAT:
            coords[0] = _mesa_half_to_float(*(const uint16_t *)(v_data + 0));
            coords[1] = _mesa_half_to_float(*(const uint16_t *)(v_data + 2));
            coords[2] = 0.0f;
            coords[3] = 1.0f;
            break;
         case VK_FORMAT_R16G16B16_SFLOAT:
            coords[0] = _mesa_half_to_float(*(const uint16_t *)(v_data + 0));
            coords[1] = _mesa_half_to_float(*(const uint16_t *)(v_data + 2));
            coords[2] = _mesa_half_to_float(*(const uint16_t *)(v_data + 4));
            coords[3] = 1.0f;
            break;
         case VK_FORMAT_R16G16B16A16_SFLOAT:
            coords[0] = _mesa_half_to_float(*(const uint16_t *)(v_data + 0));
            coords[1] = _mesa_half_to_float(*(const uint16_t *)(v_data + 2));
            coords[2] = _mesa_half_to_float(*(const uint16_t *)(v_data + 4));
            coords[3] = _mesa_half_to_float(*(const uint16_t *)(v_data + 6));
            break;
         case VK_FORMAT_R16G16_SNORM:
            coords[0] = _mesa_snorm_to_float(*(const int16_t *)(v_data + 0), 16);
            coords[1] = _mesa_snorm_to_float(*(const int16_t *)(v_data + 2), 16);
            coords[2] = 0.0f;
            coords[3] = 1.0f;
            break;
         case VK_FORMAT_R16G16B16A16_SNORM:
            coords[0] = _mesa_snorm_to_float(*(const int16_t *)(v_data + 0), 16);
            coords[1] = _mesa_snorm_to_float(*(const int16_t *)(v_data + 2), 16);
            coords[2] = _mesa_snorm_to_float(*(const int16_t *)(v_data + 4), 16);
            coords[3] = _mesa_snorm_to_float(*(const int16_t *)(v_data + 6), 16);
            break;
         case VK_FORMAT_R16G16B16A16_UNORM:
            coords[0] = _mesa_unorm_to_float(*(const uint16_t *)(v_data + 0), 16);
            coords[1] = _mesa_unorm_to_float(*(const uint16_t *)(v_data + 2), 16);
            coords[2] = _mesa_unorm_to_float(*(const uint16_t *)(v_data + 4), 16);
            coords[3] = _mesa_unorm_to_float(*(const uint16_t *)(v_data + 6), 16);
            break;
         default:
            unreachable("Unhandled vertex format in BVH build");
         }

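         /* Apply the row-major 3x4 transform to the homogeneous vertex:
          * node->coords[v][j] = dot(matrix row j, (x, y, z, 1)). */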
         for (unsigned j = 0; j < 3; ++j) {
            float r = 0;
            for (unsigned k = 0; k < 4; ++k)
               r += matrix.matrix[j][k] * coords[k];
            node->coords[v][j] = r;
         }

         node->triangle_id = p;
         node->geometry_id_and_flags = geometry_id | (geom->flags << 28);

         /* Seems to be needed for the I/J barycentrics; otherwise I and J
          * come back undefined. */
         node->id = 9;
      }
   }
}

static VkResult
build_instances(struct radv_device *device, struct radv_bvh_build_ctx *ctx,
                const VkAccelerationStructureGeometryKHR *geom,
                const VkAccelerationStructureBuildRangeInfoKHR *range)
{
   const VkAccelerationStructureGeometryInstancesDataKHR *inst_data = &geom->geometry.instances;

   for (uint32_t p = 0; p < range->primitiveCount; ++p, ctx->curr_ptr += 128) {
      const VkAccelerationStructureInstanceKHR *instance =
         inst_data->arrayOfPointers
            ? (((const VkAccelerationStructureInstanceKHR *const *)inst_data->data.hostAddress)[p])
            : &((const VkAccelerationStructureInstanceKHR *)inst_data->data.hostAddress)[p];
      if (!instance->accelerationStructureReference) {
         continue;
      }

      struct radv_bvh_instance_node *node = (void *)ctx->curr_ptr;
      uint32_t node_offset = ctx->curr_ptr - ctx->base;
      uint32_t node_id = (node_offset >> 3) | 6;
      *ctx->write_scratch++ = node_id;

      float transform[16], inv_transform[16];
      memcpy(transform, &instance->transform.matrix, sizeof(instance->transform.matrix));
      transform[12] = transform[13] = transform[14] = 0.0f;
      transform[15] = 1.0f;

      util_invert_mat4x4(inv_transform, transform);
      memcpy(node->wto_matrix, inv_transform, sizeof(node->wto_matrix));
      node->wto_matrix[3] = transform[3];
      node->wto_matrix[7] = transform[7];
      node->wto_matrix[11] = transform[11];
      node->custom_instance_and_mask = instance->instanceCustomIndex | (instance->mask << 24);
      node->sbt_offset_and_flags =
         instance->instanceShaderBindingTableRecordOffset | (instance->flags << 24);
      node->instance_id = p;

      for (unsigned i = 0; i < 3; ++i)
         for (unsigned j = 0; j < 3; ++j)
            node->otw_matrix[i * 3 + j] = instance->transform.matrix[j][i];

      RADV_FROM_HANDLE(radv_acceleration_structure, src_accel_struct,
                       (VkAccelerationStructureKHR)instance->accelerationStructureReference);
      const void *src_base = device->ws->buffer_map(src_accel_struct->bo);
      if (!src_base)
         return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

      src_base = (const char *)src_base + src_accel_struct->mem_offset;
      const struct radv_accel_struct_header *src_header = src_base;
      node->base_ptr = radv_accel_struct_get_va(src_accel_struct) | src_header->root_node_offset;

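      /* Transform the source BLAS AABB into world space without enumerating
       * all 8 corners: for each world axis j, start from the translation and
       * add, per source axis k, the smaller/larger of the two products with
       * the min/max corner (the arithmetic form of an AABB transform). */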
      for (unsigned j = 0; j < 3; ++j) {
         node->aabb[0][j] = instance->transform.matrix[j][3];
         node->aabb[1][j] = instance->transform.matrix[j][3];
         for (unsigned k = 0; k < 3; ++k) {
            node->aabb[0][j] += MIN2(instance->transform.matrix[j][k] * src_header->aabb[0][k],
                                     instance->transform.matrix[j][k] * src_header->aabb[1][k]);
            node->aabb[1][j] += MAX2(instance->transform.matrix[j][k] * src_header->aabb[0][k],
                                     instance->transform.matrix[j][k] * src_header->aabb[1][k]);
         }
      }
      device->ws->buffer_unmap(src_accel_struct->bo);
   }
   return VK_SUCCESS;
}

static void
build_aabbs(struct radv_bvh_build_ctx *ctx, const VkAccelerationStructureGeometryKHR *geom,
            const VkAccelerationStructureBuildRangeInfoKHR *range, unsigned geometry_id)
{
   const VkAccelerationStructureGeometryAabbsDataKHR *aabb_data = &geom->geometry.aabbs;

   for (uint32_t p = 0; p < range->primitiveCount; ++p, ctx->curr_ptr += 64) {
      struct radv_bvh_aabb_node *node = (void *)ctx->curr_ptr;
      uint32_t node_offset = ctx->curr_ptr - ctx->base;
      uint32_t node_id = (node_offset >> 3) | 7;
      *ctx->write_scratch++ = node_id;

      const VkAabbPositionsKHR *aabb =
         (const VkAabbPositionsKHR *)((const char *)aabb_data->data.hostAddress +
                                      p * aabb_data->stride);

      node->aabb[0][0] = aabb->minX;
      node->aabb[0][1] = aabb->minY;
      node->aabb[0][2] = aabb->minZ;
      node->aabb[1][0] = aabb->maxX;
      node->aabb[1][1] = aabb->maxY;
      node->aabb[1][2] = aabb->maxZ;
      node->primitive_id = p;
      node->geometry_id_and_flags = geometry_id;
   }
}

static uint32_t
leaf_node_count(const VkAccelerationStructureBuildGeometryInfoKHR *info,
                const VkAccelerationStructureBuildRangeInfoKHR *ranges)
{
   uint32_t count = 0;
   for (uint32_t i = 0; i < info->geometryCount; ++i) {
      count += ranges[i].primitiveCount;
   }
   return count;
}

static void
compute_bounds(const char *base_ptr, uint32_t node_id, float *bounds)
{
   for (unsigned i = 0; i < 3; ++i)
      bounds[i] = INFINITY;
   for (unsigned i = 0; i < 3; ++i)
      bounds[3 + i] = -INFINITY;

   switch (node_id & 7) {
   case 0: {
      const struct radv_bvh_triangle_node *node = (const void *)(base_ptr + (node_id / 8 * 64));
      for (unsigned v = 0; v < 3; ++v) {
         for (unsigned j = 0; j < 3; ++j) {
            bounds[j] = MIN2(bounds[j], node->coords[v][j]);
            bounds[3 + j] = MAX2(bounds[3 + j], node->coords[v][j]);
         }
      }
      break;
   }
   case 5: {
      const struct radv_bvh_box32_node *node = (const void *)(base_ptr + (node_id / 8 * 64));
      for (unsigned c2 = 0; c2 < 4; ++c2) {
         if (isnan(node->coords[c2][0][0]))
            continue;
         for (unsigned j = 0; j < 3; ++j) {
            bounds[j] = MIN2(bounds[j], node->coords[c2][0][j]);
            bounds[3 + j] = MAX2(bounds[3 + j], node->coords[c2][1][j]);
         }
      }
      break;
   }
   case 6: {
      const struct radv_bvh_instance_node *node = (const void *)(base_ptr + (node_id / 8 * 64));
      for (unsigned j = 0; j < 3; ++j) {
         bounds[j] = MIN2(bounds[j], node->aabb[0][j]);
         bounds[3 + j] = MAX2(bounds[3 + j], node->aabb[1][j]);
      }
      break;
   }
   case 7: {
      const struct radv_bvh_aabb_node *node = (const void *)(base_ptr + (node_id / 8 * 64));
      for (unsigned j = 0; j < 3; ++j) {
         bounds[j] = MIN2(bounds[j], node->aabb[0][j]);
         bounds[3 + j] = MAX2(bounds[3 + j], node->aabb[1][j]);
      }
      break;
   }
   }
}

struct bvh_opt_entry {
   uint64_t key;
   uint32_t node_id;
};

static int
bvh_opt_compare(const void *_a, const void *_b)
{
   const struct bvh_opt_entry *a = _a;
   const struct bvh_opt_entry *b = _b;

   if (a->key < b->key)
      return -1;
   if (a->key > b->key)
      return 1;
   if (a->node_id < b->node_id)
      return -1;
   if (a->node_id > b->node_id)
      return 1;
   return 0;
}

static void
optimize_bvh(const char *base_ptr, uint32_t *node_ids, uint32_t node_count)
{
   if (node_count == 0)
      return;

   float bounds[6];
   for (unsigned i = 0; i < 3; ++i)
      bounds[i] = INFINITY;
   for (unsigned i = 0; i < 3; ++i)
      bounds[3 + i] = -INFINITY;

   for (uint32_t i = 0; i < node_count; ++i) {
      float node_bounds[6];
      compute_bounds(base_ptr, node_ids[i], node_bounds);
      for (unsigned j = 0; j < 3; ++j)
         bounds[j] = MIN2(bounds[j], node_bounds[j]);
      for (unsigned j = 0; j < 3; ++j)
         bounds[3 + j] = MAX2(bounds[3 + j], node_bounds[3 + j]);
   }

   struct bvh_opt_entry *entries = calloc(node_count, sizeof(struct bvh_opt_entry));
   if (!entries)
      return;

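   /* Compute a Morton (Z-order) key per leaf: quantize each AABB center to
    * 21 bits per axis relative to the total bounds and interleave the bits,
    * so bit j of axis k lands at key bit j * 3 + k. E.g. x = 0b10, y = 0b01,
    * z = 0b00 interleave to 0b001010. Sorting by this key places spatially
    * close leaves next to each other. */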
   for (uint32_t i = 0; i < node_count; ++i) {
      float node_bounds[6];
      compute_bounds(base_ptr, node_ids[i], node_bounds);
      float node_coords[3];
      for (unsigned j = 0; j < 3; ++j)
         node_coords[j] = (node_bounds[j] + node_bounds[3 + j]) * 0.5;
      int32_t coords[3];
      for (unsigned j = 0; j < 3; ++j)
         coords[j] = MAX2(
            MIN2((int32_t)((node_coords[j] - bounds[j]) / (bounds[3 + j] - bounds[j]) * (1 << 21)),
                 (1 << 21) - 1),
            0);
      uint64_t key = 0;
      for (unsigned j = 0; j < 21; ++j)
         for (unsigned k = 0; k < 3; ++k)
            key |= (uint64_t)((coords[k] >> j) & 1) << (j * 3 + k);
      entries[i].key = key;
      entries[i].node_id = node_ids[i];
   }

   qsort(entries, node_count, sizeof(entries[0]), bvh_opt_compare);
   for (unsigned i = 0; i < node_count; ++i)
      node_ids[i] = entries[i].node_id;

   free(entries);
}

static VkResult
build_bvh(struct radv_device *device, const VkAccelerationStructureBuildGeometryInfoKHR *info,
          const VkAccelerationStructureBuildRangeInfoKHR *ranges)
{
   RADV_FROM_HANDLE(radv_acceleration_structure, accel, info->dstAccelerationStructure);
   VkResult result = VK_SUCCESS;

   uint32_t *scratch[2];
   scratch[0] = info->scratchData.hostAddress;
   scratch[1] = scratch[0] + leaf_node_count(info, ranges);

   char *base_ptr = (char *)device->ws->buffer_map(accel->bo);
   if (!base_ptr)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   base_ptr = base_ptr + accel->mem_offset;
   struct radv_accel_struct_header *header = (void *)base_ptr;
   void *first_node_ptr = (char *)base_ptr + ALIGN(sizeof(*header), 64);

   struct radv_bvh_build_ctx ctx = {.write_scratch = scratch[0],
                                    .base = base_ptr,
                                    .curr_ptr = (char *)first_node_ptr + 128};

   uint64_t instance_offset = (const char *)ctx.curr_ptr - (const char *)base_ptr;
   uint64_t instance_count = 0;

   /* This initializes the leaf nodes of the BVH all at the same level. */
   for (int inst = 1; inst >= 0; --inst) {
      for (uint32_t i = 0; i < info->geometryCount; ++i) {
         const VkAccelerationStructureGeometryKHR *geom =
            info->pGeometries ? &info->pGeometries[i] : info->ppGeometries[i];

         if ((inst && geom->geometryType != VK_GEOMETRY_TYPE_INSTANCES_KHR) ||
             (!inst && geom->geometryType == VK_GEOMETRY_TYPE_INSTANCES_KHR))
            continue;

         switch (geom->geometryType) {
         case VK_GEOMETRY_TYPE_TRIANGLES_KHR:
            build_triangles(&ctx, geom, ranges + i, i);
            break;
         case VK_GEOMETRY_TYPE_AABBS_KHR:
            build_aabbs(&ctx, geom, ranges + i, i);
            break;
         case VK_GEOMETRY_TYPE_INSTANCES_KHR: {
            result = build_instances(device, &ctx, geom, ranges + i);
            if (result != VK_SUCCESS)
               goto fail;

            instance_count += ranges[i].primitiveCount;
            break;
         }
         case VK_GEOMETRY_TYPE_MAX_ENUM_KHR:
            unreachable("VK_GEOMETRY_TYPE_MAX_ENUM_KHR unhandled");
         }
      }
   }

   uint32_t node_counts[2] = {ctx.write_scratch - scratch[0], 0};
   optimize_bvh(base_ptr, scratch[0], node_counts[0]);
   unsigned d;

   /*
    * This is the most naive BVH building algorithm I could think of:
    * it just iteratively builds each level from bottom to top, with
    * the children of each node kept in-order and tightly packed.
    *
    * This is probably terrible for traversal, but it should be easy to
    * build an equivalent GPU version.
    */
   for (d = 0; node_counts[d & 1] > 1 || d == 0; ++d) {
      uint32_t child_count = node_counts[d & 1];
      const uint32_t *children = scratch[d & 1];
      uint32_t *dst_ids = scratch[(d & 1) ^ 1];
      unsigned dst_count;
      unsigned child_idx = 0;
      for (dst_count = 0; child_idx < MAX2(1, child_count); ++dst_count, child_idx += 4) {
         unsigned local_child_count = MIN2(4, child_count - child_idx);
         uint32_t child_ids[4];
         float bounds[4][6];

         for (unsigned c = 0; c < local_child_count; ++c) {
            uint32_t id = children[child_idx + c];
            child_ids[c] = id;

            compute_bounds(base_ptr, id, bounds[c]);
         }

         struct radv_bvh_box32_node *node;

         /* Put the root node at the fixed first_node_ptr location, so its
          * node id is known up front, which allows some traversal
          * optimizations. */
         if (child_idx == 0 && local_child_count == child_count) {
            node = first_node_ptr;
            header->root_node_offset = ((char *)first_node_ptr - (char *)base_ptr) / 64 * 8 + 5;
         } else {
            uint32_t dst_id = (ctx.curr_ptr - base_ptr) / 64;
            dst_ids[dst_count] = dst_id * 8 + 5;

            node = (void *)ctx.curr_ptr;
            ctx.curr_ptr += 128;
         }

         for (unsigned c = 0; c < local_child_count; ++c) {
            node->children[c] = child_ids[c];
            for (unsigned i = 0; i < 2; ++i)
               for (unsigned j = 0; j < 3; ++j)
                  node->coords[c][i][j] = bounds[c][i * 3 + j];
         }
         for (unsigned c = local_child_count; c < 4; ++c) {
            for (unsigned i = 0; i < 2; ++i)
               for (unsigned j = 0; j < 3; ++j)
                  node->coords[c][i][j] = NAN;
         }
      }

      node_counts[(d & 1) ^ 1] = dst_count;
   }

   compute_bounds(base_ptr, header->root_node_offset, &header->aabb[0][0]);

   header->instance_offset = instance_offset;
   header->instance_count = instance_count;
   header->compacted_size = (char *)ctx.curr_ptr - base_ptr;

   /* 16 bytes per invocation, 64 invocations per workgroup */
   header->copy_dispatch_size[0] = DIV_ROUND_UP(header->compacted_size, 16 * 64);
   header->copy_dispatch_size[1] = 1;
   header->copy_dispatch_size[2] = 1;

   header->serialization_size =
      header->compacted_size + align(sizeof(struct radv_accel_struct_serialization_header) +
                                        sizeof(uint64_t) * header->instance_count,
                                     128);

fail:
   device->ws->buffer_unmap(accel->bo);
   return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
radv_BuildAccelerationStructuresKHR(
   VkDevice _device, VkDeferredOperationKHR deferredOperation, uint32_t infoCount,
   const VkAccelerationStructureBuildGeometryInfoKHR *pInfos,
   const VkAccelerationStructureBuildRangeInfoKHR *const *ppBuildRangeInfos)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   VkResult result = VK_SUCCESS;

   for (uint32_t i = 0; i < infoCount; ++i) {
      result = build_bvh(device, pInfos + i, ppBuildRangeInfos[i]);
      if (result != VK_SUCCESS)
         break;
   }
   return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
radv_CopyAccelerationStructureKHR(VkDevice _device, VkDeferredOperationKHR deferredOperation,
                                  const VkCopyAccelerationStructureInfoKHR *pInfo)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   RADV_FROM_HANDLE(radv_acceleration_structure, src_struct, pInfo->src);
   RADV_FROM_HANDLE(radv_acceleration_structure, dst_struct, pInfo->dst);

   char *src_ptr = (char *)device->ws->buffer_map(src_struct->bo);
   if (!src_ptr)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   char *dst_ptr = (char *)device->ws->buffer_map(dst_struct->bo);
   if (!dst_ptr) {
      device->ws->buffer_unmap(src_struct->bo);
      return VK_ERROR_OUT_OF_HOST_MEMORY;
   }

   src_ptr += src_struct->mem_offset;
   dst_ptr += dst_struct->mem_offset;

   const struct radv_accel_struct_header *header = (const void *)src_ptr;
   memcpy(dst_ptr, src_ptr, header->compacted_size);

   device->ws->buffer_unmap(src_struct->bo);
   device->ws->buffer_unmap(dst_struct->bo);
   return VK_SUCCESS;
}

static nir_ssa_def *
get_indices(nir_builder *b, nir_ssa_def *addr, nir_ssa_def *type, nir_ssa_def *id)
{
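   /* type < 2 catches VK_INDEX_TYPE_UINT16 (0) and VK_INDEX_TYPE_UINT32 (1);
    * VK_INDEX_TYPE_NONE_KHR and VK_INDEX_TYPE_UINT8_EXT are large extension
    * enum values and take the else branch. */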
   const struct glsl_type *uvec3_type = glsl_vector_type(GLSL_TYPE_UINT, 3);
   nir_variable *result =
      nir_variable_create(b->shader, nir_var_shader_temp, uvec3_type, "indices");

   nir_push_if(b, nir_ult(b, type, nir_imm_int(b, 2)));
   nir_push_if(b, nir_ieq(b, type, nir_imm_int(b, VK_INDEX_TYPE_UINT16)));
   {
      nir_ssa_def *index_id = nir_umul24(b, id, nir_imm_int(b, 6));
      nir_ssa_def *indices[3];
      for (unsigned i = 0; i < 3; ++i) {
         indices[i] = nir_build_load_global(
            b, 1, 16,
            nir_iadd(b, addr, nir_u2u64(b, nir_iadd(b, index_id, nir_imm_int(b, 2 * i)))));
      }
      nir_store_var(b, result, nir_u2u32(b, nir_vec(b, indices, 3)), 7);
   }
   nir_push_else(b, NULL);
   {
      nir_ssa_def *index_id = nir_umul24(b, id, nir_imm_int(b, 12));
      nir_ssa_def *indices =
         nir_build_load_global(b, 3, 32, nir_iadd(b, addr, nir_u2u64(b, index_id)));
      nir_store_var(b, result, indices, 7);
   }
   nir_pop_if(b, NULL);
   nir_push_else(b, NULL);
   {
      nir_ssa_def *index_id = nir_umul24(b, id, nir_imm_int(b, 3));
      nir_ssa_def *indices[] = {
         index_id,
         nir_iadd(b, index_id, nir_imm_int(b, 1)),
         nir_iadd(b, index_id, nir_imm_int(b, 2)),
      };

      nir_push_if(b, nir_ieq(b, type, nir_imm_int(b, VK_INDEX_TYPE_NONE_KHR)));
      {
         nir_store_var(b, result, nir_vec(b, indices, 3), 7);
      }
      nir_push_else(b, NULL);
      {
         for (unsigned i = 0; i < 3; ++i) {
            indices[i] =
               nir_build_load_global(b, 1, 8, nir_iadd(b, addr, nir_u2u64(b, indices[i])));
         }
         nir_store_var(b, result, nir_u2u32(b, nir_vec(b, indices, 3)), 7);
      }
      nir_pop_if(b, NULL);
   }
   nir_pop_if(b, NULL);
   return nir_load_var(b, result);
}

static void
get_vertices(nir_builder *b, nir_ssa_def *addresses, nir_ssa_def *format, nir_ssa_def *positions[3])
{
   const struct glsl_type *vec3_type = glsl_vector_type(GLSL_TYPE_FLOAT, 3);
   nir_variable *results[3] = {
      nir_variable_create(b->shader, nir_var_shader_temp, vec3_type, "vertex0"),
      nir_variable_create(b->shader, nir_var_shader_temp, vec3_type, "vertex1"),
      nir_variable_create(b->shader, nir_var_shader_temp, vec3_type, "vertex2")};

   VkFormat formats[] = {
      VK_FORMAT_R32G32B32_SFLOAT, VK_FORMAT_R32G32B32A32_SFLOAT, VK_FORMAT_R16G16B16_SFLOAT,
      VK_FORMAT_R16G16B16A16_SFLOAT, VK_FORMAT_R16G16_SFLOAT, VK_FORMAT_R32G32_SFLOAT,
      VK_FORMAT_R16G16_SNORM, VK_FORMAT_R16G16B16A16_SNORM, VK_FORMAT_R16G16B16A16_UNORM,
   };

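   /* NIR has no switch statement, so emit a chain of nested if/else blocks,
    * one per format; the loop of nir_pop_if() calls below closes all
    * ARRAY_SIZE(formats) - 1 of them at once. */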
   for (unsigned f = 0; f < ARRAY_SIZE(formats); ++f) {
      if (f + 1 < ARRAY_SIZE(formats))
         nir_push_if(b, nir_ieq(b, format, nir_imm_int(b, formats[f])));

      for (unsigned i = 0; i < 3; ++i) {
         switch (formats[f]) {
         case VK_FORMAT_R32G32B32_SFLOAT:
         case VK_FORMAT_R32G32B32A32_SFLOAT:
            nir_store_var(b, results[i],
                          nir_build_load_global(b, 3, 32, nir_channel(b, addresses, i)), 7);
            break;
         case VK_FORMAT_R32G32_SFLOAT:
         case VK_FORMAT_R16G16_SFLOAT:
         case VK_FORMAT_R16G16B16_SFLOAT:
         case VK_FORMAT_R16G16B16A16_SFLOAT:
         case VK_FORMAT_R16G16_SNORM:
         case VK_FORMAT_R16G16B16A16_SNORM:
         case VK_FORMAT_R16G16B16A16_UNORM: {
            unsigned components = MIN2(3, vk_format_get_nr_components(formats[f]));
            unsigned comp_bits =
               vk_format_get_blocksizebits(formats[f]) / vk_format_get_nr_components(formats[f]);
            unsigned comp_bytes = comp_bits / 8;
            nir_ssa_def *values[3];
            nir_ssa_def *addr = nir_channel(b, addresses, i);
            for (unsigned j = 0; j < components; ++j)
               values[j] = nir_build_load_global(
                  b, 1, comp_bits, nir_iadd(b, addr, nir_imm_int64(b, j * comp_bytes)));

            for (unsigned j = components; j < 3; ++j)
               values[j] = nir_imm_intN_t(b, 0, comp_bits);

            nir_ssa_def *vec;
            if (util_format_is_snorm(vk_format_to_pipe_format(formats[f]))) {
               for (unsigned j = 0; j < 3; ++j) {
                  values[j] = nir_fdiv(b, nir_i2f32(b, values[j]),
                                       nir_imm_float(b, (1u << (comp_bits - 1)) - 1));
                  values[j] = nir_fmax(b, values[j], nir_imm_float(b, -1.0));
               }
               vec = nir_vec(b, values, 3);
            } else if (util_format_is_unorm(vk_format_to_pipe_format(formats[f]))) {
               for (unsigned j = 0; j < 3; ++j) {
                  values[j] =
                     nir_fdiv(b, nir_u2f32(b, values[j]), nir_imm_float(b, (1u << comp_bits) - 1));
                  values[j] = nir_fmin(b, values[j], nir_imm_float(b, 1.0));
               }
               vec = nir_vec(b, values, 3);
            } else if (comp_bits == 16)
               vec = nir_f2f32(b, nir_vec(b, values, 3));
            else
               vec = nir_vec(b, values, 3);
            nir_store_var(b, results[i], vec, 7);
            break;
         }
         default:
            unreachable("Unhandled format");
         }
      }
      if (f + 1 < ARRAY_SIZE(formats))
         nir_push_else(b, NULL);
   }
   for (unsigned f = 1; f < ARRAY_SIZE(formats); ++f) {
      nir_pop_if(b, NULL);
   }

   for (unsigned i = 0; i < 3; ++i)
      positions[i] = nir_load_var(b, results[i]);
}

struct build_primitive_constants {
   uint64_t node_dst_addr;
   uint64_t scratch_addr;
   uint32_t dst_offset;
   uint32_t dst_scratch_offset;
   uint32_t geometry_type;
   uint32_t geometry_id;

   union {
      struct {
         uint64_t vertex_addr;
         uint64_t index_addr;
         uint64_t transform_addr;
         uint32_t vertex_stride;
         uint32_t vertex_format;
         uint32_t index_format;
      };
      struct {
         uint64_t instance_data;
         uint32_t array_of_pointers;
      };
      struct {
         uint64_t aabb_addr;
         uint32_t aabb_stride;
      };
   };
};

struct build_internal_constants {
   uint64_t node_dst_addr;
   uint64_t scratch_addr;
   uint32_t dst_offset;
   uint32_t dst_scratch_offset;
   uint32_t src_scratch_offset;
   uint32_t fill_header;
};

/* This inverts a 3x3 matrix using cofactors, as in e.g.
 * https://www.mathsisfun.com/algebra/matrix-inverse-minors-cofactors-adjugate.html */
static void
nir_invert_3x3(nir_builder *b, nir_ssa_def *in[3][3], nir_ssa_def *out[3][3])
{
   nir_ssa_def *cofactors[3][3];
   for (unsigned i = 0; i < 3; ++i) {
      for (unsigned j = 0; j < 3; ++j) {
         cofactors[i][j] =
            nir_fsub(b, nir_fmul(b, in[(i + 1) % 3][(j + 1) % 3], in[(i + 2) % 3][(j + 2) % 3]),
                     nir_fmul(b, in[(i + 1) % 3][(j + 2) % 3], in[(i + 2) % 3][(j + 1) % 3]));
      }
   }

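   /* Laplace expansion along the first row: det = sum_i in[0][i] * cofactors[0][i]. */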
   nir_ssa_def *det = NULL;
   for (unsigned i = 0; i < 3; ++i) {
      nir_ssa_def *det_part = nir_fmul(b, in[0][i], cofactors[0][i]);
      det = det ? nir_fadd(b, det, det_part) : det_part;
   }

   nir_ssa_def *det_inv = nir_frcp(b, det);
   for (unsigned i = 0; i < 3; ++i) {
      for (unsigned j = 0; j < 3; ++j) {
         out[i][j] = nir_fmul(b, cofactors[j][i], det_inv);
      }
   }
}

static nir_shader *
build_leaf_shader(struct radv_device *dev)
{
   const struct glsl_type *vec3_type = glsl_vector_type(GLSL_TYPE_FLOAT, 3);
   nir_builder b = radv_meta_init_shader(MESA_SHADER_COMPUTE, "accel_build_leaf_shader");

   b.shader->info.workgroup_size[0] = 64;

   nir_ssa_def *pconst0 =
      nir_load_push_constant(&b, 4, 32, nir_imm_int(&b, 0), .base = 0, .range = 16);
   nir_ssa_def *pconst1 =
      nir_load_push_constant(&b, 4, 32, nir_imm_int(&b, 0), .base = 16, .range = 16);
   nir_ssa_def *pconst2 =
      nir_load_push_constant(&b, 4, 32, nir_imm_int(&b, 0), .base = 32, .range = 16);
   nir_ssa_def *pconst3 =
      nir_load_push_constant(&b, 4, 32, nir_imm_int(&b, 0), .base = 48, .range = 16);
   nir_ssa_def *pconst4 =
      nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .base = 64, .range = 4);

   nir_ssa_def *geom_type = nir_channel(&b, pconst1, 2);
   nir_ssa_def *node_dst_addr = nir_pack_64_2x32(&b, nir_channels(&b, pconst0, 3));
   nir_ssa_def *scratch_addr = nir_pack_64_2x32(&b, nir_channels(&b, pconst0, 12));
   nir_ssa_def *node_dst_offset = nir_channel(&b, pconst1, 0);
   nir_ssa_def *scratch_offset = nir_channel(&b, pconst1, 1);
   nir_ssa_def *geometry_id = nir_channel(&b, pconst1, 3);

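   /* Flat 1D invocation index: workgroup_id.x * workgroup_size[0] +
    * local_invocation_id.x; one invocation per leaf primitive. */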
   nir_ssa_def *global_id =
      nir_iadd(&b,
               nir_umul24(&b, nir_channels(&b, nir_load_workgroup_id(&b, 32), 1),
                          nir_imm_int(&b, b.shader->info.workgroup_size[0])),
               nir_channels(&b, nir_load_local_invocation_id(&b), 1));
   scratch_addr = nir_iadd(
      &b, scratch_addr,
      nir_u2u64(&b, nir_iadd(&b, scratch_offset, nir_umul24(&b, global_id, nir_imm_int(&b, 4)))));

   nir_push_if(&b, nir_ieq(&b, geom_type, nir_imm_int(&b, VK_GEOMETRY_TYPE_TRIANGLES_KHR)));
   { /* Triangles */
      nir_ssa_def *vertex_addr = nir_pack_64_2x32(&b, nir_channels(&b, pconst2, 3));
      nir_ssa_def *index_addr = nir_pack_64_2x32(&b, nir_channels(&b, pconst2, 12));
      nir_ssa_def *transform_addr = nir_pack_64_2x32(&b, nir_channels(&b, pconst3, 3));
      nir_ssa_def *vertex_stride = nir_channel(&b, pconst3, 2);
      nir_ssa_def *vertex_format = nir_channel(&b, pconst3, 3);
      nir_ssa_def *index_format = nir_channel(&b, pconst4, 0);
      unsigned repl_swizzle[4] = {0, 0, 0, 0};

      nir_ssa_def *node_offset =
         nir_iadd(&b, node_dst_offset, nir_umul24(&b, global_id, nir_imm_int(&b, 64)));
      nir_ssa_def *triangle_node_dst_addr = nir_iadd(&b, node_dst_addr, nir_u2u64(&b, node_offset));

      nir_ssa_def *indices = get_indices(&b, index_addr, index_format, global_id);
      nir_ssa_def *vertex_addresses = nir_iadd(
         &b, nir_u2u64(&b, nir_imul(&b, indices, nir_swizzle(&b, vertex_stride, repl_swizzle, 3))),
         nir_swizzle(&b, vertex_addr, repl_swizzle, 3));
      nir_ssa_def *positions[3];
      get_vertices(&b, vertex_addresses, vertex_format, positions);

      nir_ssa_def *node_data[16];
      memset(node_data, 0, sizeof(node_data));
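
      /* node_data mirrors the triangle node as 16 dwords: the transformed
       * coordinates land in dwords 0-8, dword 12 is the triangle id, dword 13
       * the geometry id and dword 15 the id field (9, as in the host path). */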

      nir_variable *transform[] = {
         nir_variable_create(b.shader, nir_var_shader_temp, glsl_vec4_type(), "transform0"),
         nir_variable_create(b.shader, nir_var_shader_temp, glsl_vec4_type(), "transform1"),
         nir_variable_create(b.shader, nir_var_shader_temp, glsl_vec4_type(), "transform2"),
      };
      nir_store_var(&b, transform[0], nir_imm_vec4(&b, 1.0, 0.0, 0.0, 0.0), 0xf);
      nir_store_var(&b, transform[1], nir_imm_vec4(&b, 0.0, 1.0, 0.0, 0.0), 0xf);
      nir_store_var(&b, transform[2], nir_imm_vec4(&b, 0.0, 0.0, 1.0, 0.0), 0xf);

      nir_push_if(&b, nir_ine(&b, transform_addr, nir_imm_int64(&b, 0)));
      nir_store_var(
         &b, transform[0],
         nir_build_load_global(&b, 4, 32, nir_iadd(&b, transform_addr, nir_imm_int64(&b, 0))), 0xf);
      nir_store_var(
         &b, transform[1],
         nir_build_load_global(&b, 4, 32, nir_iadd(&b, transform_addr, nir_imm_int64(&b, 16))),
         0xf);
      nir_store_var(
         &b, transform[2],
         nir_build_load_global(&b, 4, 32, nir_iadd(&b, transform_addr, nir_imm_int64(&b, 32))),
         0xf);
      nir_pop_if(&b, NULL);

      for (unsigned i = 0; i < 3; ++i)
         for (unsigned j = 0; j < 3; ++j)
            node_data[i * 3 + j] = nir_fdph(&b, positions[i], nir_load_var(&b, transform[j]));

      node_data[12] = global_id;
      node_data[13] = geometry_id;
      node_data[15] = nir_imm_int(&b, 9);
      for (unsigned i = 0; i < ARRAY_SIZE(node_data); ++i)
         if (!node_data[i])
            node_data[i] = nir_imm_int(&b, 0);

      for (unsigned i = 0; i < 4; ++i) {
         nir_build_store_global(&b, nir_vec(&b, node_data + i * 4, 4),
                                nir_iadd(&b, triangle_node_dst_addr, nir_imm_int64(&b, i * 16)),
                                .align_mul = 16);
      }

      nir_ssa_def *node_id = nir_ushr(&b, node_offset, nir_imm_int(&b, 3));
      nir_build_store_global(&b, node_id, scratch_addr);
   }
   nir_push_else(&b, NULL);
   nir_push_if(&b, nir_ieq(&b, geom_type, nir_imm_int(&b, VK_GEOMETRY_TYPE_AABBS_KHR)));
   { /* AABBs */
      nir_ssa_def *aabb_addr = nir_pack_64_2x32(&b, nir_channels(&b, pconst2, 3));
      nir_ssa_def *aabb_stride = nir_channel(&b, pconst2, 2);

      nir_ssa_def *node_offset =
         nir_iadd(&b, node_dst_offset, nir_umul24(&b, global_id, nir_imm_int(&b, 64)));
      nir_ssa_def *aabb_node_dst_addr = nir_iadd(&b, node_dst_addr, nir_u2u64(&b, node_offset));
      nir_ssa_def *node_id =
         nir_iadd(&b, nir_ushr(&b, node_offset, nir_imm_int(&b, 3)), nir_imm_int(&b, 7));
      nir_build_store_global(&b, node_id, scratch_addr);

      aabb_addr = nir_iadd(&b, aabb_addr, nir_u2u64(&b, nir_imul(&b, aabb_stride, global_id)));

      nir_ssa_def *min_bound =
         nir_build_load_global(&b, 3, 32, nir_iadd(&b, aabb_addr, nir_imm_int64(&b, 0)));
      nir_ssa_def *max_bound =
         nir_build_load_global(&b, 3, 32, nir_iadd(&b, aabb_addr, nir_imm_int64(&b, 12)));

      nir_ssa_def *values[] = {nir_channel(&b, min_bound, 0),
                               nir_channel(&b, min_bound, 1),
                               nir_channel(&b, min_bound, 2),
                               nir_channel(&b, max_bound, 0),
                               nir_channel(&b, max_bound, 1),
                               nir_channel(&b, max_bound, 2),
                               global_id,
                               geometry_id};

      nir_build_store_global(&b, nir_vec(&b, values + 0, 4),
                             nir_iadd(&b, aabb_node_dst_addr, nir_imm_int64(&b, 0)),
                             .align_mul = 16);
      nir_build_store_global(&b, nir_vec(&b, values + 4, 4),
                             nir_iadd(&b, aabb_node_dst_addr, nir_imm_int64(&b, 16)),
                             .align_mul = 16);
   }
   nir_push_else(&b, NULL);
   { /* Instances */
      nir_variable *instance_addr_var =
         nir_variable_create(b.shader, nir_var_shader_temp, glsl_uint64_t_type(), "instance_addr");
      nir_push_if(&b, nir_ine(&b, nir_channel(&b, pconst2, 2), nir_imm_int(&b, 0)));
      {
         nir_ssa_def *ptr = nir_iadd(&b, nir_pack_64_2x32(&b, nir_channels(&b, pconst2, 3)),
                                     nir_u2u64(&b, nir_imul(&b, global_id, nir_imm_int(&b, 8))));
         nir_ssa_def *addr =
            nir_pack_64_2x32(&b, nir_build_load_global(&b, 2, 32, ptr, .align_mul = 8));
         nir_store_var(&b, instance_addr_var, addr, 1);
      }
      nir_push_else(&b, NULL);
      {
         nir_ssa_def *addr = nir_iadd(&b, nir_pack_64_2x32(&b, nir_channels(&b, pconst2, 3)),
                                      nir_u2u64(&b, nir_imul(&b, global_id, nir_imm_int(&b, 64))));
         nir_store_var(&b, instance_addr_var, addr, 1);
      }
      nir_pop_if(&b, NULL);
      nir_ssa_def *instance_addr = nir_load_var(&b, instance_addr_var);

      nir_ssa_def *inst_transform[] = {
         nir_build_load_global(&b, 4, 32, nir_iadd(&b, instance_addr, nir_imm_int64(&b, 0))),
         nir_build_load_global(&b, 4, 32, nir_iadd(&b, instance_addr, nir_imm_int64(&b, 16))),
         nir_build_load_global(&b, 4, 32, nir_iadd(&b, instance_addr, nir_imm_int64(&b, 32)))};
      nir_ssa_def *inst3 =
         nir_build_load_global(&b, 4, 32, nir_iadd(&b, instance_addr, nir_imm_int64(&b, 48)));

      nir_ssa_def *node_offset =
         nir_iadd(&b, node_dst_offset, nir_umul24(&b, global_id, nir_imm_int(&b, 128)));
      node_dst_addr = nir_iadd(&b, node_dst_addr, nir_u2u64(&b, node_offset));
      nir_ssa_def *node_id =
         nir_iadd(&b, nir_ushr(&b, node_offset, nir_imm_int(&b, 3)), nir_imm_int(&b, 6));
      nir_build_store_global(&b, node_id, scratch_addr);

      nir_variable *bounds[2] = {
         nir_variable_create(b.shader, nir_var_shader_temp, vec3_type, "min_bound"),
         nir_variable_create(b.shader, nir_var_shader_temp, vec3_type, "max_bound"),
      };

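      /* Start the bounds out as NaN: for an inactive instance (null BLAS
       * reference) the if below is skipped and a NaN AABB is written, which
       * the box32 handling (see the isnan() check in compute_bounds()) treats
       * as an empty child. */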
      nir_store_var(&b, bounds[0], nir_channels(&b, nir_imm_vec4(&b, NAN, NAN, NAN, NAN), 7), 7);
      nir_store_var(&b, bounds[1], nir_channels(&b, nir_imm_vec4(&b, NAN, NAN, NAN, NAN), 7), 7);

      nir_ssa_def *header_addr = nir_pack_64_2x32(&b, nir_channels(&b, inst3, 12));
      nir_push_if(&b, nir_ine(&b, header_addr, nir_imm_int64(&b, 0)));
      nir_ssa_def *header_root_offset =
         nir_build_load_global(&b, 1, 32, nir_iadd(&b, header_addr, nir_imm_int64(&b, 0)));
      nir_ssa_def *header_min =
         nir_build_load_global(&b, 3, 32, nir_iadd(&b, header_addr, nir_imm_int64(&b, 8)));
      nir_ssa_def *header_max =
         nir_build_load_global(&b, 3, 32, nir_iadd(&b, header_addr, nir_imm_int64(&b, 20)));

      nir_ssa_def *bound_defs[2][3];
      for (unsigned i = 0; i < 3; ++i) {
         bound_defs[0][i] = bound_defs[1][i] = nir_channel(&b, inst_transform[i], 3);

         nir_ssa_def *mul_a = nir_fmul(&b, nir_channels(&b, inst_transform[i], 7), header_min);
         nir_ssa_def *mul_b = nir_fmul(&b, nir_channels(&b, inst_transform[i], 7), header_max);
         nir_ssa_def *mi = nir_fmin(&b, mul_a, mul_b);
         nir_ssa_def *ma = nir_fmax(&b, mul_a, mul_b);
         for (unsigned j = 0; j < 3; ++j) {
            bound_defs[0][i] = nir_fadd(&b, bound_defs[0][i], nir_channel(&b, mi, j));
            bound_defs[1][i] = nir_fadd(&b, bound_defs[1][i], nir_channel(&b, ma, j));
         }
      }

      nir_store_var(&b, bounds[0], nir_vec(&b, bound_defs[0], 3), 7);
      nir_store_var(&b, bounds[1], nir_vec(&b, bound_defs[1], 3), 7);

      /* Store object to world matrix */
      for (unsigned i = 0; i < 3; ++i) {
         nir_ssa_def *vals[3];
         for (unsigned j = 0; j < 3; ++j)
            vals[j] = nir_channel(&b, inst_transform[j], i);

         nir_build_store_global(&b, nir_vec(&b, vals, 3),
                                nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 92 + 12 * i)));
      }

      nir_ssa_def *m_in[3][3], *m_out[3][3], *m_vec[3][4];
      for (unsigned i = 0; i < 3; ++i)
         for (unsigned j = 0; j < 3; ++j)
            m_in[i][j] = nir_channel(&b, inst_transform[i], j);
      nir_invert_3x3(&b, m_in, m_out);
      for (unsigned i = 0; i < 3; ++i) {
         for (unsigned j = 0; j < 3; ++j)
            m_vec[i][j] = m_out[i][j];
         m_vec[i][3] = nir_channel(&b, inst_transform[i], 3);
      }

      for (unsigned i = 0; i < 3; ++i) {
         nir_build_store_global(&b, nir_vec(&b, m_vec[i], 4),
                                nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 16 + 16 * i)));
      }

      nir_ssa_def *out0[4] = {
         nir_ior(&b, nir_channel(&b, nir_unpack_64_2x32(&b, header_addr), 0), header_root_offset),
         nir_channel(&b, nir_unpack_64_2x32(&b, header_addr), 1), nir_channel(&b, inst3, 0),
         nir_channel(&b, inst3, 1)};
      nir_build_store_global(&b, nir_vec(&b, out0, 4),
                             nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 0)));
      nir_build_store_global(&b, global_id, nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 88)));
      nir_pop_if(&b, NULL);
      nir_build_store_global(&b, nir_load_var(&b, bounds[0]),
                             nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 64)));
      nir_build_store_global(&b, nir_load_var(&b, bounds[1]),
                             nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 76)));
   }
   nir_pop_if(&b, NULL);
   nir_pop_if(&b, NULL);

   return b.shader;
}

static void
determine_bounds(nir_builder *b, nir_ssa_def *node_addr, nir_ssa_def *node_id,
                 nir_variable *bounds_vars[2])
{
   nir_ssa_def *node_type = nir_iand(b, node_id, nir_imm_int(b, 7));
   node_addr = nir_iadd(
      b, node_addr,
      nir_u2u64(b, nir_ishl(b, nir_iand(b, node_id, nir_imm_int(b, ~7u)), nir_imm_int(b, 3))));

   nir_push_if(b, nir_ieq(b, node_type, nir_imm_int(b, 0)));
   { /* Triangles */
      nir_ssa_def *positions[3];
      for (unsigned i = 0; i < 3; ++i)
         positions[i] =
            nir_build_load_global(b, 3, 32, nir_iadd(b, node_addr, nir_imm_int64(b, i * 12)));
      nir_ssa_def *bounds[] = {positions[0], positions[0]};
      for (unsigned i = 1; i < 3; ++i) {
         bounds[0] = nir_fmin(b, bounds[0], positions[i]);
         bounds[1] = nir_fmax(b, bounds[1], positions[i]);
      }
      nir_store_var(b, bounds_vars[0], bounds[0], 7);
      nir_store_var(b, bounds_vars[1], bounds[1], 7);
   }
   nir_push_else(b, NULL);
   nir_push_if(b, nir_ieq(b, node_type, nir_imm_int(b, 5)));
   { /* Box32 nodes */
      nir_ssa_def *input_bounds[4][2];
      for (unsigned i = 0; i < 4; ++i)
         for (unsigned j = 0; j < 2; ++j)
            input_bounds[i][j] = nir_build_load_global(
               b, 3, 32, nir_iadd(b, node_addr, nir_imm_int64(b, 16 + i * 24 + j * 12)));
      nir_ssa_def *bounds[] = {input_bounds[0][0], input_bounds[0][1]};
      for (unsigned i = 1; i < 4; ++i) {
         bounds[0] = nir_fmin(b, bounds[0], input_bounds[i][0]);
         bounds[1] = nir_fmax(b, bounds[1], input_bounds[i][1]);
      }

      nir_store_var(b, bounds_vars[0], bounds[0], 7);
      nir_store_var(b, bounds_vars[1], bounds[1], 7);
   }
   nir_push_else(b, NULL);
   nir_push_if(b, nir_ieq(b, node_type, nir_imm_int(b, 6)));
   { /* Instances */
      nir_ssa_def *bounds[2];
      for (unsigned i = 0; i < 2; ++i)
         bounds[i] =
            nir_build_load_global(b, 3, 32, nir_iadd(b, node_addr, nir_imm_int64(b, 64 + i * 12)));
      nir_store_var(b, bounds_vars[0], bounds[0], 7);
      nir_store_var(b, bounds_vars[1], bounds[1], 7);
   }
   nir_push_else(b, NULL);
   { /* AABBs */
      nir_ssa_def *bounds[2];
      for (unsigned i = 0; i < 2; ++i)
         bounds[i] =
            nir_build_load_global(b, 3, 32, nir_iadd(b, node_addr, nir_imm_int64(b, i * 12)));
      nir_store_var(b, bounds_vars[0], bounds[0], 7);
      nir_store_var(b, bounds_vars[1], bounds[1], 7);
   }
   nir_pop_if(b, NULL);
   nir_pop_if(b, NULL);
   nir_pop_if(b, NULL);
}

static nir_shader *
build_internal_shader(struct radv_device *dev)
{
   const struct glsl_type *vec3_type = glsl_vector_type(GLSL_TYPE_FLOAT, 3);
   nir_builder b = radv_meta_init_shader(MESA_SHADER_COMPUTE, "accel_build_internal_shader");

   b.shader->info.workgroup_size[0] = 64;

   /*
    * push constants:
    * i32 x 2: node dst address
    * i32 x 2: scratch address
    * i32: dst offset
    * i32: dst scratch offset
    * i32: src scratch offset
    * i32: src_node_count | (fill_header << 31)
    */
   nir_ssa_def *pconst0 =
      nir_load_push_constant(&b, 4, 32, nir_imm_int(&b, 0), .base = 0, .range = 16);
   nir_ssa_def *pconst1 =
      nir_load_push_constant(&b, 4, 32, nir_imm_int(&b, 0), .base = 16, .range = 16);

   nir_ssa_def *node_addr = nir_pack_64_2x32(&b, nir_channels(&b, pconst0, 3));
   nir_ssa_def *scratch_addr = nir_pack_64_2x32(&b, nir_channels(&b, pconst0, 12));
   nir_ssa_def *node_dst_offset = nir_channel(&b, pconst1, 0);
   nir_ssa_def *dst_scratch_offset = nir_channel(&b, pconst1, 1);
   nir_ssa_def *src_scratch_offset = nir_channel(&b, pconst1, 2);
   nir_ssa_def *src_node_count =
      nir_iand(&b, nir_channel(&b, pconst1, 3), nir_imm_int(&b, 0x7FFFFFFFU));
   nir_ssa_def *fill_header =
      nir_ine(&b, nir_iand(&b, nir_channel(&b, pconst1, 3), nir_imm_int(&b, 0x80000000U)),
              nir_imm_int(&b, 0));

   nir_ssa_def *global_id =
      nir_iadd(&b,
               nir_umul24(&b, nir_channels(&b, nir_load_workgroup_id(&b, 32), 1),
                          nir_imm_int(&b, b.shader->info.workgroup_size[0])),
               nir_channels(&b, nir_load_local_invocation_id(&b), 1));
   nir_ssa_def *src_idx = nir_imul(&b, global_id, nir_imm_int(&b, 4));
   nir_ssa_def *src_count = nir_umin(&b, nir_imm_int(&b, 4), nir_isub(&b, src_node_count, src_idx));

   nir_ssa_def *node_offset =
      nir_iadd(&b, node_dst_offset, nir_ishl(&b, global_id, nir_imm_int(&b, 7)));
   nir_ssa_def *node_dst_addr = nir_iadd(&b, node_addr, nir_u2u64(&b, node_offset));
   nir_ssa_def *src_nodes = nir_build_load_global(
      &b, 4, 32,
      nir_iadd(&b, scratch_addr,
               nir_u2u64(&b, nir_iadd(&b, src_scratch_offset,
                                      nir_ishl(&b, global_id, nir_imm_int(&b, 4))))));

   nir_build_store_global(&b, src_nodes, nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 0)));

   nir_ssa_def *total_bounds[2] = {
      nir_channels(&b, nir_imm_vec4(&b, NAN, NAN, NAN, NAN), 7),
      nir_channels(&b, nir_imm_vec4(&b, NAN, NAN, NAN, NAN), 7),
   };

   for (unsigned i = 0; i < 4; ++i) {
      nir_variable *bounds[2] = {
         nir_variable_create(b.shader, nir_var_shader_temp, vec3_type, "min_bound"),
         nir_variable_create(b.shader, nir_var_shader_temp, vec3_type, "max_bound"),
      };
      nir_store_var(&b, bounds[0], nir_channels(&b, nir_imm_vec4(&b, NAN, NAN, NAN, NAN), 7), 7);
      nir_store_var(&b, bounds[1], nir_channels(&b, nir_imm_vec4(&b, NAN, NAN, NAN, NAN), 7), 7);

      nir_push_if(&b, nir_ilt(&b, nir_imm_int(&b, i), src_count));
      determine_bounds(&b, node_addr, nir_channel(&b, src_nodes, i), bounds);
      nir_pop_if(&b, NULL);
      nir_build_store_global(&b, nir_load_var(&b, bounds[0]),
                             nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 16 + 24 * i)));
      nir_build_store_global(&b, nir_load_var(&b, bounds[1]),
                             nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 28 + 24 * i)));
      total_bounds[0] = nir_fmin(&b, total_bounds[0], nir_load_var(&b, bounds[0]));
      total_bounds[1] = nir_fmax(&b, total_bounds[1], nir_load_var(&b, bounds[1]));
   }

   nir_ssa_def *node_id =
      nir_iadd(&b, nir_ushr(&b, node_offset, nir_imm_int(&b, 3)), nir_imm_int(&b, 5));
   nir_ssa_def *dst_scratch_addr = nir_iadd(
      &b, scratch_addr,
      nir_u2u64(&b, nir_iadd(&b, dst_scratch_offset, nir_ishl(&b, global_id, nir_imm_int(&b, 2)))));
   nir_build_store_global(&b, node_id, dst_scratch_addr);

   nir_push_if(&b, fill_header);
   nir_build_store_global(&b, node_id, node_addr);
   nir_build_store_global(&b, total_bounds[0], nir_iadd(&b, node_addr, nir_imm_int64(&b, 8)));
   nir_build_store_global(&b, total_bounds[1], nir_iadd(&b, node_addr, nir_imm_int64(&b, 20)));
   nir_pop_if(&b, NULL);
   return b.shader;
}

enum copy_mode {
   COPY_MODE_COPY,
   COPY_MODE_SERIALIZE,
   COPY_MODE_DESERIALIZE,
};
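
/* COPY_MODE_COPY copies the compacted BVH data verbatim. COPY_MODE_SERIALIZE
 * writes a radv_accel_struct_serialization_header plus one uint64_t per
 * instance in front of the data; COPY_MODE_DESERIALIZE strips them off
 * again. */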

struct copy_constants {
   uint64_t src_addr;
   uint64_t dst_addr;
   uint32_t mode;
};

static nir_shader *
build_copy_shader(struct radv_device *dev)
{
   nir_builder b = radv_meta_init_shader(MESA_SHADER_COMPUTE, "accel_copy");
   b.shader->info.workgroup_size[0] = 64;

   nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
   nir_ssa_def *wg_id = nir_load_workgroup_id(&b, 32);
   nir_ssa_def *block_size =
      nir_imm_ivec4(&b, b.shader->info.workgroup_size[0], b.shader->info.workgroup_size[1],
                    b.shader->info.workgroup_size[2], 0);

   nir_ssa_def *global_id =
      nir_channel(&b, nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id), 0);

   nir_variable *offset_var =
      nir_variable_create(b.shader, nir_var_shader_temp, glsl_uint_type(), "offset");
   nir_ssa_def *offset = nir_imul(&b, global_id, nir_imm_int(&b, 16));
   nir_store_var(&b, offset_var, offset, 1);

   nir_ssa_def *increment = nir_imul(&b, nir_channel(&b, nir_load_num_workgroups(&b, 32), 0),
                                     nir_imm_int(&b, b.shader->info.workgroup_size[0] * 16));

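   /* Grid-stride loop: each invocation copies 16 bytes at `offset` and then
    * advances by (total invocation count) * 16 until it runs past the end of
    * the data. */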
1355 nir_ssa_def *pconst0 =
1356 nir_load_push_constant(&b, 4, 32, nir_imm_int(&b, 0), .base = 0, .range = 16);
1357 nir_ssa_def *pconst1 =
1358 nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .base = 16, .range = 4);
1359 nir_ssa_def *src_base_addr = nir_pack_64_2x32(&b, nir_channels(&b, pconst0, 3));
1360 nir_ssa_def *dst_base_addr = nir_pack_64_2x32(&b, nir_channels(&b, pconst0, 0xc));
1361 nir_ssa_def *mode = nir_channel(&b, pconst1, 0);
1362
1363 nir_variable *compacted_size_var =
1364 nir_variable_create(b.shader, nir_var_shader_temp, glsl_uint64_t_type(), "compacted_size");
1365 nir_variable *src_offset_var =
1366 nir_variable_create(b.shader, nir_var_shader_temp, glsl_uint_type(), "src_offset");
1367 nir_variable *dst_offset_var =
1368 nir_variable_create(b.shader, nir_var_shader_temp, glsl_uint_type(), "dst_offset");
1369 nir_variable *instance_offset_var =
1370 nir_variable_create(b.shader, nir_var_shader_temp, glsl_uint_type(), "instance_offset");
1371 nir_variable *instance_count_var =
1372 nir_variable_create(b.shader, nir_var_shader_temp, glsl_uint_type(), "instance_count");
1373 nir_variable *value_var =
1374 nir_variable_create(b.shader, nir_var_shader_temp, glsl_vec4_type(), "value");
1375
1376 nir_push_if(&b, nir_ieq(&b, mode, nir_imm_int(&b, COPY_MODE_SERIALIZE)));
1377 {
1378 nir_ssa_def *instance_count = nir_build_load_global(
1379 &b, 1, 32,
1380 nir_iadd(&b, src_base_addr,
1381 nir_imm_int64(&b, offsetof(struct radv_accel_struct_header, instance_count))));
1382 nir_ssa_def *compacted_size = nir_build_load_global(
1383 &b, 1, 64,
1384 nir_iadd(&b, src_base_addr,
1385 nir_imm_int64(&b, offsetof(struct radv_accel_struct_header, compacted_size))));
1386 nir_ssa_def *serialization_size = nir_build_load_global(
1387 &b, 1, 64,
1388 nir_iadd(
1389 &b, src_base_addr,
1390 nir_imm_int64(&b, offsetof(struct radv_accel_struct_header, serialization_size))));
1391
1392 nir_store_var(&b, compacted_size_var, compacted_size, 1);
1393 nir_store_var(
1394 &b, instance_offset_var,
1395 nir_build_load_global(&b, 1, 32,
1396 nir_iadd(&b, src_base_addr,
1397 nir_imm_int64(&b, offsetof(struct radv_accel_struct_header,
1398 instance_offset)))),
1399 1);
1400 nir_store_var(&b, instance_count_var, instance_count, 1);
1401
1402 nir_ssa_def *dst_offset =
1403 nir_iadd(&b, nir_imm_int(&b, sizeof(struct radv_accel_struct_serialization_header)),
1404 nir_imul(&b, instance_count, nir_imm_int(&b, sizeof(uint64_t))));
1405 nir_store_var(&b, src_offset_var, nir_imm_int(&b, 0), 1);
1406 nir_store_var(&b, dst_offset_var, dst_offset, 1);
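
      /* The serialized blob produced here is laid out as
       *
       *   [radv_accel_struct_serialization_header]
       *   [instance_count x uint64 BLAS pointers]
       *   [compacted BVH data]
       *
       * which is why the BVH payload is written starting at dst_offset while
       * the source is read from offset 0. */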

      nir_push_if(&b, nir_ieq(&b, global_id, nir_imm_int(&b, 0)));
      {
         nir_build_store_global(
            &b, serialization_size,
            nir_iadd(&b, dst_base_addr,
                     nir_imm_int64(&b, offsetof(struct radv_accel_struct_serialization_header,
                                                serialization_size))));
         nir_build_store_global(
            &b, compacted_size,
            nir_iadd(&b, dst_base_addr,
                     nir_imm_int64(&b, offsetof(struct radv_accel_struct_serialization_header,
                                                compacted_size))));
         nir_build_store_global(
            &b, nir_u2u64(&b, instance_count),
            nir_iadd(&b, dst_base_addr,
                     nir_imm_int64(&b, offsetof(struct radv_accel_struct_serialization_header,
                                                instance_count))));
      }
      nir_pop_if(&b, NULL);
   }
   nir_push_else(&b, NULL);
   nir_push_if(&b, nir_ieq(&b, mode, nir_imm_int(&b, COPY_MODE_DESERIALIZE)));
   {
      nir_ssa_def *instance_count = nir_build_load_global(
         &b, 1, 32,
         nir_iadd(&b, src_base_addr,
                  nir_imm_int64(
                     &b, offsetof(struct radv_accel_struct_serialization_header, instance_count))));
      nir_ssa_def *src_offset =
         nir_iadd(&b, nir_imm_int(&b, sizeof(struct radv_accel_struct_serialization_header)),
                  nir_imul(&b, instance_count, nir_imm_int(&b, sizeof(uint64_t))));

      nir_ssa_def *header_addr = nir_iadd(&b, src_base_addr, nir_u2u64(&b, src_offset));
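
      /* Deserialization mirrors the layout above: src_offset skips the
       * serialization header and the pointer table, so header_addr points at
       * the acceleration-structure header embedded in the serialized data. */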
      nir_store_var(
         &b, compacted_size_var,
         nir_build_load_global(
            &b, 1, 64,
            nir_iadd(&b, header_addr,
                     nir_imm_int64(&b, offsetof(struct radv_accel_struct_header, compacted_size)))),
         1);
      nir_store_var(
         &b, instance_offset_var,
         nir_build_load_global(&b, 1, 32,
                               nir_iadd(&b, header_addr,
                                        nir_imm_int64(&b, offsetof(struct radv_accel_struct_header,
                                                                   instance_offset)))),
         1);
      nir_store_var(&b, instance_count_var, instance_count, 1);
      nir_store_var(&b, src_offset_var, src_offset, 1);
      nir_store_var(&b, dst_offset_var, nir_imm_int(&b, 0), 1);
   }
   nir_push_else(&b, NULL); /* COPY_MODE_COPY */
   {
      nir_store_var(
         &b, compacted_size_var,
         nir_build_load_global(
            &b, 1, 64,
            nir_iadd(&b, src_base_addr,
                     nir_imm_int64(&b, offsetof(struct radv_accel_struct_header, compacted_size)))),
         1);

      nir_store_var(&b, src_offset_var, nir_imm_int(&b, 0), 1);
      nir_store_var(&b, dst_offset_var, nir_imm_int(&b, 0), 1);
      nir_store_var(&b, instance_offset_var, nir_imm_int(&b, 0), 1);
      nir_store_var(&b, instance_count_var, nir_imm_int(&b, 0), 1);
   }
   nir_pop_if(&b, NULL);
   nir_pop_if(&b, NULL);

   nir_ssa_def *instance_bound =
      nir_imul(&b, nir_imm_int(&b, sizeof(struct radv_bvh_instance_node)),
               nir_load_var(&b, instance_count_var));
   nir_ssa_def *compacted_size = nir_build_load_global(
      &b, 1, 32,
      nir_iadd(&b, src_base_addr,
               nir_imm_int64(&b, offsetof(struct radv_accel_struct_header, compacted_size))));

   nir_push_loop(&b);
   {
      offset = nir_load_var(&b, offset_var);
      nir_push_if(&b, nir_ilt(&b, offset, compacted_size));
      {
         nir_ssa_def *src_offset = nir_iadd(&b, offset, nir_load_var(&b, src_offset_var));
         nir_ssa_def *dst_offset = nir_iadd(&b, offset, nir_load_var(&b, dst_offset_var));
         nir_ssa_def *src_addr = nir_iadd(&b, src_base_addr, nir_u2u64(&b, src_offset));
         nir_ssa_def *dst_addr = nir_iadd(&b, dst_base_addr, nir_u2u64(&b, dst_offset));

         nir_ssa_def *value = nir_build_load_global(&b, 4, 32, src_addr, .align_mul = 16);
         nir_store_var(&b, value_var, value, 0xf);

         nir_ssa_def *instance_offset = nir_isub(&b, offset, nir_load_var(&b, instance_offset_var));
         nir_ssa_def *in_instance_bound =
            nir_iand(&b, nir_uge(&b, offset, nir_load_var(&b, instance_offset_var)),
                     nir_ult(&b, instance_offset, instance_bound));
         nir_ssa_def *instance_start =
            nir_ieq(&b,
                    nir_iand(&b, instance_offset,
                             nir_imm_int(&b, sizeof(struct radv_bvh_instance_node) - 1)),
                    nir_imm_int(&b, 0));
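
         /* Instance nodes are 128 bytes each and packed back to back from
          * instance_offset onwards, so masking with (node size - 1) picks out
          * the first 16-byte chunk of every instance node, and shifting the
          * relative offset right by 7 (dividing by 128, below) yields the
          * instance index. The first 8 bytes of that chunk hold the BLAS
          * pointer that has to be translated during (de)serialization. */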

         nir_push_if(&b, nir_iand(&b, in_instance_bound, instance_start));
         {
            nir_ssa_def *instance_id = nir_ushr(&b, instance_offset, nir_imm_int(&b, 7));

            nir_push_if(&b, nir_ieq(&b, mode, nir_imm_int(&b, COPY_MODE_SERIALIZE)));
            {
               nir_ssa_def *instance_addr =
                  nir_imul(&b, instance_id, nir_imm_int(&b, sizeof(uint64_t)));
               instance_addr =
                  nir_iadd(&b, instance_addr,
                           nir_imm_int(&b, sizeof(struct radv_accel_struct_serialization_header)));
               instance_addr = nir_iadd(&b, dst_base_addr, nir_u2u64(&b, instance_addr));

               nir_build_store_global(&b, nir_channels(&b, value, 3), instance_addr,
                                      .align_mul = 8);
            }
            nir_push_else(&b, NULL);
            {
               nir_ssa_def *instance_addr =
                  nir_imul(&b, instance_id, nir_imm_int(&b, sizeof(uint64_t)));
               instance_addr =
                  nir_iadd(&b, instance_addr,
                           nir_imm_int(&b, sizeof(struct radv_accel_struct_serialization_header)));
               instance_addr = nir_iadd(&b, src_base_addr, nir_u2u64(&b, instance_addr));

               nir_ssa_def *instance_value =
                  nir_build_load_global(&b, 2, 32, instance_addr, .align_mul = 8);

               nir_ssa_def *values[] = {
                  nir_channel(&b, instance_value, 0),
                  nir_channel(&b, instance_value, 1),
                  nir_channel(&b, value, 2),
                  nir_channel(&b, value, 3),
               };

               nir_store_var(&b, value_var, nir_vec(&b, values, 4), 0xf);
            }
            nir_pop_if(&b, NULL);
         }
         nir_pop_if(&b, NULL);

         nir_store_var(&b, offset_var, nir_iadd(&b, offset, increment), 1);

         nir_build_store_global(&b, nir_load_var(&b, value_var), dst_addr, .align_mul = 16);
      }
      nir_push_else(&b, NULL);
      {
         nir_jump(&b, nir_jump_break);
      }
      nir_pop_if(&b, NULL);
   }
   nir_pop_loop(&b, NULL);
   return b.shader;
}

void
radv_device_finish_accel_struct_build_state(struct radv_device *device)
{
   struct radv_meta_state *state = &device->meta_state;
   radv_DestroyPipeline(radv_device_to_handle(device), state->accel_struct_build.copy_pipeline,
                        &state->alloc);
   radv_DestroyPipeline(radv_device_to_handle(device), state->accel_struct_build.internal_pipeline,
                        &state->alloc);
   radv_DestroyPipeline(radv_device_to_handle(device), state->accel_struct_build.leaf_pipeline,
                        &state->alloc);
   radv_DestroyPipelineLayout(radv_device_to_handle(device),
                              state->accel_struct_build.copy_p_layout, &state->alloc);
   radv_DestroyPipelineLayout(radv_device_to_handle(device),
                              state->accel_struct_build.internal_p_layout, &state->alloc);
   radv_DestroyPipelineLayout(radv_device_to_handle(device),
                              state->accel_struct_build.leaf_p_layout, &state->alloc);
}

VkResult
radv_device_init_accel_struct_build_state(struct radv_device *device)
{
   VkResult result;
   nir_shader *leaf_cs = build_leaf_shader(device);
   nir_shader *internal_cs = build_internal_shader(device);
   nir_shader *copy_cs = build_copy_shader(device);

   const VkPipelineLayoutCreateInfo leaf_pl_create_info = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
      .setLayoutCount = 0,
      .pushConstantRangeCount = 1,
      .pPushConstantRanges = &(VkPushConstantRange){VK_SHADER_STAGE_COMPUTE_BIT, 0,
                                                    sizeof(struct build_primitive_constants)},
   };

   result = radv_CreatePipelineLayout(radv_device_to_handle(device), &leaf_pl_create_info,
                                      &device->meta_state.alloc,
                                      &device->meta_state.accel_struct_build.leaf_p_layout);
   if (result != VK_SUCCESS)
      goto fail;

   VkPipelineShaderStageCreateInfo leaf_shader_stage = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
      .stage = VK_SHADER_STAGE_COMPUTE_BIT,
      .module = vk_shader_module_handle_from_nir(leaf_cs),
      .pName = "main",
      .pSpecializationInfo = NULL,
   };

   VkComputePipelineCreateInfo leaf_pipeline_info = {
      .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
      .stage = leaf_shader_stage,
      .flags = 0,
      .layout = device->meta_state.accel_struct_build.leaf_p_layout,
   };

   result = radv_CreateComputePipelines(
      radv_device_to_handle(device), radv_pipeline_cache_to_handle(&device->meta_state.cache), 1,
      &leaf_pipeline_info, NULL, &device->meta_state.accel_struct_build.leaf_pipeline);
   if (result != VK_SUCCESS)
      goto fail;

   const VkPipelineLayoutCreateInfo internal_pl_create_info = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
      .setLayoutCount = 0,
      .pushConstantRangeCount = 1,
      .pPushConstantRanges = &(VkPushConstantRange){VK_SHADER_STAGE_COMPUTE_BIT, 0,
                                                    sizeof(struct build_internal_constants)},
   };

   result = radv_CreatePipelineLayout(radv_device_to_handle(device), &internal_pl_create_info,
                                      &device->meta_state.alloc,
                                      &device->meta_state.accel_struct_build.internal_p_layout);
   if (result != VK_SUCCESS)
      goto fail;

   VkPipelineShaderStageCreateInfo internal_shader_stage = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
      .stage = VK_SHADER_STAGE_COMPUTE_BIT,
      .module = vk_shader_module_handle_from_nir(internal_cs),
      .pName = "main",
      .pSpecializationInfo = NULL,
   };

   VkComputePipelineCreateInfo internal_pipeline_info = {
      .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
      .stage = internal_shader_stage,
      .flags = 0,
      .layout = device->meta_state.accel_struct_build.internal_p_layout,
   };

   result = radv_CreateComputePipelines(
      radv_device_to_handle(device), radv_pipeline_cache_to_handle(&device->meta_state.cache), 1,
      &internal_pipeline_info, NULL, &device->meta_state.accel_struct_build.internal_pipeline);
   if (result != VK_SUCCESS)
      goto fail;

   const VkPipelineLayoutCreateInfo copy_pl_create_info = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
      .setLayoutCount = 0,
      .pushConstantRangeCount = 1,
      .pPushConstantRanges =
         &(VkPushConstantRange){VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(struct copy_constants)},
   };

   result = radv_CreatePipelineLayout(radv_device_to_handle(device), &copy_pl_create_info,
                                      &device->meta_state.alloc,
                                      &device->meta_state.accel_struct_build.copy_p_layout);
   if (result != VK_SUCCESS)
      goto fail;

   VkPipelineShaderStageCreateInfo copy_shader_stage = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
      .stage = VK_SHADER_STAGE_COMPUTE_BIT,
      .module = vk_shader_module_handle_from_nir(copy_cs),
      .pName = "main",
      .pSpecializationInfo = NULL,
   };

   VkComputePipelineCreateInfo copy_pipeline_info = {
      .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
      .stage = copy_shader_stage,
      .flags = 0,
      .layout = device->meta_state.accel_struct_build.copy_p_layout,
   };

   result = radv_CreateComputePipelines(
      radv_device_to_handle(device), radv_pipeline_cache_to_handle(&device->meta_state.cache), 1,
      &copy_pipeline_info, NULL, &device->meta_state.accel_struct_build.copy_pipeline);
   if (result != VK_SUCCESS)
      goto fail;

   ralloc_free(copy_cs);
   ralloc_free(internal_cs);
   ralloc_free(leaf_cs);

   return VK_SUCCESS;

fail:
   radv_device_finish_accel_struct_build_state(device);
   ralloc_free(copy_cs);
   ralloc_free(internal_cs);
   ralloc_free(leaf_cs);
   return result;
}
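
/* Per-info tracking for the batched build below: node_offset is the next
 * free byte in the destination BVH, node_count the number of nodes in the
 * level currently being reduced, and scratch_offset selects which half of
 * the ping-pong scratch buffer holds that level's node ids.
 * instance_offset/instance_count are recorded for the header so instance
 * pointers can be patched during (de)serialization. */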
struct bvh_state {
   uint32_t node_offset;
   uint32_t node_count;
   uint32_t scratch_offset;

   uint32_t instance_offset;
   uint32_t instance_count;
};

VKAPI_ATTR void VKAPI_CALL
radv_CmdBuildAccelerationStructuresKHR(
   VkCommandBuffer commandBuffer, uint32_t infoCount,
   const VkAccelerationStructureBuildGeometryInfoKHR *pInfos,
   const VkAccelerationStructureBuildRangeInfoKHR *const *ppBuildRangeInfos)
{
   RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
   struct radv_meta_saved_state saved_state;

   radv_meta_save(
      &saved_state, cmd_buffer,
      RADV_META_SAVE_COMPUTE_PIPELINE | RADV_META_SAVE_DESCRIPTORS | RADV_META_SAVE_CONSTANTS);
   struct bvh_state *bvh_states = calloc(infoCount, sizeof(struct bvh_state));

   radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer), VK_PIPELINE_BIND_POINT_COMPUTE,
                        cmd_buffer->device->meta_state.accel_struct_build.leaf_pipeline);

   for (uint32_t i = 0; i < infoCount; ++i) {
      RADV_FROM_HANDLE(radv_acceleration_structure, accel_struct,
                       pInfos[i].dstAccelerationStructure);

      struct build_primitive_constants prim_consts = {
         .node_dst_addr = radv_accel_struct_get_va(accel_struct),
         .scratch_addr = pInfos[i].scratchData.deviceAddress,
         .dst_offset = ALIGN(sizeof(struct radv_accel_struct_header), 64) + 128,
         .dst_scratch_offset = 0,
      };
      bvh_states[i].node_offset = prim_consts.dst_offset;
      bvh_states[i].instance_offset = prim_consts.dst_offset;

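      /* Visit instance geometries first (inst == 1), then everything else,
       * so all instance nodes land in one contiguous block starting at
       * instance_offset; the copy shader and (de)serialization rely on that
       * layout. */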
      for (int inst = 1; inst >= 0; --inst) {
         for (unsigned j = 0; j < pInfos[i].geometryCount; ++j) {
            const VkAccelerationStructureGeometryKHR *geom =
               pInfos[i].pGeometries ? &pInfos[i].pGeometries[j] : pInfos[i].ppGeometries[j];

            if ((inst && geom->geometryType != VK_GEOMETRY_TYPE_INSTANCES_KHR) ||
                (!inst && geom->geometryType == VK_GEOMETRY_TYPE_INSTANCES_KHR))
               continue;

            prim_consts.geometry_type = geom->geometryType;
            prim_consts.geometry_id = j | (geom->flags << 28);
            unsigned prim_size;
            switch (geom->geometryType) {
            case VK_GEOMETRY_TYPE_TRIANGLES_KHR:
               prim_consts.vertex_addr =
                  geom->geometry.triangles.vertexData.deviceAddress +
                  ppBuildRangeInfos[i][j].firstVertex * geom->geometry.triangles.vertexStride +
                  (geom->geometry.triangles.indexType != VK_INDEX_TYPE_NONE_KHR
                      ? ppBuildRangeInfos[i][j].primitiveOffset
                      : 0);
               prim_consts.index_addr = geom->geometry.triangles.indexData.deviceAddress +
                                        ppBuildRangeInfos[i][j].primitiveOffset;
               prim_consts.transform_addr = geom->geometry.triangles.transformData.deviceAddress +
                                            ppBuildRangeInfos[i][j].transformOffset;
               prim_consts.vertex_stride = geom->geometry.triangles.vertexStride;
               prim_consts.vertex_format = geom->geometry.triangles.vertexFormat;
               prim_consts.index_format = geom->geometry.triangles.indexType;
               prim_size = 64;
               break;
            case VK_GEOMETRY_TYPE_AABBS_KHR:
               prim_consts.aabb_addr =
                  geom->geometry.aabbs.data.deviceAddress + ppBuildRangeInfos[i][j].primitiveOffset;
               prim_consts.aabb_stride = geom->geometry.aabbs.stride;
               prim_size = 64;
               break;
            case VK_GEOMETRY_TYPE_INSTANCES_KHR:
               prim_consts.instance_data = geom->geometry.instances.data.deviceAddress;
               prim_consts.array_of_pointers = geom->geometry.instances.arrayOfPointers ? 1 : 0;
               prim_size = 128;
               bvh_states[i].instance_count += ppBuildRangeInfos[i][j].primitiveCount;
               break;
            default:
               unreachable("Unknown geometryType");
            }

            radv_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer),
                                  cmd_buffer->device->meta_state.accel_struct_build.leaf_p_layout,
                                  VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(prim_consts),
                                  &prim_consts);
            radv_unaligned_dispatch(cmd_buffer, ppBuildRangeInfos[i][j].primitiveCount, 1, 1);
            prim_consts.dst_offset += prim_size * ppBuildRangeInfos[i][j].primitiveCount;
            prim_consts.dst_scratch_offset += 4 * ppBuildRangeInfos[i][j].primitiveCount;
         }
      }
      bvh_states[i].node_offset = prim_consts.dst_offset;
      bvh_states[i].node_count = prim_consts.dst_scratch_offset / 4;
   }

   radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer), VK_PIPELINE_BIND_POINT_COMPUTE,
                        cmd_buffer->device->meta_state.accel_struct_build.internal_pipeline);
   bool progress = true;
   for (unsigned iter = 0; progress; ++iter) {
      progress = false;
      for (uint32_t i = 0; i < infoCount; ++i) {
         RADV_FROM_HANDLE(radv_acceleration_structure, accel_struct,
                          pInfos[i].dstAccelerationStructure);

         if (iter && bvh_states[i].node_count == 1)
            continue;

         if (!progress) {
            cmd_buffer->state.flush_bits |=
               RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
               radv_src_access_flush(cmd_buffer, VK_ACCESS_2_SHADER_WRITE_BIT_KHR, NULL) |
               radv_dst_access_flush(cmd_buffer,
                                     VK_ACCESS_2_SHADER_READ_BIT_KHR |
                                        VK_ACCESS_2_SHADER_WRITE_BIT_KHR,
                                     NULL);
         }
         progress = true;
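
         /* Reduce the current level: each internal node takes up to four
          * children, and the node ids ping-pong between the two halves of
          * the scratch buffer (offset 0 and node_count * 4 bytes). On the
          * final iteration the root node is written directly behind the
          * header, and the top bit of fill_header flags that pass for the
          * shader. */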
         uint32_t dst_node_count = MAX2(1, DIV_ROUND_UP(bvh_states[i].node_count, 4));
         bool final_iter = dst_node_count == 1;
         uint32_t src_scratch_offset = bvh_states[i].scratch_offset;
         uint32_t dst_scratch_offset = src_scratch_offset ? 0 : bvh_states[i].node_count * 4;
         uint32_t dst_node_offset = bvh_states[i].node_offset;
         if (final_iter)
            dst_node_offset = ALIGN(sizeof(struct radv_accel_struct_header), 64);

         const struct build_internal_constants consts = {
            .node_dst_addr = radv_accel_struct_get_va(accel_struct),
            .scratch_addr = pInfos[i].scratchData.deviceAddress,
            .dst_offset = dst_node_offset,
            .dst_scratch_offset = dst_scratch_offset,
            .src_scratch_offset = src_scratch_offset,
            .fill_header = bvh_states[i].node_count | (final_iter ? 0x80000000U : 0),
         };

         radv_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer),
                               cmd_buffer->device->meta_state.accel_struct_build.internal_p_layout,
                               VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(consts), &consts);
         radv_unaligned_dispatch(cmd_buffer, dst_node_count, 1, 1);
         if (!final_iter)
            bvh_states[i].node_offset += dst_node_count * 128;
         bvh_states[i].node_count = dst_node_count;
         bvh_states[i].scratch_offset = dst_scratch_offset;
      }
   }
   for (uint32_t i = 0; i < infoCount; ++i) {
      RADV_FROM_HANDLE(radv_acceleration_structure, accel_struct,
                       pInfos[i].dstAccelerationStructure);
      const size_t base = offsetof(struct radv_accel_struct_header, compacted_size);
      struct radv_accel_struct_header header;

      header.instance_offset = bvh_states[i].instance_offset;
      header.instance_count = bvh_states[i].instance_count;
      header.compacted_size = bvh_states[i].node_offset;

      /* 16 bytes per invocation, 64 invocations per workgroup */
      header.copy_dispatch_size[0] = DIV_ROUND_UP(header.compacted_size, 16 * 64);
      header.copy_dispatch_size[1] = 1;
      header.copy_dispatch_size[2] = 1;

      header.serialization_size =
         header.compacted_size + align(sizeof(struct radv_accel_struct_serialization_header) +
                                          sizeof(uint64_t) * header.instance_count,
                                       128);

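      /* Only the tail of the header, from compacted_size onwards, is
       * uploaded with the CP here; the fields in front of it (such as the
       * root bounds stored by the internal-node shader) are produced on the
       * GPU during the build. */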
      radv_update_buffer_cp(cmd_buffer,
                            radv_buffer_get_va(accel_struct->bo) + accel_struct->mem_offset + base,
                            (const char *)&header + base, sizeof(header) - base);
   }
   free(bvh_states);
   radv_meta_restore(&saved_state, cmd_buffer);
}

VKAPI_ATTR void VKAPI_CALL
radv_CmdCopyAccelerationStructureKHR(VkCommandBuffer commandBuffer,
                                     const VkCopyAccelerationStructureInfoKHR *pInfo)
{
   RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
   RADV_FROM_HANDLE(radv_acceleration_structure, src, pInfo->src);
   RADV_FROM_HANDLE(radv_acceleration_structure, dst, pInfo->dst);
   struct radv_meta_saved_state saved_state;

   radv_meta_save(
      &saved_state, cmd_buffer,
      RADV_META_SAVE_COMPUTE_PIPELINE | RADV_META_SAVE_DESCRIPTORS | RADV_META_SAVE_CONSTANTS);

   uint64_t src_addr = radv_accel_struct_get_va(src);
   uint64_t dst_addr = radv_accel_struct_get_va(dst);

   radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer), VK_PIPELINE_BIND_POINT_COMPUTE,
                        cmd_buffer->device->meta_state.accel_struct_build.copy_pipeline);

   const struct copy_constants consts = {
      .src_addr = src_addr,
      .dst_addr = dst_addr,
      .mode = COPY_MODE_COPY,
   };

   radv_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer),
                         cmd_buffer->device->meta_state.accel_struct_build.copy_p_layout,
                         VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(consts), &consts);

   cmd_buffer->state.flush_bits |=
      radv_dst_access_flush(cmd_buffer, VK_ACCESS_2_INDIRECT_COMMAND_READ_BIT_KHR, NULL);

   radv_indirect_dispatch(cmd_buffer, src->bo,
                          src_addr + offsetof(struct radv_accel_struct_header, copy_dispatch_size));
   radv_meta_restore(&saved_state, cmd_buffer);
}

VKAPI_ATTR void VKAPI_CALL
radv_GetDeviceAccelerationStructureCompatibilityKHR(
   VkDevice _device, const VkAccelerationStructureVersionInfoKHR *pVersionInfo,
   VkAccelerationStructureCompatibilityKHR *pCompatibility)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   uint8_t zero[VK_UUID_SIZE] = {
      0,
   };
   bool compat =
      memcmp(pVersionInfo->pVersionData, device->physical_device->driver_uuid, VK_UUID_SIZE) == 0 &&
      memcmp(pVersionInfo->pVersionData + VK_UUID_SIZE, zero, VK_UUID_SIZE) == 0;
   *pCompatibility = compat ? VK_ACCELERATION_STRUCTURE_COMPATIBILITY_COMPATIBLE_KHR
                            : VK_ACCELERATION_STRUCTURE_COMPATIBILITY_INCOMPATIBLE_KHR;
}

VKAPI_ATTR VkResult VKAPI_CALL
radv_CopyMemoryToAccelerationStructureKHR(VkDevice _device,
                                          VkDeferredOperationKHR deferredOperation,
                                          const VkCopyMemoryToAccelerationStructureInfoKHR *pInfo)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   RADV_FROM_HANDLE(radv_acceleration_structure, accel_struct, pInfo->dst);

   char *base = device->ws->buffer_map(accel_struct->bo);
   if (!base)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   base += accel_struct->mem_offset;
   const struct radv_accel_struct_header *header = (const struct radv_accel_struct_header *)base;

   const char *src = pInfo->src.hostAddress;
   struct radv_accel_struct_serialization_header *src_header = (void *)src;
   src += sizeof(*src_header) + sizeof(uint64_t) * src_header->instance_count;

   memcpy(base, src, src_header->compacted_size);

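   /* Patch the instance pointers back in: the low 6 bits of each 128-byte
    * instance node's first qword carry flags that were just copied with the
    * BVH data, while the serialized pointer table supplies the (64-byte
    * aligned) BLAS addresses for the upper bits. */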
   for (unsigned i = 0; i < src_header->instance_count; ++i) {
      uint64_t *p = (uint64_t *)(base + i * 128 + header->instance_offset);
      *p = (*p & 63) | src_header->instances[i];
   }

   device->ws->buffer_unmap(accel_struct->bo);
   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
radv_CopyAccelerationStructureToMemoryKHR(VkDevice _device,
                                          VkDeferredOperationKHR deferredOperation,
                                          const VkCopyAccelerationStructureToMemoryInfoKHR *pInfo)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   RADV_FROM_HANDLE(radv_acceleration_structure, accel_struct, pInfo->src);

   const char *base = device->ws->buffer_map(accel_struct->bo);
   if (!base)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   base += accel_struct->mem_offset;
   const struct radv_accel_struct_header *header = (const struct radv_accel_struct_header *)base;

   char *dst = pInfo->dst.hostAddress;
   struct radv_accel_struct_serialization_header *dst_header = (void *)dst;
   dst += sizeof(*dst_header) + sizeof(uint64_t) * header->instance_count;

   memcpy(dst_header->driver_uuid, device->physical_device->driver_uuid, VK_UUID_SIZE);
   memset(dst_header->accel_struct_compat, 0, VK_UUID_SIZE);

   dst_header->serialization_size = header->serialization_size;
   dst_header->compacted_size = header->compacted_size;
   dst_header->instance_count = header->instance_count;

   memcpy(dst, base, header->compacted_size);

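   /* Extract the raw BLAS addresses into the pointer table; masking with
    * ~63 drops the per-instance flag bits so only the 64-byte aligned
    * address is serialized. */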
   for (unsigned i = 0; i < header->instance_count; ++i) {
      dst_header->instances[i] =
         *(const uint64_t *)(base + i * 128 + header->instance_offset) & ~63ull;
   }

   device->ws->buffer_unmap(accel_struct->bo);
   return VK_SUCCESS;
}

VKAPI_ATTR void VKAPI_CALL
radv_CmdCopyMemoryToAccelerationStructureKHR(
   VkCommandBuffer commandBuffer, const VkCopyMemoryToAccelerationStructureInfoKHR *pInfo)
{
   RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
   RADV_FROM_HANDLE(radv_acceleration_structure, dst, pInfo->dst);
   struct radv_meta_saved_state saved_state;

   radv_meta_save(
      &saved_state, cmd_buffer,
      RADV_META_SAVE_COMPUTE_PIPELINE | RADV_META_SAVE_DESCRIPTORS | RADV_META_SAVE_CONSTANTS);

   uint64_t dst_addr = radv_accel_struct_get_va(dst);

   radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer), VK_PIPELINE_BIND_POINT_COMPUTE,
                        cmd_buffer->device->meta_state.accel_struct_build.copy_pipeline);

   const struct copy_constants consts = {
      .src_addr = pInfo->src.deviceAddress,
      .dst_addr = dst_addr,
      .mode = COPY_MODE_DESERIALIZE,
   };

   radv_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer),
                         cmd_buffer->device->meta_state.accel_struct_build.copy_p_layout,
                         VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(consts), &consts);

   radv_CmdDispatch(commandBuffer, 512, 1, 1);
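
   /* Unlike the copy and serialize paths, there is no copy_dispatch_size at
    * a fixed offset in the serialized source to drive an indirect dispatch,
    * so a fixed 512 workgroups are launched and the shader's grid-stride
    * loop covers whatever size the embedded header specifies. */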
   radv_meta_restore(&saved_state, cmd_buffer);
}

VKAPI_ATTR void VKAPI_CALL
radv_CmdCopyAccelerationStructureToMemoryKHR(
   VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureToMemoryInfoKHR *pInfo)
{
   RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
   RADV_FROM_HANDLE(radv_acceleration_structure, src, pInfo->src);
   struct radv_meta_saved_state saved_state;

   radv_meta_save(
      &saved_state, cmd_buffer,
      RADV_META_SAVE_COMPUTE_PIPELINE | RADV_META_SAVE_DESCRIPTORS | RADV_META_SAVE_CONSTANTS);

   uint64_t src_addr = radv_accel_struct_get_va(src);

   radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer), VK_PIPELINE_BIND_POINT_COMPUTE,
                        cmd_buffer->device->meta_state.accel_struct_build.copy_pipeline);

   const struct copy_constants consts = {
      .src_addr = src_addr,
      .dst_addr = pInfo->dst.deviceAddress,
      .mode = COPY_MODE_SERIALIZE,
   };

   radv_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer),
                         cmd_buffer->device->meta_state.accel_struct_build.copy_p_layout,
                         VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(consts), &consts);

   cmd_buffer->state.flush_bits |=
      radv_dst_access_flush(cmd_buffer, VK_ACCESS_2_INDIRECT_COMMAND_READ_BIT_KHR, NULL);

   radv_indirect_dispatch(cmd_buffer, src->bo,
                          src_addr + offsetof(struct radv_accel_struct_header, copy_dispatch_size));
   radv_meta_restore(&saved_state, cmd_buffer);

   /* Set the header of the serialized data: the driver UUID followed by a
    * zeroed compatibility UUID, which is what
    * radv_GetDeviceAccelerationStructureCompatibilityKHR checks against. */
   uint8_t header_data[2 * VK_UUID_SIZE] = {0};
   memcpy(header_data, cmd_buffer->device->physical_device->driver_uuid, VK_UUID_SIZE);

   radv_update_buffer_cp(cmd_buffer, pInfo->dst.deviceAddress, header_data, sizeof(header_data));
}

VKAPI_ATTR void VKAPI_CALL
radv_CmdBuildAccelerationStructuresIndirectKHR(
   VkCommandBuffer commandBuffer, uint32_t infoCount,
   const VkAccelerationStructureBuildGeometryInfoKHR *pInfos,
   const VkDeviceAddress *pIndirectDeviceAddresses, const uint32_t *pIndirectStrides,
   const uint32_t *const *ppMaxPrimitiveCounts)
{
   unreachable("Unimplemented");
}