/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/macros.h"
#include "util/mesa-sha1.h"
#include "util/u_atomic.h"
#include "vulkan/util/vk_util.h"
#include "radv_debug.h"
#include "radv_private.h"
#include "radv_shader.h"

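/* A cache entry as it exists both in memory and in the serialized cache
 * blob: the SHA-1 pipeline key, the serialized size of each stage's binary,
 * the number of trailing stack sizes, the live shader variant pointers
 * (cleared whenever the entry is written out), and a trailing blob holding
 * the binaries followed by the stack sizes.
 */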
struct cache_entry {
   union {
      unsigned char sha1[20];
      uint32_t sha1_dw[5];
   };
   uint32_t binary_sizes[MESA_SHADER_STAGES];
   uint32_t num_stack_sizes;
   struct radv_shader_variant *variants[MESA_SHADER_STAGES];
   char code[0];
};

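/* Locking is skipped entirely when the application created the cache with
 * VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT, i.e. it promises
 * to synchronize access to the cache itself.
 */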
static void
radv_pipeline_cache_lock(struct radv_pipeline_cache *cache)
{
   if (cache->flags & VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT)
      return;

   mtx_lock(&cache->mutex);
}

static void
radv_pipeline_cache_unlock(struct radv_pipeline_cache *cache)
{
   if (cache->flags & VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT)
      return;

   mtx_unlock(&cache->mutex);
}

void
radv_pipeline_cache_init(struct radv_pipeline_cache *cache, struct radv_device *device)
{
   vk_object_base_init(&device->vk, &cache->base, VK_OBJECT_TYPE_PIPELINE_CACHE);

   cache->device = device;
   mtx_init(&cache->mutex, mtx_plain);
   cache->flags = 0;

   cache->modified = false;
   cache->kernel_count = 0;
   cache->total_size = 0;
   cache->table_size = 1024;
   const size_t byte_size = cache->table_size * sizeof(cache->hash_table[0]);
   cache->hash_table = malloc(byte_size);

   /* We don't consider allocation failure fatal; we just start with a 0-sized
    * cache. Also disable caching when we want to keep shader debug info, since
    * we don't get the debug info on cached shaders.
    */
   if (cache->hash_table == NULL || (device->instance->debug_flags & RADV_DEBUG_NO_CACHE))
      cache->table_size = 0;
   else
      memset(cache->hash_table, 0, byte_size);
}

void
radv_pipeline_cache_finish(struct radv_pipeline_cache *cache)
{
   for (unsigned i = 0; i < cache->table_size; ++i)
      if (cache->hash_table[i]) {
         for (int j = 0; j < MESA_SHADER_STAGES; ++j) {
            if (cache->hash_table[i]->variants[j])
               radv_shader_variant_destroy(cache->device, cache->hash_table[i]->variants[j]);
         }
         vk_free(&cache->alloc, cache->hash_table[i]);
      }
   mtx_destroy(&cache->mutex);
   free(cache->hash_table);

   vk_object_base_finish(&cache->base);
}

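/* Serialized size of an entry: the fixed header plus every stage's binary
 * and the trailing stack sizes, padded so that back-to-back entries stay
 * aligned to the entry alignment.
 */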
static uint32_t
entry_size(struct cache_entry *entry)
{
   size_t ret = sizeof(*entry);
   for (int i = 0; i < MESA_SHADER_STAGES; ++i)
      if (entry->binary_sizes[i])
         ret += entry->binary_sizes[i];
   ret += sizeof(struct radv_pipeline_shader_stack_size) * entry->num_stack_sizes;
   ret = align(ret, alignof(struct cache_entry));
   return ret;
}

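/* Computes the SHA-1 cache key for a pipeline from the shader modules, entry
 * point names, specialization constants, pipeline layout and compile flags.
 */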
void
radv_hash_shaders(unsigned char *hash, const VkPipelineShaderStageCreateInfo **stages,
                  const struct radv_pipeline_layout *layout, const struct radv_pipeline_key *key,
                  uint32_t flags)
{
   struct mesa_sha1 ctx;

   _mesa_sha1_init(&ctx);
   if (key)
      _mesa_sha1_update(&ctx, key, sizeof(*key));
   if (layout)
      _mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));

   for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
      if (stages[i]) {
         RADV_FROM_HANDLE(vk_shader_module, module, stages[i]->module);
         const VkSpecializationInfo *spec_info = stages[i]->pSpecializationInfo;

         _mesa_sha1_update(&ctx, module->sha1, sizeof(module->sha1));
         _mesa_sha1_update(&ctx, stages[i]->pName, strlen(stages[i]->pName));
         if (spec_info && spec_info->mapEntryCount) {
            _mesa_sha1_update(&ctx, spec_info->pMapEntries,
                              spec_info->mapEntryCount * sizeof spec_info->pMapEntries[0]);
            _mesa_sha1_update(&ctx, spec_info->pData, spec_info->dataSize);
         }
      }
   }
   _mesa_sha1_update(&ctx, &flags, 4);
   _mesa_sha1_final(&ctx, hash);
}

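/* Same as radv_hash_shaders(), but for ray tracing pipelines: additionally
 * hashes the shader groups and, unless the stack size is dynamic, the
 * maximum ray recursion depth.
 */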
void
radv_hash_rt_shaders(unsigned char *hash, const VkRayTracingPipelineCreateInfoKHR *pCreateInfo,
                     uint32_t flags)
{
   RADV_FROM_HANDLE(radv_pipeline_layout, layout, pCreateInfo->layout);
   struct mesa_sha1 ctx;

   _mesa_sha1_init(&ctx);
   if (layout)
      _mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));

   for (uint32_t i = 0; i < pCreateInfo->stageCount; ++i) {
      RADV_FROM_HANDLE(vk_shader_module, module, pCreateInfo->pStages[i].module);
      const VkSpecializationInfo *spec_info = pCreateInfo->pStages[i].pSpecializationInfo;

      _mesa_sha1_update(&ctx, module->sha1, sizeof(module->sha1));
      _mesa_sha1_update(&ctx, pCreateInfo->pStages[i].pName, strlen(pCreateInfo->pStages[i].pName));
      if (spec_info && spec_info->mapEntryCount) {
         _mesa_sha1_update(&ctx, spec_info->pMapEntries,
                           spec_info->mapEntryCount * sizeof spec_info->pMapEntries[0]);
         _mesa_sha1_update(&ctx, spec_info->pData, spec_info->dataSize);
      }
   }

   _mesa_sha1_update(&ctx, pCreateInfo->pGroups,
                     pCreateInfo->groupCount * sizeof(*pCreateInfo->pGroups));

   if (!radv_rt_pipeline_has_dynamic_stack_size(pCreateInfo))
      _mesa_sha1_update(&ctx, &pCreateInfo->maxPipelineRayRecursionDepth, 4);
   _mesa_sha1_update(&ctx, &flags, 4);
   _mesa_sha1_final(&ctx, hash);
}

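/* The cache is an open-addressed hash table with linear probing, keyed on
 * the first dword of the SHA-1. The table is kept at most half full (see
 * radv_pipeline_cache_add_entry()), so on a miss probing always terminates
 * at an empty slot.
 */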
static struct cache_entry *
radv_pipeline_cache_search_unlocked(struct radv_pipeline_cache *cache, const unsigned char *sha1)
{
   const uint32_t mask = cache->table_size - 1;
   const uint32_t start = (*(uint32_t *)sha1);

   if (cache->table_size == 0)
      return NULL;

   for (uint32_t i = 0; i < cache->table_size; i++) {
      const uint32_t index = (start + i) & mask;
      struct cache_entry *entry = cache->hash_table[index];

      if (!entry)
         return NULL;

      if (memcmp(entry->sha1, sha1, sizeof(entry->sha1)) == 0) {
         return entry;
      }
   }

   unreachable("hash table should never be full");
}

static struct cache_entry *
radv_pipeline_cache_search(struct radv_pipeline_cache *cache, const unsigned char *sha1)
{
   struct cache_entry *entry;

   radv_pipeline_cache_lock(cache);

   entry = radv_pipeline_cache_search_unlocked(cache, sha1);

   radv_pipeline_cache_unlock(cache);

   return entry;
}

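/* Inserts the entry into the first free slot along its probe sequence. The
 * caller must guarantee there is room, i.e. that the table is less than
 * half full.
 */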
static void
radv_pipeline_cache_set_entry(struct radv_pipeline_cache *cache, struct cache_entry *entry)
{
   const uint32_t mask = cache->table_size - 1;
   const uint32_t start = entry->sha1_dw[0];

   /* We'll always be able to insert when we get here. */
   assert(cache->kernel_count < cache->table_size / 2);

   for (uint32_t i = 0; i < cache->table_size; i++) {
      const uint32_t index = (start + i) & mask;
      if (!cache->hash_table[index]) {
         cache->hash_table[index] = entry;
         break;
      }
   }

   cache->total_size += entry_size(entry);
   cache->kernel_count++;
}

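/* Doubles the hash table and re-inserts every live entry; the entries
 * themselves are reused, only the table of pointers is reallocated.
 */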
static VkResult
radv_pipeline_cache_grow(struct radv_pipeline_cache *cache)
{
   const uint32_t table_size = cache->table_size * 2;
   const uint32_t old_table_size = cache->table_size;
   const size_t byte_size = table_size * sizeof(cache->hash_table[0]);
   struct cache_entry **table;
   struct cache_entry **old_table = cache->hash_table;

   table = malloc(byte_size);
   if (table == NULL)
      return vk_error(cache, VK_ERROR_OUT_OF_HOST_MEMORY);

   cache->hash_table = table;
   cache->table_size = table_size;
   cache->kernel_count = 0;
   cache->total_size = 0;

   memset(cache->hash_table, 0, byte_size);
   for (uint32_t i = 0; i < old_table_size; i++) {
      struct cache_entry *entry = old_table[i];
      if (!entry)
         continue;

      radv_pipeline_cache_set_entry(cache, entry);
   }

   free(old_table);

   return VK_SUCCESS;
}

static void
radv_pipeline_cache_add_entry(struct radv_pipeline_cache *cache, struct cache_entry *entry)
{
   if (cache->kernel_count == cache->table_size / 2)
      radv_pipeline_cache_grow(cache);

   /* Failing to grow the hash table isn't fatal, but it may mean we don't
    * have enough space to add this new kernel. Only add it if there's room.
    */
   if (cache->kernel_count < cache->table_size / 2)
      radv_pipeline_cache_set_entry(cache, entry);
}

static bool
radv_is_cache_disabled(struct radv_device *device)
{
   /* Pipeline caches can be disabled with RADV_DEBUG=nocache, with
    * MESA_GLSL_CACHE_DISABLE=1, and when VK_AMD_shader_info is requested.
    */
   return (device->instance->debug_flags & RADV_DEBUG_NO_CACHE);
}

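/* Looks up the pipeline key in the given cache (falling back to the device's
 * in-memory cache when none is provided), then in the on-disk cache on a
 * miss. On a hit, creates any missing shader variants from the cached
 * binaries, returns them with a reference owned by the caller, and reports
 * whether the hit came from the application-provided cache.
 */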
bool
radv_create_shader_variants_from_pipeline_cache(
   struct radv_device *device, struct radv_pipeline_cache *cache, const unsigned char *sha1,
   struct radv_shader_variant **variants, struct radv_pipeline_shader_stack_size **stack_sizes,
   uint32_t *num_stack_sizes, bool *found_in_application_cache)
{
   struct cache_entry *entry;

   if (!cache) {
      cache = device->mem_cache;
      *found_in_application_cache = false;
   }

   radv_pipeline_cache_lock(cache);

   entry = radv_pipeline_cache_search_unlocked(cache, sha1);

   if (!entry) {
      *found_in_application_cache = false;

      /* Don't cache when we want debug info, since it isn't
       * stored in the cache.
       */
      if (radv_is_cache_disabled(device) || !device->physical_device->disk_cache) {
         radv_pipeline_cache_unlock(cache);
         return false;
      }

      uint8_t disk_sha1[20];
      disk_cache_compute_key(device->physical_device->disk_cache, sha1, 20, disk_sha1);

      entry =
         (struct cache_entry *)disk_cache_get(device->physical_device->disk_cache, disk_sha1, NULL);
      if (!entry) {
         radv_pipeline_cache_unlock(cache);
         return false;
      } else {
         size_t size = entry_size(entry);
         struct cache_entry *new_entry =
            vk_alloc(&cache->alloc, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_CACHE);
         if (!new_entry) {
            free(entry);
            radv_pipeline_cache_unlock(cache);
            return false;
         }

         memcpy(new_entry, entry, entry_size(entry));
         free(entry);
         entry = new_entry;

         if (!(device->instance->debug_flags & RADV_DEBUG_NO_MEMORY_CACHE) ||
             cache != device->mem_cache)
            radv_pipeline_cache_add_entry(cache, new_entry);
      }
   }

   char *p = entry->code;
   for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
      if (!entry->variants[i] && entry->binary_sizes[i]) {
         struct radv_shader_binary *binary = calloc(1, entry->binary_sizes[i]);
         memcpy(binary, p, entry->binary_sizes[i]);
         p += entry->binary_sizes[i];

         entry->variants[i] = radv_shader_variant_create(device, binary, false, true);
         free(binary);
      } else if (entry->binary_sizes[i]) {
         p += entry->binary_sizes[i];
      }
   }

   memcpy(variants, entry->variants, sizeof(entry->variants));

   if (num_stack_sizes) {
      *num_stack_sizes = entry->num_stack_sizes;
      if (entry->num_stack_sizes) {
         *stack_sizes = malloc(entry->num_stack_sizes * sizeof(**stack_sizes));
         memcpy(*stack_sizes, p, entry->num_stack_sizes * sizeof(**stack_sizes));
      }
   }

   if (device->instance->debug_flags & RADV_DEBUG_NO_MEMORY_CACHE && cache == device->mem_cache)
      vk_free(&cache->alloc, entry);
   else {
      for (int i = 0; i < MESA_SHADER_STAGES; ++i)
         if (entry->variants[i])
            p_atomic_inc(&entry->variants[i]->ref_count);
   }

   radv_pipeline_cache_unlock(cache);
   return true;
}

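/* Adds the compiled variants and their binaries to the cache under the given
 * SHA-1 key. If an entry for this key already exists (e.g. inserted by
 * another thread), the cached variants win and replace the caller's. New
 * entries are also written through to the on-disk cache, except for meta
 * shaders.
 */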
void
radv_pipeline_cache_insert_shaders(struct radv_device *device, struct radv_pipeline_cache *cache,
                                   const unsigned char *sha1, struct radv_shader_variant **variants,
                                   struct radv_shader_binary *const *binaries,
                                   const struct radv_pipeline_shader_stack_size *stack_sizes,
                                   uint32_t num_stack_sizes)
{
   if (!cache)
      cache = device->mem_cache;

   radv_pipeline_cache_lock(cache);
   struct cache_entry *entry = radv_pipeline_cache_search_unlocked(cache, sha1);
   if (entry) {
      for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
         if (entry->variants[i]) {
            radv_shader_variant_destroy(cache->device, variants[i]);
            variants[i] = entry->variants[i];
         } else {
            entry->variants[i] = variants[i];
         }
         if (variants[i])
            p_atomic_inc(&variants[i]->ref_count);
      }
      radv_pipeline_cache_unlock(cache);
      return;
   }

   /* Don't cache when we want debug info, since it isn't
    * stored in the cache.
    */
   if (radv_is_cache_disabled(device)) {
      radv_pipeline_cache_unlock(cache);
      return;
   }

   size_t size = sizeof(*entry) + sizeof(*stack_sizes) * num_stack_sizes;
   for (int i = 0; i < MESA_SHADER_STAGES; ++i)
      if (variants[i])
         size += binaries[i]->total_size;
   const size_t size_without_align = size;
   size = align(size_without_align, alignof(struct cache_entry));

   entry = vk_alloc(&cache->alloc, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_CACHE);
   if (!entry) {
      radv_pipeline_cache_unlock(cache);
      return;
   }

   memset(entry, 0, sizeof(*entry));
   memcpy(entry->sha1, sha1, 20);

   char *p = entry->code;

   for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
      if (!variants[i])
         continue;

      entry->binary_sizes[i] = binaries[i]->total_size;

      memcpy(p, binaries[i], binaries[i]->total_size);
      p += binaries[i]->total_size;
   }

   if (num_stack_sizes) {
      memcpy(p, stack_sizes, sizeof(*stack_sizes) * num_stack_sizes);
      p += sizeof(*stack_sizes) * num_stack_sizes;
   }
   entry->num_stack_sizes = num_stack_sizes;

   /* Make valgrind happy by filling the alignment hole at the end. */
   assert(p == (char *)entry + size_without_align);
   assert(sizeof(*entry) + (p - entry->code) == size_without_align);
   memset((char *)entry + size_without_align, 0, size - size_without_align);

   /* Always add cache items to disk. This will allow collection of
    * compiled shaders by third parties such as Steam, even if the app
    * implements its own pipeline cache.
    *
    * Make sure to exclude meta shaders because they are stored in a different cache file.
    */
   if (device->physical_device->disk_cache && cache != &device->meta_state.cache) {
      uint8_t disk_sha1[20];
      disk_cache_compute_key(device->physical_device->disk_cache, sha1, 20, disk_sha1);

      disk_cache_put(device->physical_device->disk_cache, disk_sha1, entry, entry_size(entry),
                     NULL);
   }

   if (device->instance->debug_flags & RADV_DEBUG_NO_MEMORY_CACHE && cache == device->mem_cache) {
      vk_free2(&cache->alloc, NULL, entry);
      radv_pipeline_cache_unlock(cache);
      return;
   }

   /* We delay setting the variants so we have reproducible disk cache
    * items.
    */
   for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
      if (!variants[i])
         continue;

      entry->variants[i] = variants[i];
      p_atomic_inc(&variants[i]->ref_count);
   }

   radv_pipeline_cache_add_entry(cache, entry);

   cache->modified = true;
   radv_pipeline_cache_unlock(cache);
}

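/* Deserializes an application-provided cache blob: validates the Vulkan
 * pipeline cache header (version, vendor, device and cache UUID), then
 * copies each whole entry into the hash table, dropping the stale variant
 * pointers.
 */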
bool
radv_pipeline_cache_load(struct radv_pipeline_cache *cache, const void *data, size_t size)
{
   struct radv_device *device = cache->device;
   struct vk_pipeline_cache_header header;

   if (size < sizeof(header))
      return false;
   memcpy(&header, data, sizeof(header));
   if (header.header_size < sizeof(header))
      return false;
   if (header.header_version != VK_PIPELINE_CACHE_HEADER_VERSION_ONE)
      return false;
   if (header.vendor_id != ATI_VENDOR_ID)
      return false;
   if (header.device_id != device->physical_device->rad_info.pci_id)
      return false;
   if (memcmp(header.uuid, device->physical_device->cache_uuid, VK_UUID_SIZE) != 0)
      return false;

   char *end = (char *)data + size;
   char *p = (char *)data + header.header_size;

   while (end - p >= sizeof(struct cache_entry)) {
      struct cache_entry *entry = (struct cache_entry *)p;
      struct cache_entry *dest_entry;
      size_t size_of_entry = entry_size(entry);
      if (end - p < size_of_entry)
         break;

      dest_entry = vk_alloc(&cache->alloc, size_of_entry, 8, VK_SYSTEM_ALLOCATION_SCOPE_CACHE);
      if (dest_entry) {
         memcpy(dest_entry, entry, size_of_entry);
         for (int i = 0; i < MESA_SHADER_STAGES; ++i)
            dest_entry->variants[i] = NULL;
         radv_pipeline_cache_add_entry(cache, dest_entry);
      }
      p += size_of_entry;
   }

   return true;
}

VkResult
radv_CreatePipelineCache(VkDevice _device, const VkPipelineCacheCreateInfo *pCreateInfo,
                         const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   struct radv_pipeline_cache *cache;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO);

   cache = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*cache), 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cache == NULL)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   if (pAllocator)
      cache->alloc = *pAllocator;
   else
      cache->alloc = device->vk.alloc;

   radv_pipeline_cache_init(cache, device);
   cache->flags = pCreateInfo->flags;

   if (pCreateInfo->initialDataSize > 0) {
      radv_pipeline_cache_load(cache, pCreateInfo->pInitialData, pCreateInfo->initialDataSize);
   }

   *pPipelineCache = radv_pipeline_cache_to_handle(cache);

   return VK_SUCCESS;
}

void
radv_DestroyPipelineCache(VkDevice _device, VkPipelineCache _cache,
                          const VkAllocationCallbacks *pAllocator)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   RADV_FROM_HANDLE(radv_pipeline_cache, cache, _cache);

   if (!cache)
      return;

   radv_pipeline_cache_finish(cache);
   vk_free2(&device->vk.alloc, pAllocator, cache);
}

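/* Serializes the cache into the application-provided buffer: a Vulkan
 * pipeline cache header followed by the raw entries, with the in-memory
 * variant pointers cleared. Writes as many whole entries as fit and returns
 * VK_INCOMPLETE when the buffer is too small.
 */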
VkResult
radv_GetPipelineCacheData(VkDevice _device, VkPipelineCache _cache, size_t *pDataSize, void *pData)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   RADV_FROM_HANDLE(radv_pipeline_cache, cache, _cache);
   struct vk_pipeline_cache_header *header;
   VkResult result = VK_SUCCESS;

   radv_pipeline_cache_lock(cache);

   const size_t size = sizeof(*header) + cache->total_size;
   if (pData == NULL) {
      radv_pipeline_cache_unlock(cache);
      *pDataSize = size;
      return VK_SUCCESS;
   }
   if (*pDataSize < sizeof(*header)) {
      radv_pipeline_cache_unlock(cache);
      *pDataSize = 0;
      return VK_INCOMPLETE;
   }
   void *p = pData, *end = (char *)pData + *pDataSize;
   header = p;
   header->header_size = align(sizeof(*header), alignof(struct cache_entry));
   header->header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE;
   header->vendor_id = ATI_VENDOR_ID;
   header->device_id = device->physical_device->rad_info.pci_id;
   memcpy(header->uuid, device->physical_device->cache_uuid, VK_UUID_SIZE);
   p = (char *)p + header->header_size;

   struct cache_entry *entry;
   for (uint32_t i = 0; i < cache->table_size; i++) {
      if (!cache->hash_table[i])
         continue;
      entry = cache->hash_table[i];
      const uint32_t size_of_entry = entry_size(entry);
      if ((char *)end < (char *)p + size_of_entry) {
         result = VK_INCOMPLETE;
         break;
      }

      memcpy(p, entry, size_of_entry);
      for (int j = 0; j < MESA_SHADER_STAGES; ++j)
         ((struct cache_entry *)p)->variants[j] = NULL;
      p = (char *)p + size_of_entry;
   }
   *pDataSize = (char *)p - (char *)pData;

   radv_pipeline_cache_unlock(cache);
   return result;
}

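/* Moves entries from src into dst, skipping keys dst already has. Entries
 * are transferred by pointer, so src loses ownership of everything that was
 * moved.
 */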
static void
radv_pipeline_cache_merge(struct radv_pipeline_cache *dst, struct radv_pipeline_cache *src)
{
   for (uint32_t i = 0; i < src->table_size; i++) {
      struct cache_entry *entry = src->hash_table[i];
      if (!entry || radv_pipeline_cache_search(dst, entry->sha1))
         continue;

      radv_pipeline_cache_add_entry(dst, entry);

      src->hash_table[i] = NULL;
   }
}

VkResult
radv_MergePipelineCaches(VkDevice _device, VkPipelineCache destCache, uint32_t srcCacheCount,
                         const VkPipelineCache *pSrcCaches)
{
   RADV_FROM_HANDLE(radv_pipeline_cache, dst, destCache);

   for (uint32_t i = 0; i < srcCacheCount; i++) {
      RADV_FROM_HANDLE(radv_pipeline_cache, src, pSrcCaches[i]);

      radv_pipeline_cache_merge(dst, src);
   }

   return VK_SUCCESS;
}