/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/blob.h"
#include "util/hash_table.h"
#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/mesa-sha1.h"
#include "nir/nir_serialize.h"
#include "anv_private.h"
#include "nir/nir_xfb_info.h"
#include "vulkan/util/vk_util.h"

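/* Allocate an anv_shader_bin together with all of its trailing data (key,
 * prog_data, relocations, params, xfb info, and bind-map tables) in a single
 * vk_multialloc allocation, then copy the compiled kernel into the
 * instruction state pool and patch its relocations in place.
 */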
struct anv_shader_bin *
anv_shader_bin_create(struct anv_device *device,
                      gl_shader_stage stage,
                      const void *key_data, uint32_t key_size,
                      const void *kernel_data, uint32_t kernel_size,
                      const struct brw_stage_prog_data *prog_data_in,
                      uint32_t prog_data_size,
                      const struct brw_compile_stats *stats, uint32_t num_stats,
                      const nir_xfb_info *xfb_info_in,
                      const struct anv_pipeline_bind_map *bind_map)
{
   VK_MULTIALLOC(ma);
   VK_MULTIALLOC_DECL(&ma, struct anv_shader_bin, shader, 1);
   VK_MULTIALLOC_DECL_SIZE(&ma, struct anv_shader_bin_key, key,
                           sizeof(*key) + key_size);
   VK_MULTIALLOC_DECL_SIZE(&ma, struct brw_stage_prog_data, prog_data,
                           prog_data_size);
   VK_MULTIALLOC_DECL(&ma, struct brw_shader_reloc, prog_data_relocs,
                      prog_data_in->num_relocs);
   VK_MULTIALLOC_DECL(&ma, uint32_t, prog_data_param, prog_data_in->nr_params);

   VK_MULTIALLOC_DECL_SIZE(&ma, nir_xfb_info, xfb_info,
                           xfb_info_in == NULL ? 0 :
                           nir_xfb_info_size(xfb_info_in->output_count));

   VK_MULTIALLOC_DECL(&ma, struct anv_pipeline_binding, surface_to_descriptor,
                      bind_map->surface_count);
   VK_MULTIALLOC_DECL(&ma, struct anv_pipeline_binding, sampler_to_descriptor,
                      bind_map->sampler_count);

   if (!vk_multialloc_alloc(&ma, &device->vk.alloc,
                            VK_SYSTEM_ALLOCATION_SCOPE_DEVICE))
      return NULL;

   shader->ref_cnt = 1;

   shader->stage = stage;

   key->size = key_size;
   memcpy(key->data, key_data, key_size);
   shader->key = key;

   shader->kernel =
      anv_state_pool_alloc(&device->instruction_state_pool, kernel_size, 64);
   memcpy(shader->kernel.map, kernel_data, kernel_size);
   shader->kernel_size = kernel_size;

   uint64_t shader_data_addr = INSTRUCTION_STATE_POOL_MIN_ADDRESS +
                               shader->kernel.offset +
                               prog_data_in->const_data_offset;

   int rv_count = 0;
   struct brw_shader_reloc_value reloc_values[5];
   reloc_values[rv_count++] = (struct brw_shader_reloc_value) {
      .id = BRW_SHADER_RELOC_CONST_DATA_ADDR_LOW,
      .value = shader_data_addr,
   };
   reloc_values[rv_count++] = (struct brw_shader_reloc_value) {
      .id = BRW_SHADER_RELOC_CONST_DATA_ADDR_HIGH,
      .value = shader_data_addr >> 32,
   };
   reloc_values[rv_count++] = (struct brw_shader_reloc_value) {
      .id = BRW_SHADER_RELOC_SHADER_START_OFFSET,
      .value = shader->kernel.offset,
   };
   if (brw_shader_stage_is_bindless(stage)) {
      const struct brw_bs_prog_data *bs_prog_data =
         brw_bs_prog_data_const(prog_data_in);
      uint64_t resume_sbt_addr = INSTRUCTION_STATE_POOL_MIN_ADDRESS +
                                 shader->kernel.offset +
                                 bs_prog_data->resume_sbt_offset;
      reloc_values[rv_count++] = (struct brw_shader_reloc_value) {
         .id = BRW_SHADER_RELOC_RESUME_SBT_ADDR_LOW,
         .value = resume_sbt_addr,
      };
      reloc_values[rv_count++] = (struct brw_shader_reloc_value) {
         .id = BRW_SHADER_RELOC_RESUME_SBT_ADDR_HIGH,
         .value = resume_sbt_addr >> 32,
      };
   }

   brw_write_shader_relocs(&device->info, shader->kernel.map, prog_data_in,
                           reloc_values, rv_count);

   memcpy(prog_data, prog_data_in, prog_data_size);
   typed_memcpy(prog_data_relocs, prog_data_in->relocs,
                prog_data_in->num_relocs);
   prog_data->relocs = prog_data_relocs;
   memset(prog_data_param, 0,
          prog_data->nr_params * sizeof(*prog_data_param));
   prog_data->param = prog_data_param;
   shader->prog_data = prog_data;
   shader->prog_data_size = prog_data_size;

   assert(num_stats <= ARRAY_SIZE(shader->stats));
   typed_memcpy(shader->stats, stats, num_stats);
   shader->num_stats = num_stats;

   if (xfb_info_in) {
      *xfb_info = *xfb_info_in;
      typed_memcpy(xfb_info->outputs, xfb_info_in->outputs,
                   xfb_info_in->output_count);
      shader->xfb_info = xfb_info;
   } else {
      shader->xfb_info = NULL;
   }

   shader->bind_map = *bind_map;
   typed_memcpy(surface_to_descriptor, bind_map->surface_to_descriptor,
                bind_map->surface_count);
   shader->bind_map.surface_to_descriptor = surface_to_descriptor;
   typed_memcpy(sampler_to_descriptor, bind_map->sampler_to_descriptor,
                bind_map->sampler_count);
   shader->bind_map.sampler_to_descriptor = sampler_to_descriptor;

   return shader;
}

void
anv_shader_bin_destroy(struct anv_device *device,
                       struct anv_shader_bin *shader)
{
   assert(shader->ref_cnt == 0);
   anv_state_pool_free(&device->instruction_state_pool, shader->kernel);
   vk_free(&device->vk.alloc, shader);
}

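/* Serialize a shader binary for VkPipelineCache data or the on-disk shader
 * cache.  The layout written here must stay in sync with
 * anv_shader_bin_create_from_blob() below, which reads it back.
 */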
static bool
anv_shader_bin_write_to_blob(const struct anv_shader_bin *shader,
                             struct blob *blob)
{
   blob_write_uint32(blob, shader->stage);

   blob_write_uint32(blob, shader->key->size);
   blob_write_bytes(blob, shader->key->data, shader->key->size);

   blob_write_uint32(blob, shader->kernel_size);
   blob_write_bytes(blob, shader->kernel.map, shader->kernel_size);

   blob_write_uint32(blob, shader->prog_data_size);
   blob_write_bytes(blob, shader->prog_data, shader->prog_data_size);
   blob_write_bytes(blob, shader->prog_data->relocs,
                    shader->prog_data->num_relocs *
                    sizeof(shader->prog_data->relocs[0]));

   blob_write_uint32(blob, shader->num_stats);
   blob_write_bytes(blob, shader->stats,
                    shader->num_stats * sizeof(shader->stats[0]));

   if (shader->xfb_info) {
      uint32_t xfb_info_size =
         nir_xfb_info_size(shader->xfb_info->output_count);
      blob_write_uint32(blob, xfb_info_size);
      blob_write_bytes(blob, shader->xfb_info, xfb_info_size);
   } else {
      blob_write_uint32(blob, 0);
   }

   blob_write_bytes(blob, shader->bind_map.surface_sha1,
                    sizeof(shader->bind_map.surface_sha1));
   blob_write_bytes(blob, shader->bind_map.sampler_sha1,
                    sizeof(shader->bind_map.sampler_sha1));
   blob_write_bytes(blob, shader->bind_map.push_sha1,
                    sizeof(shader->bind_map.push_sha1));
   blob_write_uint32(blob, shader->bind_map.surface_count);
   blob_write_uint32(blob, shader->bind_map.sampler_count);
   blob_write_bytes(blob, shader->bind_map.surface_to_descriptor,
                    shader->bind_map.surface_count *
                    sizeof(*shader->bind_map.surface_to_descriptor));
   blob_write_bytes(blob, shader->bind_map.sampler_to_descriptor,
                    shader->bind_map.sampler_count *
                    sizeof(*shader->bind_map.sampler_to_descriptor));
   blob_write_bytes(blob, shader->bind_map.push_ranges,
                    sizeof(shader->bind_map.push_ranges));

   return !blob->out_of_memory;
}

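/* Deserialize a shader binary written by anv_shader_bin_write_to_blob().
 * Pointers handed back by blob_read_bytes() alias the blob's backing
 * storage; anv_shader_bin_create() copies everything it needs, so nothing
 * here outlives the caller's buffer.
 */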
static struct anv_shader_bin *
anv_shader_bin_create_from_blob(struct anv_device *device,
                                struct blob_reader *blob)
{
   gl_shader_stage stage = blob_read_uint32(blob);

   uint32_t key_size = blob_read_uint32(blob);
   const void *key_data = blob_read_bytes(blob, key_size);

   uint32_t kernel_size = blob_read_uint32(blob);
   const void *kernel_data = blob_read_bytes(blob, kernel_size);

   uint32_t prog_data_size = blob_read_uint32(blob);
   const void *prog_data_bytes = blob_read_bytes(blob, prog_data_size);
   if (blob->overrun)
      return NULL;

   union brw_any_prog_data prog_data;
   memcpy(&prog_data, prog_data_bytes,
          MIN2(sizeof(prog_data), prog_data_size));
   prog_data.base.relocs =
      blob_read_bytes(blob, prog_data.base.num_relocs *
                            sizeof(prog_data.base.relocs[0]));

   uint32_t num_stats = blob_read_uint32(blob);
   const struct brw_compile_stats *stats =
      blob_read_bytes(blob, num_stats * sizeof(stats[0]));

   const nir_xfb_info *xfb_info = NULL;
   uint32_t xfb_size = blob_read_uint32(blob);
   if (xfb_size)
      xfb_info = blob_read_bytes(blob, xfb_size);

   struct anv_pipeline_bind_map bind_map;
   blob_copy_bytes(blob, bind_map.surface_sha1, sizeof(bind_map.surface_sha1));
   blob_copy_bytes(blob, bind_map.sampler_sha1, sizeof(bind_map.sampler_sha1));
   blob_copy_bytes(blob, bind_map.push_sha1, sizeof(bind_map.push_sha1));
   bind_map.surface_count = blob_read_uint32(blob);
   bind_map.sampler_count = blob_read_uint32(blob);
   bind_map.surface_to_descriptor = (void *)
      blob_read_bytes(blob, bind_map.surface_count *
                            sizeof(*bind_map.surface_to_descriptor));
   bind_map.sampler_to_descriptor = (void *)
      blob_read_bytes(blob, bind_map.sampler_count *
                            sizeof(*bind_map.sampler_to_descriptor));
   blob_copy_bytes(blob, bind_map.push_ranges, sizeof(bind_map.push_ranges));

   if (blob->overrun)
      return NULL;

   return anv_shader_bin_create(device, stage,
                                key_data, key_size,
                                kernel_data, kernel_size,
                                &prog_data.base, prog_data_size,
                                stats, num_stats, xfb_info, &bind_map);
}

/* Remaining work:
 *
 * - Compact binding table layout so it's tight and not dependent on
 *   descriptor set layout.
 *
 * - Review prog_data struct for size and cacheability: struct
 *   brw_stage_prog_data has binding_table which uses a lot of uint32_t for
 *   8-bit quantities etc.; use bit fields for all bools, e.g. dual_src_blend.
 */

static uint32_t
shader_bin_key_hash_func(const void *void_key)
{
   const struct anv_shader_bin_key *key = void_key;
   return _mesa_hash_data(key->data, key->size);
}

static bool
shader_bin_key_compare_func(const void *void_a, const void *void_b)
{
   const struct anv_shader_bin_key *a = void_a, *b = void_b;
   if (a->size != b->size)
      return false;

   return memcmp(a->data, b->data, a->size) == 0;
}

static uint32_t
sha1_hash_func(const void *sha1)
{
   return _mesa_hash_data(sha1, 20);
}

static bool
sha1_compare_func(const void *sha1_a, const void *sha1_b)
{
   return memcmp(sha1_a, sha1_b, 20) == 0;
}

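/* Initialize a pipeline cache.  When caching is disabled, both hash tables
 * are left NULL and every lookup or upload path below treats that as a
 * cache miss.  When the application sets
 * VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT it promises
 * external synchronization, so anv_cache_lock()/unlock() skip the mutex.
 */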
void
anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
                        struct anv_device *device,
                        bool cache_enabled,
                        bool external_sync)
{
   vk_object_base_init(&device->vk, &cache->base,
                       VK_OBJECT_TYPE_PIPELINE_CACHE);
   cache->device = device;
   cache->external_sync = external_sync;
   pthread_mutex_init(&cache->mutex, NULL);

   if (cache_enabled) {
      cache->cache = _mesa_hash_table_create(NULL, shader_bin_key_hash_func,
                                             shader_bin_key_compare_func);
      cache->nir_cache = _mesa_hash_table_create(NULL, sha1_hash_func,
                                                 sha1_compare_func);
   } else {
      cache->cache = NULL;
      cache->nir_cache = NULL;
   }
}

void
anv_pipeline_cache_finish(struct anv_pipeline_cache *cache)
{
   pthread_mutex_destroy(&cache->mutex);

   if (cache->cache) {
      /* This is a bit unfortunate.  In order to keep things from randomly
       * going away, the shader cache has to hold a reference to all shader
       * binaries it contains.  We unref them when we destroy the cache.
       */
      hash_table_foreach(cache->cache, entry)
         anv_shader_bin_unref(cache->device, entry->data);

      _mesa_hash_table_destroy(cache->cache, NULL);
   }

   if (cache->nir_cache) {
      hash_table_foreach(cache->nir_cache, entry)
         ralloc_free(entry->data);

      _mesa_hash_table_destroy(cache->nir_cache, NULL);
   }

   vk_object_base_finish(&cache->base);
}

static struct anv_shader_bin *
anv_pipeline_cache_search_locked(struct anv_pipeline_cache *cache,
                                 const void *key_data, uint32_t key_size)
{
   uint32_t vla[1 + DIV_ROUND_UP(key_size, sizeof(uint32_t))];
   struct anv_shader_bin_key *key = (void *)vla;
   key->size = key_size;
   memcpy(key->data, key_data, key_size);

   struct hash_entry *entry = _mesa_hash_table_search(cache->cache, key);
   if (entry)
      return entry->data;
   else
      return NULL;
}

static inline void
anv_cache_lock(struct anv_pipeline_cache *cache)
{
   if (!cache->external_sync)
      pthread_mutex_lock(&cache->mutex);
}

static inline void
anv_cache_unlock(struct anv_pipeline_cache *cache)
{
   if (!cache->external_sync)
      pthread_mutex_unlock(&cache->mutex);
}

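/* Look up a shader by key.  On a hit, a reference is taken on behalf of the
 * caller, who is responsible for the eventual anv_shader_bin_unref().
 */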
struct anv_shader_bin *
anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
                          const void *key_data, uint32_t key_size)
{
   if (!cache->cache)
      return NULL;

   anv_cache_lock(cache);

   struct anv_shader_bin *shader =
      anv_pipeline_cache_search_locked(cache, key_data, key_size);

   anv_cache_unlock(cache);

   /* We increment refcount before handing it to the caller */
   if (shader)
      anv_shader_bin_ref(shader);

   return shader;
}

static void
anv_pipeline_cache_add_shader_bin(struct anv_pipeline_cache *cache,
                                  struct anv_shader_bin *bin)
{
   if (!cache->cache)
      return;

   anv_cache_lock(cache);

   struct hash_entry *entry = _mesa_hash_table_search(cache->cache, bin->key);
   if (entry == NULL) {
      /* Take a reference for the cache */
      anv_shader_bin_ref(bin);
      _mesa_hash_table_insert(cache->cache, bin->key, bin);
   }

   anv_cache_unlock(cache);
}

static struct anv_shader_bin *
anv_pipeline_cache_add_shader_locked(struct anv_pipeline_cache *cache,
                                     gl_shader_stage stage,
                                     const void *key_data, uint32_t key_size,
                                     const void *kernel_data,
                                     uint32_t kernel_size,
                                     const struct brw_stage_prog_data *prog_data,
                                     uint32_t prog_data_size,
                                     const struct brw_compile_stats *stats,
                                     uint32_t num_stats,
                                     const nir_xfb_info *xfb_info,
                                     const struct anv_pipeline_bind_map *bind_map)
{
   struct anv_shader_bin *shader =
      anv_pipeline_cache_search_locked(cache, key_data, key_size);
   if (shader)
      return shader;

   struct anv_shader_bin *bin =
      anv_shader_bin_create(cache->device, stage,
                            key_data, key_size,
                            kernel_data, kernel_size,
                            prog_data, prog_data_size,
                            stats, num_stats, xfb_info, bind_map);
   if (!bin)
      return NULL;

   _mesa_hash_table_insert(cache->cache, bin->key, bin);

   return bin;
}

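/* Add a freshly compiled kernel to the cache, or return the cached copy if
 * another thread already inserted one for the same key.  Either way, the
 * returned bin carries a reference owned by the caller.
 */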
struct anv_shader_bin *
anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
                                 gl_shader_stage stage,
                                 const void *key_data, uint32_t key_size,
                                 const void *kernel_data, uint32_t kernel_size,
                                 const struct brw_stage_prog_data *prog_data,
                                 uint32_t prog_data_size,
                                 const struct brw_compile_stats *stats,
                                 uint32_t num_stats,
                                 const nir_xfb_info *xfb_info,
                                 const struct anv_pipeline_bind_map *bind_map)
{
   if (cache->cache) {
      anv_cache_lock(cache);

      struct anv_shader_bin *bin =
         anv_pipeline_cache_add_shader_locked(cache, stage, key_data, key_size,
                                              kernel_data, kernel_size,
                                              prog_data, prog_data_size,
                                              stats, num_stats,
                                              xfb_info, bind_map);

      anv_cache_unlock(cache);

      /* We increment refcount before handing it to the caller */
      if (bin)
         anv_shader_bin_ref(bin);

      return bin;
   } else {
      /* In this case, we're not caching it so the caller owns it entirely */
      return anv_shader_bin_create(cache->device, stage,
                                   key_data, key_size,
                                   kernel_data, kernel_size,
                                   prog_data, prog_data_size,
                                   stats, num_stats,
                                   xfb_info, bind_map);
   }
}

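/* Populate the cache from application-provided initial data.  The blob is
 * only trusted after its header validates against this device; on any
 * mismatch, or on a truncated entry, we simply stop loading, which is
 * always legal for a pipeline cache.
 */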
static void
anv_pipeline_cache_load(struct anv_pipeline_cache *cache,
                        const void *data, size_t size)
{
   struct anv_device *device = cache->device;
   struct anv_physical_device *pdevice = device->physical;

   if (cache->cache == NULL)
      return;

   struct blob_reader blob;
   blob_reader_init(&blob, data, size);

   struct vk_pipeline_cache_header header;
   blob_copy_bytes(&blob, &header, sizeof(header));
   uint32_t count = blob_read_uint32(&blob);
   if (blob.overrun)
      return;

   if (header.header_size < sizeof(header))
      return;
   if (header.header_version != VK_PIPELINE_CACHE_HEADER_VERSION_ONE)
      return;
   if (header.vendor_id != 0x8086)
      return;
   if (header.device_id != device->info.chipset_id)
      return;
   if (memcmp(header.uuid, pdevice->pipeline_cache_uuid, VK_UUID_SIZE) != 0)
      return;

   for (uint32_t i = 0; i < count; i++) {
      struct anv_shader_bin *bin =
         anv_shader_bin_create_from_blob(device, &blob);
      if (!bin)
         break;
      _mesa_hash_table_insert(cache->cache, bin->key, bin);
   }
}

VkResult anv_CreatePipelineCache(
    VkDevice                                    _device,
    const VkPipelineCacheCreateInfo*            pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkPipelineCache*                            pPipelineCache)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_pipeline_cache *cache;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO);

   cache = vk_alloc2(&device->vk.alloc, pAllocator,
                     sizeof(*cache), 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cache == NULL)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   anv_pipeline_cache_init(cache, device,
                           device->physical->instance->pipeline_cache_enabled,
                           pCreateInfo->flags & VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT);

   if (pCreateInfo->initialDataSize > 0)
      anv_pipeline_cache_load(cache,
                              pCreateInfo->pInitialData,
                              pCreateInfo->initialDataSize);

   *pPipelineCache = anv_pipeline_cache_to_handle(cache);

   return VK_SUCCESS;
}

void anv_DestroyPipelineCache(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);

   if (!cache)
      return;

   anv_pipeline_cache_finish(cache);

   vk_free2(&device->vk.alloc, pAllocator, cache);
}

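/* vkGetPipelineCacheData follows the usual Vulkan two-call idiom: pass
 * pData = NULL to query the required size, then call again with a buffer at
 * least that large.  A sketch of the application side (the variable names
 * are illustrative, not part of this driver):
 *
 *    size_t size = 0;
 *    vkGetPipelineCacheData(device, pipeline_cache, &size, NULL);
 *    void *data = malloc(size);
 *    vkGetPipelineCacheData(device, pipeline_cache, &size, data);
 *    // Persist data/size and feed it back later through
 *    // VkPipelineCacheCreateInfo::pInitialData.
 *
 * Internally we serialize into a fixed-size blob; when pData is NULL the
 * blob gets a SIZE_MAX budget, so blob.size ends up being the total size
 * required.
 */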
VkResult anv_GetPipelineCacheData(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    size_t*                                     pDataSize,
    void*                                       pData)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);

   struct blob blob;
   if (pData) {
      blob_init_fixed(&blob, pData, *pDataSize);
   } else {
      blob_init_fixed(&blob, NULL, SIZE_MAX);
   }

   struct vk_pipeline_cache_header header = {
      .header_size = sizeof(struct vk_pipeline_cache_header),
      .header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE,
      .vendor_id = 0x8086,
      .device_id = device->info.chipset_id,
   };
   memcpy(header.uuid, device->physical->pipeline_cache_uuid, VK_UUID_SIZE);
   blob_write_bytes(&blob, &header, sizeof(header));

   uint32_t count = 0;
   intptr_t count_offset = blob_reserve_uint32(&blob);
   if (count_offset < 0) {
      *pDataSize = 0;
      blob_finish(&blob);
      return VK_INCOMPLETE;
   }

   VkResult result = VK_SUCCESS;
   if (cache->cache) {
      hash_table_foreach(cache->cache, entry) {
         struct anv_shader_bin *shader = entry->data;

         size_t save_size = blob.size;
         if (!anv_shader_bin_write_to_blob(shader, &blob)) {
            /* If it fails, reset to the previous size and bail */
            blob.size = save_size;
            result = VK_INCOMPLETE;
            break;
         }

         count++;
      }
   }

   blob_overwrite_uint32(&blob, count_offset, count);

   *pDataSize = blob.size;

   blob_finish(&blob);

   return result;
}

VkResult anv_MergePipelineCaches(
    VkDevice                                    _device,
    VkPipelineCache                             destCache,
    uint32_t                                    srcCacheCount,
    const VkPipelineCache*                      pSrcCaches)
{
   ANV_FROM_HANDLE(anv_pipeline_cache, dst, destCache);

   if (!dst->cache)
      return VK_SUCCESS;

   for (uint32_t i = 0; i < srcCacheCount; i++) {
      ANV_FROM_HANDLE(anv_pipeline_cache, src, pSrcCaches[i]);
      if (!src->cache)
         continue;

      hash_table_foreach(src->cache, entry) {
         struct anv_shader_bin *bin = entry->data;
         assert(bin);

         if (_mesa_hash_table_search(dst->cache, bin->key))
            continue;

         anv_shader_bin_ref(bin);
         _mesa_hash_table_insert(dst->cache, bin->key, bin);
      }
   }

   return VK_SUCCESS;
}

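/* Kernel lookup used during pipeline compilation: try the (user or default)
 * pipeline cache first, then fall back to the on-disk shader cache.
 * *user_cache_hit is only set for hits in an application-supplied cache,
 * never the driver's own default cache, presumably so pipeline-creation
 * feedback can report application cache hits accurately.
 */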
struct anv_shader_bin *
anv_device_search_for_kernel(struct anv_device *device,
                             struct anv_pipeline_cache *cache,
                             const void *key_data, uint32_t key_size,
                             bool *user_cache_hit)
{
   struct anv_shader_bin *bin;

   *user_cache_hit = false;

   if (cache) {
      bin = anv_pipeline_cache_search(cache, key_data, key_size);
      if (bin) {
         *user_cache_hit = cache != &device->default_pipeline_cache;
         return bin;
      }
   }

#ifdef ENABLE_SHADER_CACHE
   struct disk_cache *disk_cache = device->physical->disk_cache;
   if (disk_cache && device->physical->instance->pipeline_cache_enabled) {
      cache_key cache_key;
      disk_cache_compute_key(disk_cache, key_data, key_size, cache_key);

      size_t buffer_size;
      uint8_t *buffer = disk_cache_get(disk_cache, cache_key, &buffer_size);
      if (buffer) {
         struct blob_reader blob;
         blob_reader_init(&blob, buffer, buffer_size);
         bin = anv_shader_bin_create_from_blob(device, &blob);
         free(buffer);

         if (bin) {
            if (cache)
               anv_pipeline_cache_add_shader_bin(cache, bin);
            return bin;
         }
      }
   }
#endif

   return NULL;
}

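/* Upload a compiled kernel, going through the pipeline cache when one is
 * provided, and mirror the result into the on-disk shader cache when that
 * is enabled at build time (ENABLE_SHADER_CACHE).
 */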
struct anv_shader_bin *
anv_device_upload_kernel(struct anv_device *device,
                         struct anv_pipeline_cache *cache,
                         gl_shader_stage stage,
                         const void *key_data, uint32_t key_size,
                         const void *kernel_data, uint32_t kernel_size,
                         const struct brw_stage_prog_data *prog_data,
                         uint32_t prog_data_size,
                         const struct brw_compile_stats *stats,
                         uint32_t num_stats,
                         const nir_xfb_info *xfb_info,
                         const struct anv_pipeline_bind_map *bind_map)
{
   struct anv_shader_bin *bin;
   if (cache) {
      bin = anv_pipeline_cache_upload_kernel(cache, stage, key_data, key_size,
                                             kernel_data, kernel_size,
                                             prog_data, prog_data_size,
                                             stats, num_stats,
                                             xfb_info, bind_map);
   } else {
      bin = anv_shader_bin_create(device, stage, key_data, key_size,
                                  kernel_data, kernel_size,
                                  prog_data, prog_data_size,
                                  stats, num_stats,
                                  xfb_info, bind_map);
   }

   if (bin == NULL)
      return NULL;

#ifdef ENABLE_SHADER_CACHE
   struct disk_cache *disk_cache = device->physical->disk_cache;
   if (disk_cache) {
      struct blob binary;
      blob_init(&binary);
      if (anv_shader_bin_write_to_blob(bin, &binary)) {
         cache_key cache_key;
         disk_cache_compute_key(disk_cache, key_data, key_size, cache_key);

         disk_cache_put(disk_cache, cache_key, binary.data, binary.size, NULL);
      }

      blob_finish(&binary);
   }
#endif

   return bin;
}

struct serialized_nir {
   unsigned char sha1_key[20];
   size_t size;
   char data[0];
};

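/* Look for previously-serialized NIR under its SHA-1 key.  On a hit, the
 * shader is deserialized into mem_ctx, so the caller gets a private,
 * mutable copy rather than a pointer into the cache.
 */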
struct nir_shader *
anv_device_search_for_nir(struct anv_device *device,
                          struct anv_pipeline_cache *cache,
                          const nir_shader_compiler_options *nir_options,
                          unsigned char sha1_key[20],
                          void *mem_ctx)
{
   if (cache && cache->nir_cache) {
      const struct serialized_nir *snir = NULL;

      anv_cache_lock(cache);
      struct hash_entry *entry =
         _mesa_hash_table_search(cache->nir_cache, sha1_key);
      if (entry)
         snir = entry->data;
      anv_cache_unlock(cache);

      if (snir) {
         struct blob_reader blob;
         blob_reader_init(&blob, snir->data, snir->size);

         nir_shader *nir = nir_deserialize(mem_ctx, nir_options, &blob);
         if (blob.overrun) {
            ralloc_free(nir);
         } else {
            return nir;
         }
      }
   }

   return NULL;
}

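/* Insert serialized NIR into the cache unless an entry with the same SHA-1
 * already exists.  The early unlocked probe is just an optimization to skip
 * serialization on the common hit path; the table is re-checked under the
 * lock before inserting.
 */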
void
anv_device_upload_nir(struct anv_device *device,
                      struct anv_pipeline_cache *cache,
                      const struct nir_shader *nir,
                      unsigned char sha1_key[20])
{
   if (cache && cache->nir_cache) {
      anv_cache_lock(cache);
      struct hash_entry *entry =
         _mesa_hash_table_search(cache->nir_cache, sha1_key);
      anv_cache_unlock(cache);
      if (entry)
         return;

      struct blob blob;
      blob_init(&blob);

      nir_serialize(&blob, nir, false);
      if (blob.out_of_memory) {
         blob_finish(&blob);
         return;
      }

      anv_cache_lock(cache);
      /* Because ralloc isn't thread-safe, we have to do all this inside the
       * lock.  We could unlock for the big memcpy but it's probably not
       * worth the hassle.
       */
      entry = _mesa_hash_table_search(cache->nir_cache, sha1_key);
      if (entry) {
         blob_finish(&blob);
         anv_cache_unlock(cache);
         return;
      }

      struct serialized_nir *snir =
         ralloc_size(cache->nir_cache, sizeof(*snir) + blob.size);
      memcpy(snir->sha1_key, sha1_key, 20);
      snir->size = blob.size;
      memcpy(snir->data, blob.data, blob.size);

      blob_finish(&blob);

      _mesa_hash_table_insert(cache->nir_cache, snir->sha1_key, snir);

      anv_cache_unlock(cache);
   }
}