/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv and radv which are:
 * Copyright © 2015 Intel Corporation
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 */

#include "vn_descriptor_set.h"

#include "venus-protocol/vn_protocol_driver_descriptor_pool.h"
#include "venus-protocol/vn_protocol_driver_descriptor_set.h"
#include "venus-protocol/vn_protocol_driver_descriptor_set_layout.h"
#include "venus-protocol/vn_protocol_driver_descriptor_update_template.h"

#include "vn_device.h"

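/* Note: vn_async_* commands are encoded without waiting for the renderer's
 * reply, while vn_call_* commands wait for the result.  Most commands below
 * can be asynchronous because enough descriptor state is mirrored on the
 * driver side to answer the app immediately.
 */
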
void
vn_descriptor_set_layout_destroy(struct vn_device *dev,
                                 struct vn_descriptor_set_layout *layout)
{
   VkDevice dev_handle = vn_device_to_handle(dev);
   VkDescriptorSetLayout layout_handle =
      vn_descriptor_set_layout_to_handle(layout);
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;

   vn_async_vkDestroyDescriptorSetLayout(dev->instance, dev_handle,
                                         layout_handle, NULL);

   vn_object_base_fini(&layout->base);
   vk_free(alloc, layout);
}

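/* Frees the driver-side set object and drops its layout reference.  Callers
 * own the renderer-side free: the set is destroyed with its pool, on pool
 * reset, or via vkFreeDescriptorSets.
 */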
static void
vn_descriptor_set_destroy(struct vn_device *dev,
                          struct vn_descriptor_set *set,
                          const VkAllocationCallbacks *alloc)
{
   list_del(&set->head);

   vn_descriptor_set_layout_unref(dev, set->layout);

   vn_object_base_fini(&set->base);
   vk_free(alloc, set);
}

/* descriptor set layout commands */

void
vn_GetDescriptorSetLayoutSupport(
   VkDevice device,
   const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
   VkDescriptorSetLayoutSupport *pSupport)
{
   struct vn_device *dev = vn_device_from_handle(device);

   /* TODO per-device cache */
   vn_call_vkGetDescriptorSetLayoutSupport(dev->instance, device, pCreateInfo,
                                           pSupport);
}

static void
vn_descriptor_set_layout_init(
   struct vn_device *dev,
   const VkDescriptorSetLayoutCreateInfo *create_info,
   uint32_t last_binding,
   struct vn_descriptor_set_layout *layout)
{
   VkDevice dev_handle = vn_device_to_handle(dev);
   VkDescriptorSetLayout layout_handle =
      vn_descriptor_set_layout_to_handle(layout);
   const VkDescriptorSetLayoutBindingFlagsCreateInfo *binding_flags =
      vk_find_struct_const(create_info->pNext,
                           DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO);

   /* 14.2.1. Descriptor Set Layout
    *
    * If bindingCount is zero or if this structure is not included in
    * the pNext chain, the VkDescriptorBindingFlags for each descriptor
    * set layout binding is considered to be zero.
    */
   if (binding_flags && !binding_flags->bindingCount)
      binding_flags = NULL;

   layout->refcount = VN_REFCOUNT_INIT(1);
   layout->last_binding = last_binding;

   for (uint32_t i = 0; i < create_info->bindingCount; i++) {
      const VkDescriptorSetLayoutBinding *binding_info =
         &create_info->pBindings[i];
      struct vn_descriptor_set_layout_binding *binding =
         &layout->bindings[binding_info->binding];

      if (binding_info->binding == last_binding) {
         /* 14.2.1. Descriptor Set Layout
          *
          * VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT must only be
          * used for the last binding in the descriptor set layout (i.e. the
          * binding with the largest value of binding).
          *
          * 41. Features
          *
          * descriptorBindingVariableDescriptorCount indicates whether the
          * implementation supports descriptor sets with a variable-sized last
          * binding. If this feature is not enabled,
          * VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT must not be
          * used.
          */
         layout->has_variable_descriptor_count =
            binding_flags &&
            (binding_flags->pBindingFlags[i] &
             VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT);
      }

      binding->type = binding_info->descriptorType;
      binding->count = binding_info->descriptorCount;

      switch (binding_info->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         binding->has_immutable_samplers = binding_info->pImmutableSamplers;
         break;
      default:
         break;
      }
   }

   vn_async_vkCreateDescriptorSetLayout(dev->instance, dev_handle,
                                        create_info, NULL, &layout_handle);
}

VkResult
vn_CreateDescriptorSetLayout(
   VkDevice device,
   const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
   const VkAllocationCallbacks *pAllocator,
   VkDescriptorSetLayout *pSetLayout)
{
   struct vn_device *dev = vn_device_from_handle(device);
   /* ignore pAllocator as the layout is reference-counted */
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;

   uint32_t last_binding = 0;
   VkDescriptorSetLayoutBinding *local_bindings = NULL;
   VkDescriptorSetLayoutCreateInfo local_create_info;
   if (pCreateInfo->bindingCount) {
      /* the encoder does not ignore
       * VkDescriptorSetLayoutBinding::pImmutableSamplers when it should
       */
      const size_t binding_size =
         sizeof(*pCreateInfo->pBindings) * pCreateInfo->bindingCount;
      local_bindings = vk_alloc(alloc, binding_size, VN_DEFAULT_ALIGN,
                                VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
      if (!local_bindings)
         return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

      memcpy(local_bindings, pCreateInfo->pBindings, binding_size);
      for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) {
         VkDescriptorSetLayoutBinding *binding = &local_bindings[i];

         if (last_binding < binding->binding)
            last_binding = binding->binding;

         switch (binding->descriptorType) {
         case VK_DESCRIPTOR_TYPE_SAMPLER:
         case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
            break;
         default:
            binding->pImmutableSamplers = NULL;
            break;
         }
      }

      local_create_info = *pCreateInfo;
      local_create_info.pBindings = local_bindings;
      pCreateInfo = &local_create_info;
   }

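   /* bindings are indexed by binding number, which may be sparse, so size
    * the flexible array for the largest binding number plus one
    */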
   const size_t layout_size =
      offsetof(struct vn_descriptor_set_layout, bindings[last_binding + 1]);
   /* allocated with the device scope */
   struct vn_descriptor_set_layout *layout =
      vk_zalloc(alloc, layout_size, VN_DEFAULT_ALIGN,
                VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!layout) {
      vk_free(alloc, local_bindings);
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   vn_object_base_init(&layout->base, VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT,
                       &dev->base);

   vn_descriptor_set_layout_init(dev, pCreateInfo, last_binding, layout);

   vk_free(alloc, local_bindings);

   *pSetLayout = vn_descriptor_set_layout_to_handle(layout);

   return VK_SUCCESS;
}

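/* This only drops the reference added at creation.  The layout stays alive,
 * and the renderer-side destroy is deferred, while descriptor sets allocated
 * with it still hold references.
 */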
void
vn_DestroyDescriptorSetLayout(VkDevice device,
                              VkDescriptorSetLayout descriptorSetLayout,
                              const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_descriptor_set_layout *layout =
      vn_descriptor_set_layout_from_handle(descriptorSetLayout);

   if (!layout)
      return;

   vn_descriptor_set_layout_unref(dev, layout);
}

/* descriptor pool commands */

VkResult
vn_CreateDescriptorPool(VkDevice device,
                        const VkDescriptorPoolCreateInfo *pCreateInfo,
                        const VkAllocationCallbacks *pAllocator,
                        VkDescriptorPool *pDescriptorPool)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   struct vn_descriptor_pool *pool =
      vk_zalloc(alloc, sizeof(*pool), VN_DEFAULT_ALIGN,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!pool)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&pool->base, VK_OBJECT_TYPE_DESCRIPTOR_POOL,
                       &dev->base);

   pool->allocator = *alloc;

   /* Without VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, set
    * allocation must not fail due to a fragmented pool per the spec. In
    * that case, set allocation can be asynchronous, with the pool resources
    * tracked locally.
    */
   pool->async_set_allocation = !(
      pCreateInfo->flags & VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT);

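   /* mirror the pool budget so vn_descriptor_pool_alloc_descriptors() can
    * accept or reject allocations without a renderer round trip
    */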
   pool->max.set_count = pCreateInfo->maxSets;

   for (uint32_t i = 0; i < pCreateInfo->poolSizeCount; i++) {
      const VkDescriptorPoolSize *pool_size = &pCreateInfo->pPoolSizes[i];

      assert(pool_size->type < VN_NUM_DESCRIPTOR_TYPES);

      pool->max.descriptor_counts[pool_size->type] +=
         pool_size->descriptorCount;
   }

   list_inithead(&pool->descriptor_sets);

   VkDescriptorPool pool_handle = vn_descriptor_pool_to_handle(pool);
   vn_async_vkCreateDescriptorPool(dev->instance, device, pCreateInfo, NULL,
                                   &pool_handle);

   *pDescriptorPool = pool_handle;

   return VK_SUCCESS;
}

void
vn_DestroyDescriptorPool(VkDevice device,
                         VkDescriptorPool descriptorPool,
                         const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_descriptor_pool *pool =
      vn_descriptor_pool_from_handle(descriptorPool);
   const VkAllocationCallbacks *alloc;

   if (!pool)
      return;

   alloc = pAllocator ? pAllocator : &pool->allocator;

   /* We must emit vkDestroyDescriptorPool before freeing the sets in
    * pool->descriptor_sets.  Otherwise, another thread might reuse their
    * object ids while they still refer to the sets in the renderer.
    */
   vn_async_vkDestroyDescriptorPool(dev->instance, device, descriptorPool,
                                    NULL);

   list_for_each_entry_safe(struct vn_descriptor_set, set,
                            &pool->descriptor_sets, head)
      vn_descriptor_set_destroy(dev, set, alloc);

   vn_object_base_fini(&pool->base);
   vk_free(alloc, pool);
}

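/* When set allocation is asynchronous, the driver itself must report
 * VK_ERROR_OUT_OF_POOL_MEMORY.  Charge the layout's descriptors against the
 * local budget and roll back on failure.
 */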
static bool
vn_descriptor_pool_alloc_descriptors(
   struct vn_descriptor_pool *pool,
   const struct vn_descriptor_set_layout *layout,
   uint32_t last_binding_descriptor_count)
{
   struct vn_descriptor_pool_state recovery;

   if (!pool->async_set_allocation)
      return true;

   if (pool->used.set_count == pool->max.set_count)
      return false;

   /* backup current pool state to recovery */
   recovery = pool->used;

   ++pool->used.set_count;

   for (uint32_t i = 0; i <= layout->last_binding; i++) {
      const VkDescriptorType type = layout->bindings[i].type;
      const uint32_t count = i == layout->last_binding
                                ? last_binding_descriptor_count
                                : layout->bindings[i].count;

      pool->used.descriptor_counts[type] += count;

      if (pool->used.descriptor_counts[type] >
          pool->max.descriptor_counts[type]) {
         /* restore pool state before this allocation */
         pool->used = recovery;
         return false;
      }
   }

   return true;
}

static void
vn_descriptor_pool_free_descriptors(
   struct vn_descriptor_pool *pool,
   const struct vn_descriptor_set_layout *layout,
   uint32_t last_binding_descriptor_count)
{
   if (!pool->async_set_allocation)
      return;

   for (uint32_t i = 0; i <= layout->last_binding; i++) {
      const uint32_t count = i == layout->last_binding
                                ? last_binding_descriptor_count
                                : layout->bindings[i].count;

      pool->used.descriptor_counts[layout->bindings[i].type] -= count;
   }

   --pool->used.set_count;
}

static void
vn_descriptor_pool_reset_descriptors(struct vn_descriptor_pool *pool)
{
   if (!pool->async_set_allocation)
      return;

   memset(&pool->used, 0, sizeof(pool->used));
}

VkResult
vn_ResetDescriptorPool(VkDevice device,
                       VkDescriptorPool descriptorPool,
                       VkDescriptorPoolResetFlags flags)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_descriptor_pool *pool =
      vn_descriptor_pool_from_handle(descriptorPool);
   const VkAllocationCallbacks *alloc = &pool->allocator;

   vn_async_vkResetDescriptorPool(dev->instance, device, descriptorPool,
                                  flags);

   list_for_each_entry_safe(struct vn_descriptor_set, set,
                            &pool->descriptor_sets, head)
      vn_descriptor_set_destroy(dev, set, alloc);

   vn_descriptor_pool_reset_descriptors(pool);

   return VK_SUCCESS;
}

/* descriptor set commands */

VkResult
vn_AllocateDescriptorSets(VkDevice device,
                          const VkDescriptorSetAllocateInfo *pAllocateInfo,
                          VkDescriptorSet *pDescriptorSets)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_descriptor_pool *pool =
      vn_descriptor_pool_from_handle(pAllocateInfo->descriptorPool);
   const VkAllocationCallbacks *alloc = &pool->allocator;
   const VkDescriptorSetVariableDescriptorCountAllocateInfo *variable_info =
      NULL;
   VkResult result;

   /* 14.2.3. Allocation of Descriptor Sets
    *
    * If descriptorSetCount is zero or this structure is not included in
    * the pNext chain, then the variable lengths are considered to be zero.
    */
   variable_info = vk_find_struct_const(
      pAllocateInfo->pNext,
      DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO);

   if (variable_info && !variable_info->descriptorSetCount)
      variable_info = NULL;

   for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
      struct vn_descriptor_set_layout *layout =
         vn_descriptor_set_layout_from_handle(pAllocateInfo->pSetLayouts[i]);
      uint32_t last_binding_descriptor_count = 0;
      struct vn_descriptor_set *set = NULL;

      /* 14.2.3. Allocation of Descriptor Sets
       *
       * If VkDescriptorSetAllocateInfo::pSetLayouts[i] does not include a
       * variable count descriptor binding, then pDescriptorCounts[i] is
       * ignored.
       */
      if (!layout->has_variable_descriptor_count) {
         last_binding_descriptor_count =
            layout->bindings[layout->last_binding].count;
      } else if (variable_info) {
         last_binding_descriptor_count = variable_info->pDescriptorCounts[i];
      }

      if (!vn_descriptor_pool_alloc_descriptors(
             pool, layout, last_binding_descriptor_count)) {
         pDescriptorSets[i] = VK_NULL_HANDLE;
         result = VK_ERROR_OUT_OF_POOL_MEMORY;
         goto fail;
      }

      set = vk_zalloc(alloc, sizeof(*set), VN_DEFAULT_ALIGN,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!set) {
         vn_descriptor_pool_free_descriptors(pool, layout,
                                             last_binding_descriptor_count);
         pDescriptorSets[i] = VK_NULL_HANDLE;
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto fail;
      }

      vn_object_base_init(&set->base, VK_OBJECT_TYPE_DESCRIPTOR_SET,
                          &dev->base);

      /* We might reorder vkCmdBindDescriptorSets after
       * vkDestroyDescriptorSetLayout due to batching.  The spec says
       *
       *   VkDescriptorSetLayout objects may be accessed by commands that
       *   operate on descriptor sets allocated using that layout, and those
       *   descriptor sets must not be updated with vkUpdateDescriptorSets
       *   after the descriptor set layout has been destroyed. Otherwise, a
       *   VkDescriptorSetLayout object passed as a parameter to create
       *   another object is not further accessed by that object after the
       *   duration of the command it is passed into.
       *
       * It is ambiguous but the reordering is likely invalid.  Let's keep the
       * layout alive with the set to defer vkDestroyDescriptorSetLayout.
       */
      set->layout = vn_descriptor_set_layout_ref(dev, layout);
      set->last_binding_descriptor_count = last_binding_descriptor_count;
      list_addtail(&set->head, &pool->descriptor_sets);

      VkDescriptorSet set_handle = vn_descriptor_set_to_handle(set);
      pDescriptorSets[i] = set_handle;
   }

   if (pool->async_set_allocation) {
      vn_async_vkAllocateDescriptorSets(dev->instance, device, pAllocateInfo,
                                        pDescriptorSets);
   } else {
      result = vn_call_vkAllocateDescriptorSets(
         dev->instance, device, pAllocateInfo, pDescriptorSets);
      if (result != VK_SUCCESS)
         goto fail;
   }

   return VK_SUCCESS;

fail:
   for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
      struct vn_descriptor_set *set =
         vn_descriptor_set_from_handle(pDescriptorSets[i]);
      if (!set)
         break;

      vn_descriptor_pool_free_descriptors(pool, set->layout,
                                          set->last_binding_descriptor_count);

      vn_descriptor_set_destroy(dev, set, alloc);
   }

   memset(pDescriptorSets, 0,
          sizeof(*pDescriptorSets) * pAllocateInfo->descriptorSetCount);

   return vn_error(dev->instance, result);
}

VkResult
vn_FreeDescriptorSets(VkDevice device,
                      VkDescriptorPool descriptorPool,
                      uint32_t descriptorSetCount,
                      const VkDescriptorSet *pDescriptorSets)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_descriptor_pool *pool =
      vn_descriptor_pool_from_handle(descriptorPool);
   const VkAllocationCallbacks *alloc = &pool->allocator;

   vn_async_vkFreeDescriptorSets(dev->instance, device, descriptorPool,
                                 descriptorSetCount, pDescriptorSets);

   for (uint32_t i = 0; i < descriptorSetCount; i++) {
      struct vn_descriptor_set *set =
         vn_descriptor_set_from_handle(pDescriptorSets[i]);

      if (!set)
         continue;

      vn_descriptor_set_destroy(dev, set, alloc);
   }

   return VK_SUCCESS;
}

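/* Everything lives in a single allocation:
 *
 *   [vn_update_descriptor_sets][writes][images][buffers][views]
 *
 * so one vk_free() releases the writes together with the payload arrays
 * they point into.
 */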
static struct vn_update_descriptor_sets *
vn_update_descriptor_sets_alloc(uint32_t write_count,
                                uint32_t image_count,
                                uint32_t buffer_count,
                                uint32_t view_count,
                                const VkAllocationCallbacks *alloc,
                                VkSystemAllocationScope scope)
{
   const size_t writes_offset = sizeof(struct vn_update_descriptor_sets);
   const size_t images_offset =
      writes_offset + sizeof(VkWriteDescriptorSet) * write_count;
   const size_t buffers_offset =
      images_offset + sizeof(VkDescriptorImageInfo) * image_count;
   const size_t views_offset =
      buffers_offset + sizeof(VkDescriptorBufferInfo) * buffer_count;
   const size_t alloc_size = views_offset + sizeof(VkBufferView) * view_count;

   void *storage = vk_alloc(alloc, alloc_size, VN_DEFAULT_ALIGN, scope);
   if (!storage)
      return NULL;

   struct vn_update_descriptor_sets *update = storage;
   update->write_count = write_count;
   update->writes = storage + writes_offset;
   update->images = storage + images_offset;
   update->buffers = storage + buffers_offset;
   update->views = storage + views_offset;

   return update;
}

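/* Deep-copy the writes and clear the VkWriteDescriptorSet members that are
 * irrelevant for each descriptor type, so the encoder never follows stale
 * pointers.
 */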
static struct vn_update_descriptor_sets *
vn_update_descriptor_sets_parse_writes(uint32_t write_count,
                                       const VkWriteDescriptorSet *writes,
                                       const VkAllocationCallbacks *alloc)
{
   uint32_t img_count = 0;
   for (uint32_t i = 0; i < write_count; i++) {
      const VkWriteDescriptorSet *write = &writes[i];
      switch (write->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         img_count += write->descriptorCount;
         break;
      default:
         break;
      }
   }

   struct vn_update_descriptor_sets *update =
      vn_update_descriptor_sets_alloc(write_count, img_count, 0, 0, alloc,
                                      VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!update)
      return NULL;

   /* the encoder does not ignore
    * VkWriteDescriptorSet::{pImageInfo,pBufferInfo,pTexelBufferView} when it
    * should
    *
    * TODO make the encoder smarter
    */
   memcpy(update->writes, writes, sizeof(*writes) * write_count);
   img_count = 0;
   for (uint32_t i = 0; i < write_count; i++) {
      const struct vn_descriptor_set *set =
         vn_descriptor_set_from_handle(writes[i].dstSet);
      const struct vn_descriptor_set_layout_binding *binding =
         &set->layout->bindings[writes[i].dstBinding];
      VkWriteDescriptorSet *write = &update->writes[i];
      VkDescriptorImageInfo *imgs = &update->images[img_count];

      switch (write->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         memcpy(imgs, write->pImageInfo,
                sizeof(*imgs) * write->descriptorCount);
         img_count += write->descriptorCount;

         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            switch (write->descriptorType) {
            case VK_DESCRIPTOR_TYPE_SAMPLER:
               imgs[j].imageView = VK_NULL_HANDLE;
               break;
            case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
               if (binding->has_immutable_samplers)
                  imgs[j].sampler = VK_NULL_HANDLE;
               break;
            case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
            case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
            case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
               imgs[j].sampler = VK_NULL_HANDLE;
               break;
            default:
               break;
            }
         }

         write->pImageInfo = imgs;
         write->pBufferInfo = NULL;
         write->pTexelBufferView = NULL;
         break;
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         write->pImageInfo = NULL;
         write->pBufferInfo = NULL;
         break;
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         write->pImageInfo = NULL;
         write->pTexelBufferView = NULL;
         break;
      default:
         write->pImageInfo = NULL;
         write->pBufferInfo = NULL;
         write->pTexelBufferView = NULL;
         break;
      }
   }

   return update;
}

void
vn_UpdateDescriptorSets(VkDevice device,
                        uint32_t descriptorWriteCount,
                        const VkWriteDescriptorSet *pDescriptorWrites,
                        uint32_t descriptorCopyCount,
                        const VkCopyDescriptorSet *pDescriptorCopies)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;

   struct vn_update_descriptor_sets *update =
      vn_update_descriptor_sets_parse_writes(descriptorWriteCount,
                                             pDescriptorWrites, alloc);
   if (!update) {
      /* TODO update one-by-one? */
      vn_log(dev->instance, "TODO descriptor set update ignored due to OOM");
      return;
   }

   vn_async_vkUpdateDescriptorSets(dev->instance, device, update->write_count,
                                   update->writes, descriptorCopyCount,
                                   pDescriptorCopies);

   vk_free(alloc, update);
}

/* descriptor update template commands */

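/* Pre-build a vn_update_descriptor_sets with one VkWriteDescriptorSet per
 * template entry.  The writes' pImageInfo/pBufferInfo/pTexelBufferView
 * already point at reserved slots, to be filled from pData at update time.
 */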
static struct vn_update_descriptor_sets *
vn_update_descriptor_sets_parse_template(
   const VkDescriptorUpdateTemplateCreateInfo *create_info,
   const VkAllocationCallbacks *alloc,
   struct vn_descriptor_update_template_entry *entries)
{
   uint32_t img_count = 0;
   uint32_t buf_count = 0;
   uint32_t view_count = 0;
   for (uint32_t i = 0; i < create_info->descriptorUpdateEntryCount; i++) {
      const VkDescriptorUpdateTemplateEntry *entry =
         &create_info->pDescriptorUpdateEntries[i];

      switch (entry->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         img_count += entry->descriptorCount;
         break;
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         view_count += entry->descriptorCount;
         break;
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         buf_count += entry->descriptorCount;
         break;
      default:
         unreachable("unhandled descriptor type");
         break;
      }
   }

   struct vn_update_descriptor_sets *update = vn_update_descriptor_sets_alloc(
      create_info->descriptorUpdateEntryCount, img_count, buf_count,
      view_count, alloc, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!update)
      return NULL;

   img_count = 0;
   buf_count = 0;
   view_count = 0;
   for (uint32_t i = 0; i < create_info->descriptorUpdateEntryCount; i++) {
      const VkDescriptorUpdateTemplateEntry *entry =
         &create_info->pDescriptorUpdateEntries[i];
      VkWriteDescriptorSet *write = &update->writes[i];

      write->sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
      write->pNext = NULL;
      write->dstBinding = entry->dstBinding;
      write->dstArrayElement = entry->dstArrayElement;
      write->descriptorCount = entry->descriptorCount;
      write->descriptorType = entry->descriptorType;

      entries[i].offset = entry->offset;
      entries[i].stride = entry->stride;

      switch (entry->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         write->pImageInfo = &update->images[img_count];
         write->pBufferInfo = NULL;
         write->pTexelBufferView = NULL;
         img_count += entry->descriptorCount;
         break;
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         write->pImageInfo = NULL;
         write->pBufferInfo = NULL;
         write->pTexelBufferView = &update->views[view_count];
         view_count += entry->descriptorCount;
         break;
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         write->pImageInfo = NULL;
         write->pBufferInfo = &update->buffers[buf_count];
         write->pTexelBufferView = NULL;
         buf_count += entry->descriptorCount;
         break;
      default:
         break;
      }
   }

   return update;
}

VkResult
vn_CreateDescriptorUpdateTemplate(
   VkDevice device,
   const VkDescriptorUpdateTemplateCreateInfo *pCreateInfo,
   const VkAllocationCallbacks *pAllocator,
   VkDescriptorUpdateTemplate *pDescriptorUpdateTemplate)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   const size_t templ_size =
      offsetof(struct vn_descriptor_update_template,
               entries[pCreateInfo->descriptorUpdateEntryCount + 1]);
   struct vn_descriptor_update_template *templ = vk_zalloc(
      alloc, templ_size, VN_DEFAULT_ALIGN, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!templ)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&templ->base,
                       VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE, &dev->base);

   templ->update = vn_update_descriptor_sets_parse_template(
      pCreateInfo, alloc, templ->entries);
   if (!templ->update) {
      vk_free(alloc, templ);
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   mtx_init(&templ->mutex, mtx_plain);

   /* no host object */
   VkDescriptorUpdateTemplate templ_handle =
      vn_descriptor_update_template_to_handle(templ);
   *pDescriptorUpdateTemplate = templ_handle;

   return VK_SUCCESS;
}

void
vn_DestroyDescriptorUpdateTemplate(
   VkDevice device,
   VkDescriptorUpdateTemplate descriptorUpdateTemplate,
   const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_descriptor_update_template *templ =
      vn_descriptor_update_template_from_handle(descriptorUpdateTemplate);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!templ)
      return;

   /* no host object */
   vk_free(alloc, templ->update);
   mtx_destroy(&templ->mutex);

   vn_object_base_fini(&templ->base);
   vk_free(alloc, templ);
}

void
vn_UpdateDescriptorSetWithTemplate(
   VkDevice device,
   VkDescriptorSet descriptorSet,
   VkDescriptorUpdateTemplate descriptorUpdateTemplate,
   const void *pData)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_descriptor_set *set =
      vn_descriptor_set_from_handle(descriptorSet);
   struct vn_descriptor_update_template *templ =
      vn_descriptor_update_template_from_handle(descriptorUpdateTemplate);
   struct vn_update_descriptor_sets *update = templ->update;

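   /* templ->update is shared mutable state: the pre-built writes are patched
    * in place below, so concurrent updates using the same template must be
    * serialized
    */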
   /* duplicate update instead to avoid locking? */
   mtx_lock(&templ->mutex);

   for (uint32_t i = 0; i < update->write_count; i++) {
      const struct vn_descriptor_update_template_entry *entry =
         &templ->entries[i];
      const struct vn_descriptor_set_layout_binding *binding =
         &set->layout->bindings[update->writes[i].dstBinding];
      VkWriteDescriptorSet *write = &update->writes[i];

      write->dstSet = vn_descriptor_set_to_handle(set);

      switch (write->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            const bool need_sampler =
               (write->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER ||
                write->descriptorType ==
                   VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) &&
               !binding->has_immutable_samplers;
            const bool need_view =
               write->descriptorType != VK_DESCRIPTOR_TYPE_SAMPLER;
            const VkDescriptorImageInfo *src =
               pData + entry->offset + entry->stride * j;
            VkDescriptorImageInfo *dst =
               (VkDescriptorImageInfo *)&write->pImageInfo[j];

            dst->sampler = need_sampler ? src->sampler : VK_NULL_HANDLE;
            dst->imageView = need_view ? src->imageView : VK_NULL_HANDLE;
            dst->imageLayout = src->imageLayout;
         }
         break;
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            const VkBufferView *src =
               pData + entry->offset + entry->stride * j;
            VkBufferView *dst = (VkBufferView *)&write->pTexelBufferView[j];
            *dst = *src;
         }
         break;
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            const VkDescriptorBufferInfo *src =
               pData + entry->offset + entry->stride * j;
            VkDescriptorBufferInfo *dst =
               (VkDescriptorBufferInfo *)&write->pBufferInfo[j];
            *dst = *src;
         }
         break;
      default:
         unreachable("unhandled descriptor type");
         break;
      }
   }

   vn_async_vkUpdateDescriptorSets(dev->instance, device, update->write_count,
                                   update->writes, 0, NULL);

   mtx_unlock(&templ->mutex);
}