/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv and radv which are:
 * Copyright © 2015 Intel Corporation
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 */

#include "vn_descriptor_set.h"

#include "venus-protocol/vn_protocol_driver_descriptor_pool.h"
#include "venus-protocol/vn_protocol_driver_descriptor_set.h"
#include "venus-protocol/vn_protocol_driver_descriptor_set_layout.h"
#include "venus-protocol/vn_protocol_driver_descriptor_update_template.h"

#include "vn_device.h"

static void
vn_descriptor_set_layout_destroy(struct vn_device *dev,
                                 struct vn_descriptor_set_layout *layout)
{
   VkDevice dev_handle = vn_device_to_handle(dev);
   VkDescriptorSetLayout layout_handle =
      vn_descriptor_set_layout_to_handle(layout);
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;

   vn_async_vkDestroyDescriptorSetLayout(dev->instance, dev_handle,
                                         layout_handle, NULL);

   vn_object_base_fini(&layout->base);
   vk_free(alloc, layout);
}

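/* A descriptor set holds a reference to its layout (see
 * vn_AllocateDescriptorSets), so the renderer-side layout is destroyed only
 * when the last set allocated from it goes away.
 */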
static inline struct vn_descriptor_set_layout *
vn_descriptor_set_layout_ref(struct vn_device *dev,
                             struct vn_descriptor_set_layout *layout)
{
   vn_refcount_inc(&layout->refcount);
   return layout;
}

static inline void
vn_descriptor_set_layout_unref(struct vn_device *dev,
                               struct vn_descriptor_set_layout *layout)
{
   if (vn_refcount_dec(&layout->refcount))
      vn_descriptor_set_layout_destroy(dev, layout);
}

static void
vn_descriptor_set_destroy(struct vn_device *dev,
                          struct vn_descriptor_set *set,
                          const VkAllocationCallbacks *alloc)
{
   list_del(&set->head);

   vn_descriptor_set_layout_unref(dev, set->layout);

   vn_object_base_fini(&set->base);
   vk_free(alloc, set);
}

/* descriptor set layout commands */

void
vn_GetDescriptorSetLayoutSupport(
   VkDevice device,
   const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
   VkDescriptorSetLayoutSupport *pSupport)
{
   struct vn_device *dev = vn_device_from_handle(device);

   /* TODO per-device cache */
   vn_call_vkGetDescriptorSetLayoutSupport(dev->instance, device, pCreateInfo,
                                           pSupport);
}

static void
vn_descriptor_set_layout_init(
   struct vn_device *dev,
   const VkDescriptorSetLayoutCreateInfo *create_info,
   uint32_t last_binding,
   struct vn_descriptor_set_layout *layout)
{
   VkDevice dev_handle = vn_device_to_handle(dev);
   VkDescriptorSetLayout layout_handle =
      vn_descriptor_set_layout_to_handle(layout);
   const VkDescriptorSetLayoutBindingFlagsCreateInfo *binding_flags =
      vk_find_struct_const(create_info->pNext,
                           DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO);

   /* 14.2.1. Descriptor Set Layout
    *
    * If bindingCount is zero or if this structure is not included in
    * the pNext chain, the VkDescriptorBindingFlags for each descriptor
    * set layout binding is considered to be zero.
    */
   if (binding_flags && !binding_flags->bindingCount)
      binding_flags = NULL;

   layout->refcount = VN_REFCOUNT_INIT(1);
   layout->last_binding = last_binding;

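   /* layout->bindings is indexed by binding number rather than by position
    * in pBindings; sparse binding numbers leave zero-initialized entries in
    * between.
    */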
   for (uint32_t i = 0; i < create_info->bindingCount; i++) {
      const VkDescriptorSetLayoutBinding *binding_info =
         &create_info->pBindings[i];
      struct vn_descriptor_set_layout_binding *binding =
         &layout->bindings[binding_info->binding];

      if (binding_info->binding == last_binding) {
         /* 14.2.1. Descriptor Set Layout
          *
          * VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT must only be
          * used for the last binding in the descriptor set layout (i.e. the
          * binding with the largest value of binding).
          *
          * 41. Features
          *
          * descriptorBindingVariableDescriptorCount indicates whether the
          * implementation supports descriptor sets with a variable-sized last
          * binding. If this feature is not enabled,
          * VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT must not be
          * used.
          */
         layout->has_variable_descriptor_count =
            binding_flags &&
            (binding_flags->pBindingFlags[i] &
             VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT);
      }

      binding->type = binding_info->descriptorType;
      binding->count = binding_info->descriptorCount;

      switch (binding_info->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         binding->has_immutable_samplers = binding_info->pImmutableSamplers;
         break;
      default:
         break;
      }
   }

   vn_async_vkCreateDescriptorSetLayout(dev->instance, dev_handle,
                                        create_info, NULL, &layout_handle);
}

VkResult
vn_CreateDescriptorSetLayout(
   VkDevice device,
   const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
   const VkAllocationCallbacks *pAllocator,
   VkDescriptorSetLayout *pSetLayout)
{
   struct vn_device *dev = vn_device_from_handle(device);
   /* ignore pAllocator as the layout is reference-counted */
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;

   uint32_t last_binding = 0;
   VkDescriptorSetLayoutBinding *local_bindings = NULL;
   VkDescriptorSetLayoutCreateInfo local_create_info;
   if (pCreateInfo->bindingCount) {
      /* the encoder does not ignore
       * VkDescriptorSetLayoutBinding::pImmutableSamplers when it should
       */
      const size_t binding_size =
         sizeof(*pCreateInfo->pBindings) * pCreateInfo->bindingCount;
      local_bindings = vk_alloc(alloc, binding_size, VN_DEFAULT_ALIGN,
                                VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
      if (!local_bindings)
         return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

      memcpy(local_bindings, pCreateInfo->pBindings, binding_size);
      for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) {
         VkDescriptorSetLayoutBinding *binding = &local_bindings[i];

         if (last_binding < binding->binding)
            last_binding = binding->binding;

         switch (binding->descriptorType) {
         case VK_DESCRIPTOR_TYPE_SAMPLER:
         case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
            break;
         default:
            binding->pImmutableSamplers = NULL;
            break;
         }
      }

      local_create_info = *pCreateInfo;
      local_create_info.pBindings = local_bindings;
      pCreateInfo = &local_create_info;
   }

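   /* bindings is a flexible array indexed by binding number, so it needs
    * last_binding + 1 entries even when the binding numbers are sparse
    */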
   const size_t layout_size =
      offsetof(struct vn_descriptor_set_layout, bindings[last_binding + 1]);
   /* allocated with the device scope */
   struct vn_descriptor_set_layout *layout =
      vk_zalloc(alloc, layout_size, VN_DEFAULT_ALIGN,
                VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!layout) {
      vk_free(alloc, local_bindings);
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   vn_object_base_init(&layout->base, VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT,
                       &dev->base);

   vn_descriptor_set_layout_init(dev, pCreateInfo, last_binding, layout);

   vk_free(alloc, local_bindings);

   *pSetLayout = vn_descriptor_set_layout_to_handle(layout);

   return VK_SUCCESS;
}

void
vn_DestroyDescriptorSetLayout(VkDevice device,
                              VkDescriptorSetLayout descriptorSetLayout,
                              const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_descriptor_set_layout *layout =
      vn_descriptor_set_layout_from_handle(descriptorSetLayout);

   if (!layout)
      return;

   vn_descriptor_set_layout_unref(dev, layout);
}

/* descriptor pool commands */

VkResult
vn_CreateDescriptorPool(VkDevice device,
                        const VkDescriptorPoolCreateInfo *pCreateInfo,
                        const VkAllocationCallbacks *pAllocator,
                        VkDescriptorPool *pDescriptorPool)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   struct vn_descriptor_pool *pool =
      vk_zalloc(alloc, sizeof(*pool), VN_DEFAULT_ALIGN,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!pool)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&pool->base, VK_OBJECT_TYPE_DESCRIPTOR_POOL,
                       &dev->base);

   pool->allocator = *alloc;

   /* Without VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, the set
    * allocation must not fail due to a fragmented pool per spec. In this
    * case, set allocation can be asynchronous with pool resource tracking.
    */
   pool->async_set_allocation = !(
      pCreateInfo->flags & VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT);

   pool->max.set_count = pCreateInfo->maxSets;

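   /* pPoolSizes may legally contain multiple entries of the same descriptor
    * type; accumulate rather than assign
    */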
   for (uint32_t i = 0; i < pCreateInfo->poolSizeCount; i++) {
      const VkDescriptorPoolSize *pool_size = &pCreateInfo->pPoolSizes[i];

      assert(pool_size->type < VN_NUM_DESCRIPTOR_TYPES);

      pool->max.descriptor_counts[pool_size->type] +=
         pool_size->descriptorCount;
   }

   list_inithead(&pool->descriptor_sets);

   VkDescriptorPool pool_handle = vn_descriptor_pool_to_handle(pool);
   vn_async_vkCreateDescriptorPool(dev->instance, device, pCreateInfo, NULL,
                                   &pool_handle);

   *pDescriptorPool = pool_handle;

   return VK_SUCCESS;
}

void
vn_DestroyDescriptorPool(VkDevice device,
                         VkDescriptorPool descriptorPool,
                         const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_descriptor_pool *pool =
      vn_descriptor_pool_from_handle(descriptorPool);
   const VkAllocationCallbacks *alloc;

   if (!pool)
      return;

   alloc = pAllocator ? pAllocator : &pool->allocator;

   /* We must emit vkDestroyDescriptorPool before freeing the sets in
    * pool->descriptor_sets.  Otherwise, another thread might reuse their
    * object ids while they still refer to the sets in the renderer.
    */
   vn_async_vkDestroyDescriptorPool(dev->instance, device, descriptorPool,
                                    NULL);

   list_for_each_entry_safe(struct vn_descriptor_set, set,
                            &pool->descriptor_sets, head)
      vn_descriptor_set_destroy(dev, set, alloc);

   vn_object_base_fini(&pool->base);
   vk_free(alloc, pool);
}

static bool
vn_descriptor_pool_alloc_descriptors(
   struct vn_descriptor_pool *pool,
   const struct vn_descriptor_set_layout *layout,
   uint32_t last_binding_descriptor_count)
{
   struct vn_descriptor_pool_state recovery;

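   /* without async set allocation, vn_AllocateDescriptorSets calls the
    * renderer synchronously and relies on its result instead of this
    * client-side tracking
    */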
   if (!pool->async_set_allocation)
      return true;

   if (pool->used.set_count == pool->max.set_count)
      return false;

   /* backup current pool state to recovery */
   recovery = pool->used;

   ++pool->used.set_count;

   for (uint32_t i = 0; i <= layout->last_binding; i++) {
      const VkDescriptorType type = layout->bindings[i].type;
      const uint32_t count = i == layout->last_binding
                                ? last_binding_descriptor_count
                                : layout->bindings[i].count;

      pool->used.descriptor_counts[type] += count;

      if (pool->used.descriptor_counts[type] >
          pool->max.descriptor_counts[type]) {
         /* restore pool state before this allocation */
         pool->used = recovery;
         return false;
      }
   }

   return true;
}

static void
vn_descriptor_pool_free_descriptors(
   struct vn_descriptor_pool *pool,
   const struct vn_descriptor_set_layout *layout,
   uint32_t last_binding_descriptor_count)
{
   if (!pool->async_set_allocation)
      return;

   for (uint32_t i = 0; i <= layout->last_binding; i++) {
      const uint32_t count = i == layout->last_binding
                                ? last_binding_descriptor_count
                                : layout->bindings[i].count;

      pool->used.descriptor_counts[layout->bindings[i].type] -= count;
   }

   --pool->used.set_count;
}

static void
vn_descriptor_pool_reset_descriptors(struct vn_descriptor_pool *pool)
{
   if (!pool->async_set_allocation)
      return;

   memset(&pool->used, 0, sizeof(pool->used));
}

VkResult
vn_ResetDescriptorPool(VkDevice device,
                       VkDescriptorPool descriptorPool,
                       VkDescriptorPoolResetFlags flags)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_descriptor_pool *pool =
      vn_descriptor_pool_from_handle(descriptorPool);
   const VkAllocationCallbacks *alloc = &pool->allocator;

   vn_async_vkResetDescriptorPool(dev->instance, device, descriptorPool,
                                  flags);

   list_for_each_entry_safe(struct vn_descriptor_set, set,
                            &pool->descriptor_sets, head)
      vn_descriptor_set_destroy(dev, set, alloc);

   vn_descriptor_pool_reset_descriptors(pool);

   return VK_SUCCESS;
}

/* descriptor set commands */

VkResult
vn_AllocateDescriptorSets(VkDevice device,
                          const VkDescriptorSetAllocateInfo *pAllocateInfo,
                          VkDescriptorSet *pDescriptorSets)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_descriptor_pool *pool =
      vn_descriptor_pool_from_handle(pAllocateInfo->descriptorPool);
   const VkAllocationCallbacks *alloc = &pool->allocator;
   const VkDescriptorSetVariableDescriptorCountAllocateInfo *variable_info =
      NULL;
   VkResult result;

   /* 14.2.3. Allocation of Descriptor Sets
    *
    * If descriptorSetCount is zero or this structure is not included in
    * the pNext chain, then the variable lengths are considered to be zero.
    */
   variable_info = vk_find_struct_const(
      pAllocateInfo->pNext,
      DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO);

   if (variable_info && !variable_info->descriptorSetCount)
      variable_info = NULL;

   for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
      struct vn_descriptor_set_layout *layout =
         vn_descriptor_set_layout_from_handle(pAllocateInfo->pSetLayouts[i]);
      uint32_t last_binding_descriptor_count = 0;
      struct vn_descriptor_set *set = NULL;

      /* 14.2.3. Allocation of Descriptor Sets
       *
       * If VkDescriptorSetAllocateInfo::pSetLayouts[i] does not include a
       * variable count descriptor binding, then pDescriptorCounts[i] is
       * ignored.
       */
      if (!layout->has_variable_descriptor_count) {
         last_binding_descriptor_count =
            layout->bindings[layout->last_binding].count;
      } else if (variable_info) {
         last_binding_descriptor_count = variable_info->pDescriptorCounts[i];
      }

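      /* charge the pool bookkeeping up front so a later failure in this
       * loop can release exactly what was charged (see
       * vn_descriptor_pool_free_descriptors in the fail path)
       */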
      if (!vn_descriptor_pool_alloc_descriptors(
             pool, layout, last_binding_descriptor_count)) {
         pDescriptorSets[i] = VK_NULL_HANDLE;
         result = VK_ERROR_OUT_OF_POOL_MEMORY;
         goto fail;
      }

      set = vk_zalloc(alloc, sizeof(*set), VN_DEFAULT_ALIGN,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!set) {
         vn_descriptor_pool_free_descriptors(pool, layout,
                                             last_binding_descriptor_count);
         pDescriptorSets[i] = VK_NULL_HANDLE;
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto fail;
      }

      vn_object_base_init(&set->base, VK_OBJECT_TYPE_DESCRIPTOR_SET,
                          &dev->base);

      /* We might reorder vkCmdBindDescriptorSets after
       * vkDestroyDescriptorSetLayout due to batching.  The spec says
       *
       *   VkDescriptorSetLayout objects may be accessed by commands that
       *   operate on descriptor sets allocated using that layout, and those
       *   descriptor sets must not be updated with vkUpdateDescriptorSets
       *   after the descriptor set layout has been destroyed. Otherwise, a
       *   VkDescriptorSetLayout object passed as a parameter to create
       *   another object is not further accessed by that object after the
       *   duration of the command it is passed into.
       *
       * It is ambiguous but the reordering is likely invalid.  Let's keep the
       * layout alive with the set to defer vkDestroyDescriptorSetLayout.
       */
      set->layout = vn_descriptor_set_layout_ref(dev, layout);
      set->last_binding_descriptor_count = last_binding_descriptor_count;
      list_addtail(&set->head, &pool->descriptor_sets);

      VkDescriptorSet set_handle = vn_descriptor_set_to_handle(set);
      pDescriptorSets[i] = set_handle;
   }

   if (pool->async_set_allocation) {
      vn_async_vkAllocateDescriptorSets(dev->instance, device, pAllocateInfo,
                                        pDescriptorSets);
   } else {
      result = vn_call_vkAllocateDescriptorSets(
         dev->instance, device, pAllocateInfo, pDescriptorSets);
      if (result != VK_SUCCESS)
         goto fail;
   }

   return VK_SUCCESS;

fail:
   for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
      struct vn_descriptor_set *set =
         vn_descriptor_set_from_handle(pDescriptorSets[i]);
      if (!set)
         break;

      vn_descriptor_pool_free_descriptors(pool, set->layout,
                                          set->last_binding_descriptor_count);

      vn_descriptor_set_destroy(dev, set, alloc);
   }

   memset(pDescriptorSets, 0,
          sizeof(*pDescriptorSets) * pAllocateInfo->descriptorSetCount);

   return vn_error(dev->instance, result);
}

VkResult
vn_FreeDescriptorSets(VkDevice device,
                      VkDescriptorPool descriptorPool,
                      uint32_t descriptorSetCount,
                      const VkDescriptorSet *pDescriptorSets)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_descriptor_pool *pool =
      vn_descriptor_pool_from_handle(descriptorPool);
   const VkAllocationCallbacks *alloc = &pool->allocator;

   vn_async_vkFreeDescriptorSets(dev->instance, device, descriptorPool,
                                 descriptorSetCount, pDescriptorSets);

   for (uint32_t i = 0; i < descriptorSetCount; i++) {
      struct vn_descriptor_set *set =
         vn_descriptor_set_from_handle(pDescriptorSets[i]);

      if (!set)
         continue;

      vn_descriptor_set_destroy(dev, set, alloc);
   }

   return VK_SUCCESS;
}

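/* Parsed writes live in a single allocation: the VkWriteDescriptorSet array
 * followed by the image, buffer, and texel buffer view arrays it points
 * into.
 */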
static struct vn_update_descriptor_sets *
vn_update_descriptor_sets_alloc(uint32_t write_count,
                                uint32_t image_count,
                                uint32_t buffer_count,
                                uint32_t view_count,
                                const VkAllocationCallbacks *alloc,
                                VkSystemAllocationScope scope)
{
   const size_t writes_offset = sizeof(struct vn_update_descriptor_sets);
   const size_t images_offset =
      writes_offset + sizeof(VkWriteDescriptorSet) * write_count;
   const size_t buffers_offset =
      images_offset + sizeof(VkDescriptorImageInfo) * image_count;
   const size_t views_offset =
      buffers_offset + sizeof(VkDescriptorBufferInfo) * buffer_count;
   const size_t alloc_size = views_offset + sizeof(VkBufferView) * view_count;

   void *storage = vk_alloc(alloc, alloc_size, VN_DEFAULT_ALIGN, scope);
   if (!storage)
      return NULL;

   struct vn_update_descriptor_sets *update = storage;
   update->write_count = write_count;
   update->writes = storage + writes_offset;
   update->images = storage + images_offset;
   update->buffers = storage + buffers_offset;
   update->views = storage + views_offset;

   return update;
}

static struct vn_update_descriptor_sets *
vn_update_descriptor_sets_parse_writes(uint32_t write_count,
                                       const VkWriteDescriptorSet *writes,
                                       const VkAllocationCallbacks *alloc)
{
   uint32_t img_count = 0;
   for (uint32_t i = 0; i < write_count; i++) {
      const VkWriteDescriptorSet *write = &writes[i];
      switch (write->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         img_count += write->descriptorCount;
         break;
      default:
         break;
      }
   }

   struct vn_update_descriptor_sets *update =
      vn_update_descriptor_sets_alloc(write_count, img_count, 0, 0, alloc,
                                      VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!update)
      return NULL;

   /* the encoder does not ignore
    * VkWriteDescriptorSet::{pImageInfo,pBufferInfo,pTexelBufferView} when it
    * should
    *
    * TODO make the encoder smarter
    */
   memcpy(update->writes, writes, sizeof(*writes) * write_count);
   img_count = 0;
   for (uint32_t i = 0; i < write_count; i++) {
      const struct vn_descriptor_set *set =
         vn_descriptor_set_from_handle(writes[i].dstSet);
      const struct vn_descriptor_set_layout_binding *binding =
         &set->layout->bindings[writes[i].dstBinding];
      VkWriteDescriptorSet *write = &update->writes[i];
      VkDescriptorImageInfo *imgs = &update->images[img_count];

      switch (write->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         memcpy(imgs, write->pImageInfo,
                sizeof(*imgs) * write->descriptorCount);
         img_count += write->descriptorCount;

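         /* scrub handles the spec says are ignored so the encoder does not
          * serialize stale or invalid ones
          */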
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            switch (write->descriptorType) {
            case VK_DESCRIPTOR_TYPE_SAMPLER:
               imgs[j].imageView = VK_NULL_HANDLE;
               break;
            case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
               if (binding->has_immutable_samplers)
                  imgs[j].sampler = VK_NULL_HANDLE;
               break;
            case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
            case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
            case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
               imgs[j].sampler = VK_NULL_HANDLE;
               break;
            default:
               break;
            }
         }

         write->pImageInfo = imgs;
         write->pBufferInfo = NULL;
         write->pTexelBufferView = NULL;
         break;
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         write->pImageInfo = NULL;
         write->pBufferInfo = NULL;
         break;
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         write->pImageInfo = NULL;
         write->pTexelBufferView = NULL;
         break;
      default:
         write->pImageInfo = NULL;
         write->pBufferInfo = NULL;
         write->pTexelBufferView = NULL;
         break;
      }
   }

   return update;
}

void
vn_UpdateDescriptorSets(VkDevice device,
                        uint32_t descriptorWriteCount,
                        const VkWriteDescriptorSet *pDescriptorWrites,
                        uint32_t descriptorCopyCount,
                        const VkCopyDescriptorSet *pDescriptorCopies)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;

   struct vn_update_descriptor_sets *update =
      vn_update_descriptor_sets_parse_writes(descriptorWriteCount,
                                             pDescriptorWrites, alloc);
   if (!update) {
      /* TODO update one-by-one? */
      vn_log(dev->instance, "TODO descriptor set update ignored due to OOM");
      return;
   }

   vn_async_vkUpdateDescriptorSets(dev->instance, device, update->write_count,
                                   update->writes, descriptorCopyCount,
                                   pDescriptorCopies);

   vk_free(alloc, update);
}

/* descriptor update template commands */

static struct vn_update_descriptor_sets *
vn_update_descriptor_sets_parse_template(
   const VkDescriptorUpdateTemplateCreateInfo *create_info,
   const VkAllocationCallbacks *alloc,
   struct vn_descriptor_update_template_entry *entries)
{
   uint32_t img_count = 0;
   uint32_t buf_count = 0;
   uint32_t view_count = 0;
   for (uint32_t i = 0; i < create_info->descriptorUpdateEntryCount; i++) {
      const VkDescriptorUpdateTemplateEntry *entry =
         &create_info->pDescriptorUpdateEntries[i];

      switch (entry->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         img_count += entry->descriptorCount;
         break;
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         view_count += entry->descriptorCount;
         break;
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         buf_count += entry->descriptorCount;
         break;
      default:
         unreachable("unhandled descriptor type");
         break;
      }
   }

   struct vn_update_descriptor_sets *update = vn_update_descriptor_sets_alloc(
      create_info->descriptorUpdateEntryCount, img_count, buf_count,
      view_count, alloc, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!update)
      return NULL;

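   /* second pass: point each write at its slice of the shared image, buffer,
    * and view arrays; the descriptor data itself is filled in later by
    * vn_UpdateDescriptorSetWithTemplate
    */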
   img_count = 0;
   buf_count = 0;
   view_count = 0;
   for (uint32_t i = 0; i < create_info->descriptorUpdateEntryCount; i++) {
      const VkDescriptorUpdateTemplateEntry *entry =
         &create_info->pDescriptorUpdateEntries[i];
      VkWriteDescriptorSet *write = &update->writes[i];

      write->sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
      write->pNext = NULL;
      write->dstBinding = entry->dstBinding;
      write->dstArrayElement = entry->dstArrayElement;
      write->descriptorCount = entry->descriptorCount;
      write->descriptorType = entry->descriptorType;

      entries[i].offset = entry->offset;
      entries[i].stride = entry->stride;

      switch (entry->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         write->pImageInfo = &update->images[img_count];
         write->pBufferInfo = NULL;
         write->pTexelBufferView = NULL;
         img_count += entry->descriptorCount;
         break;
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         write->pImageInfo = NULL;
         write->pBufferInfo = NULL;
         write->pTexelBufferView = &update->views[view_count];
         view_count += entry->descriptorCount;
         break;
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         write->pImageInfo = NULL;
         write->pBufferInfo = &update->buffers[buf_count];
         write->pTexelBufferView = NULL;
         buf_count += entry->descriptorCount;
         break;
      default:
         break;
      }
   }

   return update;
}

VkResult
vn_CreateDescriptorUpdateTemplate(
   VkDevice device,
   const VkDescriptorUpdateTemplateCreateInfo *pCreateInfo,
   const VkAllocationCallbacks *pAllocator,
   VkDescriptorUpdateTemplate *pDescriptorUpdateTemplate)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   const size_t templ_size =
      offsetof(struct vn_descriptor_update_template,
               entries[pCreateInfo->descriptorUpdateEntryCount + 1]);
   struct vn_descriptor_update_template *templ = vk_zalloc(
      alloc, templ_size, VN_DEFAULT_ALIGN, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!templ)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&templ->base,
                       VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE, &dev->base);

   templ->update = vn_update_descriptor_sets_parse_template(
      pCreateInfo, alloc, templ->entries);
   if (!templ->update) {
      vk_free(alloc, templ);
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
   }

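   /* the parsed update is reused by every vn_UpdateDescriptorSetWithTemplate
    * call on this template, so updates are serialized with a mutex instead
    * of rebuilding the writes each time
    */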
   mtx_init(&templ->mutex, mtx_plain);

   /* no host object */
   VkDescriptorUpdateTemplate templ_handle =
      vn_descriptor_update_template_to_handle(templ);
   *pDescriptorUpdateTemplate = templ_handle;

   return VK_SUCCESS;
}

void
vn_DestroyDescriptorUpdateTemplate(
   VkDevice device,
   VkDescriptorUpdateTemplate descriptorUpdateTemplate,
   const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_descriptor_update_template *templ =
      vn_descriptor_update_template_from_handle(descriptorUpdateTemplate);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!templ)
      return;

   /* no host object */
   vk_free(alloc, templ->update);
   mtx_destroy(&templ->mutex);

   vn_object_base_fini(&templ->base);
   vk_free(alloc, templ);
}

void
vn_UpdateDescriptorSetWithTemplate(
   VkDevice device,
   VkDescriptorSet descriptorSet,
   VkDescriptorUpdateTemplate descriptorUpdateTemplate,
   const void *pData)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_descriptor_set *set =
      vn_descriptor_set_from_handle(descriptorSet);
   struct vn_descriptor_update_template *templ =
      vn_descriptor_update_template_from_handle(descriptorUpdateTemplate);
   struct vn_update_descriptor_sets *update = templ->update;

   /* duplicate update instead to avoid locking? */
   mtx_lock(&templ->mutex);

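   /* walk pData with the offset and stride captured from each template
    * entry, copying the descriptor data into the preallocated write arrays
    */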
   for (uint32_t i = 0; i < update->write_count; i++) {
      const struct vn_descriptor_update_template_entry *entry =
         &templ->entries[i];
      const struct vn_descriptor_set_layout_binding *binding =
         &set->layout->bindings[update->writes[i].dstBinding];
      VkWriteDescriptorSet *write = &update->writes[i];

      write->dstSet = vn_descriptor_set_to_handle(set);

      switch (write->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            const bool need_sampler =
               (write->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER ||
                write->descriptorType ==
                   VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) &&
               !binding->has_immutable_samplers;
            const bool need_view =
               write->descriptorType != VK_DESCRIPTOR_TYPE_SAMPLER;
            const VkDescriptorImageInfo *src =
               pData + entry->offset + entry->stride * j;
            VkDescriptorImageInfo *dst =
               (VkDescriptorImageInfo *)&write->pImageInfo[j];

            dst->sampler = need_sampler ? src->sampler : VK_NULL_HANDLE;
            dst->imageView = need_view ? src->imageView : VK_NULL_HANDLE;
            dst->imageLayout = src->imageLayout;
         }
         break;
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            const VkBufferView *src =
               pData + entry->offset + entry->stride * j;
            VkBufferView *dst = (VkBufferView *)&write->pTexelBufferView[j];
            *dst = *src;
         }
         break;
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            const VkDescriptorBufferInfo *src =
               pData + entry->offset + entry->stride * j;
            VkDescriptorBufferInfo *dst =
               (VkDescriptorBufferInfo *)&write->pBufferInfo[j];
            *dst = *src;
         }
         break;
      default:
         unreachable("unhandled descriptor type");
         break;
      }
   }

   vn_async_vkUpdateDescriptorSets(dev->instance, device, update->write_count,
                                   update->writes, 0, NULL);

   mtx_unlock(&templ->mutex);
}