1 /*
2 * Copyright © 2019 Red Hat.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "lvp_private.h"
25 #include "pipe/p_context.h"
26 #include "vk_util.h"
27
lvp_create_cmd_buffer(struct lvp_device * device,struct lvp_cmd_pool * pool,VkCommandBufferLevel level,VkCommandBuffer * pCommandBuffer)28 static VkResult lvp_create_cmd_buffer(
29 struct lvp_device * device,
30 struct lvp_cmd_pool * pool,
31 VkCommandBufferLevel level,
32 VkCommandBuffer* pCommandBuffer)
33 {
34 struct lvp_cmd_buffer *cmd_buffer;
35
36 cmd_buffer = vk_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
37 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
38 if (cmd_buffer == NULL)
39 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
40
41 VkResult result = vk_command_buffer_init(&cmd_buffer->vk, &device->vk);
42 if (result != VK_SUCCESS) {
43 vk_free(&pool->alloc, cmd_buffer);
44 return result;
45 }
46
47 cmd_buffer->device = device;
48 cmd_buffer->pool = pool;
49
50 cmd_buffer->queue.alloc = &pool->alloc;
51 list_inithead(&cmd_buffer->queue.cmds);
52
53 cmd_buffer->status = LVP_CMD_BUFFER_STATUS_INITIAL;
54 if (pool) {
55 list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
56 } else {
57 /* Init the pool_link so we can safefly call list_del when we destroy
58 * the command buffer
59 */
60 list_inithead(&cmd_buffer->pool_link);
61 }
62 *pCommandBuffer = lvp_cmd_buffer_to_handle(cmd_buffer);
63
64 return VK_SUCCESS;
65 }
66
/* Return a command buffer to the freshly-created (INITIAL) state.
 *
 * Frees every recorded command and resets the common vk_command_buffer
 * state.  Never fails in this implementation, but returns a VkResult to
 * match the Vulkan entrypoint contract of its callers.
 */
static VkResult lvp_reset_cmd_buffer(struct lvp_cmd_buffer *cmd_buffer)
{
   vk_command_buffer_reset(&cmd_buffer->vk);

   /* Drop all queued commands, then re-init the list head so the buffer
    * can be recorded into again. */
   vk_free_queue(&cmd_buffer->queue);
   list_inithead(&cmd_buffer->queue.cmds);
   cmd_buffer->status = LVP_CMD_BUFFER_STATUS_INITIAL;
   return VK_SUCCESS;
}
76
lvp_AllocateCommandBuffers(VkDevice _device,const VkCommandBufferAllocateInfo * pAllocateInfo,VkCommandBuffer * pCommandBuffers)77 VKAPI_ATTR VkResult VKAPI_CALL lvp_AllocateCommandBuffers(
78 VkDevice _device,
79 const VkCommandBufferAllocateInfo* pAllocateInfo,
80 VkCommandBuffer* pCommandBuffers)
81 {
82 LVP_FROM_HANDLE(lvp_device, device, _device);
83 LVP_FROM_HANDLE(lvp_cmd_pool, pool, pAllocateInfo->commandPool);
84
85 VkResult result = VK_SUCCESS;
86 uint32_t i;
87
88 for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
89
90 if (!list_is_empty(&pool->free_cmd_buffers)) {
91 struct lvp_cmd_buffer *cmd_buffer = list_first_entry(&pool->free_cmd_buffers, struct lvp_cmd_buffer, pool_link);
92
93 list_del(&cmd_buffer->pool_link);
94 list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
95
96 result = lvp_reset_cmd_buffer(cmd_buffer);
97 cmd_buffer->level = pAllocateInfo->level;
98 vk_command_buffer_finish(&cmd_buffer->vk);
99 VkResult init_result =
100 vk_command_buffer_init(&cmd_buffer->vk, &device->vk);
101 if (init_result != VK_SUCCESS)
102 result = init_result;
103
104 pCommandBuffers[i] = lvp_cmd_buffer_to_handle(cmd_buffer);
105 } else {
106 result = lvp_create_cmd_buffer(device, pool, pAllocateInfo->level,
107 &pCommandBuffers[i]);
108 if (result != VK_SUCCESS)
109 break;
110 }
111 }
112
113 if (result != VK_SUCCESS) {
114 lvp_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
115 i, pCommandBuffers);
116 memset(pCommandBuffers, 0,
117 sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
118 }
119
120 return result;
121 }
122
/* Fully tear down a command buffer: recorded commands, pool linkage,
 * common vk state, and finally the buffer allocation itself.  The base
 * object must be finished before the backing memory is freed. */
static void
lvp_cmd_buffer_destroy(struct lvp_cmd_buffer *cmd_buffer)
{
   vk_free_queue(&cmd_buffer->queue);
   list_del(&cmd_buffer->pool_link);
   vk_command_buffer_finish(&cmd_buffer->vk);
   vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}
131
/* vkFreeCommandBuffers: pooled buffers are not destroyed but parked on
 * the pool's free list for reuse by a later vkAllocateCommandBuffers
 * (which resets them lazily on reallocation). */
VKAPI_ATTR void VKAPI_CALL lvp_FreeCommandBuffers(
   VkDevice                                    device,
   VkCommandPool                               commandPool,
   uint32_t                                    commandBufferCount,
   const VkCommandBuffer*                      pCommandBuffers)
{
   for (uint32_t i = 0; i < commandBufferCount; i++) {
      LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

      /* VK_NULL_HANDLE entries are legal and must be ignored. */
      if (cmd_buffer) {
         if (cmd_buffer->pool) {
            /* Move from the live list to the free list. */
            list_del(&cmd_buffer->pool_link);
            list_addtail(&cmd_buffer->pool_link, &cmd_buffer->pool->free_cmd_buffers);
         } else
            lvp_cmd_buffer_destroy(cmd_buffer);
      }
   }
}
150
/* vkResetCommandBuffer: `flags` (e.g. RELEASE_RESOURCES) is ignored
 * here because lvp_reset_cmd_buffer always frees the recorded commands
 * anyway. */
VKAPI_ATTR VkResult VKAPI_CALL lvp_ResetCommandBuffer(
   VkCommandBuffer                              commandBuffer,
   VkCommandBufferResetFlags                    flags)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);

   return lvp_reset_cmd_buffer(cmd_buffer);
}
159
lvp_BeginCommandBuffer(VkCommandBuffer commandBuffer,const VkCommandBufferBeginInfo * pBeginInfo)160 VKAPI_ATTR VkResult VKAPI_CALL lvp_BeginCommandBuffer(
161 VkCommandBuffer commandBuffer,
162 const VkCommandBufferBeginInfo* pBeginInfo)
163 {
164 LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
165 VkResult result;
166 if (cmd_buffer->status != LVP_CMD_BUFFER_STATUS_INITIAL) {
167 result = lvp_reset_cmd_buffer(cmd_buffer);
168 if (result != VK_SUCCESS)
169 return result;
170 }
171 cmd_buffer->status = LVP_CMD_BUFFER_STATUS_RECORDING;
172 return VK_SUCCESS;
173 }
174
/* vkEndCommandBuffer: recording is complete — only the state transition
 * to EXECUTABLE is tracked here; the queued commands are already in
 * their final form. */
VKAPI_ATTR VkResult VKAPI_CALL lvp_EndCommandBuffer(
   VkCommandBuffer                              commandBuffer)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   cmd_buffer->status = LVP_CMD_BUFFER_STATUS_EXECUTABLE;
   return VK_SUCCESS;
}
182
lvp_CreateCommandPool(VkDevice _device,const VkCommandPoolCreateInfo * pCreateInfo,const VkAllocationCallbacks * pAllocator,VkCommandPool * pCmdPool)183 VKAPI_ATTR VkResult VKAPI_CALL lvp_CreateCommandPool(
184 VkDevice _device,
185 const VkCommandPoolCreateInfo* pCreateInfo,
186 const VkAllocationCallbacks* pAllocator,
187 VkCommandPool* pCmdPool)
188 {
189 LVP_FROM_HANDLE(lvp_device, device, _device);
190 struct lvp_cmd_pool *pool;
191
192 pool = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*pool), 8,
193 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
194 if (pool == NULL)
195 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
196
197 vk_object_base_init(&device->vk, &pool->base,
198 VK_OBJECT_TYPE_COMMAND_POOL);
199 if (pAllocator)
200 pool->alloc = *pAllocator;
201 else
202 pool->alloc = device->vk.alloc;
203
204 list_inithead(&pool->cmd_buffers);
205 list_inithead(&pool->free_cmd_buffers);
206
207 *pCmdPool = lvp_cmd_pool_to_handle(pool);
208
209 return VK_SUCCESS;
210 }
211
/* vkDestroyCommandPool: destroy every command buffer the pool still
 * owns (both the live list and the parked-for-reuse free list) before
 * freeing the pool itself. */
VKAPI_ATTR void VKAPI_CALL lvp_DestroyCommandPool(
   VkDevice                                    _device,
   VkCommandPool                               commandPool,
   const VkAllocationCallbacks*                pAllocator)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_cmd_pool, pool, commandPool);

   /* Destroying VK_NULL_HANDLE is a no-op per spec. */
   if (!pool)
      return;

   /* _safe variant required: lvp_cmd_buffer_destroy unlinks the entry
    * we are iterating over. */
   list_for_each_entry_safe(struct lvp_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link) {
      lvp_cmd_buffer_destroy(cmd_buffer);
   }

   list_for_each_entry_safe(struct lvp_cmd_buffer, cmd_buffer,
                            &pool->free_cmd_buffers, pool_link) {
      lvp_cmd_buffer_destroy(cmd_buffer);
   }

   vk_object_base_finish(&pool->base);
   vk_free2(&device->vk.alloc, pAllocator, pool);
}
236
/* vkResetCommandPool: reset every live command buffer in the pool.
 * Buffers on the free list are reset on reallocation, so only the
 * active list is walked here. */
VKAPI_ATTR VkResult VKAPI_CALL lvp_ResetCommandPool(
   VkDevice                                    device,
   VkCommandPool                               commandPool,
   VkCommandPoolResetFlags                     flags)
{
   LVP_FROM_HANDLE(lvp_cmd_pool, pool, commandPool);
   VkResult result;

   list_for_each_entry(struct lvp_cmd_buffer, cmd_buffer,
                       &pool->cmd_buffers, pool_link) {
      result = lvp_reset_cmd_buffer(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;
   }
   return VK_SUCCESS;
}
253
/* vkTrimCommandPool: release the memory held by buffers parked on the
 * free list; live command buffers are left untouched. */
VKAPI_ATTR void VKAPI_CALL lvp_TrimCommandPool(
   VkDevice                                    device,
   VkCommandPool                               commandPool,
   VkCommandPoolTrimFlags                      flags)
{
   LVP_FROM_HANDLE(lvp_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   /* _safe variant required: lvp_cmd_buffer_destroy unlinks the entry
    * we are iterating over. */
   list_for_each_entry_safe(struct lvp_cmd_buffer, cmd_buffer,
                            &pool->free_cmd_buffers, pool_link) {
      lvp_cmd_buffer_destroy(cmd_buffer);
   }
}
269
lvp_CmdDrawMultiEXT(VkCommandBuffer commandBuffer,uint32_t drawCount,const VkMultiDrawInfoEXT * pVertexInfo,uint32_t instanceCount,uint32_t firstInstance,uint32_t stride)270 VKAPI_ATTR void VKAPI_CALL lvp_CmdDrawMultiEXT(
271 VkCommandBuffer commandBuffer,
272 uint32_t drawCount,
273 const VkMultiDrawInfoEXT *pVertexInfo,
274 uint32_t instanceCount,
275 uint32_t firstInstance,
276 uint32_t stride)
277 {
278 LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
279
280 struct vk_cmd_queue_entry *cmd = vk_zalloc(cmd_buffer->queue.alloc,
281 sizeof(*cmd), 8,
282 VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
283 if (!cmd)
284 return;
285
286 cmd->type = VK_CMD_DRAW_MULTI_EXT;
287 list_addtail(&cmd->cmd_link, &cmd_buffer->queue.cmds);
288
289 cmd->u.draw_multi_ext.draw_count = drawCount;
290 if (pVertexInfo) {
291 unsigned i = 0;
292 cmd->u.draw_multi_ext.vertex_info = vk_zalloc(cmd_buffer->queue.alloc,
293 sizeof(*cmd->u.draw_multi_ext.vertex_info) * drawCount,
294 8,
295 VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
296 vk_foreach_multi_draw(draw, i, pVertexInfo, drawCount, stride)
297 memcpy(&cmd->u.draw_multi_ext.vertex_info[i], draw, sizeof(*cmd->u.draw_multi_ext.vertex_info));
298 }
299 cmd->u.draw_multi_ext.instance_count = instanceCount;
300 cmd->u.draw_multi_ext.first_instance = firstInstance;
301 cmd->u.draw_multi_ext.stride = stride;
302 }
303
lvp_CmdDrawMultiIndexedEXT(VkCommandBuffer commandBuffer,uint32_t drawCount,const VkMultiDrawIndexedInfoEXT * pIndexInfo,uint32_t instanceCount,uint32_t firstInstance,uint32_t stride,const int32_t * pVertexOffset)304 VKAPI_ATTR void VKAPI_CALL lvp_CmdDrawMultiIndexedEXT(
305 VkCommandBuffer commandBuffer,
306 uint32_t drawCount,
307 const VkMultiDrawIndexedInfoEXT *pIndexInfo,
308 uint32_t instanceCount,
309 uint32_t firstInstance,
310 uint32_t stride,
311 const int32_t *pVertexOffset)
312 {
313 LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
314
315 struct vk_cmd_queue_entry *cmd = vk_zalloc(cmd_buffer->queue.alloc,
316 sizeof(*cmd), 8,
317 VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
318 if (!cmd)
319 return;
320
321 cmd->type = VK_CMD_DRAW_MULTI_INDEXED_EXT;
322 list_addtail(&cmd->cmd_link, &cmd_buffer->queue.cmds);
323
324 cmd->u.draw_multi_indexed_ext.draw_count = drawCount;
325
326 if (pIndexInfo) {
327 unsigned i = 0;
328 cmd->u.draw_multi_indexed_ext.index_info = vk_zalloc(cmd_buffer->queue.alloc,
329 sizeof(*cmd->u.draw_multi_indexed_ext.index_info) * drawCount,
330 8,
331 VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
332 vk_foreach_multi_draw_indexed(draw, i, pIndexInfo, drawCount, stride) {
333 cmd->u.draw_multi_indexed_ext.index_info[i].firstIndex = draw->firstIndex;
334 cmd->u.draw_multi_indexed_ext.index_info[i].indexCount = draw->indexCount;
335 if (pVertexOffset == NULL)
336 cmd->u.draw_multi_indexed_ext.index_info[i].vertexOffset = draw->vertexOffset;
337 }
338 }
339
340 cmd->u.draw_multi_indexed_ext.instance_count = instanceCount;
341 cmd->u.draw_multi_indexed_ext.first_instance = firstInstance;
342 cmd->u.draw_multi_indexed_ext.stride = stride;
343
344 if (pVertexOffset) {
345 cmd->u.draw_multi_indexed_ext.vertex_offset = vk_zalloc(cmd_buffer->queue.alloc, sizeof(*cmd->u.draw_multi_indexed_ext.vertex_offset), 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
346 memcpy(cmd->u.draw_multi_indexed_ext.vertex_offset, pVertexOffset, sizeof(*cmd->u.draw_multi_indexed_ext.vertex_offset));
347 }
348 }
349
/* vkCmdPushDescriptorSetKHR: record the push into the command queue.
 *
 * The caller's VkWriteDescriptorSet array only has to stay valid for the
 * duration of this call, so every write -- and the per-descriptor info
 * array it points at (image, texel-buffer, or buffer, depending on the
 * descriptor type) -- is deep-copied into command-queue allocations.
 *
 * NOTE(review): the vk_zalloc results below are used without NULL
 * checks; on allocation failure the following memcpy would dereference
 * NULL.  Confirm against the driver's OOM policy for recorded commands.
 */
VKAPI_ATTR void VKAPI_CALL lvp_CmdPushDescriptorSetKHR(
   VkCommandBuffer                             commandBuffer,
   VkPipelineBindPoint                         pipelineBindPoint,
   VkPipelineLayout                            layout,
   uint32_t                                    set,
   uint32_t                                    descriptorWriteCount,
   const VkWriteDescriptorSet*                 pDescriptorWrites)
{
   LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
   struct vk_cmd_push_descriptor_set_khr *pds;

   struct vk_cmd_queue_entry *cmd = vk_zalloc(cmd_buffer->queue.alloc,
                                              sizeof(*cmd), 8,
                                              VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!cmd)
      return;

   pds = &cmd->u.push_descriptor_set_khr;

   cmd->type = VK_CMD_PUSH_DESCRIPTOR_SET_KHR;
   list_addtail(&cmd->cmd_link, &cmd_buffer->queue.cmds);

   pds->pipeline_bind_point = pipelineBindPoint;
   pds->layout = layout;
   pds->set = set;
   pds->descriptor_write_count = descriptorWriteCount;

   if (pDescriptorWrites) {
      /* Shallow copy of the write structs first; the pointers inside are
       * then replaced with owned copies below. */
      pds->descriptor_writes = vk_zalloc(cmd_buffer->queue.alloc,
                                         sizeof(*pds->descriptor_writes) * descriptorWriteCount,
                                         8,
                                         VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
      memcpy(pds->descriptor_writes,
             pDescriptorWrites,
             sizeof(*pds->descriptor_writes) * descriptorWriteCount);

      for (unsigned i = 0; i < descriptorWriteCount; i++) {
         /* Which of pImageInfo / pTexelBufferView / pBufferInfo is live
          * depends on the descriptor type; copy only the live array.
          * The casts drop the const that the shallow copy inherited. */
         switch (pds->descriptor_writes[i].descriptorType) {
         case VK_DESCRIPTOR_TYPE_SAMPLER:
         case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
         case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
         case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
            pds->descriptor_writes[i].pImageInfo = vk_zalloc(cmd_buffer->queue.alloc,
                                                             sizeof(VkDescriptorImageInfo) * pds->descriptor_writes[i].descriptorCount,
                                                             8,
                                                             VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
            memcpy((VkDescriptorImageInfo *)pds->descriptor_writes[i].pImageInfo,
                   pDescriptorWrites[i].pImageInfo,
                   sizeof(VkDescriptorImageInfo) * pds->descriptor_writes[i].descriptorCount);
            break;
         case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
         case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
            pds->descriptor_writes[i].pTexelBufferView = vk_zalloc(cmd_buffer->queue.alloc,
                                                                   sizeof(VkBufferView) * pds->descriptor_writes[i].descriptorCount,
                                                                   8,
                                                                   VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
            memcpy((VkBufferView *)pds->descriptor_writes[i].pTexelBufferView,
                   pDescriptorWrites[i].pTexelBufferView,
                   sizeof(VkBufferView) * pds->descriptor_writes[i].descriptorCount);
            break;
         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         default:
            /* Buffer-style descriptors (also the fallback for any type
             * not handled above). */
            pds->descriptor_writes[i].pBufferInfo = vk_zalloc(cmd_buffer->queue.alloc,
                                                              sizeof(VkDescriptorBufferInfo) * pds->descriptor_writes[i].descriptorCount,
                                                              8,
                                                              VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
            memcpy((VkDescriptorBufferInfo *)pds->descriptor_writes[i].pBufferInfo,
                   pDescriptorWrites[i].pBufferInfo,
                   sizeof(VkDescriptorBufferInfo) * pds->descriptor_writes[i].descriptorCount);
            break;
         }
      }
   }
}
428
lvp_CmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,VkDescriptorUpdateTemplate descriptorUpdateTemplate,VkPipelineLayout layout,uint32_t set,const void * pData)429 VKAPI_ATTR void VKAPI_CALL lvp_CmdPushDescriptorSetWithTemplateKHR(
430 VkCommandBuffer commandBuffer,
431 VkDescriptorUpdateTemplate descriptorUpdateTemplate,
432 VkPipelineLayout layout,
433 uint32_t set,
434 const void* pData)
435 {
436 LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
437 LVP_FROM_HANDLE(lvp_descriptor_update_template, templ, descriptorUpdateTemplate);
438 size_t info_size = 0;
439 struct vk_cmd_queue_entry *cmd = vk_zalloc(cmd_buffer->queue.alloc,
440 sizeof(*cmd), 8,
441 VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
442 if (!cmd)
443 return;
444
445 cmd->type = VK_CMD_PUSH_DESCRIPTOR_SET_WITH_TEMPLATE_KHR;
446
447 list_addtail(&cmd->cmd_link, &cmd_buffer->queue.cmds);
448
449 cmd->u.push_descriptor_set_with_template_khr.descriptor_update_template = descriptorUpdateTemplate;
450 cmd->u.push_descriptor_set_with_template_khr.layout = layout;
451 cmd->u.push_descriptor_set_with_template_khr.set = set;
452
453 for (unsigned i = 0; i < templ->entry_count; i++) {
454 VkDescriptorUpdateTemplateEntry *entry = &templ->entry[i];
455
456 switch (entry->descriptorType) {
457 case VK_DESCRIPTOR_TYPE_SAMPLER:
458 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
459 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
460 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
461 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
462 info_size += sizeof(VkDescriptorImageInfo) * entry->descriptorCount;
463 break;
464 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
465 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
466 info_size += sizeof(VkBufferView) * entry->descriptorCount;
467 break;
468 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
469 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
470 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
471 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
472 default:
473 info_size += sizeof(VkDescriptorBufferInfo) * entry->descriptorCount;
474 break;
475 }
476 }
477
478 cmd->u.push_descriptor_set_with_template_khr.data = vk_zalloc(cmd_buffer->queue.alloc, info_size, 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
479
480 uint64_t offset = 0;
481 for (unsigned i = 0; i < templ->entry_count; i++) {
482 VkDescriptorUpdateTemplateEntry *entry = &templ->entry[i];
483
484 unsigned size = 0;
485 switch (entry->descriptorType) {
486 case VK_DESCRIPTOR_TYPE_SAMPLER:
487 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
488 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
489 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
490 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
491 size = sizeof(VkDescriptorImageInfo);
492 break;
493 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
494 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
495 size = sizeof(VkBufferView);
496 break;
497 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
498 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
499 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
500 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
501 default:
502 size = sizeof(VkDescriptorBufferInfo);
503 break;
504 }
505 for (unsigned i = 0; i < entry->descriptorCount; i++) {
506 memcpy((uint8_t*)cmd->u.push_descriptor_set_with_template_khr.data + offset, (const uint8_t*)pData + entry->offset + i * entry->stride, size);
507 offset += size;
508 }
509 }
510 }
511
lvp_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,VkPipelineBindPoint pipelineBindPoint,VkPipelineLayout _layout,uint32_t firstSet,uint32_t descriptorSetCount,const VkDescriptorSet * pDescriptorSets,uint32_t dynamicOffsetCount,const uint32_t * pDynamicOffsets)512 VKAPI_ATTR void VKAPI_CALL lvp_CmdBindDescriptorSets(
513 VkCommandBuffer commandBuffer,
514 VkPipelineBindPoint pipelineBindPoint,
515 VkPipelineLayout _layout,
516 uint32_t firstSet,
517 uint32_t descriptorSetCount,
518 const VkDescriptorSet* pDescriptorSets,
519 uint32_t dynamicOffsetCount,
520 const uint32_t* pDynamicOffsets)
521 {
522 LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
523 LVP_FROM_HANDLE(lvp_pipeline_layout, layout, _layout);
524 struct vk_cmd_queue_entry *cmd = vk_zalloc(cmd_buffer->queue.alloc,
525 sizeof(*cmd), 8,
526 VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
527 if (!cmd)
528 return;
529
530 cmd->type = VK_CMD_BIND_DESCRIPTOR_SETS;
531 list_addtail(&cmd->cmd_link, &cmd_buffer->queue.cmds);
532
533 /* _layout could have been destroyed by when this command executes */
534 struct lvp_descriptor_set_layout **set_layout = vk_zalloc(cmd_buffer->queue.alloc, sizeof(*set_layout) * layout->num_sets, 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
535 cmd->driver_data = set_layout;
536 for (unsigned i = 0; i < layout->num_sets; i++)
537 set_layout[i] = layout->set[i].layout;
538
539 cmd->u.bind_descriptor_sets.pipeline_bind_point = pipelineBindPoint;
540 cmd->u.bind_descriptor_sets.first_set = firstSet;
541 cmd->u.bind_descriptor_sets.descriptor_set_count = descriptorSetCount;
542 if (pDescriptorSets) {
543 cmd->u.bind_descriptor_sets.descriptor_sets = vk_zalloc(cmd_buffer->queue.alloc, sizeof(*cmd->u.bind_descriptor_sets.descriptor_sets) * descriptorSetCount, 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
544 memcpy(( VkDescriptorSet* )cmd->u.bind_descriptor_sets.descriptor_sets, pDescriptorSets, sizeof(*cmd->u.bind_descriptor_sets.descriptor_sets) * descriptorSetCount);
545 }
546 cmd->u.bind_descriptor_sets.dynamic_offset_count = dynamicOffsetCount;
547 if (pDynamicOffsets) {
548 cmd->u.bind_descriptor_sets.dynamic_offsets = vk_zalloc(cmd_buffer->queue.alloc, sizeof(*cmd->u.bind_descriptor_sets.dynamic_offsets) * dynamicOffsetCount, 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
549 memcpy(( uint32_t* )cmd->u.bind_descriptor_sets.dynamic_offsets, pDynamicOffsets, sizeof(*cmd->u.bind_descriptor_sets.dynamic_offsets) * dynamicOffsetCount);
550 }
551 }
552