/*
 * Copyright © 2020 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "tu_private.h"

#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#include "msm_kgsl.h"
#include "vk_util.h"

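/* kgsl has no DRM syncobjs, so fences and semaphores are emulated with
 * per-context GPU timestamps: a tu_syncobj just records the last timestamp
 * it was signalled with, if any.
 */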
struct tu_syncobj {
   struct vk_object_base base;
   uint32_t timestamp;
   bool timestamp_valid;
};

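/* Wrapper that retries the ioctl when it is interrupted by a signal (EINTR)
 * or asked to retry (EAGAIN).
 */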
static int
safe_ioctl(int fd, unsigned long request, void *arg)
{
   int ret;

   do {
      ret = ioctl(fd, request, arg);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   return ret;
}

int
tu_drm_submitqueue_new(const struct tu_device *dev,
                       int priority,
                       uint32_t *queue_id)
{
   struct kgsl_drawctxt_create req = {
      .flags = KGSL_CONTEXT_SAVE_GMEM |
              KGSL_CONTEXT_NO_GMEM_ALLOC |
              KGSL_CONTEXT_PREAMBLE,
   };

   int ret = safe_ioctl(dev->physical_device->local_fd, IOCTL_KGSL_DRAWCTXT_CREATE, &req);
   if (ret)
      return ret;

   *queue_id = req.drawctxt_id;

   return 0;
}

void
tu_drm_submitqueue_close(const struct tu_device *dev, uint32_t queue_id)
{
   struct kgsl_drawctxt_destroy req = {
      .drawctxt_id = queue_id,
   };

   safe_ioctl(dev->physical_device->local_fd, IOCTL_KGSL_DRAWCTXT_DESTROY, &req);
}

VkResult
tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size,
               enum tu_bo_alloc_flags flags)
{
   struct kgsl_gpumem_alloc_id req = {
      .size = size,
   };

   if (flags & TU_BO_ALLOC_GPU_READ_ONLY)
      req.flags |= KGSL_MEMFLAGS_GPUREADONLY;

   int ret;

   ret = safe_ioctl(dev->physical_device->local_fd,
                    IOCTL_KGSL_GPUMEM_ALLOC_ID, &req);
   if (ret) {
      return vk_errorf(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY,
                       "GPUMEM_ALLOC_ID failed (%s)", strerror(errno));
   }

   *bo = (struct tu_bo) {
      .gem_handle = req.id,
      .size = req.mmapsize,
      .iova = req.gpuaddr,
   };

   return VK_SUCCESS;
}

VkResult
tu_bo_init_dmabuf(struct tu_device *dev,
                  struct tu_bo *bo,
                  uint64_t size,
                  int fd)
{
   struct kgsl_gpuobj_import_dma_buf import_dmabuf = {
      .fd = fd,
   };
   struct kgsl_gpuobj_import req = {
      .priv = (uintptr_t)&import_dmabuf,
      .priv_len = sizeof(import_dmabuf),
      .flags = 0,
      .type = KGSL_USER_MEM_TYPE_DMABUF,
   };
   int ret;

   ret = safe_ioctl(dev->physical_device->local_fd,
                    IOCTL_KGSL_GPUOBJ_IMPORT, &req);
   if (ret)
      return vk_errorf(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY,
                       "Failed to import dma-buf (%s)\n", strerror(errno));

   struct kgsl_gpuobj_info info_req = {
      .id = req.id,
   };

   ret = safe_ioctl(dev->physical_device->local_fd,
                    IOCTL_KGSL_GPUOBJ_INFO, &info_req);
   if (ret)
      return vk_errorf(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY,
                       "Failed to get dma-buf info (%s)\n", strerror(errno));

   *bo = (struct tu_bo) {
      .gem_handle = req.id,
      .size = info_req.size,
      .iova = info_req.gpuaddr,
   };

   return VK_SUCCESS;
}

int
tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
{
   tu_stub();

   return -1;
}

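/* kgsl exposes GPU buffers through mmap() on the device fd: the mmap offset
 * is the allocation id returned by IOCTL_KGSL_GPUMEM_ALLOC_ID shifted by the
 * page shift (assumed here to be 12, i.e. 4 KiB pages).
 */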
VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
{
   if (bo->map)
      return VK_SUCCESS;

   uint64_t offset = bo->gem_handle << 12;
   void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    dev->physical_device->local_fd, offset);
   if (map == MAP_FAILED)
      return vk_error(dev, VK_ERROR_MEMORY_MAP_FAILED);

   bo->map = map;

   return VK_SUCCESS;
}

void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
{
   assert(bo->gem_handle);

   if (bo->map)
      munmap(bo->map, bo->size);

   struct kgsl_gpumem_free_id req = {
      .id = bo->gem_handle
   };

   safe_ioctl(dev->physical_device->local_fd, IOCTL_KGSL_GPUMEM_FREE_ID, &req);
}

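/* Thin wrapper around IOCTL_KGSL_DEVICE_GETPROPERTY; returns the raw ioctl
 * result, so zero means success.
 */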
static VkResult
get_kgsl_prop(int fd, unsigned int type, void *value, size_t size)
{
   struct kgsl_device_getproperty getprop = {
      .type = type,
      .value = value,
      .sizebytes = size,
   };

   return safe_ioctl(fd, IOCTL_KGSL_DEVICE_GETPROPERTY, &getprop);
}

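/* Probe the single kgsl device node.  The GPU model is decoded from the
 * chip_id reported by KGSL_PROP_DEVICE_INFO: the top three bytes hold the
 * core/major/minor digits (e.g. a chip_id of 0x06030001 yields gpu_id 630).
 * GMEM size comes from the same device info and the GMEM base address from
 * KGSL_PROP_UCHE_GMEM_VADDR.
 */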
VkResult
tu_enumerate_devices(struct tu_instance *instance)
{
   static const char path[] = "/dev/kgsl-3d0";
   int fd;

   struct tu_physical_device *device = &instance->physical_devices[0];

   if (instance->vk.enabled_extensions.KHR_display)
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "KHR_display is not supported by the kgsl backend");

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0) {
      instance->physical_device_count = 0;
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to open device %s", path);
   }

   struct kgsl_devinfo info;
   if (get_kgsl_prop(fd, KGSL_PROP_DEVICE_INFO, &info, sizeof(info)))
      goto fail;

   uint64_t gmem_iova;
   if (get_kgsl_prop(fd, KGSL_PROP_UCHE_GMEM_VADDR, &gmem_iova, sizeof(gmem_iova)))
      goto fail;

   /* kgsl version check? */

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      mesa_logi("Found compatible device '%s'.", path);

   device->instance = instance;
   device->master_fd = -1;
   device->local_fd = fd;

   device->dev_id.gpu_id =
      ((info.chip_id >> 24) & 0xff) * 100 +
      ((info.chip_id >> 16) & 0xff) * 10 +
      ((info.chip_id >>  8) & 0xff);
   device->dev_id.chip_id = info.chip_id;
   device->gmem_size = info.gmem_sizebytes;
   device->gmem_base = gmem_iova;

   device->heap.size = tu_get_system_heap_size();
   device->heap.used = 0u;
   device->heap.flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;

   if (tu_physical_device_init(device, instance) != VK_SUCCESS)
      goto fail;

   instance->physical_device_count = 1;

   return VK_SUCCESS;

fail:
   close(fd);
   return VK_ERROR_INITIALIZATION_FAILED;
}

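/* Ask the kernel to back the given context timestamp with a sync file and
 * return its fd (KGSL_TIMESTAMP_EVENT_FENCE writes the new fd into priv).
 * Returns -1 on failure.
 */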
static int
timestamp_to_fd(struct tu_queue *queue, uint32_t timestamp)
{
   int fd;
   struct kgsl_timestamp_event event = {
      .type = KGSL_TIMESTAMP_EVENT_FENCE,
      .context_id = queue->msm_queue_id,
      .timestamp = timestamp,
      .priv = &fd,
      .len = sizeof(fd),
   };

   int ret = safe_ioctl(queue->device->fd, IOCTL_KGSL_TIMESTAMP_EVENT, &event);
   if (ret)
      return -1;

   return fd;
}

/* return true if timestamp a is greater (more recent) than b
 * this relies on timestamps never having a difference > (1<<31)
 */
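/* e.g. timestamp_cmp(2, 0xfffffffe) is true: modulo 2^32, 2 is four steps
 * ahead of 0xfffffffe, so the signed difference is a small positive number.
 */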
static inline bool
timestamp_cmp(uint32_t a, uint32_t b)
{
   return (int32_t) (a - b) >= 0;
}

static uint32_t
max_ts(uint32_t a, uint32_t b)
{
   return timestamp_cmp(a, b) ? a : b;
}

static uint32_t
min_ts(uint32_t a, uint32_t b)
{
   return timestamp_cmp(a, b) ? b : a;
}

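/* Collapse a set of syncobjs into a single representative timestamp: the
 * newest one when waiting for all of them, the oldest one otherwise.
 * Unsignalled syncobjs are skipped, and the inputs can optionally be reset
 * (unsignalled) as they are consumed.
 */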
static struct tu_syncobj
sync_merge(const VkSemaphore *syncobjs, uint32_t count, bool wait_all, bool reset)
{
   struct tu_syncobj ret;

   ret.timestamp_valid = false;

   for (uint32_t i = 0; i < count; ++i) {
      TU_FROM_HANDLE(tu_syncobj, sync, syncobjs[i]);

      /* TODO: this means the fence is unsignaled and will never become signaled */
      if (!sync->timestamp_valid)
         continue;

      if (!ret.timestamp_valid)
         ret.timestamp = sync->timestamp;
      else if (wait_all)
         ret.timestamp = max_ts(ret.timestamp, sync->timestamp);
      else
         ret.timestamp = min_ts(ret.timestamp, sync->timestamp);

      ret.timestamp_valid = true;
      if (reset)
         sync->timestamp_valid = false;
   }
   return ret;
}

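/* Submission path: every command buffer's IB entries (plus an optional
 * performance-counter IB per pass) are flattened into one kgsl_command_object
 * array, the merged wait-semaphore timestamp is attached as a syncpoint, and
 * the timestamp returned by IOCTL_KGSL_GPU_COMMAND is recorded in the signal
 * semaphores, the fence, and (for the last submit) a sync file on the queue.
 */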
VKAPI_ATTR VkResult VKAPI_CALL
tu_QueueSubmit(VkQueue _queue,
               uint32_t submitCount,
               const VkSubmitInfo *pSubmits,
               VkFence _fence)
{
   TU_FROM_HANDLE(tu_queue, queue, _queue);
   TU_FROM_HANDLE(tu_syncobj, fence, _fence);
   VkResult result = VK_SUCCESS;

   uint32_t max_entry_count = 0;
   for (uint32_t i = 0; i < submitCount; ++i) {
      const VkSubmitInfo *submit = pSubmits + i;

      const VkPerformanceQuerySubmitInfoKHR *perf_info =
         vk_find_struct_const(pSubmits[i].pNext,
                              PERFORMANCE_QUERY_SUBMIT_INFO_KHR);

      uint32_t entry_count = 0;
      for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
         TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
         entry_count += cmdbuf->cs.entry_count;
         if (perf_info)
            entry_count++;
      }

      max_entry_count = MAX2(max_entry_count, entry_count);
   }

   struct kgsl_command_object *cmds =
      vk_alloc(&queue->device->vk.alloc,
               sizeof(cmds[0]) * max_entry_count, 8,
               VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (cmds == NULL)
      return vk_error(queue, VK_ERROR_OUT_OF_HOST_MEMORY);

   for (uint32_t i = 0; i < submitCount; ++i) {
      const VkSubmitInfo *submit = pSubmits + i;
      uint32_t entry_idx = 0;
      const VkPerformanceQuerySubmitInfoKHR *perf_info =
         vk_find_struct_const(pSubmits[i].pNext,
                              PERFORMANCE_QUERY_SUBMIT_INFO_KHR);

      for (uint32_t j = 0; j < submit->commandBufferCount; j++) {
         TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
         struct tu_cs *cs = &cmdbuf->cs;

         if (perf_info) {
            struct tu_cs_entry *perf_cs_entry =
               &cmdbuf->device->perfcntrs_pass_cs_entries[perf_info->counterPassIndex];

            cmds[entry_idx++] = (struct kgsl_command_object) {
               .offset = perf_cs_entry->offset,
               .gpuaddr = perf_cs_entry->bo->iova,
               .size = perf_cs_entry->size,
               .flags = KGSL_CMDLIST_IB,
               .id = perf_cs_entry->bo->gem_handle,
            };
         }

         for (unsigned k = 0; k < cs->entry_count; k++) {
            cmds[entry_idx++] = (struct kgsl_command_object) {
               .offset = cs->entries[k].offset,
               .gpuaddr = cs->entries[k].bo->iova,
               .size = cs->entries[k].size,
               .flags = KGSL_CMDLIST_IB,
               .id = cs->entries[k].bo->gem_handle,
            };
         }
      }

      struct tu_syncobj s = sync_merge(submit->pWaitSemaphores,
                                       submit->waitSemaphoreCount,
                                       true, true);

      struct kgsl_cmd_syncpoint_timestamp ts = {
         .context_id = queue->msm_queue_id,
         .timestamp = s.timestamp,
      };
      struct kgsl_command_syncpoint sync = {
         .type = KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP,
         .size = sizeof(ts),
         .priv = (uintptr_t) &ts,
      };

      struct kgsl_gpu_command req = {
         .flags = KGSL_CMDBATCH_SUBMIT_IB_LIST,
         .context_id = queue->msm_queue_id,
         .cmdlist = (uint64_t) (uintptr_t) cmds,
         .numcmds = entry_idx,
         .cmdsize = sizeof(struct kgsl_command_object),
         .synclist = (uintptr_t) &sync,
         .syncsize = sizeof(struct kgsl_command_syncpoint),
         .numsyncs = s.timestamp_valid ? 1 : 0,
      };

      int ret = safe_ioctl(queue->device->physical_device->local_fd,
                           IOCTL_KGSL_GPU_COMMAND, &req);
      if (ret) {
         result = tu_device_set_lost(queue->device,
                                     "submit failed: %s\n", strerror(errno));
         goto fail;
      }

      for (uint32_t i = 0; i < submit->signalSemaphoreCount; i++) {
         TU_FROM_HANDLE(tu_syncobj, sem, submit->pSignalSemaphores[i]);
         sem->timestamp = req.timestamp;
         sem->timestamp_valid = true;
      }

      /* no need to merge fences as queue execution is serialized */
      if (i == submitCount - 1) {
         int fd = timestamp_to_fd(queue, req.timestamp);
         if (fd < 0) {
            result = tu_device_set_lost(queue->device,
                                        "Failed to create sync file for timestamp: %s\n",
                                        strerror(errno));
            goto fail;
         }

         if (queue->fence >= 0)
            close(queue->fence);
         queue->fence = fd;

         if (fence) {
            fence->timestamp = req.timestamp;
            fence->timestamp_valid = true;
         }
      }
   }
fail:
   vk_free(&queue->device->vk.alloc, cmds);

   return result;
}

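/* Fences and semaphores are both backed by the same tu_syncobj; only the
 * Vulkan object type they are registered under differs.
 */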
static VkResult
sync_create(VkDevice _device,
            bool signaled,
            bool fence,
            const VkAllocationCallbacks *pAllocator,
            void **p_sync)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_syncobj *sync =
         vk_object_alloc(&device->vk, pAllocator, sizeof(*sync),
                         fence ? VK_OBJECT_TYPE_FENCE : VK_OBJECT_TYPE_SEMAPHORE);
   if (!sync)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   if (signaled)
      tu_finishme("CREATE FENCE SIGNALED");

   sync->timestamp_valid = false;
   *p_sync = sync;

   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
tu_ImportSemaphoreFdKHR(VkDevice _device,
                        const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo)
{
   tu_finishme("ImportSemaphoreFdKHR");
   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
tu_GetSemaphoreFdKHR(VkDevice _device,
                     const VkSemaphoreGetFdInfoKHR *pGetFdInfo,
                     int *pFd)
{
   tu_finishme("GetSemaphoreFdKHR");
   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
tu_CreateSemaphore(VkDevice device,
                   const VkSemaphoreCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator,
                   VkSemaphore *pSemaphore)
{
   return sync_create(device, false, false, pAllocator, (void**) pSemaphore);
}

VKAPI_ATTR void VKAPI_CALL
tu_DestroySemaphore(VkDevice _device,
                    VkSemaphore semaphore,
                    const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_syncobj, sync, semaphore);

   if (!sync)
      return;

   vk_object_free(&device->vk, pAllocator, sync);
}

VKAPI_ATTR VkResult VKAPI_CALL
tu_ImportFenceFdKHR(VkDevice _device,
                    const VkImportFenceFdInfoKHR *pImportFenceFdInfo)
{
   tu_stub();

   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
tu_GetFenceFdKHR(VkDevice _device,
                 const VkFenceGetFdInfoKHR *pGetFdInfo,
                 int *pFd)
{
   tu_stub();

   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
tu_CreateFence(VkDevice device,
               const VkFenceCreateInfo *info,
               const VkAllocationCallbacks *pAllocator,
               VkFence *pFence)
{
   return sync_create(device, info->flags & VK_FENCE_CREATE_SIGNALED_BIT, true,
                      pAllocator, (void**) pFence);
}

VKAPI_ATTR void VKAPI_CALL
tu_DestroyFence(VkDevice _device, VkFence fence, const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_syncobj, sync, fence);

   if (!sync)
      return;

   vk_object_free(&device->vk, pAllocator, sync);
}

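/* Fence waits map to IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID on the merged
 * timestamp; the kernel expects the timeout in milliseconds, so the
 * nanosecond Vulkan timeout is divided down.  Only the first queue's context
 * is consulted, which assumes all submissions go through queue 0.
 */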
VKAPI_ATTR VkResult VKAPI_CALL
tu_WaitForFences(VkDevice _device,
                 uint32_t count,
                 const VkFence *pFences,
                 VkBool32 waitAll,
                 uint64_t timeout)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_syncobj s = sync_merge((const VkSemaphore*) pFences, count, waitAll, false);

   if (!s.timestamp_valid)
      return VK_SUCCESS;

   int ret = ioctl(device->fd, IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID,
                   &(struct kgsl_device_waittimestamp_ctxtid) {
      .context_id = device->queues[0]->msm_queue_id,
      .timestamp = s.timestamp,
      .timeout = timeout / 1000000,
   });
   if (ret) {
      assert(errno == ETIME);
      return VK_TIMEOUT;
   }

   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
tu_ResetFences(VkDevice _device, uint32_t count, const VkFence *pFences)
{
   for (uint32_t i = 0; i < count; i++) {
      TU_FROM_HANDLE(tu_syncobj, sync, pFences[i]);
      sync->timestamp_valid = false;
   }
   return VK_SUCCESS;
}

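/* A zero-timeout wait on the fence's timestamp doubles as a status query:
 * ETIME means the timestamp has not retired yet.
 */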
VKAPI_ATTR VkResult VKAPI_CALL
tu_GetFenceStatus(VkDevice _device, VkFence _fence)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_syncobj, sync, _fence);

   if (!sync->timestamp_valid)
      return VK_NOT_READY;

   int ret = ioctl(device->fd, IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID,
               &(struct kgsl_device_waittimestamp_ctxtid) {
      .context_id = device->queues[0]->msm_queue_id,
      .timestamp = sync->timestamp,
      .timeout = 0,
   });
   if (ret) {
      assert(errno == ETIME);
      return VK_NOT_READY;
   }

   return VK_SUCCESS;
}

int
tu_signal_fences(struct tu_device *device, struct tu_syncobj *fence1, struct tu_syncobj *fence2)
{
   tu_finishme("tu_signal_fences");
   return 0;
}

int
tu_syncobj_to_fd(struct tu_device *device, struct tu_syncobj *sync)
{
   tu_finishme("tu_syncobj_to_fd");
   return -1;
}

VkResult
tu_device_submit_deferred_locked(struct tu_device *dev)
{
   tu_finishme("tu_device_submit_deferred_locked");

   return VK_SUCCESS;
}

VkResult
tu_device_wait_u_trace(struct tu_device *dev, struct tu_u_trace_syncobj *syncobj)
{
   tu_finishme("tu_device_wait_u_trace");
   return VK_SUCCESS;
}

int
tu_drm_get_timestamp(struct tu_physical_device *device, uint64_t *ts)
{
   tu_finishme("tu_drm_get_timestamp");
   return 0;
}

#ifdef ANDROID
VKAPI_ATTR VkResult VKAPI_CALL
tu_QueueSignalReleaseImageANDROID(VkQueue _queue,
                                  uint32_t waitSemaphoreCount,
                                  const VkSemaphore *pWaitSemaphores,
                                  VkImage image,
                                  int *pNativeFenceFd)
{
   TU_FROM_HANDLE(tu_queue, queue, _queue);
   if (!pNativeFenceFd)
      return VK_SUCCESS;

   struct tu_syncobj s = sync_merge(pWaitSemaphores, waitSemaphoreCount, true, true);

   if (!s.timestamp_valid) {
      *pNativeFenceFd = -1;
      return VK_SUCCESS;
   }

   *pNativeFenceFd = timestamp_to_fd(queue, s.timestamp);

   return VK_SUCCESS;
}
#endif