/*
 * Copyright © 2019 Google LLC
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "tu_private.h"

#include <errno.h>
#include <fcntl.h>
#include <libsync.h>
#include <poll.h>
#include <unistd.h>

#include "util/os_file.h"
#include "util/os_time.h"

/**
 * Internally, a fence can be in one of these states.
 */
enum tu_fence_state
{
   TU_FENCE_STATE_RESET,
   TU_FENCE_STATE_PENDING,
   TU_FENCE_STATE_SIGNALED,
};

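/**
 * Derive the state from the (signaled, fd) pair.  The invariant maintained
 * by tu_fence_set_state() is:
 *
 *    RESET:    signaled == false, fd <  0
 *    PENDING:  signaled == false, fd >= 0 (a sync file fd)
 *    SIGNALED: signaled == true,  fd <  0
 */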
static enum tu_fence_state
tu_fence_get_state(const struct tu_fence *fence)
{
   if (fence->signaled) {
      assert(fence->fd < 0);
      return TU_FENCE_STATE_SIGNALED;
   } else if (fence->fd >= 0) {
      return TU_FENCE_STATE_PENDING;
   } else {
      return TU_FENCE_STATE_RESET;
   }
}

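/**
 * Transition a fence to \a state.  Ownership of \a fd is transferred to
 * \a fence, and any previously owned fd is closed first.  \a fd must be a
 * valid sync file fd for TU_FENCE_STATE_PENDING and -1 otherwise.
 */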
static void
tu_fence_set_state(struct tu_fence *fence, enum tu_fence_state state, int fd)
{
   if (fence->fd >= 0)
      close(fence->fd);

   switch (state) {
   case TU_FENCE_STATE_RESET:
      assert(fd < 0);
      fence->signaled = false;
      fence->fd = -1;
      break;
   case TU_FENCE_STATE_PENDING:
      assert(fd >= 0);
      fence->signaled = false;
      fence->fd = fd;
      break;
   case TU_FENCE_STATE_SIGNALED:
      assert(fd < 0);
      fence->signaled = true;
      fence->fd = -1;
      break;
   default:
      unreachable("unknown fence state");
      break;
   }
}

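/**
 * Initialize a fence to the signaled or reset state, with no sync file fd
 * and no WSI fence attached.
 */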
void
tu_fence_init(struct tu_fence *fence, bool signaled)
{
   fence->signaled = signaled;
   fence->fd = -1;
   fence->fence_wsi = NULL;
}

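/**
 * Release the resources owned by a fence: the sync file fd, if any, and the
 * WSI fence, if any.  The fence object itself is not freed.
 */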
void
tu_fence_finish(struct tu_fence *fence)
{
   if (fence->fd >= 0)
      close(fence->fd);
   if (fence->fence_wsi)
      fence->fence_wsi->destroy(fence->fence_wsi);
}

/**
 * Update the associated fd of a fence.  Ownership of \a fd is transferred to
 * \a fence.
 *
 * This function does not block, and \a fence may be in any state when it is
 * called.  For that to be safe, whenever both the currently associated fd
 * and the new fd are valid, the caller must make sure they are on the same
 * timeline, with the new fd later on that timeline.
 */
void
tu_fence_update_fd(struct tu_fence *fence, int fd)
{
   const enum tu_fence_state state =
      fd >= 0 ? TU_FENCE_STATE_PENDING : TU_FENCE_STATE_SIGNALED;
   tu_fence_set_state(fence, state, fd);
}

/**
 * Make a fence a copy of another fence.  \a fence must be in the reset state.
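 *
 * If duplicating the fd fails, we fall back to waiting on the source fd and
 * leaving \a fence signaled, which is correct but loses the asynchronicity.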
 */
void
tu_fence_copy(struct tu_fence *fence, const struct tu_fence *src)
{
   assert(tu_fence_get_state(fence) == TU_FENCE_STATE_RESET);

   /* dup src->fd */
   int fd = -1;
   if (src->fd >= 0) {
      fd = os_dupfd_cloexec(src->fd);
      if (fd < 0) {
         tu_loge("failed to dup fd %d for fence", src->fd);
         sync_wait(src->fd, -1);
      }
   }

   tu_fence_update_fd(fence, fd);
}

/**
 * Signal a fence.  \a fence must be in the reset state.
 */
void
tu_fence_signal(struct tu_fence *fence)
{
   assert(tu_fence_get_state(fence) == TU_FENCE_STATE_RESET);
   tu_fence_set_state(fence, TU_FENCE_STATE_SIGNALED, -1);
}

/**
 * Wait until a fence is idle (i.e., not pending).
 */
void
tu_fence_wait_idle(struct tu_fence *fence)
{
   if (fence->fd >= 0) {
      if (sync_wait(fence->fd, -1))
         tu_loge("sync_wait on fence fd %d failed", fence->fd);

      tu_fence_set_state(fence, TU_FENCE_STATE_SIGNALED, -1);
   }
}

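/**
 * Create a fence in the reset or, with VK_FENCE_CREATE_SIGNALED_BIT, the
 * signaled state.  A pending sync file fd is expected to be attached later
 * with tu_fence_update_fd(), presumably by queue submission.
 */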
VkResult
tu_CreateFence(VkDevice _device,
               const VkFenceCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkFence *pFence)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_fence *fence =
         vk_object_alloc(&device->vk, pAllocator, sizeof(*fence),
                         VK_OBJECT_TYPE_FENCE);
   if (!fence)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   tu_fence_init(fence, pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT);

   *pFence = tu_fence_to_handle(fence);

   return VK_SUCCESS;
}

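/**
 * Destroy a fence, releasing its sync file fd and WSI fence if present.
 */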
void
tu_DestroyFence(VkDevice _device,
                VkFence _fence,
                const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_fence, fence, _fence);

   if (!fence)
      return;

   tu_fence_finish(fence);

   vk_object_free(&device->vk, pAllocator, fence);
}

/**
 * Initialize a pollfd array from fences.
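 *
 * Returns the number of fds to poll.  Signaled fences are skipped, and with
 * \a wait_all false a single signaled fence short-circuits to zero fds,
 * since the wait is already satisfied.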
 */
static nfds_t
tu_fence_init_poll_fds(uint32_t fence_count,
                       const VkFence *fences,
                       bool wait_all,
                       struct pollfd *fds)
{
   nfds_t nfds = 0;
   for (uint32_t i = 0; i < fence_count; i++) {
      TU_FROM_HANDLE(tu_fence, fence, fences[i]);

      /* skip wsi fences */
      if (fence->fence_wsi)
         continue;

      if (fence->signaled) {
         if (wait_all) {
            /* skip signaled fences */
            continue;
         } else {
            /* no need to poll any fd */
            nfds = 0;
            break;
         }
      }

      /* negative fds are never ready, which is the desired behavior */
      fds[nfds].fd = fence->fd;
      fds[nfds].events = POLLIN;
      fds[nfds].revents = 0;
      nfds++;
   }

   return nfds;
}

/**
 * Translate timeout from nanoseconds to milliseconds for poll().
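 *
 * Rounds to the nearest millisecond: for example, 1,600,000 ns becomes 2 ms
 * while 1,400,000 ns becomes 1 ms.  The result is clamped to INT_MAX.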
 */
static int
tu_fence_get_poll_timeout(uint64_t timeout_ns)
{
   const uint64_t ns_per_ms = 1000 * 1000;
   uint64_t timeout_ms = timeout_ns / ns_per_ms;

   /* round up if needed */
   if (timeout_ns - timeout_ms * ns_per_ms >= ns_per_ms / 2)
      timeout_ms++;

   return timeout_ms < INT_MAX ? timeout_ms : INT_MAX;
}

/**
 * Poll a pollfd array.
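 *
 * Retries on EINTR and EAGAIN, decrementing \a timeout_ns by the elapsed
 * time so the remaining budget carries over across calls.  Other poll()
 * failures are treated as fatal and mapped to VK_ERROR_OUT_OF_HOST_MEMORY.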
 */
static VkResult
tu_fence_poll_fds(struct pollfd *fds, nfds_t nfds, uint64_t *timeout_ns)
{
   while (true) {
      /* poll */
      uint64_t duration = os_time_get_nano();
      int ret = poll(fds, nfds, tu_fence_get_poll_timeout(*timeout_ns));
      duration = os_time_get_nano() - duration;

      /* update timeout_ns */
      if (*timeout_ns > duration)
         *timeout_ns -= duration;
      else
         *timeout_ns = 0;

      if (ret > 0) {
         return VK_SUCCESS;
      } else if (ret == 0) {
         if (!*timeout_ns)
            return VK_TIMEOUT;
      } else if (errno != EINTR && errno != EAGAIN) {
         return VK_ERROR_OUT_OF_HOST_MEMORY;
      }
   }
}

/**
 * Update a pollfd array and the fence states.  This should be called after a
 * successful call to tu_fence_poll_fds.
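 *
 * Returns the number of fds that still need polling: with \a wait_all set,
 * the fds that are not ready yet are compacted to the front of \a fds;
 * otherwise zero, as one ready fd satisfies the wait.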
 */
static nfds_t
tu_fence_update_fences_and_poll_fds(uint32_t fence_count,
                                    const VkFence *fences,
                                    bool wait_all,
                                    struct pollfd *fds)
{
   nfds_t nfds = 0;
   uint32_t fds_idx = 0;
   for (uint32_t i = 0; i < fence_count; i++) {
      TU_FROM_HANDLE(tu_fence, fence, fences[i]);

      /* skip wsi fences */
      if (fence->fence_wsi)
         continue;

      /* no signaled fence in fds */
      if (fence->signaled)
         continue;

      /* fds[fds_idx] corresponds to fences[i] */
      assert(fence->fd == fds[fds_idx].fd);

      assert(nfds <= fds_idx && fds_idx <= i);

      /* fd is ready (errors are treated as ready) */
      if (fds[fds_idx].revents) {
         tu_fence_set_state(fence, TU_FENCE_STATE_SIGNALED, -1);
      } else if (wait_all) {
         /* add to fds again for another poll */
         fds[nfds].fd = fence->fd;
         fds[nfds].events = POLLIN;
         fds[nfds].revents = 0;
         nfds++;
      }

      fds_idx++;
   }

   return nfds;
}

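/**
 * Implement vkWaitForFences by polling the sync file fds of the non-WSI
 * fences until they are ready or the timeout expires, then waiting on any
 * WSI fences with the remaining timeout.
 */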
VkResult
tu_WaitForFences(VkDevice _device,
                 uint32_t fenceCount,
                 const VkFence *pFences,
                 VkBool32 waitAll,
                 uint64_t timeout)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   if (tu_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   /* add a simpler path for when fenceCount == 1? */

   struct pollfd stack_fds[8];
   struct pollfd *fds = stack_fds;
   if (fenceCount > ARRAY_SIZE(stack_fds)) {
      fds = vk_alloc(&device->vk.alloc, sizeof(*fds) * fenceCount, 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
      if (!fds)
         return VK_ERROR_OUT_OF_HOST_MEMORY;
   }

   /* set up pollfd array and start polling */
   nfds_t nfds = tu_fence_init_poll_fds(fenceCount, pFences, waitAll, fds);
   VkResult result = VK_SUCCESS;
   while (nfds) {
      result = tu_fence_poll_fds(fds, nfds, &timeout);
      if (result != VK_SUCCESS)
         break;
      nfds = tu_fence_update_fences_and_poll_fds(fenceCount, pFences, waitAll,
                                                 fds);
   }

   if (fds != stack_fds)
      vk_free(&device->vk.alloc, fds);

   if (result != VK_SUCCESS)
      return result;

   for (uint32_t i = 0; i < fenceCount; ++i) {
      TU_FROM_HANDLE(tu_fence, fence, pFences[i]);
      if (fence->fence_wsi) {
         VkResult wsi_result =
            fence->fence_wsi->wait(fence->fence_wsi, timeout);
         if (wsi_result != VK_SUCCESS)
            return wsi_result;
      }
   }

   return result;
}

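/**
 * Implement vkResetFences.  The assert reflects the valid-usage rule that a
 * fence being reset must not have pending, unfinished work.
 */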
VkResult
tu_ResetFences(VkDevice _device, uint32_t fenceCount, const VkFence *pFences)
{
   for (unsigned i = 0; i < fenceCount; ++i) {
      TU_FROM_HANDLE(tu_fence, fence, pFences[i]);
      assert(tu_fence_get_state(fence) != TU_FENCE_STATE_PENDING);
      tu_fence_set_state(fence, TU_FENCE_STATE_RESET, -1);
   }

   return VK_SUCCESS;
}

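/**
 * Implement vkGetFenceStatus.  A zero-timeout sync_wait() gives a
 * non-blocking query of the sync file fd: success means signaled, while
 * ETIME means the fence is still pending.
 */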
VkResult
tu_GetFenceStatus(VkDevice _device, VkFence _fence)
{
   TU_FROM_HANDLE(tu_fence, fence, _fence);

   if (fence->fd >= 0) {
      int err = sync_wait(fence->fd, 0);
      if (!err)
         tu_fence_set_state(fence, TU_FENCE_STATE_SIGNALED, -1);
      else if (errno != ETIME)
         return VK_ERROR_OUT_OF_HOST_MEMORY;
   }
   if (fence->fence_wsi) {
      VkResult result = fence->fence_wsi->wait(fence->fence_wsi, 0);

      if (result != VK_SUCCESS) {
         if (result == VK_TIMEOUT)
            return VK_NOT_READY;
         return result;
      }
   }

   return fence->signaled ? VK_SUCCESS : VK_NOT_READY;
}