/*
 * Copyright 2018 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "zink_batch.h"
#include "zink_context.h"
#include "zink_fence.h"

#include "zink_resource.h"
#include "zink_screen.h"

/* os_time.h added here for os_time_get_nano()/os_time_get_absolute_timeout(),
 * which this file calls; in-tree it may arrive transitively
 */
#include "util/os_time.h"
#include "util/set.h"
#include "util/u_memory.h"

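/* release the tc token and free the wrapper; the underlying zink_fence is
 * owned by its batch state, so it is only unlinked here, never destroyed
 */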
static void
destroy_fence(struct zink_screen *screen, struct zink_tc_fence *mfence)
{
   mfence->fence = NULL;
   tc_unflushed_batch_token_reference(&mfence->tc_token, NULL);
   FREE(mfence);
}

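/* allocate a wrapper fence with one reference; util_queue_fence_init leaves
 * the queue fence signaled, so a fresh fence reads as "ready"
 */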
struct zink_tc_fence *
zink_create_tc_fence(void)
{
   struct zink_tc_fence *mfence = CALLOC_STRUCT(zink_tc_fence);
   if (!mfence)
      return NULL;
   pipe_reference_init(&mfence->reference, 1);
   util_queue_fence_init(&mfence->ready);
   return mfence;
}

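/* variant used by the threaded context: the queue fence is reset to
 * unsignaled and the tc token is retained so the deferred flush can be
 * resolved later in tc_fence_finish
 */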
struct pipe_fence_handle *
zink_create_tc_fence_for_tc(struct pipe_context *pctx, struct tc_unflushed_batch_token *tc_token)
{
   struct zink_tc_fence *mfence = zink_create_tc_fence();
   if (!mfence)
      return NULL;
   util_queue_fence_reset(&mfence->ready);
   tc_unflushed_batch_token_reference(&mfence->tc_token, tc_token);
   return (struct pipe_fence_handle*)mfence;
}

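/* standard gallium refcounting: drop the old fence (destroying it on the
 * last reference) and point *ptr at the new one
 */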
void
zink_fence_reference(struct zink_screen *screen,
                     struct zink_tc_fence **ptr,
                     struct zink_tc_fence *mfence)
{
   if (pipe_reference(&(*ptr)->reference, &mfence->reference))
      destroy_fence(screen, *ptr);

   *ptr = mfence;
}

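/* pipe_screen::fence_reference entrypoint; unwraps the opaque handles */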
static void
fence_reference(struct pipe_screen *pscreen,
                struct pipe_fence_handle **pptr,
                struct pipe_fence_handle *pfence)
{
   zink_fence_reference(zink_screen(pscreen), (struct zink_tc_fence **)pptr,
                        zink_tc_fence(pfence));
}

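/* wait for the threaded-context half of a fence: kick the deferred flush if
 * one is pending, then wait on the queue fence and write back how much of
 * the caller's timeout remains
 */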
static bool
tc_fence_finish(struct zink_context *ctx, struct zink_tc_fence *mfence, uint64_t *timeout_ns)
{
   if (!util_queue_fence_is_signalled(&mfence->ready)) {
      int64_t abs_timeout = os_time_get_absolute_timeout(*timeout_ns);
      if (mfence->tc_token) {
         /* Ensure that zink_flush will be called for
          * this mfence, but only if we're in the API thread
          * where the context is current.
          *
          * Note that the batch containing the flush may already
          * be in flight in the driver thread, so the mfence
          * may not be ready yet when this call returns.
          */
         threaded_context_flush(&ctx->base, mfence->tc_token, *timeout_ns == 0);
      }

      /* this is a tc fence, so we're just waiting on the queue fence to complete
       * after being signaled by the real fence
       */
      if (*timeout_ns == PIPE_TIMEOUT_INFINITE) {
         util_queue_fence_wait(&mfence->ready);
      } else {
         if (!util_queue_fence_wait_timeout(&mfence->ready, abs_timeout))
            return false;
      }
      if (*timeout_ns && *timeout_ns != PIPE_TIMEOUT_INFINITE) {
         int64_t time_ns = os_time_get_nano();
         *timeout_ns = abs_timeout > time_ns ? abs_timeout - time_ns : 0;
      }
   }

   return true;
}

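/* wait on the actual VkFence: a nonzero timeout blocks in vkWaitForFences,
 * a zero timeout just polls vkGetFenceStatus; on success the completion is
 * cached and the screen's last-finished batch id is advanced
 */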
bool
zink_vkfence_wait(struct zink_screen *screen, struct zink_fence *fence, uint64_t timeout_ns)
{
   if (screen->device_lost)
      return true;
   if (p_atomic_read(&fence->completed))
      return true;

   assert(fence->batch_id);
   assert(fence->submitted);

   bool success = false;

   VkResult ret;
   if (timeout_ns)
      ret = VKSCR(WaitForFences)(screen->dev, 1, &fence->fence, VK_TRUE, timeout_ns);
   else
      ret = VKSCR(GetFenceStatus)(screen->dev, fence->fence);
   success = zink_screen_handle_vkresult(screen, ret);

   if (success) {
      p_atomic_set(&fence->completed, true);
      zink_batch_state(fence)->usage.usage = 0;
      zink_screen_update_last_finished(screen, fence->batch_id);
   }
   return success;
}

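/* full fence_finish path: flush a deferred fence if it is still the current
 * batch, wait out the tc queue fence, then check the cheap completion paths
 * before falling back to an actual vkfence wait
 */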
static bool
zink_fence_finish(struct zink_screen *screen, struct pipe_context *pctx, struct zink_tc_fence *mfence,
                  uint64_t timeout_ns)
{
   pctx = threaded_context_unwrap_sync(pctx);
   struct zink_context *ctx = zink_context(pctx);

   if (screen->device_lost)
      return true;

   if (pctx && mfence->deferred_ctx == pctx) {
      if (mfence->fence == ctx->deferred_fence) {
         zink_context(pctx)->batch.has_work = true;
         /* this must be the current batch */
         pctx->flush(pctx, NULL, !timeout_ns ? PIPE_FLUSH_ASYNC : 0);
         if (!timeout_ns)
            return false;
      }
   }

   /* need to ensure the tc fence has been flushed before we wait */
   bool tc_finish = tc_fence_finish(ctx, mfence, &timeout_ns);
   /* the submit thread hasn't finished yet */
   if (!tc_finish)
      return false;
   /* this was an invalid flush, just return completed */
   if (!mfence->fence)
      return true;

   struct zink_fence *fence = mfence->fence;

   unsigned submit_diff = zink_batch_state(mfence->fence)->submit_count - mfence->submit_count;
   /* this batch is known to have finished because it has been submitted more
    * than once since the tc fence last saw it
    */
   if (submit_diff > 1)
      return true;

   if (fence->submitted && zink_screen_check_last_finished(screen, fence->batch_id))
      return true;

   return zink_vkfence_wait(screen, fence, timeout_ns);
}

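/* pipe_screen::fence_finish entrypoint; unwraps the opaque handles */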
static bool
fence_finish(struct pipe_screen *pscreen, struct pipe_context *pctx,
             struct pipe_fence_handle *pfence, uint64_t timeout_ns)
{
   return zink_fence_finish(zink_screen(pscreen), pctx, zink_tc_fence(pfence),
                            timeout_ns);
}

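/* sync this context against a fence from a (possibly different) context;
 * there is no semaphore path here, so it is implemented as a flush of the
 * deferred batch (if any) followed by a full CPU-side wait
 */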
void
zink_fence_server_sync(struct pipe_context *pctx, struct pipe_fence_handle *pfence)
{
   struct zink_tc_fence *mfence = zink_tc_fence(pfence);

   if (mfence->deferred_ctx == pctx)
      return;

   if (mfence->deferred_ctx) {
      zink_context(pctx)->batch.has_work = true;
      /* this must be the current batch */
      pctx->flush(pctx, NULL, 0);
   }
   zink_fence_finish(zink_screen(pctx->screen), pctx, mfence, PIPE_TIMEOUT_INFINITE);
}

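/* plug the fence entrypoints into the pipe_screen vtable */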
void
zink_screen_fence_init(struct pipe_screen *pscreen)
{
   pscreen->fence_reference = fence_reference;
   pscreen->fence_finish = fence_finish;
}