/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_fence.c
 *
 * Fences for driver and IPC serialisation, scheduling and synchronisation.
 */

#include "drm-uapi/sync_file.h"
#include "util/u_debug.h"
#include "util/u_inlines.h"
#include "intel/common/intel_gem.h"

#include "iris_batch.h"
#include "iris_bufmgr.h"
#include "iris_context.h"
#include "iris_fence.h"
#include "iris_screen.h"

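/**
 * Thin wrapper for DRM_IOCTL_SYNCOBJ_CREATE; returns the new syncobj handle.
 */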
static uint32_t
gem_syncobj_create(int fd, uint32_t flags)
{
   struct drm_syncobj_create args = {
      .flags = flags,
   };

   intel_ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &args);

   return args.handle;
}

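/**
 * Thin wrapper for DRM_IOCTL_SYNCOBJ_DESTROY.
 */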
static void
gem_syncobj_destroy(int fd, uint32_t handle)
{
   struct drm_syncobj_destroy args = {
      .handle = handle,
   };

   intel_ioctl(fd, DRM_IOCTL_SYNCOBJ_DESTROY, &args);
}

/**
 * Make a new sync-point.
 */
struct iris_syncobj *
iris_create_syncobj(struct iris_bufmgr *bufmgr)
{
   int fd = iris_bufmgr_get_fd(bufmgr);
   struct iris_syncobj *syncobj = malloc(sizeof(*syncobj));

   if (!syncobj)
      return NULL;

   syncobj->handle = gem_syncobj_create(fd, 0);
   assert(syncobj->handle);

   pipe_reference_init(&syncobj->ref, 1);

   return syncobj;
}

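/**
 * Destroy the kernel syncobj and free the wrapper.
 */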
void
iris_syncobj_destroy(struct iris_bufmgr *bufmgr, struct iris_syncobj *syncobj)
{
   int fd = iris_bufmgr_get_fd(bufmgr);
   gem_syncobj_destroy(fd, syncobj->handle);
   free(syncobj);
}

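/**
 * Signal a syncobj from the CPU via DRM_IOCTL_SYNCOBJ_SIGNAL.
 */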
void
iris_syncobj_signal(struct iris_bufmgr *bufmgr, struct iris_syncobj *syncobj)
{
   int fd = iris_bufmgr_get_fd(bufmgr);
   struct drm_syncobj_array args = {
      .handles = (uintptr_t)&syncobj->handle,
      .count_handles = 1,
   };

   if (intel_ioctl(fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &args)) {
      fprintf(stderr, "failed to signal syncobj %"PRIu32"\n",
              syncobj->handle);
   }
}

/**
 * Add a sync-point to the batch, with the given flags.
 *
 * \p flags   One of I915_EXEC_FENCE_WAIT or I915_EXEC_FENCE_SIGNAL.
 */
void
iris_batch_add_syncobj(struct iris_batch *batch,
                       struct iris_syncobj *syncobj,
                       unsigned flags)
{
   struct drm_i915_gem_exec_fence *fence =
      util_dynarray_grow(&batch->exec_fences, struct drm_i915_gem_exec_fence, 1);

   *fence = (struct drm_i915_gem_exec_fence) {
      .handle = syncobj->handle,
      .flags = flags,
   };

   struct iris_syncobj **store =
      util_dynarray_grow(&batch->syncobjs, struct iris_syncobj *, 1);

   *store = NULL;
   iris_syncobj_reference(batch->screen->bufmgr, store, syncobj);
}

/**
 * Walk through a batch's dependencies (any I915_EXEC_FENCE_WAIT syncobjs)
 * and unreference any which have already passed.
 *
 * A seldom-used batch (the compute batch, for example) can accumulate
 * references to stale render batches that are no longer of interest, so
 * we can free those up.
 */
static void
clear_stale_syncobjs(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;

   int n = util_dynarray_num_elements(&batch->syncobjs, struct iris_syncobj *);

   assert(n == util_dynarray_num_elements(&batch->exec_fences,
                                          struct drm_i915_gem_exec_fence));

   /* Skip the first syncobj, as it's the signalling one. */
   for (int i = n - 1; i > 0; i--) {
      struct iris_syncobj **syncobj =
         util_dynarray_element(&batch->syncobjs, struct iris_syncobj *, i);
      struct drm_i915_gem_exec_fence *fence =
         util_dynarray_element(&batch->exec_fences,
                               struct drm_i915_gem_exec_fence, i);
      assert(fence->flags & I915_EXEC_FENCE_WAIT);

      if (iris_wait_syncobj(bufmgr, *syncobj, 0))
         continue;

      /* This sync object has already passed; there's no need to continue
       * marking it as a dependency, so we can stop holding on to the
       * reference.
       */
      iris_syncobj_reference(bufmgr, syncobj, NULL);

      /* Remove it from the lists; move the last element here. */
      struct iris_syncobj **nth_syncobj =
         util_dynarray_pop_ptr(&batch->syncobjs, struct iris_syncobj *);
      struct drm_i915_gem_exec_fence *nth_fence =
         util_dynarray_pop_ptr(&batch->exec_fences,
                               struct drm_i915_gem_exec_fence);

      if (syncobj != nth_syncobj) {
         *syncobj = *nth_syncobj;
         memcpy(fence, nth_fence, sizeof(*fence));
      }
   }
}

/* ------------------------------------------------------------------- */

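/**
 * An iris fence: a collection of per-batch fine-grained fences.
 *
 * unflushed_ctx is the context this fence was created on with a deferred
 * flush still pending; it is NULL once the relevant batches are flushed.
 */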
struct pipe_fence_handle {
   struct pipe_reference ref;

   struct pipe_context *unflushed_ctx;

   struct iris_fine_fence *fine[IRIS_BATCH_COUNT];
};

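/**
 * Destroy a fence, dropping its references to the per-batch fine fences.
 */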
static void
iris_fence_destroy(struct pipe_screen *p_screen,
                   struct pipe_fence_handle *fence)
{
   struct iris_screen *screen = (struct iris_screen *)p_screen;

   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++)
      iris_fine_fence_reference(screen, &fence->fine[i], NULL);

   free(fence);
}

static void
iris_fence_reference(struct pipe_screen *p_screen,
                     struct pipe_fence_handle **dst,
                     struct pipe_fence_handle *src)
{
   if (pipe_reference(*dst ? &(*dst)->ref : NULL,
                      src ? &src->ref : NULL))
      iris_fence_destroy(p_screen, *dst);

   *dst = src;
}

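/**
 * Wait on a syncobj with the given timeout (in nanoseconds).
 *
 * Returns true if the wait failed or timed out (the syncobj has not yet
 * signalled), and false if it has signalled or if \p syncobj is NULL.
 */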
bool
iris_wait_syncobj(struct iris_bufmgr *bufmgr,
                  struct iris_syncobj *syncobj,
                  int64_t timeout_nsec)
{
   if (!syncobj)
      return false;

   int fd = iris_bufmgr_get_fd(bufmgr);

   struct drm_syncobj_wait args = {
      .handles = (uintptr_t)&syncobj->handle,
      .count_handles = 1,
      .timeout_nsec = timeout_nsec,
   };
   return intel_ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &args);
}

#define CSI "\e["
#define BLUE_HEADER  CSI "0;97;44m"
#define NORMAL       CSI "0m"

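/**
 * The pipe_context::flush implementation: flush all batches (unless the
 * flush is deferred) and optionally return a fence covering their work.
 */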
static void
iris_fence_flush(struct pipe_context *ctx,
                 struct pipe_fence_handle **out_fence,
                 unsigned flags)
{
   struct iris_screen *screen = (void *) ctx->screen;
   struct iris_context *ice = (struct iris_context *)ctx;

   /* We require DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (kernel 5.2+) for
    * deferred flushes.  Just ignore the request to defer on older kernels.
    */
   if (!(screen->kernel_features & KERNEL_HAS_WAIT_FOR_SUBMIT))
      flags &= ~PIPE_FLUSH_DEFERRED;

   const bool deferred = flags & PIPE_FLUSH_DEFERRED;

   if (flags & PIPE_FLUSH_END_OF_FRAME) {
      ice->frame++;

      if (INTEL_DEBUG(DEBUG_SUBMIT)) {
         fprintf(stderr, "%s ::: FRAME %-10u (ctx %p)%-35c%s\n",
                 INTEL_DEBUG(DEBUG_COLOR) ? BLUE_HEADER : "",
                 ice->frame, ctx, ' ',
                 INTEL_DEBUG(DEBUG_COLOR) ? NORMAL : "");
      }
   }

   iris_flush_dirty_dmabufs(ice);

   if (!deferred) {
      for (unsigned i = 0; i < IRIS_BATCH_COUNT; i++)
         iris_batch_flush(&ice->batches[i]);
   }

   if (flags & PIPE_FLUSH_END_OF_FRAME) {
      iris_measure_frame_end(ice);
   }

   if (!out_fence)
      return;

   struct pipe_fence_handle *fence = calloc(1, sizeof(*fence));
   if (!fence)
      return;

   pipe_reference_init(&fence->ref, 1);

   if (deferred)
      fence->unflushed_ctx = ctx;

   for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {
      struct iris_batch *batch = &ice->batches[b];

      if (deferred && iris_batch_bytes_used(batch) > 0) {
         struct iris_fine_fence *fine =
            iris_fine_fence_new(batch, IRIS_FENCE_BOTTOM_OF_PIPE);
         iris_fine_fence_reference(screen, &fence->fine[b], fine);
         iris_fine_fence_reference(screen, &fine, NULL);
      } else {
         /* This batch has no commands queued up (perhaps we just flushed,
          * or all the commands are on the other batch).  Wait for the last
          * syncobj on this engine - unless it's already finished by now.
          */
         if (iris_fine_fence_signaled(batch->last_fence))
            continue;

         iris_fine_fence_reference(screen, &fence->fine[b], batch->last_fence);
      }
   }

   iris_fence_reference(ctx->screen, out_fence, NULL);
   *out_fence = fence;
}

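/**
 * The pipe_context::fence_server_sync implementation: make any future work
 * submitted on this context wait for \p fence to pass on the GPU.
 */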
static void
iris_fence_await(struct pipe_context *ctx,
                 struct pipe_fence_handle *fence)
{
   struct iris_context *ice = (struct iris_context *)ctx;

   /* Unflushed fences from the same context are no-ops. */
   if (ctx && ctx == fence->unflushed_ctx)
      return;

   /* XXX: We can't safely flush the other context, because it might be
    *      bound to another thread, and poking at its internals wouldn't
    *      be safe.  In the future we should use MI_SEMAPHORE_WAIT and
    *      block until the other job has been submitted, relying on
    *      kernel timeslicing to preempt us until the other job is
    *      actually flushed and the seqno finally passes.
    */
   if (fence->unflushed_ctx) {
      pipe_debug_message(&ice->dbg, CONFORMANCE, "%s",
                         "glWaitSync on unflushed fence from another context "
                         "is unlikely to work without kernel 5.8+\n");
   }

   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
      struct iris_fine_fence *fine = fence->fine[i];

      if (iris_fine_fence_signaled(fine))
         continue;

      for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {
         struct iris_batch *batch = &ice->batches[b];

         /* We're going to make any future work in this batch wait for our
          * fence to have gone by.  But any currently queued work doesn't
          * need to wait.  Flush the batch now, so it can happen sooner.
          */
         iris_batch_flush(batch);

         /* Before adding a new reference, clean out any stale ones. */
         clear_stale_syncobjs(batch);

         iris_batch_add_syncobj(batch, fine->syncobj, I915_EXEC_FENCE_WAIT);
      }
   }
}

#define NSEC_PER_SEC (1000 * USEC_PER_SEC)
#define USEC_PER_SEC (1000 * MSEC_PER_SEC)
#define MSEC_PER_SEC (1000)

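/**
 * Return the current CLOCK_MONOTONIC time in nanoseconds.
 */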
static uint64_t
gettime_ns(void)
{
   struct timespec current;
   clock_gettime(CLOCK_MONOTONIC, &current);
   return (uint64_t)current.tv_sec * NSEC_PER_SEC + current.tv_nsec;
}

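/**
 * Convert a relative timeout (in nanoseconds) into the absolute
 * CLOCK_MONOTONIC time expected by DRM_IOCTL_SYNCOBJ_WAIT, clamping so the
 * result does not overflow INT64_MAX.
 */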
static uint64_t
rel2abs(uint64_t timeout)
{
   if (timeout == 0)
      return 0;

   uint64_t current_time = gettime_ns();
   uint64_t max_timeout = (uint64_t) INT64_MAX - current_time;

   timeout = MIN2(max_timeout, timeout);

   return current_time + timeout;
}

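/**
 * The pipe_screen::fence_finish implementation: wait (up to \p timeout
 * nanoseconds) for every unsignalled fine fence in \p fence, flushing any
 * deferred work from our own context first.
 */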
static bool
iris_fence_finish(struct pipe_screen *p_screen,
                  struct pipe_context *ctx,
                  struct pipe_fence_handle *fence,
                  uint64_t timeout)
{
   ctx = threaded_context_unwrap_sync(ctx);

   struct iris_context *ice = (struct iris_context *)ctx;
   struct iris_screen *screen = (struct iris_screen *)p_screen;

   /* If we created the fence with PIPE_FLUSH_DEFERRED, we may not have
    * flushed yet.  Check if our syncobj is the current batch's signalling
    * syncobj - if so, we haven't flushed and need to now.
    *
    * The Gallium docs mention that a flush will occur if \p ctx matches
    * the context the fence was created with.  It may be NULL, so we check
    * that it matches first.
    */
   if (ctx && ctx == fence->unflushed_ctx) {
      for (unsigned i = 0; i < IRIS_BATCH_COUNT; i++) {
         struct iris_fine_fence *fine = fence->fine[i];

         if (iris_fine_fence_signaled(fine))
            continue;

         if (fine->syncobj == iris_batch_get_signal_syncobj(&ice->batches[i]))
            iris_batch_flush(&ice->batches[i]);
      }

      /* The fence is no longer deferred. */
      fence->unflushed_ctx = NULL;
   }

   unsigned int handle_count = 0;
   uint32_t handles[ARRAY_SIZE(fence->fine)];
   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
      struct iris_fine_fence *fine = fence->fine[i];

      if (iris_fine_fence_signaled(fine))
         continue;

      handles[handle_count++] = fine->syncobj->handle;
   }

   if (handle_count == 0)
      return true;

   struct drm_syncobj_wait args = {
      .handles = (uintptr_t)handles,
      .count_handles = handle_count,
      .timeout_nsec = rel2abs(timeout),
      .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL
   };

   if (fence->unflushed_ctx) {
      /* This fence had a deferred flush from another context.  We can't
       * safely flush it here, because the context might be bound to a
       * different thread, and poking at its internals wouldn't be safe.
       *
       * Instead, use the WAIT_FOR_SUBMIT flag to block and hope that
       * another thread submits the work.
       */
      args.flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;
   }

   return intel_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args) == 0;
}

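/**
 * Merge two sync file descriptors with SYNC_IOC_MERGE, closing the inputs
 * and returning the merged fd.  Either input may be -1, in which case the
 * other is returned unchanged.
 */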
static int
sync_merge_fd(int sync_fd, int new_fd)
{
   if (sync_fd == -1)
      return new_fd;

   if (new_fd == -1)
      return sync_fd;

   struct sync_merge_data args = {
      .name = "iris fence",
      .fd2 = new_fd,
      .fence = -1,
   };

   intel_ioctl(sync_fd, SYNC_IOC_MERGE, &args);
   close(new_fd);
   close(sync_fd);

   return args.fence;
}

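/**
 * The pipe_screen::fence_get_fd implementation: export the fence's pending
 * syncobjs as a single sync file descriptor.
 */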
static int
iris_fence_get_fd(struct pipe_screen *p_screen,
                  struct pipe_fence_handle *fence)
{
   struct iris_screen *screen = (struct iris_screen *)p_screen;
   int fd = -1;

   /* Deferred fences aren't supported. */
   if (fence->unflushed_ctx)
      return -1;

   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
      struct iris_fine_fence *fine = fence->fine[i];

      if (iris_fine_fence_signaled(fine))
         continue;

      struct drm_syncobj_handle args = {
         .handle = fine->syncobj->handle,
         .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
         .fd = -1,
      };

      intel_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
      fd = sync_merge_fd(fd, args.fd);
   }

   if (fd == -1) {
      /* Our fence has no syncobjs recorded.  This means that all of the
       * batches had already completed, their syncobjs had been signalled,
       * and so we didn't bother to record them.  But we're being asked to
       * export such a fence, so export a dummy already-signalled syncobj.
       */
      struct drm_syncobj_handle args = {
         .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE, .fd = -1,
      };

      args.handle = gem_syncobj_create(screen->fd, DRM_SYNCOBJ_CREATE_SIGNALED);
      intel_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
      gem_syncobj_destroy(screen->fd, args.handle);
      return args.fd;
   }

   return fd;
}

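/**
 * The pipe_context::create_fence_fd implementation: import a sync file or
 * syncobj fd as a pipe_fence_handle.
 */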
static void
iris_fence_create_fd(struct pipe_context *ctx,
                     struct pipe_fence_handle **out,
                     int fd,
                     enum pipe_fd_type type)
{
   assert(type == PIPE_FD_TYPE_NATIVE_SYNC || type == PIPE_FD_TYPE_SYNCOBJ);

   struct iris_screen *screen = (struct iris_screen *)ctx->screen;
   struct drm_syncobj_handle args = {
      .fd = fd,
   };

   if (type == PIPE_FD_TYPE_NATIVE_SYNC) {
      args.flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE;
      args.handle = gem_syncobj_create(screen->fd, DRM_SYNCOBJ_CREATE_SIGNALED);
   }

   if (intel_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args) == -1) {
      fprintf(stderr, "DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE failed: %s\n",
              strerror(errno));
      if (type == PIPE_FD_TYPE_NATIVE_SYNC)
         gem_syncobj_destroy(screen->fd, args.handle);
      *out = NULL;
      return;
   }

   struct iris_syncobj *syncobj = malloc(sizeof(*syncobj));
   if (!syncobj) {
      *out = NULL;
      return;
   }
   syncobj->handle = args.handle;
   pipe_reference_init(&syncobj->ref, 1);

   struct iris_fine_fence *fine = calloc(1, sizeof(*fine));
   if (!fine) {
      free(syncobj);
      *out = NULL;
      return;
   }

   static const uint32_t zero = 0;

   /* Fences work in terms of iris_fine_fence, but we don't actually have a
    * seqno for an imported fence.  So, create a fake one which always
    * returns as 'not signaled' so we fall back to using the sync object.
    */
   fine->seqno = UINT32_MAX;
   fine->map = &zero;
   fine->syncobj = syncobj;
   fine->flags = IRIS_FENCE_END;
   pipe_reference_init(&fine->reference, 1);

   struct pipe_fence_handle *fence = calloc(1, sizeof(*fence));
   if (!fence) {
      free(fine);
      free(syncobj);
      *out = NULL;
      return;
   }
   pipe_reference_init(&fence->ref, 1);
   fence->fine[0] = fine;

   *out = fence;
}

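/**
 * The pipe_context::fence_server_signal implementation: make the current
 * batches signal the fence's syncobjs when they execute on the GPU.
 */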
static void
iris_fence_signal(struct pipe_context *ctx,
                  struct pipe_fence_handle *fence)
{
   struct iris_context *ice = (struct iris_context *)ctx;

   if (ctx == fence->unflushed_ctx)
      return;

   for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {
      for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
         struct iris_fine_fence *fine = fence->fine[i];

         /* Skip fences that have already signalled. */
         if (iris_fine_fence_signaled(fine))
            continue;

         ice->batches[b].contains_fence_signal = true;
         iris_batch_add_syncobj(&ice->batches[b], fine->syncobj,
                                I915_EXEC_FENCE_SIGNAL);
      }
   }
}

void
iris_init_screen_fence_functions(struct pipe_screen *screen)
{
   screen->fence_reference = iris_fence_reference;
   screen->fence_finish = iris_fence_finish;
   screen->fence_get_fd = iris_fence_get_fd;
}

void
iris_init_context_fence_functions(struct pipe_context *ctx)
{
   ctx->flush = iris_fence_flush;
   ctx->create_fence_fd = iris_fence_create_fd;
   ctx->fence_server_sync = iris_fence_await;
   ctx->fence_server_signal = iris_fence_signal;
}