/**************************************************************************
 *
 * Copyright 2007-2010 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * \file
 * Implementation of fenced buffers.
 *
 * \author Jose Fonseca <jfonseca-at-vmware-dot-com>
 * \author Thomas Hellström <thellstrom-at-vmware-dot-com>
 */


#include "pipe/p_config.h"

#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
#include <unistd.h>
#include <sched.h>
#endif
#include <inttypes.h>

#include "pipe/p_compiler.h"
#include "pipe/p_defines.h"
#include "util/u_debug.h"
#include "os/os_thread.h"
#include "util/u_memory.h"
#include "util/list.h"

#include "pb_buffer.h"
#include "pb_buffer_fenced.h"
#include "pb_bufmgr.h"


/**
 * Convenience macro (type safe).
 */
#define SUPER(__derived) (&(__derived)->base)


struct fenced_manager
{
   struct pb_manager base;
   struct pb_manager *provider;
   struct pb_fence_ops *ops;

   /**
    * Maximum buffer size that can be safely allocated.
    */
   pb_size max_buffer_size;

   /**
    * Maximum cpu memory we can allocate before we start waiting for the
    * GPU to idle.
    */
   pb_size max_cpu_total_size;

   /**
    * Following members are mutable and protected by this mutex.
    */
   mtx_t mutex;

   /**
    * Fenced buffer list.
    *
    * All fenced buffers are placed in this list, ordered from the oldest
    * fence to the newest fence.
    */
   struct list_head fenced;
   pb_size num_fenced;

   struct list_head unfenced;
   pb_size num_unfenced;

   /**
    * How much temporary CPU memory is being used to hold unvalidated buffers.
    */
   pb_size cpu_total_size;
};


/**
 * Fenced buffer.
 *
 * Wrapper around a pipe buffer which adds fencing and reference counting.
 */
struct fenced_buffer
{
   /**
    * Immutable members.
    */

   struct pb_buffer base;
   struct fenced_manager *mgr;

   /**
    * Following members are mutable and protected by fenced_manager::mutex.
    */

   struct list_head head;

   /**
    * Buffer with storage.
    */
   struct pb_buffer *buffer;
   pb_size size;
   struct pb_desc desc;

   /**
    * Temporary CPU storage data. Used when there isn't enough GPU memory to
    * store the buffer.
    */
   void *data;

   /**
    * A bitmask of PB_USAGE_CPU/GPU_READ/WRITE describing the current
    * buffer usage.
    */
   enum pb_usage_flags flags;

   unsigned mapcount;

   struct pb_validate *vl;
   unsigned validation_flags;

   struct pipe_fence_handle *fence;
};


static inline struct fenced_manager *
fenced_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct fenced_manager *)mgr;
}


static inline struct fenced_buffer *
fenced_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct fenced_buffer *)buf;
}


static void
fenced_buffer_destroy_cpu_storage_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_create_cpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf);

static void
fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf,
                                        boolean wait);

static enum pipe_error
fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf);


/**
 * Dump the fenced buffer list.
 *
 * Useful to understand failures to allocate buffers.
 */
static void
fenced_manager_dump_locked(struct fenced_manager *fenced_mgr)
{
#ifdef DEBUG
   struct pb_fence_ops *ops = fenced_mgr->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;

   debug_printf("%10s %7s %8s %7s %10s %s\n",
                "buffer", "size", "refcount", "storage", "fence", "signalled");

   curr = fenced_mgr->unfenced.next;
   next = curr->next;
   while (curr != &fenced_mgr->unfenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      assert(!fenced_buf->fence);
      debug_printf("%10p %"PRIu64" %8u %7s\n",
                   (void *) fenced_buf,
                   fenced_buf->base.size,
                   p_atomic_read(&fenced_buf->base.reference.count),
                   fenced_buf->buffer ? "gpu" : (fenced_buf->data ? "cpu" : "none"));
      curr = next;
      next = curr->next;
   }

   curr = fenced_mgr->fenced.next;
   next = curr->next;
   while (curr != &fenced_mgr->fenced) {
      int signaled;
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      assert(fenced_buf->buffer);
      signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
      debug_printf("%10p %"PRIu64" %8u %7s %10p %s\n",
                   (void *) fenced_buf,
                   fenced_buf->base.size,
                   p_atomic_read(&fenced_buf->base.reference.count),
                   "gpu",
                   (void *) fenced_buf->fence,
                   signaled == 0 ? "y" : "n");
      curr = next;
      next = curr->next;
   }
#else
   (void)fenced_mgr;
#endif
}


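/**
 * Remove the buffer from the unfenced list and release its GPU and CPU
 * storage.
 *
 * Must only be called once the reference count has reached zero and the
 * buffer no longer has a fence.
 */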
static inline void
fenced_buffer_destroy_locked(struct fenced_manager *fenced_mgr,
                             struct fenced_buffer *fenced_buf)
{
   assert(!pipe_is_referenced(&fenced_buf->base.reference));

   assert(!fenced_buf->fence);
   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);
   list_del(&fenced_buf->head);
   assert(fenced_mgr->num_unfenced);
   --fenced_mgr->num_unfenced;

   fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
   fenced_buffer_destroy_cpu_storage_locked(fenced_buf);

   FREE(fenced_buf);
}


/**
 * Add the buffer to the fenced list.
 *
 * Reference count should be incremented before calling this function.
 */
static inline void
fenced_buffer_add_locked(struct fenced_manager *fenced_mgr,
                         struct fenced_buffer *fenced_buf)
{
   assert(pipe_is_referenced(&fenced_buf->base.reference));
   assert(fenced_buf->flags & PB_USAGE_GPU_READ_WRITE);
   assert(fenced_buf->fence);

   p_atomic_inc(&fenced_buf->base.reference.count);

   list_del(&fenced_buf->head);
   assert(fenced_mgr->num_unfenced);
   --fenced_mgr->num_unfenced;
   list_addtail(&fenced_buf->head, &fenced_mgr->fenced);
   ++fenced_mgr->num_fenced;
}


/**
 * Remove the buffer from the fenced list, and potentially destroy the buffer
 * if the reference count reaches zero.
 *
 * Returns TRUE if the buffer was destroyed.
 */
static inline boolean
fenced_buffer_remove_locked(struct fenced_manager *fenced_mgr,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;

   assert(fenced_buf->fence);
   assert(fenced_buf->mgr == fenced_mgr);

   ops->fence_reference(ops, &fenced_buf->fence, NULL);
   fenced_buf->flags &= ~PB_USAGE_GPU_READ_WRITE;

   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);

   list_del(&fenced_buf->head);
   assert(fenced_mgr->num_fenced);
   --fenced_mgr->num_fenced;

   list_addtail(&fenced_buf->head, &fenced_mgr->unfenced);
   ++fenced_mgr->num_unfenced;

   if (p_atomic_dec_zero(&fenced_buf->base.reference.count)) {
      fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
      return TRUE;
   }

   return FALSE;
}


/**
 * Wait for the fence to expire, and remove it from the fenced list.
 *
 * This function will release and re-acquire the mutex, so any copy of mutable
 * state must be discarded after calling it.
 */
static inline enum pipe_error
fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;
   enum pipe_error ret = PIPE_ERROR;

#if 0
   debug_warning("waiting for GPU");
#endif

   assert(pipe_is_referenced(&fenced_buf->base.reference));
   assert(fenced_buf->fence);

   if (fenced_buf->fence) {
      struct pipe_fence_handle *fence = NULL;
      int finished;
      boolean proceed;

      ops->fence_reference(ops, &fence, fenced_buf->fence);

      mtx_unlock(&fenced_mgr->mutex);

      finished = ops->fence_finish(ops, fenced_buf->fence, 0);

      mtx_lock(&fenced_mgr->mutex);

      assert(pipe_is_referenced(&fenced_buf->base.reference));

      /* Only proceed if the fence object didn't change in the meanwhile.
       * Otherwise assume the work has already been carried out by another
       * thread that re-acquired the lock before us.
       */
      proceed = fence == fenced_buf->fence ? TRUE : FALSE;

      ops->fence_reference(ops, &fence, NULL);

      if (proceed && finished == 0) {
         /* Remove from the fenced list. */
         boolean destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);

         /* TODO: remove subsequent buffers with the same fence? */

         assert(!destroyed);
         (void) destroyed; /* silence unused var warning for non-debug build */

         fenced_buf->flags &= ~PB_USAGE_GPU_READ_WRITE;

         ret = PIPE_OK;
      }
   }

   return ret;
}


/**
 * Remove as many fenced buffers from the fenced list as possible.
 *
 * Returns TRUE if at least one buffer was removed.
 */
static boolean
fenced_manager_check_signalled_locked(struct fenced_manager *fenced_mgr,
                                      boolean wait)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;
   struct pipe_fence_handle *prev_fence = NULL;
   boolean ret = FALSE;

   curr = fenced_mgr->fenced.next;
   next = curr->next;
   while (curr != &fenced_mgr->fenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);

      if (fenced_buf->fence != prev_fence) {
         int signaled;

         if (wait) {
            signaled = ops->fence_finish(ops, fenced_buf->fence, 0);

            /* Don't return just now. Instead preemptively check if the
             * following buffers' fences have already expired, without
             * further waits.
             */
            wait = FALSE;
         } else {
            signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
         }

         if (signaled != 0) {
            return ret;
         }

         prev_fence = fenced_buf->fence;
      } else {
         /* This buffer's fence object is identical to the previous buffer's
          * fence object, so no need to check the fence again.
          */
         assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
      }

      fenced_buffer_remove_locked(fenced_mgr, fenced_buf);

      ret = TRUE;

      curr = next;
      next = curr->next;
   }

   return ret;
}


/**
 * Try to free some GPU memory by backing it up into CPU memory.
 *
 * Returns TRUE if at least one buffer was freed.
 */
static boolean
fenced_manager_free_gpu_storage_locked(struct fenced_manager *fenced_mgr)
{
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;

   curr = fenced_mgr->unfenced.next;
   next = curr->next;
   while (curr != &fenced_mgr->unfenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);

      /* We can only move storage if the buffer is not mapped and not
       * validated.
       */
      if (fenced_buf->buffer &&
          !fenced_buf->mapcount &&
          !fenced_buf->vl) {
         enum pipe_error ret;

         ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf);
         if (ret == PIPE_OK) {
            ret = fenced_buffer_copy_storage_to_cpu_locked(fenced_buf);
            if (ret == PIPE_OK) {
               fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
               return TRUE;
            }
            fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
         }
      }

      curr = next;
      next = curr->next;
   }

   return FALSE;
}


/**
 * Destroy CPU storage for this buffer.
 */
static void
fenced_buffer_destroy_cpu_storage_locked(struct fenced_buffer *fenced_buf)
{
   if (fenced_buf->data) {
      align_free(fenced_buf->data);
      fenced_buf->data = NULL;
      assert(fenced_buf->mgr->cpu_total_size >= fenced_buf->size);
      fenced_buf->mgr->cpu_total_size -= fenced_buf->size;
   }
}


/**
 * Create CPU storage for this buffer.
 */
static enum pipe_error
fenced_buffer_create_cpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf)
{
   assert(!fenced_buf->data);
   if (fenced_buf->data)
      return PIPE_OK;

   if (fenced_mgr->cpu_total_size + fenced_buf->size > fenced_mgr->max_cpu_total_size)
      return PIPE_ERROR_OUT_OF_MEMORY;

   fenced_buf->data = align_malloc(fenced_buf->size, fenced_buf->desc.alignment);
   if (!fenced_buf->data)
      return PIPE_ERROR_OUT_OF_MEMORY;

   fenced_mgr->cpu_total_size += fenced_buf->size;

   return PIPE_OK;
}


/**
 * Destroy the GPU storage.
 */
static void
fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf)
{
   if (fenced_buf->buffer) {
      pb_reference(&fenced_buf->buffer, NULL);
   }
}


/**
 * Try to create GPU storage for this buffer.
 *
 * This function is a shorthand around pb_manager::create_buffer for
 * fenced_buffer_create_gpu_storage_locked()'s benefit.
 */
static inline boolean
fenced_buffer_try_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                            struct fenced_buffer *fenced_buf)
{
   struct pb_manager *provider = fenced_mgr->provider;

   assert(!fenced_buf->buffer);

   fenced_buf->buffer = provider->create_buffer(fenced_mgr->provider,
                                                fenced_buf->size,
                                                &fenced_buf->desc);
   return fenced_buf->buffer ? TRUE : FALSE;
}


/**
 * Create GPU storage for this buffer.
 */
static enum pipe_error
fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf,
                                        boolean wait)
{
   assert(!fenced_buf->buffer);

   /* Check for signaled buffers before trying to allocate. */
   fenced_manager_check_signalled_locked(fenced_mgr, FALSE);

   fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);

   /* Keep trying while there is some sort of progress:
    * - fences are expiring,
    * - or buffers are being swapped out from GPU memory into CPU memory.
    */
   while (!fenced_buf->buffer &&
          (fenced_manager_check_signalled_locked(fenced_mgr, FALSE) ||
           fenced_manager_free_gpu_storage_locked(fenced_mgr))) {
      fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
   }

   if (!fenced_buf->buffer && wait) {
      /* Same as before, but this time around, wait to free buffers if
       * necessary.
       */
      while (!fenced_buf->buffer &&
             (fenced_manager_check_signalled_locked(fenced_mgr, TRUE) ||
              fenced_manager_free_gpu_storage_locked(fenced_mgr))) {
         fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
      }
   }

   if (!fenced_buf->buffer) {
      if (0)
         fenced_manager_dump_locked(fenced_mgr);

      /* Give up. */
      return PIPE_ERROR_OUT_OF_MEMORY;
   }

   return PIPE_OK;
}


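/**
 * Copy the contents of the temporary CPU storage into the GPU storage.
 */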
static enum pipe_error
fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf)
{
   uint8_t *map;

   assert(fenced_buf->data);
   assert(fenced_buf->buffer);

   map = pb_map(fenced_buf->buffer, PB_USAGE_CPU_WRITE, NULL);
   if (!map)
      return PIPE_ERROR;

   memcpy(map, fenced_buf->data, fenced_buf->size);

   pb_unmap(fenced_buf->buffer);

   return PIPE_OK;
}


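/**
 * Copy the contents of the GPU storage back into the temporary CPU storage.
 */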
static enum pipe_error
fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf)
{
   const uint8_t *map;

   assert(fenced_buf->data);
   assert(fenced_buf->buffer);

   map = pb_map(fenced_buf->buffer, PB_USAGE_CPU_READ, NULL);
   if (!map)
      return PIPE_ERROR;

   memcpy(fenced_buf->data, map, fenced_buf->size);

   pb_unmap(fenced_buf->buffer);

   return PIPE_OK;
}


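/**
 * Destroy callback for fenced buffers, called once the reference count
 * has reached zero.
 */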
static void
fenced_buffer_destroy(void *winsys, struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   assert(!pipe_is_referenced(&fenced_buf->base.reference));

   mtx_lock(&fenced_mgr->mutex);

   fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);

   mtx_unlock(&fenced_mgr->mutex);
}


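/**
 * Map the buffer for CPU access, serializing against any pending GPU access
 * according to the requested usage flags.
 */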
static void *
fenced_buffer_map(struct pb_buffer *buf,
                  enum pb_usage_flags flags, void *flush_ctx)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   struct pb_fence_ops *ops = fenced_mgr->ops;
   void *map = NULL;

   mtx_lock(&fenced_mgr->mutex);

   assert(!(flags & PB_USAGE_GPU_READ_WRITE));

   /* Serialize writes. */
   while ((fenced_buf->flags & PB_USAGE_GPU_WRITE) ||
          ((fenced_buf->flags & PB_USAGE_GPU_READ) &&
           (flags & PB_USAGE_CPU_WRITE))) {

      /* Don't wait for the GPU to finish accessing it,
       * if blocking is forbidden.
       */
      if ((flags & PB_USAGE_DONTBLOCK) &&
          ops->fence_signalled(ops, fenced_buf->fence, 0) != 0) {
         goto done;
      }

      if (flags & PB_USAGE_UNSYNCHRONIZED) {
         break;
      }

      /* Wait for the GPU to finish accessing. This will release and re-acquire
       * the mutex, so all copies of mutable state must be discarded.
       */
      fenced_buffer_finish_locked(fenced_mgr, fenced_buf);
   }

   if (fenced_buf->buffer) {
      map = pb_map(fenced_buf->buffer, flags, flush_ctx);
   } else {
      assert(fenced_buf->data);
      map = fenced_buf->data;
   }

   if (map) {
      ++fenced_buf->mapcount;
      fenced_buf->flags |= flags & PB_USAGE_CPU_READ_WRITE;
   }

done:
   mtx_unlock(&fenced_mgr->mutex);

   return map;
}


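/**
 * Unmap the buffer, clearing the CPU usage flags once the last mapping is
 * released.
 */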
static void
fenced_buffer_unmap(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   mtx_lock(&fenced_mgr->mutex);

   assert(fenced_buf->mapcount);
   if (fenced_buf->mapcount) {
      if (fenced_buf->buffer)
         pb_unmap(fenced_buf->buffer);
      --fenced_buf->mapcount;
      if (!fenced_buf->mapcount)
         fenced_buf->flags &= ~PB_USAGE_CPU_READ_WRITE;
   }

   mtx_unlock(&fenced_mgr->mutex);
}


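/**
 * Validate the buffer for GPU access in the given validation list, creating
 * GPU storage (and copying any CPU data into it) on demand.
 */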
static enum pipe_error
fenced_buffer_validate(struct pb_buffer *buf,
                       struct pb_validate *vl,
                       enum pb_usage_flags flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   enum pipe_error ret;

   mtx_lock(&fenced_mgr->mutex);

   if (!vl) {
      /* Invalidate. */
      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
      ret = PIPE_OK;
      goto done;
   }

   assert(flags & PB_USAGE_GPU_READ_WRITE);
   assert(!(flags & ~PB_USAGE_GPU_READ_WRITE));
   flags &= PB_USAGE_GPU_READ_WRITE;

   /* Buffer cannot be validated in two different lists. */
   if (fenced_buf->vl && fenced_buf->vl != vl) {
      ret = PIPE_ERROR_RETRY;
      goto done;
   }

   if (fenced_buf->vl == vl &&
       (fenced_buf->validation_flags & flags) == flags) {
      /* Nothing to do -- buffer already validated. */
      ret = PIPE_OK;
      goto done;
   }

   /* Create and update GPU storage. */
   if (!fenced_buf->buffer) {
      assert(!fenced_buf->mapcount);

      ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE);
      if (ret != PIPE_OK) {
         goto done;
      }

      ret = fenced_buffer_copy_storage_to_gpu_locked(fenced_buf);
      if (ret != PIPE_OK) {
         fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
         goto done;
      }

      if (fenced_buf->mapcount) {
         debug_printf("warning: validating a buffer while it is still mapped\n");
      } else {
         fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
      }
   }

   ret = pb_validate(fenced_buf->buffer, vl, flags);
   if (ret != PIPE_OK)
      goto done;

   fenced_buf->vl = vl;
   fenced_buf->validation_flags |= flags;

done:
   mtx_unlock(&fenced_mgr->mutex);

   return ret;
}


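/**
 * Attach a fence to the buffer after validation, replacing any previous
 * fence and moving the buffer onto the fenced list.
 */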
static void
fenced_buffer_fence(struct pb_buffer *buf,
                    struct pipe_fence_handle *fence)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   struct pb_fence_ops *ops = fenced_mgr->ops;

   mtx_lock(&fenced_mgr->mutex);

   assert(pipe_is_referenced(&fenced_buf->base.reference));
   assert(fenced_buf->buffer);

   if (fence != fenced_buf->fence) {
      assert(fenced_buf->vl);
      assert(fenced_buf->validation_flags);

      if (fenced_buf->fence) {
         ASSERTED boolean destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
         assert(!destroyed);
      }
      if (fence) {
         ops->fence_reference(ops, &fenced_buf->fence, fence);
         fenced_buf->flags |= fenced_buf->validation_flags;
         fenced_buffer_add_locked(fenced_mgr, fenced_buf);
      }

      pb_fence(fenced_buf->buffer, fence);

      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
   }

   mtx_unlock(&fenced_mgr->mutex);
}


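/**
 * Return the underlying buffer and offset, for relocation processing.
 */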
static void
fenced_buffer_get_base_buffer(struct pb_buffer *buf,
                              struct pb_buffer **base_buf,
                              pb_size *offset)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   mtx_lock(&fenced_mgr->mutex);

   /* This should only be called when the buffer is validated. Typically
    * when processing relocations.
    */
   assert(fenced_buf->vl);
   assert(fenced_buf->buffer);

   if (fenced_buf->buffer) {
      pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
   } else {
      *base_buf = buf;
      *offset = 0;
   }

   mtx_unlock(&fenced_mgr->mutex);
}


static const struct pb_vtbl
fenced_buffer_vtbl = {
   fenced_buffer_destroy,
   fenced_buffer_map,
   fenced_buffer_unmap,
   fenced_buffer_validate,
   fenced_buffer_fence,
   fenced_buffer_get_base_buffer
};


/**
 * Wrap a buffer in a fenced buffer.
 */
static struct pb_buffer *
fenced_bufmgr_create_buffer(struct pb_manager *mgr,
                            pb_size size,
                            const struct pb_desc *desc)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);
   struct fenced_buffer *fenced_buf;
   enum pipe_error ret;

   /* Don't stall the GPU, waste time evicting buffers, or waste memory
    * trying to create a buffer that will most likely never fit into the
    * graphics aperture.
    */
   if (size > fenced_mgr->max_buffer_size) {
      goto no_buffer;
   }

   fenced_buf = CALLOC_STRUCT(fenced_buffer);
   if (!fenced_buf)
      goto no_buffer;

   pipe_reference_init(&fenced_buf->base.reference, 1);
   fenced_buf->base.alignment_log2 = util_logbase2(desc->alignment);
   fenced_buf->base.usage = desc->usage;
   fenced_buf->base.size = size;
   fenced_buf->size = size;
   fenced_buf->desc = *desc;

   fenced_buf->base.vtbl = &fenced_buffer_vtbl;
   fenced_buf->mgr = fenced_mgr;

   mtx_lock(&fenced_mgr->mutex);

   /* Try to create GPU storage without stalling. */
   ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, FALSE);

   /* Attempt to use CPU memory to avoid stalling the GPU. */
   if (ret != PIPE_OK) {
      ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf);
   }

   /* Create GPU storage, waiting for some to be available. */
   if (ret != PIPE_OK) {
      ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE);
   }

   /* Give up. */
   if (ret != PIPE_OK) {
      goto no_storage;
   }

   assert(fenced_buf->buffer || fenced_buf->data);

   list_addtail(&fenced_buf->head, &fenced_mgr->unfenced);
   ++fenced_mgr->num_unfenced;
   mtx_unlock(&fenced_mgr->mutex);

   return &fenced_buf->base;

no_storage:
   mtx_unlock(&fenced_mgr->mutex);
   FREE(fenced_buf);
no_buffer:
   return NULL;
}


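/**
 * Wait for all outstanding fences to expire, then flush the provider.
 */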
static void
fenced_bufmgr_flush(struct pb_manager *mgr)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);

   mtx_lock(&fenced_mgr->mutex);
   while (fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
      ;
   mtx_unlock(&fenced_mgr->mutex);

   assert(fenced_mgr->provider->flush);
   if (fenced_mgr->provider->flush)
      fenced_mgr->provider->flush(fenced_mgr->provider);
}


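/**
 * Destroy the fenced manager, waiting for all outstanding fences to expire
 * first.
 */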
static void
fenced_bufmgr_destroy(struct pb_manager *mgr)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);

   mtx_lock(&fenced_mgr->mutex);

   /* Wait on outstanding fences. */
   while (fenced_mgr->num_fenced) {
      mtx_unlock(&fenced_mgr->mutex);
#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
      sched_yield();
#endif
      mtx_lock(&fenced_mgr->mutex);
      while (fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
         ;
   }

#ifdef DEBUG
   /* assert(!fenced_mgr->num_unfenced); */
#endif

   mtx_unlock(&fenced_mgr->mutex);
   mtx_destroy(&fenced_mgr->mutex);

   if (fenced_mgr->provider)
      fenced_mgr->provider->destroy(fenced_mgr->provider);

   fenced_mgr->ops->destroy(fenced_mgr->ops);

   FREE(fenced_mgr);
}


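/**
 * Create a fenced buffer manager on top of the given provider.
 *
 * max_buffer_size limits the size of buffers that can be allocated, and
 * max_cpu_total_size caps the amount of CPU memory used for temporary
 * (unvalidated) buffer storage.
 */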
struct pb_manager *
fenced_bufmgr_create(struct pb_manager *provider,
                     struct pb_fence_ops *ops,
                     pb_size max_buffer_size,
                     pb_size max_cpu_total_size)
{
   struct fenced_manager *fenced_mgr;

   if (!provider)
      return NULL;

   fenced_mgr = CALLOC_STRUCT(fenced_manager);
   if (!fenced_mgr)
      return NULL;

   fenced_mgr->base.destroy = fenced_bufmgr_destroy;
   fenced_mgr->base.create_buffer = fenced_bufmgr_create_buffer;
   fenced_mgr->base.flush = fenced_bufmgr_flush;

   fenced_mgr->provider = provider;
   fenced_mgr->ops = ops;
   fenced_mgr->max_buffer_size = max_buffer_size;
   fenced_mgr->max_cpu_total_size = max_cpu_total_size;

   list_inithead(&fenced_mgr->fenced);
   fenced_mgr->num_fenced = 0;

   list_inithead(&fenced_mgr->unfenced);
   fenced_mgr->num_unfenced = 0;

   (void) mtx_init(&fenced_mgr->mutex, mtx_plain);

   return &fenced_mgr->base;
}