1 /*	$OpenBSD: ttm_bo.c,v 1.9 2014/11/16 12:31:00 deraadt Exp $	*/
2 /**************************************************************************
3  *
4  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25  * USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  **************************************************************************/
28 /*
29  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
30  */
31 
32 #define pr_fmt(fmt) "[TTM] " fmt
33 
34 #include <dev/pci/drm/ttm/ttm_module.h>
35 #include <dev/pci/drm/ttm/ttm_bo_driver.h>
36 #include <dev/pci/drm/ttm/ttm_placement.h>
37 #include <dev/pci/drm/refcount.h>
38 
39 #define TTM_ASSERT_LOCKED(param)
40 #define TTM_DEBUG(fmt, arg...)
41 #define TTM_BO_HASH_ORDER 13
42 
43 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
44 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
45 static void ttm_bo_global_kobj_release(struct ttm_bo_global *glob);
46 void	 ttm_bo_delayed_workqueue(void *, void *);
47 
48 int ttm_bo_move_buffer(struct ttm_buffer_object *, struct ttm_placement *,
49     bool, bool);
50 
51 #ifdef notyet
52 static struct attribute ttm_bo_count = {
53 	.name = "bo_count",
54 	.mode = S_IRUGO
55 };
56 #endif
57 
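/*
 * Map the lowest set TTM_PL_FLAG_* memory-type bit in @flags to its
 * TTM_PL_* index.  Returns -EINVAL if no memory-type bit is set.
 */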
58 static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
59 {
60 	int i;
61 
62 	for (i = 0; i <= TTM_PL_PRIV5; i++)
63 		if (flags & (1 << i)) {
64 			*mem_type = i;
65 			return 0;
66 		}
67 	return -EINVAL;
68 }
69 
70 static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
71 {
72 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
73 
74 	printf("    has_type: %d\n", man->has_type);
75 	printf("    use_type: %d\n", man->use_type);
76 	printf("    flags: 0x%08X\n", man->flags);
77 	printf("    gpu_offset: 0x%08lX\n", man->gpu_offset);
78 	printf("    size: %llu\n", man->size);
79 	printf("    available_caching: 0x%08X\n", man->available_caching);
80 	printf("    default_caching: 0x%08X\n", man->default_caching);
81 	if (mem_type != TTM_PL_SYSTEM)
82 		(*man->func->debug)(man, TTM_PFX);
83 }
84 
85 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
86 					struct ttm_placement *placement)
87 {
88 	int i, ret, mem_type;
89 
90 	printf("No space for %p (%lu pages, %luK, %luM)\n",
91 	       bo, bo->mem.num_pages, bo->mem.size >> 10,
92 	       bo->mem.size >> 20);
93 	for (i = 0; i < placement->num_placement; i++) {
94 		ret = ttm_mem_type_from_flags(placement->placement[i],
95 						&mem_type);
96 		if (ret)
97 			return;
98 		printf("  placement[%d]=0x%08X (%d)\n",
99 		       i, placement->placement[i], mem_type);
100 		ttm_mem_type_debug(bo->bdev, mem_type);
101 	}
102 }
103 
104 #ifdef notyet
105 static ssize_t ttm_bo_global_show(struct kobject *kobj,
106 				  struct attribute *attr,
107 				  char *buffer)
108 {
109 	struct ttm_bo_global *glob =
110 		container_of(kobj, struct ttm_bo_global, kobj);
111 
112 	return snprintf(buffer, PAGE_SIZE, "%lu\n",
113 			(unsigned long) atomic_read(&glob->bo_count));
114 }
115 
116 static struct attribute *ttm_bo_global_attrs[] = {
117 	&ttm_bo_count,
118 	NULL
119 };
120 
121 static const struct sysfs_ops ttm_bo_global_ops = {
122 	.show = &ttm_bo_global_show
123 };
124 
125 static struct kobj_type ttm_bo_glob_kobj_type  = {
126 	.release = &ttm_bo_global_kobj_release,
127 	.sysfs_ops = &ttm_bo_global_ops,
128 	.default_attrs = ttm_bo_global_attrs
129 };
130 #endif
131 
132 
133 static inline uint32_t ttm_bo_type_flags(unsigned type)
134 {
135 	return 1 << (type);
136 }
137 
138 static void ttm_bo_release_list(struct ttm_buffer_object *bo)
139 {
140 	struct ttm_bo_device *bdev = bo->bdev;
141 	size_t acc_size = bo->acc_size;
142 
143 	BUG_ON(atomic_read(&bo->list_kref));
144 	BUG_ON(atomic_read(&bo->kref));
145 	BUG_ON(atomic_read(&bo->cpu_writers));
146 	BUG_ON(bo->sync_obj != NULL);
147 	BUG_ON(bo->mem.mm_node != NULL);
148 	BUG_ON(!list_empty(&bo->lru));
149 	BUG_ON(!list_empty(&bo->ddestroy));
150 
151 	if (bo->ttm)
152 		ttm_tt_destroy(bo->ttm);
153 	atomic_dec(&bo->glob->bo_count);
154 	if (bo->destroy)
155 		bo->destroy(bo);
156 	else {
157 		kfree(bo);
158 	}
159 	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
160 }
161 
162 int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
163 {
164 	int ret = 0;
165 
166 	while (ret == 0) {
167 		if (!ttm_bo_is_reserved(bo))
168 			break;
169 		ret = -tsleep(&bo->event_queue,
170 		    PZERO | (interruptible ? PCATCH : 0), "ttmwt", 0);
171 
172 	}
173 
174 	return (ret);
175 }
176 EXPORT_SYMBOL(ttm_bo_wait_unreserved);
177 
178 void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
179 {
180 	struct ttm_bo_device *bdev = bo->bdev;
181 	struct ttm_mem_type_manager *man;
182 
183 	BUG_ON(!ttm_bo_is_reserved(bo));
184 
185 	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
186 
187 		BUG_ON(!list_empty(&bo->lru));
188 
189 		man = &bdev->man[bo->mem.mem_type];
190 		list_add_tail(&bo->lru, &man->lru);
191 		refcount_acquire(&bo->list_kref);
192 
193 		if (bo->ttm != NULL) {
194 			list_add_tail(&bo->swap, &bo->glob->swap_lru);
195 			refcount_acquire(&bo->list_kref);
196 		}
197 	}
198 }
199 
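/*
 * Remove the bo from the LRU and swap lists.  Returns the number of list
 * references dropped; callers hand this count to ttm_bo_list_ref_sub()
 * once the lru_lock has been released.
 */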
200 int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
201 {
202 	int put_count = 0;
203 
204 	if (!list_empty(&bo->swap)) {
205 		list_del_init(&bo->swap);
206 		++put_count;
207 	}
208 	if (!list_empty(&bo->lru)) {
209 		list_del_init(&bo->lru);
210 		++put_count;
211 	}
212 
213 	/*
214 	 * TODO: Add a driver hook to delete from
215 	 * driver-specific LRU's here.
216 	 */
217 
218 	return put_count;
219 }
220 
221 int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
222 			  bool interruptible,
223 			  bool no_wait, bool use_sequence, uint32_t sequence)
224 {
225 	struct ttm_bo_global *glob = bo->glob;
226 	int ret;
227 
228 	while (unlikely(atomic_read(&bo->reserved) != 0)) {
229 		/**
230 		 * Deadlock avoidance for multi-bo reserving.
231 		 */
232 		if (use_sequence && bo->seq_valid) {
233 			/**
234 			 * We've already reserved this one.
235 			 */
236 			if (unlikely(sequence == bo->val_seq))
237 				return -EDEADLK;
238 			/**
239 			 * Already reserved by a thread that will not back
240 			 * off for us. We need to back off.
241 			 */
242 			if (unlikely(sequence - bo->val_seq < (1 << 31)))
243 				return -EAGAIN;
244 		}
245 
246 		if (no_wait)
247 			return -EBUSY;
248 
249 		mtx_leave(&glob->lru_lock);
250 		ret = ttm_bo_wait_unreserved(bo, interruptible);
251 		mtx_enter(&glob->lru_lock);
252 
253 		if (unlikely(ret))
254 			return ret;
255 	}
256 
257 	atomic_set(&bo->reserved, 1);
258 	if (use_sequence) {
259 		/**
260 		 * Wake up waiters that may need to recheck for deadlock,
261 		 * if we decreased the sequence number.
262 		 */
263 		if (unlikely((bo->val_seq - sequence < (1 << 31))
264 			     || !bo->seq_valid))
265 			wakeup(&bo->event_queue);
266 
267 		bo->val_seq = sequence;
268 		bo->seq_valid = true;
269 	} else {
270 		bo->seq_valid = false;
271 	}
272 
273 	return 0;
274 }
275 EXPORT_SYMBOL(ttm_bo_reserve);
276 
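/*
 * Drop @count list references from the bo.  If that brings the count to
 * zero the bo is released; with @never_free set, hitting zero is treated
 * as a fatal accounting error instead.
 */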
277 void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
278 			 bool never_free)
279 {
280 	u_int old;
281 
282 	old = atomic_fetchadd_int(&bo->list_kref, -count);
283 	if (old <= count) {
284 		if (never_free)
285 			panic("ttm_bo_ref_buf");
286 		ttm_bo_release_list(bo);
287 	}
288 }
289 
290 int ttm_bo_reserve(struct ttm_buffer_object *bo,
291 		   bool interruptible,
292 		   bool no_wait, bool use_sequence, uint32_t sequence)
293 {
294 	struct ttm_bo_global *glob = bo->glob;
295 	int put_count = 0;
296 	int ret;
297 
298 	mtx_enter(&glob->lru_lock);
299 	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
300 				    sequence);
301 	if (likely(ret == 0))
302 		put_count = ttm_bo_del_from_lru(bo);
303 	mtx_leave(&glob->lru_lock);
304 
305 	ttm_bo_list_ref_sub(bo, put_count, true);
306 
307 	return ret;
308 }
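/*
 * Illustrative caller pattern for the reservation API (a sketch only,
 * not lifted from an in-tree driver):
 *
 *	ret = ttm_bo_reserve(bo, true, false, false, 0);
 *	if (ret == 0) {
 *		... access or validate the bo ...
 *		ttm_bo_unreserve(bo);
 *	}
 */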
309 
310 void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
311 {
312 	ttm_bo_add_to_lru(bo);
313 	atomic_set(&bo->reserved, 0);
314 	wakeup(&bo->event_queue);
315 }
316 
317 void ttm_bo_unreserve(struct ttm_buffer_object *bo)
318 {
319 	struct ttm_bo_global *glob = bo->glob;
320 
321 	mtx_enter(&glob->lru_lock);
322 	ttm_bo_unreserve_locked(bo);
323 	mtx_leave(&glob->lru_lock);
324 }
325 EXPORT_SYMBOL(ttm_bo_unreserve);
326 
327 /*
328  * Called with bo->rwlock held (write-locked).
329  */
330 static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
331 {
332 	struct ttm_bo_device *bdev = bo->bdev;
333 	struct ttm_bo_global *glob = bo->glob;
334 	int ret = 0;
335 	uint32_t page_flags = 0;
336 
337 #ifdef notyet
338 	rw_assert_wrlock(&bo->rwlock);
339 #endif
340 	bo->ttm = NULL;
341 
342 	if (bdev->need_dma32)
343 		page_flags |= TTM_PAGE_FLAG_DMA32;
344 
345 	switch (bo->type) {
346 	case ttm_bo_type_device:
347 		if (zero_alloc)
348 			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
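		/* FALLTHROUGH: device bos share the kernel-type tt creation path */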
349 	case ttm_bo_type_kernel:
350 		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
351 						      page_flags, glob->dummy_read_page);
352 		if (unlikely(bo->ttm == NULL))
353 			ret = -ENOMEM;
354 		break;
355 	case ttm_bo_type_sg:
356 		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
357 						      page_flags | TTM_PAGE_FLAG_SG,
358 						      glob->dummy_read_page);
359 		if (unlikely(bo->ttm == NULL)) {
360 			ret = -ENOMEM;
361 			break;
362 		}
363 		bo->ttm->sg = bo->sg;
364 		break;
365 	default:
366 		printf("Illegal buffer object type\n");
367 		ret = -EINVAL;
368 		break;
369 	}
370 
371 	return ret;
372 }
373 
374 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
375 				  struct ttm_mem_reg *mem,
376 				  bool evict, bool interruptible,
377 				  bool no_wait_gpu)
378 {
379 	struct ttm_bo_device *bdev = bo->bdev;
380 	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
381 	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
382 	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
383 	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
384 	int ret = 0;
385 
386 	if (old_is_pci || new_is_pci ||
387 	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
388 		ret = ttm_mem_io_lock(old_man, true);
389 		if (unlikely(ret != 0))
390 			goto out_err;
391 		ttm_bo_unmap_virtual_locked(bo);
392 		ttm_mem_io_unlock(old_man);
393 	}
394 
395 	/*
396 	 * Create and bind a ttm if required.
397 	 */
398 
399 	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
400 		if (bo->ttm == NULL) {
401 			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
402 			ret = ttm_bo_add_ttm(bo, zero);
403 			if (ret)
404 				goto out_err;
405 		}
406 
407 		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
408 		if (ret)
409 			goto out_err;
410 
411 		if (mem->mem_type != TTM_PL_SYSTEM) {
412 			ret = ttm_tt_bind(bo->ttm, mem);
413 			if (ret)
414 				goto out_err;
415 		}
416 
417 		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
418 			if (bdev->driver->move_notify)
419 				bdev->driver->move_notify(bo, mem);
420 			bo->mem = *mem;
421 			mem->mm_node = NULL;
422 			goto moved;
423 		}
424 	}
425 
426 	if (bdev->driver->move_notify)
427 		bdev->driver->move_notify(bo, mem);
428 
429 	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
430 	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
431 		ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
432 	else if (bdev->driver->move)
433 		ret = bdev->driver->move(bo, evict, interruptible,
434 					 no_wait_gpu, mem);
435 	else
436 		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);
437 
438 	if (ret) {
439 		if (bdev->driver->move_notify) {
440 			struct ttm_mem_reg tmp_mem = *mem;
441 			*mem = bo->mem;
442 			bo->mem = tmp_mem;
443 			bdev->driver->move_notify(bo, mem);
444 			bo->mem = *mem;
445 			*mem = tmp_mem;
446 		}
447 
448 		goto out_err;
449 	}
450 
451 moved:
452 	if (bo->evicted) {
453 		if (bdev->driver->invalidate_caches) {
454 			ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
455 			if (ret)
456 				printf("Cannot flush read caches\n");
457 		}
458 		bo->evicted = false;
459 	}
460 
461 	if (bo->mem.mm_node) {
462 		bo->offset = (bo->mem.start << PAGE_SHIFT) +
463 		    bdev->man[bo->mem.mem_type].gpu_offset;
464 		bo->cur_placement = bo->mem.placement;
465 	} else
466 		bo->offset = 0;
467 
468 	return 0;
469 
470 out_err:
471 	new_man = &bdev->man[bo->mem.mem_type];
472 	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
473 		ttm_tt_unbind(bo->ttm);
474 		ttm_tt_destroy(bo->ttm);
475 		bo->ttm = NULL;
476 	}
477 
478 	return ret;
479 }
480 
481 /**
482  * Called with bo::reserved held.
483  * Will release GPU memory type usage on destruction.
484  * This is the place to put in driver-specific hooks to release
485  * driver-private resources.
486  * Will release the bo::reserved lock.
487  */
488 
489 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
490 {
491 	if (bo->bdev->driver->move_notify)
492 		bo->bdev->driver->move_notify(bo, NULL);
493 
494 	if (bo->ttm) {
495 		ttm_tt_unbind(bo->ttm);
496 		ttm_tt_destroy(bo->ttm);
497 		bo->ttm = NULL;
498 	}
499 	ttm_bo_mem_put(bo, &bo->mem);
500 
501 	atomic_set(&bo->reserved, 0);
502 	wakeup(&bo->event_queue);
503 
504 	/*
505 	 * Since the final reference to this bo may not be dropped by
506 	 * the current task we have to put a memory barrier here to make
507 	 * sure the changes done in this function are always visible.
508 	 *
509 	 * This function only needs protection against the final kref_put.
510 	 */
511 	smp_mb__before_atomic_dec();
512 }
513 
514 static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
515 {
516 	struct ttm_bo_device *bdev = bo->bdev;
517 	struct ttm_bo_global *glob = bo->glob;
518 	struct ttm_bo_driver *driver = bdev->driver;
519 	void *sync_obj = NULL;
520 	int put_count;
521 	int ret;
522 
523 	mtx_enter(&glob->lru_lock);
524 	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
525 
526 	mtx_enter(&bdev->fence_lock);
527 	(void) ttm_bo_wait(bo, false, false, true);
528 	if (!ret && !bo->sync_obj) {
529 		mtx_leave(&bdev->fence_lock);
530 		put_count = ttm_bo_del_from_lru(bo);
531 
532 		mtx_leave(&glob->lru_lock);
533 		ttm_bo_cleanup_memtype_use(bo);
534 
535 		ttm_bo_list_ref_sub(bo, put_count, true);
536 
537 		return;
538 	}
539 	if (bo->sync_obj)
540 		sync_obj = driver->sync_obj_ref(bo->sync_obj);
541 	mtx_leave(&bdev->fence_lock);
542 
543 	if (!ret) {
544 		atomic_set(&bo->reserved, 0);
545 		wakeup(&bo->event_queue);
546 	}
547 
548 	refcount_acquire(&bo->list_kref);
549 	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
550 	mtx_leave(&glob->lru_lock);
551 
552 	if (sync_obj) {
553 		driver->sync_obj_flush(sync_obj);
554 		driver->sync_obj_unref(&sync_obj);
555 	}
556 	timeout_add(&bdev->to,
557 			      ((hz / 100) < 1) ? 1 : hz / 100);
558 }
559 
560 /**
561  * ttm_bo_cleanup_refs_and_unlock
562  * If the bo is idle, remove it from the delayed-destroy and LRU lists
563  * and unref it.  If it is not idle, do nothing.
564  *
565  * Must be called with lru_lock and reservation held, this function
566  * will drop both before returning.
567  *
568  * @interruptible         Any sleeps should occur interruptibly.
569  * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
570  */
571 
572 static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
573 					  bool interruptible,
574 					  bool no_wait_gpu)
575 {
576 	struct ttm_bo_device *bdev = bo->bdev;
577 	struct ttm_bo_driver *driver = bdev->driver;
578 	struct ttm_bo_global *glob = bo->glob;
579 	int put_count;
580 	int ret;
581 
582 	mtx_enter(&bdev->fence_lock);
583 	ret = ttm_bo_wait(bo, false, false, true);
584 
585 	if (ret && !no_wait_gpu) {
586 		void *sync_obj;
587 
588 		/*
589 		 * Take a reference to the fence and unreserve,
590 		 * at this point the buffer should be dead, so
591 		 * no new sync objects can be attached.
592 		 */
593 		sync_obj = driver->sync_obj_ref(bo->sync_obj);
594 		mtx_leave(&bdev->fence_lock);
595 
596 		atomic_set(&bo->reserved, 0);
597 		wakeup(&bo->event_queue);
598 		mtx_leave(&glob->lru_lock);
599 
600 		ret = driver->sync_obj_wait(sync_obj, false, interruptible);
601 		driver->sync_obj_unref(&sync_obj);
602 		if (ret)
603 			return ret;
604 
605 		/*
606 		 * remove sync_obj with ttm_bo_wait, the wait should be
607 		 * finished, and no new wait object should have been added.
608 		 */
609 		mtx_enter(&bdev->fence_lock);
610 		ret = ttm_bo_wait(bo, false, false, true);
611 		WARN_ON(ret);
612 		mtx_leave(&bdev->fence_lock);
613 		if (ret)
614 			return ret;
615 
616 		mtx_enter(&glob->lru_lock);
617 		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
618 
619 		/*
620 		 * We raced, and lost, someone else holds the reservation now,
621 		 * and is probably busy in ttm_bo_cleanup_memtype_use.
622 		 *
623 		 * Even if it's not the case, because we finished waiting any
624 		 * delayed destruction would succeed, so just return success
625 		 * here.
626 		 */
627 		if (ret) {
628 			mtx_leave(&glob->lru_lock);
629 			return 0;
630 		}
631 	} else
632 		mtx_leave(&bdev->fence_lock);
633 
634 	if (ret || unlikely(list_empty(&bo->ddestroy))) {
635 		atomic_set(&bo->reserved, 0);
636 		wakeup(&bo->event_queue);
637 		mtx_leave(&glob->lru_lock);
638 		return ret;
639 	}
640 
641 	put_count = ttm_bo_del_from_lru(bo);
642 	list_del_init(&bo->ddestroy);
643 	++put_count;
644 
645 	mtx_leave(&glob->lru_lock);
646 	ttm_bo_cleanup_memtype_use(bo);
647 
648 	ttm_bo_list_ref_sub(bo, put_count, true);
649 
650 	return 0;
651 }
652 
653 /**
654  * Traverse the delayed-destroy list and call ttm_bo_cleanup_refs_and_unlock
655  * on all encountered buffers.
656  */
657 
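/*
 * Note: list references are taken on both the current and the next entry
 * before lru_lock is dropped, so the traversal survives concurrent removals.
 */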
658 static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
659 {
660 	struct ttm_bo_global *glob = bdev->glob;
661 	struct ttm_buffer_object *entry = NULL;
662 	int ret = 0;
663 
664 	mtx_enter(&glob->lru_lock);
665 	if (list_empty(&bdev->ddestroy))
666 		goto out_unlock;
667 
668 	entry = list_first_entry(&bdev->ddestroy,
669 		struct ttm_buffer_object, ddestroy);
670 	refcount_acquire(&entry->list_kref);
671 
672 	for (;;) {
673 		struct ttm_buffer_object *nentry = NULL;
674 
675 		if (entry->ddestroy.next != &bdev->ddestroy) {
676 			nentry = list_first_entry(&entry->ddestroy,
677 				struct ttm_buffer_object, ddestroy);
678 			refcount_acquire(&nentry->list_kref);
679 		}
680 
681 		ret = ttm_bo_reserve_locked(entry, false, !remove_all, false, 0);
682 		if (!ret)
683 			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
684 							     !remove_all);
685 		else
686 			mtx_leave(&glob->lru_lock);
687 
688 		if (refcount_release(&entry->list_kref))
689 			ttm_bo_release_list(entry);
690 		entry = nentry;
691 
692 		if (ret || !entry)
693 			goto out;
694 
695 		mtx_enter(&glob->lru_lock);
696 		if (list_empty(&entry->ddestroy))
697 			break;
698 	}
699 
700 out_unlock:
701 	mtx_leave(&glob->lru_lock);
702 out:
703 	if (entry && refcount_release(&entry->list_kref))
704 		ttm_bo_release_list(entry);
705 	return ret;
706 }
707 
708 static void ttm_bo_delayed_tick(void *arg)
709 {
710 	struct ttm_bo_device *bdev = arg;
711 
712 	task_add(systq, &bdev->task);
713 }
714 
715 void
716 ttm_bo_delayed_workqueue(void *arg1, void *arg2)
717 {
718 	struct ttm_bo_device *bdev = arg1;
719 
720 	if (ttm_bo_delayed_delete(bdev, false)) {
721 		timeout_add(&bdev->to,
722 				      ((hz / 100) < 1) ? 1 : hz / 100);
723 	}
724 }
725 
726 static void ttm_bo_release(struct ttm_buffer_object *bo)
727 {
728 	struct ttm_bo_device *bdev = bo->bdev;
729 	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
730 
731 	rw_enter_write(&bdev->vm_lock);
732 	if (likely(bo->vm_node != NULL)) {
733 		RB_REMOVE(ttm_bo_device_buffer_objects,
734 		    &bdev->addr_space_rb, bo);
735 		drm_mm_put_block(bo->vm_node);
736 		bo->vm_node = NULL;
737 	}
738 	rw_exit_write(&bdev->vm_lock);
739 	ttm_mem_io_lock(man, false);
740 	ttm_mem_io_free_vm(bo);
741 	ttm_mem_io_unlock(man);
742 	ttm_bo_cleanup_refs_or_queue(bo);
743 	if (refcount_release(&bo->list_kref))
744 		ttm_bo_release_list(bo);
745 }
746 
747 void ttm_bo_unref(struct ttm_buffer_object **p_bo)
748 {
749 	struct ttm_buffer_object *bo = *p_bo;
750 
751 	*p_bo = NULL;
752 	if (refcount_release(&bo->kref))
753 		ttm_bo_release(bo);
754 }
755 EXPORT_SYMBOL(ttm_bo_unref);
756 
757 int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
758 {
759 	timeout_del(&bdev->to);
760 	task_del(systq, &bdev->task);
761 	return 0;
762 }
763 EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
764 
765 void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
766 {
767 	if (resched)
768 		timeout_add(&bdev->to,
769 				      ((hz / 100) < 1) ? 1 : hz / 100);
770 }
771 EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
772 
773 static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
774 			bool no_wait_gpu)
775 {
776 	struct ttm_bo_device *bdev = bo->bdev;
777 	struct ttm_mem_reg evict_mem;
778 	struct ttm_placement placement;
779 	int ret = 0;
780 
781 	mtx_enter(&bdev->fence_lock);
782 	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
783 	mtx_leave(&bdev->fence_lock);
784 
785 	if (unlikely(ret != 0)) {
786 		if (ret != -ERESTART) {
787 			printf("Failed to expire sync object before buffer eviction\n");
788 		}
789 		goto out;
790 	}
791 
792 	BUG_ON(!ttm_bo_is_reserved(bo));
793 
794 	evict_mem = bo->mem;
795 	evict_mem.mm_node = NULL;
796 	evict_mem.bus.io_reserved_vm = false;
797 	evict_mem.bus.io_reserved_count = 0;
798 
799 	placement.fpfn = 0;
800 	placement.lpfn = 0;
801 	placement.num_placement = 0;
802 	placement.num_busy_placement = 0;
803 	bdev->driver->evict_flags(bo, &placement);
804 	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
805 				no_wait_gpu);
806 	if (ret) {
807 		if (ret != -ERESTART) {
808 			printf("Failed to find memory space for buffer 0x%p eviction\n",
809 			       bo);
810 			ttm_bo_mem_space_debug(bo, &placement);
811 		}
812 		goto out;
813 	}
814 
815 	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
816 				     no_wait_gpu);
817 	if (ret) {
818 		if (ret != -ERESTART)
819 			printf("Buffer eviction failed\n");
820 		ttm_bo_mem_put(bo, &evict_mem);
821 		goto out;
822 	}
823 	bo->evicted = true;
824 out:
825 	return ret;
826 }
827 
828 static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
829 				uint32_t mem_type,
830 				bool interruptible,
831 				bool no_wait_gpu)
832 {
833 	struct ttm_bo_global *glob = bdev->glob;
834 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
835 	struct ttm_buffer_object *bo;
836 	int ret = -EBUSY, put_count;
837 
838 	mtx_enter(&glob->lru_lock);
839 	list_for_each_entry(bo, &man->lru, lru) {
840 		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
841 		if (!ret)
842 			break;
843 	}
844 
845 	if (ret) {
846 		mtx_leave(&glob->lru_lock);
847 		return ret;
848 	}
849 
850 	refcount_acquire(&bo->list_kref);
851 
852 	if (!list_empty(&bo->ddestroy)) {
853 		ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
854 						     no_wait_gpu);
855 		if (refcount_release(&bo->list_kref))
856 			ttm_bo_release_list(bo);
857 		return ret;
858 	}
859 
860 	put_count = ttm_bo_del_from_lru(bo);
861 	mtx_leave(&glob->lru_lock);
862 
863 	BUG_ON(ret != 0);
864 
865 	ttm_bo_list_ref_sub(bo, put_count, true);
866 
867 	ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
868 	ttm_bo_unreserve(bo);
869 
870 	if (refcount_release(&bo->list_kref))
871 		ttm_bo_release_list(bo);
872 	return ret;
873 }
874 
875 void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
876 {
877 	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
878 
879 	if (mem->mm_node)
880 		(*man->func->put_node)(man, mem);
881 }
882 EXPORT_SYMBOL(ttm_bo_mem_put);
883 
884 /**
885  * Repeatedly evict memory from the LRU for @mem_type until we create enough
886  * space, or we've evicted everything and there isn't enough space.
887  */
888 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
889 					uint32_t mem_type,
890 					struct ttm_placement *placement,
891 					struct ttm_mem_reg *mem,
892 					bool interruptible,
893 					bool no_wait_gpu)
894 {
895 	struct ttm_bo_device *bdev = bo->bdev;
896 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
897 	int ret;
898 
899 	do {
900 		ret = (*man->func->get_node)(man, bo, placement, mem);
901 		if (unlikely(ret != 0))
902 			return ret;
903 		if (mem->mm_node)
904 			break;
905 		ret = ttm_mem_evict_first(bdev, mem_type,
906 					  interruptible, no_wait_gpu);
907 		if (unlikely(ret != 0))
908 			return ret;
909 	} while (1);
910 	if (mem->mm_node == NULL)
911 		return -ENOMEM;
912 	mem->mem_type = mem_type;
913 	return 0;
914 }
915 
916 static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
917 				      uint32_t cur_placement,
918 				      uint32_t proposed_placement)
919 {
920 	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
921 	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
922 
923 	/**
924 	 * Keep current caching if possible.
925 	 */
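	/*
	 * For example, a bo that is currently write-combined and is offered
	 * WC|UNCACHED keeps TTM_PL_FLAG_WC instead of dropping to uncached.
	 */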
926 
927 	if ((cur_placement & caching) != 0)
928 		result |= (cur_placement & caching);
929 	else if ((man->default_caching & caching) != 0)
930 		result |= man->default_caching;
931 	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
932 		result |= TTM_PL_FLAG_CACHED;
933 	else if ((TTM_PL_FLAG_WC & caching) != 0)
934 		result |= TTM_PL_FLAG_WC;
935 	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
936 		result |= TTM_PL_FLAG_UNCACHED;
937 
938 	return result;
939 }
940 
941 static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
942 				 uint32_t mem_type,
943 				 uint32_t proposed_placement,
944 				 uint32_t *masked_placement)
945 {
946 	uint32_t cur_flags = ttm_bo_type_flags(mem_type);
947 
948 	if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
949 		return false;
950 
951 	if ((proposed_placement & man->available_caching) == 0)
952 		return false;
953 
954 	cur_flags |= (proposed_placement & man->available_caching);
955 
956 	*masked_placement = cur_flags;
957 	return true;
958 }
959 
960 /**
961  * Creates space for memory region @mem according to its type.
962  *
963  * This function first searches for free space in compatible memory types in
964  * the priority order defined by the driver.  If free space isn't found, then
965  * ttm_bo_mem_force_space is attempted in priority order to evict and find
966  * space.
967  */
968 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
969 			struct ttm_placement *placement,
970 			struct ttm_mem_reg *mem,
971 			bool interruptible,
972 			bool no_wait_gpu)
973 {
974 	struct ttm_bo_device *bdev = bo->bdev;
975 	struct ttm_mem_type_manager *man;
976 	uint32_t mem_type = TTM_PL_SYSTEM;
977 	uint32_t cur_flags = 0;
978 	bool type_found = false;
979 	bool type_ok = false;
980 	bool has_erestartsys = false;
981 	int i, ret;
982 
983 	mem->mm_node = NULL;
984 	for (i = 0; i < placement->num_placement; ++i) {
985 		ret = ttm_mem_type_from_flags(placement->placement[i],
986 						&mem_type);
987 		if (ret)
988 			return ret;
989 		man = &bdev->man[mem_type];
990 
991 		type_ok = ttm_bo_mt_compatible(man,
992 						mem_type,
993 						placement->placement[i],
994 						&cur_flags);
995 
996 		if (!type_ok)
997 			continue;
998 
999 		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
1000 						  cur_flags);
1001 		/*
1002 		 * Copy the access and other non-mapping-related flag bits from
1003 		 * the memory placement flags into the current flags.
1004 		 */
1005 		ttm_flag_masked(&cur_flags, placement->placement[i],
1006 				~TTM_PL_MASK_MEMTYPE);
1007 
1008 		if (mem_type == TTM_PL_SYSTEM)
1009 			break;
1010 
1011 		if (man->has_type && man->use_type) {
1012 			type_found = true;
1013 			ret = (*man->func->get_node)(man, bo, placement, mem);
1014 			if (unlikely(ret))
1015 				return ret;
1016 		}
1017 		if (mem->mm_node)
1018 			break;
1019 	}
1020 
1021 	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
1022 		mem->mem_type = mem_type;
1023 		mem->placement = cur_flags;
1024 		return 0;
1025 	}
1026 
1027 	if (!type_found)
1028 		return -EINVAL;
1029 
1030 	for (i = 0; i < placement->num_busy_placement; ++i) {
1031 		ret = ttm_mem_type_from_flags(placement->busy_placement[i],
1032 						&mem_type);
1033 		if (ret)
1034 			return ret;
1035 		man = &bdev->man[mem_type];
1036 		if (!man->has_type)
1037 			continue;
1038 		if (!ttm_bo_mt_compatible(man,
1039 						mem_type,
1040 						placement->busy_placement[i],
1041 						&cur_flags))
1042 			continue;
1043 
1044 		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
1045 						  cur_flags);
1046 		/*
1047 		 * Copy the access and other non-mapping-related flag bits from
1048 		 * the memory placement flags into the current flags.
1049 		 */
1050 		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
1051 				~TTM_PL_MASK_MEMTYPE);
1052 
1053 
1054 		if (mem_type == TTM_PL_SYSTEM) {
1055 			mem->mem_type = mem_type;
1056 			mem->placement = cur_flags;
1057 			mem->mm_node = NULL;
1058 			return 0;
1059 		}
1060 
1061 		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
1062 						interruptible, no_wait_gpu);
1063 		if (ret == 0 && mem->mm_node) {
1064 			mem->placement = cur_flags;
1065 			return 0;
1066 		}
1067 		if (ret == -ERESTART)
1068 			has_erestartsys = true;
1069 	}
1070 	ret = (has_erestartsys) ? -ERESTART: -ENOMEM;
1071 	return ret;
1072 }
1073 EXPORT_SYMBOL(ttm_bo_mem_space);
1074 
1075 int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
1076 			struct ttm_placement *placement,
1077 			bool interruptible,
1078 			bool no_wait_gpu)
1079 {
1080 	int ret = 0;
1081 	struct ttm_mem_reg mem;
1082 	struct ttm_bo_device *bdev = bo->bdev;
1083 
1084 	BUG_ON(!ttm_bo_is_reserved(bo));
1085 
1086 	/*
1087 	 * FIXME: It's possible to pipeline buffer moves.
1088 	 * Have the driver move function wait for idle when necessary,
1089 	 * instead of doing it here.
1090 	 */
1091 	mtx_enter(&bdev->fence_lock);
1092 	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
1093 	mtx_leave(&bdev->fence_lock);
1094 	if (ret)
1095 		return ret;
1096 	mem.num_pages = bo->num_pages;
1097 	mem.size = mem.num_pages << PAGE_SHIFT;
1098 	mem.page_alignment = bo->mem.page_alignment;
1099 	mem.bus.io_reserved_vm = false;
1100 	mem.bus.io_reserved_count = 0;
1101 	/*
1102 	 * Determine where to move the buffer.
1103 	 */
1104 	ret = ttm_bo_mem_space(bo, placement, &mem,
1105 			       interruptible, no_wait_gpu);
1106 	if (ret)
1107 		goto out_unlock;
1108 	ret = ttm_bo_handle_move_mem(bo, &mem, false,
1109 				     interruptible, no_wait_gpu);
1110 out_unlock:
1111 	if (ret && mem.mm_node)
1112 		ttm_bo_mem_put(bo, &mem);
1113 	return ret;
1114 }
1115 
1116 static bool ttm_bo_mem_compat(struct ttm_placement *placement,
1117 			      struct ttm_mem_reg *mem,
1118 			      uint32_t *new_flags)
1119 {
1120 	int i;
1121 
1122 	if (mem->mm_node && placement->lpfn != 0 &&
1123 	    (mem->start < placement->fpfn ||
1124 	     mem->start + mem->num_pages > placement->lpfn))
1125 		return false;
1126 
1127 	for (i = 0; i < placement->num_placement; i++) {
1128 		*new_flags = placement->placement[i];
1129 		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
1130 		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
1131 			return true;
1132 	}
1133 
1134 	for (i = 0; i < placement->num_busy_placement; i++) {
1135 		*new_flags = placement->busy_placement[i];
1136 		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
1137 		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
1138 			return true;
1139 	}
1140 
1141 	return false;
1142 }
1143 
1144 int ttm_bo_validate(struct ttm_buffer_object *bo,
1145 			struct ttm_placement *placement,
1146 			bool interruptible,
1147 			bool no_wait_gpu)
1148 {
1149 	int ret;
1150 	uint32_t new_flags;
1151 
1152 	BUG_ON(!ttm_bo_is_reserved(bo));
1153 	/* Check that range is valid */
1154 	if (placement->lpfn || placement->fpfn)
1155 		if (placement->fpfn > placement->lpfn ||
1156 			(placement->lpfn - placement->fpfn) < bo->num_pages)
1157 			return -EINVAL;
1158 	/*
1159 	 * Check whether we need to move buffer.
1160 	 */
1161 	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
1162 		ret = ttm_bo_move_buffer(bo, placement, interruptible,
1163 					 no_wait_gpu);
1164 		if (ret)
1165 			return ret;
1166 	} else {
1167 		/*
1168 		 * Copy the access and other non-mapping-related flag bits from
1169 		 * the compatible memory placement flags into the active flags.
1170 		 */
1171 		ttm_flag_masked(&bo->mem.placement, new_flags,
1172 				~TTM_PL_MASK_MEMTYPE);
1173 	}
1174 	/*
1175 	 * We might need to add a TTM.
1176 	 */
1177 	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
1178 		ret = ttm_bo_add_ttm(bo, true);
1179 		if (ret)
1180 			return ret;
1181 	}
1182 	return 0;
1183 }
1184 EXPORT_SYMBOL(ttm_bo_validate);
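/*
 * Illustrative driver-side use of ttm_bo_validate() (a sketch assuming a
 * single VRAM placement; real drivers build placements from their own
 * domain lists):
 *
 *	uint32_t flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC;
 *	struct ttm_placement placement = {
 *		.fpfn = 0, .lpfn = 0,
 *		.num_placement = 1, .placement = &flags,
 *		.num_busy_placement = 1, .busy_placement = &flags,
 *	};
 *
 *	ret = ttm_bo_reserve(bo, true, false, false, 0);
 *	if (ret == 0) {
 *		ret = ttm_bo_validate(bo, &placement, true, false);
 *		ttm_bo_unreserve(bo);
 *	}
 */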
1185 
1186 int ttm_bo_check_placement(struct ttm_buffer_object *bo,
1187 				struct ttm_placement *placement)
1188 {
1189 	BUG_ON((placement->fpfn || placement->lpfn) &&
1190 	       (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));
1191 
1192 	return 0;
1193 }
1194 
1195 int ttm_bo_init(struct ttm_bo_device *bdev,
1196 		struct ttm_buffer_object *bo,
1197 		unsigned long size,
1198 		enum ttm_bo_type type,
1199 		struct ttm_placement *placement,
1200 		uint32_t page_alignment,
1201 		bool interruptible,
1202 		struct uvm_object *persistent_swap_storage,
1203 		size_t acc_size,
1204 		struct sg_table *sg,
1205 		void (*destroy) (struct ttm_buffer_object *))
1206 {
1207 	int ret = 0;
1208 	unsigned long num_pages;
1209 	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
1210 
1211 	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
1212 	if (ret) {
1213 		printf("Out of kernel memory\n");
1214 		if (destroy)
1215 			(*destroy)(bo);
1216 		else
1217 			kfree(bo);
1218 		return -ENOMEM;
1219 	}
1220 
1221 	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1222 	if (num_pages == 0) {
1223 		printf("Illegal buffer object size\n");
1224 		if (destroy)
1225 			(*destroy)(bo);
1226 		else
1227 			kfree(bo);
1228 		ttm_mem_global_free(mem_glob, acc_size);
1229 		return -EINVAL;
1230 	}
1231 	bo->destroy = destroy;
1232 
1233 	uvm_objinit(&bo->uobj, NULL, 1);
1234 	refcount_init(&bo->kref, 1);
1235 	refcount_init(&bo->list_kref, 1);
1236 	atomic_set(&bo->cpu_writers, 0);
1237 	atomic_set(&bo->reserved, 1);
1238 #ifdef notyet
1239 	init_waitqueue_head(&bo->event_queue);
1240 #endif
1241 	INIT_LIST_HEAD(&bo->lru);
1242 	INIT_LIST_HEAD(&bo->ddestroy);
1243 	INIT_LIST_HEAD(&bo->swap);
1244 	INIT_LIST_HEAD(&bo->io_reserve_lru);
1245 	bo->bdev = bdev;
1246 	bo->glob = bdev->glob;
1247 	bo->type = type;
1248 	bo->num_pages = num_pages;
1249 	bo->mem.size = num_pages << PAGE_SHIFT;
1250 	bo->mem.mem_type = TTM_PL_SYSTEM;
1251 	bo->mem.num_pages = bo->num_pages;
1252 	bo->mem.mm_node = NULL;
1253 	bo->mem.page_alignment = page_alignment;
1254 	bo->mem.bus.io_reserved_vm = false;
1255 	bo->mem.bus.io_reserved_count = 0;
1256 	bo->priv_flags = 0;
1257 	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
1258 	bo->seq_valid = false;
1259 	bo->persistent_swap_storage = persistent_swap_storage;
1260 	bo->acc_size = acc_size;
1261 	bo->sg = sg;
1262 	atomic_inc(&bo->glob->bo_count);
1263 
1264 	ret = ttm_bo_check_placement(bo, placement);
1265 	if (unlikely(ret != 0))
1266 		goto out_err;
1267 
1268 	/*
1269 	 * For ttm_bo_type_device buffers, allocate
1270 	 * address space from the device.
1271 	 */
1272 	if (bo->type == ttm_bo_type_device ||
1273 	    bo->type == ttm_bo_type_sg) {
1274 		ret = ttm_bo_setup_vm(bo);
1275 		if (ret)
1276 			goto out_err;
1277 	}
1278 
1279 	ret = ttm_bo_validate(bo, placement, interruptible, false);
1280 	if (ret)
1281 		goto out_err;
1282 
1283 	ttm_bo_unreserve(bo);
1284 	return 0;
1285 
1286 out_err:
1287 	ttm_bo_unreserve(bo);
1288 	ttm_bo_unref(&bo);
1289 
1290 	return ret;
1291 }
1292 EXPORT_SYMBOL(ttm_bo_init);
1293 
1294 size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
1295 		       unsigned long bo_size,
1296 		       unsigned struct_size)
1297 {
1298 	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
1299 	size_t size = 0;
1300 
1301 	size += ttm_round_pot(struct_size);
1302 	size += PAGE_ALIGN(npages * sizeof(void *));
1303 	size += ttm_round_pot(sizeof(struct ttm_tt));
1304 	return size;
1305 }
1306 EXPORT_SYMBOL(ttm_bo_acc_size);
1307 
1308 size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
1309 			   unsigned long bo_size,
1310 			   unsigned struct_size)
1311 {
1312 	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
1313 	size_t size = 0;
1314 
1315 	size += ttm_round_pot(struct_size);
1316 	size += PAGE_ALIGN(npages * sizeof(void *));
1317 	size += PAGE_ALIGN(npages * sizeof(bus_addr_t));
1318 	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
1319 	return size;
1320 }
1321 EXPORT_SYMBOL(ttm_bo_dma_acc_size);
1322 
1323 int ttm_bo_create(struct ttm_bo_device *bdev,
1324 			unsigned long size,
1325 			enum ttm_bo_type type,
1326 			struct ttm_placement *placement,
1327 			uint32_t page_alignment,
1328 			bool interruptible,
1329 			struct uvm_object *persistent_swap_storage,
1330 			struct ttm_buffer_object **p_bo)
1331 {
1332 	struct ttm_buffer_object *bo;
1333 	size_t acc_size;
1334 	int ret;
1335 
1336 	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
1337 	if (unlikely(bo == NULL))
1338 		return -ENOMEM;
1339 
1340 	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
1341 	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
1342 			  interruptible, persistent_swap_storage, acc_size,
1343 			  NULL, NULL);
1344 	if (likely(ret == 0))
1345 		*p_bo = bo;
1346 
1347 	return ret;
1348 }
1349 EXPORT_SYMBOL(ttm_bo_create);
1350 
1351 static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1352 					unsigned mem_type, bool allow_errors)
1353 {
1354 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1355 	struct ttm_bo_global *glob = bdev->glob;
1356 	int ret;
1357 
1358 	/*
1359 	 * Can't use standard list traversal since we're unlocking.
1360 	 */
1361 
1362 	mtx_enter(&glob->lru_lock);
1363 	while (!list_empty(&man->lru)) {
1364 		mtx_leave(&glob->lru_lock);
1365 		ret = ttm_mem_evict_first(bdev, mem_type, false, false);
1366 		if (ret) {
1367 			if (allow_errors) {
1368 				return ret;
1369 			} else {
1370 				printf("Cleanup eviction failed\n");
1371 			}
1372 		}
1373 		mtx_enter(&glob->lru_lock);
1374 	}
1375 	mtx_leave(&glob->lru_lock);
1376 	return 0;
1377 }
1378 
1379 int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1380 {
1381 	struct ttm_mem_type_manager *man;
1382 	int ret = -EINVAL;
1383 
1384 	if (mem_type >= TTM_NUM_MEM_TYPES) {
1385 		printf("Illegal memory type %d\n", mem_type);
1386 		return ret;
1387 	}
1388 	man = &bdev->man[mem_type];
1389 
1390 	if (!man->has_type) {
1391 		printf("Trying to take down uninitialized memory manager type %u\n",
1392 		       mem_type);
1393 		return ret;
1394 	}
1395 
1396 	man->use_type = false;
1397 	man->has_type = false;
1398 
1399 	ret = 0;
1400 	if (mem_type > 0) {
1401 		ttm_bo_force_list_clean(bdev, mem_type, false);
1402 
1403 		ret = (*man->func->takedown)(man);
1404 	}
1405 
1406 	return ret;
1407 }
1408 EXPORT_SYMBOL(ttm_bo_clean_mm);
1409 
1410 int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1411 {
1412 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1413 
1414 	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
1415 		printf("Illegal memory manager memory type %u\n", mem_type);
1416 		return -EINVAL;
1417 	}
1418 
1419 	if (!man->has_type) {
1420 		printf("Memory type %u has not been initialized\n", mem_type);
1421 		return 0;
1422 	}
1423 
1424 	return ttm_bo_force_list_clean(bdev, mem_type, true);
1425 }
1426 EXPORT_SYMBOL(ttm_bo_evict_mm);
1427 
1428 int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1429 			unsigned long p_size)
1430 {
1431 	int ret = -EINVAL;
1432 	struct ttm_mem_type_manager *man;
1433 
1434 	BUG_ON(type >= TTM_NUM_MEM_TYPES);
1435 	man = &bdev->man[type];
1436 	BUG_ON(man->has_type);
1437 	man->io_reserve_fastpath = true;
1438 	man->use_io_reserve_lru = false;
1439 	rw_init(&man->io_reserve_rwlock, "ttm_iores");
1440 	INIT_LIST_HEAD(&man->io_reserve_lru);
1441 
1442 	ret = bdev->driver->init_mem_type(bdev, type, man);
1443 	if (ret)
1444 		return ret;
1445 	man->bdev = bdev;
1446 
1447 	ret = 0;
1448 	if (type != TTM_PL_SYSTEM) {
1449 		ret = (*man->func->init)(man, p_size);
1450 		if (ret)
1451 			return ret;
1452 	}
1453 	man->has_type = true;
1454 	man->use_type = true;
1455 	man->size = p_size;
1456 
1457 	INIT_LIST_HEAD(&man->lru);
1458 
1459 	return 0;
1460 }
1461 EXPORT_SYMBOL(ttm_bo_init_mm);
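/*
 * Drivers typically call ttm_bo_init_mm() once per memory domain at init
 * time; an illustrative sketch (sizes are in pages, the domain is an example):
 *
 *	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_bytes >> PAGE_SHIFT);
 */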
1462 
1463 static void ttm_bo_global_kobj_release(struct ttm_bo_global *glob)
1464 {
1465 
1466 	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
1467 	km_free(glob->dummy_read_page, PAGE_SIZE, &kv_any, &kp_dma_zero);
1468 	kfree(glob);
1469 }
1470 
1471 void ttm_bo_global_release(struct drm_global_reference *ref)
1472 {
1473 	struct ttm_bo_global *glob = ref->object;
1474 
1475 	if (refcount_release(&glob->kobj_ref))
1476 		ttm_bo_global_kobj_release(glob);
1477 }
1478 EXPORT_SYMBOL(ttm_bo_global_release);
1479 
1480 int ttm_bo_global_init(struct drm_global_reference *ref)
1481 {
1482 	struct ttm_bo_global_ref *bo_ref =
1483 		container_of(ref, struct ttm_bo_global_ref, ref);
1484 	struct ttm_bo_global *glob = ref->object;
1485 	int ret;
1486 
1487 	rw_init(&glob->device_list_rwlock, "ttm_devlist");
1488 	mtx_init(&glob->lru_lock, IPL_NONE);
1489 	glob->mem_glob = bo_ref->mem_glob;
1490 	glob->dummy_read_page = km_alloc(PAGE_SIZE, &kv_any, &kp_dma_zero,
1491 	    &kd_waitok);
1492 
1493 	if (unlikely(glob->dummy_read_page == NULL)) {
1494 		ret = -ENOMEM;
1495 		goto out_no_drp;
1496 	}
1497 
1498 	INIT_LIST_HEAD(&glob->swap_lru);
1499 	INIT_LIST_HEAD(&glob->device_list);
1500 
1501 	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
1502 	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
1503 	if (unlikely(ret != 0)) {
1504 		printf("Could not register buffer object swapout\n");
1505 		goto out_no_shrink;
1506 	}
1507 
1508 	atomic_set(&glob->bo_count, 0);
1509 
1510 	refcount_init(&glob->kobj_ref, 1);
1511 	return (0);
1512 
1513 out_no_shrink:
1514 	km_free(glob->dummy_read_page, PAGE_SIZE, &kv_any, &kp_dma_zero);
1515 out_no_drp:
1516 	kfree(glob);
1517 	return ret;
1518 }
1519 EXPORT_SYMBOL(ttm_bo_global_init);
1520 
1521 
1522 int ttm_bo_device_release(struct ttm_bo_device *bdev)
1523 {
1524 	int ret = 0;
1525 	unsigned i = TTM_NUM_MEM_TYPES;
1526 	struct ttm_mem_type_manager *man;
1527 	struct ttm_bo_global *glob = bdev->glob;
1528 
1529 	while (i--) {
1530 		man = &bdev->man[i];
1531 		if (man->has_type) {
1532 			man->use_type = false;
1533 			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
1534 				ret = -EBUSY;
1535 				printf("DRM memory manager type %d is not clean\n",
1536 				       i);
1537 			}
1538 			man->has_type = false;
1539 		}
1540 	}
1541 
1542 	rw_enter_write(&glob->device_list_rwlock);
1543 	list_del(&bdev->device_list);
1544 	rw_exit_write(&glob->device_list_rwlock);
1545 
1546 	timeout_del(&bdev->to);
1547 	task_del(systq, &bdev->task);
1548 
1549 	while (ttm_bo_delayed_delete(bdev, true))
1550 		;
1551 
1552 	mtx_enter(&glob->lru_lock);
1553 	if (list_empty(&bdev->ddestroy))
1554 		TTM_DEBUG("Delayed destroy list was clean\n");
1555 
1556 	if (list_empty(&bdev->man[0].lru))
1557 		TTM_DEBUG("Swap list was clean\n");
1558 	mtx_leave(&glob->lru_lock);
1559 
1560 	BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
1561 	rw_enter_write(&bdev->vm_lock);
1562 	drm_mm_takedown(&bdev->addr_space_mm);
1563 	rw_exit_write(&bdev->vm_lock);
1564 
1565 	return ret;
1566 }
1567 EXPORT_SYMBOL(ttm_bo_device_release);
1568 
1569 int ttm_bo_device_init(struct ttm_bo_device *bdev,
1570 		       struct ttm_bo_global *glob,
1571 		       struct ttm_bo_driver *driver,
1572 		       uint64_t file_page_offset,
1573 		       bool need_dma32)
1574 {
1575 	int ret = -EINVAL;
1576 
1577 	rw_init(&bdev->vm_lock, "ttmvm");
1578 	bdev->driver = driver;
1579 
1580 	memset(bdev->man, 0, sizeof(bdev->man));
1581 
1582 	/*
1583 	 * Initialize the system memory buffer type.
1584 	 * Other types need to be driver / IOCTL initialized.
1585 	 */
1586 	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
1587 	if (unlikely(ret != 0))
1588 		goto out_no_sys;
1589 
1590 	RB_INIT(&bdev->addr_space_rb);
1591 	ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
1592 	if (unlikely(ret != 0))
1593 		goto out_no_addr_mm;
1594 
1595 	task_set(&bdev->task, ttm_bo_delayed_workqueue, bdev, NULL);
1596 	timeout_set(&bdev->to, ttm_bo_delayed_tick, bdev);
1597 	INIT_LIST_HEAD(&bdev->ddestroy);
1598 	bdev->dev_mapping = NULL;
1599 	bdev->glob = glob;
1600 	bdev->need_dma32 = need_dma32;
1601 	bdev->val_seq = 0;
1602 	mtx_init(&bdev->fence_lock, IPL_NONE);
1603 	rw_enter_write(&glob->device_list_rwlock);
1604 	list_add_tail(&bdev->device_list, &glob->device_list);
1605 	rw_exit_write(&glob->device_list_rwlock);
1606 
1607 	return 0;
1608 out_no_addr_mm:
1609 	ttm_bo_clean_mm(bdev, 0);
1610 out_no_sys:
1611 	return ret;
1612 }
1613 EXPORT_SYMBOL(ttm_bo_device_init);
1614 
1615 /*
1616  * buffer object vm functions.
1617  */
1618 
1619 bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1620 {
1621 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1622 
1623 	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
1624 		if (mem->mem_type == TTM_PL_SYSTEM)
1625 			return false;
1626 
1627 		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
1628 			return false;
1629 
1630 		if (mem->placement & TTM_PL_FLAG_CACHED)
1631 			return false;
1632 	}
1633 	return true;
1634 }
1635 
1636 void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
1637 {
1638 	struct ttm_tt *ttm = bo->ttm;
1639 	struct vm_page *page;
1640 	bus_addr_t addr;
1641 	paddr_t paddr;
1642 	int i;
1643 
1644 	if (bo->mem.bus.is_iomem) {
1645 		for (i = 0; i < bo->mem.num_pages; ++i) {
1646 			addr = bo->mem.bus.base + bo->mem.bus.offset;
1647 			paddr = bus_space_mmap(bo->bdev->memt, addr,
1648 					       i << PAGE_SHIFT, 0, 0);
1649 			page = PHYS_TO_VM_PAGE(paddr);
1650 			if (unlikely(page == NULL))
1651 				continue;
1652 			pmap_page_protect(page, PROT_NONE);
1653 		}
1654 	} else if (ttm) {
1655 		for (i = 0; i < ttm->num_pages; ++i) {
1656 			page = ttm->pages[i];
1657 			if (unlikely(page == NULL))
1658 				continue;
1659 			pmap_page_protect(page, PROT_NONE);
1660 		}
1661 	}
1662 	ttm_mem_io_free_vm(bo);
1663 }
1664 
1665 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1666 {
1667 	struct ttm_bo_device *bdev = bo->bdev;
1668 	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
1669 
1670 	ttm_mem_io_lock(man, false);
1671 	ttm_bo_unmap_virtual_locked(bo);
1672 	ttm_mem_io_unlock(man);
1673 }
1674 
1675 
1676 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1677 
1678 static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
1679 {
1680 	struct ttm_bo_device *bdev = bo->bdev;
1681 
1682 	/* The caller acquired bdev->vm_lock. */
1683 	RB_INSERT(ttm_bo_device_buffer_objects, &bdev->addr_space_rb, bo);
1684 }
1685 
1686 /**
1687  * ttm_bo_setup_vm:
1688  *
1689  * @bo: the buffer to allocate address space for
1690  *
1691  * Allocate address space in the drm device so that applications
1692  * can mmap the buffer and access the contents. This only
1693  * applies to ttm_bo_type_device and ttm_bo_type_sg objects, as others
1694  * are not placed in the drm device address space.
1695  */
1696 
1697 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
1698 {
1699 	struct ttm_bo_device *bdev = bo->bdev;
1700 	int ret;
1701 
1702 retry_pre_get:
1703 	ret = drm_mm_pre_get(&bdev->addr_space_mm);
1704 	if (unlikely(ret != 0))
1705 		return ret;
1706 
1707 	rw_enter_write(&bdev->vm_lock);
1708 	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
1709 					 bo->mem.num_pages, 0, 0);
1710 
1711 	if (unlikely(bo->vm_node == NULL)) {
1712 		ret = -ENOMEM;
1713 		goto out_unlock;
1714 	}
1715 
1716 	bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
1717 					      bo->mem.num_pages, 0);
1718 
1719 	if (unlikely(bo->vm_node == NULL)) {
1720 		rw_exit_write(&bdev->vm_lock);
1721 		goto retry_pre_get;
1722 	}
1723 
1724 	ttm_bo_vm_insert_rb(bo);
1725 	rw_exit_write(&bdev->vm_lock);
1726 	bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
1727 
1728 	return 0;
1729 out_unlock:
1730 	rw_exit_write(&bdev->vm_lock);
1731 	return ret;
1732 }
1733 
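/*
 * Note: called with bdev->fence_lock held.  The lock is dropped around the
 * driver sync-object waits and unrefs and is held again on return.
 */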
1734 int ttm_bo_wait(struct ttm_buffer_object *bo,
1735 		bool lazy, bool interruptible, bool no_wait)
1736 {
1737 	struct ttm_bo_driver *driver = bo->bdev->driver;
1738 	struct ttm_bo_device *bdev = bo->bdev;
1739 	void *sync_obj;
1740 	int ret = 0;
1741 
1742 	if (likely(bo->sync_obj == NULL))
1743 		return 0;
1744 
1745 	while (bo->sync_obj) {
1746 
1747 		if (driver->sync_obj_signaled(bo->sync_obj)) {
1748 			void *tmp_obj = bo->sync_obj;
1749 			bo->sync_obj = NULL;
1750 			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
1751 			mtx_leave(&bdev->fence_lock);
1752 			driver->sync_obj_unref(&tmp_obj);
1753 			mtx_enter(&bdev->fence_lock);
1754 			continue;
1755 		}
1756 
1757 		if (no_wait)
1758 			return -EBUSY;
1759 
1760 		sync_obj = driver->sync_obj_ref(bo->sync_obj);
1761 		mtx_leave(&bdev->fence_lock);
1762 		ret = driver->sync_obj_wait(sync_obj,
1763 					    lazy, interruptible);
1764 		if (unlikely(ret != 0)) {
1765 			driver->sync_obj_unref(&sync_obj);
1766 			mtx_enter(&bdev->fence_lock);
1767 			return ret;
1768 		}
1769 		mtx_enter(&bdev->fence_lock);
1770 		if (likely(bo->sync_obj == sync_obj)) {
1771 			void *tmp_obj = bo->sync_obj;
1772 			bo->sync_obj = NULL;
1773 			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
1774 				  &bo->priv_flags);
1775 			mtx_leave(&bdev->fence_lock);
1776 			driver->sync_obj_unref(&sync_obj);
1777 			driver->sync_obj_unref(&tmp_obj);
1778 			mtx_enter(&bdev->fence_lock);
1779 		} else {
1780 			mtx_leave(&bdev->fence_lock);
1781 			driver->sync_obj_unref(&sync_obj);
1782 			mtx_enter(&bdev->fence_lock);
1783 		}
1784 	}
1785 	return 0;
1786 }
1787 EXPORT_SYMBOL(ttm_bo_wait);
1788 
1789 int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
1790 {
1791 	struct ttm_bo_device *bdev = bo->bdev;
1792 	int ret = 0;
1793 
1794 	/*
1795 	 * Using ttm_bo_reserve makes sure the lru lists are updated.
1796 	 */
1797 
1798 	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
1799 	if (unlikely(ret != 0))
1800 		return ret;
1801 	mtx_enter(&bdev->fence_lock);
1802 	ret = ttm_bo_wait(bo, false, true, no_wait);
1803 	mtx_leave(&bdev->fence_lock);
1804 	if (likely(ret == 0))
1805 		atomic_inc(&bo->cpu_writers);
1806 	ttm_bo_unreserve(bo);
1807 	return ret;
1808 }
1809 EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
1810 
1811 void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
1812 {
1813 	atomic_dec(&bo->cpu_writers);
1814 }
1815 EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
1816 
1817 /**
1818  * A buffer object shrink method that tries to swap out the first
1819  * buffer object on the bo_global::swap_lru list.
1820  */
1821 
1822 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1823 {
1824 	struct ttm_bo_global *glob =
1825 	    container_of(shrink, struct ttm_bo_global, shrink);
1826 	struct ttm_buffer_object *bo;
1827 	int ret = -EBUSY;
1828 	int put_count;
1829 	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
1830 
1831 	mtx_enter(&glob->lru_lock);
1832 	list_for_each_entry(bo, &glob->swap_lru, swap) {
1833 		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
1834 		if (!ret)
1835 			break;
1836 	}
1837 
1838 	if (ret) {
1839 		mtx_leave(&glob->lru_lock);
1840 		return ret;
1841 	}
1842 
1843 	refcount_acquire(&bo->list_kref);
1844 
1845 	if (!list_empty(&bo->ddestroy)) {
1846 		ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
1847 		if (refcount_release(&bo->list_kref))
1848 			ttm_bo_release_list(bo);
1849 		return ret;
1850 	}
1851 
1852 	put_count = ttm_bo_del_from_lru(bo);
1853 	mtx_leave(&glob->lru_lock);
1854 
1855 	ttm_bo_list_ref_sub(bo, put_count, true);
1856 
1857 	/**
1858 	 * Wait for GPU, then move to system cached.
1859 	 */
1860 
1861 	mtx_enter(&bo->bdev->fence_lock);
1862 	ret = ttm_bo_wait(bo, false, false, false);
1863 	mtx_leave(&bo->bdev->fence_lock);
1864 
1865 	if (unlikely(ret != 0))
1866 		goto out;
1867 
1868 	if ((bo->mem.placement & swap_placement) != swap_placement) {
1869 		struct ttm_mem_reg evict_mem;
1870 
1871 		evict_mem = bo->mem;
1872 		evict_mem.mm_node = NULL;
1873 		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
1874 		evict_mem.mem_type = TTM_PL_SYSTEM;
1875 
1876 		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
1877 					     false, false);
1878 		if (unlikely(ret != 0))
1879 			goto out;
1880 	}
1881 
1882 	ttm_bo_unmap_virtual(bo);
1883 
1884 	/**
1885 	 * Swap out. Buffer will be swapped in again as soon as
1886 	 * anyone tries to access a ttm page.
1887 	 */
1888 
1889 	if (bo->bdev->driver->swap_notify)
1890 		bo->bdev->driver->swap_notify(bo);
1891 
1892 	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
1893 out:
1894 
1895 	/**
1896 	 *
1897 	 * Unreserve without putting on LRU to avoid swapping out an
1898 	 * already swapped-out buffer.
1899 	 */
1900 
1901 	atomic_set(&bo->reserved, 0);
1902 	wakeup(&bo->event_queue);
1903 	if (refcount_release(&bo->list_kref))
1904 		ttm_bo_release_list(bo);
1905 	return ret;
1906 }
1907 
1908 void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
1909 {
1910 	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
1911 		;
1912 }
1913 EXPORT_SYMBOL(ttm_bo_swapout_all);
1914