xref: /dragonfly/sys/dev/drm/ttm/ttm_bo.c (revision 70675b40)
1 /**************************************************************************
2  *
3  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 /*
28  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29  */
30 
31 #define pr_fmt(fmt) "[TTM] " fmt
32 
33 #include <drm/ttm/ttm_module.h>
34 #include <drm/ttm/ttm_bo_driver.h>
35 #include <drm/ttm/ttm_placement.h>
36 #include <linux/atomic.h>
37 #include <linux/errno.h>
38 #include <linux/export.h>
39 #include <linux/wait.h>
40 
41 #define TTM_ASSERT_LOCKED(param)	do { } while (0)
42 #define TTM_DEBUG(fmt, arg...)		do { } while (0)
43 #define TTM_BO_HASH_ORDER 13
44 
45 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
46 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
47 static void ttm_bo_global_kobj_release(struct ttm_bo_global *glob);
48 
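/**
 * Translate the first memory-type bit set in @place->flags into a
 * TTM_PL_* memory type index. Returns -EINVAL if no type bit is set.
 */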
49 static inline int ttm_mem_type_from_place(const struct ttm_place *place,
50 					  uint32_t *mem_type)
51 {
52 	int i;
53 
54 	for (i = 0; i <= TTM_PL_PRIV5; i++)
55 		if (place->flags & (1 << i)) {
56 			*mem_type = i;
57 			return 0;
58 		}
59 	return -EINVAL;
60 }
61 
62 static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
63 {
64 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
65 
66 	pr_err("    has_type: %d\n", man->has_type);
67 	pr_err("    use_type: %d\n", man->use_type);
68 	pr_err("    flags: 0x%08X\n", man->flags);
69 	pr_err("    gpu_offset: 0x%08lX\n", man->gpu_offset);
70 	pr_err("    size: %ju\n", (uintmax_t)man->size);
71 	pr_err("    available_caching: 0x%08X\n", man->available_caching);
72 	pr_err("    default_caching: 0x%08X\n", man->default_caching);
73 	if (mem_type != TTM_PL_SYSTEM)
74 		(*man->func->debug)(man, TTM_PFX);
75 }
76 
77 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
78 					struct ttm_placement *placement)
79 {
80 	int i, ret, mem_type;
81 
82 	pr_err("No space for %p (%lu pages, %luK, %luM)\n",
83 	       bo, bo->mem.num_pages, bo->mem.size >> 10,
84 	       bo->mem.size >> 20);
85 	for (i = 0; i < placement->num_placement; i++) {
86 		ret = ttm_mem_type_from_place(&placement->placement[i],
87 						&mem_type);
88 		if (ret)
89 			return;
90 		pr_err("  placement[%d]=0x%08X (%d)\n",
91 		       i, placement->placement[i].flags, mem_type);
92 		ttm_mem_type_debug(bo->bdev, mem_type);
93 	}
94 }
95 
96 #if 0
97 static ssize_t ttm_bo_global_show(struct ttm_bo_global *glob,
98     char *buffer)
99 {
100 
101 	return snprintf(buffer, PAGE_SIZE, "%lu\n",
102 			(unsigned long) atomic_read(&glob->bo_count));
103 }
104 #endif
105 
106 static inline uint32_t ttm_bo_type_flags(unsigned type)
107 {
108 	return 1 << (type);
109 }
110 
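/**
 * Final teardown of a buffer object, run when the last list_kref
 * reference is dropped: destroy the backing ttm_tt, call the driver's
 * destroy hook (or kfree the object) and return the accounted size to
 * the memory global.
 */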
111 static void ttm_bo_release_list(struct kref *list_kref)
112 {
113 	struct ttm_buffer_object *bo =
114 	    container_of(list_kref, struct ttm_buffer_object, list_kref);
115 	struct ttm_bo_device *bdev = bo->bdev;
116 	size_t acc_size = bo->acc_size;
117 
118 	BUG_ON(atomic_read(&bo->list_kref.refcount));
119 	BUG_ON(atomic_read(&bo->kref.refcount));
120 	BUG_ON(atomic_read(&bo->cpu_writers));
121 	BUG_ON(bo->sync_obj != NULL);
122 	BUG_ON(bo->mem.mm_node != NULL);
123 	BUG_ON(!list_empty(&bo->lru));
124 	BUG_ON(!list_empty(&bo->ddestroy));
125 
126 	if (bo->ttm)
127 		ttm_tt_destroy(bo->ttm);
128 	atomic_dec(&bo->glob->bo_count);
129 	if (bo->destroy)
130 		bo->destroy(bo);
131 	else {
132 		kfree(bo);
133 	}
134 	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
135 }
136 
137 static int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
138 				  bool interruptible)
139 {
140 	if (interruptible) {
141 		return wait_event_interruptible(bo->event_queue,
142 					       !ttm_bo_is_reserved(bo));
143 	} else {
144 		wait_event(bo->event_queue, !ttm_bo_is_reserved(bo));
145 		return 0;
146 	}
147 }
148 
149 void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
150 {
151 	struct ttm_bo_device *bdev = bo->bdev;
152 	struct ttm_mem_type_manager *man;
153 
154 	BUG_ON(!ttm_bo_is_reserved(bo));
155 
156 	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
157 
158 		BUG_ON(!list_empty(&bo->lru));
159 
160 		man = &bdev->man[bo->mem.mem_type];
161 		list_add_tail(&bo->lru, &man->lru);
162 		kref_get(&bo->list_kref);
163 
164 		if (bo->ttm != NULL) {
165 			list_add_tail(&bo->swap, &bo->glob->swap_lru);
166 			kref_get(&bo->list_kref);
167 		}
168 	}
169 }
170 
171 int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
172 {
173 	int put_count = 0;
174 
175 	if (!list_empty(&bo->swap)) {
176 		list_del_init(&bo->swap);
177 		++put_count;
178 	}
179 	if (!list_empty(&bo->lru)) {
180 		list_del_init(&bo->lru);
181 		++put_count;
182 	}
183 
184 	/*
185 	 * TODO: Add a driver hook to delete from
186 	 * driver-specific LRU's here.
187 	 */
188 
189 	return put_count;
190 }
191 
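/**
 * Reserve @bo without touching the LRU lists.
 *
 * With @use_ticket, the acquire context stamp is used for wait/wound
 * style deadlock avoidance: -EDEADLK is returned if this ticket already
 * holds the reservation, -EAGAIN if it is held by a thread that will
 * not back off for us. With @no_wait, -EBUSY is returned instead of
 * sleeping on the reservation.
 */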
192 int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
193 			  bool interruptible,
194 			  bool no_wait, bool use_ticket,
195 			  struct ww_acquire_ctx *ticket)
196 {
197 	int ret;
198 
199 	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
200 		/**
201 		 * Deadlock avoidance for multi-bo reserving.
202 		 */
203 		if (use_ticket && bo->seq_valid) {
204 			/**
205 			 * We've already reserved this one.
206 			 */
207 			if (unlikely(ticket->stamp == bo->val_seq))
208 				return -EDEADLK;
209 			/**
210 			 * Already reserved by a thread that will not back
211 			 * off for us. We need to back off.
212 			 */
213 			if (unlikely(ticket->stamp - bo->val_seq <= LONG_MAX))
214 				return -EAGAIN;
215 		}
216 
217 		if (no_wait)
218 			return -EBUSY;
219 
220 		ret = ttm_bo_wait_unreserved(bo, interruptible);
221 
222 		if (unlikely(ret))
223 			return ret;
224 	}
225 
226 	if (use_ticket) {
227 		bool wake_up = false;
228 
229 		/**
230 		 * Wake up waiters that may need to recheck for deadlock,
231 		 * if we decreased the sequence number.
232 		 */
233 		if (unlikely((bo->val_seq - ticket->stamp <= LONG_MAX)
234 			     || !bo->seq_valid))
235 			wake_up = true;
236 
237 		/*
238 		 * In the worst case with memory ordering these two writes can
239 		 * be seen in the wrong order. However, since we call
240 		 * wake_up_all in that case, this should not pose a problem:
241 		 * the worst case is that a waiter accidentally hits -EAGAIN
242 		 * in ttm_bo_reserve when it sees the old value of val_seq.
243 		 * That can only happen if seq_valid was written before
244 		 * val_seq was, and just means some slightly increased
245 		 * CPU usage.
246 		 */
247 		bo->val_seq = ticket->stamp;
248 		bo->seq_valid = true;
249 		if (wake_up)
250 			wake_up_all(&bo->event_queue);
251 	} else {
252 		bo->seq_valid = false;
253 	}
254 
255 	return 0;
256 }
257 EXPORT_SYMBOL(ttm_bo_reserve);
258 
259 static void ttm_bo_ref_bug(struct kref *list_kref)
260 {
261 	BUG();
262 }
263 
264 void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
265 			 bool never_free)
266 {
267 	kref_sub(&bo->list_kref, count,
268 		 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
269 }
270 
271 int ttm_bo_reserve(struct ttm_buffer_object *bo,
272 		   bool interruptible,
273 		   bool no_wait, bool use_ticket,
274 		   struct ww_acquire_ctx *ticket)
275 {
276 	struct ttm_bo_global *glob = bo->glob;
277 	int put_count = 0;
278 	int ret;
279 
280 	ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_ticket,
281 				    ticket);
282 	if (likely(ret == 0)) {
283 		lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
284 		put_count = ttm_bo_del_from_lru(bo);
285 		lockmgr(&glob->lru_lock, LK_RELEASE);
286 		ttm_bo_list_ref_sub(bo, put_count, true);
287 	}
288 
289 	return ret;
290 }
291 
292 int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
293 				  bool interruptible,
294 				  struct ww_acquire_ctx *ticket)
295 {
296 	bool wake_up = false;
297 	int ret;
298 
299 	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
300 		WARN_ON(bo->seq_valid && ticket->stamp == bo->val_seq);
301 
302 		ret = ttm_bo_wait_unreserved(bo, interruptible);
303 
304 		if (unlikely(ret))
305 			return ret;
306 	}
307 
308 	if (bo->val_seq - ticket->stamp < LONG_MAX || !bo->seq_valid)
309 		wake_up = true;
310 
311 	/**
312 	 * Wake up waiters that may need to recheck for deadlock,
313 	 * if we decreased the sequence number.
314 	 */
315 	bo->val_seq = ticket->stamp;
316 	bo->seq_valid = true;
317 	if (wake_up)
318 		wake_up_all(&bo->event_queue);
319 
320 	return 0;
321 }
322 
323 int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
324 			    bool interruptible, struct ww_acquire_ctx *ticket)
325 {
326 	struct ttm_bo_global *glob = bo->glob;
327 	int put_count, ret;
328 
329 	ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, ticket);
330 	if (likely(!ret)) {
331 		lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
332 		put_count = ttm_bo_del_from_lru(bo);
333 		lockmgr(&glob->lru_lock, LK_RELEASE);
334 		ttm_bo_list_ref_sub(bo, put_count, true);
335 	}
336 	return ret;
337 }
338 EXPORT_SYMBOL(ttm_bo_reserve_slowpath);
339 
340 /*
341  * Must interlock with the event_queue lock to avoid racing against
342  * wait_event_common(), which could otherwise miss the wakeup and
343  * become stuck.
344  */
345 static void
346 ttm_bo_unreserve_core(struct ttm_buffer_object *bo)
347 {
348 	lockmgr(&bo->event_queue.lock, LK_EXCLUSIVE);
349 	atomic_set(&bo->reserved, 0);
350 	lockmgr(&bo->event_queue.lock, LK_RELEASE);
351 	wake_up_all(&bo->event_queue);
352 }
353 
354 void ttm_bo_unreserve_ticket_locked(struct ttm_buffer_object *bo, struct ww_acquire_ctx *ticket)
355 {
356 	ttm_bo_add_to_lru(bo);
357 	ttm_bo_unreserve_core(bo);
358 }
359 
360 void ttm_bo_unreserve(struct ttm_buffer_object *bo)
361 {
362 	struct ttm_bo_global *glob = bo->glob;
363 
364 	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
365 	ttm_bo_unreserve_ticket_locked(bo, NULL);
366 	lockmgr(&glob->lru_lock, LK_RELEASE);
367 }
368 EXPORT_SYMBOL(ttm_bo_unreserve);
369 
370 void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo, struct ww_acquire_ctx *ticket)
371 {
372 	struct ttm_bo_global *glob = bo->glob;
373 
374 	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
375 	ttm_bo_unreserve_ticket_locked(bo, ticket);
376 	lockmgr(&glob->lru_lock, LK_RELEASE);
377 }
378 EXPORT_SYMBOL(ttm_bo_unreserve_ticket);
379 
380 /*
381  * Call bo->mutex locked.
382  */
383 static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
384 {
385 	struct ttm_bo_device *bdev = bo->bdev;
386 	struct ttm_bo_global *glob = bo->glob;
387 	int ret = 0;
388 	uint32_t page_flags = 0;
389 
390 	TTM_ASSERT_LOCKED(&bo->mutex);
391 	bo->ttm = NULL;
392 
393 	if (bdev->need_dma32)
394 		page_flags |= TTM_PAGE_FLAG_DMA32;
395 
396 	switch (bo->type) {
397 	case ttm_bo_type_device:
398 		if (zero_alloc)
399 			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
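		/* fall through */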
400 	case ttm_bo_type_kernel:
401 		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
402 						      page_flags, glob->dummy_read_page);
403 		if (unlikely(bo->ttm == NULL))
404 			ret = -ENOMEM;
405 		break;
406 	case ttm_bo_type_sg:
407 		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
408 						      page_flags | TTM_PAGE_FLAG_SG,
409 						      glob->dummy_read_page);
410 		if (unlikely(bo->ttm == NULL)) {
411 			ret = -ENOMEM;
412 			break;
413 		}
414 		bo->ttm->sg = bo->sg;
415 		break;
416 	default:
417 		pr_err("Illegal buffer object type\n");
418 		ret = -EINVAL;
419 		break;
420 	}
421 
422 	return ret;
423 }
424 
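/**
 * Move the buffer into the memory region described by @mem: tear down
 * CPU mappings when the PCI status or caching changes, create and bind
 * a ttm_tt when the new memory type is not FIXED, then hand the actual
 * copy to ttm_bo_move_ttm(), the driver's move hook or the memcpy
 * fallback, and finally update bo->mem and bo->offset.
 */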
425 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
426 				  struct ttm_mem_reg *mem,
427 				  bool evict, bool interruptible,
428 				  bool no_wait_gpu)
429 {
430 	struct ttm_bo_device *bdev = bo->bdev;
431 	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
432 	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
433 	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
434 	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
435 	int ret = 0;
436 
437 	if (old_is_pci || new_is_pci ||
438 	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
439 		ret = ttm_mem_io_lock(old_man, true);
440 		if (unlikely(ret != 0))
441 			goto out_err;
442 		ttm_bo_unmap_virtual_locked(bo);
443 		ttm_mem_io_unlock(old_man);
444 	}
445 
446 	/*
447 	 * Create and bind a ttm if required.
448 	 */
449 
450 	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
451 		if (bo->ttm == NULL) {
452 			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
453 			ret = ttm_bo_add_ttm(bo, zero);
454 			if (ret)
455 				goto out_err;
456 		}
457 
458 		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
459 		if (ret)
460 			goto out_err;
461 
462 		if (mem->mem_type != TTM_PL_SYSTEM) {
463 			ret = ttm_tt_bind(bo->ttm, mem);
464 			if (ret)
465 				goto out_err;
466 		}
467 
468 		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
469 			if (bdev->driver->move_notify)
470 				bdev->driver->move_notify(bo, mem);
471 			bo->mem = *mem;
472 			mem->mm_node = NULL;
473 			goto moved;
474 		}
475 	}
476 
477 	if (bdev->driver->move_notify)
478 		bdev->driver->move_notify(bo, mem);
479 
480 	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
481 	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
482 		ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
483 	else if (bdev->driver->move)
484 		ret = bdev->driver->move(bo, evict, interruptible,
485 					 no_wait_gpu, mem);
486 	else
487 		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);
488 
489 	if (ret) {
490 		if (bdev->driver->move_notify) {
491 			struct ttm_mem_reg tmp_mem = *mem;
492 			*mem = bo->mem;
493 			bo->mem = tmp_mem;
494 			bdev->driver->move_notify(bo, mem);
495 			bo->mem = *mem;
496 			*mem = tmp_mem;
497 		}
498 
499 		goto out_err;
500 	}
501 
502 moved:
503 	if (bo->evicted) {
504 		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
505 		if (ret)
506 			pr_err("Can not flush read caches\n");
507 		bo->evicted = false;
508 	}
509 
510 	if (bo->mem.mm_node) {
511 		bo->offset = (bo->mem.start << PAGE_SHIFT) +
512 		    bdev->man[bo->mem.mem_type].gpu_offset;
513 		bo->cur_placement = bo->mem.placement;
514 	} else
515 		bo->offset = 0;
516 
517 	return 0;
518 
519 out_err:
520 	new_man = &bdev->man[bo->mem.mem_type];
521 	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
522 		ttm_tt_unbind(bo->ttm);
523 		ttm_tt_destroy(bo->ttm);
524 		bo->ttm = NULL;
525 	}
526 
527 	return ret;
528 }
529 
530 /**
531  * Called with bo::reserved held.
532  * Releases GPU memory type usage on destruction.
533  * This is the place to put driver-specific hooks to release
534  * driver private resources.
535  * Releases the bo::reserved lock.
536  */
537 
538 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
539 {
540 	if (bo->bdev->driver->move_notify)
541 		bo->bdev->driver->move_notify(bo, NULL);
542 
543 	if (bo->ttm) {
544 		ttm_tt_unbind(bo->ttm);
545 		ttm_tt_destroy(bo->ttm);
546 		bo->ttm = NULL;
547 	}
548 	ttm_bo_mem_put(bo, &bo->mem);
549 	ttm_bo_unreserve_core(bo);
550 
551 	/*
552 	 * Since the final reference to this bo may not be dropped by
553 	 * the current task we have to put a memory barrier here to make
554 	 * sure the changes done in this function are always visible.
555 	 *
556 	 * This function only needs protection against the final kref_put.
557 	 */
558 	cpu_mfence();
559 }
560 
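/**
 * Called on final release: if the bo is idle and can be reserved
 * without blocking, release its memory type resources immediately;
 * otherwise queue it on bdev->ddestroy, flush its fence and let the
 * delayed-destroy workqueue retry later.
 */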
561 static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
562 {
563 	struct ttm_bo_device *bdev = bo->bdev;
564 	struct ttm_bo_global *glob = bo->glob;
565 	struct ttm_bo_driver *driver = bdev->driver;
566 	void *sync_obj = NULL;
567 	int put_count;
568 	int ret;
569 
570 	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
571 	ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
572 
573 	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
574 	(void) ttm_bo_wait(bo, false, false, true);
575 	if (!ret && !bo->sync_obj) {
576 		lockmgr(&bdev->fence_lock, LK_RELEASE);
577 		put_count = ttm_bo_del_from_lru(bo);
578 
579 		lockmgr(&glob->lru_lock, LK_RELEASE);
580 		ttm_bo_cleanup_memtype_use(bo);
581 
582 		ttm_bo_list_ref_sub(bo, put_count, true);
583 
584 		return;
585 	}
586 	if (bo->sync_obj)
587 		sync_obj = driver->sync_obj_ref(bo->sync_obj);
588 	lockmgr(&bdev->fence_lock, LK_RELEASE);
589 
590 	if (!ret) {
591 
592 		/*
593 		 * Make NO_EVICT bos immediately available to
594 		 * shrinkers, now that they are queued for
595 		 * destruction.
596 		 */
597 		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
598 			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
599 			ttm_bo_add_to_lru(bo);
600 		}
601 
602 		ttm_bo_unreserve_core(bo);
603 	}
604 
605 	kref_get(&bo->list_kref);
606 	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
607 	lockmgr(&glob->lru_lock, LK_RELEASE);
608 
609 	if (sync_obj) {
610 		driver->sync_obj_flush(sync_obj);
611 		driver->sync_obj_unref(&sync_obj);
612 	}
613 	schedule_delayed_work(&bdev->wq,
614 			      ((hz / 100) < 1) ? 1 : hz / 100);
615 }
616 
617 /**
618  * ttm_bo_cleanup_refs_and_unlock
619  * If the bo is idle, remove it from the delayed- and lru lists and unref.
620  * If not idle, wait for the GPU first unless @no_wait_gpu is set.
621  *
622  * Must be called with lru_lock and reservation held; this function
623  * will drop both before returning.
624  *
625  * @interruptible         Any sleeps should occur interruptibly.
626  * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
627  */
628 
629 static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
630 					  bool interruptible,
631 					  bool no_wait_gpu)
632 {
633 	struct ttm_bo_device *bdev = bo->bdev;
634 	struct ttm_bo_driver *driver = bdev->driver;
635 	struct ttm_bo_global *glob = bo->glob;
636 	int put_count;
637 	int ret;
638 
639 	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
640 	ret = ttm_bo_wait(bo, false, false, true);
641 
642 	if (ret && !no_wait_gpu) {
643 		void *sync_obj;
644 
645 		/*
646 		 * Take a reference to the fence and unreserve;
647 		 * at this point the buffer should be dead, so
648 		 * no new sync objects can be attached.
649 		 */
650 		sync_obj = driver->sync_obj_ref(bo->sync_obj);
651 		lockmgr(&bdev->fence_lock, LK_RELEASE);
652 
653 		ttm_bo_unreserve_core(bo);
654 		lockmgr(&glob->lru_lock, LK_RELEASE);
655 
656 		ret = driver->sync_obj_wait(sync_obj, false, interruptible);
657 		driver->sync_obj_unref(&sync_obj);
658 		if (ret)
659 			return ret;
660 
661 		/*
662 		 * Remove sync_obj with ttm_bo_wait; the wait should be
663 		 * finished, and no new wait object should have been added.
664 		 */
665 		lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
666 		ret = ttm_bo_wait(bo, false, false, true);
667 		WARN_ON(ret);
668 		lockmgr(&bdev->fence_lock, LK_RELEASE);
669 		if (ret)
670 			return ret;
671 
672 		lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
673 		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
674 
675 		/*
676 		 * We raced and lost: someone else holds the reservation now
677 		 * and is probably busy in ttm_bo_cleanup_memtype_use.
678 		 *
679 		 * Even if that is not the case, any delayed destruction
680 		 * would succeed because we finished waiting, so just return
681 		 * success here.
682 		 */
683 		if (ret) {
684 			lockmgr(&glob->lru_lock, LK_RELEASE);
685 			return 0;
686 		}
687 	} else
688 		lockmgr(&bdev->fence_lock, LK_RELEASE);
689 
690 	if (ret || unlikely(list_empty(&bo->ddestroy))) {
691 		ttm_bo_unreserve_core(bo);
692 		lockmgr(&glob->lru_lock, LK_RELEASE);
693 		return ret;
694 	}
695 
696 	put_count = ttm_bo_del_from_lru(bo);
697 	list_del_init(&bo->ddestroy);
698 	++put_count;
699 
700 	lockmgr(&glob->lru_lock, LK_RELEASE);
701 	ttm_bo_cleanup_memtype_use(bo);
702 
703 	ttm_bo_list_ref_sub(bo, put_count, true);
704 
705 	return 0;
706 }
707 
708 /**
709  * Traverse the delayed-destroy list and call
710  * ttm_bo_cleanup_refs_and_unlock() on all encountered buffers.
711  */
712 
713 static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
714 {
715 	struct ttm_bo_global *glob = bdev->glob;
716 	struct ttm_buffer_object *entry = NULL;
717 	int ret = 0;
718 
719 	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
720 	if (list_empty(&bdev->ddestroy))
721 		goto out_unlock;
722 
723 	entry = list_first_entry(&bdev->ddestroy,
724 		struct ttm_buffer_object, ddestroy);
725 	kref_get(&entry->list_kref);
726 
727 	for (;;) {
728 		struct ttm_buffer_object *nentry = NULL;
729 
730 		if (entry->ddestroy.next != &bdev->ddestroy) {
731 			nentry = list_first_entry(&entry->ddestroy,
732 				struct ttm_buffer_object, ddestroy);
733 			kref_get(&nentry->list_kref);
734 		}
735 
736 		ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
737 		if (remove_all && ret) {
738 			lockmgr(&glob->lru_lock, LK_RELEASE);
739 			ret = ttm_bo_reserve_nolru(entry, false, false,
740 						   false, 0);
741 			lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
742 		}
743 
744 		if (!ret)
745 			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
746 							     !remove_all);
747 		else
748 			lockmgr(&glob->lru_lock, LK_RELEASE);
749 
750 		kref_put(&entry->list_kref, ttm_bo_release_list);
751 		entry = nentry;
752 
753 		if (ret || !entry)
754 			goto out;
755 
756 		lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
757 		if (list_empty(&entry->ddestroy))
758 			break;
759 	}
760 
761 out_unlock:
762 	lockmgr(&glob->lru_lock, LK_RELEASE);
763 out:
764 	if (entry)
765 		kref_put(&entry->list_kref, ttm_bo_release_list);
766 	return ret;
767 }
768 
769 static void ttm_bo_delayed_workqueue(struct work_struct *work)
770 {
771 	struct ttm_bo_device *bdev =
772 	    container_of(work, struct ttm_bo_device, wq.work);
773 
774 	if (ttm_bo_delayed_delete(bdev, false)) {
775 		schedule_delayed_work(&bdev->wq,
776 				      ((hz / 100) < 1) ? 1 : hz / 100);
777 	}
778 }
779 
780 /*
781  * NOTE: bdev->vm_lock is already held on call; this function releases it.
782  */
783 static void ttm_bo_release(struct kref *kref)
784 {
785 	struct ttm_buffer_object *bo =
786 	    container_of(kref, struct ttm_buffer_object, kref);
787 	struct ttm_bo_device *bdev = bo->bdev;
788 	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
789 	int release_active;
790 
791 	if (atomic_read(&bo->kref.refcount) > 0) {
792 		lockmgr(&bdev->vm_lock, LK_RELEASE);
793 		return;
794 	}
795 	if (likely(bo->vm_node != NULL)) {
796 		RB_REMOVE(ttm_bo_device_buffer_objects,
797 				&bdev->addr_space_rb, bo);
798 		drm_mm_put_block(bo->vm_node);
799 		bo->vm_node = NULL;
800 	}
801 
802 	/*
803 	 * Should we clean up our implied list_kref?  Because ttm_bo_release()
804 	 * can be called reentrantly due to races (this may no longer be true
805 	 * with the lock management changes in the deref), it is possible to
806 	 * get here twice, but there is only one list_kref ref to drop, and
807 	 * in the other path 'bo' can be kfree()d by another thread the
808 	 * instant we release our lock.
809 	 */
810 	release_active = test_bit(TTM_BO_PRIV_FLAG_ACTIVE, &bo->priv_flags);
811 	if (release_active) {
812 		clear_bit(TTM_BO_PRIV_FLAG_ACTIVE, &bo->priv_flags);
813 		lockmgr(&bdev->vm_lock, LK_RELEASE);
814 		ttm_mem_io_lock(man, false);
815 		ttm_mem_io_free_vm(bo);
816 		ttm_mem_io_unlock(man);
817 		ttm_bo_cleanup_refs_or_queue(bo);
818 		kref_put(&bo->list_kref, ttm_bo_release_list);
819 	} else {
820 		lockmgr(&bdev->vm_lock, LK_RELEASE);
821 	}
822 }
823 
824 void ttm_bo_unref(struct ttm_buffer_object **p_bo)
825 {
826 	struct ttm_buffer_object *bo = *p_bo;
827 	struct ttm_bo_device *bdev = bo->bdev;
828 
829 	*p_bo = NULL;
830 	lockmgr(&bdev->vm_lock, LK_EXCLUSIVE);
831 	if (kref_put(&bo->kref, ttm_bo_release) == 0)
832 		lockmgr(&bdev->vm_lock, LK_RELEASE);
833 }
834 EXPORT_SYMBOL(ttm_bo_unref);
835 
836 int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
837 {
838 	return cancel_delayed_work_sync(&bdev->wq);
839 }
840 EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
841 
842 void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
843 {
844 	if (resched)
845 		schedule_delayed_work(&bdev->wq,
846 				      ((hz / 100) < 1) ? 1 : hz / 100);
847 }
848 EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
849 
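/**
 * Evict @bo from its current placement: wait for the GPU, ask the
 * driver for eviction placements via evict_flags(), find space and
 * move the buffer there.
 */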
850 static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
851 			bool no_wait_gpu)
852 {
853 	struct ttm_bo_device *bdev = bo->bdev;
854 	struct ttm_mem_reg evict_mem;
855 	struct ttm_placement placement;
856 	int ret = 0;
857 
858 	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
859 	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
860 	lockmgr(&bdev->fence_lock, LK_RELEASE);
861 
862 	if (unlikely(ret != 0)) {
863 		if (ret != -ERESTARTSYS) {
864 			pr_err("Failed to expire sync object before buffer eviction\n");
865 		}
866 		goto out;
867 	}
868 
869 	BUG_ON(!ttm_bo_is_reserved(bo));
870 
871 	evict_mem = bo->mem;
872 	evict_mem.mm_node = NULL;
873 	evict_mem.bus.io_reserved_vm = false;
874 	evict_mem.bus.io_reserved_count = 0;
875 
876 	placement.num_placement = 0;
877 	placement.num_busy_placement = 0;
878 	bdev->driver->evict_flags(bo, &placement);
879 	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
880 				no_wait_gpu);
881 	if (ret) {
882 		if (ret != -ERESTARTSYS) {
883 			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
884 			       bo);
885 			ttm_bo_mem_space_debug(bo, &placement);
886 		}
887 		goto out;
888 	}
889 
890 	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
891 				     no_wait_gpu);
892 	if (ret) {
893 		if (ret != -ERESTARTSYS)
894 			pr_err("Buffer eviction failed\n");
895 		ttm_bo_mem_put(bo, &evict_mem);
896 		goto out;
897 	}
898 	bo->evicted = true;
899 out:
900 	return ret;
901 }
902 
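/**
 * Evict the first buffer on @mem_type's LRU list that can be reserved
 * without blocking, to make room for a new allocation.
 */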
903 static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
904 				uint32_t mem_type,
905 				bool interruptible,
906 				bool no_wait_gpu)
907 {
908 	struct ttm_bo_global *glob = bdev->glob;
909 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
910 	struct ttm_buffer_object *bo;
911 	int ret = -EBUSY, put_count;
912 
913 	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
914 	list_for_each_entry(bo, &man->lru, lru) {
915 		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
916 		if (!ret)
917 			break;
918 	}
919 
920 	if (ret) {
921 		lockmgr(&glob->lru_lock, LK_RELEASE);
922 		return ret;
923 	}
924 
925 	kref_get(&bo->list_kref);
926 
927 	if (!list_empty(&bo->ddestroy)) {
928 		ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
929 						     no_wait_gpu);
930 		kref_put(&bo->list_kref, ttm_bo_release_list);
931 		return ret;
932 	}
933 
934 	put_count = ttm_bo_del_from_lru(bo);
935 	lockmgr(&glob->lru_lock, LK_RELEASE);
936 
937 	BUG_ON(ret != 0);
938 
939 	ttm_bo_list_ref_sub(bo, put_count, true);
940 
941 	ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
942 	ttm_bo_unreserve(bo);
943 
944 	kref_put(&bo->list_kref, ttm_bo_release_list);
945 	return ret;
946 }
947 
948 void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
949 {
950 	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
951 
952 	if (mem->mm_node)
953 		(*man->func->put_node)(man, mem);
954 }
955 EXPORT_SYMBOL(ttm_bo_mem_put);
956 
957 /**
958  * Repeatedly evict memory from the LRU for @mem_type until we create enough
959  * space, or we've evicted everything and there isn't enough space.
960  */
961 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
962 					uint32_t mem_type,
963 					const struct ttm_place *place,
964 					struct ttm_mem_reg *mem,
965 					bool interruptible,
966 					bool no_wait_gpu)
967 {
968 	struct ttm_bo_device *bdev = bo->bdev;
969 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
970 	int ret;
971 
972 	do {
973 		ret = (*man->func->get_node)(man, bo, place, mem);
974 		if (unlikely(ret != 0))
975 			return ret;
976 		if (mem->mm_node)
977 			break;
978 		ret = ttm_mem_evict_first(bdev, mem_type,
979 					  interruptible, no_wait_gpu);
980 		if (unlikely(ret != 0))
981 			return ret;
982 	} while (1);
983 	if (mem->mm_node == NULL)
984 		return -ENOMEM;
985 	mem->mem_type = mem_type;
986 	return 0;
987 }
988 
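/**
 * Select caching flags for a new placement: prefer the bo's current
 * caching, then the manager's default, then cached, write-combined and
 * finally uncached.
 */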
989 static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
990 				      uint32_t cur_placement,
991 				      uint32_t proposed_placement)
992 {
993 	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
994 	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
995 
996 	/**
997 	 * Keep current caching if possible.
998 	 */
999 
1000 	if ((cur_placement & caching) != 0)
1001 		result |= (cur_placement & caching);
1002 	else if ((man->default_caching & caching) != 0)
1003 		result |= man->default_caching;
1004 	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
1005 		result |= TTM_PL_FLAG_CACHED;
1006 	else if ((TTM_PL_FLAG_WC & caching) != 0)
1007 		result |= TTM_PL_FLAG_WC;
1008 	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
1009 		result |= TTM_PL_FLAG_UNCACHED;
1010 
1011 	return result;
1012 }
1013 
1014 static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
1015 				 uint32_t mem_type,
1016 				 const struct ttm_place *place,
1017 				 uint32_t *masked_placement)
1018 {
1019 	uint32_t cur_flags = ttm_bo_type_flags(mem_type);
1020 
1021 	if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
1022 		return false;
1023 
1024 	if ((place->flags & man->available_caching) == 0)
1025 		return false;
1026 
1027 	cur_flags |= (place->flags & man->available_caching);
1028 
1029 	*masked_placement = cur_flags;
1030 	return true;
1031 }
1032 
1033 /**
1034  * Creates space for memory region @mem according to its type.
1035  *
1036  * This function first searches for free space in compatible memory types in
1037  * the priority order defined by the driver.  If free space isn't found, then
1038  * ttm_bo_mem_force_space is attempted in priority order to evict and find
1039  * space.
1040  */
1041 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
1042 			struct ttm_placement *placement,
1043 			struct ttm_mem_reg *mem,
1044 			bool interruptible,
1045 			bool no_wait_gpu)
1046 {
1047 	struct ttm_bo_device *bdev = bo->bdev;
1048 	struct ttm_mem_type_manager *man;
1049 	uint32_t mem_type = TTM_PL_SYSTEM;
1050 	uint32_t cur_flags = 0;
1051 	bool type_found = false;
1052 	bool type_ok = false;
1053 	bool has_erestartsys = false;
1054 	int i, ret;
1055 
1056 	mem->mm_node = NULL;
1057 	for (i = 0; i < placement->num_placement; ++i) {
1058 		const struct ttm_place *place = &placement->placement[i];
1059 
1060 		ret = ttm_mem_type_from_place(place, &mem_type);
1061 		if (ret)
1062 			return ret;
1063 		man = &bdev->man[mem_type];
1064 
1065 		type_ok = ttm_bo_mt_compatible(man, mem_type, place,
1066 						&cur_flags);
1067 
1068 		if (!type_ok)
1069 			continue;
1070 
1071 		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
1072 						  cur_flags);
1073 		/*
1074 		 * Carry the access and other non-mapping-related flag bits
1075 		 * from the requested placement over into the current flags.
1076 		 */
1077 		ttm_flag_masked(&cur_flags, place->flags,
1078 				~TTM_PL_MASK_MEMTYPE);
1079 
1080 		if (mem_type == TTM_PL_SYSTEM)
1081 			break;
1082 
1083 		if (man->has_type && man->use_type) {
1084 			type_found = true;
1085 			ret = (*man->func->get_node)(man, bo, place, mem);
1086 			if (unlikely(ret))
1087 				return ret;
1088 		}
1089 		if (mem->mm_node)
1090 			break;
1091 	}
1092 
1093 	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
1094 		mem->mem_type = mem_type;
1095 		mem->placement = cur_flags;
1096 		return 0;
1097 	}
1098 
1099 	if (!type_found)
1100 		return -EINVAL;
1101 
1102 	for (i = 0; i < placement->num_busy_placement; ++i) {
1103 		const struct ttm_place *place = &placement->busy_placement[i];
1104 
1105 		ret = ttm_mem_type_from_place(place, &mem_type);
1106 		if (ret)
1107 			return ret;
1108 		man = &bdev->man[mem_type];
1109 		if (!man->has_type)
1110 			continue;
1111 		if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
1112 			continue;
1113 
1114 		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
1115 						  cur_flags);
1116 		/*
1117 		 * Carry the access and other non-mapping-related flag bits
1118 		 * from the requested placement over into the current flags.
1119 		 */
1120 		ttm_flag_masked(&cur_flags, place->flags,
1121 				~TTM_PL_MASK_MEMTYPE);
1122 
1123 		if (mem_type == TTM_PL_SYSTEM) {
1124 			mem->mem_type = mem_type;
1125 			mem->placement = cur_flags;
1126 			mem->mm_node = NULL;
1127 			return 0;
1128 		}
1129 
1130 		ret = ttm_bo_mem_force_space(bo, mem_type, place, mem,
1131 						interruptible, no_wait_gpu);
1132 		if (ret == 0 && mem->mm_node) {
1133 			mem->placement = cur_flags;
1134 			return 0;
1135 		}
1136 		if (ret == -ERESTARTSYS)
1137 			has_erestartsys = true;
1138 	}
1139 	ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
1140 	return ret;
1141 }
1142 EXPORT_SYMBOL(ttm_bo_mem_space);
1143 
1144 static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
1145 			struct ttm_placement *placement,
1146 			bool interruptible,
1147 			bool no_wait_gpu)
1148 {
1149 	int ret = 0;
1150 	struct ttm_mem_reg mem;
1151 	struct ttm_bo_device *bdev = bo->bdev;
1152 
1153 	BUG_ON(!ttm_bo_is_reserved(bo));
1154 
1155 	/*
1156 	 * FIXME: It's possible to pipeline buffer moves.
1157 	 * Have the driver move function wait for idle when necessary,
1158 	 * instead of doing it here.
1159 	 */
1160 	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
1161 	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
1162 	lockmgr(&bdev->fence_lock, LK_RELEASE);
1163 	if (ret)
1164 		return ret;
1165 	mem.num_pages = bo->num_pages;
1166 	mem.size = mem.num_pages << PAGE_SHIFT;
1167 	mem.page_alignment = bo->mem.page_alignment;
1168 	mem.bus.io_reserved_vm = false;
1169 	mem.bus.io_reserved_count = 0;
1170 	/*
1171 	 * Determine where to move the buffer.
1172 	 */
1173 	ret = ttm_bo_mem_space(bo, placement, &mem,
1174 			       interruptible, no_wait_gpu);
1175 	if (ret)
1176 		goto out_unlock;
1177 	ret = ttm_bo_handle_move_mem(bo, &mem, false,
1178 				     interruptible, no_wait_gpu);
1179 out_unlock:
1180 	if (ret && mem.mm_node)
1181 		ttm_bo_mem_put(bo, &mem);
1182 	return ret;
1183 }
1184 
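/**
 * Check whether the bo's current memory region already satisfies one
 * of the requested or busy placements; if so, return true and hand the
 * matching flags back through @new_flags.
 */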
1185 static bool ttm_bo_mem_compat(struct ttm_placement *placement,
1186 			      struct ttm_mem_reg *mem,
1187 			      uint32_t *new_flags)
1188 {
1189 	int i;
1190 
1191 	for (i = 0; i < placement->num_placement; i++) {
1192 		const struct ttm_place *heap = &placement->placement[i];
1193 		if (mem->mm_node &&
1194 		    (mem->start < heap->fpfn ||
1195 		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
1196 			continue;
1197 
1198 		*new_flags = heap->flags;
1199 		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
1200 		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
1201 			return true;
1202 	}
1203 
1204 	for (i = 0; i < placement->num_busy_placement; i++) {
1205 		const struct ttm_place *heap = &placement->busy_placement[i];
1206 		if (mem->mm_node &&
1207 		    (mem->start < heap->fpfn ||
1208 		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
1209 			continue;
1210 
1211 		*new_flags = heap->flags;
1212 		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
1213 		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
1214 			return true;
1215 	}
1216 
1217 	return false;
1218 }
1219 
1220 int ttm_bo_validate(struct ttm_buffer_object *bo,
1221 			struct ttm_placement *placement,
1222 			bool interruptible,
1223 			bool no_wait_gpu)
1224 {
1225 	int ret;
1226 	uint32_t new_flags;
1227 
1228 	BUG_ON(!ttm_bo_is_reserved(bo));
1229 	/*
1230 	 * Check whether we need to move buffer.
1231 	 */
1232 	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
1233 		ret = ttm_bo_move_buffer(bo, placement, interruptible,
1234 					 no_wait_gpu);
1235 		if (ret)
1236 			return ret;
1237 	} else {
1238 		/*
1239 		 * Carry the access and other non-mapping-related flag bits
1240 		 * from the compatible placement over into the active flags.
1241 		 */
1242 		ttm_flag_masked(&bo->mem.placement, new_flags,
1243 				~TTM_PL_MASK_MEMTYPE);
1244 	}
1245 	/*
1246 	 * We might need to add a TTM.
1247 	 */
1248 	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
1249 		ret = ttm_bo_add_ttm(bo, true);
1250 		if (ret)
1251 			return ret;
1252 	}
1253 	return 0;
1254 }
1255 EXPORT_SYMBOL(ttm_bo_validate);
1256 
1257 int ttm_bo_init(struct ttm_bo_device *bdev,
1258 		struct ttm_buffer_object *bo,
1259 		unsigned long size,
1260 		enum ttm_bo_type type,
1261 		struct ttm_placement *placement,
1262 		uint32_t page_alignment,
1263 		bool interruptible,
1264 		struct vm_object *persistent_swap_storage,
1265 		size_t acc_size,
1266 		struct sg_table *sg,
1267 		void (*destroy) (struct ttm_buffer_object *))
1268 {
1269 	int ret = 0;
1270 	unsigned long num_pages;
1271 	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
1272 
1273 	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
1274 	if (ret) {
1275 		pr_err("Out of kernel memory\n");
1276 		if (destroy)
1277 			(*destroy)(bo);
1278 		else
1279 			kfree(bo);
1280 		return -ENOMEM;
1281 	}
1282 
1283 	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1284 	if (num_pages == 0) {
1285 		pr_err("Illegal buffer object size\n");
1286 		if (destroy)
1287 			(*destroy)(bo);
1288 		else
1289 			kfree(bo);
1290 		ttm_mem_global_free(mem_glob, acc_size);
1291 		return -EINVAL;
1292 	}
1293 	bo->destroy = destroy;
1294 
1295 	kref_init(&bo->kref);
1296 	kref_init(&bo->list_kref);
1297 	atomic_set(&bo->cpu_writers, 0);
1298 	atomic_set(&bo->reserved, 1);
1299 	init_waitqueue_head(&bo->event_queue);
1300 	INIT_LIST_HEAD(&bo->lru);
1301 	INIT_LIST_HEAD(&bo->ddestroy);
1302 	INIT_LIST_HEAD(&bo->swap);
1303 	INIT_LIST_HEAD(&bo->io_reserve_lru);
1304 	/*bzero(&bo->vm_rb, sizeof(bo->vm_rb));*/
1305 	bo->bdev = bdev;
1306 	bo->glob = bdev->glob;
1307 	bo->type = type;
1308 	bo->num_pages = num_pages;
1309 	bo->mem.size = num_pages << PAGE_SHIFT;
1310 	bo->mem.mem_type = TTM_PL_SYSTEM;
1311 	bo->mem.num_pages = bo->num_pages;
1312 	bo->mem.mm_node = NULL;
1313 	bo->mem.page_alignment = page_alignment;
1314 	bo->mem.bus.io_reserved_vm = false;
1315 	bo->mem.bus.io_reserved_count = 0;
1316 	bo->priv_flags = 0;
1317 	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
1318 	bo->seq_valid = false;
1319 	bo->persistent_swap_storage = persistent_swap_storage;
1320 	bo->acc_size = acc_size;
1321 	bo->sg = sg;
1322 	atomic_inc(&bo->glob->bo_count);
1323 
1324 	/*
1325 	 * Mirror ref from kref_init() for list_kref.
1326 	 */
1327 	set_bit(TTM_BO_PRIV_FLAG_ACTIVE, &bo->priv_flags);
1328 
1329 	/*
1330 	 * For ttm_bo_type_device buffers, allocate
1331 	 * address space from the device.
1332 	 */
1333 	if (bo->type == ttm_bo_type_device ||
1334 	    bo->type == ttm_bo_type_sg) {
1335 		ret = ttm_bo_setup_vm(bo);
1336 		if (ret)
1337 			goto out_err;
1338 	}
1339 
1340 	ret = ttm_bo_validate(bo, placement, interruptible, false);
1341 	if (ret)
1342 		goto out_err;
1343 
1344 	ttm_bo_unreserve(bo);
1345 	return 0;
1346 
1347 out_err:
1348 	ttm_bo_unreserve(bo);
1349 	ttm_bo_unref(&bo);
1350 
1351 	return ret;
1352 }
1353 EXPORT_SYMBOL(ttm_bo_init);
1354 
1355 size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
1356 		       unsigned long bo_size,
1357 		       unsigned struct_size)
1358 {
1359 	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
1360 	size_t size = 0;
1361 
1362 	size += ttm_round_pot(struct_size);
1363 	size += PAGE_ALIGN(npages * sizeof(void *));
1364 	size += ttm_round_pot(sizeof(struct ttm_tt));
1365 	return size;
1366 }
1367 EXPORT_SYMBOL(ttm_bo_acc_size);
1368 
1369 size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
1370 			   unsigned long bo_size,
1371 			   unsigned struct_size)
1372 {
1373 	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
1374 	size_t size = 0;
1375 
1376 	size += ttm_round_pot(struct_size);
1377 	size += PAGE_ALIGN(npages * sizeof(void *));
1378 	size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
1379 	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
1380 	return size;
1381 }
1382 EXPORT_SYMBOL(ttm_bo_dma_acc_size);
1383 
1384 int ttm_bo_create(struct ttm_bo_device *bdev,
1385 			unsigned long size,
1386 			enum ttm_bo_type type,
1387 			struct ttm_placement *placement,
1388 			uint32_t page_alignment,
1389 			bool interruptible,
1390 			struct vm_object *persistent_swap_storage,
1391 			struct ttm_buffer_object **p_bo)
1392 {
1393 	struct ttm_buffer_object *bo;
1394 	size_t acc_size;
1395 	int ret;
1396 
1397 	*p_bo = NULL;
1398 	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
1399 	if (unlikely(bo == NULL))
1400 		return -ENOMEM;
1401 
1402 	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
1403 	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
1404 			  interruptible, persistent_swap_storage, acc_size,
1405 			  NULL, NULL);
1406 	if (likely(ret == 0))
1407 		*p_bo = bo;
1408 
1409 	return ret;
1410 }
1411 EXPORT_SYMBOL(ttm_bo_create);
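
/*
 * Illustrative driver-side usage (a sketch only, not part of this
 * file): a driver that wants a small CPU-visible buffer could combine
 * the helpers above roughly as follows. "my_placement" is a
 * hypothetical struct ttm_placement that the driver fills in itself.
 *
 *	struct ttm_buffer_object *bo;
 *	int r;
 *
 *	r = ttm_bo_create(bdev, 64 * 1024, ttm_bo_type_device,
 *			  &my_placement, 0, true, NULL, &bo);
 *	if (r)
 *		return r;
 *
 *	r = ttm_bo_reserve(bo, true, false, false, NULL);
 *	if (r == 0) {
 *		r = ttm_bo_validate(bo, &my_placement, true, false);
 *		ttm_bo_unreserve(bo);
 *	}
 *	if (r)
 *		ttm_bo_unref(&bo);
 */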
1412 
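/**
 * Evict everything on @mem_type's LRU list. Used when a memory manager
 * is taken down or explicitly drained; with @allow_errors, eviction
 * failures are returned to the caller instead of just being logged.
 */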
1413 static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1414 					unsigned mem_type, bool allow_errors)
1415 {
1416 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1417 	struct ttm_bo_global *glob = bdev->glob;
1418 	int ret;
1419 
1420 	/*
1421 	 * Can't use standard list traversal since we're unlocking.
1422 	 */
1423 
1424 	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
1425 	while (!list_empty(&man->lru)) {
1426 		lockmgr(&glob->lru_lock, LK_RELEASE);
1427 		ret = ttm_mem_evict_first(bdev, mem_type, false, false);
1428 		if (ret) {
1429 			if (allow_errors) {
1430 				return ret;
1431 			} else {
1432 				pr_err("Cleanup eviction failed\n");
1433 			}
1434 		}
1435 		lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
1436 	}
1437 	lockmgr(&glob->lru_lock, LK_RELEASE);
1438 	return 0;
1439 }
1440 
1441 int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1442 {
1443 	struct ttm_mem_type_manager *man;
1444 	int ret = -EINVAL;
1445 
1446 	if (mem_type >= TTM_NUM_MEM_TYPES) {
1447 		pr_err("Illegal memory type %d\n", mem_type);
1448 		return ret;
1449 	}
1450 	man = &bdev->man[mem_type];
1451 
1452 	if (!man->has_type) {
1453 		pr_err("Trying to take down uninitialized memory manager type %u\n",
1454 		       mem_type);
1455 		return ret;
1456 	}
1457 
1458 	man->use_type = false;
1459 	man->has_type = false;
1460 
1461 	ret = 0;
1462 	if (mem_type > 0) {
1463 		ttm_bo_force_list_clean(bdev, mem_type, false);
1464 
1465 		ret = (*man->func->takedown)(man);
1466 	}
1467 
1468 	return ret;
1469 }
1470 EXPORT_SYMBOL(ttm_bo_clean_mm);
1471 
1472 int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1473 {
1474 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1475 
1476 	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
1477 		pr_err("Illegal memory manager memory type %u\n", mem_type);
1478 		return -EINVAL;
1479 	}
1480 
1481 	if (!man->has_type) {
1482 		pr_err("Memory type %u has not been initialized\n", mem_type);
1483 		return 0;
1484 	}
1485 
1486 	return ttm_bo_force_list_clean(bdev, mem_type, true);
1487 }
1488 EXPORT_SYMBOL(ttm_bo_evict_mm);
1489 
1490 int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1491 			unsigned long p_size)
1492 {
1493 	int ret = -EINVAL;
1494 	struct ttm_mem_type_manager *man;
1495 
1496 	BUG_ON(type >= TTM_NUM_MEM_TYPES);
1497 	man = &bdev->man[type];
1498 	BUG_ON(man->has_type);
1499 	man->io_reserve_fastpath = true;
1500 	man->use_io_reserve_lru = false;
1501 	lockinit(&man->io_reserve_mutex, "ttmman", 0, LK_CANRECURSE);
1502 	INIT_LIST_HEAD(&man->io_reserve_lru);
1503 
1504 	ret = bdev->driver->init_mem_type(bdev, type, man);
1505 	if (ret)
1506 		return ret;
1507 	man->bdev = bdev;
1508 
1509 	ret = 0;
1510 	if (type != TTM_PL_SYSTEM) {
1511 		ret = (*man->func->init)(man, p_size);
1512 		if (ret)
1513 			return ret;
1514 	}
1515 	man->has_type = true;
1516 	man->use_type = true;
1517 	man->size = p_size;
1518 
1519 	INIT_LIST_HEAD(&man->lru);
1520 
1521 	return 0;
1522 }
1523 EXPORT_SYMBOL(ttm_bo_init_mm);
1524 
1525 static void ttm_bo_global_kobj_release(struct ttm_bo_global *glob)
1526 {
1527 	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
1528 	vm_page_free_contig(glob->dummy_read_page, PAGE_SIZE);
1529 	glob->dummy_read_page = NULL;
1530 	/*
1531 	vm_page_free(glob->dummy_read_page);
1532 	*/
1533 }
1534 
1535 void ttm_bo_global_release(struct drm_global_reference *ref)
1536 {
1537 	struct ttm_bo_global *glob = ref->object;
1538 
1539 	if (refcount_release(&glob->kobj_ref))
1540 		ttm_bo_global_kobj_release(glob);
1541 }
1542 EXPORT_SYMBOL(ttm_bo_global_release);
1543 
1544 int ttm_bo_global_init(struct drm_global_reference *ref)
1545 {
1546 	struct ttm_bo_global_ref *bo_ref =
1547 		container_of(ref, struct ttm_bo_global_ref, ref);
1548 	struct ttm_bo_global *glob = ref->object;
1549 	int ret;
1550 
1551 	lockinit(&glob->device_list_mutex, "ttmdlm", 0, LK_CANRECURSE);
1552 	lockinit(&glob->lru_lock, "ttmlru", 0, LK_CANRECURSE);
1553 	glob->mem_glob = bo_ref->mem_glob;
1554 	glob->dummy_read_page = vm_page_alloc_contig(
1555 	    0, VM_MAX_ADDRESS, PAGE_SIZE, 0, 1*PAGE_SIZE, VM_MEMATTR_UNCACHEABLE);
1556 
1557 	if (unlikely(glob->dummy_read_page == NULL)) {
1558 		ret = -ENOMEM;
1559 		goto out_no_drp;
1560 	}
1561 
1562 	INIT_LIST_HEAD(&glob->swap_lru);
1563 	INIT_LIST_HEAD(&glob->device_list);
1564 
1565 	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
1566 	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
1567 	if (unlikely(ret != 0)) {
1568 		pr_err("Could not register buffer object swapout\n");
1569 		goto out_no_shrink;
1570 	}
1571 
1572 	atomic_set(&glob->bo_count, 0);
1573 
1574 	refcount_init(&glob->kobj_ref, 1);
1575 	return (0);
1576 
1577 out_no_shrink:
1578 	vm_page_free_contig(glob->dummy_read_page, PAGE_SIZE);
1579 	glob->dummy_read_page = NULL;
1580 	/*
1581 	vm_page_free(glob->dummy_read_page);
1582 	*/
1583 out_no_drp:
1584 	kfree(glob);
1585 	return ret;
1586 }
1587 EXPORT_SYMBOL(ttm_bo_global_init);
1588 
1589 
1590 int ttm_bo_device_release(struct ttm_bo_device *bdev)
1591 {
1592 	int ret = 0;
1593 	unsigned i = TTM_NUM_MEM_TYPES;
1594 	struct ttm_mem_type_manager *man;
1595 	struct ttm_bo_global *glob = bdev->glob;
1596 
1597 	while (i--) {
1598 		man = &bdev->man[i];
1599 		if (man->has_type) {
1600 			man->use_type = false;
1601 			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
1602 				ret = -EBUSY;
1603 				pr_err("DRM memory manager type %d is not clean\n",
1604 				       i);
1605 			}
1606 			man->has_type = false;
1607 		}
1608 	}
1609 
1610 	lockmgr(&glob->device_list_mutex, LK_EXCLUSIVE);
1611 	list_del(&bdev->device_list);
1612 	lockmgr(&glob->device_list_mutex, LK_RELEASE);
1613 
1614 	cancel_delayed_work_sync(&bdev->wq);
1615 
1616 	while (ttm_bo_delayed_delete(bdev, true))
1617 		;
1618 
1619 	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
1620 	if (list_empty(&bdev->ddestroy))
1621 		TTM_DEBUG("Delayed destroy list was clean\n");
1622 
1623 	if (list_empty(&bdev->man[0].lru))
1624 		TTM_DEBUG("Swap list was clean\n");
1625 	lockmgr(&glob->lru_lock, LK_RELEASE);
1626 
1627 	BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
1628 	lockmgr(&bdev->vm_lock, LK_EXCLUSIVE);
1629 	drm_mm_takedown(&bdev->addr_space_mm);
1630 	lockmgr(&bdev->vm_lock, LK_RELEASE);
1631 
1632 	return ret;
1633 }
1634 EXPORT_SYMBOL(ttm_bo_device_release);
1635 
1636 int ttm_bo_device_init(struct ttm_bo_device *bdev,
1637 		       struct ttm_bo_global *glob,
1638 		       struct ttm_bo_driver *driver,
1639 		       uint64_t file_page_offset,
1640 		       bool need_dma32)
1641 {
1642 	int ret = -EINVAL;
1643 
1644 	lockinit(&bdev->vm_lock, "ttmvml", 0, LK_CANRECURSE);
1645 	bdev->driver = driver;
1646 
1647 	memset(bdev->man, 0, sizeof(bdev->man));
1648 
1649 	/*
1650 	 * Initialize the system memory buffer type.
1651 	 * Other types need to be driver / IOCTL initialized.
1652 	 */
1653 	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
1654 	if (unlikely(ret != 0))
1655 		goto out_no_sys;
1656 
1657 	RB_INIT(&bdev->addr_space_rb);
1658 	drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
1659 
1660 	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
1661 	INIT_LIST_HEAD(&bdev->ddestroy);
1662 	bdev->dev_mapping = NULL;
1663 	bdev->glob = glob;
1664 	bdev->need_dma32 = need_dma32;
1665 	bdev->val_seq = 0;
1666 	lockinit(&bdev->fence_lock, "ttmfence", 0, LK_CANRECURSE);
1667 	lockmgr(&glob->device_list_mutex, LK_EXCLUSIVE);
1668 	list_add_tail(&bdev->device_list, &glob->device_list);
1669 	lockmgr(&glob->device_list_mutex, LK_RELEASE);
1670 
1671 	return 0;
1672 out_no_sys:
1673 	return ret;
1674 }
1675 EXPORT_SYMBOL(ttm_bo_device_init);
1676 
1677 /*
1678  * buffer object vm functions.
1679  */
1680 
1681 bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1682 {
1683 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1684 
1685 	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
1686 		if (mem->mem_type == TTM_PL_SYSTEM)
1687 			return false;
1688 
1689 		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
1690 			return false;
1691 
1692 		if (mem->placement & TTM_PL_FLAG_CACHED)
1693 			return false;
1694 	}
1695 	return true;
1696 }
1697 
1698 void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
1699 {
1700 
1701 	ttm_bo_release_mmap(bo);
1702 	ttm_mem_io_free_vm(bo);
1703 }
1704 
1705 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1706 {
1707 	struct ttm_bo_device *bdev = bo->bdev;
1708 	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
1709 
1710 	ttm_mem_io_lock(man, false);
1711 	ttm_bo_unmap_virtual_locked(bo);
1712 	ttm_mem_io_unlock(man);
1713 }
1714 
1715 
1716 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1717 
1718 static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
1719 {
1720 	struct ttm_bo_device *bdev = bo->bdev;
1721 
1722 	/* The caller acquired bdev->vm_lock. */
1723 	RB_INSERT(ttm_bo_device_buffer_objects, &bdev->addr_space_rb, bo);
1724 }
1725 
1726 /**
1727  * ttm_bo_setup_vm:
1728  *
1729  * @bo: the buffer to allocate address space for
1730  *
1731  * Allocate address space in the drm device so that applications
1732  * can mmap the buffer and access the contents. This only
1733  * applies to ttm_bo_type_device objects as others are not
1734  * placed in the drm device address space.
1735  */
1736 
1737 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
1738 {
1739 	struct ttm_bo_device *bdev = bo->bdev;
1740 	int ret;
1741 
1742 retry_pre_get:
1743 	ret = drm_mm_pre_get(&bdev->addr_space_mm);
1744 	if (unlikely(ret != 0))
1745 		return ret;
1746 
1747 	lockmgr(&bdev->vm_lock, LK_EXCLUSIVE);
1748 	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
1749 					 bo->mem.num_pages, 0, 0);
1750 
1751 	if (unlikely(bo->vm_node == NULL)) {
1752 		ret = -ENOMEM;
1753 		goto out_unlock;
1754 	}
1755 
1756 	bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
1757 					      bo->mem.num_pages, 0);
1758 
1759 	if (unlikely(bo->vm_node == NULL)) {
1760 		lockmgr(&bdev->vm_lock, LK_RELEASE);
1761 		goto retry_pre_get;
1762 	}
1763 
1764 	ttm_bo_vm_insert_rb(bo);
1765 	lockmgr(&bdev->vm_lock, LK_RELEASE);
1766 	bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
1767 
1768 	return 0;
1769 out_unlock:
1770 	lockmgr(&bdev->vm_lock, LK_RELEASE);
1771 	return ret;
1772 }
1773 
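/**
 * Wait for the bo's sync object (fence) to signal. Called with
 * bdev->fence_lock held; the lock is dropped and re-taken around the
 * driver wait. With @no_wait, -EBUSY is returned while the bo is busy.
 */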
1774 int ttm_bo_wait(struct ttm_buffer_object *bo,
1775 		bool lazy, bool interruptible, bool no_wait)
1776 {
1777 	struct ttm_bo_driver *driver = bo->bdev->driver;
1778 	struct ttm_bo_device *bdev = bo->bdev;
1779 	void *sync_obj;
1780 	int ret = 0;
1781 
1782 	if (likely(bo->sync_obj == NULL))
1783 		return 0;
1784 
1785 	while (bo->sync_obj) {
1786 
1787 		if (driver->sync_obj_signaled(bo->sync_obj)) {
1788 			void *tmp_obj = bo->sync_obj;
1789 			bo->sync_obj = NULL;
1790 			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
1791 			lockmgr(&bdev->fence_lock, LK_RELEASE);
1792 			driver->sync_obj_unref(&tmp_obj);
1793 			lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
1794 			continue;
1795 		}
1796 
1797 		if (no_wait)
1798 			return -EBUSY;
1799 
1800 		sync_obj = driver->sync_obj_ref(bo->sync_obj);
1801 		lockmgr(&bdev->fence_lock, LK_RELEASE);
1802 		ret = driver->sync_obj_wait(sync_obj,
1803 					    lazy, interruptible);
1804 		if (unlikely(ret != 0)) {
1805 			driver->sync_obj_unref(&sync_obj);
1806 			lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
1807 			return ret;
1808 		}
1809 		lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
1810 		if (likely(bo->sync_obj == sync_obj)) {
1811 			void *tmp_obj = bo->sync_obj;
1812 			bo->sync_obj = NULL;
1813 			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
1814 				  &bo->priv_flags);
1815 			lockmgr(&bdev->fence_lock, LK_RELEASE);
1816 			driver->sync_obj_unref(&sync_obj);
1817 			driver->sync_obj_unref(&tmp_obj);
1818 			lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
1819 		} else {
1820 			lockmgr(&bdev->fence_lock, LK_RELEASE);
1821 			driver->sync_obj_unref(&sync_obj);
1822 			lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
1823 		}
1824 	}
1825 	return 0;
1826 }
1827 EXPORT_SYMBOL(ttm_bo_wait);
1828 
1829 int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
1830 {
1831 	struct ttm_bo_device *bdev = bo->bdev;
1832 	int ret = 0;
1833 
1834 	/*
1835 	 * Using ttm_bo_reserve makes sure the lru lists are updated.
1836 	 */
1837 
1838 	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
1839 	if (unlikely(ret != 0))
1840 		return ret;
1841 	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
1842 	ret = ttm_bo_wait(bo, false, true, no_wait);
1843 	lockmgr(&bdev->fence_lock, LK_RELEASE);
1844 	if (likely(ret == 0))
1845 		atomic_inc(&bo->cpu_writers);
1846 	ttm_bo_unreserve(bo);
1847 	return ret;
1848 }
1849 EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
1850 
1851 void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
1852 {
1853 	atomic_dec(&bo->cpu_writers);
1854 }
1855 EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
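
/*
 * Illustrative pairing (a sketch only, not part of this file): CPU
 * writes are bracketed by the synccpu helpers, so the GPU is idled
 * before the write starts and bo->cpu_writers stays elevated for its
 * duration:
 *
 *	if (ttm_bo_synccpu_write_grab(bo, false) == 0) {
 *		... write the buffer contents, e.g. through a
 *		    ttm_bo_kmap() mapping ...
 *		ttm_bo_synccpu_write_release(bo);
 *	}
 */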
1856 
1857 /**
1858  * A buffer object shrink method that tries to swap out the first
1859  * buffer object on the bo_global::swap_lru list.
1860  */
1861 
1862 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1863 {
1864 	struct ttm_bo_global *glob =
1865 	    container_of(shrink, struct ttm_bo_global, shrink);
1866 	struct ttm_buffer_object *bo;
1867 	int ret = -EBUSY;
1868 	int put_count;
1869 	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
1870 
1871 	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
1872 	list_for_each_entry(bo, &glob->swap_lru, swap) {
1873 		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
1874 		if (!ret)
1875 			break;
1876 	}
1877 
1878 	if (ret) {
1879 		lockmgr(&glob->lru_lock, LK_RELEASE);
1880 		return ret;
1881 	}
1882 
1883 	kref_get(&bo->list_kref);
1884 
1885 	if (!list_empty(&bo->ddestroy)) {
1886 		ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
1887 		kref_put(&bo->list_kref, ttm_bo_release_list);
1888 		return ret;
1889 	}
1890 
1891 	put_count = ttm_bo_del_from_lru(bo);
1892 	lockmgr(&glob->lru_lock, LK_RELEASE);
1893 
1894 	ttm_bo_list_ref_sub(bo, put_count, true);
1895 
1896 	/**
1897 	 * Wait for GPU, then move to system cached.
1898 	 */
1899 
1900 	lockmgr(&bo->bdev->fence_lock, LK_EXCLUSIVE);
1901 	ret = ttm_bo_wait(bo, false, false, false);
1902 	lockmgr(&bo->bdev->fence_lock, LK_RELEASE);
1903 
1904 	if (unlikely(ret != 0))
1905 		goto out;
1906 
1907 	if ((bo->mem.placement & swap_placement) != swap_placement) {
1908 		struct ttm_mem_reg evict_mem;
1909 
1910 		evict_mem = bo->mem;
1911 		evict_mem.mm_node = NULL;
1912 		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
1913 		evict_mem.mem_type = TTM_PL_SYSTEM;
1914 
1915 		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
1916 					     false, false);
1917 		if (unlikely(ret != 0))
1918 			goto out;
1919 	}
1920 
1921 	ttm_bo_unmap_virtual(bo);
1922 
1923 	/**
1924 	 * Swap out. Buffer will be swapped in again as soon as
1925 	 * anyone tries to access a ttm page.
1926 	 */
1927 
1928 	if (bo->bdev->driver->swap_notify)
1929 		bo->bdev->driver->swap_notify(bo);
1930 
1931 	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
1932 out:
1933 
1934 	/**
1935 	 *
1936 	 * Unreserve without putting on LRU to avoid swapping out an
1937 	 * already swapped buffer.
1938 	 */
1939 
1940 	ttm_bo_unreserve_core(bo);
1941 	kref_put(&bo->list_kref, ttm_bo_release_list);
1942 	return ret;
1943 }
1944 
1945 void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
1946 {
1947 	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
1948 		;
1949 }
1950 EXPORT_SYMBOL(ttm_bo_swapout_all);
1951