xref: /dragonfly/sys/dev/drm/ttm/ttm_bo_vm.c (revision 47ec0953)
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <ttm/ttm_module.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_page2.h>

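/*
 * Upper bound on the number of pages mapped per fault by the speculative
 * prefault loop in ttm_bo_vm_fault() (that path is currently compiled out
 * below).
 */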
#define TTM_BO_VM_NUM_PREFAULT 16

/*
 * Always unstall on an unexpected vm_page alias (a fatal bus fault).
 * Set to 0 to stall, set to a positive count to unstall N times,
 * then stall again.
 */
static int drm_unstall = -1;
SYSCTL_INT(_debug, OID_AUTO, unstall, CTLFLAG_RW, &drm_unstall, 0, "");
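/* Exposed as the debug.unstall sysctl (e.g. "sysctl debug.unstall=1"). */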

static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	/* see ttm_bo_mmap_single() at end of this file */
	/* ttm_bo_vm_ops not currently used, no entry should occur */
	panic("ttm_bo_vm_fault");
#if 0
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int ret;
	int i;
	unsigned long address = (unsigned long)vmf->virtual_address;
	int retval = VM_FAULT_NOPAGE;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after scheduling.
	 */

	ret = ttm_bo_reserve(bo, true, true, false, 0);
	if (unlikely(ret != 0)) {
		if (ret == -EBUSY)
			set_need_resched();
		return VM_FAULT_NOPAGE;
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
			set_need_resched();
		case -ERESTARTSYS:
			retval = VM_FAULT_NOPAGE;
			goto out_unlock;
		default:
			retval = VM_FAULT_SIGBUS;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
		ret = ttm_bo_wait(bo, false, true, false);
		lockmgr(&bdev->fence_lock, LK_RELEASE);
		if (unlikely(ret != 0)) {
			retval = (ret != -ERESTARTSYS) ?
			    VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
			goto out_unlock;
		}
	} else
		lockmgr(&bdev->fence_lock, LK_RELEASE);

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_NOPAGE;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
	    drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
	page_last = vma_pages(vma) +
	    drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;

	if (unlikely(page_offset >= bo->num_pages)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	/*
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and
	 * consider those bits protected by
	 * the bo->mutex, as we should be the only writers.
	 * There shouldn't really be any readers of these bits except
	 * within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */
	if (bo->mem.bus.is_iomem) {
		vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
						vma->vm_page_prot);
	} else {
		ttm = bo->ttm;
		vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    vm_get_page_prot(vma->vm_flags) :
		    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);

		/* Allocate all pages at once, most common usage */
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_FAULT_OOM;
			goto out_io_unlock;
		}
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */
	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
		if (bo->mem.bus.is_iomem)
			pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
		else {
			page = ttm->pages[page_offset];
			if (unlikely(!page && i == 0)) {
				retval = VM_FAULT_OOM;
				goto out_io_unlock;
			} else if (unlikely(!page)) {
				break;
			}
			pfn = page_to_pfn(page);
		}

		ret = vm_insert_mixed(vma, address, pfn);
		/*
		 * Somebody beat us to this PTE or prefaulting to
		 * an already populated PTE, or prefaulting error.
		 */

		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
			break;
		else if (unlikely(ret != 0)) {
			retval =
			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
			goto out_io_unlock;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
out_io_unlock:
	ttm_mem_io_unlock(man);
out_unlock:
	ttm_bo_unreserve(bo);
	return retval;
#endif
}

/* ttm_bo_vm_ops not currently used, no entry should occur */
static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	(void)ttm_bo_reference(bo);
}

/* ttm_bo_vm_ops not currently used, no entry should occur */
static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;

	ttm_bo_unref(&bo);
	vma->vm_private_data = NULL;
}

static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close
};

static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
						  unsigned long offset,
						  unsigned long pages)
{
	struct drm_vma_offset_node *node;
	struct ttm_buffer_object *bo = NULL;

	drm_vma_offset_lock_lookup(&bdev->vma_manager);

	node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
	if (likely(node)) {
		bo = container_of(node, struct ttm_buffer_object, vma_node);
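		/*
		 * The lookup only located the node; take our own reference,
		 * but refuse a bo whose last reference is already gone.
		 */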
		if (!kref_get_unless_zero(&bo->kref))
			bo = NULL;
	}

	drm_vma_offset_unlock_lookup(&bdev->vma_manager);

	if (!bo)
		pr_err("Could not find buffer object to map\n");

	return bo;
}

int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
	if (unlikely(!bo))
		return -EINVAL;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = bo;
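	/*
	 * VM_MIXEDMAP allows both struct page backed and raw pfn mappings;
	 * VM_IO marks the range as device memory, while VM_DONTEXPAND and
	 * VM_DONTDUMP keep it out of mremap() expansion and core dumps.
	 */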
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);

int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);


ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
		  const char __user *wbuf, char __user *rbuf, size_t count,
		  loff_t *f_pos, bool write)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_driver *driver;
	struct ttm_bo_kmap_obj map;
	unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	bo = ttm_bo_vm_lookup(bdev, dev_offset, 1);
	if (unlikely(bo == NULL))
		return -EFAULT;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}

	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	kmap_offset = dev_offset - drm_vma_node_start(&bo->vma_node);
	if (unlikely(kmap_offset >= bo->num_pages)) {
		ret = -EFBIG;
		goto out_unref;
	}

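	/*
	 * Clamp the transfer to the end of the object and work out how many
	 * whole pages must be kmapped to cover [*f_pos, *f_pos + count).
	 */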
	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		ret = -EAGAIN;
		goto out_unref;
	default:
		goto out_unref;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		goto out_unref;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return -EFBIG;

	*f_pos += io_size;

	return io_size;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}

ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
			char __user *rbuf, size_t count, loff_t *f_pos,
			bool write)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

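	/*
	 * As in ttm_bo_io() above, but here *f_pos is bo-relative, so the
	 * starting page comes straight from the file position.
	 */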
	kmap_offset = (*f_pos >> PAGE_SHIFT);
	if (unlikely(kmap_offset >= bo->num_pages))
		return -EFBIG;

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		return -EAGAIN;
	default:
		return ret;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		return ret;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return ret;

	*f_pos += io_size;

	return io_size;
}

/*
 * DragonFlyBSD Interface
 */

#include "opt_vm.h"

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <linux/errno.h>
#include <linux/export.h>

#include <vm/vm_page2.h>

static int
ttm_bo_vm_fault_dfly(vm_object_t vm_obj, vm_ooffset_t offset,
		     int prot, vm_page_t *mres)
{
	struct ttm_buffer_object *bo = vm_obj->handle;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_tt *ttm = NULL;
	vm_page_t m, oldm;
	int ret;
	int retval = VM_PAGER_OK;
	struct ttm_mem_type_manager *man;

	man = &bdev->man[bo->mem.mem_type];

	/*kprintf("FAULT %p %p/%ld\n", vm_obj, bo, offset);*/

	vm_object_pip_add(vm_obj, 1);
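	/*
	 * The VM system hands us a placeholder page in *mres.  Detach it
	 * now; on success we insert the page actually backing the bo and
	 * free the placeholder before returning.
	 */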
	oldm = *mres;
	*mres = NULL;

retry:
	VM_OBJECT_UNLOCK(vm_obj);
	m = NULL;

	/*
	 * NOTE: set nowait to false, we don't have ttm_bo_wait_unreserved()
	 *	 for the -EBUSY case yet.
	 */
	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		VM_OBJECT_LOCK(vm_obj);
		goto out_unlock2;
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
			lwkt_yield();
			/* fall through */
		case -ERESTARTSYS:
		case -EINTR:
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		default:
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */
	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
		/*
		 * Here, the behavior differs between Linux and FreeBSD.
		 *
		 * On Linux, the wait is interruptible (3rd argument to
		 * ttm_bo_wait). There must be some mechanism to resume
		 * page fault handling, once the signal is processed.
		 *
		 * On FreeBSD, the wait is uninterruptible. This is not a
		 * problem as we can't end up with an unkillable process
		 * here, because the wait will eventually time out.
		 *
		 * An example of this situation is the Xorg process
		 * which uses SIGALRM internally. The signal could
		 * interrupt the wait, causing the page fault to fail
		 * and the process to receive SIGSEGV.
		 */
		ret = ttm_bo_wait(bo, false, false, false);
		lockmgr(&bdev->fence_lock, LK_RELEASE);
		if (unlikely(ret != 0)) {
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	} else {
		lockmgr(&bdev->fence_lock, LK_RELEASE);
	}

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_io_unlock;
	}
	if (unlikely(OFF_TO_IDX(offset) >= bo->num_pages)) {
		retval = VM_PAGER_ERROR;
		goto out_io_unlock;
	}

	/*
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and
	 * consider those bits protected by
	 * the bo->mutex, as we should be the only writers.
	 * There shouldn't really be any readers of these bits except
	 * within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */

	if (bo->mem.bus.is_iomem) {
		m = vm_phys_fictitious_to_vm_page(bo->mem.bus.base +
						  bo->mem.bus.offset + offset);
		pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement, 0));
	} else {
		/* Allocate all pages at once, most common usage */
		ttm = bo->ttm;
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
		ttm = bo->ttm;

		m = (struct vm_page *)ttm->pages[OFF_TO_IDX(offset)];
		if (unlikely(!m)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
		pmap_page_set_memattr(m,
		    (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement, 0));
	}

	VM_OBJECT_LOCK(vm_obj);

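	/*
	 * Busy the page so it can be inserted.  If someone else holds it
	 * busy, back out completely (drop the io lock and the reservation),
	 * wait for it, and retry the fault from scratch.
	 */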
	if (vm_page_busy_try(m, FALSE)) {
		kprintf("r");
		vm_page_sleep_busy(m, FALSE, "ttmvmf");
		ttm_mem_io_unlock(man);
		ttm_bo_unreserve(bo);
		goto retry;
	}

	/*
	 * We want our fake page in the VM object, not the page the OS
	 * allocated for us as a placeholder.
	 */
	m->valid = VM_PAGE_BITS_ALL;
	*mres = m;
	if (oldm != NULL) {
		vm_page_remove(oldm);
		if (m->object) {
			retval = VM_PAGER_ERROR;
			kprintf("ttm_bo_vm_fault_dfly: m(%p) already inserted "
				"in obj %p, attempt obj %p\n",
				m, m->object, vm_obj);
			while (drm_unstall == 0) {
				tsleep(&retval, 0, "DEBUG", hz/10);
			}
			if (drm_unstall > 0)
				--drm_unstall;
		} else {
			vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
		}
		vm_page_free(oldm);
		oldm = NULL;
	} else {
		vm_page_t mtmp;

		kprintf("oldm NULL\n");

		mtmp = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
		KASSERT(mtmp == NULL || mtmp == m,
		    ("inconsistent insert bo %p m %p mtmp %p offset %jx",
		    bo, m, mtmp, (uintmax_t)offset));
		if (mtmp == NULL)
			vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
	}
	/*vm_page_busy_try(m, FALSE);*/

out_io_unlock1:
	ttm_mem_io_unlock(man);
out_unlock1:
	ttm_bo_unreserve(bo);
out_unlock2:
	if (oldm) {
		vm_page_remove(oldm);
		vm_page_free(oldm);
	}
	vm_object_pip_wakeup(vm_obj);
	return (retval);

out_io_unlock:
	VM_OBJECT_LOCK(vm_obj);
	goto out_io_unlock1;

out_unlock:
	VM_OBJECT_LOCK(vm_obj);
	goto out_unlock1;
}

static int
ttm_bo_vm_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
	       vm_ooffset_t foff, struct ucred *cred, u_short *color)
{

	/*
	 * On Linux, a reference to the buffer object is acquired here.
	 * The reason is that this function is not called when the
	 * mmap() is initialized, but only when a process forks for
	 * instance. Therefore on Linux, the reference on the bo is
	 * acquired either in ttm_bo_mmap() or ttm_bo_vm_open(). It's
	 * then released in ttm_bo_vm_close().
	 *
	 * Here, this function is called during mmap() initialization.
	 * Thus, the reference acquired in ttm_bo_mmap_single() is
	 * sufficient.
	 */
	*color = 0;
	return (0);
}

static void
ttm_bo_vm_dtor(void *handle)
{
	struct ttm_buffer_object *bo = handle;

	ttm_bo_unref(&bo);
}

static struct cdev_pager_ops ttm_pager_ops = {
	.cdev_pg_fault = ttm_bo_vm_fault_dfly,
	.cdev_pg_ctor = ttm_bo_vm_ctor,
	.cdev_pg_dtor = ttm_bo_vm_dtor
};

/*
 * Called from drm_drv.c
 *
 * *offset - object offset in bytes
 * size	   - map size in bytes
 *
 * We set up a dummy vma (for now) and call ttm_bo_mmap().  Then we set up
 * our own VM object and dfly ops.  Note that the ops supplied by
 * ttm_bo_mmap() are not currently used.
 */
int
ttm_bo_mmap_single(struct drm_device *dev, vm_ooffset_t *offset,
		   vm_size_t size, struct vm_object **obj_res, int nprot)
{
	struct ttm_bo_device *bdev = dev->drm_ttm_bdev;
	struct ttm_buffer_object *bo;
	struct vm_object *vm_obj;
	struct vm_area_struct vma;
	int ret;

	*obj_res = NULL;

	bzero(&vma, sizeof(vma));
	vma.vm_start = *offset;		/* bdev-relative offset */
	vma.vm_end = vma.vm_start + size;
	vma.vm_pgoff = vma.vm_start >> PAGE_SHIFT;
	/* vma.vm_page_prot */
	/* vma.vm_flags */

	/*
	 * Call the Linux-ported code to do the work, and on success just
	 * set up our own VM object and ignore what the Linux code did
	 * other than supplying us the 'bo'.
	 */
	ret = ttm_bo_mmap(NULL, &vma, bdev);

	if (ret == 0) {
		bo = vma.vm_private_data;
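		/*
		 * Hang our pager ops off an OBJT_MGTDEVICE object keyed on
		 * the bo, so ttm_bo_release_mmap() can find it again via
		 * cdev_pager_lookup().
		 */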
		vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE,
					     &ttm_pager_ops,
					     size, nprot, 0,
					     curthread->td_ucred);
		if (vm_obj) {
			*obj_res = vm_obj;
			*offset = 0;		/* object-relative offset */
		} else {
			ttm_bo_unref(&bo);
			ret = EINVAL;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap_single);

void
ttm_bo_release_mmap(struct ttm_buffer_object *bo)
{
	vm_object_t vm_obj;
	vm_page_t m;
	int i;

	vm_obj = cdev_pager_lookup(bo);
	if (vm_obj == NULL)
		return;

	VM_OBJECT_LOCK(vm_obj);
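	/*
	 * Evict every page we inserted into the pager object before the
	 * object itself is released.
	 */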
	for (i = 0; i < bo->num_pages; i++) {
		m = vm_page_lookup_busy_wait(vm_obj, i, TRUE, "ttm_unm");
		if (m == NULL)
			continue;
		cdev_pager_free_page(vm_obj, m);
	}
	VM_OBJECT_UNLOCK(vm_obj);

	vm_object_deallocate(vm_obj);
}

#if 0
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);
#endif