xref: /dragonfly/sys/dev/drm/ttm/ttm_bo_vm.c (revision b608d1d3)
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <ttm/ttm_module.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_page2.h>

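/*
 * Number of pages to speculatively pre-fault per fault taken, to reduce
 * the overall fault rate on a mapping.
 */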
#define TTM_BO_VM_NUM_PREFAULT 16

#if 0
static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
				struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	int ret = 0;

	if (likely(!test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)))
		goto out_unlock;

	/*
	 * Quick non-stalling check for idle.
	 */
	ret = ttm_bo_wait(bo, false, false, true);
	if (likely(ret == 0))
		goto out_unlock;

	/*
	 * If possible, avoid waiting for GPU with mmap_sem
	 * held.
	 */
	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
		ret = VM_FAULT_RETRY;
		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_unlock;

		up_read(&vma->vm_mm->mmap_sem);
		(void) ttm_bo_wait(bo, false, true, false);
		goto out_unlock;
	}

	/*
	 * Ordinary wait.
	 */
	ret = ttm_bo_wait(bo, false, true, false);
	if (unlikely(ret != 0))
		ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
			VM_FAULT_NOPAGE;

out_unlock:
	return ret;
}
#endif

/*
 * Always unstall on an unexpected vm_page alias (a fatal bus fault).
 * Set to 0 to stall, or to a positive count to unstall N times and
 * then stall again.  Exposed as the debug.unstall sysctl.
 */
static int drm_unstall = -1;
SYSCTL_INT(_debug, OID_AUTO, unstall, CTLFLAG_RW, &drm_unstall, 0, "");

static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	/* see ttm_bo_mmap_single() at end of this file */
	/* ttm_bo_vm_ops not currently used, no entry should occur */
	panic("ttm_bo_vm_fault");
#if 0
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int ret;
	int i;
	unsigned long address = (unsigned long)vmf->virtual_address;
	int retval = VM_FAULT_NOPAGE;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];
	struct vm_area_struct cvma;

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after waiting
	 * for the buffer to become unreserved.
	 */
	ret = ttm_bo_reserve(bo, true, true, false, NULL);
	if (unlikely(ret != 0)) {
		if (ret != -EBUSY)
			return VM_FAULT_NOPAGE;

		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
				up_read(&vma->vm_mm->mmap_sem);
				(void) ttm_bo_wait_unreserved(bo);
			}

			return VM_FAULT_RETRY;
		}

		/*
		 * If we wanted to change the locking order to
		 * mmap_sem -> bo::reserve, we'd use a blocking reserve here
		 * instead of retrying the fault...
		 */
		return VM_FAULT_NOPAGE;
	}

	/*
	 * Refuse to fault imported pages. This should be handled
	 * (if at all) by redirecting mmap to the exporter.
	 */
	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
		retval = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTARTSYS:
			retval = VM_FAULT_NOPAGE;
			goto out_unlock;
		default:
			retval = VM_FAULT_SIGBUS;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
	if (unlikely(ret != 0)) {
		retval = ret;
		goto out_unlock;
	}

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_NOPAGE;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
		vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
	page_last = vma_pages(vma) + vma->vm_pgoff -
		drm_vma_node_start(&bo->vma_node);

	if (unlikely(page_offset >= bo->num_pages)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	/*
	 * Make a local vma copy to modify the page_prot member
	 * and vm_flags if necessary. The vma parameter is protected
	 * by mmap_sem in write mode.
	 */
	cvma = *vma;
	cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);

	if (bo->mem.bus.is_iomem) {
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);
	} else {
		ttm = bo->ttm;
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);

		/* Allocate all pages at once, the most common usage */
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_FAULT_OOM;
			goto out_io_unlock;
		}
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */
	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
		if (bo->mem.bus.is_iomem)
			pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
		else {
			page = ttm->pages[page_offset];
			if (unlikely(!page && i == 0)) {
				retval = VM_FAULT_OOM;
				goto out_io_unlock;
			} else if (unlikely(!page)) {
				break;
			}
			page->mapping = vma->vm_file->f_mapping;
			page->index = drm_vma_node_start(&bo->vma_node) +
				page_offset;
			pfn = page_to_pfn(page);
		}

		if (vma->vm_flags & VM_MIXEDMAP)
			ret = vm_insert_mixed(&cvma, address, pfn);
		else
			ret = vm_insert_pfn(&cvma, address, pfn);

		/*
		 * Somebody beat us to this PTE, we prefaulted an already
		 * populated PTE, or a prefaulting error occurred.
		 */

		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
			break;
		else if (unlikely(ret != 0)) {
			retval =
			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
			goto out_io_unlock;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
out_io_unlock:
	ttm_mem_io_unlock(man);
out_unlock:
	ttm_bo_unreserve(bo);
	return retval;
#endif
}

/* ttm_bo_vm_ops not currently used, no entry should occur */
static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

#if 0
	WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
#endif

	(void)ttm_bo_reference(bo);
}

/* ttm_bo_vm_ops not currently used, no entry should occur */
static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;

	ttm_bo_unref(&bo);
	vma->vm_private_data = NULL;
}

static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close
};

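/*
 * Translate a page-granular mmap offset into the buffer object whose
 * address space range contains it, taking a reference on the object.
 * Returns NULL if no object matches or the object is being destroyed.
 */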
static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
						  unsigned long offset,
						  unsigned long pages)
{
	struct drm_vma_offset_node *node;
	struct ttm_buffer_object *bo = NULL;

	drm_vma_offset_lock_lookup(&bdev->vma_manager);

	node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
	if (likely(node)) {
		bo = container_of(node, struct ttm_buffer_object, vma_node);
		if (!kref_get_unless_zero(&bo->kref))
			bo = NULL;
	}

	drm_vma_offset_unlock_lookup(&bdev->vma_manager);

	if (!bo)
		pr_err("Could not find buffer object to map\n");

	return bo;
}

int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
	if (unlikely(!bo))
		return -EINVAL;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = bo;

	/*
	 * We'd like to use VM_PFNMAP on shared mappings, where
	 * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
	 * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
	 * bad for performance. Until that has been sorted out, use
	 * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
	 */
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);

int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND;
	return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);

/*
 * DragonFlyBSD Interface
 */

#include "opt_vm.h"

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <linux/errno.h>
#include <linux/export.h>

#include <vm/vm_page2.h>

/*
 * NOTE: This code is fragile.  This code can only be entered with *mres
 *	 not NULL when *mres is a placeholder page allocated by the kernel.
 */
static int
ttm_bo_vm_fault_dfly(vm_object_t vm_obj, vm_ooffset_t offset,
		     int prot, vm_page_t *mres)
{
	struct ttm_buffer_object *bo = vm_obj->handle;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_tt *ttm = NULL;
	vm_page_t m, mtmp;
	int ret;
	int retval = VM_PAGER_OK;
	struct ttm_mem_type_manager *man;

	man = &bdev->man[bo->mem.mem_type];
	vm_object_pip_add(vm_obj, 1);

	/*
	 * We must atomically clean up any possible placeholder page to avoid
	 * the DRM subsystem attempting to use it.  We can determine if this
	 * is a placeholder page by checking m->valid.
	 *
	 * We have to do this before any potential fault_reserve_notify()
	 * which might try to free the map (and thus deadlock on our busy
	 * page).
	 */
	m = *mres;
	*mres = NULL;
	if (m) {
		if (m->valid == VM_PAGE_BITS_ALL) {
			/* actual page */
			vm_page_wakeup(m);
		} else {
			/* placeholder page */
			KKASSERT((m->flags & PG_FICTITIOUS) == 0);
			vm_page_remove(m);
			vm_page_free(m);
		}
	}

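	/*
	 * The fault processing below may have to restart from scratch,
	 * e.g. when the page we want turns out to be busied by a
	 * competing fault.  The bo reservation and the io lock are
	 * dropped before looping back here.
	 */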
retry:
	VM_OBJECT_UNLOCK(vm_obj);
	m = NULL;

	/*
	 * NOTE: set no_wait to false, we don't have ttm_bo_wait_unreserved()
	 *	 for the -EBUSY case yet.
	 */
	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		VM_OBJECT_LOCK(vm_obj);
		goto out_unlock2;
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
			lwkt_yield();
			/* fall through */
		case -ERESTARTSYS:
		case -EINTR:
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		default:
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
		/*
		 * Here, the behavior differs between Linux and FreeBSD.
		 *
		 * On Linux, the wait is interruptible (3rd argument to
		 * ttm_bo_wait). There must be some mechanism to resume
		 * page fault handling, once the signal is processed.
		 *
		 * On FreeBSD, the wait is uninterruptible. This is not a
		 * problem as we can't end up with an unkillable process
		 * here, because the wait will eventually time out.
		 *
		 * An example of this situation is the Xorg process
		 * which uses SIGALRM internally. The signal could
		 * interrupt the wait, causing the page fault to fail
		 * and the process to receive SIGSEGV.
		 */
		ret = ttm_bo_wait(bo, false, false, false);
		if (unlikely(ret != 0)) {
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	}

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_io_unlock;
	}
	if (unlikely(OFF_TO_IDX(offset) >= bo->num_pages)) {
		retval = VM_PAGER_ERROR;
		goto out_io_unlock;
	}

	/*
	 * Look up the real page.
	 *
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and
	 * consider those bits protected by
	 * the bo->mutex, as we should be the only writers.
	 * There shouldn't really be any readers of these bits except
	 * within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */
	if (bo->mem.bus.is_iomem) {
		m = vm_phys_fictitious_to_vm_page(bo->mem.bus.base +
						  bo->mem.bus.offset + offset);
		pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement, 0));
	} else {
		/* Allocate all pages at once, the most common usage */
		ttm = bo->ttm;
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}

		m = (struct vm_page *)ttm->pages[OFF_TO_IDX(offset)];
		if (unlikely(!m)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
		pmap_page_set_memattr(m,
		    (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement, 0));
	}

	VM_OBJECT_LOCK(vm_obj);

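	/*
	 * If the page is busied by a competing fault, back off completely
	 * (dropping the io lock and the bo reservation), wait for the
	 * page, and retry the whole fault from scratch.
	 */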
	if (vm_page_busy_try(m, FALSE)) {
		kprintf("r");
		vm_page_sleep_busy(m, FALSE, "ttmvmf");
		ttm_mem_io_unlock(man);
		ttm_bo_unreserve(bo);
		goto retry;
	}

	/*
	 * We want our fake page in the VM object, not the page the OS
	 * allocated for us as a placeholder.
	 */
	m->valid = VM_PAGE_BITS_ALL;
	*mres = m;

	/*
	 * Insert the page into the object if not already inserted.
	 */
	if (m->object) {
		if (m->object != vm_obj || m->pindex != OFF_TO_IDX(offset)) {
			retval = VM_PAGER_ERROR;
			kprintf("ttm_bo_vm_fault_dfly: m(%p) already inserted "
				"in obj %p, attempt obj %p\n",
				m, m->object, vm_obj);
			while (drm_unstall == 0) {
				tsleep(&retval, 0, "DEBUG", hz/10);
			}
			if (drm_unstall > 0)
				--drm_unstall;
		}
	} else {
		mtmp = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
		if (mtmp == NULL) {
			vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
		} else {
			panic("inconsistent insert bo %p m %p mtmp %p "
			      "offset %jx",
			      bo, m, mtmp,
			      (uintmax_t)offset);
		}
	}

out_io_unlock1:
	ttm_mem_io_unlock(man);
out_unlock1:
	ttm_bo_unreserve(bo);
out_unlock2:
	vm_object_pip_wakeup(vm_obj);
	return (retval);

out_io_unlock:
	VM_OBJECT_LOCK(vm_obj);
	goto out_io_unlock1;

out_unlock:
	VM_OBJECT_LOCK(vm_obj);
	goto out_unlock1;
}

static int
ttm_bo_vm_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
	       vm_ooffset_t foff, struct ucred *cred, u_short *color)
{

	/*
	 * On Linux, a reference to the buffer object is acquired here.
	 * The reason is that this function is not called when the
	 * mmap() is initialized, but only when a process forks for
	 * instance. Therefore on Linux, the reference on the bo is
	 * acquired either in ttm_bo_mmap() or ttm_bo_vm_open(). It's
	 * then released in ttm_bo_vm_close().
	 *
	 * Here, this function is called during mmap() initialization.
	 * Thus, the reference acquired in ttm_bo_mmap_single() is
	 * sufficient.
	 */
	*color = 0;
	return (0);
}

static void
ttm_bo_vm_dtor(void *handle)
{
	struct ttm_buffer_object *bo = handle;

	ttm_bo_unref(&bo);
}

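/*
 * OBJT_MGTDEVICE pager ops for the VM object created in
 * ttm_bo_mmap_single() below.  These, not ttm_bo_vm_ops above,
 * service page faults on TTM mappings under DragonFly.
 */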
static struct cdev_pager_ops ttm_pager_ops = {
	.cdev_pg_fault = ttm_bo_vm_fault_dfly,
	.cdev_pg_ctor = ttm_bo_vm_ctor,
	.cdev_pg_dtor = ttm_bo_vm_dtor
};

/*
 * Called from drm_drv.c
 *
 * *offset - object offset in bytes
 * size	   - map size in bytes
 *
 * We set up a dummy vma (for now) and call ttm_bo_mmap().  Then we set up
 * our own VM object and dfly ops.  Note that the ops supplied by
 * ttm_bo_mmap() are not currently used.
 */
int
ttm_bo_mmap_single(struct drm_device *dev, vm_ooffset_t *offset,
		   vm_size_t size, struct vm_object **obj_res, int nprot)
{
	struct ttm_bo_device *bdev = dev->drm_ttm_bdev;
	struct ttm_buffer_object *bo;
	struct vm_object *vm_obj;
	struct vm_area_struct vma;
	int ret;

	*obj_res = NULL;

	bzero(&vma, sizeof(vma));
	vma.vm_start = *offset;		/* bdev-relative offset */
	vma.vm_end = vma.vm_start + size;
	vma.vm_pgoff = vma.vm_start >> PAGE_SHIFT;
	/* vma.vm_page_prot */
	/* vma.vm_flags */

	/*
	 * Call the linux-ported code to do the work, and on success just
	 * set up our own VM object and ignore what the linux code did
	 * other than supplying us the 'bo'.
	 */
	ret = ttm_bo_mmap(NULL, &vma, bdev);

	if (ret == 0) {
		bo = vma.vm_private_data;
		vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE,
					     &ttm_pager_ops,
					     size, nprot, 0,
					     curthread->td_ucred);
		if (vm_obj) {
			*obj_res = vm_obj;
			*offset = 0;		/* object-relative offset */
		} else {
			ttm_bo_unref(&bo);
			ret = EINVAL;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap_single);

#ifdef __DragonFly__
void ttm_bo_release_mmap(struct ttm_buffer_object *bo);

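/*
 * Remove any pages that were faulted into the bo's pager VM object,
 * then drop the object reference acquired by cdev_pager_lookup().
 */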
void
ttm_bo_release_mmap(struct ttm_buffer_object *bo)
{
	vm_object_t vm_obj;
	vm_page_t m;
	int i;

	vm_obj = cdev_pager_lookup(bo);
	if (vm_obj == NULL)
		return;

	VM_OBJECT_LOCK(vm_obj);
	for (i = 0; i < bo->num_pages; i++) {
		m = vm_page_lookup_busy_wait(vm_obj, i, TRUE, "ttm_unm");
		if (m == NULL)
			continue;
		cdev_pager_free_page(vm_obj, m);
	}
	VM_OBJECT_UNLOCK(vm_obj);

	vm_object_deallocate(vm_obj);
}
#endif