xref: /dragonfly/sys/dev/drm/ttm/ttm_bo_vm.c (revision 3c7e5806)
1 /**************************************************************************
2  *
3  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 /*
28  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29  */
30 
31 #define pr_fmt(fmt) "[TTM] " fmt
32 
33 #include <ttm/ttm_module.h>
34 #include <ttm/ttm_bo_driver.h>
35 #include <ttm/ttm_bo_api.h>
36 #include <ttm/ttm_placement.h>
37 #include <drm/drm_vma_manager.h>
38 #include <linux/mm.h>
39 #include <linux/rbtree.h>
40 #include <linux/module.h>
41 #include <linux/uaccess.h>
42 
43 #include <sys/sysctl.h>
44 #include <vm/vm.h>
45 #include <vm/vm_page.h>
46 #include <vm/vm_page2.h>
47 
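/*
 * Number of pages the (currently compiled-out) Linux-style fault handler
 * below inserts per fault: the faulting page plus speculative prefaults
 * of the pages that follow it.
 */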
48 #define TTM_BO_VM_NUM_PREFAULT 16
49 
50 #if 0
51 static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
52 				struct vm_area_struct *vma,
53 				struct vm_fault *vmf)
54 {
55 	struct ttm_bo_device *bdev = bo->bdev;
56 	int ret = 0;
57 
58 	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
59 	if (likely(!test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)))
60 		goto out_unlock;
61 
62 	/*
63 	 * Quick non-stalling check for idle.
64 	 */
65 	ret = ttm_bo_wait(bo, false, false, true);
66 	if (likely(ret == 0))
67 		goto out_unlock;
68 
69 	/*
70 	 * If possible, avoid waiting for GPU with mmap_sem
71 	 * held.
72 	 */
73 	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
74 		ret = VM_FAULT_RETRY;
75 		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
76 			goto out_unlock;
77 
78 		up_read(&vma->vm_mm->mmap_sem);
79 		(void) ttm_bo_wait(bo, false, true, false);
80 		goto out_unlock;
81 	}
82 
83 	/*
84 	 * Ordinary wait.
85 	 */
86 	ret = ttm_bo_wait(bo, false, true, false);
87 	if (unlikely(ret != 0))
88 		ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
89 			VM_FAULT_NOPAGE;
90 
91 out_unlock:
92 	lockmgr(&bdev->fence_lock, LK_RELEASE);
93 	return ret;
94 }
95 #endif
96 
97 /*
98  * Debug knob for unexpected vm_page aliases (a fatal bus fault condition):
99  * -1 (the default) never stalls, 0 stalls the faulting thread so it can
100  * be inspected, and a positive count unstalls N times, then stalls again.
101  */
102 static int drm_unstall = -1;
103 SYSCTL_INT(_debug, OID_AUTO, unstall, CTLFLAG_RW, &drm_unstall, 0, "");
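/*
 * The knob above is exported as debug.unstall.  Hypothetical usage from
 * userland:  "sysctl debug.unstall=4" lets the next four alias faults
 * proceed before the handler stalls again; "sysctl debug.unstall=-1"
 * (the default) never stalls.
 */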
104 
105 static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
106 {
107 	/* see ttm_bo_mmap_single() at end of this file */
108 	/* ttm_bo_vm_ops not currently used, no entry should occur */
109 	panic("ttm_bo_vm_fault");
110 #if 0
111 	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
112 	    vma->vm_private_data;
113 	struct ttm_bo_device *bdev = bo->bdev;
114 	unsigned long page_offset;
115 	unsigned long page_last;
116 	unsigned long pfn;
117 	struct ttm_tt *ttm = NULL;
118 	struct page *page;
119 	int ret;
120 	int i;
121 	unsigned long address = (unsigned long)vmf->virtual_address;
122 	int retval = VM_FAULT_NOPAGE;
123 	struct ttm_mem_type_manager *man =
124 		&bdev->man[bo->mem.mem_type];
125 	struct vm_area_struct cvma;
126 
127 	/*
128 	 * Work around locking order reversal in fault / nopfn
129 	 * between mmap_sem and bo_reserve: Perform a trylock operation
130 	 * for reserve, and if it fails, retry the fault after waiting
131 	 * for the buffer to become unreserved.
132 	 */
133 	ret = ttm_bo_reserve(bo, true, true, false, NULL);
134 	if (unlikely(ret != 0)) {
135 		if (ret != -EBUSY)
136 			return VM_FAULT_NOPAGE;
137 
138 		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
139 			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
140 				up_read(&vma->vm_mm->mmap_sem);
141 				(void) ttm_bo_wait_unreserved(bo);
142 			}
143 
144 			return VM_FAULT_RETRY;
145 		}
146 
147 		/*
148 		 * If we'd want to change locking order to
149 		 * mmap_sem -> bo::reserve, we'd use a blocking reserve here
150 		 * instead of retrying the fault...
151 		 */
152 		return VM_FAULT_NOPAGE;
153 	}
154 
155 	/*
156 	 * Refuse to fault imported pages. This should be handled
157 	 * (if at all) by redirecting mmap to the exporter.
158 	 */
159 	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
160 		retval = VM_FAULT_SIGBUS;
161 		goto out_unlock;
162 	}
163 
164 	if (bdev->driver->fault_reserve_notify) {
165 		ret = bdev->driver->fault_reserve_notify(bo);
166 		switch (ret) {
167 		case 0:
168 			break;
169 		case -EBUSY:
170 		case -ERESTARTSYS:
171 			retval = VM_FAULT_NOPAGE;
172 			goto out_unlock;
173 		default:
174 			retval = VM_FAULT_SIGBUS;
175 			goto out_unlock;
176 		}
177 	}
178 
179 	/*
180 	 * Wait for buffer data in transit, due to a pipelined
181 	 * move.
182 	 */
183 
184 	ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
185 	if (unlikely(ret != 0)) {
186 		retval = ret;
187 		goto out_unlock;
188 	}
189 
190 	ret = ttm_mem_io_lock(man, true);
191 	if (unlikely(ret != 0)) {
192 		retval = VM_FAULT_NOPAGE;
193 		goto out_unlock;
194 	}
195 	ret = ttm_mem_io_reserve_vm(bo);
196 	if (unlikely(ret != 0)) {
197 		retval = VM_FAULT_SIGBUS;
198 		goto out_io_unlock;
199 	}
200 
201 	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
202 		vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
203 	page_last = vma_pages(vma) + vma->vm_pgoff -
204 		drm_vma_node_start(&bo->vma_node);
205 
206 	if (unlikely(page_offset >= bo->num_pages)) {
207 		retval = VM_FAULT_SIGBUS;
208 		goto out_io_unlock;
209 	}
210 
211 	/*
212 	 * Make a local vma copy to modify the page_prot member
213 	 * and vm_flags if necessary. The vma parameter is protected
214 	 * by mmap_sem in write mode.
215 	 */
216 	cvma = *vma;
217 	cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);
218 
219 	if (bo->mem.bus.is_iomem) {
220 		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
221 						cvma.vm_page_prot);
222 	} else {
223 		ttm = bo->ttm;
224 		if (!(bo->mem.placement & TTM_PL_FLAG_CACHED))
225 			cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
226 							cvma.vm_page_prot);
227 
228 		/* Allocate all pages at once, the most common usage */
229 		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
230 			retval = VM_FAULT_OOM;
231 			goto out_io_unlock;
232 		}
233 	}
234 
235 	/*
236 	 * Speculatively prefault a number of pages. Only error on
237 	 * first page.
238 	 */
239 	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
240 		if (bo->mem.bus.is_iomem)
241 			pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
242 		else {
243 			page = ttm->pages[page_offset];
244 			if (unlikely(!page && i == 0)) {
245 				retval = VM_FAULT_OOM;
246 				goto out_io_unlock;
247 			} else if (unlikely(!page)) {
248 				break;
249 			}
250 			page->mapping = vma->vm_file->f_mapping;
251 			page->index = drm_vma_node_start(&bo->vma_node) +
252 				page_offset;
253 			pfn = page_to_pfn(page);
254 		}
255 
256 		if (vma->vm_flags & VM_MIXEDMAP)
257 			ret = vm_insert_mixed(&cvma, address, pfn);
258 		else
259 			ret = vm_insert_pfn(&cvma, address, pfn);
260 
261 		/*
262 		 * Somebody beat us to this PTE, we prefaulted into an
263 		 * already-populated PTE, or the prefault itself failed.
264 		 */
265 
266 		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
267 			break;
268 		else if (unlikely(ret != 0)) {
269 			retval =
270 			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
271 			goto out_io_unlock;
272 		}
273 
274 		address += PAGE_SIZE;
275 		if (unlikely(++page_offset >= page_last))
276 			break;
277 	}
278 out_io_unlock:
279 	ttm_mem_io_unlock(man);
280 out_unlock:
281 	ttm_bo_unreserve(bo);
282 	return retval;
283 #endif
284 }
285 
286 /* ttm_bo_vm_ops not currently used, no entry should occur */
287 static void ttm_bo_vm_open(struct vm_area_struct *vma)
288 {
289 	struct ttm_buffer_object *bo =
290 	    (struct ttm_buffer_object *)vma->vm_private_data;
291 
292 #if 0
293 	WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
294 #endif
295 
296 	(void)ttm_bo_reference(bo);
297 }
298 
299 /* ttm_bo_vm_ops not currently used, no entry should occur */
300 static void ttm_bo_vm_close(struct vm_area_struct *vma)
301 {
302 	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
303 
304 	ttm_bo_unref(&bo);
305 	vma->vm_private_data = NULL;
306 }
307 
308 static const struct vm_operations_struct ttm_bo_vm_ops = {
309 	.fault = ttm_bo_vm_fault,
310 	.open = ttm_bo_vm_open,
311 	.close = ttm_bo_vm_close
312 };
313 
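/*
 * Translate an mmap offset range (in pages) into the ttm_buffer_object
 * registered at that range in the device's vma offset manager.  On success
 * a reference is taken on the bo (kref_get_unless_zero()); the caller is
 * responsible for releasing it.  Returns NULL if no object covers the
 * range or the object is already being destroyed.
 */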
314 static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
315 						  unsigned long offset,
316 						  unsigned long pages)
317 {
318 	struct drm_vma_offset_node *node;
319 	struct ttm_buffer_object *bo = NULL;
320 
321 	drm_vma_offset_lock_lookup(&bdev->vma_manager);
322 
323 	node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
324 	if (likely(node)) {
325 		bo = container_of(node, struct ttm_buffer_object, vma_node);
326 		if (!kref_get_unless_zero(&bo->kref))
327 			bo = NULL;
328 	}
329 
330 	drm_vma_offset_unlock_lookup(&bdev->vma_manager);
331 
332 	if (!bo)
333 		pr_err("Could not find buffer object to map\n");
334 
335 	return bo;
336 }
337 
338 int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
339 		struct ttm_bo_device *bdev)
340 {
341 	struct ttm_bo_driver *driver;
342 	struct ttm_buffer_object *bo;
343 	int ret;
344 
345 	bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
346 	if (unlikely(!bo))
347 		return -EINVAL;
348 
349 	driver = bo->bdev->driver;
350 	if (unlikely(!driver->verify_access)) {
351 		ret = -EPERM;
352 		goto out_unref;
353 	}
354 	ret = driver->verify_access(bo, filp);
355 	if (unlikely(ret != 0))
356 		goto out_unref;
357 
358 	vma->vm_ops = &ttm_bo_vm_ops;
359 
360 	/*
361 	 * Note: We're transferring the bo reference to
362 	 * vma->vm_private_data here.
363 	 */
364 
365 	vma->vm_private_data = bo;
366 
367 	/*
368 	 * We'd like to use VM_PFNMAP on shared mappings, where
369 	 * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
370 	 * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
371 	 * bad for performance. Until that has been sorted out, use
372 	 * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
373 	 */
374 	vma->vm_flags |= VM_MIXEDMAP;
375 	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
376 	return 0;
377 out_unref:
378 	ttm_bo_unref(&bo);
379 	return ret;
380 }
381 EXPORT_SYMBOL(ttm_bo_mmap);
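/*
 * Illustrative Linux-side caller (sketch only, the "mydrv" names are made
 * up; on DragonFly this function is instead reached via
 * ttm_bo_mmap_single() below, which fakes up the vma):
 *
 *	static int mydrv_mmap(struct file *filp, struct vm_area_struct *vma)
 *	{
 *		struct mydrv_device *mdev = mydrv_from_filp(filp);
 *
 *		return ttm_bo_mmap(filp, vma, &mdev->ttm_bdev);
 *	}
 */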
382 
383 int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
384 {
385 	if (vma->vm_pgoff != 0)
386 		return -EACCES;
387 
388 	vma->vm_ops = &ttm_bo_vm_ops;
389 	vma->vm_private_data = ttm_bo_reference(bo);
390 	vma->vm_flags |= VM_MIXEDMAP;
391 	vma->vm_flags |= VM_IO | VM_DONTEXPAND;
392 	return 0;
393 }
394 EXPORT_SYMBOL(ttm_fbdev_mmap);
395 
396 /*
397  * DragonFlyBSD Interface
398  */
399 
400 #include "opt_vm.h"
401 
402 #include <vm/vm.h>
403 #include <vm/vm_page.h>
404 #include <linux/errno.h>
405 #include <linux/export.h>
406 
407 #include <vm/vm_page2.h>
408 
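/*
 * Page-fault callback for the OBJT_MGTDEVICE object created in
 * ttm_bo_mmap_single().  Entered by the cdev pager with the VM object
 * locked; *mres holds the placeholder page the VM system allocated for the
 * faulting index.  On success that placeholder is freed, *mres is replaced
 * with the (busied) page actually backing the buffer object, and
 * VM_PAGER_OK is returned; any failure returns VM_PAGER_ERROR.
 */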
409 static int
410 ttm_bo_vm_fault_dfly(vm_object_t vm_obj, vm_ooffset_t offset,
411 		     int prot, vm_page_t *mres)
412 {
413 	struct ttm_buffer_object *bo = vm_obj->handle;
414 	struct ttm_bo_device *bdev = bo->bdev;
415 	struct ttm_tt *ttm = NULL;
416 	vm_page_t m, oldm;
417 	int ret;
418 	int retval = VM_PAGER_OK;
419 	struct ttm_mem_type_manager *man;
420 
421 	man = &bdev->man[bo->mem.mem_type];
422 
423 	/*kprintf("FAULT %p %p/%ld\n", vm_obj, bo, offset);*/
424 
425 	vm_object_pip_add(vm_obj, 1);
426 	oldm = *mres;
427 	*mres = NULL;
428 
429 retry:
430 	VM_OBJECT_UNLOCK(vm_obj);
431 	m = NULL;
432 
433 	/*
434 	 * NOTE: set nowait to false, we don't have ttm_bo_wait_unreserved()
435 	 * 	 for the -EBUSY case yet.
436 	 */
437 	ret = ttm_bo_reserve(bo, true, false, false, 0);
438 	if (unlikely(ret != 0)) {
439 		retval = VM_PAGER_ERROR;
440 		VM_OBJECT_LOCK(vm_obj);
441 		goto out_unlock2;
442 	}
443 
444 	if (bdev->driver->fault_reserve_notify) {
445 		ret = bdev->driver->fault_reserve_notify(bo);
446 		switch (ret) {
447 		case 0:
448 			break;
449 		case -EBUSY:
450 			lwkt_yield();
451 			/* fall through */
452 		case -ERESTARTSYS:
453 		case -EINTR:
454 			retval = VM_PAGER_ERROR;
455 			goto out_unlock;
456 		default:
457 			retval = VM_PAGER_ERROR;
458 			goto out_unlock;
459 		}
460 	}
461 
462 	/*
463 	 * Wait for buffer data in transit, due to a pipelined
464 	 * move.
465 	 */
466 	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
467 	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
468 		/*
469 		 * Here, the behavior differs between Linux and FreeBSD.
470 		 *
471 		 * On Linux, the wait is interruptible (3rd argument to
472 		 * ttm_bo_wait). There must be some mechanism to resume
473 		 * page fault handling, once the signal is processed.
474 		 *
475 		 * On FreeBSD, the wait is uninterruptible.  This is not a
476 		 * problem, as we cannot end up with an unkillable process
477 		 * here: the wait will eventually time out.
478 		 *
479 		 * An example of why an interruptible wait is a problem is
480 		 * the Xorg process, which uses SIGALRM internally.  The
481 		 * signal could interrupt the wait, causing the page fault
482 		 * to fail and the process to receive SIGSEGV.
483 		 */
484 		ret = ttm_bo_wait(bo, false, false, false);
485 		lockmgr(&bdev->fence_lock, LK_RELEASE);
486 		if (unlikely(ret != 0)) {
487 			retval = VM_PAGER_ERROR;
488 			goto out_unlock;
489 		}
490 	} else {
491 		lockmgr(&bdev->fence_lock, LK_RELEASE);
492 	}
493 
494 	ret = ttm_mem_io_lock(man, true);
495 	if (unlikely(ret != 0)) {
496 		retval = VM_PAGER_ERROR;
497 		goto out_unlock;
498 	}
499 	ret = ttm_mem_io_reserve_vm(bo);
500 	if (unlikely(ret != 0)) {
501 		retval = VM_PAGER_ERROR;
502 		goto out_io_unlock;
503 	}
504 	if (unlikely(OFF_TO_IDX(offset) >= bo->num_pages)) {
505 		retval = VM_PAGER_ERROR;
506 		goto out_io_unlock;
507 	}
508 
509 	/*
510 	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
511 	 * since the mmap_sem is only held in read mode. However, we
512 	 * modify only the caching bits of vma->vm_page_prot and
513 	 * consider those bits protected by
514 	 * the bo->mutex, as we should be the only writers.
515 	 * There shouldn't really be any readers of these bits except
516 	 * within vm_insert_mixed()? fork?
517 	 *
518 	 * TODO: Add a list of vmas to the bo, and change the
519 	 * vma->vm_page_prot when the object changes caching policy, with
520 	 * the correct locks held.
521 	 */
522 
523 	if (bo->mem.bus.is_iomem) {
524 		m = vm_phys_fictitious_to_vm_page(bo->mem.bus.base +
525 						  bo->mem.bus.offset + offset);
526 		pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement, 0));
527 	} else {
528 		/* Allocate all pages at once, the most common usage */
529 		ttm = bo->ttm;
530 		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
531 			retval = VM_PAGER_ERROR;
532 			goto out_io_unlock;
533 		}
534 		ttm = bo->ttm;
535 
536 		m = (struct vm_page *)ttm->pages[OFF_TO_IDX(offset)];
537 		if (unlikely(!m)) {
538 			retval = VM_PAGER_ERROR;
539 			goto out_io_unlock;
540 		}
541 		pmap_page_set_memattr(m,
542 		    (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
543 		    VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement, 0));
544 	}
545 
546 	VM_OBJECT_LOCK(vm_obj);
547 
548 	if (vm_page_busy_try(m, FALSE)) {
549 		kprintf("r");
550 		vm_page_sleep_busy(m, FALSE, "ttmvmf");
551 		ttm_mem_io_unlock(man);
552 		ttm_bo_unreserve(bo);
553 		goto retry;
554 	}
555 
556 	/*
557 	 * We want our fake page in the VM object, not the page the OS
558 	 * allocated for us as a placeholder.
559 	 */
560 	m->valid = VM_PAGE_BITS_ALL;
561 	*mres = m;
562 	if (oldm != NULL) {
563 		vm_page_remove(oldm);
564 		if (m->object) {
565 			retval = VM_PAGER_ERROR;
566 			kprintf("ttm_bo_vm_fault_dfly: m(%p) already inserted "
567 				"in obj %p, attempt obj %p\n",
568 				m, m->object, vm_obj);
569 			while (drm_unstall == 0) {
570 				tsleep(&retval, 0, "DEBUG", hz/10);
571 			}
572 			if (drm_unstall > 0)
573 				--drm_unstall;
574 		} else {
575 			vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
576 		}
577 		vm_page_free(oldm);
578 		oldm = NULL;
579 	} else {
580 		vm_page_t mtmp;
581 
582 		kprintf("oldm NULL\n");
583 
584 		mtmp = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
585 		KASSERT(mtmp == NULL || mtmp == m,
586 		    ("inconsistent insert bo %p m %p mtmp %p offset %jx",
587 		    bo, m, mtmp, (uintmax_t)offset));
588 		if (mtmp == NULL)
589 			vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
590 	}
591 	/*vm_page_busy_try(m, FALSE);*/
592 
593 out_io_unlock1:
594 	ttm_mem_io_unlock(man);
595 out_unlock1:
596 	ttm_bo_unreserve(bo);
597 out_unlock2:
598 	if (oldm) {
599 		vm_page_remove(oldm);
600 		vm_page_free(oldm);
601 	}
602 	vm_object_pip_wakeup(vm_obj);
603 	return (retval);
604 
605 out_io_unlock:
606 	VM_OBJECT_LOCK(vm_obj);
607 	goto out_io_unlock1;
608 
609 out_unlock:
610 	VM_OBJECT_LOCK(vm_obj);
611 	goto out_unlock1;
612 }
613 
614 static int
615 ttm_bo_vm_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
616 	       vm_ooffset_t foff, struct ucred *cred, u_short *color)
617 {
618 
619 	/*
620 	 * On Linux, a reference to the buffer object is acquired here.
621 	 * The reason is that this function is not called when the
622 	 * mmap() is initialized, but only when a process forks for
623 	 * instance. Therefore on Linux, the reference on the bo is
624 	 * acquired either in ttm_bo_mmap() or ttm_bo_vm_open(). It's
625 	 * then released in ttm_bo_vm_close().
626 	 *
627 	 * Here, this function is called during mmap() initialization.
628 	 * Thus, the reference acquired in ttm_bo_mmap_single() is
629 	 * sufficient.
630 	 */
631 	*color = 0;
632 	return (0);
633 }
634 
635 static void
636 ttm_bo_vm_dtor(void *handle)
637 {
638 	struct ttm_buffer_object *bo = handle;
639 
640 	ttm_bo_unref(&bo);
641 }
642 
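/*
 * Pager callbacks for the per-bo OBJT_MGTDEVICE object allocated in
 * ttm_bo_mmap_single(): the fault handler above, a constructor that is run
 * at mmap time on DragonFly, and a destructor that drops the bo reference
 * transferred to the pager handle.
 */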
643 static struct cdev_pager_ops ttm_pager_ops = {
644 	.cdev_pg_fault = ttm_bo_vm_fault_dfly,
645 	.cdev_pg_ctor = ttm_bo_vm_ctor,
646 	.cdev_pg_dtor = ttm_bo_vm_dtor
647 };
648 
649 /*
650  * Called from drm_drv.c
651  *
652  * *offset - object offset in bytes
653  * size	   - map size in bytes
654  *
655  * We set up a dummy vma (for now) and call ttm_bo_mmap().  Then we set up
656  * our own VM object and DragonFly pager ops.  Note that the ops supplied by
657  * ttm_bo_mmap() are not currently used.
658  */
659 int
660 ttm_bo_mmap_single(struct drm_device *dev, vm_ooffset_t *offset,
661 		   vm_size_t size, struct vm_object **obj_res, int nprot)
662 {
663 	struct ttm_bo_device *bdev = dev->drm_ttm_bdev;
664 	struct ttm_buffer_object *bo;
665 	struct vm_object *vm_obj;
666 	struct vm_area_struct vma;
667 	int ret;
668 
669 	*obj_res = NULL;
670 
671 	bzero(&vma, sizeof(vma));
672 	vma.vm_start = *offset;		/* bdev-relative offset */
673 	vma.vm_end = vma.vm_start + size;
674 	vma.vm_pgoff = vma.vm_start >> PAGE_SHIFT;
675 	/* vma.vm_page_prot */
676 	/* vma.vm_flags */
677 
678 	/*
679 	 * Call the Linux-ported code to do the work, and on success just
680 	 * set up our own VM object and ignore what the Linux code did other
681 	 * than supplying us the 'bo'.
682 	 */
683 	ret = ttm_bo_mmap(NULL, &vma, bdev);
684 
685 	if (ret == 0) {
686 		bo = vma.vm_private_data;
687 		vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE,
688 					     &ttm_pager_ops,
689 					     size, nprot, 0,
690 					     curthread->td_ucred);
691 		if (vm_obj) {
692 			*obj_res = vm_obj;
693 			*offset = 0;		/* object-relative offset */
694 		} else {
695 			ttm_bo_unref(&bo);
696 			ret = EINVAL;
697 		}
698 	}
699 	return ret;
700 }
701 EXPORT_SYMBOL(ttm_bo_mmap_single);
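/*
 * Sketch of the call site (the argument plumbing shown here is an
 * assumption; see the real mmap_single hook in drm_drv.c):
 *
 *	vm_object_t obj;
 *	vm_ooffset_t offset = requested_byte_offset;
 *
 *	error = ttm_bo_mmap_single(dev, &offset, size, &obj, nprot);
 *
 * On success obj is handed back to the VM system and offset has been
 * rewritten to be object-relative (0).
 */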
702 
703 #ifdef __DragonFly__
704 void ttm_bo_release_mmap(struct ttm_buffer_object *bo);
705 
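/*
 * Tear down any existing user mappings of a buffer object: look up the
 * pager VM object associated with the bo (if any), free every managed
 * page still installed in it, and drop the reference returned by the
 * lookup.  A no-op if the bo was never mmapped.
 */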
706 void
707 ttm_bo_release_mmap(struct ttm_buffer_object *bo)
708 {
709 	vm_object_t vm_obj;
710 	vm_page_t m;
711 	int i;
712 
713 	vm_obj = cdev_pager_lookup(bo);
714 	if (vm_obj == NULL)
715 		return;
716 
717 	VM_OBJECT_LOCK(vm_obj);
718 	for (i = 0; i < bo->num_pages; i++) {
719 		m = vm_page_lookup_busy_wait(vm_obj, i, TRUE, "ttm_unm");
720 		if (m == NULL)
721 			continue;
722 		cdev_pager_free_page(vm_obj, m);
723 	}
724 	VM_OBJECT_UNLOCK(vm_obj);
725 
726 	vm_object_deallocate(vm_obj);
727 }
728 #endif
729 
730 #if 0
731 int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
732 {
733 	if (vma->vm_pgoff != 0)
734 		return -EACCES;
735 
736 	vma->vm_ops = &ttm_bo_vm_ops;
737 	vma->vm_private_data = ttm_bo_reference(bo);
738 	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
739 	return 0;
740 }
741 EXPORT_SYMBOL(ttm_fbdev_mmap);
742 #endif
743