xref: /dragonfly/sys/dev/drm/ttm/ttm_bo_vm.c (revision 7c4f4eee)
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <ttm/ttm_module.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_page2.h>

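/*
 * Number of pages speculatively inserted per fault by the prefault loop
 * in ttm_bo_vm_fault() below; only the first page reports errors.
 */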
#define TTM_BO_VM_NUM_PREFAULT 16

static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
				struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	int ret = 0;

	if (likely(!test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)))
		goto out_unlock;

	/*
	 * Quick non-stalling check for idle.
	 */
	ret = ttm_bo_wait(bo, false, true);
	if (likely(ret == 0))
		goto out_unlock;

	/*
	 * If possible, avoid waiting for GPU with mmap_sem
	 * held.
	 */
	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
		ret = VM_FAULT_RETRY;
		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_unlock;

#if 0
		up_read(&vma->vm_mm->mmap_sem);
#endif
		(void) ttm_bo_wait(bo, true, false);
		goto out_unlock;
	}

	/*
	 * Ordinary wait.
	 */
	ret = ttm_bo_wait(bo, true, false);
	if (unlikely(ret != 0))
		ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
			VM_FAULT_NOPAGE;

out_unlock:
	return ret;
}

/*
 * Behavior on an unexpected vm_page alias (a fatal bus fault): set to -1
 * (the default) to always unstall, set to 0 to stall, or set to a positive
 * count to unstall N times and then stall again.
 */
static int drm_unstall = -1;
SYSCTL_INT(_debug, OID_AUTO, unstall, CTLFLAG_RW, &drm_unstall, 0, "");
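/* Exposed as the "debug.unstall" sysctl; consulted in ttm_bo_vm_fault_dfly(). */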

static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	/* see ttm_bo_mmap_single() at end of this file */
	/* ttm_bo_vm_ops not currently used, no entry should occur */
	panic("ttm_bo_vm_fault");
#if 0
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int ret;
	int i;
	unsigned long address = (unsigned long)vmf->virtual_address;
	int retval = VM_FAULT_NOPAGE;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];
	struct vm_area_struct cvma;

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after waiting
	 * for the buffer to become unreserved.
	 */
	ret = ttm_bo_reserve(bo, true, true, NULL);
	if (unlikely(ret != 0)) {
		if (ret != -EBUSY)
			return VM_FAULT_NOPAGE;

		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
				up_read(&vma->vm_mm->mmap_sem);
				(void) ttm_bo_wait_unreserved(bo);
			}

			return VM_FAULT_RETRY;
		}

		/*
		 * If we'd want to change locking order to
		 * mmap_sem -> bo::reserve, we'd use a blocking reserve here
		 * instead of retrying the fault...
		 */
		return VM_FAULT_NOPAGE;
	}

	/*
	 * Refuse to fault imported pages. This should be handled
	 * (if at all) by redirecting mmap to the exporter.
	 */
	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
		retval = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTARTSYS:
			retval = VM_FAULT_NOPAGE;
			goto out_unlock;
		default:
			retval = VM_FAULT_SIGBUS;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
	if (unlikely(ret != 0)) {
		retval = ret;
		goto out_unlock;
	}

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_NOPAGE;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
		vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
	page_last = vma_pages(vma) + vma->vm_pgoff -
		drm_vma_node_start(&bo->vma_node);

	if (unlikely(page_offset >= bo->num_pages)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	/*
	 * Make a local vma copy to modify the page_prot member
	 * and vm_flags if necessary. The vma parameter is protected
	 * by mmap_sem in write mode.
	 */
	cvma = *vma;
	cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);

	if (bo->mem.bus.is_iomem) {
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);
	} else {
		ttm = bo->ttm;
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);

		/* Allocate all pages at once, most common usage */
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_FAULT_OOM;
			goto out_io_unlock;
		}
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */
	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
		if (bo->mem.bus.is_iomem)
			pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
		else {
			page = ttm->pages[page_offset];
			if (unlikely(!page && i == 0)) {
				retval = VM_FAULT_OOM;
				goto out_io_unlock;
			} else if (unlikely(!page)) {
				break;
			}
			page->mapping = vma->vm_file->f_mapping;
			page->index = drm_vma_node_start(&bo->vma_node) +
				page_offset;
			pfn = page_to_pfn(page);
		}

		if (vma->vm_flags & VM_MIXEDMAP)
			ret = vm_insert_mixed(&cvma, address,
					__pfn_to_pfn_t(pfn, PFN_DEV));
		else
			ret = vm_insert_pfn(&cvma, address, pfn);

		/*
		 * Somebody beat us to this PTE or prefaulting to
		 * an already populated PTE, or prefaulting error.
		 */

		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
			break;
		else if (unlikely(ret != 0)) {
			retval =
			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
			goto out_io_unlock;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
out_io_unlock:
	ttm_mem_io_unlock(man);
out_unlock:
	ttm_bo_unreserve(bo);
	return retval;
#endif
}

/* ttm_bo_vm_ops not currently used, no entry should occur */
static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

#if 0
	WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
#endif

	(void)ttm_bo_reference(bo);
}

/* ttm_bo_vm_ops not currently used, no entry should occur */
static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;

	ttm_bo_unref(&bo);
	vma->vm_private_data = NULL;
}

static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close
};

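/*
 * Translate a page offset range in the bdev address space into a referenced
 * buffer object.  kref_get_unless_zero() skips objects already being
 * destroyed, so a dying BO is treated as "not found".
 */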
static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
						  unsigned long offset,
						  unsigned long pages)
{
	struct drm_vma_offset_node *node;
	struct ttm_buffer_object *bo = NULL;

	drm_vma_offset_lock_lookup(&bdev->vma_manager);

	node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
	if (likely(node)) {
		bo = container_of(node, struct ttm_buffer_object, vma_node);
		if (!kref_get_unless_zero(&bo->kref))
			bo = NULL;
	}

	drm_vma_offset_unlock_lookup(&bdev->vma_manager);

	if (!bo)
		pr_err("Could not find buffer object to map\n");

	return bo;
}

int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
	if (unlikely(!bo))
		return -EINVAL;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = bo;

	/*
	 * We'd like to use VM_PFNMAP on shared mappings, where
	 * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
	 * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
	 * bad for performance. Until that has been sorted out, use
	 * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
	 */
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);

int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND;
	return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);

/*
 * DragonFlyBSD Interface
 */

#include "opt_vm.h"

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <linux/errno.h>
#include <linux/export.h>

#include <vm/vm_page2.h>

/*
 * NOTE: This code is fragile.  This code can only be entered with *mres
 *	 not NULL when *mres is a placeholder page allocated by the kernel.
 */
static int
ttm_bo_vm_fault_dfly(vm_object_t vm_obj, vm_ooffset_t offset,
		     int prot, vm_page_t *mres)
{
	struct ttm_buffer_object *bo = vm_obj->handle;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_tt *ttm = NULL;
	vm_page_t m, mtmp;
	int ret;
	int retval = VM_PAGER_OK;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];
	struct vm_area_struct cvma;

	/*
	 * The Linux code expects to receive these arguments:
	 * - struct vm_area_struct *vma
	 * - struct vm_fault *vmf
	 */
#ifdef __DragonFly__
	struct vm_area_struct vmas;
	struct vm_area_struct *vma = &vmas;
	struct vm_fault vmfs;
	struct vm_fault *vmf = &vmfs;

	memset(vma, 0, sizeof(*vma));
	memset(vmf, 0, sizeof(*vmf));
#endif
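
	/*
	 * NOTE: The zeroed vma/vmf above exist only to satisfy the argument
	 * lists of the Linux-ported helpers (ttm_bo_vm_fault_idle() tests
	 * vmf->flags); they carry no real mapping state.
	 */
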
	vm_object_pip_add(vm_obj, 1);

	/*
	 * We must atomically clean up any possible placeholder page to avoid
	 * the DRM subsystem attempting to use it.  We can determine if this
	 * is a placeholder page by checking m->valid.
	 *
	 * We have to do this before any potential fault_reserve_notify()
	 * which might try to free the map (and thus deadlock on our busy
	 * page).
	 */
	m = *mres;
	*mres = NULL;
	if (m) {
		if (m->valid == VM_PAGE_BITS_ALL) {
			/* actual page */
			vm_page_wakeup(m);
		} else {
			/* placeholder page */
			KKASSERT((m->flags & PG_FICTITIOUS) == 0);
			vm_page_remove(m);
			vm_page_free(m);
		}
	}

retry:
	VM_OBJECT_UNLOCK(vm_obj);
	m = NULL;

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after waiting
	 * for the buffer to become unreserved.
	 */
	ret = ttm_bo_reserve(bo, true, true, NULL);
	if (unlikely(ret != 0)) {
		if (ret != -EBUSY) {
			retval = VM_PAGER_ERROR;
			VM_OBJECT_LOCK(vm_obj);
			goto out_unlock2;
		}

		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY || 1) {
			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
#if 0
				up_read(&vma->vm_mm->mmap_sem);
#endif
				(void) ttm_bo_wait_unreserved(bo);
			}

#ifndef __DragonFly__
			return VM_FAULT_RETRY;
#else
			VM_OBJECT_LOCK(vm_obj);
			lwkt_yield();
			goto retry;
#endif
		}

		/*
		 * If we'd want to change locking order to
		 * mmap_sem -> bo::reserve, we'd use a blocking reserve here
		 * instead of retrying the fault...
		 */
#ifndef __DragonFly__
		return VM_FAULT_NOPAGE;
#else
		retval = VM_PAGER_ERROR;
		VM_OBJECT_LOCK(vm_obj);
		goto out_unlock2;
#endif
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
			lwkt_yield();
			/* fall through */
		case -ERESTARTSYS:
		case -EINTR:
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		default:
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */
	ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
	if (unlikely(ret != 0)) {
		retval = ret;
#ifdef __DragonFly__
		retval = VM_PAGER_ERROR;
#endif
		goto out_unlock;
	}

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_io_unlock;
	}
	if (unlikely(OFF_TO_IDX(offset) >= bo->num_pages)) {
		retval = VM_PAGER_ERROR;
		goto out_io_unlock;
	}

	/*
	 * Lookup the real page.
	 *
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and
	 * consider those bits protected by
	 * the bo->mutex, as we should be the only writers.
	 * There shouldn't really be any readers of these bits except
	 * within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */

	/*
	 * Make a local vma copy to modify the page_prot member
	 * and vm_flags if necessary. The vma parameter is protected
	 * by mmap_sem in write mode.
	 */
	cvma = *vma;
#if 0
	cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);
#else
	cvma.vm_page_prot = 0;
#endif

	if (bo->mem.bus.is_iomem) {
#ifdef __DragonFly__
		m = vm_phys_fictitious_to_vm_page(bo->mem.bus.base +
						  bo->mem.bus.offset + offset);
		pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement, 0));
#endif
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);
	} else {
		ttm = bo->ttm;
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);

		/* Allocate all pages at once, most common usage */
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}

		m = (struct vm_page *)ttm->pages[OFF_TO_IDX(offset)];
		if (unlikely(!m)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
		pmap_page_set_memattr(m,
		    (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement, 0));
	}

	VM_OBJECT_LOCK(vm_obj);

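	/*
	 * If another thread holds the page busy (e.g. a concurrent fault on
	 * the same object), wait for it, drop the io lock and the
	 * reservation, and restart the whole fault.
	 */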
	if (vm_page_busy_try(m, FALSE)) {
		kprintf("r");
		vm_page_sleep_busy(m, FALSE, "ttmvmf");
		ttm_mem_io_unlock(man);
		ttm_bo_unreserve(bo);
		goto retry;
	}

	/*
	 * We want our fake page in the VM object, not the page the OS
	 * allocated for us as a placeholder.
	 */
	m->valid = VM_PAGE_BITS_ALL;
	*mres = m;

	/*
	 * Insert the page into the object if not already inserted.
	 */
	if (m->object) {
		if (m->object != vm_obj || m->pindex != OFF_TO_IDX(offset)) {
			retval = VM_PAGER_ERROR;
			kprintf("ttm_bo_vm_fault_dfly: m(%p) already inserted "
				"in obj %p, attempt obj %p\n",
				m, m->object, vm_obj);
			while (drm_unstall == 0) {
				tsleep(&retval, 0, "DEBUG", hz/10);
			}
			if (drm_unstall > 0)
				--drm_unstall;
		}
	} else {
		mtmp = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
		if (mtmp == NULL) {
			vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
		} else {
			panic("inconsistent insert bo %p m %p mtmp %p "
			      "offset %jx",
			      bo, m, mtmp,
			      (uintmax_t)offset);
		}
	}

out_io_unlock1:
	ttm_mem_io_unlock(man);
out_unlock1:
	ttm_bo_unreserve(bo);
out_unlock2:
	vm_object_pip_wakeup(vm_obj);
	return (retval);

out_io_unlock:
	VM_OBJECT_LOCK(vm_obj);
	goto out_io_unlock1;

out_unlock:
	VM_OBJECT_LOCK(vm_obj);
	goto out_unlock1;
}

static int
ttm_bo_vm_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
	       vm_ooffset_t foff, struct ucred *cred, u_short *color)
{

	/*
	 * On Linux, a reference to the buffer object is acquired here.
	 * The reason is that this function is not called when the
	 * mmap() is initialized, but only when a process forks, for
	 * instance. Therefore on Linux, the reference on the bo is
	 * acquired either in ttm_bo_mmap() or ttm_bo_vm_open(). It's
	 * then released in ttm_bo_vm_close().
	 *
	 * Here, this function is called during mmap() initialization.
	 * Thus, the reference acquired in ttm_bo_mmap_single() is
	 * sufficient.
	 */
	*color = 0;
	return (0);
}

static void
ttm_bo_vm_dtor(void *handle)
{
	struct ttm_buffer_object *bo = handle;

	ttm_bo_unref(&bo);
}

static struct cdev_pager_ops ttm_pager_ops = {
	.cdev_pg_fault = ttm_bo_vm_fault_dfly,
	.cdev_pg_ctor = ttm_bo_vm_ctor,
	.cdev_pg_dtor = ttm_bo_vm_dtor
};

/*
 * Called from drm_drv.c
 *
 * *offset - object offset in bytes
 * size	   - map size in bytes
 *
 * We set up a dummy vma (for now) and call ttm_bo_mmap().  Then we set up
 * our own VM object and dfly ops.  Note that the ops supplied by
 * ttm_bo_mmap() are not currently used.
 */
int
ttm_bo_mmap_single(struct drm_device *dev, vm_ooffset_t *offset,
		   vm_size_t size, struct vm_object **obj_res, int nprot)
{
	struct ttm_bo_device *bdev = dev->drm_ttm_bdev;
	struct ttm_buffer_object *bo;
	struct vm_object *vm_obj;
	struct vm_area_struct vma;
	int ret;

	*obj_res = NULL;

	bzero(&vma, sizeof(vma));
	vma.vm_start = *offset;		/* bdev-relative offset */
	vma.vm_end = vma.vm_start + size;
	vma.vm_pgoff = vma.vm_start >> PAGE_SHIFT;
	/* vma.vm_page_prot */
	/* vma.vm_flags */

	/*
	 * Call the Linux-ported code to do the work, and on success just
	 * set up our own VM object and ignore what the Linux code did other
	 * than supplying us the 'bo'.
	 */
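	/*
	 * Only the fields ttm_bo_mmap() actually consults need to be valid
	 * in the dummy vma above: vm_start/vm_end and vm_pgoff for the
	 * range lookup via vma_pages(), plus vm_ops, vm_private_data and
	 * vm_flags, which it fills in for us.
	 */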
	ret = ttm_bo_mmap(NULL, &vma, bdev);

	if (ret == 0) {
		bo = vma.vm_private_data;
		vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE,
					     &ttm_pager_ops,
					     size, nprot, 0,
					     curthread->td_ucred);
		if (vm_obj) {
			*obj_res = vm_obj;
			*offset = 0;		/* object-relative offset */
		} else {
			ttm_bo_unref(&bo);
			ret = EINVAL;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap_single);

#ifdef __DragonFly__
void ttm_bo_release_mmap(struct ttm_buffer_object *bo);

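/*
 * Tear down the device-pager mapping of a buffer object: free every page
 * that was faulted into the VM object and drop the reference on the object
 * itself.
 */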
void
ttm_bo_release_mmap(struct ttm_buffer_object *bo)
{
	vm_object_t vm_obj;
	vm_page_t m;
	int i;

	vm_obj = cdev_pager_lookup(bo);
	if (vm_obj == NULL)
		return;

	VM_OBJECT_LOCK(vm_obj);
	for (i = 0; i < bo->num_pages; i++) {
		m = vm_page_lookup_busy_wait(vm_obj, i, TRUE, "ttm_unm");
		if (m == NULL)
			continue;
		cdev_pager_free_page(vm_obj, m);
	}
	VM_OBJECT_UNLOCK(vm_obj);

	vm_object_deallocate(vm_obj);
}
#endif

#if 0
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);
#endif
807