xref: /dragonfly/sys/dev/drm/ttm/ttm_bo_vm.c (revision dda92f98)
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <ttm/ttm_module.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_page2.h>

#define TTM_BO_VM_NUM_PREFAULT 16

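/*
 * Wait for a pipelined buffer move to finish before the fault handler
 * maps the buffer's pages.  Returns 0 once the buffer is idle (clearing
 * bo->moving), VM_FAULT_RETRY when the caller is allowed to drop
 * mmap_sem and retry, and VM_FAULT_SIGBUS or VM_FAULT_NOPAGE when the
 * wait itself fails.
 */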
static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
				struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	int ret = 0;

	if (likely(!bo->moving))
		goto out_unlock;

	/*
	 * Quick non-stalling check for idle.
	 */
	if (fence_is_signaled(bo->moving))
		goto out_clear;

	/*
	 * If possible, avoid waiting for GPU with mmap_sem
	 * held.
	 */
	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
		ret = VM_FAULT_RETRY;
		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_unlock;

#if 0
		up_read(&vma->vm_mm->mmap_sem);
#endif
		(void) fence_wait(bo->moving, true);
		goto out_unlock;
	}

	/*
	 * Ordinary wait.
	 */
	ret = fence_wait(bo->moving, true);
	if (unlikely(ret != 0)) {
		ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
			VM_FAULT_NOPAGE;
		goto out_unlock;
	}

out_clear:
	fence_put(bo->moving);
	bo->moving = NULL;

out_unlock:
	return ret;
}

/*
 * Controls stalling on an unexpected vm_page alias (a fatal bus fault):
 * -1 always unstalls, 0 stalls, and a positive count unstalls N times
 * before stalling again.
 */
static int drm_unstall = -1;
SYSCTL_INT(_debug, OID_AUTO, unstall, CTLFLAG_RW, &drm_unstall, 0, "");
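/* Tunable at runtime, e.g. "sysctl debug.unstall=0" makes such faults stall */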

static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	/* see ttm_bo_mmap_single() at end of this file */
	/* ttm_bo_vm_ops not currently used, no entry should occur */
	panic("ttm_bo_vm_fault");
#if 0
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int ret;
	int i;
	unsigned long address = (unsigned long)vmf->virtual_address;
	int retval = VM_FAULT_NOPAGE;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];
	struct vm_area_struct cvma;

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after waiting
	 * for the buffer to become unreserved.
	 */
	ret = ttm_bo_reserve(bo, true, true, NULL);
	if (unlikely(ret != 0)) {
		if (ret != -EBUSY)
			return VM_FAULT_NOPAGE;

		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
				up_read(&vma->vm_mm->mmap_sem);
				(void) ttm_bo_wait_unreserved(bo);
			}

			return VM_FAULT_RETRY;
		}

		/*
		 * If we wanted to change the locking order to
		 * mmap_sem -> bo::reserve, we'd use a blocking reserve
		 * here instead of retrying the fault...
		 */
		return VM_FAULT_NOPAGE;
	}

	/*
	 * Refuse to fault imported pages. This should be handled
	 * (if at all) by redirecting mmap to the exporter.
	 */
	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
		retval = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTARTSYS:
			retval = VM_FAULT_NOPAGE;
			goto out_unlock;
		default:
			retval = VM_FAULT_SIGBUS;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
	if (unlikely(ret != 0)) {
		retval = ret;
		goto out_unlock;
	}

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_NOPAGE;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
		vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
	page_last = vma_pages(vma) + vma->vm_pgoff -
		drm_vma_node_start(&bo->vma_node);

	if (unlikely(page_offset >= bo->num_pages)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	/*
	 * Make a local vma copy to modify the page_prot member
	 * and vm_flags if necessary. The vma parameter is protected
	 * by mmap_sem in write mode.
	 */
	cvma = *vma;
	cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);

	if (bo->mem.bus.is_iomem) {
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);
	} else {
		ttm = bo->ttm;
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);

		/* Allocate all pages at once, the most common usage */
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_FAULT_OOM;
			goto out_io_unlock;
		}
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */
	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
		if (bo->mem.bus.is_iomem)
			pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
		else {
			page = ttm->pages[page_offset];
			if (unlikely(!page && i == 0)) {
				retval = VM_FAULT_OOM;
				goto out_io_unlock;
			} else if (unlikely(!page)) {
				break;
			}
			page->mapping = vma->vm_file->f_mapping;
			page->index = drm_vma_node_start(&bo->vma_node) +
				page_offset;
			pfn = page_to_pfn(page);
		}

		if (vma->vm_flags & VM_MIXEDMAP)
			ret = vm_insert_mixed(&cvma, address,
					__pfn_to_pfn_t(pfn, PFN_DEV));
		else
			ret = vm_insert_pfn(&cvma, address, pfn);

		/*
		 * Somebody beat us to this PTE, or we prefaulted into an
		 * already populated PTE, or a prefaulting error occurred.
		 */

		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
			break;
		else if (unlikely(ret != 0)) {
			retval =
			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
			goto out_io_unlock;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
out_io_unlock:
	ttm_mem_io_unlock(man);
out_unlock:
	ttm_bo_unreserve(bo);
	return retval;
#endif
}

/* ttm_bo_vm_ops not currently used, no entry should occur */
static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

#if 0
	WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
#endif

	(void)ttm_bo_reference(bo);
}

/* ttm_bo_vm_ops not currently used, no entry should occur */
static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;

	ttm_bo_unref(&bo);
	vma->vm_private_data = NULL;
}

static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close
};

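/*
 * Translate a page-based mmap offset into the buffer object occupying
 * that range in bdev's vma offset manager.  A reference is taken with
 * kref_get_unless_zero() so a bo racing towards destruction is never
 * returned.
 */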
static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
						  unsigned long offset,
						  unsigned long pages)
{
	struct drm_vma_offset_node *node;
	struct ttm_buffer_object *bo = NULL;

	drm_vma_offset_lock_lookup(&bdev->vma_manager);

	node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
	if (likely(node)) {
		bo = container_of(node, struct ttm_buffer_object, vma_node);
		if (!kref_get_unless_zero(&bo->kref))
			bo = NULL;
	}

	drm_vma_offset_unlock_lookup(&bdev->vma_manager);

	if (!bo)
		pr_err("Could not find buffer object to map\n");

	return bo;
}

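/*
 * Back a vma with a buffer object: look the bo up from the file offset,
 * let the driver verify access rights, then hand the bo reference over
 * to vma->vm_private_data and install ttm_bo_vm_ops.
 */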
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
	if (unlikely(!bo))
		return -EINVAL;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = bo;

	/*
	 * We'd like to use VM_PFNMAP on shared mappings, where
	 * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
	 * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
	 * bad for performance. Until that has been sorted out, use
	 * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
	 */
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);

int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND;
	return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);

/*
 * DragonFlyBSD Interface
 */

#include "opt_vm.h"

#include <linux/errno.h>
#include <linux/export.h>

/*
 * NOTE: This code is fragile.  This code can only be entered with *mres
 *	 not NULL when *mres is a placeholder page allocated by the kernel.
 */
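/*
 * NOTE: This is the cdev_pg_fault callback: it resolves offset to a
 *	 vm_page and returns VM_PAGER_OK with that page busied in *mres,
 *	 or VM_PAGER_ERROR if the fault cannot be satisfied.
 */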
static int
ttm_bo_vm_fault_dfly(vm_object_t vm_obj, vm_ooffset_t offset,
		     int prot, vm_page_t *mres)
{
	struct ttm_buffer_object *bo = vm_obj->handle;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_tt *ttm = NULL;
	vm_page_t m, mtmp;
	int ret;
	int retval = VM_PAGER_OK;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];
	struct vm_area_struct cvma;

	/*
	 * The Linux code expects to receive these arguments:
	 * - struct vm_area_struct *vma
	 * - struct vm_fault *vmf
	 */
#ifdef __DragonFly__
	struct vm_area_struct vmas;
	struct vm_area_struct *vma = &vmas;
	struct vm_fault vmfs;
	struct vm_fault *vmf = &vmfs;

	memset(vma, 0, sizeof(*vma));
	memset(vmf, 0, sizeof(*vmf));
#endif
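	/*
	 * The zeroed stubs are consulted only for vmf->flags.  With
	 * flags == 0 the Linux FAULT_FLAG_ALLOW_RETRY paths are skipped
	 * in ttm_bo_vm_fault_idle(), and the "|| 1" below forces the
	 * DragonFly retry path regardless.
	 */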

	vm_object_pip_add(vm_obj, 1);

	/*
	 * We must atomically clean up any possible placeholder page to avoid
	 * the DRM subsystem attempting to use it.  We can determine if this
	 * is a placeholder page by checking m->valid.
	 *
	 * We have to do this before any potential fault_reserve_notify()
	 * which might try to free the map (and thus deadlock on our busy
	 * page).
	 */
	m = *mres;
	*mres = NULL;
	if (m) {
		if (m->valid == VM_PAGE_BITS_ALL) {
			/* actual page */
			vm_page_wakeup(m);
		} else {
			/* placeholder page */
			KKASSERT((m->flags & PG_FICTITIOUS) == 0);
			vm_page_remove(m);
			vm_page_free(m);
		}
	}

retry:
	VM_OBJECT_UNLOCK(vm_obj);
	m = NULL;

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after waiting
	 * for the buffer to become unreserved.
	 */
	ret = ttm_bo_reserve(bo, true, true, NULL);
	if (unlikely(ret != 0)) {
		if (ret != -EBUSY) {
			retval = VM_PAGER_ERROR;
			VM_OBJECT_LOCK(vm_obj);
			goto out_unlock2;
		}

		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY || 1) {
			/* always true on DragonFly, vmf->flags is zeroed */
			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
#if 0
				up_read(&vma->vm_mm->mmap_sem);
#endif
				(void) ttm_bo_wait_unreserved(bo);
			}

#ifndef __DragonFly__
			return VM_FAULT_RETRY;
#else
			VM_OBJECT_LOCK(vm_obj);
			lwkt_yield();
			goto retry;
#endif
		}

		/*
		 * If we wanted to change the locking order to
		 * mmap_sem -> bo::reserve, we'd use a blocking reserve
		 * here instead of retrying the fault...
		 */
#ifndef __DragonFly__
		return VM_FAULT_NOPAGE;
#else
		retval = VM_PAGER_ERROR;
		VM_OBJECT_LOCK(vm_obj);
		goto out_unlock2;
#endif
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
			lwkt_yield();
			/* fall through */
		case -ERESTARTSYS:
		case -EINTR:
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		default:
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */
	ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
	if (unlikely(ret != 0)) {
		retval = ret;
#ifdef __DragonFly__
		retval = VM_PAGER_ERROR;
#endif
		goto out_unlock;
	}

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_io_unlock;
	}
	if (unlikely(OFF_TO_IDX(offset) >= bo->num_pages)) {
		retval = VM_PAGER_ERROR;
		goto out_io_unlock;
	}

	/*
	 * Lookup the real page.
	 *
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and
	 * consider those bits protected by the bo->mutex, as we should
	 * be the only writers.  There shouldn't really be any readers
	 * of these bits except within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */

	/*
	 * Make a local vma copy to modify the page_prot member
	 * and vm_flags if necessary. The vma parameter is protected
	 * by mmap_sem in write mode.
	 */
	cvma = *vma;
#if 0
	cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);
#else
	cvma.vm_page_prot = 0;
#endif

	if (bo->mem.bus.is_iomem) {
#ifdef __DragonFly__
		m = vm_phys_fictitious_to_vm_page(bo->mem.bus.base +
						  bo->mem.bus.offset + offset);
		pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement, 0));
#endif
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);
	} else {
		ttm = bo->ttm;
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);

		/* Allocate all pages at once, the most common usage */
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}

		m = (struct vm_page *)ttm->pages[OFF_TO_IDX(offset)];
		if (unlikely(!m)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
		pmap_page_set_memattr(m,
		    (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement, 0));
	}

	VM_OBJECT_LOCK(vm_obj);

	if (vm_page_busy_try(m, FALSE)) {
		kprintf("r");
		vm_page_sleep_busy(m, FALSE, "ttmvmf");
		ttm_mem_io_unlock(man);
		ttm_bo_unreserve(bo);
		goto retry;
	}

	/*
	 * We want our fake page in the VM object, not the page the OS
	 * allocated for us as a placeholder.
	 */
	m->valid = VM_PAGE_BITS_ALL;
	*mres = m;

	/*
	 * Insert the page into the object if not already inserted.
	 */
	if (m->object) {
		if (m->object != vm_obj || m->pindex != OFF_TO_IDX(offset)) {
			retval = VM_PAGER_ERROR;
			kprintf("ttm_bo_vm_fault_dfly: m(%p) already inserted "
				"in obj %p, attempt obj %p\n",
				m, m->object, vm_obj);
			while (drm_unstall == 0) {
				tsleep(&retval, 0, "DEBUG", hz/10);
			}
			if (drm_unstall > 0)
				--drm_unstall;
		}
	} else {
		mtmp = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
		if (mtmp == NULL) {
			vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
		} else {
			panic("inconsistent insert bo %p m %p mtmp %p "
			      "offset %jx",
			      bo, m, mtmp,
			      (uintmax_t)offset);
		}
	}

out_io_unlock1:
	ttm_mem_io_unlock(man);
out_unlock1:
	ttm_bo_unreserve(bo);
out_unlock2:
	vm_object_pip_wakeup(vm_obj);
	return (retval);

out_io_unlock:
	VM_OBJECT_LOCK(vm_obj);
	goto out_io_unlock1;

out_unlock:
	VM_OBJECT_LOCK(vm_obj);
	goto out_unlock1;
}

static int
ttm_bo_vm_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
	       vm_ooffset_t foff, struct ucred *cred, u_short *color)
{

	/*
	 * On Linux, a reference to the buffer object is acquired here.
	 * The reason is that this function is not called when the
	 * mmap() is initialized, but only when a process forks for
	 * instance. Therefore on Linux, the reference on the bo is
	 * acquired either in ttm_bo_mmap() or ttm_bo_vm_open(). It's
	 * then released in ttm_bo_vm_close().
	 *
	 * Here, this function is called during mmap() initialization.
	 * Thus, the reference acquired in ttm_bo_mmap_single() is
	 * sufficient.
	 */
	*color = 0;
	return (0);
}

static void
ttm_bo_vm_dtor(void *handle)
{
	struct ttm_buffer_object *bo = handle;

	ttm_bo_unref(&bo);
}

static struct cdev_pager_ops ttm_pager_ops = {
	.cdev_pg_fault = ttm_bo_vm_fault_dfly,
	.cdev_pg_ctor = ttm_bo_vm_ctor,
	.cdev_pg_dtor = ttm_bo_vm_dtor
};

/*
 * Called from drm_drv.c
 *
 * *offset - object offset in bytes
 * size	   - map size in bytes
 *
 * We set up a dummy vma (for now) and call ttm_bo_mmap().  Then we set up
 * our own VM object and dfly ops.  Note that the ops supplied by
 * ttm_bo_mmap() are not currently used.
 */
int
ttm_bo_mmap_single(struct drm_device *dev, vm_ooffset_t *offset,
		   vm_size_t size, struct vm_object **obj_res, int nprot)
{
	struct ttm_bo_device *bdev = dev->drm_ttm_bdev;
	struct ttm_buffer_object *bo;
	struct vm_object *vm_obj;
	struct vm_area_struct vma;
	int ret;

	*obj_res = NULL;

	bzero(&vma, sizeof(vma));
	vma.vm_start = *offset;		/* bdev-relative offset */
	vma.vm_end = vma.vm_start + size;
	vma.vm_pgoff = vma.vm_start >> PAGE_SHIFT;
	/* vma.vm_page_prot */
	/* vma.vm_flags */

	/*
	 * Call the linux-ported code to do the work, and on success just
	 * set up our own VM object and ignore what the linux code did other
	 * than supplying us the 'bo'.
	 */
	ret = ttm_bo_mmap(NULL, &vma, bdev);

	if (ret == 0) {
		bo = vma.vm_private_data;
		vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE,
					     &ttm_pager_ops,
					     size, nprot, 0,
					     curthread->td_ucred);
		if (vm_obj) {
			*obj_res = vm_obj;
			*offset = 0;		/* object-relative offset */
		} else {
			ttm_bo_unref(&bo);
			ret = EINVAL;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap_single);
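
/*
 * Illustrative sketch only, never compiled: roughly how a driver's
 * d_mmap_single entry point (in drm_drv.c) forwards to
 * ttm_bo_mmap_single().  The handler name, the helper used to recover
 * the drm_device from the cdev, and the argument field names are
 * hypothetical.
 */
#if 0
static int
example_drm_mmap_single(struct dev_mmap_single_args *ap)
{
	/* hypothetical lookup of the drm_device behind the cdev */
	struct drm_device *dev = example_cdev_to_drm_device(ap->a_head.a_dev);

	return (ttm_bo_mmap_single(dev, ap->a_offset, ap->a_size,
				   ap->a_object, ap->a_nprot));
}
#endif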

#ifdef __DragonFly__
void ttm_bo_release_mmap(struct ttm_buffer_object *bo);

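/*
 * Tear down any pager pages still associated with the bo's VM object so
 * that no user mapping survives the buffer's destruction, then drop the
 * object reference obtained from cdev_pager_lookup().
 */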
void
ttm_bo_release_mmap(struct ttm_buffer_object *bo)
{
	vm_object_t vm_obj;
	vm_page_t m;
	int i;

	vm_obj = cdev_pager_lookup(bo);
	if (vm_obj == NULL)
		return;

	VM_OBJECT_LOCK(vm_obj);
	for (i = 0; i < bo->num_pages; i++) {
		m = vm_page_lookup_busy_wait(vm_obj, i, TRUE, "ttm_unm");
		if (m == NULL)
			continue;
		cdev_pager_free_page(vm_obj, m);
	}
	VM_OBJECT_UNLOCK(vm_obj);

	vm_object_deallocate(vm_obj);
}
#endif