xref: /dragonfly/sys/dev/drm/ttm/ttm_bo_vm.c (revision 55f88487)
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/mem_encrypt.h>

#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_page2.h>

#define TTM_BO_VM_NUM_PREFAULT 16

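/*
 * Wait for a pipelined move of the buffer (bo->moving) to finish before
 * the fault handler maps its pages.  Returns 0 once the buffer is idle,
 * VM_FAULT_RETRY when the caller should retry without mmap_sem held, and
 * VM_FAULT_SIGBUS or VM_FAULT_NOPAGE when the wait fails.
 */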
static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
				struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	int ret = 0;

	if (likely(!bo->moving))
		goto out_unlock;

	/*
	 * Quick non-stalling check for idle.
	 */
	if (dma_fence_is_signaled(bo->moving))
		goto out_clear;

	/*
	 * If possible, avoid waiting for GPU with mmap_sem
	 * held.
	 */
	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
		ret = VM_FAULT_RETRY;
		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_unlock;

		ttm_bo_get(bo);
		up_read(&vmf->vma->vm_mm->mmap_sem);		/* release */
		(void) dma_fence_wait(bo->moving, true);
		ttm_bo_unreserve(bo);
		ttm_bo_put(bo);
		down_read(&vmf->vma->vm_mm->mmap_sem);		/* acquire */
		goto out_unlock;
	}

	/*
	 * Ordinary wait.
	 */
	ret = dma_fence_wait(bo->moving, true);
	if (unlikely(ret != 0)) {
		ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
			VM_FAULT_NOPAGE;
		goto out_unlock;
	}

out_clear:
	dma_fence_put(bo->moving);
	bo->moving = NULL;

out_unlock:
	return ret;
}

/*
 * Always unstall on an unexpected vm_page alias (fatal bus fault).
 * Set to 0 to stall, or to a positive count to unstall N times and
 * then stall again.
 */
static int drm_unstall = -1;
SYSCTL_INT(_debug, OID_AUTO, unstall, CTLFLAG_RW, &drm_unstall, 0, "");
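/* Tunable at run time, e.g.: sysctl debug.unstall=10 */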

static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	/* see ttm_bo_mmap_single() at end of this file */
	/* ttm_bo_vm_ops not currently used, no entry should occur */
	panic("ttm_bo_vm_fault");
#if 0
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int ret;
	int i;
	unsigned long address = vmf->address;
	int retval = VM_FAULT_NOPAGE;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];
	struct vm_area_struct cvma;

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after waiting
	 * for the buffer to become unreserved.
	 */
	ret = ttm_bo_reserve(bo, true, true, NULL);
	if (unlikely(ret != 0)) {
		if (ret != -EBUSY)
			return VM_FAULT_NOPAGE;

		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
				ttm_bo_reference(bo);
				up_read(&vma->vm_mm->mmap_sem);
				(void) ttm_bo_wait_unreserved(bo);
				ttm_bo_unref(&bo);
			}

			return VM_FAULT_RETRY;
		}

		/*
		 * If we'd want to change locking order to
		 * mmap_sem -> bo::reserve, we'd use a blocking reserve here
		 * instead of retrying the fault...
		 */
		return VM_FAULT_NOPAGE;
	}

	/*
	 * Refuse to fault imported pages. This should be handled
	 * (if at all) by redirecting mmap to the exporter.
	 */
	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
		retval = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTARTSYS:
			retval = VM_FAULT_NOPAGE;
			goto out_unlock;
		default:
			retval = VM_FAULT_SIGBUS;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
	if (unlikely(ret != 0)) {
		retval = ret;

		if (retval == VM_FAULT_RETRY &&
		    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
			/* The BO has already been unreserved. */
			return retval;
		}

		goto out_unlock;
	}

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_NOPAGE;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
		vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
	page_last = vma_pages(vma) + vma->vm_pgoff -
		drm_vma_node_start(&bo->vma_node);

	if (unlikely(page_offset >= bo->num_pages)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	/*
	 * Make a local vma copy to modify the page_prot member
	 * and vm_flags if necessary. The vma parameter is protected
	 * by mmap_sem in write mode.
	 */
	cvma = *vma;
	cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);

	if (bo->mem.bus.is_iomem) {
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);
	} else {
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false,
			.flags = TTM_OPT_FLAG_FORCE_ALLOC

		};

		ttm = bo->ttm;
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);

		/* Allocate all pages at once, the most common usage */
		if (ttm_tt_populate(ttm, &ctx)) {
			retval = VM_FAULT_OOM;
			goto out_io_unlock;
		}
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */
	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
		if (bo->mem.bus.is_iomem)
			pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
		else {
			page = ttm->pages[page_offset];
			if (unlikely(!page && i == 0)) {
				retval = VM_FAULT_OOM;
				goto out_io_unlock;
			} else if (unlikely(!page)) {
				break;
			}
			page->mapping = vma->vm_file->f_mapping;
			page->index = drm_vma_node_start(&bo->vma_node) +
				page_offset;
			pfn = page_to_pfn(page);
		}

		if (vma->vm_flags & VM_MIXEDMAP)
			ret = vm_insert_mixed(&cvma, address,
					__pfn_to_pfn_t(pfn, PFN_DEV));
		else
			ret = vm_insert_pfn(&cvma, address, pfn);

		/*
		 * Somebody beat us to this PTE, we prefaulted into an
		 * already-populated PTE, or a prefault error occurred.
		 */

		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
			break;
		else if (unlikely(ret != 0)) {
			retval =
			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
			goto out_io_unlock;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
out_io_unlock:
	ttm_mem_io_unlock(man);
out_unlock:
	ttm_bo_unreserve(bo);
	return retval;
#endif
}

/* ttm_bo_vm_ops not currently used, no entry should occur */
static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

#if 0
	WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
#endif

	ttm_bo_get(bo);
}

/* ttm_bo_vm_ops not currently used, no entry should occur */
static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;

	ttm_bo_put(bo);
	vma->vm_private_data = NULL;
}

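/*
 * Copy data between 'buf' and the object's backing pages through
 * temporary kmaps, one page at a time so no long-lived virtual mapping
 * is required.  Returns 'len' on success or a negative errno.
 */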
static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
				 unsigned long offset,
				 uint8_t *buf, int len, int write)
{
	unsigned long page = offset >> PAGE_SHIFT;
	unsigned long bytes_left = len;
	int ret;

	/* Copy a page at a time, that way no extra virtual address
	 * mapping is needed
	 */
	offset -= page << PAGE_SHIFT;
	do {
		unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
		struct ttm_bo_kmap_obj map;
		void *ptr;
		bool is_iomem;

		ret = ttm_bo_kmap(bo, page, 1, &map);
		if (ret)
			return ret;

		ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
		WARN_ON_ONCE(is_iomem);
		if (write)
			memcpy(ptr, buf, bytes);
		else
			memcpy(buf, ptr, bytes);
		ttm_bo_kunmap(&map);

		page++;
		buf += bytes;
		bytes_left -= bytes;
		offset = 0;
	} while (bytes_left);

	return len;
}

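/*
 * vm_operations_struct .access hook (used e.g. for ptrace-style access
 * to a TTM mapping).  System and TT placements are accessed through
 * kmaps; other placements are forwarded to the driver's access_memory()
 * hook when it exists, otherwise -EIO is returned.
 */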
static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
			    void *buf, int len, int write)
{
	unsigned long offset = (addr) - vma->vm_start;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	int ret;

	if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
		return -EIO;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (ret)
		return ret;

	switch (bo->mem.mem_type) {
	case TTM_PL_SYSTEM:
		if (unlikely(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
			ret = ttm_tt_swapin(bo->ttm);
			if (unlikely(ret != 0)) {
				/* don't leak the reservation taken above */
				ttm_bo_unreserve(bo);
				return ret;
			}
		}
		/* fall through */
	case TTM_PL_TT:
		ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
		break;
	default:
		if (bo->bdev->driver->access_memory)
			ret = bo->bdev->driver->access_memory(
				bo, offset, buf, len, write);
		else
			ret = -EIO;
	}

	ttm_bo_unreserve(bo);

	return ret;
}

static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};

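/*
 * Translate an mmap offset/page range into a referenced buffer object
 * using the device's vma offset manager.  Returns NULL (and logs an
 * error) if no object covers the range or the object is being torn down.
 */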
static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
						  unsigned long offset,
						  unsigned long pages)
{
	struct drm_vma_offset_node *node;
	struct ttm_buffer_object *bo = NULL;

	drm_vma_offset_lock_lookup(&bdev->vma_manager);

	node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
	if (likely(node)) {
		bo = container_of(node, struct ttm_buffer_object, vma_node);
		if (!kref_get_unless_zero(&bo->kref))
			bo = NULL;
	}

	drm_vma_offset_unlock_lookup(&bdev->vma_manager);

	if (!bo)
		pr_err("Could not find buffer object to map\n");

	return bo;
}

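/*
 * Set up a Linux-style mapping of a buffer object: look the bo up from
 * the mmap offset, let the driver verify access rights, and install
 * ttm_bo_vm_ops.  The reference taken by the lookup is handed over to
 * vma->vm_private_data.
 */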
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
	if (unlikely(!bo))
		return -EINVAL;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = bo;

	/*
	 * We'd like to use VM_PFNMAP on shared mappings, where
	 * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
	 * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
	 * bad for performance. Until that has been sorted out, use
	 * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
	 */
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
out_unref:
	ttm_bo_put(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);

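/*
 * fbdev variant of ttm_bo_mmap(): the bo is already known, so no offset
 * lookup or verify_access step is needed; only offset 0 is allowed.
 */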
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	ttm_bo_get(bo);

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = bo;
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND;
	return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);

/*
 * DragonFlyBSD Interface
 */

/*
 * NOTE: This code is fragile.  This code can only be entered with *mres
 *	 not NULL when *mres is a placeholder page allocated by the kernel.
 */
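/*
 * On success the real page backing 'offset' is returned busied in *mres
 * (marked fully valid) and the caller enters it into the pmap; errors
 * return VM_PAGER_ERROR.
 */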
static int
ttm_bo_vm_fault_dfly(vm_object_t vm_obj, vm_ooffset_t offset,
		     int prot, vm_page_t *mres)
{
	struct ttm_buffer_object *bo = vm_obj->handle;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_tt *ttm = NULL;
	vm_page_t m;
	int ret;
	int retval = VM_PAGER_OK;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];
	struct vm_area_struct cvma;

/*
 * The Linux code expects to receive these arguments:
 * - struct vm_area_struct *vma
 * - struct vm_fault *vmf
 */
#ifdef __DragonFly__
	struct vm_area_struct vmas;
	struct vm_area_struct *vma = &vmas;
	struct vm_fault vmfs;
	struct vm_fault *vmf = &vmfs;

	memset(vma, 0, sizeof(*vma));
	memset(vmf, 0, sizeof(*vmf));

	vma->vm_mm = current->mm;
	vmf->vma = vma;

	int retry_count = 0;
#endif

	vm_object_pip_add(vm_obj, 1);

	/*
	 * OBJT_MGTDEVICE does not pre-allocate the page.
	 */
	KKASSERT(*mres == NULL);

retry:
	/* The Linux page fault handler acquires mmap_sem */
	down_read(&vma->vm_mm->mmap_sem);

	m = NULL;

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after waiting
	 * for the buffer to become unreserved.
	 */
	ret = ttm_bo_reserve(bo, true, true, NULL);
	if (unlikely(ret != 0)) {
		if (ret != -EBUSY) {
			retval = VM_PAGER_ERROR;
			goto out_unlock2;
		}

		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY || 1) {
			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
				up_read(&vma->vm_mm->mmap_sem);
				(void) ttm_bo_wait_unreserved(bo);
				down_read(&vma->vm_mm->mmap_sem);
			}

#ifndef __DragonFly__
			return VM_FAULT_RETRY;
#else
			up_read(&vma->vm_mm->mmap_sem);
			lwkt_yield();
			goto retry;
#endif
		}

		/*
		 * If we'd want to change locking order to
		 * mmap_sem -> bo::reserve, we'd use a blocking reserve here
		 * instead of retrying the fault...
		 */
		retval = VM_PAGER_ERROR;
		goto out_unlock2;
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
			lwkt_yield();
			/* fall through */
		case -ERESTARTSYS:
		case -EINTR:
			retval = VM_PAGER_ERROR;
			goto out_unlock1;
		default:
			retval = VM_PAGER_ERROR;
			goto out_unlock1;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */
	ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
	if (unlikely(ret != 0)) {
		if (retry_count >= 100) {
			retval = VM_PAGER_ERROR;
			goto out_unlock1;
		} else {
			retry_count++;
			ttm_bo_unreserve(bo);
			up_read(&vma->vm_mm->mmap_sem);
			int dummy;
			tsleep(&dummy, 0, "blah", 1);

			goto retry;
		}
	}

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_unlock1;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_io_unlock1;
	}
	if (unlikely(OFF_TO_IDX(offset) >= bo->num_pages)) {
		retval = VM_PAGER_ERROR;
		goto out_io_unlock1;
	}

	/*
	 * Lookup the real page.
	 *
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and
	 * consider those bits protected by
	 * the bo->mutex, as we should be the only writers.
	 * There shouldn't really be any readers of these bits except
	 * within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */

	/*
	 * Make a local vma copy to modify the page_prot member
	 * and vm_flags if necessary. The vma parameter is protected
	 * by mmap_sem in write mode.
	 */
	cvma = *vma;
#if 0
	cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);
#else
	cvma.vm_page_prot = 0;
#endif

	if (bo->mem.bus.is_iomem) {
#ifdef __DragonFly__
		m = vm_phys_fictitious_to_vm_page(bo->mem.bus.base +
						  bo->mem.bus.offset + offset);
		pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement, 0));
#endif
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);
	} else {
		ttm = bo->ttm;
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);

		/* Allocate all pages at once, the most common usage */
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false,
			.flags = TTM_OPT_FLAG_FORCE_ALLOC

		};
		if (ttm_tt_populate(ttm, &ctx)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock1;
		}

		m = (struct vm_page *)ttm->pages[OFF_TO_IDX(offset)];
		if (unlikely(!m)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock1;
		}
		pmap_page_set_memattr(m,
		    (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement, 0));
	}

	if (vm_page_busy_try(m, FALSE)) {
		kprintf("r");
		vm_page_sleep_busy(m, FALSE, "ttmvmf");
		ttm_mem_io_unlock(man);
		ttm_bo_unreserve(bo);
		up_read(&vma->vm_mm->mmap_sem);
		goto retry;
	}

	/*
	 * Return our fake page BUSYd.  Do not index it into the VM object.
	 * The caller will enter it into the pmap.
	 */
	m->valid = VM_PAGE_BITS_ALL;
	*mres = m;

out_io_unlock1:
	ttm_mem_io_unlock(man);
out_unlock1:
	ttm_bo_unreserve(bo);
out_unlock2:
	vm_object_pip_wakeup(vm_obj);
	up_read(&vma->vm_mm->mmap_sem);

	return (retval);
}

static int
ttm_bo_vm_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
	       vm_ooffset_t foff, struct ucred *cred, u_short *color)
{

	/*
	 * On Linux, a reference to the buffer object is acquired here.
	 * The reason is that this function is not called when the
	 * mmap() is initialized, but only when a process forks, for
	 * instance. Therefore on Linux, the reference on the bo is
	 * acquired either in ttm_bo_mmap() or ttm_bo_vm_open(). It's
	 * then released in ttm_bo_vm_close().
	 *
	 * Here, this function is called during mmap() initialization.
	 * Thus, the reference acquired in ttm_bo_mmap_single() is
	 * sufficient.
	 */
	*color = 0;
	return (0);
}

static void
ttm_bo_vm_dtor(void *handle)
{
	struct ttm_buffer_object *bo = handle;

	ttm_bo_unref(&bo);
}

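/*
 * Pager ops for the OBJT_MGTDEVICE VM object created in
 * ttm_bo_mmap_single() below.
 */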
static struct cdev_pager_ops ttm_pager_ops = {
	.cdev_pg_fault = ttm_bo_vm_fault_dfly,
	.cdev_pg_ctor = ttm_bo_vm_ctor,
	.cdev_pg_dtor = ttm_bo_vm_dtor
};

/*
 * Called from drm_drv.c
 *
 * *offset - object offset in bytes
 * size	   - map size in bytes
 *
 * We set up a dummy vma (for now) and call ttm_bo_mmap().  Then we set up
 * our own VM object and DragonFly pager ops.  Note that the vm_ops supplied
 * by ttm_bo_mmap() are not currently used.
 */
int
ttm_bo_mmap_single(struct file *fp, struct drm_device *dev,
		   vm_ooffset_t *offset, vm_size_t size,
		   struct vm_object **obj_res, int nprot)
{
	struct ttm_bo_device *bdev = dev->drm_ttm_bdev;
	struct ttm_buffer_object *bo;
	struct vm_object *vm_obj;
	struct vm_area_struct vma;
	int ret;

	*obj_res = NULL;

	bzero(&vma, sizeof(vma));
	vma.vm_start = *offset;		/* bdev-relative offset */
	vma.vm_end = vma.vm_start + size;
	vma.vm_pgoff = vma.vm_start >> PAGE_SHIFT;
	/* vma.vm_page_prot */
	/* vma.vm_flags */

	/*
	 * Call the Linux-ported code to do the work, and on success just
	 * set up our own VM object and ignore what the Linux code did
	 * other than supplying us the 'bo'.
	 */
	ret = ttm_bo_mmap(fp, &vma, bdev);

	if (ret == 0) {
		bo = vma.vm_private_data;
		vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE,
					     &ttm_pager_ops,
					     size, nprot, 0,
					     curthread->td_ucred);
		if (vm_obj) {
			*obj_res = vm_obj;
			*offset = 0;		/* object-relative offset */
		} else {
			ttm_bo_unref(&bo);
			ret = EINVAL;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap_single);

#ifdef __DragonFly__
void ttm_bo_release_mmap(struct ttm_buffer_object *bo);

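/*
 * Tear down the userspace mapping of a buffer object: look up the
 * MGTDEVICE pager object created by ttm_bo_mmap_single(), remove any
 * pages still entered into it, and drop the lookup reference.
 */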
void
ttm_bo_release_mmap(struct ttm_buffer_object *bo)
{
	vm_object_t vm_obj;

	vm_obj = cdev_pager_lookup(bo);
	if (vm_obj == NULL)
		return;

	VM_OBJECT_LOCK(vm_obj);
#if 1
	vm_object_page_remove(vm_obj, 0, 0, false);
#else
	/*
	 * XXX REMOVED
	 *
	 * We no longer manage the vm pages inside the MGTDEVICE
	 * objects.
	 */
	vm_page_t m;
	int i;

	for (i = 0; i < bo->num_pages; i++) {
		m = vm_page_lookup_busy_wait(vm_obj, i, TRUE, "ttm_unm");
		if (m == NULL)
			continue;
		cdev_pager_free_page(vm_obj, m);
	}
#endif
	VM_OBJECT_UNLOCK(vm_obj);

	vm_object_deallocate(vm_obj);
}
#endif

#if 0
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);
#endif