xref: /dragonfly/sys/dev/drm/ttm/ttm_bo_vm.c (revision 5ca0a96d)
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <ttm/ttm_module.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/mem_encrypt.h>

#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_page2.h>

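/*
 * Number of pages the fault handler will try to map ahead of the faulting
 * page in a single fault.
 */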
#define TTM_BO_VM_NUM_PREFAULT 16

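/*
 * Wait for a pipelined buffer move to finish before letting the fault
 * proceed.  Returns 0 once the buffer is idle, VM_FAULT_RETRY when the
 * fault should be retried (mmap_sem, and possibly the reservation, has
 * already been dropped), or VM_FAULT_SIGBUS/VM_FAULT_NOPAGE if waiting
 * for the move fence failed.
 */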
static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
				struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	int ret = 0;

	if (likely(!bo->moving))
		goto out_unlock;

	/*
	 * Quick non-stalling check for idle.
	 */
	if (dma_fence_is_signaled(bo->moving))
		goto out_clear;

	/*
	 * If possible, avoid waiting for GPU with mmap_sem
	 * held.
	 */
	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
		ret = VM_FAULT_RETRY;
		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_unlock;

		ttm_bo_reference(bo);
		up_read(&vma->vm_mm->mmap_sem);
		(void) dma_fence_wait(bo->moving, true);
		ttm_bo_unreserve(bo);
		ttm_bo_unref(&bo);
		goto out_unlock;
	}

	/*
	 * Ordinary wait.
	 */
	ret = dma_fence_wait(bo->moving, true);
	if (unlikely(ret != 0)) {
		ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
			VM_FAULT_NOPAGE;
		goto out_unlock;
	}

out_clear:
	dma_fence_put(bo->moving);
	bo->moving = NULL;

out_unlock:
	return ret;
}

/*
 * Always unstall on unexpected vm_page alias, fatal bus fault.
 * Set to 0 to stall, set to positive count to unstall N times,
 * then stall again.
 */
static int drm_unstall = -1;
SYSCTL_INT(_debug, OID_AUTO, unstall, CTLFLAG_RW, &drm_unstall, 0, "");

static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	/* see ttm_bo_mmap_single() at end of this file */
	/* ttm_bo_vm_ops not currently used, no entry should occur */
	panic("ttm_bo_vm_fault");
#if 0
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int ret;
	int i;
	unsigned long address = vmf->address;
	int retval = VM_FAULT_NOPAGE;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];
	struct vm_area_struct cvma;

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after waiting
	 * for the buffer to become unreserved.
	 */
	ret = ttm_bo_reserve(bo, true, true, NULL);
	if (unlikely(ret != 0)) {
		if (ret != -EBUSY)
			return VM_FAULT_NOPAGE;

		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
				ttm_bo_reference(bo);
				up_read(&vma->vm_mm->mmap_sem);
				(void) ttm_bo_wait_unreserved(bo);
				ttm_bo_unref(&bo);
			}

			return VM_FAULT_RETRY;
		}

		/*
		 * If we'd want to change locking order to
		 * mmap_sem -> bo::reserve, we'd use a blocking reserve here
		 * instead of retrying the fault...
		 */
		return VM_FAULT_NOPAGE;
	}

	/*
	 * Refuse to fault imported pages. This should be handled
	 * (if at all) by redirecting mmap to the exporter.
	 */
	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
		retval = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTARTSYS:
			retval = VM_FAULT_NOPAGE;
			goto out_unlock;
		default:
			retval = VM_FAULT_SIGBUS;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
	if (unlikely(ret != 0)) {
		retval = ret;

		if (retval == VM_FAULT_RETRY &&
		    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
			/* The BO has already been unreserved. */
			return retval;
		}

		goto out_unlock;
	}

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_NOPAGE;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
		vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
	page_last = vma_pages(vma) + vma->vm_pgoff -
		drm_vma_node_start(&bo->vma_node);

	if (unlikely(page_offset >= bo->num_pages)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	/*
	 * Make a local vma copy to modify the page_prot member
	 * and vm_flags if necessary. The vma parameter is protected
	 * by mmap_sem in write mode.
	 */
	cvma = *vma;
	cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);

	if (bo->mem.bus.is_iomem) {
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);
	} else {
		ttm = bo->ttm;
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);

		/* Allocate all pages at once, the most common usage */
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_FAULT_OOM;
			goto out_io_unlock;
		}
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */
	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
		if (bo->mem.bus.is_iomem)
			pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
		else {
			page = ttm->pages[page_offset];
			if (unlikely(!page && i == 0)) {
				retval = VM_FAULT_OOM;
				goto out_io_unlock;
			} else if (unlikely(!page)) {
				break;
			}
			page->mapping = vma->vm_file->f_mapping;
			page->index = drm_vma_node_start(&bo->vma_node) +
				page_offset;
			pfn = page_to_pfn(page);
		}

		if (vma->vm_flags & VM_MIXEDMAP)
			ret = vm_insert_mixed(&cvma, address,
					__pfn_to_pfn_t(pfn, PFN_DEV));
		else
			ret = vm_insert_pfn(&cvma, address, pfn);

		/*
		 * Somebody beat us to this PTE or prefaulting to
		 * an already populated PTE, or prefaulting error.
		 */

		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
			break;
		else if (unlikely(ret != 0)) {
			retval =
			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
			goto out_io_unlock;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
out_io_unlock:
	ttm_mem_io_unlock(man);
out_unlock:
	ttm_bo_unreserve(bo);
	return retval;
#endif
}

/* ttm_bo_vm_ops not currently used, no entry should occur */
static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

#if 0
	WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
#endif

	(void)ttm_bo_reference(bo);
}

/* ttm_bo_vm_ops not currently used, no entry should occur */
static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;

	ttm_bo_unref(&bo);
	vma->vm_private_data = NULL;
}

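/*
 * Helper for ttm_bo_vm_access(): copy between 'buf' and the BO one page at
 * a time through a temporary ttm_bo_kmap() mapping, so no long-lived
 * virtual mapping of the whole object is needed.  Returns 'len' on success
 * or a negative error code.
 */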
static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
				 unsigned long offset,
				 uint8_t *buf, int len, int write)
{
	unsigned long page = offset >> PAGE_SHIFT;
	unsigned long bytes_left = len;
	int ret;

	/* Copy a page at a time, that way no extra virtual address
	 * mapping is needed
	 */
	offset -= page << PAGE_SHIFT;
	do {
		unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
		struct ttm_bo_kmap_obj map;
		void *ptr;
		bool is_iomem;

		ret = ttm_bo_kmap(bo, page, 1, &map);
		if (ret)
			return ret;

		ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
		WARN_ON_ONCE(is_iomem);
		if (write)
			memcpy(ptr, buf, bytes);
		else
			memcpy(buf, ptr, bytes);
		ttm_bo_kunmap(&map);

		page++;
		buf += bytes;
		bytes_left -= bytes;
		offset = 0;
	} while (bytes_left);

	return len;
}

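/*
 * .access handler for ttm_bo_vm_ops: read or write 'len' bytes of the
 * buffer object backing a mapping (on Linux this is what backs
 * ptrace-style access to the mapping).  Placements that cannot be kmap'ed
 * here are delegated to the driver's access_memory() hook.
 */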
static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
			    void *buf, int len, int write)
{
	unsigned long offset = (addr) - vma->vm_start;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	int ret;

	if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
		return -EIO;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (ret)
		return ret;

	switch (bo->mem.mem_type) {
	case TTM_PL_SYSTEM:
		if (unlikely(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
			ret = ttm_tt_swapin(bo->ttm);
			if (unlikely(ret != 0))
				return ret;
		}
		/* fall through */
	case TTM_PL_TT:
		ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
		break;
	default:
		if (bo->bdev->driver->access_memory)
			ret = bo->bdev->driver->access_memory(
				bo, offset, buf, len, write);
		else
			ret = -EIO;
	}

	ttm_bo_unreserve(bo);

	return ret;
}

static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};

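/*
 * Translate an mmap offset range into the buffer object that owns it and
 * take a reference, or return NULL (and log an error) if no live object
 * covers the range.
 */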
static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
						  unsigned long offset,
						  unsigned long pages)
{
	struct drm_vma_offset_node *node;
	struct ttm_buffer_object *bo = NULL;

	drm_vma_offset_lock_lookup(&bdev->vma_manager);

	node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
	if (likely(node)) {
		bo = container_of(node, struct ttm_buffer_object, vma_node);
		if (!kref_get_unless_zero(&bo->kref))
			bo = NULL;
	}

	drm_vma_offset_unlock_lookup(&bdev->vma_manager);

	if (!bo)
		pr_err("Could not find buffer object to map\n");

	return bo;
}

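/*
 * Default conversion of a page offset within an iomem placement into the
 * physical page frame number backing it.
 */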
unsigned long ttm_bo_default_io_mem_pfn(struct ttm_buffer_object *bo,
					unsigned long page_offset)
{
	return ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT)
		+ page_offset;
}
EXPORT_SYMBOL(ttm_bo_default_io_mem_pfn);

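/*
 * Set up a mapping of a buffer object: look the BO up from the mmap offset,
 * let the driver verify access rights, then install ttm_bo_vm_ops and hand
 * the reference over to vma->vm_private_data.
 */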
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
	if (unlikely(!bo))
		return -EINVAL;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = bo;

	/*
	 * We'd like to use VM_PFNMAP on shared mappings, where
	 * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
	 * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
	 * bad for performance. Until that has been sorted out, use
	 * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
	 */
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);

int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND;
	return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);

/*
 * DragonFlyBSD Interface
 */

#include "opt_vm.h"

/*
 * NOTE: This code is fragile.  This code can only be entered with *mres
 *	 not NULL when *mres is a placeholder page allocated by the kernel.
 */
static int
ttm_bo_vm_fault_dfly(vm_object_t vm_obj, vm_ooffset_t offset,
		     int prot, vm_page_t *mres)
{
	struct ttm_buffer_object *bo = vm_obj->handle;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_tt *ttm = NULL;
	vm_page_t m;
	int ret;
	int retval = VM_PAGER_OK;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];
	struct vm_area_struct cvma;

/*
 * The Linux code expects to receive these arguments:
 * - struct vm_area_struct *vma
 * - struct vm_fault *vmf
 */
#ifdef __DragonFly__
	struct vm_area_struct vmas;
	struct vm_area_struct *vma = &vmas;
	struct vm_fault vmfs;
	struct vm_fault *vmf = &vmfs;

	memset(vma, 0, sizeof(*vma));
	memset(vmf, 0, sizeof(*vmf));

	vma->vm_mm = current->mm;
	vmf->vma = vma;
#endif

	vm_object_pip_add(vm_obj, 1);

	/*
	 * OBJT_MGTDEVICE does not pre-allocate the page.
	 */
	KKASSERT(*mres == NULL);

retry:
	/* The Linux page fault handler acquires mmap_sem */
	down_read(&vma->vm_mm->mmap_sem);

	m = NULL;

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after waiting
	 * for the buffer to become unreserved.
	 */
	ret = ttm_bo_reserve(bo, true, true, NULL);
	if (unlikely(ret != 0)) {
		if (ret != -EBUSY) {
			retval = VM_PAGER_ERROR;
			goto out_unlock2;
		}

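		/*
		 * The "|| 1" below forces the retry path: the faked-up vmf
		 * is zeroed above, so FAULT_FLAG_ALLOW_RETRY is never
		 * actually set on DragonFly.
		 */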
		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY || 1) {
			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
				up_read(&vma->vm_mm->mmap_sem);
				(void) ttm_bo_wait_unreserved(bo);
			}

#ifndef __DragonFly__
			return VM_FAULT_RETRY;
#else
			up_read(&vma->vm_mm->mmap_sem);
			lwkt_yield();
			goto retry;
#endif
		}

		/*
		 * If we'd want to change locking order to
		 * mmap_sem -> bo::reserve, we'd use a blocking reserve here
		 * instead of retrying the fault...
		 */
		retval = VM_PAGER_ERROR;
		goto out_unlock2;
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
			lwkt_yield();
			/* fall through */
		case -ERESTARTSYS:
		case -EINTR:
			retval = VM_PAGER_ERROR;
			goto out_unlock1;
		default:
			retval = VM_PAGER_ERROR;
			goto out_unlock1;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */
	ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_unlock1;
	}

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_unlock1;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_io_unlock1;
	}
	if (unlikely(OFF_TO_IDX(offset) >= bo->num_pages)) {
		retval = VM_PAGER_ERROR;
		goto out_io_unlock1;
	}

	/*
	 * Lookup the real page.
	 *
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and
	 * consider those bits protected by
	 * the bo->mutex, as we should be the only writers.
	 * There shouldn't really be any readers of these bits except
	 * within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */

	/*
	 * Make a local vma copy to modify the page_prot member
	 * and vm_flags if necessary. The vma parameter is protected
	 * by mmap_sem in write mode.
	 */
	cvma = *vma;
#if 0
	cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);
#else
	cvma.vm_page_prot = 0;
#endif

	if (bo->mem.bus.is_iomem) {
#ifdef __DragonFly__
		m = vm_phys_fictitious_to_vm_page(bo->mem.bus.base +
						  bo->mem.bus.offset + offset);
		pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement, 0));
#endif
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);
	} else {
		ttm = bo->ttm;
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);

		/* Allocate all pages at once, the most common usage */
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock1;
		}

		m = (struct vm_page *)ttm->pages[OFF_TO_IDX(offset)];
		if (unlikely(!m)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock1;
		}
		pmap_page_set_memattr(m,
		    (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement, 0));
	}

	if (vm_page_busy_try(m, FALSE)) {
		kprintf("r");
		vm_page_sleep_busy(m, FALSE, "ttmvmf");
		ttm_mem_io_unlock(man);
		ttm_bo_unreserve(bo);
		up_read(&vma->vm_mm->mmap_sem);
		goto retry;
	}

	/*
	 * Return our fake page BUSYd.  Do not index it into the VM object.
	 * The caller will enter it into the pmap.
	 */
	m->valid = VM_PAGE_BITS_ALL;
	*mres = m;

out_io_unlock1:
	ttm_mem_io_unlock(man);
out_unlock1:
	ttm_bo_unreserve(bo);
out_unlock2:
	vm_object_pip_wakeup(vm_obj);
	return (retval);
}

static int
ttm_bo_vm_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
	       vm_ooffset_t foff, struct ucred *cred, u_short *color)
{

	/*
	 * On Linux, a reference to the buffer object is acquired here.
	 * The reason is that this function is not called when the
	 * mmap() is initialized, but only when a process forks for
	 * instance. Therefore on Linux, the reference on the bo is
	 * acquired either in ttm_bo_mmap() or ttm_bo_vm_open(). It's
	 * then released in ttm_bo_vm_close().
	 *
	 * Here, this function is called during mmap() initialization.
	 * Thus, the reference acquired in ttm_bo_mmap_single() is
	 * sufficient.
	 */
	*color = 0;
	return (0);
}

static void
ttm_bo_vm_dtor(void *handle)
{
	struct ttm_buffer_object *bo = handle;

	ttm_bo_unref(&bo);
}

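/*
 * Pager operations for the OBJT_MGTDEVICE VM object created by
 * ttm_bo_mmap_single() below.
 */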
static struct cdev_pager_ops ttm_pager_ops = {
	.cdev_pg_fault = ttm_bo_vm_fault_dfly,
	.cdev_pg_ctor = ttm_bo_vm_ctor,
	.cdev_pg_dtor = ttm_bo_vm_dtor
};

/*
 * Called from drm_drv.c
 *
 * *offset - object offset in bytes
 * size	   - map size in bytes
 *
 * We set up a dummy vma (for now) and call ttm_bo_mmap().  Then we set up
 * our own VM object and dfly ops.  Note that the ops supplied by
 * ttm_bo_mmap() are not currently used.
 */
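/*
 * Illustrative call sequence, with hypothetical variable names; the real
 * caller lives in drm_drv.c and is not reproduced here:
 *
 *	vm_object_t obj;
 *	vm_ooffset_t off = byte_offset;	// bdev-relative byte offset
 *	error = ttm_bo_mmap_single(dev, &off, length, &obj, nprot);
 *	// on success, obj is an OBJT_MGTDEVICE object and off has been
 *	// rebased to 0 (object-relative).
 */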
int
ttm_bo_mmap_single(struct drm_device *dev, vm_ooffset_t *offset,
		   vm_size_t size, struct vm_object **obj_res, int nprot)
{
	struct ttm_bo_device *bdev = dev->drm_ttm_bdev;
	struct ttm_buffer_object *bo;
	struct vm_object *vm_obj;
	struct vm_area_struct vma;
	int ret;

	*obj_res = NULL;

	bzero(&vma, sizeof(vma));
	vma.vm_start = *offset;		/* bdev-relative offset */
	vma.vm_end = vma.vm_start + size;
	vma.vm_pgoff = vma.vm_start >> PAGE_SHIFT;
	/* vma.vm_page_prot */
	/* vma.vm_flags */

	/*
	 * Call the Linux-ported code to do the work, and on success just
	 * set up our own VM object and ignore what the Linux code did other
	 * than supplying us the 'bo'.
	 */
	ret = ttm_bo_mmap(NULL, &vma, bdev);

	if (ret == 0) {
		bo = vma.vm_private_data;
		vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE,
					     &ttm_pager_ops,
					     size, nprot, 0,
					     curthread->td_ucred);
		if (vm_obj) {
			*obj_res = vm_obj;
			*offset = 0;		/* object-relative offset */
		} else {
			ttm_bo_unref(&bo);
			ret = EINVAL;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap_single);

#ifdef __DragonFly__
void ttm_bo_release_mmap(struct ttm_buffer_object *bo);

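/*
 * Tear down the VM object backing a BO's mapping: remove any pages still
 * entered under it and drop the reference returned by cdev_pager_lookup().
 */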
void
ttm_bo_release_mmap(struct ttm_buffer_object *bo)
{
	vm_object_t vm_obj;

	vm_obj = cdev_pager_lookup(bo);
	if (vm_obj == NULL)
		return;

	VM_OBJECT_LOCK(vm_obj);
#if 1
	vm_object_page_remove(vm_obj, 0, 0, false);
#else
	/*
	 * XXX REMOVED
	 *
	 * We no longer manage the vm pages inside the MGTDEVICE
	 * objects.
	 */
	vm_page_t m;
	int i;

	for (i = 0; i < bo->num_pages; i++) {
		m = vm_page_lookup_busy_wait(vm_obj, i, TRUE, "ttm_unm");
		if (m == NULL)
			continue;
		cdev_pager_free_page(vm_obj, m);
	}
#endif
	VM_OBJECT_UNLOCK(vm_obj);

	vm_object_deallocate(vm_obj);
}
#endif

#if 0
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);
#endif