xref: /dragonfly/sys/dev/drm/ttm/ttm_bo_vm.c (revision 4d31f6b9)
1 /**************************************************************************
2  *
3  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 /*
28  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29  */
30 
31 #define pr_fmt(fmt) "[TTM] " fmt
32 
33 #include <drm/ttm/ttm_module.h>
34 #include <drm/ttm/ttm_bo_driver.h>
35 #include <drm/ttm/ttm_placement.h>
36 #include <drm/drm_vma_manager.h>
37 #include <linux/mm.h>
38 #include <linux/pfn_t.h>
39 #include <linux/rbtree.h>
40 #include <linux/module.h>
41 #include <linux/uaccess.h>
42 #include <linux/mem_encrypt.h>
43 
44 #include <sys/sysctl.h>
45 #include <vm/vm.h>
46 #include <vm/vm_page.h>
47 #include <vm/vm_page2.h>
48 
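/*
 * Number of pages the Linux-style fault path below maps per fault,
 * including the faulting page itself.
 */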
49 #define TTM_BO_VM_NUM_PREFAULT 16
50 
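/*
 * Wait for a pending pipelined move (bo->moving) to finish before the
 * fault proceeds.  Returns 0 once the buffer is idle, VM_FAULT_RETRY when
 * the fault should be retried (mmap_sem may already have been dropped),
 * or VM_FAULT_SIGBUS / VM_FAULT_NOPAGE if waiting on the fence failed.
 */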
51 static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
52 				struct vm_area_struct *vma,
53 				struct vm_fault *vmf)
54 {
55 	int ret = 0;
56 
57 	if (likely(!bo->moving))
58 		goto out_unlock;
59 
60 	/*
61 	 * Quick non-stalling check for idle.
62 	 */
63 	if (dma_fence_is_signaled(bo->moving))
64 		goto out_clear;
65 
66 	/*
67 	 * If possible, avoid waiting for GPU with mmap_sem
68 	 * held.
69 	 */
70 	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
71 		ret = VM_FAULT_RETRY;
72 		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
73 			goto out_unlock;
74 
75 		ttm_bo_get(bo);
76 		up_read(&vmf->vma->vm_mm->mmap_sem);
77 		(void) dma_fence_wait(bo->moving, true);
78 		ttm_bo_unreserve(bo);
79 		ttm_bo_put(bo);
80 		goto out_unlock;
81 	}
82 
83 	/*
84 	 * Ordinary wait.
85 	 */
86 	ret = dma_fence_wait(bo->moving, true);
87 	if (unlikely(ret != 0)) {
88 		ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
89 			VM_FAULT_NOPAGE;
90 		goto out_unlock;
91 	}
92 
93 out_clear:
94 	dma_fence_put(bo->moving);
95 	bo->moving = NULL;
96 
97 out_unlock:
98 	return ret;
99 }
100 
101 /*
102  * Controls stalling on an unexpected vm_page alias (a fatal bus fault).
103  * The default (-1) always unstalls; set to 0 to stall, or to a positive
104  * count to unstall that many times and then stall again.
105  */
106 static int drm_unstall = -1;
107 SYSCTL_INT(_debug, OID_AUTO, unstall, CTLFLAG_RW, &drm_unstall, 0, "");
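/*
 * Usage sketch (assuming the sysctl node is registered while this code is
 * loaded):
 *
 *   sysctl debug.unstall		# read the current setting
 *   sysctl debug.unstall=4		# allow four unstalls, then stall again
 */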
108 
109 static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
110 {
111 	/* see ttm_bo_mmap_single() at end of this file */
112 	/* ttm_bo_vm_ops not currently used, no entry should occur */
113 	panic("ttm_bo_vm_fault");
114 #if 0
115 	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
116 	    vma->vm_private_data;
117 	struct ttm_bo_device *bdev = bo->bdev;
118 	unsigned long page_offset;
119 	unsigned long page_last;
120 	unsigned long pfn;
121 	struct ttm_tt *ttm = NULL;
122 	struct page *page;
123 	int ret;
124 	int i;
125 	unsigned long address = vmf->address;
126 	int retval = VM_FAULT_NOPAGE;
127 	struct ttm_mem_type_manager *man =
128 		&bdev->man[bo->mem.mem_type];
129 	struct vm_area_struct cvma;
130 
131 	/*
132 	 * Work around locking order reversal in fault / nopfn
133 	 * between mmap_sem and bo_reserve: Perform a trylock operation
134 	 * for reserve, and if it fails, retry the fault after waiting
135 	 * for the buffer to become unreserved.
136 	 */
137 	ret = ttm_bo_reserve(bo, true, true, NULL);
138 	if (unlikely(ret != 0)) {
139 		if (ret != -EBUSY)
140 			return VM_FAULT_NOPAGE;
141 
142 		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
143 			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
144 				ttm_bo_reference(bo);
145 				up_read(&vma->vm_mm->mmap_sem);
146 				(void) ttm_bo_wait_unreserved(bo);
147 				ttm_bo_unref(&bo);
148 			}
149 
150 			return VM_FAULT_RETRY;
151 		}
152 
153 		/*
154 		 * If we wanted to change the locking order to
155 		 * mmap_sem -> bo::reserve, we'd use a blocking reserve here
156 		 * instead of retrying the fault...
157 		 */
158 		return VM_FAULT_NOPAGE;
159 	}
160 
161 	/*
162 	 * Refuse to fault imported pages. This should be handled
163 	 * (if at all) by redirecting mmap to the exporter.
164 	 */
165 	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
166 		retval = VM_FAULT_SIGBUS;
167 		goto out_unlock;
168 	}
169 
170 	if (bdev->driver->fault_reserve_notify) {
171 		ret = bdev->driver->fault_reserve_notify(bo);
172 		switch (ret) {
173 		case 0:
174 			break;
175 		case -EBUSY:
176 		case -ERESTARTSYS:
177 			retval = VM_FAULT_NOPAGE;
178 			goto out_unlock;
179 		default:
180 			retval = VM_FAULT_SIGBUS;
181 			goto out_unlock;
182 		}
183 	}
184 
185 	/*
186 	 * Wait for buffer data in transit, due to a pipelined
187 	 * move.
188 	 */
189 
190 	ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
191 	if (unlikely(ret != 0)) {
192 		retval = ret;
193 
194 		if (retval == VM_FAULT_RETRY &&
195 		    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
196 			/* The BO has already been unreserved. */
197 			return retval;
198 		}
199 
200 		goto out_unlock;
201 	}
202 
203 	ret = ttm_mem_io_lock(man, true);
204 	if (unlikely(ret != 0)) {
205 		retval = VM_FAULT_NOPAGE;
206 		goto out_unlock;
207 	}
208 	ret = ttm_mem_io_reserve_vm(bo);
209 	if (unlikely(ret != 0)) {
210 		retval = VM_FAULT_SIGBUS;
211 		goto out_io_unlock;
212 	}
213 
214 	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
215 		vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
216 	page_last = vma_pages(vma) + vma->vm_pgoff -
217 		drm_vma_node_start(&bo->vma_node);
218 
219 	if (unlikely(page_offset >= bo->num_pages)) {
220 		retval = VM_FAULT_SIGBUS;
221 		goto out_io_unlock;
222 	}
223 
224 	/*
225 	 * Make a local vma copy to modify the page_prot member
226 	 * and vm_flags if necessary. The vma parameter is protected
227 	 * by mmap_sem in write mode.
228 	 */
229 	cvma = *vma;
230 	cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);
231 
232 	if (bo->mem.bus.is_iomem) {
233 		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
234 						cvma.vm_page_prot);
235 	} else {
236 		struct ttm_operation_ctx ctx = {
237 			.interruptible = false,
238 			.no_wait_gpu = false,
239 			.flags = TTM_OPT_FLAG_FORCE_ALLOC
240 
241 		};
242 
243 		ttm = bo->ttm;
244 		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
245 						cvma.vm_page_prot);
246 
247 		/* Allocate all pages at once, the most common usage */
248 		if (ttm_tt_populate(ttm, &ctx)) {
249 			retval = VM_FAULT_OOM;
250 			goto out_io_unlock;
251 		}
252 	}
253 
254 	/*
255 	 * Speculatively prefault a number of pages. Only error on
256 	 * first page.
257 	 */
258 	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
259 		if (bo->mem.bus.is_iomem)
260 			pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
261 		else {
262 			page = ttm->pages[page_offset];
263 			if (unlikely(!page && i == 0)) {
264 				retval = VM_FAULT_OOM;
265 				goto out_io_unlock;
266 			} else if (unlikely(!page)) {
267 				break;
268 			}
269 			page->mapping = vma->vm_file->f_mapping;
270 			page->index = drm_vma_node_start(&bo->vma_node) +
271 				page_offset;
272 			pfn = page_to_pfn(page);
273 		}
274 
275 		if (vma->vm_flags & VM_MIXEDMAP)
276 			ret = vm_insert_mixed(&cvma, address,
277 					__pfn_to_pfn_t(pfn, PFN_DEV));
278 		else
279 			ret = vm_insert_pfn(&cvma, address, pfn);
280 
281 		/*
282 		 * Somebody beat us to this PTE, we prefaulted into an
283 		 * already-populated PTE, or a prefaulting error occurred.
284 		 */
285 
286 		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
287 			break;
288 		else if (unlikely(ret != 0)) {
289 			retval =
290 			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
291 			goto out_io_unlock;
292 		}
293 
294 		address += PAGE_SIZE;
295 		if (unlikely(++page_offset >= page_last))
296 			break;
297 	}
298 out_io_unlock:
299 	ttm_mem_io_unlock(man);
300 out_unlock:
301 	ttm_bo_unreserve(bo);
302 	return retval;
303 #endif
304 }
305 
306 /* ttm_bo_vm_ops not currently used, no entry should occur */
307 static void ttm_bo_vm_open(struct vm_area_struct *vma)
308 {
309 	struct ttm_buffer_object *bo =
310 	    (struct ttm_buffer_object *)vma->vm_private_data;
311 
312 #if 0
313 	WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
314 #endif
315 
316 	ttm_bo_get(bo);
317 }
318 
319 /* ttm_bo_vm_ops not currently used, no entry should occur */
320 static void ttm_bo_vm_close(struct vm_area_struct *vma)
321 {
322 	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
323 
324 	ttm_bo_put(bo);
325 	vma->vm_private_data = NULL;
326 }
327 
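/*
 * Helper for ttm_bo_vm_access(): copy data between 'buf' and the BO's
 * backing pages through temporary ttm_bo_kmap() mappings, one page at a
 * time.  Returns the number of bytes copied (len) or a negative errno if
 * a page could not be mapped.
 */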
328 static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
329 				 unsigned long offset,
330 				 uint8_t *buf, int len, int write)
331 {
332 	unsigned long page = offset >> PAGE_SHIFT;
333 	unsigned long bytes_left = len;
334 	int ret;
335 
336 	/* Copy a page at a time so that no extra virtual address
337 	 * mapping is needed.
338 	 */
339 	offset -= page << PAGE_SHIFT;
340 	do {
341 		unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
342 		struct ttm_bo_kmap_obj map;
343 		void *ptr;
344 		bool is_iomem;
345 
346 		ret = ttm_bo_kmap(bo, page, 1, &map);
347 		if (ret)
348 			return ret;
349 
350 		ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
351 		WARN_ON_ONCE(is_iomem);
352 		if (write)
353 			memcpy(ptr, buf, bytes);
354 		else
355 			memcpy(buf, ptr, bytes);
356 		ttm_bo_kunmap(&map);
357 
358 		page++;
359 		buf += bytes;
360 		bytes_left -= bytes;
361 		offset = 0;
362 	} while (bytes_left);
363 
364 	return len;
365 }
366 
367 static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
368 			    void *buf, int len, int write)
369 {
370 	unsigned long offset = (addr) - vma->vm_start;
371 	struct ttm_buffer_object *bo = vma->vm_private_data;
372 	int ret;
373 
374 	if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
375 		return -EIO;
376 
377 	ret = ttm_bo_reserve(bo, true, false, NULL);
378 	if (ret)
379 		return ret;
380 
381 	switch (bo->mem.mem_type) {
382 	case TTM_PL_SYSTEM:
383 		if (unlikely(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
384 			ret = ttm_tt_swapin(bo->ttm);
385 			if (unlikely(ret != 0))
386 				break;	/* ttm_bo_unreserve() below, then return ret */
387 		}
388 		/* fall through */
389 	case TTM_PL_TT:
390 		ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
391 		break;
392 	default:
393 		if (bo->bdev->driver->access_memory)
394 			ret = bo->bdev->driver->access_memory(
395 				bo, offset, buf, len, write);
396 		else
397 			ret = -EIO;
398 	}
399 
400 	ttm_bo_unreserve(bo);
401 
402 	return ret;
403 }
404 
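/*
 * Linux-style vm operations.  On DragonFly real faults are resolved by the
 * cdev pager ops further below (see ttm_bo_mmap_single()), so .fault here
 * should never be entered; .access is the hook Linux uses to let debuggers
 * (ptrace peek/poke) read and write BO-backed mappings.
 */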
405 static const struct vm_operations_struct ttm_bo_vm_ops = {
406 	.fault = ttm_bo_vm_fault,
407 	.open = ttm_bo_vm_open,
408 	.close = ttm_bo_vm_close,
409 	.access = ttm_bo_vm_access
410 };
411 
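/*
 * Translate a mmap page offset range into the ttm_buffer_object whose
 * vma_node covers it.  Takes a reference on the returned BO (the caller
 * must drop it) and returns NULL if no matching object exists.
 */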
412 static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
413 						  unsigned long offset,
414 						  unsigned long pages)
415 {
416 	struct drm_vma_offset_node *node;
417 	struct ttm_buffer_object *bo = NULL;
418 
419 	drm_vma_offset_lock_lookup(&bdev->vma_manager);
420 
421 	node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
422 	if (likely(node)) {
423 		bo = container_of(node, struct ttm_buffer_object, vma_node);
424 		if (!kref_get_unless_zero(&bo->kref))
425 			bo = NULL;
426 	}
427 
428 	drm_vma_offset_unlock_lookup(&bdev->vma_manager);
429 
430 	if (!bo)
431 		pr_err("Could not find buffer object to map\n");
432 
433 	return bo;
434 }
435 
436 int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
437 		struct ttm_bo_device *bdev)
438 {
439 	struct ttm_bo_driver *driver;
440 	struct ttm_buffer_object *bo;
441 	int ret;
442 
443 	bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
444 	if (unlikely(!bo))
445 		return -EINVAL;
446 
447 	driver = bo->bdev->driver;
448 	if (unlikely(!driver->verify_access)) {
449 		ret = -EPERM;
450 		goto out_unref;
451 	}
452 	ret = driver->verify_access(bo, filp);
453 	if (unlikely(ret != 0))
454 		goto out_unref;
455 
456 	vma->vm_ops = &ttm_bo_vm_ops;
457 
458 	/*
459 	 * Note: We're transferring the bo reference to
460 	 * vma->vm_private_data here.
461 	 */
462 
463 	vma->vm_private_data = bo;
464 
465 	/*
466 	 * We'd like to use VM_PFNMAP on shared mappings, where
467 	 * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
468 	 * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
469 	 * bad for performance. Until that has been sorted out, use
470 	 * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
471 	 */
472 	vma->vm_flags |= VM_MIXEDMAP;
473 	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
474 	return 0;
475 out_unref:
476 	ttm_bo_put(bo);
477 	return ret;
478 }
479 EXPORT_SYMBOL(ttm_bo_mmap);
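
/*
 * A minimal sketch of how a driver's file_operations.mmap hook typically
 * forwards to ttm_bo_mmap().  The my_drv_* names and the file_to_my_drv()
 * helper are hypothetical and shown for illustration only.
 */
#if 0
static int my_drv_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct my_drv_device *ddev = file_to_my_drv(filp);

	if (unlikely(ddev == NULL))
		return -EINVAL;
	return ttm_bo_mmap(filp, vma, &ddev->bdev);
}
#endif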
480 
481 int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
482 {
483 	if (vma->vm_pgoff != 0)
484 		return -EACCES;
485 
486 	ttm_bo_get(bo);
487 
488 	vma->vm_ops = &ttm_bo_vm_ops;
489 	vma->vm_private_data = bo;
490 	vma->vm_flags |= VM_MIXEDMAP;
491 	vma->vm_flags |= VM_IO | VM_DONTEXPAND;
492 	return 0;
493 }
494 EXPORT_SYMBOL(ttm_fbdev_mmap);
495 
496 /*
497  * DragonFlyBSD Interface
498  */
499 
500 #include "opt_vm.h"
501 
502 /*
503  * NOTE: This code is fragile.  This code can only be entered with *mres
504  *	 not NULL when *mres is a placeholder page allocated by the kernel.
505  */
506 static int
507 ttm_bo_vm_fault_dfly(vm_object_t vm_obj, vm_ooffset_t offset,
508 		     int prot, vm_page_t *mres)
509 {
510 	struct ttm_buffer_object *bo = vm_obj->handle;
511 	struct ttm_bo_device *bdev = bo->bdev;
512 	struct ttm_tt *ttm = NULL;
513 	vm_page_t m;
514 	int ret;
515 	int retval = VM_PAGER_OK;
516 	struct ttm_mem_type_manager *man =
517 		&bdev->man[bo->mem.mem_type];
518 	struct vm_area_struct cvma;
519 
520 /*
521  * The Linux code expects to receive these arguments:
522  * - struct vm_area_struct *vma
523  * - struct vm_fault *vmf
524  */
525 #ifdef __DragonFly__
526 	struct vm_area_struct vmas;
527 	struct vm_area_struct *vma = &vmas;
528 	struct vm_fault vmfs;
529 	struct vm_fault *vmf = &vmfs;
530 
531 	memset(vma, 0, sizeof(*vma));
532 	memset(vmf, 0, sizeof(*vmf));
533 
534 	vma->vm_mm = current->mm;
535 	vmf->vma = vma;
536 #endif
537 
538 	vm_object_pip_add(vm_obj, 1);
539 
540 	/*
541 	 * OBJT_MGTDEVICE does not pre-allocate the page.
542 	 */
543 	KKASSERT(*mres == NULL);
544 
545 retry:
546 	/* The Linux page fault handler acquires mmap_sem */
547 	down_read(&vma->vm_mm->mmap_sem);
548 
549 	m = NULL;
550 
551 	/*
552 	 * Work around locking order reversal in fault / nopfn
553 	 * between mmap_sem and bo_reserve: Perform a trylock operation
554 	 * for reserve, and if it fails, retry the fault after waiting
555 	 * for the buffer to become unreserved.
556 	 */
557 	ret = ttm_bo_reserve(bo, true, true, NULL);
558 	if (unlikely(ret != 0)) {
559 		if (ret != -EBUSY) {
560 			retval = VM_PAGER_ERROR;
561 			goto out_unlock2;
562 		}
563 
564 		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY || 1) {
565 			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
566 				up_read(&vma->vm_mm->mmap_sem);
567 				(void) ttm_bo_wait_unreserved(bo);
568 			}
569 
570 #ifndef __DragonFly__
571 			return VM_FAULT_RETRY;
572 #else
573 			up_read(&vma->vm_mm->mmap_sem);
574 			lwkt_yield();
575 			goto retry;
576 #endif
577 		}
578 
579 		/*
580 		 * If we wanted to change the locking order to
581 		 * mmap_sem -> bo::reserve, we'd use a blocking reserve here
582 		 * instead of retrying the fault...
583 		 */
584 		retval = VM_PAGER_ERROR;
585 		goto out_unlock2;
586 	}
587 
588 	if (bdev->driver->fault_reserve_notify) {
589 		ret = bdev->driver->fault_reserve_notify(bo);
590 		switch (ret) {
591 		case 0:
592 			break;
593 		case -EBUSY:
594 			lwkt_yield();
595 			/* fall through */
596 		case -ERESTARTSYS:
597 		case -EINTR:
598 			retval = VM_PAGER_ERROR;
599 			goto out_unlock1;
600 		default:
601 			retval = VM_PAGER_ERROR;
602 			goto out_unlock1;
603 		}
604 	}
605 
606 	/*
607 	 * Wait for buffer data in transit, due to a pipelined
608 	 * move.
609 	 */
610 	ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
611 	if (unlikely(ret != 0)) {
612 		retval = VM_PAGER_ERROR;
613 		goto out_unlock1;
614 	}
615 
616 	ret = ttm_mem_io_lock(man, true);
617 	if (unlikely(ret != 0)) {
618 		retval = VM_PAGER_ERROR;
619 		goto out_unlock1;
620 	}
621 	ret = ttm_mem_io_reserve_vm(bo);
622 	if (unlikely(ret != 0)) {
623 		retval = VM_PAGER_ERROR;
624 		goto out_io_unlock1;
625 	}
626 	if (unlikely(OFF_TO_IDX(offset) >= bo->num_pages)) {
627 		retval = VM_PAGER_ERROR;
628 		goto out_io_unlock1;
629 	}
630 
631 	/*
632 	 * Lookup the real page.
633 	 *
634 	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
635 	 * since the mmap_sem is only held in read mode. However, we
636 	 * modify only the caching bits of vma->vm_page_prot and
637 	 * consider those bits protected by
638 	 * the bo->mutex, as we should be the only writers.
639 	 * There shouldn't really be any readers of these bits except
640 	 * within vm_insert_mixed()? fork?
641 	 *
642 	 * TODO: Add a list of vmas to the bo, and change the
643 	 * vma->vm_page_prot when the object changes caching policy, with
644 	 * the correct locks held.
645 	 */
646 
647 	/*
648 	 * Make a local vma copy to modify the page_prot member
649 	 * and vm_flags if necessary. The vma parameter is protected
650 	 * by mmap_sem in write mode.
651 	 */
652 	cvma = *vma;
653 #if 0
654 	cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);
655 #else
656 	cvma.vm_page_prot = 0;
657 #endif
658 
659 	if (bo->mem.bus.is_iomem) {
660 #ifdef __DragonFly__
661 		m = vm_phys_fictitious_to_vm_page(bo->mem.bus.base +
662 						  bo->mem.bus.offset + offset);
663 		pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement, 0));
664 #endif
665 		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
666 						cvma.vm_page_prot);
667 	} else {
668 		ttm = bo->ttm;
669 		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
670 						cvma.vm_page_prot);
671 
672 		/* Allocate all pages at once, the most common usage */
673 		struct ttm_operation_ctx ctx = {
674 			.interruptible = false,
675 			.no_wait_gpu = false,
676 			.flags = TTM_OPT_FLAG_FORCE_ALLOC
677 
678 		};
679 		if (ttm_tt_populate(ttm, &ctx)) {
680 			retval = VM_PAGER_ERROR;
681 			goto out_io_unlock1;
682 		}
683 
684 		m = (struct vm_page *)ttm->pages[OFF_TO_IDX(offset)];
685 		if (unlikely(!m)) {
686 			retval = VM_PAGER_ERROR;
687 			goto out_io_unlock1;
688 		}
689 		pmap_page_set_memattr(m,
690 		    (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
691 		    VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement, 0));
692 	}
693 
694 	if (vm_page_busy_try(m, FALSE)) {
695 		kprintf("r");
696 		vm_page_sleep_busy(m, FALSE, "ttmvmf");
697 		ttm_mem_io_unlock(man);
698 		ttm_bo_unreserve(bo);
699 		up_read(&vma->vm_mm->mmap_sem);
700 		goto retry;
701 	}
702 
703 	/*
704 	 * Return our fake page BUSYd.  Do not index it into the VM object.
705 	 * The caller will enter it into the pmap.
706 	 */
707 	m->valid = VM_PAGE_BITS_ALL;
708 	*mres = m;
709 
710 out_io_unlock1:
711 	ttm_mem_io_unlock(man);
712 out_unlock1:
713 	ttm_bo_unreserve(bo);
714 out_unlock2:
715 	vm_object_pip_wakeup(vm_obj);
716 	return (retval);
717 }
718 
719 static int
720 ttm_bo_vm_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
721 	       vm_ooffset_t foff, struct ucred *cred, u_short *color)
722 {
723 
724 	/*
725 	 * On Linux, a reference to the buffer object is acquired here.
726 	 * The reason is that this function is not called when the
727 	 * mmap() is initialized, but only when a process forks for
728 	 * instance. Therefore on Linux, the reference on the bo is
729 	 * acquired either in ttm_bo_mmap() or ttm_bo_vm_open(). It's
730 	 * then released in ttm_bo_vm_close().
731 	 *
732 	 * Here, this function is called during mmap() initialization.
733 	 * Thus, the reference acquired in ttm_bo_mmap_single() is
734 	 * sufficient.
735 	 */
736 	*color = 0;
737 	return (0);
738 }
739 
740 static void
741 ttm_bo_vm_dtor(void *handle)
742 {
743 	struct ttm_buffer_object *bo = handle;
744 
745 	ttm_bo_unref(&bo);
746 }
747 
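/*
 * DragonFly pager ops, installed by ttm_bo_mmap_single() below through
 * cdev_pager_allocate(): cdev_pg_fault resolves page faults on the
 * mapping, cdev_pg_ctor runs when the mapping is set up, and cdev_pg_dtor
 * drops the BO reference when the VM object goes away.
 */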
748 static struct cdev_pager_ops ttm_pager_ops = {
749 	.cdev_pg_fault = ttm_bo_vm_fault_dfly,
750 	.cdev_pg_ctor = ttm_bo_vm_ctor,
751 	.cdev_pg_dtor = ttm_bo_vm_dtor
752 };
753 
754 /*
755  * Called from drm_drv.c
756  *
757  * *offset - object offset in bytes
758  * size	   - map size in bytes
759  *
760  * We set up a dummy vma (for now) and call ttm_bo_mmap().  Then we set up
761  * our own VM object and dfly ops.  Note that the ops supplied by
762  * ttm_bo_mmap() are not currently used.
763  */
764 int
765 ttm_bo_mmap_single(struct file *fp, struct drm_device *dev,
766 		   vm_ooffset_t *offset, vm_size_t size,
767 		   struct vm_object **obj_res, int nprot)
768 {
769 	struct ttm_bo_device *bdev = dev->drm_ttm_bdev;
770 	struct ttm_buffer_object *bo;
771 	struct vm_object *vm_obj;
772 	struct vm_area_struct vma;
773 	int ret;
774 
775 	*obj_res = NULL;
776 
777 	bzero(&vma, sizeof(vma));
778 	vma.vm_start = *offset;		/* bdev-relative offset */
779 	vma.vm_end = vma.vm_start + size;
780 	vma.vm_pgoff = vma.vm_start >> PAGE_SHIFT;
781 	/* vma.vm_page_prot */
782 	/* vma.vm_flags */
783 
784 	/*
785 	 * Call the Linux-ported code to do the work, and on success just
786 	 * set up our own VM object and ignore what the Linux code did other
787 	 * than supplying us the 'bo'.
788 	 */
789 	ret = ttm_bo_mmap(fp, &vma, bdev);
790 
791 	if (ret == 0) {
792 		bo = vma.vm_private_data;
793 		vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE,
794 					     &ttm_pager_ops,
795 					     size, nprot, 0,
796 					     curthread->td_ucred);
797 		if (vm_obj) {
798 			*obj_res = vm_obj;
799 			*offset = 0;		/* object-relative offset */
800 		} else {
801 			ttm_bo_unref(&bo);
802 			ret = EINVAL;
803 		}
804 	}
805 	return ret;
806 }
807 EXPORT_SYMBOL(ttm_bo_mmap_single);
808 
809 #ifdef __DragonFly__
810 void ttm_bo_release_mmap(struct ttm_buffer_object *bo);
811 
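/*
 * Tear down the cdev pager VM object backing a BO's user mapping, if one
 * exists: remove its resident pages so stale mappings do not persist and
 * drop a reference on the VM object.
 */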
812 void
813 ttm_bo_release_mmap(struct ttm_buffer_object *bo)
814 {
815 	vm_object_t vm_obj;
816 
817 	vm_obj = cdev_pager_lookup(bo);
818 	if (vm_obj == NULL)
819 		return;
820 
821 	VM_OBJECT_LOCK(vm_obj);
822 #if 1
823 	vm_object_page_remove(vm_obj, 0, 0, false);
824 #else
825 	/*
826 	 * XXX REMOVED
827 	 *
828 	 * We no longer manage the vm pages inside the MGTDEVICE
829 	 * objects.
830 	 */
831 	vm_page_t m;
832 	int i;
833 
834 	for (i = 0; i < bo->num_pages; i++) {
835 		m = vm_page_lookup_busy_wait(vm_obj, i, TRUE, "ttm_unm");
836 		if (m == NULL)
837 			continue;
838 		cdev_pager_free_page(vm_obj, m);
839 	}
840 #endif
841 	VM_OBJECT_UNLOCK(vm_obj);
842 
843 	vm_object_deallocate(vm_obj);
844 }
845 #endif
846 
847 #if 0
848 int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
849 {
850 	if (vma->vm_pgoff != 0)
851 		return -EACCES;
852 
853 	vma->vm_ops = &ttm_bo_vm_ops;
854 	vma->vm_private_data = ttm_bo_reference(bo);
855 	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
856 	return 0;
857 }
858 EXPORT_SYMBOL(ttm_fbdev_mmap);
859 #endif
860