xref: /dragonfly/sys/dev/drm/ttm/ttm_bo_vm.c (revision 5f38e86d)
15718399fSFrançois Tigeot /**************************************************************************
25718399fSFrançois Tigeot  *
35718399fSFrançois Tigeot  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
45718399fSFrançois Tigeot  * All Rights Reserved.
55718399fSFrançois Tigeot  *
65718399fSFrançois Tigeot  * Permission is hereby granted, free of charge, to any person obtaining a
75718399fSFrançois Tigeot  * copy of this software and associated documentation files (the
85718399fSFrançois Tigeot  * "Software"), to deal in the Software without restriction, including
95718399fSFrançois Tigeot  * without limitation the rights to use, copy, modify, merge, publish,
105718399fSFrançois Tigeot  * distribute, sub license, and/or sell copies of the Software, and to
115718399fSFrançois Tigeot  * permit persons to whom the Software is furnished to do so, subject to
125718399fSFrançois Tigeot  * the following conditions:
135718399fSFrançois Tigeot  *
145718399fSFrançois Tigeot  * The above copyright notice and this permission notice (including the
155718399fSFrançois Tigeot  * next paragraph) shall be included in all copies or substantial portions
165718399fSFrançois Tigeot  * of the Software.
175718399fSFrançois Tigeot  *
185718399fSFrançois Tigeot  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
195718399fSFrançois Tigeot  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
205718399fSFrançois Tigeot  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
215718399fSFrançois Tigeot  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
225718399fSFrançois Tigeot  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
235718399fSFrançois Tigeot  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
245718399fSFrançois Tigeot  * USE OR OTHER DEALINGS IN THE SOFTWARE.
255718399fSFrançois Tigeot  *
265718399fSFrançois Tigeot  **************************************************************************/
275718399fSFrançois Tigeot /*
285718399fSFrançois Tigeot  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
295718399fSFrançois Tigeot  */
305718399fSFrançois Tigeot 
31c66857ebSFrançois Tigeot #define pr_fmt(fmt) "[TTM] " fmt
32c66857ebSFrançois Tigeot 
33a34b4168SMatthew Dillon #include <ttm/ttm_module.h>
34a34b4168SMatthew Dillon #include <ttm/ttm_bo_driver.h>
35a34b4168SMatthew Dillon #include <ttm/ttm_bo_api.h>
36a34b4168SMatthew Dillon #include <ttm/ttm_placement.h>
37a34b4168SMatthew Dillon #include <drm/drm_vma_manager.h>
38a34b4168SMatthew Dillon #include <linux/mm.h>
39d78d3a22SFrançois Tigeot #include <linux/pfn_t.h>
40a34b4168SMatthew Dillon #include <linux/rbtree.h>
41a34b4168SMatthew Dillon #include <linux/module.h>
42a34b4168SMatthew Dillon #include <linux/uaccess.h>
43a34b4168SMatthew Dillon 
44a34b4168SMatthew Dillon #include <sys/sysctl.h>
455718399fSFrançois Tigeot #include <vm/vm.h>
465718399fSFrançois Tigeot #include <vm/vm_page.h>
4762bba8f6SFrançois Tigeot #include <vm/vm_page2.h>
4862bba8f6SFrançois Tigeot 
495718399fSFrançois Tigeot #define TTM_BO_VM_NUM_PREFAULT 16
505718399fSFrançois Tigeot 
5143e748b9SFrançois Tigeot static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
5243e748b9SFrançois Tigeot 				struct vm_area_struct *vma,
5343e748b9SFrançois Tigeot 				struct vm_fault *vmf)
5443e748b9SFrançois Tigeot {
5543e748b9SFrançois Tigeot 	int ret = 0;
5643e748b9SFrançois Tigeot 
5743e748b9SFrançois Tigeot 	if (likely(!test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)))
5843e748b9SFrançois Tigeot 		goto out_unlock;
5943e748b9SFrançois Tigeot 
6043e748b9SFrançois Tigeot 	/*
6143e748b9SFrançois Tigeot 	 * Quick non-stalling check for idle.
6243e748b9SFrançois Tigeot 	 */
63d78d3a22SFrançois Tigeot 	ret = ttm_bo_wait(bo, false, true);
6443e748b9SFrançois Tigeot 	if (likely(ret == 0))
6543e748b9SFrançois Tigeot 		goto out_unlock;
6643e748b9SFrançois Tigeot 
6743e748b9SFrançois Tigeot 	/*
6843e748b9SFrançois Tigeot 	 * If possible, avoid waiting for GPU with mmap_sem
6943e748b9SFrançois Tigeot 	 * held.
7043e748b9SFrançois Tigeot 	 */
7143e748b9SFrançois Tigeot 	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
7243e748b9SFrançois Tigeot 		ret = VM_FAULT_RETRY;
7343e748b9SFrançois Tigeot 		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
7443e748b9SFrançois Tigeot 			goto out_unlock;
7543e748b9SFrançois Tigeot 
76*5f38e86dSFrançois Tigeot #if 0
7743e748b9SFrançois Tigeot 		up_read(&vma->vm_mm->mmap_sem);
78*5f38e86dSFrançois Tigeot #endif
79d78d3a22SFrançois Tigeot 		(void) ttm_bo_wait(bo, true, false);
8043e748b9SFrançois Tigeot 		goto out_unlock;
8143e748b9SFrançois Tigeot 	}
8243e748b9SFrançois Tigeot 
8343e748b9SFrançois Tigeot 	/*
8443e748b9SFrançois Tigeot 	 * Ordinary wait.
8543e748b9SFrançois Tigeot 	 */
86d78d3a22SFrançois Tigeot 	ret = ttm_bo_wait(bo, true, false);
8743e748b9SFrançois Tigeot 	if (unlikely(ret != 0))
8843e748b9SFrançois Tigeot 		ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
8943e748b9SFrançois Tigeot 			VM_FAULT_NOPAGE;
9043e748b9SFrançois Tigeot 
9143e748b9SFrançois Tigeot out_unlock:
9243e748b9SFrançois Tigeot 	return ret;
9343e748b9SFrançois Tigeot }
9443e748b9SFrançois Tigeot 
/*
 * Always unstall on unexpected vm_page alias, fatal bus fault.
 * Set to 0 to stall, set to positive count to unstall N times,
 * then stall again.
 */
/* Tunable at runtime via sysctl debug.unstall; consulted by the
 * DragonFly fault handler when a page is found in the wrong object. */
static int drm_unstall = -1;
SYSCTL_INT(_debug, OID_AUTO, unstall, CTLFLAG_RW, &drm_unstall, 0, "");
102a34b4168SMatthew Dillon 
/*
 * Linux-style .fault handler for ttm_bo_vm_ops.
 *
 * On DragonFly the fault path does not enter through ttm_bo_vm_ops
 * (see ttm_bo_mmap_single() at the end of this file), so this entry
 * point must never be reached and panics immediately if it is.  The
 * original Linux implementation is preserved below under #if 0 for
 * reference.  panic() does not return, hence no return statement is
 * needed after the #endif.
 */
static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	/* see ttm_bo_mmap_single() at end of this file */
	/* ttm_bo_vm_ops not currently used, no entry should occur */
	panic("ttm_bo_vm_fault");
#if 0
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int ret;
	int i;
	unsigned long address = (unsigned long)vmf->virtual_address;
	int retval = VM_FAULT_NOPAGE;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];
	struct vm_area_struct cvma;

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after waiting
	 * for the buffer to become unreserved.
	 */
	ret = ttm_bo_reserve(bo, true, true, NULL);
	if (unlikely(ret != 0)) {
		if (ret != -EBUSY)
			return VM_FAULT_NOPAGE;

		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
				up_read(&vma->vm_mm->mmap_sem);
				(void) ttm_bo_wait_unreserved(bo);
			}

			return VM_FAULT_RETRY;
		}

		/*
		 * If we'd want to change locking order to
		 * mmap_sem -> bo::reserve, we'd use a blocking reserve here
		 * instead of retrying the fault...
		 */
		return VM_FAULT_NOPAGE;
	}

	/*
	 * Refuse to fault imported pages. This should be handled
	 * (if at all) by redirecting mmap to the exporter.
	 */
	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
		retval = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	/* Give the driver a chance to move the bo before mapping it. */
	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTARTSYS:
			retval = VM_FAULT_NOPAGE;
			goto out_unlock;
		default:
			retval = VM_FAULT_SIGBUS;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
	if (unlikely(ret != 0)) {
		retval = ret;
		goto out_unlock;
	}

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_NOPAGE;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	/* Translate the faulting address into a page index in the bo. */
	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
		vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
	page_last = vma_pages(vma) + vma->vm_pgoff -
		drm_vma_node_start(&bo->vma_node);

	if (unlikely(page_offset >= bo->num_pages)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	/*
	 * Make a local vma copy to modify the page_prot member
	 * and vm_flags if necessary. The vma parameter is protected
	 * by mmap_sem in write mode.
	 */
	cvma = *vma;
	cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);

	if (bo->mem.bus.is_iomem) {
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);
	} else {
		ttm = bo->ttm;
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);

		/* Allocate all page at once, most common usage */
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_FAULT_OOM;
			goto out_io_unlock;
		}
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */
	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
		if (bo->mem.bus.is_iomem)
			pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
		else {
			page = ttm->pages[page_offset];
			if (unlikely(!page && i == 0)) {
				retval = VM_FAULT_OOM;
				goto out_io_unlock;
			} else if (unlikely(!page)) {
				break;
			}
			page->mapping = vma->vm_file->f_mapping;
			page->index = drm_vma_node_start(&bo->vma_node) +
				page_offset;
			pfn = page_to_pfn(page);
		}

		if (vma->vm_flags & VM_MIXEDMAP)
			ret = vm_insert_mixed(&cvma, address,
					__pfn_to_pfn_t(pfn, PFN_DEV));
		else
			ret = vm_insert_pfn(&cvma, address, pfn);

		/*
		 * Somebody beat us to this PTE or prefaulting to
		 * an already populated PTE, or prefaulting error.
		 */

		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
			break;
		else if (unlikely(ret != 0)) {
			retval =
			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
			goto out_io_unlock;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
out_io_unlock:
	ttm_mem_io_unlock(man);
out_unlock:
	ttm_bo_unreserve(bo);
	return retval;
#endif
}
283a34b4168SMatthew Dillon 
284a34b4168SMatthew Dillon /* ttm_bo_vm_ops not currently used, no entry should occur */
285a34b4168SMatthew Dillon static void ttm_bo_vm_open(struct vm_area_struct *vma)
286a34b4168SMatthew Dillon {
287a34b4168SMatthew Dillon 	struct ttm_buffer_object *bo =
288a34b4168SMatthew Dillon 	    (struct ttm_buffer_object *)vma->vm_private_data;
289a34b4168SMatthew Dillon 
29043e748b9SFrançois Tigeot #if 0
29143e748b9SFrançois Tigeot 	WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
29243e748b9SFrançois Tigeot #endif
29343e748b9SFrançois Tigeot 
294a34b4168SMatthew Dillon 	(void)ttm_bo_reference(bo);
295a34b4168SMatthew Dillon }
296a34b4168SMatthew Dillon 
297a34b4168SMatthew Dillon /* ttm_bo_vm_ops not currently used, no entry should occur */
298a34b4168SMatthew Dillon static void ttm_bo_vm_close(struct vm_area_struct *vma)
299a34b4168SMatthew Dillon {
300a34b4168SMatthew Dillon 	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
301a34b4168SMatthew Dillon 
302a34b4168SMatthew Dillon 	ttm_bo_unref(&bo);
303a34b4168SMatthew Dillon 	vma->vm_private_data = NULL;
304a34b4168SMatthew Dillon }
305a34b4168SMatthew Dillon 
/* VM callbacks installed by ttm_bo_mmap()/ttm_fbdev_mmap(); per the
 * comments above, these are not currently entered on DragonFly. */
static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close
};
311a34b4168SMatthew Dillon 
312a34b4168SMatthew Dillon static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
313a34b4168SMatthew Dillon 						  unsigned long offset,
314a34b4168SMatthew Dillon 						  unsigned long pages)
315a34b4168SMatthew Dillon {
316a34b4168SMatthew Dillon 	struct drm_vma_offset_node *node;
317a34b4168SMatthew Dillon 	struct ttm_buffer_object *bo = NULL;
318a34b4168SMatthew Dillon 
319a34b4168SMatthew Dillon 	drm_vma_offset_lock_lookup(&bdev->vma_manager);
320a34b4168SMatthew Dillon 
321a34b4168SMatthew Dillon 	node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
322a34b4168SMatthew Dillon 	if (likely(node)) {
323a34b4168SMatthew Dillon 		bo = container_of(node, struct ttm_buffer_object, vma_node);
324a34b4168SMatthew Dillon 		if (!kref_get_unless_zero(&bo->kref))
325a34b4168SMatthew Dillon 			bo = NULL;
326a34b4168SMatthew Dillon 	}
327a34b4168SMatthew Dillon 
328a34b4168SMatthew Dillon 	drm_vma_offset_unlock_lookup(&bdev->vma_manager);
329a34b4168SMatthew Dillon 
330a34b4168SMatthew Dillon 	if (!bo)
331a34b4168SMatthew Dillon 		pr_err("Could not find buffer object to map\n");
332a34b4168SMatthew Dillon 
333a34b4168SMatthew Dillon 	return bo;
334a34b4168SMatthew Dillon }
335a34b4168SMatthew Dillon 
336a34b4168SMatthew Dillon int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
337a34b4168SMatthew Dillon 		struct ttm_bo_device *bdev)
338a34b4168SMatthew Dillon {
339a34b4168SMatthew Dillon 	struct ttm_bo_driver *driver;
340a34b4168SMatthew Dillon 	struct ttm_buffer_object *bo;
341a34b4168SMatthew Dillon 	int ret;
342a34b4168SMatthew Dillon 
343a34b4168SMatthew Dillon 	bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
344a34b4168SMatthew Dillon 	if (unlikely(!bo))
345a34b4168SMatthew Dillon 		return -EINVAL;
346a34b4168SMatthew Dillon 
347a34b4168SMatthew Dillon 	driver = bo->bdev->driver;
348a34b4168SMatthew Dillon 	if (unlikely(!driver->verify_access)) {
349a34b4168SMatthew Dillon 		ret = -EPERM;
350a34b4168SMatthew Dillon 		goto out_unref;
351a34b4168SMatthew Dillon 	}
352a34b4168SMatthew Dillon 	ret = driver->verify_access(bo, filp);
353a34b4168SMatthew Dillon 	if (unlikely(ret != 0))
354a34b4168SMatthew Dillon 		goto out_unref;
355a34b4168SMatthew Dillon 
356a34b4168SMatthew Dillon 	vma->vm_ops = &ttm_bo_vm_ops;
357a34b4168SMatthew Dillon 
358a34b4168SMatthew Dillon 	/*
359a34b4168SMatthew Dillon 	 * Note: We're transferring the bo reference to
360a34b4168SMatthew Dillon 	 * vma->vm_private_data here.
361a34b4168SMatthew Dillon 	 */
362a34b4168SMatthew Dillon 
363a34b4168SMatthew Dillon 	vma->vm_private_data = bo;
36443e748b9SFrançois Tigeot 
36543e748b9SFrançois Tigeot 	/*
36643e748b9SFrançois Tigeot 	 * We'd like to use VM_PFNMAP on shared mappings, where
36743e748b9SFrançois Tigeot 	 * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
36843e748b9SFrançois Tigeot 	 * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
36943e748b9SFrançois Tigeot 	 * bad for performance. Until that has been sorted out, use
37043e748b9SFrançois Tigeot 	 * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
37143e748b9SFrançois Tigeot 	 */
37243e748b9SFrançois Tigeot 	vma->vm_flags |= VM_MIXEDMAP;
37343e748b9SFrançois Tigeot 	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
374a34b4168SMatthew Dillon 	return 0;
375a34b4168SMatthew Dillon out_unref:
376a34b4168SMatthew Dillon 	ttm_bo_unref(&bo);
377a34b4168SMatthew Dillon 	return ret;
378a34b4168SMatthew Dillon }
379a34b4168SMatthew Dillon EXPORT_SYMBOL(ttm_bo_mmap);
380a34b4168SMatthew Dillon 
381a34b4168SMatthew Dillon int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
382a34b4168SMatthew Dillon {
383a34b4168SMatthew Dillon 	if (vma->vm_pgoff != 0)
384a34b4168SMatthew Dillon 		return -EACCES;
385a34b4168SMatthew Dillon 
386a34b4168SMatthew Dillon 	vma->vm_ops = &ttm_bo_vm_ops;
387a34b4168SMatthew Dillon 	vma->vm_private_data = ttm_bo_reference(bo);
38843e748b9SFrançois Tigeot 	vma->vm_flags |= VM_MIXEDMAP;
38943e748b9SFrançois Tigeot 	vma->vm_flags |= VM_IO | VM_DONTEXPAND;
390a34b4168SMatthew Dillon 	return 0;
391a34b4168SMatthew Dillon }
392a34b4168SMatthew Dillon EXPORT_SYMBOL(ttm_fbdev_mmap);
393a34b4168SMatthew Dillon 
394a34b4168SMatthew Dillon /*
395a34b4168SMatthew Dillon  * DragonFlyBSD Interface
396a34b4168SMatthew Dillon  */
397a34b4168SMatthew Dillon 
398a34b4168SMatthew Dillon #include "opt_vm.h"
399a34b4168SMatthew Dillon 
400a34b4168SMatthew Dillon #include <vm/vm.h>
401a34b4168SMatthew Dillon #include <vm/vm_page.h>
402a34b4168SMatthew Dillon #include <linux/errno.h>
403a34b4168SMatthew Dillon #include <linux/export.h>
404a34b4168SMatthew Dillon 
405a34b4168SMatthew Dillon #include <vm/vm_page2.h>
406a34b4168SMatthew Dillon 
40707cb5299SMatthew Dillon /*
40807cb5299SMatthew Dillon  * NOTE: This code is fragile.  This code can only be entered with *mres
40907cb5299SMatthew Dillon  *	 not NULL when *mres is a placeholder page allocated by the kernel.
41007cb5299SMatthew Dillon  */
411a34b4168SMatthew Dillon static int
412a34b4168SMatthew Dillon ttm_bo_vm_fault_dfly(vm_object_t vm_obj, vm_ooffset_t offset,
413a34b4168SMatthew Dillon 		     int prot, vm_page_t *mres)
414a34b4168SMatthew Dillon {
415a34b4168SMatthew Dillon 	struct ttm_buffer_object *bo = vm_obj->handle;
416a34b4168SMatthew Dillon 	struct ttm_bo_device *bdev = bo->bdev;
417a34b4168SMatthew Dillon 	struct ttm_tt *ttm = NULL;
41807cb5299SMatthew Dillon 	vm_page_t m, mtmp;
419a34b4168SMatthew Dillon 	int ret;
420a34b4168SMatthew Dillon 	int retval = VM_PAGER_OK;
421*5f38e86dSFrançois Tigeot 	struct ttm_mem_type_manager *man =
422*5f38e86dSFrançois Tigeot 		&bdev->man[bo->mem.mem_type];
423*5f38e86dSFrançois Tigeot 	struct vm_area_struct cvma;
424a34b4168SMatthew Dillon 
425*5f38e86dSFrançois Tigeot /*
426*5f38e86dSFrançois Tigeot    The Linux code expects to receive these arguments:
427*5f38e86dSFrançois Tigeot    - struct vm_area_struct *vma
428*5f38e86dSFrançois Tigeot    - struct vm_fault *vmf
429*5f38e86dSFrançois Tigeot */
430*5f38e86dSFrançois Tigeot #ifdef __DragonFly__
431*5f38e86dSFrançois Tigeot 	struct vm_area_struct vmas;
432*5f38e86dSFrançois Tigeot 	struct vm_area_struct *vma = &vmas;
433*5f38e86dSFrançois Tigeot 	struct vm_fault vmfs;
434*5f38e86dSFrançois Tigeot 	struct vm_fault *vmf = &vmfs;
435*5f38e86dSFrançois Tigeot 
436*5f38e86dSFrançois Tigeot 	memset(vma, 0, sizeof(*vma));
437*5f38e86dSFrançois Tigeot 	memset(vmf, 0, sizeof(*vmf));
438*5f38e86dSFrançois Tigeot #endif
439*5f38e86dSFrançois Tigeot 
440a34b4168SMatthew Dillon 	vm_object_pip_add(vm_obj, 1);
44107cb5299SMatthew Dillon 
44207cb5299SMatthew Dillon 	/*
44307cb5299SMatthew Dillon 	 * We must atomically clean up any possible placeholder page to avoid
44407cb5299SMatthew Dillon 	 * the DRM subsystem attempting to use it.  We can determine if this
44507cb5299SMatthew Dillon 	 * is a place holder page by checking m->valid.
44607cb5299SMatthew Dillon 	 *
44707cb5299SMatthew Dillon 	 * We have to do this before any potential fault_reserve_notify()
44807cb5299SMatthew Dillon 	 * which might try to free the map (and thus deadlock on our busy
44907cb5299SMatthew Dillon 	 * page).
45007cb5299SMatthew Dillon 	 */
45107cb5299SMatthew Dillon 	m = *mres;
452a34b4168SMatthew Dillon 	*mres = NULL;
45307cb5299SMatthew Dillon 	if (m) {
45407cb5299SMatthew Dillon 		if (m->valid == VM_PAGE_BITS_ALL) {
45507cb5299SMatthew Dillon 			/* actual page */
45607cb5299SMatthew Dillon 			vm_page_wakeup(m);
45707cb5299SMatthew Dillon 		} else {
45807cb5299SMatthew Dillon 			/* placeholder page */
45907cb5299SMatthew Dillon 			KKASSERT((m->flags & PG_FICTITIOUS) == 0);
46007cb5299SMatthew Dillon 			vm_page_remove(m);
46107cb5299SMatthew Dillon 			vm_page_free(m);
46207cb5299SMatthew Dillon 		}
46307cb5299SMatthew Dillon 	}
464a34b4168SMatthew Dillon 
465a34b4168SMatthew Dillon retry:
466a34b4168SMatthew Dillon 	VM_OBJECT_UNLOCK(vm_obj);
467a34b4168SMatthew Dillon 	m = NULL;
468a34b4168SMatthew Dillon 
469a34b4168SMatthew Dillon 	/*
470*5f38e86dSFrançois Tigeot 	 * Work around locking order reversal in fault / nopfn
471*5f38e86dSFrançois Tigeot 	 * between mmap_sem and bo_reserve: Perform a trylock operation
472*5f38e86dSFrançois Tigeot 	 * for reserve, and if it fails, retry the fault after waiting
473*5f38e86dSFrançois Tigeot 	 * for the buffer to become unreserved.
474a34b4168SMatthew Dillon 	 */
475*5f38e86dSFrançois Tigeot 	ret = ttm_bo_reserve(bo, true, true, NULL);
476a34b4168SMatthew Dillon 	if (unlikely(ret != 0)) {
477*5f38e86dSFrançois Tigeot 		if (ret != -EBUSY) {
478a34b4168SMatthew Dillon 			retval = VM_PAGER_ERROR;
479a34b4168SMatthew Dillon 			VM_OBJECT_LOCK(vm_obj);
480a34b4168SMatthew Dillon 			goto out_unlock2;
481a34b4168SMatthew Dillon 		}
482a34b4168SMatthew Dillon 
483*5f38e86dSFrançois Tigeot 		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY || 1) {
484*5f38e86dSFrançois Tigeot 			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
485*5f38e86dSFrançois Tigeot #if 0
486*5f38e86dSFrançois Tigeot 				up_read(&vma->vm_mm->mmap_sem);
487*5f38e86dSFrançois Tigeot #endif
488*5f38e86dSFrançois Tigeot 				(void) ttm_bo_wait_unreserved(bo);
489*5f38e86dSFrançois Tigeot 			}
490*5f38e86dSFrançois Tigeot 
491*5f38e86dSFrançois Tigeot #ifndef __DragonFly__
492*5f38e86dSFrançois Tigeot 			return VM_FAULT_RETRY;
493*5f38e86dSFrançois Tigeot #else
494*5f38e86dSFrançois Tigeot 			VM_OBJECT_LOCK(vm_obj);
495*5f38e86dSFrançois Tigeot 			lwkt_yield();
496*5f38e86dSFrançois Tigeot 			goto retry;
497*5f38e86dSFrançois Tigeot #endif
498*5f38e86dSFrançois Tigeot 		}
499*5f38e86dSFrançois Tigeot 
500*5f38e86dSFrançois Tigeot 		/*
501*5f38e86dSFrançois Tigeot 		 * If we'd want to change locking order to
502*5f38e86dSFrançois Tigeot 		 * mmap_sem -> bo::reserve, we'd use a blocking reserve here
503*5f38e86dSFrançois Tigeot 		 * instead of retrying the fault...
504*5f38e86dSFrançois Tigeot 		 */
505*5f38e86dSFrançois Tigeot #ifndef __DragonFly__
506*5f38e86dSFrançois Tigeot 		return VM_FAULT_NOPAGE;
507*5f38e86dSFrançois Tigeot #else
508*5f38e86dSFrançois Tigeot 		retval = VM_PAGER_ERROR;
509*5f38e86dSFrançois Tigeot 		VM_OBJECT_LOCK(vm_obj);
510*5f38e86dSFrançois Tigeot 		goto out_unlock2;
511*5f38e86dSFrançois Tigeot #endif
512*5f38e86dSFrançois Tigeot 	}
513*5f38e86dSFrançois Tigeot 
514a34b4168SMatthew Dillon 	if (bdev->driver->fault_reserve_notify) {
515a34b4168SMatthew Dillon 		ret = bdev->driver->fault_reserve_notify(bo);
516a34b4168SMatthew Dillon 		switch (ret) {
517a34b4168SMatthew Dillon 		case 0:
518a34b4168SMatthew Dillon 			break;
519a34b4168SMatthew Dillon 		case -EBUSY:
520a34b4168SMatthew Dillon 			lwkt_yield();
521a34b4168SMatthew Dillon 			/* fall through */
522797013cfSFrançois Tigeot 		case -ERESTARTSYS:
5235718399fSFrançois Tigeot 		case -EINTR:
524a34b4168SMatthew Dillon 			retval = VM_PAGER_ERROR;
525a34b4168SMatthew Dillon 			goto out_unlock;
5265718399fSFrançois Tigeot 		default:
5275718399fSFrançois Tigeot 			retval = VM_PAGER_ERROR;
5285718399fSFrançois Tigeot 			goto out_unlock;
5295718399fSFrançois Tigeot 		}
5305718399fSFrançois Tigeot 	}
5315718399fSFrançois Tigeot 
5325718399fSFrançois Tigeot 	/*
5335718399fSFrançois Tigeot 	 * Wait for buffer data in transit, due to a pipelined
5345718399fSFrançois Tigeot 	 * move.
5355718399fSFrançois Tigeot 	 */
536*5f38e86dSFrançois Tigeot 	ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
5375718399fSFrançois Tigeot 	if (unlikely(ret != 0)) {
538*5f38e86dSFrançois Tigeot 		retval = ret;
539*5f38e86dSFrançois Tigeot #ifdef __DragonFly__
5405718399fSFrançois Tigeot 		retval = VM_PAGER_ERROR;
541*5f38e86dSFrançois Tigeot #endif
5425718399fSFrançois Tigeot 		goto out_unlock;
5435718399fSFrançois Tigeot 	}
5445718399fSFrançois Tigeot 
5455718399fSFrançois Tigeot 	ret = ttm_mem_io_lock(man, true);
5465718399fSFrançois Tigeot 	if (unlikely(ret != 0)) {
5475718399fSFrançois Tigeot 		retval = VM_PAGER_ERROR;
5485718399fSFrançois Tigeot 		goto out_unlock;
5495718399fSFrançois Tigeot 	}
5505718399fSFrançois Tigeot 	ret = ttm_mem_io_reserve_vm(bo);
5515718399fSFrançois Tigeot 	if (unlikely(ret != 0)) {
5525718399fSFrançois Tigeot 		retval = VM_PAGER_ERROR;
5535718399fSFrançois Tigeot 		goto out_io_unlock;
5545718399fSFrançois Tigeot 	}
555a34b4168SMatthew Dillon 	if (unlikely(OFF_TO_IDX(offset) >= bo->num_pages)) {
556a34b4168SMatthew Dillon 		retval = VM_PAGER_ERROR;
557a34b4168SMatthew Dillon 		goto out_io_unlock;
558a34b4168SMatthew Dillon 	}
5595718399fSFrançois Tigeot 
5605718399fSFrançois Tigeot 	/*
56107cb5299SMatthew Dillon 	 * Lookup the real page.
56207cb5299SMatthew Dillon 	 *
5635718399fSFrançois Tigeot 	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
5645718399fSFrançois Tigeot 	 * since the mmap_sem is only held in read mode. However, we
5655718399fSFrançois Tigeot 	 * modify only the caching bits of vma->vm_page_prot and
5665718399fSFrançois Tigeot 	 * consider those bits protected by
5675718399fSFrançois Tigeot 	 * the bo->mutex, as we should be the only writers.
5685718399fSFrançois Tigeot 	 * There shouldn't really be any readers of these bits except
5695718399fSFrançois Tigeot 	 * within vm_insert_mixed()? fork?
5705718399fSFrançois Tigeot 	 *
5715718399fSFrançois Tigeot 	 * TODO: Add a list of vmas to the bo, and change the
5725718399fSFrançois Tigeot 	 * vma->vm_page_prot when the object changes caching policy, with
5735718399fSFrançois Tigeot 	 * the correct locks held.
5745718399fSFrançois Tigeot 	 */
575*5f38e86dSFrançois Tigeot 
576*5f38e86dSFrançois Tigeot 	/*
577*5f38e86dSFrançois Tigeot 	 * Make a local vma copy to modify the page_prot member
578*5f38e86dSFrançois Tigeot 	 * and vm_flags if necessary. The vma parameter is protected
579*5f38e86dSFrançois Tigeot 	 * by mmap_sem in write mode.
580*5f38e86dSFrançois Tigeot 	 */
581*5f38e86dSFrançois Tigeot 	cvma = *vma;
582*5f38e86dSFrançois Tigeot #if 0
583*5f38e86dSFrançois Tigeot 	cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);
584*5f38e86dSFrançois Tigeot #else
585*5f38e86dSFrançois Tigeot 	cvma.vm_page_prot = 0;
586*5f38e86dSFrançois Tigeot #endif
587*5f38e86dSFrançois Tigeot 
5885718399fSFrançois Tigeot 	if (bo->mem.bus.is_iomem) {
589*5f38e86dSFrançois Tigeot #ifdef __DragonFly__
5905718399fSFrançois Tigeot 		m = vm_phys_fictitious_to_vm_page(bo->mem.bus.base +
5915718399fSFrançois Tigeot 						  bo->mem.bus.offset + offset);
5924f125aeaSFrançois Tigeot 		pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement, 0));
593*5f38e86dSFrançois Tigeot #endif
594*5f38e86dSFrançois Tigeot 		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
595*5f38e86dSFrançois Tigeot 						cvma.vm_page_prot);
5965718399fSFrançois Tigeot 	} else {
5975718399fSFrançois Tigeot 		ttm = bo->ttm;
598*5f38e86dSFrançois Tigeot 		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
599*5f38e86dSFrançois Tigeot 						cvma.vm_page_prot);
600*5f38e86dSFrançois Tigeot 
601*5f38e86dSFrançois Tigeot 		/* Allocate all page at once, most common usage */
602a34b4168SMatthew Dillon 		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
603a34b4168SMatthew Dillon 			retval = VM_PAGER_ERROR;
604a34b4168SMatthew Dillon 			goto out_io_unlock;
605a34b4168SMatthew Dillon 		}
606a34b4168SMatthew Dillon 
607f0bba3d1SFrançois Tigeot 		m = (struct vm_page *)ttm->pages[OFF_TO_IDX(offset)];
6085718399fSFrançois Tigeot 		if (unlikely(!m)) {
6095718399fSFrançois Tigeot 			retval = VM_PAGER_ERROR;
6105718399fSFrançois Tigeot 			goto out_io_unlock;
6115718399fSFrançois Tigeot 		}
6125718399fSFrançois Tigeot 		pmap_page_set_memattr(m,
6135718399fSFrançois Tigeot 		    (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
6144f125aeaSFrançois Tigeot 		    VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement, 0));
6155718399fSFrançois Tigeot 	}
6165718399fSFrançois Tigeot 
61704b45e6fSzrj 	VM_OBJECT_LOCK(vm_obj);
618a34b4168SMatthew Dillon 
619a34b4168SMatthew Dillon 	if (vm_page_busy_try(m, FALSE)) {
620a34b4168SMatthew Dillon 		kprintf("r");
621a34b4168SMatthew Dillon 		vm_page_sleep_busy(m, FALSE, "ttmvmf");
6225718399fSFrançois Tigeot 		ttm_mem_io_unlock(man);
6235718399fSFrançois Tigeot 		ttm_bo_unreserve(bo);
6245718399fSFrançois Tigeot 		goto retry;
6255718399fSFrançois Tigeot 	}
626a34b4168SMatthew Dillon 
	/*
	 * We want our fake page in the VM object, not the page the OS
	 * allocated for us as a placeholder.
	 */
6315718399fSFrançois Tigeot 	m->valid = VM_PAGE_BITS_ALL;
6325718399fSFrançois Tigeot 	*mres = m;
63307cb5299SMatthew Dillon 
63407cb5299SMatthew Dillon 	/*
63507cb5299SMatthew Dillon 	 * Insert the page into the object if not already inserted.
63607cb5299SMatthew Dillon 	 */
637a34b4168SMatthew Dillon 	if (m->object) {
63807cb5299SMatthew Dillon 		if (m->object != vm_obj || m->pindex != OFF_TO_IDX(offset)) {
639a34b4168SMatthew Dillon 			retval = VM_PAGER_ERROR;
640a34b4168SMatthew Dillon 			kprintf("ttm_bo_vm_fault_dfly: m(%p) already inserted "
641a34b4168SMatthew Dillon 				"in obj %p, attempt obj %p\n",
642a34b4168SMatthew Dillon 				m, m->object, vm_obj);
643a34b4168SMatthew Dillon 			while (drm_unstall == 0) {
644a34b4168SMatthew Dillon 				tsleep(&retval, 0, "DEBUG", hz/10);
6455718399fSFrançois Tigeot 			}
646a34b4168SMatthew Dillon 			if (drm_unstall > 0)
647a34b4168SMatthew Dillon 				--drm_unstall;
648a34b4168SMatthew Dillon 		}
649a34b4168SMatthew Dillon 	} else {
650a34b4168SMatthew Dillon 		mtmp = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
65107cb5299SMatthew Dillon 		if (mtmp == NULL) {
652a34b4168SMatthew Dillon 			vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
65307cb5299SMatthew Dillon 		} else {
65407cb5299SMatthew Dillon 			panic("inconsistent insert bo %p m %p mtmp %p "
65507cb5299SMatthew Dillon 			      "offset %jx",
65607cb5299SMatthew Dillon 			      bo, m, mtmp,
65707cb5299SMatthew Dillon 			      (uintmax_t)offset);
658a34b4168SMatthew Dillon 		}
65907cb5299SMatthew Dillon 	}
6605718399fSFrançois Tigeot 
6615718399fSFrançois Tigeot out_io_unlock1:
6625718399fSFrançois Tigeot 	ttm_mem_io_unlock(man);
6635718399fSFrançois Tigeot out_unlock1:
6645718399fSFrançois Tigeot 	ttm_bo_unreserve(bo);
665a34b4168SMatthew Dillon out_unlock2:
6665718399fSFrançois Tigeot 	vm_object_pip_wakeup(vm_obj);
6675718399fSFrançois Tigeot 	return (retval);
6685718399fSFrançois Tigeot 
6695718399fSFrançois Tigeot out_io_unlock:
67004b45e6fSzrj 	VM_OBJECT_LOCK(vm_obj);
6715718399fSFrançois Tigeot 	goto out_io_unlock1;
6725718399fSFrançois Tigeot 
6735718399fSFrançois Tigeot out_unlock:
67404b45e6fSzrj 	VM_OBJECT_LOCK(vm_obj);
6755718399fSFrançois Tigeot 	goto out_unlock1;
6765718399fSFrançois Tigeot }
6775718399fSFrançois Tigeot 
6785718399fSFrançois Tigeot static int
6795718399fSFrançois Tigeot ttm_bo_vm_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
6805718399fSFrançois Tigeot 	       vm_ooffset_t foff, struct ucred *cred, u_short *color)
6815718399fSFrançois Tigeot {
6825718399fSFrançois Tigeot 
6835718399fSFrançois Tigeot 	/*
6846f486c69SFrançois Tigeot 	 * On Linux, a reference to the buffer object is acquired here.
6856f486c69SFrançois Tigeot 	 * The reason is that this function is not called when the
6866f486c69SFrançois Tigeot 	 * mmap() is initialized, but only when a process forks for
6876f486c69SFrançois Tigeot 	 * instance. Therefore on Linux, the reference on the bo is
6886f486c69SFrançois Tigeot 	 * acquired either in ttm_bo_mmap() or ttm_bo_vm_open(). It's
6896f486c69SFrançois Tigeot 	 * then released in ttm_bo_vm_close().
6906f486c69SFrançois Tigeot 	 *
6916f486c69SFrançois Tigeot 	 * Here, this function is called during mmap() intialization.
6926f486c69SFrançois Tigeot 	 * Thus, the reference acquired in ttm_bo_mmap_single() is
6936f486c69SFrançois Tigeot 	 * sufficient.
6945718399fSFrançois Tigeot 	 */
6955718399fSFrançois Tigeot 	*color = 0;
6965718399fSFrançois Tigeot 	return (0);
6975718399fSFrançois Tigeot }
6985718399fSFrançois Tigeot 
static void
ttm_bo_vm_dtor(void *handle)
{
	struct ttm_buffer_object *bo;

	/*
	 * Pager destructor: drop the buffer-object reference taken in
	 * ttm_bo_mmap_single() when the VM object goes away.
	 */
	bo = handle;
	ttm_bo_unref(&bo);
}
7065718399fSFrançois Tigeot 
/*
 * DragonFly cdev pager hooks backing TTM buffer-object mappings,
 * installed by ttm_bo_mmap_single() via cdev_pager_allocate().
 */
static struct cdev_pager_ops ttm_pager_ops = {
	.cdev_pg_fault = ttm_bo_vm_fault_dfly,	/* page-fault handler */
	.cdev_pg_ctor = ttm_bo_vm_ctor,		/* runs at mmap() setup */
	.cdev_pg_dtor = ttm_bo_vm_dtor		/* drops the bo reference */
};
7125718399fSFrançois Tigeot 
713a34b4168SMatthew Dillon /*
714a34b4168SMatthew Dillon  * Called from drm_drv.c
715a34b4168SMatthew Dillon  *
716a34b4168SMatthew Dillon  * *offset - object offset in bytes
717a34b4168SMatthew Dillon  * size	   - map size in bytes
718a34b4168SMatthew Dillon  *
719a34b4168SMatthew Dillon  * We setup a dummy vma (for now) and call ttm_bo_mmap().  Then we setup
720a34b4168SMatthew Dillon  * our own VM object and dfly ops.  Note that the ops supplied by
721a34b4168SMatthew Dillon  * ttm_bo_mmap() are not currently used.
722a34b4168SMatthew Dillon  */
7235718399fSFrançois Tigeot int
724a34b4168SMatthew Dillon ttm_bo_mmap_single(struct drm_device *dev, vm_ooffset_t *offset,
725a34b4168SMatthew Dillon 		   vm_size_t size, struct vm_object **obj_res, int nprot)
7265718399fSFrançois Tigeot {
727a34b4168SMatthew Dillon 	struct ttm_bo_device *bdev = dev->drm_ttm_bdev;
7285718399fSFrançois Tigeot 	struct ttm_buffer_object *bo;
7295718399fSFrançois Tigeot 	struct vm_object *vm_obj;
730a34b4168SMatthew Dillon 	struct vm_area_struct vma;
7315718399fSFrançois Tigeot 	int ret;
7325718399fSFrançois Tigeot 
733f6201ebfSMatthew Dillon 	*obj_res = NULL;
734f6201ebfSMatthew Dillon 
735a34b4168SMatthew Dillon 	bzero(&vma, sizeof(vma));
736a34b4168SMatthew Dillon 	vma.vm_start = *offset;		/* bdev-relative offset */
737a34b4168SMatthew Dillon 	vma.vm_end = vma.vm_start + size;
738a34b4168SMatthew Dillon 	vma.vm_pgoff = vma.vm_start >> PAGE_SHIFT;
739a34b4168SMatthew Dillon 	/* vma.vm_page_prot */
740a34b4168SMatthew Dillon 	/* vma.vm_flags */
7415718399fSFrançois Tigeot 
7425718399fSFrançois Tigeot 	/*
743a34b4168SMatthew Dillon 	 * Call the linux-ported code to do the work, and on success just
744a34b4168SMatthew Dillon 	 * setup our own VM object and ignore what the linux code did other
745a34b4168SMatthew Dillon 	 * then supplying us the 'bo'.
7465718399fSFrançois Tigeot 	 */
747a34b4168SMatthew Dillon 	ret = ttm_bo_mmap(NULL, &vma, bdev);
748a34b4168SMatthew Dillon 
749a34b4168SMatthew Dillon 	if (ret == 0) {
750a34b4168SMatthew Dillon 		bo = vma.vm_private_data;
751a34b4168SMatthew Dillon 		vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE,
752a34b4168SMatthew Dillon 					     &ttm_pager_ops,
753a34b4168SMatthew Dillon 					     size, nprot, 0,
754a34b4168SMatthew Dillon 					     curthread->td_ucred);
755a34b4168SMatthew Dillon 		if (vm_obj) {
7565718399fSFrançois Tigeot 			*obj_res = vm_obj;
757a34b4168SMatthew Dillon 			*offset = 0;		/* object-relative offset */
758a34b4168SMatthew Dillon 		} else {
7595718399fSFrançois Tigeot 			ttm_bo_unref(&bo);
760a34b4168SMatthew Dillon 			ret = EINVAL;
761a34b4168SMatthew Dillon 		}
762a34b4168SMatthew Dillon 	}
7635718399fSFrançois Tigeot 	return ret;
7645718399fSFrançois Tigeot }
765a34b4168SMatthew Dillon EXPORT_SYMBOL(ttm_bo_mmap_single);
7665718399fSFrançois Tigeot 
76743e748b9SFrançois Tigeot #ifdef __DragonFly__
76843e748b9SFrançois Tigeot void ttm_bo_release_mmap(struct ttm_buffer_object *bo);
76943e748b9SFrançois Tigeot 
void
ttm_bo_release_mmap(struct ttm_buffer_object *bo)
{
	vm_object_t vm_obj;
	vm_page_t m;
	int i;

	/*
	 * Tear down the DragonFly pager VM object (if any) still backing
	 * this buffer object: every resident page is removed so userland
	 * mappings are invalidated before the bo is destroyed.
	 */
	vm_obj = cdev_pager_lookup(bo);
	if (vm_obj == NULL)
		return;

	VM_OBJECT_LOCK(vm_obj);
	for (i = 0; i < bo->num_pages; i++) {
		/*
		 * Busy-wait each page so it cannot be faulted in
		 * concurrently while we pull it from the object.
		 */
		m = vm_page_lookup_busy_wait(vm_obj, i, TRUE, "ttm_unm");
		if (m == NULL)
			continue;
		cdev_pager_free_page(vm_obj, m);
	}
	VM_OBJECT_UNLOCK(vm_obj);

	/* Drop the reference obtained via cdev_pager_lookup() above. */
	vm_object_deallocate(vm_obj);
}
79243e748b9SFrançois Tigeot #endif
7936f486c69SFrançois Tigeot 
#if 0
/*
 * Linux implementation kept for reference only (compiled out);
 * DragonFly maps buffer objects through ttm_bo_mmap_single() and the
 * cdev pager ops instead.
 */
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);
#endif
807