xref: /netbsd/sys/external/bsd/drm2/ttm/ttm_bo_vm.c (revision f24580e3)
1*f24580e3Sriastradh /*	$NetBSD: ttm_bo_vm.c,v 1.22 2022/07/21 08:07:56 riastradh Exp $	*/
27b7df45fSriastradh 
37b7df45fSriastradh /*-
47b7df45fSriastradh  * Copyright (c) 2014 The NetBSD Foundation, Inc.
57b7df45fSriastradh  * All rights reserved.
67b7df45fSriastradh  *
77b7df45fSriastradh  * This code is derived from software contributed to The NetBSD Foundation
87b7df45fSriastradh  * by Taylor R. Campbell.
97b7df45fSriastradh  *
107b7df45fSriastradh  * Redistribution and use in source and binary forms, with or without
117b7df45fSriastradh  * modification, are permitted provided that the following conditions
127b7df45fSriastradh  * are met:
137b7df45fSriastradh  * 1. Redistributions of source code must retain the above copyright
147b7df45fSriastradh  *    notice, this list of conditions and the following disclaimer.
157b7df45fSriastradh  * 2. Redistributions in binary form must reproduce the above copyright
167b7df45fSriastradh  *    notice, this list of conditions and the following disclaimer in the
177b7df45fSriastradh  *    documentation and/or other materials provided with the distribution.
187b7df45fSriastradh  *
197b7df45fSriastradh  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
207b7df45fSriastradh  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
217b7df45fSriastradh  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
227b7df45fSriastradh  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
237b7df45fSriastradh  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
247b7df45fSriastradh  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
257b7df45fSriastradh  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
267b7df45fSriastradh  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
277b7df45fSriastradh  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
287b7df45fSriastradh  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
297b7df45fSriastradh  * POSSIBILITY OF SUCH DAMAGE.
307b7df45fSriastradh  */
317b7df45fSriastradh 
327b7df45fSriastradh #include <sys/cdefs.h>
33*f24580e3Sriastradh __KERNEL_RCSID(0, "$NetBSD: ttm_bo_vm.c,v 1.22 2022/07/21 08:07:56 riastradh Exp $");
347b7df45fSriastradh 
357b7df45fSriastradh #include <sys/types.h>
367b7df45fSriastradh 
377b7df45fSriastradh #include <uvm/uvm.h>
387b7df45fSriastradh #include <uvm/uvm_extern.h>
397b7df45fSriastradh #include <uvm/uvm_fault.h>
407b7df45fSriastradh 
417b7df45fSriastradh #include <linux/bitops.h>
427b7df45fSriastradh 
437b7df45fSriastradh #include <drm/drm_vma_manager.h>
447b7df45fSriastradh 
457b7df45fSriastradh #include <ttm/ttm_bo_driver.h>
467b7df45fSriastradh 
477b7df45fSriastradh static int	ttm_bo_uvm_fault_idle(struct ttm_buffer_object *,
48fde20ad8Sriastradh 		    struct uvm_faultinfo *);
497b7df45fSriastradh static int	ttm_bo_uvm_lookup(struct ttm_bo_device *, unsigned long,
507b7df45fSriastradh 		    unsigned long, struct ttm_buffer_object **);
517b7df45fSriastradh 
527b7df45fSriastradh void
ttm_bo_uvm_reference(struct uvm_object * uobj)537b7df45fSriastradh ttm_bo_uvm_reference(struct uvm_object *uobj)
547b7df45fSriastradh {
557b7df45fSriastradh 	struct ttm_buffer_object *const bo = container_of(uobj,
567b7df45fSriastradh 	    struct ttm_buffer_object, uvmobj);
577b7df45fSriastradh 
58e133e838Sriastradh 	(void)ttm_bo_get(bo);
597b7df45fSriastradh }
607b7df45fSriastradh 
617b7df45fSriastradh void
ttm_bo_uvm_detach(struct uvm_object * uobj)627b7df45fSriastradh ttm_bo_uvm_detach(struct uvm_object *uobj)
637b7df45fSriastradh {
647b7df45fSriastradh 	struct ttm_buffer_object *bo = container_of(uobj,
657b7df45fSriastradh 	    struct ttm_buffer_object, uvmobj);
667b7df45fSriastradh 
67e133e838Sriastradh 	ttm_bo_put(bo);
687b7df45fSriastradh }
697b7df45fSriastradh 
/*
 * ttm_bo_uvm_fault(ufi, vaddr, pps, npages, centeridx, access_type, flags)
 *
 *	uvm fault handler for mappings of ttm buffer objects: resolve a
 *	fault at vaddr by entering mappings for up to npages pages of
 *	the buffer object directly into the faulting pmap.
 *
 *	Returns 0 on success, ERESTART if the caller must retry the
 *	fault (buffer was reserved by someone else), or another
 *	positive NetBSD error code on failure (note the `return -ret'
 *	idiom: ret holds Linux-style negative errnos internally).
 *
 *	Called by uvm with uobj->vmobjlock held; we drop it immediately
 *	and rely on the bo reservation for exclusion instead.
 */
int
ttm_bo_uvm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr,
    struct vm_page **pps, int npages, int centeridx, vm_prot_t access_type,
    int flags)
{
	struct uvm_object *const uobj = ufi->entry->object.uvm_obj;
	struct ttm_buffer_object *const bo = container_of(uobj,
	    struct ttm_buffer_object, uvmobj);
	struct ttm_bo_device *const bdev = bo->bdev;
	struct ttm_mem_type_manager *man =
	    &bdev->man[bo->mem.mem_type];
	union {
		bus_addr_t base;	/* used when bo->mem.bus.is_iomem */
		struct ttm_tt *ttm;	/* used for system-memory (ttm) pages */
	} u;
	size_t size __diagused;	/* bytes mappable in bo; KASSERTs only */
	voff_t uoffset;		/* offset in bytes into bo */
	unsigned startpage;	/* offset in pages into bo */
	unsigned i;
	vm_prot_t vm_prot;	/* VM_PROT_* */
	pgprot_t pgprot;	/* VM_PROT_* | PMAP_* cacheability flags */
	int ret;

	/* Thanks, uvm, but we don't need this lock.  */
	rw_exit(uobj->vmobjlock);

	/* Copy-on-write mappings make no sense for the graphics aperture.  */
	if (UVM_ET_ISCOPYONWRITE(ufi->entry)) {
		ret = -EIO;
		goto out0;
	}

	/* Try to lock the buffer.  */
	ret = ttm_bo_reserve(bo, true, true, NULL);
	if (ret) {
		if (ret != -EBUSY)
			goto out0;
		/*
		 * It's currently locked.  Unlock the fault, wait for
		 * it, and start over.
		 */
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, NULL);
		/*
		 * Lock/unlock merely waits for the current holder to
		 * release the reservation; ERESTART retries the fault.
		 */
		if (!dma_resv_lock_interruptible(bo->base.resv, NULL))
			dma_resv_unlock(bo->base.resv);

		return ERESTART;
	}

	/* drm prime buffers are not mappable.  XXX Catch this earlier?  */
	if (bo->ttm && ISSET(bo->ttm->page_flags, TTM_PAGE_FLAG_SG)) {
		ret = -EINVAL;
		goto out1;
	}

	/* Notify the driver of a fault if it wants.  */
	if (bdev->driver->fault_reserve_notify) {
		ret = (*bdev->driver->fault_reserve_notify)(bo);
		if (ret) {
			if (ret == -ERESTART)
				ret = -EIO;
			goto out1;
		}
	}

	/* Wait for any pending move of the buffer to settle.  */
	ret = ttm_bo_uvm_fault_idle(bo, ufi);
	if (ret) {
		KASSERT(ret == -ERESTART || ret == -EFAULT);
		/* ttm_bo_uvm_fault_idle calls uvmfault_unlockall for us.  */
		ttm_bo_unreserve(bo);
		/* XXX errno Linux->NetBSD */
		return -ret;
	}

	/* Lock the memory-type manager and reserve the bus space.  */
	ret = ttm_mem_io_lock(man, true);
	if (ret) {
		ret = -EIO;
		goto out1;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (ret) {
		ret = -EIO;
		goto out2;
	}

	/*
	 * Determine where the pages live (I/O memory vs ttm system
	 * pages), the mappable size, and the cacheability flags.
	 */
	vm_prot = ufi->entry->protection;
	if (bo->mem.bus.is_iomem) {
		u.base = (bo->mem.bus.base + bo->mem.bus.offset);
		size = bo->mem.bus.size;
		pgprot = ttm_io_prot(bo->mem.placement, vm_prot);
	} else {
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false,
			.flags = TTM_OPT_FLAG_FORCE_ALLOC,
		};
		u.ttm = bo->ttm;
		size = (size_t)bo->ttm->num_pages << PAGE_SHIFT;
		if (ISSET(bo->mem.placement, TTM_PL_FLAG_CACHED))
			pgprot = vm_prot;
		else
			pgprot = ttm_io_prot(bo->mem.placement, vm_prot);
		/* Make sure backing pages are allocated and wired.  */
		if (ttm_tt_populate(u.ttm, &ctx)) {
			ret = -ENOMEM;
			goto out2;
		}
	}

	/* Sanity-check that the faulting range lies within the bo.  */
	KASSERT(ufi->entry->start <= vaddr);
	KASSERT((ufi->entry->offset & (PAGE_SIZE - 1)) == 0);
	KASSERT(ufi->entry->offset <= size);
	KASSERT((vaddr - ufi->entry->start) <= (size - ufi->entry->offset));
	KASSERTMSG(((size_t)npages << PAGE_SHIFT <=
		((size - ufi->entry->offset) - (vaddr - ufi->entry->start))),
	    "vaddr=%jx npages=%d bo=%p is_iomem=%d size=%zu"
	    " start=%jx offset=%jx",
	    (uintmax_t)vaddr, npages, bo, (int)bo->mem.bus.is_iomem, size,
	    (uintmax_t)ufi->entry->start, (uintmax_t)ufi->entry->offset);
	uoffset = (ufi->entry->offset + (vaddr - ufi->entry->start));
	startpage = (uoffset >> PAGE_SHIFT);
	/* Enter a pmap mapping for each requested page.  */
	for (i = 0; i < npages; i++) {
		paddr_t paddr;

		/* XXX PGO_ALLPAGES?  */
		if (pps[i] == PGO_DONTCARE)
			continue;
		/* Resolve the physical address of page startpage + i.  */
		if (!bo->mem.bus.is_iomem) {
			paddr = page_to_phys(u.ttm->pages[startpage + i]);
		} else if (bdev->driver->io_mem_pfn) {
			paddr = (paddr_t)(*bdev->driver->io_mem_pfn)(bo,
			    startpage + i) << PAGE_SHIFT;
		} else {
			const paddr_t cookie = bus_space_mmap(bdev->memt,
			    u.base, (off_t)(startpage + i) << PAGE_SHIFT,
			    vm_prot, 0);

			paddr = pmap_phys_address(cookie);
		}
		/* PMAP_CANFAIL: report ENOMEM rather than panic.  */
		ret = -pmap_enter(ufi->orig_map->pmap, vaddr + i*PAGE_SIZE,
		    paddr, vm_prot, (PMAP_CANFAIL | pgprot));
		if (ret)
			goto out3;
	}

	/* Success falls through: ret is 0 after the loop.  */
out3:	pmap_update(ufi->orig_map->pmap);
out2:	ttm_mem_io_unlock(man);
out1:	ttm_bo_unreserve(bo);
out0:	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, NULL);
	/* XXX errno Linux->NetBSD */
	return -ret;
}
2207b7df45fSriastradh 
/*
 * ttm_bo_uvm_fault_idle(bo, ufi)
 *
 *	Wait for any pending move of bo (bo->moving fence) to finish
 *	before allowing a fault to map it.
 *
 *	Returns:
 *	0		no move pending, or it had already signalled;
 *			the fault remains locked and may proceed.
 *	-ERESTART	we slept waiting for the move; the fault has
 *			been unlocked with uvmfault_unlockall and the
 *			caller must retry.
 *	-EFAULT		the interruptible wait failed; the fault has
 *			likewise been unlocked.
 *
 *	In all cases where bo->moving was set it is released and
 *	cleared before return.
 */
static int
ttm_bo_uvm_fault_idle(struct ttm_buffer_object *bo, struct uvm_faultinfo *ufi)
{
	int ret = 0;

	/* Common case: no move in flight.  */
	if (__predict_true(!bo->moving))
		goto out0;

	/* Move already completed: just drop the fence, no unlock.  */
	if (dma_fence_is_signaled(bo->moving))
		goto out1;

	if (dma_fence_wait(bo->moving, true) != 0) {
		ret = -EFAULT;
		goto out2;
	}

	/* We slept; the fault must be retried from scratch.  */
	ret = -ERESTART;
out2:	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, NULL);
out1:	dma_fence_put(bo->moving);
	bo->moving = NULL;
out0:	return ret;
}
2437b7df45fSriastradh 
/*
 * ttm_bo_mmap_object(bdev, offset, size, prot, uobjp, uoffsetp, file)
 *
 *	Look up the ttm buffer object whose mmap space under bdev
 *	covers the page-aligned byte range [offset, offset + size),
 *	verify that file is allowed to access it, and on success return
 *	a referenced uvm object in *uobjp and the byte offset into it
 *	in *uoffsetp.
 *
 *	Returns 0 on success or a negative Linux error code on failure
 *	(-ENOENT: no such object; -EPERM: driver cannot verify access;
 *	or whatever the driver's verify_access returns).
 *
 *	On success the caller owns a reference to the bo (taken by
 *	ttm_bo_uvm_lookup) which is released via the uvm object's
 *	detach op.
 */
int
ttm_bo_mmap_object(struct ttm_bo_device *bdev, off_t offset, size_t size,
    vm_prot_t prot, struct uvm_object **uobjp, voff_t *uoffsetp,
    struct file *file)
{
	const unsigned long startpage = (offset >> PAGE_SHIFT);
	const unsigned long npages = (size >> PAGE_SHIFT);
	struct ttm_buffer_object *bo;
	int ret;

	KASSERT(0 == (offset & (PAGE_SIZE - 1)));
	KASSERT(0 == (size & (PAGE_SIZE - 1)));

	ret = ttm_bo_uvm_lookup(bdev, startpage, npages, &bo);
	if (ret)
		goto fail0;
	/*
	 * The requested page range must lie entirely within the bo's
	 * vma node: start within the node, length no larger than the
	 * node, and the combination must not run off the end.
	 */
	KASSERTMSG((drm_vma_node_start(&bo->base.vma_node) <= startpage),
	    "mapping npages=0x%jx @ pfn=0x%jx"
	    " from vma npages=0x%jx @ pfn=0x%jx",
	    (uintmax_t)npages,
	    (uintmax_t)startpage,
	    (uintmax_t)drm_vma_node_size(&bo->base.vma_node),
	    (uintmax_t)drm_vma_node_start(&bo->base.vma_node));
	KASSERTMSG((npages <= drm_vma_node_size(&bo->base.vma_node)),
	    "mapping npages=0x%jx @ pfn=0x%jx"
	    " from vma npages=0x%jx @ pfn=0x%jx",
	    (uintmax_t)npages,
	    (uintmax_t)startpage,
	    (uintmax_t)drm_vma_node_size(&bo->base.vma_node),
	    (uintmax_t)drm_vma_node_start(&bo->base.vma_node));
	KASSERTMSG(((startpage - drm_vma_node_start(&bo->base.vma_node))
		<= (drm_vma_node_size(&bo->base.vma_node) - npages)),
	    "mapping npages=0x%jx @ pfn=0x%jx"
	    " from vma npages=0x%jx @ pfn=0x%jx",
	    (uintmax_t)npages,
	    (uintmax_t)startpage,
	    (uintmax_t)drm_vma_node_size(&bo->base.vma_node),
	    (uintmax_t)drm_vma_node_start(&bo->base.vma_node));

	/* XXX Just assert this?  */
	if (__predict_false(bdev->driver->verify_access == NULL)) {
		ret = -EPERM;
		goto fail1;
	}
	ret = (*bdev->driver->verify_access)(bo, file);
	if (ret)
		goto fail1;

	/* Success!  */
	*uobjp = &bo->uvmobj;
	/* Offset into the uvm object = offset past the vma node start.  */
	*uoffsetp = (offset -
	    ((off_t)drm_vma_node_start(&bo->base.vma_node) << PAGE_SHIFT));
	return 0;

fail1:	ttm_bo_put(bo);
fail0:	KASSERT(ret);
	return ret;
}
3027b7df45fSriastradh 
3037b7df45fSriastradh static int
ttm_bo_uvm_lookup(struct ttm_bo_device * bdev,unsigned long startpage,unsigned long npages,struct ttm_buffer_object ** bop)3047b7df45fSriastradh ttm_bo_uvm_lookup(struct ttm_bo_device *bdev, unsigned long startpage,
3057b7df45fSriastradh     unsigned long npages, struct ttm_buffer_object **bop)
3067b7df45fSriastradh {
3077b7df45fSriastradh 	struct ttm_buffer_object *bo = NULL;
3087b7df45fSriastradh 	struct drm_vma_offset_node *node;
3097b7df45fSriastradh 
310e133e838Sriastradh 	drm_vma_offset_lock_lookup(bdev->vma_manager);
311e133e838Sriastradh 	node = drm_vma_offset_lookup_locked(bdev->vma_manager, startpage,
3127b7df45fSriastradh 	    npages);
3137b7df45fSriastradh 	if (node != NULL) {
314e133e838Sriastradh 		bo = container_of(node, struct ttm_buffer_object, base.vma_node);
3157b7df45fSriastradh 		if (!kref_get_unless_zero(&bo->kref))
3167b7df45fSriastradh 			bo = NULL;
3177b7df45fSriastradh 	}
318e133e838Sriastradh 	drm_vma_offset_unlock_lookup(bdev->vma_manager);
3197b7df45fSriastradh 
3207b7df45fSriastradh 	if (bo == NULL)
3217b7df45fSriastradh 		return -ENOENT;
3227b7df45fSriastradh 
3237b7df45fSriastradh 	*bop = bo;
3247b7df45fSriastradh 	return 0;
3257b7df45fSriastradh }
326