/*	$NetBSD: ttm_bo_vm.c,v 1.22 2022/07/21 08:07:56 riastradh Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ttm_bo_vm.c,v 1.22 2022/07/21 08:07:56 riastradh Exp $");

#include <sys/types.h>

#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>
#include <uvm/uvm_fault.h>

#include <linux/bitops.h>

#include <drm/drm_vma_manager.h>

#include <ttm/ttm_bo_driver.h>

static int	ttm_bo_uvm_fault_idle(struct ttm_buffer_object *,
		    struct uvm_faultinfo *);
static int	ttm_bo_uvm_lookup(struct ttm_bo_device *, unsigned long,
		    unsigned long, struct ttm_buffer_object **);

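/*
 * ttm_bo_uvm_reference(uobj)
 *
 *	Pager reference operation: acquire an additional reference to
 *	the ttm buffer object underlying the uvm object uobj.
 */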
void
ttm_bo_uvm_reference(struct uvm_object *uobj)
{
	struct ttm_buffer_object *const bo = container_of(uobj,
	    struct ttm_buffer_object, uvmobj);

	(void)ttm_bo_get(bo);
}

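/*
 * ttm_bo_uvm_detach(uobj)
 *
 *	Pager detach operation: release one reference to the ttm buffer
 *	object underlying the uvm object uobj, possibly freeing it.
 */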
void
ttm_bo_uvm_detach(struct uvm_object *uobj)
{
	struct ttm_buffer_object *bo = container_of(uobj,
	    struct ttm_buffer_object, uvmobj);

	ttm_bo_put(bo);
}

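/*
 * ttm_bo_uvm_fault(ufi, vaddr, pps, npages, centeridx, access_type, flags)
 *
 *	uvm fault handler for ttm buffer object mappings: reserve the
 *	buffer, wait for any pending move to finish, and enter mappings
 *	for the faulted pages directly with pmap_enter.  Returns 0 on
 *	success, ERESTART if the fault must be retried, or another
 *	NetBSD error code on failure.
 */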
int
ttm_bo_uvm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr,
    struct vm_page **pps, int npages, int centeridx, vm_prot_t access_type,
    int flags)
{
	struct uvm_object *const uobj = ufi->entry->object.uvm_obj;
	struct ttm_buffer_object *const bo = container_of(uobj,
	    struct ttm_buffer_object, uvmobj);
	struct ttm_bo_device *const bdev = bo->bdev;
	struct ttm_mem_type_manager *man =
	    &bdev->man[bo->mem.mem_type];
	union {
		bus_addr_t base;
		struct ttm_tt *ttm;
	} u;
	size_t size __diagused;
	voff_t uoffset;		/* offset in bytes into bo */
	unsigned startpage;	/* offset in pages into bo */
	unsigned i;
	vm_prot_t vm_prot;	/* VM_PROT_* */
	pgprot_t pgprot;	/* VM_PROT_* | PMAP_* cacheability flags */
	int ret;

	/* Thanks, uvm, but we don't need this lock.  */
	rw_exit(uobj->vmobjlock);

	/* Copy-on-write mappings make no sense for the graphics aperture.  */
	if (UVM_ET_ISCOPYONWRITE(ufi->entry)) {
		ret = -EIO;
		goto out0;
	}

	/* Try to lock the buffer.  */
	ret = ttm_bo_reserve(bo, true, true, NULL);
	if (ret) {
		if (ret != -EBUSY)
			goto out0;
		/*
		 * It's currently locked.  Unlock the fault, wait for
		 * it, and start over.
		 */
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, NULL);
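		/*
		 * Wait by briefly taking the reservation lock ourselves
		 * and releasing it again; whether that succeeds or is
		 * interrupted, restart the fault from scratch.
		 */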
		if (!dma_resv_lock_interruptible(bo->base.resv, NULL))
			dma_resv_unlock(bo->base.resv);

		return ERESTART;
	}

	/* drm prime buffers are not mappable.  XXX Catch this earlier?  */
	if (bo->ttm && ISSET(bo->ttm->page_flags, TTM_PAGE_FLAG_SG)) {
		ret = -EINVAL;
		goto out1;
	}

	/* Notify the driver of a fault if it wants.  */
	if (bdev->driver->fault_reserve_notify) {
		ret = (*bdev->driver->fault_reserve_notify)(bo);
		if (ret) {
			if (ret == -ERESTART)
				ret = -EIO;
			goto out1;
		}
	}

	ret = ttm_bo_uvm_fault_idle(bo, ufi);
	if (ret) {
		KASSERT(ret == -ERESTART || ret == -EFAULT);
		/* ttm_bo_uvm_fault_idle calls uvmfault_unlockall for us.  */
		ttm_bo_unreserve(bo);
		/* XXX errno Linux->NetBSD */
		return -ret;
	}

	ret = ttm_mem_io_lock(man, true);
	if (ret) {
		ret = -EIO;
		goto out1;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (ret) {
		ret = -EIO;
		goto out2;
	}

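	/*
	 * Determine the protection, cacheability, and source of
	 * physical addresses for the mapping: either a bus space
	 * aperture (iomem) or the ttm page array, populating the
	 * latter as needed.
	 */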
	vm_prot = ufi->entry->protection;
	if (bo->mem.bus.is_iomem) {
		u.base = (bo->mem.bus.base + bo->mem.bus.offset);
		size = bo->mem.bus.size;
		pgprot = ttm_io_prot(bo->mem.placement, vm_prot);
	} else {
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false,
			.flags = TTM_OPT_FLAG_FORCE_ALLOC,
		};
		u.ttm = bo->ttm;
		size = (size_t)bo->ttm->num_pages << PAGE_SHIFT;
		if (ISSET(bo->mem.placement, TTM_PL_FLAG_CACHED))
			pgprot = vm_prot;
		else
			pgprot = ttm_io_prot(bo->mem.placement, vm_prot);
		if (ttm_tt_populate(u.ttm, &ctx)) {
			ret = -ENOMEM;
			goto out2;
		}
	}

	KASSERT(ufi->entry->start <= vaddr);
	KASSERT((ufi->entry->offset & (PAGE_SIZE - 1)) == 0);
	KASSERT(ufi->entry->offset <= size);
	KASSERT((vaddr - ufi->entry->start) <= (size - ufi->entry->offset));
	KASSERTMSG(((size_t)npages << PAGE_SHIFT <=
		((size - ufi->entry->offset) - (vaddr - ufi->entry->start))),
	    "vaddr=%jx npages=%d bo=%p is_iomem=%d size=%zu"
	    " start=%jx offset=%jx",
	    (uintmax_t)vaddr, npages, bo, (int)bo->mem.bus.is_iomem, size,
	    (uintmax_t)ufi->entry->start, (uintmax_t)ufi->entry->offset);
	uoffset = (ufi->entry->offset + (vaddr - ufi->entry->start));
	startpage = (uoffset >> PAGE_SHIFT);
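	/*
	 * Enter each requested page into the faulting pmap.  The
	 * physical address comes from the ttm page array for non-iomem
	 * buffers, from the driver's io_mem_pfn callback if it has
	 * one, or else from bus_space_mmap on the aperture.
	 */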
	for (i = 0; i < npages; i++) {
		paddr_t paddr;

		/* XXX PGO_ALLPAGES?  */
		if (pps[i] == PGO_DONTCARE)
			continue;
		if (!bo->mem.bus.is_iomem) {
			paddr = page_to_phys(u.ttm->pages[startpage + i]);
		} else if (bdev->driver->io_mem_pfn) {
			paddr = (paddr_t)(*bdev->driver->io_mem_pfn)(bo,
			    startpage + i) << PAGE_SHIFT;
		} else {
			const paddr_t cookie = bus_space_mmap(bdev->memt,
			    u.base, (off_t)(startpage + i) << PAGE_SHIFT,
			    vm_prot, 0);

			paddr = pmap_phys_address(cookie);
		}
		ret = -pmap_enter(ufi->orig_map->pmap, vaddr + i*PAGE_SIZE,
		    paddr, vm_prot, (PMAP_CANFAIL | pgprot));
		if (ret)
			goto out3;
	}

out3:	pmap_update(ufi->orig_map->pmap);
out2:	ttm_mem_io_unlock(man);
out1:	ttm_bo_unreserve(bo);
out0:	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, NULL);
	/* XXX errno Linux->NetBSD */
	return -ret;
}

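/*
 * ttm_bo_uvm_fault_idle(bo, ufi)
 *
 *	Wait for any outstanding move of bo to finish, releasing the
 *	fault state if we had to block.  Returns 0 if there is no
 *	pending move or it has already completed, -ERESTART if the
 *	caller must retry the fault, or -EFAULT if the wait failed.
 */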
static int
ttm_bo_uvm_fault_idle(struct ttm_buffer_object *bo, struct uvm_faultinfo *ufi)
{
	int ret = 0;

	if (__predict_true(!bo->moving))
		goto out0;

	if (dma_fence_is_signaled(bo->moving))
		goto out1;

	if (dma_fence_wait(bo->moving, true) != 0) {
		ret = -EFAULT;
		goto out2;
	}

	ret = -ERESTART;
out2:	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, NULL);
out1:	dma_fence_put(bo->moving);
	bo->moving = NULL;
out0:	return ret;
}

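/*
 * ttm_bo_mmap_object(bdev, offset, size, prot, uobjp, uoffsetp, file)
 *
 *	Look up the ttm buffer object at the byte range [offset,
 *	offset + size) in bdev's vma manager, verify that file may
 *	access it, and return a new reference to its uvm object in
 *	*uobjp with the byte offset into it in *uoffsetp.  Returns 0 on
 *	success, or a Linux-style negative error code on failure.
 */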
int
ttm_bo_mmap_object(struct ttm_bo_device *bdev, off_t offset, size_t size,
    vm_prot_t prot, struct uvm_object **uobjp, voff_t *uoffsetp,
    struct file *file)
{
	const unsigned long startpage = (offset >> PAGE_SHIFT);
	const unsigned long npages = (size >> PAGE_SHIFT);
	struct ttm_buffer_object *bo;
	int ret;

	KASSERT(0 == (offset & (PAGE_SIZE - 1)));
	KASSERT(0 == (size & (PAGE_SIZE - 1)));

	ret = ttm_bo_uvm_lookup(bdev, startpage, npages, &bo);
	if (ret)
		goto fail0;
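	/* The requested range must lie within the object's vma node.  */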
	KASSERTMSG((drm_vma_node_start(&bo->base.vma_node) <= startpage),
	    "mapping npages=0x%jx @ pfn=0x%jx"
	    " from vma npages=0x%jx @ pfn=0x%jx",
	    (uintmax_t)npages,
	    (uintmax_t)startpage,
	    (uintmax_t)drm_vma_node_size(&bo->base.vma_node),
	    (uintmax_t)drm_vma_node_start(&bo->base.vma_node));
	KASSERTMSG((npages <= drm_vma_node_size(&bo->base.vma_node)),
	    "mapping npages=0x%jx @ pfn=0x%jx"
	    " from vma npages=0x%jx @ pfn=0x%jx",
	    (uintmax_t)npages,
	    (uintmax_t)startpage,
	    (uintmax_t)drm_vma_node_size(&bo->base.vma_node),
	    (uintmax_t)drm_vma_node_start(&bo->base.vma_node));
	KASSERTMSG(((startpage - drm_vma_node_start(&bo->base.vma_node))
		<= (drm_vma_node_size(&bo->base.vma_node) - npages)),
	    "mapping npages=0x%jx @ pfn=0x%jx"
	    " from vma npages=0x%jx @ pfn=0x%jx",
	    (uintmax_t)npages,
	    (uintmax_t)startpage,
	    (uintmax_t)drm_vma_node_size(&bo->base.vma_node),
	    (uintmax_t)drm_vma_node_start(&bo->base.vma_node));

	/* XXX Just assert this?  */
	if (__predict_false(bdev->driver->verify_access == NULL)) {
		ret = -EPERM;
		goto fail1;
	}
	ret = (*bdev->driver->verify_access)(bo, file);
	if (ret)
		goto fail1;

	/* Success!  */
	*uobjp = &bo->uvmobj;
	*uoffsetp = (offset -
	    ((off_t)drm_vma_node_start(&bo->base.vma_node) << PAGE_SHIFT));
	return 0;

fail1:	ttm_bo_put(bo);
fail0:	KASSERT(ret);
	return ret;
}

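/*
 * ttm_bo_uvm_lookup(bdev, startpage, npages, bop)
 *
 *	Look up the ttm buffer object covering pages [startpage,
 *	startpage + npages) in bdev's vma manager and return a new
 *	reference to it in *bop.  Returns 0 on success, -ENOENT if
 *	there is none.
 */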
static int
ttm_bo_uvm_lookup(struct ttm_bo_device *bdev, unsigned long startpage,
    unsigned long npages, struct ttm_buffer_object **bop)
{
	struct ttm_buffer_object *bo = NULL;
	struct drm_vma_offset_node *node;

	drm_vma_offset_lock_lookup(bdev->vma_manager);
	node = drm_vma_offset_lookup_locked(bdev->vma_manager, startpage,
	    npages);
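	/*
	 * Take a reference while the lookup lock is still held, unless
	 * the object is already being destroyed, in which case pretend
	 * we found nothing.
	 */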
	if (node != NULL) {
		bo = container_of(node, struct ttm_buffer_object,
		    base.vma_node);
		if (!kref_get_unless_zero(&bo->kref))
			bo = NULL;
	}
	drm_vma_offset_unlock_lookup(bdev->vma_manager);

	if (bo == NULL)
		return -ENOENT;

	*bop = bo;
	return 0;
}