1 /* $NetBSD: i915_gem_mman.c,v 1.21 2021/12/19 12:26:55 riastradh Exp $ */
2
3 /*
4 * SPDX-License-Identifier: MIT
5 *
6 * Copyright © 2014-2016 Intel Corporation
7 */
8
9 #include <sys/cdefs.h>
10 __KERNEL_RCSID(0, "$NetBSD: i915_gem_mman.c,v 1.21 2021/12/19 12:26:55 riastradh Exp $");
11
12 #include <linux/anon_inodes.h>
13 #include <linux/mman.h>
14 #include <linux/pfn_t.h>
15 #include <linux/sizes.h>
16
17 #include "drm/drm_gem.h"
18
19 #include "gt/intel_gt.h"
20 #include "gt/intel_gt_requests.h"
21
22 #include "i915_drv.h"
23 #include "i915_gem_gtt.h"
24 #include "i915_gem_ioctls.h"
25 #include "i915_gem_object.h"
26 #include "i915_gem_mman.h"
27 #include "i915_trace.h"
28 #include "i915_user_extensions.h"
29 #include "i915_vma.h"
30
31 #ifdef __NetBSD__
32 static const struct uvm_pagerops i915_mmo_gem_uvm_ops;
33 #else
34 static inline bool
35 __vma_matches(struct vm_area_struct *vma, struct file *filp,
36 unsigned long addr, unsigned long size)
37 {
38 if (vma->vm_file != filp)
39 return false;
40
41 return vma->vm_start == addr &&
42 (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
43 }
44 #endif
45
46 /**
47 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
48 * it is mapped to.
49 * @dev: drm device
50 * @data: ioctl data blob
51 * @file: drm file
52 *
53 * While the mapping holds a reference on the contents of the object, it doesn't
54 * imply a ref on the object itself.
55 *
56 * IMPORTANT:
57 *
58 * DRM driver writers who look at this function as an example of how to do GEM
59 * mmap support: please don't implement mmap support like this. The modern way
60 * to implement DRM mmap support is with an mmap offset ioctl (like
61 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
62 * That way debug tooling like valgrind will understand what's going on; hiding
63 * the mmap call in a driver-private ioctl will break that. The i915 driver only
64 * does cpu mmaps this way because we didn't know better (see the sketch below).
65 */
66 int
67 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
68 struct drm_file *file)
69 {
70 struct drm_i915_gem_mmap *args = data;
71 struct drm_i915_gem_object *obj;
72 unsigned long addr;
73
74 if (args->flags & ~(I915_MMAP_WC))
75 return -EINVAL;
76
77 if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
78 return -ENODEV;
79
80 obj = i915_gem_object_lookup(file, args->handle);
81 if (!obj)
82 return -ENOENT;
83
84 #ifdef __NetBSD__
85 struct drm_i915_private *i915 = to_i915(obj->base.dev);
86 if (i915->quirks & QUIRK_NETBSD_VERSION_CALLED)
87 args->flags = 0;
88 #endif
89
90 /* prime objects have no backing filp to GEM mmap
91 * pages from.
92 */
93 if (!obj->base.filp) {
94 addr = -ENXIO;
95 goto err;
96 }
97
98 if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
99 addr = -EINVAL;
100 goto err;
101 }
102
103 #ifdef __NetBSD__
104 int error;
105
106 /* Acquire a reference for uvm_map to consume. */
107 uao_reference(obj->base.filp);
108 addr = (*curproc->p_emul->e_vm_default_addr)(curproc,
109 (vaddr_t)curproc->p_vmspace->vm_daddr, args->size,
110 curproc->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN);
111 error = uvm_map(&curproc->p_vmspace->vm_map, &addr, args->size,
112 obj->base.filp, args->offset, 0,
113 UVM_MAPFLAG(VM_PROT_READ|VM_PROT_WRITE,
114 VM_PROT_READ|VM_PROT_WRITE, UVM_INH_COPY, UVM_ADV_NORMAL,
115 0));
116 if (error) {
117 uao_detach(obj->base.filp);
118 /* XXX errno NetBSD->Linux */
119 addr = -error;
120 goto err;
121 }
122 #else
123 addr = vm_mmap(obj->base.filp, 0, args->size,
124 PROT_READ | PROT_WRITE, MAP_SHARED,
125 args->offset);
126 if (IS_ERR_VALUE(addr))
127 goto err;
128
129 if (args->flags & I915_MMAP_WC) {
130 struct mm_struct *mm = current->mm;
131 struct vm_area_struct *vma;
132
133 if (down_write_killable(&mm->mmap_sem)) {
134 addr = -EINTR;
135 goto err;
136 }
137 vma = find_vma(mm, addr);
138 if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
139 vma->vm_page_prot =
140 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
141 else
142 addr = -ENOMEM;
143 up_write(&mm->mmap_sem);
144 if (IS_ERR_VALUE(addr))
145 goto err;
146 }
147 #endif
148 i915_gem_object_put(obj);
149
150 args->addr_ptr = (u64)addr;
151 return 0;
152
153 err:
154 i915_gem_object_put(obj);
155 return addr;
156 }
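/*
 * Illustrative sketch only (not part of this driver, error handling
 * omitted): the modern flow recommended above obtains a fake offset with
 * DRM_IOCTL_I915_GEM_MMAP_OFFSET and then mmaps the DRM fd itself.  The
 * drm_fd, handle and size variables are assumed to exist.
 *
 *	struct drm_i915_gem_mmap_offset arg = {
 *		.handle = handle,
 *		.flags = I915_MMAP_OFFSET_WC,
 *	};
 *	ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg);
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    drm_fd, arg.offset);
 */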
157
158 static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
159 {
160 return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
161 }
162
163 /**
164 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
165 *
166 * A history of the GTT mmap interface:
167 *
168 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
169 * be aligned and suitable for fencing, and still fit into the available
170 * mappable space left by the pinned display objects. A classic problem
171 * we called the page-fault-of-doom where we would ping-pong between
172 * two objects that could not fit inside the GTT and so the memcpy
173 * would page one object in at the expense of the other between every
174 * single byte.
175 *
176 * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
177 * as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
178 * object is too large for the available space (or simply too large
179 * for the mappable aperture!), a view is created instead and faulted
180 * into userspace. (This view is aligned and sized appropriately for
181 * fenced access.)
182 *
183 * 2 - Recognise WC as a separate cache domain so that we can flush the
184 * delayed writes via GTT before performing direct access via WC.
185 *
186 * 3 - Remove implicit set-domain(GTT) and synchronisation on initial
187 * pagefault; swapin remains transparent.
188 *
189 * 4 - Support multiple fault handlers per object depending on object's
190 * backing storage (a.k.a. MMAP_OFFSET).
191 *
192 * Restrictions:
193 *
194 * * snoopable objects cannot be accessed via the GTT. It can cause machine
195 * hangs on some architectures, corruption on others. An attempt to service
196 * a GTT page fault from a snoopable object will generate a SIGBUS.
197 *
198 * * the object must be able to fit into RAM (physical memory, though not
199 * limited to the mappable aperture).
200 *
201 *
202 * Caveats:
203 *
204 * * a new GTT page fault will synchronize rendering from the GPU and flush
205 * all data to system memory. Subsequent access will not be synchronized.
206 *
207 * * all mappings are revoked on runtime device suspend.
208 *
209 * * there are only 8, 16 or 32 fence registers to share between all users
210 * (older machines require a fence register for display and blitter access
211 * as well). Contention for the fence registers will cause the previous users
212 * to be unmapped and any new access will generate new page faults.
213 *
214 * * running out of memory while servicing a fault may generate a SIGBUS,
215 * rather than the expected SIGSEGV.
216 */
217 int i915_gem_mmap_gtt_version(void)
218 {
219 return 4;
220 }
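/*
 * Illustrative sketch only (not part of this driver): userspace can query
 * the value returned here through the GETPARAM ioctl, e.g. with an open
 * DRM fd drm_fd:
 *
 *	int version = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_MMAP_GTT_VERSION,
 *		.value = &version,
 *	};
 *	ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp);
 */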
221
222 static inline struct i915_ggtt_view
223 compute_partial_view(const struct drm_i915_gem_object *obj,
224 pgoff_t page_offset,
225 unsigned int chunk)
226 {
227 struct i915_ggtt_view view;
228
229 if (i915_gem_object_is_tiled(obj))
230 chunk = roundup(chunk, tile_row_pages(obj));
231
232 view.type = I915_GGTT_VIEW_PARTIAL;
233 view.partial.offset = rounddown(page_offset, chunk);
234 view.partial.size =
235 min_t(unsigned int, chunk,
236 (obj->base.size >> PAGE_SHIFT) - view.partial.offset);
237
238 /* If the partial covers the entire object, just create a normal VMA. */
239 if (chunk >= obj->base.size >> PAGE_SHIFT)
240 view.type = I915_GGTT_VIEW_NORMAL;
241
242 return view;
243 }
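/*
 * Worked example (assuming 4 KiB pages): for an untiled 8 MiB object
 * (2048 pages), a fault at page_offset 1000 with chunk == 256 (the 1 MiB
 * MIN_CHUNK_PAGES used by vm_fault_gtt) gives view.partial.offset == 768
 * and view.partial.size == 256, so only the 1 MiB window covering pages
 * 768-1023 is bound.  Had the object been no larger than the chunk, the
 * view would have degenerated to I915_GGTT_VIEW_NORMAL above.
 */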
244
245 #ifdef __NetBSD__
246 /*
247 * XXX pmap_enter_default instead of pmap_enter because of a problem
248 * with using weak aliases in kernel modules.
249 *
250 * XXX This probably won't work in a Xen kernel! Maybe this should be
251 * #ifdef _MODULE?
252 */
253 int pmap_enter_default(pmap_t, vaddr_t, paddr_t, vm_prot_t, unsigned);
254 #define pmap_enter pmap_enter_default
255
256 static int
257 i915_error_to_vmf_fault(int err)
258 {
259 return err;
260 }
261 #else
262 static vm_fault_t i915_error_to_vmf_fault(int err)
263 {
264 switch (err) {
265 default:
266 WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err);
267 /* fallthrough */
268 case -EIO: /* shmemfs failure from swap device */
269 case -EFAULT: /* purged object */
270 case -ENODEV: /* bad object, how did you get here! */
271 case -ENXIO: /* unable to access backing store (on device) */
272 return VM_FAULT_SIGBUS;
273
274 case -ENOSPC: /* shmemfs allocation failure */
275 case -ENOMEM: /* our allocation failure */
276 return VM_FAULT_OOM;
277
278 case 0:
279 case -EAGAIN:
280 case -ERESTARTSYS:
281 case -EINTR:
282 case -EBUSY:
283 /*
284 * EBUSY is ok: this just means that another thread
285 * already did the job.
286 */
287 return VM_FAULT_NOPAGE;
288 }
289 }
290 #endif
291
292 #ifdef __NetBSD__
293 static int
294 vm_fault_cpu(struct uvm_faultinfo *ufi, struct i915_mmap_offset *mmo,
295 vaddr_t vaddr, struct vm_page **pps, int npages, int centeridx, int flags)
296 #else
297 static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
298 #endif
299 {
300 #ifndef __NetBSD__
301 struct vm_area_struct *area = vmf->vma;
302 struct i915_mmap_offset *mmo = area->vm_private_data;
303 #endif
304 struct drm_i915_gem_object *obj = mmo->obj;
305 #ifdef __NetBSD__
306 bool write = ufi->entry->protection & VM_PROT_WRITE;
307 #else
308 bool write = area->vm_flags & VM_WRITE;
309 #endif
310 resource_size_t iomap;
311 int err;
312
313 /* Sanity check that we allow writing into this object */
314 if (unlikely(i915_gem_object_is_readonly(obj) && write))
315 #ifdef __NetBSD__
316 return -EFAULT;
317 #else
318 return VM_FAULT_SIGBUS;
319 #endif
320
321 err = i915_gem_object_pin_pages(obj);
322 if (err)
323 goto out;
324
325 iomap = -1;
326 if (!i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE)) {
327 iomap = obj->mm.region->iomap.base;
328 iomap -= obj->mm.region->region.start;
329 }
330
331 /* PTEs are revoked in obj->ops->put_pages() */
332 #ifdef __NetBSD__
333 /* XXX No lmem supported yet. */
334 KASSERT(i915_gem_object_type_has(obj,
335 I915_GEM_OBJECT_HAS_STRUCT_PAGE));
336
337 struct scatterlist *sg = obj->mm.pages->sgl;
338 unsigned startpage = (ufi->entry->offset + (vaddr - ufi->entry->start))
339 >> PAGE_SHIFT;
340 paddr_t paddr;
341 int i;
342
343 for (i = 0; i < npages; i++) {
344 if ((flags & PGO_ALLPAGES) == 0 && i != centeridx)
345 continue;
346 if (pps[i] == PGO_DONTCARE)
347 continue;
348 paddr = page_to_phys(sg->sg_pgs[startpage + i]);
349 /* XXX errno NetBSD->Linux */
350 err = -pmap_enter(ufi->orig_map->pmap,
351 vaddr + i*PAGE_SIZE, paddr, ufi->entry->protection,
352 PMAP_CANFAIL | ufi->entry->protection);
353 if (err)
354 break;
355 }
356 pmap_update(ufi->orig_map->pmap);
357 #else
358 err = remap_io_sg(area,
359 area->vm_start, area->vm_end - area->vm_start,
360 obj->mm.pages->sgl, iomap);
361 #endif
362
363 if (write) {
364 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
365 obj->mm.dirty = true;
366 }
367
368 i915_gem_object_unpin_pages(obj);
369
370 out:
371 return i915_error_to_vmf_fault(err);
372 }
373
374 #ifdef __NetBSD__
375 static int
376 vm_fault_gtt(struct uvm_faultinfo *ufi, struct i915_mmap_offset *mmo,
377 vaddr_t vaddr, struct vm_page **pps, int npages, int centeridx, int flags)
378 #else
379 static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
380 #endif
381 {
382 #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
383 #ifndef __NetBSD__
384 struct vm_area_struct *area = vmf->vma;
385 struct i915_mmap_offset *mmo = area->vm_private_data;
386 #endif
387 struct drm_i915_gem_object *obj = mmo->obj;
388 struct drm_device *dev = obj->base.dev;
389 struct drm_i915_private *i915 = to_i915(dev);
390 struct intel_runtime_pm *rpm = &i915->runtime_pm;
391 struct i915_ggtt *ggtt = &i915->ggtt;
392 #ifdef __NetBSD__
393 bool write = ufi->entry->protection & VM_PROT_WRITE;
394 #else
395 bool write = area->vm_flags & VM_WRITE;
396 #endif
397 intel_wakeref_t wakeref;
398 struct i915_vma *vma;
399 pgoff_t page_offset;
400 int srcu;
401 int ret;
402
403 /* Sanity check that we allow writing into this object */
404 if (i915_gem_object_is_readonly(obj) && write)
405 #ifdef __NetBSD__
406 return -EFAULT;
407 #else
408 return VM_FAULT_SIGBUS;
409 #endif
410
411 #ifdef __NetBSD__
412 page_offset = (ufi->entry->offset + (vaddr - ufi->entry->start))
413 >> PAGE_SHIFT;
414 #else
415 /* We don't use vmf->pgoff since that has the fake offset */
416 page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
417 #endif
418
419 trace_i915_gem_object_fault(obj, page_offset, true, write);
420
421 ret = i915_gem_object_pin_pages(obj);
422 if (ret)
423 goto err;
424
425 wakeref = intel_runtime_pm_get(rpm);
426
427 ret = intel_gt_reset_trylock(ggtt->vm.gt, &srcu);
428 if (ret)
429 goto err_rpm;
430
431 /* Now pin it into the GTT as needed */
432 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
433 PIN_MAPPABLE |
434 PIN_NONBLOCK /* NOWARN */ |
435 PIN_NOEVICT);
436 if (IS_ERR(vma)) {
437 /* Use a partial view if it is bigger than available space */
438 struct i915_ggtt_view view =
439 compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
440 unsigned int flags;
441
442 flags = PIN_MAPPABLE | PIN_NOSEARCH;
443 if (view.type == I915_GGTT_VIEW_NORMAL)
444 flags |= PIN_NONBLOCK; /* avoid warnings for pinned */
445
446 /*
447 * Userspace is now writing through an untracked VMA; abandon
448 * all hope that the hardware is able to track future writes.
449 */
450
451 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
452 if (IS_ERR(vma)) {
453 flags = PIN_MAPPABLE;
454 view.type = I915_GGTT_VIEW_PARTIAL;
455 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
456 }
457
458 /* The entire mappable GGTT is pinned? Unexpected! */
459 GEM_BUG_ON(vma == ERR_PTR(-ENOSPC));
460 }
461 if (IS_ERR(vma)) {
462 ret = PTR_ERR(vma);
463 goto err_reset;
464 }
465
466 /* Access to snoopable pages through the GTT is incoherent. */
467 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
468 ret = -EFAULT;
469 goto err_unpin;
470 }
471
472 ret = i915_vma_pin_fence(vma);
473 if (ret)
474 goto err_unpin;
475
476 /* Finally, remap it using the new GTT offset */
477 #ifdef __NetBSD__
478 unsigned startpage = page_offset;
479 paddr_t paddr;
480 int i;
481
482 for (i = 0; i < npages; i++) {
483 if ((flags & PGO_ALLPAGES) == 0 && i != centeridx)
484 continue;
485 if (pps[i] == PGO_DONTCARE)
486 continue;
487 paddr = ggtt->gmadr.start + vma->node.start
488 + (startpage + i)*PAGE_SIZE;
489 /* XXX errno NetBSD->Linux */
490 ret = -pmap_enter(ufi->orig_map->pmap,
491 vaddr + i*PAGE_SIZE, paddr, ufi->entry->protection,
492 PMAP_CANFAIL | ufi->entry->protection);
493 if (ret)
494 break;
495 }
496 pmap_update(ufi->orig_map->pmap);
497 #else
498 ret = remap_io_mapping(area,
499 area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
500 (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
501 min_t(u64, vma->size, area->vm_end - area->vm_start),
502 &ggtt->iomap);
503 #endif
504 if (ret)
505 goto err_fence;
506
507 assert_rpm_wakelock_held(rpm);
508
509 /* Mark as being mmapped into userspace for later revocation */
510 mutex_lock(&i915->ggtt.vm.mutex);
511 if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
512 list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
513 mutex_unlock(&i915->ggtt.vm.mutex);
514
515 /* Track the mmo associated with the fenced vma */
516 vma->mmo = mmo;
517
518 if (IS_ACTIVE(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND))
519 intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
520 msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
521
522 if (write) {
523 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
524 i915_vma_set_ggtt_write(vma);
525 obj->mm.dirty = true;
526 }
527
528 err_fence:
529 i915_vma_unpin_fence(vma);
530 err_unpin:
531 __i915_vma_unpin(vma);
532 err_reset:
533 intel_gt_reset_unlock(ggtt->vm.gt, srcu);
534 err_rpm:
535 intel_runtime_pm_put(rpm, wakeref);
536 i915_gem_object_unpin_pages(obj);
537 err:
538 return i915_error_to_vmf_fault(ret);
539 }
540
541 #ifdef __NetBSD__
542
543 static int
544 i915_gem_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, struct vm_page **pps,
545 int npages, int centeridx, vm_prot_t access_type, int flags)
546 {
547 struct uvm_object *uobj = ufi->entry->object.uvm_obj;
548 struct i915_mmap_offset *mmo =
549 container_of(uobj, struct i915_mmap_offset, uobj);
550 struct drm_i915_gem_object *obj = mmo->obj;
551 bool pinned = false;
552 int error;
553
554 KASSERT(rw_lock_held(obj->base.filp->vmobjlock));
555 KASSERT(!i915_gem_object_is_readonly(obj) ||
556 (access_type & VM_PROT_WRITE) == 0);
557 KASSERT(i915_gem_object_type_has(obj,
558 I915_GEM_OBJECT_HAS_STRUCT_PAGE|I915_GEM_OBJECT_HAS_IOMEM));
559
560 /* Actually we don't support iomem right now! */
561 KASSERT(i915_gem_object_type_has(obj,
562 I915_GEM_OBJECT_HAS_STRUCT_PAGE));
563
564 /*
565 * The lock isn't actually helpful for us and the caller in
566 * uvm_fault only just acquired it anyway, so no important
567 * invariants are implied by it.
568 */
569 rw_exit(obj->base.filp->vmobjlock);
570
571 /* XXX errno Linux->NetBSD */
572 error = -i915_gem_object_pin_pages(obj);
573 if (error)
574 goto out;
575 pinned = true;
576
577 switch (mmo->mmap_type) {
578 case I915_MMAP_TYPE_WC:
579 case I915_MMAP_TYPE_WB:
580 case I915_MMAP_TYPE_UC:
581 /* XXX errno Linux->NetBSD */
582 error = -vm_fault_cpu(ufi, mmo, vaddr, pps, npages, centeridx,
583 flags);
584 break;
585 case I915_MMAP_TYPE_GTT:
586 error = -vm_fault_gtt(ufi, mmo, vaddr, pps, npages, centeridx,
587 flags);
588 break;
589 default:
590 panic("invalid i915 gem mmap offset type: %d",
591 mmo->mmap_type);
592 }
593
594 out: if (pinned)
595 i915_gem_object_unpin_pages(obj);
596 uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, NULL);
597
598 /*
599 * Remap EINTR to success, so that we return to userland.
600 * On the way out, we'll deliver the signal, and if the signal
601 * is not fatal then the user code which faulted will most likely
602 * fault again, and we'll come back here for another try.
603 */
604 if (error == EINTR)
605 error = 0;
606
607 return error;
608 }
609
610 #endif
611
612 void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
613 {
614 struct i915_vma *vma;
615
616 GEM_BUG_ON(!obj->userfault_count);
617
618 for_each_ggtt_vma(vma, obj)
619 i915_vma_revoke_mmap(vma);
620
621 GEM_BUG_ON(obj->userfault_count);
622 }
623
624 /*
625 * It is vital that we remove the page mapping if we have mapped a tiled
626 * object through the GTT and then lose the fence register due to
627 * resource pressure. Similarly if the object has been moved out of the
628 * aperture, then pages mapped into userspace must be revoked. Removing the
629 * mapping will then trigger a page fault on the next user access, allowing
630 * fixup by vm_fault_gtt().
631 */
632 static void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
633 {
634 struct drm_i915_private *i915 = to_i915(obj->base.dev);
635 intel_wakeref_t wakeref;
636
637 /*
638 * Serialisation between user GTT access and our code depends upon
639 * revoking the CPU's PTE whilst the mutex is held. The next user
640 * pagefault then has to wait until we release the mutex.
641 *
642 * Note that RPM complicates this somewhat by adding an additional
643 * requirement that operations to the GGTT be made holding the RPM
644 * wakeref.
645 */
646 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
647 mutex_lock(&i915->ggtt.vm.mutex);
648
649 if (!obj->userfault_count)
650 goto out;
651
652 __i915_gem_object_release_mmap_gtt(obj);
653
654 /*
655 * Ensure that the CPU's PTEs are revoked and there are no outstanding
656 * memory transactions from userspace before we return. The TLB
657 * flushing implied by changing the PTEs above *should* be
658 * sufficient, an extra barrier here just provides us with a bit
659 * of paranoid documentation about our requirement to serialise
660 * memory writes before touching registers / GSM.
661 */
662 wmb();
663
664 out:
665 mutex_unlock(&i915->ggtt.vm.mutex);
666 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
667 }
668
669 void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
670 {
671 #ifdef __NetBSD__
672 struct page *page;
673 struct vm_page *vm_page;
674 unsigned i;
675
676 if (!i915_gem_object_has_pages(obj))
677 return;
678 for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) {
679 page = obj->mm.pages->sgl->sg_pgs[i];
680 vm_page = &page->p_vmp;
681 pmap_page_protect(vm_page, VM_PROT_NONE);
682 }
683 #else
684 struct i915_mmap_offset *mmo, *mn;
685
686 spin_lock(&obj->mmo.lock);
687 rbtree_postorder_for_each_entry_safe(mmo, mn,
688 &obj->mmo.offsets, offset) {
689 /*
690 * vma_node_unmap for GTT mmaps is handled already in
691 * __i915_gem_object_release_mmap_gtt
692 */
693 if (mmo->mmap_type == I915_MMAP_TYPE_GTT)
694 continue;
695
696 spin_unlock(&obj->mmo.lock);
697 drm_vma_node_unmap(&mmo->vma_node,
698 obj->base.dev->anon_inode->i_mapping);
699 spin_lock(&obj->mmo.lock);
700 }
701 spin_unlock(&obj->mmo.lock);
702 #endif
703 }
704
705 /**
706 * i915_gem_object_release_mmap - remove physical page mappings
707 * @obj: obj in question
708 *
709 * Preserve the reservation of the mmapping with the DRM core code, but
710 * relinquish ownership of the pages back to the system.
711 */
712 void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
713 {
714 i915_gem_object_release_mmap_gtt(obj);
715 i915_gem_object_release_mmap_offset(obj);
716 }
717
718 static struct i915_mmap_offset *
719 lookup_mmo(struct drm_i915_gem_object *obj,
720 enum i915_mmap_type mmap_type)
721 {
722 #ifdef __NetBSD__
723 struct i915_mmap_offset *mmo;
724
725 spin_lock(&obj->mmo.lock);
726 mmo = obj->mmo.offsets[mmap_type];
727 spin_unlock(&obj->mmo.lock);
728
729 return mmo;
730 #else
731 struct rb_node *rb;
732
733 spin_lock(&obj->mmo.lock);
734 rb = obj->mmo.offsets.rb_node;
735 while (rb) {
736 struct i915_mmap_offset *mmo =
737 rb_entry(rb, typeof(*mmo), offset);
738
739 if (mmo->mmap_type == mmap_type) {
740 spin_unlock(&obj->mmo.lock);
741 return mmo;
742 }
743
744 if (mmo->mmap_type < mmap_type)
745 rb = rb->rb_right;
746 else
747 rb = rb->rb_left;
748 }
749 spin_unlock(&obj->mmo.lock);
750
751 return NULL;
752 #endif
753 }
754
755 static struct i915_mmap_offset *
756 insert_mmo(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo)
757 {
758 #ifdef __NetBSD__
759 struct i915_mmap_offset *to_free = NULL;
760
761 spin_lock(&obj->mmo.lock);
762 if (obj->mmo.offsets[mmo->mmap_type]) {
763 to_free = mmo;
764 mmo = obj->mmo.offsets[mmo->mmap_type];
765 } else {
766 obj->mmo.offsets[mmo->mmap_type] = mmo;
767 }
768 spin_unlock(&obj->mmo.lock);
769
770 if (to_free) {
771 drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
772 &to_free->vma_node);
773 uvm_obj_destroy(&to_free->uobj, /*free lock*/true);
774 drm_vma_node_destroy(&to_free->vma_node);
775 kfree(to_free);
776 }
777
778 return mmo;
779 #else
780 struct rb_node *rb, **p;
781
782 spin_lock(&obj->mmo.lock);
783 rb = NULL;
784 p = &obj->mmo.offsets.rb_node;
785 while (*p) {
786 struct i915_mmap_offset *pos;
787
788 rb = *p;
789 pos = rb_entry(rb, typeof(*pos), offset);
790
791 if (pos->mmap_type == mmo->mmap_type) {
792 spin_unlock(&obj->mmo.lock);
793 drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
794 &mmo->vma_node);
795 kfree(mmo);
796 return pos;
797 }
798
799 if (pos->mmap_type < mmo->mmap_type)
800 p = &rb->rb_right;
801 else
802 p = &rb->rb_left;
803 }
804 rb_link_node(&mmo->offset, rb, p);
805 rb_insert_color(&mmo->offset, &obj->mmo.offsets);
806 spin_unlock(&obj->mmo.lock);
807
808 return mmo;
809 #endif
810 }
811
812 static struct i915_mmap_offset *
813 mmap_offset_attach(struct drm_i915_gem_object *obj,
814 enum i915_mmap_type mmap_type,
815 struct drm_file *file)
816 {
817 struct drm_i915_private *i915 = to_i915(obj->base.dev);
818 struct i915_mmap_offset *mmo;
819 int err;
820
821 mmo = lookup_mmo(obj, mmap_type);
822 if (mmo)
823 goto out;
824
825 mmo = kmalloc(sizeof(*mmo), GFP_KERNEL);
826 if (!mmo)
827 return ERR_PTR(-ENOMEM);
828
829 mmo->obj = obj;
830 mmo->mmap_type = mmap_type;
831 #ifdef __NetBSD__
832 drm_vma_node_init(&mmo->vma_node);
833 uvm_obj_init(&mmo->uobj, &i915_mmo_gem_uvm_ops, /*allocate lock*/false,
834 /*nrefs*/1);
835 uvm_obj_setlock(&mmo->uobj, obj->base.filp->vmobjlock);
836 #else
837 drm_vma_node_reset(&mmo->vma_node);
838 #endif
839
840 err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
841 &mmo->vma_node, obj->base.size / PAGE_SIZE);
842 if (likely(!err))
843 goto insert;
844
845 /* Attempt to reap some mmap space from dead objects */
846 err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT);
847 if (err)
848 goto err;
849
850 i915_gem_drain_freed_objects(i915);
851 err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
852 &mmo->vma_node, obj->base.size / PAGE_SIZE);
853 if (err)
854 goto err;
855
856 insert:
857 mmo = insert_mmo(obj, mmo);
858 GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
859 out:
860 if (file)
861 drm_vma_node_allow(&mmo->vma_node, file);
862 return mmo;
863
864 err:
865 #ifdef __NetBSD__
866 uvm_obj_destroy(&mmo->uobj, /*free lock*/true);
867 #endif
868 drm_vma_node_destroy(&mmo->vma_node);
869 kfree(mmo);
870 return ERR_PTR(err);
871 }
872
873 static int
874 __assign_mmap_offset(struct drm_file *file,
875 u32 handle,
876 enum i915_mmap_type mmap_type,
877 u64 *offset)
878 {
879 struct drm_i915_gem_object *obj;
880 struct i915_mmap_offset *mmo;
881 int err;
882
883 obj = i915_gem_object_lookup(file, handle);
884 if (!obj)
885 return -ENOENT;
886
887 if (mmap_type == I915_MMAP_TYPE_GTT &&
888 i915_gem_object_never_bind_ggtt(obj)) {
889 err = -ENODEV;
890 goto out;
891 }
892
893 if (mmap_type != I915_MMAP_TYPE_GTT &&
894 !i915_gem_object_type_has(obj,
895 I915_GEM_OBJECT_HAS_STRUCT_PAGE |
896 I915_GEM_OBJECT_HAS_IOMEM)) {
897 err = -ENODEV;
898 goto out;
899 }
900
901 mmo = mmap_offset_attach(obj, mmap_type, file);
902 if (IS_ERR(mmo)) {
903 err = PTR_ERR(mmo);
904 goto out;
905 }
906
907 *offset = drm_vma_node_offset_addr(&mmo->vma_node);
908 err = 0;
909 out:
910 i915_gem_object_put(obj);
911 return err;
912 }
913
914 int
915 i915_gem_dumb_mmap_offset(struct drm_file *file,
916 struct drm_device *dev,
917 u32 handle,
918 u64 *offset)
919 {
920 enum i915_mmap_type mmap_type;
921
922 if (boot_cpu_has(X86_FEATURE_PAT))
923 mmap_type = I915_MMAP_TYPE_WC;
924 else if (!i915_ggtt_has_aperture(&to_i915(dev)->ggtt))
925 return -ENODEV;
926 else
927 mmap_type = I915_MMAP_TYPE_GTT;
928
929 return __assign_mmap_offset(file, handle, mmap_type, offset);
930 }
931
932 /**
933 * i915_gem_mmap_offset_ioctl - prepare an object for GTT mmap'ing
934 * @dev: DRM device
935 * @data: GTT mapping ioctl data
936 * @file: GEM object info
937 *
938 * Simply returns the fake offset to userspace so it can mmap it.
939 * The mmap call will end up in drm_gem_mmap(), which will set things
940 * up so we can get faults in the handler above.
941 *
942 * The fault handler will take care of binding the object into the GTT
943 * (since it may have been evicted to make room for something), allocating
944 * a fence register, and mapping the appropriate aperture address into
945 * userspace.
946 */
947 int
948 i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
949 struct drm_file *file)
950 {
951 struct drm_i915_private *i915 = to_i915(dev);
952 struct drm_i915_gem_mmap_offset *args = data;
953 enum i915_mmap_type type;
954 int err;
955
956 /*
957 * Historically we failed to check args.pad and args.offset
958 * and so we cannot use those fields for user input and we cannot
959 * add -EINVAL for them as the ABI is fixed, i.e. old userspace
960 * may be feeding in garbage in those fields.
961 *
962 * if (args->pad) return -EINVAL; is verboten!
963 */
964
965 err = i915_user_extensions(u64_to_user_ptr(args->extensions),
966 NULL, 0, NULL);
967 if (err)
968 return err;
969
970 switch (args->flags) {
971 case I915_MMAP_OFFSET_GTT:
972 if (!i915_ggtt_has_aperture(&i915->ggtt))
973 return -ENODEV;
974 type = I915_MMAP_TYPE_GTT;
975 break;
976
977 case I915_MMAP_OFFSET_WC:
978 if (!boot_cpu_has(X86_FEATURE_PAT))
979 return -ENODEV;
980 type = I915_MMAP_TYPE_WC;
981 break;
982
983 case I915_MMAP_OFFSET_WB:
984 type = I915_MMAP_TYPE_WB;
985 break;
986
987 case I915_MMAP_OFFSET_UC:
988 if (!boot_cpu_has(X86_FEATURE_PAT))
989 return -ENODEV;
990 type = I915_MMAP_TYPE_UC;
991 break;
992
993 default:
994 return -EINVAL;
995 }
996
997 return __assign_mmap_offset(file, args->handle, type, &args->offset);
998 }
999
1000 #ifdef __NetBSD__
1001
1002 static int
1003 i915_gem_nofault(struct uvm_faultinfo *ufi, vaddr_t vaddr,
1004 struct vm_page **pps, int npages, int centeridx, vm_prot_t access_type,
1005 int flags)
1006 {
1007 panic("i915 main gem object should not be mmapped directly");
1008 }
1009
1010 const struct uvm_pagerops i915_gem_uvm_ops = {
1011 .pgo_reference = drm_gem_pager_reference,
1012 .pgo_detach = drm_gem_pager_detach,
1013 .pgo_fault = i915_gem_nofault,
1014 };
1015
1016 static void
1017 i915_mmo_reference(struct uvm_object *uobj)
1018 {
1019 struct i915_mmap_offset *mmo =
1020 container_of(uobj, struct i915_mmap_offset, uobj);
1021 struct drm_i915_gem_object *obj = mmo->obj;
1022
1023 drm_gem_object_get(&obj->base);
1024 }
1025
1026 static void
1027 i915_mmo_detach(struct uvm_object *uobj)
1028 {
1029 struct i915_mmap_offset *mmo =
1030 container_of(uobj, struct i915_mmap_offset, uobj);
1031 struct drm_i915_gem_object *obj = mmo->obj;
1032
1033 drm_gem_object_put_unlocked(&obj->base);
1034 }
1035
1036 static const struct uvm_pagerops i915_mmo_gem_uvm_ops = {
1037 .pgo_reference = i915_mmo_reference,
1038 .pgo_detach = i915_mmo_detach,
1039 .pgo_fault = i915_gem_fault,
1040 };
1041
1042 int
1043 i915_gem_mmap_object(struct drm_device *dev, off_t byte_offset, size_t nbytes,
1044 int prot, struct uvm_object **uobjp, voff_t *uoffsetp, struct file *fp)
1045 {
1046 const unsigned long startpage = byte_offset >> PAGE_SHIFT;
1047 const unsigned long npages = nbytes >> PAGE_SHIFT;
1048 struct drm_file *file = fp->f_data;
1049 struct drm_vma_offset_node *node;
1050 struct drm_i915_gem_object *obj = NULL;
1051 struct i915_mmap_offset *mmo = NULL;
1052
1053 if (drm_dev_is_unplugged(dev))
1054 return -ENODEV;
1055
1056 rcu_read_lock();
1057 drm_vma_offset_lock_lookup(dev->vma_offset_manager);
1058 node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
1059 startpage, npages);
1060 if (node && drm_vma_node_is_allowed(node, file)) {
1061 /*
1062 * Skip 0-refcnted objects as they are in the process of being
1063 * destroyed and will be invalid when the vma manager lock
1064 * is released.
1065 */
1066 mmo = container_of(node, struct i915_mmap_offset, vma_node);
1067 obj = i915_gem_object_get_rcu(mmo->obj);
1068 }
1069 drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
1070 rcu_read_unlock();
1071 if (!obj)
1072 return node ? -EACCES : -EINVAL;
1073
1074 if (i915_gem_object_is_readonly(obj)) {
1075 if (prot & VM_PROT_WRITE) {
1076 i915_gem_object_put(obj);
1077 return -EINVAL;
1078 }
1079 }
1080
1081 /* Success! */
1082 *uobjp = &mmo->uobj;
1083 *uoffsetp = 0;
1084 return 0;
1085 }
1086
1087 #else
1088
1089 static void vm_open(struct vm_area_struct *vma)
1090 {
1091 struct i915_mmap_offset *mmo = vma->vm_private_data;
1092 struct drm_i915_gem_object *obj = mmo->obj;
1093
1094 GEM_BUG_ON(!obj);
1095 i915_gem_object_get(obj);
1096 }
1097
1098 static void vm_close(struct vm_area_struct *vma)
1099 {
1100 struct i915_mmap_offset *mmo = vma->vm_private_data;
1101 struct drm_i915_gem_object *obj = mmo->obj;
1102
1103 GEM_BUG_ON(!obj);
1104 i915_gem_object_put(obj);
1105 }
1106
1107 static const struct vm_operations_struct vm_ops_gtt = {
1108 .fault = vm_fault_gtt,
1109 .open = vm_open,
1110 .close = vm_close,
1111 };
1112
1113 static const struct vm_operations_struct vm_ops_cpu = {
1114 .fault = vm_fault_cpu,
1115 .open = vm_open,
1116 .close = vm_close,
1117 };
1118
1119 static int singleton_release(struct inode *inode, struct file *file)
1120 {
1121 struct drm_i915_private *i915 = file->private_data;
1122
1123 cmpxchg(&i915->gem.mmap_singleton, file, NULL);
1124 drm_dev_put(&i915->drm);
1125
1126 return 0;
1127 }
1128
1129 static const struct file_operations singleton_fops = {
1130 .owner = THIS_MODULE,
1131 .release = singleton_release,
1132 };
1133
1134 static struct file *mmap_singleton(struct drm_i915_private *i915)
1135 {
1136 struct file *file;
1137
1138 rcu_read_lock();
1139 file = i915->gem.mmap_singleton;
1140 if (file && !get_file_rcu(file))
1141 file = NULL;
1142 rcu_read_unlock();
1143 if (file)
1144 return file;
1145
1146 file = anon_inode_getfile("i915.gem", &singleton_fops, i915, O_RDWR);
1147 if (IS_ERR(file))
1148 return file;
1149
1150 /* Everyone shares a single global address space */
1151 file->f_mapping = i915->drm.anon_inode->i_mapping;
1152
1153 smp_store_mb(i915->gem.mmap_singleton, file);
1154 drm_dev_get(&i915->drm);
1155
1156 return file;
1157 }
1158
1159 /*
1160 * This overcomes the limitation in drm_gem_mmap's assignment of a
1161 * drm_gem_object as the vma->vm_private_data, since we need to
1162 * be able to resolve multiple mmap offsets which could be tied
1163 * to a single gem object.
1164 */
1165 int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
1166 {
1167 struct drm_vma_offset_node *node;
1168 struct drm_file *priv = filp->private_data;
1169 struct drm_device *dev = priv->minor->dev;
1170 struct drm_i915_gem_object *obj = NULL;
1171 struct i915_mmap_offset *mmo = NULL;
1172 struct file *anon;
1173
1174 if (drm_dev_is_unplugged(dev))
1175 return -ENODEV;
1176
1177 rcu_read_lock();
1178 drm_vma_offset_lock_lookup(dev->vma_offset_manager);
1179 node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
1180 vma->vm_pgoff,
1181 vma_pages(vma));
1182 if (node && drm_vma_node_is_allowed(node, priv)) {
1183 /*
1184 * Skip 0-refcnted objects as they are in the process of being
1185 * destroyed and will be invalid when the vma manager lock
1186 * is released.
1187 */
1188 mmo = container_of(node, struct i915_mmap_offset, vma_node);
1189 obj = i915_gem_object_get_rcu(mmo->obj);
1190 }
1191 drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
1192 rcu_read_unlock();
1193 if (!obj)
1194 return node ? -EACCES : -EINVAL;
1195
1196 if (i915_gem_object_is_readonly(obj)) {
1197 if (vma->vm_flags & VM_WRITE) {
1198 i915_gem_object_put(obj);
1199 return -EINVAL;
1200 }
1201 vma->vm_flags &= ~VM_MAYWRITE;
1202 }
1203
1204 anon = mmap_singleton(to_i915(dev));
1205 if (IS_ERR(anon)) {
1206 i915_gem_object_put(obj);
1207 return PTR_ERR(anon);
1208 }
1209
1210 vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
1211 vma->vm_private_data = mmo;
1212
1213 /*
1214 * We keep the ref on mmo->obj, not vm_file, but we require
1215 * vma->vm_file->f_mapping, see vma_link(), for later revocation.
1216 * Our userspace is accustomed to having per-file resource cleanup
1217 * (i.e. contexts, objects and requests) on their close(fd), which
1218 * requires avoiding extraneous references to their filp, hence
1219 * we prefer to use an anonymous file for their mmaps.
1220 */
1221 fput(vma->vm_file);
1222 vma->vm_file = anon;
1223
1224 switch (mmo->mmap_type) {
1225 case I915_MMAP_TYPE_WC:
1226 vma->vm_page_prot =
1227 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1228 vma->vm_ops = &vm_ops_cpu;
1229 break;
1230
1231 case I915_MMAP_TYPE_WB:
1232 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1233 vma->vm_ops = &vm_ops_cpu;
1234 break;
1235
1236 case I915_MMAP_TYPE_UC:
1237 vma->vm_page_prot =
1238 pgprot_noncached(vm_get_page_prot(vma->vm_flags));
1239 vma->vm_ops = &vm_ops_cpu;
1240 break;
1241
1242 case I915_MMAP_TYPE_GTT:
1243 vma->vm_page_prot =
1244 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1245 vma->vm_ops = &vm_ops_gtt;
1246 break;
1247 }
1248 vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
1249
1250 return 0;
1251 }
1252
1253 #endif /* __NetBSD__ */
1254
1255 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1256 #include "selftests/i915_gem_mman.c"
1257 #endif
1258