xref: /openbsd/sys/dev/pci/drm/i915/gem/i915_gem_mman.c (revision 097a140d)
1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2014-2016 Intel Corporation
5  */
6 
7 #include <linux/anon_inodes.h>
8 #include <linux/mman.h>
9 #include <linux/pfn_t.h>
10 #include <linux/sizes.h>
11 
12 #include "gt/intel_gt.h"
13 #include "gt/intel_gt_requests.h"
14 
15 #include "i915_drv.h"
16 #include "i915_gem_gtt.h"
17 #include "i915_gem_ioctls.h"
18 #include "i915_gem_object.h"
19 #include "i915_gem_mman.h"
20 #include "i915_trace.h"
21 #include "i915_user_extensions.h"
22 #include "i915_vma.h"
23 
24 #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
25 
26 #ifdef __linux__
27 static inline bool
28 __vma_matches(struct vm_area_struct *vma, struct file *filp,
29 	      unsigned long addr, unsigned long size)
30 {
31 	if (vma->vm_file != filp)
32 		return false;
33 
34 	return vma->vm_start == addr &&
35 	       (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
36 }
37 #endif
38 
39 /**
40  * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
41  *			 it is mapped to.
42  * @dev: drm device
43  * @data: ioctl data blob
44  * @file: drm file
45  *
46  * While the mapping holds a reference on the contents of the object, it doesn't
47  * imply a ref on the object itself.
48  *
49  * IMPORTANT:
50  *
51  * DRM driver writers who look at this function as an example for how to do GEM
52  * mmap support, please don't implement mmap support like here. The modern way
53  * to implement DRM mmap support is with an mmap offset ioctl (like
54  * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
55  * That way debug tooling like valgrind will understand what's going on; hiding
56  * the mmap call in a driver-private ioctl will break that. The i915 driver only
57  * does CPU mmaps this way because we didn't know better.
58  */
59 int
60 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
61 		    struct drm_file *file)
62 {
63 	struct drm_i915_gem_mmap *args = data;
64 	struct drm_i915_gem_object *obj;
65 	vaddr_t addr;
66 	vsize_t size;
67 	int ret;
68 
69 #ifdef __OpenBSD__
70 	if (args->size == 0 || args->offset & PAGE_MASK)
71 		return -EINVAL;
72 	size = round_page(args->size);
73 	if (args->offset + size < args->offset)
74 		return -EINVAL;
75 #endif
76 
77 	if (args->flags & ~(I915_MMAP_WC))
78 		return -EINVAL;
79 
80 	if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
81 		return -ENODEV;
82 
83 	obj = i915_gem_object_lookup(file, args->handle);
84 	if (!obj)
85 		return -ENOENT;
86 
87 	/* prime objects have no backing filp to GEM mmap
88 	 * pages from.
89 	 */
90 	if (!obj->base.filp) {
91 		addr = -ENXIO;
92 		goto err;
93 	}
94 
95 	if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
96 		addr = -EINVAL;
97 		goto err;
98 	}
99 
100 #ifdef __linux__
101 	addr = vm_mmap(obj->base.filp, 0, args->size,
102 		       PROT_READ | PROT_WRITE, MAP_SHARED,
103 		       args->offset);
104 	if (IS_ERR_VALUE(addr))
105 		goto err;
106 
107 	if (args->flags & I915_MMAP_WC) {
108 		struct mm_struct *mm = current->mm;
109 		struct vm_area_struct *vma;
110 
111 		if (down_write_killable(&mm->mmap_sem)) {
112 			addr = -EINTR;
113 			goto err;
114 		}
115 		vma = find_vma(mm, addr);
116 		if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
117 			vma->vm_page_prot =
118 				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
119 		else
120 			addr = -ENOMEM;
121 		up_write(&mm->mmap_sem);
122 		if (IS_ERR_VALUE(addr))
123 			goto err;
124 	}
125 	i915_gem_object_put(obj);
126 #else
127 	addr = 0;
128 	ret = -uvm_map(&curproc->p_vmspace->vm_map, &addr, size,
129 	    obj->base.uao, args->offset, 0, UVM_MAPFLAG(PROT_READ | PROT_WRITE,
130 	    PROT_READ | PROT_WRITE, MAP_INHERIT_SHARE, MADV_RANDOM,
131 	    (args->flags & I915_MMAP_WC) ? UVM_FLAG_WC : 0));
132 	if (ret == 0)
133 		uao_reference(obj->base.uao);
134 	i915_gem_object_put(obj);
135 	if (ret)
136 		return ret;
137 #endif
138 
139 	args->addr_ptr = (u64)addr;
140 	return 0;
141 
142 err:
143 	i915_gem_object_put(obj);
144 	return addr;
145 }
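
/*
 * Userspace sketch of the flow this legacy ioctl implements (illustrative
 * only, not part of the driver; assumes libdrm's drmIoctl(), an open DRM fd,
 * and a GEM handle plus its size obtained elsewhere).  The kernel maps the
 * object and returns the CPU address in addr_ptr:
 *
 *	struct drm_i915_gem_mmap arg = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size   = obj_size,
 *		.flags  = I915_MMAP_WC,
 *	};
 *	void *ptr = NULL;
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg) == 0)
 *		ptr = (void *)(uintptr_t)arg.addr_ptr;
 *
 * As the comment above stresses, new code should prefer the mmap-offset
 * interface (see i915_gem_mmap_offset_ioctl() below).
 */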
146 
147 static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
148 {
149 	return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
150 }
151 
152 /**
153  * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
154  *
155  * A history of the GTT mmap interface:
156  *
157  * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to be
158  *     aligned and suitable for fencing, and still fit into the available
159  *     mappable space left by the pinned display objects. A classic problem
160  *     we called the page-fault-of-doom where we would ping-pong between
161  *     two objects that could not fit inside the GTT and so the memcpy
162  *     would page one object in at the expense of the other between every
163  *     single byte.
164  *
165  * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
166  *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
167  *     object is too large for the available space (or simply too large
168  *     for the mappable aperture!), a view is created instead and faulted
169  *     into userspace. (This view is aligned and sized appropriately for
170  *     fenced access.)
171  *
172  * 2 - Recognise WC as a separate cache domain so that we can flush the
173  *     delayed writes via GTT before performing direct access via WC.
174  *
175  * 3 - Remove implicit set-domain(GTT) and synchronisation on initial
176  *     pagefault; swapin remains transparent.
177  *
178  * 4 - Support multiple fault handlers per object depending on object's
179  *     backing storage (a.k.a. MMAP_OFFSET).
180  *
181  * Restrictions:
182  *
183  *  * snoopable objects cannot be accessed via the GTT. Doing so can cause
184  *    machine hangs on some architectures, corruption on others. An attempt to
185  *    service a GTT page fault from a snoopable object will generate a SIGBUS.
186  *
187  *  * the object must be able to fit into RAM (physical memory, though not
188  *    limited to the mappable aperture).
189  *
190  *
191  * Caveats:
192  *
193  *  * a new GTT page fault will synchronize rendering from the GPU and flush
194  *    all data to system memory. Subsequent access will not be synchronized.
195  *
196  *  * all mappings are revoked on runtime device suspend.
197  *
198  *  * there are only 8, 16 or 32 fence registers to share between all users
199  *    (older machines require fence registers for display and blitter access
200  *    as well). Contention for the fence registers will cause the previous users
201  *    to be unmapped, and any new access will generate new page faults.
202  *
203  *  * running out of memory while servicing a fault may generate a SIGBUS,
204  *    rather than the expected SIGSEGV.
205  */
206 int i915_gem_mmap_gtt_version(void)
207 {
208 	return 4;
209 }
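
/*
 * Minimal userspace sketch (an assumed, illustrative usage, not part of the
 * driver): the version reported by i915_gem_mmap_gtt_version() is queried
 * through the GETPARAM interface, with fd an open DRM fd and drmIoctl()
 * provided by libdrm:
 *
 *	int version = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_MMAP_GTT_VERSION,
 *		.value = &version,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 */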
210 
211 static inline struct i915_ggtt_view
212 compute_partial_view(const struct drm_i915_gem_object *obj,
213 		     pgoff_t page_offset,
214 		     unsigned int chunk)
215 {
216 	struct i915_ggtt_view view;
217 
218 	if (i915_gem_object_is_tiled(obj))
219 		chunk = roundup(chunk, tile_row_pages(obj));
220 
221 	view.type = I915_GGTT_VIEW_PARTIAL;
222 	view.partial.offset = rounddown(page_offset, chunk);
223 	view.partial.size =
224 		min_t(unsigned int, chunk,
225 		      (obj->base.size >> PAGE_SHIFT) - view.partial.offset);
226 
227 	/* If the partial covers the entire object, just create a normal VMA. */
228 	if (chunk >= obj->base.size >> PAGE_SHIFT)
229 		view.type = I915_GGTT_VIEW_NORMAL;
230 
231 	return view;
232 }
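
/*
 * Worked example with illustrative numbers (assuming 4 KiB pages): callers
 * pass chunk = MIN_CHUNK_PAGES = SZ_1M >> PAGE_SHIFT = 256.  For an untiled
 * 16 MiB object (4096 pages) faulting at page_offset 3000, the code above
 * gives partial.offset = rounddown(3000, 256) = 2816 and
 * partial.size = min(256, 4096 - 2816) = 256; since 256 < 4096 the view
 * stays I915_GGTT_VIEW_PARTIAL.  Only a chunk covering the whole object
 * degrades to I915_GGTT_VIEW_NORMAL.
 */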
233 
234 #ifdef __linux__
235 
236 static vm_fault_t i915_error_to_vmf_fault(int err)
237 {
238 	switch (err) {
239 	default:
240 		WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err);
241 		/* fallthrough */
242 	case -EIO: /* shmemfs failure from swap device */
243 	case -EFAULT: /* purged object */
244 	case -ENODEV: /* bad object, how did you get here! */
245 	case -ENXIO: /* unable to access backing store (on device) */
246 		return VM_FAULT_SIGBUS;
247 
248 	case -ENOSPC: /* shmemfs allocation failure */
249 	case -ENOMEM: /* our allocation failure */
250 		return VM_FAULT_OOM;
251 
252 	case 0:
253 	case -EAGAIN:
254 	case -ERESTARTSYS:
255 	case -EINTR:
256 	case -EBUSY:
257 		/*
258 		 * EBUSY is ok: this just means that another thread
259 		 * already did the job.
260 		 */
261 		return VM_FAULT_NOPAGE;
262 	}
263 }
264 
265 static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
266 {
267 	struct vm_area_struct *area = vmf->vma;
268 	struct i915_mmap_offset *mmo = area->vm_private_data;
269 	struct drm_i915_gem_object *obj = mmo->obj;
270 	resource_size_t iomap;
271 	int err;
272 
273 	/* Sanity check that we allow writing into this object */
274 	if (unlikely(i915_gem_object_is_readonly(obj) &&
275 		     area->vm_flags & VM_WRITE))
276 		return VM_FAULT_SIGBUS;
277 
278 	err = i915_gem_object_pin_pages(obj);
279 	if (err)
280 		goto out;
281 
282 	iomap = -1;
283 	if (!i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE)) {
284 		iomap = obj->mm.region->iomap.base;
285 		iomap -= obj->mm.region->region.start;
286 	}
287 
288 	/* PTEs are revoked in obj->ops->put_pages() */
289 	err = remap_io_sg(area,
290 			  area->vm_start, area->vm_end - area->vm_start,
291 			  obj->mm.pages->sgl, iomap);
292 
293 	if (area->vm_flags & VM_WRITE) {
294 		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
295 		obj->mm.dirty = true;
296 	}
297 
298 	i915_gem_object_unpin_pages(obj);
299 
300 out:
301 	return i915_error_to_vmf_fault(err);
302 }
303 
304 static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
305 {
306 #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
307 	struct vm_area_struct *area = vmf->vma;
308 	struct i915_mmap_offset *mmo = area->vm_private_data;
309 	struct drm_i915_gem_object *obj = mmo->obj;
310 	struct drm_device *dev = obj->base.dev;
311 	struct drm_i915_private *i915 = to_i915(dev);
312 	struct intel_runtime_pm *rpm = &i915->runtime_pm;
313 	struct i915_ggtt *ggtt = &i915->ggtt;
314 	bool write = area->vm_flags & VM_WRITE;
315 	intel_wakeref_t wakeref;
316 	struct i915_vma *vma;
317 	pgoff_t page_offset;
318 	int srcu;
319 	int ret;
320 
321 	/* Sanity check that we allow writing into this object */
322 	if (i915_gem_object_is_readonly(obj) && write)
323 		return VM_FAULT_SIGBUS;
324 
325 	/* We don't use vmf->pgoff since that has the fake offset */
326 	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
327 
328 	trace_i915_gem_object_fault(obj, page_offset, true, write);
329 
330 	ret = i915_gem_object_pin_pages(obj);
331 	if (ret)
332 		goto err;
333 
334 	wakeref = intel_runtime_pm_get(rpm);
335 
336 	ret = intel_gt_reset_trylock(ggtt->vm.gt, &srcu);
337 	if (ret)
338 		goto err_rpm;
339 
340 	/* Now pin it into the GTT as needed */
341 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
342 				       PIN_MAPPABLE |
343 				       PIN_NONBLOCK /* NOWARN */ |
344 				       PIN_NOEVICT);
345 	if (IS_ERR(vma)) {
346 		/* Use a partial view if it is bigger than available space */
347 		struct i915_ggtt_view view =
348 			compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
349 		unsigned int flags;
350 
351 		flags = PIN_MAPPABLE | PIN_NOSEARCH;
352 		if (view.type == I915_GGTT_VIEW_NORMAL)
353 			flags |= PIN_NONBLOCK; /* avoid warnings for pinned */
354 
355 		/*
356 		 * Userspace is now writing through an untracked VMA; abandon
357 		 * all hope that the hardware is able to track future writes.
358 		 */
359 
360 		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
361 		if (IS_ERR(vma)) {
362 			flags = PIN_MAPPABLE;
363 			view.type = I915_GGTT_VIEW_PARTIAL;
364 			vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
365 		}
366 
367 		/* The entire mappable GGTT is pinned? Unexpected! */
368 		GEM_BUG_ON(vma == ERR_PTR(-ENOSPC));
369 	}
370 	if (IS_ERR(vma)) {
371 		ret = PTR_ERR(vma);
372 		goto err_reset;
373 	}
374 
375 	/* Access to snoopable pages through the GTT is incoherent. */
376 	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
377 		ret = -EFAULT;
378 		goto err_unpin;
379 	}
380 
381 	ret = i915_vma_pin_fence(vma);
382 	if (ret)
383 		goto err_unpin;
384 
385 	/* Finally, remap it using the new GTT offset */
386 	ret = remap_io_mapping(area,
387 			       area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
388 			       (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
389 			       min_t(u64, vma->size, area->vm_end - area->vm_start),
390 			       &ggtt->iomap);
391 	if (ret)
392 		goto err_fence;
393 
394 	assert_rpm_wakelock_held(rpm);
395 
396 	/* Mark as being mmapped into userspace for later revocation */
397 	mutex_lock(&i915->ggtt.vm.mutex);
398 	if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
399 		list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
400 	mutex_unlock(&i915->ggtt.vm.mutex);
401 
402 	/* Track the mmo associated with the fenced vma */
403 	vma->mmo = mmo;
404 
405 	if (IS_ACTIVE(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND))
406 		intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
407 				   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
408 
409 	if (write) {
410 		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
411 		i915_vma_set_ggtt_write(vma);
412 		obj->mm.dirty = true;
413 	}
414 
415 err_fence:
416 	i915_vma_unpin_fence(vma);
417 err_unpin:
418 	__i915_vma_unpin(vma);
419 err_reset:
420 	intel_gt_reset_unlock(ggtt->vm.gt, srcu);
421 err_rpm:
422 	intel_runtime_pm_put(rpm, wakeref);
423 	i915_gem_object_unpin_pages(obj);
424 err:
425 	return i915_error_to_vmf_fault(ret);
426 }
427 
428 #else /* !__linux__ */
429 
430 static int i915_error_to_vmf_fault(int err)
431 {
432 	switch (err) {
433 	default:
434 		WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err);
435 		/* fallthrough */
436 	case -EIO: /* shmemfs failure from swap device */
437 	case -EFAULT: /* purged object */
438 	case -ENODEV: /* bad object, how did you get here! */
439 	case -ENXIO: /* unable to access backing store (on device) */
440 		return VM_PAGER_ERROR;
441 
442 	case -ENOSPC: /* shmemfs allocation failure */
443 	case -ENOMEM: /* our allocation failure */
444 		return VM_PAGER_ERROR;
445 
446 	case 0:
447 	case -EAGAIN:
448 	case -ERESTART:
449 	case -EINTR:
450 	case -EBUSY:
451 		/*
452 		 * EBUSY is ok: this just means that another thread
453 		 * already did the job.
454 		 */
455 		return VM_PAGER_OK;
456 	}
457 }
458 
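/*
 * CPU (non-GTT) fault handler for the OpenBSD path: pin the object's backing
 * pages and enter every sg page into the faulting pmap starting at the map
 * entry's start address, applying PMAP_WC or PMAP_NOCACHE according to the
 * mmap type chosen when the offset was created.
 */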
459 static int
460 vm_fault_cpu(struct i915_mmap_offset *mmo, struct uvm_faultinfo *ufi,
461     vm_prot_t access_type)
462 {
463 	struct vm_map_entry *entry = ufi->entry;
464 	struct drm_i915_gem_object *obj = mmo->obj;
465 	int write = !!(access_type & PROT_WRITE);
466 	struct sg_table *pages;
467 	struct sg_page_iter sg_iter;
468 	vm_prot_t mapprot;
469 	vaddr_t va = entry->start;
470 	paddr_t pa, pa_flags = 0;
471 	int flags;
472 	int err;
473 
474 	/* Sanity check that we allow writing into this object */
475 	if (unlikely(i915_gem_object_is_readonly(obj) && write)) {
476 		uvmfault_unlockall(ufi, NULL, &obj->base.uobj);
477 		return VM_PAGER_BAD;
478 	}
479 
480 	err = i915_gem_object_pin_pages(obj);
481 	if (err)
482 		goto out;
483 
484 	KASSERT(i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE));
485 
486 	flags = mapprot = entry->protection;
487 	if (write == 0)
488 		flags &= ~PROT_WRITE;
489 
490 	switch (mmo->mmap_type) {
491 	case I915_MMAP_TYPE_WC:
492 		pa_flags |= PMAP_WC;
493 		break;
494 	case I915_MMAP_TYPE_UC:
495 		pa_flags |= PMAP_NOCACHE;
496 		break;
497 	default:
498 		break;
499 	}
500 
501 	pages = obj->mm.pages;
502 	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
503 		pa = sg_page_iter_dma_address(&sg_iter);
504 		if (pmap_enter(ufi->orig_map->pmap, va, pa | pa_flags,
505 		    mapprot, PMAP_CANFAIL | flags)) {
506 			err = -ENOMEM;
507 			break;
508 		}
509 		va += PAGE_SIZE;
510 	}
511 	pmap_update(ufi->orig_map->pmap);
512 
513 	if (write) {
514 		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
515 		obj->mm.dirty = true;
516 	}
517 
518 	i915_gem_object_unpin_pages(obj);
519 
520 out:
521 	uvmfault_unlockall(ufi, NULL, &obj->base.uobj);
522 	return i915_error_to_vmf_fault(err);
523 }
524 
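/*
 * OpenBSD replacement for the Linux remap_io_mapping() helper used by
 * vm_fault_gtt() below: enter write-combined (PMAP_WC) mappings into pm for
 * size bytes at va, backed by the contiguous physical range starting at pfn.
 * Returns -ENOMEM if any pmap_enter() fails, 0 on success.
 */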
525 int
526 remap_io_mapping(pmap_t pm, vm_prot_t mapprot,
527     vaddr_t va, unsigned long pfn, unsigned long size)
528 {
529 	vaddr_t end = va + size;
530 	paddr_t pa = ptoa(pfn);
531 
532 	while (va < end) {
533 		if (pmap_enter(pm, va, pa | PMAP_WC, mapprot, PMAP_CANFAIL | mapprot))
534 			return -ENOMEM;
535 		va += PAGE_SIZE;
536 		pa += PAGE_SIZE;
537 	}
538 
539 	return 0;
540 }
541 
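/*
 * GTT fault handler for the OpenBSD path: pin the object into the mappable
 * GGTT (falling back to a partial view if it does not fit), take a fence,
 * and map the corresponding aperture pages write-combined into the faulting
 * pmap via remap_io_mapping() above.
 */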
542 static int
543 vm_fault_gtt(struct i915_mmap_offset *mmo, struct uvm_faultinfo *ufi,
544     vaddr_t vaddr, vm_prot_t access_type)
545 {
546 #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
547 	struct vm_map_entry *entry = ufi->entry;
548 	struct drm_i915_gem_object *obj = mmo->obj;
549 	struct drm_device *dev = obj->base.dev;
550 	struct drm_i915_private *i915 = to_i915(dev);
551 	struct intel_runtime_pm *rpm = &i915->runtime_pm;
552 	struct i915_ggtt *ggtt = &i915->ggtt;
553 	int write = !!(access_type & PROT_WRITE);
554 	intel_wakeref_t wakeref;
555 	struct i915_vma *vma;
556 	pgoff_t page_offset;
557 	int srcu;
558 	int ret;
559 
560 	/* Sanity check that we allow writing into this object */
561 	if (i915_gem_object_is_readonly(obj) && write) {
562 		uvmfault_unlockall(ufi, NULL, &obj->base.uobj);
563 		return VM_PAGER_BAD;
564 	}
565 
566 	/* We don't use vmf->pgoff since that has the fake offset */
567 	page_offset = (vaddr - entry->start) >> PAGE_SHIFT;
568 
569 	trace_i915_gem_object_fault(obj, page_offset, true, write);
570 
571 	ret = i915_gem_object_pin_pages(obj);
572 	if (ret)
573 		goto err;
574 
575 	wakeref = intel_runtime_pm_get(rpm);
576 
577 	ret = intel_gt_reset_trylock(ggtt->vm.gt, &srcu);
578 	if (ret)
579 		goto err_rpm;
580 
581 	/* Now pin it into the GTT as needed */
582 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
583 				       PIN_MAPPABLE |
584 				       PIN_NONBLOCK /* NOWARN */ |
585 				       PIN_NOEVICT);
586 	if (IS_ERR(vma)) {
587 		/* Use a partial view if it is bigger than available space */
588 		struct i915_ggtt_view view =
589 			compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
590 		unsigned int flags;
591 
592 		flags = PIN_MAPPABLE | PIN_NOSEARCH;
593 		if (view.type == I915_GGTT_VIEW_NORMAL)
594 			flags |= PIN_NONBLOCK; /* avoid warnings for pinned */
595 
596 		/*
597 		 * Userspace is now writing through an untracked VMA; abandon
598 		 * all hope that the hardware is able to track future writes.
599 		 */
600 
601 		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
602 		if (IS_ERR(vma)) {
603 			flags = PIN_MAPPABLE;
604 			view.type = I915_GGTT_VIEW_PARTIAL;
605 			vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
606 		}
607 
608 		/* The entire mappable GGTT is pinned? Unexpected! */
609 		GEM_BUG_ON(vma == ERR_PTR(-ENOSPC));
610 	}
611 	if (IS_ERR(vma)) {
612 		ret = PTR_ERR(vma);
613 		goto err_reset;
614 	}
615 
616 	/* Access to snoopable pages through the GTT is incoherent. */
617 	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
618 		ret = -EFAULT;
619 		goto err_unpin;
620 	}
621 
622 	ret = i915_vma_pin_fence(vma);
623 	if (ret)
624 		goto err_unpin;
625 
626 	/* Finally, remap it using the new GTT offset */
627 	ret = remap_io_mapping(ufi->orig_map->pmap, entry->protection,
628 			       entry->start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
629 			       (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
630 			       min_t(u64, vma->size, entry->end - entry->start));
631 	if (ret)
632 		goto err_fence;
633 
634 	assert_rpm_wakelock_held(rpm);
635 
636 	/* Mark as being mmapped into userspace for later revocation */
637 	mutex_lock(&i915->ggtt.vm.mutex);
638 	if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
639 		list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
640 	mutex_unlock(&i915->ggtt.vm.mutex);
641 
642 	/* Track the mmo associated with the fenced vma */
643 	vma->mmo = mmo;
644 
645 #ifdef notyet
646 	if (IS_ACTIVE(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND))
647 		intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
648 				   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
649 #endif
650 
651 	if (write) {
652 		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
653 		i915_vma_set_ggtt_write(vma);
654 		obj->mm.dirty = true;
655 	}
656 
657 err_fence:
658 	i915_vma_unpin_fence(vma);
659 err_unpin:
660 	__i915_vma_unpin(vma);
661 err_reset:
662 	intel_gt_reset_unlock(ggtt->vm.gt, srcu);
663 err_rpm:
664 	intel_runtime_pm_put(rpm, wakeref);
665 	i915_gem_object_unpin_pages(obj);
666 err:
667 	uvmfault_unlockall(ufi, NULL, &obj->base.uobj);
668 	return i915_error_to_vmf_fault(ret);
669 }
670 
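/*
 * uvm fault entry point for GEM objects on OpenBSD: resolve the faulting map
 * entry's fake offset to an i915_mmap_offset and dispatch to vm_fault_gtt()
 * or vm_fault_cpu() depending on the mapping type.
 */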
671 int
672 i915_gem_fault(struct drm_gem_object *gem_obj, struct uvm_faultinfo *ufi,
673     off_t offset, vaddr_t vaddr, vm_page_t *pps, int npages, int centeridx,
674     vm_prot_t access_type, int flags)
675 {
676 	struct drm_vma_offset_node *node;
677 	struct drm_device *dev = gem_obj->dev;
678 	struct vm_map_entry *entry = ufi->entry;
679 	vsize_t size = entry->end - entry->start;
680 	struct i915_mmap_offset *mmo = NULL;
681 
682 	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
683 	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
684 						  entry->offset >> PAGE_SHIFT,
685 						  size >> PAGE_SHIFT);
686 	if (likely(node))
687 		mmo = container_of(node, struct i915_mmap_offset, vma_node);
688 	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
689 	if (!mmo) {
690 		uvmfault_unlockall(ufi, NULL, &gem_obj->uobj);
691 		return VM_PAGER_BAD;
692 	}
693 
694 	KASSERT(gem_obj == &mmo->obj->base);
695 
696 	if (mmo->mmap_type == I915_MMAP_TYPE_GTT)
697 		return vm_fault_gtt(mmo, ufi, vaddr, access_type);
698 
699 	return vm_fault_cpu(mmo, ufi, access_type);
700 }
701 
702 #endif /* !__linux__ */
703 
704 void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
705 {
706 	struct i915_vma *vma;
707 
708 	GEM_BUG_ON(!obj->userfault_count);
709 
710 	for_each_ggtt_vma(vma, obj)
711 		i915_vma_revoke_mmap(vma);
712 
713 	GEM_BUG_ON(obj->userfault_count);
714 }
715 
716 /*
717  * It is vital that we remove the page mapping if we have mapped a tiled
718  * object through the GTT and then lose the fence register due to
719  * resource pressure. Similarly, if the object has been moved out of the
720  * aperture, then pages mapped into userspace must be revoked. Removing the
721  * mapping will then trigger a page fault on the next user access, allowing
722  * fixup by vm_fault_gtt().
723  */
724 static void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
725 {
726 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
727 	intel_wakeref_t wakeref;
728 
729 	/*
730 	 * Serialisation between user GTT access and our code depends upon
731 	 * revoking the CPU's PTE whilst the mutex is held. The next user
732 	 * pagefault then has to wait until we release the mutex.
733 	 *
734 	 * Note that RPM complicates matters somewhat by adding an additional
735 	 * requirement that operations to the GGTT be made holding the RPM
736 	 * wakeref.
737 	 */
738 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
739 	mutex_lock(&i915->ggtt.vm.mutex);
740 
741 	if (!obj->userfault_count)
742 		goto out;
743 
744 	__i915_gem_object_release_mmap_gtt(obj);
745 
746 	/*
747 	 * Ensure that the CPU's PTEs are revoked and there are no outstanding
748 	 * memory transactions from userspace before we return. The TLB
749 	 * flushing implied by changing the PTEs above *should* be
750 	 * sufficient; an extra barrier here just provides us with a bit
751 	 * of paranoid documentation about our requirement to serialise
752 	 * memory writes before touching registers / GSM.
753 	 */
754 	wmb();
755 
756 out:
757 	mutex_unlock(&i915->ggtt.vm.mutex);
758 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
759 }
760 
761 void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
762 {
763 	struct i915_mmap_offset *mmo, *mn;
764 
765 	spin_lock(&obj->mmo.lock);
766 	rbtree_postorder_for_each_entry_safe(mmo, mn,
767 					     &obj->mmo.offsets, offset) {
768 		/*
769 		 * vma_node_unmap for GTT mmaps handled already in
770 		 * __i915_gem_object_release_mmap_gtt
771 		 */
772 		if (mmo->mmap_type == I915_MMAP_TYPE_GTT)
773 			continue;
774 
775 		spin_unlock(&obj->mmo.lock);
776 #ifdef __linux__
777 		drm_vma_node_unmap(&mmo->vma_node,
778 				   obj->base.dev->anon_inode->i_mapping);
779 #else
780 		if (drm_mm_node_allocated(&mmo->vma_node.vm_node)) {
781 			struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
782 			struct i915_vma *vma;
783 			struct vm_page *pg;
784 
785 			for_each_ggtt_vma(vma, obj) {
786 				for (pg = &dev_priv->pgs[atop(vma->node.start)];
787 				     pg != &dev_priv->pgs[atop(vma->node.start + vma->size)];
788 				     pg++)
789 					pmap_page_protect(pg, PROT_NONE);
790 			}
791 		}
792 #endif
793 		spin_lock(&obj->mmo.lock);
794 	}
795 	spin_unlock(&obj->mmo.lock);
796 }
797 
798 /**
799  * i915_gem_object_release_mmap - remove physical page mappings
800  * @obj: obj in question
801  *
802  * Preserve the reservation of the mmapping with the DRM core code, but
803  * relinquish ownership of the pages back to the system.
804  */
805 void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
806 {
807 	i915_gem_object_release_mmap_gtt(obj);
808 	i915_gem_object_release_mmap_offset(obj);
809 }
810 
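/*
 * Fake mmap offsets are tracked per object in an rbtree (obj->mmo.offsets)
 * keyed by mmap_type, so each object carries at most one offset node per
 * mapping type; lookup_mmo() and insert_mmo() below walk and populate that
 * tree under obj->mmo.lock.
 */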
811 static struct i915_mmap_offset *
812 lookup_mmo(struct drm_i915_gem_object *obj,
813 	   enum i915_mmap_type mmap_type)
814 {
815 	struct rb_node *rb;
816 
817 	spin_lock(&obj->mmo.lock);
818 	rb = obj->mmo.offsets.rb_node;
819 	while (rb) {
820 		struct i915_mmap_offset *mmo =
821 			rb_entry(rb, typeof(*mmo), offset);
822 
823 		if (mmo->mmap_type == mmap_type) {
824 			spin_unlock(&obj->mmo.lock);
825 			return mmo;
826 		}
827 
828 		if (mmo->mmap_type < mmap_type)
829 			rb = rb->rb_right;
830 		else
831 			rb = rb->rb_left;
832 	}
833 	spin_unlock(&obj->mmo.lock);
834 
835 	return NULL;
836 }
837 
838 static struct i915_mmap_offset *
839 insert_mmo(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo)
840 {
841 	struct rb_node *rb, **p;
842 
843 	spin_lock(&obj->mmo.lock);
844 	rb = NULL;
845 	p = &obj->mmo.offsets.rb_node;
846 	while (*p) {
847 		struct i915_mmap_offset *pos;
848 
849 		rb = *p;
850 		pos = rb_entry(rb, typeof(*pos), offset);
851 
852 		if (pos->mmap_type == mmo->mmap_type) {
853 			spin_unlock(&obj->mmo.lock);
854 			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
855 					      &mmo->vma_node);
856 			kfree(mmo);
857 			return pos;
858 		}
859 
860 		if (pos->mmap_type < mmo->mmap_type)
861 			p = &rb->rb_right;
862 		else
863 			p = &rb->rb_left;
864 	}
865 	rb_link_node(&mmo->offset, rb, p);
866 	rb_insert_color(&mmo->offset, &obj->mmo.offsets);
867 	spin_unlock(&obj->mmo.lock);
868 
869 	return mmo;
870 }
871 
872 static struct i915_mmap_offset *
873 mmap_offset_attach(struct drm_i915_gem_object *obj,
874 		   enum i915_mmap_type mmap_type,
875 		   struct drm_file *file)
876 {
877 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
878 	struct i915_mmap_offset *mmo;
879 	int err;
880 
881 	mmo = lookup_mmo(obj, mmap_type);
882 	if (mmo)
883 		goto out;
884 
885 	mmo = kmalloc(sizeof(*mmo), GFP_KERNEL);
886 	if (!mmo)
887 		return ERR_PTR(-ENOMEM);
888 
889 	mmo->obj = obj;
890 	mmo->mmap_type = mmap_type;
891 	drm_vma_node_reset(&mmo->vma_node);
892 
893 	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
894 				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
895 	if (likely(!err))
896 		goto insert;
897 
898 	/* Attempt to reap some mmap space from dead objects */
899 	err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT);
900 	if (err)
901 		goto err;
902 
903 	i915_gem_drain_freed_objects(i915);
904 	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
905 				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
906 	if (err)
907 		goto err;
908 
909 insert:
910 	mmo = insert_mmo(obj, mmo);
911 	GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
912 out:
913 	if (file)
914 		drm_vma_node_allow(&mmo->vma_node, file);
915 	return mmo;
916 
917 err:
918 	kfree(mmo);
919 	return ERR_PTR(err);
920 }
921 
922 static int
923 __assign_mmap_offset(struct drm_file *file,
924 		     u32 handle,
925 		     enum i915_mmap_type mmap_type,
926 		     u64 *offset)
927 {
928 	struct drm_i915_gem_object *obj;
929 	struct i915_mmap_offset *mmo;
930 	int err;
931 
932 	obj = i915_gem_object_lookup(file, handle);
933 	if (!obj)
934 		return -ENOENT;
935 
936 	if (i915_gem_object_never_mmap(obj)) {
937 		err = -ENODEV;
938 		goto out;
939 	}
940 
941 	if (mmap_type != I915_MMAP_TYPE_GTT &&
942 	    !i915_gem_object_type_has(obj,
943 				      I915_GEM_OBJECT_HAS_STRUCT_PAGE |
944 				      I915_GEM_OBJECT_HAS_IOMEM)) {
945 		err = -ENODEV;
946 		goto out;
947 	}
948 
949 	mmo = mmap_offset_attach(obj, mmap_type, file);
950 	if (IS_ERR(mmo)) {
951 		err = PTR_ERR(mmo);
952 		goto out;
953 	}
954 
955 	*offset = drm_vma_node_offset_addr(&mmo->vma_node);
956 	err = 0;
957 out:
958 	i915_gem_object_put(obj);
959 	return err;
960 }
961 
962 int
963 i915_gem_dumb_mmap_offset(struct drm_file *file,
964 			  struct drm_device *dev,
965 			  u32 handle,
966 			  u64 *offset)
967 {
968 	enum i915_mmap_type mmap_type;
969 
970 	if (boot_cpu_has(X86_FEATURE_PAT))
971 		mmap_type = I915_MMAP_TYPE_WC;
972 	else if (!i915_ggtt_has_aperture(&to_i915(dev)->ggtt))
973 		return -ENODEV;
974 	else
975 		mmap_type = I915_MMAP_TYPE_GTT;
976 
977 	return __assign_mmap_offset(file, handle, mmap_type, offset);
978 }
979 
980 /**
981  * i915_gem_mmap_offset_ioctl - prepare an object for GTT mmap'ing
982  * @dev: DRM device
983  * @data: GTT mapping ioctl data
984  * @file: GEM object info
985  *
986  * Simply returns the fake offset to userspace so it can mmap it.
987  * The mmap call will end up in drm_gem_mmap(), which will set things
988  * up so we can get faults in the handler above.
989  *
990  * The fault handler will take care of binding the object into the GTT
991  * (since it may have been evicted to make room for something), allocating
992  * a fence register, and mapping the appropriate aperture address into
993  * userspace.
994  */
995 int
996 i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
997 			   struct drm_file *file)
998 {
999 	struct drm_i915_private *i915 = to_i915(dev);
1000 	struct drm_i915_gem_mmap_offset *args = data;
1001 	enum i915_mmap_type type;
1002 	int err;
1003 
1004 	/*
1005 	 * Historically we failed to check args.pad and args.offset
1006 	 * and so we cannot use those fields for user input and we cannot
1007 	 * add -EINVAL for them as the ABI is fixed, i.e. old userspace
1008 	 * may be feeding in garbage in those fields.
1009 	 *
1010 	 * if (args->pad) return -EINVAL; is verboten!
1011 	 */
1012 
1013 	err = i915_user_extensions(u64_to_user_ptr(args->extensions),
1014 				   NULL, 0, NULL);
1015 	if (err)
1016 		return err;
1017 
1018 	switch (args->flags) {
1019 	case I915_MMAP_OFFSET_GTT:
1020 		if (!i915_ggtt_has_aperture(&i915->ggtt))
1021 			return -ENODEV;
1022 		type = I915_MMAP_TYPE_GTT;
1023 		break;
1024 
1025 	case I915_MMAP_OFFSET_WC:
1026 		if (!boot_cpu_has(X86_FEATURE_PAT))
1027 			return -ENODEV;
1028 		type = I915_MMAP_TYPE_WC;
1029 		break;
1030 
1031 	case I915_MMAP_OFFSET_WB:
1032 		type = I915_MMAP_TYPE_WB;
1033 		break;
1034 
1035 	case I915_MMAP_OFFSET_UC:
1036 		if (!boot_cpu_has(X86_FEATURE_PAT))
1037 			return -ENODEV;
1038 		type = I915_MMAP_TYPE_UC;
1039 		break;
1040 
1041 	default:
1042 		return -EINVAL;
1043 	}
1044 
1045 	return __assign_mmap_offset(file, args->handle, type, &args->offset);
1046 }
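
/*
 * Userspace sketch of the recommended mmap-offset flow (illustrative only;
 * fd, handle and obj_size are assumed, drmIoctl() comes from libdrm): obtain
 * the fake offset with this ioctl, then hand it to mmap() on the DRM fd.
 *
 *	struct drm_i915_gem_mmap_offset arg = {
 *		.handle = handle,
 *		.flags  = I915_MMAP_OFFSET_WC,
 *	};
 *	void *ptr = MAP_FAILED;
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg) == 0)
 *		ptr = mmap(NULL, obj_size, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED, fd, arg.offset);
 */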
1047 
1048 #ifdef __linux__
1049 
1050 static void vm_open(struct vm_area_struct *vma)
1051 {
1052 	struct i915_mmap_offset *mmo = vma->vm_private_data;
1053 	struct drm_i915_gem_object *obj = mmo->obj;
1054 
1055 	GEM_BUG_ON(!obj);
1056 	i915_gem_object_get(obj);
1057 }
1058 
1059 static void vm_close(struct vm_area_struct *vma)
1060 {
1061 	struct i915_mmap_offset *mmo = vma->vm_private_data;
1062 	struct drm_i915_gem_object *obj = mmo->obj;
1063 
1064 	GEM_BUG_ON(!obj);
1065 	i915_gem_object_put(obj);
1066 }
1067 
1068 static const struct vm_operations_struct vm_ops_gtt = {
1069 	.fault = vm_fault_gtt,
1070 	.open = vm_open,
1071 	.close = vm_close,
1072 };
1073 
1074 static const struct vm_operations_struct vm_ops_cpu = {
1075 	.fault = vm_fault_cpu,
1076 	.open = vm_open,
1077 	.close = vm_close,
1078 };
1079 
1080 static int singleton_release(struct inode *inode, struct file *file)
1081 {
1082 	struct drm_i915_private *i915 = file->private_data;
1083 
1084 	cmpxchg(&i915->gem.mmap_singleton, file, NULL);
1085 	drm_dev_put(&i915->drm);
1086 
1087 	return 0;
1088 }
1089 
1090 static const struct file_operations singleton_fops = {
1091 	.owner = THIS_MODULE,
1092 	.release = singleton_release,
1093 };
1094 
1095 static struct file *mmap_singleton(struct drm_i915_private *i915)
1096 {
1097 	struct file *file;
1098 
1099 	rcu_read_lock();
1100 	file = READ_ONCE(i915->gem.mmap_singleton);
1101 	if (file && !get_file_rcu(file))
1102 		file = NULL;
1103 	rcu_read_unlock();
1104 	if (file)
1105 		return file;
1106 
1107 	file = anon_inode_getfile("i915.gem", &singleton_fops, i915, O_RDWR);
1108 	if (IS_ERR(file))
1109 		return file;
1110 
1111 	/* Everyone shares a single global address space */
1112 	file->f_mapping = i915->drm.anon_inode->i_mapping;
1113 
1114 	smp_store_mb(i915->gem.mmap_singleton, file);
1115 	drm_dev_get(&i915->drm);
1116 
1117 	return file;
1118 }
1119 
1120 /*
1121  * This overcomes the limitation in drm_gem_mmap's assignment of a
1122  * drm_gem_object as the vma->vm_private_data, since we need to
1123  * be able to resolve multiple mmap offsets which could be tied
1124  * to a single GEM object.
1125  */
1126 int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
1127 {
1128 	struct drm_vma_offset_node *node;
1129 	struct drm_file *priv = filp->private_data;
1130 	struct drm_device *dev = priv->minor->dev;
1131 	struct drm_i915_gem_object *obj = NULL;
1132 	struct i915_mmap_offset *mmo = NULL;
1133 	struct file *anon;
1134 
1135 	if (drm_dev_is_unplugged(dev))
1136 		return -ENODEV;
1137 
1138 	rcu_read_lock();
1139 	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
1140 	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
1141 						  vma->vm_pgoff,
1142 						  vma_pages(vma));
1143 	if (node && drm_vma_node_is_allowed(node, priv)) {
1144 		/*
1145 		 * Skip 0-refcnted objects as they are in the process of being
1146 		 * destroyed and will be invalid when the vma manager lock
1147 		 * is released.
1148 		 */
1149 		mmo = container_of(node, struct i915_mmap_offset, vma_node);
1150 		obj = i915_gem_object_get_rcu(mmo->obj);
1151 	}
1152 	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
1153 	rcu_read_unlock();
1154 	if (!obj)
1155 		return node ? -EACCES : -EINVAL;
1156 
1157 	if (i915_gem_object_is_readonly(obj)) {
1158 		if (vma->vm_flags & VM_WRITE) {
1159 			i915_gem_object_put(obj);
1160 			return -EINVAL;
1161 		}
1162 		vma->vm_flags &= ~VM_MAYWRITE;
1163 	}
1164 
1165 	anon = mmap_singleton(to_i915(dev));
1166 	if (IS_ERR(anon)) {
1167 		i915_gem_object_put(obj);
1168 		return PTR_ERR(anon);
1169 	}
1170 
1171 	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
1172 	vma->vm_private_data = mmo;
1173 
1174 	/*
1175 	 * We keep the ref on mmo->obj, not vm_file, but we require
1176 	 * vma->vm_file->f_mapping, see vma_link(), for later revocation.
1177 	 * Our userspace is accustomed to having per-file resource cleanup
1178 	 * (i.e. contexts, objects and requests) on their close(fd), which
1179 	 * requires avoiding extraneous references to their filp, hence
1180 	 * we prefer to use an anonymous file for their mmaps.
1181 	 */
1182 	fput(vma->vm_file);
1183 	vma->vm_file = anon;
1184 
1185 	switch (mmo->mmap_type) {
1186 	case I915_MMAP_TYPE_WC:
1187 		vma->vm_page_prot =
1188 			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1189 		vma->vm_ops = &vm_ops_cpu;
1190 		break;
1191 
1192 	case I915_MMAP_TYPE_WB:
1193 		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1194 		vma->vm_ops = &vm_ops_cpu;
1195 		break;
1196 
1197 	case I915_MMAP_TYPE_UC:
1198 		vma->vm_page_prot =
1199 			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
1200 		vma->vm_ops = &vm_ops_cpu;
1201 		break;
1202 
1203 	case I915_MMAP_TYPE_GTT:
1204 		vma->vm_page_prot =
1205 			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1206 		vma->vm_ops = &vm_ops_gtt;
1207 		break;
1208 	}
1209 	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
1210 
1211 	return 0;
1212 }
1213 
1214 #else /* !__linux__ */
1215 
1216 /*
1217  * This overcomes the limitation in drm_gem_mmap's assignment of a
1218  * drm_gem_object as the vma->vm_private_data, since we need to
1219  * be able to resolve multiple mmap offsets which could be tied
1220  * to a single GEM object.
1221  */
1222 struct uvm_object *
1223 i915_gem_mmap(struct file *filp, vm_prot_t accessprot,
1224     voff_t off, vsize_t size)
1225 {
1226 	struct drm_vma_offset_node *node;
1227 	struct drm_file *priv = (void *)filp;
1228 	struct drm_device *dev = priv->minor->dev;
1229 	struct drm_i915_gem_object *obj = NULL;
1230 	struct i915_mmap_offset *mmo = NULL;
1231 
1232 	if (unlikely(off < DRM_FILE_PAGE_OFFSET))
1233 		return NULL;
1234 
1235 	if (drm_dev_is_unplugged(dev))
1236 		return NULL;
1237 
1238 	rcu_read_lock();
1239 	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
1240 	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
1241 						  off >> PAGE_SHIFT,
1242 						  atop(round_page(size)));
1243 	if (node && drm_vma_node_is_allowed(node, priv)) {
1244 		/*
1245 		 * Skip 0-refcnted objects as they are in the process of being
1246 		 * destroyed and will be invalid when the vma manager lock
1247 		 * is released.
1248 		 */
1249 		mmo = container_of(node, struct i915_mmap_offset, vma_node);
1250 		obj = i915_gem_object_get_rcu(mmo->obj);
1251 	}
1252 	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
1253 	rcu_read_unlock();
1254 	if (!obj)
1255 		return NULL;
1256 
1257 	if (i915_gem_object_is_readonly(obj)) {
1258 		if (accessprot & PROT_WRITE) {
1259 			i915_gem_object_put(obj);
1260 			return NULL;
1261 		}
1262 	}
1263 
1264 	return &obj->base.uobj;
1265 }
1266 
1267 #endif /* !__linux__ */
1268 
1269 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1270 #include "selftests/i915_gem_mman.c"
1271 #endif
1272