xref: /openbsd/sys/dev/pci/drm/i915/gem/i915_gem_mman.c (revision 73471bf0)
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/anon_inodes.h>
#include <linux/mman.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>

#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_gem_mman.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
#include "i915_vma.h"

#ifdef __linux__
static inline bool
__vma_matches(struct vm_area_struct *vma, struct file *filp,
	      unsigned long addr, unsigned long size)
{
	if (vma->vm_file != filp)
		return false;

	return vma->vm_start == addr &&
	       (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
}
#endif

/**
 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
 *			 it is mapped to.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 *
 * IMPORTANT:
 *
 * DRM driver writers who look at this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like here. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on; hiding
 * the mmap call in a driver-private ioctl will break that. The i915 driver only
 * does cpu mmaps this way because we didn't know better.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_i915_gem_object *obj;
	vaddr_t addr;
	vsize_t size;
	int ret;

#ifdef __OpenBSD__
	if (args->size == 0 || args->offset & PAGE_MASK)
		return -EINVAL;
	size = round_page(args->size);
	if (args->offset + size < args->offset)
		return -EINVAL;
#endif

	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

	if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
		return -ENODEV;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
#ifdef __linux__
	if (!obj->base.filp) {
		addr = -ENXIO;
		goto err;
	}
#else
	if (!obj->base.uao) {
		addr = -ENXIO;
		goto err;
	}
#endif

	if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
		addr = -EINVAL;
		goto err;
	}

#ifdef __linux__
	addr = vm_mmap(obj->base.filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	if (IS_ERR_VALUE(addr))
		goto err;

	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

		if (mmap_write_lock_killable(mm)) {
			addr = -EINTR;
			goto err;
		}
		vma = find_vma(mm, addr);
		if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
			vma->vm_page_prot =
				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		else
			addr = -ENOMEM;
		mmap_write_unlock(mm);
		if (IS_ERR_VALUE(addr))
			goto err;
	}
	i915_gem_object_put(obj);
#else
	addr = 0;
	ret = -uvm_map(&curproc->p_vmspace->vm_map, &addr, size,
	    obj->base.uao, args->offset, 0, UVM_MAPFLAG(PROT_READ | PROT_WRITE,
	    PROT_READ | PROT_WRITE, MAP_INHERIT_SHARE, MADV_RANDOM,
	    (args->flags & I915_MMAP_WC) ? UVM_FLAG_WC : 0));
	if (ret == 0)
		uao_reference(obj->base.uao);
	i915_gem_object_put(obj);
	if (ret)
		return ret;
#endif

	args->addr_ptr = (u64)addr;
	return 0;

err:
	i915_gem_object_put(obj);
	return addr;
}
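
/*
 * For illustration only: a minimal userspace sketch of driving the legacy
 * mmap ioctl above. Assumes an open DRM fd and a valid GEM handle; error
 * handling elided. Not part of the driver.
 *
 *	struct drm_i915_gem_mmap arg = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = length,
 *		.flags = 0,
 *	};
 *	void *ptr = NULL;
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg) == 0)
 *		ptr = (void *)(uintptr_t)arg.addr_ptr;
 */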

static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
}

/**
 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
 *
 * A history of the GTT mmap interface:
 *
 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
 *     be aligned and suitable for fencing, and still fit into the available
 *     mappable space left by the pinned display objects. A classic problem
 *     we called the page-fault-of-doom where we would ping-pong between
 *     two objects that could not fit inside the GTT and so the memcpy
 *     would page one object in at the expense of the other between every
 *     single byte.
 *
 * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
 *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
 *     object is too large for the available space (or simply too large
 *     for the mappable aperture!), a view is created instead and faulted
 *     into userspace. (This view is aligned and sized appropriately for
 *     fenced access.)
 *
 * 2 - Recognise WC as a separate cache domain so that we can flush the
 *     delayed writes via GTT before performing direct access via WC.
 *
 * 3 - Remove implicit set-domain(GTT) and synchronisation on initial
 *     pagefault; swapin remains transparent.
 *
 * 4 - Support multiple fault handlers per object depending on object's
 *     backing storage (a.k.a. MMAP_OFFSET).
 *
 * Restrictions:
 *
 *  * snoopable objects cannot be accessed via the GTT. Doing so can cause
 *    machine hangs on some architectures, corruption on others. An attempt
 *    to service a GTT page fault from a snoopable object will generate a
 *    SIGBUS.
 *
 *  * the object must be able to fit into RAM (physical memory, though not
 *    limited to the mappable aperture).
 *
 * Caveats:
 *
 *  * a new GTT page fault will synchronize rendering from the GPU and flush
 *    all data to system memory. Subsequent access will not be synchronized.
 *
 *  * all mappings are revoked on runtime device suspend.
 *
 *  * there are only 8, 16 or 32 fence registers to share between all users
 *    (older machines require a fence register for display and blitter access
 *    as well). Contention of the fence registers will cause the previous users
 *    to be unmapped and any new access will generate new page faults.
 *
 *  * running out of memory while servicing a fault may generate a SIGBUS,
 *    rather than the expected SIGSEGV.
 */
int i915_gem_mmap_gtt_version(void)
{
	return 4;
}
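
/*
 * Userspace discovers this feature level through the getparam ioctl. A
 * minimal sketch, for illustration only (assumes an open DRM fd):
 *
 *	int version = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_MMAP_GTT_VERSION,
 *		.value = &version,
 *	};
 *	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 && version >= 1)
 *		partial_views_supported = true;
 */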

static inline struct i915_ggtt_view
compute_partial_view(const struct drm_i915_gem_object *obj,
		     pgoff_t page_offset,
		     unsigned int chunk)
{
	struct i915_ggtt_view view;

	if (i915_gem_object_is_tiled(obj))
		chunk = roundup(chunk, tile_row_pages(obj) ?: 1);

	view.type = I915_GGTT_VIEW_PARTIAL;
	view.partial.offset = rounddown(page_offset, chunk);
	view.partial.size =
		min_t(unsigned int, chunk,
		      (obj->base.size >> PAGE_SHIFT) - view.partial.offset);

	/* If the partial covers the entire object, just create a normal VMA. */
	if (chunk >= obj->base.size >> PAGE_SHIFT)
		view.type = I915_GGTT_VIEW_NORMAL;

	return view;
}
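
/*
 * Worked example (illustrative numbers only): for an untiled 16 MiB object
 * (4096 pages, 4 KiB page size), a fault at page 3000 with
 * chunk = MIN_CHUNK_PAGES (256) yields
 * view.partial.offset = rounddown(3000, 256) = 2816 and
 * view.partial.size = min(256, 4096 - 2816) = 256, i.e. a 1 MiB window,
 * aligned to the chunk size, that contains the faulting page.
 */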

#ifdef __linux__

static vm_fault_t i915_error_to_vmf_fault(int err)
{
	switch (err) {
	default:
		WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err);
		fallthrough;
	case -EIO: /* shmemfs failure from swap device */
	case -EFAULT: /* purged object */
	case -ENODEV: /* bad object, how did you get here! */
	case -ENXIO: /* unable to access backing store (on device) */
		return VM_FAULT_SIGBUS;

	case -ENOMEM: /* our allocation failure */
		return VM_FAULT_OOM;

	case 0:
	case -EAGAIN:
	case -ENOSPC: /* transient failure to evict? */
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	}
}

static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
{
	struct vm_area_struct *area = vmf->vma;
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	resource_size_t iomap;
	int err;

	/* Sanity check that we allow writing into this object */
	if (unlikely(i915_gem_object_is_readonly(obj) &&
		     area->vm_flags & VM_WRITE))
		return VM_FAULT_SIGBUS;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out;

	iomap = -1;
	if (!i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE)) {
		iomap = obj->mm.region->iomap.base;
		iomap -= obj->mm.region->region.start;
	}

	/* PTEs are revoked in obj->ops->put_pages() */
	err = remap_io_sg(area,
			  area->vm_start, area->vm_end - area->vm_start,
			  obj->mm.pages->sgl, iomap);

	if (area->vm_flags & VM_WRITE) {
		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
		obj->mm.dirty = true;
	}

	i915_gem_object_unpin_pages(obj);

out:
	return i915_error_to_vmf_fault(err);
}

static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
{
#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
	struct vm_area_struct *area = vmf->vma;
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	struct i915_ggtt *ggtt = &i915->ggtt;
	bool write = area->vm_flags & VM_WRITE;
	struct i915_gem_ww_ctx ww;
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	pgoff_t page_offset;
	int srcu;
	int ret;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	wakeref = intel_runtime_pm_get(rpm);

	i915_gem_ww_ctx_init(&ww, true);
retry:
	ret = i915_gem_object_lock(obj, &ww);
	if (ret)
		goto err_rpm;

	/* Sanity check that we allow writing into this object */
	if (i915_gem_object_is_readonly(obj) && write) {
		ret = -EFAULT;
		goto err_rpm;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err_rpm;

	ret = intel_gt_reset_trylock(ggtt->vm.gt, &srcu);
	if (ret)
		goto err_pages;

	/* Now pin it into the GTT as needed */
	vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
					  PIN_MAPPABLE |
					  PIN_NONBLOCK /* NOWARN */ |
					  PIN_NOEVICT);
	if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
		/* Use a partial view if it is bigger than available space */
		struct i915_ggtt_view view =
			compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
		unsigned int flags;

		flags = PIN_MAPPABLE | PIN_NOSEARCH;
		if (view.type == I915_GGTT_VIEW_NORMAL)
			flags |= PIN_NONBLOCK; /* avoid warnings for pinned */

		/*
		 * Userspace is now writing through an untracked VMA, abandon
		 * all hope that the hardware is able to track future writes.
		 */

		vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
		if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
			flags = PIN_MAPPABLE;
			view.type = I915_GGTT_VIEW_PARTIAL;
			vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
		}

		/* The entire mappable GGTT is pinned? Unexpected! */
		GEM_BUG_ON(vma == ERR_PTR(-ENOSPC));
	}
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_reset;
	}

	/* Access to snoopable pages through the GTT is incoherent. */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
		ret = -EFAULT;
		goto err_unpin;
	}

	ret = i915_vma_pin_fence(vma);
	if (ret)
		goto err_unpin;

	/* Finally, remap it using the new GTT offset */
	ret = remap_io_mapping(area,
			       area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
			       (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
			       min_t(u64, vma->size, area->vm_end - area->vm_start),
			       &ggtt->iomap);
	if (ret)
		goto err_fence;

	assert_rpm_wakelock_held(rpm);

	/* Mark as being mmapped into userspace for later revocation */
	mutex_lock(&i915->ggtt.vm.mutex);
	if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
		list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
	mutex_unlock(&i915->ggtt.vm.mutex);

	/* Track the mmo associated with the fenced vma */
	vma->mmo = mmo;

	if (IS_ACTIVE(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND))
		intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
				   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));

	if (write) {
		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
		i915_vma_set_ggtt_write(vma);
		obj->mm.dirty = true;
	}

err_fence:
	i915_vma_unpin_fence(vma);
err_unpin:
	__i915_vma_unpin(vma);
err_reset:
	intel_gt_reset_unlock(ggtt->vm.gt, srcu);
err_pages:
	i915_gem_object_unpin_pages(obj);
err_rpm:
	if (ret == -EDEADLK) {
		ret = i915_gem_ww_ctx_backoff(&ww);
		if (!ret)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	intel_runtime_pm_put(rpm, wakeref);
	return i915_error_to_vmf_fault(ret);
}

static int
vm_access(struct vm_area_struct *area, unsigned long addr,
	  void *buf, int len, int write)
{
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	void *vaddr;

	if (i915_gem_object_is_readonly(obj) && write)
		return -EACCES;

	addr -= area->vm_start;
	if (addr >= obj->base.size)
		return -EINVAL;

	/* As this is primarily for debugging, let's focus on simplicity */
	vaddr = i915_gem_object_pin_map(obj, I915_MAP_FORCE_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	if (write) {
		memcpy(vaddr + addr, buf, len);
		__i915_gem_object_flush_map(obj, addr, len);
	} else {
		memcpy(buf, vaddr + addr, len);
	}

	i915_gem_object_unpin_map(obj);

	return len;
}

#else /* !__linux__ */

static int i915_error_to_vmf_fault(int err)
{
	switch (err) {
	default:
		WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err);
		fallthrough;
	case -EIO: /* shmemfs failure from swap device */
	case -EFAULT: /* purged object */
	case -ENODEV: /* bad object, how did you get here! */
	case -ENXIO: /* unable to access backing store (on device) */
		return VM_PAGER_ERROR;

	case -ENOMEM: /* our allocation failure */
		return VM_PAGER_ERROR;

	case 0:
	case -EAGAIN:
	case -ENOSPC: /* transient failure to evict? */
	case -ERESTART:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_PAGER_OK;
	}
}

static int
vm_fault_cpu(struct i915_mmap_offset *mmo, struct uvm_faultinfo *ufi,
    vm_prot_t access_type)
{
	struct vm_map_entry *entry = ufi->entry;
	struct drm_i915_gem_object *obj = mmo->obj;
	int write = !!(access_type & PROT_WRITE);
	struct sg_table *pages;
	struct sg_page_iter sg_iter;
	vm_prot_t mapprot;
	vaddr_t va = entry->start;
	paddr_t pa, pa_flags = 0;
	int flags;
	int err;

	/* Sanity check that we allow writing into this object */
	if (unlikely(i915_gem_object_is_readonly(obj) && write)) {
		uvmfault_unlockall(ufi, NULL, &obj->base.uobj);
		return VM_PAGER_BAD;
	}

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out;

	KASSERT(i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE));

	flags = mapprot = entry->protection;
	if (write == 0)
		flags &= ~PROT_WRITE;

	switch (mmo->mmap_type) {
	case I915_MMAP_TYPE_WC:
		pa_flags |= PMAP_WC;
		break;
	case I915_MMAP_TYPE_UC:
		pa_flags |= PMAP_NOCACHE;
		break;
	default:
		break;
	}

	pages = obj->mm.pages;
	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
		pa = sg_page_iter_dma_address(&sg_iter);
		if (pmap_enter(ufi->orig_map->pmap, va, pa | pa_flags,
		    mapprot, PMAP_CANFAIL | flags)) {
			err = -ENOMEM;
			break;
		}
		va += PAGE_SIZE;
	}
	pmap_update(ufi->orig_map->pmap);

	if (write) {
		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
		obj->mm.dirty = true;
	}

	i915_gem_object_unpin_pages(obj);

out:
	uvmfault_unlockall(ufi, NULL, &obj->base.uobj);
	return i915_error_to_vmf_fault(err);
}

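/*
 * Helper for vm_fault_gtt() below: enters a linear run of write-combining
 * mappings for [va, va + size), backed by physical pages starting at pfn
 * (the aperture address of the bound GGTT view).
 */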
int
remap_io_mapping(pmap_t pm, vm_prot_t mapprot,
    vaddr_t va, unsigned long pfn, unsigned long size)
{
	vaddr_t end = va + size;
	paddr_t pa = ptoa(pfn);

	while (va < end) {
		if (pmap_enter(pm, va, pa | PMAP_WC, mapprot, PMAP_CANFAIL | mapprot))
			return -ENOMEM;
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
	}

	return 0;
}

static int
vm_fault_gtt(struct i915_mmap_offset *mmo, struct uvm_faultinfo *ufi,
    vaddr_t vaddr, vm_prot_t access_type)
{
#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
	struct vm_map_entry *entry = ufi->entry;
	struct drm_i915_gem_object *obj = mmo->obj;
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	struct i915_ggtt *ggtt = &i915->ggtt;
	int write = !!(access_type & PROT_WRITE);
	struct i915_gem_ww_ctx ww;
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	pgoff_t page_offset;
	int srcu;
	int ret;

	/* We can't use the map entry's offset since that holds the fake offset */
	page_offset = (vaddr - entry->start) >> PAGE_SHIFT;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	wakeref = intel_runtime_pm_get(rpm);

	i915_gem_ww_ctx_init(&ww, true);
retry:
	ret = i915_gem_object_lock(obj, &ww);
	if (ret)
		goto err_rpm;

	/* Sanity check that we allow writing into this object */
	if (i915_gem_object_is_readonly(obj) && write) {
		ret = -EFAULT;
		goto err_rpm;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err_rpm;

	ret = intel_gt_reset_trylock(ggtt->vm.gt, &srcu);
	if (ret)
		goto err_pages;

	/* Now pin it into the GTT as needed */
	vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
					  PIN_MAPPABLE |
					  PIN_NONBLOCK /* NOWARN */ |
					  PIN_NOEVICT);
	if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
		/* Use a partial view if it is bigger than available space */
		struct i915_ggtt_view view =
			compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
		unsigned int flags;

		flags = PIN_MAPPABLE | PIN_NOSEARCH;
		if (view.type == I915_GGTT_VIEW_NORMAL)
			flags |= PIN_NONBLOCK; /* avoid warnings for pinned */

		/*
		 * Userspace is now writing through an untracked VMA, abandon
		 * all hope that the hardware is able to track future writes.
		 */

		vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
		if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
			flags = PIN_MAPPABLE;
			view.type = I915_GGTT_VIEW_PARTIAL;
			vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
		}

		/* The entire mappable GGTT is pinned? Unexpected! */
		GEM_BUG_ON(vma == ERR_PTR(-ENOSPC));
	}
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_reset;
	}

	/* Access to snoopable pages through the GTT is incoherent. */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
		ret = -EFAULT;
		goto err_unpin;
	}

	ret = i915_vma_pin_fence(vma);
	if (ret)
		goto err_unpin;

	/* Finally, remap it using the new GTT offset */
	ret = remap_io_mapping(ufi->orig_map->pmap, entry->protection,
			       entry->start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
			       (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
			       min_t(u64, vma->size, entry->end - entry->start));
	if (ret)
		goto err_fence;

	assert_rpm_wakelock_held(rpm);

	/* Mark as being mmapped into userspace for later revocation */
	mutex_lock(&i915->ggtt.vm.mutex);
	if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
		list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
	mutex_unlock(&i915->ggtt.vm.mutex);

	/* Track the mmo associated with the fenced vma */
	vma->mmo = mmo;

#ifdef notyet
	if (IS_ACTIVE(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND))
		intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
				   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
#endif

	if (write) {
		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
		i915_vma_set_ggtt_write(vma);
		obj->mm.dirty = true;
	}

err_fence:
	i915_vma_unpin_fence(vma);
err_unpin:
	__i915_vma_unpin(vma);
err_reset:
	intel_gt_reset_unlock(ggtt->vm.gt, srcu);
err_pages:
	i915_gem_object_unpin_pages(obj);
err_rpm:
	if (ret == -EDEADLK) {
		ret = i915_gem_ww_ctx_backoff(&ww);
		if (!ret)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	intel_runtime_pm_put(rpm, wakeref);
	uvmfault_unlockall(ufi, NULL, &obj->base.uobj);
	return i915_error_to_vmf_fault(ret);
}

int
i915_gem_fault(struct drm_gem_object *gem_obj, struct uvm_faultinfo *ufi,
    off_t offset, vaddr_t vaddr, vm_page_t *pps, int npages, int centeridx,
    vm_prot_t access_type, int flags)
{
	struct drm_vma_offset_node *node;
	struct drm_device *dev = gem_obj->dev;
	struct vm_map_entry *entry = ufi->entry;
	vsize_t size = entry->end - entry->start;
	struct i915_mmap_offset *mmo = NULL;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  entry->offset >> PAGE_SHIFT,
						  size >> PAGE_SHIFT);
	if (likely(node))
		mmo = container_of(node, struct i915_mmap_offset, vma_node);
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
	if (!mmo) {
		uvmfault_unlockall(ufi, NULL, &gem_obj->uobj);
		return VM_PAGER_BAD;
	}

	KASSERT(gem_obj == &mmo->obj->base);

	if (mmo->mmap_type == I915_MMAP_TYPE_GTT)
		return vm_fault_gtt(mmo, ufi, vaddr, access_type);

	return vm_fault_cpu(mmo, ufi, access_type);
}

#endif /* !__linux__ */

void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	GEM_BUG_ON(!obj->userfault_count);

	for_each_ggtt_vma(vma, obj)
		i915_vma_revoke_mmap(vma);

	GEM_BUG_ON(obj->userfault_count);
}

/*
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by vm_fault_gtt().
 */
void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	intel_wakeref_t wakeref;

	/*
	 * Serialisation between user GTT access and our code depends upon
	 * revoking the CPU's PTE whilst the mutex is held. The next user
	 * pagefault then has to wait until we release the mutex.
	 *
	 * Note that RPM complicates somewhat by adding an additional
	 * requirement that operations to the GGTT be made holding the RPM
	 * wakeref.
	 */
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	mutex_lock(&i915->ggtt.vm.mutex);

	if (!obj->userfault_count)
		goto out;

	__i915_gem_object_release_mmap_gtt(obj);

	/*
	 * Ensure that the CPU's PTEs are revoked and there are no outstanding
	 * memory transactions from userspace before we return. The TLB
	 * flushing implied by changing the PTEs above *should* be
	 * sufficient; an extra barrier here just provides us with a bit
	 * of paranoid documentation about our requirement to serialise
	 * memory writes before touching registers / GSM.
	 */
	wmb();

out:
	mutex_unlock(&i915->ggtt.vm.mutex);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct i915_mmap_offset *mmo, *mn;

	spin_lock(&obj->mmo.lock);
	rbtree_postorder_for_each_entry_safe(mmo, mn,
					     &obj->mmo.offsets, offset) {
		/*
		 * vma_node_unmap for GTT mmaps handled already in
		 * __i915_gem_object_release_mmap_gtt
		 */
		if (mmo->mmap_type == I915_MMAP_TYPE_GTT)
			continue;

		spin_unlock(&obj->mmo.lock);
#ifdef __linux__
		drm_vma_node_unmap(&mmo->vma_node,
				   obj->base.dev->anon_inode->i_mapping);
#endif
		spin_lock(&obj->mmo.lock);
	}
	spin_unlock(&obj->mmo.lock);
}

static struct i915_mmap_offset *
lookup_mmo(struct drm_i915_gem_object *obj,
	   enum i915_mmap_type mmap_type)
{
	struct rb_node *rb;

	spin_lock(&obj->mmo.lock);
	rb = obj->mmo.offsets.rb_node;
	while (rb) {
		struct i915_mmap_offset *mmo =
			rb_entry(rb, typeof(*mmo), offset);

		if (mmo->mmap_type == mmap_type) {
			spin_unlock(&obj->mmo.lock);
			return mmo;
		}

		if (mmo->mmap_type < mmap_type)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}
	spin_unlock(&obj->mmo.lock);

	return NULL;
}

static struct i915_mmap_offset *
insert_mmo(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo)
{
	struct rb_node *rb, **p;

	spin_lock(&obj->mmo.lock);
	rb = NULL;
	p = &obj->mmo.offsets.rb_node;
	while (*p) {
		struct i915_mmap_offset *pos;

		rb = *p;
		pos = rb_entry(rb, typeof(*pos), offset);

		if (pos->mmap_type == mmo->mmap_type) {
			spin_unlock(&obj->mmo.lock);
			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
					      &mmo->vma_node);
			kfree(mmo);
			return pos;
		}

		if (pos->mmap_type < mmo->mmap_type)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&mmo->offset, rb, p);
	rb_insert_color(&mmo->offset, &obj->mmo.offsets);
	spin_unlock(&obj->mmo.lock);

	return mmo;
}
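
/*
 * Note: the rbtree above is keyed solely by mmap_type, so an object carries
 * at most one offset node per mapping type. If two threads race to attach
 * the same type, insert_mmo() detects the existing node, frees the loser's
 * allocation and returns the incumbent, which is why mmap_offset_attach()
 * below re-checks its lookup after insertion.
 */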

static struct i915_mmap_offset *
mmap_offset_attach(struct drm_i915_gem_object *obj,
		   enum i915_mmap_type mmap_type,
		   struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_mmap_offset *mmo;
	int err;

	mmo = lookup_mmo(obj, mmap_type);
	if (mmo)
		goto out;

	mmo = kmalloc(sizeof(*mmo), GFP_KERNEL);
	if (!mmo)
		return ERR_PTR(-ENOMEM);

	mmo->obj = obj;
	mmo->mmap_type = mmap_type;
	drm_vma_node_reset(&mmo->vma_node);

	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
	if (likely(!err))
		goto insert;

	/* Attempt to reap some mmap space from dead objects */
	err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT);
	if (err)
		goto err;

	i915_gem_drain_freed_objects(i915);
	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
	if (err)
		goto err;

insert:
	mmo = insert_mmo(obj, mmo);
	GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
out:
	if (file)
		drm_vma_node_allow(&mmo->vma_node, file);
	return mmo;

err:
	kfree(mmo);
	return ERR_PTR(err);
}

static int
__assign_mmap_offset(struct drm_file *file,
		     u32 handle,
		     enum i915_mmap_type mmap_type,
		     u64 *offset)
{
	struct drm_i915_gem_object *obj;
	struct i915_mmap_offset *mmo;
	int err;

	obj = i915_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	if (i915_gem_object_never_mmap(obj)) {
		err = -ENODEV;
		goto out;
	}

	if (mmap_type != I915_MMAP_TYPE_GTT &&
	    !i915_gem_object_type_has(obj,
				      I915_GEM_OBJECT_HAS_STRUCT_PAGE |
				      I915_GEM_OBJECT_HAS_IOMEM)) {
		err = -ENODEV;
		goto out;
	}

	mmo = mmap_offset_attach(obj, mmap_type, file);
	if (IS_ERR(mmo)) {
		err = PTR_ERR(mmo);
		goto out;
	}

	*offset = drm_vma_node_offset_addr(&mmo->vma_node);
	err = 0;
out:
	i915_gem_object_put(obj);
	return err;
}

int
i915_gem_dumb_mmap_offset(struct drm_file *file,
			  struct drm_device *dev,
			  u32 handle,
			  u64 *offset)
{
	enum i915_mmap_type mmap_type;

	if (boot_cpu_has(X86_FEATURE_PAT))
		mmap_type = I915_MMAP_TYPE_WC;
	else if (!i915_ggtt_has_aperture(&to_i915(dev)->ggtt))
		return -ENODEV;
	else
		mmap_type = I915_MMAP_TYPE_GTT;

	return __assign_mmap_offset(file, handle, mmap_type, offset);
}

/**
 * i915_gem_mmap_offset_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_mmap_offset *args = data;
	enum i915_mmap_type type;
	int err;

	/*
	 * Historically we failed to check args.pad and args.offset
	 * and so we cannot use those fields for user input and we cannot
	 * add -EINVAL for them as the ABI is fixed, i.e. old userspace
	 * may be feeding in garbage in those fields.
	 *
	 * if (args->pad) return -EINVAL; is verboten!
	 */

	err = i915_user_extensions(u64_to_user_ptr(args->extensions),
				   NULL, 0, NULL);
	if (err)
		return err;

	switch (args->flags) {
	case I915_MMAP_OFFSET_GTT:
		if (!i915_ggtt_has_aperture(&i915->ggtt))
			return -ENODEV;
		type = I915_MMAP_TYPE_GTT;
		break;

	case I915_MMAP_OFFSET_WC:
		if (!boot_cpu_has(X86_FEATURE_PAT))
			return -ENODEV;
		type = I915_MMAP_TYPE_WC;
		break;

	case I915_MMAP_OFFSET_WB:
		type = I915_MMAP_TYPE_WB;
		break;

	case I915_MMAP_OFFSET_UC:
		if (!boot_cpu_has(X86_FEATURE_PAT))
			return -ENODEV;
		type = I915_MMAP_TYPE_UC;
		break;

	default:
		return -EINVAL;
	}

	return __assign_mmap_offset(file, args->handle, type, &args->offset);
}
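
/*
 * The modern userspace flow, sketched for illustration (assumes an open DRM
 * fd and a valid GEM handle; error handling elided):
 *
 *	struct drm_i915_gem_mmap_offset arg = {
 *		.handle = handle,
 *		.flags = I915_MMAP_OFFSET_WC,
 *	};
 *	void *ptr = MAP_FAILED;
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg) == 0)
 *		ptr = mmap(NULL, length, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, arg.offset);
 */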

#ifdef __linux__

static void vm_open(struct vm_area_struct *vma)
{
	struct i915_mmap_offset *mmo = vma->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;

	GEM_BUG_ON(!obj);
	i915_gem_object_get(obj);
}

static void vm_close(struct vm_area_struct *vma)
{
	struct i915_mmap_offset *mmo = vma->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;

	GEM_BUG_ON(!obj);
	i915_gem_object_put(obj);
}

static const struct vm_operations_struct vm_ops_gtt = {
	.fault = vm_fault_gtt,
	.access = vm_access,
	.open = vm_open,
	.close = vm_close,
};

static const struct vm_operations_struct vm_ops_cpu = {
	.fault = vm_fault_cpu,
	.access = vm_access,
	.open = vm_open,
	.close = vm_close,
};

static int singleton_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = file->private_data;

	cmpxchg(&i915->gem.mmap_singleton, file, NULL);
	drm_dev_put(&i915->drm);

	return 0;
}

static const struct file_operations singleton_fops = {
	.owner = THIS_MODULE,
	.release = singleton_release,
};

static struct file *mmap_singleton(struct drm_i915_private *i915)
{
	struct file *file;

	rcu_read_lock();
	file = READ_ONCE(i915->gem.mmap_singleton);
	if (file && !get_file_rcu(file))
		file = NULL;
	rcu_read_unlock();
	if (file)
		return file;

	file = anon_inode_getfile("i915.gem", &singleton_fops, i915, O_RDWR);
	if (IS_ERR(file))
		return file;

	/* Everyone shares a single global address space */
	file->f_mapping = i915->drm.anon_inode->i_mapping;

	smp_store_mb(i915->gem.mmap_singleton, file);
	drm_dev_get(&i915->drm);

	return file;
}

/*
 * This overcomes the limitation in drm_gem_mmap's assignment of a
 * drm_gem_object as the vma->vm_private_data, since we need to
 * be able to resolve multiple mmap offsets which could be tied
 * to a single gem object.
 */
int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_vma_offset_node *node;
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_i915_gem_object *obj = NULL;
	struct i915_mmap_offset *mmo = NULL;
	struct file *anon;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	rcu_read_lock();
	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (node && drm_vma_node_is_allowed(node, priv)) {
		/*
		 * Skip 0-refcnted objects as they are in the process of being
		 * destroyed and will be invalid when the vma manager lock
		 * is released.
		 */
		mmo = container_of(node, struct i915_mmap_offset, vma_node);
		obj = i915_gem_object_get_rcu(mmo->obj);
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
	rcu_read_unlock();
	if (!obj)
		return node ? -EACCES : -EINVAL;

	if (i915_gem_object_is_readonly(obj)) {
		if (vma->vm_flags & VM_WRITE) {
			i915_gem_object_put(obj);
			return -EINVAL;
		}
		vma->vm_flags &= ~VM_MAYWRITE;
	}

	anon = mmap_singleton(to_i915(dev));
	if (IS_ERR(anon)) {
		i915_gem_object_put(obj);
		return PTR_ERR(anon);
	}

	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data = mmo;

	/*
	 * We keep the ref on mmo->obj, not vm_file, but we require
	 * vma->vm_file->f_mapping, see vma_link(), for later revocation.
	 * Our userspace is accustomed to having per-file resource cleanup
	 * (i.e. contexts, objects and requests) on their close(fd), which
	 * requires avoiding extraneous references to their filp, hence why
	 * we prefer to use an anonymous file for their mmaps.
	 */
	fput(vma->vm_file);
	vma->vm_file = anon;

	switch (mmo->mmap_type) {
	case I915_MMAP_TYPE_WC:
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_WB:
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_UC:
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_GTT:
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_gtt;
		break;
	}
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	return 0;
}

#else /* !__linux__ */

/*
 * This overcomes the limitation in drm_gem_mmap's assignment of a
 * drm_gem_object as the vma->vm_private_data, since we need to
 * be able to resolve multiple mmap offsets which could be tied
 * to a single gem object.
 */
struct uvm_object *
i915_gem_mmap(struct file *filp, vm_prot_t accessprot,
    voff_t off, vsize_t size)
{
	struct drm_vma_offset_node *node;
	struct drm_file *priv = (void *)filp;
	struct drm_device *dev = priv->minor->dev;
	struct drm_i915_gem_object *obj = NULL;
	struct i915_mmap_offset *mmo = NULL;

	if (drm_dev_is_unplugged(dev))
		return NULL;

	rcu_read_lock();
	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  off >> PAGE_SHIFT,
						  atop(round_page(size)));
	if (node && drm_vma_node_is_allowed(node, priv)) {
		/*
		 * Skip 0-refcnted objects as they are in the process of being
		 * destroyed and will be invalid when the vma manager lock
		 * is released.
		 */
		mmo = container_of(node, struct i915_mmap_offset, vma_node);
		obj = i915_gem_object_get_rcu(mmo->obj);
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
	rcu_read_unlock();
	if (!obj)
		return NULL;

	if (i915_gem_object_is_readonly(obj)) {
		if (accessprot & PROT_WRITE) {
			i915_gem_object_put(obj);
			return NULL;
		}
	}

	return &obj->base.uobj;
}

#endif /* !__linux__ */

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_mman.c"
#endif