xref: /dragonfly/sys/dev/drm/i915/i915_gem_userptr.c (revision 31524921)
/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>

struct i915_mmu_notifier {
	spinlock_t lock;
	struct hlist_node node;
	struct mmu_notifier mn;
	struct rb_root objects;
	struct list_head linear;
	struct drm_device *dev;
	struct mm_struct *mm;
	struct work_struct work;
	unsigned long count;
	unsigned long serial;
	bool has_linear;
};

struct i915_mmu_object {
	struct i915_mmu_notifier *mmu;
	struct interval_tree_node it;
	struct list_head link;
	struct drm_i915_gem_object *obj;
	bool is_linear;
};

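/*
 * Cancel any outstanding gup worker for the object, unbind all of its VMAs
 * and release its backing pages with the device marked non-interruptible.
 * Consumes the reference held by the caller and returns the end of the
 * object's user address range so the caller can continue its interval walk
 * past it.
 */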
static unsigned long cancel_userptr(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	unsigned long end;

	mutex_lock(&dev->struct_mutex);
	/* Cancel any active worker and force us to re-evaluate gup */
	obj->userptr.work = NULL;

	if (obj->pages != NULL) {
		struct drm_i915_private *dev_priv = to_i915(dev);
		struct i915_vma *vma, *tmp;
		bool was_interruptible;

		was_interruptible = dev_priv->mm.interruptible;
		dev_priv->mm.interruptible = false;

		list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
			int ret = i915_vma_unbind(vma);
			WARN_ON(ret && ret != -EIO);
		}
		WARN_ON(i915_gem_object_put_pages(obj));

		dev_priv->mm.interruptible = was_interruptible;
	}

	end = obj->userptr.ptr + obj->base.size;

	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	return end;
}

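/*
 * Walk the linear list of userptr objects and cancel every object that
 * overlaps [start, end]. The notifier spinlock is dropped around each
 * cancellation, so the walk restarts whenever the serial number shows that
 * the list changed underneath us. Called, and returns, with mn->lock held.
 */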
static void *invalidate_range__linear(struct i915_mmu_notifier *mn,
				      struct mm_struct *mm,
				      unsigned long start,
				      unsigned long end)
{
	struct i915_mmu_object *mmu;
	unsigned long serial;

restart:
	serial = mn->serial;
	list_for_each_entry(mmu, &mn->linear, link) {
		struct drm_i915_gem_object *obj;

		if (mmu->it.last < start || mmu->it.start > end)
			continue;

		obj = mmu->obj;
		drm_gem_object_reference(&obj->base);
		spin_unlock(&mn->lock);

		cancel_userptr(obj);

		spin_lock(&mn->lock);
		if (serial != mn->serial)
			goto restart;
	}

	return NULL;
}

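/*
 * mmu_notifier callback: the process is about to invalidate (unmap, remap,
 * change the protection of, ...) the range [start, end). Cancel every
 * userptr object overlapping that range so the GPU drops its references to
 * the pages before they go away.
 */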
static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
						       struct mm_struct *mm,
						       unsigned long start,
						       unsigned long end)
{
	struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn);
	struct interval_tree_node *it = NULL;
	unsigned long next = start;
	unsigned long serial = 0;

	end--; /* interval ranges are inclusive, but invalidate range is exclusive */
	while (next < end) {
		struct drm_i915_gem_object *obj = NULL;

		spin_lock(&mn->lock);
		if (mn->has_linear)
			it = invalidate_range__linear(mn, mm, start, end);
		else if (serial == mn->serial)
			it = interval_tree_iter_next(it, next, end);
		else
			it = interval_tree_iter_first(&mn->objects, start, end);
		if (it != NULL) {
			obj = container_of(it, struct i915_mmu_object, it)->obj;
			drm_gem_object_reference(&obj->base);
			serial = mn->serial;
		}
		spin_unlock(&mn->lock);
		if (obj == NULL)
			return;

		next = cancel_userptr(obj);
	}
}

static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
	.invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
};

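/*
 * Look up the i915_mmu_notifier already registered for this mm, if any.
 * The per-device hash table is protected by dev->struct_mutex.
 */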
static struct i915_mmu_notifier *
__i915_mmu_notifier_lookup(struct drm_device *dev, struct mm_struct *mm)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_mmu_notifier *mmu;

	/* Protected by dev->struct_mutex */
	hash_for_each_possible(dev_priv->mmu_notifiers, mmu, node, (unsigned long)mm)
		if (mmu->mm == mm)
			return mmu;

	return NULL;
}

static struct i915_mmu_notifier *
i915_mmu_notifier_get(struct drm_device *dev, struct mm_struct *mm)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_mmu_notifier *mmu;
	int ret;

	lockdep_assert_held(&dev->struct_mutex);

	mmu = __i915_mmu_notifier_lookup(dev, mm);
	if (mmu)
		return mmu;

	mmu = kmalloc(sizeof(*mmu), GFP_KERNEL);
	if (mmu == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&mmu->lock);
	mmu->dev = dev;
	mmu->mn.ops = &i915_gem_userptr_notifier;
	mmu->mm = mm;
	mmu->objects = RB_ROOT;
	mmu->count = 0;
	mmu->serial = 1;
	INIT_LIST_HEAD(&mmu->linear);
	mmu->has_linear = false;

	/* Protected by mmap_sem (write-lock) */
	ret = __mmu_notifier_register(&mmu->mn, mm);
	if (ret) {
		kfree(mmu);
		return ERR_PTR(ret);
	}

	/* Protected by dev->struct_mutex */
	hash_add(dev_priv->mmu_notifiers, &mmu->node, (unsigned long)mm);
	return mmu;
}

static void
__i915_mmu_notifier_destroy_worker(struct work_struct *work)
{
	struct i915_mmu_notifier *mmu = container_of(work, typeof(*mmu), work);
	mmu_notifier_unregister(&mmu->mn, mmu->mm);
	kfree(mmu);
}

static void
__i915_mmu_notifier_destroy(struct i915_mmu_notifier *mmu)
{
	lockdep_assert_held(&mmu->dev->struct_mutex);

	/* Protected by dev->struct_mutex */
	hash_del(&mmu->node);

	/* Our lock ordering is: mmap_sem, mmu_notifier_srcu, struct_mutex.
	 * We enter this function holding struct_mutex, therefore we need
	 * to drop our mutex prior to calling mmu_notifier_unregister in
	 * order to prevent lock inversion (and a system-wide deadlock)
	 * between mmap_sem and struct_mutex. Hence we defer the
	 * unregistration to a workqueue where we hold no locks.
	 */
	INIT_WORK(&mmu->work, __i915_mmu_notifier_destroy_worker);
	schedule_work(&mmu->work);
}

static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mmu)
{
	if (++mmu->serial == 0)
		mmu->serial = 1;
}

static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mmu)
{
	struct i915_mmu_object *mn;

	list_for_each_entry(mn, &mmu->linear, link)
		if (mn->is_linear)
			return true;

	return false;
}

static void
i915_mmu_notifier_del(struct i915_mmu_notifier *mmu,
		      struct i915_mmu_object *mn)
{
	lockdep_assert_held(&mmu->dev->struct_mutex);

	spin_lock(&mmu->lock);
	list_del(&mn->link);
	if (mn->is_linear)
		mmu->has_linear = i915_mmu_notifier_has_linear(mmu);
	else
		interval_tree_remove(&mn->it, &mmu->objects);
	__i915_mmu_notifier_update_serial(mmu);
	spin_unlock(&mmu->lock);

	/* Protected against _add() by dev->struct_mutex */
	if (--mmu->count == 0)
		__i915_mmu_notifier_destroy(mmu);
}

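/*
 * Start tracking a userptr object in the notifier. A non-overlapping range
 * is inserted into the interval tree for fast lookups; an overlap with an
 * idle object demotes the notifier to linear list walks instead. If the
 * overlapping object still has gup workers in flight, return -EAGAIN so
 * that userspace retries once those workers have flushed their references.
 */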
static int
i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
		      struct i915_mmu_object *mn)
{
	struct interval_tree_node *it;
	int ret;

	ret = i915_mutex_lock_interruptible(mmu->dev);
	if (ret)
		return ret;

	/* Make sure we drop the final active reference (and thereby
	 * remove the objects from the interval tree) before we do
	 * the check for overlapping objects.
	 */
	i915_gem_retire_requests(mmu->dev);

	spin_lock(&mmu->lock);
	it = interval_tree_iter_first(&mmu->objects,
				      mn->it.start, mn->it.last);
	if (it) {
		struct drm_i915_gem_object *obj;

		/* We only need to check the first object in the range as it
		 * either has cancelled gup work queued, in which case we
		 * return to the user to give the gup-workers time to flush
		 * their object references (upon which the object will be
		 * removed from the interval tree), or the range is still in
		 * use by another client and the overlap is invalid.
		 *
		 * If we do have an overlap, we cannot use the interval tree
		 * for fast range invalidation.
		 */

		obj = container_of(it, struct i915_mmu_object, it)->obj;
		if (!obj->userptr.workers)
			mmu->has_linear = mn->is_linear = true;
		else
			ret = -EAGAIN;
	} else
		interval_tree_insert(&mn->it, &mmu->objects);

	if (ret == 0) {
		list_add(&mn->link, &mmu->linear);
		__i915_mmu_notifier_update_serial(mmu);
	}
	spin_unlock(&mmu->lock);
	mutex_unlock(&mmu->dev->struct_mutex);

	return ret;
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
	struct i915_mmu_object *mn;

	mn = obj->userptr.mn;
	if (mn == NULL)
		return;

	i915_mmu_notifier_del(mn->mmu, mn);
	obj->userptr.mn = NULL;
}

static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
	struct i915_mmu_notifier *mn = mm->mn;

	if (mn)
		return mn;

	down_write(&mm->mm->mmap_sem);
	mutex_lock(&to_i915(mm->dev)->mm_lock);
	if ((mn = mm->mn) == NULL) {
		mn = i915_mmu_notifier_create(mm->mm);
		if (!IS_ERR(mn))
			mm->mn = mn;
	}
	mutex_unlock(&to_i915(mm->dev)->mm_lock);
	up_write(&mm->mm->mmap_sem);

	return mn;
}

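/*
 * Attach a userptr object to the mmu_notifier machinery for its mm. With
 * I915_USERPTR_UNSYNCHRONIZED the caller opts out of notifier tracking
 * altogether, which is restricted to CAP_SYS_ADMIN since it removes the
 * kernel's ability to revoke the pages backing the object.
 */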
static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	struct i915_mmu_notifier *mmu;
	struct i915_mmu_object *mn;
	int ret;

	if (flags & I915_USERPTR_UNSYNCHRONIZED)
		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

	down_write(&obj->userptr.mm->mmap_sem);
	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret == 0) {
		mmu = i915_mmu_notifier_get(obj->base.dev, obj->userptr.mm);
		if (!IS_ERR(mmu))
			mmu->count++; /* preemptive add to act as a refcount */
		else
			ret = PTR_ERR(mmu);
		mutex_unlock(&obj->base.dev->struct_mutex);
	}
	up_write(&obj->userptr.mm->mmap_sem);
	if (ret)
		return ret;

	mn = kzalloc(sizeof(*mn), GFP_KERNEL);
	if (mn == NULL) {
		ret = -ENOMEM;
		goto destroy_mmu;
	}

	mn->mmu = mmu;
	mn->it.start = obj->userptr.ptr;
	mn->it.last = mn->it.start + obj->base.size - 1;
	mn->obj = obj;

	ret = i915_mmu_notifier_add(mmu, mn);
	if (ret)
		goto free_mn;

	obj->userptr.mn = mn;
	return 0;

free_mn:
	kfree(mn);
destroy_mmu:
	mutex_lock(&obj->base.dev->struct_mutex);
	if (--mmu->count == 0)
		__i915_mmu_notifier_destroy(mmu);
	mutex_unlock(&obj->base.dev->struct_mutex);
	return ret;
}

#else

#if 0
static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
		return -ENODEV;

	return 0;
}
#endif
#endif

struct get_pages_work {
	struct work_struct work;
	struct drm_i915_gem_object *obj;
	struct task_struct *task;
};

#if IS_ENABLED(CONFIG_SWIOTLB)
#define swiotlb_active() swiotlb_nr_tbl()
#else
#define swiotlb_active() 0
#endif

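/*
 * The remainder of the Linux userptr implementation, from st_set_pages()
 * through i915_gem_userptr_ioctl(), is currently compiled out ("#if 0")
 * in this DragonFly port.
 */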
#if 0
static int
st_set_pages(struct sg_table **st, struct vm_page **pvec, int num_pages)
{
	struct scatterlist *sg;
	int ret, n;

	*st = kmalloc(sizeof(**st), M_DRM, M_WAITOK);
	if (*st == NULL)
		return -ENOMEM;

	if (swiotlb_active()) {
		ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
		if (ret)
			goto err;

		for_each_sg((*st)->sgl, sg, num_pages, n)
			sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
	} else {
		ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
						0, num_pages << PAGE_SHIFT,
						GFP_KERNEL);
		if (ret)
			goto err;
	}

	return 0;

err:
	kfree(*st);
	*st = NULL;
	return ret;
}

static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct drm_i915_gem_object *obj = work->obj;
	struct drm_device *dev = obj->base.dev;
	const int num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;

	ret = -ENOMEM;
	pinned = 0;

	pvec = kmalloc(num_pages*sizeof(struct page *),
		       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (pvec == NULL)
		pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
	if (pvec != NULL) {
		struct mm_struct *mm = obj->userptr.mm;

		down_read(&mm->mmap_sem);
		while (pinned < num_pages) {
			ret = get_user_pages(work->task, mm,
					     obj->userptr.ptr + pinned * PAGE_SIZE,
					     num_pages - pinned,
					     !obj->userptr.read_only, 0,
					     pvec + pinned, NULL);
			if (ret < 0)
				break;

			pinned += ret;
		}
		up_read(&mm->mmap_sem);
	}

	mutex_lock(&dev->struct_mutex);
	if (obj->userptr.work != &work->work) {
		ret = 0;
	} else if (pinned == num_pages) {
		ret = st_set_pages(&obj->pages, pvec, num_pages);
		if (ret == 0) {
			list_add_tail(&obj->global_list, &to_i915(dev)->mm.unbound_list);
			pinned = 0;
		}
	}

	obj->userptr.work = ERR_PTR(ret);
	obj->userptr.workers--;
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);

	put_task_struct(work->task);
	kfree(work);
}

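/*
 * Acquire the backing pages for a userptr object. The lockless
 * __get_user_pages_fast() path is tried first when called from the owning
 * process; anything that cannot be pinned that way is handed off to the
 * worker above, because a full gup needs mmap_sem, which must not be taken
 * while struct_mutex is held.
 */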
static int
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	const int num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;

	/* If userspace should engineer that these pages are replaced in
	 * the vma between us binding this page into the GTT and completion
	 * of rendering... Their loss. If they change the mapping of their
	 * pages they need to create a new bo to point to the new vma.
	 *
	 * However, that still leaves open the possibility of the vma
	 * being copied upon fork. Which falls under the same userspace
	 * synchronisation issue as a regular bo, except that this time
	 * the process may not be expecting that a particular piece of
	 * memory is tied to the GPU.
	 *
	 * Fortunately, we can hook into the mmu_notifier in order to
	 * discard the page references prior to anything nasty happening
	 * to the vma (discard or cloning) which should prevent the more
	 * egregious cases from causing harm.
	 */

	pvec = NULL;
	pinned = 0;
	if (obj->userptr.mm == current->mm) {
		pvec = kmalloc(num_pages*sizeof(struct page *),
			       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
		if (pvec == NULL) {
			pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
			if (pvec == NULL)
				return -ENOMEM;
		}

		pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
					       !obj->userptr.read_only, pvec);
	}
	if (pinned < num_pages) {
		if (pinned < 0) {
			ret = pinned;
			pinned = 0;
		} else {
			/* Spawn a worker so that we can acquire the
			 * user pages without holding our mutex. Access
			 * to the user pages requires mmap_sem, and we have
			 * a strict lock ordering of mmap_sem, struct_mutex -
			 * we already hold struct_mutex here and so cannot
			 * call gup without encountering a lock inversion.
			 *
			 * Userspace will keep on repeating the operation
			 * (thanks to EAGAIN) until either we hit the fast
			 * path or the worker completes. If the worker is
			 * cancelled or superseded, the task is still run
			 * but the results ignored. (This leads to
			 * complications that we may have a stray object
			 * refcount that we need to be wary of when
			 * checking for existing objects during creation.)
			 * If the worker encounters an error, it reports
			 * that error back to this function through
			 * obj->userptr.work = ERR_PTR.
			 */
			ret = -EAGAIN;
			if (obj->userptr.work == NULL &&
			    obj->userptr.workers < I915_GEM_USERPTR_MAX_WORKERS) {
				struct get_pages_work *work;

				work = kmalloc(sizeof(*work), GFP_KERNEL);
				if (work != NULL) {
					obj->userptr.work = &work->work;
					obj->userptr.workers++;

					work->obj = obj;
					drm_gem_object_reference(&obj->base);

					work->task = current;
					get_task_struct(work->task);

					INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
					schedule_work(&work->work);
				} else
					ret = -ENOMEM;
			} else {
				if (IS_ERR(obj->userptr.work)) {
					ret = PTR_ERR(obj->userptr.work);
					obj->userptr.work = NULL;
				}
			}
		}
	} else {
		ret = st_set_pages(&obj->pages, pvec, num_pages);
		if (ret == 0) {
			obj->userptr.work = NULL;
			pinned = 0;
		}
	}

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);
	return ret;
}

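/*
 * Release the pinned backing pages: write any GPU dirt back to the pages
 * (unless the object has been marked as unneeded via madvise) and free the
 * sg_table built by st_set_pages().
 */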
static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
{
	struct sg_page_iter sg_iter;

	BUG_ON(obj->userptr.work != NULL);

	if (obj->madv != I915_MADV_WILLNEED)
		obj->dirty = 0;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (obj->dirty)
			set_page_dirty(page);

		mark_page_accessed(page);
		page_cache_release(page);
	}
	obj->dirty = 0;

	sg_free_table(obj->pages);
	kfree(obj->pages);
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	i915_gem_userptr_release__mmu_notifier(obj);

	if (obj->userptr.mm) {
		mmput(obj->userptr.mm);
		obj->userptr.mm = NULL;
	}
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mn)
		return 0;

	return i915_gem_userptr_init__mmu_notifier(obj, 0);
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.release = i915_gem_userptr_release,
};

/**
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
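/*
 * For illustration only, a minimal sketch of how userspace is expected to
 * drive this ioctl (assuming a page-aligned buffer "buf" of "size" bytes,
 * an open DRM fd, and libdrm's drmIoctl(); use_gem_handle() is just a
 * placeholder for whatever consumes the handle):
 *
 *	struct drm_i915_gem_userptr arg = {
 *		.user_ptr = (uintptr_t)buf,
 *		.user_size = size,
 *		.flags = 0,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg) == 0)
 *		use_gem_handle(arg.handle);
 */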
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED))
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (args->user_size > dev_priv->gtt.base.total)
		return -E2BIG;

	if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
		       (char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/* On almost all of the current hw, we cannot tell the GPU that a
		 * page is readonly, so this is just a placeholder in the uAPI.
		 */
		return -ENODEV;
	}

	/* Allocate the new object */
	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops);
	obj->cache_level = I915_CACHE_LLC;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	obj->userptr.ptr = args->user_ptr;
	obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);

	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = -ENOMEM;
	if ((obj->userptr.mm = get_task_mm(current)))
		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}
#endif

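/*
 * One-time driver initialisation for userptr support: when mmu_notifiers
 * are available, set up the per-device hash table mapping an mm_struct to
 * its i915_mmu_notifier.
 */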
int
i915_gem_init_userptr(struct drm_device *dev)
{
#if defined(CONFIG_MMU_NOTIFIER)
	struct drm_i915_private *dev_priv = to_i915(dev);
	hash_init(dev_priv->mmu_notifiers);
#endif
	return 0;
}