/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

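/*
 * Per-process bookkeeping: an i915_mm_struct ties a process address space
 * (mm_struct) to the device and owns the optional i915_mmu_notifier
 * registered on it.  It is reference counted, and the final teardown is
 * deferred to a worker so that the mm reference is never dropped underneath
 * struct_mutex (see the comment in i915_gem_userptr_init__mm_struct()).
 */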
struct i915_mm_struct {
	struct mm_struct *mm;
	struct drm_i915_private *i915;
	struct i915_mmu_notifier *mn;
	struct hlist_node node;
	struct kref kref;
	struct work_struct work;
};

#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>

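/*
 * One i915_mmu_notifier exists per tracked mm.  Each userptr object registers
 * an i915_mmu_object describing its user address range in the notifier's
 * interval tree, so that invalidations of that range can cancel the object.
 */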
struct i915_mmu_notifier {
	spinlock_t lock;
	struct hlist_node node;
	struct mmu_notifier mn;
	struct rb_root objects;
	struct workqueue_struct *wq;
};

struct i915_mmu_object {
	struct i915_mmu_notifier *mn;
	struct drm_i915_gem_object *obj;
	struct interval_tree_node it;
	struct list_head link;
	struct work_struct work;
	bool attached;
};

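/*
 * Wait for all outstanding GPU reads of the object.  struct_mutex is dropped
 * while waiting and reacquired afterwards, so the caller must be prepared
 * for the object state to have changed in the meantime.
 */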
static void wait_rendering(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
	int i, n;

	if (!obj->active)
		return;

	n = 0;
	for (i = 0; i < I915_NUM_ENGINES; i++) {
		struct drm_i915_gem_request *req;

		req = obj->last_read_req[i];
		if (req == NULL)
			continue;

		requests[n++] = i915_gem_request_reference(req);
	}

	mutex_unlock(&dev->struct_mutex);

	for (i = 0; i < n; i++)
		__i915_wait_request(requests[i], false, NULL, NULL);

	mutex_lock(&dev->struct_mutex);

	for (i = 0; i < n; i++)
		i915_gem_request_unreference(requests[i]);
}

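/*
 * Worker queued by the mmu-notifier when the user mapping is invalidated:
 * wait for rendering, unbind every VMA and release the backing pages, then
 * drop the reference taken when the work was queued.
 */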
static void cancel_userptr(struct work_struct *work)
{
	struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
	struct drm_i915_gem_object *obj = mo->obj;
	struct drm_device *dev = obj->base.dev;

	mutex_lock(&dev->struct_mutex);
	/* Cancel any active worker and force us to re-evaluate gup */
	obj->userptr.work = NULL;

	if (obj->pages != NULL) {
		struct drm_i915_private *dev_priv = to_i915(dev);
		struct i915_vma *vma, *tmp;
		bool was_interruptible;

		wait_rendering(obj);

		was_interruptible = dev_priv->mm.interruptible;
		dev_priv->mm.interruptible = false;

		list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link)
			WARN_ON(i915_vma_unbind(vma));
		WARN_ON(i915_gem_object_put_pages(obj));

		dev_priv->mm.interruptible = was_interruptible;
	}

	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
}

static void add_object(struct i915_mmu_object *mo)
{
	if (mo->attached)
		return;

	interval_tree_insert(&mo->it, &mo->mn->objects);
	mo->attached = true;
}

static void del_object(struct i915_mmu_object *mo)
{
	if (!mo->attached)
		return;

	interval_tree_remove(&mo->it, &mo->mn->objects);
	mo->attached = false;
}

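/*
 * For every userptr object overlapping the invalidated range, queue
 * cancel_userptr() and detach the object from the interval tree, then flush
 * the workqueue so all pages are released before the core mm carries on.
 */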
static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
						       struct mm_struct *mm,
						       unsigned long start,
						       unsigned long end)
{
	struct i915_mmu_notifier *mn =
		container_of(_mn, struct i915_mmu_notifier, mn);
	struct i915_mmu_object *mo;
	struct interval_tree_node *it;
	LIST_HEAD(cancelled);

	if (RB_EMPTY_ROOT(&mn->objects))
		return;

	/* interval ranges are inclusive, but invalidate range is exclusive */
	end--;

	spin_lock(&mn->lock);
	it = interval_tree_iter_first(&mn->objects, start, end);
	while (it) {
		/* The mmu_object is released late when destroying the
		 * GEM object so it is entirely possible to gain a
		 * reference on an object in the process of being freed
		 * since our serialisation is via the spinlock and not
		 * the struct_mutex - and consequently use it after it
		 * is freed and then double free it. To prevent that
		 * use-after-free we only acquire a reference on the
		 * object if it is not in the process of being destroyed.
		 */
		mo = container_of(it, struct i915_mmu_object, it);
		if (kref_get_unless_zero(&mo->obj->base.refcount))
			queue_work(mn->wq, &mo->work);

		list_add(&mo->link, &cancelled);
		it = interval_tree_iter_next(it, start, end);
	}
	list_for_each_entry(mo, &cancelled, link)
		del_object(mo);
	spin_unlock(&mn->lock);

	flush_workqueue(mn->wq);
}

static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
	.invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
};

static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct mm_struct *mm)
{
	struct i915_mmu_notifier *mn;
	int ret;

	mn = kmalloc(sizeof(*mn), GFP_KERNEL);
	if (mn == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&mn->lock);
	mn->mn.ops = &i915_gem_userptr_notifier;
	mn->objects = RB_ROOT;
	mn->wq = alloc_workqueue("i915-userptr-release", WQ_UNBOUND, 0);
	if (mn->wq == NULL) {
		kfree(mn);
		return ERR_PTR(-ENOMEM);
	}

	/* Protected by mmap_sem (write-lock) */
	ret = __mmu_notifier_register(&mn->mn, mm);
	if (ret) {
		destroy_workqueue(mn->wq);
		kfree(mn);
		return ERR_PTR(ret);
	}

	return mn;
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
	struct i915_mmu_object *mo;

	mo = obj->userptr.mmu_object;
	if (mo == NULL)
		return;

	spin_lock(&mo->mn->lock);
	del_object(mo);
	spin_unlock(&mo->mn->lock);
	kfree(mo);

	obj->userptr.mmu_object = NULL;
}

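/*
 * Look up, or lazily create and register, the MMU notifier for this mm.
 * Creation is serialised by mmap_sem (held for write) and dev_priv->mm_lock.
 */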
static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
	struct i915_mmu_notifier *mn = mm->mn;

	if (mn)
		return mn;

	down_write(&mm->mm->mmap_sem);
	mutex_lock(&mm->i915->mm_lock);
	if ((mn = mm->mn) == NULL) {
		mn = i915_mmu_notifier_create(mm->mm);
		if (!IS_ERR(mn))
			mm->mn = mn;
	}
	mutex_unlock(&mm->i915->mm_lock);
	up_write(&mm->mm->mmap_sem);

	return mn;
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	struct i915_mmu_notifier *mn;
	struct i915_mmu_object *mo;

	if (flags & I915_USERPTR_UNSYNCHRONIZED)
		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

	if (WARN_ON(obj->userptr.mm == NULL))
		return -EINVAL;

	mn = i915_mmu_notifier_find(obj->userptr.mm);
	if (IS_ERR(mn))
		return PTR_ERR(mn);

	mo = kzalloc(sizeof(*mo), GFP_KERNEL);
	if (mo == NULL)
		return -ENOMEM;

	mo->mn = mn;
	mo->obj = obj;
	mo->it.start = obj->userptr.ptr;
	mo->it.last = obj->userptr.ptr + obj->base.size - 1;
	INIT_WORK(&mo->work, cancel_userptr);

	obj->userptr.mmu_object = mo;
	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
	if (mn == NULL)
		return;

	mmu_notifier_unregister(&mn->mn, mm);
	destroy_workqueue(mn->wq);
	kfree(mn);
}

#else

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
		return -ENODEV;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
}

#endif

#if 0
static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
{
	struct i915_mm_struct *mm;

	/* Protected by dev_priv->mm_lock */
	hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
		if (mm->mm == real)
			return mm;

	return NULL;
}
#endif

static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
#if 0
	struct i915_mm_struct *mm;
#endif
	int ret = 0;

	/* During release of the GEM object we hold the struct_mutex. This
	 * precludes us from calling mmput() at that time as that may be
	 * the last reference and so call exit_mmap(). exit_mmap() will
	 * attempt to reap the vma, and if we were holding a GTT mmap
	 * would then call drm_gem_vm_close() and attempt to reacquire
	 * the struct mutex. So in order to avoid that recursion, we have
	 * to defer releasing the mm reference until after we drop the
	 * struct_mutex, i.e. we need to schedule a worker to do the clean
	 * up.
	 */
	mutex_lock(&dev_priv->mm_lock);
#if 0
	mm = __i915_mm_struct_find(dev_priv, current->mm);
	if (mm == NULL) {
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (mm == NULL) {
#endif
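			/* XXX mm_struct tracking (the #if 0 blocks above and
			 * below) is not ported, so this path currently always
			 * fails with -ENOMEM. */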
			ret = -ENOMEM;
#if 0
			goto out;
		}

		kref_init(&mm->kref);
		mm->i915 = to_i915(obj->base.dev);

		mm->mm = current->mm;
		atomic_inc(&current->mm->mm_count);

		mm->mn = NULL;

		/* Protected by dev_priv->mm_lock */
		hash_add(dev_priv->mm_structs,
			 &mm->node, (unsigned long)mm->mm);
	} else
		kref_get(&mm->kref);

	obj->userptr.mm = mm;
out:
#endif
	mutex_unlock(&dev_priv->mm_lock);
	return ret;
}

static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
	struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
	i915_mmu_notifier_free(mm->mn, mm->mm);
#if 0
	mmdrop(mm->mm);
#endif
	kfree(mm);
}

static void
__i915_mm_struct_free(struct kref *kref)
{
	struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);

	/* Protected by dev_priv->mm_lock */
#if 0
	hash_del(&mm->node);
#endif
	mutex_unlock(&mm->i915->mm_lock);

	INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
	schedule_work(&mm->work);
}

static void
i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mm == NULL)
		return;

	kref_put_mutex(&obj->userptr.mm->kref,
		       __i915_mm_struct_free,
		       &to_i915(obj->base.dev)->mm_lock);
	obj->userptr.mm = NULL;
}

struct get_pages_work {
	struct work_struct work;
	struct drm_i915_gem_object *obj;
	struct task_struct *task;
};

#if IS_ENABLED(CONFIG_SWIOTLB)
#define swiotlb_active() swiotlb_nr_tbl()
#else
#define swiotlb_active() 0
#endif

#if 0
static int
st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
{
	struct scatterlist *sg;
	int ret, n;

	*st = kmalloc(sizeof(**st), M_DRM, M_WAITOK);
	if (*st == NULL)
		return -ENOMEM;

	if (swiotlb_active()) {
		ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
		if (ret)
			goto err;

		for_each_sg((*st)->sgl, sg, num_pages, n)
			sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
	} else {
		ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
						0, num_pages << PAGE_SHIFT,
						GFP_KERNEL);
		if (ret)
			goto err;
	}

	return 0;

err:
	kfree(*st);
	*st = NULL;
	return ret;
}

static int
__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
			     struct page **pvec, int num_pages)
{
	int ret;

	ret = st_set_pages(&obj->pages, pvec, num_pages);
	if (ret)
		return ret;

	ret = i915_gem_gtt_prepare_object(obj);
	if (ret) {
		sg_free_table(obj->pages);
		kfree(obj->pages);
		obj->pages = NULL;
	}

	return ret;
}
#endif

static int
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
			      bool value)
{
	int ret = 0;

	/* During mm_invalidate_range we need to cancel any userptr that
	 * overlaps the range being invalidated. Doing so requires the
	 * struct_mutex, and that risks recursion. In order to cause
	 * recursion, the user must alias the userptr address space with
	 * a GTT mmapping (possible with a MAP_FIXED) - then when we have
	 * to invalidate that mmapping, mm_invalidate_range is called with
	 * the userptr address *and* the struct_mutex held.  To prevent that
	 * we set a flag under the i915_mmu_notifier spinlock to indicate
	 * whether this object is valid.
	 */
#if defined(CONFIG_MMU_NOTIFIER)
	if (obj->userptr.mmu_object == NULL)
		return 0;

	spin_lock(&obj->userptr.mmu_object->mn->lock);
	/* In order to serialise get_pages with an outstanding
	 * cancel_userptr, we must drop the struct_mutex and try again.
	 */
	if (!value)
		del_object(obj->userptr.mmu_object);
	else if (!work_pending(&obj->userptr.mmu_object->work))
		add_object(obj->userptr.mmu_object);
	else
		ret = -EAGAIN;
	spin_unlock(&obj->userptr.mmu_object->mn->lock);
#endif

	return ret;
}

#if 0
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct drm_i915_gem_object *obj = work->obj;
	struct drm_device *dev = obj->base.dev;
	const int npages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;

	ret = -ENOMEM;
	pinned = 0;

	pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
	if (pvec != NULL) {
		struct mm_struct *mm = obj->userptr.mm->mm;

		ret = -EFAULT;
		if (atomic_inc_not_zero(&mm->mm_users)) {
			down_read(&mm->mmap_sem);
			while (pinned < npages) {
				ret = get_user_pages_remote
					(work->task, mm,
					 obj->userptr.ptr + pinned * PAGE_SIZE,
					 npages - pinned,
					 !obj->userptr.read_only, 0,
					 pvec + pinned, NULL);
				if (ret < 0)
					break;

				pinned += ret;
			}
			up_read(&mm->mmap_sem);
			mmput(mm);
		}
	}

	mutex_lock(&dev->struct_mutex);
	if (obj->userptr.work == &work->work) {
		if (pinned == npages) {
			ret = __i915_gem_userptr_set_pages(obj, pvec, npages);
			if (ret == 0) {
				list_add_tail(&obj->global_list,
					      &to_i915(dev)->mm.unbound_list);
				obj->get_page.sg = obj->pages->sgl;
				obj->get_page.last = 0;
				pinned = 0;
			}
		}
		obj->userptr.work = ERR_PTR(ret);
		if (ret)
			__i915_gem_userptr_set_active(obj, false);
	}

	obj->userptr.workers--;
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);

	put_task_struct(work->task);
	kfree(work);
}

static int
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
				      bool *active)
{
	struct get_pages_work *work;

	/* Spawn a worker so that we can acquire the
	 * user pages without holding our mutex. Access
	 * to the user pages requires mmap_sem, and we have
	 * a strict lock ordering of mmap_sem, struct_mutex -
	 * we already hold struct_mutex here and so cannot
	 * call gup without encountering a lock inversion.
	 *
	 * Userspace will keep on repeating the operation
	 * (thanks to EAGAIN) until either we hit the fast
	 * path or the worker completes. If the worker is
	 * cancelled or superseded, the task is still run
	 * but the results ignored. (This leads to
	 * complications that we may have a stray object
	 * refcount that we need to be wary of when
	 * checking for existing objects during creation.)
	 * If the worker encounters an error, it reports
	 * that error back to this function through
	 * obj->userptr.work = ERR_PTR.
	 */
	if (obj->userptr.workers >= I915_GEM_USERPTR_MAX_WORKERS)
		return -EAGAIN;

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	obj->userptr.work = &work->work;
	obj->userptr.workers++;

	work->obj = obj;
	drm_gem_object_reference(&obj->base);

	work->task = current;
	get_task_struct(work->task);

	INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
	schedule_work(&work->work);

	*active = true;
	return -EAGAIN;
}
#endif

static int
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
#if 0
	const int num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;
	bool active;

	/* If userspace should engineer that these pages are replaced in
	 * the vma between us binding this page into the GTT and completion
	 * of rendering... Their loss. If they change the mapping of their
	 * pages they need to create a new bo to point to the new vma.
	 *
	 * However, that still leaves open the possibility of the vma
	 * being copied upon fork. Which falls under the same userspace
	 * synchronisation issue as a regular bo, except that this time
	 * the process may not be expecting that a particular piece of
	 * memory is tied to the GPU.
	 *
	 * Fortunately, we can hook into the mmu_notifier in order to
	 * discard the page references prior to anything nasty happening
	 * to the vma (discard or cloning) which should prevent the more
	 * egregious cases from causing harm.
	 */
	if (IS_ERR(obj->userptr.work)) {
		/* active flag will have been dropped already by the worker */
		ret = PTR_ERR(obj->userptr.work);
		obj->userptr.work = NULL;
		return ret;
	}
	if (obj->userptr.work)
		/* active flag should still be held for the pending work */
		return -EAGAIN;

	/* Let the mmu-notifier know that we have begun and need cancellation */
	ret = __i915_gem_userptr_set_active(obj, true);
	if (ret)
		return ret;

	pvec = NULL;
	pinned = 0;
	if (obj->userptr.mm->mm == current->mm) {
		pvec = kmalloc(num_pages*sizeof(struct page *),
			       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
		if (pvec == NULL) {
			pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
			if (pvec == NULL) {
				__i915_gem_userptr_set_active(obj, false);
				return -ENOMEM;
			}
		}

		pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
					       !obj->userptr.read_only, pvec);
	}

	active = false;
	if (pinned < 0)
		ret = pinned, pinned = 0;
	else if (pinned < num_pages)
		ret = __i915_gem_userptr_get_pages_schedule(obj, &active);
	else
		ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
	if (ret) {
		__i915_gem_userptr_set_active(obj, active);
		release_pages(pvec, pinned, 0);
	}
	drm_free_large(pvec);
	return ret;
#else
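	/* XXX the get_user_pages() path above is not ported to DragonFly,
	 * so no backing pages are acquired here. */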
	return 0;
#endif	/* 0 */
}

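/*
 * Drop the backing store: flush dirty state back to the user pages and free
 * the sg_table.  Note that the page references themselves are not released
 * here (page_cache_release() is compiled out in this port).
 */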
static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
{
	struct sg_page_iter sg_iter;

	BUG_ON(obj->userptr.work != NULL);
	__i915_gem_userptr_set_active(obj, false);

	if (obj->madv != I915_MADV_WILLNEED)
		obj->dirty = 0;

	i915_gem_gtt_finish_object(obj);

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (obj->dirty)
			set_page_dirty(page);

		mark_page_accessed(page);
#if 0
		page_cache_release(page);
#endif
	}
	obj->dirty = 0;

	sg_free_table(obj->pages);
	kfree(obj->pages);
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	i915_gem_userptr_release__mmu_notifier(obj);
	i915_gem_userptr_release__mm_struct(obj);
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mmu_object)
		return 0;

	return i915_gem_userptr_init__mmu_notifier(obj, 0);
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.release = i915_gem_userptr_release,
};

/**
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is, we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system,
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls. Another
 * restriction is that we do not allow userptr surfaces to be pinned to the
 * hardware and so we reject any attempt to create a framebuffer out of a
 * userptr.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
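/*
 * Illustrative userspace sketch (not part of this file): creating a userptr
 * object from an existing, page-aligned allocation via libdrm.  The field
 * names come from struct drm_i915_gem_userptr in i915_drm.h; the buffer
 * "ptr"/"size", the device fd and "bo_handle" are hypothetical placeholders.
 *
 *	struct drm_i915_gem_userptr arg = {
 *		.user_ptr  = (__u64)(uintptr_t)ptr,	// page-aligned pointer
 *		.user_size = size,			// multiple of PAGE_SIZE
 *		.flags     = 0,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg) == 0)
 *		bo_handle = arg.handle;			// GEM handle for the new object
 */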
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	if (!HAS_LLC(dev) && !HAS_SNOOP(dev)) {
		/* We cannot support coherent userptr objects on hw without
		 * LLC and with broken snooping.
		 */
		return -ENODEV;
	}

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED))
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

#if 0
	if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
		       (char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;
#endif

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/* On almost all of the current hw, we cannot tell the GPU that a
		 * page is readonly, so this is just a placeholder in the uAPI.
		 */
		return -ENODEV;
	}

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops);
	obj->cache_level = I915_CACHE_LLC;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	obj->userptr.ptr = args->user_ptr;
	obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);

	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = i915_gem_userptr_init__mm_struct(obj);
	if (ret == 0)
		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}

int
i915_gem_init_userptr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	lockinit(&dev_priv->mm_lock, "i915dmm", 0, LK_CANRECURSE);
#if 0
	hash_init(dev_priv->mm_structs);
#endif
	return 0;
}