/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

struct i915_mm_struct {
        struct mm_struct *mm;
        struct drm_i915_private *i915;
        struct i915_mmu_notifier *mn;
        struct hlist_node node;
        struct kref kref;
        struct work_struct work;
};

#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>

struct i915_mmu_notifier {
        spinlock_t lock;
        struct hlist_node node;
        struct mmu_notifier mn;
        struct rb_root objects;
        struct workqueue_struct *wq;
};

struct i915_mmu_object {
        struct i915_mmu_notifier *mn;
        struct drm_i915_gem_object *obj;
        struct interval_tree_node it;
        struct list_head link;
        struct work_struct work;
        bool attached;
};

static void wait_rendering(struct drm_i915_gem_object *obj)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
        int i, n;

        if (!obj->active)
                return;

        n = 0;
        for (i = 0; i < I915_NUM_ENGINES; i++) {
                struct drm_i915_gem_request *req;

                req = obj->last_read_req[i];
                if (req == NULL)
                        continue;

                requests[n++] = i915_gem_request_reference(req);
        }

        mutex_unlock(&dev->struct_mutex);

        for (i = 0; i < n; i++)
                __i915_wait_request(requests[i], false, NULL, NULL);

        mutex_lock(&dev->struct_mutex);

        for (i = 0; i < n; i++)
                i915_gem_request_unreference(requests[i]);
}

static void cancel_userptr(struct work_struct *work)
{
        struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
        struct drm_i915_gem_object *obj = mo->obj;
        struct drm_device *dev = obj->base.dev;

        mutex_lock(&dev->struct_mutex);
        /* Cancel any active worker and force us to re-evaluate gup */
        obj->userptr.work = NULL;

        if (obj->pages != NULL) {
                struct drm_i915_private *dev_priv = to_i915(dev);
                struct i915_vma *vma, *tmp;
                bool was_interruptible;

                wait_rendering(obj);

                was_interruptible = dev_priv->mm.interruptible;
                dev_priv->mm.interruptible = false;

                list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link)
                        WARN_ON(i915_vma_unbind(vma));
                WARN_ON(i915_gem_object_put_pages(obj));

                dev_priv->mm.interruptible = was_interruptible;
        }

        drm_gem_object_unreference(&obj->base);
        mutex_unlock(&dev->struct_mutex);
}

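/*
 * Each userptr object is tracked in mn->objects, an interval tree keyed
 * by the CPU address range it shadows.  The tree stores *inclusive*
 * bounds, so last = start + size - 1; e.g. (illustrative numbers only)
 * a two-page object at 0x10000 occupies [it.start, it.last] =
 * [0x10000, 0x11fff].
 */
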
static void add_object(struct i915_mmu_object *mo)
{
        if (mo->attached)
                return;

        interval_tree_insert(&mo->it, &mo->mn->objects);
        mo->attached = true;
}

static void del_object(struct i915_mmu_object *mo)
{
        if (!mo->attached)
                return;

        interval_tree_remove(&mo->it, &mo->mn->objects);
        mo->attached = false;
}

static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
                                                       struct mm_struct *mm,
                                                       unsigned long start,
                                                       unsigned long end)
{
        struct i915_mmu_notifier *mn =
                container_of(_mn, struct i915_mmu_notifier, mn);
        struct i915_mmu_object *mo;
        struct interval_tree_node *it;
        LIST_HEAD(cancelled);

        if (RB_EMPTY_ROOT(&mn->objects))
                return;

        /* interval ranges are inclusive, but invalidate range is exclusive */
        end--;

        spin_lock(&mn->lock);
        it = interval_tree_iter_first(&mn->objects, start, end);
        while (it) {
                /* The mmu_object is released late when destroying the
                 * GEM object so it is entirely possible to gain a
                 * reference on an object in the process of being freed
                 * since our serialisation is via the spinlock and not
                 * the struct_mutex - and consequently use it after it
                 * is freed and then double free it. To prevent that
                 * use-after-free we only acquire a reference on the
                 * object if it is not in the process of being destroyed.
                 */
                mo = container_of(it, struct i915_mmu_object, it);
                if (kref_get_unless_zero(&mo->obj->base.refcount))
                        queue_work(mn->wq, &mo->work);

                list_add(&mo->link, &cancelled);
                it = interval_tree_iter_next(it, start, end);
        }
        list_for_each_entry(mo, &cancelled, link)
                del_object(mo);
        spin_unlock(&mn->lock);

        flush_workqueue(mn->wq);
}

static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
        .invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
};

static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct mm_struct *mm)
{
        struct i915_mmu_notifier *mn;
        int ret;

        mn = kmalloc(sizeof(*mn), GFP_KERNEL);
        if (mn == NULL)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&mn->lock);
        mn->mn.ops = &i915_gem_userptr_notifier;
        mn->objects = RB_ROOT;
        mn->wq = alloc_workqueue("i915-userptr-release", WQ_UNBOUND, 0);
        if (mn->wq == NULL) {
                kfree(mn);
                return ERR_PTR(-ENOMEM);
        }

        /* Protected by mmap_sem (write-lock) */
        ret = __mmu_notifier_register(&mn->mn, mm);
        if (ret) {
                destroy_workqueue(mn->wq);
                kfree(mn);
                return ERR_PTR(ret);
        }

        return mn;
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
        struct i915_mmu_object *mo;

        mo = obj->userptr.mmu_object;
        if (mo == NULL)
                return;

        spin_lock(&mo->mn->lock);
        del_object(mo);
        spin_unlock(&mo->mn->lock);
        kfree(mo);

        obj->userptr.mmu_object = NULL;
}

static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
        struct i915_mmu_notifier *mn = mm->mn;

        if (mn)
                return mn;

        down_write(&mm->mm->mmap_sem);
        mutex_lock(&mm->i915->mm_lock);
        if ((mn = mm->mn) == NULL) {
                mn = i915_mmu_notifier_create(mm->mm);
                if (!IS_ERR(mn))
                        mm->mn = mn;
        }
        mutex_unlock(&mm->i915->mm_lock);
        up_write(&mm->mm->mmap_sem);

        return mn;
}

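/*
 * i915_mmu_notifier_find() above uses a classic double-checked pattern:
 * an unlocked read of mm->mn for the common case, then a re-check under
 * mm_lock before creating the notifier.  mmap_sem is taken for writing
 * first because __mmu_notifier_register() requires it, which establishes
 * the mmap_sem (write) -> mm_lock nesting order here.
 */
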
static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
                                    unsigned flags)
{
        struct i915_mmu_notifier *mn;
        struct i915_mmu_object *mo;

        if (flags & I915_USERPTR_UNSYNCHRONIZED)
                return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

        if (WARN_ON(obj->userptr.mm == NULL))
                return -EINVAL;

        mn = i915_mmu_notifier_find(obj->userptr.mm);
        if (IS_ERR(mn))
                return PTR_ERR(mn);

        mo = kzalloc(sizeof(*mo), GFP_KERNEL);
        if (mo == NULL)
                return -ENOMEM;

        mo->mn = mn;
        mo->obj = obj;
        mo->it.start = obj->userptr.ptr;
        mo->it.last = obj->userptr.ptr + obj->base.size - 1;
        INIT_WORK(&mo->work, cancel_userptr);

        obj->userptr.mmu_object = mo;
        return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
                       struct mm_struct *mm)
{
        if (mn == NULL)
                return;

        mmu_notifier_unregister(&mn->mn, mm);
        destroy_workqueue(mn->wq);
        kfree(mn);
}

#else

#if 0
static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
                                    unsigned flags)
{
        if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
                return -ENODEV;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
                       struct mm_struct *mm)
{
}
#endif

#endif

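/*
 * The i915_mm_struct below caches one entry per process address space:
 * all userptr objects created against the same mm share a single
 * refcounted i915_mm_struct (and hence a single mmu_notifier), looked
 * up via the mm_structs hash table keyed on the mm_struct pointer.
 */
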
#if 0
static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
{
        struct i915_mm_struct *mm;

        /* Protected by dev_priv->mm_lock */
        hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
                if (mm->mm == real)
                        return mm;

        return NULL;
}

static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct i915_mm_struct *mm;
        int ret = 0;

        /* During release of the GEM object we hold the struct_mutex. This
         * precludes us from calling mmput() at that time as that may be
         * the last reference and so call exit_mmap(). exit_mmap() will
         * attempt to reap the vma, and if we were holding a GTT mmap
         * would then call drm_gem_vm_close() and attempt to reacquire
         * the struct mutex. So in order to avoid that recursion, we have
         * to defer releasing the mm reference until after we drop the
         * struct_mutex, i.e. we need to schedule a worker to do the clean
         * up.
         */
        mutex_lock(&dev_priv->mm_lock);
        mm = __i915_mm_struct_find(dev_priv, current->mm);
        if (mm == NULL) {
                mm = kmalloc(sizeof(*mm), GFP_KERNEL);
                if (mm == NULL) {
                        ret = -ENOMEM;
                        goto out;
                }

                kref_init(&mm->kref);
                mm->i915 = to_i915(obj->base.dev);

                mm->mm = current->mm;
                atomic_inc(&current->mm->mm_count);

                mm->mn = NULL;

                /* Protected by dev_priv->mm_lock */
                hash_add(dev_priv->mm_structs,
                         &mm->node, (unsigned long)mm->mm);
        } else
                kref_get(&mm->kref);

        obj->userptr.mm = mm;
out:
        mutex_unlock(&dev_priv->mm_lock);
        return ret;
}

static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
        struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
        i915_mmu_notifier_free(mm->mn, mm->mm);
        mmdrop(mm->mm);
        kfree(mm);
}

static void
__i915_mm_struct_free(struct kref *kref)
{
        struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);

        /* Protected by dev_priv->mm_lock */
        hash_del(&mm->node);
        mutex_unlock(&mm->i915->mm_lock);

        INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
        schedule_work(&mm->work);
}

static void
i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
{
        if (obj->userptr.mm == NULL)
                return;

        kref_put_mutex(&obj->userptr.mm->kref,
                       __i915_mm_struct_free,
                       &to_i915(obj->base.dev)->mm_lock);
        obj->userptr.mm = NULL;
}
#endif

struct get_pages_work {
        struct work_struct work;
        struct drm_i915_gem_object *obj;
        struct task_struct *task;
};

#if IS_ENABLED(CONFIG_SWIOTLB)
#define swiotlb_active() swiotlb_nr_tbl()
#else
#define swiotlb_active() 0
#endif

#if 0
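/*
 * st_set_pages() builds the object's sg_table.  A sketch of the
 * rationale, inferred from the two code paths rather than stated
 * authoritatively here: sg_alloc_table_from_pages() coalesces
 * contiguous pages into long segments, which swiotlb bounce buffers
 * may be unable to map, so while swiotlb is active each page keeps
 * its own scatterlist entry.
 */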
static int
st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
{
        struct scatterlist *sg;
        int ret, n;

        *st = kmalloc(sizeof(**st), GFP_KERNEL);
        if (*st == NULL)
                return -ENOMEM;

        if (swiotlb_active()) {
                ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
                if (ret)
                        goto err;

                for_each_sg((*st)->sgl, sg, num_pages, n)
                        sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
        } else {
                ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
                                                0, num_pages << PAGE_SHIFT,
                                                GFP_KERNEL);
                if (ret)
                        goto err;
        }

        return 0;

err:
        kfree(*st);
        *st = NULL;
        return ret;
}

static int
__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
                             struct page **pvec, int num_pages)
{
        int ret;

        ret = st_set_pages(&obj->pages, pvec, num_pages);
        if (ret)
                return ret;

        ret = i915_gem_gtt_prepare_object(obj);
        if (ret) {
                sg_free_table(obj->pages);
                kfree(obj->pages);
                obj->pages = NULL;
        }

        return ret;
}

static int
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
                              bool value)
{
        int ret = 0;

        /* During mm_invalidate_range we need to cancel any userptr that
         * overlaps the range being invalidated. Doing so requires the
         * struct_mutex, and that risks recursion. In order to cause
         * recursion, the user must alias the userptr address space with
         * a GTT mmapping (possible with a MAP_FIXED) - then when we have
         * to invalidate that mmapping, mm_invalidate_range is called with
         * the userptr address *and* the struct_mutex held. To prevent that
         * we set a flag under the i915_mmu_notifier spinlock to indicate
         * whether this object is valid.
         */
#if defined(CONFIG_MMU_NOTIFIER)
        if (obj->userptr.mmu_object == NULL)
                return 0;

        spin_lock(&obj->userptr.mmu_object->mn->lock);
        /* In order to serialise get_pages with an outstanding
         * cancel_userptr, we must drop the struct_mutex and try again.
         */
        if (!value)
                del_object(obj->userptr.mmu_object);
        else if (!work_pending(&obj->userptr.mmu_object->work))
                add_object(obj->userptr.mmu_object);
        else
                ret = -EAGAIN;
        spin_unlock(&obj->userptr.mmu_object->mn->lock);
#endif

        return ret;
}

static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
        struct get_pages_work *work = container_of(_work, typeof(*work), work);
        struct drm_i915_gem_object *obj = work->obj;
        struct drm_device *dev = obj->base.dev;
        const int npages = obj->base.size >> PAGE_SHIFT;
        struct page **pvec;
        int pinned, ret;

        ret = -ENOMEM;
        pinned = 0;

        pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
        if (pvec != NULL) {
                struct mm_struct *mm = obj->userptr.mm->mm;

                ret = -EFAULT;
                if (atomic_inc_not_zero(&mm->mm_users)) {
                        down_read(&mm->mmap_sem);
                        while (pinned < npages) {
                                ret = get_user_pages_remote
                                        (work->task, mm,
                                         obj->userptr.ptr + pinned * PAGE_SIZE,
                                         npages - pinned,
                                         !obj->userptr.read_only, 0,
                                         pvec + pinned, NULL);
                                if (ret < 0)
                                        break;

                                pinned += ret;
                        }
                        up_read(&mm->mmap_sem);
                        mmput(mm);
                }
        }

        mutex_lock(&dev->struct_mutex);
        if (obj->userptr.work == &work->work) {
                if (pinned == npages) {
                        ret = __i915_gem_userptr_set_pages(obj, pvec, npages);
                        if (ret == 0) {
                                list_add_tail(&obj->global_list,
                                              &to_i915(dev)->mm.unbound_list);
                                obj->get_page.sg = obj->pages->sgl;
                                obj->get_page.last = 0;
                                pinned = 0;
                        }
                }
                obj->userptr.work = ERR_PTR(ret);
                if (ret)
                        __i915_gem_userptr_set_active(obj, false);
        }

        obj->userptr.workers--;
        drm_gem_object_unreference(&obj->base);
        mutex_unlock(&dev->struct_mutex);

        release_pages(pvec, pinned, 0);
        drm_free_large(pvec);

        put_task_struct(work->task);
        kfree(work);
}

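/*
 * A descriptive summary of the obj->userptr.work state machine used by
 * the worker above and the functions below: NULL means idle (or the
 * last worker succeeded, since ERR_PTR(0) == NULL); a valid pointer
 * means a worker is in flight and callers should return -EAGAIN; an
 * ERR_PTR-encoded errno means the last worker failed.
 */
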
static int
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
                                      bool *active)
{
        struct get_pages_work *work;

        /* Spawn a worker so that we can acquire the
         * user pages without holding our mutex. Access
         * to the user pages requires mmap_sem, and we have
         * a strict lock ordering of mmap_sem, struct_mutex -
         * we already hold struct_mutex here and so cannot
         * call gup without encountering a lock inversion.
         *
         * Userspace will keep on repeating the operation
         * (thanks to EAGAIN) until either we hit the fast
         * path or the worker completes. If the worker is
         * cancelled or superseded, the task is still run
         * but the results ignored. (This leads to
         * complications that we may have a stray object
         * refcount that we need to be wary of when
         * checking for existing objects during creation.)
         * If the worker encounters an error, it reports
         * that error back to this function through
         * obj->userptr.work = ERR_PTR.
         */
        if (obj->userptr.workers >= I915_GEM_USERPTR_MAX_WORKERS)
                return -EAGAIN;

        work = kmalloc(sizeof(*work), GFP_KERNEL);
        if (work == NULL)
                return -ENOMEM;

        obj->userptr.work = &work->work;
        obj->userptr.workers++;

        work->obj = obj;
        drm_gem_object_reference(&obj->base);

        work->task = current;
        get_task_struct(work->task);

        INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
        schedule_work(&work->work);

        *active = true;
        return -EAGAIN;
}

static int
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
        const int num_pages = obj->base.size >> PAGE_SHIFT;
        struct page **pvec;
        int pinned, ret;
        bool active;

        /* If userspace should engineer that these pages are replaced in
         * the vma between us binding this page into the GTT and completion
         * of rendering... Their loss. If they change the mapping of their
         * pages they need to create a new bo to point to the new vma.
         *
         * However, that still leaves open the possibility of the vma
         * being copied upon fork. Which falls under the same userspace
         * synchronisation issue as a regular bo, except that this time
         * the process may not be expecting that a particular piece of
         * memory is tied to the GPU.
         *
         * Fortunately, we can hook into the mmu_notifier in order to
         * discard the page references prior to anything nasty happening
         * to the vma (discard or cloning) which should prevent the more
         * egregious cases from causing harm.
         */
        if (IS_ERR(obj->userptr.work)) {
                /* active flag will have been dropped already by the worker */
                ret = PTR_ERR(obj->userptr.work);
                obj->userptr.work = NULL;
                return ret;
        }
        if (obj->userptr.work)
                /* active flag should still be held for the pending work */
                return -EAGAIN;

        /* Let the mmu-notifier know that we have begun and need cancellation */
        ret = __i915_gem_userptr_set_active(obj, true);
        if (ret)
                return ret;

        pvec = NULL;
        pinned = 0;
        if (obj->userptr.mm->mm == current->mm) {
                pvec = kmalloc(num_pages * sizeof(struct page *),
                               GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
                if (pvec == NULL) {
                        pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
                        if (pvec == NULL) {
                                __i915_gem_userptr_set_active(obj, false);
                                return -ENOMEM;
                        }
                }

                pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
                                               !obj->userptr.read_only, pvec);
        }

        active = false;
        if (pinned < 0)
                ret = pinned, pinned = 0;
        else if (pinned < num_pages)
                ret = __i915_gem_userptr_get_pages_schedule(obj, &active);
        else
                ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
        if (ret) {
                __i915_gem_userptr_set_active(obj, active);
                release_pages(pvec, pinned, 0);
        }
        drm_free_large(pvec);
        return ret;
}

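/*
 * put_pages drops the elevated page references taken by get_user_pages:
 * pages written by the GPU are flagged with set_page_dirty() so the VM
 * preserves their contents, and page_cache_release() releases each pin.
 */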
static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
{
        struct sg_page_iter sg_iter;

        BUG_ON(obj->userptr.work != NULL);
        __i915_gem_userptr_set_active(obj, false);

        if (obj->madv != I915_MADV_WILLNEED)
                obj->dirty = 0;

        i915_gem_gtt_finish_object(obj);

        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
                struct page *page = sg_page_iter_page(&sg_iter);

                if (obj->dirty)
                        set_page_dirty(page);

                mark_page_accessed(page);
                page_cache_release(page);
        }
        obj->dirty = 0;

        sg_free_table(obj->pages);
        kfree(obj->pages);
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
        i915_gem_userptr_release__mmu_notifier(obj);
        i915_gem_userptr_release__mm_struct(obj);
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
        if (obj->userptr.mmu_object)
                return 0;

        return i915_gem_userptr_init__mmu_notifier(obj, 0);
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
        .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
        .get_pages = i915_gem_userptr_get_pages,
        .put_pages = i915_gem_userptr_put_pages,
        .dmabuf_export = i915_gem_userptr_dmabuf_export,
        .release = i915_gem_userptr_release,
};

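/*
 * Illustrative sketch of userspace usage of the ioctl documented below;
 * "fd" is an open DRM device node and "ptr"/"size" must be page aligned:
 *
 *      struct drm_i915_gem_userptr arg = {
 *              .user_ptr = (__u64)(uintptr_t)ptr,
 *              .user_size = size,
 *              .flags = 0,
 *      };
 *      ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg);
 *
 * On success, arg.handle names a GEM object backed by ptr.
 */
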
/**
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note, that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls. Another
 * restriction is that we do not allow userptr surfaces to be pinned to the
 * hardware and so we reject any attempt to create a framebuffer out of a
 * userptr.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
        struct drm_i915_gem_userptr *args = data;
        struct drm_i915_gem_object *obj;
        int ret;
        u32 handle;

        if (!HAS_LLC(dev) && !HAS_SNOOP(dev)) {
                /* We cannot support coherent userptr objects on hw without
                 * LLC and broken snooping.
                 */
                return -ENODEV;
        }

        if (args->flags & ~(I915_USERPTR_READ_ONLY |
                            I915_USERPTR_UNSYNCHRONIZED))
                return -EINVAL;

        if (offset_in_page(args->user_ptr | args->user_size))
                return -EINVAL;

        if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ?
                       VERIFY_READ : VERIFY_WRITE,
                       (char __user *)(unsigned long)args->user_ptr,
                       args->user_size))
                return -EFAULT;

        if (args->flags & I915_USERPTR_READ_ONLY) {
                /* On almost all of the current hw, we cannot tell the GPU that a
                 * page is readonly, so this is just a placeholder in the uAPI.
                 */
                return -ENODEV;
        }

        obj = i915_gem_object_alloc(dev);
        if (obj == NULL)
                return -ENOMEM;

        drm_gem_private_object_init(dev, &obj->base, args->user_size);
        i915_gem_object_init(obj, &i915_gem_userptr_ops);
        obj->cache_level = I915_CACHE_LLC;
        obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        obj->base.read_domains = I915_GEM_DOMAIN_CPU;

        obj->userptr.ptr = args->user_ptr;
        obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);

        /* And keep a pointer to the current->mm for resolving the user pages
         * at binding. This means that we need to hook into the mmu_notifier
         * in order to detect if the mmu is destroyed.
         */
        ret = i915_gem_userptr_init__mm_struct(obj);
        if (ret == 0)
                ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
        if (ret == 0)
                ret = drm_gem_handle_create(file, &obj->base, &handle);

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(&obj->base);
        if (ret)
                return ret;

        args->handle = handle;
        return 0;
}
#endif

int
i915_gem_init_userptr(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);

        lockinit(&dev_priv->mm_lock, "i915dmm", 0, LK_CANRECURSE);
#if 0
        hash_init(dev_priv->mm_structs);
#endif
        return 0;
}