/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>

struct i915_mm_struct {
        struct mm_struct *mm;
        struct drm_i915_private *i915;
        struct i915_mmu_notifier *mn;
        struct hlist_node node;
        struct kref kref;
        struct work_struct work;
};

#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>

struct i915_mmu_notifier {
        spinlock_t lock;
        struct hlist_node node;
        struct mmu_notifier mn;
        struct rb_root objects;
        struct workqueue_struct *wq;
};

struct i915_mmu_object {
        struct i915_mmu_notifier *mn;
        struct drm_i915_gem_object *obj;
        struct interval_tree_node it;
        struct list_head link;
        struct work_struct work;
        bool attached;
};

static void cancel_userptr(struct work_struct *work)
{
        struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
        struct drm_i915_gem_object *obj = mo->obj;
        struct work_struct *active;

        /* Cancel any active worker and force us to re-evaluate gup */
        mutex_lock(&obj->mm.lock);
        active = fetch_and_zero(&obj->userptr.work);
        mutex_unlock(&obj->mm.lock);
        if (active)
                goto out;

        i915_gem_object_wait(obj, I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT, NULL);

        mutex_lock(&obj->base.dev->struct_mutex);

        /* We are inside a kthread context and can't be interrupted */
        if (i915_gem_object_unbind(obj) == 0)
                __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
        WARN_ONCE(i915_gem_object_has_pages(obj),
                  "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_global=%d\n",
                  obj->bind_count,
                  atomic_read(&obj->mm.pages_pin_count),
                  obj->pin_global);

        mutex_unlock(&obj->base.dev->struct_mutex);

out:
        i915_gem_object_put(obj);
}

static void add_object(struct i915_mmu_object *mo)
{
        if (mo->attached)
                return;

        interval_tree_insert(&mo->it, &mo->mn->objects);
        mo->attached = true;
}

static void del_object(struct i915_mmu_object *mo)
{
        if (!mo->attached)
                return;

        interval_tree_remove(&mo->it, &mo->mn->objects);
        mo->attached = false;
}

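/*
 * mmu_notifier callback: the core mm is about to invalidate [start, end)
 * in this address space (munmap, mprotect, CoW break on fork, ...), so
 * every overlapping userptr object must drop its pinned pages before the
 * mapping changes underneath it.
 */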
static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
                                                       struct mm_struct *mm,
                                                       unsigned long start,
                                                       unsigned long end)
{
        struct i915_mmu_notifier *mn =
                container_of(_mn, struct i915_mmu_notifier, mn);
        struct i915_mmu_object *mo;
        struct interval_tree_node *it;
        LINUX_LIST_HEAD(cancelled);

        if (RB_EMPTY_ROOT(&mn->objects))
                return;

        /* interval ranges are inclusive, but invalidate range is exclusive */
        end--;

        lockmgr(&mn->lock, LK_EXCLUSIVE);
        it = interval_tree_iter_first(&mn->objects, start, end);
        while (it) {
                /* The mmu_object is released late when destroying the
                 * GEM object so it is entirely possible to gain a
                 * reference on an object in the process of being freed
                 * since our serialisation is via the spinlock and not
                 * the struct_mutex - and consequently use it after it
                 * is freed and then double free it. To prevent that
                 * use-after-free we only acquire a reference on the
                 * object if it is not in the process of being destroyed.
                 */
                mo = container_of(it, struct i915_mmu_object, it);
                if (kref_get_unless_zero(&mo->obj->base.refcount))
                        queue_work(mn->wq, &mo->work);

                list_add(&mo->link, &cancelled);
                it = interval_tree_iter_next(it, start, end);
        }
        list_for_each_entry(mo, &cancelled, link)
                del_object(mo);
        lockmgr(&mn->lock, LK_RELEASE);

        if (!list_empty(&cancelled))
                flush_workqueue(mn->wq);
}

static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
        .invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
};

static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct mm_struct *mm)
{
        struct i915_mmu_notifier *mn;

        mn = kmalloc(sizeof(*mn), M_DRM, GFP_KERNEL);
        if (mn == NULL)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&mn->lock);
        mn->mn.ops = &i915_gem_userptr_notifier;
        mn->objects = LINUX_RB_ROOT;
        mn->wq = alloc_workqueue("i915-userptr-release",
                                 WQ_UNBOUND | WQ_MEM_RECLAIM,
                                 0);
        if (mn->wq == NULL) {
                kfree(mn);
                return ERR_PTR(-ENOMEM);
        }

        return mn;
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
        struct i915_mmu_object *mo;

        mo = obj->userptr.mmu_object;
        if (mo == NULL)
                return;

        lockmgr(&mo->mn->lock, LK_EXCLUSIVE);
        del_object(mo);
        lockmgr(&mo->mn->lock, LK_RELEASE);
        kfree(mo);

        obj->userptr.mmu_object = NULL;
}

static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
        struct i915_mmu_notifier *mn;
        int err = 0;

        mn = mm->mn;
        if (mn)
                return mn;

        mn = i915_mmu_notifier_create(mm->mm);
        if (IS_ERR(mn))
                err = PTR_ERR(mn);

        down_write(&mm->mm->mmap_sem);
        mutex_lock(&mm->i915->mm_lock);
        if (mm->mn == NULL && !err) {
                /* Protected by mmap_sem (write-lock) */
                err = __mmu_notifier_register(&mn->mn, mm->mm);
                if (!err) {
                        /* Protected by mm_lock */
                        mm->mn = fetch_and_zero(&mn);
                }
        } else if (mm->mn) {
                /*
                 * Someone else raced and successfully installed the mmu
                 * notifier, we can cancel our own errors.
                 */
                err = 0;
        }
        mutex_unlock(&mm->i915->mm_lock);
        up_write(&mm->mm->mmap_sem);

        if (mn && !IS_ERR(mn)) {
                destroy_workqueue(mn->wq);
                kfree(mn);
        }

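        /*
         * Either we installed our notifier (the local pointer was zeroed
         * by fetch_and_zero() above), a racing thread installed theirs
         * (our surplus one was just destroyed), or registration failed
         * and err is still set.
         */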
        return err ? ERR_PTR(err) : mm->mn;
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
                                    unsigned flags)
{
        struct i915_mmu_notifier *mn;
        struct i915_mmu_object *mo;

        if (flags & I915_USERPTR_UNSYNCHRONIZED)
                return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

        if (WARN_ON(obj->userptr.mm == NULL))
                return -EINVAL;

        mn = i915_mmu_notifier_find(obj->userptr.mm);
        if (IS_ERR(mn))
                return PTR_ERR(mn);

        mo = kzalloc(sizeof(*mo), GFP_KERNEL);
        if (mo == NULL)
                return -ENOMEM;

        mo->mn = mn;
        mo->obj = obj;
        mo->it.start = obj->userptr.ptr;
        mo->it.last = obj->userptr.ptr + obj->base.size - 1;
        INIT_WORK(&mo->work, cancel_userptr);

        obj->userptr.mmu_object = mo;
        return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
                       struct mm_struct *mm)
{
        if (mn == NULL)
                return;

        mmu_notifier_unregister(&mn->mn, mm);
        destroy_workqueue(mn->wq);
        kfree(mn);
}

#else

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
                                    unsigned flags)
{
        if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
                return -ENODEV;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
                       struct mm_struct *mm)
{
}

#endif

static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
{
        struct i915_mm_struct *mm;

        /* Protected by dev_priv->mm_lock */
        hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
                if (mm->mm == real)
                        return mm;

        return NULL;
}

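/*
 * All userptr objects created from the same process share one
 * i915_mm_struct (and hence one mmu notifier); look it up by mm, or
 * create and hash a new entry on first use.
 */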
static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct i915_mm_struct *mm;
        int ret = 0;

        /* During release of the GEM object we hold the struct_mutex. This
         * precludes us from calling mmput() at that time as that may be
         * the last reference and so call exit_mmap(). exit_mmap() will
         * attempt to reap the vma, and if we were holding a GTT mmap
         * would then call drm_gem_vm_close() and attempt to reacquire
         * the struct mutex. So in order to avoid that recursion, we have
         * to defer releasing the mm reference until after we drop the
         * struct_mutex, i.e. we need to schedule a worker to do the clean
         * up.
         */
        mutex_lock(&dev_priv->mm_lock);
        mm = __i915_mm_struct_find(dev_priv, current->mm);
        if (mm == NULL) {
                mm = kmalloc(sizeof(*mm), M_DRM, GFP_KERNEL);
                if (mm == NULL) {
                        ret = -ENOMEM;
                        goto out;
                }

                kref_init(&mm->kref);
                mm->i915 = to_i915(obj->base.dev);

                mm->mm = current->mm;
                mmgrab(current->mm);

                mm->mn = NULL;

                /* Protected by dev_priv->mm_lock */
                hash_add(dev_priv->mm_structs,
                         &mm->node, (unsigned long)mm->mm);
        } else
                kref_get(&mm->kref);

        obj->userptr.mm = mm;
out:
        mutex_unlock(&dev_priv->mm_lock);
        return ret;
}

static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
        struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
        i915_mmu_notifier_free(mm->mn, mm->mm);
#if 0
        mmdrop(mm->mm);
#endif
        kfree(mm);
}

static void
__i915_mm_struct_free(struct kref *kref)
{
        struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);

        /* Protected by dev_priv->mm_lock */
        hash_del(&mm->node);
        mutex_unlock(&mm->i915->mm_lock);

        INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
        queue_work(mm->i915->mm.userptr_wq, &mm->work);
}

static void
i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
{
        if (obj->userptr.mm == NULL)
                return;

        kref_put_mutex(&obj->userptr.mm->kref,
                       __i915_mm_struct_free,
                       &to_i915(obj->base.dev)->mm_lock);
        obj->userptr.mm = NULL;
}

struct get_pages_work {
        struct work_struct work;
        struct drm_i915_gem_object *obj;
        struct task_struct *task;
};

#if 0
static struct sg_table *
__i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
                               struct page **pvec, int num_pages)
{
        unsigned int max_segment = i915_sg_segment_size();
        struct sg_table *st;
        unsigned int sg_page_sizes;
        int ret;

        st = kmalloc(sizeof(*st), M_DRM, GFP_KERNEL);
        if (!st)
                return ERR_PTR(-ENOMEM);

alloc_table:
        ret = __sg_alloc_table_from_pages(st, pvec, num_pages,
                                          0, num_pages << PAGE_SHIFT,
                                          max_segment,
                                          GFP_KERNEL);
        if (ret) {
                kfree(st);
                return ERR_PTR(ret);
        }

        ret = i915_gem_gtt_prepare_pages(obj, st);
        if (ret) {
                sg_free_table(st);

                if (max_segment > PAGE_SIZE) {
                        max_segment = PAGE_SIZE;
                        goto alloc_table;
                }

                kfree(st);
                return ERR_PTR(ret);
        }

        sg_page_sizes = i915_sg_page_sizes(st->sgl);

        __i915_gem_object_set_pages(obj, st, sg_page_sizes);

        return st;
}
#endif

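/*
 * The "active" flag below tracks whether the object currently sits in
 * its notifier's interval tree, i.e. whether an invalidation of the
 * user's address range must cancel it.  It is flipped under the
 * notifier spinlock so that get_pages can serialise against
 * cancel_userptr without taking struct_mutex.
 */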
static int
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
                              bool value)
{
        int ret = 0;

        /* During mm_invalidate_range we need to cancel any userptr that
         * overlaps the range being invalidated. Doing so requires the
         * struct_mutex, and that risks recursion. In order to cause
         * recursion, the user must alias the userptr address space with
         * a GTT mmapping (possible with a MAP_FIXED) - then when we have
         * to invalidate that mmapping, mm_invalidate_range is called with
         * the userptr address *and* the struct_mutex held. To prevent that
         * we set a flag under the i915_mmu_notifier spinlock to indicate
         * whether this object is valid.
         */
#if defined(CONFIG_MMU_NOTIFIER)
        if (obj->userptr.mmu_object == NULL)
                return 0;

        lockmgr(&obj->userptr.mmu_object->mn->lock, LK_EXCLUSIVE);
        /* In order to serialise get_pages with an outstanding
         * cancel_userptr, we must drop the struct_mutex and try again.
         */
        if (!value)
                del_object(obj->userptr.mmu_object);
        else if (!work_pending(&obj->userptr.mmu_object->work))
                add_object(obj->userptr.mmu_object);
        else
                ret = -EAGAIN;
        lockmgr(&obj->userptr.mmu_object->mn->lock, LK_RELEASE);
#endif

        return ret;
}

#if 0
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
        struct get_pages_work *work = container_of(_work, typeof(*work), work);
        struct drm_i915_gem_object *obj = work->obj;
        const int npages = obj->base.size >> PAGE_SHIFT;
        struct page **pvec;
        int pinned, ret;

        ret = -ENOMEM;
        pinned = 0;

        pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_TEMPORARY);
        if (pvec != NULL) {
                struct mm_struct *mm = obj->userptr.mm->mm;
                unsigned int flags = 0;

                if (!obj->userptr.read_only)
                        flags |= FOLL_WRITE;

                ret = -EFAULT;
                if (mmget_not_zero(mm)) {
                        down_read(&mm->mmap_sem);
                        while (pinned < npages) {
                                ret = get_user_pages_remote
                                        (work->task, mm,
                                         obj->userptr.ptr + pinned * PAGE_SIZE,
                                         npages - pinned,
                                         flags,
                                         pvec + pinned, NULL, NULL);
                                if (ret < 0)
                                        break;

                                pinned += ret;
                        }
                        up_read(&mm->mmap_sem);
                        mmput(mm);
                }
        }

        mutex_lock(&obj->mm.lock);
        if (obj->userptr.work == &work->work) {
                struct sg_table *pages = ERR_PTR(ret);

                if (pinned == npages) {
                        pages = __i915_gem_userptr_alloc_pages(obj, pvec,
                                                               npages);
                        if (!IS_ERR(pages)) {
                                pinned = 0;
                                pages = NULL;
                        }
                }

                obj->userptr.work = ERR_CAST(pages);
                if (IS_ERR(pages))
                        __i915_gem_userptr_set_active(obj, false);
        }
        mutex_unlock(&obj->mm.lock);

        release_pages(pvec, pinned);
        kvfree(pvec);

        i915_gem_object_put(obj);
        put_task_struct(work->task);
        kfree(work);
}

static struct sg_table *
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
{
        struct get_pages_work *work;

        /* Spawn a worker so that we can acquire the
         * user pages without holding our mutex. Access
         * to the user pages requires mmap_sem, and we have
         * a strict lock ordering of mmap_sem, struct_mutex -
         * we already hold struct_mutex here and so cannot
         * call gup without encountering a lock inversion.
         *
         * Userspace will keep on repeating the operation
         * (thanks to EAGAIN) until either we hit the fast
         * path or the worker completes. If the worker is
         * cancelled or superseded, the task is still run
         * but the results ignored. (This leads to
         * complications that we may have a stray object
         * refcount that we need to be wary of when
         * checking for existing objects during creation.)
         * If the worker encounters an error, it reports
         * that error back to this function through
         * obj->userptr.work = ERR_PTR.
         */
        work = kmalloc(sizeof(*work), M_DRM, GFP_KERNEL);
        if (work == NULL)
                return ERR_PTR(-ENOMEM);

        obj->userptr.work = &work->work;

        work->obj = i915_gem_object_get(obj);

        work->task = current;
        get_task_struct(work->task);

        INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
        queue_work(to_i915(obj->base.dev)->mm.userptr_wq, &work->work);

        return ERR_PTR(-EAGAIN);
}
#endif

static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
#if 0
        const int num_pages = obj->base.size >> PAGE_SHIFT;
        struct mm_struct *mm = obj->userptr.mm->mm;
        struct page **pvec;
        struct sg_table *pages;
        bool active;
        int pinned;

        /* If userspace should engineer that these pages are replaced in
         * the vma between us binding this page into the GTT and completion
         * of rendering... Their loss. If they change the mapping of their
         * pages they need to create a new bo to point to the new vma.
         *
         * However, that still leaves open the possibility of the vma
         * being copied upon fork. Which falls under the same userspace
         * synchronisation issue as a regular bo, except that this time
         * the process may not be expecting that a particular piece of
         * memory is tied to the GPU.
         *
         * Fortunately, we can hook into the mmu_notifier in order to
         * discard the page references prior to anything nasty happening
         * to the vma (discard or cloning) which should prevent the more
         * egregious cases from causing harm.
         */

        if (obj->userptr.work) {
                /* active flag should still be held for the pending work */
                if (IS_ERR(obj->userptr.work))
                        return PTR_ERR(obj->userptr.work);
                else
                        return -EAGAIN;
        }

        pvec = NULL;
        pinned = 0;

        if (mm == current->mm) {
                pvec = kvmalloc_array(num_pages, sizeof(struct page *),
                                      GFP_KERNEL |
                                      __GFP_NORETRY |
                                      __GFP_NOWARN);
                if (pvec) /* defer to worker if malloc fails */
                        pinned = __get_user_pages_fast(obj->userptr.ptr,
                                                       num_pages,
                                                       !obj->userptr.read_only,
                                                       pvec);
        }

        active = false;
        if (pinned < 0) {
                pages = ERR_PTR(pinned);
                pinned = 0;
        } else if (pinned < num_pages) {
                pages = __i915_gem_userptr_get_pages_schedule(obj);
                active = pages == ERR_PTR(-EAGAIN);
        } else {
                pages = __i915_gem_userptr_alloc_pages(obj, pvec, num_pages);
                active = !IS_ERR(pages);
        }
        if (active)
                __i915_gem_userptr_set_active(obj, true);

        if (IS_ERR(pages))
                release_pages(pvec, pinned);
        kvfree(pvec);

        return PTR_ERR_OR_ZERO(pages);
#endif
        return -EINVAL;
}

static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
                           struct sg_table *pages)
{
        struct sgt_iter sgt_iter;
        struct page *page;

        BUG_ON(obj->userptr.work != NULL);
        __i915_gem_userptr_set_active(obj, false);

        if (obj->mm.madv != I915_MADV_WILLNEED)
                obj->mm.dirty = false;

        i915_gem_gtt_finish_pages(obj, pages);

        for_each_sgt_page(page, sgt_iter, pages) {
                if (obj->mm.dirty)
                        set_page_dirty(page);

                mark_page_accessed(page);
                put_page(page);
        }
        obj->mm.dirty = false;

        sg_free_table(pages);
        kfree(pages);
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
        i915_gem_userptr_release__mmu_notifier(obj);
        i915_gem_userptr_release__mm_struct(obj);
}

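/*
 * Exporting a userptr object through dma-buf hands page references to a
 * third party, so an unsynchronized userptr must first be upgraded to a
 * notifier-tracked one.
 */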
static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
        if (obj->userptr.mmu_object)
                return 0;

        return i915_gem_userptr_init__mmu_notifier(obj, 0);
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
        .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
                 I915_GEM_OBJECT_IS_SHRINKABLE,
        .get_pages = i915_gem_userptr_get_pages,
        .put_pages = i915_gem_userptr_put_pages,
        .dmabuf_export = i915_gem_userptr_dmabuf_export,
        .release = i915_gem_userptr_release,
};

/**
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls. Another
 * restriction is that we do not allow userptr surfaces to be pinned to the
 * hardware and so we reject any attempt to create a framebuffer out of a
 * userptr.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
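/*
 * Illustrative userspace-side sketch (not part of the driver), showing
 * how a page-aligned allocation is wrapped by this ioctl; error handling
 * is elided, "fd" is assumed to be an open i915 DRM file descriptor and
 * use_gem_handle() a hypothetical consumer:
 *
 *      struct drm_i915_gem_userptr arg = {
 *              .user_ptr = (__u64)(uintptr_t)ptr,      // page-aligned
 *              .user_size = size,                      // page-aligned
 *              .flags = 0,
 *      };
 *      if (ioctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg) == 0)
 *              use_gem_handle(arg.handle);
 */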
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_userptr *args = data;
        struct drm_i915_gem_object *obj;
        int ret;
        u32 handle;

        if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
                /* We cannot support coherent userptr objects on hw without
                 * LLC and with broken snooping.
                 */
                return -ENODEV;
        }

        if (args->flags & ~(I915_USERPTR_READ_ONLY |
                            I915_USERPTR_UNSYNCHRONIZED))
                return -EINVAL;

        if (offset_in_page(args->user_ptr | args->user_size))
                return -EINVAL;

#if 0
        if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
                       (char __user *)(unsigned long)args->user_ptr, args->user_size))
                return -EFAULT;
#endif

        if (args->flags & I915_USERPTR_READ_ONLY) {
                /* On almost all of the current hw, we cannot tell the GPU that a
                 * page is readonly, so this is just a placeholder in the uAPI.
                 */
                return -ENODEV;
        }

        obj = i915_gem_object_alloc(dev_priv);
        if (obj == NULL)
                return -ENOMEM;

        drm_gem_private_object_init(dev, &obj->base, args->user_size);
        i915_gem_object_init(obj, &i915_gem_userptr_ops);
        obj->base.read_domains = I915_GEM_DOMAIN_CPU;
        obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

        obj->userptr.ptr = args->user_ptr;
        obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);

        /* And keep a pointer to the current->mm for resolving the user pages
         * at binding. This means that we need to hook into the mmu_notifier
         * in order to detect if the mmu is destroyed.
         */
        ret = i915_gem_userptr_init__mm_struct(obj);
        if (ret == 0)
                ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
        if (ret == 0)
                ret = drm_gem_handle_create(file, &obj->base, &handle);

        /* drop reference from allocate - handle holds it now */
        i915_gem_object_put(obj);
        if (ret)
                return ret;

        args->handle = handle;
        return 0;
}

int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
        lockinit(&dev_priv->mm_lock, "i915dmm", 0, LK_CANRECURSE);
        hash_init(dev_priv->mm_structs);

        dev_priv->mm.userptr_wq =
                alloc_workqueue("i915-userptr-acquire",
                                WQ_HIGHPRI | WQ_UNBOUND,
                                0);
        if (!dev_priv->mm.userptr_wq)
                return -ENOMEM;

        return 0;
}

void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv)
{
        destroy_workqueue(dev_priv->mm.userptr_wq);
}