1 /* 2 * Copyright © 2008-2015 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 * 26 */ 27 28 #include <drm/drmP.h> 29 #include <drm/drm_vma_manager.h> 30 #include <drm/i915_drm.h> 31 #include "i915_drv.h" 32 #include "i915_gem_clflush.h" 33 #include "i915_vgpu.h" 34 #include "i915_trace.h" 35 #include "intel_drv.h" 36 #include "intel_frontbuffer.h" 37 #include "intel_mocs.h" 38 #include <linux/dma-fence-array.h> 39 #include <linux/kthread.h> 40 #include <linux/reservation.h> 41 #include <linux/shmem_fs.h> 42 #include <linux/slab.h> 43 #include <linux/stop_machine.h> 44 #include <linux/swap.h> 45 #include <linux/pci.h> 46 #include <linux/dma-buf.h> 47 #include <linux/swiotlb.h> 48 49 #include <sys/mman.h> 50 #include <vm/vm_map.h> 51 #include <vm/vm_param.h> 52 53 static void i915_gem_flush_free_objects(struct drm_i915_private *i915); 54 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); 55 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); 56 57 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj) 58 { 59 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) 60 return false; 61 62 if (!i915_gem_object_is_coherent(obj)) 63 return true; 64 65 return obj->pin_display; 66 } 67 68 static int 69 insert_mappable_node(struct i915_ggtt *ggtt, 70 struct drm_mm_node *node, u32 size) 71 { 72 memset(node, 0, sizeof(*node)); 73 return drm_mm_insert_node_in_range(&ggtt->base.mm, node, 74 size, 0, I915_COLOR_UNEVICTABLE, 75 0, ggtt->mappable_end, 76 DRM_MM_INSERT_LOW); 77 } 78 79 static void 80 remove_mappable_node(struct drm_mm_node *node) 81 { 82 drm_mm_remove_node(node); 83 } 84 85 /* some bookkeeping */ 86 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, 87 u64 size) 88 { 89 lockmgr(&dev_priv->mm.object_stat_lock, LK_EXCLUSIVE); 90 dev_priv->mm.object_count++; 91 dev_priv->mm.object_memory += size; 92 lockmgr(&dev_priv->mm.object_stat_lock, LK_RELEASE); 93 } 94 95 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, 96 u64 size) 97 { 98 lockmgr(&dev_priv->mm.object_stat_lock, LK_EXCLUSIVE); 99 dev_priv->mm.object_count--; 100 dev_priv->mm.object_memory -= size; 101 lockmgr(&dev_priv->mm.object_stat_lock, LK_RELEASE); 102 } 103 104 static int 105 i915_gem_wait_for_error(struct i915_gpu_error *error) 106 { 107 int ret; 108 109 might_sleep(); 
110 111 /* 112 * Only wait 10 seconds for the gpu reset to complete to avoid hanging 113 * userspace. If it takes that long something really bad is going on and 114 * we should simply try to bail out and fail as gracefully as possible. 115 */ 116 ret = wait_event_interruptible_timeout(error->reset_queue, 117 !i915_reset_backoff(error), 118 I915_RESET_TIMEOUT); 119 if (ret == 0) { 120 DRM_ERROR("Timed out waiting for the gpu reset to complete\n"); 121 return -EIO; 122 } else if (ret < 0) { 123 return ret; 124 } else { 125 return 0; 126 } 127 } 128 129 int i915_mutex_lock_interruptible(struct drm_device *dev) 130 { 131 struct drm_i915_private *dev_priv = to_i915(dev); 132 int ret; 133 134 ret = i915_gem_wait_for_error(&dev_priv->gpu_error); 135 if (ret) 136 return ret; 137 138 ret = mutex_lock_interruptible(&dev->struct_mutex); 139 if (ret) 140 return ret; 141 142 return 0; 143 } 144 145 int 146 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 147 struct drm_file *file) 148 { 149 struct drm_i915_private *dev_priv = to_i915(dev); 150 struct i915_ggtt *ggtt = &dev_priv->ggtt; 151 struct drm_i915_gem_get_aperture *args = data; 152 struct i915_vma *vma; 153 size_t pinned; 154 155 pinned = 0; 156 mutex_lock(&dev->struct_mutex); 157 list_for_each_entry(vma, &ggtt->base.active_list, vm_link) 158 if (i915_vma_is_pinned(vma)) 159 pinned += vma->node.size; 160 list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link) 161 if (i915_vma_is_pinned(vma)) 162 pinned += vma->node.size; 163 mutex_unlock(&dev->struct_mutex); 164 165 args->aper_size = ggtt->base.total; 166 args->aper_available_size = args->aper_size - pinned; 167 168 return 0; 169 } 170 171 static struct sg_table * 172 i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) 173 { 174 #if 0 175 struct address_space *mapping = obj->base.filp->f_mapping; 176 #else 177 vm_object_t vm_obj = obj->base.filp; 178 #endif 179 drm_dma_handle_t *phys; 180 struct sg_table *st; 181 struct scatterlist *sg; 182 char *vaddr; 183 int i; 184 185 if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj))) 186 return ERR_PTR(-EINVAL); 187 188 /* Always aligning to the object size, allows a single allocation 189 * to handle all possible callers, and given typical object sizes, 190 * the alignment of the buddy allocation will naturally match. 
191 */ 192 phys = drm_pci_alloc(obj->base.dev, 193 obj->base.size, 194 roundup_pow_of_two(obj->base.size)); 195 if (!phys) 196 return ERR_PTR(-ENOMEM); 197 198 vaddr = phys->vaddr; 199 VM_OBJECT_LOCK(vm_obj); 200 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { 201 struct page *page; 202 char *src; 203 204 #if 0 205 page = shmem_read_mapping_page(mapping, i); 206 #else 207 page = shmem_read_mapping_page(vm_obj, i); 208 #endif 209 if (IS_ERR(page)) { 210 st = ERR_CAST(page); 211 goto err_phys; 212 } 213 214 src = kmap_atomic(page); 215 memcpy(vaddr, src, PAGE_SIZE); 216 drm_clflush_virt_range(vaddr, PAGE_SIZE); 217 kunmap_atomic(src); 218 219 put_page(page); 220 vaddr += PAGE_SIZE; 221 } 222 VM_OBJECT_UNLOCK(vm_obj); 223 224 i915_gem_chipset_flush(to_i915(obj->base.dev)); 225 226 st = kmalloc(sizeof(*st), M_DRM, GFP_KERNEL); 227 if (!st) { 228 st = ERR_PTR(-ENOMEM); 229 goto err_phys; 230 } 231 232 if (sg_alloc_table(st, 1, GFP_KERNEL)) { 233 kfree(st); 234 st = ERR_PTR(-ENOMEM); 235 goto err_phys; 236 } 237 238 sg = st->sgl; 239 sg->offset = 0; 240 sg->length = obj->base.size; 241 242 sg_dma_address(sg) = phys->busaddr; 243 sg_dma_len(sg) = obj->base.size; 244 245 obj->phys_handle = phys; 246 return st; 247 248 err_phys: 249 drm_pci_free(obj->base.dev, phys); 250 return st; 251 } 252 253 static void 254 __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj, 255 struct sg_table *pages, 256 bool needs_clflush) 257 { 258 GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED); 259 260 if (obj->mm.madv == I915_MADV_DONTNEED) 261 obj->mm.dirty = false; 262 263 if (needs_clflush && 264 (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 && 265 !i915_gem_object_is_coherent(obj)) 266 drm_clflush_sg(pages); 267 268 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 269 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 270 } 271 272 static void 273 i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj, 274 struct sg_table *pages) 275 { 276 __i915_gem_object_release_shmem(obj, pages, false); 277 278 if (obj->mm.dirty) { 279 #if 0 280 struct address_space *mapping = obj->base.filp->f_mapping; 281 #else 282 vm_object_t vm_obj = obj->base.filp; 283 #endif 284 char *vaddr = obj->phys_handle->vaddr; 285 int i; 286 287 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { 288 struct page *page; 289 char *dst; 290 291 page = shmem_read_mapping_page(vm_obj, i); 292 if (IS_ERR(page)) 293 continue; 294 295 dst = kmap_atomic(page); 296 drm_clflush_virt_range(vaddr, PAGE_SIZE); 297 memcpy(dst, vaddr, PAGE_SIZE); 298 kunmap_atomic(dst); 299 300 set_page_dirty(page); 301 if (obj->mm.madv == I915_MADV_WILLNEED) 302 mark_page_accessed(page); 303 put_page(page); 304 vaddr += PAGE_SIZE; 305 } 306 obj->mm.dirty = false; 307 } 308 309 sg_free_table(pages); 310 kfree(pages); 311 312 drm_pci_free(obj->base.dev, obj->phys_handle); 313 } 314 315 static void 316 i915_gem_object_release_phys(struct drm_i915_gem_object *obj) 317 { 318 i915_gem_object_unpin_pages(obj); 319 } 320 321 static const struct drm_i915_gem_object_ops i915_gem_phys_ops = { 322 .get_pages = i915_gem_object_get_pages_phys, 323 .put_pages = i915_gem_object_put_pages_phys, 324 .release = i915_gem_object_release_phys, 325 }; 326 327 static const struct drm_i915_gem_object_ops i915_gem_object_ops; 328 329 int i915_gem_object_unbind(struct drm_i915_gem_object *obj) 330 { 331 struct i915_vma *vma; 332 LINUX_LIST_HEAD(still_in_list); 333 int ret; 334 335 lockdep_assert_held(&obj->base.dev->struct_mutex); 336 337 /* Closed vma are removed from the obj->vma_list - 
but they may 338 * still have an active binding on the object. To remove those we 339 * must wait for all rendering to complete to the object (as unbinding 340 * must anyway), and retire the requests. 341 */ 342 ret = i915_gem_object_wait(obj, 343 I915_WAIT_INTERRUPTIBLE | 344 I915_WAIT_LOCKED | 345 I915_WAIT_ALL, 346 MAX_SCHEDULE_TIMEOUT, 347 NULL); 348 if (ret) 349 return ret; 350 351 i915_gem_retire_requests(to_i915(obj->base.dev)); 352 353 while ((vma = list_first_entry_or_null(&obj->vma_list, 354 struct i915_vma, 355 obj_link))) { 356 list_move_tail(&vma->obj_link, &still_in_list); 357 ret = i915_vma_unbind(vma); 358 if (ret) 359 break; 360 } 361 list_splice(&still_in_list, &obj->vma_list); 362 363 return ret; 364 } 365 366 static long 367 i915_gem_object_wait_fence(struct dma_fence *fence, 368 unsigned int flags, 369 long timeout, 370 struct intel_rps_client *rps) 371 { 372 struct drm_i915_gem_request *rq; 373 374 BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1); 375 376 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) 377 return timeout; 378 379 if (!dma_fence_is_i915(fence)) 380 return dma_fence_wait_timeout(fence, 381 flags & I915_WAIT_INTERRUPTIBLE, 382 timeout); 383 384 rq = to_request(fence); 385 if (i915_gem_request_completed(rq)) 386 goto out; 387 388 /* This client is about to stall waiting for the GPU. In many cases 389 * this is undesirable and limits the throughput of the system, as 390 * many clients cannot continue processing user input/output whilst 391 * blocked. RPS autotuning may take tens of milliseconds to respond 392 * to the GPU load and thus incurs additional latency for the client. 393 * We can circumvent that by promoting the GPU frequency to maximum 394 * before we wait. This makes the GPU throttle up much more quickly 395 * (good for benchmarks and user experience, e.g. window animations), 396 * but at a cost of spending more power processing the workload 397 * (bad for battery). Not all clients even want their results 398 * immediately and for them we should just let the GPU select its own 399 * frequency to maximise efficiency. To prevent a single client from 400 * forcing the clocks too high for the whole system, we only allow 401 * each client to waitboost once in a busy period. 402 */ 403 if (rps) { 404 if (INTEL_GEN(rq->i915) >= 6) 405 gen6_rps_boost(rq->i915, rps, rq->emitted_jiffies); 406 else 407 rps = NULL; 408 } 409 410 timeout = i915_wait_request(rq, flags, timeout); 411 412 out: 413 if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq)) 414 i915_gem_request_retire_upto(rq); 415 416 if (rps && i915_gem_request_global_seqno(rq) == intel_engine_last_submit(rq->engine)) { 417 /* The GPU is now idle and this client has stalled. 418 * Since no other client has submitted a request in the 419 * meantime, assume that this client is the only one 420 * supplying work to the GPU but is unable to keep that 421 * work supplied because it is waiting. Since the GPU is 422 * then never kept fully busy, RPS autoclocking will 423 * keep the clocks relatively low, causing further delays. 424 * Compensate by giving the synchronous client credit for 425 * a waitboost next time. 
426 */ 427 lockmgr(&rq->i915->rps.client_lock, LK_EXCLUSIVE); 428 list_del_init(&rps->link); 429 lockmgr(&rq->i915->rps.client_lock, LK_RELEASE); 430 } 431 432 return timeout; 433 } 434 435 static long 436 i915_gem_object_wait_reservation(struct reservation_object *resv, 437 unsigned int flags, 438 long timeout, 439 struct intel_rps_client *rps) 440 { 441 unsigned int seq = __read_seqcount_begin(&resv->seq); 442 struct dma_fence *excl; 443 bool prune_fences = false; 444 445 if (flags & I915_WAIT_ALL) { 446 struct dma_fence **shared; 447 unsigned int count, i; 448 int ret; 449 450 ret = reservation_object_get_fences_rcu(resv, 451 &excl, &count, &shared); 452 if (ret) 453 return ret; 454 455 for (i = 0; i < count; i++) { 456 timeout = i915_gem_object_wait_fence(shared[i], 457 flags, timeout, 458 rps); 459 if (timeout < 0) 460 break; 461 462 dma_fence_put(shared[i]); 463 } 464 465 for (; i < count; i++) 466 dma_fence_put(shared[i]); 467 kfree(shared); 468 469 prune_fences = count && timeout >= 0; 470 } else { 471 excl = reservation_object_get_excl_rcu(resv); 472 } 473 474 if (excl && timeout >= 0) { 475 timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps); 476 prune_fences = timeout >= 0; 477 } 478 479 dma_fence_put(excl); 480 481 /* Oportunistically prune the fences iff we know they have *all* been 482 * signaled and that the reservation object has not been changed (i.e. 483 * no new fences have been added). 484 */ 485 if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) { 486 if (reservation_object_trylock(resv)) { 487 if (!__read_seqcount_retry(&resv->seq, seq)) 488 reservation_object_add_excl_fence(resv, NULL); 489 reservation_object_unlock(resv); 490 } 491 } 492 493 return timeout; 494 } 495 496 static void __fence_set_priority(struct dma_fence *fence, int prio) 497 { 498 struct drm_i915_gem_request *rq; 499 struct intel_engine_cs *engine; 500 501 if (!dma_fence_is_i915(fence)) 502 return; 503 504 rq = to_request(fence); 505 engine = rq->engine; 506 if (!engine->schedule) 507 return; 508 509 engine->schedule(rq, prio); 510 } 511 512 static void fence_set_priority(struct dma_fence *fence, int prio) 513 { 514 /* Recurse once into a fence-array */ 515 if (dma_fence_is_array(fence)) { 516 struct dma_fence_array *array = to_dma_fence_array(fence); 517 int i; 518 519 for (i = 0; i < array->num_fences; i++) 520 __fence_set_priority(array->fences[i], prio); 521 } else { 522 __fence_set_priority(fence, prio); 523 } 524 } 525 526 int 527 i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, 528 unsigned int flags, 529 int prio) 530 { 531 struct dma_fence *excl; 532 533 if (flags & I915_WAIT_ALL) { 534 struct dma_fence **shared; 535 unsigned int count, i; 536 int ret; 537 538 ret = reservation_object_get_fences_rcu(obj->resv, 539 &excl, &count, &shared); 540 if (ret) 541 return ret; 542 543 for (i = 0; i < count; i++) { 544 fence_set_priority(shared[i], prio); 545 dma_fence_put(shared[i]); 546 } 547 548 kfree(shared); 549 } else { 550 excl = reservation_object_get_excl_rcu(obj->resv); 551 } 552 553 if (excl) { 554 fence_set_priority(excl, prio); 555 dma_fence_put(excl); 556 } 557 return 0; 558 } 559 560 /** 561 * Waits for rendering to the object to be completed 562 * @obj: i915 gem object 563 * @flags: how to wait (under a lock, for all rendering or just for writes etc) 564 * @timeout: how long to wait 565 * @rps: client (user process) to charge for any waitboosting 566 */ 567 int 568 i915_gem_object_wait(struct drm_i915_gem_object *obj, 569 unsigned int flags, 570 long 
timeout, 571 struct intel_rps_client *rps) 572 { 573 might_sleep(); 574 #if IS_ENABLED(CONFIG_LOCKDEP) 575 GEM_BUG_ON(debug_locks && 576 !!lockdep_is_held(&obj->base.dev->struct_mutex) != 577 !!(flags & I915_WAIT_LOCKED)); 578 #endif 579 GEM_BUG_ON(timeout < 0); 580 581 timeout = i915_gem_object_wait_reservation(obj->resv, 582 flags, timeout, 583 rps); 584 return timeout < 0 ? timeout : 0; 585 } 586 587 static struct intel_rps_client *to_rps_client(struct drm_file *file) 588 { 589 struct drm_i915_file_private *fpriv = file->driver_priv; 590 591 return &fpriv->rps; 592 } 593 594 int 595 i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, 596 int align) 597 { 598 int ret; 599 600 if (align > obj->base.size) 601 return -EINVAL; 602 603 if (obj->ops == &i915_gem_phys_ops) 604 return 0; 605 606 if (obj->mm.madv != I915_MADV_WILLNEED) 607 return -EFAULT; 608 609 if (obj->base.filp == NULL) 610 return -EINVAL; 611 612 ret = i915_gem_object_unbind(obj); 613 if (ret) 614 return ret; 615 616 __i915_gem_object_put_pages(obj, I915_MM_NORMAL); 617 if (obj->mm.pages) 618 return -EBUSY; 619 620 GEM_BUG_ON(obj->ops != &i915_gem_object_ops); 621 obj->ops = &i915_gem_phys_ops; 622 623 ret = i915_gem_object_pin_pages(obj); 624 if (ret) 625 goto err_xfer; 626 627 return 0; 628 629 err_xfer: 630 obj->ops = &i915_gem_object_ops; 631 return ret; 632 } 633 634 static int 635 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, 636 struct drm_i915_gem_pwrite *args, 637 struct drm_file *file) 638 { 639 void *vaddr = obj->phys_handle->vaddr + args->offset; 640 char __user *user_data = u64_to_user_ptr(args->data_ptr); 641 642 /* We manually control the domain here and pretend that it 643 * remains coherent i.e. in the GTT domain, like shmem_pwrite. 644 */ 645 intel_fb_obj_invalidate(obj, ORIGIN_CPU); 646 if (copy_from_user(vaddr, user_data, args->size)) 647 return -EFAULT; 648 649 drm_clflush_virt_range(vaddr, args->size); 650 i915_gem_chipset_flush(to_i915(obj->base.dev)); 651 652 intel_fb_obj_flush(obj, ORIGIN_CPU); 653 return 0; 654 } 655 656 void *i915_gem_object_alloc(struct drm_i915_private *dev_priv) 657 { 658 return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL); 659 } 660 661 void i915_gem_object_free(struct drm_i915_gem_object *obj) 662 { 663 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 664 kmem_cache_free(dev_priv->objects, obj); 665 } 666 667 static int 668 i915_gem_create(struct drm_file *file, 669 struct drm_i915_private *dev_priv, 670 uint64_t size, 671 uint32_t *handle_p) 672 { 673 struct drm_i915_gem_object *obj; 674 int ret; 675 u32 handle; 676 677 size = roundup(size, PAGE_SIZE); 678 if (size == 0) 679 return -EINVAL; 680 681 /* Allocate the new object */ 682 obj = i915_gem_object_create(dev_priv, size); 683 if (IS_ERR(obj)) 684 return PTR_ERR(obj); 685 686 ret = drm_gem_handle_create(file, &obj->base, &handle); 687 /* drop reference from allocate - handle holds it now */ 688 i915_gem_object_put(obj); 689 if (ret) 690 return ret; 691 692 *handle_p = handle; 693 return 0; 694 } 695 696 int 697 i915_gem_dumb_create(struct drm_file *file, 698 struct drm_device *dev, 699 struct drm_mode_create_dumb *args) 700 { 701 /* have to work out size/pitch and return them */ 702 args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64); 703 args->size = args->pitch * args->height; 704 return i915_gem_create(file, to_i915(dev), 705 args->size, &args->handle); 706 } 707 708 /** 709 * Creates a new mm object and returns a handle to it. 
710 * @dev: drm device pointer 711 * @data: ioctl data blob 712 * @file: drm file pointer 713 */ 714 int 715 i915_gem_create_ioctl(struct drm_device *dev, void *data, 716 struct drm_file *file) 717 { 718 struct drm_i915_private *dev_priv = to_i915(dev); 719 struct drm_i915_gem_create *args = data; 720 721 i915_gem_flush_free_objects(dev_priv); 722 723 return i915_gem_create(file, dev_priv, 724 args->size, &args->handle); 725 } 726 727 static inline int 728 __copy_to_user_swizzled(char __user *cpu_vaddr, 729 const char *gpu_vaddr, int gpu_offset, 730 int length) 731 { 732 int ret, cpu_offset = 0; 733 734 while (length > 0) { 735 int cacheline_end = ALIGN(gpu_offset + 1, 64); 736 int this_length = min(cacheline_end - gpu_offset, length); 737 int swizzled_gpu_offset = gpu_offset ^ 64; 738 739 ret = __copy_to_user(cpu_vaddr + cpu_offset, 740 gpu_vaddr + swizzled_gpu_offset, 741 this_length); 742 if (ret) 743 return ret + length; 744 745 cpu_offset += this_length; 746 gpu_offset += this_length; 747 length -= this_length; 748 } 749 750 return 0; 751 } 752 753 static inline int 754 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset, 755 const char __user *cpu_vaddr, 756 int length) 757 { 758 int ret, cpu_offset = 0; 759 760 while (length > 0) { 761 int cacheline_end = ALIGN(gpu_offset + 1, 64); 762 int this_length = min(cacheline_end - gpu_offset, length); 763 int swizzled_gpu_offset = gpu_offset ^ 64; 764 765 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset, 766 cpu_vaddr + cpu_offset, 767 this_length); 768 if (ret) 769 return ret + length; 770 771 cpu_offset += this_length; 772 gpu_offset += this_length; 773 length -= this_length; 774 } 775 776 return 0; 777 } 778 779 /* 780 * Pins the specified object's pages and synchronizes the object with 781 * GPU accesses. Sets needs_clflush to non-zero if the caller should 782 * flush the object from the CPU cache. 783 */ 784 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, 785 unsigned int *needs_clflush) 786 { 787 int ret; 788 789 lockdep_assert_held(&obj->base.dev->struct_mutex); 790 791 *needs_clflush = 0; 792 if (!i915_gem_object_has_struct_page(obj)) 793 return -ENODEV; 794 795 ret = i915_gem_object_wait(obj, 796 I915_WAIT_INTERRUPTIBLE | 797 I915_WAIT_LOCKED, 798 MAX_SCHEDULE_TIMEOUT, 799 NULL); 800 if (ret) 801 return ret; 802 803 ret = i915_gem_object_pin_pages(obj); 804 if (ret) 805 return ret; 806 807 if (i915_gem_object_is_coherent(obj) || 808 !static_cpu_has(X86_FEATURE_CLFLUSH)) { 809 ret = i915_gem_object_set_to_cpu_domain(obj, false); 810 if (ret) 811 goto err_unpin; 812 else 813 goto out; 814 } 815 816 i915_gem_object_flush_gtt_write_domain(obj); 817 818 /* If we're not in the cpu read domain, set ourself into the gtt 819 * read domain and manually flush cachelines (if required). This 820 * optimizes for the case when the gpu will dirty the data 821 * anyway again before the next pread happens. 
822 */ 823 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) 824 *needs_clflush = CLFLUSH_BEFORE; 825 826 out: 827 /* return with the pages pinned */ 828 return 0; 829 830 err_unpin: 831 i915_gem_object_unpin_pages(obj); 832 return ret; 833 } 834 835 int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj, 836 unsigned int *needs_clflush) 837 { 838 int ret; 839 840 lockdep_assert_held(&obj->base.dev->struct_mutex); 841 842 *needs_clflush = 0; 843 if (!i915_gem_object_has_struct_page(obj)) 844 return -ENODEV; 845 846 ret = i915_gem_object_wait(obj, 847 I915_WAIT_INTERRUPTIBLE | 848 I915_WAIT_LOCKED | 849 I915_WAIT_ALL, 850 MAX_SCHEDULE_TIMEOUT, 851 NULL); 852 if (ret) 853 return ret; 854 855 ret = i915_gem_object_pin_pages(obj); 856 if (ret) 857 return ret; 858 859 if (i915_gem_object_is_coherent(obj) || 860 !static_cpu_has(X86_FEATURE_CLFLUSH)) { 861 ret = i915_gem_object_set_to_cpu_domain(obj, true); 862 if (ret) 863 goto err_unpin; 864 else 865 goto out; 866 } 867 868 i915_gem_object_flush_gtt_write_domain(obj); 869 870 /* If we're not in the cpu write domain, set ourself into the 871 * gtt write domain and manually flush cachelines (as required). 872 * This optimizes for the case when the gpu will use the data 873 * right away and we therefore have to clflush anyway. 874 */ 875 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) 876 *needs_clflush |= CLFLUSH_AFTER; 877 878 /* Same trick applies to invalidate partially written cachelines read 879 * before writing. 880 */ 881 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) 882 *needs_clflush |= CLFLUSH_BEFORE; 883 884 out: 885 intel_fb_obj_invalidate(obj, ORIGIN_CPU); 886 obj->mm.dirty = true; 887 /* return with the pages pinned */ 888 return 0; 889 890 err_unpin: 891 i915_gem_object_unpin_pages(obj); 892 return ret; 893 } 894 895 static void 896 shmem_clflush_swizzled_range(char *addr, unsigned long length, 897 bool swizzled) 898 { 899 if (unlikely(swizzled)) { 900 unsigned long start = (unsigned long) addr; 901 unsigned long end = (unsigned long) addr + length; 902 903 /* For swizzling simply ensure that we always flush both 904 * channels. Lame, but simple and it works. Swizzled 905 * pwrite/pread is far from a hotpath - current userspace 906 * doesn't use it at all. */ 907 start = round_down(start, 128); 908 end = round_up(end, 128); 909 910 drm_clflush_virt_range((void *)start, end - start); 911 } else { 912 drm_clflush_virt_range(addr, length); 913 } 914 915 } 916 917 /* Only difference to the fast-path function is that this can handle bit17 918 * and uses non-atomic copy and kmap functions. */ 919 static int 920 shmem_pread_slow(struct page *page, int offset, int length, 921 char __user *user_data, 922 bool page_do_bit17_swizzling, bool needs_clflush) 923 { 924 char *vaddr; 925 int ret; 926 927 vaddr = kmap(page); 928 if (needs_clflush) 929 shmem_clflush_swizzled_range(vaddr + offset, length, 930 page_do_bit17_swizzling); 931 932 if (page_do_bit17_swizzling) 933 ret = __copy_to_user_swizzled(user_data, vaddr, offset, length); 934 else 935 ret = __copy_to_user(user_data, vaddr + offset, length); 936 kunmap(page); 937 938 return ret ? 
- EFAULT : 0; 939 } 940 941 static int 942 shmem_pread(struct page *page, int offset, int length, char __user *user_data, 943 bool page_do_bit17_swizzling, bool needs_clflush) 944 { 945 int ret; 946 947 ret = -ENODEV; 948 if (!page_do_bit17_swizzling) { 949 char *vaddr = kmap_atomic(page); 950 951 if (needs_clflush) 952 drm_clflush_virt_range(vaddr + offset, length); 953 ret = __copy_to_user_inatomic(user_data, vaddr + offset, length); 954 kunmap_atomic(vaddr); 955 } 956 if (ret == 0) 957 return 0; 958 959 return shmem_pread_slow(page, offset, length, user_data, 960 page_do_bit17_swizzling, needs_clflush); 961 } 962 963 static int 964 i915_gem_shmem_pread(struct drm_i915_gem_object *obj, 965 struct drm_i915_gem_pread *args) 966 { 967 char __user *user_data; 968 u64 remain; 969 unsigned int obj_do_bit17_swizzling; 970 unsigned int needs_clflush; 971 unsigned int idx, offset; 972 int ret; 973 974 obj_do_bit17_swizzling = 0; 975 if (i915_gem_object_needs_bit17_swizzle(obj)) 976 obj_do_bit17_swizzling = BIT(17); 977 978 ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex); 979 if (ret) 980 return ret; 981 982 ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush); 983 mutex_unlock(&obj->base.dev->struct_mutex); 984 if (ret) 985 return ret; 986 987 remain = args->size; 988 user_data = u64_to_user_ptr(args->data_ptr); 989 offset = offset_in_page(args->offset); 990 for (idx = args->offset >> PAGE_SHIFT; remain; idx++) { 991 struct page *page = i915_gem_object_get_page(obj, idx); 992 int length; 993 994 length = remain; 995 if (offset + length > PAGE_SIZE) 996 length = PAGE_SIZE - offset; 997 998 ret = shmem_pread(page, offset, length, user_data, 999 page_to_phys(page) & obj_do_bit17_swizzling, 1000 needs_clflush); 1001 if (ret) 1002 break; 1003 1004 remain -= length; 1005 user_data += length; 1006 offset = 0; 1007 } 1008 1009 i915_gem_obj_finish_shmem_access(obj); 1010 return ret; 1011 } 1012 1013 static inline bool 1014 gtt_user_read(struct io_mapping *mapping, 1015 loff_t base, int offset, 1016 char __user *user_data, int length) 1017 { 1018 void *vaddr; 1019 unsigned long unwritten; 1020 1021 /* We can use the cpu mem copy function because this is X86. 
*/ 1022 vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base); 1023 unwritten = __copy_to_user_inatomic(user_data, vaddr + offset, length); 1024 io_mapping_unmap_atomic(vaddr); 1025 if (unwritten) { 1026 vaddr = (void __force *) 1027 io_mapping_map_wc(mapping, base, PAGE_SIZE); 1028 unwritten = copy_to_user(user_data, vaddr + offset, length); 1029 io_mapping_unmap(vaddr); 1030 } 1031 return unwritten; 1032 } 1033 1034 static int 1035 i915_gem_gtt_pread(struct drm_i915_gem_object *obj, 1036 const struct drm_i915_gem_pread *args) 1037 { 1038 struct drm_i915_private *i915 = to_i915(obj->base.dev); 1039 struct i915_ggtt *ggtt = &i915->ggtt; 1040 struct drm_mm_node node; 1041 struct i915_vma *vma; 1042 void __user *user_data; 1043 u64 remain, offset; 1044 int ret; 1045 1046 ret = mutex_lock_interruptible(&i915->drm.struct_mutex); 1047 if (ret) 1048 return ret; 1049 1050 intel_runtime_pm_get(i915); 1051 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 1052 PIN_MAPPABLE | PIN_NONBLOCK); 1053 if (!IS_ERR(vma)) { 1054 node.start = i915_ggtt_offset(vma); 1055 node.allocated = false; 1056 ret = i915_vma_put_fence(vma); 1057 if (ret) { 1058 i915_vma_unpin(vma); 1059 vma = ERR_PTR(ret); 1060 } 1061 } 1062 if (IS_ERR(vma)) { 1063 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE); 1064 if (ret) 1065 goto out_unlock; 1066 GEM_BUG_ON(!node.allocated); 1067 } 1068 1069 ret = i915_gem_object_set_to_gtt_domain(obj, false); 1070 if (ret) 1071 goto out_unpin; 1072 1073 mutex_unlock(&i915->drm.struct_mutex); 1074 1075 user_data = u64_to_user_ptr(args->data_ptr); 1076 remain = args->size; 1077 offset = args->offset; 1078 1079 while (remain > 0) { 1080 /* Operation in this page 1081 * 1082 * page_base = page offset within aperture 1083 * page_offset = offset within page 1084 * page_length = bytes to copy for this page 1085 */ 1086 u32 page_base = node.start; 1087 unsigned page_offset = offset_in_page(offset); 1088 unsigned page_length = PAGE_SIZE - page_offset; 1089 page_length = remain < page_length ? remain : page_length; 1090 if (node.allocated) { 1091 wmb(); 1092 ggtt->base.insert_page(&ggtt->base, 1093 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT), 1094 node.start, I915_CACHE_NONE, 0); 1095 wmb(); 1096 } else { 1097 page_base += offset & LINUX_PAGE_MASK; 1098 } 1099 1100 if (gtt_user_read(&ggtt->mappable, page_base, page_offset, 1101 user_data, page_length)) { 1102 ret = -EFAULT; 1103 break; 1104 } 1105 1106 remain -= page_length; 1107 user_data += page_length; 1108 offset += page_length; 1109 } 1110 1111 mutex_lock(&i915->drm.struct_mutex); 1112 out_unpin: 1113 if (node.allocated) { 1114 wmb(); 1115 ggtt->base.clear_range(&ggtt->base, 1116 node.start, node.size); 1117 remove_mappable_node(&node); 1118 } else { 1119 i915_vma_unpin(vma); 1120 } 1121 out_unlock: 1122 intel_runtime_pm_put(i915); 1123 mutex_unlock(&i915->drm.struct_mutex); 1124 1125 return ret; 1126 } 1127 1128 /** 1129 * Reads data from the object referenced by handle. 1130 * @dev: drm device pointer 1131 * @data: ioctl data blob 1132 * @file: drm file pointer 1133 * 1134 * On error, the contents of *data are undefined. 
1135 */ 1136 int 1137 i915_gem_pread_ioctl(struct drm_device *dev, void *data, 1138 struct drm_file *file) 1139 { 1140 struct drm_i915_gem_pread *args = data; 1141 struct drm_i915_gem_object *obj; 1142 int ret; 1143 1144 if (args->size == 0) 1145 return 0; 1146 1147 #if 0 1148 if (!access_ok(VERIFY_WRITE, 1149 u64_to_user_ptr(args->data_ptr), 1150 args->size)) 1151 return -EFAULT; 1152 #endif 1153 1154 obj = i915_gem_object_lookup(file, args->handle); 1155 if (!obj) 1156 return -ENOENT; 1157 1158 /* Bounds check source. */ 1159 if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) { 1160 ret = -EINVAL; 1161 goto out; 1162 } 1163 1164 trace_i915_gem_object_pread(obj, args->offset, args->size); 1165 1166 ret = i915_gem_object_wait(obj, 1167 I915_WAIT_INTERRUPTIBLE, 1168 MAX_SCHEDULE_TIMEOUT, 1169 to_rps_client(file)); 1170 if (ret) 1171 goto out; 1172 1173 ret = i915_gem_object_pin_pages(obj); 1174 if (ret) 1175 goto out; 1176 1177 ret = i915_gem_shmem_pread(obj, args); 1178 if (ret == -EFAULT || ret == -ENODEV) 1179 ret = i915_gem_gtt_pread(obj, args); 1180 1181 i915_gem_object_unpin_pages(obj); 1182 out: 1183 i915_gem_object_put(obj); 1184 return ret; 1185 } 1186 1187 /* This is the fast write path which cannot handle 1188 * page faults in the source data 1189 */ 1190 1191 static inline bool 1192 ggtt_write(struct io_mapping *mapping, 1193 loff_t base, int offset, 1194 char __user *user_data, int length) 1195 { 1196 void *vaddr; 1197 unsigned long unwritten; 1198 1199 /* We can use the cpu mem copy function because this is X86. */ 1200 vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base); 1201 unwritten = __copy_from_user_inatomic_nocache(vaddr + offset, 1202 user_data, length); 1203 io_mapping_unmap_atomic(vaddr); 1204 if (unwritten) { 1205 vaddr = (void __force *) 1206 io_mapping_map_wc(mapping, base, PAGE_SIZE); 1207 unwritten = copy_from_user(vaddr + offset, user_data, length); 1208 io_mapping_unmap(vaddr); 1209 } 1210 1211 return unwritten; 1212 } 1213 1214 /** 1215 * This is the fast pwrite path, where we copy the data directly from the 1216 * user into the GTT, uncached. 
1217 * @obj: i915 GEM object 1218 * @args: pwrite arguments structure 1219 */ 1220 static int 1221 i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, 1222 const struct drm_i915_gem_pwrite *args) 1223 { 1224 struct drm_i915_private *i915 = to_i915(obj->base.dev); 1225 struct i915_ggtt *ggtt = &i915->ggtt; 1226 struct drm_mm_node node; 1227 struct i915_vma *vma; 1228 u64 remain, offset; 1229 void __user *user_data; 1230 int ret; 1231 1232 ret = mutex_lock_interruptible(&i915->drm.struct_mutex); 1233 if (ret) 1234 return ret; 1235 1236 intel_runtime_pm_get(i915); 1237 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 1238 PIN_MAPPABLE | PIN_NONBLOCK); 1239 if (!IS_ERR(vma)) { 1240 node.start = i915_ggtt_offset(vma); 1241 node.allocated = false; 1242 ret = i915_vma_put_fence(vma); 1243 if (ret) { 1244 i915_vma_unpin(vma); 1245 vma = ERR_PTR(ret); 1246 } 1247 } 1248 if (IS_ERR(vma)) { 1249 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE); 1250 if (ret) 1251 goto out_unlock; 1252 GEM_BUG_ON(!node.allocated); 1253 } 1254 1255 ret = i915_gem_object_set_to_gtt_domain(obj, true); 1256 if (ret) 1257 goto out_unpin; 1258 1259 mutex_unlock(&i915->drm.struct_mutex); 1260 1261 intel_fb_obj_invalidate(obj, ORIGIN_CPU); 1262 1263 user_data = u64_to_user_ptr(args->data_ptr); 1264 offset = args->offset; 1265 remain = args->size; 1266 while (remain) { 1267 /* Operation in this page 1268 * 1269 * page_base = page offset within aperture 1270 * page_offset = offset within page 1271 * page_length = bytes to copy for this page 1272 */ 1273 u32 page_base = node.start; 1274 unsigned int page_offset = offset_in_page(offset); 1275 unsigned int page_length = PAGE_SIZE - page_offset; 1276 page_length = remain < page_length ? remain : page_length; 1277 if (node.allocated) { 1278 wmb(); /* flush the write before we modify the GGTT */ 1279 ggtt->base.insert_page(&ggtt->base, 1280 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT), 1281 node.start, I915_CACHE_NONE, 0); 1282 wmb(); /* flush modifications to the GGTT (insert_page) */ 1283 } else { 1284 page_base += offset & LINUX_PAGE_MASK; 1285 } 1286 /* If we get a fault while copying data, then (presumably) our 1287 * source page isn't available. Return the error and we'll 1288 * retry in the slow path. 1289 * If the object is non-shmem backed, we retry again with the 1290 * path that handles page fault. 
1291 */ 1292 if (ggtt_write(&ggtt->mappable, page_base, page_offset, 1293 user_data, page_length)) { 1294 ret = -EFAULT; 1295 break; 1296 } 1297 1298 remain -= page_length; 1299 user_data += page_length; 1300 offset += page_length; 1301 } 1302 intel_fb_obj_flush(obj, ORIGIN_CPU); 1303 1304 mutex_lock(&i915->drm.struct_mutex); 1305 out_unpin: 1306 if (node.allocated) { 1307 wmb(); 1308 ggtt->base.clear_range(&ggtt->base, 1309 node.start, node.size); 1310 remove_mappable_node(&node); 1311 } else { 1312 i915_vma_unpin(vma); 1313 } 1314 out_unlock: 1315 intel_runtime_pm_put(i915); 1316 mutex_unlock(&i915->drm.struct_mutex); 1317 return ret; 1318 } 1319 1320 static int 1321 shmem_pwrite_slow(struct page *page, int offset, int length, 1322 char __user *user_data, 1323 bool page_do_bit17_swizzling, 1324 bool needs_clflush_before, 1325 bool needs_clflush_after) 1326 { 1327 char *vaddr; 1328 int ret; 1329 1330 vaddr = kmap(page); 1331 if (unlikely(needs_clflush_before || page_do_bit17_swizzling)) 1332 shmem_clflush_swizzled_range(vaddr + offset, length, 1333 page_do_bit17_swizzling); 1334 if (page_do_bit17_swizzling) 1335 ret = __copy_from_user_swizzled(vaddr, offset, user_data, 1336 length); 1337 else 1338 ret = __copy_from_user(vaddr + offset, user_data, length); 1339 if (needs_clflush_after) 1340 shmem_clflush_swizzled_range(vaddr + offset, length, 1341 page_do_bit17_swizzling); 1342 kunmap(page); 1343 1344 return ret ? -EFAULT : 0; 1345 } 1346 1347 /* Per-page copy function for the shmem pwrite fastpath. 1348 * Flushes invalid cachelines before writing to the target if 1349 * needs_clflush_before is set and flushes out any written cachelines after 1350 * writing if needs_clflush is set. 1351 */ 1352 static int 1353 shmem_pwrite(struct page *page, int offset, int len, char __user *user_data, 1354 bool page_do_bit17_swizzling, 1355 bool needs_clflush_before, 1356 bool needs_clflush_after) 1357 { 1358 int ret; 1359 1360 ret = -ENODEV; 1361 if (!page_do_bit17_swizzling) { 1362 char *vaddr = kmap_atomic(page); 1363 1364 if (needs_clflush_before) 1365 drm_clflush_virt_range(vaddr + offset, len); 1366 ret = __copy_from_user_inatomic(vaddr + offset, user_data, len); 1367 if (needs_clflush_after) 1368 drm_clflush_virt_range(vaddr + offset, len); 1369 1370 kunmap_atomic(vaddr); 1371 } 1372 if (ret == 0) 1373 return ret; 1374 1375 return shmem_pwrite_slow(page, offset, len, user_data, 1376 page_do_bit17_swizzling, 1377 needs_clflush_before, 1378 needs_clflush_after); 1379 } 1380 1381 static int 1382 i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj, 1383 const struct drm_i915_gem_pwrite *args) 1384 { 1385 struct drm_i915_private *i915 = to_i915(obj->base.dev); 1386 void __user *user_data; 1387 u64 remain; 1388 unsigned int obj_do_bit17_swizzling; 1389 unsigned int partial_cacheline_write; 1390 unsigned int needs_clflush; 1391 unsigned int offset, idx; 1392 int ret; 1393 #ifdef __DragonFly__ 1394 vm_object_t vm_obj; 1395 #endif 1396 1397 ret = mutex_lock_interruptible(&i915->drm.struct_mutex); 1398 if (ret) 1399 return ret; 1400 1401 ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush); 1402 mutex_unlock(&i915->drm.struct_mutex); 1403 if (ret) 1404 return ret; 1405 1406 obj_do_bit17_swizzling = 0; 1407 if (i915_gem_object_needs_bit17_swizzle(obj)) 1408 obj_do_bit17_swizzling = BIT(17); 1409 1410 /* If we don't overwrite a cacheline completely we need to be 1411 * careful to have up-to-date data by first clflushing. Don't 1412 * overcomplicate things and flush the entire patch. 
1413 */ 1414 partial_cacheline_write = 0; 1415 if (needs_clflush & CLFLUSH_BEFORE) 1416 partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1; 1417 1418 user_data = u64_to_user_ptr(args->data_ptr); 1419 remain = args->size; 1420 offset = offset_in_page(args->offset); 1421 #ifdef __DragonFly__ 1422 vm_obj = obj->base.filp; 1423 VM_OBJECT_LOCK(vm_obj); 1424 vm_object_pip_add(vm_obj, 1); 1425 #endif 1426 for (idx = args->offset >> PAGE_SHIFT; remain; idx++) { 1427 struct page *page = i915_gem_object_get_page(obj, idx); 1428 int length; 1429 1430 length = remain; 1431 if (offset + length > PAGE_SIZE) 1432 length = PAGE_SIZE - offset; 1433 1434 ret = shmem_pwrite(page, offset, length, user_data, 1435 page_to_phys(page) & obj_do_bit17_swizzling, 1436 (offset | length) & partial_cacheline_write, 1437 needs_clflush & CLFLUSH_AFTER); 1438 if (ret) 1439 break; 1440 1441 remain -= length; 1442 user_data += length; 1443 offset = 0; 1444 } 1445 #ifdef __DragonFly__ 1446 if (vm_obj != obj->base.filp) { 1447 kprintf("i915_gem_shmem_pwrite: VM_OBJECT CHANGED! %p %p\n", 1448 vm_obj, obj->base.filp); 1449 } 1450 vm_object_pip_wakeup(vm_obj); 1451 VM_OBJECT_UNLOCK(vm_obj); 1452 #endif 1453 1454 intel_fb_obj_flush(obj, ORIGIN_CPU); 1455 i915_gem_obj_finish_shmem_access(obj); 1456 return ret; 1457 } 1458 1459 /** 1460 * Writes data to the object referenced by handle. 1461 * @dev: drm device 1462 * @data: ioctl data blob 1463 * @file: drm file 1464 * 1465 * On error, the contents of the buffer that were to be modified are undefined. 1466 */ 1467 int 1468 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, 1469 struct drm_file *file) 1470 { 1471 struct drm_i915_gem_pwrite *args = data; 1472 struct drm_i915_gem_object *obj; 1473 int ret; 1474 1475 if (args->size == 0) 1476 return 0; 1477 1478 #if 0 1479 if (!access_ok(VERIFY_READ, 1480 u64_to_user_ptr(args->data_ptr), 1481 args->size)) 1482 return -EFAULT; 1483 #endif 1484 1485 obj = i915_gem_object_lookup(file, args->handle); 1486 if (!obj) 1487 return -ENOENT; 1488 1489 /* Bounds check destination. */ 1490 if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) { 1491 ret = -EINVAL; 1492 goto err; 1493 } 1494 1495 trace_i915_gem_object_pwrite(obj, args->offset, args->size); 1496 1497 ret = -ENODEV; 1498 if (obj->ops->pwrite) 1499 ret = obj->ops->pwrite(obj, args); 1500 if (ret != -ENODEV) 1501 goto err; 1502 1503 ret = i915_gem_object_wait(obj, 1504 I915_WAIT_INTERRUPTIBLE | 1505 I915_WAIT_ALL, 1506 MAX_SCHEDULE_TIMEOUT, 1507 to_rps_client(file)); 1508 if (ret) 1509 goto err; 1510 1511 ret = i915_gem_object_pin_pages(obj); 1512 if (ret) 1513 goto err; 1514 1515 ret = -EFAULT; 1516 /* We can only do the GTT pwrite on untiled buffers, as otherwise 1517 * it would end up going through the fenced access, and we'll get 1518 * different detiling behavior between reading and writing. 1519 * pread/pwrite currently are reading and writing from the CPU 1520 * perspective, requiring manual detiling by the client. 1521 */ 1522 if (!i915_gem_object_has_struct_page(obj) || 1523 cpu_write_needs_clflush(obj)) 1524 /* Note that the gtt paths might fail with non-page-backed user 1525 * pointers (e.g. gtt mappings when moving data between 1526 * textures). Fallback to the shmem path in that case. 
1527 */ 1528 ret = i915_gem_gtt_pwrite_fast(obj, args); 1529 1530 if (ret == -EFAULT || ret == -ENOSPC) { 1531 if (obj->phys_handle) 1532 ret = i915_gem_phys_pwrite(obj, args, file); 1533 else 1534 ret = i915_gem_shmem_pwrite(obj, args); 1535 } 1536 1537 i915_gem_object_unpin_pages(obj); 1538 err: 1539 i915_gem_object_put(obj); 1540 return ret; 1541 } 1542 1543 static inline enum fb_op_origin 1544 write_origin(struct drm_i915_gem_object *obj, unsigned domain) 1545 { 1546 return (domain == I915_GEM_DOMAIN_GTT ? 1547 obj->frontbuffer_ggtt_origin : ORIGIN_CPU); 1548 } 1549 1550 static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) 1551 { 1552 struct drm_i915_private *i915; 1553 struct list_head *list; 1554 struct i915_vma *vma; 1555 1556 list_for_each_entry(vma, &obj->vma_list, obj_link) { 1557 if (!i915_vma_is_ggtt(vma)) 1558 break; 1559 1560 if (i915_vma_is_active(vma)) 1561 continue; 1562 1563 if (!drm_mm_node_allocated(&vma->node)) 1564 continue; 1565 1566 list_move_tail(&vma->vm_link, &vma->vm->inactive_list); 1567 } 1568 1569 i915 = to_i915(obj->base.dev); 1570 list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list; 1571 list_move_tail(&obj->global_link, list); 1572 } 1573 1574 /** 1575 * Called when user space prepares to use an object with the CPU, either 1576 * through the mmap ioctl's mapping or a GTT mapping. 1577 * @dev: drm device 1578 * @data: ioctl data blob 1579 * @file: drm file 1580 */ 1581 int 1582 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, 1583 struct drm_file *file) 1584 { 1585 struct drm_i915_gem_set_domain *args = data; 1586 struct drm_i915_gem_object *obj; 1587 uint32_t read_domains = args->read_domains; 1588 uint32_t write_domain = args->write_domain; 1589 int err; 1590 1591 /* Only handle setting domains to types used by the CPU. */ 1592 if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS) 1593 return -EINVAL; 1594 1595 /* Having something in the write domain implies it's in the read 1596 * domain, and only that read domain. Enforce that in the request. 1597 */ 1598 if (write_domain != 0 && read_domains != write_domain) 1599 return -EINVAL; 1600 1601 obj = i915_gem_object_lookup(file, args->handle); 1602 if (!obj) 1603 return -ENOENT; 1604 1605 /* Try to flush the object off the GPU without holding the lock. 1606 * We will repeat the flush holding the lock in the normal manner 1607 * to catch cases where we are gazumped. 1608 */ 1609 err = i915_gem_object_wait(obj, 1610 I915_WAIT_INTERRUPTIBLE | 1611 (write_domain ? I915_WAIT_ALL : 0), 1612 MAX_SCHEDULE_TIMEOUT, 1613 to_rps_client(file)); 1614 if (err) 1615 goto out; 1616 1617 /* Flush and acquire obj->pages so that we are coherent through 1618 * direct access in memory with previous cached writes through 1619 * shmemfs and that our cache domain tracking remains valid. 1620 * For example, if the obj->filp was moved to swap without us 1621 * being notified and releasing the pages, we would mistakenly 1622 * continue to assume that the obj remained out of the CPU cached 1623 * domain. 
1624 */ 1625 err = i915_gem_object_pin_pages(obj); 1626 if (err) 1627 goto out; 1628 1629 err = i915_mutex_lock_interruptible(dev); 1630 if (err) 1631 goto out_unpin; 1632 1633 if (read_domains & I915_GEM_DOMAIN_GTT) 1634 err = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); 1635 else 1636 err = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); 1637 1638 /* And bump the LRU for this access */ 1639 i915_gem_object_bump_inactive_ggtt(obj); 1640 1641 mutex_unlock(&dev->struct_mutex); 1642 1643 if (write_domain != 0) 1644 intel_fb_obj_invalidate(obj, write_origin(obj, write_domain)); 1645 1646 out_unpin: 1647 i915_gem_object_unpin_pages(obj); 1648 out: 1649 i915_gem_object_put(obj); 1650 return err; 1651 } 1652 1653 /** 1654 * Called when user space has done writes to this buffer 1655 * @dev: drm device 1656 * @data: ioctl data blob 1657 * @file: drm file 1658 */ 1659 int 1660 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, 1661 struct drm_file *file) 1662 { 1663 struct drm_i915_gem_sw_finish *args = data; 1664 struct drm_i915_gem_object *obj; 1665 1666 obj = i915_gem_object_lookup(file, args->handle); 1667 if (!obj) 1668 return -ENOENT; 1669 1670 /* Pinned buffers may be scanout, so flush the cache */ 1671 i915_gem_object_flush_if_display(obj); 1672 i915_gem_object_put(obj); 1673 1674 return 0; 1675 } 1676 1677 /** 1678 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address 1679 * it is mapped to. 1680 * @dev: drm device 1681 * @data: ioctl data blob 1682 * @file: drm file 1683 * 1684 * While the mapping holds a reference on the contents of the object, it doesn't 1685 * imply a ref on the object itself. 1686 * 1687 * IMPORTANT: 1688 * 1689 * DRM driver writers who look a this function as an example for how to do GEM 1690 * mmap support, please don't implement mmap support like here. The modern way 1691 * to implement DRM mmap support is with an mmap offset ioctl (like 1692 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly. 1693 * That way debug tooling like valgrind will understand what's going on, hiding 1694 * the mmap call in a driver private ioctl will break that. The i915 driver only 1695 * does cpu mmaps this way because we didn't know better. 1696 */ 1697 int 1698 i915_gem_mmap_ioctl(struct drm_device *dev, void *data, 1699 struct drm_file *file) 1700 { 1701 struct drm_i915_gem_mmap *args = data; 1702 struct drm_i915_gem_object *obj; 1703 unsigned long addr; 1704 #ifdef __DragonFly__ 1705 struct proc *p = curproc; 1706 vm_map_t map = &p->p_vmspace->vm_map; 1707 vm_size_t size; 1708 int error = 0, rv; 1709 #endif 1710 1711 if (args->flags & ~(I915_MMAP_WC)) 1712 return -EINVAL; 1713 1714 if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT)) 1715 return -ENODEV; 1716 1717 obj = i915_gem_object_lookup(file, args->handle); 1718 if (!obj) 1719 return -ENOENT; 1720 1721 /* prime objects have no backing filp to GEM mmap 1722 * pages from. 1723 */ 1724 if (!obj->base.filp) { 1725 i915_gem_object_put(obj); 1726 return -EINVAL; 1727 } 1728 1729 if (args->size == 0) 1730 goto out; 1731 1732 size = round_page(args->size); 1733 if (map->size + size > p->p_rlimit[RLIMIT_VMEM].rlim_cur) { 1734 error = -ENOMEM; 1735 goto out; 1736 } 1737 1738 /* 1739 * Call hint to ensure that NULL is not returned as a valid address 1740 * and to reduce vm_map traversals. XXX causes instability, use a 1741 * fixed low address as the start point instead to avoid the NULL 1742 * return issue. 
1743 */ 1744 addr = PAGE_SIZE; 1745 1746 /* 1747 * Use 256KB alignment. It is unclear why this matters for a 1748 * virtual address but it appears to fix a number of application/X 1749 * crashes and kms console switching is much faster. 1750 */ 1751 vm_object_hold(obj->base.filp); 1752 vm_object_reference_locked(obj->base.filp); 1753 vm_object_drop(obj->base.filp); 1754 1755 /* Something gets wrong here: fails to mmap 4096 */ 1756 rv = vm_map_find(map, obj->base.filp, NULL, 1757 args->offset, &addr, args->size, 1758 256 * 1024, /* align */ 1759 TRUE, /* fitit */ 1760 VM_MAPTYPE_NORMAL, VM_SUBSYS_DRM_GEM, 1761 VM_PROT_READ | VM_PROT_WRITE, /* prot */ 1762 VM_PROT_READ | VM_PROT_WRITE, /* max */ 1763 MAP_SHARED /* cow */); 1764 if (rv != KERN_SUCCESS) { 1765 vm_object_deallocate(obj->base.filp); 1766 error = -vm_mmap_to_errno(rv); 1767 } else { 1768 args->addr_ptr = (uint64_t)addr; 1769 } 1770 1771 if (args->flags & I915_MMAP_WC) { /* I915_PARAM_MMAP_VERSION */ 1772 #if 0 1773 addr = vm_mmap(obj->base.filp, 0, args->size, 1774 PROT_READ | PROT_WRITE, MAP_SHARED, 1775 args->offset); 1776 if (args->flags & I915_MMAP_WC) { 1777 struct mm_struct *mm = current->mm; 1778 struct vm_area_struct *vma; 1779 1780 if (down_write_killable(&mm->mmap_sem)) { 1781 i915_gem_object_put(obj); 1782 return -EINTR; 1783 } 1784 vma = find_vma(mm, addr); 1785 if (vma) 1786 vma->vm_page_prot = 1787 pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); 1788 else 1789 addr = -ENOMEM; 1790 up_write(&mm->mmap_sem); 1791 #endif 1792 1793 /* This may race, but that's ok, it only gets set */ 1794 WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU); 1795 } 1796 1797 out: 1798 i915_gem_object_put(obj); 1799 if (IS_ERR((void *)addr)) 1800 return addr; 1801 1802 args->addr_ptr = (uint64_t) addr; 1803 1804 return 0; 1805 } 1806 1807 static unsigned int tile_row_pages(struct drm_i915_gem_object *obj) 1808 { 1809 return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT; 1810 } 1811 1812 /** 1813 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps 1814 * 1815 * A history of the GTT mmap interface: 1816 * 1817 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to 1818 * aligned and suitable for fencing, and still fit into the available 1819 * mappable space left by the pinned display objects. A classic problem 1820 * we called the page-fault-of-doom where we would ping-pong between 1821 * two objects that could not fit inside the GTT and so the memcpy 1822 * would page one object in at the expense of the other between every 1823 * single byte. 1824 * 1825 * 1 - Objects can be any size, and have any compatible fencing (X Y, or none 1826 * as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the 1827 * object is too large for the available space (or simply too large 1828 * for the mappable aperture!), a view is created instead and faulted 1829 * into userspace. (This view is aligned and sized appropriately for 1830 * fenced access.) 1831 * 1832 * Restrictions: 1833 * 1834 * * snoopable objects cannot be accessed via the GTT. It can cause machine 1835 * hangs on some architectures, corruption on others. An attempt to service 1836 * a GTT page fault from a snoopable object will generate a SIGBUS. 1837 * 1838 * * the object must be able to fit into RAM (physical memory, though no 1839 * limited to the mappable aperture). 1840 * 1841 * 1842 * Caveats: 1843 * 1844 * * a new GTT page fault will synchronize rendering from the GPU and flush 1845 * all data to system memory. 
Subsequent access will not be synchronized. 1846 * 1847 * * all mappings are revoked on runtime device suspend. 1848 * 1849 * * there are only 8, 16 or 32 fence registers to share between all users 1850 * (older machines require fence register for display and blitter access 1851 * as well). Contention of the fence registers will cause the previous users 1852 * to be unmapped and any new access will generate new page faults. 1853 * 1854 * * running out of memory while servicing a fault may generate a SIGBUS, 1855 * rather than the expected SIGSEGV. 1856 */ 1857 int i915_gem_mmap_gtt_version(void) 1858 { 1859 return 1; 1860 } 1861 1862 static inline struct i915_ggtt_view 1863 compute_partial_view(struct drm_i915_gem_object *obj, 1864 pgoff_t page_offset, 1865 unsigned int chunk) 1866 { 1867 struct i915_ggtt_view view; 1868 1869 if (i915_gem_object_is_tiled(obj)) 1870 chunk = roundup(chunk, tile_row_pages(obj)); 1871 1872 view.type = I915_GGTT_VIEW_PARTIAL; 1873 view.partial.offset = rounddown(page_offset, chunk); 1874 view.partial.size = 1875 min_t(unsigned int, chunk, 1876 (obj->base.size >> PAGE_SHIFT) - view.partial.offset); 1877 1878 /* If the partial covers the entire object, just create a normal VMA. */ 1879 if (chunk >= obj->base.size >> PAGE_SHIFT) 1880 view.type = I915_GGTT_VIEW_NORMAL; 1881 1882 return view; 1883 } 1884 1885 /** 1886 * i915_gem_fault - fault a page into the GTT 1887 * 1888 * vm_obj is locked on entry and expected to be locked on return. 1889 * 1890 * The vm_pager has placemarked the object with an anonymous memory page 1891 * which we must replace atomically to avoid races against concurrent faults 1892 * on the same page. XXX we currently are unable to do this atomically. 1893 * 1894 * If we are to return an error we should not touch the anonymous page, 1895 * the caller will deallocate it. 1896 * 1897 * XXX Most GEM calls appear to be interruptable, but we can't hard loop 1898 * in that case. Release all resources and wait 1 tick before retrying. 1899 * This is a huge problem which needs to be fixed by getting rid of most 1900 * of the interruptability. The linux code does not retry but does appear 1901 * to have some sort of mechanism (VM_FAULT_NOPAGE ?) for the higher level 1902 * to be able to retry. 1903 * 1904 * -- 1905 * @vma: VMA in question 1906 * @vmf: fault info 1907 * 1908 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped 1909 * from userspace. The fault handler takes care of binding the object to 1910 * the GTT (if needed), allocating and programming a fence register (again, 1911 * only if needed based on whether the old reg is still valid or the object 1912 * is tiled) and inserting a new PTE into the faulting process. 1913 * 1914 * Note that the faulting process may involve evicting existing objects 1915 * from the GTT and/or fence registers to make room. So performance may 1916 * suffer if the GTT working set is large or there are few fence registers 1917 * left. 1918 * 1919 * The current feature set supported by i915_gem_fault() and thus GTT mmaps 1920 * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version). 1921 * vm_obj is locked on entry and expected to be locked on return. The VM 1922 * pager has placed an anonymous memory page at (obj,offset) which we have 1923 * to replace. 
1924 */ 1925 int i915_gem_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, vm_page_t *mres) 1926 { 1927 #define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */ 1928 struct drm_i915_gem_object *obj = to_intel_bo(vm_obj->handle); 1929 struct drm_device *dev = obj->base.dev; 1930 struct drm_i915_private *dev_priv = to_i915(dev); 1931 struct i915_ggtt *ggtt = &dev_priv->ggtt; 1932 bool write = !!(prot & VM_PROT_WRITE); 1933 struct i915_vma *vma; 1934 pgoff_t page_offset; 1935 vm_page_t m; 1936 unsigned int flags; 1937 int ret; 1938 #ifdef __DragonFly__ 1939 int didref = 0; 1940 struct vm_area_struct tmp_vm_area; 1941 struct vm_area_struct *area = &tmp_vm_area; 1942 1943 /* Fill-in vm_area_struct */ 1944 area->vm_private_data = vm_obj->handle; 1945 area->vm_start = 0; 1946 area->vm_end = obj->base.size; 1947 #endif 1948 1949 /* We don't use vmf->pgoff since that has the fake offset */ 1950 page_offset = (unsigned long)offset >> PAGE_SHIFT; 1951 1952 /* 1953 * vm_fault() has supplied us with a busied page placeholding 1954 * the operation. This presents a lock order reversal issue 1955 * again i915_gem_release_mmap() for our device mutex. 1956 * 1957 * Deal with the problem by getting rid of the placeholder now, 1958 * and then dealing with the potential for a new placeholder when 1959 * we try to insert later. 1960 */ 1961 if (*mres != NULL) { 1962 m = *mres; 1963 *mres = NULL; 1964 if ((m->busy_count & PBUSY_LOCKED) == 0) 1965 kprintf("i915_gem_fault: Page was not busy\n"); 1966 else 1967 vm_page_remove(m); 1968 vm_page_free(m); 1969 } 1970 1971 m = NULL; 1972 1973 retry: 1974 trace_i915_gem_object_fault(obj, page_offset, true, write); 1975 1976 /* Try to flush the object off the GPU first without holding the lock. 1977 * Upon acquiring the lock, we will perform our sanity checks and then 1978 * repeat the flush holding the lock in the normal manner to catch cases 1979 * where we are gazumped. 1980 */ 1981 ret = i915_gem_object_wait(obj, 1982 I915_WAIT_INTERRUPTIBLE, 1983 MAX_SCHEDULE_TIMEOUT, 1984 NULL); 1985 if (ret) 1986 goto err; 1987 1988 ret = i915_gem_object_pin_pages(obj); 1989 if (ret) 1990 goto err; 1991 1992 intel_runtime_pm_get(dev_priv); 1993 1994 ret = i915_mutex_lock_interruptible(dev); 1995 if (ret) { 1996 if (ret != -EINTR) 1997 kprintf("i915: caught bug(%d) (mutex_lock_inter)\n", ret); 1998 goto err_rpm; 1999 } 2000 2001 /* Access to snoopable pages through the GTT is incoherent. */ 2002 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) { 2003 kprintf("i915: caught bug() (cache_level %d %d)\n", 2004 (obj->cache_level), !HAS_LLC(dev_priv)); 2005 ret = -EFAULT; 2006 goto err_unlock; 2007 } 2008 2009 /* If the object is smaller than a couple of partial vma, it is 2010 * not worth only creating a single partial vma - we may as well 2011 * clear enough space for the full object. 2012 */ 2013 flags = PIN_MAPPABLE; 2014 if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT) 2015 flags |= PIN_NONBLOCK | PIN_NONFAULT; 2016 2017 /* Now pin it into the GTT as needed */ 2018 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags); 2019 if (IS_ERR(vma)) { 2020 /* Use a partial view if it is bigger than available space */ 2021 struct i915_ggtt_view view = 2022 compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES); 2023 2024 kprintf("i915_gem_fault: CHUNKING PASS\n"); 2025 2026 /* Userspace is now writing through an untracked VMA, abandon 2027 * all hope that the hardware is able to track future writes. 
2028 */ 2029 obj->frontbuffer_ggtt_origin = ORIGIN_CPU; 2030 2031 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE); 2032 } 2033 if (IS_ERR(vma)) { 2034 kprintf("i915: caught bug() (VMA error %ld objsize %ld)\n", 2035 PTR_ERR(vma), obj->base.size); 2036 ret = PTR_ERR(vma); 2037 goto err_unlock; 2038 } 2039 2040 ret = i915_gem_object_set_to_gtt_domain(obj, write); 2041 if (ret) { 2042 kprintf("i915: caught bug(%d) (set_to_gtt_dom)\n", ret); 2043 goto err_unpin; 2044 } 2045 2046 ret = i915_vma_get_fence(vma); 2047 if (ret) { 2048 kprintf("i915: caught bug(%d) (vma_get_fence)\n", ret); 2049 goto err_unpin; 2050 } 2051 2052 /* 2053 * START FREEBSD MAGIC 2054 * 2055 * Add a pip count to avoid destruction and certain other 2056 * complex operations (such as collapses?) while unlocked. 2057 */ 2058 vm_object_pip_add(vm_obj, 1); 2059 didref = 1; 2060 2061 ret = 0; 2062 m = NULL; 2063 2064 /* 2065 * Since the object lock was dropped, another thread might have 2066 * faulted on the same GTT address and instantiated the mapping. 2067 * Recheck. 2068 */ 2069 m = vm_page_lookup(vm_obj, OFF_TO_IDX(offset)); 2070 if (m != NULL) { 2071 /* 2072 * Try to busy the page, retry on failure (non-zero ret). 2073 */ 2074 if (vm_page_busy_try(m, false)) { 2075 kprintf("i915_gem_fault: BUSY\n"); 2076 ret = -EINTR; 2077 goto err_unpin; 2078 } 2079 goto have_page; 2080 } 2081 /* END FREEBSD MAGIC */ 2082 2083 /* Mark as being mmapped into userspace for later revocation */ 2084 assert_rpm_wakelock_held(dev_priv); 2085 if (list_empty(&obj->userfault_link)) 2086 list_add(&obj->userfault_link, &dev_priv->mm.userfault_list); 2087 2088 /* Finally, remap it using the new GTT offset */ 2089 m = vm_phys_fictitious_to_vm_page(ggtt->mappable_base + 2090 i915_ggtt_offset(vma) + offset); 2091 if (m == NULL) { 2092 kprintf("i915: caught bug() (phys_fict_to_vm)\n"); 2093 ret = -EFAULT; 2094 goto err_unpin; 2095 } 2096 KASSERT((m->flags & PG_FICTITIOUS) != 0, ("not fictitious %p", m)); 2097 KASSERT(m->wire_count == 1, ("wire_count not 1 %p", m)); 2098 2099 /* 2100 * Try to busy the page. Fails on non-zero return. 2101 */ 2102 if (vm_page_busy_try(m, false)) { 2103 kprintf("i915_gem_fault: BUSY(2)\n"); 2104 ret = -EINTR; 2105 goto err_unpin; 2106 } 2107 m->valid = VM_PAGE_BITS_ALL; 2108 2109 #if 1 2110 /* 2111 * This should always work since we already checked via a lookup 2112 * above. 2113 */ 2114 if (vm_page_insert(m, vm_obj, OFF_TO_IDX(offset)) == FALSE) { 2115 kprintf("i915:gem_fault: page %p,%jd already in object\n", 2116 vm_obj, 2117 OFF_TO_IDX(offset)); 2118 vm_page_wakeup(m); 2119 ret = -EINTR; 2120 goto err_unpin; 2121 } 2122 #endif 2123 2124 have_page: 2125 *mres = m; 2126 2127 __i915_vma_unpin(vma); 2128 mutex_unlock(&dev->struct_mutex); 2129 ret = VM_PAGER_OK; 2130 goto done; 2131 2132 /* 2133 * ALTERNATIVE ERROR RETURN. 2134 * 2135 * OBJECT EXPECTED TO BE LOCKED. 2136 */ 2137 err_unpin: 2138 __i915_vma_unpin(vma); 2139 err_unlock: 2140 mutex_unlock(&dev->struct_mutex); 2141 err_rpm: 2142 intel_runtime_pm_put(dev_priv); 2143 i915_gem_object_unpin_pages(obj); 2144 err: 2145 switch (ret) { 2146 case -EIO: 2147 /* 2148 * We eat errors when the gpu is terminally wedged to avoid 2149 * userspace unduly crashing (gl has no provisions for mmaps to 2150 * fail). But any other -EIO isn't ours (e.g. swap in failure) 2151 * and so needs to be reported. 
2152 */ 2153 if (!i915_terminally_wedged(&dev_priv->gpu_error)) { 2154 // ret = VM_FAULT_SIGBUS; 2155 break; 2156 } 2157 case -EAGAIN: 2158 /* 2159 * EAGAIN means the gpu is hung and we'll wait for the error 2160 * handler to reset everything when re-faulting in 2161 * i915_mutex_lock_interruptible. 2162 */ 2163 case -ERESTARTSYS: 2164 case -EINTR: 2165 #ifdef __DragonFly__ 2166 if (didref) { 2167 kprintf("i915: caught bug(%d) (retry)\n", ret); 2168 vm_object_pip_wakeup(vm_obj); 2169 didref = 0; 2170 } 2171 VM_OBJECT_UNLOCK(vm_obj); 2172 int dummy; 2173 tsleep(&dummy, 0, "delay", 1); /* XXX */ 2174 VM_OBJECT_LOCK(vm_obj); 2175 goto retry; 2176 #endif 2177 default: 2178 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret); 2179 ret = VM_PAGER_ERROR; 2180 break; 2181 } 2182 2183 #ifdef __DragonFly__ 2184 done: 2185 if (didref) 2186 vm_object_pip_wakeup(vm_obj); 2187 else 2188 kprintf("i915: caught bug(%d)\n", ret); 2189 #endif 2190 2191 return ret; 2192 } 2193 2194 #ifdef __DragonFly__ 2195 static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node, 2196 struct address_space *file_mapping) 2197 { 2198 struct drm_i915_gem_object *obj = container_of( 2199 node,struct drm_i915_gem_object, base.vma_node); 2200 vm_object_t devobj; 2201 vm_page_t m; 2202 int i, page_count; 2203 2204 devobj = cdev_pager_lookup(obj); 2205 if (devobj != NULL) { 2206 page_count = OFF_TO_IDX(obj->base.size); 2207 2208 VM_OBJECT_LOCK(devobj); 2209 for (i = 0; i < page_count; i++) { 2210 m = vm_page_lookup_busy_wait(devobj, i, TRUE, "915unm"); 2211 if (m == NULL) 2212 continue; 2213 cdev_pager_free_page(devobj, m); 2214 } 2215 VM_OBJECT_UNLOCK(devobj); 2216 vm_object_deallocate(devobj); 2217 } 2218 } 2219 #endif 2220 2221 /** 2222 * i915_gem_release_mmap - remove physical page mappings 2223 * @obj: obj in question 2224 * 2225 * Preserve the reservation of the mmapping with the DRM core code, but 2226 * relinquish ownership of the pages back to the system. 2227 * 2228 * It is vital that we remove the page mapping if we have mapped a tiled 2229 * object through the GTT and then lose the fence register due to 2230 * resource pressure. Similarly if the object has been moved out of the 2231 * aperture, than pages mapped into userspace must be revoked. Removing the 2232 * mapping will then trigger a page fault on the next user access, allowing 2233 * fixup by i915_gem_fault(). 2234 */ 2235 void 2236 i915_gem_release_mmap(struct drm_i915_gem_object *obj) 2237 { 2238 struct drm_i915_private *i915 = to_i915(obj->base.dev); 2239 2240 /* Serialisation between user GTT access and our code depends upon 2241 * revoking the CPU's PTE whilst the mutex is held. The next user 2242 * pagefault then has to wait until we release the mutex. 2243 * 2244 * Note that RPM complicates somewhat by adding an additional 2245 * requirement that operations to the GGTT be made holding the RPM 2246 * wakeref. 2247 */ 2248 lockdep_assert_held(&i915->drm.struct_mutex); 2249 intel_runtime_pm_get(i915); 2250 2251 if (list_empty(&obj->userfault_link)) 2252 goto out; 2253 2254 list_del_init(&obj->userfault_link); 2255 #ifndef __DragonFly__ 2256 drm_vma_node_unmap(&obj->base.vma_node, 2257 obj->base.dev->anon_inode->i_mapping); 2258 #else 2259 drm_vma_node_unmap(&obj->base.vma_node, NULL); 2260 #endif 2261 2262 /* Ensure that the CPU's PTE are revoked and there are not outstanding 2263 * memory transactions from userspace before we return. 
The TLB 2264 * flushing implied above by changing the PTE above *should* be 2265 * sufficient, an extra barrier here just provides us with a bit 2266 * of paranoid documentation about our requirement to serialise 2267 * memory writes before touching registers / GSM. 2268 */ 2269 wmb(); 2270 2271 out: 2272 intel_runtime_pm_put(i915); 2273 } 2274 2275 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv) 2276 { 2277 struct drm_i915_gem_object *obj, *on; 2278 int i; 2279 2280 /* 2281 * Only called during RPM suspend. All users of the userfault_list 2282 * must be holding an RPM wakeref to ensure that this can not 2283 * run concurrently with themselves (and use the struct_mutex for 2284 * protection between themselves). 2285 */ 2286 2287 list_for_each_entry_safe(obj, on, 2288 &dev_priv->mm.userfault_list, userfault_link) { 2289 list_del_init(&obj->userfault_link); 2290 #ifndef __DragonFly__ 2291 drm_vma_node_unmap(&obj->base.vma_node, 2292 obj->base.dev->anon_inode->i_mapping); 2293 #else 2294 drm_vma_node_unmap(&obj->base.vma_node, NULL); 2295 #endif 2296 } 2297 2298 /* The fence will be lost when the device powers down. If any were 2299 * in use by hardware (i.e. they are pinned), we should not be powering 2300 * down! All other fences will be reacquired by the user upon waking. 2301 */ 2302 for (i = 0; i < dev_priv->num_fence_regs; i++) { 2303 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; 2304 2305 /* Ideally we want to assert that the fence register is not 2306 * live at this point (i.e. that no piece of code will be 2307 * trying to write through fence + GTT, as that both violates 2308 * our tracking of activity and associated locking/barriers, 2309 * but also is illegal given that the hw is powered down). 2310 * 2311 * Previously we used reg->pin_count as a "liveness" indicator. 2312 * That is not sufficient, and we need a more fine-grained 2313 * tool if we want to have a sanity check here. 
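 *
 * Lacking that, we just mark every fence that still has a vma attached as
 * dirty below, so that its register is rewritten before the next use once
 * the device wakes up again.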
2314 */ 2315 2316 if (!reg->vma) 2317 continue; 2318 2319 GEM_BUG_ON(!list_empty(®->vma->obj->userfault_link)); 2320 reg->dirty = true; 2321 } 2322 } 2323 2324 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj) 2325 { 2326 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 2327 int err; 2328 2329 err = drm_gem_create_mmap_offset(&obj->base); 2330 if (likely(!err)) 2331 return 0; 2332 2333 /* Attempt to reap some mmap space from dead objects */ 2334 do { 2335 err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE); 2336 if (err) 2337 break; 2338 2339 i915_gem_drain_freed_objects(dev_priv); 2340 err = drm_gem_create_mmap_offset(&obj->base); 2341 if (!err) 2342 break; 2343 2344 } while (flush_delayed_work(&dev_priv->gt.retire_work)); 2345 2346 return err; 2347 } 2348 2349 #if 0 2350 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj) 2351 { 2352 drm_gem_free_mmap_offset(&obj->base); 2353 } 2354 #endif 2355 2356 int 2357 i915_gem_mmap_gtt(struct drm_file *file, 2358 struct drm_device *dev, 2359 uint32_t handle, 2360 uint64_t *offset) 2361 { 2362 struct drm_i915_gem_object *obj; 2363 int ret; 2364 2365 obj = i915_gem_object_lookup(file, handle); 2366 if (!obj) 2367 return -ENOENT; 2368 2369 ret = i915_gem_object_create_mmap_offset(obj); 2370 if (ret == 0) 2371 *offset = DRM_GEM_MAPPING_OFF(obj->base.map_list.key) | 2372 DRM_GEM_MAPPING_KEY; 2373 2374 i915_gem_object_put(obj); 2375 return ret; 2376 } 2377 2378 /** 2379 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing 2380 * @dev: DRM device 2381 * @data: GTT mapping ioctl data 2382 * @file: GEM object info 2383 * 2384 * Simply returns the fake offset to userspace so it can mmap it. 2385 * The mmap call will end up in drm_gem_mmap(), which will set things 2386 * up so we can get faults in the handler above. 2387 * 2388 * The fault handler will take care of binding the object into the GTT 2389 * (since it may have been evicted to make room for something), allocating 2390 * a fence register, and mapping the appropriate aperture address into 2391 * userspace. 2392 */ 2393 int 2394 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, 2395 struct drm_file *file) 2396 { 2397 struct drm_i915_gem_mmap_gtt *args = data; 2398 2399 return i915_gem_mmap_gtt(file, dev, args->handle, (uint64_t *)&args->offset); 2400 } 2401 2402 /* Immediately discard the backing storage */ 2403 static void 2404 i915_gem_object_truncate(struct drm_i915_gem_object *obj) 2405 { 2406 vm_object_t vm_obj = obj->base.filp; 2407 2408 if (obj->base.filp == NULL) 2409 return; 2410 2411 VM_OBJECT_LOCK(vm_obj); 2412 vm_object_page_remove(vm_obj, 0, 0, false); 2413 VM_OBJECT_UNLOCK(vm_obj); 2414 2415 /* Our goal here is to return as much of the memory as 2416 * is possible back to the system as we are called from OOM. 2417 * To do this we must instruct the shmfs to drop all of its 2418 * backing pages, *now*. 
2419 */ 2420 #if 0 2421 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1); 2422 #endif 2423 obj->mm.madv = __I915_MADV_PURGED; 2424 obj->mm.pages = ERR_PTR(-EFAULT); 2425 } 2426 2427 /* Try to discard unwanted pages */ 2428 void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj) 2429 { 2430 #if 0 2431 struct address_space *mapping; 2432 #endif 2433 2434 lockdep_assert_held(&obj->mm.lock); 2435 GEM_BUG_ON(obj->mm.pages); 2436 2437 switch (obj->mm.madv) { 2438 case I915_MADV_DONTNEED: 2439 i915_gem_object_truncate(obj); 2440 case __I915_MADV_PURGED: 2441 return; 2442 } 2443 2444 if (obj->base.filp == NULL) 2445 return; 2446 2447 #if 0 2448 mapping = obj->base.filp->f_mapping, 2449 invalidate_mapping_pages(mapping, 0, (loff_t)-1); 2450 #endif 2451 invalidate_mapping_pages(obj->base.filp, 0, (loff_t)-1); 2452 } 2453 2454 static void 2455 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj, 2456 struct sg_table *pages) 2457 { 2458 struct sgt_iter sgt_iter; 2459 struct page *page; 2460 2461 __i915_gem_object_release_shmem(obj, pages, true); 2462 2463 i915_gem_gtt_finish_pages(obj, pages); 2464 2465 if (i915_gem_object_needs_bit17_swizzle(obj)) 2466 i915_gem_object_save_bit_17_swizzle(obj, pages); 2467 2468 for_each_sgt_page(page, sgt_iter, pages) { 2469 if (obj->mm.dirty) 2470 set_page_dirty(page); 2471 2472 if (obj->mm.madv == I915_MADV_WILLNEED) 2473 mark_page_accessed(page); 2474 2475 put_page(page); 2476 } 2477 obj->mm.dirty = false; 2478 2479 sg_free_table(pages); 2480 kfree(pages); 2481 } 2482 2483 static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj) 2484 { 2485 struct radix_tree_iter iter; 2486 void **slot; 2487 2488 radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0) 2489 radix_tree_delete(&obj->mm.get_page.radix, iter.index); 2490 } 2491 2492 void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, 2493 enum i915_mm_subclass subclass) 2494 { 2495 struct sg_table *pages; 2496 2497 if (i915_gem_object_has_pinned_pages(obj)) 2498 return; 2499 2500 GEM_BUG_ON(obj->bind_count); 2501 if (!READ_ONCE(obj->mm.pages)) 2502 return; 2503 2504 /* May be called by shrinker from within get_pages() (on another bo) */ 2505 mutex_lock_nested(&obj->mm.lock, subclass); 2506 if (unlikely(atomic_read(&obj->mm.pages_pin_count))) 2507 goto unlock; 2508 2509 /* ->put_pages might need to allocate memory for the bit17 swizzle 2510 * array, hence protect them from being reaped by removing them from gtt 2511 * lists early. 
*/ 2512 pages = fetch_and_zero(&obj->mm.pages); 2513 GEM_BUG_ON(!pages); 2514 2515 if (obj->mm.mapping) { 2516 void *ptr; 2517 2518 ptr = ptr_mask_bits(obj->mm.mapping); 2519 if (is_vmalloc_addr(ptr)) 2520 vunmap(ptr); 2521 else 2522 kunmap(kmap_to_page(ptr)); 2523 2524 obj->mm.mapping = NULL; 2525 } 2526 2527 __i915_gem_object_reset_page_iter(obj); 2528 2529 if (!IS_ERR(pages)) 2530 obj->ops->put_pages(obj, pages); 2531 2532 unlock: 2533 mutex_unlock(&obj->mm.lock); 2534 } 2535 2536 static bool i915_sg_trim(struct sg_table *orig_st) 2537 { 2538 struct sg_table new_st; 2539 struct scatterlist *sg, *new_sg; 2540 unsigned int i; 2541 2542 if (orig_st->nents == orig_st->orig_nents) 2543 return false; 2544 2545 if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN)) 2546 return false; 2547 2548 new_sg = new_st.sgl; 2549 for_each_sg(orig_st->sgl, sg, orig_st->nents, i) { 2550 sg_set_page(new_sg, sg_page(sg), sg->length, 0); 2551 /* called before being DMA mapped, no need to copy sg->dma_* */ 2552 new_sg = sg_next(new_sg); 2553 } 2554 GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */ 2555 2556 sg_free_table(orig_st); 2557 2558 *orig_st = new_st; 2559 return true; 2560 } 2561 2562 static struct sg_table * 2563 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) 2564 { 2565 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 2566 const unsigned long page_count = obj->base.size / PAGE_SIZE; 2567 unsigned long i; 2568 struct vm_object *mapping; 2569 struct sg_table *st; 2570 struct scatterlist *sg; 2571 struct sgt_iter sgt_iter; 2572 struct page *page; 2573 unsigned long last_pfn = 0; /* suppress gcc warning */ 2574 unsigned int max_segment; 2575 gfp_t noreclaim; 2576 int ret; 2577 2578 /* Assert that the object is not currently in any GPU domain. As it 2579 * wasn't in the GTT, there shouldn't be any way it could have been in 2580 * a GPU cache 2581 */ 2582 GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); 2583 GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); 2584 2585 max_segment = swiotlb_max_segment(); 2586 if (!max_segment) 2587 max_segment = rounddown(UINT_MAX, PAGE_SIZE); 2588 2589 st = kmalloc(sizeof(*st), M_DRM, GFP_KERNEL); 2590 if (st == NULL) 2591 return ERR_PTR(-ENOMEM); 2592 2593 rebuild_st: 2594 if (sg_alloc_table(st, page_count, GFP_KERNEL)) { 2595 kfree(st); 2596 return ERR_PTR(-ENOMEM); 2597 } 2598 2599 /* Get the list of pages out of our struct file. They'll be pinned 2600 * at this point until we release them. 2601 * 2602 * Fail silently without starting the shrinker 2603 */ 2604 #ifdef __DragonFly__ 2605 mapping = obj->base.filp; 2606 VM_OBJECT_LOCK(mapping); 2607 #endif 2608 noreclaim = mapping_gfp_constraint(mapping, 2609 ~(__GFP_IO | __GFP_RECLAIM)); 2610 noreclaim |= __GFP_NORETRY | __GFP_NOWARN; 2611 2612 sg = st->sgl; 2613 st->nents = 0; 2614 for (i = 0; i < page_count; i++) { 2615 const unsigned int shrink[] = { 2616 I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE, 2617 0, 2618 }, *s = shrink; 2619 gfp_t gfp = noreclaim; 2620 2621 do { 2622 page = shmem_read_mapping_page_gfp(mapping, i, gfp); 2623 if (likely(!IS_ERR(page))) 2624 break; 2625 2626 if (!*s) { 2627 ret = PTR_ERR(page); 2628 goto err_sg; 2629 } 2630 2631 i915_gem_shrink(dev_priv, 2 * page_count, *s++); 2632 cond_resched(); 2633 2634 /* We've tried hard to allocate the memory by reaping 2635 * our own buffer, now let the real VM do its job and 2636 * go down in flames if truly OOM. 
2637 * 2638 * However, since graphics tend to be disposable, 2639 * defer the oom here by reporting the ENOMEM back 2640 * to userspace. 2641 */ 2642 if (!*s) { 2643 /* reclaim and warn, but no oom */ 2644 gfp = mapping_gfp_mask(mapping); 2645 2646 /* Our bo are always dirty and so we require 2647 * kswapd to reclaim our pages (direct reclaim 2648 * does not effectively begin pageout of our 2649 * buffers on its own). However, direct reclaim 2650 * only waits for kswapd when under allocation 2651 * congestion. So as a result __GFP_RECLAIM is 2652 * unreliable and fails to actually reclaim our 2653 * dirty pages -- unless you try over and over 2654 * again with !__GFP_NORETRY. However, we still 2655 * want to fail this allocation rather than 2656 * trigger the out-of-memory killer and for 2657 * this we want the future __GFP_MAYFAIL. 2658 */ 2659 } 2660 } while (1); 2661 2662 if (!i || 2663 sg->length >= max_segment || 2664 page_to_pfn(page) != last_pfn + 1) { 2665 if (i) 2666 sg = sg_next(sg); 2667 st->nents++; 2668 sg_set_page(sg, page, PAGE_SIZE, 0); 2669 } else { 2670 sg->length += PAGE_SIZE; 2671 } 2672 last_pfn = page_to_pfn(page); 2673 2674 /* Check that the i965g/gm workaround works. */ 2675 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL)); 2676 } 2677 if (sg) /* loop terminated early; short sg table */ 2678 sg_mark_end(sg); 2679 #ifdef __DragonFly__ 2680 VM_OBJECT_UNLOCK(mapping); 2681 #endif 2682 2683 /* Trim unused sg entries to avoid wasting memory. */ 2684 i915_sg_trim(st); 2685 2686 ret = i915_gem_gtt_prepare_pages(obj, st); 2687 if (ret) { 2688 /* DMA remapping failed? One possible cause is that 2689 * it could not reserve enough large entries, asking 2690 * for PAGE_SIZE chunks instead may be helpful. 2691 */ 2692 if (max_segment > PAGE_SIZE) { 2693 for_each_sgt_page(page, sgt_iter, st) 2694 put_page(page); 2695 sg_free_table(st); 2696 2697 max_segment = PAGE_SIZE; 2698 goto rebuild_st; 2699 } else { 2700 dev_warn(&dev_priv->drm.pdev->dev, 2701 "Failed to DMA remap %lu pages\n", 2702 page_count); 2703 goto err_pages; 2704 } 2705 } 2706 2707 if (i915_gem_object_needs_bit17_swizzle(obj)) 2708 i915_gem_object_do_bit_17_swizzle(obj, st); 2709 2710 return st; 2711 2712 err_sg: 2713 sg_mark_end(sg); 2714 err_pages: 2715 for_each_sgt_page(page, sgt_iter, st) 2716 put_page(page); 2717 #ifdef __DragonFly__ 2718 VM_OBJECT_UNLOCK(mapping); 2719 #endif 2720 sg_free_table(st); 2721 kfree(st); 2722 2723 /* shmemfs first checks if there is enough memory to allocate the page 2724 * and reports ENOSPC should there be insufficient, along with the usual 2725 * ENOMEM for a genuine allocation failure. 2726 * 2727 * We use ENOSPC in our driver to mean that we have run out of aperture 2728 * space and so want to translate the error from shmemfs back to our 2729 * usual understanding of ENOMEM. 
2730 */ 2731 if (ret == -ENOSPC) 2732 ret = -ENOMEM; 2733 2734 return ERR_PTR(ret); 2735 } 2736 2737 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, 2738 struct sg_table *pages) 2739 { 2740 lockdep_assert_held(&obj->mm.lock); 2741 2742 obj->mm.get_page.sg_pos = pages->sgl; 2743 obj->mm.get_page.sg_idx = 0; 2744 2745 obj->mm.pages = pages; 2746 2747 if (i915_gem_object_is_tiled(obj) && 2748 to_i915(obj->base.dev)->quirks & QUIRK_PIN_SWIZZLED_PAGES) { 2749 GEM_BUG_ON(obj->mm.quirked); 2750 __i915_gem_object_pin_pages(obj); 2751 obj->mm.quirked = true; 2752 } 2753 } 2754 2755 static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj) 2756 { 2757 struct sg_table *pages; 2758 2759 GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj)); 2760 2761 if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) { 2762 DRM_DEBUG("Attempting to obtain a purgeable object\n"); 2763 return -EFAULT; 2764 } 2765 2766 pages = obj->ops->get_pages(obj); 2767 if (unlikely(IS_ERR(pages))) 2768 return PTR_ERR(pages); 2769 2770 __i915_gem_object_set_pages(obj, pages); 2771 return 0; 2772 } 2773 2774 /* Ensure that the associated pages are gathered from the backing storage 2775 * and pinned into our object. i915_gem_object_pin_pages() may be called 2776 * multiple times before they are released by a single call to 2777 * i915_gem_object_unpin_pages() - once the pages are no longer referenced 2778 * either as a result of memory pressure (reaping pages under the shrinker) 2779 * or as the object is itself released. 2780 */ 2781 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj) 2782 { 2783 int err; 2784 2785 err = mutex_lock_interruptible(&obj->mm.lock); 2786 if (err) 2787 return err; 2788 2789 if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) { 2790 err = ____i915_gem_object_get_pages(obj); 2791 if (err) 2792 goto unlock; 2793 2794 smp_mb__before_atomic(); 2795 } 2796 atomic_inc(&obj->mm.pages_pin_count); 2797 2798 unlock: 2799 mutex_unlock(&obj->mm.lock); 2800 return err; 2801 } 2802 2803 /* The 'mapping' part of i915_gem_object_pin_map() below */ 2804 static void *i915_gem_object_map(const struct drm_i915_gem_object *obj, 2805 enum i915_map_type type) 2806 { 2807 unsigned long n_pages = obj->base.size >> PAGE_SHIFT; 2808 struct sg_table *sgt = obj->mm.pages; 2809 struct sgt_iter sgt_iter; 2810 struct page *page; 2811 struct page *stack_pages[32]; 2812 struct page **pages = stack_pages; 2813 unsigned long i = 0; 2814 pgprot_t pgprot; 2815 void *addr; 2816 2817 /* A single page can always be kmapped */ 2818 if (n_pages == 1 && type == I915_MAP_WB) 2819 return kmap(sg_page(sgt->sgl)); 2820 2821 if (n_pages > ARRAY_SIZE(stack_pages)) { 2822 /* Too big for stack -- allocate temporary array instead */ 2823 pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY); 2824 if (!pages) 2825 return NULL; 2826 } 2827 2828 for_each_sgt_page(page, sgt_iter, sgt) 2829 pages[i++] = page; 2830 2831 /* Check that we have the expected number of pages */ 2832 GEM_BUG_ON(i != n_pages); 2833 2834 switch (type) { 2835 case I915_MAP_WB: 2836 pgprot = PAGE_KERNEL; 2837 break; 2838 case I915_MAP_WC: 2839 pgprot = pgprot_writecombine(PAGE_KERNEL_IO); 2840 break; 2841 } 2842 addr = vmap(pages, n_pages, 0, pgprot); 2843 2844 if (pages != stack_pages) 2845 drm_free_large(pages); 2846 2847 return addr; 2848 } 2849 2850 /* get, pin, and map the pages of the object into kernel space */ 2851 void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, 2852 enum i915_map_type type) 2853 { 2854 enum i915_map_type 
has_type; 2855 bool pinned; 2856 void *ptr; 2857 int ret; 2858 2859 GEM_BUG_ON(!i915_gem_object_has_struct_page(obj)); 2860 2861 ret = mutex_lock_interruptible(&obj->mm.lock); 2862 if (ret) 2863 return ERR_PTR(ret); 2864 2865 pinned = true; 2866 if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) { 2867 if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) { 2868 ret = ____i915_gem_object_get_pages(obj); 2869 if (ret) 2870 goto err_unlock; 2871 2872 smp_mb__before_atomic(); 2873 } 2874 atomic_inc(&obj->mm.pages_pin_count); 2875 pinned = false; 2876 } 2877 GEM_BUG_ON(!obj->mm.pages); 2878 2879 ptr = ptr_unpack_bits(obj->mm.mapping, has_type); 2880 if (ptr && has_type != type) { 2881 if (pinned) { 2882 ret = -EBUSY; 2883 goto err_unpin; 2884 } 2885 2886 if (is_vmalloc_addr(ptr)) 2887 vunmap(ptr); 2888 else 2889 kunmap(kmap_to_page(ptr)); 2890 2891 ptr = obj->mm.mapping = NULL; 2892 } 2893 2894 if (!ptr) { 2895 ptr = i915_gem_object_map(obj, type); 2896 if (!ptr) { 2897 ret = -ENOMEM; 2898 goto err_unpin; 2899 } 2900 2901 obj->mm.mapping = ptr_pack_bits(ptr, type); 2902 } 2903 2904 out_unlock: 2905 mutex_unlock(&obj->mm.lock); 2906 return ptr; 2907 2908 err_unpin: 2909 atomic_dec(&obj->mm.pages_pin_count); 2910 err_unlock: 2911 ptr = ERR_PTR(ret); 2912 goto out_unlock; 2913 } 2914 2915 static int 2916 i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj, 2917 const struct drm_i915_gem_pwrite *arg) 2918 { 2919 #ifndef __DragonFly__ 2920 struct address_space *mapping = obj->base.filp->f_mapping; 2921 #else 2922 struct vm_object *mapping = obj->base.filp; 2923 #endif 2924 char __user *user_data = u64_to_user_ptr(arg->data_ptr); 2925 u64 remain, offset; 2926 unsigned int pg; 2927 2928 /* Before we instantiate/pin the backing store for our use, we 2929 * can prepopulate the shmemfs filp efficiently using a write into 2930 * the pagecache. We avoid the penalty of instantiating all the 2931 * pages, important if the user is just writing to a few and never 2932 * uses the object on the GPU, and using a direct write into shmemfs 2933 * allows it to avoid the cost of retrieving a page (either swapin 2934 * or clearing-before-use) before it is overwritten. 2935 */ 2936 if (READ_ONCE(obj->mm.pages)) 2937 return -ENODEV; 2938 2939 /* Before the pages are instantiated the object is treated as being 2940 * in the CPU domain. The pages will be clflushed as required before 2941 * use, and we can freely write into the pages directly. If userspace 2942 * races pwrite with any other operation; corruption will ensue - 2943 * that is userspace's prerogative! 
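 *
 * On DragonFly there is no pagecache_write_begin()/pagecache_write_end();
 * instead each destination page is fetched with shmem_read_mapping_page(),
 * written through a temporary kmap() and released with put_page() below.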
2944 */ 2945 2946 remain = arg->size; 2947 offset = arg->offset; 2948 pg = offset_in_page(offset); 2949 2950 do { 2951 unsigned int len, unwritten; 2952 struct page *page; 2953 void *vaddr; 2954 #if 0 2955 void *data, *vaddr; 2956 int err; 2957 #endif 2958 2959 len = PAGE_SIZE - pg; 2960 if (len > remain) 2961 len = remain; 2962 2963 #ifndef __DragonFly__ 2964 err = pagecache_write_begin(obj->base.filp, mapping, 2965 offset, len, 0, 2966 &page, &data); 2967 if (err < 0) 2968 return err; 2969 #else 2970 page = shmem_read_mapping_page(mapping, OFF_TO_IDX(offset)); 2971 #endif 2972 2973 vaddr = kmap(page); 2974 unwritten = copy_from_user(vaddr + pg, user_data, len); 2975 kunmap(page); 2976 2977 #ifndef __DragonFly__ 2978 err = pagecache_write_end(obj->base.filp, mapping, 2979 offset, len, len - unwritten, 2980 page, data); 2981 if (err < 0) 2982 return err; 2983 #else 2984 put_page(page); 2985 #endif 2986 2987 if (unwritten) 2988 return -EFAULT; 2989 2990 remain -= len; 2991 user_data += len; 2992 offset += len; 2993 pg = 0; 2994 } while (remain); 2995 2996 return 0; 2997 } 2998 2999 static bool ban_context(const struct i915_gem_context *ctx) 3000 { 3001 return (i915_gem_context_is_bannable(ctx) && 3002 ctx->ban_score >= CONTEXT_SCORE_BAN_THRESHOLD); 3003 } 3004 3005 static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx) 3006 { 3007 ctx->guilty_count++; 3008 ctx->ban_score += CONTEXT_SCORE_GUILTY; 3009 if (ban_context(ctx)) 3010 i915_gem_context_set_banned(ctx); 3011 3012 DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n", 3013 ctx->name, ctx->ban_score, 3014 yesno(i915_gem_context_is_banned(ctx))); 3015 3016 if (!i915_gem_context_is_banned(ctx) || IS_ERR_OR_NULL(ctx->file_priv)) 3017 return; 3018 3019 ctx->file_priv->context_bans++; 3020 DRM_DEBUG_DRIVER("client %s has had %d context banned\n", 3021 ctx->name, ctx->file_priv->context_bans); 3022 } 3023 3024 static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx) 3025 { 3026 ctx->active_count++; 3027 } 3028 3029 struct drm_i915_gem_request * 3030 i915_gem_find_active_request(struct intel_engine_cs *engine) 3031 { 3032 struct drm_i915_gem_request *request, *active = NULL; 3033 unsigned long flags; 3034 3035 /* We are called by the error capture and reset at a random 3036 * point in time. In particular, note that neither is crucially 3037 * ordered with an interrupt. After a hang, the GPU is dead and we 3038 * assume that no more writes can happen (we waited long enough for 3039 * all writes that were in transaction to be flushed) - adding an 3040 * extra delay for a recent interrupt is pointless. Hence, we do 3041 * not need an engine->irq_seqno_barrier() before the seqno reads. 
3042 */ 3043 spin_lock_irqsave(&engine->timeline->lock, flags); 3044 list_for_each_entry(request, &engine->timeline->requests, link) { 3045 if (__i915_gem_request_completed(request, 3046 request->global_seqno)) 3047 continue; 3048 3049 GEM_BUG_ON(request->engine != engine); 3050 GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, 3051 &request->fence.flags)); 3052 3053 active = request; 3054 break; 3055 } 3056 spin_unlock_irqrestore(&engine->timeline->lock, flags); 3057 3058 return active; 3059 } 3060 3061 static bool engine_stalled(struct intel_engine_cs *engine) 3062 { 3063 if (!engine->hangcheck.stalled) 3064 return false; 3065 3066 /* Check for possible seqno movement after hang declaration */ 3067 if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) { 3068 DRM_DEBUG_DRIVER("%s pardoned\n", engine->name); 3069 return false; 3070 } 3071 3072 return true; 3073 } 3074 3075 int i915_gem_reset_prepare(struct drm_i915_private *dev_priv) 3076 { 3077 struct intel_engine_cs *engine; 3078 enum intel_engine_id id; 3079 int err = 0; 3080 3081 /* Ensure irq handler finishes, and not run again. */ 3082 for_each_engine(engine, dev_priv, id) { 3083 struct drm_i915_gem_request *request; 3084 3085 /* Prevent the signaler thread from updating the request 3086 * state (by calling dma_fence_signal) as we are processing 3087 * the reset. The write from the GPU of the seqno is 3088 * asynchronous and the signaler thread may see a different 3089 * value to us and declare the request complete, even though 3090 * the reset routine have picked that request as the active 3091 * (incomplete) request. This conflict is not handled 3092 * gracefully! 3093 */ 3094 kthread_park(engine->breadcrumbs.signaler); 3095 3096 /* Prevent request submission to the hardware until we have 3097 * completed the reset in i915_gem_reset_finish(). If a request 3098 * is completed by one engine, it may then queue a request 3099 * to a second via its engine->irq_tasklet *just* as we are 3100 * calling engine->init_hw() and also writing the ELSP. 3101 * Turning off the engine->irq_tasklet until the reset is over 3102 * prevents the race. 3103 */ 3104 tasklet_kill(&engine->irq_tasklet); 3105 tasklet_disable(&engine->irq_tasklet); 3106 3107 if (engine->irq_seqno_barrier) 3108 engine->irq_seqno_barrier(engine); 3109 3110 if (engine_stalled(engine)) { 3111 request = i915_gem_find_active_request(engine); 3112 if (request && request->fence.error == -EIO) 3113 err = -EIO; /* Previous reset failed! */ 3114 } 3115 } 3116 3117 i915_gem_revoke_fences(dev_priv); 3118 3119 return err; 3120 } 3121 3122 static void skip_request(struct drm_i915_gem_request *request) 3123 { 3124 void *vaddr = request->ring->vaddr; 3125 u32 head; 3126 3127 /* As this request likely depends on state from the lost 3128 * context, clear out all the user operations leaving the 3129 * breadcrumb at the end (so we get the fence notifications). 
3130 */ 3131 head = request->head; 3132 if (request->postfix < head) { 3133 memset(vaddr + head, 0, request->ring->size - head); 3134 head = 0; 3135 } 3136 memset(vaddr + head, 0, request->postfix - head); 3137 3138 dma_fence_set_error(&request->fence, -EIO); 3139 } 3140 3141 static void engine_skip_context(struct drm_i915_gem_request *request) 3142 { 3143 struct intel_engine_cs *engine = request->engine; 3144 struct i915_gem_context *hung_ctx = request->ctx; 3145 struct intel_timeline *timeline; 3146 unsigned long flags; 3147 3148 timeline = i915_gem_context_lookup_timeline(hung_ctx, engine); 3149 3150 spin_lock_irqsave(&engine->timeline->lock, flags); 3151 lockmgr(&timeline->lock, LK_EXCLUSIVE); 3152 3153 list_for_each_entry_continue(request, &engine->timeline->requests, link) 3154 if (request->ctx == hung_ctx) 3155 skip_request(request); 3156 3157 list_for_each_entry(request, &timeline->requests, link) 3158 skip_request(request); 3159 3160 lockmgr(&timeline->lock, LK_RELEASE); 3161 spin_unlock_irqrestore(&engine->timeline->lock, flags); 3162 } 3163 3164 /* Returns true if the request was guilty of hang */ 3165 static bool i915_gem_reset_request(struct drm_i915_gem_request *request) 3166 { 3167 /* Read once and return the resolution */ 3168 const bool guilty = engine_stalled(request->engine); 3169 3170 /* The guilty request will get skipped on a hung engine. 3171 * 3172 * Users of client default contexts do not rely on logical 3173 * state preserved between batches so it is safe to execute 3174 * queued requests following the hang. Non default contexts 3175 * rely on preserved state, so skipping a batch loses the 3176 * evolution of the state and it needs to be considered corrupted. 3177 * Executing more queued batches on top of corrupted state is 3178 * risky. But we take the risk by trying to advance through 3179 * the queued requests in order to make the client behaviour 3180 * more predictable around resets, by not throwing away random 3181 * amount of batches it has prepared for execution. Sophisticated 3182 * clients can use gem_reset_stats_ioctl and dma fence status 3183 * (exported via sync_file info ioctl on explicit fences) to observe 3184 * when it loses the context state and should rebuild accordingly. 3185 * 3186 * The context ban, and ultimately the client ban, mechanism are safety 3187 * valves if client submission ends up resulting in nothing more than 3188 * subsequent hangs. 3189 */ 3190 3191 if (guilty) { 3192 i915_gem_context_mark_guilty(request->ctx); 3193 skip_request(request); 3194 } else { 3195 i915_gem_context_mark_innocent(request->ctx); 3196 dma_fence_set_error(&request->fence, -EAGAIN); 3197 } 3198 3199 return guilty; 3200 } 3201 3202 static void i915_gem_reset_engine(struct intel_engine_cs *engine) 3203 { 3204 struct drm_i915_gem_request *request; 3205 3206 request = i915_gem_find_active_request(engine); 3207 if (request && i915_gem_reset_request(request)) { 3208 DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n", 3209 engine->name, request->global_seqno); 3210 3211 /* If this context is now banned, skip all pending requests. 
*/ 3212 if (i915_gem_context_is_banned(request->ctx)) 3213 engine_skip_context(request); 3214 } 3215 3216 /* Setup the CS to resume from the breadcrumb of the hung request */ 3217 engine->reset_hw(engine, request); 3218 } 3219 3220 void i915_gem_reset(struct drm_i915_private *dev_priv) 3221 { 3222 struct intel_engine_cs *engine; 3223 enum intel_engine_id id; 3224 3225 lockdep_assert_held(&dev_priv->drm.struct_mutex); 3226 3227 i915_gem_retire_requests(dev_priv); 3228 3229 for_each_engine(engine, dev_priv, id) { 3230 struct i915_gem_context *ctx; 3231 3232 i915_gem_reset_engine(engine); 3233 ctx = fetch_and_zero(&engine->last_retired_context); 3234 if (ctx) 3235 engine->context_unpin(engine, ctx); 3236 } 3237 3238 i915_gem_restore_fences(dev_priv); 3239 3240 if (dev_priv->gt.awake) { 3241 intel_sanitize_gt_powersave(dev_priv); 3242 intel_enable_gt_powersave(dev_priv); 3243 if (INTEL_GEN(dev_priv) >= 6) 3244 gen6_rps_busy(dev_priv); 3245 } 3246 } 3247 3248 void i915_gem_reset_finish(struct drm_i915_private *dev_priv) 3249 { 3250 struct intel_engine_cs *engine; 3251 enum intel_engine_id id; 3252 3253 lockdep_assert_held(&dev_priv->drm.struct_mutex); 3254 3255 for_each_engine(engine, dev_priv, id) { 3256 tasklet_enable(&engine->irq_tasklet); 3257 kthread_unpark(engine->breadcrumbs.signaler); 3258 } 3259 } 3260 3261 static void nop_submit_request(struct drm_i915_gem_request *request) 3262 { 3263 dma_fence_set_error(&request->fence, -EIO); 3264 i915_gem_request_submit(request); 3265 intel_engine_init_global_seqno(request->engine, request->global_seqno); 3266 } 3267 3268 static void engine_set_wedged(struct intel_engine_cs *engine) 3269 { 3270 struct drm_i915_gem_request *request; 3271 unsigned long flags; 3272 3273 /* We need to be sure that no thread is running the old callback as 3274 * we install the nop handler (otherwise we would submit a request 3275 * to hardware that will never complete). In order to prevent this 3276 * race, we wait until the machine is idle before making the swap 3277 * (using stop_machine()). 3278 */ 3279 engine->submit_request = nop_submit_request; 3280 3281 /* Mark all executing requests as skipped */ 3282 spin_lock_irqsave(&engine->timeline->lock, flags); 3283 list_for_each_entry(request, &engine->timeline->requests, link) 3284 dma_fence_set_error(&request->fence, -EIO); 3285 spin_unlock_irqrestore(&engine->timeline->lock, flags); 3286 3287 /* Mark all pending requests as complete so that any concurrent 3288 * (lockless) lookup doesn't try and wait upon the request as we 3289 * reset it. 3290 */ 3291 intel_engine_init_global_seqno(engine, 3292 intel_engine_last_submit(engine)); 3293 3294 /* 3295 * Clear the execlists queue up before freeing the requests, as those 3296 * are the ones that keep the context and ringbuffer backing objects 3297 * pinned in place. 
3298 */ 3299 3300 if (i915.enable_execlists) { 3301 unsigned long flags; 3302 3303 spin_lock_irqsave(&engine->timeline->lock, flags); 3304 3305 i915_gem_request_put(engine->execlist_port[0].request); 3306 i915_gem_request_put(engine->execlist_port[1].request); 3307 memset(engine->execlist_port, 0, sizeof(engine->execlist_port)); 3308 engine->execlist_queue = LINUX_RB_ROOT; 3309 engine->execlist_first = NULL; 3310 3311 spin_unlock_irqrestore(&engine->timeline->lock, flags); 3312 } 3313 } 3314 3315 static int __i915_gem_set_wedged_BKL(void *data) 3316 { 3317 struct drm_i915_private *i915 = data; 3318 struct intel_engine_cs *engine; 3319 enum intel_engine_id id; 3320 3321 for_each_engine(engine, i915, id) 3322 engine_set_wedged(engine); 3323 3324 return 0; 3325 } 3326 3327 void i915_gem_set_wedged(struct drm_i915_private *dev_priv) 3328 { 3329 lockdep_assert_held(&dev_priv->drm.struct_mutex); 3330 set_bit(I915_WEDGED, &dev_priv->gpu_error.flags); 3331 3332 /* Retire completed requests first so the list of inflight/incomplete 3333 * requests is accurate and we don't try and mark successful requests 3334 * as in error during __i915_gem_set_wedged_BKL(). 3335 */ 3336 i915_gem_retire_requests(dev_priv); 3337 3338 stop_machine(__i915_gem_set_wedged_BKL, dev_priv, NULL); 3339 3340 i915_gem_context_lost(dev_priv); 3341 3342 mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0); 3343 } 3344 3345 bool i915_gem_unset_wedged(struct drm_i915_private *i915) 3346 { 3347 struct i915_gem_timeline *tl; 3348 int i; 3349 3350 lockdep_assert_held(&i915->drm.struct_mutex); 3351 if (!test_bit(I915_WEDGED, &i915->gpu_error.flags)) 3352 return true; 3353 3354 /* Before unwedging, make sure that all pending operations 3355 * are flushed and errored out - we may have requests waiting upon 3356 * third party fences. We marked all inflight requests as EIO, and 3357 * every execbuf since returned EIO, for consistency we want all 3358 * the currently pending requests to also be marked as EIO, which 3359 * is done inside our nop_submit_request - and so we must wait. 3360 * 3361 * No more can be submitted until we reset the wedged bit. 3362 */ 3363 list_for_each_entry(tl, &i915->gt.timelines, link) { 3364 for (i = 0; i < ARRAY_SIZE(tl->engine); i++) { 3365 struct drm_i915_gem_request *rq; 3366 3367 rq = i915_gem_active_peek(&tl->engine[i].last_request, 3368 &i915->drm.struct_mutex); 3369 if (!rq) 3370 continue; 3371 3372 /* We can't use our normal waiter as we want to 3373 * avoid recursively trying to handle the current 3374 * reset. The basic dma_fence_default_wait() installs 3375 * a callback for dma_fence_signal(), which is 3376 * triggered by our nop handler (indirectly, the 3377 * callback enables the signaler thread which is 3378 * woken by the nop_submit_request() advancing the seqno 3379 * and when the seqno passes the fence, the signaler 3380 * then signals the fence waking us up). 3381 */ 3382 if (dma_fence_default_wait(&rq->fence, true, 3383 MAX_SCHEDULE_TIMEOUT) < 0) 3384 return false; 3385 } 3386 } 3387 3388 /* Undo nop_submit_request. We prevent all new i915 requests from 3389 * being queued (by disallowing execbuf whilst wedged) so having 3390 * waited for all active requests above, we know the system is idle 3391 * and do not have to worry about a thread being inside 3392 * engine->submit_request() as we swap over. So unlike installing 3393 * the nop_submit_request on reset, we can do this from normal 3394 * context and do not require stop_machine(). 
3395 */ 3396 intel_engines_reset_default_submission(i915); 3397 3398 smp_mb__before_atomic(); /* complete takeover before enabling execbuf */ 3399 clear_bit(I915_WEDGED, &i915->gpu_error.flags); 3400 3401 return true; 3402 } 3403 3404 static void 3405 i915_gem_retire_work_handler(struct work_struct *work) 3406 { 3407 struct drm_i915_private *dev_priv = 3408 container_of(work, typeof(*dev_priv), gt.retire_work.work); 3409 struct drm_device *dev = &dev_priv->drm; 3410 3411 /* Come back later if the device is busy... */ 3412 if (mutex_trylock(&dev->struct_mutex)) { 3413 i915_gem_retire_requests(dev_priv); 3414 mutex_unlock(&dev->struct_mutex); 3415 } 3416 3417 /* Keep the retire handler running until we are finally idle. 3418 * We do not need to do this test under locking as in the worst-case 3419 * we queue the retire worker once too often. 3420 */ 3421 if (READ_ONCE(dev_priv->gt.awake)) { 3422 i915_queue_hangcheck(dev_priv); 3423 queue_delayed_work(dev_priv->wq, 3424 &dev_priv->gt.retire_work, 3425 round_jiffies_up_relative(HZ)); 3426 } 3427 } 3428 3429 static void 3430 i915_gem_idle_work_handler(struct work_struct *work) 3431 { 3432 struct drm_i915_private *dev_priv = 3433 container_of(work, typeof(*dev_priv), gt.idle_work.work); 3434 struct drm_device *dev = &dev_priv->drm; 3435 struct intel_engine_cs *engine; 3436 enum intel_engine_id id; 3437 bool rearm_hangcheck; 3438 3439 if (!READ_ONCE(dev_priv->gt.awake)) 3440 return; 3441 3442 /* 3443 * Wait for last execlists context complete, but bail out in case a 3444 * new request is submitted. 3445 */ 3446 wait_for(intel_engines_are_idle(dev_priv), 10); 3447 if (READ_ONCE(dev_priv->gt.active_requests)) 3448 return; 3449 3450 rearm_hangcheck = 3451 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); 3452 3453 if (!mutex_trylock(&dev->struct_mutex)) { 3454 /* Currently busy, come back later */ 3455 mod_delayed_work(dev_priv->wq, 3456 &dev_priv->gt.idle_work, 3457 msecs_to_jiffies(50)); 3458 goto out_rearm; 3459 } 3460 3461 /* 3462 * New request retired after this work handler started, extend active 3463 * period until next instance of the work. 
3464 */ 3465 if (work_pending(work)) 3466 goto out_unlock; 3467 3468 if (dev_priv->gt.active_requests) 3469 goto out_unlock; 3470 3471 if (wait_for(intel_engines_are_idle(dev_priv), 10)) 3472 DRM_ERROR("Timeout waiting for engines to idle\n"); 3473 3474 for_each_engine(engine, dev_priv, id) { 3475 intel_engine_disarm_breadcrumbs(engine); 3476 i915_gem_batch_pool_fini(&engine->batch_pool); 3477 } 3478 3479 GEM_BUG_ON(!dev_priv->gt.awake); 3480 dev_priv->gt.awake = false; 3481 rearm_hangcheck = false; 3482 3483 if (INTEL_GEN(dev_priv) >= 6) 3484 gen6_rps_idle(dev_priv); 3485 intel_runtime_pm_put(dev_priv); 3486 out_unlock: 3487 mutex_unlock(&dev->struct_mutex); 3488 3489 out_rearm: 3490 if (rearm_hangcheck) { 3491 GEM_BUG_ON(!dev_priv->gt.awake); 3492 i915_queue_hangcheck(dev_priv); 3493 } 3494 } 3495 3496 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file) 3497 { 3498 struct drm_i915_gem_object *obj = to_intel_bo(gem); 3499 struct drm_i915_file_private *fpriv = file->driver_priv; 3500 struct i915_vma *vma, *vn; 3501 3502 mutex_lock(&obj->base.dev->struct_mutex); 3503 list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link) 3504 if (vma->vm->file == fpriv) 3505 i915_vma_close(vma); 3506 3507 if (i915_gem_object_is_active(obj) && 3508 !i915_gem_object_has_active_reference(obj)) { 3509 i915_gem_object_set_active_reference(obj); 3510 i915_gem_object_get(obj); 3511 } 3512 mutex_unlock(&obj->base.dev->struct_mutex); 3513 } 3514 3515 static unsigned long to_wait_timeout(s64 timeout_ns) 3516 { 3517 if (timeout_ns < 0) 3518 return MAX_SCHEDULE_TIMEOUT; 3519 3520 if (timeout_ns == 0) 3521 return 0; 3522 3523 return nsecs_to_jiffies_timeout(timeout_ns); 3524 } 3525 3526 /** 3527 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT 3528 * @dev: drm device pointer 3529 * @data: ioctl data blob 3530 * @file: drm file pointer 3531 * 3532 * Returns 0 if successful, else an error is returned with the remaining time in 3533 * the timeout parameter. 3534 * -ETIME: object is still busy after timeout 3535 * -ERESTARTSYS: signal interrupted the wait 3536 * -ENONENT: object doesn't exist 3537 * Also possible, but rare: 3538 * -EAGAIN: GPU wedged 3539 * -ENOMEM: damn 3540 * -ENODEV: Internal IRQ fail 3541 * -E?: The add request failed 3542 * 3543 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any 3544 * non-zero timeout parameter the wait ioctl will wait for the given number of 3545 * nanoseconds on an object becoming unbusy. Since the wait itself does so 3546 * without holding struct_mutex the object may become re-busied before this 3547 * function completes. 
A similar but shorter * race condition exists in the busy 3548 * ioctl 3549 */ 3550 int 3551 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) 3552 { 3553 struct drm_i915_gem_wait *args = data; 3554 struct drm_i915_gem_object *obj; 3555 ktime_t start; 3556 long ret; 3557 3558 if (args->flags != 0) 3559 return -EINVAL; 3560 3561 obj = i915_gem_object_lookup(file, args->bo_handle); 3562 if (!obj) 3563 return -ENOENT; 3564 3565 start = ktime_get(); 3566 3567 ret = i915_gem_object_wait(obj, 3568 I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL, 3569 to_wait_timeout(args->timeout_ns), 3570 to_rps_client(file)); 3571 3572 if (args->timeout_ns > 0) { 3573 args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start)); 3574 if (args->timeout_ns < 0) 3575 args->timeout_ns = 0; 3576 3577 /* 3578 * Apparently ktime isn't accurate enough and occasionally has a 3579 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch 3580 * things up to make the test happy. We allow up to 1 jiffy. 3581 * 3582 * This is a regression from the timespec->ktime conversion. 3583 */ 3584 if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns)) 3585 args->timeout_ns = 0; 3596 } 3597 3598 i915_gem_object_put(obj); 3599 return ret; 3600 } 3601
3602 static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags) 3603 { 3604 int ret, i; 3605 3606 for (i = 0; i < ARRAY_SIZE(tl->engine); i++) { 3607 ret = i915_gem_active_wait(&tl->engine[i].last_request, flags); 3608 if (ret) 3609 return ret; 3610 } 3611 3612 return 0; 3613 } 3614
3615 static int wait_for_engine(struct intel_engine_cs *engine, int timeout_ms) 3616 { 3617 return wait_for(intel_engine_is_idle(engine), timeout_ms); 3618 } 3619
3620 static int wait_for_engines(struct drm_i915_private *i915) 3621 { 3622 struct intel_engine_cs *engine; 3623 enum intel_engine_id id; 3624 3625 for_each_engine(engine, i915, id) { 3626 if (GEM_WARN_ON(wait_for_engine(engine, 50))) { 3627 i915_gem_set_wedged(i915); 3628 return -EIO; 3629 } 3630 3631 GEM_BUG_ON(intel_engine_get_seqno(engine) != 3632 intel_engine_last_submit(engine)); 3633 } 3634 3635 return 0; 3636 } 3637
3638 int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags) 3639 { 3640 int ret; 3641 3642 /* If the device is asleep, we have no requests outstanding */ 3643 if (!READ_ONCE(i915->gt.awake)) 3644 return 0; 3645 3646 if (flags & I915_WAIT_LOCKED) { 3647 struct i915_gem_timeline *tl; 3648 3649 lockdep_assert_held(&i915->drm.struct_mutex); 3650 3651 list_for_each_entry(tl, &i915->gt.timelines, link) { 3652 ret = wait_for_timeline(tl, flags); 3653 if (ret) 3654 return ret; 3655 } 3656 3657 i915_gem_retire_requests(i915); 3658 GEM_BUG_ON(i915->gt.active_requests); 3659 3660 ret = wait_for_engines(i915); 3661 } else { 3662 ret = wait_for_timeline(&i915->gt.global_timeline, flags); 3663 } 3664 3665 return ret; 3666 } 3667 3668 /** Flushes the GTT write domain for the object if it's dirty.
*/ 3669 static void 3670 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj) 3671 { 3672 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 3673 3674 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT) 3675 return; 3676 3677 /* No actual flushing is required for the GTT write domain. Writes 3678 * to it "immediately" go to main memory as far as we know, so there's 3679 * no chipset flush. It also doesn't land in render cache. 3680 * 3681 * However, we do have to enforce the order so that all writes through 3682 * the GTT land before any writes to the device, such as updates to 3683 * the GATT itself. 3684 * 3685 * We also have to wait a bit for the writes to land from the GTT. 3686 * An uncached read (i.e. mmio) seems to be ideal for the round-trip 3687 * timing. This issue has only been observed when switching quickly 3688 * between GTT writes and CPU reads from inside the kernel on recent hw, 3689 * and it appears to only affect discrete GTT blocks (i.e. on LLC 3690 * system agents we cannot reproduce this behaviour). 3691 */ 3692 wmb(); 3693 if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv)) { 3694 if (intel_runtime_pm_get_if_in_use(dev_priv)) { 3695 spin_lock_irq(&dev_priv->uncore.lock); 3696 POSTING_READ_FW(RING_ACTHD(dev_priv->engine[RCS]->mmio_base)); 3697 spin_unlock_irq(&dev_priv->uncore.lock); 3698 intel_runtime_pm_put(dev_priv); 3699 } 3700 } 3701 3702 intel_fb_obj_flush(obj, write_origin(obj, I915_GEM_DOMAIN_GTT)); 3703 3704 obj->base.write_domain = 0; 3705 } 3706 3707 /** Flushes the CPU write domain for the object if it's dirty. */ 3708 static void 3709 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) 3710 { 3711 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) 3712 return; 3713 3714 i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC); 3715 obj->base.write_domain = 0; 3716 } 3717 3718 static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj) 3719 { 3720 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU && !obj->cache_dirty) 3721 return; 3722 3723 i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE); 3724 obj->base.write_domain = 0; 3725 } 3726 3727 void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj) 3728 { 3729 if (!READ_ONCE(obj->pin_display)) 3730 return; 3731 3732 mutex_lock(&obj->base.dev->struct_mutex); 3733 __i915_gem_object_flush_for_display(obj); 3734 mutex_unlock(&obj->base.dev->struct_mutex); 3735 } 3736 3737 /** 3738 * Moves a single object to the GTT read, and possibly write domain. 3739 * @obj: object to act on 3740 * @write: ask for write access or read only 3741 * 3742 * This function returns when the move is complete, including waiting on 3743 * flushes to occur. 3744 */ 3745 int 3746 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) 3747 { 3748 int ret; 3749 3750 lockdep_assert_held(&obj->base.dev->struct_mutex); 3751 3752 ret = i915_gem_object_wait(obj, 3753 I915_WAIT_INTERRUPTIBLE | 3754 I915_WAIT_LOCKED | 3755 (write ? I915_WAIT_ALL : 0), 3756 MAX_SCHEDULE_TIMEOUT, 3757 NULL); 3758 if (ret) 3759 return ret; 3760 3761 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) 3762 return 0; 3763 3764 /* Flush and acquire obj->pages so that we are coherent through 3765 * direct access in memory with previous cached writes through 3766 * shmemfs and that our cache domain tracking remains valid. 
3767 * For example, if the obj->filp was moved to swap without us 3768 * being notified and releasing the pages, we would mistakenly 3769 * continue to assume that the obj remained out of the CPU cached 3770 * domain. 3771 */ 3772 ret = i915_gem_object_pin_pages(obj); 3773 if (ret) 3774 return ret; 3775 3776 i915_gem_object_flush_cpu_write_domain(obj); 3777 3778 /* Serialise direct access to this object with the barriers for 3779 * coherent writes from the GPU, by effectively invalidating the 3780 * GTT domain upon first access. 3781 */ 3782 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) 3783 mb(); 3784 3785 /* It should now be out of any other write domains, and we can update 3786 * the domain values for our changes. 3787 */ 3788 GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0); 3789 obj->base.read_domains |= I915_GEM_DOMAIN_GTT; 3790 if (write) { 3791 obj->base.read_domains = I915_GEM_DOMAIN_GTT; 3792 obj->base.write_domain = I915_GEM_DOMAIN_GTT; 3793 obj->mm.dirty = true; 3794 } 3795 3796 i915_gem_object_unpin_pages(obj); 3797 return 0; 3798 } 3799 3800 /** 3801 * Changes the cache-level of an object across all VMA. 3802 * @obj: object to act on 3803 * @cache_level: new cache level to set for the object 3804 * 3805 * After this function returns, the object will be in the new cache-level 3806 * across all GTT and the contents of the backing storage will be coherent, 3807 * with respect to the new cache-level. In order to keep the backing storage 3808 * coherent for all users, we only allow a single cache level to be set 3809 * globally on the object and prevent it from being changed whilst the 3810 * hardware is reading from the object. That is if the object is currently 3811 * on the scanout it will be set to uncached (or equivalent display 3812 * cache coherency) and all non-MOCS GPU access will also be uncached so 3813 * that all direct access to the scanout remains coherent. 3814 */ 3815 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, 3816 enum i915_cache_level cache_level) 3817 { 3818 struct i915_vma *vma; 3819 int ret; 3820 3821 lockdep_assert_held(&obj->base.dev->struct_mutex); 3822 3823 if (obj->cache_level == cache_level) 3824 return 0; 3825 3826 /* Inspect the list of currently bound VMA and unbind any that would 3827 * be invalid given the new cache-level. This is principally to 3828 * catch the issue of the CS prefetch crossing page boundaries and 3829 * reading an invalid PTE on older architectures. 3830 */ 3831 restart: 3832 list_for_each_entry(vma, &obj->vma_list, obj_link) { 3833 if (!drm_mm_node_allocated(&vma->node)) 3834 continue; 3835 3836 if (i915_vma_is_pinned(vma)) { 3837 DRM_DEBUG("can not change the cache level of pinned objects\n"); 3838 return -EBUSY; 3839 } 3840 3841 if (i915_gem_valid_gtt_space(vma, cache_level)) 3842 continue; 3843 3844 ret = i915_vma_unbind(vma); 3845 if (ret) 3846 return ret; 3847 3848 /* As unbinding may affect other elements in the 3849 * obj->vma_list (due to side-effects from retiring 3850 * an active vma), play safe and restart the iterator. 3851 */ 3852 goto restart; 3853 } 3854 3855 /* We can reuse the existing drm_mm nodes but need to change the 3856 * cache-level on the PTE. We could simply unbind them all and 3857 * rebind with the correct cache-level on next use. However since 3858 * we already have a valid slot, dma mapping, pages etc, we may as 3859 * rewrite the PTE in the belief that doing so tramples upon less 3860 * state and so involves less work. 
3861 */ 3862 if (obj->bind_count) { 3863 /* Before we change the PTE, the GPU must not be accessing it. 3864 * If we wait upon the object, we know that all the bound 3865 * VMA are no longer active. 3866 */ 3867 ret = i915_gem_object_wait(obj, 3868 I915_WAIT_INTERRUPTIBLE | 3869 I915_WAIT_LOCKED | 3870 I915_WAIT_ALL, 3871 MAX_SCHEDULE_TIMEOUT, 3872 NULL); 3873 if (ret) 3874 return ret; 3875 3876 if (!HAS_LLC(to_i915(obj->base.dev)) && 3877 cache_level != I915_CACHE_NONE) { 3878 /* Access to snoopable pages through the GTT is 3879 * incoherent and on some machines causes a hard 3880 * lockup. Relinquish the CPU mmaping to force 3881 * userspace to refault in the pages and we can 3882 * then double check if the GTT mapping is still 3883 * valid for that pointer access. 3884 */ 3885 i915_gem_release_mmap(obj); 3886 3887 /* As we no longer need a fence for GTT access, 3888 * we can relinquish it now (and so prevent having 3889 * to steal a fence from someone else on the next 3890 * fence request). Note GPU activity would have 3891 * dropped the fence as all snoopable access is 3892 * supposed to be linear. 3893 */ 3894 list_for_each_entry(vma, &obj->vma_list, obj_link) { 3895 ret = i915_vma_put_fence(vma); 3896 if (ret) 3897 return ret; 3898 } 3899 } else { 3900 /* We either have incoherent backing store and 3901 * so no GTT access or the architecture is fully 3902 * coherent. In such cases, existing GTT mmaps 3903 * ignore the cache bit in the PTE and we can 3904 * rewrite it without confusing the GPU or having 3905 * to force userspace to fault back in its mmaps. 3906 */ 3907 } 3908 3909 list_for_each_entry(vma, &obj->vma_list, obj_link) { 3910 if (!drm_mm_node_allocated(&vma->node)) 3911 continue; 3912 3913 ret = i915_vma_bind(vma, cache_level, PIN_UPDATE); 3914 if (ret) 3915 return ret; 3916 } 3917 } 3918 3919 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU && 3920 i915_gem_object_is_coherent(obj)) 3921 obj->cache_dirty = true; 3922 3923 list_for_each_entry(vma, &obj->vma_list, obj_link) 3924 vma->node.color = cache_level; 3925 obj->cache_level = cache_level; 3926 3927 return 0; 3928 } 3929 3930 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, 3931 struct drm_file *file) 3932 { 3933 struct drm_i915_gem_caching *args = data; 3934 struct drm_i915_gem_object *obj; 3935 int err = 0; 3936 3937 rcu_read_lock(); 3938 obj = i915_gem_object_lookup_rcu(file, args->handle); 3939 if (!obj) { 3940 err = -ENOENT; 3941 goto out; 3942 } 3943 3944 switch (obj->cache_level) { 3945 case I915_CACHE_LLC: 3946 case I915_CACHE_L3_LLC: 3947 args->caching = I915_CACHING_CACHED; 3948 break; 3949 3950 case I915_CACHE_WT: 3951 args->caching = I915_CACHING_DISPLAY; 3952 break; 3953 3954 default: 3955 args->caching = I915_CACHING_NONE; 3956 break; 3957 } 3958 out: 3959 rcu_read_unlock(); 3960 return err; 3961 } 3962 3963 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, 3964 struct drm_file *file) 3965 { 3966 struct drm_i915_private *i915 = to_i915(dev); 3967 struct drm_i915_gem_caching *args = data; 3968 struct drm_i915_gem_object *obj; 3969 enum i915_cache_level level; 3970 int ret = 0; 3971 3972 switch (args->caching) { 3973 case I915_CACHING_NONE: 3974 level = I915_CACHE_NONE; 3975 break; 3976 case I915_CACHING_CACHED: 3977 /* 3978 * Due to a HW issue on BXT A stepping, GPU stores via a 3979 * snooped mapping may leave stale data in a corresponding CPU 3980 * cacheline, whereas normally such cachelines would get 3981 * invalidated. 
3982 */ 3983 if (!HAS_LLC(i915) && !HAS_SNOOP(i915)) 3984 return -ENODEV; 3985 3986 level = I915_CACHE_LLC; 3987 break; 3988 case I915_CACHING_DISPLAY: 3989 level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE; 3990 break; 3991 default: 3992 return -EINVAL; 3993 } 3994 3995 obj = i915_gem_object_lookup(file, args->handle); 3996 if (!obj) 3997 return -ENOENT; 3998 3999 if (obj->cache_level == level) 4000 goto out; 4001 4002 ret = i915_gem_object_wait(obj, 4003 I915_WAIT_INTERRUPTIBLE, 4004 MAX_SCHEDULE_TIMEOUT, 4005 to_rps_client(file)); 4006 if (ret) 4007 goto out; 4008 4009 ret = i915_mutex_lock_interruptible(dev); 4010 if (ret) 4011 goto out; 4012 4013 ret = i915_gem_object_set_cache_level(obj, level); 4014 mutex_unlock(&dev->struct_mutex); 4015 4016 out: 4017 i915_gem_object_put(obj); 4018 return ret; 4019 } 4020 4021 /* 4022 * Prepare buffer for display plane (scanout, cursors, etc). 4023 * Can be called from an uninterruptible phase (modesetting) and allows 4024 * any flushes to be pipelined (for pageflips). 4025 */ 4026 struct i915_vma * 4027 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, 4028 u32 alignment, 4029 const struct i915_ggtt_view *view) 4030 { 4031 struct i915_vma *vma; 4032 int ret; 4033 4034 lockdep_assert_held(&obj->base.dev->struct_mutex); 4035 4036 /* Mark the pin_display early so that we account for the 4037 * display coherency whilst setting up the cache domains. 4038 */ 4039 obj->pin_display++; 4040 4041 /* The display engine is not coherent with the LLC cache on gen6. As 4042 * a result, we make sure that the pinning that is about to occur is 4043 * done with uncached PTEs. This is lowest common denominator for all 4044 * chipsets. 4045 * 4046 * However for gen6+, we could do better by using the GFDT bit instead 4047 * of uncaching, which would allow us to flush all the LLC-cached data 4048 * with that bit in the PTE to main memory with just one PIPE_CONTROL. 4049 */ 4050 ret = i915_gem_object_set_cache_level(obj, 4051 HAS_WT(to_i915(obj->base.dev)) ? 4052 I915_CACHE_WT : I915_CACHE_NONE); 4053 if (ret) { 4054 vma = ERR_PTR(ret); 4055 goto err_unpin_display; 4056 } 4057 4058 /* As the user may map the buffer once pinned in the display plane 4059 * (e.g. libkms for the bootup splash), we have to ensure that we 4060 * always use map_and_fenceable for all scanout buffers. However, 4061 * it may simply be too big to fit into mappable, in which case 4062 * put it anyway and hope that userspace can cope (but always first 4063 * try to preserve the existing ABI). 4064 */ 4065 vma = ERR_PTR(-ENOSPC); 4066 if (!view || view->type == I915_GGTT_VIEW_NORMAL) 4067 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, 4068 PIN_MAPPABLE | PIN_NONBLOCK); 4069 if (IS_ERR(vma)) { 4070 struct drm_i915_private *i915 = to_i915(obj->base.dev); 4071 unsigned int flags; 4072 4073 /* Valleyview is definitely limited to scanning out the first 4074 * 512MiB. Lets presume this behaviour was inherited from the 4075 * g4x display engine and that all earlier gen are similarly 4076 * limited. Testing suggests that it is a little more 4077 * complicated than this. For example, Cherryview appears quite 4078 * happy to scanout from anywhere within its global aperture. 
4079 */ 4080 flags = 0; 4081 if (HAS_GMCH_DISPLAY(i915)) 4082 flags = PIN_MAPPABLE; 4083 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags); 4084 } 4085 if (IS_ERR(vma)) 4086 goto err_unpin_display; 4087 4088 vma->display_alignment = max_t(u64, vma->display_alignment, alignment); 4089 4090 /* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */ 4091 __i915_gem_object_flush_for_display(obj); 4092 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB); 4093 4094 /* It should now be out of any other write domains, and we can update 4095 * the domain values for our changes. 4096 */ 4097 obj->base.read_domains |= I915_GEM_DOMAIN_GTT; 4098 4099 return vma; 4100 4101 err_unpin_display: 4102 obj->pin_display--; 4103 return vma; 4104 } 4105 4106 void 4107 i915_gem_object_unpin_from_display_plane(struct i915_vma *vma) 4108 { 4109 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); 4110 4111 if (WARN_ON(vma->obj->pin_display == 0)) 4112 return; 4113 4114 if (--vma->obj->pin_display == 0) 4115 vma->display_alignment = I915_GTT_MIN_ALIGNMENT; 4116 4117 /* Bump the LRU to try and avoid premature eviction whilst flipping */ 4118 i915_gem_object_bump_inactive_ggtt(vma->obj); 4119 4120 i915_vma_unpin(vma); 4121 } 4122 4123 /** 4124 * Moves a single object to the CPU read, and possibly write domain. 4125 * @obj: object to act on 4126 * @write: requesting write or read-only access 4127 * 4128 * This function returns when the move is complete, including waiting on 4129 * flushes to occur. 4130 */ 4131 int 4132 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) 4133 { 4134 int ret; 4135 4136 lockdep_assert_held(&obj->base.dev->struct_mutex); 4137 4138 ret = i915_gem_object_wait(obj, 4139 I915_WAIT_INTERRUPTIBLE | 4140 I915_WAIT_LOCKED | 4141 (write ? I915_WAIT_ALL : 0), 4142 MAX_SCHEDULE_TIMEOUT, 4143 NULL); 4144 if (ret) 4145 return ret; 4146 4147 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) 4148 return 0; 4149 4150 i915_gem_object_flush_gtt_write_domain(obj); 4151 4152 /* Flush the CPU cache if it's still invalid. */ 4153 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) { 4154 i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC); 4155 obj->base.read_domains |= I915_GEM_DOMAIN_CPU; 4156 } 4157 4158 /* It should now be out of any other write domains, and we can update 4159 * the domain values for our changes. 4160 */ 4161 GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0); 4162 4163 /* If we're writing through the CPU, then the GPU read domains will 4164 * need to be invalidated at next use. 4165 */ 4166 if (write) { 4167 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 4168 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 4169 } 4170 4171 return 0; 4172 } 4173 4174 /* Throttle our rendering by waiting until the ring has completed our requests 4175 * emitted over 20 msec ago. 4176 * 4177 * Note that if we were to use the current jiffies each time around the loop, 4178 * we wouldn't escape the function with any frames outstanding if the time to 4179 * render a frame was over 20ms. 4180 * 4181 * This should get us reasonable parallelism between CPU and GPU but also 4182 * relatively low latency when blocking on a particular request to finish. 
4183 */ 4184 static int 4185 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) 4186 { 4187 struct drm_i915_private *dev_priv = to_i915(dev); 4188 struct drm_i915_file_private *file_priv = file->driver_priv; 4189 unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES; 4190 struct drm_i915_gem_request *request, *target = NULL; 4191 long ret; 4192 4193 /* ABI: return -EIO if already wedged */ 4194 if (i915_terminally_wedged(&dev_priv->gpu_error)) 4195 return -EIO; 4196 4197 lockmgr(&file_priv->mm.lock, LK_EXCLUSIVE); 4198 list_for_each_entry(request, &file_priv->mm.request_list, client_link) { 4199 if (time_after_eq(request->emitted_jiffies, recent_enough)) 4200 break; 4201 4202 if (target) { 4203 list_del(&target->client_link); 4204 target->file_priv = NULL; 4205 } 4206 4207 target = request; 4208 } 4209 if (target) 4210 i915_gem_request_get(target); 4211 lockmgr(&file_priv->mm.lock, LK_RELEASE); 4212 4213 if (target == NULL) 4214 return 0; 4215 4216 ret = i915_wait_request(target, 4217 I915_WAIT_INTERRUPTIBLE, 4218 MAX_SCHEDULE_TIMEOUT); 4219 i915_gem_request_put(target); 4220 4221 return ret < 0 ? ret : 0; 4222 } 4223 4224 struct i915_vma * 4225 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, 4226 const struct i915_ggtt_view *view, 4227 u64 size, 4228 u64 alignment, 4229 u64 flags) 4230 { 4231 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 4232 struct i915_address_space *vm = &dev_priv->ggtt.base; 4233 struct i915_vma *vma; 4234 int ret; 4235 4236 lockdep_assert_held(&obj->base.dev->struct_mutex); 4237 4238 vma = i915_vma_instance(obj, vm, view); 4239 if (unlikely(IS_ERR(vma))) 4240 return vma; 4241 4242 if (i915_vma_misplaced(vma, size, alignment, flags)) { 4243 if (flags & PIN_NONBLOCK && 4244 (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))) 4245 return ERR_PTR(-ENOSPC); 4246 4247 if (flags & PIN_MAPPABLE) { 4248 /* If the required space is larger than the available 4249 * aperture, we will not able to find a slot for the 4250 * object and unbinding the object now will be in 4251 * vain. Worse, doing so may cause us to ping-pong 4252 * the object in and out of the Global GTT and 4253 * waste a lot of cycles under the mutex. 4254 */ 4255 if (vma->fence_size > dev_priv->ggtt.mappable_end) 4256 return ERR_PTR(-E2BIG); 4257 4258 /* If NONBLOCK is set the caller is optimistically 4259 * trying to cache the full object within the mappable 4260 * aperture, and *must* have a fallback in place for 4261 * situations where we cannot bind the object. We 4262 * can be a little more lax here and use the fallback 4263 * more often to avoid costly migrations of ourselves 4264 * and other objects within the aperture. 4265 * 4266 * Half-the-aperture is used as a simple heuristic. 4267 * More interesting would to do search for a free 4268 * block prior to making the commitment to unbind. 4269 * That caters for the self-harm case, and with a 4270 * little more heuristics (e.g. NOFAULT, NOEVICT) 4271 * we could try to minimise harm to others. 
4272 */ 4273 if (flags & PIN_NONBLOCK && 4274 vma->fence_size > dev_priv->ggtt.mappable_end / 2) 4275 return ERR_PTR(-ENOSPC); 4276 } 4277 4278 WARN(i915_vma_is_pinned(vma), 4279 "bo is already pinned in ggtt with incorrect alignment:" 4280 " offset=%08x, req.alignment=%llx," 4281 " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n", 4282 i915_ggtt_offset(vma), alignment, 4283 !!(flags & PIN_MAPPABLE), 4284 i915_vma_is_map_and_fenceable(vma)); 4285 ret = i915_vma_unbind(vma); 4286 if (ret) 4287 return ERR_PTR(ret); 4288 } 4289 4290 ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL); 4291 if (ret) 4292 return ERR_PTR(ret); 4293 4294 return vma; 4295 } 4296 4297 static __always_inline unsigned int __busy_read_flag(unsigned int id) 4298 { 4299 /* Note that we could alias engines in the execbuf API, but 4300 * that would be very unwise as it prevents userspace from 4301 * fine control over engine selection. Ahem. 4302 * 4303 * This should be something like EXEC_MAX_ENGINE instead of 4304 * I915_NUM_ENGINES. 4305 */ 4306 BUILD_BUG_ON(I915_NUM_ENGINES > 16); 4307 return 0x10000 << id; 4308 } 4309 4310 static __always_inline unsigned int __busy_write_id(unsigned int id) 4311 { 4312 /* The uABI guarantees an active writer is also amongst the read 4313 * engines. This would be true if we accessed the activity tracking 4314 * under the lock, but as we perform the lookup of the object and 4315 * its activity locklessly we can not guarantee that the last_write 4316 * being active implies that we have set the same engine flag from 4317 * last_read - hence we always set both read and write busy for 4318 * last_write. 4319 */ 4320 return id | __busy_read_flag(id); 4321 } 4322 4323 #pragma GCC diagnostic push 4324 #pragma GCC diagnostic ignored "-Wdiscarded-qualifiers" 4325 4326 static __always_inline unsigned int 4327 __busy_set_if_active(const struct dma_fence *fence, 4328 unsigned int (*flag)(unsigned int id)) 4329 { 4330 struct drm_i915_gem_request *rq; 4331 4332 /* We have to check the current hw status of the fence as the uABI 4333 * guarantees forward progress. We could rely on the idle worker 4334 * to eventually flush us, but to minimise latency just ask the 4335 * hardware. 4336 * 4337 * Note we only report on the status of native fences. 4338 */ 4339 if (!dma_fence_is_i915(fence)) 4340 return 0; 4341 4342 /* opencode to_request() in order to avoid const warnings */ 4343 rq = container_of(fence, struct drm_i915_gem_request, fence); 4344 if (i915_gem_request_completed(rq)) 4345 return 0; 4346 4347 return flag(rq->engine->exec_id); 4348 } 4349 #pragma GCC diagnostic pop 4350 4351 static __always_inline unsigned int 4352 busy_check_reader(const struct dma_fence *fence) 4353 { 4354 return __busy_set_if_active(fence, __busy_read_flag); 4355 } 4356 4357 static __always_inline unsigned int 4358 busy_check_writer(const struct dma_fence *fence) 4359 { 4360 if (!fence) 4361 return 0; 4362 4363 return __busy_set_if_active(fence, __busy_write_id); 4364 } 4365 4366 int 4367 i915_gem_busy_ioctl(struct drm_device *dev, void *data, 4368 struct drm_file *file) 4369 { 4370 struct drm_i915_gem_busy *args = data; 4371 struct drm_i915_gem_object *obj; 4372 struct reservation_object_list *list; 4373 unsigned int seq; 4374 int err; 4375 4376 err = -ENOENT; 4377 rcu_read_lock(); 4378 obj = i915_gem_object_lookup_rcu(file, args->handle); 4379 if (!obj) 4380 goto out; 4381 4382 /* A discrepancy here is that we do not report the status of 4383 * non-i915 fences, i.e. 
even though we may report the object as idle, 4384 * a call to set-domain may still stall waiting for foreign rendering. 4385 * This also means that wait-ioctl may report an object as busy, 4386 * where busy-ioctl considers it idle. 4387 * 4388 * We trade the ability to warn of foreign fences to report on which 4389 * i915 engines are active for the object. 4390 * 4391 * Alternatively, we can trade that extra information on read/write 4392 * activity with 4393 * args->busy = 4394 * !reservation_object_test_signaled_rcu(obj->resv, true); 4395 * to report the overall busyness. This is what the wait-ioctl does. 4396 * 4397 */ 4398 retry: 4399 seq = raw_read_seqcount(&obj->resv->seq); 4400 4401 /* Translate the exclusive fence to the READ *and* WRITE engine */ 4402 args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl)); 4403 4404 /* Translate shared fences to READ set of engines */ 4405 list = rcu_dereference(obj->resv->fence); 4406 if (list) { 4407 unsigned int shared_count = list->shared_count, i; 4408 4409 for (i = 0; i < shared_count; ++i) { 4410 struct dma_fence *fence = 4411 rcu_dereference(list->shared[i]); 4412 4413 args->busy |= busy_check_reader(fence); 4414 } 4415 } 4416 4417 if (args->busy && read_seqcount_retry(&obj->resv->seq, seq)) 4418 goto retry; 4419 4420 err = 0; 4421 out: 4422 rcu_read_unlock(); 4423 return err; 4424 } 4425 4426 int 4427 i915_gem_throttle_ioctl(struct drm_device *dev, void *data, 4428 struct drm_file *file_priv) 4429 { 4430 return i915_gem_ring_throttle(dev, file_priv); 4431 } 4432 4433 int 4434 i915_gem_madvise_ioctl(struct drm_device *dev, void *data, 4435 struct drm_file *file_priv) 4436 { 4437 struct drm_i915_private *dev_priv = to_i915(dev); 4438 struct drm_i915_gem_madvise *args = data; 4439 struct drm_i915_gem_object *obj; 4440 int err; 4441 4442 switch (args->madv) { 4443 case I915_MADV_DONTNEED: 4444 case I915_MADV_WILLNEED: 4445 break; 4446 default: 4447 return -EINVAL; 4448 } 4449 4450 obj = i915_gem_object_lookup(file_priv, args->handle); 4451 if (!obj) 4452 return -ENOENT; 4453 4454 err = mutex_lock_interruptible(&obj->mm.lock); 4455 if (err) 4456 goto out; 4457 4458 if (obj->mm.pages && 4459 i915_gem_object_is_tiled(obj) && 4460 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) { 4461 if (obj->mm.madv == I915_MADV_WILLNEED) { 4462 GEM_BUG_ON(!obj->mm.quirked); 4463 __i915_gem_object_unpin_pages(obj); 4464 obj->mm.quirked = false; 4465 } 4466 if (args->madv == I915_MADV_WILLNEED) { 4467 GEM_BUG_ON(obj->mm.quirked); 4468 __i915_gem_object_pin_pages(obj); 4469 obj->mm.quirked = true; 4470 } 4471 } 4472 4473 if (obj->mm.madv != __I915_MADV_PURGED) 4474 obj->mm.madv = args->madv; 4475 4476 /* if the object is no longer attached, discard its backing storage */ 4477 if (obj->mm.madv == I915_MADV_DONTNEED && !obj->mm.pages) 4478 i915_gem_object_truncate(obj); 4479 4480 args->retained = obj->mm.madv != __I915_MADV_PURGED; 4481 mutex_unlock(&obj->mm.lock); 4482 4483 out: 4484 i915_gem_object_put(obj); 4485 return err; 4486 } 4487 4488 static void 4489 frontbuffer_retire(struct i915_gem_active *active, 4490 struct drm_i915_gem_request *request) 4491 { 4492 struct drm_i915_gem_object *obj = 4493 container_of(active, typeof(*obj), frontbuffer_write); 4494 4495 intel_fb_obj_flush(obj, ORIGIN_CS); 4496 } 4497 4498 void i915_gem_object_init(struct drm_i915_gem_object *obj, 4499 const struct drm_i915_gem_object_ops *ops) 4500 { 4501 lockinit(&obj->mm.lock, "i9goml", 0, LK_CANRECURSE); 4502 4503 INIT_LIST_HEAD(&obj->global_link); 4504 
INIT_LIST_HEAD(&obj->userfault_link); 4505 INIT_LIST_HEAD(&obj->obj_exec_link); 4506 INIT_LIST_HEAD(&obj->vma_list); 4507 INIT_LIST_HEAD(&obj->batch_pool_link); 4508 4509 obj->ops = ops; 4510 4511 reservation_object_init(&obj->__builtin_resv); 4512 obj->resv = &obj->__builtin_resv; 4513 4514 obj->frontbuffer_ggtt_origin = ORIGIN_GTT; 4515 init_request_active(&obj->frontbuffer_write, frontbuffer_retire); 4516 4517 obj->mm.madv = I915_MADV_WILLNEED; 4518 INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN); 4519 lockinit(&obj->mm.get_page.lock, "i915ogpl", 0, LK_CANRECURSE); 4520 4521 i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size); 4522 } 4523 4524 static const struct drm_i915_gem_object_ops i915_gem_object_ops = { 4525 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | 4526 I915_GEM_OBJECT_IS_SHRINKABLE, 4527 4528 .get_pages = i915_gem_object_get_pages_gtt, 4529 .put_pages = i915_gem_object_put_pages_gtt, 4530 4531 .pwrite = i915_gem_object_pwrite_gtt, 4532 }; 4533 4534 struct drm_i915_gem_object * 4535 i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size) 4536 { 4537 struct drm_i915_gem_object *obj; 4538 #if 0 4539 struct address_space *mapping; 4540 #endif 4541 gfp_t mask; 4542 int ret; 4543 4544 /* There is a prevalence of the assumption that we fit the object's 4545 * page count inside a 32bit _signed_ variable. Let's document this and 4546 * catch if we ever need to fix it. In the meantime, if you do spot 4547 * such a local variable, please consider fixing! 4548 */ 4549 if (WARN_ON(size >> PAGE_SHIFT > INT_MAX)) 4550 return ERR_PTR(-E2BIG); 4551 4552 if (overflows_type(size, obj->base.size)) 4553 return ERR_PTR(-E2BIG); 4554 4555 obj = i915_gem_object_alloc(dev_priv); 4556 if (obj == NULL) 4557 return ERR_PTR(-ENOMEM); 4558 4559 ret = drm_gem_object_init(&dev_priv->drm, &obj->base, size); 4560 if (ret) 4561 goto fail; 4562 4563 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE; 4564 if (IS_I965GM(dev_priv) || IS_I965G(dev_priv)) { 4565 /* 965gm cannot relocate objects above 4GiB. */ 4566 mask &= ~__GFP_HIGHMEM; 4567 mask |= __GFP_DMA32; 4568 } 4569 4570 #if 0 4571 mapping = obj->base.filp->f_mapping; 4572 mapping_set_gfp_mask(mapping, mask); 4573 #endif 4574 4575 i915_gem_object_init(obj, &i915_gem_object_ops); 4576 4577 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 4578 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 4579 4580 if (HAS_LLC(dev_priv)) { 4581 /* On some devices, we can have the GPU use the LLC (the CPU 4582 * cache) for about a 10% performance improvement 4583 * compared to uncached. Graphics requests other than 4584 * display scanout are coherent with the CPU in 4585 * accessing this cache. This means in this mode we 4586 * don't need to clflush on the CPU side, and on the 4587 * GPU side we only need to flush internal caches to 4588 * get data visible to the CPU. 4589 * 4590 * However, we maintain the display planes as UC, and so 4591 * need to rebind when first used as such. 4592 */ 4593 obj->cache_level = I915_CACHE_LLC; 4594 } else 4595 obj->cache_level = I915_CACHE_NONE; 4596 4597 trace_i915_gem_object_create(obj); 4598 4599 return obj; 4600 4601 fail: 4602 i915_gem_object_free(obj); 4603 return ERR_PTR(ret); 4604 } 4605 4606 static bool discard_backing_storage(struct drm_i915_gem_object *obj) 4607 { 4608 /* If we are the last user of the backing storage (be it shmemfs 4609 * pages or stolen etc), we know that the pages are going to be 4610 * immediately released. In this case, we can then skip copying 4611 * back the contents from the GPU. 
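	 * On DragonFly we cannot cheaply sample the file reference count
	 * (see the #if 0 block at the end of this function), so the
	 * fallback errs on the side of keeping the contents around.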
4612 */ 4613 4614 if (obj->mm.madv != I915_MADV_WILLNEED) 4615 return false; 4616 4617 if (obj->base.filp == NULL) 4618 return true; 4619 4620 /* At first glance, this looks racy, but then again so would be 4621 * userspace racing mmap against close. However, the first external 4622 * reference to the filp can only be obtained through the 4623 * i915_gem_mmap_ioctl() which safeguards us against the user 4624 * acquiring such a reference whilst we are in the middle of 4625 * freeing the object. 4626 */ 4627 #if 0 4628 return atomic_long_read(&obj->base.filp->f_count) == 1; 4629 #else 4630 return false; 4631 #endif 4632 } 4633 4634 static void __i915_gem_free_objects(struct drm_i915_private *i915, 4635 struct llist_node *freed) 4636 { 4637 struct drm_i915_gem_object *obj, *on; 4638 4639 mutex_lock(&i915->drm.struct_mutex); 4640 intel_runtime_pm_get(i915); 4641 llist_for_each_entry_safe(obj, on, freed, freed) { 4642 struct i915_vma *vma, *vn; 4643 4644 trace_i915_gem_object_destroy(obj); 4645 4646 GEM_BUG_ON(i915_gem_object_is_active(obj)); 4647 list_for_each_entry_safe(vma, vn, 4648 &obj->vma_list, obj_link) { 4649 GEM_BUG_ON(!i915_vma_is_ggtt(vma)); 4650 GEM_BUG_ON(i915_vma_is_active(vma)); 4651 vma->flags &= ~I915_VMA_PIN_MASK; 4652 i915_vma_close(vma); 4653 } 4654 GEM_BUG_ON(!list_empty(&obj->vma_list)); 4655 GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree)); 4656 4657 list_del(&obj->global_link); 4658 } 4659 intel_runtime_pm_put(i915); 4660 mutex_unlock(&i915->drm.struct_mutex); 4661 4662 llist_for_each_entry_safe(obj, on, freed, freed) { 4663 GEM_BUG_ON(obj->bind_count); 4664 GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits)); 4665 4666 if (obj->ops->release) 4667 obj->ops->release(obj); 4668 4669 #if 0 4670 if (WARN_ON(i915_gem_object_has_pinned_pages(obj))) 4671 #else 4672 if (i915_gem_object_has_pinned_pages(obj)) 4673 #endif 4674 atomic_set(&obj->mm.pages_pin_count, 0); 4675 __i915_gem_object_put_pages(obj, I915_MM_NORMAL); 4676 GEM_BUG_ON(obj->mm.pages); 4677 4678 if (obj->base.import_attach) 4679 drm_prime_gem_destroy(&obj->base, NULL); 4680 4681 reservation_object_fini(&obj->__builtin_resv); 4682 drm_gem_object_release(&obj->base); 4683 i915_gem_info_remove_obj(i915, obj->base.size); 4684 4685 kfree(obj->bit_17); 4686 i915_gem_object_free(obj); 4687 } 4688 } 4689 4690 static void i915_gem_flush_free_objects(struct drm_i915_private *i915) 4691 { 4692 struct llist_node *freed; 4693 4694 freed = llist_del_all(&i915->mm.free_list); 4695 if (unlikely(freed)) 4696 __i915_gem_free_objects(i915, freed); 4697 } 4698 4699 static void __i915_gem_free_work(struct work_struct *work) 4700 { 4701 struct drm_i915_private *i915 = 4702 container_of(work, struct drm_i915_private, mm.free_work); 4703 struct llist_node *freed; 4704 4705 /* All file-owned VMA should have been released by this point through 4706 * i915_gem_close_object(), or earlier by i915_gem_context_close(). 4707 * However, the object may also be bound into the global GTT (e.g. 4708 * older GPUs without per-process support, or for direct access through 4709 * the GTT either for the user or for scanout). Those VMA still need to 4710 * unbound now. 
4711 */ 4712 4713 while ((freed = llist_del_all(&i915->mm.free_list))) 4714 __i915_gem_free_objects(i915, freed); 4715 } 4716 4717 static void __i915_gem_free_object_rcu(struct rcu_head *head) 4718 { 4719 struct drm_i915_gem_object *obj = 4720 container_of(head, typeof(*obj), rcu); 4721 struct drm_i915_private *i915 = to_i915(obj->base.dev); 4722 4723 /* We can't simply use call_rcu() from i915_gem_free_object() 4724 * as we need to block whilst unbinding, and the call_rcu 4725 * task may be called from softirq context. So we take a 4726 * detour through a worker. 4727 */ 4728 if (llist_add(&obj->freed, &i915->mm.free_list)) 4729 schedule_work(&i915->mm.free_work); 4730 } 4731 4732 void i915_gem_free_object(struct drm_gem_object *gem_obj) 4733 { 4734 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); 4735 4736 if (obj->mm.quirked) 4737 __i915_gem_object_unpin_pages(obj); 4738 4739 if (discard_backing_storage(obj)) 4740 obj->mm.madv = I915_MADV_DONTNEED; 4741 4742 /* Before we free the object, make sure any pure RCU-only 4743 * read-side critical sections are complete, e.g. 4744 * i915_gem_busy_ioctl(). For the corresponding synchronized 4745 * lookup see i915_gem_object_lookup_rcu(). 4746 */ 4747 call_rcu(&obj->rcu, __i915_gem_free_object_rcu); 4748 } 4749 4750 void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj) 4751 { 4752 lockdep_assert_held(&obj->base.dev->struct_mutex); 4753 4754 GEM_BUG_ON(i915_gem_object_has_active_reference(obj)); 4755 if (i915_gem_object_is_active(obj)) 4756 i915_gem_object_set_active_reference(obj); 4757 else 4758 i915_gem_object_put(obj); 4759 } 4760 4761 static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv) 4762 { 4763 struct intel_engine_cs *engine; 4764 enum intel_engine_id id; 4765 4766 for_each_engine(engine, dev_priv, id) 4767 GEM_BUG_ON(engine->last_retired_context && 4768 !i915_gem_context_is_kernel(engine->last_retired_context)); 4769 } 4770 4771 void i915_gem_sanitize(struct drm_i915_private *i915) 4772 { 4773 /* 4774 * If we inherit context state from the BIOS or earlier occupants 4775 * of the GPU, the GPU may be in an inconsistent state when we 4776 * try to take over. The only way to remove the earlier state 4777 * is by resetting. However, resetting on earlier gen is tricky as 4778 * it may impact the display and we are uncertain about the stability 4779 * of the reset, so we only reset recent machines with logical 4780 * context support (that must be reset to remove any stray contexts). 4781 */ 4782 if (HAS_HW_CONTEXTS(i915)) { 4783 int reset = intel_gpu_reset(i915, ALL_ENGINES); 4784 WARN_ON(reset && reset != -ENODEV); 4785 } 4786 } 4787 4788 int i915_gem_suspend(struct drm_i915_private *dev_priv) 4789 { 4790 struct drm_device *dev = &dev_priv->drm; 4791 int ret; 4792 4793 intel_runtime_pm_get(dev_priv); 4794 intel_suspend_gt_powersave(dev_priv); 4795 4796 mutex_lock(&dev->struct_mutex); 4797 4798 /* We have to flush all the executing contexts to main memory so 4799 * that they can saved in the hibernation image. To ensure the last 4800 * context image is coherent, we have to switch away from it. That 4801 * leaves the dev_priv->kernel_context still active when 4802 * we actually suspend, and its image in memory may not match the GPU 4803 * state. Fortunately, the kernel_context is disposable and we do 4804 * not rely on its state. 
4805 */ 4806 ret = i915_gem_switch_to_kernel_context(dev_priv); 4807 if (ret) 4808 goto err_unlock; 4809 4810 ret = i915_gem_wait_for_idle(dev_priv, 4811 I915_WAIT_INTERRUPTIBLE | 4812 I915_WAIT_LOCKED); 4813 if (ret) 4814 goto err_unlock; 4815 4816 assert_kernel_context_is_current(dev_priv); 4817 i915_gem_context_lost(dev_priv); 4818 mutex_unlock(&dev->struct_mutex); 4819 4820 intel_guc_suspend(dev_priv); 4821 4822 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); 4823 cancel_delayed_work_sync(&dev_priv->gt.retire_work); 4824 4825 /* As the idle_work is rearming if it detects a race, play safe and 4826 * repeat the flush until it is definitely idle. 4827 */ 4828 while (flush_delayed_work(&dev_priv->gt.idle_work)) 4829 ; 4830 4831 i915_gem_drain_freed_objects(dev_priv); 4832 4833 /* Assert that we sucessfully flushed all the work and 4834 * reset the GPU back to its idle, low power state. 4835 */ 4836 WARN_ON(dev_priv->gt.awake); 4837 WARN_ON(!intel_engines_are_idle(dev_priv)); 4838 4839 /* 4840 * Neither the BIOS, ourselves or any other kernel 4841 * expects the system to be in execlists mode on startup, 4842 * so we need to reset the GPU back to legacy mode. And the only 4843 * known way to disable logical contexts is through a GPU reset. 4844 * 4845 * So in order to leave the system in a known default configuration, 4846 * always reset the GPU upon unload and suspend. Afterwards we then 4847 * clean up the GEM state tracking, flushing off the requests and 4848 * leaving the system in a known idle state. 4849 * 4850 * Note that is of the upmost importance that the GPU is idle and 4851 * all stray writes are flushed *before* we dismantle the backing 4852 * storage for the pinned objects. 4853 * 4854 * However, since we are uncertain that resetting the GPU on older 4855 * machines is a good idea, we don't - just in case it leaves the 4856 * machine in an unusable condition. 4857 */ 4858 i915_gem_sanitize(dev_priv); 4859 goto out_rpm_put; 4860 4861 err_unlock: 4862 mutex_unlock(&dev->struct_mutex); 4863 out_rpm_put: 4864 intel_runtime_pm_put(dev_priv); 4865 return ret; 4866 } 4867 4868 void i915_gem_resume(struct drm_i915_private *dev_priv) 4869 { 4870 struct drm_device *dev = &dev_priv->drm; 4871 4872 WARN_ON(dev_priv->gt.awake); 4873 4874 mutex_lock(&dev->struct_mutex); 4875 i915_gem_restore_gtt_mappings(dev_priv); 4876 4877 /* As we didn't flush the kernel context before suspend, we cannot 4878 * guarantee that the context image is complete. So let's just reset 4879 * it and start again. 
4880 */ 4881 dev_priv->gt.resume(dev_priv); 4882 4883 mutex_unlock(&dev->struct_mutex); 4884 } 4885 4886 void i915_gem_init_swizzling(struct drm_i915_private *dev_priv) 4887 { 4888 if (INTEL_GEN(dev_priv) < 5 || 4889 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE) 4890 return; 4891 4892 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | 4893 DISP_TILE_SURFACE_SWIZZLING); 4894 4895 if (IS_GEN5(dev_priv)) 4896 return; 4897 4898 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL); 4899 if (IS_GEN6(dev_priv)) 4900 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB)); 4901 else if (IS_GEN7(dev_priv)) 4902 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB)); 4903 else if (IS_GEN8(dev_priv)) 4904 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW)); 4905 else 4906 BUG(); 4907 } 4908 4909 static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base) 4910 { 4911 I915_WRITE(RING_CTL(base), 0); 4912 I915_WRITE(RING_HEAD(base), 0); 4913 I915_WRITE(RING_TAIL(base), 0); 4914 I915_WRITE(RING_START(base), 0); 4915 } 4916 4917 static void init_unused_rings(struct drm_i915_private *dev_priv) 4918 { 4919 if (IS_I830(dev_priv)) { 4920 init_unused_ring(dev_priv, PRB1_BASE); 4921 init_unused_ring(dev_priv, SRB0_BASE); 4922 init_unused_ring(dev_priv, SRB1_BASE); 4923 init_unused_ring(dev_priv, SRB2_BASE); 4924 init_unused_ring(dev_priv, SRB3_BASE); 4925 } else if (IS_GEN2(dev_priv)) { 4926 init_unused_ring(dev_priv, SRB0_BASE); 4927 init_unused_ring(dev_priv, SRB1_BASE); 4928 } else if (IS_GEN3(dev_priv)) { 4929 init_unused_ring(dev_priv, PRB1_BASE); 4930 init_unused_ring(dev_priv, PRB2_BASE); 4931 } 4932 } 4933 4934 static int __i915_gem_restart_engines(void *data) 4935 { 4936 struct drm_i915_private *i915 = data; 4937 struct intel_engine_cs *engine; 4938 enum intel_engine_id id; 4939 int err; 4940 4941 for_each_engine(engine, i915, id) { 4942 err = engine->init_hw(engine); 4943 if (err) 4944 return err; 4945 } 4946 4947 return 0; 4948 } 4949 4950 int i915_gem_init_hw(struct drm_i915_private *dev_priv) 4951 { 4952 int ret; 4953 4954 dev_priv->gt.last_init_time = ktime_get(); 4955 4956 /* Double layer security blanket, see i915_gem_init() */ 4957 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 4958 4959 if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9) 4960 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); 4961 4962 if (IS_HASWELL(dev_priv)) 4963 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ? 4964 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED); 4965 4966 if (HAS_PCH_NOP(dev_priv)) { 4967 if (IS_IVYBRIDGE(dev_priv)) { 4968 u32 temp = I915_READ(GEN7_MSG_CTL); 4969 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK); 4970 I915_WRITE(GEN7_MSG_CTL, temp); 4971 } else if (INTEL_GEN(dev_priv) >= 7) { 4972 u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT); 4973 temp &= ~RESET_PCH_HANDSHAKE_ENABLE; 4974 I915_WRITE(HSW_NDE_RSTWRN_OPT, temp); 4975 } 4976 } 4977 4978 i915_gem_init_swizzling(dev_priv); 4979 4980 /* 4981 * At least 830 can leave some of the unused rings 4982 * "active" (ie. head != tail) after resume which 4983 * will prevent c3 entry. Makes sure all unused rings 4984 * are totally idle. 
4985 */ 4986 init_unused_rings(dev_priv); 4987 4988 BUG_ON(!dev_priv->kernel_context); 4989 4990 ret = i915_ppgtt_init_hw(dev_priv); 4991 if (ret) { 4992 DRM_ERROR("PPGTT enable HW failed %d\n", ret); 4993 goto out; 4994 } 4995 4996 /* Need to do basic initialisation of all rings first: */ 4997 ret = __i915_gem_restart_engines(dev_priv); 4998 if (ret) 4999 goto out; 5000 5001 intel_mocs_init_l3cc_table(dev_priv); 5002 5003 /* We can't enable contexts until all firmware is loaded */ 5004 ret = intel_uc_init_hw(dev_priv); 5005 if (ret) 5006 goto out; 5007 5008 out: 5009 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5010 return ret; 5011 } 5012 5013 bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value) 5014 { 5015 if (INTEL_INFO(dev_priv)->gen < 6) 5016 return false; 5017 5018 /* TODO: make semaphores and Execlists play nicely together */ 5019 if (i915.enable_execlists) 5020 return false; 5021 5022 if (value >= 0) 5023 return value; 5024 5025 #ifdef CONFIG_INTEL_IOMMU 5026 /* Enable semaphores on SNB when IO remapping is off */ 5027 if (INTEL_INFO(dev_priv)->gen == 6 && intel_iommu_gfx_mapped) 5028 return false; 5029 #endif 5030 5031 return true; 5032 } 5033 5034 int i915_gem_init(struct drm_i915_private *dev_priv) 5035 { 5036 int ret; 5037 5038 mutex_lock(&dev_priv->drm.struct_mutex); 5039 5040 i915_gem_clflush_init(dev_priv); 5041 5042 if (!i915.enable_execlists) { 5043 dev_priv->gt.resume = intel_legacy_submission_resume; 5044 dev_priv->gt.cleanup_engine = intel_engine_cleanup; 5045 } else { 5046 dev_priv->gt.resume = intel_lr_context_resume; 5047 dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup; 5048 } 5049 5050 /* This is just a security blanket to placate dragons. 5051 * On some systems, we very sporadically observe that the first TLBs 5052 * used by the CS may be stale, despite us poking the TLB reset. If 5053 * we hold the forcewake during initialisation these problems 5054 * just magically go away. 5055 */ 5056 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 5057 5058 i915_gem_init_userptr(dev_priv); 5059 5060 ret = i915_gem_init_ggtt(dev_priv); 5061 if (ret) 5062 goto out_unlock; 5063 5064 ret = i915_gem_context_init(dev_priv); 5065 if (ret) 5066 goto out_unlock; 5067 5068 ret = intel_engines_init(dev_priv); 5069 if (ret) 5070 goto out_unlock; 5071 5072 ret = i915_gem_init_hw(dev_priv); 5073 if (ret == -EIO) { 5074 /* Allow engine initialisation to fail by marking the GPU as 5075 * wedged. But we only want to do this where the GPU is angry, 5076 * for all other failure, such as an allocation failure, bail. 
5077 */ 5078 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n"); 5079 i915_gem_set_wedged(dev_priv); 5080 ret = 0; 5081 } 5082 5083 out_unlock: 5084 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5085 mutex_unlock(&dev_priv->drm.struct_mutex); 5086 5087 return ret; 5088 } 5089 5090 void i915_gem_init_mmio(struct drm_i915_private *i915) 5091 { 5092 i915_gem_sanitize(i915); 5093 } 5094 5095 void 5096 i915_gem_cleanup_engines(struct drm_i915_private *dev_priv) 5097 { 5098 struct intel_engine_cs *engine; 5099 enum intel_engine_id id; 5100 5101 for_each_engine(engine, dev_priv, id) 5102 dev_priv->gt.cleanup_engine(engine); 5103 } 5104 5105 void 5106 i915_gem_load_init_fences(struct drm_i915_private *dev_priv) 5107 { 5108 int i; 5109 5110 if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) && 5111 !IS_CHERRYVIEW(dev_priv)) 5112 dev_priv->num_fence_regs = 32; 5113 else if (INTEL_INFO(dev_priv)->gen >= 4 || 5114 IS_I945G(dev_priv) || IS_I945GM(dev_priv) || 5115 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) 5116 dev_priv->num_fence_regs = 16; 5117 else 5118 dev_priv->num_fence_regs = 8; 5119 5120 if (intel_vgpu_active(dev_priv)) 5121 dev_priv->num_fence_regs = 5122 I915_READ(vgtif_reg(avail_rs.fence_num)); 5123 5124 /* Initialize fence registers to zero */ 5125 for (i = 0; i < dev_priv->num_fence_regs; i++) { 5126 struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i]; 5127 5128 fence->i915 = dev_priv; 5129 fence->id = i; 5130 list_add_tail(&fence->link, &dev_priv->mm.fence_list); 5131 } 5132 i915_gem_restore_fences(dev_priv); 5133 5134 i915_gem_detect_bit_6_swizzle(dev_priv); 5135 } 5136 5137 int 5138 i915_gem_load_init(struct drm_i915_private *dev_priv) 5139 { 5140 int err = -ENOMEM; 5141 5142 dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN); 5143 if (!dev_priv->objects) 5144 goto err_out; 5145 5146 dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN); 5147 if (!dev_priv->vmas) 5148 goto err_objects; 5149 5150 dev_priv->requests = KMEM_CACHE(drm_i915_gem_request, 5151 SLAB_HWCACHE_ALIGN | 5152 SLAB_RECLAIM_ACCOUNT | 5153 SLAB_TYPESAFE_BY_RCU); 5154 if (!dev_priv->requests) 5155 goto err_vmas; 5156 5157 dev_priv->dependencies = KMEM_CACHE(i915_dependency, 5158 SLAB_HWCACHE_ALIGN | 5159 SLAB_RECLAIM_ACCOUNT); 5160 if (!dev_priv->dependencies) 5161 goto err_requests; 5162 5163 mutex_lock(&dev_priv->drm.struct_mutex); 5164 INIT_LIST_HEAD(&dev_priv->gt.timelines); 5165 err = i915_gem_timeline_init__global(dev_priv); 5166 mutex_unlock(&dev_priv->drm.struct_mutex); 5167 if (err) 5168 goto err_dependencies; 5169 5170 INIT_LIST_HEAD(&dev_priv->context_list); 5171 INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work); 5172 init_llist_head(&dev_priv->mm.free_list); 5173 INIT_LIST_HEAD(&dev_priv->mm.unbound_list); 5174 INIT_LIST_HEAD(&dev_priv->mm.bound_list); 5175 INIT_LIST_HEAD(&dev_priv->mm.fence_list); 5176 INIT_LIST_HEAD(&dev_priv->mm.userfault_list); 5177 INIT_DELAYED_WORK(&dev_priv->gt.retire_work, 5178 i915_gem_retire_work_handler); 5179 INIT_DELAYED_WORK(&dev_priv->gt.idle_work, 5180 i915_gem_idle_work_handler); 5181 init_waitqueue_head(&dev_priv->gpu_error.wait_queue); 5182 init_waitqueue_head(&dev_priv->gpu_error.reset_queue); 5183 5184 init_waitqueue_head(&dev_priv->pending_flip_queue); 5185 5186 dev_priv->mm.interruptible = true; 5187 5188 atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0); 5189 5190 lockinit(&dev_priv->fb_tracking.lock, "drmftl", 0, 0); 5191 5192 return 0; 5193 5194 err_dependencies: 5195 
kmem_cache_destroy(dev_priv->dependencies); 5196 err_requests: 5197 kmem_cache_destroy(dev_priv->requests); 5198 err_vmas: 5199 kmem_cache_destroy(dev_priv->vmas); 5200 err_objects: 5201 kmem_cache_destroy(dev_priv->objects); 5202 err_out: 5203 return err; 5204 } 5205 5206 void i915_gem_load_cleanup(struct drm_i915_private *dev_priv) 5207 { 5208 i915_gem_drain_freed_objects(dev_priv); 5209 WARN_ON(!llist_empty(&dev_priv->mm.free_list)); 5210 WARN_ON(dev_priv->mm.object_count); 5211 5212 mutex_lock(&dev_priv->drm.struct_mutex); 5213 i915_gem_timeline_fini(&dev_priv->gt.global_timeline); 5214 WARN_ON(!list_empty(&dev_priv->gt.timelines)); 5215 mutex_unlock(&dev_priv->drm.struct_mutex); 5216 5217 kmem_cache_destroy(dev_priv->dependencies); 5218 kmem_cache_destroy(dev_priv->requests); 5219 kmem_cache_destroy(dev_priv->vmas); 5220 kmem_cache_destroy(dev_priv->objects); 5221 5222 /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */ 5223 rcu_barrier(); 5224 } 5225 5226 int i915_gem_freeze(struct drm_i915_private *dev_priv) 5227 { 5228 mutex_lock(&dev_priv->drm.struct_mutex); 5229 i915_gem_shrink_all(dev_priv); 5230 mutex_unlock(&dev_priv->drm.struct_mutex); 5231 5232 return 0; 5233 } 5234 5235 int i915_gem_freeze_late(struct drm_i915_private *dev_priv) 5236 { 5237 struct drm_i915_gem_object *obj; 5238 struct list_head *phases[] = { 5239 &dev_priv->mm.unbound_list, 5240 &dev_priv->mm.bound_list, 5241 NULL 5242 }, **p; 5243 5244 /* Called just before we write the hibernation image. 5245 * 5246 * We need to update the domain tracking to reflect that the CPU 5247 * will be accessing all the pages to create and restore from the 5248 * hibernation, and so upon restoration those pages will be in the 5249 * CPU domain. 5250 * 5251 * To make sure the hibernation image contains the latest state, 5252 * we update that state just before writing out the image. 5253 * 5254 * To try and reduce the hibernation image, we manually shrink 5255 * the objects as well. 5256 */ 5257 5258 mutex_lock(&dev_priv->drm.struct_mutex); 5259 i915_gem_shrink(dev_priv, -1UL, I915_SHRINK_UNBOUND); 5260 5261 for (p = phases; *p; p++) { 5262 list_for_each_entry(obj, *p, global_link) { 5263 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 5264 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 5265 } 5266 } 5267 mutex_unlock(&dev_priv->drm.struct_mutex); 5268 5269 return 0; 5270 } 5271 5272 void i915_gem_release(struct drm_device *dev, struct drm_file *file) 5273 { 5274 struct drm_i915_file_private *file_priv = file->driver_priv; 5275 struct drm_i915_gem_request *request; 5276 5277 /* Clean up our request list when the client is going away, so that 5278 * later retire_requests won't dereference our soon-to-be-gone 5279 * file_priv. 
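	 * Only the request->file_priv back-pointers are cleared here; the
	 * requests themselves stay on their timelines and retire as usual.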
5280 */ 5281 lockmgr(&file_priv->mm.lock, LK_EXCLUSIVE); 5282 list_for_each_entry(request, &file_priv->mm.request_list, client_link) 5283 request->file_priv = NULL; 5284 lockmgr(&file_priv->mm.lock, LK_RELEASE); 5285 5286 if (!list_empty(&file_priv->rps.link)) { 5287 lockmgr(&to_i915(dev)->rps.client_lock, LK_EXCLUSIVE); 5288 list_del(&file_priv->rps.link); 5289 lockmgr(&to_i915(dev)->rps.client_lock, LK_RELEASE); 5290 } 5291 } 5292 5293 #ifdef __DragonFly__ 5294 int 5295 i915_gem_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, 5296 vm_ooffset_t foff, struct ucred *cred, u_short *color) 5297 { 5298 *color = 0; /* XXXKIB */ 5299 return (0); 5300 } 5301 5302 void 5303 i915_gem_pager_dtor(void *handle) 5304 { 5305 struct drm_gem_object *obj = handle; 5306 struct drm_device *dev = obj->dev; 5307 5308 drm_gem_free_mmap_offset(obj); 5309 mutex_lock(&dev->struct_mutex); 5310 i915_gem_release_mmap(to_intel_bo(obj)); 5311 drm_gem_object_unreference(obj); 5312 mutex_unlock(&dev->struct_mutex); 5313 } 5314 #endif 5315 5316 int i915_gem_open(struct drm_device *dev, struct drm_file *file) 5317 { 5318 struct drm_i915_file_private *file_priv; 5319 int ret; 5320 5321 DRM_DEBUG("\n"); 5322 5323 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL); 5324 if (!file_priv) 5325 return -ENOMEM; 5326 5327 file->driver_priv = file_priv; 5328 file_priv->dev_priv = to_i915(dev); 5329 file_priv->file = file; 5330 INIT_LIST_HEAD(&file_priv->rps.link); 5331 5332 lockinit(&file_priv->mm.lock, "i915_priv", 0, 0); 5333 INIT_LIST_HEAD(&file_priv->mm.request_list); 5334 5335 file_priv->bsd_engine = -1; 5336 5337 ret = i915_gem_context_open(dev, file); 5338 if (ret) 5339 kfree(file_priv); 5340 5341 return ret; 5342 } 5343 5344 /** 5345 * i915_gem_track_fb - update frontbuffer tracking 5346 * @old: current GEM buffer for the frontbuffer slots 5347 * @new: new GEM buffer for the frontbuffer slots 5348 * @frontbuffer_bits: bitmask of frontbuffer slots 5349 * 5350 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them 5351 * from @old and setting them in @new. Both @old and @new can be NULL. 5352 */ 5353 void i915_gem_track_fb(struct drm_i915_gem_object *old, 5354 struct drm_i915_gem_object *new, 5355 unsigned frontbuffer_bits) 5356 { 5357 /* Control of individual bits within the mask are guarded by 5358 * the owning plane->mutex, i.e. we can never see concurrent 5359 * manipulation of individual bits. But since the bitfield as a whole 5360 * is updated using RMW, we need to use atomics in order to update 5361 * the bits. 
5362 */ 5363 BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 5364 sizeof(atomic_t) * BITS_PER_BYTE); 5365 5366 if (old) { 5367 WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits)); 5368 atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits); 5369 } 5370 5371 if (new) { 5372 WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits); 5373 atomic_or(frontbuffer_bits, &new->frontbuffer_bits); 5374 } 5375 } 5376 5377 /* XXX */ 5378 static int 5379 pagecache_write_begin(struct vm_object *obj, struct address_space *mapping, 5380 loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) 5381 { 5382 *pagep = shmem_read_mapping_page(obj, OFF_TO_IDX(pos)); 5383 5384 return 0; 5385 } 5386 5387 #if 0 5388 static int 5389 pagecache_write_end(struct file *, struct address_space *mapping, 5390 loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) 5391 { 5392 } 5393 #endif 5394 5395 /* Allocate a new GEM object and fill it with the supplied data */ 5396 struct drm_i915_gem_object * 5397 i915_gem_object_create_from_data(struct drm_i915_private *dev_priv, 5398 const void *data, size_t size) 5399 { 5400 struct drm_i915_gem_object *obj; 5401 struct vm_object *file; 5402 size_t offset; 5403 int err; 5404 5405 obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE)); 5406 if (IS_ERR(obj)) 5407 return obj; 5408 5409 GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU); 5410 5411 file = obj->base.filp; 5412 offset = 0; 5413 do { 5414 unsigned int len = min_t(typeof(size), size, PAGE_SIZE); 5415 struct page *page; 5416 void *pgdata, *vaddr; 5417 5418 err = pagecache_write_begin(file, NULL, 5419 offset, len, 0, 5420 &page, &pgdata); 5421 if (err < 0) 5422 goto fail; 5423 5424 vaddr = kmap(page); 5425 memcpy(vaddr, data, len); 5426 kunmap(page); 5427 5428 #ifndef __DragonFly__ 5429 err = pagecache_write_end(file, file->f_mapping, 5430 offset, len, len, 5431 page, pgdata); 5432 if (err < 0) 5433 goto fail; 5434 #else 5435 put_page(page); 5436 #endif 5437 5438 size -= len; 5439 data += len; 5440 offset += len; 5441 } while (size); 5442 5443 return obj; 5444 5445 fail: 5446 i915_gem_object_put(obj); 5447 return ERR_PTR(err); 5448 } 5449 5450 struct scatterlist * 5451 i915_gem_object_get_sg(struct drm_i915_gem_object *obj, 5452 unsigned int n, 5453 unsigned int *offset) 5454 { 5455 struct i915_gem_object_page_iter *iter = &obj->mm.get_page; 5456 struct scatterlist *sg; 5457 unsigned int idx, count; 5458 5459 might_sleep(); 5460 GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT); 5461 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); 5462 5463 /* As we iterate forward through the sg, we record each entry in a 5464 * radixtree for quick repeated (backwards) lookups. If we have seen 5465 * this index previously, we will have an entry for it. 5466 * 5467 * Initial lookup is O(N), but this is amortized to O(1) for 5468 * sequential page access (where each new request is consecutive 5469 * to the previous one). Repeated lookups are O(lg(obj->base.size)), 5470 * i.e. O(1) with a large constant! 5471 */ 5472 if (n < READ_ONCE(iter->sg_idx)) 5473 goto lookup; 5474 5475 mutex_lock(&iter->lock); 5476 5477 /* We prefer to reuse the last sg so that repeated lookup of this 5478 * (or the subsequent) sg are fast - comparing against the last 5479 * sg is faster than going through the radixtree. 
5480 */ 5481 5482 sg = iter->sg_pos; 5483 idx = iter->sg_idx; 5484 count = __sg_page_count(sg); 5485 5486 while (idx + count <= n) { 5487 unsigned long exception, i; 5488 int ret; 5489 5490 /* If we cannot allocate and insert this entry, or the 5491 * individual pages from this range, cancel updating the 5492 * sg_idx so that on this lookup we are forced to linearly 5493 * scan onwards, but on future lookups we will try the 5494 * insertion again (in which case we need to be careful of 5495 * the error return reporting that we have already inserted 5496 * this index). 5497 */ 5498 ret = radix_tree_insert(&iter->radix, idx, sg); 5499 if (ret && ret != -EEXIST) 5500 goto scan; 5501 5502 exception = 5503 RADIX_TREE_EXCEPTIONAL_ENTRY | 5504 idx << RADIX_TREE_EXCEPTIONAL_SHIFT; 5505 for (i = 1; i < count; i++) { 5506 ret = radix_tree_insert(&iter->radix, idx + i, 5507 (void *)exception); 5508 if (ret && ret != -EEXIST) 5509 goto scan; 5510 } 5511 5512 idx += count; 5513 sg = ____sg_next(sg); 5514 count = __sg_page_count(sg); 5515 } 5516 5517 scan: 5518 iter->sg_pos = sg; 5519 iter->sg_idx = idx; 5520 5521 mutex_unlock(&iter->lock); 5522 5523 if (unlikely(n < idx)) /* insertion completed by another thread */ 5524 goto lookup; 5525 5526 /* In case we failed to insert the entry into the radixtree, we need 5527 * to look beyond the current sg. 5528 */ 5529 while (idx + count <= n) { 5530 idx += count; 5531 sg = ____sg_next(sg); 5532 count = __sg_page_count(sg); 5533 } 5534 5535 *offset = n - idx; 5536 return sg; 5537 5538 lookup: 5539 rcu_read_lock(); 5540 5541 sg = radix_tree_lookup(&iter->radix, n); 5542 GEM_BUG_ON(!sg); 5543 5544 /* If this index is in the middle of multi-page sg entry, 5545 * the radixtree will contain an exceptional entry that points 5546 * to the start of that range. We will return the pointer to 5547 * the base page and the offset of this page within the 5548 * sg entry's range. 5549 */ 5550 *offset = 0; 5551 if (unlikely(radix_tree_exception(sg))) { 5552 unsigned long base = 5553 (unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT; 5554 5555 sg = radix_tree_lookup(&iter->radix, base); 5556 GEM_BUG_ON(!sg); 5557 5558 *offset = n - base; 5559 } 5560 5561 rcu_read_unlock(); 5562 5563 return sg; 5564 } 5565 5566 struct page * 5567 i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n) 5568 { 5569 struct scatterlist *sg; 5570 unsigned int offset; 5571 5572 GEM_BUG_ON(!i915_gem_object_has_struct_page(obj)); 5573 5574 sg = i915_gem_object_get_sg(obj, n, &offset); 5575 return nth_page(sg_page(sg), offset); 5576 } 5577 5578 /* Like i915_gem_object_get_page(), but mark the returned page dirty */ 5579 struct page * 5580 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, 5581 unsigned int n) 5582 { 5583 struct page *page; 5584 5585 page = i915_gem_object_get_page(obj, n); 5586 if (!obj->mm.dirty) 5587 set_page_dirty(page); 5588 5589 return page; 5590 } 5591 5592 dma_addr_t 5593 i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, 5594 unsigned long n) 5595 { 5596 struct scatterlist *sg; 5597 unsigned int offset; 5598 5599 sg = i915_gem_object_get_sg(obj, n, &offset); 5600 return sg_dma_address(sg) + (offset << PAGE_SHIFT); 5601 } 5602 5603 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 5604 #include "selftests/scatterlist.c" 5605 #include "selftests/mock_gem_device.c" 5606 #include "selftests/huge_gem_object.c" 5607 #include "selftests/i915_gem_object.c" 5608 #include "selftests/i915_gem_coherency.c" 5609 #endif 5610